Chat Completions API OpenAI-compatible interface for MiniMax-M2 chat.
The chat completions endpoint mirrors OpenAI’s v1/chat/completions contract. Use it to stream or fetch MiniMax-M2 responses with minimal code changes.
Endpoint: POST https://minimax-m2.com/api/v1/chat/completions
Auth: Authorization: Bearer <api-key>
Model: MiniMax-M2
POST /api/v1/chat/completions HTTP/1.1
Host: minimax-m2.com
Authorization: Bearer sk-live-...
Content-Type: application/json

{
  "model": "MiniMax-M2",
  "messages": [
    { "role": "system", "content": "You are a precise financial analyst." },
    { "role": "user", "content": "Summarize Q4 revenue trends for APAC." }
  ],
  "stream": false,
  "reasoning_split": true
}
# Non-streaming chat completion with reasoning_split enabled.
# NOTE: no space after $MINIMAX_API_KEY — a trailing space inside the quoted
# header value would be sent as part of the bearer token and fail auth.
curl https://minimax-m2.com/api/v1/chat/completions \
  -H "content-type: application/json" \
  -H "authorization: Bearer $MINIMAX_API_KEY" \
  -d '{
    "model": "MiniMax-M2",
    "messages": [
      { "role": "system", "content": "You are a precise financial analyst." },
      { "role": "user", "content": "Summarize Q4 revenue trends for APAC." }
    ],
    "reasoning_split": true
  }'
// Fetch a non-streaming MiniMax-M2 chat completion and print the answer.
import fetch from 'node-fetch';

const response = await fetch('https://minimax-m2.com/api/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    // Read the key from the environment; never hard-code secrets.
    Authorization: `Bearer ${process.env.MINIMAX_API_KEY}`,
  },
  body: JSON.stringify({
    model: 'MiniMax-M2',
    messages: [
      { role: 'system', content: 'You are a precise financial analyst.' },
      { role: 'user', content: 'Summarize Q4 revenue trends for APAC.' },
    ],
    reasoning_split: true, // return reasoning separately from the final answer
  }),
});

// Fail loudly on HTTP errors instead of parsing an error body as a result.
if (!response.ok) {
  throw new Error(`MiniMax API error: ${response.status} ${response.statusText}`);
}

const data = await response.json();
console.log(data.choices[0].message?.content);
# Call MiniMax-M2 via the requests library; print reasoning, then the answer.
import requests

headers = {
    # No trailing space after the key — extra whitespace would corrupt the token.
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
}
payload = {
    "model": "MiniMax-M2",
    "messages": [
        {"role": "system", "content": "You are a precise financial analyst."},
        {"role": "user", "content": "Summarize Q4 revenue trends for APAC."},
    ],
    "reasoning_split": True,  # return reasoning separately from the final answer
}

resp = requests.post(
    "https://minimax-m2.com/api/v1/chat/completions",
    json=payload,
    headers=headers,
    timeout=60,  # avoid hanging indefinitely on a stalled connection
)
resp.raise_for_status()

# Parse the body once and reuse it instead of calling resp.json() twice.
message = resp.json()["choices"][0]["message"]
print(message["reasoning_details"][0]["text"])
print(message["content"])
# Same request through the OpenAI SDK: only base_url and api_key change.
from openai import OpenAI

client = OpenAI(
    base_url="https://minimax-m2.com/api/v1/",
    api_key="MINIMAX_API_KEY",  # placeholder — substitute your real key (ideally from the environment)
)

response = client.chat.completions.create(
    model="MiniMax-M2",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hi, how are you?"},
    ],
    # reasoning_split is a MiniMax extension, so it goes through extra_body.
    extra_body={"reasoning_split": True},
)

# Use real \n escapes — the garbled "\\ n" would print a literal backslash.
message = response.choices[0].message
print(f"Thinking:\n{message.reasoning_details[0]['text']}\n")
print(f"Text:\n{message.content}\n")
Set stream: true to receive Server-Sent Events (SSE). The data format matches OpenAI’s, enabling drop-in use of existing clients.
data: {"id":"chatcmpl-...","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"你好"}}],"model":"minimax-m2"}

data: {"id":"chatcmpl-...","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"你好"}}],"model":"minimax-m2"}

...

data: [DONE]
Responses include token usage in the OpenAI schema (usage.prompt_tokens, usage.completion_tokens). These values feed billing and are visible in the dashboard usage explorer.