Request body | Text input

Python

import os
import dashscope

# Endpoint for the Singapore region.
dashscope.base_http_api_url = 'https://dashscope-intl.aliyuncs.com/api/v1'

system_message = {'role': 'system', 'content': 'You are a helpful assistant.'}
user_message = {'role': 'user', 'content': 'Who are you?'}

response = dashscope.Generation.call(
    # If you have not configured an environment variable, replace the next line with your Model Studio API key: api_key="sk-xxx"
    # The Singapore/Virginia and Beijing regions use different API keys; see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
    api_key=os.getenv('DASHSCOPE_API_KEY'),
    # This example uses qwen-plus; any model listed at https://www.alibabacloud.com/help/zh/model-studio/getting-started/models works.
    model="qwen-plus",
    messages=[system_message, user_message],
    result_format='message',
)
print(response)
Java

// Use DashScope SDK V2.12.0 or later.
import java.util.Arrays;
import java.lang.System;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.utils.JsonUtils;
import com.alibaba.dashscope.protocol.Protocol;
public class Main {
    /**
     * Sends one chat request (system + user message) to the text-generation
     * service over HTTP and returns the raw generation result.
     */
    public static GenerationResult callWithMessage() throws ApiException, NoApiKeyException, InputRequiredException {
        // Endpoint for the Singapore region.
        Generation generation = new Generation(Protocol.HTTP.getValue(), "https://dashscope-intl.aliyuncs.com/api/v1");
        Message systemMessage = Message.builder()
                .role(Role.SYSTEM.getValue())
                .content("You are a helpful assistant.")
                .build();
        Message userMessage = Message.builder()
                .role(Role.USER.getValue())
                .content("Who are you?")
                .build();
        GenerationParam request = GenerationParam.builder()
                // If you have not configured an environment variable, replace the following line with your Model Studio API key: .apiKey("sk-xxx")
                // The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
                .apiKey(System.getenv("DASHSCOPE_API_KEY"))
                // This example uses qwen-plus. For other models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
                .model("qwen-plus")
                .messages(Arrays.asList(systemMessage, userMessage))
                .resultFormat(GenerationParam.ResultFormat.MESSAGE)
                .build();
        return generation.call(request);
    }

    public static void main(String[] args) {
        try {
            System.out.println(JsonUtils.toJson(callWithMessage()));
        } catch (ApiException | NoApiKeyException | InputRequiredException e) {
            // Use a logging framework to record the exception.
            System.err.println("An error occurred while calling the generation service: " + e.getMessage());
        }
        System.exit(0);
    }
}
PHP (HTTP)

<?php
$url = "https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/text-generation/generation";
// The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
$apiKey = getenv('DASHSCOPE_API_KEY');

// Request body: model name, conversation messages, and the result format.
$data = [
    // This example uses qwen-plus. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
    "model" => "qwen-plus",
    "input" => [
        "messages" => [
            [
                "role" => "system",
                "content" => "You are a helpful assistant."
            ],
            [
                "role" => "user",
                "content" => "Who are you?"
            ]
        ]
    ],
    "parameters" => [
        "result_format" => "message"
    ]
];
$jsonData = json_encode($data);

$ch = curl_init($url);
// Fix: CURLOPT_RETURNTRANSFER was set twice in the original sample; once is enough.
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_POST, true);
curl_setopt($ch, CURLOPT_POSTFIELDS, $jsonData);
curl_setopt($ch, CURLOPT_HTTPHEADER, [
    "Authorization: Bearer $apiKey",
    "Content-Type: application/json"
]);

$response = curl_exec($ch);
if ($response === false) {
    // Transport-level failure (DNS, TLS, timeout, ...): surface the cURL error
    // instead of silently reporting HTTP code 0 with an empty body.
    echo "cURL error: " . curl_error($ch);
} else {
    $httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);
    if ($httpCode == 200) {
        echo "Response: " . $response;
    } else {
        echo "Error: " . $httpCode . " - " . $response;
    }
}
curl_close($ch);
?>
Node.js (HTTP)

DashScope does not provide an SDK for Node.js. To make calls using the OpenAI Node.js SDK, see the OpenAI section in this topic.

import fetch from 'node-fetch';
// The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
const apiKey = process.env.DASHSCOPE_API_KEY;

// Request body: model name, conversation messages, and the result format.
const requestBody = {
  model: "qwen-plus", // This example uses qwen-plus. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
  input: {
    messages: [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Who are you?" }
    ]
  },
  parameters: {
    result_format: "message"
  }
};

// POST the request and print the parsed JSON response.
async function run() {
  try {
    const response = await fetch('https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/text-generation/generation', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${apiKey}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(requestBody)
    });
    const data = await response.json();
    console.log(JSON.stringify(data));
  } catch (error) {
    console.error('Error:', error);
  }
}

run();
C# (HTTP)

using System.Net.Http.Headers;
using System.Text;
class Program
{
    // One shared HttpClient for the process lifetime (avoids socket exhaustion).
    private static readonly HttpClient httpClient = new HttpClient();

    static async Task Main(string[] args)
    {
        // If you have not configured an environment variable, replace the following line with your Model Studio API key: string? apiKey = "sk-xxx";
        // The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
        string? apiKey = Environment.GetEnvironmentVariable("DASHSCOPE_API_KEY");
        if (string.IsNullOrEmpty(apiKey))
        {
            Console.WriteLine("The API key is not set. Make sure that the 'DASHSCOPE_API_KEY' environment variable is set.");
            return;
        }
        // Set the request URL and content.
        string url = "https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/text-generation/generation";
        // This example uses qwen-plus. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
        string jsonContent = @"{
            ""model"": ""qwen-plus"",
            ""input"": {
                ""messages"": [
                    {
                        ""role"": ""system"",
                        ""content"": ""You are a helpful assistant.""
                    },
                    {
                        ""role"": ""user"",
                        ""content"": ""Who are you?""
                    }
                ]
            },
            ""parameters"": {
                ""result_format"": ""message""
            }
        }";
        // Send the request and get the response.
        string result = await SendPostRequestAsync(url, jsonContent, apiKey);
        // Print the result.
        Console.WriteLine(result);
    }

    // Sends a single JSON POST and returns the response body, or an error string
    // with the status code on a non-success status.
    // Fix: the original mutated the shared httpClient.DefaultRequestHeaders on
    // every call, which accumulates duplicate Accept headers across calls and is
    // not thread-safe; headers now live on the per-request HttpRequestMessage.
    private static async Task<string> SendPostRequestAsync(string url, string jsonContent, string apiKey)
    {
        using (var request = new HttpRequestMessage(HttpMethod.Post, url))
        {
            request.Content = new StringContent(jsonContent, Encoding.UTF8, "application/json");
            // Set the request headers on this request only.
            request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
            request.Headers.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));
            // Send the request and get the response.
            HttpResponseMessage response = await httpClient.SendAsync(request);
            // Handle the response.
            if (response.IsSuccessStatusCode)
            {
                return await response.Content.ReadAsStringAsync();
            }
            return $"Request failed: {response.StatusCode}";
        }
    }
}
Go (HTTP)

DashScope does not provide an SDK for Go. To make calls using the OpenAI Go SDK, see the OpenAI-Go section in this topic.

package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
)
// Message is one chat turn in the request payload.
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
// Input wraps the conversation history sent to the model.
type Input struct {
Messages []Message `json:"messages"`
}
// Parameters carries generation options; result_format selects the
// message-style response shape.
type Parameters struct {
ResultFormat string `json:"result_format"`
}
// RequestBody is the top-level JSON body for the text-generation endpoint.
type RequestBody struct {
Model string `json:"model"`
Input Input `json:"input"`
Parameters Parameters `json:"parameters"`
}
// main sends one chat request to the DashScope text-generation endpoint and
// prints the raw response body.
func main() {
	// Build the request payload.
	payload := RequestBody{
		// This example uses qwen-plus. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
		Model: "qwen-plus",
		Input: Input{
			Messages: []Message{
				{Role: "system", Content: "You are a helpful assistant."},
				{Role: "user", Content: "Who are you?"},
			},
		},
		Parameters: Parameters{ResultFormat: "message"},
	}
	encoded, err := json.Marshal(payload)
	if err != nil {
		log.Fatal(err)
	}

	// Create the POST request.
	req, err := http.NewRequest("POST", "https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/text-generation/generation", bytes.NewBuffer(encoded))
	if err != nil {
		log.Fatal(err)
	}
	// If you have not configured an environment variable, replace the following line with your Model Studio API key: apiKey := "sk-xxx"
	// The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
	apiKey := os.Getenv("DASHSCOPE_API_KEY")
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")

	// Send the request.
	httpClient := &http.Client{}
	resp, err := httpClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Read and print the response body.
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", respBody)
}
curl

The API keys for the Singapore, Virginia, and Beijing regions are different. For more information, see Obtain an API key.

curl --location "https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/text-generation/generation" \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header "Content-Type: application/json" \
--data '{
"model": "qwen-plus",
"input":{
"messages":[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who are you?"
}
]
},
"parameters": {
"result_format": "message"
}
}'
Streaming output

For more information, see Streaming output.

Text generation models

Python

import os
import dashscope

# Endpoint for the Singapore region.
dashscope.base_http_api_url = 'https://dashscope-intl.aliyuncs.com/api/v1'

conversation = [
    {'role': 'system', 'content': 'you are a helpful assistant'},
    {'role': 'user', 'content': 'Who are you?'}
]

responses = dashscope.Generation.call(
    # If you have not configured an environment variable, replace the next line with your Model Studio API key: api_key="sk-xxx"
    # The Singapore/Virginia and Beijing regions use different API keys; see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
    api_key=os.getenv('DASHSCOPE_API_KEY'),
    # This example uses qwen-plus; for other models see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
    model="qwen-plus",
    messages=conversation,
    result_format='message',
    stream=True,              # stream partial results as they are produced
    incremental_output=True,  # each chunk carries only the newly generated text
)

# Print every streamed chunk.
for response in responses:
    print(response)
Java

import java.util.Arrays;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.utils.JsonUtils;
import io.reactivex.Flowable;
import java.lang.System;
import com.alibaba.dashscope.protocol.Protocol;
public class Main {
    private static final Logger logger = LoggerFactory.getLogger(Main.class);

    /** Prints one streamed chunk as JSON. */
    private static void handleGenerationResult(GenerationResult message) {
        System.out.println(JsonUtils.toJson(message));
    }

    /** Streams a completion for the given user message, printing every chunk. */
    public static void streamCallWithMessage(Generation gen, Message userMsg)
            throws NoApiKeyException, ApiException, InputRequiredException {
        Flowable<GenerationResult> chunks = gen.streamCall(buildGenerationParam(userMsg));
        chunks.blockingForEach(Main::handleGenerationResult);
    }

    /** Builds a streaming request: incremental output, message-format result. */
    private static GenerationParam buildGenerationParam(Message userMsg) {
        return GenerationParam.builder()
                // If you have not configured an environment variable, replace the following line with your Model Studio API key: .apiKey("sk-xxx")
                // The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
                .apiKey(System.getenv("DASHSCOPE_API_KEY"))
                // This example uses qwen-plus. For other models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
                .model("qwen-plus")
                .messages(Arrays.asList(userMsg))
                .resultFormat(GenerationParam.ResultFormat.MESSAGE)
                .incrementalOutput(true)
                .build();
    }

    public static void main(String[] args) {
        try {
            // Endpoint for the Singapore region.
            Generation gen = new Generation(Protocol.HTTP.getValue(), "https://dashscope-intl.aliyuncs.com/api/v1");
            Message userMsg = Message.builder().role(Role.USER.getValue()).content("Who are you?").build();
            streamCallWithMessage(gen, userMsg);
        } catch (ApiException | NoApiKeyException | InputRequiredException e) {
            logger.error("An exception occurred: {}", e.getMessage());
        }
        System.exit(0);
    }
}
curl

curl --location "https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/text-generation/generation" \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header "Content-Type: application/json" \
--header "X-DashScope-SSE: enable" \
--data '{
"model": "qwen-plus",
"input":{
"messages":[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who are you?"
}
]
},
"parameters": {
"result_format": "message",
"incremental_output":true
}
}'
Multimodal models

Python

import os
from dashscope import MultiModalConversation
import dashscope

# Endpoint for the Singapore region.
dashscope.base_http_api_url = 'https://dashscope-intl.aliyuncs.com/api/v1'

# One user turn: an image plus a question about it.
messages = [
    {
        "role": "user",
        "content": [
            {"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"},
            {"text": "What is depicted in the image?"}
        ]
    }
]

responses = MultiModalConversation.call(
    # The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
    # If you have not configured an environment variable, replace the next line with your Model Studio API key: api_key="sk-xxx",
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    model='qwen3-vl-plus',  # You can replace the model with another multimodal model and modify the messages accordingly.
    messages=messages,
    stream=True,
    incremental_output=True)

full_content = ""
print("Streaming output:")
for response in responses:
    # Skip chunks whose content list is missing or empty.
    if response["output"]["choices"][0]["message"].content:
        chunk_text = response.output.choices[0].message.content[0]['text']
        print(chunk_text)
        full_content += chunk_text
print(f"Full content: {full_content}")
Java

import java.util.Arrays;
import java.util.Collections;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import io.reactivex.Flowable;
import com.alibaba.dashscope.utils.Constants;
public class Main {
static {
// Route all HTTP calls to the Singapore-region endpoint.
Constants.baseHttpApiUrl="https://dashscope-intl.aliyuncs.com/api/v1";
}
// Streams a vision-model reply for one image + question, printing the text of
// each incremental chunk as it arrives.
public static void streamCall()
throws ApiException, NoApiKeyException, UploadFileException {
MultiModalConversation conv = new MultiModalConversation();
// must create mutable map.
MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
.content(Arrays.asList(Collections.singletonMap("image", "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"),
Collections.singletonMap("text", "What is depicted in the image?"))).build();
MultiModalConversationParam param = MultiModalConversationParam.builder()
// The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
// If you have not configured an environment variable, replace the following line with your Model Studio API key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
.model("qwen3-vl-plus") // You can replace the model with another multimodal model and modify the messages accordingly.
.messages(Arrays.asList(userMessage))
.incrementalOutput(true)
.build();
Flowable<MultiModalConversationResult> result = conv.streamCall(param);
result.blockingForEach(item -> {
try {
var content = item.getOutput().getChoices().get(0).getMessage().getContent();
// Check if the content exists and is not empty.
if (content != null && !content.isEmpty()) {
System.out.println(content.get(0).get("text"));
}
// NOTE(review): catching Exception and calling System.exit(0) silently
// swallows any parsing/stream error and reports success — consider logging
// the exception and exiting non-zero instead.
} catch (Exception e){
System.exit(0);
}
});
}
public static void main(String[] args) {
try {
streamCall();
} catch (ApiException | NoApiKeyException | UploadFileException e) {
// Prints only the message; a logging framework would preserve the stack trace.
System.out.println(e.getMessage());
}
System.exit(0);
}
}
curl

curl -X POST https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation \
-H "Authorization: Bearer $DASHSCOPE_API_KEY" \
-H 'Content-Type: application/json' \
-H 'X-DashScope-SSE: enable' \
-d '{
"model": "qwen3-vl-plus",
"input":{
"messages":[
{
"role": "user",
"content": [
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"},
{"text": "What is depicted in the image?"}
]
}
]
},
"parameters": {
"incremental_output": true
}
}'
Image input

For more information about how to use large models to analyze images, see Visual understanding.

Python

import os
import dashscope

# Endpoint for the Singapore region.
dashscope.base_http_api_url = 'https://dashscope-intl.aliyuncs.com/api/v1'

# One user turn carrying three images followed by a question about them.
image_urls = [
    "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg",
    "https://dashscope.oss-cn-beijing.aliyuncs.com/images/tiger.png",
    "https://dashscope.oss-cn-beijing.aliyuncs.com/images/rabbit.png",
]
content = [{"image": url} for url in image_urls]
content.append({"text": "What are these?"})
messages = [{"role": "user", "content": content}]

response = dashscope.MultiModalConversation.call(
    # The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
    api_key=os.getenv('DASHSCOPE_API_KEY'),
    # This example uses qwen-vl-max. For other models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
    model='qwen-vl-max',
    messages=messages
)
print(response)
Java

// Copyright (c) Alibaba, Inc. and its affiliates.
import java.util.Arrays;
import java.util.Collections;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.JsonUtils;
import com.alibaba.dashscope.utils.Constants;
public class Main {
    static {
        // Route all HTTP calls to the Singapore-region endpoint.
        Constants.baseHttpApiUrl = "https://dashscope-intl.aliyuncs.com/api/v1";
    }

    /**
     * Sends three image URLs plus a question to a vision-language model and
     * prints the full result as JSON.
     */
    public static void simpleMultiModalConversationCall()
            throws ApiException, NoApiKeyException, UploadFileException {
        MultiModalConversation conversation = new MultiModalConversation();
        MultiModalMessage userMessage = MultiModalMessage.builder()
                .role(Role.USER.getValue())
                .content(Arrays.asList(
                        Collections.singletonMap("image", "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"),
                        Collections.singletonMap("image", "https://dashscope.oss-cn-beijing.aliyuncs.com/images/tiger.png"),
                        Collections.singletonMap("image", "https://dashscope.oss-cn-beijing.aliyuncs.com/images/rabbit.png"),
                        Collections.singletonMap("text", "What are these?")))
                .build();
        MultiModalConversationParam request = MultiModalConversationParam.builder()
                // If you have not configured an environment variable, replace the following line with your Model Studio API key: .apiKey("sk-xxx")
                .apiKey(System.getenv("DASHSCOPE_API_KEY"))
                // This example uses qwen-vl-plus. For other models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
                .model("qwen-vl-plus")
                .message(userMessage)
                .build();
        MultiModalConversationResult result = conversation.call(request);
        System.out.println(JsonUtils.toJson(result));
    }

    public static void main(String[] args) {
        try {
            simpleMultiModalConversationCall();
        } catch (ApiException | NoApiKeyException | UploadFileException e) {
            System.out.println(e.getMessage());
        }
        System.exit(0);
    }
}
curl

The API keys for the Singapore/Virginia and Beijing regions are different. For more information, see Obtain and configure an API key.

curl --location 'https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation' \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header 'Content-Type: application/json' \
--data '{
"model": "qwen-vl-plus",
"input":{
"messages":[
{
"role": "user",
"content": [
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"},
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/tiger.png"},
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/rabbit.png"},
{"text": "What are these?"}
]
}
]
}
}'
Video input

The following code is an example of how to pass video frames. For more information about usage, such as passing a video file, see Visual understanding.

Python

import os
# Your DashScope SDK for Python must be V1.20.10 or later.
import dashscope

# Endpoint for the Singapore region.
dashscope.base_http_api_url = 'https://dashscope-intl.aliyuncs.com/api/v1'

# For Qwen2.5-VL models an image list may carry an fps value, meaning the
# frames were sampled from the source video once every 1/fps seconds.
video_frames = [
    "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/xzsgiz/football1.jpg",
    "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/tdescd/football2.jpg",
    "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/zefdja/football3.jpg",
    "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/aedbqh/football4.jpg",
]
messages = [{
    "role": "user",
    "content": [
        {"video": video_frames, "fps": 2},
        {"text": "Describe the process shown in this video"},
    ],
}]

response = dashscope.MultiModalConversation.call(
    # If you have not configured an environment variable, replace the next line with your Model Studio API key: api_key="sk-xxx"
    # The Singapore/Virginia and Beijing regions use different API keys; see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    # This example uses qwen2.5-vl-72b-instruct. For other models, see https://www.alibabacloud.com/help/zh/model-studio/models
    model='qwen2.5-vl-72b-instruct',
    messages=messages
)
print(response["output"]["choices"][0]["message"].content[0]["text"])
Java

// Your DashScope SDK for Java must be V2.18.3 or later.
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.Constants;
public class Main {
static {
// Route all HTTP calls to the Singapore-region endpoint.
Constants.baseHttpApiUrl="https://dashscope-intl.aliyuncs.com/api/v1";
}
private static final String MODEL_NAME = "qwen2.5-vl-72b-instruct"; // This example uses qwen2.5-vl-72b-instruct. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/models
// Sends a list of video frames plus a prompt to the model and prints the text
// of the first content element of the reply.
public static void videoImageListSample() throws ApiException, NoApiKeyException, UploadFileException {
MultiModalConversation conv = new MultiModalConversation();
MultiModalMessage systemMessage = MultiModalMessage.builder()
.role(Role.SYSTEM.getValue())
.content(Arrays.asList(Collections.singletonMap("text", "You are a helpful assistant.")))
.build();
// If the model is in the Qwen2.5-VL series and an image list is passed, you can set the fps parameter. This parameter indicates that the image list is extracted from the original video at an interval of 1/fps seconds.
Map<String, Object> params = Map.of(
"video", Arrays.asList("https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/xzsgiz/football1.jpg",
"https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/tdescd/football2.jpg",
"https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/zefdja/football3.jpg",
"https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/aedbqh/football4.jpg"),
"fps",2);
MultiModalMessage userMessage = MultiModalMessage.builder()
.role(Role.USER.getValue())
.content(Arrays.asList(
params,
Collections.singletonMap("text", "Describe the process shown in this video")))
.build();
MultiModalConversationParam param = MultiModalConversationParam.builder()
// If you have not configured an environment variable, replace the following line with your Model Studio API key: .apiKey("sk-xxx")
// The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
.model(MODEL_NAME)
.messages(Arrays.asList(systemMessage, userMessage)).build();
MultiModalConversationResult result = conv.call(param);
System.out.print(result.getOutput().getChoices().get(0).getMessage().getContent().get(0).get("text"));
}
public static void main(String[] args) {
try {
videoImageListSample();
} catch (ApiException | NoApiKeyException | UploadFileException e) {
// Prints only the message; a logging framework would preserve the stack trace.
System.out.println(e.getMessage());
}
System.exit(0);
}
}
curl

The API keys for the Singapore, Virginia, and Beijing regions are different. For more information, see Obtain and configure an API key.

curl -X POST https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation \
-H "Authorization: Bearer $DASHSCOPE_API_KEY" \
-H 'Content-Type: application/json' \
-d '{
"model": "qwen2.5-vl-72b-instruct",
"input": {
"messages": [
{
"role": "user",
"content": [
{
"video": [
"https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/xzsgiz/football1.jpg",
"https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/tdescd/football2.jpg",
"https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/zefdja/football3.jpg",
"https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/aedbqh/football4.jpg"
],
"fps":2
},
{
"text": "Describe the process shown in this video"
}
]
}
]
}
}'
Tool calling

For the complete code for the Function calling flow, see Overview of text generation models.

Python

import os
import dashscope

# Endpoint for the Singapore region.
dashscope.base_http_api_url = 'https://dashscope-intl.aliyuncs.com/api/v1'

# Tool definitions in the OpenAI-compatible function-calling format.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_time",
            "description": "This is useful when you want to know the current time.",
            "parameters": {}
        }
    },
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "This is useful when you want to query the weather of a specified city.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "A city or a district, such as Beijing, Hangzhou, or Yuhang District."
                    }
                },
                # Fix: "required" is part of the JSON Schema and must sit inside
                # "parameters"; the original sample placed it at the function
                # level, where it is ignored.
                "required": [
                    "location"
                ]
            }
        }
    }
]

messages = [{"role": "user", "content": "What is the weather in Hangzhou?"}]

response = dashscope.Generation.call(
    # If you have not configured an environment variable, replace the following line with your Model Studio API key: api_key="sk-xxx"
    # The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
    api_key=os.getenv('DASHSCOPE_API_KEY'),
    # This example uses qwen-plus. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
    model='qwen-plus',
    messages=messages,
    tools=tools,
    result_format='message'
)
print(response)
Java

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.alibaba.dashscope.aigc.conversation.ConversationParam.ResultFormat;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.tools.FunctionDefinition;
import com.alibaba.dashscope.tools.ToolFunction;
import com.alibaba.dashscope.utils.JsonUtils;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.github.victools.jsonschema.generator.Option;
import com.github.victools.jsonschema.generator.OptionPreset;
import com.github.victools.jsonschema.generator.SchemaGenerator;
import com.github.victools.jsonschema.generator.SchemaGeneratorConfig;
import com.github.victools.jsonschema.generator.SchemaGeneratorConfigBuilder;
import com.github.victools.jsonschema.generator.SchemaVersion;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import com.alibaba.dashscope.protocol.Protocol;
public class Main {
    /**
     * Sample tool: returns a canned weather report for a location.
     * Declared static: a non-static inner class cannot be instantiated from the
     * static methods of Main without an enclosing instance, and the schema
     * generator only needs the class itself.
     */
    public static class GetWeatherTool {
        private String location;

        public GetWeatherTool(String location) {
            this.location = location;
        }

        // Stub implementation; a real tool would query a weather service.
        public String call() {
            return location + " is sunny today";
        }
    }

    /** Sample tool: reports the current local time as a formatted string. */
    public static class GetTimeTool {
        public GetTimeTool() {
        }

        public String call() {
            LocalDateTime now = LocalDateTime.now();
            DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
            String currentTime = "Current time: " + now.format(formatter) + ".";
            return currentTime;
        }
    }

    /**
     * Registers both tools with the model and asks a weather question so the
     * model can select the appropriate tool. Prints the raw JSON result.
     *
     * @throws NoApiKeyException      if DASHSCOPE_API_KEY is not configured
     * @throws ApiException           if the service call fails
     * @throws InputRequiredException if required request fields are missing
     */
    public static void SelectTool()
            throws NoApiKeyException, ApiException, InputRequiredException {
        // Derive JSON Schemas for the tools' parameter blocks from the tool classes.
        SchemaGeneratorConfigBuilder configBuilder =
                new SchemaGeneratorConfigBuilder(SchemaVersion.DRAFT_2020_12, OptionPreset.PLAIN_JSON);
        SchemaGeneratorConfig config = configBuilder.with(Option.EXTRA_OPEN_API_FORMAT_VALUES)
                .without(Option.FLATTENED_ENUMS_FROM_TOSTRING).build();
        SchemaGenerator generator = new SchemaGenerator(config);
        ObjectNode weatherSchema = generator.generateSchema(GetWeatherTool.class);
        ObjectNode timeSchema = generator.generateSchema(GetTimeTool.class);

        FunctionDefinition fdWeather = FunctionDefinition.builder()
                .name("get_current_weather")
                .description("Get the weather of a specified region")
                .parameters(JsonUtils.parseString(weatherSchema.toString()).getAsJsonObject())
                .build();
        FunctionDefinition fdTime = FunctionDefinition.builder()
                .name("get_current_time")
                .description("Get the current time")
                .parameters(JsonUtils.parseString(timeSchema.toString()).getAsJsonObject())
                .build();

        Message systemMsg = Message.builder()
                .role(Role.SYSTEM.getValue())
                .content("You are a helpful assistant. When asked a question, use tools wherever possible.")
                .build();
        Message userMsg = Message.builder()
                .role(Role.USER.getValue())
                .content("Weather in Hangzhou")
                .build();
        List<Message> messages = new ArrayList<>(Arrays.asList(systemMsg, userMsg));

        GenerationParam param = GenerationParam.builder()
                // The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
                .apiKey(System.getenv("DASHSCOPE_API_KEY"))
                // This example uses qwen-plus. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
                .model("qwen-plus")
                .messages(messages)
                .resultFormat(ResultFormat.MESSAGE)
                .tools(Arrays.asList(
                        ToolFunction.builder().function(fdWeather).build(),
                        ToolFunction.builder().function(fdTime).build()))
                .build();

        // The base URL below is for the Singapore region.
        Generation gen = new Generation(Protocol.HTTP.getValue(), "https://dashscope-intl.aliyuncs.com/api/v1");
        GenerationResult result = gen.call(param);
        System.out.println(JsonUtils.toJson(result));
    }

    public static void main(String[] args) {
        try {
            SelectTool();
        } catch (ApiException | NoApiKeyException | InputRequiredException e) {
            System.out.println(String.format("Exception %s", e.getMessage()));
        }
        // Force exit so any lingering SDK threads do not keep the JVM alive.
        System.exit(0);
    }
}
# The API keys for the Singapore/Virginia and Beijing regions are different.
# For more information, see Obtain and configure an API key.
# The following URL is for the Singapore region.
# Fix: "required" must live INSIDE the "parameters" JSON Schema object;
# as a sibling of "parameters" it is silently ignored.
curl --location "https://dashscope-intl.aliyuncs.com/api/v1/services/aigc/text-generation/generation" \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header "Content-Type: application/json" \
--data '{
    "model": "qwen-plus",
    "input": {
        "messages": [{
            "role": "user",
            "content": "What is the weather in Hangzhou?"
        }]
    },
    "parameters": {
        "result_format": "message",
        "tools": [{
            "type": "function",
            "function": {
                "name": "get_current_time",
                "description": "This is useful when you want to know the current time.",
                "parameters": {}
            }
        },{
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "This is useful when you want to query the weather of a specified city.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "A city or a district, such as Beijing, Hangzhou, or Yuhang District."
                        }
                    },
                    "required": ["location"]
                }
            }
        }]
    }
}'
Asynchronous invocation# Your DashScope SDK for Python must be V1.19.0 or later.
import asyncio
import platform
import os
import dashscope
from dashscope.aigc.generation import AioGeneration
# Point the SDK at the Singapore regional endpoint.
dashscope.base_http_api_url = 'https://dashscope-intl.aliyuncs.com/api/v1'

async def main():
    """Send one prompt asynchronously and print the complete response."""
    response = await AioGeneration.call(
        # If you have not configured an environment variable, replace the following line with your Model Studio API key: api_key="sk-xxx"
        # The API keys for the Singapore/Virginia and Beijing regions are different. To get an API key, see https://www.alibabacloud.com/help/zh/model-studio/get-api-key
        api_key=os.getenv('DASHSCOPE_API_KEY'),
        # This example uses qwen-plus. You can change the model name as needed. For a list of models, see https://www.alibabacloud.com/help/zh/model-studio/getting-started/models
        model="qwen-plus",
        messages=[{"role": "user", "content": "Who are you"}],
        result_format="message",
    )
    print(response)

# NOTE(review): presumably the default Windows event loop policy is
# incompatible with the SDK's async HTTP client — confirm against SDK docs.
if platform.system() == "Windows":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

asyncio.run(main())
Document understandingPythonimport os
import dashscope
# Currently, you can call the qwen-long-latest model only in the China (Beijing) region.
# Beijing-region endpoint: the comment above notes qwen-long-latest is only
# available in the China (Beijing) region.
dashscope.base_http_api_url = 'https://dashscope.aliyuncs.com/api/v1'
# Conversation context: system prompt, uploaded-file reference, user question.
messages = [
{'role': 'system', 'content': 'you are a helpful assisstant'},
# Replace '{FILE_ID}' with the file ID that you use in the actual conversation scenario.
# NOTE(review): FILE_ID is a placeholder and is undefined as written; running
# this snippet verbatim raises NameError unless FILE_ID is assigned first.
{'role':'system','content':f'fileid://{FILE_ID}'},
{'role': 'user', 'content': 'What is this article about?'}]
response = dashscope.Generation.call(
# If you have not configured an environment variable, replace the following line with your Model Studio API key: api_key="sk-xxx"
api_key=os.getenv('DASHSCOPE_API_KEY'),
model="qwen-long-latest",
messages=messages,
result_format='message'
)
print(response)
Javaimport os
import dashscope
# Currently, you can call the qwen-long-latest model only in the China (Beijing) region.
# NOTE(review): this snippet appears under the "Java" tab but is Python — it
# repeats the Python sample; the Java example seems to be missing from the doc.
# Beijing-region endpoint: the comment above notes qwen-long-latest is only
# available in the China (Beijing) region.
dashscope.base_http_api_url = 'https://dashscope.aliyuncs.com/api/v1'
messages = [
{'role': 'system', 'content': 'you are a helpful assisstant'},
# Replace '{FILE_ID}' with the file ID that you use in the actual conversation scenario.
# NOTE(review): FILE_ID is an undefined placeholder; assign it before running.
{'role':'system','content':f'fileid://{FILE_ID}'},
{'role': 'user', 'content': 'What is this article about?'}]
response = dashscope.Generation.call(
# If you have not configured an environment variable, replace the following line with your Model Studio API key: api_key="sk-xxx"
api_key=os.getenv('DASHSCOPE_API_KEY'),
model="qwen-long-latest",
messages=messages,
result_format='message'
)
print(response)
curlCurrently, you can call the document understanding model only in the China (Beijing) region. Replace {FILE_ID} with the file ID that you use in the actual conversation scenario. curl --location "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation" \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header "Content-Type: application/json" \
--data '{
"model": "qwen-long-latest",
"input":{
"messages":[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "system",
"content": "fileid://{FILE_ID}"
},
{
"role": "user",
"content": "What is this article about?"
}
]
},
"parameters": {
"result_format": "message"
}
}'
|
model string (Required) The name of the model. Supported models include Qwen large language models (commercial and open source), Qwen-VL, Qwen-Coder. For specific model names and billing information, see Models. |
messages array (Required) The context to pass to the large language model, arranged in conversational order. When you make an HTTP call, place messages in the input object. Message types System Message object (Optional) A system message that sets the role, tone, task objectives, or constraints for the large language model. This message is usually placed at the beginning of the messages array. Do not set a System Message for QwQ models. Setting a System Message for QVQ models has no effect. Properties content string (Required) The message content. role string (Required) The role for a system message. The value is fixed as system. User Message object (Required) A user message that passes questions, instructions, or context to the model. Properties content string or array (Required) The message content. The value is a string for text-only input. The value is an array for multimodal input, such as images, or if explicit caching is enabled. Properties text string (Required) The input text. image string (Optional) The image file for image understanding. You can pass the image in one of the following three ways: A public URL of the image. The Base64 encoding of the image, in the format data:image/<format>;base64,<data>. The absolute path of a local file.
Applicable models: Qwen-VL, QVQ Example value: {"image":"https://xxxx.jpeg"} video array or string (Optional) The video to pass to the Qwen-VL model or QVQ model. If you pass a list of images, the value is an array. If you pass a video file, the value is a string.
To pass a local file, see Local files (Qwen-VL) or Local files (QVQ). Example values: List of images: {"video":["https://xx1.jpg",...,"https://xxn.jpg"]} Video file: {"video":"https://xxx.mp4"}
fps float (Optional) The number of frames to extract per second. The value must be in the range of [0.1, 10]. The default value is 2.0. Two features are available: When you input a video file, this parameter controls the frame extraction frequency. One frame is extracted every 1/fps seconds. Applicable to the Qwen-VL model and QVQ model. This parameter informs the model of the time interval between adjacent frames. This helps the model better understand the temporal dynamics of the video. This function applies to both video file and image list inputs. It is suitable for scenarios such as event time localization or segment content summarization. Supports the Qwen2.5-VL and Qwen3-VL models, and the QVQ model.
Example values: List of images input: {"video":["https://xx1.jpg",...,"https://xxn.jpg"], "fps":2} Video file input: {"video": "https://xx1.mp4", "fps":2}
A larger fps value is suitable for high-speed motion scenarios, such as sports events and action movies. A smaller fps value is suitable for long videos or content with static scenes. min_pixels integer (Optional) Sets the minimum pixel threshold for an input image or video frame. If the pixel count of an input image or video frame is less than min_pixels, the image or frame is scaled up until its total pixel count is greater than min_pixels. max_pixels integer (Optional) Sets the maximum pixel threshold for an input image or video frame. If the pixel count of an input image or video is within the [min_pixels, max_pixels] range, the model processes the original image. If the input image's pixel count is greater than max_pixels, the image is scaled down until its total pixel count is less than max_pixels. total_pixels integer (Optional) Limits the total number of pixels for all frames extracted from a video (pixels per frame × total number of frames). If the total pixel count of the video exceeds this limit, the system scales down the video frames. However, it still ensures that the pixel value of a single frame remains within the [min_pixels, max_pixels] range. For long videos with a high number of extracted frames, you can lower this value to reduce token consumption and processing time. However, this may cause a loss of image detail. cache_control object (Optional) Enables explicit caching. This parameter is supported only by models that support explicit cache. Properties type string (Required) The value is fixed as ephemeral. role string (Required) The role for a user message. The value is fixed as user. Assistant Message object (Optional) The model's reply to the user's message. Properties content string (Optional) The message content. This parameter is optional only when the tool_calls parameter is specified in the assistant message. role string (Required) The value is fixed as assistant. partial boolean (Optional) Specifies whether to enable partial mode. 
For more information, see Partial mode. Supported models Qwen-Max series qwen3-max, qwen3-max-2025-09-23, qwen3-max-preview (non-thinking mode), qwen-max, qwen-max-latest, and snapshot models from qwen-max-2025-01-25 or later Qwen-Plus series (non-thinking mode) qwen-plus, qwen-plus-latest, and snapshot models from qwen-plus-2025-01-25 or later Qwen-Flash series (non-thinking mode) qwen-flash, and snapshot models from qwen-flash-2025-07-28 or later Qwen-Coder series qwen3-coder-plus, qwen3-coder-flash, qwen3-coder-480b-a35b-instruct, qwen3-coder-30b-a3b-instruct Qwen-VL series qwen3-vl-plus series (non-thinking mode) qwen3-vl-plus, and snapshot models from qwen3-vl-plus-2025-09-23 or later qwen3-vl-flash series (non-thinking mode) qwen3-vl-flash, and snapshot models from qwen3-vl-flash-2025-10-15 or later qwen-vl-max series qwen-vl-max, qwen-vl-max-latest, and snapshot models from qwen-vl-max-2025-04-08 or later qwen-vl-plus series qwen-vl-plus, qwen-vl-plus-latest, and snapshot models from qwen-vl-plus-2025-01-25 or later
Qwen-Turbo series (non-thinking mode) qwen-turbo, qwen-turbo-latest, and snapshot models from qwen-turbo-2024-11-01 or later Qwen open-source series Qwen3 open-source models (non-thinking mode), Qwen2.5 series text models, Qwen3-VL open-source models (non-thinking mode)
tool_calls array (Optional) The tool and input parameter information that is returned after you initiate a function call. This parameter contains one or more objects and is obtained from the tool_calls field of the previous model response. Properties id string The ID of the tool response. type string The tool type. Currently, only function is supported. function object The tool and input parameter information. Properties name string The tool name. arguments string The input parameter information, in a JSON string format. index integer The index of the current tool information in the tool_calls array. Tool Message object (Optional) The output information of the tool. Properties content string (Required) The output content of the tool function. The value must be a string. role string (Required) The value is fixed as tool. tool_call_id string (Optional) The ID that is returned after you initiate a function call. You can obtain the ID from response.output.choices[0].message.tool_calls[$index]["id"]. This parameter marks the Tool Message that corresponds to the tool. |
temperature float (Optional) The sampling temperature. This value controls the diversity of text that the model generates. A higher temperature results in more diverse text. A lower temperature produces more deterministic text. The value must be in the range of [0, 2). When making an HTTP call, place temperature in the parameters object. Do not modify the default temperature value for QVQ models. |
top_p float (Optional) The probability threshold for nucleus sampling. It controls the diversity of the text that the model generates. A higher top_p value results in more diverse text. A lower top_p value produces more deterministic text. The value must be in the range of (0, 1.0]. Default top_p values Qwen3 (non-thinking mode), Qwen3-Instruct series, Qwen3-Coder series, qwen-max series, qwen-plus series (non-thinking mode), qwen-flash series (non-thinking mode), qwen-turbo series (non-thinking mode), Qwen open source series, qwen-vl-max-2025-08-13, and Qwen3-VL (non-thinking mode): 0.8 qwen-vl-plus series, qwen-vl-max, qwen-vl-max-latest, qwen-vl-max-2025-04-08, qwen2.5-vl-3b-instruct, qwen2.5-vl-7b-instruct, qwen2.5-vl-32b-instruct, and qwen2.5-vl-72b-instruct: 0.001 QVQ series, qwen-vl-plus-2025-07-10, qwen-vl-plus-2025-08-15: 0.5 qwen3-max-preview (thinking mode), Qwen3-Omni-Flash series: 1.0 Qwen3 (thinking mode), Qwen3-VL (thinking mode), Qwen3-Thinking, QwQ series, Qwen3-Omni-Captioner: 0.95 In the Java SDK, the parameter is topP. When you call the API over HTTP, add top_p to the parameters object. Do not modify the default top_p value for QVQ models. |
top_k integer (Optional) The size of the candidate set for sampling during generation. For example, if you set the value to 50, only the 50 highest-scoring tokens in a single generation are used as the candidate set for random sampling. A larger value results in more random output, and a smaller value results in more deterministic output. A value of `null` or a value greater than 100 disables the `top_k` policy. In this case, only the `top_p` policy is effective. The value must be greater than or equal to 0. Default top_k values QVQ series, qwen-vl-plus-2025-07-10, and qwen-vl-plus-2025-08-15: 10 QwQ series: 40 Other qwen-vl-plus series, models before qwen-vl-max-2025-08-13, qwen2.5-omni-7b: 1 Qwen3-Omni-Flash series: 50 All other models: 20 In the Java SDK, use the topK. For HTTP calls, add top_k to the parameters object. Do not modify the default top_k value for QVQ models. |
enable_thinking boolean (Optional) Specifies whether to enable thinking mode when you use a hybrid thinking model. This parameter applies to the Qwen3 and Qwen3-VL models. For more information, see Deep thinking. Valid values: true
If this parameter is enabled, the thinking content is returned in the reasoning_content field. false
Default values for different models: Supported models In the Java SDK, this parameter is named enableThinking. When making an HTTP call, place enable_thinking in the parameters object. |
thinking_budget integer (Optional) The maximum number of tokens for the thinking process. This applies to Qwen3-VL, and the commercial and open source versions of Qwen3 models. For more information, see Limit thinking length. The default value is the model's maximum chain-of-thought length. For more information, see Models. In the Java SDK, this parameter is named thinkingBudget. When you make a call using HTTP, place thinking_budget in the parameters object. The value defaults to the model's maximum chain-of-thought length. |
enable_code_interpreter boolean (Optional) Default value: false Specifies whether to enable the code interpreter feature. This parameter applies only to qwen3-max-preview in thinking mode. For more information, see Code interpreter. Valid values: This parameter is not supported by the Java SDK. When calling using HTTP, place enable_code_interpreter in the parameters object. |
repetition_penalty float (Optional) The penalty for repeating consecutive sequences during model generation. A higher repetition_penalty value reduces repetition in the generated text. A value of 1.0 means no penalty. The value must be greater than 0. In the Java SDK, this parameter is called repetitionPenalty. When making an HTTP call, place repetition_penalty in the parameters object. If you use the qwen-vl-plus_2025-01-25 model for text extraction, set repetition_penalty to 1.0. Do not modify the default repetition_penalty value for QVQ models. |
presence_penalty float (Optional) Controls content repetition in the text generated by the model. Value range: [-2.0, 2.0]. Positive values reduce repetition, and negative values increase it. For scenarios that require diversity and creativity, such as creative writing or brainstorming, increase this value. For scenarios that require consistency and terminological accuracy, such as technical documents or formal text, decrease this value. Default presence_penalty values qwen3-max-preview (thinking mode), Qwen3 (non-thinking mode), Qwen3-Instruct series, qwen3-0.6b/1.7b/4b (thinking mode), QVQ series, qwen-max, qwen-max-latest, qwen-max-latest, qwen2.5-vl series, qwen-vl-max series, qwen-vl-plus, Qwen3-VL (non-thinking): 1.5. qwen-vl-plus-latest, qwen-vl-plus-2025-08-15: 1.2. qwen-vl-plus-2025-01-25: 1.0. qwen3-8b/14b/32b/30b-a3b/235b-a22b (thinking mode), qwen-plus/qwen-plus-latest/2025-04-28 (thinking mode), qwen-turbo/qwen-turbo/2025-04-28 (thinking mode): 0.5. All other models: 0.0. How it works If the parameter value is positive, the model penalizes tokens that already exist in the text. The penalty amount does not depend on how many times the token appears. This reduces the chance of these tokens reappearing. As a result, content repetition decreases and word diversity increases. Example Prompt: Translate this sentence into English: "Esta película es buena. La trama es buena, la actuación es buena, la música es buena, y en general, toda la película es simplemente buena. Es realmente buena, de hecho. La trama es tan buena, y la actuación es tan buena, y la música es tan buena." Parameter value 2.0: This movie is very good. The plot is great, the acting is great, the music is also very good, and overall, the whole movie is incredibly good. In fact, it is truly excellent. The plot is very exciting, the acting is outstanding, and the music is so beautiful. Parameter value 0.0: This movie is good. 
The plot is good, the acting is good, the music is also good, and overall, the whole movie is very good. In fact, it is really great. The plot is very good, the acting is also very outstanding, and the music is also excellent. Parameter value -2.0: This movie is very good. The plot is very good, the acting is very good, the music is also very good, and overall, the whole movie is very good. In fact, it is really great. The plot is very good, the acting is also very good, and the music is also very good. When using the qwen-vl-plus-2025-01-25 model for text extraction, set presence_penalty to 1.5. Do not modify the default presence_penalty value for QVQ models. The Java SDK does not support this parameter. When you make a call using HTTP, place presence_penalty in the parameters object. |
vl_high_resolution_images boolean (Optional) Default value: false Specifies whether to increase the maximum pixel limit for input images to the pixel value that corresponds to 16384 tokens. For more information, see Process high-resolution images. vl_high_resolution_images: true: A fixed-resolution strategy is used, and the max_pixels setting is ignored. If an image exceeds this resolution, its total pixels are downscaled to this limit.
Click to view the pixel limits for each model When vl_high_resolution_images is True, different models have different pixel limits: Qwen3-VL series, qwen-vl-max, qwen-vl-max-latest, qwen-vl-max-0813, qwen-vl-plus, qwen-vl-plus-latest, qwen-vl-plus-0815: 16777216 (each Token corresponds to 32*32 pixels, i.e., 16384*32*32)
QVQ series and other Qwen2.5-VL series models: 12845056 (each Token corresponds to 28*28 pixels, i.e., 16384*28*28)
If vl_high_resolution_images is false, the actual resolution is determined by both max_pixels and the default limit. The maximum of the two values is used. If the image exceeds this pixel limit, it is downscaled to this limit. Click to view the default pixel limits for each model When vl_high_resolution_images is false, different models have different default pixel limits: Qwen3-VL series: 2621440 (2560*32*32, meaning the default Token limit is 2560)
qwen-vl-max, qwen-vl-max-latest, qwen-vl-max-0813, qwen-vl-plus, qwen-vl-plus-latest, qwen-vl-plus-0815: 1310720 (1280*32*32, meaning the default Token limit is 1280)
QVQ series and other Qwen2.5-VL series models: 1003520 (1280*28*28, meaning the default Token limit is 1280)
In the Java SDK, the parameter is vlHighResolutionImages. The minimum required version is 2.20.8. When making an HTTP call, place vl_high_resolution_images |
vl_enable_image_hw_output boolean (Optional) Default value: false Specifies whether to return the dimensions of the scaled image. The model scales the input image. If this parameter is set to true, the model returns the height and width of the scaled image. If streaming output is enabled, this information is returned in the last chunk. This parameter is supported by the Qwen-VL model. In the Java SDK, this parameter is named vlEnableImageHwOutput. The minimum Java SDK version is 2.20.8. When making HTTP calls, place vl_enable_image_hw_output in the parameters object. |
max_input_tokens integer (Optional) The maximum allowed token length for the input. This parameter is currently supported only by the qwen-plus-0728 and qwen-plus-latest models. qwen-plus-latest default value: 129,024 The default value may be adjusted to 1,000,000 in the future. qwen-plus-2025-07-28 default value: 1,000,000
The Java SDK does not currently support this parameter. When you make a call using HTTP, place max_input_tokens in the parameters object. |
max_tokens integer (Optional) Limits the maximum number of tokens in the model's output. If the generated content exceeds this value, generation stops, and the returned finish_reason is length. The default and maximum values are the model's maximum output length. For more information, see Models. This parameter is useful for controlling the output length in scenarios such as generating summaries or keywords, or for reducing costs and shortening response times. When max_tokens is triggered, the finish_reason field of the response is length. max_tokens does not limit the length of the chain-of-thought.
In the Java SDK, this parameter is maxTokens. For Qwen-VL models, it is maxLength in the Java SDK, but versions after 2.18.4 also support maxTokens. When you make a call using HTTP, place max_tokens in the parameters |
seed integer (Optional) A random number seed. This parameter ensures that results are reproducible for the same input and parameters. If you pass the same seed value in a call and other parameters remain unchanged, the model returns the same result as much as possible. Value range: [0, 2^31−1]. When you make a call using HTTP, place seed in the parameters object. |
stream boolean (Optional) Default value: false Specifies whether to return the response in streaming output mode. Parameter values: false: The model returns the complete result at once after the generation is complete. true: The output is generated and sent incrementally. This means that a chunk is returned as soon as a part of the content is generated.
This parameter is supported only by the Python SDK. To implement streaming output using the Java SDK, call the streamCall interface. To implement streaming output using HTTP, specify X-DashScope-SSE as enable in the request header. The commercial edition of Qwen3 (thinking mode), the open source edition of Qwen3, QwQ, and QVQ support only streaming output. |
incremental_output boolean (Optional) Default: false. The default for Qwen3-Max, Qwen3-VL, Qwen3 open source models, QwQ, and QVQ models is true. Specifies whether to enable incremental output in streaming output mode. Set this parameter to true. Parameter values: false: Each output is the entire sequence that is generated. The last output is the complete result. I
I like
I like apple
I like apple.
true (recommended): The output is incremental. This means that subsequent output does not include previously generated content. Read these fragments in real time to get the complete result. I
like
apple
.
In the Java SDK, this corresponds to incrementalOutput. When calling via HTTP, add incremental_output to the parameters object. QwQ models and Qwen3 models in thinking mode support only the true value. Because the default value for Qwen3 commercial models is false, manually set this parameter to true when using thinking mode. Qwen3 open source models do not support the false value. |
response_format object (Optional) Default value: {"type": "text"}. The format of the returned content. Valid values: {"type": "text"}: Outputs a text response.
{"type": "json_object"}: Outputs a standard JSON string.
{"type": "json_schema","json_schema": {...} }: Outputs a JSON string in a specified format.
For more information, see Structured output. For information about supported models, see Supported models. If you set this parameter to {"type": "json_object"}, you must explicitly instruct the model to output JSON in the prompt. For example, use a prompt such as "Please output in JSON format." Otherwise, an error is reported. In the Java SDK, this parameter is named responseFormat. When making an HTTP call, place response_format in the parameters object. Properties type string (Required) The format of the returned content. Valid values: text: Outputs a text response.
json_object: Outputs a standard JSON string.
json_schema: Outputs a JSON string in a specified format.
json_schema object This field is required when type is set to json_schema. It defines the configuration for structured output. Properties name string (Required) The unique name of the schema. It can contain only letters (case-insensitive), digits, underscores (_), and hyphens (-). The name can be up to 64 characters long. description string (Optional) A description of the schema's purpose. This helps the model understand the semantic context of the output. schema object (Optional) An object that complies with the JSON Schema standard. It defines the data structure of the model's output. To learn how to build a JSON Schema, see JSON Schema strict boolean (Optional) Default value: false. Controls whether the model must strictly follow all constraints of the schema. true (Recommended) The model strictly follows all constraints, such as field types, required fields, and formats. This ensures 100% compliance of the output. false (Not recommended) The model only loosely follows the schema. This may generate non-compliant output and cause validation to fail.
|
result_format string (Optional) Default value: text (The default for Qwen3-Max, Qwen3-VL, QwQ models, Qwen3 open source models (except qwen3-next-80b-a3b-instruct) is message) The format of the returned data. Set this parameter to message to simplify multi-turn conversations. The platform will update the default value to message in a future release. In the Java SDK, this parameter is named resultFormat When making calls over HTTP, place result_format in the parameters object. For Qwen-VL, QVQ models, setting text has no effect. The Qwen3-Max, Qwen3-VL, and Qwen3 models in thinking mode support only the message value. Because the default value for Qwen3 commercial models is text, set this parameter to message. If you use the Java SDK to call a Qwen3 open source model and pass text, the response is still returned in the message format. |
logprobs boolean (Optional) Default value: false Specifies whether to return the log probabilities of the output tokens. Valid values: The following models are supported: When you make a call using HTTP, place logprobs in the parameters object. |
top_logprobs integer (Optional) Default: 0 Specifies the number of most likely candidate tokens to return at each generation step. Value range: [0, 5] This parameter takes effect only when logprobs is set to true. In the Java SDK, the parameter is topLogprobs. For HTTP calls, place top_logprobs in the parameters object. |
n integer (Optional) Default: 1 The number of responses to generate. The value range is 1-4. For scenarios that require you to generate multiple responses, such as creative writing or ad copy, you can set a larger n value. This parameter is supported only by the qwen-plus, Qwen3 (non-thinking mode) models. The value is fixed to 1 when the tools parameter is passed. Setting a larger value for n does not increase input token consumption, but it increases output token consumption. When making HTTP calls, place n in the parameters object. |
stop string or array (Optional) Specifies stop words. When a string or token_id that is specified in stop appears in the text generated by the model, generation stops immediately. Pass sensitive words to control the model's output. When stop is an array, do not use a token_id and a string as elements at the same time. For example, ["Hello",104307] is not a valid value. When you make a call using HTTP, place stop in the parameters object. |
tools array (Optional) An array of one or more tool objects for the model to call in Function calling. For more information, see Function calling. When using the tools parameter, set the result_format parameter to message. Set the tools parameter when you initiate Function calling or submit tool execution results. Properties type string (Required) The tool type. Currently, only function is supported. function object (Required) Properties name string (Required) The name of the tool function. The name must contain only letters, numbers, underscores (_), and hyphens (-). The maximum length is 64 characters. description string (Required) A description of the tool function. This helps the model decide when and how to call the tool function. parameters object (Required) A description of the tool's parameters. The value must be a valid JSON Schema, see JSON schema. If the parameters parameter is empty, the function has no input parameters. When making HTTP calls, place tools in the parameters object. This parameter is not currently supported by the qwen-vl series models. |
tool_choice string or object (Optional) Default value: auto The tool selection policy. Set this parameter to force a tool call for a specific type of question, such as always using a specific tool or disabling all tools. auto
The model automatically selects a tool. none
To temporarily disable tool calling in a specific request, set the tool_choice parameter to none. {"type": "function", "function": {"name": "the_function_to_call"}}
To force a call to a specific tool, set the tool_choice parameter to {"type": "function", "function": {"name": "the_function_to_call"}}, where the_function_to_call is the name of the specified tool function. Models in thinking mode do not support forcing a call to a specific tool.
In the Java SDK, this parameter is named toolChoice. When making an HTTP call, place tool_choice in the parameters object. |
parallel_tool_calls boolean (Optional) Default value: false Specifies whether to enable parallel tool calling. Valid values: true: Enabled
false: Disabled.
For more information about parallel tool calling, see Parallel tool calling. For the Java SDK, you can use parallelToolCalls. For HTTP calls, you can add parallel_tool_calls to the parameters object. |