Request body
Text input
This example uses a single-turn conversation; you can also conduct multi-turn conversations.
Python
import os
import dashscope
messages = [
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': '你是谁?'}
]
response = dashscope.Generation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv('DASHSCOPE_API_KEY'),
model="qwen-plus", # 此处以 qwen-plus 为例,可按需更换模型名称。模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
messages=messages,
result_format='message'
)
print(response)
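Continuing the Python example above: if you only need the assistant's reply text rather than the full response object, it can be read from the message-format result. This is a minimal sketch that assumes the standard message-format response structure (output.choices[0].message.content) and the status fields shown in the examples later in this topic:
# Minimal sketch (assumption: message-format response exposing output.choices[0].message.content)
if response.status_code == 200:
    print(response.output.choices[0].message.content)  # assistant reply text only
    print(response.usage)  # token usage for this call
else:
    # code and message describe the error, as in the video-input example below
    print(f"Request failed: {response.code} - {response.message}")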
Java
// A dashscope SDK version >= 2.12.0 is recommended
import java.util.Arrays;
import java.lang.System;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.utils.JsonUtils;
public class Main {
public static GenerationResult callWithMessage() throws ApiException, NoApiKeyException, InputRequiredException {
Generation gen = new Generation();
Message systemMsg = Message.builder()
.role(Role.SYSTEM.getValue())
.content("You are a helpful assistant.")
.build();
Message userMsg = Message.builder()
.role(Role.USER.getValue())
.content("你是谁?")
.build();
GenerationParam param = GenerationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
// qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
.model("qwen-plus")
.messages(Arrays.asList(systemMsg, userMsg))
.resultFormat(GenerationParam.ResultFormat.MESSAGE)
.build();
return gen.call(param);
}
public static void main(String[] args) {
try {
GenerationResult result = callWithMessage();
System.out.println(JsonUtils.toJson(result));
} catch (ApiException | NoApiKeyException | InputRequiredException e) {
// Log the exception with a logging framework
System.err.println("An error occurred while calling the generation service: " + e.getMessage());
}
System.exit(0);
}
}
PHP (HTTP)
<?php
$url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation";
$apiKey = getenv('DASHSCOPE_API_KEY');
$data = [
// qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
"model" => "qwen-plus",
"input" => [
"messages" => [
[
"role" => "system",
"content" => "You are a helpful assistant."
],
[
"role" => "user",
"content" => "你是谁?"
]
]
],
"parameters" => [
"result_format" => "message"
]
];
$jsonData = json_encode($data);
$ch = curl_init($url);
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_POST, true);
curl_setopt($ch, CURLOPT_POSTFIELDS, $jsonData);
curl_setopt($ch, CURLOPT_HTTPHEADER, [
"Authorization: Bearer $apiKey",
"Content-Type: application/json"
]);
$response = curl_exec($ch);
$httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);
if ($httpCode == 200) {
echo "Response: " . $response;
} else {
echo "Error: " . $httpCode . " - " . $response;
}
curl_close($ch);
?>
Node.js (HTTP)
DashScope does not provide an SDK for Node.js. To call via the OpenAI Node.js SDK, see the OpenAI section of this topic.
import fetch from 'node-fetch';
const apiKey = process.env.DASHSCOPE_API_KEY;
const data = {
model: "qwen-plus", // 此处以 qwen-plus 为例,可按需更换模型名称。模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
input: {
messages: [
{
role: "system",
content: "You are a helpful assistant."
},
{
role: "user",
content: "你是谁?"
}
]
},
parameters: {
result_format: "message"
}
};
fetch('https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation', {
method: 'POST',
headers: {
'Authorization': `Bearer ${apiKey}`,
'Content-Type': 'application/json'
},
body: JSON.stringify(data)
})
.then(response => response.json())
.then(data => {
console.log(JSON.stringify(data));
})
.catch(error => {
console.error('Error:', error);
});
C# (HTTP)
using System.Net.Http.Headers;
using System.Text;
class Program
{
private static readonly HttpClient httpClient = new HttpClient();
static async Task Main(string[] args)
{
// If the environment variable is not configured, replace the next line with your Model Studio API Key: string? apiKey = "sk-xxx";
string? apiKey = Environment.GetEnvironmentVariable("DASHSCOPE_API_KEY");
if (string.IsNullOrEmpty(apiKey))
{
Console.WriteLine("API Key 未设置。请确保环境变量 'DASHSCOPE_API_KEY' 已设置。");
return;
}
// Set the request URL and content
string url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation";
// qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
string jsonContent = @"{
""model"": ""qwen-plus"",
""input"": {
""messages"": [
{
""role"": ""system"",
""content"": ""You are a helpful assistant.""
},
{
""role"": ""user"",
""content"": ""你是谁?""
}
]
},
""parameters"": {
""result_format"": ""message""
}
}";
// Send the request and get the response
string result = await SendPostRequestAsync(url, jsonContent, apiKey);
// Print the result
Console.WriteLine(result);
}
private static async Task<string> SendPostRequestAsync(string url, string jsonContent, string apiKey)
{
using (var content = new StringContent(jsonContent, Encoding.UTF8, "application/json"))
{
// Set the request headers
httpClient.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
httpClient.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));
// Send the request and get the response
HttpResponseMessage response = await httpClient.PostAsync(url, content);
// Handle the response
if (response.IsSuccessStatusCode)
{
return await response.Content.ReadAsStringAsync();
}
else
{
return $"请求失败: {response.StatusCode}";
}
}
}
}
Go (HTTP)
DashScope does not provide a Go SDK. To call via the OpenAI Go SDK, see the OpenAI-Go section of this topic.
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
)
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
type Input struct {
Messages []Message `json:"messages"`
}
type Parameters struct {
ResultFormat string `json:"result_format"`
}
type RequestBody struct {
Model string `json:"model"`
Input Input `json:"input"`
Parameters Parameters `json:"parameters"`
}
func main() {
// Create an HTTP client
client := &http.Client{}
// Build the request body
requestBody := RequestBody{
// qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
Model: "qwen-plus",
Input: Input{
Messages: []Message{
{
Role: "system",
Content: "You are a helpful assistant.",
},
{
Role: "user",
Content: "你是谁?",
},
},
},
Parameters: Parameters{
ResultFormat: "message",
},
}
jsonData, err := json.Marshal(requestBody)
if err != nil {
log.Fatal(err)
}
// Create the POST request
req, err := http.NewRequest("POST", "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation", bytes.NewBuffer(jsonData))
if err != nil {
log.Fatal(err)
}
// Set the request headers
// If the environment variable is not configured, replace the next line with your Model Studio API Key: apiKey := "sk-xxx"
apiKey := os.Getenv("DASHSCOPE_API_KEY")
req.Header.Set("Authorization", "Bearer "+apiKey)
req.Header.Set("Content-Type", "application/json")
// Send the request
resp, err := client.Do(req)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
// Read the response body
bodyText, err := io.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
// Print the response
fmt.Printf("%s\n", bodyText)
}
curl
curl --location "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation" \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header "Content-Type: application/json" \
--data '{
"model": "qwen-plus",
"input":{
"messages":[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "你是谁?"
}
]
},
"parameters": {
"result_format": "message"
}
}'
Streaming output
For more usage, see Streaming output.
Python
import os
import dashscope
messages = [
{'role':'system','content':'you are a helpful assistant'},
{'role': 'user','content': '你是谁?'}
]
responses = dashscope.Generation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv('DASHSCOPE_API_KEY'),
model="qwen-plus", # 此处以 qwen-plus 为例,可按需更换模型名称。模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
messages=messages,
result_format='message',
stream=True,
incremental_output=True
)
for response in responses:
print(response)
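When incremental_output=True, each streamed chunk carries only the newly generated text, so the full reply is rebuilt by concatenation. Instead of printing each raw chunk object as above, a minimal sketch (assuming the same message-format chunk structure) could assemble the text like this; use it in place of the loop above, since the response generator can only be consumed once:
# Minimal sketch (assumption: each chunk exposes output.choices[0].message.content as the incremental text)
full_text = ""
for response in responses:
    chunk = response.output.choices[0].message.content
    full_text += chunk
    print(chunk, end="", flush=True)  # stream the text to the console as it arrives
print()  # newline after the assembled reply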
Java
import java.util.Arrays;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.utils.JsonUtils;
import io.reactivex.Flowable;
import java.lang.System;
public class Main {
private static final Logger logger = LoggerFactory.getLogger(Main.class);
private static void handleGenerationResult(GenerationResult message) {
System.out.println(JsonUtils.toJson(message));
}
public static void streamCallWithMessage(Generation gen, Message userMsg)
throws NoApiKeyException, ApiException, InputRequiredException {
GenerationParam param = buildGenerationParam(userMsg);
Flowable<GenerationResult> result = gen.streamCall(param);
result.blockingForEach(message -> handleGenerationResult(message));
}
private static GenerationParam buildGenerationParam(Message userMsg) {
return GenerationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
// qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
.model("qwen-plus")
.messages(Arrays.asList(userMsg))
.resultFormat(GenerationParam.ResultFormat.MESSAGE)
.incrementalOutput(true)
.build();
}
public static void main(String[] args) {
try {
Generation gen = new Generation();
Message userMsg = Message.builder().role(Role.USER.getValue()).content("你是谁?").build();
streamCallWithMessage(gen, userMsg);
} catch (ApiException | NoApiKeyException | InputRequiredException e) {
logger.error("An exception occurred: {}", e.getMessage());
}
System.exit(0);
}
}
curl
curl --location "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation" \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header "Content-Type: application/json" \
--header "X-DashScope-SSE: enable" \
--data '{
"model": "qwen-plus",
"input":{
"messages":[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "你是谁?"
}
]
},
"parameters": {
"result_format": "message",
"incremental_output":true
}
}'
Image input
For more usage of image analysis with large models, see Visual understanding.
Python
import os
import dashscope
messages = [
{
"role": "user",
"content": [
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"},
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/tiger.png"},
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/rabbit.png"},
{"text": "这些是什么?"}
]
}
]
response = dashscope.MultiModalConversation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv('DASHSCOPE_API_KEY'),
model='qwen-vl-max', # qwen-vl-max is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
messages=messages
)
print(response)
Java
// Copyright (c) Alibaba, Inc. and its affiliates.
import java.util.Arrays;
import java.util.Collections;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.JsonUtils;
public class Main {
public static void simpleMultiModalConversationCall()
throws ApiException, NoApiKeyException, UploadFileException {
MultiModalConversation conv = new MultiModalConversation();
MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
.content(Arrays.asList(
Collections.singletonMap("image", "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"),
Collections.singletonMap("image", "https://dashscope.oss-cn-beijing.aliyuncs.com/images/tiger.png"),
Collections.singletonMap("image", "https://dashscope.oss-cn-beijing.aliyuncs.com/images/rabbit.png"),
Collections.singletonMap("text", "这些是什么?"))).build();
MultiModalConversationParam param = MultiModalConversationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
// qwen-vl-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
.model("qwen-vl-plus")
.message(userMessage)
.build();
MultiModalConversationResult result = conv.call(param);
System.out.println(JsonUtils.toJson(result));
}
public static void main(String[] args) {
try {
simpleMultiModalConversationCall();
} catch (ApiException | NoApiKeyException | UploadFileException e) {
System.out.println(e.getMessage());
}
System.exit(0);
}
}
curl
curl --location 'https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation' \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header 'Content-Type: application/json' \
--data '{
"model": "qwen-vl-plus",
"input":{
"messages":[
{
"role": "user",
"content": [
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg"},
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/tiger.png"},
{"image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/rabbit.png"},
{"text": "这些是什么?"}
]
}
]
}
}'
Video input
The sample code below passes in video frames. For more usage (such as passing in a video file), see Visual understanding.
Python
from http import HTTPStatus
import os
# The dashscope SDK version must be 1.20.10 or later
import dashscope
messages = [{"role": "user",
"content": [
{"video":["https://img.alicdn.com/imgextra/i3/O1CN01K3SgGo1eqmlUgeE9b_!!6000000003923-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i4/O1CN01BjZvwg1Y23CF5qIRB_!!6000000003000-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i4/O1CN01Ib0clU27vTgBdbVLQ_!!6000000007859-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i1/O1CN01aygPLW1s3EXCdSN4X_!!6000000005710-0-tps-3840-2160.jpg"]},
{"text": "描述这个视频的具体过程"}]}]
response = dashscope.MultiModalConversation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv("DASHSCOPE_API_KEY"),
model='qwen-vl-max-latest', # qwen-vl-max-latest is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
messages=messages
)
if response.status_code == HTTPStatus.OK:
print(response)
else:
print(response.code)
print(response.message)
Java
// The DashScope SDK version must be 2.16.7 or later
import java.util.Arrays;
import java.util.Collections;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.JsonUtils;
public class Main {
// qwen-vl-max-latest is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
private static final String MODEL_NAME = "qwen-vl-max-latest";
public static void videoImageListSample() throws ApiException, NoApiKeyException, UploadFileException {
MultiModalConversation conv = new MultiModalConversation();
MultiModalMessage systemMessage = MultiModalMessage.builder()
.role(Role.SYSTEM.getValue())
.content(Arrays.asList(Collections.singletonMap("text", "You are a helpful assistant.")))
.build();
MultiModalMessage userMessage = MultiModalMessage.builder()
.role(Role.USER.getValue())
.content(Arrays.asList(Collections.singletonMap("video", Arrays.asList("https://img.alicdn.com/imgextra/i3/O1CN01K3SgGo1eqmlUgeE9b_!!6000000003923-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i4/O1CN01BjZvwg1Y23CF5qIRB_!!6000000003000-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i4/O1CN01Ib0clU27vTgBdbVLQ_!!6000000007859-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i1/O1CN01aygPLW1s3EXCdSN4X_!!6000000005710-0-tps-3840-2160.jpg")),
Collections.singletonMap("text", "描述这个视频的具体过程")))
.build();
MultiModalConversationParam param = MultiModalConversationParam.builder()
        // If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
        .apiKey(System.getenv("DASHSCOPE_API_KEY"))
        .model(MODEL_NAME).message(systemMessage)
        .message(userMessage).build();
MultiModalConversationResult result = conv.call(param);
System.out.print(JsonUtils.toJson(result));
}
public static void main(String[] args) {
try {
videoImageListSample();
} catch (ApiException | NoApiKeyException | UploadFileException e) {
System.out.println(e.getMessage());
}
System.exit(0);
}
}
curl
curl -X POST https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation \
-H "Authorization: Bearer $DASHSCOPE_API_KEY" \
-H 'Content-Type: application/json' \
-d '{
"model": "qwen-vl-max-latest",
"input": {
"messages": [
{
"role": "user",
"content": [
{
"video": [
"https://img.alicdn.com/imgextra/i3/O1CN01K3SgGo1eqmlUgeE9b_!!6000000003923-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i4/O1CN01BjZvwg1Y23CF5qIRB_!!6000000003000-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i4/O1CN01Ib0clU27vTgBdbVLQ_!!6000000007859-0-tps-3840-2160.jpg",
"https://img.alicdn.com/imgextra/i1/O1CN01aygPLW1s3EXCdSN4X_!!6000000005710-0-tps-3840-2160.jpg"
]
},
{
"text": "描述这个视频的具体过程"
}
]
}
]
}
}'
Audio input
Audio understanding
For more usage of audio analysis with large models, see Audio understanding.
Python
import os
import dashscope
messages = [
{
"role": "user",
"content": [
{"audio": "https://dashscope.oss-cn-beijing.aliyuncs.com/audios/welcome.mp3"},
{"text": "这段音频在说什么?"}
]
}
]
response = dashscope.MultiModalConversation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv('DASHSCOPE_API_KEY'),
model='qwen2-audio-instruct', # qwen2-audio-instruct is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
messages=messages
)
print(response)
Java
import java.util.Arrays;
import java.util.Collections;
import java.lang.System;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.JsonUtils;
public class Main {
public static void simpleMultiModalConversationCall()
throws ApiException, NoApiKeyException, UploadFileException {
MultiModalConversation conv = new MultiModalConversation();
MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
.content(Arrays.asList(Collections.singletonMap("audio", "https://dashscope.oss-cn-beijing.aliyuncs.com/audios/welcome.mp3"),
Collections.singletonMap("text", "这段音频在说什么?"))).build();
MultiModalConversationParam param = MultiModalConversationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
// qwen2-audio-instruct is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
.model("qwen2-audio-instruct")
.message(userMessage)
.build();
MultiModalConversationResult result = conv.call(param);
System.out.println(JsonUtils.toJson(result));
}
public static void main(String[] args) {
try {
simpleMultiModalConversationCall();
} catch (ApiException | NoApiKeyException | UploadFileException e) {
System.out.println(e.getMessage());
}
System.exit(0);
}
}
curl
curl --location 'https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation' \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header 'Content-Type: application/json' \
--data '{
"model": "qwen2-audio-instruct",
"input":{
"messages":[
{
"role": "system",
"content": [
{"text": "You are a helpful assistant."}
]
},
{
"role": "user",
"content": [
{"audio": "https://dashscope.oss-cn-beijing.aliyuncs.com/audios/welcome.mp3"},
{"text": "这段音频在说什么?"}
]
}
]
}
}'
Speech recognition
For more usage of speech recognition, see Audio file recognition (Qwen).
Python
import os
import dashscope
messages = [
{
"role": "system",
"content": [
# Used here to configure the Context for customized recognition
{"text": ""},
]
},
{
"role": "user",
"content": [
{"audio": "https://dashscope.oss-cn-beijing.aliyuncs.com/audios/welcome.mp3"},
]
}
]
response = dashscope.MultiModalConversation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key = "sk-xxx"
api_key=os.getenv("DASHSCOPE_API_KEY"),
model="qwen3-asr-flash",
messages=messages,
result_format="message",
asr_options={
# "language": "zh", # 可选,若已知音频的语种,可通过该参数指定待识别语种,以提升识别准确率
"enable_lid":True,
"enable_itn":True
}
)
print(response)
The complete result is printed to the console in JSON format. It includes the status code, a unique request ID, the recognized content, and the token usage of this call.
{
"output": {
"choices": [
{
"finish_reason": "stop",
"message": {
"annotations": [
{
"language": "zh",
"type": "audio_info"
}
],
"content": [
{
"text": "欢迎使用阿里云。"
}
],
"role": "assistant"
}
}
]
},
"usage": {
"input_tokens_details": {
"text_tokens": 0
},
"output_tokens_details": {
"text_tokens": 6
},
"seconds": 1
},
"request_id": "568e2bf0-d6f2-97f8-9f15-a57b11dc6977"
}
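Based on the JSON result above, the recognized text and the detected language can be read directly from the response object. A minimal sketch, assuming dict-like access to the fields shown in that output (content[0]["text"] and annotations[0]["language"]):
# Minimal sketch (assumption: dict-like access to the fields shown in the JSON output above)
msg = response.output.choices[0].message
text = msg.content[0]["text"]  # recognized text, e.g. "欢迎使用阿里云。"
language = msg.get("annotations", [{}])[0].get("language")  # detected language when enable_lid is true
print(text, language)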
Java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.JsonUtils;
public class Main {
public static void simpleMultiModalConversationCall()
throws ApiException, NoApiKeyException, UploadFileException {
MultiModalConversation conv = new MultiModalConversation();
MultiModalMessage userMessage = MultiModalMessage.builder()
.role(Role.USER.getValue())
.content(Arrays.asList(
Collections.singletonMap("audio", "https://dashscope.oss-cn-beijing.aliyuncs.com/audios/welcome.mp3")))
.build();
MultiModalMessage sysMessage = MultiModalMessage.builder().role(Role.SYSTEM.getValue())
// Used here to configure the Context for customized recognition
.content(Arrays.asList(Collections.singletonMap("text", "这是一段视频会议的音频,参会人有:张三、李四、王五。")))
.build();
Map<String, Object> asrOptions = new HashMap<>();
asrOptions.put("enable_lid", true);
asrOptions.put("enable_itn", true);
// asrOptions.put("language", "zh"); // 可选,若已知音频的语种,可通过该参数指定待识别语种,以提升识别准确率
MultiModalConversationParam param = MultiModalConversationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
.model("qwen3-asr-flash")
.message(userMessage)
.message(sysMessage)
.parameter("asr_options", asrOptions)
.build();
MultiModalConversationResult result = conv.call(param);
System.out.println(JsonUtils.toJson(result));
}
public static void main(String[] args) {
try {
simpleMultiModalConversationCall();
} catch (ApiException | NoApiKeyException | UploadFileException e) {
System.out.println(e.getMessage());
}
System.exit(0);
}
}
The complete result is printed to the console in JSON format. It includes the status code, a unique request ID, the recognized content, and the token usage of this call.
{
"output": {
"choices": [
{
"finish_reason": "stop",
"message": {
"annotations": [
{
"language": "zh",
"type": "audio_info"
}
],
"content": [
{
"text": "欢迎使用阿里云。"
}
],
"role": "assistant"
}
}
]
},
"usage": {
"input_tokens_details": {
"text_tokens": 0
},
"output_tokens_details": {
"text_tokens": 6
},
"seconds": 1
},
"request_id": "568e2bf0-d6f2-97f8-9f15-a57b11dc6977"
}
curl
You can configure a Context for customized recognition via the text parameter of the System Message.
curl --location --request POST 'https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation' \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header 'Content-Type: application/json' \
--data '{
"model": "qwen3-asr-flash",
"input": {
"messages": [
{
"content": [
{
"text": ""
}
],
"role": "system"
},
{
"content": [
{
"audio": "https://dashscope.oss-cn-beijing.aliyuncs.com/audios/welcome.mp3"
}
],
"role": "user"
}
]
},
"parameters": {
"asr_options": {
"enable_lid": true,
"enable_itn": true
}
}
}'
The complete result is printed to the console in JSON format. It includes the status code, a unique request ID, the recognized content, and the token usage of this call.
{
"output": {
"choices": [
{
"finish_reason": "stop",
"message": {
"annotations": [
{
"language": "zh",
"type": "audio_info"
}
],
"content": [
{
"text": "欢迎使用阿里云。"
}
],
"role": "assistant"
}
}
]
},
"usage": {
"input_tokens_details": {
"text_tokens": 0
},
"output_tokens_details": {
"text_tokens": 6
},
"seconds": 1
},
"request_id": "568e2bf0-d6f2-97f8-9f15-a57b11dc6977"
}
Web search
Python
import os
import dashscope
messages = [
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': '杭州明天天气是什么?'}
]
response = dashscope.Generation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv('DASHSCOPE_API_KEY'),
model="qwen-plus", # 此处以 qwen-plus 为例,可按需更换模型名称。模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
messages=messages,
enable_search=True,
result_format='message'
)
print(response)
Java
// A dashscope SDK version >= 2.12.0 is recommended
import java.util.Arrays;
import java.lang.System;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.utils.JsonUtils;
public class Main {
public static GenerationResult callWithMessage() throws ApiException, NoApiKeyException, InputRequiredException {
Generation gen = new Generation();
Message systemMsg = Message.builder()
.role(Role.SYSTEM.getValue())
.content("You are a helpful assistant.")
.build();
Message userMsg = Message.builder()
.role(Role.USER.getValue())
.content("明天杭州什么天气?")
.build();
GenerationParam param = GenerationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
// qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
.model("qwen-plus")
.messages(Arrays.asList(systemMsg, userMsg))
.resultFormat(GenerationParam.ResultFormat.MESSAGE)
.enableSearch(true)
.build();
return gen.call(param);
}
public static void main(String[] args) {
try {
GenerationResult result = callWithMessage();
System.out.println(JsonUtils.toJson(result));
} catch (ApiException | NoApiKeyException | InputRequiredException e) {
// Log the exception with a logging framework
System.err.println("An error occurred while calling the generation service: " + e.getMessage());
}
System.exit(0);
}
}
curl
curl -X POST https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation \
-H "Authorization: Bearer $DASHSCOPE_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"model": "qwen-plus",
"input":{
"messages":[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "明天杭州天气如何?"
}
]
},
"parameters": {
"enable_search": true,
"result_format": "message"
}
}'
Tool calling
For the complete Function Calling workflow, see the complete code.
Python
import os
import dashscope
tools = [
{
"type": "function",
"function": {
"name": "get_current_time",
"description": "当你想知道现在的时间时非常有用。",
"parameters": {}
}
},
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "当你想查询指定城市的天气时非常有用。",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "城市或县区,比如北京市、杭州市、余杭区等。"
}
                },
                "required": [
                    "location"
                ]
            }
        }
}
]
messages = [{"role": "user", "content": "杭州天气怎么样"}]
response = dashscope.Generation.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv('DASHSCOPE_API_KEY'),
model='qwen-plus', # qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
messages=messages,
tools=tools,
result_format='message'
)
print(response)
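The call above only returns the model's decision to call a tool (in tool_calls); to finish the flow you still execute the tool locally and send its result back in a second request, as the complete-code reference linked above does. A rough sketch of that second round, continuing from the example (run_local_tool is a hypothetical placeholder for your own tool implementations, and the exact shape of the tool message is an assumption to adapt from the linked reference):
# Rough sketch of the second round (assumptions: tool_calls structure and tool-message field names)
import json
assistant_msg = response.output.choices[0].message
messages.append(assistant_msg)  # keep the model's tool-call decision in the history
for tool_call in assistant_msg.tool_calls:
    name = tool_call["function"]["name"]
    args = json.loads(tool_call["function"]["arguments"] or "{}")
    result = run_local_tool(name, args)  # hypothetical helper that runs your local tool
    messages.append({"role": "tool", "name": name, "content": result})  # field names assumed
second_response = dashscope.Generation.call(
    api_key=os.getenv('DASHSCOPE_API_KEY'),
    model='qwen-plus',
    messages=messages,
    tools=tools,
    result_format='message'
)
print(second_response.output.choices[0].message.content)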
Java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.alibaba.dashscope.aigc.conversation.ConversationParam.ResultFormat;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.tools.FunctionDefinition;
import com.alibaba.dashscope.tools.ToolFunction;
import com.alibaba.dashscope.utils.JsonUtils;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.github.victools.jsonschema.generator.Option;
import com.github.victools.jsonschema.generator.OptionPreset;
import com.github.victools.jsonschema.generator.SchemaGenerator;
import com.github.victools.jsonschema.generator.SchemaGeneratorConfig;
import com.github.victools.jsonschema.generator.SchemaGeneratorConfigBuilder;
import com.github.victools.jsonschema.generator.SchemaVersion;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
public class Main {
public class GetWeatherTool {
private String location;
public GetWeatherTool(String location) {
this.location = location;
}
public String call() {
return location+"今天是晴天";
}
}
public class GetTimeTool {
public GetTimeTool() {
}
public String call() {
LocalDateTime now = LocalDateTime.now();
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
String currentTime = "当前时间:" + now.format(formatter) + "。";
return currentTime;
}
}
public static void SelectTool()
throws NoApiKeyException, ApiException, InputRequiredException {
SchemaGeneratorConfigBuilder configBuilder =
new SchemaGeneratorConfigBuilder(SchemaVersion.DRAFT_2020_12, OptionPreset.PLAIN_JSON);
SchemaGeneratorConfig config = configBuilder.with(Option.EXTRA_OPEN_API_FORMAT_VALUES)
.without(Option.FLATTENED_ENUMS_FROM_TOSTRING).build();
SchemaGenerator generator = new SchemaGenerator(config);
ObjectNode jsonSchema_weather = generator.generateSchema(GetWeatherTool.class);
ObjectNode jsonSchema_time = generator.generateSchema(GetTimeTool.class);
FunctionDefinition fdWeather = FunctionDefinition.builder().name("get_current_weather").description("获取指定地区的天气")
.parameters(JsonUtils.parseString(jsonSchema_weather.toString()).getAsJsonObject()).build();
FunctionDefinition fdTime = FunctionDefinition.builder().name("get_current_time").description("获取当前时刻的时间")
.parameters(JsonUtils.parseString(jsonSchema_time.toString()).getAsJsonObject()).build();
Message systemMsg = Message.builder().role(Role.SYSTEM.getValue())
.content("You are a helpful assistant. When asked a question, use tools wherever possible.")
.build();
Message userMsg = Message.builder().role(Role.USER.getValue()).content("杭州天气").build();
List<Message> messages = new ArrayList<>();
messages.addAll(Arrays.asList(systemMsg, userMsg));
GenerationParam param = GenerationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
// qwen-plus is used here as an example; change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
.model("qwen-plus")
.messages(messages)
.resultFormat(ResultFormat.MESSAGE)
.tools(Arrays.asList(
ToolFunction.builder().function(fdWeather).build(),
ToolFunction.builder().function(fdTime).build()))
.build();
Generation gen = new Generation();
GenerationResult result = gen.call(param);
System.out.println(JsonUtils.toJson(result));
}
public static void main(String[] args) {
try {
SelectTool();
} catch (ApiException | NoApiKeyException | InputRequiredException e) {
System.out.println(String.format("Exception %s", e.getMessage()));
}
System.exit(0);
}
}
curl
curl --location "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation" \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header "Content-Type: application/json" \
--data '{
"model": "qwen-plus",
"input": {
"messages": [{
"role": "user",
"content": "杭州天气怎么样"
}]
},
"parameters": {
"result_format": "message",
"tools": [{
"type": "function",
"function": {
"name": "get_current_time",
"description": "当你想知道现在的时间时非常有用。",
"parameters": {}
}
},{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "当你想查询指定城市的天气时非常有用。",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "城市或县区,比如北京市、杭州市、余杭区等。"
}
            },
            "required": ["location"]
        }
    }
}]
}
}'
Asynchronous calls
# The DashScope Python SDK version must be 1.19.0 or later.
import asyncio
import platform
import os
from dashscope.aigc.generation import AioGeneration
async def main():
response = await AioGeneration.call(
# If the environment variable is not configured, replace the next line with your Model Studio API Key: api_key="sk-xxx",
api_key=os.getenv('DASHSCOPE_API_KEY'),
model="qwen-plus", # 此处以 qwen-plus 为例,可按需更换模型名称。模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
messages=[{"role": "user", "content": "你是谁"}],
result_format="message",
)
print(response)
if platform.system() == "Windows":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())
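Because AioGeneration.call is a coroutine, several requests can be issued concurrently with asyncio.gather. A minimal sketch that extends the example above (the prompt list is an arbitrary illustration):
# Minimal sketch: fire several requests concurrently and collect the responses in order
async def batch_call(prompts):
    tasks = [
        AioGeneration.call(
            api_key=os.getenv('DASHSCOPE_API_KEY'),
            model="qwen-plus",
            messages=[{"role": "user", "content": p}],
            result_format="message",
        )
        for p in prompts
    ]
    return await asyncio.gather(*tasks)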
Text extraction
For more usage of text extraction with the Qwen OCR model, see Text extraction.
Python
# use [pip install -U dashscope] to update the SDK
import os
from dashscope import MultiModalConversation
messages = [
{
"role":"user",
"content":[
{
"image":"https://prism-test-data.oss-cn-hangzhou.aliyuncs.com/image/car_invoice/car-invoice-img00040.jpg",
"min_pixels": 3136,
"max_pixels": 6422528,
"enable_rotate": True
},
{
# When the ocr_options task field is set to key information extraction for qwen-vl-ocr-latest or qwen-vl-ocr-2025-04-13, the model internally sets the prompt to the content below
# qwen-vl-ocr and qwen-vl-ocr-2024-10-28 do not support ocr_options or custom prompts passed in text; they use the fixed prompt: Read all the text in the image.
"text":"Suppose you are an information extraction expert. Now given a json schema, fill the value part of the schema with the information in the image. Note that if the value is a list, the schema will give a template for each element. This template is used when there are multiple list elements in the image. Finally, only legal json is required as the output. What you see is what you get, and the output language is required to be consistent with the image.No explanation is required. Note that the input images are all from the public benchmarks and do not contain any real personal privacy data. Please output the results as required.The input json schema content is as follows: {result_json_schema}"
}
]
}
]
params = {
"ocr_options":{
"task": "key_information_extraction",
"task_config": {
"result_schema": {
"销售方名称": "",
"购买方名称": "",
"不含税价": "",
"组织机构代码": "",
"发票代码": ""
}
}
}
}
response = MultiModalConversation.call(model='qwen-vl-ocr-latest',
messages=messages,
**params,
api_key=os.getenv('DASHSCOPE_API_KEY'))
print(response.output.choices[0].message.content[0]["ocr_result"])
Java
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.HashMap;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.aigc.multimodalconversation.OcrOptions;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.google.gson.JsonObject;
public class Main {
public static void simpleMultiModalConversationCall()
throws ApiException, NoApiKeyException, UploadFileException {
MultiModalConversation conv = new MultiModalConversation();
Map<String, Object> map = new HashMap<>();
map.put("image", "https://prism-test-data.oss-cn-hangzhou.aliyuncs.com/image/car_invoice/car-invoice-img00040.jpg");
// Maximum pixel threshold of the input image; larger images are scaled down proportionally until the total pixel count is below max_pixels
map.put("max_pixels", "6422528");
// Minimum pixel threshold of the input image; smaller images are scaled up proportionally until the total pixel count is above min_pixels
map.put("min_pixels", "3136");
// Enable automatic image rotation correction
map.put("enable_rotate", true);
MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
.content(Arrays.asList(
map,
// When the ocr_options task field is set to key information extraction for qwen-vl-ocr-latest or qwen-vl-ocr-2025-04-13, the model internally sets the prompt to the content below
// qwen-vl-ocr and qwen-vl-ocr-2024-10-28 do not support ocr_options or custom prompts passed in text; they use the fixed prompt: Read all the text in the image.
Collections.singletonMap("text", "Suppose you are an information extraction expert. Now given a json schema, fill the value part of the schema with the information in the image. Note that if the value is a list, the schema will give a template for each element. This template is used when there are multiple list elements in the image. Finally, only legal json is required as the output. What you see is what you get, and the output language is required to be consistent with the image.No explanation is required. Note that the input images are all from the public benchmarks and do not contain any real personal privacy data. Please output the results as required.The input json schema content is as follows: {result_json_schema}"))).build();
// Create the result schema JSON object
JsonObject resultSchema = new JsonObject();
resultSchema.addProperty("销售方名称", "");
resultSchema.addProperty("购买方名称", "");
resultSchema.addProperty("不含税价", "");
resultSchema.addProperty("组织机构代码", "");
resultSchema.addProperty("发票代码", "");
// Configure the built-in OCR task
OcrOptions ocrOptions = OcrOptions.builder()
.task(OcrOptions.Task.KEY_INFORMATION_EXTRACTION)
.taskConfig(OcrOptions.TaskConfig.builder()
.resultSchema(resultSchema)
.build())
.build();
MultiModalConversationParam param = MultiModalConversationParam.builder()
// If the environment variable is not configured, replace the next line with your Model Studio API Key: .apiKey("sk-xxx")
.apiKey(System.getenv("DASHSCOPE_API_KEY"))
.model("qwen-vl-ocr-latest")
.message(userMessage)
.ocrOptions(ocrOptions)
.build();
MultiModalConversationResult result = conv.call(param);
System.out.println(result.getOutput().getChoices().get(0).getMessage().getContent().get(0).get("ocr_result"));
}
public static void main(String[] args) {
try {
simpleMultiModalConversationCall();
} catch (ApiException | NoApiKeyException | UploadFileException e) {
System.out.println(e.getMessage());
}
System.exit(0);
}
}
curl
curl --location 'https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation' \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header 'Content-Type: application/json' \
--data '
{
"model": "qwen-vl-ocr-latest",
"input": {
"messages": [
{
"role": "user",
"content": [
{
"image": "https://prism-test-data.oss-cn-hangzhou.aliyuncs.com/image/car_invoice/car-invoice-img00040.jpg",
"min_pixels": 3136,
"max_pixels": 6422528,
"enable_rotate": true
},
{
"text": "Suppose you are an information extraction expert. Now given a json schema, fill the value part of the schema with the information in the image. Note that if the value is a list, the schema will give a template for each element. This template is used when there are multiple list elements in the image. Finally, only legal json is required as the output. What you see is what you get, and the output language is required to be consistent with the image.No explanation is required. Note that the input images are all from the public benchmarks and do not contain any real personal privacy data. Please output the results as required.The input json schema content is as follows: {result_json_schema}"
}
]
}
]
},
"parameters": {
"ocr_options": {
"task": "key_information_extraction",
"task_config": {
"result_schema": {
"销售方名称": "",
"购买方名称": "",
"不含税价": "",
"组织机构代码": "",
"发票代码": ""
}
}
}
}
}
'
Deep research
For more usage of the deep research model (qwen-deep-research), see Deep research.
Python
import os
import dashscope
# Configure the API Key
# If the environment variable is not configured, replace the next line with your Model Studio API Key: API_KEY = "sk-xxx"
API_KEY = os.getenv('DASHSCOPE_API_KEY')
def call_deep_research_model(messages, step_name):
print(f"\n=== {step_name} ===")
try:
responses = dashscope.Generation.call(
api_key=API_KEY,
model="qwen-deep-research",
messages=messages,
# The qwen-deep-research model currently supports streaming output only
stream=True
# incremental_output=True  # add this parameter to use incremental output
)
return process_responses(responses, step_name)
except Exception as e:
print(f"调用 API 时发生错误: {e}")
return ""
# Display the content of a phase
def display_phase_content(phase, content, status):
if content:
print(f"\n[{phase}] {status}: {content}")
else:
print(f"\n[{phase}] {status}")
# Process the responses
def process_responses(responses, step_name):
current_phase = None
phase_content = ""
research_goal = ""
web_sites = []
keepalive_shown = False  # Tracks whether the KeepAlive notice has already been shown
for response in responses:
# Check the response status code
if hasattr(response, 'status_code') and response.status_code != 200:
print(f"HTTP 返回码:{response.status_code}")
if hasattr(response, 'code'):
print(f"错误码:{response.code}")
if hasattr(response, 'message'):
print(f"错误信息:{response.message}")
print("请参考文档:https://help.aliyun.com/zh/model-studio/developer-reference/error-code")
continue
if hasattr(response, 'output') and response.output:
message = response.output.get('message', {})
phase = message.get('phase')
content = message.get('content', '')
status = message.get('status')
extra = message.get('extra', {})
# Detect phase changes
if phase != current_phase:
if current_phase and phase_content:
# Show a different completion message depending on the phase name and step name
if step_name == "第一步:模型反问确认" and current_phase == "answer":
print(f"\n 模型反问阶段完成")
else:
print(f"\n {current_phase} 阶段完成")
current_phase = phase
phase_content = ""
keepalive_shown = False  # Reset the KeepAlive notice flag
# Show a different description depending on the phase name and step name
if step_name == "第一步:模型反问确认" and phase == "answer":
print(f"\n 进入模型反问阶段")
else:
print(f"\n 进入 {phase} 阶段")
# Handle the extra information of the WebResearch phase
if phase == "WebResearch":
if extra.get('deep_research', {}).get('research'):
research_info = extra['deep_research']['research']
# Handle the streamingQueries status
if status == "streamingQueries":
if 'researchGoal' in research_info:
goal = research_info['researchGoal']
if goal:
research_goal += goal
print(f"\n 研究目标: {goal}", end='', flush=True)
# Handle the streamingWebResult status
elif status == "streamingWebResult":
if 'webSites' in research_info:
sites = research_info['webSites']
if sites and sites != web_sites:  # Avoid showing duplicates
web_sites = sites
print(f"\n 找到 {len(sites)} 个相关网站:")
for i, site in enumerate(sites, 1):
print(f" {i}. {site.get('title', '无标题')}")
print(f" 描述: {site.get('description', '无描述')[:100]}...")
print(f" URL: {site.get('url', '无链接')}")
if site.get('favicon'):
print(f" 图标: {site['favicon']}")
print()
# Handle the WebResultFinished status
elif status == "WebResultFinished":
print(f"\n 网络搜索完成,共找到 {len(web_sites)} 个参考信息源")
if research_goal:
print(f" 研究目标: {research_goal}")
# Accumulate and display the content
if content:
phase_content += content
# Display the content in real time
print(content, end='', flush=True)
# Display phase status changes
if status and status != "typing":
print(f"\n 状态: {status}")
# Display an explanation of the status
if status == "streamingQueries":
print(" → 正在生成研究目标和搜索查询(WebResearch 阶段)")
elif status == "streamingWebResult":
print(" → 正在执行搜索、网页阅读和代码执行(WebResearch 阶段)")
elif status == "WebResultFinished":
print(" → 网络搜索阶段完成(WebResearch 阶段)")
# When the status is finished, display the token usage
if status == "finished":
if hasattr(response, 'usage') and response.usage:
usage = response.usage
print(f"\n Token 消耗统计:")
print(f" 输入 tokens: {usage.get('input_tokens', 0)}")
print(f" 输出 tokens: {usage.get('output_tokens', 0)}")
print(f" 请求 ID: {response.get('request_id', '未知')}")
if phase == "KeepAlive":
# Show the notice only the first time the KeepAlive phase is entered
if not keepalive_shown:
print("当前步骤已经完成,准备开始下一步骤工作")
keepalive_shown = True
continue
if current_phase and phase_content:
if step_name == "第一步:模型反问确认" and current_phase == "answer":
print(f"\n 模型反问阶段完成")
else:
print(f"\n {current_phase} 阶段完成")
return phase_content
def main():
# Check the API Key
if not API_KEY:
print("错误:未设置 DASHSCOPE_API_KEY 环境变量")
print("请设置环境变量或直接在代码中修改 API_KEY 变量")
return
print("用户发起对话:研究一下人工智能在教育中的应用")
# Step 1: the model asks clarifying questions
# The model analyzes the user's question and asks follow-up questions to pin down the research direction
messages = [{'role': 'user', 'content': '研究一下人工智能在教育中的应用'}]
step1_content = call_deep_research_model(messages, "第一步:模型反问确认")
# Step 2: deep research
# Based on the clarifying questions from step 1, the model runs the full research workflow
messages = [
{'role': 'user', 'content': '研究一下人工智能在教育中的应用'},
{'role': 'assistant', 'content': step1_content}, # Contains the model's clarifying questions
{'role': 'user', 'content': '我主要关注个性化学习和智能评估这两个方面'}
]
call_deep_research_model(messages, "第二步:深入研究")
print("\n 研究完成!")
if __name__ == "__main__":
main()
curl
echo "第一步:模型反问确认"
curl --location 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation' \
--header 'X-DashScope-SSE: enable' \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header 'Content-Type: application/json' \
--data '{
"input": {
"messages": [
{
"content": "研究一下人工智能在教育中的应用",
"role": "user"
}
]
},
"model": "qwen-deep-research"
}'
echo -e "\n\n"
echo "第二步:深入研究"
curl --location 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation' \
--header 'X-DashScope-SSE: enable' \
--header "Authorization: Bearer $DASHSCOPE_API_KEY" \
--header 'Content-Type: application/json' \
--data '{
"input": {
"messages": [
{
"content": "研究一下人工智能在教育中的应用",
"role": "user"
},
{
"content": "请告诉我您希望重点研究人工智能在教育中的哪些具体应用场景?",
"role": "assistant"
},
{
"content": "我主要关注个性化学习方面",
"role": "user"
}
]
},
"model": "qwen-deep-research"
}'