Skip to content

Commit 7098634

Browse files
authored
Merge pull request #87 from cnblogs/support-deepseek-reasoning
feat: support reasoning models
2 parents 5b31511 + 1d41b16 commit 7098634

20 files changed

+2472
-29
lines changed

README.md

+54-8
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ builder.AddDashScopeClient(builder.Configuration);
4747
```json
4848
{
4949
"DashScope": {
50-
"ApiKey": "your-api-key"
50+
"ApiKey": "your-api-key",
5151
}
5252
}
5353
```
@@ -66,21 +66,54 @@ public class YourService(IDashScopeClient client)
6666

6767
# Supported APIs
6868

69-
- Text Embedding API - `dashScopeClient.GetTextEmbeddingsAsync()`
70-
- Text Generation API(qwen-turbo, qwen-max, etc.) - `dashScopeClient.GetQwenCompletionAsync()` and `dashScopeClient.GetQWenCompletionStreamAsync()`
71-
- BaiChuan Models - Use `dashScopeClient.GetBaiChuanTextCompletionAsync()`
72-
- LLaMa2 Models - `dashScopeClient.GetLlama2TextCompletionAsync()`
73-
- Multimodal Generation API(qwen-vl-max, etc.) - `dashScopeClient.GetQWenMultimodalCompletionAsync()` and `dashScopeClient.GetQWenMultimodalCompletionStreamAsync()`
69+
- Text Embedding API - `GetTextEmbeddingsAsync()`
70+
- Text Generation API(qwen-turbo, qwen-max, etc.) - `GetQWenCompletionAsync()` and `GetQWenCompletionStreamAsync()`
71+
- DeepSeek Models - `GetDeepSeekChatCompletionAsync()` and `GetDeepSeekChatCompletionStreamAsync()`
72+
- BaiChuan Models - Use `GetBaiChuanTextCompletionAsync()`
73+
- LLaMa2 Models - `GetLlama2TextCompletionAsync()`
74+
- Multimodal Generation API(qwen-vl-max, etc.) - `GetQWenMultimodalCompletionAsync()` and `GetQWenMultimodalCompletionStreamAsync()`
7475
- Wanx Models(Image generation, background generation, etc)
7576
- Image Synthesis - `CreateWanxImageSynthesisTaskAsync()` and `GetWanxImageSynthesisTaskAsync()`
7677
- Image Generation - `CreateWanxImageGenerationTaskAsync()` and `GetWanxImageGenerationTaskAsync()`
7778
- Background Image Generation - `CreateWanxBackgroundGenerationTaskAsync()` and `GetWanxBackgroundGenerationTaskAsync()`
78-
- File API that used by Qwen-Long - `dashScopeClient.UploadFileAsync()` and `dashScopeClient.DeleteFileAsync`
79+
- File API used by Qwen-Long - `UploadFileAsync()` and `DeleteFileAsync()`
7980
- Application call - `GetApplicationResponseAsync()` and `GetApplicationResponseStreamAsync()`
8081

8182
# Examples
8283

83-
Visit [tests](./test) for more usage of each api.
84+
Visit [snapshots](./test/Cnblogs.DashScope.Sdk.UnitTests/Utils/Snapshots.cs) for calling samples.
85+
86+
Visit [tests](./test/Cnblogs.DashScope.Sdk.UnitTests) for more usage of each api.
87+
88+
## General Text Completion API
89+
90+
Use `client.GetTextCompletionAsync` and `client.GetTextCompletionStreamAsync` to access text generation api directly.
91+
92+
```csharp
93+
var completion = await dashScopeClient.GetTextCompletionAsync(
94+
new ModelRequest<TextGenerationInput, ITextGenerationParameters>
95+
{
96+
Model = "your-model-name",
97+
Input = new TextGenerationInput { Prompt = prompt },
98+
Parameters = new TextGenerationParameters()
99+
{
100+
// control parameters as you wish.
101+
EnableSearch = true
102+
}
103+
});
104+
var completions = dashScopeClient.GetTextCompletionStreamAsync(
105+
new ModelRequest<TextGenerationInput, ITextGenerationParameters>
106+
{
107+
Model = "your-model-name",
108+
Input = new TextGenerationInput { Messages = [TextChatMessage.System("you are a helpful assistant"), TextChatMessage.User("How are you?")] },
109+
Parameters = new TextGenerationParameters()
110+
{
111+
// control parameters as you wish.
112+
EnableSearch = true,
113+
IncrementalOutput = true
114+
}
115+
});
116+
```
84117

85118
## Single Text Completion
86119

@@ -90,6 +123,19 @@ var completion = await client.GetQWenCompletionAsync(QWenLlm.QWenMax, prompt);
90123
Console.WriteLine(completion.Output.Text);
91124
```
92125

126+
## Reasoning
127+
128+
Use `completion.Output.Choices![0].Message.ReasoningContent` to access the reasoning content from model.
129+
130+
```csharp
131+
var history = new List<TextChatMessage>
132+
{
133+
TextChatMessage.User("Calculate 1+1")
134+
};
135+
var completion = await client.GetDeepSeekChatCompletionAsync(DeepSeekLlm.DeepSeekR1, history);
136+
Console.WriteLine(completion.Output.Choices![0].Message.ReasoningContent);
137+
```
138+
93139
## Multi-round chat
94140

95141
```csharp

README.zh-Hans.md

+59-10
Original file line numberDiff line numberDiff line change
@@ -66,23 +66,59 @@ public class YourService(IDashScopeClient client)
6666

6767
# 支持的 API
6868

69-
- 通用文本向量 - `dashScopeClient.GetTextEmbeddingsAsync()`
70-
- 通义千问(`qwen-turbo``qwen-max` 等) - `dashScopeClient.GetQwenCompletionAsync()` and `dashScopeClient.GetQWenCompletionStreamAsync()`
71-
- 百川开源大模型 - Use `dashScopeClient.GetBaiChuanTextCompletionAsync()`
72-
- LLaMa2 大语言模型 - `dashScopeClient.GetLlama2TextCompletionAsync()`
73-
- 通义千问 VL 和通义千问 Audio(`qwen-vl-max``qwen-audio`) - `dashScopeClient.GetQWenMultimodalCompletionAsync()` and `dashScopeClient.GetQWenMultimodalCompletionStreamAsync()`
69+
- 通用文本向量 - `GetTextEmbeddingsAsync()`
70+
- 通义千问(`qwen-turbo``qwen-max` 等) - `GetQWenCompletionAsync()``GetQWenCompletionStreamAsync()`
71+
- DeepSeek 系列模型(`deepseek-r1``deepseek-v3` 等) - `GetDeepSeekChatCompletionAsync()``GetDeepSeekChatCompletionStreamAsync()`
72+
- 百川开源大模型 - `GetBaiChuanTextCompletionAsync()`
73+
- LLaMa2 大语言模型 - `GetLlama2TextCompletionAsync()`
74+
- 通义千问 VL 和通义千问 Audio(`qwen-vl-max``qwen-audio`) - `GetQWenMultimodalCompletionAsync()``GetQWenMultimodalCompletionStreamAsync()`
7475
- 通义万相系列
75-
- 文生图 - `CreateWanxImageSynthesisTaskAsync()` and `GetWanxImageSynthesisTaskAsync()`
76-
- 人像风格重绘 - `CreateWanxImageGenerationTaskAsync()` and `GetWanxImageGenerationTaskAsync()`
77-
- 图像背景生成 - `CreateWanxBackgroundGenerationTaskAsync()` and `GetWanxBackgroundGenerationTaskAsync()`
78-
- 适用于 QWen-Long 的文件 API `dashScopeClient.UploadFileAsync()` and `dashScopeClient.DeleteFileAsync`
79-
- 应用调用 `dashScopeClient.GetApplicationResponseAsync``dashScopeClient.GetApplicationResponseStreamAsync()`
76+
- 文生图 - `CreateWanxImageSynthesisTaskAsync()` `GetWanxImageSynthesisTaskAsync()`
77+
- 人像风格重绘 - `CreateWanxImageGenerationTaskAsync()` `GetWanxImageGenerationTaskAsync()`
78+
- 图像背景生成 - `CreateWanxBackgroundGenerationTaskAsync()` `GetWanxBackgroundGenerationTaskAsync()`
79+
- 适用于 QWen-Long 的文件 API `UploadFileAsync()` `DeleteFileAsync`
80+
- 应用调用 `GetApplicationResponseAsync``GetApplicationResponseStreamAsync()`
8081
- 其他使用相同 Endpoint 的模型
8182

8283
# 示例
8384

85+
查看 [Snapshots.cs](./test/Cnblogs.DashScope.Sdk.UnitTests/Utils/Snapshots.cs) 获得 API 调用参数示例.
86+
8487
查看 [测试](./test) 获得更多 API 使用示例。
8588

89+
## 文本生成
90+
91+
使用 `dashScopeClient.GetTextCompletionAsync``dashScopeClient.GetTextCompletionStreamAsync` 来直接访问文本生成接口。
92+
93+
相关文档:https://help.aliyun.com/zh/model-studio/user-guide/text-generation/
94+
95+
```csharp
96+
var completion = await dashScopeClient.GetTextCompletionAsync(
97+
new ModelRequest<TextGenerationInput, ITextGenerationParameters>
98+
{
99+
Model = "your-model-name",
100+
Input = new TextGenerationInput { Prompt = prompt },
101+
Parameters = new TextGenerationParameters()
102+
{
103+
// control parameters as you wish.
104+
EnableSearch = true
105+
}
106+
});
107+
108+
var completions = dashScopeClient.GetTextCompletionStreamAsync(
109+
new ModelRequest<TextGenerationInput, ITextGenerationParameters>
110+
{
111+
Model = "your-model-name",
112+
Input = new TextGenerationInput { Messages = [TextChatMessage.System("you are a helpful assistant"), TextChatMessage.User("How are you?")] },
113+
Parameters = new TextGenerationParameters()
114+
{
115+
// control parameters as you wish.
116+
EnableSearch = true,
117+
IncrementalOutput = true
118+
}
119+
});
120+
```
121+
86122
## 单轮对话
87123

88124
```csharp
@@ -108,6 +144,19 @@ var completion = await client.GetQWenChatCompletionAsync(QWenLlm.QWenMax, histor
108144
Console.WriteLine(completion.Output.Choices[0].Message.Content); // The number is 42
109145
```
110146

147+
## 推理
148+
149+
使用推理模型时,模型的思考过程可以通过 `ReasoningContent` 属性获取。
150+
151+
```csharp
152+
var history = new List<TextChatMessage>
153+
{
154+
TextChatMessage.User("Calculate 1+1")
155+
};
156+
var completion = await client.GetDeepSeekChatCompletionAsync(DeepSeekLlm.DeepSeekR1, history);
157+
Console.WriteLine(completion.Output.Choices![0].Message.ReasoningContent);
158+
```
159+
111160
## 工具调用
112161

113162
创建一个可供模型使用的方法。

src/Cnblogs.DashScope.AI/DashScopeChatClient.cs

+3-2
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
183183
{
184184
update.Contents.Add(
185185
new UsageContent(
186-
new UsageDetails()
186+
new UsageDetails
187187
{
188188
InputTokenCount = response.Usage.InputTokens,
189189
OutputTokenCount = response.Usage.OutputTokens,
@@ -208,7 +208,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
208208
RawRepresentation = completion.Messages[0].RawRepresentation,
209209
CreatedAt = completion.CreatedAt,
210210
FinishReason = completion.FinishReason,
211-
ModelId = completion.ModelId,
211+
ModelId = completion.ModelId
212212
};
213213
}
214214
else
@@ -467,6 +467,7 @@ private IEnumerable<TextChatMessage> ToTextChatMessages(
467467
from.Text,
468468
from.AuthorName,
469469
null,
470+
null,
470471
functionCall.Count > 0 ? functionCall : null);
471472
}
472473
}

src/Cnblogs.DashScope.Core/TextChatMessage.cs

+10-2
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,15 @@ namespace Cnblogs.DashScope.Core;
1010
/// <param name="Content">The content of this message.</param>
1111
/// <param name="Name">Used when role is tool, represents the function name of this message generated by.</param>
1212
/// <param name="Partial">Notify model that next message should use this message as prefix.</param>
13+
/// <param name="ReasoningContent">Reasoning content for reasoning model.</param>
1314
/// <param name="ToolCalls">Calls to the function.</param>
1415
[method: JsonConstructor]
1516
public record TextChatMessage(
1617
string Role,
1718
string Content,
1819
string? Name = null,
1920
bool? Partial = null,
21+
string? ReasoningContent = null,
2022
List<ToolCall>? ToolCalls = null) : IMessage<string>
2123
{
2224
/// <summary>
@@ -84,11 +86,17 @@ public static TextChatMessage System(string content)
8486
/// <param name="content">The content of the message.</param>
8587
/// <param name="partial">When set to true, content of this message would be the prefix of next model output.</param>
8688
/// <param name="name">Author name.</param>
89+
/// <param name="reasoningContent">Think content when reasoning.</param>
8790
/// <param name="toolCalls">Tool calls by model.</param>
8891
/// <returns></returns>
89-
public static TextChatMessage Assistant(string content, bool? partial = null, string? name = null, List<ToolCall>? toolCalls = null)
92+
public static TextChatMessage Assistant(
93+
string content,
94+
bool? partial = null,
95+
string? name = null,
96+
string? reasoningContent = null,
97+
List<ToolCall>? toolCalls = null)
9098
{
91-
return new TextChatMessage(DashScopeRoleNames.Assistant, content, name, partial, toolCalls);
99+
return new TextChatMessage(DashScopeRoleNames.Assistant, content, name, partial, reasoningContent, toolCalls);
92100
}
93101

94102
/// <summary>
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
namespace Cnblogs.DashScope.Sdk.DeepSeek;

/// <summary>
/// DeepSeek models selectable when calling the DeepSeek text-generation extension methods.
/// </summary>
public enum DeepSeekLlm
{
    /// <summary>
    /// deepseek-v3 model.
    /// </summary>
    DeepSeekV3 = 1,

    /// <summary>
    /// deepseek-r1 model. Its reasoning output is surfaced through the message's ReasoningContent property.
    /// </summary>
    DeepSeekR1 = 2
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
namespace Cnblogs.DashScope.Sdk.DeepSeek;

/// <summary>
/// Maps <see cref="DeepSeekLlm"/> members to the model name strings used on the wire.
/// </summary>
internal static class DeepSeekLlmName
{
    /// <summary>
    /// Gets the DashScope model name for the given DeepSeek model.
    /// </summary>
    /// <param name="model">The DeepSeek model.</param>
    /// <returns>The model name string, e.g. "deepseek-r1".</returns>
    public static string GetModelName(this DeepSeekLlm model)
    {
        // Guard-clause chain: each known member maps to one name, anything
        // else is rejected via the SDK's shared throw helper.
        if (model == DeepSeekLlm.DeepSeekV3)
        {
            return "deepseek-v3";
        }

        if (model == DeepSeekLlm.DeepSeekR1)
        {
            return "deepseek-r1";
        }

        return ThrowHelper.UnknownModelName(nameof(model), model);
    }
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
using Cnblogs.DashScope.Core;

namespace Cnblogs.DashScope.Sdk.DeepSeek;

/// <summary>
/// Extensions for calling DeepSeek models, see: https://help.aliyun.com/zh/model-studio/developer-reference/deepseek
/// </summary>
public static class DeepSeekTextGenerationApi
{
    // Streaming calls always request incremental output so each chunk carries
    // only the newly generated tokens.
    private static TextGenerationParameters StreamingParameters { get; } = new() { IncrementalOutput = true };

    /// <summary>
    /// Get text completion from deepseek model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="model">The model to use.</param>
    /// <param name="messages">The context messages.</param>
    /// <returns>The completion response.</returns>
    public static async Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
        GetDeepSeekChatCompletionAsync(
            this IDashScopeClient client,
            DeepSeekLlm model,
            IEnumerable<TextChatMessage> messages)
    {
        // Resolve the enum to its wire-format name, then delegate to the string overload.
        var modelName = model.GetModelName();
        return await client.GetDeepSeekChatCompletionAsync(modelName, messages);
    }

    /// <summary>
    /// Get text completion from deepseek model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="model">The model name.</param>
    /// <param name="messages">The context messages.</param>
    /// <returns>The completion response.</returns>
    public static async Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
        GetDeepSeekChatCompletionAsync(
            this IDashScopeClient client,
            string model,
            IEnumerable<TextChatMessage> messages)
    {
        // Non-streaming calls send no extra generation parameters.
        return await client.GetTextCompletionAsync(BuildRequest(model, messages, null));
    }

    /// <summary>
    /// Get streamed completion from deepseek model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="model">The model to use.</param>
    /// <param name="messages">The context messages.</param>
    /// <returns>The streamed completion chunks.</returns>
    public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
        GetDeepSeekChatCompletionStreamAsync(
            this IDashScopeClient client,
            DeepSeekLlm model,
            IEnumerable<TextChatMessage> messages)
    {
        var modelName = model.GetModelName();
        return client.GetDeepSeekChatCompletionStreamAsync(modelName, messages);
    }

    /// <summary>
    /// Get streamed completion from deepseek model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="model">The model name.</param>
    /// <param name="messages">The context messages.</param>
    /// <returns>The streamed completion chunks.</returns>
    public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
        GetDeepSeekChatCompletionStreamAsync(
            this IDashScopeClient client,
            string model,
            IEnumerable<TextChatMessage> messages)
    {
        return client.GetTextCompletionStreamAsync(BuildRequest(model, messages, StreamingParameters));
    }

    // Shared request construction for both the blocking and streaming overloads.
    private static ModelRequest<TextGenerationInput, ITextGenerationParameters> BuildRequest(
        string model,
        IEnumerable<TextChatMessage> messages,
        ITextGenerationParameters? parameters)
    {
        return new ModelRequest<TextGenerationInput, ITextGenerationParameters>
        {
            Model = model,
            Input = new TextGenerationInput { Messages = messages },
            Parameters = parameters
        };
    }
}

src/Cnblogs.DashScope.Sdk/QWen/QWenLlm.cs

+6-1
Original file line numberDiff line numberDiff line change
@@ -133,5 +133,10 @@ public enum QWenLlm
133133
/// <summary>
134134
/// qvq-72b-preview
135135
/// </summary>
136-
QwQ72BPreview = 25
136+
QvQ72BPreview = 25,
137+
138+
/// <summary>
139+
/// qwq-32b
140+
/// </summary>
141+
QwQ32B = 26
137142
}

src/Cnblogs.DashScope.Sdk/QWen/QWenLlmNames.cs

+2-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,8 @@ public static string GetModelName(this QWenLlm llm)
3030
QWenLlm.QWenPlusLatest => "qwen-plus-latest",
3131
QWenLlm.QWenTurboLatest => "qwen-turbo-latest",
3232
QWenLlm.QwQ32BPreview => "qwq-32b-preview",
33-
QWenLlm.QwQ72BPreview => "qwq-72b-preview",
33+
QWenLlm.QvQ72BPreview => "qvq-72b-preview",
34+
QWenLlm.QwQ32B => "qwq-32b",
3435
_ => ThrowHelper.UnknownModelName(nameof(llm), llm)
3536
};
3637
}

0 commit comments

Comments
 (0)