Skip to content

Commit ec529da

Browse files
committed
chore: obsolete qwen, qwen-vl and deepseek shorthand overrides
1 parent 01beef6 commit ec529da

File tree

3 files changed

+273
-0
lines changed

3 files changed

+273
-0
lines changed

src/Cnblogs.DashScope.Sdk/DeepSeek/DeepSeekTextGenerationApi.cs

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ namespace Cnblogs.DashScope.Sdk.DeepSeek;
55
/// <summary>
66
/// Extensions for calling DeepSeek models, see: https://help.aliyun.com/zh/model-studio/developer-reference/deepseek
77
/// </summary>
8+
[Obsolete("Use generic GetTextCompletionAsync/GetTextCompletionStreamAsync instead")]
89
public static class DeepSeekTextGenerationApi
910
{
1011
private static TextGenerationParameters StreamingParameters { get; } = new() { IncrementalOutput = true };
@@ -16,6 +17,23 @@ public static class DeepSeekTextGenerationApi
1617
/// <param name="model">The model name.</param>
1718
/// <param name="messages">The context messages.</param>
1819
/// <returns></returns>
20+
/// <remarks>
21+
/// Migrate from
22+
/// <code>
23+
/// client.GetDeepSeekChatCompletionAsync(DeepSeekLlm.DeepSeekV3, messages);
24+
/// </code>
25+
/// to
26+
/// <code>
27+
/// client.GetTextCompletionAsync(
28+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
29+
/// {
30+
/// Model = "deepseek-v3",
31+
/// Input = new TextGenerationInput { Messages = messages },
32+
/// Parameters = null
33+
/// });
34+
/// </code>
35+
/// </remarks>
36+
[Obsolete("Use GetTextCompletionAsync() instead, check remarks section.")]
1937
public static async Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
2038
GetDeepSeekChatCompletionAsync(
2139
this IDashScopeClient client,
@@ -32,6 +50,23 @@ public static async Task<ModelResponse<TextGenerationOutput, TextGenerationToken
3250
/// <param name="model">The model name.</param>
3351
/// <param name="messages">The context messages.</param>
3452
/// <returns></returns>
53+
/// <remarks>
54+
/// Migrate from
55+
/// <code>
56+
/// client.GetDeepSeekChatCompletionAsync(model, messages);
57+
/// </code>
58+
/// to
59+
/// <code>
60+
/// client.GetTextCompletionAsync(
61+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
62+
/// {
63+
/// Model = model,
64+
/// Input = new TextGenerationInput { Messages = messages },
65+
/// Parameters = StreamingParameters
66+
/// });
67+
/// </code>
68+
/// </remarks>
69+
[Obsolete("Use GetTextCompletionAsync() instead, check remarks section.")]
3570
public static async Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
3671
GetDeepSeekChatCompletionAsync(
3772
this IDashScopeClient client,
@@ -54,6 +89,23 @@ public static async Task<ModelResponse<TextGenerationOutput, TextGenerationToken
5489
/// <param name="model"></param>
5590
/// <param name="messages"></param>
5691
/// <returns></returns>
92+
/// <remarks>
93+
/// Migrate from
94+
/// <code>
95+
/// client.GetDeepSeekChatCompletionStreamAsync(DeepSeekLlm.DeepSeekV3, messages);
96+
/// </code>
97+
/// to
98+
/// <code>
99+
/// client.GetTextCompletionStreamAsync(
100+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
101+
/// {
102+
/// Model = "deepseek-v3",
103+
/// Input = new TextGenerationInput { Messages = messages },
104+
/// Parameters = new TextGenerationParameters { IncrementalOutput = true }
105+
/// });
106+
/// </code>
107+
/// </remarks>
108+
[Obsolete("Use GetTextCompletionStreamAsync() instead, check remarks section.")]
57109
public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
58110
GetDeepSeekChatCompletionStreamAsync(
59111
this IDashScopeClient client,
@@ -70,6 +122,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
70122
/// <param name="model"></param>
71123
/// <param name="messages"></param>
72124
/// <returns></returns>
125+
/// <remarks>
126+
/// Migrate from
127+
/// <code>
128+
/// client.GetDeepSeekChatCompletionStreamAsync(model, messages);
129+
/// </code>
130+
/// to
131+
/// <code>
132+
/// client.GetTextCompletionStreamAsync(
133+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
134+
/// {
135+
/// Model = model,
136+
/// Input = new TextGenerationInput { Messages = messages },
137+
/// Parameters = StreamingParameters
138+
/// });
139+
/// </code>
140+
/// </remarks>
141+
[Obsolete("Use GetTextCompletionStreamAsync() instead, check remarks section.")]
73142
public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
74143
GetDeepSeekChatCompletionStreamAsync(
75144
this IDashScopeClient client,

src/Cnblogs.DashScope.Sdk/QWen/QWenTextGenerationApi.cs

Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,23 @@ public static class QWenTextGenerationApi
1717
/// <param name="cancellationToken">The cancellation token to use.</param>
1818
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
1919
/// <returns></returns>
20+
/// <remarks>
21+
/// Migrate from
22+
/// <code>
23+
/// client.GetQWenChatStreamAsync(QWenLlm.QWenPlus, messages, parameters);
24+
/// </code>
25+
/// to
26+
/// <code>
27+
/// client.GetTextCompletionStreamAsync(
28+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
29+
/// {
30+
/// Model = "qwen-plus",
31+
/// Input = new TextGenerationInput { Messages = messages },
32+
/// Parameters = parameters
33+
/// });
34+
/// </code>
35+
/// </remarks>
36+
[Obsolete("Use GetTextCompletionStreamAsync instead")]
2037
public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatStreamAsync(
2138
this IDashScopeClient dashScopeClient,
2239
QWenLlm model,
@@ -45,6 +62,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
4562
/// <param name="cancellationToken">The cancellation token to use.</param>
4663
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
4764
/// <returns></returns>
65+
/// <remarks>
66+
/// Migrate from
67+
/// <code>
68+
/// client.GetQWenChatStreamAsync("qwen-plus", messages, parameters);
69+
/// </code>
70+
/// to
71+
/// <code>
72+
/// client.GetTextCompletionStreamAsync(
73+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
74+
/// {
75+
/// Model = "qwen-plus",
76+
/// Input = new TextGenerationInput { Messages = messages },
77+
/// Parameters = parameters
78+
/// });
79+
/// </code>
80+
/// </remarks>
81+
[Obsolete("Use GetTextCompletionStreamAsync instead")]
4882
public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatStreamAsync(
4983
this IDashScopeClient dashScopeClient,
5084
string model,
@@ -72,6 +106,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
72106
/// <param name="cancellationToken">The cancellation token to use.</param>
73107
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
74108
/// <returns></returns>
109+
/// <remarks>
110+
/// Migrate from
111+
/// <code>
112+
/// client.GetQWenChatCompletionAsync(QWenLlm.QwQ32B, messages, parameters);
113+
/// </code>
114+
/// to
115+
/// <code>
116+
/// client.GetTextCompletionAsync(
117+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
118+
/// {
119+
/// Model = "qwq-32b",
120+
/// Input = new TextGenerationInput { Messages = messages },
121+
/// Parameters = parameters
122+
/// });
123+
/// </code>
124+
/// </remarks>
125+
[Obsolete("Use GetTextCompletionAsync instead")]
75126
public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatCompletionAsync(
76127
this IDashScopeClient dashScopeClient,
77128
QWenLlm model,
@@ -96,6 +147,23 @@ public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>
96147
/// <param name="cancellationToken">The cancellation token to use.</param>
97148
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
98149
/// <returns></returns>
150+
/// <remarks>
151+
/// Migrate from
152+
/// <code>
153+
/// client.GetQWenChatCompletionAsync("qwen-plus", messages, parameters);
154+
/// </code>
155+
/// to
156+
/// <code>
157+
/// client.GetTextCompletionAsync(
158+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
159+
/// {
160+
/// Model = "qwen-plus",
161+
/// Input = new TextGenerationInput { Messages = messages },
162+
/// Parameters = parameters
163+
/// });
164+
/// </code>
165+
/// </remarks>
166+
[Obsolete("Use GetTextCompletionAsync instead")]
99167
public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatCompletionAsync(
100168
this IDashScopeClient dashScopeClient,
101169
string model,
@@ -122,6 +190,23 @@ public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>
122190
/// <param name="parameters">The optional parameters for this completion request.</param>
123191
/// <param name="cancellationToken">The cancellation token to use.</param>
124192
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
193+
/// <remarks>
194+
/// Migrate from
195+
/// <code>
196+
/// client.GetQWenCompletionStreamAsync(QWenLlm.QwQ32B, "prompt", parameters);
197+
/// </code>
198+
/// to
199+
/// <code>
200+
/// client.GetTextCompletionStreamAsync(
201+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
202+
/// {
203+
/// Model = "qwq-32b",
204+
/// Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
205+
/// Parameters = parameters
206+
/// });
207+
/// </code>
208+
/// </remarks>
209+
[Obsolete("Use GetTextCompletionStreamAsync instead")]
125210
public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
126211
GetQWenCompletionStreamAsync(
127212
this IDashScopeClient dashScopeClient,
@@ -146,6 +231,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
146231
/// <param name="parameters">The optional parameters for this completion request.</param>
147232
/// <param name="cancellationToken">The cancellation token to use.</param>
148233
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
234+
/// <remarks>
235+
/// Migrate from
236+
/// <code>
237+
/// client.GetQWenCompletionStreamAsync("qwq-32b", "prompt", parameters);
238+
/// </code>
239+
/// to
240+
/// <code>
241+
/// client.GetTextCompletionStreamAsync(
242+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
243+
/// {
244+
/// Model = "qwq-32b",
245+
/// Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
246+
/// Parameters = parameters
247+
/// });
248+
/// </code>
249+
/// </remarks>
250+
[Obsolete("Use GetTextCompletionStreamAsync instead")]
149251
public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
150252
GetQWenCompletionStreamAsync(
151253
this IDashScopeClient dashScopeClient,
@@ -173,6 +275,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
173275
/// <param name="parameters">The optional parameters for this completion request.</param>
174276
/// <param name="cancellationToken">The cancellation token to use.</param>
175277
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
278+
/// <remarks>
279+
/// Migrate from
280+
/// <code>
281+
/// client.GetQWenCompletionAsync(QWenLlm.QwQ32B, "prompt", parameters);
282+
/// </code>
283+
/// to
284+
/// <code>
285+
/// client.GetTextCompletionAsync(
286+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
287+
/// {
288+
/// Model = "qwq-32b",
289+
/// Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
290+
/// Parameters = parameters
291+
/// });
292+
/// </code>
293+
/// </remarks>
294+
[Obsolete("Use GetTextCompletionAsync instead")]
176295
public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenCompletionAsync(
177296
this IDashScopeClient dashScopeClient,
178297
QWenLlm model,
@@ -192,6 +311,23 @@ public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>
192311
/// <param name="parameters">The optional parameters for this completion request.</param>
193312
/// <param name="cancellationToken">The cancellation token to use.</param>
194313
/// <exception cref="DashScopeException">Request for generation is failed.</exception>
314+
/// <remarks>
315+
/// Migrate from
316+
/// <code>
317+
/// client.GetQWenCompletionAsync("qwq-32b", "prompt", parameters);
318+
/// </code>
319+
/// to
320+
/// <code>
321+
/// client.GetTextCompletionAsync(
322+
/// new ModelRequest&lt;TextGenerationInput, ITextGenerationParameters&gt;
323+
/// {
324+
/// Model = "qwq-32b",
325+
/// Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
326+
/// Parameters = parameters
327+
/// });
328+
/// </code>
329+
/// </remarks>
330+
[Obsolete("Use GetTextCompletionAsync instead")]
195331
public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenCompletionAsync(
196332
this IDashScopeClient dashScopeClient,
197333
string model,

src/Cnblogs.DashScope.Sdk/QWenMultimodal/QWenMultimodalGenerationApi.cs

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,23 @@ public static class QWenMultimodalGenerationApi
1616
/// <param name="parameters">The optional configuration for this request.</param>
1717
/// <param name="cancellationToken">The <see cref="CancellationToken"/> to use.</param>
1818
/// <returns></returns>
19+
/// <remarks>
20+
/// Migrate from
21+
/// <code>
22+
/// client.GetQWenMultimodalCompletionAsync(QWenMultimodalModel.QWenVlPlus, messages, parameters);
23+
/// </code>
24+
/// to
25+
/// <code>
26+
/// client.GetMultimodalGenerationAsync(
27+
/// new ModelRequest&lt;MultimodalInput, IMultimodalParameters&gt;
28+
/// {
29+
/// Model = "qwen-vl-plus",
30+
/// Input = new MultimodalInput { Messages = messages },
31+
/// Parameters = parameters
32+
/// });
33+
/// </code>
34+
/// </remarks>
35+
[Obsolete("Use GetMultimodalGenerationAsync instead")]
1936
public static Task<ModelResponse<MultimodalOutput, MultimodalTokenUsage>> GetQWenMultimodalCompletionAsync(
2037
this IDashScopeClient client,
2138
QWenMultimodalModel model,
@@ -35,6 +52,23 @@ public static Task<ModelResponse<MultimodalOutput, MultimodalTokenUsage>> GetQWe
3552
/// <param name="parameters">The optional configuration for this request.</param>
3653
/// <param name="cancellationToken">The <see cref="CancellationToken"/> to use.</param>
3754
/// <returns></returns>
55+
/// <remarks>
56+
/// Migrate from
57+
/// <code>
58+
/// client.GetQWenMultimodalCompletionAsync("qwen-vl-plus", messages, parameters);
59+
/// </code>
60+
/// to
61+
/// <code>
62+
/// client.GetMultimodalGenerationAsync(
63+
/// new ModelRequest&lt;MultimodalInput, IMultimodalParameters&gt;
64+
/// {
65+
/// Model = "qwen-vl-plus",
66+
/// Input = new MultimodalInput { Messages = messages },
67+
/// Parameters = parameters
68+
/// });
69+
/// </code>
70+
/// </remarks>
71+
[Obsolete("Use GetMultimodalGenerationAsync instead")]
3872
public static Task<ModelResponse<MultimodalOutput, MultimodalTokenUsage>> GetQWenMultimodalCompletionAsync(
3973
this IDashScopeClient client,
4074
string model,
@@ -61,6 +95,23 @@ public static Task<ModelResponse<MultimodalOutput, MultimodalTokenUsage>> GetQWe
6195
/// <param name="parameters">The optional configuration for this request.</param>
6296
/// <param name="cancellationToken">The <see cref="CancellationToken"/> to use.</param>
6397
/// <returns></returns>
98+
/// <remarks>
99+
/// Migrate from
100+
/// <code>
101+
/// client.GetQWenMultimodalCompletionStreamAsync("qwen-vl-plus", messages, parameters);
102+
/// </code>
103+
/// to
104+
/// <code>
105+
/// client.GetMultimodalGenerationStreamAsync(
106+
/// new ModelRequest&lt;MultimodalInput, IMultimodalParameters&gt;
107+
/// {
108+
/// Model = "qwen-vl-plus",
109+
/// Input = new MultimodalInput { Messages = messages },
110+
/// Parameters = parameters
111+
/// });
112+
/// </code>
113+
/// </remarks>
114+
[Obsolete("Use GetMultimodalGenerationStreamAsync instead")]
64115
public static IAsyncEnumerable<ModelResponse<MultimodalOutput, MultimodalTokenUsage>>
65116
GetQWenMultimodalCompletionStreamAsync(
66117
this IDashScopeClient client,
@@ -85,6 +136,23 @@ public static IAsyncEnumerable<ModelResponse<MultimodalOutput, MultimodalTokenUs
85136
/// <param name="parameters">The optional configuration for this request.</param>
86137
/// <param name="cancellationToken">The <see cref="CancellationToken"/> to use.</param>
87138
/// <returns></returns>
139+
/// <remarks>
140+
/// Migrate from
141+
/// <code>
142+
/// client.GetQWenMultimodalCompletionStreamAsync("qwen-vl-plus", messages, parameters);
143+
/// </code>
144+
/// to
145+
/// <code>
146+
/// client.GetMultimodalGenerationStreamAsync(
147+
/// new ModelRequest&lt;MultimodalInput, IMultimodalParameters&gt;
148+
/// {
149+
/// Model = "qwen-vl-plus",
150+
/// Input = new MultimodalInput { Messages = messages },
151+
/// Parameters = parameters
152+
/// });
153+
/// </code>
154+
/// </remarks>
155+
[Obsolete("Use GetMultimodalGenerationStreamAsync instead")]
88156
public static IAsyncEnumerable<ModelResponse<MultimodalOutput, MultimodalTokenUsage>>
89157
GetQWenMultimodalCompletionStreamAsync(
90158
this IDashScopeClient client,

0 commit comments

Comments
 (0)