@@ -17,6 +17,23 @@ public static class QWenTextGenerationApi
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
     /// <returns></returns>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenChatStreamAsync("qwen-plus", messages, parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionStreamAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwen-plus",
+    ///         Input = new TextGenerationInput { Messages = messages },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionStreamAsync instead")]
     public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatStreamAsync(
         this IDashScopeClient dashScopeClient,
         QWenLlm model,
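The replacement streaming API returns an IAsyncEnumerable, so migrated callers iterate it with await foreach. A minimal consumption sketch follows; it reuses only the names shown in the diff (IDashScopeClient, GetTextCompletionStreamAsync, ModelRequest, TextGenerationInput, TextChatMessage), while the namespace import, the demo class, the nullable parameters default, and the chunk.Output member access are assumptions rather than confirmed API details.

using System;
using System.Threading.Tasks;
// using Cnblogs.DashScope.Core; // namespace assumed; adjust to the SDK's actual namespace

public static class MigrationStreamSketch
{
    // Hypothetical helper showing how the new streaming call might be consumed.
    public static async Task RunAsync(IDashScopeClient client, ITextGenerationParameters? parameters = null)
    {
        var request = new ModelRequest<TextGenerationInput, ITextGenerationParameters>
        {
            Model = "qwen-plus",
            Input = new TextGenerationInput { Messages = [TextChatMessage.User("Hello")] },
            Parameters = parameters // assumes null is accepted for default parameters
        };

        // Each item is a ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>;
        // chunk.Output is assumed to carry the incremental output (exact members are not shown in this diff).
        await foreach (var chunk in client.GetTextCompletionStreamAsync(request))
        {
            Console.WriteLine(chunk.Output);
        }
    }
}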
@@ -45,6 +62,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
     /// <returns></returns>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenChatStreamAsync("qwen-plus", messages, parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionStreamAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwen-plus",
+    ///         Input = new TextGenerationInput { Messages = messages },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionStreamAsync instead")]
     public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatStreamAsync(
         this IDashScopeClient dashScopeClient,
         string model,
@@ -72,6 +106,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
     /// <returns></returns>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenChatCompletionAsync(QWenLlm.QwQ32B, messages, parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwq-32b",
+    ///         Input = new TextGenerationInput { Messages = messages },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionAsync instead")]
     public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatCompletionAsync(
         this IDashScopeClient dashScopeClient,
         QWenLlm model,
@@ -96,6 +147,23 @@ public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
     /// <returns></returns>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenChatCompletionAsync("qwen-plus", messages, parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwen-plus",
+    ///         Input = new TextGenerationInput { Messages = messages },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionAsync instead")]
     public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenChatCompletionAsync(
         this IDashScopeClient dashScopeClient,
         string model,
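The non-streaming replacement returns a Task, so the migrated call is simply awaited. A minimal sketch under the same assumptions as the streaming sketch above (namespace and usings, demo class, nullable parameters default, and the response.Output member are not taken from this diff):

public static class MigrationCompletionSketch
{
    // Hypothetical helper mirroring the migration example above.
    public static async Task RunAsync(IDashScopeClient client, ITextGenerationParameters? parameters = null)
    {
        var response = await client.GetTextCompletionAsync(
            new ModelRequest<TextGenerationInput, ITextGenerationParameters>
            {
                Model = "qwen-plus",
                Input = new TextGenerationInput { Messages = [TextChatMessage.User("Hello")] },
                Parameters = parameters // assumes null is accepted for default parameters
            });

        // response is a ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>;
        // Output is assumed to carry the generated text (exact members are not shown in this diff).
        Console.WriteLine(response.Output);
    }
}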
@@ -122,6 +190,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
     /// <param name="parameters">The optional parameters for this completion request.</param>
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenCompletionStreamAsync(QWenLlm.QwQ32B, "prompt", parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionStreamAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwq-32b",
+    ///         Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionStreamAsync instead")]
     public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
         GetQWenCompletionStreamAsync(
             this IDashScopeClient dashScopeClient,
@@ -146,6 +231,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
     /// <param name="parameters">The optional parameters for this completion request.</param>
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenCompletionStreamAsync("qwq-32b", "prompt", parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionStreamAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwq-32b",
+    ///         Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionStreamAsync instead")]
     public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
         GetQWenCompletionStreamAsync(
             this IDashScopeClient dashScopeClient,
@@ -173,6 +275,23 @@ public static IAsyncEnumerable<ModelResponse<TextGenerationOutput, TextGeneratio
     /// <param name="parameters">The optional parameters for this completion request.</param>
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenCompletionAsync(QWenLlm.QwQ32B, "prompt", parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwq-32b",
+    ///         Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionAsync instead")]
     public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenCompletionAsync(
         this IDashScopeClient dashScopeClient,
         QWenLlm model,
@@ -192,6 +311,23 @@ public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>
     /// <param name="parameters">The optional parameters for this completion request.</param>
     /// <param name="cancellationToken">The cancellation token to use.</param>
     /// <exception cref="DashScopeException">Request for generation is failed.</exception>
+    /// <remarks>
+    /// Migrate from
+    /// <code>
+    /// client.GetQWenCompletionAsync("qwq-32b", "prompt", parameters);
+    /// </code>
+    /// to
+    /// <code>
+    /// client.GetTextCompletionAsync(
+    ///     new ModelRequest<TextGenerationInput, ITextGenerationParameters>
+    ///     {
+    ///         Model = "qwq-32b",
+    ///         Input = new TextGenerationInput { Messages = [TextChatMessage.User("prompt")] },
+    ///         Parameters = parameters
+    ///     });
+    /// </code>
+    /// </remarks>
+    [Obsolete("Use GetTextCompletionAsync instead")]
     public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetQWenCompletionAsync(
         this IDashScopeClient dashScopeClient,
         string model,