@@ -17,7 +17,7 @@ public sealed class DashScopeChatClient : IChatClient
     private readonly string _modelId;
 
     private static readonly JsonSchema EmptyObjectSchema =
-        JsonSchema.FromText("""{ "type": "object", "required":[],"properties":{}}""");
+        JsonSchema.FromText("{ \"type\": \"object\", \"required\":[],\"properties\":{}}");
 
     private static readonly TextGenerationParameters
         DefaultTextGenerationParameter = new() { ResultFormat = "message" };
@@ -55,15 +55,15 @@ public async Task<ChatResponse> GetResponseAsync(
         if (useVl)
         {
             var response = await _dashScopeClient.GetMultimodalGenerationAsync(
-                new ModelRequest<MultimodalInput, IMultimodalParameters>()
+                new ModelRequest<MultimodalInput, IMultimodalParameters>
                 {
                     Input = new MultimodalInput { Messages = ToMultimodalMessages(chatMessages) },
                     Parameters = ToMultimodalParameters(options),
                     Model = modelId
                 },
                 cancellationToken);
 
-            var returnMessage = new ChatMessage()
+            var returnMessage = new ChatMessage
             {
                 RawRepresentation = response, Role = ToChatRole(response.Output.Choices[0].Message.Role),
             };
@@ -80,7 +80,7 @@ public async Task<ChatResponse> GetResponseAsync(
 
             if (response.Usage != null)
             {
-                completion.Usage = new UsageDetails()
+                completion.Usage = new UsageDetails
                 {
                     InputTokenCount = response.Usage.InputTokens, OutputTokenCount = response.Usage.OutputTokens,
                 };
@@ -92,7 +92,7 @@ public async Task<ChatResponse> GetResponseAsync(
         {
             var parameters = ToTextGenerationParameters(options) ?? DefaultTextGenerationParameter;
             var response = await _dashScopeClient.GetTextCompletionAsync(
-                new ModelRequest<TextGenerationInput, ITextGenerationParameters>()
+                new ModelRequest<TextGenerationInput, ITextGenerationParameters>
                 {
                     Input = new TextGenerationInput
                     {
@@ -116,7 +116,7 @@ public async Task<ChatResponse> GetResponseAsync(
 
             if (response.Usage != null)
             {
-                completion.Usage = new UsageDetails()
+                completion.Usage = new UsageDetails
                 {
                     InputTokenCount = response.Usage.InputTokens,
                     OutputTokenCount = response.Usage.OutputTokens,
@@ -147,7 +147,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
             var parameter = ToMultimodalParameters(options);
             parameter.IncrementalOutput = true;
             var stream = _dashScopeClient.GetMultimodalGenerationStreamAsync(
-                new ModelRequest<MultimodalInput, IMultimodalParameters>()
+                new ModelRequest<MultimodalInput, IMultimodalParameters>
                 {
                     Input = new MultimodalInput { Messages = ToMultimodalMessages(chatMessages) },
                     Parameters = parameter,
@@ -164,7 +164,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
                     : ToFinishReason(response.Output.Choices[0].FinishReason);
                 completionId ??= response.RequestId;
 
-                var update = new ChatResponseUpdate()
+                var update = new ChatResponseUpdate
                 {
                     ResponseId = completionId,
                     CreatedAt = DateTimeOffset.Now,
@@ -199,7 +199,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
         {
             // qwen does not support streaming with function call, fallback to non-streaming
             var completion = await GetResponseAsync(chatMessages, options, cancellationToken);
-            yield return new ChatResponseUpdate()
+            yield return new ChatResponseUpdate
             {
                 ResponseId = completion.ResponseId,
                 Role = completion.Messages[0].Role,
@@ -216,7 +216,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
             var parameters = ToTextGenerationParameters(options) ?? DefaultTextGenerationParameter;
             parameters.IncrementalOutput = true;
             var stream = _dashScopeClient.GetTextCompletionStreamAsync(
-                new ModelRequest<TextGenerationInput, ITextGenerationParameters>()
+                new ModelRequest<TextGenerationInput, ITextGenerationParameters>
                 {
                     Input = new TextGenerationInput
                     {
@@ -238,7 +238,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
                     : ToFinishReason(response.Output.Choices[0].FinishReason);
                 completionId ??= response.RequestId;
 
-                var update = new ChatResponseUpdate()
+                var update = new ChatResponseUpdate
                 {
                     ResponseId = completionId,
                     CreatedAt = DateTimeOffset.Now,
@@ -257,7 +257,7 @@ public async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
             {
                 update.Contents.Add(
                     new UsageContent(
-                        new UsageDetails()
+                        new UsageDetails
                         {
                             InputTokenCount = response.Usage.InputTokens,
                             OutputTokenCount = response.Usage.OutputTokens,
@@ -299,7 +299,7 @@ public void Dispose()
299299
300300 private static ChatMessage ToChatMessage ( TextChatMessage message )
301301 {
302- var returnMessage = new ChatMessage ( )
302+ var returnMessage = new ChatMessage
303303 {
304304 RawRepresentation = message , Role = ToChatRole ( message . Role ) ,
305305 } ;
@@ -485,7 +485,7 @@ private IEnumerable<TextChatMessage> ToTextChatMessages(
             format = "json_object";
         }
 
-        return new TextGenerationParameters()
+        return new TextGenerationParameters
         {
             ResultFormat = format,
             Temperature = options.Temperature,
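
The hunks above boil down to two mechanical changes: the EmptyObjectSchema constant trades a C# 11 raw string literal for a plain escaped literal, and every object initializer of the form "new T() { ... }" drops the redundant parentheses. A minimal standalone sketch of why each pair is equivalent; "Demo" here is a hypothetical stand-in for the types touched by the commit (UsageDetails, ChatMessage, and so on) and is not part of the change itself:

using System;

internal sealed class Demo
{
    public long InputTokenCount { get; init; }
    public long OutputTokenCount { get; init; }
}

internal static class Program
{
    private static void Main()
    {
        // With an object initializer the constructor parentheses are optional;
        // both lines compile to the same parameterless constructor call.
        var before = new Demo() { InputTokenCount = 10, OutputTokenCount = 20 };
        var after = new Demo { InputTokenCount = 10, OutputTokenCount = 20 };
        Console.WriteLine(before.InputTokenCount == after.InputTokenCount); // True

        // The raw string literal (removed) and the escaped literal (added)
        // yield the identical JSON text, so the schema constant is unchanged.
        var raw = """{ "type": "object", "required":[],"properties":{}}""";
        var escaped = "{ \"type\": \"object\", \"required\":[],\"properties\":{}}";
        Console.WriteLine(raw == escaped); // True
    }
}

The only observable difference is that the escaped form also compiles under language versions older than C# 11, which raw string literals require.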