1 change: 0 additions & 1 deletion .github/gallery-agent/go.mod
@@ -8,7 +8,6 @@ require (
github.com/onsi/gomega v1.38.2
github.com/sashabaranov/go-openai v1.41.2
github.com/tmc/langchaingo v0.1.13
gopkg.in/yaml.v3 v3.0.1
)

require (
68 changes: 43 additions & 25 deletions core/http/endpoints/openai/chat.go
@@ -2,7 +2,6 @@

import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
@@ -91,7 +90,7 @@
ID: id,
Created: created,
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant", Content: &textContentToReturn}}},
Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant"}, Index: 0, FinishReason: nil}},
Object: "chat.completion.chunk",
}
responses <- initialMessage
@@ -111,7 +110,7 @@
ID: id,
Created: created,
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0}},
Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0, FinishReason: nil}},
Object: "chat.completion.chunk",
Usage: usage,
}
@@ -145,7 +144,7 @@
ID: id,
Created: created,
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant", Content: &textContentToReturn}}},
Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant"}, Index: 0, FinishReason: nil}},
Object: "chat.completion.chunk",
}
responses <- initialMessage
@@ -169,7 +168,7 @@
ID: id,
Created: created,
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{{Delta: &schema.Message{Content: &result}, Index: 0}},
Choices: []schema.Choice{{Delta: &schema.Message{Content: &result}, Index: 0, FinishReason: nil}},
Object: "chat.completion.chunk",
Usage: usage,
}
@@ -197,7 +196,10 @@
},
},
},
}}},
},
Index: 0,
FinishReason: nil,
}},
Object: "chat.completion.chunk",
}
responses <- initialMessage
@@ -220,7 +222,10 @@
},
},
},
}}},
},
Index: 0,
FinishReason: nil,
}},
Object: "chat.completion.chunk",
}
}
@@ -427,11 +432,14 @@
if len(ev.Choices[0].Delta.ToolCalls) > 0 {
toolsCalled = true
}
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(ev)
log.Debug().Msgf("Sending chunk: %s", buf.String())
_, err := fmt.Fprintf(w, "data: %v\n", buf.String())
respData, err := json.Marshal(ev)
if err != nil {
log.Debug().Msgf("Failed to marshal response: %v", err)
input.Cancel()
continue
}
log.Debug().Msgf("Sending chunk: %s", string(respData))
_, err = fmt.Fprintf(w, "data: %s\n\n", string(respData))
if err != nil {
log.Debug().Msgf("Sending chunk failed: %v", err)
input.Cancel()
@@ -443,34 +451,40 @@
}
log.Error().Msgf("Stream ended with error: %v", err)

stopReason := FinishReasonStop
resp := &schema.OpenAIResponse{
ID: id,
Created: created,
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{
{
FinishReason: "stop",
FinishReason: &stopReason,
Index: 0,
Delta: &schema.Message{Content: "Internal error: " + err.Error()},
}},
Object: "chat.completion.chunk",
Usage: *usage,
}
respData, _ := json.Marshal(resp)

w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
respData, marshalErr := json.Marshal(resp)
if marshalErr != nil {
log.Error().Msgf("Failed to marshal error response: %v", marshalErr)
// Send a simple error message as fallback
w.WriteString("data: {\"error\":\"Internal error\"}\n\n")

Code scanning / gosec warning: Errors unhandled
} else {
w.WriteString(fmt.Sprintf("data: %s\n\n", respData))

Code scanning / gosec warning: Errors unhandled
}
w.WriteString("data: [DONE]\n\n")
w.Flush()

return
}
}

finishReason := "stop"
finishReason := FinishReasonStop
if toolsCalled && len(input.Tools) > 0 {
finishReason = "tool_calls"
finishReason = FinishReasonToolCalls
} else if toolsCalled {
finishReason = "function_call"
finishReason = FinishReasonFunctionCall
}

resp := &schema.OpenAIResponse{
@@ -479,9 +493,9 @@
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{
{
FinishReason: finishReason,
FinishReason: &finishReason,
Index: 0,
Delta: &schema.Message{Content: &textContentToReturn},
Delta: &schema.Message{},
Copilot AI Nov 9, 2025

The final streaming chunk sends an empty Delta message, but should include the content to maintain consistency. According to OpenAI's streaming specification, the final delta with finish_reason can include an empty Delta object, but earlier in the code (line 93, 147) an initial message with role 'assistant' is sent. Consider whether the final chunk should maintain consistency with the initial message format or truly be empty. If this is an intentional API behavior change, it may break client expectations.

Suggested change
Delta: &schema.Message{},
Delta: &schema.Message{Role: "assistant"},
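For context, a minimal sketch of what the two variants in the suggestion above serialize to, using a simplified stand-in for schema.Message (hypothetical type, assuming the usual omitempty JSON tags) rather than the project's real one:

package main

import (
	"encoding/json"
	"fmt"
)

// message is a trimmed stand-in for schema.Message, for illustration only.
type message struct {
	Role    string  `json:"role,omitempty"`
	Content *string `json:"content,omitempty"`
}

func main() {
	// Delta: &schema.Message{} marshals to an empty object.
	empty, _ := json.Marshal(message{})
	fmt.Println(string(empty)) // {}

	// Delta: &schema.Message{Role: "assistant"} carries the role key.
	withRole, _ := json.Marshal(message{Role: "assistant"})
	fmt.Println(string(withRole)) // {"role":"assistant"}
}

OpenAI's own streams usually send an empty delta object together with finish_reason in the final chunk, so either form is likely tolerated by clients; the difference is only which keys appear in that last delta.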
}},
Object: "chat.completion.chunk",
Usage: *usage,
@@ -502,7 +516,8 @@
tokenCallback := func(s string, c *[]schema.Choice) {
if !shouldUseFn {
// no function is called, just reply and use stop as finish reason
*c = append(*c, schema.Choice{FinishReason: "stop", Index: 0, Message: &schema.Message{Role: "assistant", Content: &s}})
stopReason := FinishReasonStop
*c = append(*c, schema.Choice{FinishReason: &stopReason, Index: 0, Message: &schema.Message{Role: "assistant", Content: &s}})
return
}

@@ -520,12 +535,14 @@
return
}

stopReason := FinishReasonStop
*c = append(*c, schema.Choice{
FinishReason: "stop",
FinishReason: &stopReason,
Message: &schema.Message{Role: "assistant", Content: &result}})
default:
toolCallsReason := FinishReasonToolCalls
toolChoice := schema.Choice{
FinishReason: "tool_calls",
FinishReason: &toolCallsReason,
Message: &schema.Message{
Role: "assistant",
},
@@ -549,8 +566,9 @@
)
} else {
// otherwise we return more choices directly (deprecated)
functionCallReason := FinishReasonFunctionCall
*c = append(*c, schema.Choice{
FinishReason: "function_call",
FinishReason: &functionCallReason,
Message: &schema.Message{
Role: "assistant",
Content: &textContentToReturn,
48 changes: 37 additions & 11 deletions core/http/endpoints/openai/completion.go
@@ -2,7 +2,6 @@ package openai

import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
@@ -47,8 +46,9 @@ func CompletionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, eva
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{
{
Index: 0,
Text: s,
Index: 0,
Text: s,
FinishReason: nil,
},
},
Object: "text_completion",
@@ -140,32 +140,57 @@ func CompletionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, eva
log.Debug().Msgf("No choices in the response, skipping")
continue
}
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(ev)
respData, err := json.Marshal(ev)
if err != nil {
log.Debug().Msgf("Failed to marshal response: %v", err)
continue
}

log.Debug().Msgf("Sending chunk: %s", buf.String())
fmt.Fprintf(w, "data: %v\n", buf.String())
log.Debug().Msgf("Sending chunk: %s", string(respData))
fmt.Fprintf(w, "data: %s\n\n", string(respData))
w.Flush()
case err := <-ended:
if err == nil {
break LOOP
}
log.Error().Msgf("Stream ended with error: %v", err)
fmt.Fprintf(w, "data: %v\n", "Internal error: "+err.Error())

stopReason := FinishReasonStop
errorResp := schema.OpenAIResponse{
ID: id,
Created: created,
Model: input.Model,
Choices: []schema.Choice{
{
Index: 0,
FinishReason: &stopReason,
Text: "Internal error: " + err.Error(),
},
},
Object: "text_completion",
}
errorData, marshalErr := json.Marshal(errorResp)
if marshalErr != nil {
log.Error().Msgf("Failed to marshal error response: %v", marshalErr)
// Send a simple error message as fallback
fmt.Fprintf(w, "data: {\"error\":\"Internal error\"}\n\n")
} else {
fmt.Fprintf(w, "data: %s\n\n", string(errorData))
}
w.Flush()
break LOOP
}
}

stopReason := FinishReasonStop
resp := &schema.OpenAIResponse{
ID: id,
Created: created,
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{
{
Index: 0,
FinishReason: "stop",
FinishReason: &stopReason,
},
},
Object: "text_completion",
@@ -197,7 +222,8 @@ func CompletionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, eva

r, tokenUsage, err := ComputeChoices(
input, i, config, cl, appConfig, ml, func(s string, c *[]schema.Choice) {
*c = append(*c, schema.Choice{Text: s, FinishReason: "stop", Index: k})
stopReason := FinishReasonStop
*c = append(*c, schema.Choice{Text: s, FinishReason: &stopReason, Index: k})
}, nil)
if err != nil {
return err
8 changes: 8 additions & 0 deletions core/http/endpoints/openai/constants.go
@@ -0,0 +1,8 @@
package openai

// Finish reason constants for OpenAI API responses
const (
FinishReasonStop = "stop"
FinishReasonToolCalls = "tool_calls"
FinishReasonFunctionCall = "function_call"
)
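Because schema.Choice.FinishReason is now a *string and Go does not allow taking the address of a constant, the call sites in this PR bind the constant to a local variable before using it. A minimal sketch of that pattern, with hypothetical names used only for illustration:

package openai_example

// choice is a trimmed stand-in for schema.Choice, for illustration only.
type choice struct {
	Index        int
	FinishReason *string
}

const finishReasonStop = "stop" // stands in for FinishReasonStop

func newStopChoice() choice {
	// stop := &finishReasonStop would not compile:
	// Go cannot take the address of a constant.
	stop := finishReasonStop
	return choice{Index: 0, FinishReason: &stop}
}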
9 changes: 6 additions & 3 deletions core/http/endpoints/openai/realtime.go
@@ -1072,7 +1072,8 @@ func processTextResponse(config *config.ModelConfig, session *Session, prompt st
result, tokenUsage, err := ComputeChoices(input, prompt, config, startupOptions, ml, func(s string, c *[]schema.Choice) {
if !shouldUseFn {
// no function is called, just reply and use stop as finish reason
*c = append(*c, schema.Choice{FinishReason: "stop", Index: 0, Message: &schema.Message{Role: "assistant", Content: &s}})
stopReason := FinishReasonStop
*c = append(*c, schema.Choice{FinishReason: &stopReason, Index: 0, Message: &schema.Message{Role: "assistant", Content: &s}})
return
}

@@ -1099,7 +1100,8 @@ func processTextResponse(config *config.ModelConfig, session *Session, prompt st
}

if len(input.Tools) > 0 {
toolChoice.FinishReason = "tool_calls"
toolCallsReason := FinishReasonToolCalls
toolChoice.FinishReason = &toolCallsReason
}

for _, ss := range results {
@@ -1120,8 +1122,9 @@ func processTextResponse(config *config.ModelConfig, session *Session, prompt st
)
} else {
// otherwise we return more choices directly
functionCallReason := FinishReasonFunctionCall
*c = append(*c, schema.Choice{
FinishReason: "function_call",
FinishReason: &functionCallReason,
Message: &schema.Message{
Role: "assistant",
Content: &textContentToReturn,
2 changes: 1 addition & 1 deletion core/schema/openai.go
@@ -50,7 +50,7 @@ type OpenAIResponse struct {

type Choice struct {
Index int `json:"index"`
FinishReason string `json:"finish_reason"`
FinishReason *string `json:"finish_reason"`
Message *Message `json:"message,omitempty"`
Delta *Message `json:"delta,omitempty"`
Text string `json:"text,omitempty"`
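The switch from string to *string matters on the wire: with the old plain string field and no omitempty tag, every non-final streaming chunk serialized finish_reason as an empty string, whereas a nil pointer serializes as null, which is what OpenAI's own streaming responses send for non-final chunks. A runnable sketch, using a trimmed stand-in for the updated schema.Choice rather than the real type:

package main

import (
	"encoding/json"
	"fmt"
)

// choice is a trimmed stand-in for the updated schema.Choice, for illustration only.
type choice struct {
	Index        int     `json:"index"`
	FinishReason *string `json:"finish_reason"`
}

func main() {
	// Non-final streaming chunk: the nil pointer marshals as null.
	mid, _ := json.Marshal(choice{Index: 0})
	fmt.Println(string(mid)) // {"index":0,"finish_reason":null}

	// Final chunk: the pointer carries "stop".
	stop := "stop"
	fin, _ := json.Marshal(choice{Index: 0, FinishReason: &stop})
	fmt.Println(string(fin)) // {"index":0,"finish_reason":"stop"}
}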
2 changes: 0 additions & 2 deletions docs/go.mod
@@ -1,5 +1,3 @@
module github.com/McShelby/hugo-theme-relearn.git

go 1.19

require github.com/gohugoio/hugo-mod-bootstrap-scss/v5 v5.20300.20200 // indirect
4 changes: 0 additions & 4 deletions docs/go.sum
@@ -1,4 +0,0 @@
github.com/gohugoio/hugo-mod-bootstrap-scss/v5 v5.20300.20200 h1:SmpwwN3DNzJWbV+IT8gaFu07ENUFpCvKou5BHYUKuVs=
github.com/gohugoio/hugo-mod-bootstrap-scss/v5 v5.20300.20200/go.mod h1:kx8MBj9T7SFR8ZClWvKZPmmUxBaltkoXvnWlZZcSnYA=
github.com/gohugoio/hugo-mod-jslibs-dist/popperjs/v2 v2.21100.20000/go.mod h1:mFberT6ZtcchrsDtfvJM7aAH2bDKLdOnruUHl0hlapI=
github.com/twbs/bootstrap v5.3.2+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=