@@ -9,8 +9,8 @@ import type { ChatMessage } from './chatgpt'
 import { abortChatProcess, chatConfig, chatReplyProcess, containsSensitiveWords, initAuditService } from './chatgpt'
 import { auth, getUserId } from './middleware/auth'
 import { clearApiKeyCache, clearConfigCache, getApiKeys, getCacheApiKeys, getCacheConfig, getOriginConfig } from './storage/config'
-import type { AuditConfig, CHATMODEL, ChatInfo, ChatOptions, Config, KeyConfig, MailConfig, SiteConfig, UserConfig, UserInfo } from './storage/model'
-import { Status, UsageResponse, UserRole, chatModelOptions } from './storage/model'
+import type { AuditConfig, ChatInfo, ChatOptions, Config, KeyConfig, MailConfig, SiteConfig, UserConfig, UserInfo } from './storage/model'
+import { Status, UsageResponse, UserRole } from './storage/model'
 import {
   clearChat,
   createChatRoom,
@@ -136,7 +136,7 @@ router.post('/room-prompt', auth, async (req, res) => {
 router.post('/room-chatmodel', auth, async (req, res) => {
   try {
     const userId = req.headers.userId as string
-    const { chatModel, roomId } = req.body as { chatModel: CHATMODEL; roomId: number }
+    const { chatModel, roomId } = req.body as { chatModel: string; roomId: number }
     const success = await updateRoomChatModel(userId, roomId, chatModel)
     if (success)
       res.send({ status: 'Success', message: 'Saved successfully', data: null })
@@ -423,8 +423,8 @@ router.post('/chat-process', [auth, limiter], async (req, res) => {
       result.data.detail = {}
       result.data.detail.usage = new UsageResponse()
       // 因为 token 本身不计算, 所以这里默认以 gpt 3.5 的算做一个伪统计
-      result.data.detail.usage.prompt_tokens = textTokens(prompt, 'gpt-3.5-turbo-0613')
-      result.data.detail.usage.completion_tokens = textTokens(result.data.text, 'gpt-3.5-turbo-0613')
+      result.data.detail.usage.prompt_tokens = textTokens(prompt, 'gpt-3.5-turbo')
+      result.data.detail.usage.completion_tokens = textTokens(result.data.text, 'gpt-3.5-turbo')
       result.data.detail.usage.total_tokens = result.data.detail.usage.prompt_tokens + result.data.detail.usage.completion_tokens
       result.data.detail.usage.estimated = true
     }
@@ -579,6 +579,18 @@ router.post('/session', async (req, res) => {
       key: string
       value: string
     }[] = []
+
+    const chatModelOptions = config.siteConfig.chatModels.split(',').map((model: string) => {
+      let label = model
+      if (model === 'text-davinci-002-render-sha-mobile')
+        label = 'gpt-3.5-mobile'
+      return {
+        label,
+        key: model,
+        value: model,
+      }
+    })
+
     let userInfo: { name: string; description: string; avatar: string; userId: string; root: boolean; roles: UserRole[]; config: UserConfig }
     if (userId != null) {
       const user = await getUserById(userId)
@@ -740,7 +752,7 @@ router.post('/user-info', auth, async (req, res) => {

 router.post('/user-chat-model', auth, async (req, res) => {
   try {
-    const { chatModel } = req.body as { chatModel: CHATMODEL }
+    const { chatModel } = req.body as { chatModel: string }
     const userId = req.headers.userId.toString()

     const user = await getUserById(userId)
0 commit comments