@@ -9,6 +9,8 @@ const configuration = new Configuration({
99} ) ;
1010const openai = new OpenAIApi ( configuration ) ;
1111
12+ const MAX_TOKENS = 2049 ;
13+ const RESPONSE_TOKENS = 64 ;
1214const BANNED_PATTERNS = /[`\[\]{}\(\)]|http/g;
1315const COOLDOWN = 60 ;
1416const GRZESIU_DELAY = 1500 ;
@@ -26,7 +28,7 @@ const grzesiu: Command = {
2628 const response = await openai . createCompletion ( 'text-davinci-001' , {
2729 prompt,
2830 temperature : 1 ,
29- max_tokens : 64 ,
31+ max_tokens : RESPONSE_TOKENS ,
3032 top_p : 1 ,
3133 frequency_penalty : 0 ,
3234 presence_penalty : 2 ,
@@ -59,8 +61,6 @@ const grzesiu: Command = {
5961
6062export default grzesiu ;
6163
62- const MAX_TOKENS = 2049 ;
63-
6464const getRandomInt = ( len : number ) => Math . floor ( Math . random ( ) * len ) ;
6565
6666const getRandomIndices = ( num : number , max : number ) => {
@@ -85,7 +85,7 @@ const generateGrzesiuPrompt = async (username: string, question: string) => {
8585 const txt = uniqueLines . reduce ( ( txt , line ) => {
8686 const newTxt = txt + `${GRZESIU_NAME}: ` + line + '\n';
8787 const fullConvo = getFullConvo ( newTxt , username , question ) ;
88- return fullConvo . length <= MAX_TOKENS ? newTxt : txt ;
88+ return fullConvo . length <= MAX_TOKENS - RESPONSE_TOKENS ? newTxt : txt ;
8989 } , '' ) ;
9090 return getFullConvo ( txt , username , question ) ;
9191} ;
0 commit comments