
fix: #3186 enable max_tokens in chat payload

Yidadaa authored 1 year ago · commit d0a1d910d4
3 changed files with 5 additions and 4 deletions:

  1. app/client/platforms/openai.ts (+1 -0)
  2. app/components/model-config.tsx (+2 -2)
  3. app/store/config.ts (+2 -2)

app/client/platforms/openai.ts (+1 -0)

@@ -70,6 +70,7 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
       top_p: modelConfig.top_p,
+      max_tokens: Math.max(modelConfig.max_tokens, 1024),
     };
 
     console.log("[Request] openai payload: ", requestPayload);
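
This hunk adds `max_tokens` to the OpenAI chat completion payload, which previously omitted it. Note that `Math.max(modelConfig.max_tokens, 1024)` enforces a floor, not a cap: any configured value below 1024 is raised to 1024 before the request is sent. A minimal sketch of that behavior, using a hypothetical `modelConfig` object in place of the store's real one:

```ts
// Hypothetical user setting, standing in for the store's modelConfig.
const modelConfig = { max_tokens: 100 };

// Math.max picks the larger operand, so values below 1024 are raised
// to the floor; values above it pass through unchanged.
const maxTokens = Math.max(modelConfig.max_tokens, 1024);
console.log(maxTokens); // 1024
```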

app/components/model-config.tsx (+2 -2)

@@ -76,8 +76,8 @@ export function ModelConfigList(props: {
       >
         <input
           type="number"
-          min={100}
-          max={100000}
+          min={1024}
+          max={512000}
           value={props.modelConfig.max_tokens}
           onChange={(e) =>
             props.updateConfig(
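
This hunk raises the bounds on the max-tokens number input in the settings UI from [100, 100000] to [1024, 512000], matching the payload floor above. A hedged sketch of such a controlled input, using hypothetical local state instead of the component's real `props.modelConfig` / `props.updateConfig` wiring:

```tsx
import { useState } from "react";

// Hypothetical standalone version of the input; the real component
// reads from and writes to the app's config store instead.
export function MaxTokensInput() {
  const [maxTokens, setMaxTokens] = useState(8192);
  return (
    <input
      type="number"
      min={1024}
      max={512000}
      value={maxTokens}
      onChange={(e) => setMaxTokens(Number(e.currentTarget.value))}
    />
  );
}
```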

app/store/config.ts (+2 -2)

@@ -49,7 +49,7 @@ export const DEFAULT_CONFIG = {
     model: "gpt-3.5-turbo" as ModelType,
     temperature: 0.5,
     top_p: 1,
-    max_tokens: 2000,
+    max_tokens: 8192,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
@@ -82,7 +82,7 @@ export const ModalConfigValidator = {
     return x as ModelType;
   },
   max_tokens(x: number) {
-    return limitNumber(x, 0, 100000, 2000);
+    return limitNumber(x, 0, 512000, 1024);
   },
   presence_penalty(x: number) {
     return limitNumber(x, -2, 2, 0);
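
These two hunks raise the default `max_tokens` from 2000 to 8192 and widen the validator's cap from 100000 to 512000, with the fallback value changed from 2000 to 1024. `limitNumber` itself is not shown in this diff; a plausible implementation, inferred from its call sites here, clamps `x` into `[min, max]` and returns `defaultValue` for non-numeric input:

```ts
// Assumed signature inferred from the call sites above; the project's
// actual helper may differ in details.
function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
): number {
  if (typeof x !== "number" || isNaN(x)) return defaultValue;
  return Math.min(max, Math.max(min, x));
}

limitNumber(1_000_000, 0, 512000, 1024); // => 512000 (clamped to cap)
limitNumber(NaN, 0, 512000, 1024);       // => 1024   (fallback)
```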