// config.ts — persisted application configuration store (Zustand + persist middleware)
  1. import { create } from "zustand";
  2. import { persist } from "zustand/middleware";
  3. import { LLMModel } from "../client/api";
  4. import { getClientConfig } from "../config/client";
  5. import { DEFAULT_INPUT_TEMPLATE, DEFAULT_MODELS, StoreKey } from "../constant";
/** Union of the model names declared in DEFAULT_MODELS (e.g. "gpt-3.5-turbo"). */
export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
/** Keyboard shortcut used to submit the chat input. Values are the user-facing labels. */
export enum SubmitKey {
  Enter = "Enter",
  CtrlEnter = "Ctrl + Enter",
  ShiftEnter = "Shift + Enter",
  AltEnter = "Alt + Enter",
  MetaEnter = "Meta + Enter",
}
/** UI color theme. NOTE(review): "auto" presumably follows the OS preference — confirm in the theme-switching code. */
export enum Theme {
  Auto = "auto",
  Dark = "dark",
  Light = "light",
}
/**
 * Factory defaults for the whole app configuration.
 * `reset()` on the store restores exactly this object, and `ChatConfig`
 * is derived from its inferred shape, so every field added here is
 * automatically part of the persisted state.
 */
export const DEFAULT_CONFIG = {
  submitKey: SubmitKey.CtrlEnter as SubmitKey,
  avatar: "1f603", // emoji codepoint used as the user avatar
  fontSize: 14,
  theme: Theme.Auto as Theme,
  tightBorder: !!getClientConfig()?.isApp, // borderless layout when running as a desktop app
  sendPreviewBubble: true,
  sidebarWidth: 300,
  disablePromptHint: false,
  dontShowMaskSplashScreen: false, // dont show splash screen when create chat
  models: DEFAULT_MODELS as any as LLMModel[],
  modelConfig: {
    model: "gpt-3.5-turbo" as ModelType,
    temperature: 0.5,
    top_p: 1,
    max_tokens: 2000,
    presence_penalty: 0,
    frequency_penalty: 0,
    sendMemory: true, // include summarized history in outgoing requests
    historyMessageCount: 4, // number of recent messages carried into each request
    compressMessageLengthThreshold: 1000, // char length that triggers history compression
    template: DEFAULT_INPUT_TEMPLATE,
  },
};
/** Shape of the persisted configuration, derived from the defaults object. */
export type ChatConfig = typeof DEFAULT_CONFIG;

/** Config state plus the store actions exposed by useAppConfig. */
export type ChatConfigStore = ChatConfig & {
  reset: () => void;
  update: (updater: (config: ChatConfig) => void) => void;
  mergeModels: (newModels: LLMModel[]) => void;
};

/** Just the model-related sub-config (temperature, max_tokens, …). */
export type ModelConfig = ChatConfig["modelConfig"];
  50. export function limitNumber(
  51. x: number,
  52. min: number,
  53. max: number,
  54. defaultValue: number,
  55. ) {
  56. if (typeof x !== "number" || isNaN(x)) {
  57. return defaultValue;
  58. }
  59. return Math.min(max, Math.max(min, x));
  60. }
  61. export function limitModel(name: string) {
  62. const allModels = useAppConfig.getState().models;
  63. return allModels.some((m) => m.name === name && m.available)
  64. ? name
  65. : "gpt-3.5-turbo";
  66. }
  67. export const ModalConfigValidator = {
  68. model(x: string) {
  69. return limitModel(x) as ModelType;
  70. },
  71. max_tokens(x: number) {
  72. return limitNumber(x, 0, 32000, 2000);
  73. },
  74. presence_penalty(x: number) {
  75. return limitNumber(x, -2, 2, 0);
  76. },
  77. frequency_penalty(x: number) {
  78. return limitNumber(x, -2, 2, 0);
  79. },
  80. temperature(x: number) {
  81. return limitNumber(x, 0, 1, 1);
  82. },
  83. top_p(x: number) {
  84. return limitNumber(x, 0, 1, 1);
  85. },
  86. };
  87. export const useAppConfig = create<ChatConfigStore>()(
  88. persist(
  89. (set, get) => ({
  90. ...DEFAULT_CONFIG,
  91. reset() {
  92. set(() => ({ ...DEFAULT_CONFIG }));
  93. },
  94. update(updater) {
  95. const config = { ...get() };
  96. updater(config);
  97. set(() => config);
  98. },
  99. mergeModels(newModels) {
  100. const oldModels = get().models;
  101. const modelMap: Record<string, LLMModel> = {};
  102. for (const model of oldModels) {
  103. model.available = false;
  104. modelMap[model.name] = model;
  105. }
  106. for (const model of newModels) {
  107. model.available = true;
  108. modelMap[model.name] = model;
  109. }
  110. set(() => ({
  111. models: Object.values(modelMap),
  112. }));
  113. },
  114. }),
  115. {
  116. name: StoreKey.Config,
  117. version: 3.3,
  118. migrate(persistedState, version) {
  119. if (version === 3.3) return persistedState as any;
  120. const state = persistedState as ChatConfig;
  121. state.modelConfig.sendMemory = true;
  122. state.modelConfig.historyMessageCount = 4;
  123. state.modelConfig.compressMessageLengthThreshold = 1000;
  124. state.modelConfig.frequency_penalty = 0;
  125. state.modelConfig.top_p = 1;
  126. state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
  127. state.dontShowMaskSplashScreen = false;
  128. return state;
  129. },
  130. },
  131. ),
  132. );