config.ts

import { create } from "zustand";
import { persist } from "zustand/middleware";
import { getClientConfig } from "../config/client";
import { DEFAULT_INPUT_TEMPLATE, StoreKey } from "../constant";
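
// Keyboard shortcut options for sending a message from the chat input.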
export enum SubmitKey {
  Enter = "Enter",
  CtrlEnter = "Ctrl + Enter",
  ShiftEnter = "Shift + Enter",
  AltEnter = "Alt + Enter",
  MetaEnter = "Meta + Enter",
}

export enum Theme {
  Auto = "auto",
  Dark = "dark",
  Light = "light",
}
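
// UI defaults plus the default model parameters. This object is the single
// source of truth for a fresh config and for `reset()` below.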
export const DEFAULT_CONFIG = {
  submitKey: SubmitKey.CtrlEnter as SubmitKey,
  avatar: "1f603",
  fontSize: 14,
  theme: Theme.Auto as Theme,
  tightBorder: !!getClientConfig()?.isApp,
  sendPreviewBubble: true,
  sidebarWidth: 300,

  disablePromptHint: false,

  dontShowMaskSplashScreen: false, // don't show the mask splash screen when creating a new chat
  dontAddBuiltinMasks: false, // don't add the built-in masks

  modelConfig: {
    model: "gpt-3.5-turbo" as ModelType,
    temperature: 0.5,
    top_p: 1,
    max_tokens: 2000,
    presence_penalty: 0,
    frequency_penalty: 0,
    sendMemory: true,
    historyMessageCount: 4,
    compressMessageLengthThreshold: 1000,
    template: DEFAULT_INPUT_TEMPLATE,
  },
};
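
// The persisted store is the config itself plus two helpers: `reset` restores
// DEFAULT_CONFIG, and `update` applies an in-place mutation to a copy of the
// current config before committing it.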
export type ChatConfig = typeof DEFAULT_CONFIG;

export type ChatConfigStore = ChatConfig & {
  reset: () => void;
  update: (updater: (config: ChatConfig) => void) => void;
};

export type ModelConfig = ChatConfig["modelConfig"];

const ENABLE_GPT4 = true;
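
// Model catalog shown in the model picker. Entries with `available: false`
// are listed but cannot be selected; GPT-4 variants are gated behind ENABLE_GPT4.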
export const ALL_MODELS = [
  {
    name: "gpt-4",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-0314",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-0613",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k-0314",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k-0613",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-3.5-turbo",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-0301",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-0613",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-16k",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-16k-0613",
    available: true,
  },
  {
    name: "qwen-v1", // Alibaba Tongyi Qianwen
    available: false,
  },
  {
    name: "ernie", // Baidu ERNIE Bot
    available: false,
  },
  {
    name: "spark", // iFlytek Spark
    available: false,
  },
  {
    name: "llama", // LLaMA
    available: false,
  },
  {
    name: "chatglm", // ChatGLM-6B
    available: false,
  },
] as const;
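
// Union of every model name in ALL_MODELS, e.g. "gpt-4" | "gpt-3.5-turbo" | ...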
export type ModelType = (typeof ALL_MODELS)[number]["name"];
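
// Clamp a numeric setting to [min, max]; non-numeric input falls back to defaultValue.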
export function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
) {
  if (typeof x !== "number" || isNaN(x)) {
    return defaultValue;
  }
  return Math.min(max, Math.max(min, x));
}
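
// Fall back to "gpt-3.5-turbo" when the given model name is unknown or marked unavailable.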
export function limitModel(name: string) {
  return ALL_MODELS.some((m) => m.name === name && m.available)
    ? name
    : "gpt-3.5-turbo";
}
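
// Per-field sanitizers applied when the user edits model settings in the UI.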
export const ModalConfigValidator = {
  model(x: string) {
    return limitModel(x) as ModelType;
  },
  max_tokens(x: number) {
    return limitNumber(x, 0, 32000, 2000);
  },
  presence_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);
  },
  frequency_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);
  },
  temperature(x: number) {
    return limitNumber(x, 0, 1, 1);
  },
  top_p(x: number) {
    return limitNumber(x, 0, 1, 1);
  },
};
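
// App-wide config store, persisted under StoreKey.Config. When the stored
// version is older than 3.3, migrate() resets the newer fields to their
// defaults so previously persisted configs keep working.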
export const useAppConfig = create<ChatConfigStore>()(
  persist(
    (set, get) => ({
      ...DEFAULT_CONFIG,

      reset() {
        set(() => ({ ...DEFAULT_CONFIG }));
      },

      update(updater) {
        const config = { ...get() };
        updater(config);
        set(() => config);
      },
    }),
    {
      name: StoreKey.Config,
      version: 3.3,
      migrate(persistedState, version) {
        if (version === 3.3) return persistedState as any;

        const state = persistedState as ChatConfig;
        state.modelConfig.sendMemory = true;
        state.modelConfig.historyMessageCount = 4;
        state.modelConfig.compressMessageLengthThreshold = 1000;
        state.modelConfig.frequency_penalty = 0;
        state.modelConfig.top_p = 1;
        state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
        state.dontShowMaskSplashScreen = false;

        return state;
      },
    },
  ),
);
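
// Usage sketch (illustrative, not part of this file). Assumes it runs inside a
// React component tree where the zustand hook can be called; the field values
// below are examples only.
//
//   const config = useAppConfig(); // subscribe to the whole config in a component
//   console.log(config.fontSize, config.modelConfig.model);
//
//   // Outside React, or for imperative updates:
//   useAppConfig.getState().update((cfg) => {
//     cfg.theme = Theme.Dark;
//     cfg.modelConfig.temperature = ModalConfigValidator.temperature(0.7);
//   });
//
//   useAppConfig.getState().reset(); // restore DEFAULT_CONFIG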