config.ts

import { create } from "zustand";
import { persist } from "zustand/middleware";
import { getClientConfig } from "../config/client";
import { DEFAULT_INPUT_TEMPLATE, StoreKey } from "../constant";

export enum SubmitKey {
  Enter = "Enter",
  CtrlEnter = "Ctrl + Enter",
  ShiftEnter = "Shift + Enter",
  AltEnter = "Alt + Enter",
  MetaEnter = "Meta + Enter",
}

export enum Theme {
  Auto = "auto",
  Dark = "dark",
  Light = "light",
}
export const DEFAULT_CONFIG = {
  submitKey: SubmitKey.CtrlEnter as SubmitKey,
  avatar: "1f603",
  fontSize: 14,
  theme: Theme.Auto as Theme,
  tightBorder: !!getClientConfig()?.isApp,
  sendPreviewBubble: true,
  sidebarWidth: 300,
  disablePromptHint: false,
  dontShowMaskSplashScreen: false, // don't show the splash screen when creating a new chat

  modelConfig: {
    model: "gpt-3.5-turbo" as ModelType,
    temperature: 0.5,
    top_p: 1,
    max_tokens: 2000,
    presence_penalty: 0,
    frequency_penalty: 0,
    sendMemory: true,
    historyMessageCount: 4,
    compressMessageLengthThreshold: 1000,
    template: DEFAULT_INPUT_TEMPLATE,
  },
};
export type ChatConfig = typeof DEFAULT_CONFIG;

export type ChatConfigStore = ChatConfig & {
  reset: () => void;
  update: (updater: (config: ChatConfig) => void) => void;
};

export type ModelConfig = ChatConfig["modelConfig"];

const ENABLE_GPT4 = true;
export const ALL_MODELS = [
  {
    name: "gpt-4",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-0314",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-0613",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k-0314",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k-0613",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-3.5-turbo",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-0301",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-0613",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-16k",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-16k-0613",
    available: true,
  },
  {
    name: "qwen-v1", // Tongyi Qianwen
    available: false,
  },
  {
    name: "ernie", // ERNIE Bot (Wenxin Yiyan)
    available: false,
  },
  {
    name: "spark", // iFlytek Spark
    available: false,
  },
  {
    name: "llama", // LLaMA
    available: false,
  },
  {
    name: "chatglm", // ChatGLM-6B
    available: false,
  },
] as const;

export type ModelType = (typeof ALL_MODELS)[number]["name"];
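
// Because ALL_MODELS is declared `as const`, ModelType resolves to the union of
// the `name` literals above, i.e. "gpt-4" | "gpt-4-0314" | ... | "chatglm".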
export function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
) {
  if (typeof x !== "number" || isNaN(x)) {
    return defaultValue;
  }

  return Math.min(max, Math.max(min, x));
}
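
// Illustrative results: out-of-range values are clamped to [min, max], and
// non-numeric input falls back to the supplied default.
//
//   limitNumber(1.5, 0, 1, 1);  // => 1   (clamped to max)
//   limitNumber(-3, -2, 2, 0);  // => -2  (clamped to min)
//   limitNumber(NaN, 0, 1, 1);  // => 1   (default value)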
export function limitModel(name: string) {
  return ALL_MODELS.some((m) => m.name === name && m.available)
    ? name
    : "gpt-3.5-turbo";
}
export const ModalConfigValidator = {
  model(x: string) {
    return limitModel(x) as ModelType;
  },
  max_tokens(x: number) {
    return limitNumber(x, 0, 32000, 2000);
  },
  presence_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);
  },
  frequency_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);
  },
  temperature(x: number) {
    return limitNumber(x, 0, 1, 1);
  },
  top_p(x: number) {
    return limitNumber(x, 0, 1, 1);
  },
};
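
// Usage sketch (hypothetical caller, not defined in this module): raw input,
// e.g. from a settings form, can be passed through the validator before it is
// written into the store, so persisted values always stay in range.
//
//   const raw = 7; // value typed into a settings field
//   const temperature = ModalConfigValidator.temperature(raw); // => 1 (clamped to [0, 1])
//   const maxTokens = ModalConfigValidator.max_tokens(NaN);    // => 2000 (default)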
export const useAppConfig = create<ChatConfigStore>()(
  persist(
    (set, get) => ({
      ...DEFAULT_CONFIG,

      reset() {
        set(() => ({ ...DEFAULT_CONFIG }));
      },

      update(updater) {
        const config = { ...get() };
        updater(config);
        set(() => config);
      },
    }),
    {
      name: StoreKey.Config,
      version: 3.3,
      migrate(persistedState, version) {
        if (version === 3.3) return persistedState as any;

        const state = persistedState as ChatConfig;
        state.modelConfig.sendMemory = true;
        state.modelConfig.historyMessageCount = 4;
        state.modelConfig.compressMessageLengthThreshold = 1000;
        state.modelConfig.frequency_penalty = 0;
        state.modelConfig.top_p = 1;
        state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
        state.dontShowMaskSplashScreen = false;

        return state;
      },
    },
  ),
);
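
// Usage sketch (hypothetical callers, not defined in this module): the hook is
// a regular zustand store, so it can be read as a React hook or via getState();
// `update` mutates a shallow copy and then commits it, and the persist
// middleware saves the result under StoreKey.Config.
//
//   const fontSize = useAppConfig((s) => s.fontSize);          // subscribe inside a component
//   useAppConfig.getState().update((c) => (c.fontSize = 16));  // write outside React
//   useAppConfig.getState().reset();                           // restore DEFAULT_CONFIG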