config.ts

import { create } from "zustand";
import { persist } from "zustand/middleware";
import { StoreKey } from "../constant";

export enum SubmitKey {
  Enter = "Enter",
  CtrlEnter = "Ctrl + Enter",
  ShiftEnter = "Shift + Enter",
  AltEnter = "Alt + Enter",
  MetaEnter = "Meta + Enter",
}

export enum Theme {
  Auto = "auto",
  Dark = "dark",
  Light = "light",
}
export const DEFAULT_CONFIG = {
  submitKey: SubmitKey.CtrlEnter as SubmitKey,
  avatar: "1f603",
  fontSize: 14,
  theme: Theme.Auto as Theme,
  tightBorder: false,
  sendPreviewBubble: true,
  sidebarWidth: 300,
  disablePromptHint: false,
  dontShowMaskSplashScreen: false, // don't show the mask splash screen when creating a chat

  modelConfig: {
    model: "gpt-3.5-turbo" as ModelType,
    temperature: 1,
    max_tokens: 2000,
    presence_penalty: 0,
    sendMemory: true,
    historyMessageCount: 4,
    compressMessageLengthThreshold: 1000,
  },
};
export type ChatConfig = typeof DEFAULT_CONFIG;

export type ChatConfigStore = ChatConfig & {
  reset: () => void;
  update: (updater: (config: ChatConfig) => void) => void;
};

export type ModelConfig = ChatConfig["modelConfig"];

const ENABLE_GPT4 = true;
export const ALL_MODELS = [
  {
    name: "gpt-4",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-0314",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-4-32k-0314",
    available: ENABLE_GPT4,
  },
  {
    name: "gpt-3.5-turbo",
    available: true,
  },
  {
    name: "gpt-3.5-turbo-0301",
    available: true,
  },
] as const;

export type ModelType = (typeof ALL_MODELS)[number]["name"];
// Clamp x to the range [min, max]; fall back to defaultValue when x is not a number.
export function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
) {
  if (typeof x !== "number" || isNaN(x)) {
    return defaultValue;
  }

  return Math.min(max, Math.max(min, x));
}

// Keep the model name if it is known and available; otherwise fall back to gpt-3.5-turbo.
export function limitModel(name: string) {
  return ALL_MODELS.some((m) => m.name === name && m.available)
    ? name
    : ALL_MODELS[4].name;
}

export const ModalConfigValidator = {
  model(x: string) {
    return limitModel(x) as ModelType;
  },
  max_tokens(x: number) {
    return limitNumber(x, 0, 32000, 2000);
  },
  presence_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);
  },
  temperature(x: number) {
    return limitNumber(x, 0, 1, 1);
  },
};
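
// Illustrative examples of the validator's clamping behavior (not in the
// original source): out-of-range or unknown inputs fall back to safe values.
//   ModalConfigValidator.max_tokens(50000)     // -> 32000 (clamped to max)
//   ModalConfigValidator.temperature(-1)       // -> 0 (clamped to min)
//   ModalConfigValidator.presence_penalty(NaN) // -> 0 (defaultValue)
//   ModalConfigValidator.model("not-a-model")  // -> "gpt-3.5-turbo" (fallback)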
export const useAppConfig = create<ChatConfigStore>()(
  persist(
    (set, get) => ({
      ...DEFAULT_CONFIG,

      reset() {
        set(() => ({ ...DEFAULT_CONFIG }));
      },

      update(updater) {
        const config = { ...get() };
        updater(config);
        set(() => config);
      },
    }),
    {
      name: StoreKey.Config,
      version: 2,

      migrate(persistedState, version) {
        if (version === 2) return persistedState as any;

        // Migrate pre-v2 state: backfill fields that did not exist before.
        const state = persistedState as ChatConfig;
        state.modelConfig.sendMemory = true;
        state.modelConfig.historyMessageCount = 4;
        state.modelConfig.compressMessageLengthThreshold = 1000;
        state.dontShowMaskSplashScreen = false;

        return state;
      },
    },
  ),
);
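
// --- Usage sketch (illustrative, not part of the upstream file) ---
// Shows how the persisted store can be read and updated. Outside React,
// zustand exposes the state via `useAppConfig.getState()`; inside a component
// you would call `useAppConfig()` as a hook instead. The function name and
// the specific values below are hypothetical.
export function exampleConfigUsage() {
  const config = useAppConfig.getState();

  // Read persisted values.
  console.log(config.theme, config.modelConfig.model);

  // Apply changes through the `update` helper; the validator keeps the
  // value inside its allowed range.
  config.update((c) => {
    c.submitKey = SubmitKey.Enter;
    c.modelConfig.temperature = ModalConfigValidator.temperature(0.7);
  });

  // config.reset(); // restore DEFAULT_CONFIG if needed
}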