chat.ts

import { trimTopic } from "../utils";

import Locale, { getLang } from "../locales";
import { showToast } from "../components/ui-lib";
import { ModelConfig, ModelType, useAppConfig } from "./config";
import { createEmptyMask, Mask } from "./mask";
import {
  DEFAULT_INPUT_TEMPLATE,
  DEFAULT_SYSTEM_TEMPLATE,
  KnowledgeCutOffDate,
  StoreKey,
  SUMMARIZE_MODEL,
} from "../constant";
import { api, RequestMessage } from "../client/api";
import { ChatControllerPool } from "../client/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";

export type ChatMessage = RequestMessage & {
  date: string;
  streaming?: boolean;
  isError?: boolean;
  id: string;
  model?: ModelType;
};

export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  return {
    id: nanoid(),
    date: new Date().toLocaleString(),
    role: "user",
    content: "",
    ...override,
  };
}

export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

export interface ChatSession {
  id: string;
  topic: string;
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  lastUpdate: number;
  lastSummarizeIndex: number;
  clearContextIndex?: number;
  mask: Mask;
}

export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});

function createEmptySession(): ChatSession {
  return {
    id: nanoid(),
    topic: DEFAULT_TOPIC,
    memoryPrompt: "",
    messages: [],
    stat: {
      tokenCount: 0,
      wordCount: 0,
      charCount: 0,
    },
    lastUpdate: Date.now(),
    lastSummarizeIndex: 0,
    mask: createEmptyMask(),
  };
}

function getSummarizeModel(currentModel: string) {
  // if a gpt-* model is in use, force gpt-3.5 for summarizing
  return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
}

function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
}

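/**
 * Expands `{{...}}` placeholders in the input template. Besides the raw
 * user input, templates can reference {{cutoff}}, {{model}}, {{time}}, and
 * {{lang}}; for example, an (illustrative) template of "[{{time}}] {{input}}"
 * would prefix every prompt with the current timestamp.
 */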
function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  let cutoff =
    KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;

  const vars = {
    cutoff,
    model: modelConfig.model,
    time: new Date().toLocaleString(),
    lang: getLang(),
    input: input,
  };

  let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;

  // the template must contain {{input}}; append it if missing
  const inputVar = "{{input}}";
  if (!output.includes(inputVar)) {
    output += "\n" + inputVar;
  }

  Object.entries(vars).forEach(([name, value]) => {
    output = output.replaceAll(`{{${name}}}`, value);
  });

  return output;
}

const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
};

export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }

    const methods = {
      clearSessions() {
        set(() => ({
          sessions: [createEmptySession()],
          currentSessionIndex: 0,
        }));
      },

      selectSession(index: number) {
        set({
          currentSessionIndex: index,
        });
      },

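      // Reorders sessions by removing the item at `from` and reinserting it
      // at `to`, then shifts currentSessionIndex so the same session stays
      // selected after the move.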
      moveSession(from: number, to: number) {
        set((state) => {
          const { sessions, currentSessionIndex: oldIndex } = state;

          // move the session
          const newSessions = [...sessions];
          const session = newSessions[from];
          newSessions.splice(from, 1);
          newSessions.splice(to, 0, session);

          // adjust the current session index
          let newIndex = oldIndex === from ? to : oldIndex;
          if (oldIndex > from && oldIndex <= to) {
            newIndex -= 1;
          } else if (oldIndex < from && oldIndex >= to) {
            newIndex += 1;
          }

          return {
            currentSessionIndex: newIndex,
            sessions: newSessions,
          };
        });
      },

      newSession(mask?: Mask) {
        const session = createEmptySession();

        if (mask) {
          const config = useAppConfig.getState();
          const globalModelConfig = config.modelConfig;

          session.mask = {
            ...mask,
            modelConfig: {
              ...globalModelConfig,
              ...mask.modelConfig,
            },
          };
          session.topic = mask.name;
        }

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [session].concat(state.sessions),
        }));
      },

      nextSession(delta: number) {
        const n = get().sessions.length;
        const limit = (x: number) => (x + n) % n;
        const i = get().currentSessionIndex;
        get().selectSession(limit(i + delta));
      },

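      // Deletes the session at `index`, keeping at least one (empty) session
      // alive, and shows a toast that can restore the previous state for
      // five seconds.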
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // for undoing the delete action
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },

      currentSession() {
        let index = get().currentSessionIndex;
        const sessions = get().sessions;

        // clamp an out-of-range index back into the valid range
        if (index < 0 || index >= sessions.length) {
          index = Math.min(sessions.length - 1, Math.max(0, index));
          set(() => ({ currentSessionIndex: index }));
        }

        const session = sessions[index];
        return session;
      },

      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // clone the array so the state change is detected
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        get().summarizeSession();
      },

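      // Sends the user's input: applies the input template, appends the user
      // and (streaming) bot messages to the session, then streams the reply,
      // updating the bot message in place on every chunk.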
      async onUserInput(content: string) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);

        const userMessage: ChatMessage = createMessage({
          role: "user",
          content: userContent,
        });

        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;

        // save the user's and bot's messages
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });

        // make the request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onError(error) {
            const isAborted = error.message.includes("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );

            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect the controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },

      getMemoryPrompt() {
        const session = get().currentSession();

        return {
          role: "system",
          content:
            session.memoryPrompt.length > 0
              ? Locale.Store.Prompt.History(session.memoryPrompt)
              : "",
          date: "",
        } as ChatMessage;
      },

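      // Assembles the context window to send: the injected system prompt,
      // long-term memory (the summarized history), the mask's in-context
      // prompts, and as many recent messages as fit under the token budget.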
      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;

        // in-context prompts
        const contextPrompts = session.mask.context.slice();

        // system prompts, to get close to OpenAI Web ChatGPT
        const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts;
        const systemPrompts = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];
        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }

        // long-term memory
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts = shouldSendLongTermMemory
          ? [get().getMemoryPrompt()]
          : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;

        // short-term memory
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );

        // let's concat the messages to send, in five parts:
        // 0. system prompt: to get close to OpenAI Web ChatGPT
        // 1. long-term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short-term memory: the latest n messages
        // 4. the newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // and if the user has cleared the history, exclude the memory too
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;

        // collect as many recent messages as the token budget allows
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(msg.content);
          reversedRecentMessages.push(msg);
        }

        // concat all messages
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];

        return recentMessages;
      },

      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },

      resetSession() {
        get().updateCurrentSession((session) => {
          session.messages = [];
          session.memoryPrompt = "";
        });
      },

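      // Does two jobs: generates a topic title once the chat passes a
      // minimum length, and compresses older messages into `memoryPrompt`
      // when the un-summarized history grows past the configured threshold.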
      summarizeSession() {
        const config = useAppConfig.getState();
        const session = get().currentSession();
        const messages = session.messages;

        // should summarize the topic after chatting more than 50 words
        const SUMMARIZE_MIN_LEN = 50;
        if (
          config.enableAutoGenerateTitle &&
          session.topic === DEFAULT_TOPIC &&
          countMessages(messages) >= SUMMARIZE_MIN_LEN
        ) {
          const topicMessages = messages.concat(
            createMessage({
              role: "user",
              content: Locale.Store.Prompt.Topic,
            }),
          );
          api.llm.chat({
            messages: topicMessages,
            config: {
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onFinish(message) {
              get().updateCurrentSession(
                (session) =>
                  (session.topic =
                    message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
              );
            },
          });
        }

        const modelConfig = session.mask.modelConfig;
        const summarizeIndex = Math.max(
          session.lastSummarizeIndex,
          session.clearContextIndex ?? 0,
        );

        // skip error messages and anything already summarized
        let toBeSummarizedMsgs = messages
          .filter((msg) => !msg.isError)
          .slice(summarizeIndex);

        const historyMsgLength = countMessages(toBeSummarizedMsgs);

        // note the parentheses: without them, `??` would apply to the boolean
        // result of the comparison instead of to max_tokens
        if (historyMsgLength > (modelConfig?.max_tokens ?? 4000)) {
          const n = toBeSummarizedMsgs.length;
          toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
            Math.max(0, n - modelConfig.historyMessageCount),
          );
        }

        // add memory prompt
        toBeSummarizedMsgs.unshift(get().getMemoryPrompt());

        const lastSummarizeIndex = session.messages.length;

        console.log(
          "[Chat History] ",
          toBeSummarizedMsgs,
          historyMsgLength,
          modelConfig.compressMessageLengthThreshold,
        );

        if (
          historyMsgLength > modelConfig.compressMessageLengthThreshold &&
          modelConfig.sendMemory
        ) {
          api.llm.chat({
            messages: toBeSummarizedMsgs.concat(
              createMessage({
                role: "system",
                content: Locale.Store.Prompt.Summarize,
                date: "",
              }),
            ),
            config: {
              ...modelConfig,
              stream: true,
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onUpdate(message) {
              session.memoryPrompt = message;
            },
            onFinish(message) {
              console.log("[Memory] ", message);
              session.lastSummarizeIndex = lastSummarizeIndex;
            },
            onError(err) {
              console.error("[Summarize] ", err);
            },
          });
        }
      },

      updateStat(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          session.stat.charCount += message.content.length;
          // TODO: should update chat count and word count
        });
      },

      updateCurrentSession(updater: (session: ChatSession) => void) {
        const sessions = get().sessions;
        const index = get().currentSessionIndex;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },

      clearAllData() {
        localStorage.clear();
        location.reload();
      },
    };

    return methods;
  },
  {
    name: StoreKey.Chat,
    version: 3.1,
    migrate(persistedState, version) {
      const state = persistedState as any;
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate id to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable the `enableInjectSystemPrompts` attribute for old sessions;
      // resolves the issue of old sessions not enabling it automatically.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // exclude those already set by the user
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // since the user may have changed this configuration,
            // use their current setting instead of the default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      return newState as any;
    },
  },
);
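
// A minimal usage sketch, assuming `createPersistStore` returns a zustand
// store (so `getState()` is available); `sendAndLog` is a hypothetical
// helper, not part of this file:
//
//   const chatStore = useChatStore.getState();
//   async function sendAndLog(text: string) {
//     await chatStore.onUserInput(text); // streams the reply into state
//     const session = chatStore.currentSession();
//     console.log(session.messages.at(-1)?.content);
//   }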