// openai.ts

import { REQUEST_TIMEOUT_MS } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@microsoft/fetch-event-source";

import { ChatOptions, LLMApi, LLMUsage } from "../api";
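
// ChatGPTApi implements the LLMApi interface against OpenAI's
// chat-completions endpoint: non-streaming calls go through plain fetch,
// while streaming calls consume server-sent events via
// @microsoft/fetch-event-source.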
export class ChatGPTApi implements LLMApi {
  public ChatPath = "v1/chat/completions";

  path(path: string): string {
    let openaiUrl = useAccessStore.getState().openaiUrl;
    // Trim a trailing slash so the join below does not produce "//".
    if (openaiUrl.endsWith("/")) {
      openaiUrl = openaiUrl.slice(0, openaiUrl.length - 1);
    }
    return [openaiUrl, path].join("/");
  }

  extractMessage(res: any) {
    const choice = res.choices?.at(0);
    // Full responses carry text in message.content; stream chunks put
    // incremental tokens in delta.content, so check both.
    return choice?.message?.content ?? choice?.delta?.content ?? "";
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: v.content,
    }));

    // Per-session (mask) config overrides the app defaults, and the
    // model requested for this call overrides both.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      model: options.model,
    };

    const requestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
    };

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();

    try {
      const chatPath = this.path(this.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        // Content-Type is required for the JSON body; an Authorization
        // header for the API key would normally be added here as well.
        headers: {
          "Content-Type": "application/json",
        },
      };

      // Abort the request if it runs past REQUEST_TIMEOUT_MS.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let finished = false;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            // A 2xx response with an event-stream content type means the
            // stream opened cleanly (tolerate "; charset=..." suffixes).
            if (res.ok && contentType?.startsWith(EventStreamContentType)) {
              return;
            }
            if (res.status === 401) {
              // TODO: Unauthorized 401
              responseText += "\n\n";
            } else if (res.status !== 200) {
              console.error("[Request] response", res);
              throw new Error("[Request] server error");
            }
          },
          onmessage: (ev) => {
            // The stream ends with a literal "[DONE]" sentinel.
            if (ev.data === "[DONE]") {
              finished = true;
              return options.onFinish(responseText);
            }
            try {
              const resJson = JSON.parse(ev.data);
              const message = this.extractMessage(resJson);
              responseText += message;
              options.onUpdate(responseText, message);
            } catch (e) {
              console.error("[Request] stream error", e);
              options.onError(e as Error);
            }
          },
          onclose() {
            // The server closes the connection after [DONE]; only a close
            // before that is an error.
            if (!finished) {
              options.onError(new Error("stream closed unexpectedly"));
            }
          },
          onerror(err) {
            options.onError(err);
          },
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError(e as Error);
    }
  }

  // Usage accounting is not implemented yet; report a zeroed quota.
  async usage() {
    return {
      used: 0,
      total: 0,
    } as LLMUsage;
  }
}
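
// A minimal usage sketch (hypothetical values): the option shape mirrors
// what chat() reads above (messages, model, config.stream, and the
// onUpdate/onFinish/onError callbacks).
//
//   const api = new ChatGPTApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Say hello." }],
//     model: "gpt-3.5-turbo",
//     config: { stream: true },
//     onUpdate: (full, delta) => console.log("[partial]", delta),
//     onFinish: (full) => console.log("[done]", full),
//     onError: (err) => console.error("[error]", err),
//   });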