// openai.ts — OpenAI chat-completions client for the app's LLM API layer.
  1. import { OpenaiPath, REQUEST_TIMEOUT_MS } from "@/app/constant";
  2. import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  3. import { ChatOptions, getHeaders, LLMApi, LLMUsage } from "../api";
  4. import Locale from "../../locales";
  5. import {
  6. EventStreamContentType,
  7. fetchEventSource,
  8. } from "@fortaine/fetch-event-source";
  9. import { prettyObject } from "@/app/utils/format";
  10. export class ChatGPTApi implements LLMApi {
  11. path(path: string): string {
  12. let openaiUrl = useAccessStore.getState().openaiUrl;
  13. if (openaiUrl.endsWith("/")) {
  14. openaiUrl = openaiUrl.slice(0, openaiUrl.length - 1);
  15. }
  16. return [openaiUrl, path].join("/");
  17. }
  18. extractMessage(res: any) {
  19. return res.choices?.at(0)?.message?.content ?? "";
  20. }
  21. async chat(options: ChatOptions) {
  22. const messages = options.messages.map((v) => ({
  23. role: v.role,
  24. content: v.content,
  25. }));
  26. const modelConfig = {
  27. ...useAppConfig.getState().modelConfig,
  28. ...useChatStore.getState().currentSession().mask.modelConfig,
  29. ...{
  30. model: options.config.model,
  31. },
  32. };
  33. const requestPayload = {
  34. messages,
  35. stream: options.config.stream,
  36. model: modelConfig.model,
  37. temperature: modelConfig.temperature,
  38. presence_penalty: modelConfig.presence_penalty,
  39. frequency_penalty: modelConfig.frequency_penalty,
  40. };
  41. console.log("[Request] openai payload: ", requestPayload);
  42. const shouldStream = !!options.config.stream;
  43. const controller = new AbortController();
  44. options.onController?.(controller);
  45. try {
  46. const chatPath = this.path(OpenaiPath.ChatPath);
  47. const chatPayload = {
  48. method: "POST",
  49. body: JSON.stringify(requestPayload),
  50. signal: controller.signal,
  51. headers: getHeaders(),
  52. };
  53. // make a fetch request
  54. const requestTimeoutId = setTimeout(
  55. () => controller.abort(),
  56. REQUEST_TIMEOUT_MS,
  57. );
  58. if (shouldStream) {
  59. let responseText = "";
  60. let finished = false;
  61. const finish = () => {
  62. if (!finished) {
  63. options.onFinish(responseText);
  64. finished = true;
  65. }
  66. };
  67. controller.signal.onabort = finish;
  68. fetchEventSource(chatPath, {
  69. ...chatPayload,
  70. async onopen(res) {
  71. clearTimeout(requestTimeoutId);
  72. const contentType = res.headers.get("content-type");
  73. console.log(
  74. "[OpenAI] request response content type: ",
  75. contentType,
  76. );
  77. if (contentType?.startsWith("text/plain")) {
  78. responseText = await res.clone().text();
  79. return finish();
  80. }
  81. if (
  82. !res.ok ||
  83. !res.headers
  84. .get("content-type")
  85. ?.startsWith(EventStreamContentType) ||
  86. res.status !== 200
  87. ) {
  88. const responseTexts = [responseText];
  89. let extraInfo = await res.clone().text();
  90. try {
  91. const resJson = await res.clone().json();
  92. extraInfo = prettyObject(resJson);
  93. } catch {}
  94. if (res.status === 401) {
  95. responseTexts.push(Locale.Error.Unauthorized);
  96. }
  97. if (extraInfo) {
  98. responseTexts.push(extraInfo);
  99. }
  100. responseText = responseTexts.join("\n\n");
  101. return finish();
  102. }
  103. },
  104. onmessage(msg) {
  105. if (msg.data === "[DONE]" || finished) {
  106. return finish();
  107. }
  108. const text = msg.data;
  109. try {
  110. const json = JSON.parse(text);
  111. const delta = json.choices[0].delta.content;
  112. if (delta) {
  113. responseText += delta;
  114. options.onUpdate?.(responseText, delta);
  115. }
  116. } catch (e) {
  117. console.error("[Request] parse error", text, msg);
  118. }
  119. },
  120. onclose() {
  121. finish();
  122. },
  123. onerror(e) {
  124. options.onError?.(e);
  125. throw e;
  126. },
  127. openWhenHidden: true,
  128. });
  129. } else {
  130. const res = await fetch(chatPath, chatPayload);
  131. clearTimeout(requestTimeoutId);
  132. const resJson = await res.json();
  133. const message = this.extractMessage(resJson);
  134. options.onFinish(message);
  135. }
  136. } catch (e) {
  137. console.log("[Request] failed to make a chat reqeust", e);
  138. options.onError?.(e as Error);
  139. }
  140. }
  141. async usage() {
  142. const formatDate = (d: Date) =>
  143. `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
  144. .getDate()
  145. .toString()
  146. .padStart(2, "0")}`;
  147. const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
  148. const now = new Date();
  149. const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
  150. const startDate = formatDate(startOfMonth);
  151. const endDate = formatDate(new Date(Date.now() + ONE_DAY));
  152. const [used, subs] = await Promise.all([
  153. fetch(
  154. this.path(
  155. `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
  156. ),
  157. {
  158. method: "GET",
  159. headers: getHeaders(),
  160. },
  161. ),
  162. fetch(this.path(OpenaiPath.SubsPath), {
  163. method: "GET",
  164. headers: getHeaders(),
  165. }),
  166. ]);
  167. if (used.status === 401) {
  168. throw new Error(Locale.Error.Unauthorized);
  169. }
  170. if (!used.ok || !subs.ok) {
  171. throw new Error("Failed to query usage from openai");
  172. }
  173. const response = (await used.json()) as {
  174. total_usage?: number;
  175. error?: {
  176. type: string;
  177. message: string;
  178. };
  179. };
  180. const total = (await subs.json()) as {
  181. hard_limit_usd?: number;
  182. };
  183. if (response.error && response.error.type) {
  184. throw Error(response.error.message);
  185. }
  186. if (response.total_usage) {
  187. response.total_usage = Math.round(response.total_usage) / 100;
  188. }
  189. if (total.hard_limit_usd) {
  190. total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
  191. }
  192. return {
  193. used: response.total_usage,
  194. total: total.hard_limit_usd,
  195. } as LLMUsage;
  196. }
  197. }
// Re-export the OpenAI route constants (imported from "@/app/constant"
// above) so consumers of this client module can reference the paths
// without a separate import.
export { OpenaiPath };