// openai.ts

import {
  DEFAULT_API_HOST,
  DEFAULT_MODELS,
  OpenaiPath,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";

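/**
 * Subset of the OpenAI model-list response (GET /v1/models) that this client
 * reads; the live payload carries additional per-model fields.
 */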
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

export class ChatGPTApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    let openaiUrl = useAccessStore.getState().openaiUrl;
    const apiPath = "/api/openai";

    if (openaiUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      openaiUrl = isApp ? DEFAULT_API_HOST : apiPath;
    }
    if (openaiUrl.endsWith("/")) {
      openaiUrl = openaiUrl.slice(0, openaiUrl.length - 1);
    }
    if (!openaiUrl.startsWith("http") && !openaiUrl.startsWith(apiPath)) {
      openaiUrl = "https://" + openaiUrl;
    }
    return [openaiUrl, path].join("/");
  }
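
  // Illustration of path() resolution (a sketch, assuming OpenaiPath.ChatPath
  // is "v1/chat/completions"; see @/app/constant for the real value): with no
  // custom openaiUrl configured in a web build,
  //   this.path(OpenaiPath.ChatPath) === "/api/openai/v1/chat/completions"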

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
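
  // extractMessage() expects the standard chat-completions response shape:
  //   { choices: [{ message: { content: "..." } }] }
  // Any other shape falls through the optional chain and yields "".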

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: v.content,
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const requestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is intentionally not sent; the request relies on the
      // server-side default instead.
    };

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(OpenaiPath.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request once REQUEST_TIMEOUT_MS elapses
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let finished = false;

        const finish = () => {
          if (!finished) {
            options.onFinish(responseText);
            finished = true;
          }
        };

        controller.signal.onabort = finish;

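        // Stream the completion as server-sent events: onopen validates the
        // response, onmessage accumulates incremental deltas, and finish() is
        // idempotent so abort, close, and "[DONE]" may safely race.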
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[OpenAI] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }
              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const delta = json.choices[0].delta.content;
              if (delta) {
                responseText += delta;
                options.onUpdate?.(responseText, delta);
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    const formatDate = (d: Date) =>
      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
        .getDate()
        .toString()
        .padStart(2, "0")}`;
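    // e.g. formatDate(new Date(2024, 0, 5)) === "2024-01-05"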
    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
    const now = new Date();
    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
    const startDate = formatDate(startOfMonth);
    const endDate = formatDate(new Date(Date.now() + ONE_DAY));

    const [used, subs] = await Promise.all([
      fetch(
        this.path(
          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
        ),
        {
          method: "GET",
          headers: getHeaders(),
        },
      ),
      fetch(this.path(OpenaiPath.SubsPath), {
        method: "GET",
        headers: getHeaders(),
      }),
    ]);

    if (used.status === 401) {
      throw new Error(Locale.Error.Unauthorized);
    }
    if (!used.ok || !subs.ok) {
      throw new Error("Failed to query usage from openai");
    }

    const response = (await used.json()) as {
      total_usage?: number;
      error?: {
        type: string;
        message: string;
      };
    };
    const total = (await subs.json()) as {
      hard_limit_usd?: number;
    };

    if (response.error && response.error.type) {
      throw Error(response.error.message);
    }
    if (response.total_usage) {
      response.total_usage = Math.round(response.total_usage) / 100;
    }
    if (total.hard_limit_usd) {
      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
    }

    return {
      used: response.total_usage,
      total: total.hard_limit_usd,
    } as LLMUsage;
  }
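
  // usage() resolves to dollar figures, e.g. { used: 4.21, total: 120 }. The
  // dashboard usage endpoint reports total_usage in hundredths of a dollar,
  // hence the division by 100 above.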

  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as OpenAIListModelResponse;
    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    return chatModels.map((m) => ({
      name: m.id,
      available: true,
    }));
  }
}

export { OpenaiPath };
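
// Minimal usage sketch (illustrative only; the callback and config fields
// mirror how this file reads ChatOptions, whose full definition lives in
// "../api" and may include more members):
//
//   const api = new ChatGPTApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "gpt-3.5-turbo", stream: true },
//     onUpdate: (full, delta) => console.log(delta),
//     onFinish: (full) => console.log("final:", full),
//     onError: (e) => console.error(e),
//   });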