import {
  ApiPath,
  DEFAULT_MODELS,
  OpenaiPath,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAppConfig, useChatStore } from "@/app/store";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getFullApi } from "@/app/config/client";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}
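
// Illustrative response shape for the interface above, as returned by the
// OpenAI model-listing endpoint (abridged; extra fields are omitted here):
// { "object": "list", "data": [{ "id": "gpt-3.5-turbo", "object": "model", "root": "gpt-3.5-turbo" }] }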

export class ChatGPTApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    return getFullApi([ApiPath.OpenAI, path].join("/"));
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: v.content,
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const requestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is intentionally omitted: we let the API apply its own
      // default rather than guessing a cap client-side.
    };
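
    // Illustrative payload as serialized below (values depend on the active
    // model config; the model name here is just an example):
    // {"messages":[{"role":"user","content":"Hi"}],"stream":true,
    //  "model":"gpt-3.5-turbo","temperature":0.5,"presence_penalty":0,
    //  "frequency_penalty":0,"top_p":1}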

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(OpenaiPath.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request if no response arrives in time; for streaming
      // responses the timer is cleared in onopen, once the server replies
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
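
        // Pacing note: each animation frame flushes ~1/60 of whatever text is
        // buffered in remainText, so a burst of tokens drains over roughly one
        // second at 60 fps regardless of its size.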
        // animate the response to make streaming look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start the animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[OpenAI] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !contentType?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
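
            // Each event carries one JSON chunk, e.g. (illustrative, abridged):
            // {"choices":[{"delta":{"content":"Hel"},"index":0}]}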
            const text = msg.data;
            try {
              const json = JSON.parse(text) as {
                choices: Array<{
                  delta: {
                    content: string;
                  };
                }>;
              };
              const delta = json.choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, e);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.error("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    const formatDate = (d: Date) =>
      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
        .getDate()
        .toString()
        .padStart(2, "0")}`;
    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
    const now = new Date();
    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
    const startDate = formatDate(startOfMonth);
    // query through tomorrow so that today's usage is included
    const endDate = formatDate(new Date(Date.now() + ONE_DAY));

    const [used, subs] = await Promise.all([
      fetch(
        this.path(
          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
        ),
        {
          method: "GET",
          headers: getHeaders(),
        },
      ),
      fetch(this.path(OpenaiPath.SubsPath), {
        method: "GET",
        headers: getHeaders(),
      }),
    ]);

    if (used.status === 401) {
      throw new Error(Locale.Error.Unauthorized);
    }

    if (!used.ok || !subs.ok) {
      throw new Error("Failed to query usage from OpenAI");
    }

    const response = (await used.json()) as {
      total_usage?: number;
      error?: {
        type: string;
        message: string;
      };
    };

    const total = (await subs.json()) as {
      hard_limit_usd?: number;
    };

    if (response.error && response.error.type) {
      throw Error(response.error.message);
    }

    if (response.total_usage) {
      // total_usage is reported in hundredths of a dollar; convert to USD
      response.total_usage = Math.round(response.total_usage) / 100;
    }

    if (total.hard_limit_usd) {
      // round the hard limit to two decimal places
      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
    }

    return {
      used: response.total_usage,
      total: total.hard_limit_usd,
    } as LLMUsage;
  }

  async models(): Promise<LLMModel[]> {
    // model listing is disabled; fall back to the built-in default models
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as OpenAIListModelResponse;
    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    return chatModels.map((m) => ({
      name: m.id,
      available: true,
    }));
  }
}

export { OpenaiPath };
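
// Usage sketch (illustrative, not part of this module): how a caller might
// drive ChatGPTApi.chat with streaming enabled. The fields mirror how they are
// consumed above; onUpdate receives the accumulated text plus the newly
// flushed fragment, and onFinish the final text. The model name is an example.
//
// const api = new ChatGPTApi();
// await api.chat({
//   messages: [{ role: "user", content: "Hello!" }],
//   config: { model: "gpt-3.5-turbo", stream: true },
//   onUpdate: (full, fragment) => console.log(fragment),
//   onFinish: (full) => console.log("[done]", full),
//   onError: (e) => console.error(e),
// });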