// api.ts
import { ACCESS_CODE_PREFIX } from "../constant";
import { ChatMessage, ModelType, useAccessStore } from "../store";
import { ChatGPTApi } from "./platforms/openai";
  4. export const ROLES = ["system", "user", "assistant"] as const;
  5. export type MessageRole = (typeof ROLES)[number];
  6. export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
  7. export type ChatModel = ModelType;
  8. export interface RequestMessage {
  9. role: MessageRole;
  10. content: string;
  11. }
  12. export interface LLMConfig {
  13. model: string;
  14. temperature?: number;
  15. top_p?: number;
  16. stream?: boolean;
  17. presence_penalty?: number;
  18. frequency_penalty?: number;
  19. }
  20. export interface ChatOptions {
  21. messages: RequestMessage[];
  22. config: LLMConfig;
  23. onUpdate?: (message: string, chunk: string) => void;
  24. onFinish: (message: string) => void;
  25. onError?: (err: Error) => void;
  26. onController?: (controller: AbortController) => void;
  27. }
  28. export interface LLMUsage {
  29. used: number;
  30. total: number;
  31. }
  32. export abstract class LLMApi {
  33. abstract chat(options: ChatOptions): Promise<void>;
  34. abstract usage(): Promise<LLMUsage>;
  35. }
  36. type ProviderName = "openai" | "azure" | "claude" | "palm";
  37. interface Model {
  38. name: string;
  39. provider: ProviderName;
  40. ctxlen: number;
  41. }
  42. interface ChatProvider {
  43. name: ProviderName;
  44. apiConfig: {
  45. baseUrl: string;
  46. apiKey: string;
  47. summaryModel: Model;
  48. };
  49. models: Model[];
  50. chat: () => void;
  51. usage: () => void;
  52. }
  53. export class ClientApi {
  54. public llm: LLMApi;
  55. constructor() {
  56. this.llm = new ChatGPTApi();
  57. }
  58. config() {}
  59. prompts() {}
  60. masks() {}
  61. async share(messages: ChatMessage[], avatarUrl: string | null = null) {
  62. const msgs = messages
  63. .map((m) => ({
  64. from: m.role === "user" ? "human" : "gpt",
  65. value: m.content,
  66. }))
  67. .concat([
  68. {
  69. from: "human",
  70. value:
  71. "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web",
  72. },
  73. ]);
  74. // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用
  75. // Please do not modify this message
  76. console.log("[Share]", msgs);
  77. const res = await fetch("/sharegpt", {
  78. body: JSON.stringify({
  79. avatarUrl,
  80. items: msgs,
  81. }),
  82. headers: {
  83. "Content-Type": "application/json",
  84. },
  85. method: "POST",
  86. });
  87. const resJson = await res.json();
  88. console.log("[Share]", resJson);
  89. if (resJson.id) {
  90. return `https://shareg.pt/${resJson.id}`;
  91. }
  92. }
  93. }
  94. export const api = new ClientApi();
  95. export function getHeaders() {
  96. const accessStore = useAccessStore.getState();
  97. let headers: Record<string, string> = {
  98. "Content-Type": "application/json",
  99. "x-requested-with": "XMLHttpRequest",
  100. };
  101. const makeBearer = (token: string) => `Bearer ${token.trim()}`;
  102. const validString = (x: string) => x && x.length > 0;
  103. // use user's api key first
  104. if (validString(accessStore.token)) {
  105. headers.Authorization = makeBearer(accessStore.token);
  106. } else if (
  107. accessStore.enabledAccessControl() &&
  108. validString(accessStore.accessCode)
  109. ) {
  110. headers.Authorization = makeBearer(
  111. ACCESS_CODE_PREFIX + accessStore.accessCode,
  112. );
  113. }
  114. return headers;
  115. }