From a46eb717ec68088210d6bc734e501f701d2e3998 Mon Sep 17 00:00:00 2001
From: JasonLiu104
Date: Mon, 24 Jun 2024 16:32:42 +0800
Subject: [PATCH 1/2] [Feature] Adjust ncclog records
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app/client/platforms/openai.ts | 27 +++++++++++++++++++++++++++
 app/layout.tsx                 |  9 ++++++++-
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index f3599263023..8c8114398fa 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -52,6 +52,9 @@ interface RequestPayload {
   frequency_penalty: number;
   top_p: number;
   max_tokens?: number;
+  stream_options?: {
+    include_usage?: boolean;
+  };
 }
 
 export class ChatGPTApi implements LLMApi {
@@ -128,6 +131,17 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
+    // The fields recorded in nccLog can be adjusted as needed
+    const nccLog = {
+      stream: requestPayload.stream,
+      model: requestPayload.model,
+      temperature: requestPayload.temperature,
+      presence_penalty: requestPayload.presence_penalty,
+      frequency_penalty: requestPayload.frequency_penalty,
+      top_p: requestPayload.top_p,
+      usage: null,
+    };
+
     // add max_tokens to vision model
     if (visionModel && modelConfig.model.includes("preview")) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
@@ -155,6 +169,12 @@
       );
 
       if (shouldStream) {
+        chatPayload.body = JSON.stringify({
+          ...requestPayload,
+          stream_options: {
+            include_usage: true,
+          },
+        });
         let responseText = "";
         let remainText = "";
         let finished = false;
@@ -242,6 +262,7 @@
             const text = msg.data;
             try {
               const json = JSON.parse(text);
+              if (json.usage) nccLog.usage = json.usage;
               const choices = json.choices as Array<{
                 delta: { content: string };
               }>;
@@ -270,6 +291,12 @@
           },
           onclose() {
             finish();
+            // After every message is sent, execution eventually reaches here; the nccLog data can be sent out from here
+            window._elog.push({
+              web: "104_next_chat",
+              track: ["chat"],
+              ext: nccLog,
+            });
           },
           onerror(e) {
             options.onError?.(e);
diff --git a/app/layout.tsx b/app/layout.tsx
index 5898b21a1fa..a66b1107d0d 100644
--- a/app/layout.tsx
+++ b/app/layout.tsx
@@ -36,9 +36,16 @@ export default function RootLayout({
 [hunk body lost in extraction: 1 JSX line deleted and 8 added, likely markup stripped as HTML; only "{children}" survives]

From e54d64505043217aadf5b9cfef0c664a524b216b Mon Sep 17 00:00:00 2001
From: JasonLiu104
Date: Thu, 27 Jun 2024 10:44:40 +0800
Subject: [PATCH 2/2] [Feature] Add log
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app/client/platforms/openai.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 8c8114398fa..a2a0d2ca76d 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -292,6 +292,7 @@
           onclose() {
             finish();
             // After every message is sent, execution eventually reaches here; the nccLog data can be sent out from here
+            console.log("User has sent data", nccLog);
             window._elog.push({
               web: "104_next_chat",
               track: ["chat"],
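
Note on the mechanism behind the first patch (commentary, not part of the patches):
when stream_options.include_usage is set to true, the OpenAI chat completions stream
emits one extra final chunk before "[DONE]" whose "choices" array is empty and whose
"usage" field carries the token counts; every earlier chunk reports "usage": null.
A minimal standalone sketch of capturing that trailing chunk follows; the Usage
shape and the logUsage callback are illustrative assumptions, not code from this
repository.

    // Sketch: extract the delta text from each SSE data payload and capture
    // the trailing usage chunk that include_usage adds to the stream.
    interface Usage {
      prompt_tokens: number;
      completion_tokens: number;
      total_tokens: number;
    }

    function handleChunk(data: string, logUsage: (u: Usage) => void): string {
      if (data === "[DONE]") return ""; // stream terminator, not JSON
      const json = JSON.parse(data);
      // Only the final chunk carries usage when include_usage is enabled.
      if (json.usage) logUsage(json.usage as Usage);
      // Content chunks have a non-empty choices array; the usage chunk does not.
      return json.choices?.[0]?.delta?.content ?? "";
    }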
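
A second note: under strict TypeScript, window._elog.push(...) will not compile
unless _elog is declared on Window, presumably done elsewhere in the project or by
the tracker script added in app/layout.tsx. If it is not, a global augmentation
along these lines would be needed; the ElogEntry shape is an assumption inferred
from the object the patch pushes.

    // Assumed shape of entries pushed to the _elog queue.
    interface ElogEntry {
      web: string;
      track: string[];
      ext: Record<string, unknown>;
    }

    declare global {
      interface Window {
        _elog: ElogEntry[]; // queue consumed by the tracker script
      }
    }

    export {}; // keep this file a module so the global augmentation applies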