test: cover openai-compatible usage chunks
@@ -395,6 +395,77 @@ describe("openai transport stream", () => {
    });
  });
it("records usage from OpenAI-compatible streaming usage chunks", async () => {
|
||||
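    // Minimal OpenAI-compatible model definition pointing at a vLLM-style local endpoint.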
    const model = {
      id: "glm-5",
      name: "GLM-5",
      api: "openai-completions",
      provider: "vllm",
      baseUrl: "http://localhost:8000/v1",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 128000,
      maxTokens: 4096,
    } satisfies Model<"openai-completions">;
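    // Assistant message accumulator; usage counters start at zero and should be
    // filled in from the stream's usage chunk.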
    const output = {
      role: "assistant" as const,
      content: [],
      api: model.api,
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
      },
      stopReason: "stop",
      timestamp: Date.now(),
    };
    const stream: { push(event: unknown): void } = { push() {} };
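
    // Mock stream: a normal content chunk followed by a trailing usage-only chunk
    // (empty choices), the shape OpenAI-compatible servers such as vLLM emit.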
    async function* mockStream() {
      yield {
        id: "chatcmpl-vllm",
        object: "chat.completion.chunk" as const,
        created: 1775425651,
        model: "glm-5",
        choices: [
          {
            index: 0,
            delta: { role: "assistant" as const, content: "ok" },
            logprobs: null,
            finish_reason: "stop" as const,
          },
        ],
      };
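      // Final chunk carries only token counts; the transport should fold them into output.usage.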
      yield {
        id: "chatcmpl-vllm",
        object: "chat.completion.chunk" as const,
        created: 1775425651,
        model: "glm-5",
        choices: [],
        usage: {
          prompt_tokens: 8,
          completion_tokens: 10,
          total_tokens: 18,
        },
      };
    }

    await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
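
    // prompt_tokens/completion_tokens map to input/output; total_tokens is carried through.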
    expect(output.usage).toMatchObject({
      input: 8,
      output: 10,
      cacheRead: 0,
      totalTokens: 18,
    });
  });

  it("keeps OpenRouter thinking format for declared OpenRouter providers on custom proxy URLs", async () => {
    const streamFn = buildTransportAwareSimpleStreamFn(
      attachModelProviderRequestTransport(