tweak: adjust the session getUsage function to use the more up-to-date LanguageModelUsage instead of LanguageModelV2Usage (#22224)

Aiden Cline
2026-04-12 21:39:06 -05:00
committed by GitHub
parent 26d35583c5
commit a915fe74be
2 changed files with 99 additions and 13 deletions
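
In rough terms, the change swaps the usage type so that reasoning and cache token counts are read from nested detail objects when present, with the old flat fields kept as fallbacks. The following local type is a sketch of the shape the patched code reads, inferred from the diff and tests below rather than copied from the ai package's published LanguageModelUsage typing:

// Sketch only: the field names come from the diff and tests below;
// the authoritative type is LanguageModelUsage exported by the "ai" package.
type UsageSketch = {
  inputTokens?: number
  outputTokens?: number
  totalTokens?: number
  reasoningTokens?: number // legacy flat field, still honored as a fallback
  cachedInputTokens?: number // legacy flat field, still honored as a fallback
  inputTokenDetails?: {
    noCacheTokens?: number
    cacheReadTokens?: number
    cacheWriteTokens?: number
  }
  outputTokenDetails?: {
    textTokens?: number
    reasoningTokens?: number
  }
}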

View File

@@ -4,7 +4,7 @@ import { BusEvent } from "@/bus/bus-event"
 import { Bus } from "@/bus"
 import { Decimal } from "decimal.js"
 import z from "zod"
-import { type ProviderMetadata } from "ai"
+import { type ProviderMetadata, type LanguageModelUsage } from "ai"
 import { Flag } from "../flag/flag"
 import { Installation } from "../installation"
@@ -28,7 +28,6 @@ import { SessionID, MessageID, PartID } from "./schema"
 import type { Provider } from "@/provider/provider"
 import { Permission } from "@/permission"
 import { Global } from "@/global"
-import type { LanguageModelV2Usage } from "@ai-sdk/provider"
 import { Effect, Layer, Option, Context } from "effect"
 import { makeRuntime } from "@/effect/run-service"
@@ -240,7 +239,7 @@ export namespace Session {
   export const getUsage = (input: {
     model: Provider.Model
-    usage: LanguageModelV2Usage
+    usage: LanguageModelUsage
     metadata?: ProviderMetadata
   }) => {
     const safe = (value: number) => {
@@ -249,11 +248,14 @@ export namespace Session {
     }
     const inputTokens = safe(input.usage.inputTokens ?? 0)
     const outputTokens = safe(input.usage.outputTokens ?? 0)
-    const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)
+    const reasoningTokens = safe(input.usage.outputTokenDetails?.reasoningTokens ?? input.usage.reasoningTokens ?? 0)
-    const cacheReadInputTokens = safe(input.usage.cachedInputTokens ?? 0)
+    const cacheReadInputTokens = safe(
+      input.usage.inputTokenDetails?.cacheReadTokens ?? input.usage.cachedInputTokens ?? 0,
+    )
     const cacheWriteInputTokens = safe(
-      (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
+      (input.usage.inputTokenDetails?.cacheWriteTokens ??
+        input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
        // google-vertex-anthropic returns metadata under "vertex" key
        // (AnthropicMessagesLanguageModel custom provider key from 'vertex.anthropic.messages')
        input.metadata?.["vertex"]?.["cacheCreationInputTokens"] ??
@@ -274,7 +276,7 @@ export namespace Session {
     const tokens = {
       total,
       input: adjustedInputTokens,
-      output: outputTokens - reasoningTokens,
+      output: safe(outputTokens - reasoningTokens),
       reasoning: reasoningTokens,
       cache: {
        write: cacheWriteInputTokens,
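
Pulled out of the hunks above, the new precedence is: detail objects first, then the legacy flat fields, then provider metadata for Anthropic-style cache writes. The sketch below is a simplified, standalone restatement of that ordering, not the full getUsage implementation; safe here is a stand-in for the existing clamp helper (whose body is not shown in this diff), tokenBuckets / anthropicMeta / vertexMeta are illustrative names, and UsageSketch refers to the type sketched after the commit header above.

// Simplified restatement of the fallback order used in getUsage above.
// "safe" approximates the existing clamp helper; its real body is not part of this diff.
const safe = (value: number) => (Number.isFinite(value) && value >= 0 ? value : 0)

// anthropicMeta / vertexMeta stand in for input.metadata?.["anthropic"] and input.metadata?.["vertex"].
function tokenBuckets(usage: UsageSketch, anthropicMeta?: Record<string, number>, vertexMeta?: Record<string, number>) {
  const inputTokens = safe(usage.inputTokens ?? 0)
  const outputTokens = safe(usage.outputTokens ?? 0)
  // reasoning: prefer outputTokenDetails, fall back to the flat reasoningTokens field
  const reasoning = safe(usage.outputTokenDetails?.reasoningTokens ?? usage.reasoningTokens ?? 0)
  // cache reads: prefer inputTokenDetails, fall back to cachedInputTokens
  const cacheRead = safe(usage.inputTokenDetails?.cacheReadTokens ?? usage.cachedInputTokens ?? 0)
  // cache writes: inputTokenDetails, then anthropic metadata, then the vertex metadata key
  const cacheWrite = safe(
    usage.inputTokenDetails?.cacheWriteTokens ??
      anthropicMeta?.["cacheCreationInputTokens"] ??
      vertexMeta?.["cacheCreationInputTokens"] ??
      0,
  )
  return {
    input: inputTokens,
    // output excludes reasoning; wrapping the subtraction in safe() keeps it
    // from going negative, which is what the last hunk above changes
    output: safe(outputTokens - reasoning),
    reasoning,
    cache: { read: cacheRead, write: cacheWrite },
  }
}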

View File

@@ -1005,6 +1005,15 @@ describe("session.getUsage", () => {
         inputTokens: 1000,
         outputTokens: 500,
         totalTokens: 1500,
+        inputTokenDetails: {
+          noCacheTokens: undefined,
+          cacheReadTokens: undefined,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       },
     })
@@ -1023,7 +1032,15 @@ describe("session.getUsage", () => {
         inputTokens: 1000,
         outputTokens: 500,
         totalTokens: 1500,
-        cachedInputTokens: 200,
+        inputTokenDetails: {
+          noCacheTokens: 800,
+          cacheReadTokens: 200,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       },
     })
@@ -1039,6 +1056,15 @@ describe("session.getUsage", () => {
         inputTokens: 1000,
         outputTokens: 500,
         totalTokens: 1500,
+        inputTokenDetails: {
+          noCacheTokens: undefined,
+          cacheReadTokens: undefined,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       },
       metadata: {
         anthropic: {
@@ -1059,7 +1085,15 @@ describe("session.getUsage", () => {
         inputTokens: 1000,
         outputTokens: 500,
         totalTokens: 1500,
-        cachedInputTokens: 200,
+        inputTokenDetails: {
+          noCacheTokens: 800,
+          cacheReadTokens: 200,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       },
       metadata: {
         anthropic: {},
@@ -1078,7 +1112,15 @@ describe("session.getUsage", () => {
         inputTokens: 1000,
         outputTokens: 500,
         totalTokens: 1500,
-        reasoningTokens: 100,
+        inputTokenDetails: {
+          noCacheTokens: undefined,
+          cacheReadTokens: undefined,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: 400,
+          reasoningTokens: 100,
+        },
       },
     })
@@ -1104,7 +1146,15 @@ describe("session.getUsage", () => {
         inputTokens: 0,
         outputTokens: 1_000_000,
         totalTokens: 1_000_000,
-        reasoningTokens: 250_000,
+        inputTokenDetails: {
+          noCacheTokens: undefined,
+          cacheReadTokens: undefined,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: 750_000,
+          reasoningTokens: 250_000,
+        },
       },
     })
@@ -1121,6 +1171,15 @@ describe("session.getUsage", () => {
         inputTokens: 0,
         outputTokens: 0,
         totalTokens: 0,
+        inputTokenDetails: {
+          noCacheTokens: undefined,
+          cacheReadTokens: undefined,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       },
     })
@@ -1148,6 +1207,15 @@ describe("session.getUsage", () => {
         inputTokens: 1_000_000,
         outputTokens: 100_000,
         totalTokens: 1_100_000,
+        inputTokenDetails: {
+          noCacheTokens: undefined,
+          cacheReadTokens: undefined,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       },
     })
@@ -1163,7 +1231,15 @@ describe("session.getUsage", () => {
         inputTokens: 1000,
         outputTokens: 500,
         totalTokens: 1500,
-        cachedInputTokens: 200,
+        inputTokenDetails: {
+          noCacheTokens: 800,
+          cacheReadTokens: 200,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       }
       if (npm === "@ai-sdk/amazon-bedrock") {
         const result = Session.getUsage({
@@ -1214,7 +1290,15 @@ describe("session.getUsage", () => {
         inputTokens: 1000,
         outputTokens: 500,
         totalTokens: 1500,
-        cachedInputTokens: 200,
+        inputTokenDetails: {
+          noCacheTokens: 800,
+          cacheReadTokens: 200,
+          cacheWriteTokens: undefined,
+        },
+        outputTokenDetails: {
+          textTokens: undefined,
+          reasoningTokens: undefined,
+        },
       },
       metadata: {
         vertex: {