test: share lmstudio stream preload helpers

Author: Peter Steinberger
Date: 2026-04-20 21:21:26 +01:00
Parent: 226f0427bc
Commit: d4f602bdff

@@ -50,6 +50,44 @@ function buildDoneStreamFn(): StreamFn {
   });
 }
 
+function createWrappedLmstudioStream(
+  baseStream: StreamFn,
+  params?: { baseUrl?: string },
+): StreamFn {
+  return wrapLmstudioInferencePreload({
+    provider: "lmstudio",
+    modelId: "qwen3-8b-instruct",
+    config: {
+      models: {
+        providers: {
+          lmstudio: {
+            baseUrl: params?.baseUrl ?? "http://localhost:1234",
+            models: [],
+          },
+        },
+      },
+    },
+    streamFn: baseStream,
+  } as never);
+}
+
+function runWrappedLmstudioStream(
+  wrapped: StreamFn,
+  model: Record<string, unknown>,
+  options?: Record<string, unknown>,
+) {
+  return wrapped(
+    {
+      provider: "lmstudio",
+      api: "openai-completions",
+      id: "lmstudio/qwen3-8b-instruct",
+      ...model,
+    } as never,
+    { messages: [] } as never,
+    options as never,
+  );
+}
+
 describe("lmstudio stream wrapper", () => {
   beforeEach(() => {
     __resetLmstudioPreloadCooldownForTest();
@@ -66,31 +104,13 @@ describe("lmstudio stream wrapper", () => {
   it("preloads LM Studio model before inference using model context window", async () => {
     const baseStream = buildDoneStreamFn();
-    const wrapped = wrapLmstudioInferencePreload({
-      provider: "lmstudio",
-      modelId: "qwen3-8b-instruct",
-      config: {
-        models: {
-          providers: {
-            lmstudio: {
-              baseUrl: "http://lmstudio.internal:1234/v1",
-              models: [],
-            },
-          },
-        },
-      },
-      streamFn: baseStream,
-    } as never);
-    const stream = wrapped(
-      {
-        provider: "lmstudio",
-        api: "openai-completions",
-        id: "lmstudio/qwen3-8b-instruct",
-        contextWindow: 131072,
-      } as never,
-      { messages: [] } as never,
-      { apiKey: "lmstudio-token" } as never,
+    const wrapped = createWrappedLmstudioStream(baseStream, {
+      baseUrl: "http://lmstudio.internal:1234/v1",
+    });
+    const stream = runWrappedLmstudioStream(
+      wrapped,
+      { contextWindow: 131072 },
+      { apiKey: "lmstudio-token" },
     );
     const events = await collectEvents(stream);
@@ -109,32 +129,13 @@ describe("lmstudio stream wrapper", () => {
   it("prefers model contextTokens over contextWindow for preload requests", async () => {
     const baseStream = buildDoneStreamFn();
-    const wrapped = wrapLmstudioInferencePreload({
-      provider: "lmstudio",
-      modelId: "qwen3-8b-instruct",
-      config: {
-        models: {
-          providers: {
-            lmstudio: {
-              baseUrl: "http://lmstudio.internal:1234/v1",
-              models: [],
-            },
-          },
-        },
-      },
-      streamFn: baseStream,
-    } as never);
-    const stream = wrapped(
-      {
-        provider: "lmstudio",
-        api: "openai-completions",
-        id: "lmstudio/qwen3-8b-instruct",
-        contextWindow: 131072,
-        contextTokens: 64000,
-      } as never,
-      { messages: [] } as never,
-      { apiKey: "lmstudio-token" } as never,
+    const wrapped = createWrappedLmstudioStream(baseStream, {
+      baseUrl: "http://lmstudio.internal:1234/v1",
+    });
+    const stream = runWrappedLmstudioStream(
+      wrapped,
+      { contextWindow: 131072, contextTokens: 64000 },
+      { apiKey: "lmstudio-token" },
    );
    const events = await collectEvents(stream);
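
For reference, a minimal sketch of how another test in this file could compose the shared helpers. The test name, context-window value, and assertion are hypothetical; buildDoneStreamFn, collectEvents, and both helpers come from the diff above:

it("uses the default localhost base URL when no override is given", async () => {
  const baseStream = buildDoneStreamFn();
  // No params argument: createWrappedLmstudioStream falls back to http://localhost:1234.
  const wrapped = createWrappedLmstudioStream(baseStream);
  const stream = runWrappedLmstudioStream(
    wrapped,
    { contextWindow: 32768 }, // hypothetical context window
    { apiKey: "lmstudio-token" },
  );
  const events = await collectEvents(stream);
  expect(events.length).toBeGreaterThan(0); // hypothetical assertion
});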