From 676bfb1f5d257cb7cccec2bf9506f05915d56964 Mon Sep 17 00:00:00 2001 From: Marve10s Date: Tue, 7 Apr 2026 23:36:02 +0300 Subject: [PATCH 1/4] feat: add GLM (Z.ai) as a third provider Add GLM as a Codex-backed provider that routes through a local Responses-to-ChatCompletions bridge. GLM sessions reuse the Codex app-server runtime while presenting as a separate provider in the UI. Contracts: add "glm" to ProviderKind, ModelSelection, GlmSettings, and all Record exhaustiveness sites. Server: GlmAdapter (thin CodexAdapter delegate), GlmProvider (snapshot service checking GLM_API_KEY), GLM bridge (loopback HTTP translating Responses <-> Chat Completions), shared codexLaunchConfig builder, text generation routing for GLM. Web: GLM in provider picker, settings panel with env var hint, composer registry entry, model selection config, GlmIcon. Tests: 40 new tests covering bridge translation (Responses -> Chat Completions, Chat Completions streaming -> Responses SSE) and the launch config builder, plus updates to existing tests for the new provider. 
--- .../src/git/Layers/RoutingTextGeneration.ts | 12 +- .../server/src/git/Services/TextGeneration.ts | 2 +- apps/server/src/provider/Layers/GlmAdapter.ts | 140 ++++++++ .../server/src/provider/Layers/GlmProvider.ts | 127 +++++++ .../Layers/ProviderAdapterRegistry.test.ts | 23 +- .../Layers/ProviderAdapterRegistry.ts | 3 +- .../src/provider/Layers/ProviderRegistry.ts | 27 +- .../src/provider/Services/GlmAdapter.ts | 11 + .../src/provider/Services/GlmProvider.ts | 8 + .../src/provider/codexLaunchConfig.test.ts | 170 +++++++++ apps/server/src/provider/codexLaunchConfig.ts | 64 ++++ .../src/provider/glmBridge/GlmBridgeLive.ts | 278 +++++++++++++++ .../provider/glmBridge/GlmBridgeService.ts | 10 + .../glmBridge/translateGlmToResponses.test.ts | 329 ++++++++++++++++++ .../glmBridge/translateGlmToResponses.ts | 168 +++++++++ .../glmBridge/translateResponsesToGlm.test.ts | 309 ++++++++++++++++ .../glmBridge/translateResponsesToGlm.ts | 192 ++++++++++ apps/server/src/server.ts | 5 + apps/web/src/components/ChatView.tsx | 1 + apps/web/src/components/Icons.tsx | 9 + .../components/KeybindingsToast.browser.tsx | 6 + .../components/chat/ProviderModelPicker.tsx | 3 +- .../chat/composerProviderRegistry.tsx | 36 +- .../components/settings/SettingsPanels.tsx | 87 +++-- apps/web/src/modelSelection.ts | 13 + apps/web/src/rpc/client.test.ts | 2 + apps/web/src/session-logic.test.ts | 1 + apps/web/src/session-logic.ts | 1 + apps/web/src/store.ts | 6 +- packages/contracts/src/model.ts | 18 + packages/contracts/src/orchestration.ts | 17 +- packages/contracts/src/settings.ts | 29 ++ 32 files changed, 2058 insertions(+), 49 deletions(-) create mode 100644 apps/server/src/provider/Layers/GlmAdapter.ts create mode 100644 apps/server/src/provider/Layers/GlmProvider.ts create mode 100644 apps/server/src/provider/Services/GlmAdapter.ts create mode 100644 apps/server/src/provider/Services/GlmProvider.ts create mode 100644 apps/server/src/provider/codexLaunchConfig.test.ts create mode 100644 
apps/server/src/provider/codexLaunchConfig.ts create mode 100644 apps/server/src/provider/glmBridge/GlmBridgeLive.ts create mode 100644 apps/server/src/provider/glmBridge/GlmBridgeService.ts create mode 100644 apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts create mode 100644 apps/server/src/provider/glmBridge/translateGlmToResponses.ts create mode 100644 apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts create mode 100644 apps/server/src/provider/glmBridge/translateResponsesToGlm.ts diff --git a/apps/server/src/git/Layers/RoutingTextGeneration.ts b/apps/server/src/git/Layers/RoutingTextGeneration.ts index dee12a3e0e..e4526a072a 100644 --- a/apps/server/src/git/Layers/RoutingTextGeneration.ts +++ b/apps/server/src/git/Layers/RoutingTextGeneration.ts @@ -39,8 +39,16 @@ const makeRoutingTextGeneration = Effect.gen(function* () { const codex = yield* CodexTextGen; const claude = yield* ClaudeTextGen; - const route = (provider?: TextGenerationProvider): TextGenerationShape => - provider === "claudeAgent" ? claude : codex; + const route = (provider?: TextGenerationProvider): TextGenerationShape => { + switch (provider) { + case "claudeAgent": + return claude; + case "codex": + case "glm": + case undefined: + return codex; + } + }; return { generateCommitMessage: (input) => diff --git a/apps/server/src/git/Services/TextGeneration.ts b/apps/server/src/git/Services/TextGeneration.ts index f4354c7a99..204892aef8 100644 --- a/apps/server/src/git/Services/TextGeneration.ts +++ b/apps/server/src/git/Services/TextGeneration.ts @@ -13,7 +13,7 @@ import type { ChatAttachment, ModelSelection } from "@t3tools/contracts"; import type { TextGenerationError } from "@t3tools/contracts"; /** Providers that support git text generation (commit messages, PR content, branch names). 
*/ -export type TextGenerationProvider = "codex" | "claudeAgent"; +export type TextGenerationProvider = "codex" | "claudeAgent" | "glm"; export interface CommitMessageGenerationInput { cwd: string; diff --git a/apps/server/src/provider/Layers/GlmAdapter.ts b/apps/server/src/provider/Layers/GlmAdapter.ts new file mode 100644 index 0000000000..69f38ee54a --- /dev/null +++ b/apps/server/src/provider/Layers/GlmAdapter.ts @@ -0,0 +1,140 @@ +import type { + ApprovalRequestId, + ProviderApprovalDecision, + ProviderRuntimeEvent, + ProviderSendTurnInput, + ProviderSession, + ProviderSessionStartInput, + ProviderTurnStartResult, + ProviderUserInputAnswers, + ThreadId, + TurnId, +} from "@t3tools/contracts"; +import { Effect, Layer, Queue, Stream } from "effect"; + +import type { ProviderAdapterError } from "../Errors.ts"; +import { GlmAdapter, type GlmAdapterShape } from "../Services/GlmAdapter.ts"; +import { CodexAdapter } from "../Services/CodexAdapter.ts"; +import type { + ProviderAdapterCapabilities, + ProviderThreadSnapshot, +} from "../Services/ProviderAdapter.ts"; +import type { EventNdjsonLogger } from "./EventNdjsonLogger.ts"; + +const PROVIDER = "glm" as const; + +export interface GlmAdapterLiveOptions { + readonly nativeEventLogger?: EventNdjsonLogger; +} + +function remapSessionProvider(session: ProviderSession): ProviderSession { + return { ...session, provider: PROVIDER }; +} + +export const GlmAdapterLive = Layer.effect( + GlmAdapter, + Effect.gen(function* () { + const codexAdapter = yield* CodexAdapter; + const glmEventQueue = yield* Queue.unbounded(); + const glmThreadIds = new Set(); + + const capabilities: ProviderAdapterCapabilities = { + sessionModelSwitch: "restart-session", + }; + + const startSession = ( + input: ProviderSessionStartInput, + ): Effect.Effect => + Effect.gen(function* () { + glmThreadIds.add(input.threadId); + const session = yield* codexAdapter.startSession({ + ...input, + provider: "codex", + }); + return 
remapSessionProvider(session); + }); + + const sendTurn = ( + input: ProviderSendTurnInput, + ): Effect.Effect => codexAdapter.sendTurn(input); + + const interruptTurn = ( + threadId: ThreadId, + turnId?: TurnId, + ): Effect.Effect => codexAdapter.interruptTurn(threadId, turnId); + + const respondToRequest = ( + threadId: ThreadId, + requestId: ApprovalRequestId, + decision: ProviderApprovalDecision, + ): Effect.Effect => + codexAdapter.respondToRequest(threadId, requestId, decision); + + const respondToUserInput = ( + threadId: ThreadId, + requestId: ApprovalRequestId, + answers: ProviderUserInputAnswers, + ): Effect.Effect => + codexAdapter.respondToUserInput(threadId, requestId, answers); + + const stopSession = (threadId: ThreadId): Effect.Effect => + Effect.gen(function* () { + yield* codexAdapter.stopSession(threadId); + glmThreadIds.delete(threadId); + }); + + const listSessions = (): Effect.Effect> => + codexAdapter + .listSessions() + .pipe( + Effect.map((sessions) => + sessions.filter((s) => glmThreadIds.has(s.threadId)).map(remapSessionProvider), + ), + ); + + const hasSession = (threadId: ThreadId): Effect.Effect => + glmThreadIds.has(threadId) ? 
codexAdapter.hasSession(threadId) : Effect.succeed(false); + + const readThread = ( + threadId: ThreadId, + ): Effect.Effect => + codexAdapter.readThread(threadId); + + const rollbackThread = ( + threadId: ThreadId, + numTurns: number, + ): Effect.Effect => + codexAdapter.rollbackThread(threadId, numTurns); + + const stopAll = (): Effect.Effect => + Effect.gen(function* () { + for (const threadId of glmThreadIds) { + yield* codexAdapter.stopSession(threadId).pipe(Effect.ignore); + } + glmThreadIds.clear(); + }); + + return { + provider: PROVIDER, + capabilities, + startSession, + sendTurn, + interruptTurn, + respondToRequest, + respondToUserInput, + stopSession, + listSessions, + hasSession, + readThread, + rollbackThread, + stopAll, + get streamEvents() { + return Stream.fromQueue(glmEventQueue); + }, + } satisfies GlmAdapterShape; + }), +); + +export function makeGlmAdapterLive(_options?: GlmAdapterLiveOptions) { + return GlmAdapterLive; +} diff --git a/apps/server/src/provider/Layers/GlmProvider.ts b/apps/server/src/provider/Layers/GlmProvider.ts new file mode 100644 index 0000000000..8479e3b665 --- /dev/null +++ b/apps/server/src/provider/Layers/GlmProvider.ts @@ -0,0 +1,127 @@ +import type { GlmSettings, ModelCapabilities, ServerProviderModel } from "@t3tools/contracts"; +import { Effect, Equal, Layer, Stream } from "effect"; + +import { + buildServerProvider, + providerModelsFromSettings, + type ProviderProbeResult, +} from "../providerSnapshot.ts"; +import { makeManagedServerProvider } from "../makeManagedServerProvider.ts"; +import { GlmProvider } from "../Services/GlmProvider.ts"; +import { ServerSettingsService } from "../../serverSettings.ts"; + +const PROVIDER = "glm" as const; + +const DEFAULT_GLM_MODEL_CAPABILITIES: ModelCapabilities = { + reasoningEffortLevels: [], + supportsFastMode: false, + supportsThinkingToggle: false, + contextWindowOptions: [], + promptInjectedEffortLevels: [], +}; + +const BUILT_IN_MODELS: ReadonlyArray = [ + { + slug: 
"glm-5.1", + name: "GLM 5.1", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-5", + name: "GLM 5", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-5-turbo", + name: "GLM 5 Turbo", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.7", + name: "GLM 4.7", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.6", + name: "GLM 4.6", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.5", + name: "GLM 4.5", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.5-air", + name: "GLM 4.5 Air", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, +]; + +function checkGlmProviderStatus(_glmSettings: GlmSettings): ProviderProbeResult { + const hasApiKey = Boolean(process.env.GLM_API_KEY); + + if (!hasApiKey) { + return { + installed: true, + version: null, + status: "error", + auth: { status: "unauthenticated" }, + message: "Set the GLM_API_KEY environment variable to authenticate.", + }; + } + + return { + installed: true, + version: null, + status: "ready", + auth: { status: "authenticated", type: "apiKey" }, + }; +} + +export const GlmProviderLive = Layer.effect( + GlmProvider, + Effect.gen(function* () { + const serverSettings = yield* ServerSettingsService; + + const checkProvider = Effect.gen(function* () { + const settings = yield* serverSettings.getSettings; + const glmSettings = settings.providers.glm; + const probe = checkGlmProviderStatus(glmSettings); + + const models = providerModelsFromSettings( + BUILT_IN_MODELS, + PROVIDER, + glmSettings.customModels, + DEFAULT_GLM_MODEL_CAPABILITIES, + ); + + return buildServerProvider({ + provider: PROVIDER, + enabled: glmSettings.enabled, + checkedAt: new Date().toISOString(), + models, + probe, + }); + }); + + return yield* makeManagedServerProvider({ + getSettings: 
serverSettings.getSettings.pipe( + Effect.map((settings) => settings.providers.glm), + Effect.orDie, + ), + streamSettings: serverSettings.streamChanges.pipe( + Stream.map((settings) => settings.providers.glm), + ), + haveSettingsChanged: (previous, next) => !Equal.equals(previous, next), + checkProvider, + }); + }), +); diff --git a/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts b/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts index db0293f0fe..00dda8372c 100644 --- a/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts +++ b/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts @@ -6,6 +6,7 @@ import { Effect, Layer, Stream } from "effect"; import { ClaudeAdapter, ClaudeAdapterShape } from "../Services/ClaudeAdapter.ts"; import { CodexAdapter, CodexAdapterShape } from "../Services/CodexAdapter.ts"; +import { GlmAdapter, GlmAdapterShape } from "../Services/GlmAdapter.ts"; import { ProviderAdapterRegistry } from "../Services/ProviderAdapterRegistry.ts"; import { ProviderAdapterRegistryLive } from "./ProviderAdapterRegistry.ts"; import { ProviderUnsupportedError } from "../Errors.ts"; @@ -45,6 +46,23 @@ const fakeClaudeAdapter: ClaudeAdapterShape = { streamEvents: Stream.empty, }; +const fakeGlmAdapter: GlmAdapterShape = { + provider: "glm", + capabilities: { sessionModelSwitch: "restart-session" }, + startSession: vi.fn(), + sendTurn: vi.fn(), + interruptTurn: vi.fn(), + respondToRequest: vi.fn(), + respondToUserInput: vi.fn(), + stopSession: vi.fn(), + listSessions: vi.fn(), + hasSession: vi.fn(), + readThread: vi.fn(), + rollbackThread: vi.fn(), + stopAll: vi.fn(), + streamEvents: Stream.empty, +}; + const layer = it.layer( Layer.mergeAll( Layer.provide( @@ -52,6 +70,7 @@ const layer = it.layer( Layer.mergeAll( Layer.succeed(CodexAdapter, fakeCodexAdapter), Layer.succeed(ClaudeAdapter, fakeClaudeAdapter), + Layer.succeed(GlmAdapter, fakeGlmAdapter), ), ), NodeServices.layer, @@ -64,11 +83,13 @@ 
layer("ProviderAdapterRegistryLive", (it) => { const registry = yield* ProviderAdapterRegistry; const codex = yield* registry.getByProvider("codex"); const claude = yield* registry.getByProvider("claudeAgent"); + const glm = yield* registry.getByProvider("glm"); assert.equal(codex, fakeCodexAdapter); assert.equal(claude, fakeClaudeAdapter); + assert.equal(glm, fakeGlmAdapter); const providers = yield* registry.listProviders(); - assert.deepEqual(providers, ["codex", "claudeAgent"]); + assert.deepEqual(providers, ["codex", "claudeAgent", "glm"]); }), ); diff --git a/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts b/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts index b6c987c64c..809fb3032b 100644 --- a/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts +++ b/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts @@ -17,6 +17,7 @@ import { } from "../Services/ProviderAdapterRegistry.ts"; import { ClaudeAdapter } from "../Services/ClaudeAdapter.ts"; import { CodexAdapter } from "../Services/CodexAdapter.ts"; +import { GlmAdapter } from "../Services/GlmAdapter.ts"; export interface ProviderAdapterRegistryLiveOptions { readonly adapters?: ReadonlyArray>; @@ -28,7 +29,7 @@ const makeProviderAdapterRegistry = Effect.fn("makeProviderAdapterRegistry")(fun const adapters = options?.adapters !== undefined ? 
options.adapters - : [yield* CodexAdapter, yield* ClaudeAdapter]; + : [yield* CodexAdapter, yield* ClaudeAdapter, yield* GlmAdapter]; const byProvider = new Map(adapters.map((adapter) => [adapter.provider, adapter])); const getByProvider: ProviderAdapterRegistryShape["getByProvider"] = (provider) => { diff --git a/apps/server/src/provider/Layers/ProviderRegistry.ts b/apps/server/src/provider/Layers/ProviderRegistry.ts index fb2f33c293..3f9b049502 100644 --- a/apps/server/src/provider/Layers/ProviderRegistry.ts +++ b/apps/server/src/provider/Layers/ProviderRegistry.ts @@ -8,17 +8,21 @@ import { Effect, Equal, Layer, PubSub, Ref, Stream } from "effect"; import { ClaudeProviderLive } from "./ClaudeProvider"; import { CodexProviderLive } from "./CodexProvider"; +import { GlmProviderLive } from "./GlmProvider"; import type { ClaudeProviderShape } from "../Services/ClaudeProvider"; import { ClaudeProvider } from "../Services/ClaudeProvider"; import type { CodexProviderShape } from "../Services/CodexProvider"; import { CodexProvider } from "../Services/CodexProvider"; +import type { GlmProviderShape } from "../Services/GlmProvider"; +import { GlmProvider } from "../Services/GlmProvider"; import { ProviderRegistry, type ProviderRegistryShape } from "../Services/ProviderRegistry"; const loadProviders = ( codexProvider: CodexProviderShape, claudeProvider: ClaudeProviderShape, -): Effect.Effect => - Effect.all([codexProvider.getSnapshot, claudeProvider.getSnapshot], { + glmProvider: GlmProviderShape, +): Effect.Effect => + Effect.all([codexProvider.getSnapshot, claudeProvider.getSnapshot, glmProvider.getSnapshot], { concurrency: "unbounded", }); @@ -32,19 +36,20 @@ export const ProviderRegistryLive = Layer.effect( Effect.gen(function* () { const codexProvider = yield* CodexProvider; const claudeProvider = yield* ClaudeProvider; + const glmProvider = yield* GlmProvider; const changesPubSub = yield* Effect.acquireRelease( PubSub.unbounded>(), PubSub.shutdown, ); const 
providersRef = yield* Ref.make>( - yield* loadProviders(codexProvider, claudeProvider), + yield* loadProviders(codexProvider, claudeProvider, glmProvider), ); const syncProviders = Effect.fn("syncProviders")(function* (options?: { readonly publish?: boolean; }) { const previousProviders = yield* Ref.get(providersRef); - const providers = yield* loadProviders(codexProvider, claudeProvider); + const providers = yield* loadProviders(codexProvider, claudeProvider, glmProvider); yield* Ref.set(providersRef, providers); if (options?.publish !== false && haveProvidersChanged(previousProviders, providers)) { @@ -60,6 +65,9 @@ export const ProviderRegistryLive = Layer.effect( yield* Stream.runForEach(claudeProvider.streamChanges, () => syncProviders()).pipe( Effect.forkScoped, ); + yield* Stream.runForEach(glmProvider.streamChanges, () => syncProviders()).pipe( + Effect.forkScoped, + ); const refresh = Effect.fn("refresh")(function* (provider?: ProviderKind) { switch (provider) { @@ -69,8 +77,11 @@ export const ProviderRegistryLive = Layer.effect( case "claudeAgent": yield* claudeProvider.refresh; break; + case "glm": + yield* glmProvider.refresh; + break; default: - yield* Effect.all([codexProvider.refresh, claudeProvider.refresh], { + yield* Effect.all([codexProvider.refresh, claudeProvider.refresh, glmProvider.refresh], { concurrency: "unbounded", }); break; @@ -93,4 +104,8 @@ export const ProviderRegistryLive = Layer.effect( }, } satisfies ProviderRegistryShape; }), -).pipe(Layer.provideMerge(CodexProviderLive), Layer.provideMerge(ClaudeProviderLive)); +).pipe( + Layer.provideMerge(CodexProviderLive), + Layer.provideMerge(ClaudeProviderLive), + Layer.provideMerge(GlmProviderLive), +); diff --git a/apps/server/src/provider/Services/GlmAdapter.ts b/apps/server/src/provider/Services/GlmAdapter.ts new file mode 100644 index 0000000000..96dc26e881 --- /dev/null +++ b/apps/server/src/provider/Services/GlmAdapter.ts @@ -0,0 +1,11 @@ +import { ServiceMap } from "effect"; 
+import type { ProviderAdapterError } from "../Errors.ts"; +import type { ProviderAdapterShape } from "./ProviderAdapter.ts"; + +export interface GlmAdapterShape extends ProviderAdapterShape { + readonly provider: "glm"; +} + +export class GlmAdapter extends ServiceMap.Service()( + "t3/provider/Services/GlmAdapter", +) {} diff --git a/apps/server/src/provider/Services/GlmProvider.ts b/apps/server/src/provider/Services/GlmProvider.ts new file mode 100644 index 0000000000..fa665db229 --- /dev/null +++ b/apps/server/src/provider/Services/GlmProvider.ts @@ -0,0 +1,8 @@ +import { ServiceMap } from "effect"; +import type { ServerProviderShape } from "./ServerProvider.ts"; + +export interface GlmProviderShape extends ServerProviderShape {} + +export class GlmProvider extends ServiceMap.Service()( + "t3/provider/Services/GlmProvider", +) {} diff --git a/apps/server/src/provider/codexLaunchConfig.test.ts b/apps/server/src/provider/codexLaunchConfig.test.ts new file mode 100644 index 0000000000..363b11702f --- /dev/null +++ b/apps/server/src/provider/codexLaunchConfig.test.ts @@ -0,0 +1,170 @@ +import { describe, expect, it } from "vitest"; + +import { + buildCodexLaunchConfig, + buildCodexSpawnEnv, + configOverridesToArgs, + type BuildCodexLaunchConfigInput, +} from "./codexLaunchConfig.ts"; + +const defaultCodexSettings = { + enabled: true, + binaryPath: "/usr/local/bin/codex", + homePath: "/home/user/.codex", + customModels: [], +}; + +const defaultGlmSettings = { + enabled: true, + transport: "bridge" as const, + upstreamBaseUrl: "https://api.z.ai/api/coding/paas/v4", + customModels: [], +}; + +describe("buildCodexLaunchConfig", () => { + it("returns base config with no overrides for codex provider", () => { + const input: BuildCodexLaunchConfigInput = { + provider: "codex", + codexSettings: defaultCodexSettings, + }; + + const config = buildCodexLaunchConfig(input); + + expect(config.binaryPath).toBe("/usr/local/bin/codex"); + 
expect(config.homePath).toBe("/home/user/.codex"); + expect(config.configOverrides).toEqual([]); + expect(config.extraEnv).toEqual({}); + }); + + it("returns base config with no overrides for claudeAgent provider", () => { + const config = buildCodexLaunchConfig({ + provider: "claudeAgent", + codexSettings: defaultCodexSettings, + }); + + expect(config.configOverrides).toEqual([]); + }); + + it("falls back to 'codex' when binaryPath is empty", () => { + const config = buildCodexLaunchConfig({ + provider: "codex", + codexSettings: { ...defaultCodexSettings, binaryPath: "" }, + }); + + expect(config.binaryPath).toBe("codex"); + }); + + it("sets homePath to undefined when empty", () => { + const config = buildCodexLaunchConfig({ + provider: "codex", + codexSettings: { ...defaultCodexSettings, homePath: "" }, + }); + + expect(config.homePath).toBeUndefined(); + }); + + it("generates GLM provider overrides with bridge URL", () => { + const config = buildCodexLaunchConfig({ + provider: "glm", + codexSettings: defaultCodexSettings, + glmSettings: defaultGlmSettings, + glmBridgeBaseUrl: "http://127.0.0.1:9876/v1", + }); + + expect(config.configOverrides).toContain('model_provider="glm"'); + expect(config.configOverrides).toContain('model_providers.glm.name="GLM"'); + expect(config.configOverrides).toContain( + 'model_providers.glm.base_url="http://127.0.0.1:9876/v1"', + ); + expect(config.configOverrides).toContain('model_providers.glm.env_key="GLM_API_KEY"'); + expect(config.configOverrides).toContain('model_providers.glm.wire_api="responses"'); + }); + + it("uses upstream URL directly when transport is 'direct'", () => { + const config = buildCodexLaunchConfig({ + provider: "glm", + codexSettings: defaultCodexSettings, + glmSettings: { ...defaultGlmSettings, transport: "direct" as const }, + glmBridgeBaseUrl: "http://127.0.0.1:9876/v1", + }); + + expect(config.configOverrides).toContain( + 'model_providers.glm.base_url="https://api.z.ai/api/coding/paas/v4"', + ); + }); + 
+ it("falls back to upstream URL when bridge URL is not provided", () => { + const config = buildCodexLaunchConfig({ + provider: "glm", + codexSettings: defaultCodexSettings, + glmSettings: defaultGlmSettings, + }); + + expect(config.configOverrides).toContain( + 'model_providers.glm.base_url="https://api.z.ai/api/coding/paas/v4"', + ); + }); + + it("returns base config when glm provider is selected but glmSettings is missing", () => { + const config = buildCodexLaunchConfig({ + provider: "glm", + codexSettings: defaultCodexSettings, + }); + + expect(config.configOverrides).toEqual([]); + }); +}); + +describe("buildCodexSpawnEnv", () => { + it("merges process.env with CODEX_HOME when homePath is set", () => { + const env = buildCodexSpawnEnv({ + binaryPath: "codex", + homePath: "/custom/home", + configOverrides: [], + extraEnv: {}, + }); + + expect(env.CODEX_HOME).toBe("/custom/home"); + }); + + it("does not set CODEX_HOME when homePath is undefined", () => { + const original = process.env.CODEX_HOME; + delete process.env.CODEX_HOME; + + const env = buildCodexSpawnEnv({ + binaryPath: "codex", + homePath: undefined, + configOverrides: [], + extraEnv: {}, + }); + + expect(env.CODEX_HOME).toBeUndefined(); + + if (original !== undefined) { + process.env.CODEX_HOME = original; + } + }); + + it("includes extraEnv entries", () => { + const env = buildCodexSpawnEnv({ + binaryPath: "codex", + homePath: undefined, + configOverrides: [], + extraEnv: { MY_VAR: "value" }, + }); + + expect(env.MY_VAR).toBe("value"); + }); +}); + +describe("configOverridesToArgs", () => { + it("returns empty array for no overrides", () => { + expect(configOverridesToArgs([])).toEqual([]); + }); + + it("flattens overrides into -c pairs", () => { + const args = configOverridesToArgs(['model_provider="glm"', 'model_providers.glm.name="GLM"']); + + expect(args).toEqual(["-c", 'model_provider="glm"', "-c", 'model_providers.glm.name="GLM"']); + }); +}); diff --git 
a/apps/server/src/provider/codexLaunchConfig.ts b/apps/server/src/provider/codexLaunchConfig.ts new file mode 100644 index 0000000000..57ca11ed0e --- /dev/null +++ b/apps/server/src/provider/codexLaunchConfig.ts @@ -0,0 +1,64 @@ +import type { ProviderKind } from "@t3tools/contracts"; +import type { CodexSettings, GlmSettings } from "@t3tools/contracts/settings"; + +export interface CodexLaunchConfig { + readonly binaryPath: string; + readonly homePath: string | undefined; + readonly configOverrides: ReadonlyArray; + readonly extraEnv: Readonly>; +} + +export type CodexLaunchPurpose = "chat-session" | "git-text-generation" | "provider-probe"; + +export function buildCodexSpawnEnv(config: CodexLaunchConfig): Record { + return { + ...process.env, + ...(config.homePath ? { CODEX_HOME: config.homePath } : {}), + ...config.extraEnv, + }; +} + +export function configOverridesToArgs(overrides: ReadonlyArray): string[] { + return overrides.flatMap((override) => ["-c", override]); +} + +export interface BuildCodexLaunchConfigInput { + readonly provider: ProviderKind; + readonly codexSettings: CodexSettings; + readonly glmSettings?: GlmSettings; + readonly glmBridgeBaseUrl?: string; +} + +export function buildCodexLaunchConfig(input: BuildCodexLaunchConfigInput): CodexLaunchConfig { + const { provider, codexSettings, glmSettings, glmBridgeBaseUrl } = input; + + const base: CodexLaunchConfig = { + binaryPath: codexSettings.binaryPath || "codex", + homePath: codexSettings.homePath || undefined, + configOverrides: [], + extraEnv: {}, + }; + + if (provider !== "glm" || !glmSettings) { + return base; + } + + const baseUrl = + glmSettings.transport === "bridge" && glmBridgeBaseUrl + ? 
glmBridgeBaseUrl + : glmSettings.upstreamBaseUrl; + + const configOverrides: string[] = [ + 'model_provider="glm"', + 'model_providers.glm.name="GLM"', + `model_providers.glm.base_url="${baseUrl}"`, + 'model_providers.glm.env_key="GLM_API_KEY"', + 'model_providers.glm.wire_api="responses"', + ]; + + return { + ...base, + configOverrides, + extraEnv: {}, + }; +} diff --git a/apps/server/src/provider/glmBridge/GlmBridgeLive.ts b/apps/server/src/provider/glmBridge/GlmBridgeLive.ts new file mode 100644 index 0000000000..437ab15618 --- /dev/null +++ b/apps/server/src/provider/glmBridge/GlmBridgeLive.ts @@ -0,0 +1,278 @@ +import { Data, Effect, Layer, Ref } from "effect"; +import * as http from "node:http"; + +import { ServerSettingsService } from "../../serverSettings.ts"; +import { GlmBridgeService, type GlmBridgeShape } from "./GlmBridgeService.ts"; +import { + translateResponsesToChatCompletions, + UnsupportedResponsesFeatureError, + type ResponsesRequest, +} from "./translateResponsesToGlm.ts"; +import { + GlmToResponsesTranslator, + formatResponsesSSE, + type ChatCompletionsChunk, +} from "./translateGlmToResponses.ts"; + +class GlmBridgeStartError extends Data.TaggedError("GlmBridgeStartError")<{ + readonly cause: unknown; +}> {} + +function readRequestBody(req: http.IncomingMessage): Promise { + return new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + req.on("data", (chunk: Buffer) => chunks.push(chunk)); + req.on("end", () => resolve(Buffer.concat(chunks).toString("utf-8"))); + req.on("error", reject); + }); +} + +function jsonResponse( + res: http.ServerResponse, + status: number, + body: Record, +): void { + res.writeHead(status, { "Content-Type": "application/json" }); + res.end(JSON.stringify(body)); +} + +async function handleResponsesRequest( + req: http.IncomingMessage, + res: http.ServerResponse, + upstreamBaseUrl: string, +): Promise { + let responsesReq: ResponsesRequest; + try { + const bodyText = await readRequestBody(req); + 
responsesReq = JSON.parse(bodyText) as ResponsesRequest; + } catch { + jsonResponse(res, 400, { error: { message: "Invalid JSON request body" } }); + return; + } + + let chatReq; + try { + chatReq = translateResponsesToChatCompletions(responsesReq); + } catch (err) { + if (err instanceof UnsupportedResponsesFeatureError) { + jsonResponse(res, 400, { error: { message: err.message } }); + return; + } + jsonResponse(res, 500, { + error: { message: "Bridge translation error", detail: String(err) }, + }); + return; + } + + const apiKey = process.env.GLM_API_KEY; + if (!apiKey) { + jsonResponse(res, 401, { + error: { message: "GLM_API_KEY environment variable is not set" }, + }); + return; + } + + const upstreamUrl = `${upstreamBaseUrl.replace(/\/+$/, "")}/chat/completions`; + const abortController = new AbortController(); + + req.on("close", () => abortController.abort()); + + let upstreamRes: Response; + try { + upstreamRes = await fetch(upstreamUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + Accept: "text/event-stream", + }, + body: JSON.stringify(chatReq), + signal: abortController.signal, + }); + } catch (err) { + if (abortController.signal.aborted) return; + jsonResponse(res, 502, { + error: { + message: "Failed to connect to upstream GLM API", + detail: String(err), + upstream_url: upstreamUrl, + }, + }); + return; + } + + if (!upstreamRes.ok) { + let errorBody = ""; + try { + errorBody = await upstreamRes.text(); + } catch {} + jsonResponse(res, upstreamRes.status, { + error: { + message: `Upstream GLM API returned ${upstreamRes.status}`, + detail: errorBody, + upstream_url: upstreamUrl, + }, + }); + return; + } + + res.writeHead(200, { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }); + + const responseId = `resp_glm_${Date.now()}`; + const translator = new GlmToResponsesTranslator(responseId); + + res.write( + formatResponsesSSE({ + event: 
"response.created", + data: { response: { id: responseId, status: "in_progress" } }, + }), + ); + + const reader = upstreamRes.body?.getReader(); + if (!reader) { + res.write( + formatResponsesSSE({ + event: "response.completed", + data: { response: { id: responseId, status: "failed" } }, + }), + ); + res.end(); + return; + } + + const decoder = new TextDecoder(); + let buffer = ""; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() ?? ""; + + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed || !trimmed.startsWith("data:")) continue; + + const dataStr = trimmed.slice(5).trim(); + if (dataStr === "[DONE]") continue; + + let chunk: ChatCompletionsChunk; + try { + chunk = JSON.parse(dataStr) as ChatCompletionsChunk; + } catch { + continue; + } + + const events = translator.translateChunk(chunk); + for (const event of events) { + res.write(formatResponsesSSE(event)); + } + } + } + } catch (err) { + if (!abortController.signal.aborted) { + res.write( + formatResponsesSSE({ + event: "response.completed", + data: { + response: { + id: responseId, + status: "failed", + error: { message: String(err) }, + }, + }, + }), + ); + } + } finally { + res.end(); + } +} + +async function handleModelsRequest( + res: http.ServerResponse, + upstreamBaseUrl: string, +): Promise { + const apiKey = process.env.GLM_API_KEY; + if (!apiKey) { + jsonResponse(res, 401, { + error: { message: "GLM_API_KEY environment variable is not set" }, + }); + return; + } + + const upstreamUrl = `${upstreamBaseUrl.replace(/\/+$/, "")}/models`; + try { + const upstreamRes = await fetch(upstreamUrl, { + headers: { Authorization: `Bearer ${apiKey}` }, + }); + const body = await upstreamRes.text(); + res.writeHead(upstreamRes.status, { + "Content-Type": "application/json", + }); + res.end(body); + } catch (err) { + jsonResponse(res, 
502, { + error: { message: "Failed to proxy models request", detail: String(err) }, + }); + } +} + +export const GlmBridgeLive = Layer.effect( + GlmBridgeService, + Effect.gen(function* () { + const settingsService = yield* ServerSettingsService; + const settings = yield* settingsService.getSettings; + let currentUpstreamBaseUrl = settings.providers.glm.upstreamBaseUrl; + const baseUrlRef = yield* Ref.make(""); + + const server = http.createServer(async (req, res) => { + const url = new URL(req.url ?? "/", `http://${req.headers.host}`); + const pathname = url.pathname; + + if (req.method === "GET" && pathname === "/health") { + jsonResponse(res, 200, { status: "ok" }); + return; + } + + if (req.method === "POST" && pathname === "/v1/responses") { + await handleResponsesRequest(req, res, currentUpstreamBaseUrl); + return; + } + + if (req.method === "GET" && pathname === "/v1/models") { + await handleModelsRequest(res, currentUpstreamBaseUrl); + return; + } + + jsonResponse(res, 404, { error: { message: `Not found: ${pathname}` } }); + }); + + yield* Effect.tryPromise({ + try: () => + new Promise((resolve, reject) => { + server.listen(0, "127.0.0.1", () => resolve()); + server.on("error", (err) => reject(err)); + }), + catch: (err) => new GlmBridgeStartError({ cause: err }), + }); + + const address = server.address(); + const port = address && typeof address === "object" ? 
address.port : 0; + const bridgeBaseUrl = `http://127.0.0.1:${port}/v1`; + yield* Ref.set(baseUrlRef, bridgeBaseUrl); + + yield* Effect.log(`GLM bridge started on ${bridgeBaseUrl}`); + + return { + baseUrl: Ref.get(baseUrlRef), + } satisfies GlmBridgeShape; + }), +); diff --git a/apps/server/src/provider/glmBridge/GlmBridgeService.ts b/apps/server/src/provider/glmBridge/GlmBridgeService.ts new file mode 100644 index 0000000000..ba981d367c --- /dev/null +++ b/apps/server/src/provider/glmBridge/GlmBridgeService.ts @@ -0,0 +1,10 @@ +import { ServiceMap } from "effect"; +import type { Effect } from "effect"; + +export interface GlmBridgeShape { + readonly baseUrl: Effect.Effect; +} + +export class GlmBridgeService extends ServiceMap.Service()( + "t3/provider/glmBridge/GlmBridgeService", +) {} diff --git a/apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts b/apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts new file mode 100644 index 0000000000..1f2ecfd8bb --- /dev/null +++ b/apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts @@ -0,0 +1,329 @@ +import { describe, expect, it } from "vitest"; + +import { + GlmToResponsesTranslator, + formatResponsesSSE, + type ChatCompletionsChunk, +} from "./translateGlmToResponses.ts"; + +function makeChunk(overrides: Partial = {}): ChatCompletionsChunk { + return { + id: "chatcmpl-1", + object: "chat.completion.chunk", + model: "glm-5.1", + choices: [{ index: 0, delta: {}, finish_reason: null }], + ...overrides, + }; +} + +describe("GlmToResponsesTranslator", () => { + it("emits output_item.added on the first text delta", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + const chunk = makeChunk({ + choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }], + }); + + const events = translator.translateChunk(chunk); + + expect(events).toHaveLength(2); + expect(events[0]!.event).toBe("response.output_item.added"); + 
expect(events[1]!.event).toBe("response.output_text.delta"); + expect(events[1]!.data.delta).toBe("Hello"); + }); + + it("does not emit output_item.added on subsequent text deltas", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }], + }), + ); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: { content: " world" }, finish_reason: null }], + }), + ); + + expect(events).toHaveLength(1); + expect(events[0]!.event).toBe("response.output_text.delta"); + expect(events[0]!.data.delta).toBe(" world"); + }); + + it("emits completion events on finish_reason", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: { content: "Hi" }, finish_reason: null }], + }), + ); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: {}, finish_reason: "stop" }], + }), + ); + + const eventTypes = events.map((e) => e.event); + expect(eventTypes).toEqual([ + "response.output_text.done", + "response.output_item.done", + "response.completed", + ]); + }); + + it("includes usage in response.completed when available", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: { content: "Hi" }, finish_reason: null }], + }), + ); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: {}, finish_reason: "stop" }], + usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }, + }), + ); + + const completed = events.find((e) => e.event === "response.completed"); + expect(completed).toBeDefined(); + const response = completed!.data.response as Record; + expect(response.status).toBe("completed"); + const usage = response.usage as Record; + 
expect(usage.input_tokens).toBe(10); + expect(usage.output_tokens).toBe(5); + expect(usage.total_tokens).toBe(15); + }); + + it("accumulates tool call deltas and flushes on finish", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + translator.translateChunk( + makeChunk({ + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + id: "call_abc", + type: "function", + function: { name: "read_file", arguments: "" }, + }, + ], + }, + finish_reason: null, + }, + ], + }), + ); + + translator.translateChunk( + makeChunk({ + choices: [ + { + index: 0, + delta: { + tool_calls: [{ index: 0, function: { arguments: '{"path":' } }], + }, + finish_reason: null, + }, + ], + }), + ); + + translator.translateChunk( + makeChunk({ + choices: [ + { + index: 0, + delta: { + tool_calls: [{ index: 0, function: { arguments: '"main.ts"}' } }], + }, + finish_reason: null, + }, + ], + }), + ); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], + }), + ); + + const addedEvent = events.find((e) => e.event === "response.output_item.added"); + expect(addedEvent).toBeDefined(); + const item = addedEvent!.data.item as Record; + expect(item.type).toBe("function_call"); + expect(item.name).toBe("read_file"); + expect(item.arguments).toBe('{"path":"main.ts"}'); + expect(item.id).toBe("call_abc"); + + const doneEvent = events.find((e) => e.event === "response.output_item.done"); + expect(doneEvent).toBeDefined(); + + const completedEvent = events.find((e) => e.event === "response.completed"); + expect(completedEvent).toBeDefined(); + }); + + it("handles text followed by tool calls", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: { content: "I'll read the file." 
}, finish_reason: null }], + }), + ); + + translator.translateChunk( + makeChunk({ + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + id: "call_1", + type: "function", + function: { name: "read_file", arguments: '{"path":"a.ts"}' }, + }, + ], + }, + finish_reason: null, + }, + ], + }), + ); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], + }), + ); + + const eventTypes = events.map((e) => e.event); + expect(eventTypes).toContain("response.output_text.done"); + expect(eventTypes).toContain("response.output_item.done"); + expect(eventTypes).toContain("response.output_item.added"); + expect(eventTypes).toContain("response.completed"); + }); + + it("handles multiple parallel tool calls", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + translator.translateChunk( + makeChunk({ + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + id: "call_1", + type: "function", + function: { name: "read_file", arguments: '{"path":"a.ts"}' }, + }, + { + index: 1, + id: "call_2", + type: "function", + function: { name: "read_file", arguments: '{"path":"b.ts"}' }, + }, + ], + }, + finish_reason: null, + }, + ], + }), + ); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], + }), + ); + + const addedEvents = events.filter((e) => e.event === "response.output_item.added"); + expect(addedEvents).toHaveLength(2); + + const names = addedEvents.map((e) => (e.data.item as Record).name); + expect(names).toContain("read_file"); + }); + + it("returns empty array for chunks with no content or tool calls", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }], + }), + ); + + expect(events).toHaveLength(0); + }); + + it("returns empty 
array when choices is empty", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + const events = translator.translateChunk(makeChunk({ choices: [] })); + + expect(events).toHaveLength(0); + }); + + it("emits response.completed without text events when no text was streamed", () => { + const translator = new GlmToResponsesTranslator("resp_1"); + + translator.translateChunk( + makeChunk({ + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + id: "call_1", + type: "function", + function: { name: "exec", arguments: '{"cmd":"ls"}' }, + }, + ], + }, + finish_reason: null, + }, + ], + }), + ); + + const events = translator.translateChunk( + makeChunk({ + choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], + }), + ); + + const eventTypes = events.map((e) => e.event); + expect(eventTypes).not.toContain("response.output_text.done"); + expect(eventTypes).toContain("response.output_item.added"); + expect(eventTypes).toContain("response.output_item.done"); + expect(eventTypes).toContain("response.completed"); + }); +}); + +describe("formatResponsesSSE", () => { + it("formats an event as SSE wire format", () => { + const result = formatResponsesSSE({ + event: "response.output_text.delta", + data: { delta: "Hello" }, + }); + + expect(result).toBe('event: response.output_text.delta\ndata: {"delta":"Hello"}\n\n'); + }); +}); diff --git a/apps/server/src/provider/glmBridge/translateGlmToResponses.ts b/apps/server/src/provider/glmBridge/translateGlmToResponses.ts new file mode 100644 index 0000000000..e19bfbeaf7 --- /dev/null +++ b/apps/server/src/provider/glmBridge/translateGlmToResponses.ts @@ -0,0 +1,168 @@ +export interface ChatCompletionsChunk { + id: string; + object: "chat.completion.chunk"; + model: string; + choices: ChatCompletionsChunkChoice[]; + usage?: ChatCompletionsUsage | null; +} + +interface ChatCompletionsChunkChoice { + index: number; + delta: ChatCompletionsDelta; + finish_reason: string | null; +} + +interface 
ChatCompletionsDelta { + role?: "assistant"; + content?: string | null; + tool_calls?: ChatCompletionsToolCallDelta[]; +} + +interface ChatCompletionsToolCallDelta { + index: number; + id?: string; + type?: "function"; + function?: { + name?: string; + arguments?: string; + }; +} + +export interface ChatCompletionsUsage { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; +} + +export interface ResponsesSSEEvent { + event: string; + data: Record; +} + +export class GlmToResponsesTranslator { + private readonly responseId: string; + private outputIndex = 0; + private pendingToolCalls = new Map(); + private emittedItemStartForText = false; + + constructor(responseId: string) { + this.responseId = responseId; + } + + translateChunk(chunk: ChatCompletionsChunk): ResponsesSSEEvent[] { + const events: ResponsesSSEEvent[] = []; + const choice = chunk.choices[0]; + if (!choice) return events; + + const { delta, finish_reason } = choice; + + if (delta.content) { + if (!this.emittedItemStartForText) { + events.push({ + event: "response.output_item.added", + data: { + output_index: this.outputIndex, + item: { + type: "message", + role: "assistant", + content: [{ type: "output_text", text: "" }], + }, + }, + }); + this.emittedItemStartForText = true; + } + + events.push({ + event: "response.output_text.delta", + data: { + output_index: this.outputIndex, + content_index: 0, + delta: delta.content, + }, + }); + } + + if (delta.tool_calls) { + for (const tc of delta.tool_calls) { + let pending = this.pendingToolCalls.get(tc.index); + if (!pending) { + pending = { id: tc.id ?? 
"", name: "", arguments: "" }; + this.pendingToolCalls.set(tc.index, pending); + } + if (tc.id) pending.id = tc.id; + if (tc.function?.name) pending.name += tc.function.name; + if (tc.function?.arguments) pending.arguments += tc.function.arguments; + } + } + + if (finish_reason) { + if (this.emittedItemStartForText) { + events.push({ + event: "response.output_text.done", + data: { output_index: this.outputIndex, content_index: 0 }, + }); + events.push({ + event: "response.output_item.done", + data: { output_index: this.outputIndex }, + }); + this.outputIndex++; + } + + for (const [, toolCall] of this.pendingToolCalls) { + events.push({ + event: "response.output_item.added", + data: { + output_index: this.outputIndex, + item: { + type: "function_call", + id: toolCall.id, + name: toolCall.name, + arguments: toolCall.arguments, + }, + }, + }); + events.push({ + event: "response.output_item.done", + data: { + output_index: this.outputIndex, + item: { + type: "function_call", + id: toolCall.id, + name: toolCall.name, + arguments: toolCall.arguments, + }, + }, + }); + this.outputIndex++; + } + this.pendingToolCalls.clear(); + + const usage = chunk.usage; + + events.push({ + event: "response.completed", + data: { + response: { + id: this.responseId, + status: "completed", + ...(usage + ? 
{ + usage: { + input_tokens: usage.prompt_tokens, + output_tokens: usage.completion_tokens, + total_tokens: usage.total_tokens, + }, + } + : {}), + }, + }, + }); + } + + return events; + } +} + +export function formatResponsesSSE(event: ResponsesSSEEvent): string { + return `event: ${event.event}\ndata: ${JSON.stringify(event.data)}\n\n`; +} diff --git a/apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts b/apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts new file mode 100644 index 0000000000..a137bf55ac --- /dev/null +++ b/apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts @@ -0,0 +1,309 @@ +import { describe, expect, it } from "vitest"; + +import { + translateResponsesToChatCompletions, + UnsupportedResponsesFeatureError, + type ResponsesRequest, +} from "./translateResponsesToGlm.ts"; + +describe("translateResponsesToChatCompletions", () => { + it("translates a simple text-only request", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "Hello" }], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + + expect(result.model).toBe("glm-5.1"); + expect(result.stream).toBe(true); + expect(result.messages).toEqual([{ role: "user", content: "Hello" }]); + }); + + it("prepends system instructions as a system message", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "Write code" }], + instructions: "You are a coding assistant.", + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + + expect(result.messages[0]).toEqual({ + role: "system", + content: "You are a coding assistant.", + }); + expect(result.messages[1]).toEqual({ role: "user", content: "Write code" }); + }); + + it("translates a multi-turn conversation", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [ + { role: "user", content: "What is 2+2?" 
}, + { role: "assistant", content: "4" }, + { role: "user", content: "And 3+3?" }, + ], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + + expect(result.messages).toEqual([ + { role: "user", content: "What is 2+2?" }, + { role: "assistant", content: "4" }, + { role: "user", content: "And 3+3?" }, + ]); + }); + + it("translates function tools", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "List files" }], + tools: [ + { + type: "function", + name: "list_files", + description: "List files in a directory", + parameters: { type: "object", properties: { path: { type: "string" } } }, + }, + ], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + + expect(result.tools).toEqual([ + { + type: "function", + function: { + name: "list_files", + description: "List files in a directory", + parameters: { type: "object", properties: { path: { type: "string" } } }, + }, + }, + ]); + }); + + it("translates tool_choice as a string", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + tool_choice: "auto", + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.tool_choice).toBe("auto"); + }); + + it("translates tool_choice as a named function", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + tool_choice: { type: "function", name: "read_file" }, + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.tool_choice).toEqual({ + type: "function", + function: { name: "read_file" }, + }); + }); + + it("translates function_call_output items to tool role messages", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [ + { role: "user", content: "List files" }, + { + role: "assistant", + content: [ + { + type: "function_call", + id: "call_123", + name: 
"list_files", + arguments: '{"path":"."}', + }, + ], + }, + { type: "function_call_output", call_id: "call_123", output: "file1.ts\nfile2.ts" }, + ], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + + expect(result.messages[1]).toEqual({ + role: "assistant", + content: null, + tool_calls: [ + { + id: "call_123", + type: "function", + function: { name: "list_files", arguments: '{"path":"."}' }, + }, + ], + }); + expect(result.messages[2]).toEqual({ + role: "tool", + tool_call_id: "call_123", + content: "file1.ts\nfile2.ts", + }); + }); + + it("extracts text from input_text content parts", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [ + { + role: "user", + content: [ + { type: "input_text", text: "part one " }, + { type: "input_text", text: "part two" }, + ], + }, + ], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.messages[0]).toEqual({ role: "user", content: "part one part two" }); + }); + + it("combines assistant text and tool calls from content parts", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [ + { + role: "assistant", + content: [ + { type: "output_text", text: "I'll read the file." 
}, + { + type: "function_call", + id: "call_abc", + name: "read_file", + arguments: '{"path":"main.ts"}', + }, + ], + }, + ], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + + expect(result.messages[0]).toEqual({ + role: "assistant", + content: "I'll read the file.", + tool_calls: [ + { + id: "call_abc", + type: "function", + function: { name: "read_file", arguments: '{"path":"main.ts"}' }, + }, + ], + }); + }); + + it("forwards temperature and max_output_tokens", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + temperature: 0.7, + max_output_tokens: 4096, + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.temperature).toBe(0.7); + expect(result.max_tokens).toBe(4096); + }); + + it("omits optional fields when not provided", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.tools).toBeUndefined(); + expect(result.tool_choice).toBeUndefined(); + expect(result.temperature).toBeUndefined(); + expect(result.max_tokens).toBeUndefined(); + }); + + it("defaults stream to true when not specified", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.stream).toBe(true); + }); + + it("includes stream_options with include_usage", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.stream_options).toEqual({ include_usage: true }); + }); + + it("preserves tool strict mode when present", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + 
tools: [ + { + type: "function", + name: "exec", + strict: true, + parameters: { type: "object" }, + }, + ], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.tools![0]!.function.strict).toBe(true); + }); + + it("handles inline system messages in the input array", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [ + { role: "system", content: "Be brief." }, + { role: "user", content: "Hello" }, + ], + stream: true, + }; + + const result = translateResponsesToChatCompletions(req); + expect(result.messages).toEqual([ + { role: "system", content: "Be brief." }, + { role: "user", content: "Hello" }, + ]); + }); + + it("throws UnsupportedResponsesFeatureError for non-function tool types", () => { + const req: ResponsesRequest = { + model: "glm-5.1", + input: [{ role: "user", content: "test" }], + tools: [{ type: "web_search" as any, name: "search" }], + stream: true, + }; + + expect(() => translateResponsesToChatCompletions(req)).toThrow( + UnsupportedResponsesFeatureError, + ); + }); +}); diff --git a/apps/server/src/provider/glmBridge/translateResponsesToGlm.ts b/apps/server/src/provider/glmBridge/translateResponsesToGlm.ts new file mode 100644 index 0000000000..b5b66ac51c --- /dev/null +++ b/apps/server/src/provider/glmBridge/translateResponsesToGlm.ts @@ -0,0 +1,192 @@ +export interface ResponsesRequest { + model: string; + input: ResponsesInput[]; + instructions?: string; + tools?: ResponsesTool[]; + tool_choice?: string | { type: string; name?: string }; + parallel_tool_calls?: boolean; + stream?: boolean; + temperature?: number; + max_output_tokens?: number; + reasoning?: { effort?: string }; +} + +export type ResponsesInput = + | { role: "user"; content: string | ResponsesContentPart[] } + | { role: "assistant"; content: string | ResponsesContentPart[] } + | { role: "system"; content: string } + | ResponsesFunctionCallOutput; + +interface ResponsesFunctionCallOutput { + type: 
"function_call_output"; + call_id: string; + output: string; +} + +type ResponsesContentPart = + | { type: "input_text"; text: string } + | { type: "output_text"; text: string } + | { type: "function_call"; id: string; name: string; arguments: string } + | { type: "text"; text: string }; + +interface ResponsesTool { + type: "function"; + name: string; + description?: string; + parameters?: Record; + strict?: boolean; +} + +export interface ChatCompletionsRequest { + model: string; + messages: ChatMessage[]; + tools?: ChatTool[]; + tool_choice?: string | { type: string; function?: { name: string } }; + stream: boolean; + temperature?: number; + max_tokens?: number; + stream_options?: { include_usage: boolean }; +} + +export interface ChatMessage { + role: "system" | "user" | "assistant" | "tool"; + content?: string | null; + tool_calls?: ChatToolCall[]; + tool_call_id?: string; +} + +interface ChatTool { + type: "function"; + function: { + name: string; + description?: string | undefined; + parameters?: Record | undefined; + strict?: boolean | undefined; + }; +} + +interface ChatToolCall { + id: string; + type: "function"; + function: { name: string; arguments: string }; +} + +export class UnsupportedResponsesFeatureError extends Error { + constructor(feature: string) { + super(`Unsupported Responses API feature for GLM bridge: ${feature}`); + this.name = "UnsupportedResponsesFeatureError"; + } +} + +export function translateResponsesToChatCompletions(req: ResponsesRequest): ChatCompletionsRequest { + const messages: ChatMessage[] = []; + + if (req.instructions) { + messages.push({ role: "system", content: req.instructions }); + } + + for (const item of req.input) { + if ("type" in item && item.type === "function_call_output") { + messages.push({ + role: "tool", + tool_call_id: item.call_id, + content: item.output, + }); + continue; + } + + const msg = item as Exclude; + + if (msg.role === "system") { + messages.push({ role: "system", content: msg.content as string 
}); + continue; + } + + if (msg.role === "user") { + const content = extractTextContent(msg.content); + messages.push({ role: "user", content }); + continue; + } + + if (msg.role === "assistant") { + const parts = Array.isArray(msg.content) ? msg.content : []; + const textParts: string[] = []; + const toolCalls: ChatToolCall[] = []; + + if (typeof msg.content === "string") { + textParts.push(msg.content); + } else { + for (const part of parts) { + if (part.type === "output_text" || part.type === "text") { + textParts.push(part.text); + } else if (part.type === "function_call") { + toolCalls.push({ + id: part.id, + type: "function", + function: { name: part.name, arguments: part.arguments }, + }); + } + } + } + + const assistantMsg: ChatMessage = { + role: "assistant", + content: textParts.length > 0 ? textParts.join("") : null, + }; + if (toolCalls.length > 0) { + assistantMsg.tool_calls = toolCalls; + } + messages.push(assistantMsg); + continue; + } + } + + const tools: ChatTool[] | undefined = req.tools?.map((tool): ChatTool => { + if (tool.type !== "function") { + throw new UnsupportedResponsesFeatureError(`tool type "${tool.type}"`); + } + return { + type: "function", + function: { + name: tool.name, + description: tool.description, + parameters: tool.parameters, + ...(tool.strict !== undefined ? { strict: tool.strict } : {}), + }, + }; + }); + + let toolChoice: ChatCompletionsRequest["tool_choice"]; + if (req.tool_choice !== undefined) { + if (typeof req.tool_choice === "string") { + toolChoice = req.tool_choice; + } else if (req.tool_choice.type === "function" && req.tool_choice.name) { + toolChoice = { + type: "function", + function: { name: req.tool_choice.name }, + }; + } + } + + return { + model: req.model, + messages, + ...(tools && tools.length > 0 ? { tools } : {}), + ...(toolChoice !== undefined ? { tool_choice: toolChoice } : {}), + stream: req.stream !== false, + ...(req.temperature !== undefined ? 
{ temperature: req.temperature } : {}), + ...(req.max_output_tokens !== undefined ? { max_tokens: req.max_output_tokens } : {}), + stream_options: { include_usage: true }, + }; +} + +function extractTextContent(content: string | ResponsesContentPart[]): string { + if (typeof content === "string") return content; + return content + .filter( + (p): p is { type: "input_text" | "text"; text: string } => + p.type === "input_text" || p.type === "text", + ) + .map((p) => p.text) + .join(""); +} diff --git a/apps/server/src/server.ts b/apps/server/src/server.ts index 1d6f6ac66e..4d37b26273 100644 --- a/apps/server/src/server.ts +++ b/apps/server/src/server.ts @@ -19,6 +19,7 @@ import { ProviderSessionDirectoryLive } from "./provider/Layers/ProviderSessionD import { ProviderSessionRuntimeRepositoryLive } from "./persistence/Layers/ProviderSessionRuntime"; import { makeCodexAdapterLive } from "./provider/Layers/CodexAdapter"; import { makeClaudeAdapterLive } from "./provider/Layers/ClaudeAdapter"; +import { makeGlmAdapterLive } from "./provider/Layers/GlmAdapter"; import { ProviderAdapterRegistryLive } from "./provider/Layers/ProviderAdapterRegistry"; import { makeProviderServiceLive } from "./provider/Layers/ProviderService"; import { OrchestrationEngineLive } from "./orchestration/Layers/OrchestrationEngine"; @@ -149,9 +150,13 @@ const ProviderLayerLive = Layer.unwrap( const claudeAdapterLayer = makeClaudeAdapterLive( nativeEventLogger ? { nativeEventLogger } : undefined, ); + const glmAdapterLayer = makeGlmAdapterLive( + nativeEventLogger ? 
{ nativeEventLogger } : undefined, + ).pipe(Layer.provide(codexAdapterLayer)); const adapterRegistryLayer = ProviderAdapterRegistryLive.pipe( Layer.provide(codexAdapterLayer), Layer.provide(claudeAdapterLayer), + Layer.provide(glmAdapterLayer), Layer.provideMerge(providerSessionDirectoryLayer), ); return makeProviderServiceLive( diff --git a/apps/web/src/components/ChatView.tsx b/apps/web/src/components/ChatView.tsx index 7c649b5003..6382151896 100644 --- a/apps/web/src/components/ChatView.tsx +++ b/apps/web/src/components/ChatView.tsx @@ -1408,6 +1408,7 @@ export default function ChatView({ threadId }: ChatViewProps) { codex: providerStatuses.find((provider) => provider.provider === "codex")?.models ?? [], claudeAgent: providerStatuses.find((provider) => provider.provider === "claudeAgent")?.models ?? [], + glm: providerStatuses.find((provider) => provider.provider === "glm")?.models ?? [], }), [providerStatuses], ); diff --git a/apps/web/src/components/Icons.tsx b/apps/web/src/components/Icons.tsx index 2e95b54e25..dfe8ac2c0a 100644 --- a/apps/web/src/components/Icons.tsx +++ b/apps/web/src/components/Icons.tsx @@ -424,3 +424,12 @@ export const OpenCodeIcon: Icon = (props) => ( ); + +export const GlmIcon: Icon = (props) => ( + + + +); diff --git a/apps/web/src/components/KeybindingsToast.browser.tsx b/apps/web/src/components/KeybindingsToast.browser.tsx index fbbf9782b6..951ff919b1 100644 --- a/apps/web/src/components/KeybindingsToast.browser.tsx +++ b/apps/web/src/components/KeybindingsToast.browser.tsx @@ -80,6 +80,12 @@ function createBaseServerConfig(): ServerConfig { providers: { codex: { enabled: true, binaryPath: "", homePath: "", customModels: [] }, claudeAgent: { enabled: true, binaryPath: "", customModels: [] }, + glm: { + enabled: false, + transport: "bridge" as const, + upstreamBaseUrl: "https://api.z.ai/api/coding/paas/v4", + customModels: [], + }, }, }, }; diff --git a/apps/web/src/components/chat/ProviderModelPicker.tsx 
b/apps/web/src/components/chat/ProviderModelPicker.tsx index 01fa37516e..03c192f7c1 100644 --- a/apps/web/src/components/chat/ProviderModelPicker.tsx +++ b/apps/web/src/components/chat/ProviderModelPicker.tsx @@ -18,7 +18,7 @@ import { MenuSubTrigger, MenuTrigger, } from "../ui/menu"; -import { ClaudeAI, CursorIcon, Gemini, Icon, OpenAI, OpenCodeIcon } from "../Icons"; +import { ClaudeAI, CursorIcon, Gemini, GlmIcon, Icon, OpenAI, OpenCodeIcon } from "../Icons"; import { cn } from "~/lib/utils"; import { getProviderSnapshot } from "../../providerModels"; @@ -33,6 +33,7 @@ function isAvailableProviderOption(option: (typeof PROVIDER_OPTIONS)[number]): o const PROVIDER_ICON_BY_PROVIDER: Record = { codex: OpenAI, claudeAgent: ClaudeAI, + glm: GlmIcon, cursor: CursorIcon, }; diff --git a/apps/web/src/components/chat/composerProviderRegistry.tsx b/apps/web/src/components/chat/composerProviderRegistry.tsx index 3307442db2..38ef53d33c 100644 --- a/apps/web/src/components/chat/composerProviderRegistry.tsx +++ b/apps/web/src/components/chat/composerProviderRegistry.tsx @@ -72,7 +72,9 @@ function getProviderStateFromCapabilities( const normalizedOptions = provider === "codex" ? normalizeCodexModelOptionsWithCapabilities(caps, providerOptions) - : normalizeClaudeModelOptionsWithCapabilities(caps, providerOptions); + : provider === "claudeAgent" + ? 
normalizeClaudeModelOptionsWithCapabilities(caps, providerOptions) + : undefined; // Ultrathink styling (driven by capabilities data, not provider identity) const ultrathinkActive = @@ -155,6 +157,38 @@ const composerProviderRegistry: Record = { /> ), }, + glm: { + getState: (input) => getProviderStateFromCapabilities(input), + renderTraitsMenuContent: ({ + threadId, + model, + models, + modelOptions, + prompt, + onPromptChange, + }) => ( + + ), + renderTraitsPicker: ({ threadId, model, models, modelOptions, prompt, onPromptChange }) => ( + + ), + }, }; export function getComposerProviderState(input: ComposerProviderStateInput): ComposerProviderState { diff --git a/apps/web/src/components/settings/SettingsPanels.tsx b/apps/web/src/components/settings/SettingsPanels.tsx index d534eefaa4..c9d0fcc5c8 100644 --- a/apps/web/src/components/settings/SettingsPanels.tsx +++ b/apps/web/src/components/settings/SettingsPanels.tsx @@ -89,11 +89,12 @@ const TIMESTAMP_FORMAT_LABELS = { type InstallProviderSettings = { provider: ProviderKind; title: string; - binaryPlaceholder: string; - binaryDescription: ReactNode; + binaryPlaceholder?: string; + binaryDescription?: ReactNode; homePathKey?: "codexHomePath"; homePlaceholder?: string; homeDescription?: ReactNode; + envVarHint?: string; }; const PROVIDER_SETTINGS: readonly InstallProviderSettings[] = [ @@ -112,6 +113,11 @@ const PROVIDER_SETTINGS: readonly InstallProviderSettings[] = [ binaryPlaceholder: "Claude binary path", binaryDescription: "Path to the Claude binary", }, + { + provider: "glm", + title: "GLM (Z.ai)", + envVarHint: "GLM_API_KEY", + }, ] as const; const PROVIDER_STATUS_STYLES = { @@ -537,12 +543,14 @@ export function GeneralSettingsPanel() { DEFAULT_UNIFIED_SETTINGS.providers.claudeAgent.binaryPath || settings.providers.claudeAgent.customModels.length > 0, ), + glm: Boolean(settings.providers.glm.customModels.length > 0), }); const [customModelInputByProvider, setCustomModelInputByProvider] = useState< Record >({ 
codex: "", claudeAgent: "", + glm: "", }); const [customModelErrorByProvider, setCustomModelErrorByProvider] = useState< Partial> @@ -758,7 +766,8 @@ export function GeneralSettingsPanel() { homePathKey: providerSettings.homePathKey, homePlaceholder: providerSettings.homePlaceholder, homeDescription: providerSettings.homeDescription, - binaryPathValue: providerConfig.binaryPath, + envVarHint: providerSettings.envVarHint, + binaryPathValue: "binaryPath" in providerConfig ? providerConfig.binaryPath : undefined, isDirty: !Equal.equals(providerConfig, defaultProviderConfig), liveProvider, models, @@ -1205,37 +1214,55 @@ export function GeneralSettingsPanel() { >
-
-
+ ) : null} {providerCard.homePathKey ? (
diff --git a/apps/web/src/modelSelection.ts b/apps/web/src/modelSelection.ts index 98e2884adf..dbd90a4923 100644 --- a/apps/web/src/modelSelection.ts +++ b/apps/web/src/modelSelection.ts @@ -45,6 +45,13 @@ const PROVIDER_CUSTOM_MODEL_CONFIG: Record { ...DEFAULT_SERVER_SETTINGS.providers.claudeAgent, enabled: false, }, + glm: DEFAULT_SERVER_SETTINGS.providers.glm, }, }; const requestPromise = runRpc((client) => client(WS_METHODS.serverGetSettings, {})); @@ -190,6 +191,7 @@ describe("WsRpcAtomClient", () => { ...DEFAULT_SERVER_SETTINGS.providers.claudeAgent, enabled: false, }, + glm: DEFAULT_SERVER_SETTINGS.providers.glm, }, }; const registry = AtomRegistry.make(); diff --git a/apps/web/src/session-logic.test.ts b/apps/web/src/session-logic.test.ts index a1234dff6f..2e81649f80 100644 --- a/apps/web/src/session-logic.test.ts +++ b/apps/web/src/session-logic.test.ts @@ -1262,6 +1262,7 @@ describe("PROVIDER_OPTIONS", () => { expect(PROVIDER_OPTIONS).toEqual([ { value: "codex", label: "Codex", available: true }, { value: "claudeAgent", label: "Claude", available: true }, + { value: "glm", label: "GLM", available: true }, { value: "cursor", label: "Cursor", available: false }, ]); expect(claude).toEqual({ diff --git a/apps/web/src/session-logic.ts b/apps/web/src/session-logic.ts index 06bfee9803..c33b60eea0 100644 --- a/apps/web/src/session-logic.ts +++ b/apps/web/src/session-logic.ts @@ -29,6 +29,7 @@ export const PROVIDER_OPTIONS: Array<{ }> = [ { value: "codex", label: "Codex", available: true }, { value: "claudeAgent", label: "Claude", available: true }, + { value: "glm", label: "GLM", available: true }, { value: "cursor", label: "Cursor", available: false }, ]; diff --git a/apps/web/src/store.ts b/apps/web/src/store.ts index 6e768c4ef8..f975ad0303 100644 --- a/apps/web/src/store.ts +++ b/apps/web/src/store.ts @@ -81,9 +81,9 @@ function updateProject( return changed ? 
next : projects; } -function normalizeModelSelection( - selection: T, -): T { +function normalizeModelSelection< + T extends { provider: "codex" | "claudeAgent" | "glm"; model: string }, +>(selection: T): T { return { ...selection, model: resolveModelSlugForProvider(selection.provider, selection.model), diff --git a/packages/contracts/src/model.ts b/packages/contracts/src/model.ts index e62a957e05..684efcea4a 100644 --- a/packages/contracts/src/model.ts +++ b/packages/contracts/src/model.ts @@ -22,9 +22,13 @@ export const ClaudeModelOptions = Schema.Struct({ }); export type ClaudeModelOptions = typeof ClaudeModelOptions.Type; +export const GlmModelOptions = Schema.Struct({}); +export type GlmModelOptions = typeof GlmModelOptions.Type; + export const ProviderModelOptions = Schema.Struct({ codex: Schema.optional(CodexModelOptions), claudeAgent: Schema.optional(ClaudeModelOptions), + glm: Schema.optional(GlmModelOptions), }); export type ProviderModelOptions = typeof ProviderModelOptions.Type; @@ -54,6 +58,7 @@ export type ModelCapabilities = typeof ModelCapabilities.Type; export const DEFAULT_MODEL_BY_PROVIDER: Record = { codex: "gpt-5.4", claudeAgent: "claude-sonnet-4-6", + glm: "glm-5.1", }; export const DEFAULT_MODEL = DEFAULT_MODEL_BY_PROVIDER.codex; @@ -62,6 +67,7 @@ export const DEFAULT_MODEL = DEFAULT_MODEL_BY_PROVIDER.codex; export const DEFAULT_GIT_TEXT_GENERATION_MODEL_BY_PROVIDER: Record = { codex: "gpt-5.4-mini", claudeAgent: "claude-haiku-4-5", + glm: "glm-5.1", }; export const MODEL_SLUG_ALIASES_BY_PROVIDER: Record> = { @@ -86,6 +92,17 @@ export const MODEL_SLUG_ALIASES_BY_PROVIDER: Record = { codex: "Codex", claudeAgent: "Claude", + glm: "GLM", }; diff --git a/packages/contracts/src/orchestration.ts b/packages/contracts/src/orchestration.ts index 6c7f073612..fef7c4b503 100644 --- a/packages/contracts/src/orchestration.ts +++ b/packages/contracts/src/orchestration.ts @@ -1,5 +1,5 @@ import { Option, Schema, SchemaIssue, Struct } from "effect"; -import { 
ClaudeModelOptions, CodexModelOptions } from "./model"; +import { ClaudeModelOptions, CodexModelOptions, GlmModelOptions } from "./model"; import { ApprovalRequestId, CheckpointRef, @@ -23,7 +23,7 @@ export const ORCHESTRATION_WS_METHODS = { replayEvents: "orchestration.replayEvents", } as const; -export const ProviderKind = Schema.Literals(["codex", "claudeAgent"]); +export const ProviderKind = Schema.Literals(["codex", "claudeAgent", "glm"]); export type ProviderKind = typeof ProviderKind.Type; export const ProviderApprovalPolicy = Schema.Literals([ "untrusted", @@ -55,7 +55,18 @@ export const ClaudeModelSelection = Schema.Struct({ }); export type ClaudeModelSelection = typeof ClaudeModelSelection.Type; -export const ModelSelection = Schema.Union([CodexModelSelection, ClaudeModelSelection]); +export const GlmModelSelection = Schema.Struct({ + provider: Schema.Literal("glm"), + model: TrimmedNonEmptyString, + options: Schema.optionalKey(GlmModelOptions), +}); +export type GlmModelSelection = typeof GlmModelSelection.Type; + +export const ModelSelection = Schema.Union([ + CodexModelSelection, + ClaudeModelSelection, + GlmModelSelection, +]); export type ModelSelection = typeof ModelSelection.Type; export const RuntimeMode = Schema.Literals(["approval-required", "full-access"]); diff --git a/packages/contracts/src/settings.ts b/packages/contracts/src/settings.ts index 6633ce42a6..88122429aa 100644 --- a/packages/contracts/src/settings.ts +++ b/packages/contracts/src/settings.ts @@ -71,6 +71,19 @@ export const ClaudeSettings = Schema.Struct({ }); export type ClaudeSettings = typeof ClaudeSettings.Type; +export const GlmTransport = Schema.Literals(["bridge", "direct"]); +export type GlmTransport = typeof GlmTransport.Type; + +export const GlmSettings = Schema.Struct({ + enabled: Schema.Boolean.pipe(Schema.withDecodingDefault(() => false)), + transport: GlmTransport.pipe(Schema.withDecodingDefault(() => "bridge" as const)), + upstreamBaseUrl: TrimmedString.pipe( + 
Schema.withDecodingDefault(() => "https://api.z.ai/api/coding/paas/v4"), + ), + customModels: Schema.Array(Schema.String).pipe(Schema.withDecodingDefault(() => [])), +}); +export type GlmSettings = typeof GlmSettings.Type; + export const ObservabilitySettings = Schema.Struct({ otlpTracesUrl: TrimmedString.pipe(Schema.withDecodingDefault(() => "")), otlpMetricsUrl: TrimmedString.pipe(Schema.withDecodingDefault(() => "")), @@ -93,6 +106,7 @@ export const ServerSettings = Schema.Struct({ providers: Schema.Struct({ codex: CodexSettings.pipe(Schema.withDecodingDefault(() => ({}))), claudeAgent: ClaudeSettings.pipe(Schema.withDecodingDefault(() => ({}))), + glm: GlmSettings.pipe(Schema.withDecodingDefault(() => ({}))), }).pipe(Schema.withDecodingDefault(() => ({}))), observability: ObservabilitySettings.pipe(Schema.withDecodingDefault(() => ({}))), }); @@ -135,6 +149,8 @@ const ClaudeModelOptionsPatch = Schema.Struct({ contextWindow: Schema.optionalKey(ClaudeModelOptions.fields.contextWindow), }); +const GlmModelOptionsPatch = Schema.Struct({}); + const ModelSelectionPatch = Schema.Union([ Schema.Struct({ provider: Schema.optionalKey(Schema.Literal("codex")), @@ -146,6 +162,11 @@ const ModelSelectionPatch = Schema.Union([ model: Schema.optionalKey(TrimmedNonEmptyString), options: Schema.optionalKey(ClaudeModelOptionsPatch), }), + Schema.Struct({ + provider: Schema.optionalKey(Schema.Literal("glm")), + model: Schema.optionalKey(TrimmedNonEmptyString), + options: Schema.optionalKey(GlmModelOptionsPatch), + }), ]); const CodexSettingsPatch = Schema.Struct({ @@ -161,6 +182,13 @@ const ClaudeSettingsPatch = Schema.Struct({ customModels: Schema.optionalKey(Schema.Array(Schema.String)), }); +const GlmSettingsPatch = Schema.Struct({ + enabled: Schema.optionalKey(Schema.Boolean), + transport: Schema.optionalKey(GlmTransport), + upstreamBaseUrl: Schema.optionalKey(Schema.String), + customModels: Schema.optionalKey(Schema.Array(Schema.String)), +}); + export const 
ServerSettingsPatch = Schema.Struct({ enableAssistantStreaming: Schema.optionalKey(Schema.Boolean), defaultThreadEnvMode: Schema.optionalKey(ThreadEnvMode), @@ -175,6 +203,7 @@ export const ServerSettingsPatch = Schema.Struct({ Schema.Struct({ codex: Schema.optionalKey(CodexSettingsPatch), claudeAgent: Schema.optionalKey(ClaudeSettingsPatch), + glm: Schema.optionalKey(GlmSettingsPatch), }), ), }); From aaeaf5b04fb6458bc6e4f1c7e23392791b112875 Mon Sep 17 00:00:00 2001 From: Marve10s Date: Wed, 8 Apr 2026 00:21:42 +0300 Subject: [PATCH 2/4] fix: address review comments on GlmAdapter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - makeGlmAdapterLive now accepts and plumbs options parameter - Documented that GLM runtime events flow through the Codex adapter stream with provider="codex" — event re-attribution by ProviderService based on session directory is a follow-up --- apps/server/src/provider/Layers/GlmAdapter.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/apps/server/src/provider/Layers/GlmAdapter.ts b/apps/server/src/provider/Layers/GlmAdapter.ts index 69f38ee54a..f6e71173a3 100644 --- a/apps/server/src/provider/Layers/GlmAdapter.ts +++ b/apps/server/src/provider/Layers/GlmAdapter.ts @@ -31,12 +31,12 @@ function remapSessionProvider(session: ProviderSession): ProviderSession { return { ...session, provider: PROVIDER }; } -export const GlmAdapterLive = Layer.effect( - GlmAdapter, - Effect.gen(function* () { +function makeGlmAdapter(options?: GlmAdapterLiveOptions) { + return Effect.gen(function* () { const codexAdapter = yield* CodexAdapter; const glmEventQueue = yield* Queue.unbounded(); const glmThreadIds = new Set(); + const _nativeEventLogger = options?.nativeEventLogger; const capabilities: ProviderAdapterCapabilities = { sessionModelSwitch: "restart-session", @@ -132,9 +132,9 @@ export const GlmAdapterLive = Layer.effect( return Stream.fromQueue(glmEventQueue); }, } satisfies 
GlmAdapterShape; - }), -); + }); +} -export function makeGlmAdapterLive(_options?: GlmAdapterLiveOptions) { - return GlmAdapterLive; +export function makeGlmAdapterLive(options?: GlmAdapterLiveOptions) { + return Layer.effect(GlmAdapter, makeGlmAdapter(options)); } From 7cf4cc3c4e8b1e96d0e9e057aec5e38e6e85c543 Mon Sep 17 00:00:00 2001 From: Marve10s Date: Fri, 10 Apr 2026 00:22:43 +0300 Subject: [PATCH 3/4] feat: surface GLM-backed Codex and Claude runtimes --- .../src/git/Layers/RoutingTextGeneration.ts | 1 - .../server/src/git/Services/TextGeneration.ts | 2 +- .../src/provider/Layers/ClaudeProvider.ts | 179 +++++++++- .../src/provider/Layers/CodexProvider.ts | 114 +++++- apps/server/src/provider/Layers/GlmAdapter.ts | 140 -------- .../server/src/provider/Layers/GlmProvider.ts | 127 ------- .../Layers/ProviderAdapterRegistry.test.ts | 23 +- .../Layers/ProviderAdapterRegistry.ts | 3 +- .../provider/Layers/ProviderRegistry.test.ts | 106 ++++++ .../src/provider/Layers/ProviderRegistry.ts | 30 +- .../src/provider/Services/GlmAdapter.ts | 11 - .../src/provider/Services/GlmProvider.ts | 8 - .../Services/ProviderAdapterRegistry.ts | 2 - .../src/provider/codexLaunchConfig.test.ts | 170 --------- apps/server/src/provider/codexLaunchConfig.ts | 64 ---- .../src/provider/glmBridge/GlmBridgeLive.ts | 278 --------------- .../provider/glmBridge/GlmBridgeService.ts | 10 - .../glmBridge/translateGlmToResponses.test.ts | 329 ------------------ .../glmBridge/translateGlmToResponses.ts | 168 --------- .../glmBridge/translateResponsesToGlm.test.ts | 309 ---------------- .../glmBridge/translateResponsesToGlm.ts | 192 ---------- apps/server/src/provider/providerSnapshot.ts | 2 + apps/server/src/server.ts | 5 - apps/web/src/components/ChatView.tsx | 26 +- .../components/KeybindingsToast.browser.tsx | 6 - .../components/chat/ProviderModelPicker.tsx | 8 +- .../components/chat/ProviderStatusBanner.tsx | 3 +- .../chat/composerProviderRegistry.tsx | 32 -- 
.../components/settings/SettingsPanels.tsx | 11 +- apps/web/src/modelSelection.ts | 18 +- apps/web/src/rpc/client.test.ts | 2 - apps/web/src/session-logic.test.ts | 1 - apps/web/src/session-logic.ts | 1 - apps/web/src/store.ts | 6 +- packages/contracts/src/model.ts | 18 - packages/contracts/src/orchestration.ts | 17 +- packages/contracts/src/server.ts | 1 + packages/contracts/src/settings.ts | 29 -- 38 files changed, 437 insertions(+), 2015 deletions(-) delete mode 100644 apps/server/src/provider/Layers/GlmAdapter.ts delete mode 100644 apps/server/src/provider/Layers/GlmProvider.ts delete mode 100644 apps/server/src/provider/Services/GlmAdapter.ts delete mode 100644 apps/server/src/provider/Services/GlmProvider.ts delete mode 100644 apps/server/src/provider/codexLaunchConfig.test.ts delete mode 100644 apps/server/src/provider/codexLaunchConfig.ts delete mode 100644 apps/server/src/provider/glmBridge/GlmBridgeLive.ts delete mode 100644 apps/server/src/provider/glmBridge/GlmBridgeService.ts delete mode 100644 apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts delete mode 100644 apps/server/src/provider/glmBridge/translateGlmToResponses.ts delete mode 100644 apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts delete mode 100644 apps/server/src/provider/glmBridge/translateResponsesToGlm.ts diff --git a/apps/server/src/git/Layers/RoutingTextGeneration.ts b/apps/server/src/git/Layers/RoutingTextGeneration.ts index e4526a072a..979aa6b070 100644 --- a/apps/server/src/git/Layers/RoutingTextGeneration.ts +++ b/apps/server/src/git/Layers/RoutingTextGeneration.ts @@ -44,7 +44,6 @@ const makeRoutingTextGeneration = Effect.gen(function* () { case "claudeAgent": return claude; case "codex": - case "glm": case undefined: return codex; } diff --git a/apps/server/src/git/Services/TextGeneration.ts b/apps/server/src/git/Services/TextGeneration.ts index 204892aef8..f4354c7a99 100644 --- a/apps/server/src/git/Services/TextGeneration.ts +++ 
b/apps/server/src/git/Services/TextGeneration.ts @@ -13,7 +13,7 @@ import type { ChatAttachment, ModelSelection } from "@t3tools/contracts"; import type { TextGenerationError } from "@t3tools/contracts"; /** Providers that support git text generation (commit messages, PR content, branch names). */ -export type TextGenerationProvider = "codex" | "claudeAgent" | "glm"; +export type TextGenerationProvider = "codex" | "claudeAgent"; export interface CommitMessageGenerationInput { cwd: string; diff --git a/apps/server/src/provider/Layers/ClaudeProvider.ts b/apps/server/src/provider/Layers/ClaudeProvider.ts index 9feec28637..38e776cb35 100644 --- a/apps/server/src/provider/Layers/ClaudeProvider.ts +++ b/apps/server/src/provider/Layers/ClaudeProvider.ts @@ -1,3 +1,4 @@ +import * as OS from "node:os"; import type { ClaudeSettings, ModelCapabilities, @@ -6,7 +7,19 @@ import type { ServerProviderAuth, ServerProviderState, } from "@t3tools/contracts"; -import { Cache, Duration, Effect, Equal, Layer, Option, Result, Schema, Stream } from "effect"; +import { + Cache, + Duration, + Effect, + Equal, + FileSystem, + Layer, + Option, + Path, + Result, + Schema, + Stream, +} from "effect"; import { ChildProcess, ChildProcessSpawner } from "effect/unstable/process"; import { decodeJsonResult } from "@t3tools/shared/schemaJson"; import { query as claudeQuery } from "@anthropic-ai/claude-agent-sdk"; @@ -36,6 +49,12 @@ const DEFAULT_CLAUDE_MODEL_CAPABILITIES: ModelCapabilities = { }; const PROVIDER = "claudeAgent" as const; +const ZAI_ANTHROPIC_BASE_URL = "https://api.z.ai/api/anthropic"; +const DEFAULT_CLAUDE_GLM_MODEL_MAPPING = { + opus: "glm-4.7", + sonnet: "glm-4.7", + haiku: "glm-4.5-air", +} as const; const BUILT_IN_MODELS: ReadonlyArray = [ { slug: "claude-opus-4-6", @@ -92,6 +111,107 @@ const BUILT_IN_MODELS: ReadonlyArray = [ }, ]; +interface ClaudeGlmIntegration { + readonly hasAuthToken: boolean; + readonly opusModel: string; + readonly sonnetModel: string; + readonly 
haikuModel: string; +} + +function normalizeUrl(value: string | undefined): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed.replace(/\/+$/g, "").toLowerCase() : undefined; +} + +function asPlainRecord(value: unknown): Record | undefined { + return typeof value === "object" && value !== null && !globalThis.Array.isArray(value) + ? (value as Record) + : undefined; +} + +function asTrimmedString(value: unknown): string | undefined { + return typeof value === "string" && value.trim().length > 0 ? value.trim() : undefined; +} + +function readClaudeGlmIntegrationFromEnv( + env: Record, +): ClaudeGlmIntegration | undefined { + if (normalizeUrl(env.ANTHROPIC_BASE_URL) !== normalizeUrl(ZAI_ANTHROPIC_BASE_URL)) { + return undefined; + } + + return { + hasAuthToken: Boolean(asTrimmedString(env.ANTHROPIC_AUTH_TOKEN)), + opusModel: + asTrimmedString(env.ANTHROPIC_DEFAULT_OPUS_MODEL) ?? DEFAULT_CLAUDE_GLM_MODEL_MAPPING.opus, + sonnetModel: + asTrimmedString(env.ANTHROPIC_DEFAULT_SONNET_MODEL) ?? + DEFAULT_CLAUDE_GLM_MODEL_MAPPING.sonnet, + haikuModel: + asTrimmedString(env.ANTHROPIC_DEFAULT_HAIKU_MODEL) ?? DEFAULT_CLAUDE_GLM_MODEL_MAPPING.haiku, + }; +} + +function buildClaudeModels( + integration: ClaudeGlmIntegration | undefined, +): ReadonlyArray { + if (!integration) { + return BUILT_IN_MODELS; + } + + return BUILT_IN_MODELS.map((model) => { + let mappedModel: string | undefined; + switch (model.slug) { + case "claude-opus-4-6": + mappedModel = integration.opusModel; + break; + case "claude-sonnet-4-6": + mappedModel = integration.sonnetModel; + break; + case "claude-haiku-4-5": + mappedModel = integration.haikuModel; + break; + } + + return mappedModel ? 
{ ...model, name: `${model.name} (${mappedModel})` } : model; + }); +} + +export const readClaudeGlmIntegration = Effect.fn("readClaudeGlmIntegration")(function* () { + const fileSystem = yield* FileSystem.FileSystem; + const path = yield* Path.Path; + const settingsPath = path.join(OS.homedir(), ".claude", "settings.json"); + const content = yield* fileSystem + .readFileString(settingsPath) + .pipe(Effect.orElseSucceed(() => undefined)); + + const fileEnv = (() => { + if (!content) { + return {} as Record; + } + try { + const parsed = JSON.parse(content) as unknown; + const envRecord = asPlainRecord(asPlainRecord(parsed)?.env); + if (!envRecord) { + return {} as Record; + } + return Object.fromEntries( + Object.entries(envRecord).flatMap(([key, value]) => { + const stringValue = asTrimmedString(value); + return stringValue ? [[key, stringValue]] : []; + }), + ) as Record; + } catch { + return {} as Record; + } + })(); + + return readClaudeGlmIntegrationFromEnv({ + ...fileEnv, + ...process.env, + }); +}); + export function getClaudeModelCapabilities(model: string | null | undefined): ModelCapabilities { const slug = model?.trim(); return ( @@ -446,15 +566,22 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( ): Effect.fn.Return< ServerProvider, ServerSettingsError, - ChildProcessSpawner.ChildProcessSpawner | ServerSettingsService + | ChildProcessSpawner.ChildProcessSpawner + | FileSystem.FileSystem + | Path.Path + | ServerSettingsService > { const claudeSettings = yield* Effect.service(ServerSettingsService).pipe( Effect.flatMap((service) => service.getSettings), Effect.map((settings) => settings.providers.claudeAgent), ); + const glmIntegration = yield* readClaudeGlmIntegration().pipe( + Effect.orElseSucceed(() => undefined), + ); const checkedAt = new Date().toISOString(); + const displayName = glmIntegration ? 
"Claude / GLM" : "Claude"; const models = providerModelsFromSettings( - BUILT_IN_MODELS, + buildClaudeModels(glmIntegration), PROVIDER, claudeSettings.customModels, DEFAULT_CLAUDE_MODEL_CAPABILITIES, @@ -466,6 +593,7 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( enabled: false, checkedAt, models, + displayName, probe: { installed: false, version: null, @@ -488,6 +616,7 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( enabled: claudeSettings.enabled, checkedAt, models, + displayName, probe: { installed: !isCommandMissingCause(error), version: null, @@ -506,6 +635,7 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( enabled: claudeSettings.enabled, checkedAt, models, + displayName, probe: { installed: true, version: null, @@ -526,6 +656,7 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( enabled: claudeSettings.enabled, checkedAt, models, + displayName, probe: { installed: true, version: parsedVersion, @@ -538,6 +669,41 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( }); } + if (glmIntegration) { + return buildServerProvider({ + provider: PROVIDER, + enabled: claudeSettings.enabled, + checkedAt, + models, + displayName, + probe: glmIntegration.hasAuthToken + ? { + installed: true, + version: parsedVersion, + status: "ready", + auth: { + status: "authenticated", + type: "apiKey", + label: "Z.AI GLM Plan", + }, + message: + "Configured to use Z.AI's Anthropic-compatible endpoint. 
Claude model tiers map to GLM models from your Claude settings.", + } + : { + installed: true, + version: parsedVersion, + status: "error", + auth: { + status: "unauthenticated", + type: "apiKey", + label: "Z.AI GLM Plan", + }, + message: + "Configured to use Z.AI's Anthropic-compatible endpoint, but ANTHROPIC_AUTH_TOKEN is missing.", + }, + }); + } + // ── Auth check + subscription detection ──────────────────────────── const authProbe = yield* runClaudeCommand(["auth", "status"]).pipe( @@ -574,6 +740,7 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( enabled: claudeSettings.enabled, checkedAt, models: resolvedModels, + displayName, probe: { installed: true, version: parsedVersion, @@ -593,6 +760,7 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( enabled: claudeSettings.enabled, checkedAt, models: resolvedModels, + displayName, probe: { installed: true, version: parsedVersion, @@ -610,6 +778,7 @@ export const checkClaudeProviderStatus = Effect.fn("checkClaudeProviderStatus")( enabled: claudeSettings.enabled, checkedAt, models: resolvedModels, + displayName, probe: { installed: true, version: parsedVersion, @@ -627,6 +796,8 @@ export const ClaudeProviderLive = Layer.effect( ClaudeProvider, Effect.gen(function* () { const serverSettings = yield* ServerSettingsService; + const fileSystem = yield* FileSystem.FileSystem; + const path = yield* Path.Path; const spawner = yield* ChildProcessSpawner.ChildProcessSpawner; const subscriptionProbeCache = yield* Cache.make({ @@ -640,6 +811,8 @@ export const ClaudeProviderLive = Layer.effect( Cache.get(subscriptionProbeCache, binaryPath), ).pipe( Effect.provideService(ServerSettingsService, serverSettings), + Effect.provideService(FileSystem.FileSystem, fileSystem), + Effect.provideService(Path.Path, path), Effect.provideService(ChildProcessSpawner.ChildProcessSpawner, spawner), ); diff --git a/apps/server/src/provider/Layers/CodexProvider.ts 
b/apps/server/src/provider/Layers/CodexProvider.ts index 3509fa9257..3021f2abb8 100644 --- a/apps/server/src/provider/Layers/CodexProvider.ts +++ b/apps/server/src/provider/Layers/CodexProvider.ts @@ -62,6 +62,14 @@ const DEFAULT_CODEX_MODEL_CAPABILITIES: ModelCapabilities = { promptInjectedEffortLevels: [], }; +const DEFAULT_GLM_MODEL_CAPABILITIES: ModelCapabilities = { + reasoningEffortLevels: [], + supportsFastMode: false, + supportsThinkingToggle: false, + contextWindowOptions: [], + promptInjectedEffortLevels: [], +}; + const PROVIDER = "codex" as const; const OPENAI_AUTH_PROVIDERS = new Set(["openai"]); const BUILT_IN_MODELS: ReadonlyArray = [ @@ -169,8 +177,59 @@ const BUILT_IN_MODELS: ReadonlyArray = [ }, ]; +const GLM_BUILT_IN_MODELS: ReadonlyArray = [ + { + slug: "glm-5.1", + name: "GLM 5.1", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-5", + name: "GLM 5", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-5-turbo", + name: "GLM 5 Turbo", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.7", + name: "GLM 4.7", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.6", + name: "GLM 4.6", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.5", + name: "GLM 4.5", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, + { + slug: "glm-4.5-air", + name: "GLM 4.5 Air", + isCustom: false, + capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, + }, +]; + export function getCodexModelCapabilities(model: string | null | undefined): ModelCapabilities { const slug = model?.trim(); + if (slug?.startsWith("glm-")) { + return ( + GLM_BUILT_IN_MODELS.find((candidate) => candidate.slug === slug)?.capabilities ?? + DEFAULT_GLM_MODEL_CAPABILITIES + ); + } return ( BUILT_IN_MODELS.find((candidate) => candidate.slug === slug)?.capabilities ?? 
DEFAULT_CODEX_MODEL_CAPABILITIES @@ -299,6 +358,41 @@ export const hasCustomModelProvider = readCodexConfigModelProvider().pipe( Effect.orElseSucceed(() => false), ); +function toTitleCaseWords(value: string): string { + return value + .split(/[\s_-]+/g) + .filter(Boolean) + .map((part) => part[0]!.toUpperCase() + part.slice(1).toLowerCase()) + .join(" "); +} + +function codexDisplayName(modelProvider: string | undefined): string { + if (!modelProvider || OPENAI_AUTH_PROVIDERS.has(modelProvider)) { + return "Codex"; + } + if (modelProvider === "glm") { + return "Codex / GLM"; + } + return `Codex / ${toTitleCaseWords(modelProvider)}`; +} + +function codexCustomProviderMessage(modelProvider: string | undefined): string { + if (modelProvider === "glm") { + return "Using Z.AI GLM through Codex custom model provider config; OpenAI login check skipped."; + } + return "Using a custom Codex model provider; OpenAI login check skipped."; +} + +function codexBuiltInModels(modelProvider: string | undefined): ReadonlyArray { + return modelProvider === "glm" ? GLM_BUILT_IN_MODELS : BUILT_IN_MODELS; +} + +function codexCustomModelCapabilities(modelProvider: string | undefined): ModelCapabilities { + return modelProvider === "glm" + ? 
DEFAULT_GLM_MODEL_CAPABILITIES + : DEFAULT_CODEX_MODEL_CAPABILITIES; +} + const CAPABILITIES_PROBE_TIMEOUT_MS = 8_000; const probeCodexCapabilities = (input: { @@ -347,11 +441,15 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu Effect.map((settings) => settings.providers.codex), ); const checkedAt = new Date().toISOString(); + const modelProvider = yield* readCodexConfigModelProvider().pipe( + Effect.orElseSucceed(() => undefined), + ); + const displayName = codexDisplayName(modelProvider); const models = providerModelsFromSettings( - BUILT_IN_MODELS, + codexBuiltInModels(modelProvider), PROVIDER, codexSettings.customModels, - DEFAULT_CODEX_MODEL_CAPABILITIES, + codexCustomModelCapabilities(modelProvider), ); if (!codexSettings.enabled) { @@ -360,6 +458,7 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu enabled: false, checkedAt, models, + displayName, probe: { installed: false, version: null, @@ -382,6 +481,7 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu enabled: codexSettings.enabled, checkedAt, models, + displayName, probe: { installed: !isCommandMissingCause(error), version: null, @@ -400,6 +500,7 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu enabled: codexSettings.enabled, checkedAt, models, + displayName, probe: { installed: true, version: null, @@ -421,6 +522,7 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu enabled: codexSettings.enabled, checkedAt, models, + displayName, probe: { installed: true, version: parsedVersion, @@ -449,18 +551,19 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu }); } - if (yield* hasCustomModelProvider) { + if (modelProvider !== undefined && !OPENAI_AUTH_PROVIDERS.has(modelProvider)) { return buildServerProvider({ provider: PROVIDER, enabled: codexSettings.enabled, checkedAt, models, + displayName, probe: { installed: 
true, version: parsedVersion, status: "ready", auth: { status: "unknown" }, - message: "Using a custom Codex model provider; OpenAI login check skipped.", + message: codexCustomProviderMessage(modelProvider), }, }); } @@ -484,6 +587,7 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu enabled: codexSettings.enabled, checkedAt, models: resolvedModels, + displayName, probe: { installed: true, version: parsedVersion, @@ -503,6 +607,7 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu enabled: codexSettings.enabled, checkedAt, models: resolvedModels, + displayName, probe: { installed: true, version: parsedVersion, @@ -521,6 +626,7 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu enabled: codexSettings.enabled, checkedAt, models: resolvedModels, + displayName, probe: { installed: true, version: parsedVersion, diff --git a/apps/server/src/provider/Layers/GlmAdapter.ts b/apps/server/src/provider/Layers/GlmAdapter.ts deleted file mode 100644 index f6e71173a3..0000000000 --- a/apps/server/src/provider/Layers/GlmAdapter.ts +++ /dev/null @@ -1,140 +0,0 @@ -import type { - ApprovalRequestId, - ProviderApprovalDecision, - ProviderRuntimeEvent, - ProviderSendTurnInput, - ProviderSession, - ProviderSessionStartInput, - ProviderTurnStartResult, - ProviderUserInputAnswers, - ThreadId, - TurnId, -} from "@t3tools/contracts"; -import { Effect, Layer, Queue, Stream } from "effect"; - -import type { ProviderAdapterError } from "../Errors.ts"; -import { GlmAdapter, type GlmAdapterShape } from "../Services/GlmAdapter.ts"; -import { CodexAdapter } from "../Services/CodexAdapter.ts"; -import type { - ProviderAdapterCapabilities, - ProviderThreadSnapshot, -} from "../Services/ProviderAdapter.ts"; -import type { EventNdjsonLogger } from "./EventNdjsonLogger.ts"; - -const PROVIDER = "glm" as const; - -export interface GlmAdapterLiveOptions { - readonly nativeEventLogger?: EventNdjsonLogger; -} 
- -function remapSessionProvider(session: ProviderSession): ProviderSession { - return { ...session, provider: PROVIDER }; -} - -function makeGlmAdapter(options?: GlmAdapterLiveOptions) { - return Effect.gen(function* () { - const codexAdapter = yield* CodexAdapter; - const glmEventQueue = yield* Queue.unbounded(); - const glmThreadIds = new Set(); - const _nativeEventLogger = options?.nativeEventLogger; - - const capabilities: ProviderAdapterCapabilities = { - sessionModelSwitch: "restart-session", - }; - - const startSession = ( - input: ProviderSessionStartInput, - ): Effect.Effect => - Effect.gen(function* () { - glmThreadIds.add(input.threadId); - const session = yield* codexAdapter.startSession({ - ...input, - provider: "codex", - }); - return remapSessionProvider(session); - }); - - const sendTurn = ( - input: ProviderSendTurnInput, - ): Effect.Effect => codexAdapter.sendTurn(input); - - const interruptTurn = ( - threadId: ThreadId, - turnId?: TurnId, - ): Effect.Effect => codexAdapter.interruptTurn(threadId, turnId); - - const respondToRequest = ( - threadId: ThreadId, - requestId: ApprovalRequestId, - decision: ProviderApprovalDecision, - ): Effect.Effect => - codexAdapter.respondToRequest(threadId, requestId, decision); - - const respondToUserInput = ( - threadId: ThreadId, - requestId: ApprovalRequestId, - answers: ProviderUserInputAnswers, - ): Effect.Effect => - codexAdapter.respondToUserInput(threadId, requestId, answers); - - const stopSession = (threadId: ThreadId): Effect.Effect => - Effect.gen(function* () { - yield* codexAdapter.stopSession(threadId); - glmThreadIds.delete(threadId); - }); - - const listSessions = (): Effect.Effect> => - codexAdapter - .listSessions() - .pipe( - Effect.map((sessions) => - sessions.filter((s) => glmThreadIds.has(s.threadId)).map(remapSessionProvider), - ), - ); - - const hasSession = (threadId: ThreadId): Effect.Effect => - glmThreadIds.has(threadId) ? 
codexAdapter.hasSession(threadId) : Effect.succeed(false); - - const readThread = ( - threadId: ThreadId, - ): Effect.Effect => - codexAdapter.readThread(threadId); - - const rollbackThread = ( - threadId: ThreadId, - numTurns: number, - ): Effect.Effect => - codexAdapter.rollbackThread(threadId, numTurns); - - const stopAll = (): Effect.Effect => - Effect.gen(function* () { - for (const threadId of glmThreadIds) { - yield* codexAdapter.stopSession(threadId).pipe(Effect.ignore); - } - glmThreadIds.clear(); - }); - - return { - provider: PROVIDER, - capabilities, - startSession, - sendTurn, - interruptTurn, - respondToRequest, - respondToUserInput, - stopSession, - listSessions, - hasSession, - readThread, - rollbackThread, - stopAll, - get streamEvents() { - return Stream.fromQueue(glmEventQueue); - }, - } satisfies GlmAdapterShape; - }); -} - -export function makeGlmAdapterLive(options?: GlmAdapterLiveOptions) { - return Layer.effect(GlmAdapter, makeGlmAdapter(options)); -} diff --git a/apps/server/src/provider/Layers/GlmProvider.ts b/apps/server/src/provider/Layers/GlmProvider.ts deleted file mode 100644 index 8479e3b665..0000000000 --- a/apps/server/src/provider/Layers/GlmProvider.ts +++ /dev/null @@ -1,127 +0,0 @@ -import type { GlmSettings, ModelCapabilities, ServerProviderModel } from "@t3tools/contracts"; -import { Effect, Equal, Layer, Stream } from "effect"; - -import { - buildServerProvider, - providerModelsFromSettings, - type ProviderProbeResult, -} from "../providerSnapshot.ts"; -import { makeManagedServerProvider } from "../makeManagedServerProvider.ts"; -import { GlmProvider } from "../Services/GlmProvider.ts"; -import { ServerSettingsService } from "../../serverSettings.ts"; - -const PROVIDER = "glm" as const; - -const DEFAULT_GLM_MODEL_CAPABILITIES: ModelCapabilities = { - reasoningEffortLevels: [], - supportsFastMode: false, - supportsThinkingToggle: false, - contextWindowOptions: [], - promptInjectedEffortLevels: [], -}; - -const BUILT_IN_MODELS: 
ReadonlyArray = [ - { - slug: "glm-5.1", - name: "GLM 5.1", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-5", - name: "GLM 5", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-5-turbo", - name: "GLM 5 Turbo", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.7", - name: "GLM 4.7", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.6", - name: "GLM 4.6", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.5", - name: "GLM 4.5", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.5-air", - name: "GLM 4.5 Air", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, -]; - -function checkGlmProviderStatus(_glmSettings: GlmSettings): ProviderProbeResult { - const hasApiKey = Boolean(process.env.GLM_API_KEY); - - if (!hasApiKey) { - return { - installed: true, - version: null, - status: "error", - auth: { status: "unauthenticated" }, - message: "Set the GLM_API_KEY environment variable to authenticate.", - }; - } - - return { - installed: true, - version: null, - status: "ready", - auth: { status: "authenticated", type: "apiKey" }, - }; -} - -export const GlmProviderLive = Layer.effect( - GlmProvider, - Effect.gen(function* () { - const serverSettings = yield* ServerSettingsService; - - const checkProvider = Effect.gen(function* () { - const settings = yield* serverSettings.getSettings; - const glmSettings = settings.providers.glm; - const probe = checkGlmProviderStatus(glmSettings); - - const models = providerModelsFromSettings( - BUILT_IN_MODELS, - PROVIDER, - glmSettings.customModels, - DEFAULT_GLM_MODEL_CAPABILITIES, - ); - - return buildServerProvider({ - provider: PROVIDER, - enabled: glmSettings.enabled, - checkedAt: new Date().toISOString(), - models, - probe, - }); - }); - - return yield* 
makeManagedServerProvider({ - getSettings: serverSettings.getSettings.pipe( - Effect.map((settings) => settings.providers.glm), - Effect.orDie, - ), - streamSettings: serverSettings.streamChanges.pipe( - Stream.map((settings) => settings.providers.glm), - ), - haveSettingsChanged: (previous, next) => !Equal.equals(previous, next), - checkProvider, - }); - }), -); diff --git a/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts b/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts index 00dda8372c..db0293f0fe 100644 --- a/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts +++ b/apps/server/src/provider/Layers/ProviderAdapterRegistry.test.ts @@ -6,7 +6,6 @@ import { Effect, Layer, Stream } from "effect"; import { ClaudeAdapter, ClaudeAdapterShape } from "../Services/ClaudeAdapter.ts"; import { CodexAdapter, CodexAdapterShape } from "../Services/CodexAdapter.ts"; -import { GlmAdapter, GlmAdapterShape } from "../Services/GlmAdapter.ts"; import { ProviderAdapterRegistry } from "../Services/ProviderAdapterRegistry.ts"; import { ProviderAdapterRegistryLive } from "./ProviderAdapterRegistry.ts"; import { ProviderUnsupportedError } from "../Errors.ts"; @@ -46,23 +45,6 @@ const fakeClaudeAdapter: ClaudeAdapterShape = { streamEvents: Stream.empty, }; -const fakeGlmAdapter: GlmAdapterShape = { - provider: "glm", - capabilities: { sessionModelSwitch: "restart-session" }, - startSession: vi.fn(), - sendTurn: vi.fn(), - interruptTurn: vi.fn(), - respondToRequest: vi.fn(), - respondToUserInput: vi.fn(), - stopSession: vi.fn(), - listSessions: vi.fn(), - hasSession: vi.fn(), - readThread: vi.fn(), - rollbackThread: vi.fn(), - stopAll: vi.fn(), - streamEvents: Stream.empty, -}; - const layer = it.layer( Layer.mergeAll( Layer.provide( @@ -70,7 +52,6 @@ const layer = it.layer( Layer.mergeAll( Layer.succeed(CodexAdapter, fakeCodexAdapter), Layer.succeed(ClaudeAdapter, fakeClaudeAdapter), - Layer.succeed(GlmAdapter, fakeGlmAdapter), ), ), 
NodeServices.layer, @@ -83,13 +64,11 @@ layer("ProviderAdapterRegistryLive", (it) => { const registry = yield* ProviderAdapterRegistry; const codex = yield* registry.getByProvider("codex"); const claude = yield* registry.getByProvider("claudeAgent"); - const glm = yield* registry.getByProvider("glm"); assert.equal(codex, fakeCodexAdapter); assert.equal(claude, fakeClaudeAdapter); - assert.equal(glm, fakeGlmAdapter); const providers = yield* registry.listProviders(); - assert.deepEqual(providers, ["codex", "claudeAgent", "glm"]); + assert.deepEqual(providers, ["codex", "claudeAgent"]); }), ); diff --git a/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts b/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts index 809fb3032b..b6c987c64c 100644 --- a/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts +++ b/apps/server/src/provider/Layers/ProviderAdapterRegistry.ts @@ -17,7 +17,6 @@ import { } from "../Services/ProviderAdapterRegistry.ts"; import { ClaudeAdapter } from "../Services/ClaudeAdapter.ts"; import { CodexAdapter } from "../Services/CodexAdapter.ts"; -import { GlmAdapter } from "../Services/GlmAdapter.ts"; export interface ProviderAdapterRegistryLiveOptions { readonly adapters?: ReadonlyArray>; @@ -29,7 +28,7 @@ const makeProviderAdapterRegistry = Effect.fn("makeProviderAdapterRegistry")(fun const adapters = options?.adapters !== undefined ? 
options.adapters - : [yield* CodexAdapter, yield* ClaudeAdapter, yield* GlmAdapter]; + : [yield* CodexAdapter, yield* ClaudeAdapter]; const byProvider = new Map(adapters.map((adapter) => [adapter.provider, adapter])); const getByProvider: ProviderAdapterRegistryShape["getByProvider"] = (provider) => { diff --git a/apps/server/src/provider/Layers/ProviderRegistry.test.ts b/apps/server/src/provider/Layers/ProviderRegistry.test.ts index ca27371b61..bbc41476d6 100644 --- a/apps/server/src/provider/Layers/ProviderRegistry.test.ts +++ b/apps/server/src/provider/Layers/ProviderRegistry.test.ts @@ -156,6 +156,36 @@ function withTempCodexHome(configContent?: string) { }); } +function withTempHomeFile(relativePath: string, content: string) { + return Effect.gen(function* () { + const fileSystem = yield* FileSystem.FileSystem; + const path = yield* Path.Path; + const tmpDir = yield* fileSystem.makeTempDirectoryScoped({ prefix: "t3-test-home-" }); + + yield* Effect.acquireRelease( + Effect.sync(() => { + const originalHome = process.env.HOME; + process.env.HOME = tmpDir; + return originalHome; + }), + (originalHome) => + Effect.sync(() => { + if (originalHome !== undefined) { + process.env.HOME = originalHome; + } else { + delete process.env.HOME; + } + }), + ); + + const filePath = path.join(tmpDir, relativePath); + yield* fileSystem.makeDirectory(path.dirname(filePath), { recursive: true }); + yield* fileSystem.writeFileString(filePath, content); + + return { tmpDir } as const; + }); +} + it.layer(Layer.mergeAll(NodeServices.layer, ServerSettingsService.layerTest()))( "ProviderRegistry", (it) => { @@ -656,6 +686,39 @@ it.layer(Layer.mergeAll(NodeServices.layer, ServerSettingsService.layerTest()))( assert.strictEqual(status.installed, false); }).pipe(Effect.provide(failingSpawnerLayer("spawn codex ENOENT"))), ); + + it.effect("surfaces GLM models when Codex is configured with model_provider=glm", () => + Effect.gen(function* () { + yield* withTempCodexHome( + [ + 
'model_provider = "glm"', + "", + "[model_providers.glm]", + 'base_url = "https://api.z.ai/api/coding/paas/v4"', + 'env_key = "GLM_API_KEY"', + ].join("\n"), + ); + const status = yield* checkCodexProviderStatus(); + assert.strictEqual(status.displayName, "Codex / GLM"); + assert.strictEqual(status.status, "ready"); + assert.strictEqual( + status.message, + "Using Z.AI GLM through Codex custom model provider config; OpenAI login check skipped.", + ); + assert.deepStrictEqual( + status.models.slice(0, 3).map((model) => model.slug), + ["glm-5.1", "glm-5", "glm-5-turbo"], + ); + }).pipe( + Effect.provide( + mockSpawnerLayer((args) => { + const joined = args.join(" "); + if (joined === "--version") return { stdout: "codex 1.0.0\n", stderr: "", code: 0 }; + throw new Error(`Auth probe should have been skipped but got args: ${joined}`); + }), + ), + ), + ); }); describe("checkCodexProviderStatus with openai model provider", () => { @@ -861,6 +924,49 @@ it.layer(Layer.mergeAll(NodeServices.layer, ServerSettingsService.layerTest()))( ), ); + it.effect("detects Z.AI GLM config from Claude settings", () => + Effect.gen(function* () { + yield* withTempHomeFile( + ".claude/settings.json", + JSON.stringify({ + env: { + ANTHROPIC_AUTH_TOKEN: "glm-api-key", + ANTHROPIC_BASE_URL: "https://api.z.ai/api/anthropic", + ANTHROPIC_DEFAULT_OPUS_MODEL: "glm-5.1", + ANTHROPIC_DEFAULT_SONNET_MODEL: "glm-5.1", + ANTHROPIC_DEFAULT_HAIKU_MODEL: "glm-4.5-air", + }, + }), + ); + const status = yield* checkClaudeProviderStatus(); + assert.strictEqual(status.displayName, "Claude / GLM"); + assert.strictEqual(status.status, "ready"); + assert.strictEqual(status.auth.status, "authenticated"); + assert.strictEqual(status.auth.type, "apiKey"); + assert.strictEqual(status.auth.label, "Z.AI GLM Plan"); + assert.strictEqual( + status.message, + "Configured to use Z.AI's Anthropic-compatible endpoint. 
Claude model tiers map to GLM models from your Claude settings.", + ); + assert.deepStrictEqual( + status.models.slice(0, 3).map((model) => model.name), + [ + "Claude Opus 4.6 (glm-5.1)", + "Claude Sonnet 4.6 (glm-5.1)", + "Claude Haiku 4.5 (glm-4.5-air)", + ], + ); + }).pipe( + Effect.provide( + mockSpawnerLayer((args) => { + const joined = args.join(" "); + if (joined === "--version") return { stdout: "1.0.0\n", stderr: "", code: 0 }; + throw new Error(`Auth probe should have been skipped but got args: ${joined}`); + }), + ), + ), + ); + it.effect("returns a display label for claude subscription types", () => Effect.gen(function* () { const status = yield* checkClaudeProviderStatus(() => Effect.succeed("maxplan")); diff --git a/apps/server/src/provider/Layers/ProviderRegistry.ts b/apps/server/src/provider/Layers/ProviderRegistry.ts index 3f9b049502..2d0648cc56 100644 --- a/apps/server/src/provider/Layers/ProviderRegistry.ts +++ b/apps/server/src/provider/Layers/ProviderRegistry.ts @@ -8,21 +8,14 @@ import { Effect, Equal, Layer, PubSub, Ref, Stream } from "effect"; import { ClaudeProviderLive } from "./ClaudeProvider"; import { CodexProviderLive } from "./CodexProvider"; -import { GlmProviderLive } from "./GlmProvider"; import type { ClaudeProviderShape } from "../Services/ClaudeProvider"; import { ClaudeProvider } from "../Services/ClaudeProvider"; import type { CodexProviderShape } from "../Services/CodexProvider"; import { CodexProvider } from "../Services/CodexProvider"; -import type { GlmProviderShape } from "../Services/GlmProvider"; -import { GlmProvider } from "../Services/GlmProvider"; import { ProviderRegistry, type ProviderRegistryShape } from "../Services/ProviderRegistry"; -const loadProviders = ( - codexProvider: CodexProviderShape, - claudeProvider: ClaudeProviderShape, - glmProvider: GlmProviderShape, -): Effect.Effect => - Effect.all([codexProvider.getSnapshot, claudeProvider.getSnapshot, glmProvider.getSnapshot], { +const loadProviders = 
(codexProvider: CodexProviderShape, claudeProvider: ClaudeProviderShape) => + Effect.all([codexProvider.getSnapshot, claudeProvider.getSnapshot], { concurrency: "unbounded", }); @@ -36,20 +29,19 @@ export const ProviderRegistryLive = Layer.effect( Effect.gen(function* () { const codexProvider = yield* CodexProvider; const claudeProvider = yield* ClaudeProvider; - const glmProvider = yield* GlmProvider; const changesPubSub = yield* Effect.acquireRelease( PubSub.unbounded>(), PubSub.shutdown, ); const providersRef = yield* Ref.make>( - yield* loadProviders(codexProvider, claudeProvider, glmProvider), + yield* loadProviders(codexProvider, claudeProvider), ); const syncProviders = Effect.fn("syncProviders")(function* (options?: { readonly publish?: boolean; }) { const previousProviders = yield* Ref.get(providersRef); - const providers = yield* loadProviders(codexProvider, claudeProvider, glmProvider); + const providers = yield* loadProviders(codexProvider, claudeProvider); yield* Ref.set(providersRef, providers); if (options?.publish !== false && haveProvidersChanged(previousProviders, providers)) { @@ -65,9 +57,6 @@ export const ProviderRegistryLive = Layer.effect( yield* Stream.runForEach(claudeProvider.streamChanges, () => syncProviders()).pipe( Effect.forkScoped, ); - yield* Stream.runForEach(glmProvider.streamChanges, () => syncProviders()).pipe( - Effect.forkScoped, - ); const refresh = Effect.fn("refresh")(function* (provider?: ProviderKind) { switch (provider) { @@ -77,11 +66,8 @@ export const ProviderRegistryLive = Layer.effect( case "claudeAgent": yield* claudeProvider.refresh; break; - case "glm": - yield* glmProvider.refresh; - break; default: - yield* Effect.all([codexProvider.refresh, claudeProvider.refresh, glmProvider.refresh], { + yield* Effect.all([codexProvider.refresh, claudeProvider.refresh], { concurrency: "unbounded", }); break; @@ -104,8 +90,4 @@ export const ProviderRegistryLive = Layer.effect( }, } satisfies ProviderRegistryShape; }), -).pipe( 
- Layer.provideMerge(CodexProviderLive), - Layer.provideMerge(ClaudeProviderLive), - Layer.provideMerge(GlmProviderLive), -); +).pipe(Layer.provideMerge(CodexProviderLive), Layer.provideMerge(ClaudeProviderLive)); diff --git a/apps/server/src/provider/Services/GlmAdapter.ts b/apps/server/src/provider/Services/GlmAdapter.ts deleted file mode 100644 index 96dc26e881..0000000000 --- a/apps/server/src/provider/Services/GlmAdapter.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { ServiceMap } from "effect"; -import type { ProviderAdapterError } from "../Errors.ts"; -import type { ProviderAdapterShape } from "./ProviderAdapter.ts"; - -export interface GlmAdapterShape extends ProviderAdapterShape { - readonly provider: "glm"; -} - -export class GlmAdapter extends ServiceMap.Service()( - "t3/provider/Services/GlmAdapter", -) {} diff --git a/apps/server/src/provider/Services/GlmProvider.ts b/apps/server/src/provider/Services/GlmProvider.ts deleted file mode 100644 index fa665db229..0000000000 --- a/apps/server/src/provider/Services/GlmProvider.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { ServiceMap } from "effect"; -import type { ServerProviderShape } from "./ServerProvider.ts"; - -export interface GlmProviderShape extends ServerProviderShape {} - -export class GlmProvider extends ServiceMap.Service()( - "t3/provider/Services/GlmProvider", -) {} diff --git a/apps/server/src/provider/Services/ProviderAdapterRegistry.ts b/apps/server/src/provider/Services/ProviderAdapterRegistry.ts index 490c4d3d14..68e5db6a40 100644 --- a/apps/server/src/provider/Services/ProviderAdapterRegistry.ts +++ b/apps/server/src/provider/Services/ProviderAdapterRegistry.ts @@ -38,5 +38,3 @@ export class ProviderAdapterRegistry extends ServiceMap.Service< ProviderAdapterRegistry, ProviderAdapterRegistryShape >()("t3/provider/Services/ProviderAdapterRegistry") {} - -// Dummy comment for workflow testing. 
diff --git a/apps/server/src/provider/codexLaunchConfig.test.ts b/apps/server/src/provider/codexLaunchConfig.test.ts deleted file mode 100644 index 363b11702f..0000000000 --- a/apps/server/src/provider/codexLaunchConfig.test.ts +++ /dev/null @@ -1,170 +0,0 @@ -import { describe, expect, it } from "vitest"; - -import { - buildCodexLaunchConfig, - buildCodexSpawnEnv, - configOverridesToArgs, - type BuildCodexLaunchConfigInput, -} from "./codexLaunchConfig.ts"; - -const defaultCodexSettings = { - enabled: true, - binaryPath: "/usr/local/bin/codex", - homePath: "/home/user/.codex", - customModels: [], -}; - -const defaultGlmSettings = { - enabled: true, - transport: "bridge" as const, - upstreamBaseUrl: "https://api.z.ai/api/coding/paas/v4", - customModels: [], -}; - -describe("buildCodexLaunchConfig", () => { - it("returns base config with no overrides for codex provider", () => { - const input: BuildCodexLaunchConfigInput = { - provider: "codex", - codexSettings: defaultCodexSettings, - }; - - const config = buildCodexLaunchConfig(input); - - expect(config.binaryPath).toBe("/usr/local/bin/codex"); - expect(config.homePath).toBe("/home/user/.codex"); - expect(config.configOverrides).toEqual([]); - expect(config.extraEnv).toEqual({}); - }); - - it("returns base config with no overrides for claudeAgent provider", () => { - const config = buildCodexLaunchConfig({ - provider: "claudeAgent", - codexSettings: defaultCodexSettings, - }); - - expect(config.configOverrides).toEqual([]); - }); - - it("falls back to 'codex' when binaryPath is empty", () => { - const config = buildCodexLaunchConfig({ - provider: "codex", - codexSettings: { ...defaultCodexSettings, binaryPath: "" }, - }); - - expect(config.binaryPath).toBe("codex"); - }); - - it("sets homePath to undefined when empty", () => { - const config = buildCodexLaunchConfig({ - provider: "codex", - codexSettings: { ...defaultCodexSettings, homePath: "" }, - }); - - expect(config.homePath).toBeUndefined(); - }); - - 
it("generates GLM provider overrides with bridge URL", () => { - const config = buildCodexLaunchConfig({ - provider: "glm", - codexSettings: defaultCodexSettings, - glmSettings: defaultGlmSettings, - glmBridgeBaseUrl: "http://127.0.0.1:9876/v1", - }); - - expect(config.configOverrides).toContain('model_provider="glm"'); - expect(config.configOverrides).toContain('model_providers.glm.name="GLM"'); - expect(config.configOverrides).toContain( - 'model_providers.glm.base_url="http://127.0.0.1:9876/v1"', - ); - expect(config.configOverrides).toContain('model_providers.glm.env_key="GLM_API_KEY"'); - expect(config.configOverrides).toContain('model_providers.glm.wire_api="responses"'); - }); - - it("uses upstream URL directly when transport is 'direct'", () => { - const config = buildCodexLaunchConfig({ - provider: "glm", - codexSettings: defaultCodexSettings, - glmSettings: { ...defaultGlmSettings, transport: "direct" as const }, - glmBridgeBaseUrl: "http://127.0.0.1:9876/v1", - }); - - expect(config.configOverrides).toContain( - 'model_providers.glm.base_url="https://api.z.ai/api/coding/paas/v4"', - ); - }); - - it("falls back to upstream URL when bridge URL is not provided", () => { - const config = buildCodexLaunchConfig({ - provider: "glm", - codexSettings: defaultCodexSettings, - glmSettings: defaultGlmSettings, - }); - - expect(config.configOverrides).toContain( - 'model_providers.glm.base_url="https://api.z.ai/api/coding/paas/v4"', - ); - }); - - it("returns base config when glm provider is selected but glmSettings is missing", () => { - const config = buildCodexLaunchConfig({ - provider: "glm", - codexSettings: defaultCodexSettings, - }); - - expect(config.configOverrides).toEqual([]); - }); -}); - -describe("buildCodexSpawnEnv", () => { - it("merges process.env with CODEX_HOME when homePath is set", () => { - const env = buildCodexSpawnEnv({ - binaryPath: "codex", - homePath: "/custom/home", - configOverrides: [], - extraEnv: {}, - }); - - 
expect(env.CODEX_HOME).toBe("/custom/home"); - }); - - it("does not set CODEX_HOME when homePath is undefined", () => { - const original = process.env.CODEX_HOME; - delete process.env.CODEX_HOME; - - const env = buildCodexSpawnEnv({ - binaryPath: "codex", - homePath: undefined, - configOverrides: [], - extraEnv: {}, - }); - - expect(env.CODEX_HOME).toBeUndefined(); - - if (original !== undefined) { - process.env.CODEX_HOME = original; - } - }); - - it("includes extraEnv entries", () => { - const env = buildCodexSpawnEnv({ - binaryPath: "codex", - homePath: undefined, - configOverrides: [], - extraEnv: { MY_VAR: "value" }, - }); - - expect(env.MY_VAR).toBe("value"); - }); -}); - -describe("configOverridesToArgs", () => { - it("returns empty array for no overrides", () => { - expect(configOverridesToArgs([])).toEqual([]); - }); - - it("flattens overrides into -c pairs", () => { - const args = configOverridesToArgs(['model_provider="glm"', 'model_providers.glm.name="GLM"']); - - expect(args).toEqual(["-c", 'model_provider="glm"', "-c", 'model_providers.glm.name="GLM"']); - }); -}); diff --git a/apps/server/src/provider/codexLaunchConfig.ts b/apps/server/src/provider/codexLaunchConfig.ts deleted file mode 100644 index 57ca11ed0e..0000000000 --- a/apps/server/src/provider/codexLaunchConfig.ts +++ /dev/null @@ -1,64 +0,0 @@ -import type { ProviderKind } from "@t3tools/contracts"; -import type { CodexSettings, GlmSettings } from "@t3tools/contracts/settings"; - -export interface CodexLaunchConfig { - readonly binaryPath: string; - readonly homePath: string | undefined; - readonly configOverrides: ReadonlyArray; - readonly extraEnv: Readonly>; -} - -export type CodexLaunchPurpose = "chat-session" | "git-text-generation" | "provider-probe"; - -export function buildCodexSpawnEnv(config: CodexLaunchConfig): Record { - return { - ...process.env, - ...(config.homePath ? 
{ CODEX_HOME: config.homePath } : {}), - ...config.extraEnv, - }; -} - -export function configOverridesToArgs(overrides: ReadonlyArray): string[] { - return overrides.flatMap((override) => ["-c", override]); -} - -export interface BuildCodexLaunchConfigInput { - readonly provider: ProviderKind; - readonly codexSettings: CodexSettings; - readonly glmSettings?: GlmSettings; - readonly glmBridgeBaseUrl?: string; -} - -export function buildCodexLaunchConfig(input: BuildCodexLaunchConfigInput): CodexLaunchConfig { - const { provider, codexSettings, glmSettings, glmBridgeBaseUrl } = input; - - const base: CodexLaunchConfig = { - binaryPath: codexSettings.binaryPath || "codex", - homePath: codexSettings.homePath || undefined, - configOverrides: [], - extraEnv: {}, - }; - - if (provider !== "glm" || !glmSettings) { - return base; - } - - const baseUrl = - glmSettings.transport === "bridge" && glmBridgeBaseUrl - ? glmBridgeBaseUrl - : glmSettings.upstreamBaseUrl; - - const configOverrides: string[] = [ - 'model_provider="glm"', - 'model_providers.glm.name="GLM"', - `model_providers.glm.base_url="${baseUrl}"`, - 'model_providers.glm.env_key="GLM_API_KEY"', - 'model_providers.glm.wire_api="responses"', - ]; - - return { - ...base, - configOverrides, - extraEnv: {}, - }; -} diff --git a/apps/server/src/provider/glmBridge/GlmBridgeLive.ts b/apps/server/src/provider/glmBridge/GlmBridgeLive.ts deleted file mode 100644 index 437ab15618..0000000000 --- a/apps/server/src/provider/glmBridge/GlmBridgeLive.ts +++ /dev/null @@ -1,278 +0,0 @@ -import { Data, Effect, Layer, Ref } from "effect"; -import * as http from "node:http"; - -import { ServerSettingsService } from "../../serverSettings.ts"; -import { GlmBridgeService, type GlmBridgeShape } from "./GlmBridgeService.ts"; -import { - translateResponsesToChatCompletions, - UnsupportedResponsesFeatureError, - type ResponsesRequest, -} from "./translateResponsesToGlm.ts"; -import { - GlmToResponsesTranslator, - formatResponsesSSE, - type 
ChatCompletionsChunk, -} from "./translateGlmToResponses.ts"; - -class GlmBridgeStartError extends Data.TaggedError("GlmBridgeStartError")<{ - readonly cause: unknown; -}> {} - -function readRequestBody(req: http.IncomingMessage): Promise { - return new Promise((resolve, reject) => { - const chunks: Buffer[] = []; - req.on("data", (chunk: Buffer) => chunks.push(chunk)); - req.on("end", () => resolve(Buffer.concat(chunks).toString("utf-8"))); - req.on("error", reject); - }); -} - -function jsonResponse( - res: http.ServerResponse, - status: number, - body: Record, -): void { - res.writeHead(status, { "Content-Type": "application/json" }); - res.end(JSON.stringify(body)); -} - -async function handleResponsesRequest( - req: http.IncomingMessage, - res: http.ServerResponse, - upstreamBaseUrl: string, -): Promise { - let responsesReq: ResponsesRequest; - try { - const bodyText = await readRequestBody(req); - responsesReq = JSON.parse(bodyText) as ResponsesRequest; - } catch { - jsonResponse(res, 400, { error: { message: "Invalid JSON request body" } }); - return; - } - - let chatReq; - try { - chatReq = translateResponsesToChatCompletions(responsesReq); - } catch (err) { - if (err instanceof UnsupportedResponsesFeatureError) { - jsonResponse(res, 400, { error: { message: err.message } }); - return; - } - jsonResponse(res, 500, { - error: { message: "Bridge translation error", detail: String(err) }, - }); - return; - } - - const apiKey = process.env.GLM_API_KEY; - if (!apiKey) { - jsonResponse(res, 401, { - error: { message: "GLM_API_KEY environment variable is not set" }, - }); - return; - } - - const upstreamUrl = `${upstreamBaseUrl.replace(/\/+$/, "")}/chat/completions`; - const abortController = new AbortController(); - - req.on("close", () => abortController.abort()); - - let upstreamRes: Response; - try { - upstreamRes = await fetch(upstreamUrl, { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${apiKey}`, - Accept: 
"text/event-stream", - }, - body: JSON.stringify(chatReq), - signal: abortController.signal, - }); - } catch (err) { - if (abortController.signal.aborted) return; - jsonResponse(res, 502, { - error: { - message: "Failed to connect to upstream GLM API", - detail: String(err), - upstream_url: upstreamUrl, - }, - }); - return; - } - - if (!upstreamRes.ok) { - let errorBody = ""; - try { - errorBody = await upstreamRes.text(); - } catch {} - jsonResponse(res, upstreamRes.status, { - error: { - message: `Upstream GLM API returned ${upstreamRes.status}`, - detail: errorBody, - upstream_url: upstreamUrl, - }, - }); - return; - } - - res.writeHead(200, { - "Content-Type": "text/event-stream", - "Cache-Control": "no-cache", - Connection: "keep-alive", - }); - - const responseId = `resp_glm_${Date.now()}`; - const translator = new GlmToResponsesTranslator(responseId); - - res.write( - formatResponsesSSE({ - event: "response.created", - data: { response: { id: responseId, status: "in_progress" } }, - }), - ); - - const reader = upstreamRes.body?.getReader(); - if (!reader) { - res.write( - formatResponsesSSE({ - event: "response.completed", - data: { response: { id: responseId, status: "failed" } }, - }), - ); - res.end(); - return; - } - - const decoder = new TextDecoder(); - let buffer = ""; - - try { - while (true) { - const { done, value } = await reader.read(); - if (done) break; - - buffer += decoder.decode(value, { stream: true }); - const lines = buffer.split("\n"); - buffer = lines.pop() ?? 
""; - - for (const line of lines) { - const trimmed = line.trim(); - if (!trimmed || !trimmed.startsWith("data:")) continue; - - const dataStr = trimmed.slice(5).trim(); - if (dataStr === "[DONE]") continue; - - let chunk: ChatCompletionsChunk; - try { - chunk = JSON.parse(dataStr) as ChatCompletionsChunk; - } catch { - continue; - } - - const events = translator.translateChunk(chunk); - for (const event of events) { - res.write(formatResponsesSSE(event)); - } - } - } - } catch (err) { - if (!abortController.signal.aborted) { - res.write( - formatResponsesSSE({ - event: "response.completed", - data: { - response: { - id: responseId, - status: "failed", - error: { message: String(err) }, - }, - }, - }), - ); - } - } finally { - res.end(); - } -} - -async function handleModelsRequest( - res: http.ServerResponse, - upstreamBaseUrl: string, -): Promise { - const apiKey = process.env.GLM_API_KEY; - if (!apiKey) { - jsonResponse(res, 401, { - error: { message: "GLM_API_KEY environment variable is not set" }, - }); - return; - } - - const upstreamUrl = `${upstreamBaseUrl.replace(/\/+$/, "")}/models`; - try { - const upstreamRes = await fetch(upstreamUrl, { - headers: { Authorization: `Bearer ${apiKey}` }, - }); - const body = await upstreamRes.text(); - res.writeHead(upstreamRes.status, { - "Content-Type": "application/json", - }); - res.end(body); - } catch (err) { - jsonResponse(res, 502, { - error: { message: "Failed to proxy models request", detail: String(err) }, - }); - } -} - -export const GlmBridgeLive = Layer.effect( - GlmBridgeService, - Effect.gen(function* () { - const settingsService = yield* ServerSettingsService; - const settings = yield* settingsService.getSettings; - let currentUpstreamBaseUrl = settings.providers.glm.upstreamBaseUrl; - const baseUrlRef = yield* Ref.make(""); - - const server = http.createServer(async (req, res) => { - const url = new URL(req.url ?? 
"/", `http://${req.headers.host}`); - const pathname = url.pathname; - - if (req.method === "GET" && pathname === "/health") { - jsonResponse(res, 200, { status: "ok" }); - return; - } - - if (req.method === "POST" && pathname === "/v1/responses") { - await handleResponsesRequest(req, res, currentUpstreamBaseUrl); - return; - } - - if (req.method === "GET" && pathname === "/v1/models") { - await handleModelsRequest(res, currentUpstreamBaseUrl); - return; - } - - jsonResponse(res, 404, { error: { message: `Not found: ${pathname}` } }); - }); - - yield* Effect.tryPromise({ - try: () => - new Promise((resolve, reject) => { - server.listen(0, "127.0.0.1", () => resolve()); - server.on("error", (err) => reject(err)); - }), - catch: (err) => new GlmBridgeStartError({ cause: err }), - }); - - const address = server.address(); - const port = address && typeof address === "object" ? address.port : 0; - const bridgeBaseUrl = `http://127.0.0.1:${port}/v1`; - yield* Ref.set(baseUrlRef, bridgeBaseUrl); - - yield* Effect.log(`GLM bridge started on ${bridgeBaseUrl}`); - - return { - baseUrl: Ref.get(baseUrlRef), - } satisfies GlmBridgeShape; - }), -); diff --git a/apps/server/src/provider/glmBridge/GlmBridgeService.ts b/apps/server/src/provider/glmBridge/GlmBridgeService.ts deleted file mode 100644 index ba981d367c..0000000000 --- a/apps/server/src/provider/glmBridge/GlmBridgeService.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { ServiceMap } from "effect"; -import type { Effect } from "effect"; - -export interface GlmBridgeShape { - readonly baseUrl: Effect.Effect; -} - -export class GlmBridgeService extends ServiceMap.Service()( - "t3/provider/glmBridge/GlmBridgeService", -) {} diff --git a/apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts b/apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts deleted file mode 100644 index 1f2ecfd8bb..0000000000 --- a/apps/server/src/provider/glmBridge/translateGlmToResponses.test.ts +++ /dev/null @@ -1,329 +0,0 @@ 
-import { describe, expect, it } from "vitest"; - -import { - GlmToResponsesTranslator, - formatResponsesSSE, - type ChatCompletionsChunk, -} from "./translateGlmToResponses.ts"; - -function makeChunk(overrides: Partial = {}): ChatCompletionsChunk { - return { - id: "chatcmpl-1", - object: "chat.completion.chunk", - model: "glm-5.1", - choices: [{ index: 0, delta: {}, finish_reason: null }], - ...overrides, - }; -} - -describe("GlmToResponsesTranslator", () => { - it("emits output_item.added on the first text delta", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - const chunk = makeChunk({ - choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }], - }); - - const events = translator.translateChunk(chunk); - - expect(events).toHaveLength(2); - expect(events[0]!.event).toBe("response.output_item.added"); - expect(events[1]!.event).toBe("response.output_text.delta"); - expect(events[1]!.data.delta).toBe("Hello"); - }); - - it("does not emit output_item.added on subsequent text deltas", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }], - }), - ); - - const events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: { content: " world" }, finish_reason: null }], - }), - ); - - expect(events).toHaveLength(1); - expect(events[0]!.event).toBe("response.output_text.delta"); - expect(events[0]!.data.delta).toBe(" world"); - }); - - it("emits completion events on finish_reason", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: { content: "Hi" }, finish_reason: null }], - }), - ); - - const events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: {}, finish_reason: "stop" }], - }), - ); - - const eventTypes = events.map((e) => e.event); - 
expect(eventTypes).toEqual([ - "response.output_text.done", - "response.output_item.done", - "response.completed", - ]); - }); - - it("includes usage in response.completed when available", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: { content: "Hi" }, finish_reason: null }], - }), - ); - - const events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: {}, finish_reason: "stop" }], - usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }, - }), - ); - - const completed = events.find((e) => e.event === "response.completed"); - expect(completed).toBeDefined(); - const response = completed!.data.response as Record; - expect(response.status).toBe("completed"); - const usage = response.usage as Record; - expect(usage.input_tokens).toBe(10); - expect(usage.output_tokens).toBe(5); - expect(usage.total_tokens).toBe(15); - }); - - it("accumulates tool call deltas and flushes on finish", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - translator.translateChunk( - makeChunk({ - choices: [ - { - index: 0, - delta: { - tool_calls: [ - { - index: 0, - id: "call_abc", - type: "function", - function: { name: "read_file", arguments: "" }, - }, - ], - }, - finish_reason: null, - }, - ], - }), - ); - - translator.translateChunk( - makeChunk({ - choices: [ - { - index: 0, - delta: { - tool_calls: [{ index: 0, function: { arguments: '{"path":' } }], - }, - finish_reason: null, - }, - ], - }), - ); - - translator.translateChunk( - makeChunk({ - choices: [ - { - index: 0, - delta: { - tool_calls: [{ index: 0, function: { arguments: '"main.ts"}' } }], - }, - finish_reason: null, - }, - ], - }), - ); - - const events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], - }), - ); - - const addedEvent = events.find((e) => e.event === "response.output_item.added"); - 
expect(addedEvent).toBeDefined(); - const item = addedEvent!.data.item as Record; - expect(item.type).toBe("function_call"); - expect(item.name).toBe("read_file"); - expect(item.arguments).toBe('{"path":"main.ts"}'); - expect(item.id).toBe("call_abc"); - - const doneEvent = events.find((e) => e.event === "response.output_item.done"); - expect(doneEvent).toBeDefined(); - - const completedEvent = events.find((e) => e.event === "response.completed"); - expect(completedEvent).toBeDefined(); - }); - - it("handles text followed by tool calls", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: { content: "I'll read the file." }, finish_reason: null }], - }), - ); - - translator.translateChunk( - makeChunk({ - choices: [ - { - index: 0, - delta: { - tool_calls: [ - { - index: 0, - id: "call_1", - type: "function", - function: { name: "read_file", arguments: '{"path":"a.ts"}' }, - }, - ], - }, - finish_reason: null, - }, - ], - }), - ); - - const events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], - }), - ); - - const eventTypes = events.map((e) => e.event); - expect(eventTypes).toContain("response.output_text.done"); - expect(eventTypes).toContain("response.output_item.done"); - expect(eventTypes).toContain("response.output_item.added"); - expect(eventTypes).toContain("response.completed"); - }); - - it("handles multiple parallel tool calls", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - translator.translateChunk( - makeChunk({ - choices: [ - { - index: 0, - delta: { - tool_calls: [ - { - index: 0, - id: "call_1", - type: "function", - function: { name: "read_file", arguments: '{"path":"a.ts"}' }, - }, - { - index: 1, - id: "call_2", - type: "function", - function: { name: "read_file", arguments: '{"path":"b.ts"}' }, - }, - ], - }, - finish_reason: null, - }, - ], - }), - ); - - const 
events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], - }), - ); - - const addedEvents = events.filter((e) => e.event === "response.output_item.added"); - expect(addedEvents).toHaveLength(2); - - const names = addedEvents.map((e) => (e.data.item as Record).name); - expect(names).toContain("read_file"); - }); - - it("returns empty array for chunks with no content or tool calls", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - const events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }], - }), - ); - - expect(events).toHaveLength(0); - }); - - it("returns empty array when choices is empty", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - const events = translator.translateChunk(makeChunk({ choices: [] })); - - expect(events).toHaveLength(0); - }); - - it("emits response.completed without text events when no text was streamed", () => { - const translator = new GlmToResponsesTranslator("resp_1"); - - translator.translateChunk( - makeChunk({ - choices: [ - { - index: 0, - delta: { - tool_calls: [ - { - index: 0, - id: "call_1", - type: "function", - function: { name: "exec", arguments: '{"cmd":"ls"}' }, - }, - ], - }, - finish_reason: null, - }, - ], - }), - ); - - const events = translator.translateChunk( - makeChunk({ - choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], - }), - ); - - const eventTypes = events.map((e) => e.event); - expect(eventTypes).not.toContain("response.output_text.done"); - expect(eventTypes).toContain("response.output_item.added"); - expect(eventTypes).toContain("response.output_item.done"); - expect(eventTypes).toContain("response.completed"); - }); -}); - -describe("formatResponsesSSE", () => { - it("formats an event as SSE wire format", () => { - const result = formatResponsesSSE({ - event: "response.output_text.delta", - data: { delta: "Hello" 
}, - }); - - expect(result).toBe('event: response.output_text.delta\ndata: {"delta":"Hello"}\n\n'); - }); -}); diff --git a/apps/server/src/provider/glmBridge/translateGlmToResponses.ts b/apps/server/src/provider/glmBridge/translateGlmToResponses.ts deleted file mode 100644 index e19bfbeaf7..0000000000 --- a/apps/server/src/provider/glmBridge/translateGlmToResponses.ts +++ /dev/null @@ -1,168 +0,0 @@ -export interface ChatCompletionsChunk { - id: string; - object: "chat.completion.chunk"; - model: string; - choices: ChatCompletionsChunkChoice[]; - usage?: ChatCompletionsUsage | null; -} - -interface ChatCompletionsChunkChoice { - index: number; - delta: ChatCompletionsDelta; - finish_reason: string | null; -} - -interface ChatCompletionsDelta { - role?: "assistant"; - content?: string | null; - tool_calls?: ChatCompletionsToolCallDelta[]; -} - -interface ChatCompletionsToolCallDelta { - index: number; - id?: string; - type?: "function"; - function?: { - name?: string; - arguments?: string; - }; -} - -export interface ChatCompletionsUsage { - prompt_tokens: number; - completion_tokens: number; - total_tokens: number; -} - -export interface ResponsesSSEEvent { - event: string; - data: Record; -} - -export class GlmToResponsesTranslator { - private readonly responseId: string; - private outputIndex = 0; - private pendingToolCalls = new Map(); - private emittedItemStartForText = false; - - constructor(responseId: string) { - this.responseId = responseId; - } - - translateChunk(chunk: ChatCompletionsChunk): ResponsesSSEEvent[] { - const events: ResponsesSSEEvent[] = []; - const choice = chunk.choices[0]; - if (!choice) return events; - - const { delta, finish_reason } = choice; - - if (delta.content) { - if (!this.emittedItemStartForText) { - events.push({ - event: "response.output_item.added", - data: { - output_index: this.outputIndex, - item: { - type: "message", - role: "assistant", - content: [{ type: "output_text", text: "" }], - }, - }, - }); - 
this.emittedItemStartForText = true; - } - - events.push({ - event: "response.output_text.delta", - data: { - output_index: this.outputIndex, - content_index: 0, - delta: delta.content, - }, - }); - } - - if (delta.tool_calls) { - for (const tc of delta.tool_calls) { - let pending = this.pendingToolCalls.get(tc.index); - if (!pending) { - pending = { id: tc.id ?? "", name: "", arguments: "" }; - this.pendingToolCalls.set(tc.index, pending); - } - if (tc.id) pending.id = tc.id; - if (tc.function?.name) pending.name += tc.function.name; - if (tc.function?.arguments) pending.arguments += tc.function.arguments; - } - } - - if (finish_reason) { - if (this.emittedItemStartForText) { - events.push({ - event: "response.output_text.done", - data: { output_index: this.outputIndex, content_index: 0 }, - }); - events.push({ - event: "response.output_item.done", - data: { output_index: this.outputIndex }, - }); - this.outputIndex++; - } - - for (const [, toolCall] of this.pendingToolCalls) { - events.push({ - event: "response.output_item.added", - data: { - output_index: this.outputIndex, - item: { - type: "function_call", - id: toolCall.id, - name: toolCall.name, - arguments: toolCall.arguments, - }, - }, - }); - events.push({ - event: "response.output_item.done", - data: { - output_index: this.outputIndex, - item: { - type: "function_call", - id: toolCall.id, - name: toolCall.name, - arguments: toolCall.arguments, - }, - }, - }); - this.outputIndex++; - } - this.pendingToolCalls.clear(); - - const usage = chunk.usage; - - events.push({ - event: "response.completed", - data: { - response: { - id: this.responseId, - status: "completed", - ...(usage - ? 
{ - usage: { - input_tokens: usage.prompt_tokens, - output_tokens: usage.completion_tokens, - total_tokens: usage.total_tokens, - }, - } - : {}), - }, - }, - }); - } - - return events; - } -} - -export function formatResponsesSSE(event: ResponsesSSEEvent): string { - return `event: ${event.event}\ndata: ${JSON.stringify(event.data)}\n\n`; -} diff --git a/apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts b/apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts deleted file mode 100644 index a137bf55ac..0000000000 --- a/apps/server/src/provider/glmBridge/translateResponsesToGlm.test.ts +++ /dev/null @@ -1,309 +0,0 @@ -import { describe, expect, it } from "vitest"; - -import { - translateResponsesToChatCompletions, - UnsupportedResponsesFeatureError, - type ResponsesRequest, -} from "./translateResponsesToGlm.ts"; - -describe("translateResponsesToChatCompletions", () => { - it("translates a simple text-only request", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "Hello" }], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - - expect(result.model).toBe("glm-5.1"); - expect(result.stream).toBe(true); - expect(result.messages).toEqual([{ role: "user", content: "Hello" }]); - }); - - it("prepends system instructions as a system message", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "Write code" }], - instructions: "You are a coding assistant.", - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - - expect(result.messages[0]).toEqual({ - role: "system", - content: "You are a coding assistant.", - }); - expect(result.messages[1]).toEqual({ role: "user", content: "Write code" }); - }); - - it("translates a multi-turn conversation", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [ - { role: "user", content: "What is 2+2?" 
}, - { role: "assistant", content: "4" }, - { role: "user", content: "And 3+3?" }, - ], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - - expect(result.messages).toEqual([ - { role: "user", content: "What is 2+2?" }, - { role: "assistant", content: "4" }, - { role: "user", content: "And 3+3?" }, - ]); - }); - - it("translates function tools", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "List files" }], - tools: [ - { - type: "function", - name: "list_files", - description: "List files in a directory", - parameters: { type: "object", properties: { path: { type: "string" } } }, - }, - ], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - - expect(result.tools).toEqual([ - { - type: "function", - function: { - name: "list_files", - description: "List files in a directory", - parameters: { type: "object", properties: { path: { type: "string" } } }, - }, - }, - ]); - }); - - it("translates tool_choice as a string", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - tool_choice: "auto", - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.tool_choice).toBe("auto"); - }); - - it("translates tool_choice as a named function", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - tool_choice: { type: "function", name: "read_file" }, - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.tool_choice).toEqual({ - type: "function", - function: { name: "read_file" }, - }); - }); - - it("translates function_call_output items to tool role messages", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [ - { role: "user", content: "List files" }, - { - role: "assistant", - content: [ - { - type: "function_call", - id: "call_123", - name: 
"list_files", - arguments: '{"path":"."}', - }, - ], - }, - { type: "function_call_output", call_id: "call_123", output: "file1.ts\nfile2.ts" }, - ], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - - expect(result.messages[1]).toEqual({ - role: "assistant", - content: null, - tool_calls: [ - { - id: "call_123", - type: "function", - function: { name: "list_files", arguments: '{"path":"."}' }, - }, - ], - }); - expect(result.messages[2]).toEqual({ - role: "tool", - tool_call_id: "call_123", - content: "file1.ts\nfile2.ts", - }); - }); - - it("extracts text from input_text content parts", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [ - { - role: "user", - content: [ - { type: "input_text", text: "part one " }, - { type: "input_text", text: "part two" }, - ], - }, - ], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.messages[0]).toEqual({ role: "user", content: "part one part two" }); - }); - - it("combines assistant text and tool calls from content parts", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [ - { - role: "assistant", - content: [ - { type: "output_text", text: "I'll read the file." 
}, - { - type: "function_call", - id: "call_abc", - name: "read_file", - arguments: '{"path":"main.ts"}', - }, - ], - }, - ], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - - expect(result.messages[0]).toEqual({ - role: "assistant", - content: "I'll read the file.", - tool_calls: [ - { - id: "call_abc", - type: "function", - function: { name: "read_file", arguments: '{"path":"main.ts"}' }, - }, - ], - }); - }); - - it("forwards temperature and max_output_tokens", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - temperature: 0.7, - max_output_tokens: 4096, - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.temperature).toBe(0.7); - expect(result.max_tokens).toBe(4096); - }); - - it("omits optional fields when not provided", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.tools).toBeUndefined(); - expect(result.tool_choice).toBeUndefined(); - expect(result.temperature).toBeUndefined(); - expect(result.max_tokens).toBeUndefined(); - }); - - it("defaults stream to true when not specified", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.stream).toBe(true); - }); - - it("includes stream_options with include_usage", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.stream_options).toEqual({ include_usage: true }); - }); - - it("preserves tool strict mode when present", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - 
tools: [ - { - type: "function", - name: "exec", - strict: true, - parameters: { type: "object" }, - }, - ], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.tools![0]!.function.strict).toBe(true); - }); - - it("handles inline system messages in the input array", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [ - { role: "system", content: "Be brief." }, - { role: "user", content: "Hello" }, - ], - stream: true, - }; - - const result = translateResponsesToChatCompletions(req); - expect(result.messages).toEqual([ - { role: "system", content: "Be brief." }, - { role: "user", content: "Hello" }, - ]); - }); - - it("throws UnsupportedResponsesFeatureError for non-function tool types", () => { - const req: ResponsesRequest = { - model: "glm-5.1", - input: [{ role: "user", content: "test" }], - tools: [{ type: "web_search" as any, name: "search" }], - stream: true, - }; - - expect(() => translateResponsesToChatCompletions(req)).toThrow( - UnsupportedResponsesFeatureError, - ); - }); -}); diff --git a/apps/server/src/provider/glmBridge/translateResponsesToGlm.ts b/apps/server/src/provider/glmBridge/translateResponsesToGlm.ts deleted file mode 100644 index b5b66ac51c..0000000000 --- a/apps/server/src/provider/glmBridge/translateResponsesToGlm.ts +++ /dev/null @@ -1,192 +0,0 @@ -export interface ResponsesRequest { - model: string; - input: ResponsesInput[]; - instructions?: string; - tools?: ResponsesTool[]; - tool_choice?: string | { type: string; name?: string }; - parallel_tool_calls?: boolean; - stream?: boolean; - temperature?: number; - max_output_tokens?: number; - reasoning?: { effort?: string }; -} - -export type ResponsesInput = - | { role: "user"; content: string | ResponsesContentPart[] } - | { role: "assistant"; content: string | ResponsesContentPart[] } - | { role: "system"; content: string } - | ResponsesFunctionCallOutput; - -interface ResponsesFunctionCallOutput { - type: 
"function_call_output"; - call_id: string; - output: string; -} - -type ResponsesContentPart = - | { type: "input_text"; text: string } - | { type: "output_text"; text: string } - | { type: "function_call"; id: string; name: string; arguments: string } - | { type: "text"; text: string }; - -interface ResponsesTool { - type: "function"; - name: string; - description?: string; - parameters?: Record; - strict?: boolean; -} - -export interface ChatCompletionsRequest { - model: string; - messages: ChatMessage[]; - tools?: ChatTool[]; - tool_choice?: string | { type: string; function?: { name: string } }; - stream: boolean; - temperature?: number; - max_tokens?: number; - stream_options?: { include_usage: boolean }; -} - -export interface ChatMessage { - role: "system" | "user" | "assistant" | "tool"; - content?: string | null; - tool_calls?: ChatToolCall[]; - tool_call_id?: string; -} - -interface ChatTool { - type: "function"; - function: { - name: string; - description?: string | undefined; - parameters?: Record | undefined; - strict?: boolean | undefined; - }; -} - -interface ChatToolCall { - id: string; - type: "function"; - function: { name: string; arguments: string }; -} - -export class UnsupportedResponsesFeatureError extends Error { - constructor(feature: string) { - super(`Unsupported Responses API feature for GLM bridge: ${feature}`); - this.name = "UnsupportedResponsesFeatureError"; - } -} - -export function translateResponsesToChatCompletions(req: ResponsesRequest): ChatCompletionsRequest { - const messages: ChatMessage[] = []; - - if (req.instructions) { - messages.push({ role: "system", content: req.instructions }); - } - - for (const item of req.input) { - if ("type" in item && item.type === "function_call_output") { - messages.push({ - role: "tool", - tool_call_id: item.call_id, - content: item.output, - }); - continue; - } - - const msg = item as Exclude; - - if (msg.role === "system") { - messages.push({ role: "system", content: msg.content as string 
}); - continue; - } - - if (msg.role === "user") { - const content = extractTextContent(msg.content); - messages.push({ role: "user", content }); - continue; - } - - if (msg.role === "assistant") { - const parts = Array.isArray(msg.content) ? msg.content : []; - const textParts: string[] = []; - const toolCalls: ChatToolCall[] = []; - - if (typeof msg.content === "string") { - textParts.push(msg.content); - } else { - for (const part of parts) { - if (part.type === "output_text" || part.type === "text") { - textParts.push(part.text); - } else if (part.type === "function_call") { - toolCalls.push({ - id: part.id, - type: "function", - function: { name: part.name, arguments: part.arguments }, - }); - } - } - } - - const assistantMsg: ChatMessage = { - role: "assistant", - content: textParts.length > 0 ? textParts.join("") : null, - }; - if (toolCalls.length > 0) { - assistantMsg.tool_calls = toolCalls; - } - messages.push(assistantMsg); - continue; - } - } - - const tools: ChatTool[] | undefined = req.tools?.map((tool): ChatTool => { - if (tool.type !== "function") { - throw new UnsupportedResponsesFeatureError(`tool type "${tool.type}"`); - } - return { - type: "function", - function: { - name: tool.name, - description: tool.description, - parameters: tool.parameters, - ...(tool.strict !== undefined ? { strict: tool.strict } : {}), - }, - }; - }); - - let toolChoice: ChatCompletionsRequest["tool_choice"]; - if (req.tool_choice !== undefined) { - if (typeof req.tool_choice === "string") { - toolChoice = req.tool_choice; - } else if (req.tool_choice.type === "function" && req.tool_choice.name) { - toolChoice = { - type: "function", - function: { name: req.tool_choice.name }, - }; - } - } - - return { - model: req.model, - messages, - ...(tools && tools.length > 0 ? { tools } : {}), - ...(toolChoice !== undefined ? { tool_choice: toolChoice } : {}), - stream: req.stream !== false, - ...(req.temperature !== undefined ? 
{ temperature: req.temperature } : {}), - ...(req.max_output_tokens !== undefined ? { max_tokens: req.max_output_tokens } : {}), - stream_options: { include_usage: true }, - }; -} - -function extractTextContent(content: string | ResponsesContentPart[]): string { - if (typeof content === "string") return content; - return content - .filter( - (p): p is { type: "input_text" | "text"; text: string } => - p.type === "input_text" || p.type === "text", - ) - .map((p) => p.text) - .join(""); -} diff --git a/apps/server/src/provider/providerSnapshot.ts b/apps/server/src/provider/providerSnapshot.ts index 4c80d78e20..766e5fae97 100644 --- a/apps/server/src/provider/providerSnapshot.ts +++ b/apps/server/src/provider/providerSnapshot.ts @@ -131,6 +131,7 @@ export function buildServerProvider(input: { enabled: boolean; checkedAt: string; models: ReadonlyArray; + displayName?: string; probe: ProviderProbeResult; }): ServerProvider { return { @@ -138,6 +139,7 @@ export function buildServerProvider(input: { enabled: input.enabled, installed: input.probe.installed, version: input.probe.version, + ...(input.displayName ? { displayName: input.displayName } : {}), status: input.enabled ? 
input.probe.status : "disabled", auth: input.probe.auth, checkedAt: input.checkedAt, diff --git a/apps/server/src/server.ts b/apps/server/src/server.ts index 4d37b26273..1d6f6ac66e 100644 --- a/apps/server/src/server.ts +++ b/apps/server/src/server.ts @@ -19,7 +19,6 @@ import { ProviderSessionDirectoryLive } from "./provider/Layers/ProviderSessionD import { ProviderSessionRuntimeRepositoryLive } from "./persistence/Layers/ProviderSessionRuntime"; import { makeCodexAdapterLive } from "./provider/Layers/CodexAdapter"; import { makeClaudeAdapterLive } from "./provider/Layers/ClaudeAdapter"; -import { makeGlmAdapterLive } from "./provider/Layers/GlmAdapter"; import { ProviderAdapterRegistryLive } from "./provider/Layers/ProviderAdapterRegistry"; import { makeProviderServiceLive } from "./provider/Layers/ProviderService"; import { OrchestrationEngineLive } from "./orchestration/Layers/OrchestrationEngine"; @@ -150,13 +149,9 @@ const ProviderLayerLive = Layer.unwrap( const claudeAdapterLayer = makeClaudeAdapterLive( nativeEventLogger ? { nativeEventLogger } : undefined, ); - const glmAdapterLayer = makeGlmAdapterLive( - nativeEventLogger ? { nativeEventLogger } : undefined, - ).pipe(Layer.provide(codexAdapterLayer)); const adapterRegistryLayer = ProviderAdapterRegistryLive.pipe( Layer.provide(codexAdapterLayer), Layer.provide(claudeAdapterLayer), - Layer.provide(glmAdapterLayer), Layer.provideMerge(providerSessionDirectoryLayer), ); return makeProviderServiceLive( diff --git a/apps/web/src/components/ChatView.tsx b/apps/web/src/components/ChatView.tsx index 6382151896..0c6de8e34b 100644 --- a/apps/web/src/components/ChatView.tsx +++ b/apps/web/src/components/ChatView.tsx @@ -1408,7 +1408,6 @@ export default function ChatView({ threadId }: ChatViewProps) { codex: providerStatuses.find((provider) => provider.provider === "codex")?.models ?? [], claudeAgent: providerStatuses.find((provider) => provider.provider === "claudeAgent")?.models ?? 
[], - glm: providerStatuses.find((provider) => provider.provider === "glm")?.models ?? [], }), [providerStatuses], ); @@ -1423,17 +1422,22 @@ export default function ChatView({ threadId }: ChatViewProps) { AVAILABLE_PROVIDER_OPTIONS.filter( (option) => lockedProvider === null || option.value === lockedProvider, ).flatMap((option) => - modelOptionsByProvider[option.value].map(({ slug, name }) => ({ - provider: option.value, - providerLabel: option.label, - slug, - name, - searchSlug: slug.toLowerCase(), - searchName: name.toLowerCase(), - searchProvider: option.label.toLowerCase(), - })), + modelOptionsByProvider[option.value].map(({ slug, name }) => { + const providerLabel = + providerStatuses.find((provider) => provider.provider === option.value)?.displayName ?? + option.label; + return { + provider: option.value, + providerLabel, + slug, + name, + searchSlug: slug.toLowerCase(), + searchName: name.toLowerCase(), + searchProvider: providerLabel.toLowerCase(), + }; + }), ), - [lockedProvider, modelOptionsByProvider], + [lockedProvider, modelOptionsByProvider, providerStatuses], ); const workspaceEntriesQuery = useQuery( projectSearchEntriesQueryOptions({ diff --git a/apps/web/src/components/KeybindingsToast.browser.tsx b/apps/web/src/components/KeybindingsToast.browser.tsx index 951ff919b1..fbbf9782b6 100644 --- a/apps/web/src/components/KeybindingsToast.browser.tsx +++ b/apps/web/src/components/KeybindingsToast.browser.tsx @@ -80,12 +80,6 @@ function createBaseServerConfig(): ServerConfig { providers: { codex: { enabled: true, binaryPath: "", homePath: "", customModels: [] }, claudeAgent: { enabled: true, binaryPath: "", customModels: [] }, - glm: { - enabled: false, - transport: "bridge" as const, - upstreamBaseUrl: "https://api.z.ai/api/coding/paas/v4", - customModels: [], - }, }, }, }; diff --git a/apps/web/src/components/chat/ProviderModelPicker.tsx b/apps/web/src/components/chat/ProviderModelPicker.tsx index 03c192f7c1..a3611b8350 100644 --- 
a/apps/web/src/components/chat/ProviderModelPicker.tsx +++ b/apps/web/src/components/chat/ProviderModelPicker.tsx @@ -18,7 +18,7 @@ import { MenuSubTrigger, MenuTrigger, } from "../ui/menu"; -import { ClaudeAI, CursorIcon, Gemini, GlmIcon, Icon, OpenAI, OpenCodeIcon } from "../Icons"; +import { ClaudeAI, CursorIcon, Gemini, Icon, OpenAI, OpenCodeIcon } from "../Icons"; import { cn } from "~/lib/utils"; import { getProviderSnapshot } from "../../providerModels"; @@ -33,7 +33,6 @@ function isAvailableProviderOption(option: (typeof PROVIDER_OPTIONS)[number]): o const PROVIDER_ICON_BY_PROVIDER: Record = { codex: OpenAI, claudeAgent: ClaudeAI, - glm: GlmIcon, cursor: CursorIcon, }; @@ -152,6 +151,7 @@ export const ProviderModelPicker = memo(function ProviderModelPicker(props: { const liveProvider = props.providers ? getProviderSnapshot(props.providers, option.value) : undefined; + const optionLabel = liveProvider?.displayName ?? option.label; if (liveProvider && liveProvider.status !== "ready") { const unavailableLabel = !liveProvider.enabled ? "Disabled" @@ -167,7 +167,7 @@ export const ProviderModelPicker = memo(function ProviderModelPicker(props: { providerIconClassName(option.value, "text-muted-foreground/85"), )} /> - {option.label} + {optionLabel} {unavailableLabel} @@ -184,7 +184,7 @@ export const ProviderModelPicker = memo(function ProviderModelPicker(props: { providerIconClassName(option.value, "text-muted-foreground/85"), )} /> - {option.label} + {optionLabel} diff --git a/apps/web/src/components/chat/ProviderStatusBanner.tsx b/apps/web/src/components/chat/ProviderStatusBanner.tsx index e709e75da3..8581781b5d 100644 --- a/apps/web/src/components/chat/ProviderStatusBanner.tsx +++ b/apps/web/src/components/chat/ProviderStatusBanner.tsx @@ -12,7 +12,8 @@ export const ProviderStatusBanner = memo(function ProviderStatusBanner({ return null; } - const providerLabel = PROVIDER_DISPLAY_NAMES[status.provider] ?? 
status.provider; + const providerLabel = + status.displayName ?? PROVIDER_DISPLAY_NAMES[status.provider] ?? status.provider; const defaultMessage = status.status === "error" ? `${providerLabel} provider is unavailable.` diff --git a/apps/web/src/components/chat/composerProviderRegistry.tsx b/apps/web/src/components/chat/composerProviderRegistry.tsx index 38ef53d33c..0dde9dbb38 100644 --- a/apps/web/src/components/chat/composerProviderRegistry.tsx +++ b/apps/web/src/components/chat/composerProviderRegistry.tsx @@ -157,38 +157,6 @@ const composerProviderRegistry: Record = { /> ), }, - glm: { - getState: (input) => getProviderStateFromCapabilities(input), - renderTraitsMenuContent: ({ - threadId, - model, - models, - modelOptions, - prompt, - onPromptChange, - }) => ( - - ), - renderTraitsPicker: ({ threadId, model, models, modelOptions, prompt, onPromptChange }) => ( - - ), - }, }; export function getComposerProviderState(input: ComposerProviderStateInput): ComposerProviderState { diff --git a/apps/web/src/components/settings/SettingsPanels.tsx b/apps/web/src/components/settings/SettingsPanels.tsx index c9d0fcc5c8..9376ad0db4 100644 --- a/apps/web/src/components/settings/SettingsPanels.tsx +++ b/apps/web/src/components/settings/SettingsPanels.tsx @@ -113,11 +113,6 @@ const PROVIDER_SETTINGS: readonly InstallProviderSettings[] = [ binaryPlaceholder: "Claude binary path", binaryDescription: "Path to the Claude binary", }, - { - provider: "glm", - title: "GLM (Z.ai)", - envVarHint: "GLM_API_KEY", - }, ] as const; const PROVIDER_STATUS_STYLES = { @@ -543,14 +538,12 @@ export function GeneralSettingsPanel() { DEFAULT_UNIFIED_SETTINGS.providers.claudeAgent.binaryPath || settings.providers.claudeAgent.customModels.length > 0, ), - glm: Boolean(settings.providers.glm.customModels.length > 0), }); const [customModelInputByProvider, setCustomModelInputByProvider] = useState< Record >({ codex: "", claudeAgent: "", - glm: "", }); const [customModelErrorByProvider, 
setCustomModelErrorByProvider] = useState< Partial> @@ -1112,7 +1105,9 @@ export function GeneralSettingsPanel() { const customModelInput = customModelInputByProvider[providerCard.provider]; const customModelError = customModelErrorByProvider[providerCard.provider] ?? null; const providerDisplayName = - PROVIDER_DISPLAY_NAMES[providerCard.provider] ?? providerCard.title; + providerCard.liveProvider?.displayName ?? + PROVIDER_DISPLAY_NAMES[providerCard.provider] ?? + providerCard.title; return (
diff --git a/apps/web/src/modelSelection.ts b/apps/web/src/modelSelection.ts index dbd90a4923..55e9deba66 100644 --- a/apps/web/src/modelSelection.ts +++ b/apps/web/src/modelSelection.ts @@ -34,8 +34,9 @@ const PROVIDER_CUSTOM_MODEL_CONFIG: Record { ...DEFAULT_SERVER_SETTINGS.providers.claudeAgent, enabled: false, }, - glm: DEFAULT_SERVER_SETTINGS.providers.glm, }, }; const requestPromise = runRpc((client) => client(WS_METHODS.serverGetSettings, {})); @@ -191,7 +190,6 @@ describe("WsRpcAtomClient", () => { ...DEFAULT_SERVER_SETTINGS.providers.claudeAgent, enabled: false, }, - glm: DEFAULT_SERVER_SETTINGS.providers.glm, }, }; const registry = AtomRegistry.make(); diff --git a/apps/web/src/session-logic.test.ts b/apps/web/src/session-logic.test.ts index 2e81649f80..a1234dff6f 100644 --- a/apps/web/src/session-logic.test.ts +++ b/apps/web/src/session-logic.test.ts @@ -1262,7 +1262,6 @@ describe("PROVIDER_OPTIONS", () => { expect(PROVIDER_OPTIONS).toEqual([ { value: "codex", label: "Codex", available: true }, { value: "claudeAgent", label: "Claude", available: true }, - { value: "glm", label: "GLM", available: true }, { value: "cursor", label: "Cursor", available: false }, ]); expect(claude).toEqual({ diff --git a/apps/web/src/session-logic.ts b/apps/web/src/session-logic.ts index c33b60eea0..06bfee9803 100644 --- a/apps/web/src/session-logic.ts +++ b/apps/web/src/session-logic.ts @@ -29,7 +29,6 @@ export const PROVIDER_OPTIONS: Array<{ }> = [ { value: "codex", label: "Codex", available: true }, { value: "claudeAgent", label: "Claude", available: true }, - { value: "glm", label: "GLM", available: true }, { value: "cursor", label: "Cursor", available: false }, ]; diff --git a/apps/web/src/store.ts b/apps/web/src/store.ts index f975ad0303..6e768c4ef8 100644 --- a/apps/web/src/store.ts +++ b/apps/web/src/store.ts @@ -81,9 +81,9 @@ function updateProject( return changed ? 
next : projects; } -function normalizeModelSelection< - T extends { provider: "codex" | "claudeAgent" | "glm"; model: string }, ->(selection: T): T { +function normalizeModelSelection( + selection: T, +): T { return { ...selection, model: resolveModelSlugForProvider(selection.provider, selection.model), diff --git a/packages/contracts/src/model.ts b/packages/contracts/src/model.ts index 684efcea4a..e62a957e05 100644 --- a/packages/contracts/src/model.ts +++ b/packages/contracts/src/model.ts @@ -22,13 +22,9 @@ export const ClaudeModelOptions = Schema.Struct({ }); export type ClaudeModelOptions = typeof ClaudeModelOptions.Type; -export const GlmModelOptions = Schema.Struct({}); -export type GlmModelOptions = typeof GlmModelOptions.Type; - export const ProviderModelOptions = Schema.Struct({ codex: Schema.optional(CodexModelOptions), claudeAgent: Schema.optional(ClaudeModelOptions), - glm: Schema.optional(GlmModelOptions), }); export type ProviderModelOptions = typeof ProviderModelOptions.Type; @@ -58,7 +54,6 @@ export type ModelCapabilities = typeof ModelCapabilities.Type; export const DEFAULT_MODEL_BY_PROVIDER: Record = { codex: "gpt-5.4", claudeAgent: "claude-sonnet-4-6", - glm: "glm-5.1", }; export const DEFAULT_MODEL = DEFAULT_MODEL_BY_PROVIDER.codex; @@ -67,7 +62,6 @@ export const DEFAULT_MODEL = DEFAULT_MODEL_BY_PROVIDER.codex; export const DEFAULT_GIT_TEXT_GENERATION_MODEL_BY_PROVIDER: Record = { codex: "gpt-5.4-mini", claudeAgent: "claude-haiku-4-5", - glm: "glm-5.1", }; export const MODEL_SLUG_ALIASES_BY_PROVIDER: Record> = { @@ -92,17 +86,6 @@ export const MODEL_SLUG_ALIASES_BY_PROVIDER: Record = { codex: "Codex", claudeAgent: "Claude", - glm: "GLM", }; diff --git a/packages/contracts/src/orchestration.ts b/packages/contracts/src/orchestration.ts index fef7c4b503..6c7f073612 100644 --- a/packages/contracts/src/orchestration.ts +++ b/packages/contracts/src/orchestration.ts @@ -1,5 +1,5 @@ import { Option, Schema, SchemaIssue, Struct } from "effect"; -import { 
ClaudeModelOptions, CodexModelOptions, GlmModelOptions } from "./model"; +import { ClaudeModelOptions, CodexModelOptions } from "./model"; import { ApprovalRequestId, CheckpointRef, @@ -23,7 +23,7 @@ export const ORCHESTRATION_WS_METHODS = { replayEvents: "orchestration.replayEvents", } as const; -export const ProviderKind = Schema.Literals(["codex", "claudeAgent", "glm"]); +export const ProviderKind = Schema.Literals(["codex", "claudeAgent"]); export type ProviderKind = typeof ProviderKind.Type; export const ProviderApprovalPolicy = Schema.Literals([ "untrusted", @@ -55,18 +55,7 @@ export const ClaudeModelSelection = Schema.Struct({ }); export type ClaudeModelSelection = typeof ClaudeModelSelection.Type; -export const GlmModelSelection = Schema.Struct({ - provider: Schema.Literal("glm"), - model: TrimmedNonEmptyString, - options: Schema.optionalKey(GlmModelOptions), -}); -export type GlmModelSelection = typeof GlmModelSelection.Type; - -export const ModelSelection = Schema.Union([ - CodexModelSelection, - ClaudeModelSelection, - GlmModelSelection, -]); +export const ModelSelection = Schema.Union([CodexModelSelection, ClaudeModelSelection]); export type ModelSelection = typeof ModelSelection.Type; export const RuntimeMode = Schema.Literals(["approval-required", "full-access"]); diff --git a/packages/contracts/src/server.ts b/packages/contracts/src/server.ts index 776a0a89e9..c34d2f9073 100644 --- a/packages/contracts/src/server.ts +++ b/packages/contracts/src/server.ts @@ -61,6 +61,7 @@ export const ServerProvider = Schema.Struct({ enabled: Schema.Boolean, installed: Schema.Boolean, version: Schema.NullOr(TrimmedNonEmptyString), + displayName: Schema.optional(TrimmedNonEmptyString), status: ServerProviderState, auth: ServerProviderAuth, checkedAt: IsoDateTime, diff --git a/packages/contracts/src/settings.ts b/packages/contracts/src/settings.ts index 88122429aa..6633ce42a6 100644 --- a/packages/contracts/src/settings.ts +++ b/packages/contracts/src/settings.ts @@ 
-71,19 +71,6 @@ export const ClaudeSettings = Schema.Struct({ }); export type ClaudeSettings = typeof ClaudeSettings.Type; -export const GlmTransport = Schema.Literals(["bridge", "direct"]); -export type GlmTransport = typeof GlmTransport.Type; - -export const GlmSettings = Schema.Struct({ - enabled: Schema.Boolean.pipe(Schema.withDecodingDefault(() => false)), - transport: GlmTransport.pipe(Schema.withDecodingDefault(() => "bridge" as const)), - upstreamBaseUrl: TrimmedString.pipe( - Schema.withDecodingDefault(() => "https://api.z.ai/api/coding/paas/v4"), - ), - customModels: Schema.Array(Schema.String).pipe(Schema.withDecodingDefault(() => [])), -}); -export type GlmSettings = typeof GlmSettings.Type; - export const ObservabilitySettings = Schema.Struct({ otlpTracesUrl: TrimmedString.pipe(Schema.withDecodingDefault(() => "")), otlpMetricsUrl: TrimmedString.pipe(Schema.withDecodingDefault(() => "")), @@ -106,7 +93,6 @@ export const ServerSettings = Schema.Struct({ providers: Schema.Struct({ codex: CodexSettings.pipe(Schema.withDecodingDefault(() => ({}))), claudeAgent: ClaudeSettings.pipe(Schema.withDecodingDefault(() => ({}))), - glm: GlmSettings.pipe(Schema.withDecodingDefault(() => ({}))), }).pipe(Schema.withDecodingDefault(() => ({}))), observability: ObservabilitySettings.pipe(Schema.withDecodingDefault(() => ({}))), }); @@ -149,8 +135,6 @@ const ClaudeModelOptionsPatch = Schema.Struct({ contextWindow: Schema.optionalKey(ClaudeModelOptions.fields.contextWindow), }); -const GlmModelOptionsPatch = Schema.Struct({}); - const ModelSelectionPatch = Schema.Union([ Schema.Struct({ provider: Schema.optionalKey(Schema.Literal("codex")), @@ -162,11 +146,6 @@ const ModelSelectionPatch = Schema.Union([ model: Schema.optionalKey(TrimmedNonEmptyString), options: Schema.optionalKey(ClaudeModelOptionsPatch), }), - Schema.Struct({ - provider: Schema.optionalKey(Schema.Literal("glm")), - model: Schema.optionalKey(TrimmedNonEmptyString), - options: 
Schema.optionalKey(GlmModelOptionsPatch), - }), ]); const CodexSettingsPatch = Schema.Struct({ @@ -182,13 +161,6 @@ const ClaudeSettingsPatch = Schema.Struct({ customModels: Schema.optionalKey(Schema.Array(Schema.String)), }); -const GlmSettingsPatch = Schema.Struct({ - enabled: Schema.optionalKey(Schema.Boolean), - transport: Schema.optionalKey(GlmTransport), - upstreamBaseUrl: Schema.optionalKey(Schema.String), - customModels: Schema.optionalKey(Schema.Array(Schema.String)), -}); - export const ServerSettingsPatch = Schema.Struct({ enableAssistantStreaming: Schema.optionalKey(Schema.Boolean), defaultThreadEnvMode: Schema.optionalKey(ThreadEnvMode), @@ -203,7 +175,6 @@ export const ServerSettingsPatch = Schema.Struct({ Schema.Struct({ codex: Schema.optionalKey(CodexSettingsPatch), claudeAgent: Schema.optionalKey(ClaudeSettingsPatch), - glm: Schema.optionalKey(GlmSettingsPatch), }), ), }); From 157756a54af06a4798efa32d3456678dcfb417ec Mon Sep 17 00:00:00 2001 From: Marve10s Date: Fri, 10 Apr 2026 00:45:23 +0300 Subject: [PATCH 4/4] fix: derive GLM models from user config --- .../src/provider/Layers/ClaudeProvider.ts | 21 +- .../src/provider/Layers/CodexProvider.ts | 197 +++++++++++------- .../provider/Layers/ProviderRegistry.test.ts | 113 +++++++--- 3 files changed, 216 insertions(+), 115 deletions(-) diff --git a/apps/server/src/provider/Layers/ClaudeProvider.ts b/apps/server/src/provider/Layers/ClaudeProvider.ts index 38e776cb35..c714a088d0 100644 --- a/apps/server/src/provider/Layers/ClaudeProvider.ts +++ b/apps/server/src/provider/Layers/ClaudeProvider.ts @@ -50,11 +50,6 @@ const DEFAULT_CLAUDE_MODEL_CAPABILITIES: ModelCapabilities = { const PROVIDER = "claudeAgent" as const; const ZAI_ANTHROPIC_BASE_URL = "https://api.z.ai/api/anthropic"; -const DEFAULT_CLAUDE_GLM_MODEL_MAPPING = { - opus: "glm-4.7", - sonnet: "glm-4.7", - haiku: "glm-4.5-air", -} as const; const BUILT_IN_MODELS: ReadonlyArray = [ { slug: "claude-opus-4-6", @@ -113,9 +108,9 @@ const 
BUILT_IN_MODELS: ReadonlyArray = [ interface ClaudeGlmIntegration { readonly hasAuthToken: boolean; - readonly opusModel: string; - readonly sonnetModel: string; - readonly haikuModel: string; + readonly opusModel: string | undefined; + readonly sonnetModel: string | undefined; + readonly haikuModel: string | undefined; } function normalizeUrl(value: string | undefined): string | undefined { @@ -142,13 +137,9 @@ function readClaudeGlmIntegrationFromEnv( return { hasAuthToken: Boolean(asTrimmedString(env.ANTHROPIC_AUTH_TOKEN)), - opusModel: - asTrimmedString(env.ANTHROPIC_DEFAULT_OPUS_MODEL) ?? DEFAULT_CLAUDE_GLM_MODEL_MAPPING.opus, - sonnetModel: - asTrimmedString(env.ANTHROPIC_DEFAULT_SONNET_MODEL) ?? - DEFAULT_CLAUDE_GLM_MODEL_MAPPING.sonnet, - haikuModel: - asTrimmedString(env.ANTHROPIC_DEFAULT_HAIKU_MODEL) ?? DEFAULT_CLAUDE_GLM_MODEL_MAPPING.haiku, + opusModel: asTrimmedString(env.ANTHROPIC_DEFAULT_OPUS_MODEL), + sonnetModel: asTrimmedString(env.ANTHROPIC_DEFAULT_SONNET_MODEL), + haikuModel: asTrimmedString(env.ANTHROPIC_DEFAULT_HAIKU_MODEL), }; } diff --git a/apps/server/src/provider/Layers/CodexProvider.ts b/apps/server/src/provider/Layers/CodexProvider.ts index 3021f2abb8..1785af5692 100644 --- a/apps/server/src/provider/Layers/CodexProvider.ts +++ b/apps/server/src/provider/Layers/CodexProvider.ts @@ -177,58 +177,15 @@ const BUILT_IN_MODELS: ReadonlyArray = [ }, ]; -const GLM_BUILT_IN_MODELS: ReadonlyArray = [ - { - slug: "glm-5.1", - name: "GLM 5.1", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-5", - name: "GLM 5", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-5-turbo", - name: "GLM 5 Turbo", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.7", - name: "GLM 4.7", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.6", - name: "GLM 4.6", - isCustom: false, - capabilities: 
DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.5", - name: "GLM 4.5", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, - { - slug: "glm-4.5-air", - name: "GLM 4.5 Air", - isCustom: false, - capabilities: DEFAULT_GLM_MODEL_CAPABILITIES, - }, -]; +interface CodexConfigSnapshot { + readonly modelProvider: string | undefined; + readonly configuredModels: ReadonlyArray; +} export function getCodexModelCapabilities(model: string | null | undefined): ModelCapabilities { const slug = model?.trim(); if (slug?.startsWith("glm-")) { - return ( - GLM_BUILT_IN_MODELS.find((candidate) => candidate.slug === slug)?.capabilities ?? - DEFAULT_GLM_MODEL_CAPABILITIES - ); + return DEFAULT_GLM_MODEL_CAPABILITIES; } return ( BUILT_IN_MODELS.find((candidate) => candidate.slug === slug)?.capabilities ?? @@ -316,7 +273,55 @@ export function parseAuthStatusFromOutput(result: CommandResult): { }; } -export const readCodexConfigModelProvider = Effect.fn("readCodexConfigModelProvider")(function* () { +function parseCodexConfigSnapshot(content: string): CodexConfigSnapshot { + let inTopLevel = true; + let inProfileSection = false; + let modelProvider: string | undefined; + const configuredModels: string[] = []; + const seenModels = new Set(); + + for (const line of content.split("\n")) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith("#")) { + continue; + } + if (trimmed.startsWith("[")) { + const sectionName = trimmed.slice(1, trimmed.lastIndexOf("]")).trim(); + inTopLevel = false; + inProfileSection = + sectionName === "profiles" || + sectionName.startsWith("profiles.") || + sectionName.startsWith('profiles."') || + sectionName.startsWith("profiles.'"); + continue; + } + + const modelMatch = trimmed.match(/^model\s*=\s*["']([^"']+)["']/); + if (modelMatch && (inTopLevel || inProfileSection)) { + const model = modelMatch[1]?.trim(); + if (model && !seenModels.has(model)) { + seenModels.add(model); + configuredModels.push(model); + } + } + + 
if (!inTopLevel) { + continue; + } + + const providerMatch = trimmed.match(/^model_provider\s*=\s*["']([^"']+)["']/); + if (providerMatch) { + modelProvider = providerMatch[1]; + } + } + + return { + modelProvider, + configuredModels, + }; +} + +export const readCodexConfigSnapshot = Effect.fn("readCodexConfigSnapshot")(function* () { const fileSystem = yield* FileSystem.FileSystem; const path = yield* Path.Path; const settingsService = yield* ServerSettingsService; @@ -334,23 +339,21 @@ export const readCodexConfigModelProvider = Effect.fn("readCodexConfigModelProvi .readFileString(configPath) .pipe(Effect.orElseSucceed(() => undefined)); if (content === undefined) { - return undefined; + return { + modelProvider: undefined, + configuredModels: [], + } satisfies CodexConfigSnapshot; } - let inTopLevel = true; - for (const line of content.split("\n")) { - const trimmed = line.trim(); - if (!trimmed || trimmed.startsWith("#")) continue; - if (trimmed.startsWith("[")) { - inTopLevel = false; - continue; - } - if (!inTopLevel) continue; + return parseCodexConfigSnapshot(content); +}); - const match = trimmed.match(/^model_provider\s*=\s*["']([^"']+)["']/); - if (match) return match[1]; - } - return undefined; +export const readCodexConfigModelProvider = Effect.fn("readCodexConfigModelProvider")(function* () { + return (yield* readCodexConfigSnapshot()).modelProvider; +}); + +export const readCodexConfiguredModels = Effect.fn("readCodexConfiguredModels")(function* () { + return (yield* readCodexConfigSnapshot()).configuredModels; }); export const hasCustomModelProvider = readCodexConfigModelProvider().pipe( @@ -383,16 +386,66 @@ function codexCustomProviderMessage(modelProvider: string | undefined): string { return "Using a custom Codex model provider; OpenAI login check skipped."; } -function codexBuiltInModels(modelProvider: string | undefined): ReadonlyArray { - return modelProvider === "glm" ? 
GLM_BUILT_IN_MODELS : BUILT_IN_MODELS; -} - function codexCustomModelCapabilities(modelProvider: string | undefined): ModelCapabilities { return modelProvider === "glm" ? DEFAULT_GLM_MODEL_CAPABILITIES : DEFAULT_CODEX_MODEL_CAPABILITIES; } +function configuredCodexModels( + modelProvider: string | undefined, + configuredModels: ReadonlyArray, +): ReadonlyArray { + const capabilities = codexCustomModelCapabilities(modelProvider); + const models: ServerProviderModel[] = []; + const seen = new Set(); + + for (const slug of configuredModels) { + if (!slug || seen.has(slug)) { + continue; + } + seen.add(slug); + + const builtInModel = + modelProvider === undefined || OPENAI_AUTH_PROVIDERS.has(modelProvider) + ? BUILT_IN_MODELS.find((candidate) => candidate.slug === slug) + : undefined; + + models.push( + builtInModel ?? { + slug, + name: slug, + isCustom: false, + capabilities, + }, + ); + } + + return models; +} + +function codexBuiltInModels(config: CodexConfigSnapshot): ReadonlyArray { + if (config.modelProvider !== undefined && !OPENAI_AUTH_PROVIDERS.has(config.modelProvider)) { + return configuredCodexModels(config.modelProvider, config.configuredModels); + } + + const configuredModelEntries = configuredCodexModels( + config.modelProvider, + config.configuredModels, + ); + const seen = new Set(BUILT_IN_MODELS.map((model) => model.slug)); + return [ + ...BUILT_IN_MODELS, + ...configuredModelEntries.filter((model) => { + if (seen.has(model.slug)) { + return false; + } + seen.add(model.slug); + return true; + }), + ]; +} + const CAPABILITIES_PROBE_TIMEOUT_MS = 8_000; const probeCodexCapabilities = (input: { @@ -441,12 +494,16 @@ export const checkCodexProviderStatus = Effect.fn("checkCodexProviderStatus")(fu Effect.map((settings) => settings.providers.codex), ); const checkedAt = new Date().toISOString(); - const modelProvider = yield* readCodexConfigModelProvider().pipe( - Effect.orElseSucceed(() => undefined), + const config = yield* readCodexConfigSnapshot().pipe( 
+ Effect.orElseSucceed(() => ({ + modelProvider: undefined, + configuredModels: [], + })), ); + const modelProvider = config.modelProvider; const displayName = codexDisplayName(modelProvider); const models = providerModelsFromSettings( - codexBuiltInModels(modelProvider), + codexBuiltInModels(config), PROVIDER, codexSettings.customModels, codexCustomModelCapabilities(modelProvider), diff --git a/apps/server/src/provider/Layers/ProviderRegistry.test.ts b/apps/server/src/provider/Layers/ProviderRegistry.test.ts index bbc41476d6..fbae187d46 100644 --- a/apps/server/src/provider/Layers/ProviderRegistry.test.ts +++ b/apps/server/src/provider/Layers/ProviderRegistry.test.ts @@ -27,6 +27,7 @@ import { checkCodexProviderStatus, hasCustomModelProvider, parseAuthStatusFromOutput, + readCodexConfiguredModels, readCodexConfigModelProvider, } from "./CodexProvider"; import { checkClaudeProviderStatus, parseClaudeAuthStatusFromOutput } from "./ClaudeProvider"; @@ -687,37 +688,43 @@ it.layer(Layer.mergeAll(NodeServices.layer, ServerSettingsService.layerTest()))( }).pipe(Effect.provide(failingSpawnerLayer("spawn codex ENOENT"))), ); - it.effect("surfaces GLM models when Codex is configured with model_provider=glm", () => - Effect.gen(function* () { - yield* withTempCodexHome( - [ - 'model_provider = "glm"', - "", - "[model_providers.glm]", - 'base_url = "https://api.z.ai/api/coding/paas/v4"', - 'env_key = "GLM_API_KEY"', - ].join("\n"), - ); - const status = yield* checkCodexProviderStatus(); - assert.strictEqual(status.displayName, "Codex / GLM"); - assert.strictEqual(status.status, "ready"); - assert.strictEqual( - status.message, - "Using Z.AI GLM through Codex custom model provider config; OpenAI login check skipped.", - ); - assert.deepStrictEqual( - status.models.slice(0, 3).map((model) => model.slug), - ["glm-5.1", "glm-5", "glm-5-turbo"], - ); - }).pipe( - Effect.provide( - mockSpawnerLayer((args) => { - const joined = args.join(" "); - if (joined === "--version") return { 
stdout: "codex 1.0.0\n", stderr: "", code: 0 }; - throw new Error(`Auth probe should have been skipped but got args: ${joined}`); - }), + it.effect( + "surfaces only configured GLM models when Codex is configured with model_provider=glm", + () => + Effect.gen(function* () { + yield* withTempCodexHome( + [ + 'model_provider = "glm"', + 'model = "glm-5.1"', + "", + "[profiles.fast]", + 'model = "glm-5-air"', + "", + "[model_providers.glm]", + 'base_url = "https://api.z.ai/api/coding/paas/v4"', + 'env_key = "GLM_API_KEY"', + ].join("\n"), + ); + const status = yield* checkCodexProviderStatus(); + assert.strictEqual(status.displayName, "Codex / GLM"); + assert.strictEqual(status.status, "ready"); + assert.strictEqual( + status.message, + "Using Z.AI GLM through Codex custom model provider config; OpenAI login check skipped.", + ); + assert.deepStrictEqual( + status.models.map((model) => model.slug), + ["glm-5.1", "glm-5-air"], + ); + }).pipe( + Effect.provide( + mockSpawnerLayer((args) => { + const joined = args.join(" "); + if (joined === "--version") return { stdout: "codex 1.0.0\n", stderr: "", code: 0 }; + throw new Error(`Auth probe should have been skipped but got args: ${joined}`); + }), + ), ), - ), ); }); @@ -844,6 +851,52 @@ it.layer(Layer.mergeAll(NodeServices.layer, ServerSettingsService.layerTest()))( ); }); + describe("readCodexConfiguredModels", () => { + it.effect("returns an empty list when config file does not exist", () => + Effect.gen(function* () { + yield* withTempCodexHome(); + assert.deepStrictEqual(yield* readCodexConfiguredModels(), []); + }), + ); + + it.effect("reads configured models from top-level and profile sections", () => + Effect.gen(function* () { + yield* withTempCodexHome( + [ + 'model = "glm-5.1"', + 'model_provider = "glm"', + "", + "[profiles.fast]", + 'model = "glm-5-air"', + "", + "[profiles.review]", + 'model = "glm-5.1"', + "", + "[model_providers.glm]", + 'base_url = "https://api.z.ai/api/coding/paas/v4"', + 'env_key = 
"GLM_API_KEY"', + ].join("\n"), + ); + assert.deepStrictEqual(yield* readCodexConfiguredModels(), ["glm-5.1", "glm-5-air"]); + }), + ); + + it.effect("ignores model keys outside top-level and profile sections", () => + Effect.gen(function* () { + yield* withTempCodexHome( + [ + "[model_providers.glm]", + 'model = "should-be-ignored"', + "", + "[profiles.fast]", + 'model = "glm-5-fast"', + ].join("\n"), + ); + assert.deepStrictEqual(yield* readCodexConfiguredModels(), ["glm-5-fast"]); + }), + ); + }); + // ── hasCustomModelProvider tests ─────────────────────────────────── describe("hasCustomModelProvider", () => {