diff --git a/packages/app/server/src/env.ts b/packages/app/server/src/env.ts
index da23dd994..5933baeb7 100644
--- a/packages/app/server/src/env.ts
+++ b/packages/app/server/src/env.ts
@@ -45,6 +45,8 @@ export const env = createEnv({
     GROQ_API_KEY: z.string().optional(),
     XAI_API_KEY: z.string().optional(),
     OPENROUTER_API_KEY: z.string().optional(),
+    VERCEL_GATEWAY_API_KEY: z.string().optional(),
+    VERCEL_GATEWAY_BASE_URL: z.string().url().optional(),
     TAVILY_API_KEY: z.string().optional(),
     E2B_API_KEY: z.string().optional(),
     GOOGLE_SERVICE_ACCOUNT_KEY_ENCODED: z.string().optional(),
diff --git a/packages/app/server/src/providers/ProviderFactory.ts b/packages/app/server/src/providers/ProviderFactory.ts
index 8bc01e719..7c40883d5 100644
--- a/packages/app/server/src/providers/ProviderFactory.ts
+++ b/packages/app/server/src/providers/ProviderFactory.ts
@@ -22,6 +22,7 @@ import { OpenAIResponsesProvider } from './OpenAIResponsesProvider';
 import { OpenRouterProvider } from './OpenRouterProvider';
 import { ProviderType } from './ProviderType';
 import { XAIProvider } from './XAIProvider';
+import { VercelGatewayProvider } from './VercelGatewayProvider';
 import {
   VertexAIProvider,
   PROXY_PASSTHROUGH_ONLY_MODEL as VertexAIProxyPassthroughOnlyModel,
@@ -58,6 +59,9 @@ const createChatModelToProviderMapping = (): Record<string, ProviderType> => {
       case 'Xai':
         mapping[modelConfig.model_id] = ProviderType.XAI;
         break;
+      case 'VercelGateway':
+        mapping[modelConfig.model_id] = ProviderType.VERCEL_GATEWAY;
+        break;
       // Add other providers as needed
       default:
         // Skip models with unsupported providers
@@ -192,6 +196,8 @@
       return new GroqProvider(stream, model);
     case ProviderType.XAI:
       return new XAIProvider(stream, model);
+    case ProviderType.VERCEL_GATEWAY:
+      return new VercelGatewayProvider(stream, model);
     default:
       throw new Error(`Unknown provider type: ${type}`);
   }
diff --git a/packages/app/server/src/providers/ProviderType.ts b/packages/app/server/src/providers/ProviderType.ts
index b2514ac80..93ab8c6aa 100644
--- a/packages/app/server/src/providers/ProviderType.ts
+++ b/packages/app/server/src/providers/ProviderType.ts
@@ -12,4 +12,5 @@ export enum ProviderType {
   OPENAI_VIDEOS = 'OPENAI_VIDEOS',
   GROQ = 'GROQ',
   XAI = 'XAI',
+  VERCEL_GATEWAY = 'VERCEL_GATEWAY',
 }
diff --git a/packages/app/server/src/providers/VercelGatewayProvider.ts b/packages/app/server/src/providers/VercelGatewayProvider.ts
new file mode 100644
index 000000000..459b4d254
--- /dev/null
+++ b/packages/app/server/src/providers/VercelGatewayProvider.ts
@@ -0,0 +1,117 @@
+import { LlmTransactionMetadata, Transaction } from '../types';
+import { getCostPerToken } from '../services/AccountingService';
+import { BaseProvider } from './BaseProvider';
+import { ProviderType } from './ProviderType';
+import logger from '../logger';
+import { env } from '../env';
+
+interface CompletionStateBody {
+  id: string;
+  usage: {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+  };
+}
+
+interface StreamingChunkBody {
+  id: string;
+  choices: {
+    index: number;
+    delta: {
+      content?: string;
+    };
+    finish_reason: string | null;
+  }[];
+  usage: {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+  } | null;
+}
+
+const parseSSEGPTFormat = (data: string): StreamingChunkBody[] => {
+  const events = data.split('\n\n');
+  const chunks: StreamingChunkBody[] = [];
+
+  for (const event of events) {
+    if (!event.trim()) continue;
+    if (event.startsWith('data: ')) {
+      const jsonStr = event.slice(6);
+      if (jsonStr.trim() === '[DONE]') continue;
+      try {
+        const parsed = JSON.parse(jsonStr);
+        chunks.push(parsed);
+      } catch (error) {
+        logger.error(`Error parsing SSE chunk: ${error}`);
+      }
+    }
+  }
+
+  return chunks;
+};
+
+export class VercelGatewayProvider extends BaseProvider {
+  getType(): ProviderType {
+    return ProviderType.VERCEL_GATEWAY;
+  }
+
+  getBaseUrl(): string {
+    return env.VERCEL_GATEWAY_BASE_URL || 'https://ai-gateway.vercel.sh/v1/ai';
+  }
+
+  getApiKey(): string | undefined {
+    return env.VERCEL_GATEWAY_API_KEY;
+  }
+
+  async handleBody(data: string): Promise<Transaction> {
+    try {
+      let prompt_tokens = 0;
+      let completion_tokens = 0;
+      let total_tokens = 0;
+      let providerId = 'null';
+
+      if (this.getIsStream()) {
+        const chunks = parseSSEGPTFormat(data);
+        for (const chunk of chunks) {
+          if (chunk.usage !== null) {
+            prompt_tokens += chunk.usage.prompt_tokens;
+            completion_tokens += chunk.usage.completion_tokens;
+            total_tokens += chunk.usage.total_tokens;
+          }
+          providerId = chunk.id || 'null';
+        }
+      } else {
+        const parsed = JSON.parse(data) as CompletionStateBody;
+        prompt_tokens += parsed.usage.prompt_tokens;
+        completion_tokens += parsed.usage.completion_tokens;
+        total_tokens += parsed.usage.total_tokens;
+        providerId = parsed.id || 'null';
+      }
+
+      const cost = getCostPerToken(
+        this.getModel(),
+        prompt_tokens,
+        completion_tokens
+      );
+
+      const metadata: LlmTransactionMetadata = {
+        providerId: providerId,
+        provider: this.getType(),
+        model: this.getModel(),
+        inputTokens: prompt_tokens,
+        outputTokens: completion_tokens,
+        totalTokens: total_tokens,
+      };
+
+      return {
+        rawTransactionCost: cost,
+        metadata: metadata,
+        status: 'success',
+      };
+    } catch (error) {
+      logger.error(`Error processing data: ${error}`);
+      throw error;
+    }
+  }
+}
diff --git a/packages/app/server/src/services/AccountingService.ts b/packages/app/server/src/services/AccountingService.ts
index 02e51e14e..34c3abcab 100644
--- a/packages/app/server/src/services/AccountingService.ts
+++ b/packages/app/server/src/services/AccountingService.ts
@@ -10,6 +10,7 @@ import {
   SupportedImageModel,
   SupportedVideoModel,
   XAIModels,
+  VercelGatewayModels,
 } from '@merit-systems/echo-typescript-sdk';
 import { Decimal } from '@prisma/client/runtime/library';
 
@@ -30,6 +31,7 @@ export const ALL_SUPPORTED_MODELS: SupportedModel[] = [
   ...OpenRouterModels,
   ...GroqModels,
   ...XAIModels,
+  ...VercelGatewayModels,
 ];
 
 // Handle image models separately since they have different pricing structure
diff --git a/packages/sdk/next/src/ai-providers/index.ts b/packages/sdk/next/src/ai-providers/index.ts
index 4ea2555df..c938a42da 100644
--- a/packages/sdk/next/src/ai-providers/index.ts
+++ b/packages/sdk/next/src/ai-providers/index.ts
@@ -3,3 +3,4 @@ export * from './google';
 export * from './xai';
 export * from './groq';
 export * from './openai';
+export * from './vercel';
diff --git a/packages/sdk/next/src/ai-providers/vercel.ts b/packages/sdk/next/src/ai-providers/vercel.ts
new file mode 100644
index 000000000..60974288b
--- /dev/null
+++ b/packages/sdk/next/src/ai-providers/vercel.ts
@@ -0,0 +1,10 @@
+import { getEchoToken } from '../auth/token-manager';
+import {
+  createEchoVercelGateway as createEchoVercelGatewayBase,
+  EchoConfig,
+  GatewayProvider,
+} from '@merit-systems/echo-typescript-sdk';
+
+export function createEchoVercelGateway(config: EchoConfig): GatewayProvider {
+  return createEchoVercelGatewayBase(config, async () => getEchoToken(config));
+}
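Reviewer note: for context, a minimal sketch of how an app route might consume the Next.js wrapper above. The SDK import path is a placeholder (the published package name for the Next SDK is not shown in this diff), and the app ID comes from an assumed env var; model IDs use the provider-prefixed form the gateway expects.

import { streamText } from 'ai';
// Placeholder import path -- substitute the actual published Next SDK package.
import { createEchoVercelGateway } from '@merit-systems/echo-next-sdk';

// The wrapper resolves the Echo token per request via getEchoToken(config).
const gateway = createEchoVercelGateway({ appId: process.env.ECHO_APP_ID! });

export async function POST(req: Request) {
  const { prompt } = await req.json();
  const result = streamText({
    // Gateway model IDs are prefixed with the upstream provider, e.g. 'openai/gpt-4o'.
    model: gateway('anthropic/claude-sonnet-4'),
    prompt,
  });
  return result.toTextStreamResponse();
}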
diff --git a/packages/sdk/ts/package.json b/packages/sdk/ts/package.json
index f2a7f1066..43e5388d2 100644
--- a/packages/sdk/ts/package.json
+++ b/packages/sdk/ts/package.json
@@ -63,6 +63,7 @@
     "@ai-sdk/openai": "2.0.32",
     "@ai-sdk/xai": "2.0.16",
     "@openrouter/ai-sdk-provider": "1.2.0",
+    "@ai-sdk/gateway": "^1.0.12",
     "ai": "5.0.47"
   }
 }
diff --git a/packages/sdk/ts/src/__tests__/vercel-gateway-models.test.ts b/packages/sdk/ts/src/__tests__/vercel-gateway-models.test.ts
new file mode 100644
index 000000000..3c107d42f
--- /dev/null
+++ b/packages/sdk/ts/src/__tests__/vercel-gateway-models.test.ts
@@ -0,0 +1,45 @@
+import { describe, it, expect } from 'vitest';
+import { VercelGatewayModels } from '../supported-models/chat/vercel-gateway';
+
+describe('VercelGatewayModels', () => {
+  it('should export a non-empty array of models', () => {
+    expect(VercelGatewayModels.length).toBeGreaterThan(0);
+  });
+
+  it('should have all models with VercelGateway provider', () => {
+    for (const model of VercelGatewayModels) {
+      expect(model.provider).toBe('VercelGateway');
+    }
+  });
+
+  it('should have valid pricing for all models', () => {
+    for (const model of VercelGatewayModels) {
+      expect(model.input_cost_per_token).toBeGreaterThan(0);
+      expect(model.output_cost_per_token).toBeGreaterThan(0);
+    }
+  });
+
+  it('should have prefixed model IDs (provider/model-name)', () => {
+    for (const model of VercelGatewayModels) {
+      expect(model.model_id).toMatch(/^[a-z]+\/[a-z0-9._-]+$/);
+    }
+  });
+
+  it('should include key OpenAI models', () => {
+    const ids = VercelGatewayModels.map(m => m.model_id);
+    expect(ids).toContain('openai/gpt-4o');
+    expect(ids).toContain('openai/gpt-4o-mini');
+  });
+
+  it('should include key Anthropic models', () => {
+    const ids = VercelGatewayModels.map(m => m.model_id);
+    expect(ids).toContain('anthropic/claude-sonnet-4');
+    expect(ids).toContain('anthropic/claude-3.5-sonnet');
+  });
+
+  it('should include key Google models', () => {
+    const ids = VercelGatewayModels.map(m => m.model_id);
+    expect(ids).toContain('google/gemini-2.5-flash');
+    expect(ids).toContain('google/gemini-2.5-pro');
+  });
+});
diff --git a/packages/sdk/ts/src/__tests__/vercel-gateway.test.ts b/packages/sdk/ts/src/__tests__/vercel-gateway.test.ts
new file mode 100644
index 000000000..02e3f0e92
--- /dev/null
+++ b/packages/sdk/ts/src/__tests__/vercel-gateway.test.ts
@@ -0,0 +1,66 @@
+import { describe, it, expect, vi } from 'vitest';
+import { createEchoVercelGateway } from '../providers/vercel';
+import { echoFetch } from '../providers/index';
+
+// Mock @ai-sdk/gateway
+vi.mock('@ai-sdk/gateway', () => ({
+  createGatewayProvider: vi.fn((options) => ({
+    _type: 'gateway',
+    _options: options,
+    languageModel: vi.fn(),
+    getAvailableModels: vi.fn(),
+    getCredits: vi.fn(),
+    textEmbeddingModel: vi.fn(),
+  })),
+}));
+
+describe('createEchoVercelGateway', () => {
+  const mockGetTokenFn = vi.fn();
+  const mockOnInsufficientFunds = vi.fn();
+
+  it('should create a gateway provider with echo fetch wrapper', () => {
+    const provider = createEchoVercelGateway(
+      { appId: '60601628-cdb7-481e-8f7e-921981220348' },
+      mockGetTokenFn,
+      mockOnInsufficientFunds
+    );
+
+    expect(provider._type).toBe('gateway');
+    expect(provider._options.apiKey).toBe('placeholder_replaced_by_echoFetch');
+    expect(provider._options.baseURL).toBe('https://echo.router.merit.systems');
+    expect(provider._options.fetch).toBeDefined();
+  });
+
+  it('should use custom baseRouterUrl when provided', () => {
+    const provider = createEchoVercelGateway(
+      {
+        appId: '60601628-cdb7-481e-8f7e-921981220348',
+        baseRouterUrl: 'https://custom-gateway.example.com',
+      },
+      mockGetTokenFn,
+      mockOnInsufficientFunds
+    );
+
+    expect(provider._options.baseURL).toBe('https://custom-gateway.example.com');
+  });
+
+  it('should throw on invalid appId', () => {
+    expect(() =>
+      createEchoVercelGateway(
+        { appId: 'invalid' },
+        mockGetTokenFn,
+        mockOnInsufficientFunds
+      )
+    ).toThrow('Invalid Echo App ID');
+  });
+
+  it('should throw on empty appId', () => {
+    expect(() =>
+      createEchoVercelGateway(
+        { appId: '' },
+        mockGetTokenFn,
+        mockOnInsufficientFunds
+      )
+    ).toThrow('Invalid Echo App ID');
+  });
+});
diff --git a/packages/sdk/ts/src/index.ts b/packages/sdk/ts/src/index.ts
index 521a30f93..0589dd8e7 100644
--- a/packages/sdk/ts/src/index.ts
+++ b/packages/sdk/ts/src/index.ts
@@ -11,6 +11,7 @@ export * from './utils/error-handling';
 export * from './utils/validation';
 export * from './providers';
 export { createEchoXAI } from './providers/xai';
+export { createEchoVercelGateway } from './providers/vercel';
 
 // Export tool-related types and utilities
 export type {
@@ -48,6 +49,7 @@ export type { OpenRouterModel } from './supported-models/chat/openrouter';
 export { GroqModels } from './supported-models/chat/groq';
 export type { GroqModel } from './supported-models/chat/groq';
 export { XAIModels } from './supported-models/chat/xai';
+export { VercelGatewayModels } from './supported-models/chat/vercel-gateway';
 export type { XAIModel } from './supported-models/chat/xai';
 export { OpenAIImageModels } from './supported-models/image/openai';
 export type { OpenAIImageModel } from './supported-models/image/openai';
diff --git a/packages/sdk/ts/src/providers/index.ts b/packages/sdk/ts/src/providers/index.ts
index 62f54fac8..ed07b6453 100644
--- a/packages/sdk/ts/src/providers/index.ts
+++ b/packages/sdk/ts/src/providers/index.ts
@@ -4,6 +4,7 @@ export * from './groq';
 export * from './xai';
 export * from './openai';
 export * from './openrouter';
+export * from './vercel';
 
 export function echoFetch(
   originalFetch: typeof fetch,
@@ -63,3 +64,4 @@ export { type GroqProvider } from '@ai-sdk/groq';
 export { type OpenAIProvider } from '@ai-sdk/openai';
 export { type OpenRouterProvider } from '@openrouter/ai-sdk-provider';
 export { type XaiProvider } from '@ai-sdk/xai';
+export { type GatewayProvider } from '@ai-sdk/gateway';
diff --git a/packages/sdk/ts/src/providers/vercel.ts b/packages/sdk/ts/src/providers/vercel.ts
new file mode 100644
index 000000000..a3250baf4
--- /dev/null
+++ b/packages/sdk/ts/src/providers/vercel.ts
@@ -0,0 +1,26 @@
+import {
+  createGatewayProvider,
+  GatewayProvider,
+} from '@ai-sdk/gateway';
+import { ROUTER_BASE_URL } from '../config';
+import { EchoConfig } from '../types';
+import { validateAppId } from '../utils/validation';
+import { echoFetch } from './index';
+
+export function createEchoVercelGateway(
+  { appId, baseRouterUrl = ROUTER_BASE_URL }: EchoConfig,
+  getTokenFn: (appId: string) => Promise<string | null>,
+  onInsufficientFunds?: () => void
+): GatewayProvider {
+  validateAppId(appId, 'createEchoVercelGateway');
+
+  return createGatewayProvider({
+    baseURL: baseRouterUrl,
+    apiKey: 'placeholder_replaced_by_echoFetch',
+    fetch: echoFetch(
+      fetch,
+      async () => await getTokenFn(appId),
+      onInsufficientFunds
+    ),
+  });
+}
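Reviewer note: a sketch of the token flow the base factory above sets up, under the assumption that echoFetch swaps the placeholder apiKey for a live Echo token on each request. fetchEchoToken is a hypothetical app-supplied callback and the app ID is a dummy value; any function matching (appId) => Promise<string | null> should work.

import { createEchoVercelGateway } from '@merit-systems/echo-typescript-sdk';

// Hypothetical token source -- in practice this would read a session or cache.
async function fetchEchoToken(appId: string): Promise<string | null> {
  return process.env.ECHO_TOKEN ?? null;
}

const gateway = createEchoVercelGateway(
  { appId: '00000000-0000-0000-0000-000000000000' }, // dummy UUID for illustration
  fetchEchoToken,
  () => console.warn('Echo balance exhausted') // optional insufficient-funds hook
);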
diff --git a/packages/sdk/ts/src/supported-models/chat/vercel-gateway.ts b/packages/sdk/ts/src/supported-models/chat/vercel-gateway.ts
new file mode 100644
index 000000000..7376cf530
--- /dev/null
+++ b/packages/sdk/ts/src/supported-models/chat/vercel-gateway.ts
@@ -0,0 +1,272 @@
+import { SupportedModel } from '../types';
+
+// Union type of all valid Vercel AI Gateway model IDs
+// Models are prefixed with their upstream provider (e.g., openai/gpt-4o, anthropic/claude-3.5-sonnet)
+export type VercelGatewayModel =
+  | 'openai/gpt-4o'
+  | 'openai/gpt-4o-mini'
+  | 'openai/gpt-4.1'
+  | 'openai/gpt-4.1-mini'
+  | 'openai/gpt-4.1-nano'
+  | 'openai/gpt-5'
+  | 'openai/gpt-5-mini'
+  | 'openai/gpt-5-nano'
+  | 'openai/o3'
+  | 'openai/o3-mini'
+  | 'openai/o4-mini'
+  | 'anthropic/claude-3.5-sonnet'
+  | 'anthropic/claude-3.7-sonnet'
+  | 'anthropic/claude-sonnet-4'
+  | 'anthropic/claude-opus-4'
+  | 'anthropic/claude-opus-4.1'
+  | 'anthropic/claude-3-haiku'
+  | 'anthropic/claude-3.5-haiku'
+  | 'anthropic/claude-3-opus'
+  | 'google/gemini-2.5-flash'
+  | 'google/gemini-2.5-flash-lite'
+  | 'google/gemini-2.5-pro'
+  | 'google/gemini-2.0-flash'
+  | 'google/gemini-2.0-flash-lite'
+  | 'deepseek/deepseek-r1'
+  | 'deepseek/deepseek-v3'
+  | 'deepseek/deepseek-v3.1'
+  | 'meta/llama-4-maverick'
+  | 'meta/llama-4-scout'
+  | 'xai/grok-3'
+  | 'xai/grok-3-mini'
+  | 'xai/grok-3-fast'
+  | 'xai/grok-4'
+  | 'mistral/mistral-large'
+  | 'mistral/mistral-medium'
+  | 'mistral/mistral-small'
+  | (string & {});
+
+// Vercel AI Gateway model pricing
+// Source: https://vercel.com/docs/ai-gateway/pricing
+// Note: Vercel Gateway adds its own margin on top of upstream provider costs.
+// These prices reflect Vercel's listed gateway prices.
+export const VercelGatewayModels: SupportedModel[] = [
+  // OpenAI models via Vercel AI Gateway
+  {
+    model_id: 'openai/gpt-4o',
+    input_cost_per_token: 2.5e-6,
+    output_cost_per_token: 1e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/gpt-4o-mini',
+    input_cost_per_token: 1.5e-7,
+    output_cost_per_token: 6e-7,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/gpt-4.1',
+    input_cost_per_token: 2e-6,
+    output_cost_per_token: 8e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/gpt-4.1-mini',
+    input_cost_per_token: 4e-7,
+    output_cost_per_token: 1.6e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/gpt-4.1-nano',
+    input_cost_per_token: 1e-7,
+    output_cost_per_token: 4e-7,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/gpt-5',
+    input_cost_per_token: 1.5e-5,
+    output_cost_per_token: 7.5e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/gpt-5-mini',
+    input_cost_per_token: 1.1e-6,
+    output_cost_per_token: 4.4e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/gpt-5-nano',
+    input_cost_per_token: 5e-7,
+    output_cost_per_token: 2e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/o3',
+    input_cost_per_token: 2e-5,
+    output_cost_per_token: 8e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/o3-mini',
+    input_cost_per_token: 1.1e-6,
+    output_cost_per_token: 4.4e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'openai/o4-mini',
+    input_cost_per_token: 1.1e-6,
+    output_cost_per_token: 4.4e-6,
+    provider: 'VercelGateway',
+  },
+  // Anthropic models via Vercel AI Gateway
+  {
+    model_id: 'anthropic/claude-sonnet-4',
+    input_cost_per_token: 3e-6,
+    output_cost_per_token: 1.5e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'anthropic/claude-opus-4',
+    input_cost_per_token: 1.5e-5,
+    output_cost_per_token: 7.5e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'anthropic/claude-opus-4.1',
+    input_cost_per_token: 1.5e-5,
+    output_cost_per_token: 7.5e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'anthropic/claude-3.7-sonnet',
+    input_cost_per_token: 3e-6,
+    output_cost_per_token: 1.5e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'anthropic/claude-3.5-sonnet',
+    input_cost_per_token: 3e-6,
+    output_cost_per_token: 1.5e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'anthropic/claude-3.5-haiku',
+    input_cost_per_token: 8e-7,
+    output_cost_per_token: 4e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'anthropic/claude-3-haiku',
+    input_cost_per_token: 2.5e-7,
+    output_cost_per_token: 1.25e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'anthropic/claude-3-opus',
+    input_cost_per_token: 1.5e-5,
+    output_cost_per_token: 7.5e-5,
+    provider: 'VercelGateway',
+  },
+  // Google models via Vercel AI Gateway
+  {
+    model_id: 'google/gemini-2.5-pro',
+    input_cost_per_token: 1.25e-6,
+    output_cost_per_token: 1e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'google/gemini-2.5-flash',
+    input_cost_per_token: 1.5e-7,
+    output_cost_per_token: 1.5e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'google/gemini-2.5-flash-lite',
+    input_cost_per_token: 5e-8,
+    output_cost_per_token: 5e-7,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'google/gemini-2.0-flash',
+    input_cost_per_token: 1e-7,
+    output_cost_per_token: 4e-7,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'google/gemini-2.0-flash-lite',
+    input_cost_per_token: 7.5e-8,
+    output_cost_per_token: 3e-7,
+    provider: 'VercelGateway',
+  },
+  // DeepSeek models via Vercel AI Gateway
+  {
+    model_id: 'deepseek/deepseek-r1',
+    input_cost_per_token: 5.5e-6,
+    output_cost_per_token: 2.19e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'deepseek/deepseek-v3',
+    input_cost_per_token: 2.7e-7,
+    output_cost_per_token: 1.1e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'deepseek/deepseek-v3.1',
+    input_cost_per_token: 2.7e-7,
+    output_cost_per_token: 1.1e-6,
+    provider: 'VercelGateway',
+  },
+  // xAI models via Vercel AI Gateway
+  {
+    model_id: 'xai/grok-3',
+    input_cost_per_token: 3e-6,
+    output_cost_per_token: 1.5e-5,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'xai/grok-3-mini',
+    input_cost_per_token: 3e-7,
+    output_cost_per_token: 1.5e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'xai/grok-3-fast',
+    input_cost_per_token: 5e-7,
+    output_cost_per_token: 2.5e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'xai/grok-4',
+    input_cost_per_token: 3e-6,
+    output_cost_per_token: 1.5e-5,
+    provider: 'VercelGateway',
+  },
+  // Meta Llama models via Vercel AI Gateway
+  {
+    model_id: 'meta/llama-4-maverick',
+    input_cost_per_token: 2e-7,
+    output_cost_per_token: 6e-7,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'meta/llama-4-scout',
+    input_cost_per_token: 1e-7,
+    output_cost_per_token: 3e-7,
+    provider: 'VercelGateway',
+  },
+  // Mistral models via Vercel AI Gateway
+  {
+    model_id: 'mistral/mistral-large',
+    input_cost_per_token: 2e-6,
+    output_cost_per_token: 6e-6,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'mistral/mistral-medium',
+    input_cost_per_token: 2.75e-7,
+    output_cost_per_token: 8.25e-7,
+    provider: 'VercelGateway',
+  },
+  {
+    model_id: 'mistral/mistral-small',
+    input_cost_per_token: 1e-7,
+    output_cost_per_token: 3e-7,
+    provider: 'VercelGateway',
+  },
+];
diff --git a/packages/sdk/ts/src/supported-models/index.ts b/packages/sdk/ts/src/supported-models/index.ts
index 3f641501d..b85a35cc9 100644
--- a/packages/sdk/ts/src/supported-models/index.ts
+++ b/packages/sdk/ts/src/supported-models/index.ts
@@ -4,6 +4,7 @@ export * from './chat/anthropic';
 export * from './chat/gemini';
 export * from './chat/openai';
 export * from './chat/openrouter';
+export * from './chat/vercel-gateway';
 export * from './image/openai';
 export * from './responses/openai';
 export * from './video/gemini';
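Reviewer note: a quick worked example of what the pricing table implies for accounting, using the listed 'openai/gpt-4o' rates (the token counts are invented):

// 1,000 prompt tokens and 500 completion tokens through 'openai/gpt-4o':
const inputRate = 2.5e-6; // input_cost_per_token from VercelGatewayModels
const outputRate = 1e-5; // output_cost_per_token from VercelGatewayModels
const cost = 1000 * inputRate + 500 * outputRate;
// = 0.0025 + 0.005 = 0.0075 USD -- the raw transaction cost getCostPerToken
// should produce once VercelGatewayModels is spread into ALL_SUPPORTED_MODELS.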