Initial commit: Setting up OpenRouter MCP server for multimodal capabilities

stabgan
2025-03-26 22:57:06 +05:30
commit 57eb800f9a
17 changed files with 1895 additions and 0 deletions

67
src/index.ts Normal file

@@ -0,0 +1,67 @@
#!/usr/bin/env node
// OpenRouter Multimodal MCP Server
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { ToolHandlers } from './tool-handlers.js';
class OpenRouterMultimodalServer {
private server: Server;
private toolHandlers!: ToolHandlers; // Using definite assignment assertion
constructor() {
// Get API key and default model from environment variables
const apiKey = process.env.OPENROUTER_API_KEY;
const defaultModel = process.env.OPENROUTER_DEFAULT_MODEL;
// Check if API key is provided
if (!apiKey) {
throw new Error('OPENROUTER_API_KEY environment variable is required');
}
// Initialize the server
this.server = new Server(
{
name: 'openrouter-multimodal-server',
version: '1.0.0',
},
{
capabilities: {
tools: {},
},
}
);
// Set up error handling
this.server.onerror = (error) => console.error('[MCP Error]', error);
// Initialize tool handlers
this.toolHandlers = new ToolHandlers(
this.server,
apiKey,
defaultModel
);
process.on('SIGINT', async () => {
await this.server.close();
process.exit(0);
});
}
async run() {
const transport = new StdioServerTransport();
await this.server.connect(transport);
console.error('OpenRouter Multimodal MCP server running on stdio');
console.error('Using API key from environment variable');
console.error('Note: To use OpenRouter Multimodal, add the API key to your environment variables:');
console.error(' OPENROUTER_API_KEY=your-api-key');
if (process.env.OPENROUTER_DEFAULT_MODEL) {
console.error(` Using default model: ${process.env.OPENROUTER_DEFAULT_MODEL}`);
} else {
console.error(' No default model set. You will need to specify a model in each request.');
}
}
}
const server = new OpenRouterMultimodalServer();
server.run().catch(console.error);
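
A minimal client-side smoke test for this entry point, assuming the MCP SDK's stdio client API; the build output path and API key value are illustrative:

// Hypothetical smoke test; 'build/index.js' is an assumed compiled output path.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

const transport = new StdioClientTransport({
  command: 'node',
  args: ['build/index.js'],
  env: { OPENROUTER_API_KEY: 'your-api-key' },
});
const client = new Client({ name: 'smoke-test', version: '0.0.1' }, { capabilities: {} });
await client.connect(transport);
console.log(await client.listTools()); // should list the six tools registered below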

167
src/model-cache.ts Normal file

@@ -0,0 +1,167 @@
/**
* ModelCache - Caches OpenRouter model data to reduce API calls
*/
export class ModelCache {
private static instance: ModelCache;
private models: Record<string, any>;
private lastFetchTime: number;
private cacheExpiryTime: number; // in milliseconds (1 hour = 3600000)
private constructor() {
this.models = {};
this.lastFetchTime = 0;
this.cacheExpiryTime = 3600000; // 1 hour
}
/**
* Get singleton instance
*/
public static getInstance(): ModelCache {
if (!ModelCache.instance) {
ModelCache.instance = new ModelCache();
}
return ModelCache.instance;
}
/**
* Check if the cache is valid
*/
public isCacheValid(): boolean {
return (
Object.keys(this.models).length > 0 &&
Date.now() - this.lastFetchTime < this.cacheExpiryTime
);
}
/**
* Store all models
*/
public setModels(models: any[]): void {
this.models = {};
for (const model of models) {
this.models[model.id] = model;
}
this.lastFetchTime = Date.now();
}
/**
* Get all cached models
*/
public getAllModels(): any[] {
return Object.values(this.models);
}
/**
* Get a specific model by ID
*/
public getModel(modelId: string): any | null {
return this.models[modelId] || null;
}
/**
* Check if a model exists
*/
public hasModel(modelId: string): boolean {
return !!this.models[modelId];
}
/**
* Search models based on criteria
*/
public searchModels(params: {
query?: string;
provider?: string;
minContextLength?: number;
maxContextLength?: number;
maxPromptPrice?: number;
maxCompletionPrice?: number;
capabilities?: {
functions?: boolean;
tools?: boolean;
vision?: boolean;
json_mode?: boolean;
};
limit?: number;
}): any[] {
let results = this.getAllModels();
// Apply text search
if (params.query) {
const query = params.query.toLowerCase();
results = results.filter((model) =>
model.id.toLowerCase().includes(query) ||
(model.description && model.description.toLowerCase().includes(query)) ||
(model.provider && model.provider.toLowerCase().includes(query))
);
}
// Filter by provider
if (params.provider) {
results = results.filter((model) =>
model.provider && model.provider.toLowerCase() === params.provider!.toLowerCase()
);
}
// Filter by context length
    if (params.minContextLength) {
      results = results.filter(
        (model) => model.context_length >= params.minContextLength!
      );
    }
    if (params.maxContextLength) {
      results = results.filter(
        (model) => model.context_length <= params.maxContextLength!
      );
    }
    // Filter by price
    if (params.maxPromptPrice) {
      results = results.filter(
        (model) =>
          !model.pricing?.prompt || model.pricing.prompt <= params.maxPromptPrice!
      );
    }
    if (params.maxCompletionPrice) {
      results = results.filter(
        (model) =>
          !model.pricing?.completion ||
          model.pricing.completion <= params.maxCompletionPrice!
      );
    }
// Filter by capabilities
if (params.capabilities) {
if (params.capabilities.functions) {
results = results.filter(
(model) => model.capabilities?.function_calling
);
}
if (params.capabilities.tools) {
results = results.filter((model) => model.capabilities?.tools);
}
if (params.capabilities.vision) {
results = results.filter((model) => model.capabilities?.vision);
}
if (params.capabilities.json_mode) {
results = results.filter((model) => model.capabilities?.json_mode);
}
}
// Apply limit
if (params.limit && params.limit > 0) {
results = results.slice(0, params.limit);
}
return results;
}
/**
* Reset the cache
*/
public resetCache(): void {
this.models = {};
this.lastFetchTime = 0;
}
}
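
A short usage sketch of the cache above; the model entry is illustrative but uses the same fields searchModels reads (id, provider, context_length):

// Usage sketch for ModelCache; the model object's values are illustrative.
import { ModelCache } from './model-cache.js';

const cache = ModelCache.getInstance();
cache.setModels([
  { id: 'anthropic/claude-3.5-sonnet', provider: 'anthropic', context_length: 200000 },
]);
const hits = cache.searchModels({ provider: 'anthropic', minContextLength: 100000, limit: 5 });
console.log(hits.map((m) => m.id)); // ['anthropic/claude-3.5-sonnet']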

130
src/openrouter-api.ts Normal file

@@ -0,0 +1,130 @@
import axios, { AxiosError, AxiosInstance } from 'axios';
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
/**
* Client for interacting with the OpenRouter API
*/
export class OpenRouterAPIClient {
private apiKey: string;
private axiosInstance: AxiosInstance;
private retryCount: number = 3;
private retryDelay: number = 1000; // Initial delay in ms
constructor(apiKey: string) {
this.apiKey = apiKey;
this.axiosInstance = axios.create({
baseURL: 'https://openrouter.ai/api/v1',
headers: {
'Authorization': `Bearer ${this.apiKey}`,
'Content-Type': 'application/json',
'HTTP-Referer': 'https://github.com/stabgan/openrouter-mcp-multimodal',
'X-Title': 'OpenRouter MCP Multimodal Server'
},
timeout: 60000 // 60 seconds timeout
});
}
/**
* Get all available models from OpenRouter
*/
public async getModels(): Promise<any[]> {
try {
const response = await this.axiosInstance.get('/models');
return response.data.data;
} catch (error) {
this.handleRequestError(error);
return [];
}
}
/**
* Send a request to the OpenRouter API with retry functionality
*/
public async request(endpoint: string, method: string, data?: any): Promise<any> {
let lastError: Error | null = null;
let retries = 0;
while (retries <= this.retryCount) {
try {
const response = await this.axiosInstance.request({
url: endpoint,
method,
data
});
return response.data;
} catch (error) {
lastError = this.handleRetryableError(error, retries);
retries++;
if (retries <= this.retryCount) {
// Exponential backoff with jitter
const delay = this.retryDelay * Math.pow(2, retries - 1) * (0.5 + Math.random() * 0.5);
console.error(`Retrying in ${Math.round(delay)}ms (${retries}/${this.retryCount})`);
await new Promise(resolve => setTimeout(resolve, delay));
}
}
}
// If we get here, all retries failed
throw lastError || new Error('Request failed after multiple retries');
}
/**
* Handle retryable errors
*/
private handleRetryableError(error: any, retryCount: number): Error {
if (axios.isAxiosError(error)) {
const axiosError = error as AxiosError;
// Rate limiting (429) or server errors (5xx)
if (axiosError.response?.status === 429 || (axiosError.response?.status && axiosError.response.status >= 500)) {
console.error(`Request error (retry ${retryCount}): ${axiosError.message}`);
if (axiosError.response?.status === 429) {
console.error('Rate limit exceeded. Retrying with backoff...');
}
return new Error(`OpenRouter API error: ${axiosError.response?.status} ${axiosError.message}`);
}
// For other status codes, don't retry
if (axiosError.response) {
const responseData = axiosError.response.data as any;
const message = responseData?.error?.message || axiosError.message;
        throw new McpError(ErrorCode.InternalError, `OpenRouter API error: ${message}`);
}
}
// Network errors should be retried
console.error(`Network error (retry ${retryCount}): ${error.message}`);
return new Error(`Network error: ${error.message}`);
}
/**
* Handle request errors
*/
private handleRequestError(error: any): never {
console.error('Error in OpenRouter API request:', error);
if (axios.isAxiosError(error)) {
const axiosError = error as AxiosError;
if (axiosError.response) {
const status = axiosError.response.status;
const responseData = axiosError.response.data as any;
const message = responseData?.error?.message || axiosError.message;
        if (status === 401 || status === 403) {
          throw new McpError(ErrorCode.InvalidRequest, `Authentication error: ${message}`);
        } else if (status === 429) {
          throw new McpError(ErrorCode.InternalError, `Rate limit exceeded: ${message}`);
        } else {
          throw new McpError(ErrorCode.InternalError, `OpenRouter API error (${status}): ${message}`);
        }
      } else if (axiosError.request) {
        throw new McpError(ErrorCode.InternalError, `Network error: ${axiosError.message}`);
      }
    }
    throw new McpError(ErrorCode.InternalError, `Unknown error: ${error.message || 'No error message'}`);
}
}
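
The retry loop above doubles a 1-second base delay per attempt and applies 50-100% jitter; the same formula in isolation:

// Mirrors the delay computed in request(): base * 2^(retry-1) * jitter in [0.5, 1.0).
function backoffDelay(retry: number, baseMs = 1000): number {
  // retry 1 => 500-1000 ms, retry 2 => 1000-2000 ms, retry 3 => 2000-4000 ms
  return baseMs * Math.pow(2, retry - 1) * (0.5 + Math.random() * 0.5);
}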

347
src/tool-handlers.ts Normal file

@@ -0,0 +1,347 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import {
CallToolRequestSchema,
ErrorCode,
ListToolsRequestSchema,
McpError,
} from '@modelcontextprotocol/sdk/types.js';
import OpenAI from 'openai';
import { ModelCache } from './model-cache.js';
import { OpenRouterAPIClient } from './openrouter-api.js';
// Import tool handlers
import { handleChatCompletion, ChatCompletionToolRequest } from './tool-handlers/chat-completion.js';
import { handleSearchModels, SearchModelsToolRequest } from './tool-handlers/search-models.js';
import { handleGetModelInfo, GetModelInfoToolRequest } from './tool-handlers/get-model-info.js';
import { handleValidateModel, ValidateModelToolRequest } from './tool-handlers/validate-model.js';
import { handleAnalyzeImage, AnalyzeImageToolRequest } from './tool-handlers/analyze-image.js';
import { handleMultiImageAnalysis, MultiImageAnalysisToolRequest } from './tool-handlers/multi-image-analysis.js';
export class ToolHandlers {
private server: Server;
private openai: OpenAI;
private modelCache: ModelCache;
private apiClient: OpenRouterAPIClient;
private defaultModel?: string;
constructor(
server: Server,
apiKey: string,
defaultModel?: string
) {
this.server = server;
this.modelCache = ModelCache.getInstance();
this.apiClient = new OpenRouterAPIClient(apiKey);
this.defaultModel = defaultModel;
this.openai = new OpenAI({
apiKey: apiKey,
baseURL: 'https://openrouter.ai/api/v1',
defaultHeaders: {
'HTTP-Referer': 'https://github.com/stabgan/openrouter-mcp-multimodal',
'X-Title': 'OpenRouter MCP Multimodal Server',
},
});
this.setupToolHandlers();
}
private setupToolHandlers() {
this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
tools: [
// Chat Completion Tool
{
name: 'chat_completion',
description: 'Send a message to OpenRouter.ai and get a response',
inputSchema: {
type: 'object',
properties: {
model: {
type: 'string',
description: 'The model to use (e.g., "google/gemini-2.5-pro-exp-03-25:free", "undi95/toppy-m-7b:free"). If not provided, uses the default model if set.',
},
messages: {
type: 'array',
description: 'An array of conversation messages with roles and content',
minItems: 1,
maxItems: 100,
items: {
type: 'object',
properties: {
role: {
type: 'string',
enum: ['system', 'user', 'assistant'],
description: 'The role of the message sender',
},
content: {
oneOf: [
{
type: 'string',
description: 'The text content of the message',
},
{
type: 'array',
description: 'Array of content parts for multimodal messages (text and images)',
items: {
type: 'object',
properties: {
type: {
type: 'string',
enum: ['text', 'image_url'],
description: 'The type of content (text or image)',
},
text: {
type: 'string',
description: 'The text content (for text type)',
},
image_url: {
type: 'object',
description: 'The image URL object (for image_url type)',
properties: {
url: {
type: 'string',
description: 'URL of the image (can be a data URL with base64)',
},
},
required: ['url'],
},
},
required: ['type'],
},
},
],
},
},
required: ['role', 'content'],
},
},
temperature: {
type: 'number',
description: 'Sampling temperature (0-2)',
minimum: 0,
maximum: 2,
},
},
required: ['messages'],
},
maxContextTokens: 200000
},
// Image Analysis Tool
{
name: 'analyze_image',
description: 'Analyze an image using OpenRouter vision models',
inputSchema: {
type: 'object',
properties: {
image_path: {
type: 'string',
description: 'Path to the image file to analyze (must be an absolute path)',
},
question: {
type: 'string',
description: 'Question to ask about the image',
},
model: {
type: 'string',
description: 'OpenRouter model to use (e.g., "anthropic/claude-3.5-sonnet")',
},
},
required: ['image_path'],
},
},
// Multi-Image Analysis Tool
{
name: 'multi_image_analysis',
description: 'Analyze multiple images at once with a single prompt and receive detailed responses',
inputSchema: {
type: 'object',
properties: {
images: {
type: 'array',
description: 'Array of image objects to analyze',
items: {
type: 'object',
properties: {
url: {
type: 'string',
description: 'URL or data URL of the image (can be a file:// URL to read from local filesystem)',
},
alt: {
type: 'string',
description: 'Optional alt text or description of the image',
},
},
required: ['url'],
},
},
prompt: {
type: 'string',
description: 'Prompt for analyzing the images',
},
markdown_response: {
type: 'boolean',
description: 'Whether to format the response in Markdown (default: true)',
default: true,
},
model: {
type: 'string',
description: 'OpenRouter model to use (defaults to claude-3.5-sonnet if not specified)',
},
},
required: ['images', 'prompt'],
},
},
// Search Models Tool
{
name: 'search_models',
description: 'Search and filter OpenRouter.ai models based on various criteria',
inputSchema: {
type: 'object',
properties: {
query: {
type: 'string',
description: 'Optional search query to filter by name, description, or provider',
},
provider: {
type: 'string',
description: 'Filter by specific provider (e.g., "anthropic", "openai", "cohere")',
},
minContextLength: {
type: 'number',
description: 'Minimum context length in tokens',
},
maxContextLength: {
type: 'number',
description: 'Maximum context length in tokens',
},
maxPromptPrice: {
type: 'number',
description: 'Maximum price per 1K tokens for prompts',
},
maxCompletionPrice: {
type: 'number',
description: 'Maximum price per 1K tokens for completions',
},
capabilities: {
type: 'object',
description: 'Filter by model capabilities',
properties: {
functions: {
type: 'boolean',
description: 'Requires function calling capability',
},
tools: {
type: 'boolean',
description: 'Requires tools capability',
},
vision: {
type: 'boolean',
description: 'Requires vision capability',
},
json_mode: {
type: 'boolean',
description: 'Requires JSON mode capability',
}
}
},
limit: {
type: 'number',
description: 'Maximum number of results to return (default: 10)',
minimum: 1,
maximum: 50
}
}
},
},
// Get Model Info Tool
{
name: 'get_model_info',
description: 'Get detailed information about a specific model',
inputSchema: {
type: 'object',
properties: {
model: {
type: 'string',
description: 'The model ID to get information for',
},
},
required: ['model'],
},
},
// Validate Model Tool
{
name: 'validate_model',
description: 'Check if a model ID is valid',
inputSchema: {
type: 'object',
properties: {
model: {
type: 'string',
description: 'The model ID to validate',
},
},
required: ['model'],
},
},
],
}));
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
switch (request.params.name) {
case 'chat_completion':
return handleChatCompletion({
params: {
arguments: request.params.arguments as unknown as ChatCompletionToolRequest
}
}, this.openai, this.defaultModel);
case 'analyze_image':
return handleAnalyzeImage({
params: {
arguments: request.params.arguments as unknown as AnalyzeImageToolRequest
}
}, this.openai, this.defaultModel);
case 'multi_image_analysis':
return handleMultiImageAnalysis({
params: {
arguments: request.params.arguments as unknown as MultiImageAnalysisToolRequest
}
}, this.openai, this.defaultModel);
case 'search_models':
return handleSearchModels({
params: {
arguments: request.params.arguments as SearchModelsToolRequest
}
}, this.apiClient, this.modelCache);
case 'get_model_info':
return handleGetModelInfo({
params: {
arguments: request.params.arguments as unknown as GetModelInfoToolRequest
}
}, this.modelCache);
case 'validate_model':
return handleValidateModel({
params: {
arguments: request.params.arguments as unknown as ValidateModelToolRequest
}
}, this.modelCache);
default:
throw new McpError(
ErrorCode.MethodNotFound,
`Unknown tool: ${request.params.name}`
);
}
});
}
}
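
For reference, an arguments object that satisfies the chat_completion input schema above (the model ID and image data are illustrative):

// Example chat_completion arguments; shape only, not executed anywhere.
const exampleArgs = {
  model: 'anthropic/claude-3.5-sonnet',
  messages: [
    { role: 'system', content: 'You are a concise assistant.' },
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Describe this image.' },
        { type: 'image_url', image_url: { url: 'data:image/jpeg;base64,...' } },
      ],
    },
  ],
  temperature: 0.7,
};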

116
src/tool-handlers/analyze-image.ts Normal file

@@ -0,0 +1,116 @@
import path from 'path';
import { promises as fs } from 'fs';
import sharp from 'sharp';
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import OpenAI from 'openai';
export interface AnalyzeImageToolRequest {
image_path: string;
question?: string;
model?: string;
}
export async function handleAnalyzeImage(
request: { params: { arguments: AnalyzeImageToolRequest } },
openai: OpenAI,
defaultModel?: string
) {
const args = request.params.arguments;
try {
// Validate image path
const imagePath = args.image_path;
if (!path.isAbsolute(imagePath)) {
      throw new McpError(ErrorCode.InvalidParams, 'Image path must be absolute');
}
// Read image file
const imageBuffer = await fs.readFile(imagePath);
console.error(`Successfully read image buffer of size: ${imageBuffer.length}`);
// Get image metadata
const metadata = await sharp(imageBuffer).metadata();
console.error('Image metadata:', metadata);
// Calculate dimensions to keep base64 size reasonable
    const MAX_DIMENSION = 800; // Cap the longer side to keep the base64 payload reasonable
    const JPEG_QUALITY = 80; // JPEG re-encode quality
let resizedBuffer = imageBuffer;
if (metadata.width && metadata.height) {
const largerDimension = Math.max(metadata.width, metadata.height);
if (largerDimension > MAX_DIMENSION) {
const resizeOptions = metadata.width > metadata.height
? { width: MAX_DIMENSION }
: { height: MAX_DIMENSION };
resizedBuffer = await sharp(imageBuffer)
.resize(resizeOptions)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
} else {
resizedBuffer = await sharp(imageBuffer)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
}
}
// Convert to base64
const base64Image = resizedBuffer.toString('base64');
// Select model
const model = args.model || defaultModel || 'anthropic/claude-3.5-sonnet';
// Prepare message with image
    const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
{
role: 'user',
content: [
{
type: 'text',
text: args.question || "What's in this image?"
},
{
type: 'image_url',
image_url: {
url: `data:image/jpeg;base64,${base64Image}`
}
}
]
}
];
console.error('Sending request to OpenRouter...');
// Call OpenRouter API
const completion = await openai.chat.completions.create({
model,
messages,
});
return {
content: [
{
type: 'text',
text: completion.choices[0].message.content || '',
},
],
};
} catch (error) {
console.error('Error analyzing image:', error);
if (error instanceof McpError) {
throw error;
}
return {
content: [
{
type: 'text',
text: `Error analyzing image: ${error instanceof Error ? error.message : String(error)}`,
},
],
isError: true,
};
}
}
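
The preprocessing above caps the longer side at 800 px and re-encodes as JPEG at quality 80 before base64 encoding, which inflates the byte size by roughly 4/3. A quick size sketch (the input path is hypothetical):

// Rough size check for the data-URL payload built above; '/tmp/photo.png' is illustrative.
import sharp from 'sharp';

const jpeg = await sharp('/tmp/photo.png')
  .resize({ width: 800 })
  .jpeg({ quality: 80 })
  .toBuffer();
console.log(`base64 payload ~ ${Math.ceil((jpeg.length * 4) / 3)} bytes`);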

135
src/tool-handlers/chat-completion.ts Normal file

@@ -0,0 +1,135 @@
import OpenAI from 'openai';
import { ChatCompletionMessageParam } from 'openai/resources/chat/completions.js';
// Maximum context tokens
const MAX_CONTEXT_TOKENS = 200000;
export interface ChatCompletionToolRequest {
model?: string;
messages: ChatCompletionMessageParam[];
temperature?: number;
}
// Utility function to estimate token count (simplified)
function estimateTokenCount(text: string): number {
// Rough approximation: 4 characters per token
return Math.ceil(text.length / 4);
}
// Truncate messages to fit within the context window
function truncateMessagesToFit(
messages: ChatCompletionMessageParam[],
maxTokens: number
): ChatCompletionMessageParam[] {
  const systemMessage = messages[0]?.role === 'system' ? messages[0] : null;
  const tail: ChatCompletionMessageParam[] = [];
  let currentTokenCount = 0;
  // Reserve tokens for the system message so it is never dropped
  if (systemMessage) {
    currentTokenCount += estimateTokenCount(systemMessage.content as string);
  }
  // Add messages from the end, respecting the token limit
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i];
    // Skip the system message we've already accounted for
    if (i === 0 && message.role === 'system') continue;
    // For string content, estimate tokens directly
    if (typeof message.content === 'string') {
      const messageTokens = estimateTokenCount(message.content);
      if (currentTokenCount + messageTokens > maxTokens) break;
      tail.unshift(message);
      currentTokenCount += messageTokens;
    }
    // For multimodal content (array), estimate tokens for text parts
    else if (Array.isArray(message.content)) {
      let messageTokens = 0;
      for (const part of message.content) {
        if (part.type === 'text' && part.text) {
          messageTokens += estimateTokenCount(part.text);
        } else if (part.type === 'image_url') {
          // Flat token cost estimate for images - a simplification;
          // actual image token costs depend on resolution and model
          messageTokens += 1000;
        }
      }
      if (currentTokenCount + messageTokens > maxTokens) break;
      tail.unshift(message);
      currentTokenCount += messageTokens;
    }
  }
  // Prepend the system message so it stays first in the returned array
  return systemMessage ? [systemMessage, ...tail] : tail;
}
export async function handleChatCompletion(
request: { params: { arguments: ChatCompletionToolRequest } },
openai: OpenAI,
defaultModel?: string
) {
const args = request.params.arguments;
// Validate model selection
const model = args.model || defaultModel;
if (!model) {
return {
content: [
{
type: 'text',
text: 'No model specified and no default model configured in MCP settings. Please specify a model or set OPENROUTER_DEFAULT_MODEL in the MCP configuration.',
},
],
isError: true,
};
}
// Validate message array
if (args.messages.length === 0) {
return {
content: [
{
type: 'text',
text: 'Messages array cannot be empty. At least one message is required.',
},
],
isError: true,
};
}
try {
// Truncate messages to fit within context window
const truncatedMessages = truncateMessagesToFit(args.messages, MAX_CONTEXT_TOKENS);
const completion = await openai.chat.completions.create({
model,
messages: truncatedMessages,
temperature: args.temperature ?? 1,
});
return {
content: [
{
type: 'text',
text: completion.choices[0].message.content || '',
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `OpenRouter API error: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}
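
To make the truncation concrete: at roughly 4 characters per token, a 900,000-character message estimates to ~225,000 tokens, exceeding MAX_CONTEXT_TOKENS on its own, so only newer messages survive. A sketch (message text is illustrative):

// Illustrates the drop-oldest-first policy of truncateMessagesToFit.
const history: ChatCompletionMessageParam[] = [
  { role: 'system', content: 'Keep answers short.' },      // always kept
  { role: 'user', content: 'x'.repeat(900_000) },          // ~225,000 tokens, dropped
  { role: 'user', content: 'And the latest question?' },   // kept
];
// truncateMessagesToFit(history, MAX_CONTEXT_TOKENS) returns the system message
// followed by the latest user message.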

54
src/tool-handlers/get-model-info.ts Normal file

@@ -0,0 +1,54 @@
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import { ModelCache } from '../model-cache.js';
export interface GetModelInfoToolRequest {
model: string;
}
export async function handleGetModelInfo(
request: { params: { arguments: GetModelInfoToolRequest } },
modelCache: ModelCache
) {
const args = request.params.arguments;
try {
if (!modelCache.isCacheValid()) {
return {
content: [
{
type: 'text',
text: 'Model cache is empty or expired. Please call search_models first to populate the cache.',
},
],
isError: true,
};
}
const model = modelCache.getModel(args.model);
if (!model) {
      throw new McpError(ErrorCode.InvalidParams, `Model '${args.model}' not found`);
}
return {
content: [
{
type: 'text',
text: JSON.stringify(model, null, 2),
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `Error retrieving model info: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}

168
src/tool-handlers/multi-image-analysis.ts Normal file

@@ -0,0 +1,168 @@
import fetch from 'node-fetch';
import sharp from 'sharp';
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import OpenAI from 'openai';
export interface MultiImageAnalysisToolRequest {
images: Array<{
url: string;
alt?: string;
}>;
prompt: string;
markdown_response?: boolean;
model?: string;
}
async function fetchImageAsBuffer(url: string): Promise<Buffer> {
try {
// Handle data URLs
if (url.startsWith('data:')) {
const matches = url.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
if (!matches || matches.length !== 3) {
throw new Error('Invalid data URL');
}
return Buffer.from(matches[2], 'base64');
}
// Handle file URLs
if (url.startsWith('file://')) {
const filePath = url.replace('file://', '');
const fs = await import('fs/promises');
return await fs.readFile(filePath);
}
// Handle http/https URLs
const response = await fetch(url);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
return Buffer.from(await response.arrayBuffer());
} catch (error) {
console.error(`Error fetching image from ${url}:`, error);
throw error;
}
}
async function processImage(buffer: Buffer): Promise<string> {
try {
// Get image metadata
const metadata = await sharp(buffer).metadata();
// Calculate dimensions to keep base64 size reasonable
const MAX_DIMENSION = 800;
const JPEG_QUALITY = 80;
if (metadata.width && metadata.height) {
const largerDimension = Math.max(metadata.width, metadata.height);
if (largerDimension > MAX_DIMENSION) {
const resizeOptions = metadata.width > metadata.height
? { width: MAX_DIMENSION }
: { height: MAX_DIMENSION };
const resizedBuffer = await sharp(buffer)
.resize(resizeOptions)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
return resizedBuffer.toString('base64');
}
}
// If no resizing needed, just convert to JPEG
const jpegBuffer = await sharp(buffer)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
return jpegBuffer.toString('base64');
} catch (error) {
console.error('Error processing image:', error);
throw error;
}
}
export async function handleMultiImageAnalysis(
request: { params: { arguments: MultiImageAnalysisToolRequest } },
openai: OpenAI,
defaultModel?: string
) {
const args = request.params.arguments;
try {
// Validate inputs
if (!args.images || args.images.length === 0) {
      throw new McpError(ErrorCode.InvalidParams, 'At least one image is required');
}
if (!args.prompt) {
      throw new McpError(ErrorCode.InvalidParams, 'A prompt is required');
}
// Prepare content array for the message
const content: Array<any> = [{
type: 'text',
text: args.prompt
}];
// Process each image
for (const image of args.images) {
try {
// Fetch and process the image
const imageBuffer = await fetchImageAsBuffer(image.url);
const base64Image = await processImage(imageBuffer);
// Add to content
content.push({
type: 'image_url',
image_url: {
url: `data:image/jpeg;base64,${base64Image}`
}
});
} catch (error) {
console.error(`Error processing image ${image.url}:`, error);
// Continue with other images if one fails
}
}
// If no images were successfully processed
if (content.length === 1) {
throw new Error('Failed to process any of the provided images');
}
// Select model
const model = args.model || defaultModel || 'anthropic/claude-3.5-sonnet';
// Make the API call
const completion = await openai.chat.completions.create({
model,
messages: [{
role: 'user',
content
}]
});
return {
content: [
{
type: 'text',
text: completion.choices[0].message.content || '',
},
],
};
} catch (error) {
console.error('Error in multi-image analysis:', error);
if (error instanceof McpError) {
throw error;
}
return {
content: [
{
type: 'text',
text: `Error analyzing images: ${error instanceof Error ? error.message : String(error)}`,
},
],
isError: true,
};
}
}
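
A sketch of arguments exercising all three URL schemes fetchImageAsBuffer handles (URLs, paths, and prompt are illustrative):

// Hypothetical multi_image_analysis arguments; each URL maps to a branch above.
const exampleRequest: MultiImageAnalysisToolRequest = {
  images: [
    { url: 'https://example.com/chart.png', alt: 'sales chart' }, // fetched over HTTP
    { url: 'file:///home/user/screenshot.jpg' },                  // read from disk
    { url: 'data:image/jpeg;base64,/9j/4AAQ' },                   // decoded in place
  ],
  prompt: 'Compare these images and summarize the differences.',
  markdown_response: true,
};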

68
src/tool-handlers/search-models.ts Normal file

@@ -0,0 +1,68 @@
import { ModelCache } from '../model-cache.js';
import { OpenRouterAPIClient } from '../openrouter-api.js';
export interface SearchModelsToolRequest {
query?: string;
provider?: string;
minContextLength?: number;
maxContextLength?: number;
maxPromptPrice?: number;
maxCompletionPrice?: number;
capabilities?: {
functions?: boolean;
tools?: boolean;
vision?: boolean;
json_mode?: boolean;
};
limit?: number;
}
export async function handleSearchModels(
request: { params: { arguments: SearchModelsToolRequest } },
apiClient: OpenRouterAPIClient,
modelCache: ModelCache
) {
const args = request.params.arguments;
try {
// Refresh the cache if needed
if (!modelCache.isCacheValid()) {
const models = await apiClient.getModels();
modelCache.setModels(models);
}
// Search models based on criteria
const results = modelCache.searchModels({
query: args.query,
provider: args.provider,
minContextLength: args.minContextLength,
maxContextLength: args.maxContextLength,
maxPromptPrice: args.maxPromptPrice,
maxCompletionPrice: args.maxCompletionPrice,
capabilities: args.capabilities,
limit: args.limit || 10,
});
return {
content: [
{
type: 'text',
text: JSON.stringify(results, null, 2),
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `Error searching models: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}
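
Note the ordering dependency: search_models is the only handler that populates the cache, so get_model_info and validate_model expect it to have run first. A sketch of that ordering, reusing the hypothetical client from the earlier smoke test:

// Hypothetical call ordering; 'client' is the MCP Client from the smoke test above.
await client.callTool({ name: 'search_models', arguments: { provider: 'anthropic', limit: 5 } });
await client.callTool({ name: 'validate_model', arguments: { model: 'anthropic/claude-3.5-sonnet' } });
await client.callTool({ name: 'get_model_info', arguments: { model: 'anthropic/claude-3.5-sonnet' } });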

50
src/tool-handlers/validate-model.ts Normal file

@@ -0,0 +1,50 @@
import { ModelCache } from '../model-cache.js';
export interface ValidateModelToolRequest {
model: string;
}
export async function handleValidateModel(
request: { params: { arguments: ValidateModelToolRequest } },
modelCache: ModelCache
) {
const args = request.params.arguments;
try {
if (!modelCache.isCacheValid()) {
return {
content: [
{
type: 'text',
text: 'Model cache is empty or expired. Please call search_models first to populate the cache.',
},
],
isError: true,
};
}
const isValid = modelCache.hasModel(args.model);
return {
content: [
{
type: 'text',
text: JSON.stringify({ valid: isValid }),
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `Error validating model: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}