Initial commit: Setting up OpenRouter MCP server for multimodal capabilities

Author: stabgan
Date: 2025-03-26 22:57:06 +05:30
Commit: 57eb800f9a
17 changed files with 1895 additions and 0 deletions

.github/workflows/docker-publish.yml vendored Normal file

@@ -0,0 +1,84 @@
name: Docker Image and NPM Package CI/CD
on:
push:
branches: [ main ]
tags: [ 'v*' ]
pull_request:
branches: [ main ]
jobs:
build-and-push-docker:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: |
ghcr.io/stabgan/openrouter-mcp-multimodal
stabgan/openrouter-mcp-multimodal
tags: |
type=semver,pattern={{version}}
type=ref,event=branch
type=sha,format=short
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
publish-npm:
runs-on: ubuntu-latest
# Only run on tagged versions (v*)
if: startsWith(github.ref, 'refs/tags/v')
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Setup Node.js
uses: actions/setup-node@v3
with:
node-version: '18.x'
registry-url: 'https://registry.npmjs.org/'
- name: Install dependencies
run: npm ci
- name: Build package
run: npm run build
- name: Publish to NPM
run: npm publish --access=public
env:
NODE_AUTH_TOKEN: ${{ secrets.NPMJS_TOKEN }}

.gitignore vendored Normal file

@@ -0,0 +1,50 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Dependency directories
node_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Build outputs
dist
build
# dotenv environment variables file
.env
.env.test
.env.local
# IDE files
.idea/
.vscode/
*.iml
*.iws
*.ipr
*.swp
*.swo
# macOS
.DS_Store
# Windows
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
*.stackdump
[Dd]esktop.ini
# Testing
coverage/
.nyc_output/

Dockerfile Normal file

@@ -0,0 +1,29 @@
FROM node:18-alpine
WORKDIR /app
# Install dependencies for sharp
RUN apk add --no-cache \
g++ \
make \
python3
# Copy package files and install dependencies
COPY package*.json ./
RUN npm install
# Copy source code
COPY . .
# Build TypeScript code
RUN npm run build
# Default environment variables
ENV NODE_ENV=production
# The API key should be passed at runtime
# ENV OPENROUTER_API_KEY=your-api-key-here
# ENV OPENROUTER_DEFAULT_MODEL=your-default-model
# Run the server
CMD ["node", "dist/index.js"]

README.md Normal file

@@ -0,0 +1,313 @@
# OpenRouter MCP Multimodal Server
An MCP (Model Context Protocol) server that provides chat and image analysis capabilities through OpenRouter.ai's diverse model ecosystem, combining text chat and image analysis tools in a single server.
## Features
- **Text Chat:**
- Direct access to all OpenRouter.ai chat models
- Support for simple text and multimodal conversations
- Configurable temperature and other parameters
- **Image Analysis:**
- Analyze single images with custom questions
- Process multiple images simultaneously
- Automatic image resizing and optimization
- Support for various image sources (local files, URLs, data URLs)
- **Model Selection:**
- Search and filter available models
- Validate model IDs
- Get detailed model information
- Support for default model configuration
- **Performance Optimization:**
- Smart model information caching
- Exponential backoff with jitter for retries (see the sketch after this list)
- Automatic rate limit handling
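Retries use exponential backoff with jitter, as implemented in `src/openrouter-api.ts` (1 s initial delay, doubling per attempt, scaled by a 50-100% jitter factor). A minimal sketch of the delay formula; the `backoffDelay` helper is illustrative, not part of the package API:

```typescript
// Retry delay: base * 2^(attempt - 1), scaled by a 50-100% jitter factor.
function backoffDelay(attempt: number, baseDelayMs = 1000): number {
  return baseDelayMs * Math.pow(2, attempt - 1) * (0.5 + Math.random() * 0.5);
}
// attempt 1 → 500-1000 ms, attempt 2 → 1000-2000 ms, attempt 3 → 2000-4000 ms
```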
## Quick Start Configuration
### Prerequisites
1. Get your OpenRouter API key from [OpenRouter Keys](https://openrouter.ai/keys)
2. Choose a default model (optional)
### MCP Configuration Options
Add one of the following configurations to your MCP settings file (e.g., `cline_mcp_settings.json` or `claude_desktop_config.json`):
#### Option 1: Using npx (Node.js)
```json
{
"mcpServers": {
"openrouter": {
"command": "npx",
"args": [
"-y",
"@stabgan/openrouter-mcp-multimodal"
],
"env": {
"OPENROUTER_API_KEY": "your-api-key-here",
"OPENROUTER_DEFAULT_MODEL": "anthropic/claude-3.5-sonnet"
}
}
}
}
```
#### Option 2: Using uv (Python Package Manager)
```json
{
"mcpServers": {
"openrouter": {
"command": "uv",
"args": [
"run",
"-m",
"openrouter_mcp_multimodal"
],
"env": {
"OPENROUTER_API_KEY": "your-api-key-here",
"OPENROUTER_DEFAULT_MODEL": "anthropic/claude-3.5-sonnet"
}
}
}
}
```
#### Option 3: Using Docker
```json
{
"mcpServers": {
"openrouter": {
"command": "docker",
"args": [
"run",
"--rm",
"-i",
"-e", "OPENROUTER_API_KEY=your-api-key-here",
"-e", "OPENROUTER_DEFAULT_MODEL=anthropic/claude-3.5-sonnet",
"stabgan/openrouter-mcp-multimodal:latest"
]
}
}
}
```
#### Option 4: Using Smithery (recommended)
```json
{
"mcpServers": {
"openrouter": {
"command": "smithery",
"args": [
"run",
"stabgan/openrouter-mcp-multimodal"
],
"env": {
"OPENROUTER_API_KEY": "your-api-key-here",
"OPENROUTER_DEFAULT_MODEL": "anthropic/claude-3.5-sonnet"
}
}
}
}
```
## Available Tools
### chat_completion
Send text or multimodal messages to OpenRouter models:
```javascript
use_mcp_tool({
server_name: "openrouter",
tool_name: "chat_completion",
arguments: {
model: "google/gemini-2.5-pro-exp-03-25:free", // Optional if default is set
messages: [
{
role: "system",
content: "You are a helpful assistant."
},
{
role: "user",
content: "What is the capital of France?"
}
],
temperature: 0.7 // Optional, defaults to 1.0
}
});
```
For multimodal messages with images:
```javascript
use_mcp_tool({
server_name: "openrouter",
tool_name: "chat_completion",
arguments: {
model: "anthropic/claude-3.5-sonnet",
messages: [
{
role: "user",
content: [
{
type: "text",
text: "What's in this image?"
},
{
type: "image_url",
image_url: {
url: "https://example.com/image.jpg"
}
}
]
}
]
}
});
```
### analyze_image
Analyze a single image with an optional question:
```javascript
use_mcp_tool({
server_name: "openrouter",
tool_name: "analyze_image",
arguments: {
image_path: "/absolute/path/to/image.jpg",
question: "What objects are in this image?", // Optional
model: "anthropic/claude-3.5-sonnet" // Optional if default is set
}
});
```
### multi_image_analysis
Analyze multiple images with a single prompt:
```javascript
use_mcp_tool({
server_name: "openrouter",
tool_name: "multi_image_analysis",
arguments: {
images: [
{ url: "https://example.com/image1.jpg" },
{ url: "file:///absolute/path/to/image2.jpg" },
{
url: "https://example.com/image3.jpg",
alt: "Optional description of image 3"
}
],
prompt: "Compare these images and tell me their similarities and differences",
markdown_response: true, // Optional, defaults to true
model: "anthropic/claude-3-opus" // Optional if default is set
}
});
```
### search_models
Search and filter available models:
```javascript
use_mcp_tool({
server_name: "openrouter",
tool_name: "search_models",
arguments: {
query: "claude", // Optional text search
provider: "anthropic", // Optional provider filter
capabilities: {
vision: true // Filter for models with vision capabilities
},
limit: 5 // Optional, defaults to 10
}
});
```
### get_model_info
Get detailed information about a specific model:
```javascript
use_mcp_tool({
server_name: "openrouter",
tool_name: "get_model_info",
arguments: {
model: "anthropic/claude-3.5-sonnet"
}
});
```
### validate_model
Check if a model ID is valid:
```javascript
use_mcp_tool({
server_name: "openrouter",
tool_name: "validate_model",
arguments: {
model: "google/gemini-2.5-pro-exp-03-25:free"
}
});
```
## Error Handling
The server provides detailed error messages for the following failure cases (a handling sketch follows the list):
- Invalid input parameters
- Network errors
- Rate limiting issues
- Invalid image formats
- Authentication problems
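Tool failures are returned as ordinary results carrying `isError: true` rather than thrown to the caller, so callers should check that flag. A minimal handling sketch, assuming the result shape used by the handlers in `src/tool-handlers/` (the `printToolResult` helper is illustrative, not part of the package):

```typescript
// Result shape returned by the tool handlers: text parts plus an error flag.
interface ToolResult {
  content: Array<{ type: 'text'; text: string }>;
  isError?: boolean;
}

function printToolResult(result: ToolResult): void {
  const text = result.content.map((part) => part.text).join('\n');
  if (result.isError) {
    console.error(`Tool call failed: ${text}`);
  } else {
    console.log(text);
  }
}
```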
## Troubleshooting
### Common Issues
- **"fetch is not defined" error**: This often occurs when the Node.js environment doesn't have global fetch. Use Node.js v18+ or add the PATH environment variable to your configuration as shown below:
```json
{
"mcpServers": {
"openrouter": {
"command": "npx",
"args": [
"-y",
"@stabgan/openrouter-mcp-multimodal"
],
"env": {
"OPENROUTER_API_KEY": "your-api-key-here",
"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
}
}
}
}
```
- **Image analysis failures**: Make sure your image path is absolute and the file format is supported.
## Development
To build from source:
```bash
git clone https://github.com/stabgan/openrouter-mcp-multimodal.git
cd openrouter-mcp-multimodal
npm install
npm run build
```
## License
MIT License

package.json Normal file

@@ -0,0 +1,60 @@
{
"name": "@stabgan/openrouter-mcp-multimodal",
"version": "1.0.0",
"description": "MCP server for OpenRouter providing text chat and image analysis tools",
"type": "module",
"main": "dist/index.js",
"bin": {
"openrouter-multimodal": "dist/index.js"
},
"files": [
"dist",
"README.md",
"LICENSE"
],
"scripts": {
"build": "tsc && shx chmod +x dist/*.js",
"prepare": "npm run build",
"start": "node dist/index.js",
"watch": "tsc --watch"
},
"keywords": [
"mcp",
"openrouter",
"ai",
"llm",
"vision",
"image-analysis",
"modelcontextprotocol"
],
"author": "stabgan",
"license": "MIT",
"repository": {
"type": "git",
"url": "git+https://github.com/stabgan/openrouter-mcp-multimodal.git"
},
"bugs": {
"url": "https://github.com/stabgan/openrouter-mcp-multimodal/issues"
},
"homepage": "https://github.com/stabgan/openrouter-mcp-multimodal#readme",
"engines": {
"node": ">=18.0.0"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.4.1",
"axios": "^1.7.9",
"node-fetch": "^3.3.2",
"openai": "^4.83.0",
"sharp": "^0.33.3"
},
"devDependencies": {
"@types/node": "^22.13.1",
"@types/sharp": "^0.32.0",
"shx": "^0.3.4",
"typescript": "^5.7.3"
},
"overrides": {
"uri-js": "npm:uri-js-replace",
"whatwg-url": "^14.1.0"
}
}

smithery.yaml Normal file

@@ -0,0 +1,43 @@
name: openrouter-mcp-multimodal
version: 1.0.0
description: MCP server for OpenRouter providing text chat and image analysis tools
image:
name: ghcr.io/stabgan/openrouter-mcp-multimodal
tag: latest
entrypoint: ["node", "dist/index.js"]
build:
dockerfile: Dockerfile
publish:
smithery: true
dockerhub:
enabled: true
username: stabgan
repository: openrouter-mcp-multimodal
config:
env:
- name: OPENROUTER_API_KEY
description: OpenRouter API key for authentication
required: true
- name: OPENROUTER_DEFAULT_MODEL
description: Default model to use if none specified in requests
required: false
defaultValue: "anthropic/claude-3.5-sonnet"
documentation:
description: |
An MCP server that provides chat and image analysis capabilities through OpenRouter.ai's diverse model ecosystem.
Supports text chat, single and multi-image analysis, and model search/validation.
usage: |
This MCP server provides the following tools:
- chat_completion: Send text or multimodal messages to OpenRouter
- analyze_image: Analyze a single image with an optional question
- multi_image_analysis: Analyze multiple images with a single prompt
- search_models: Search and filter available models
- get_model_info: Get detailed information about a specific model
- validate_model: Check if a model ID is valid
link: https://github.com/stabgan/openrouter-mcp-multimodal

src/index.ts Normal file

@@ -0,0 +1,67 @@
#!/usr/bin/env node
// OpenRouter Multimodal MCP Server
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { ToolHandlers } from './tool-handlers.js';
class OpenRouterMultimodalServer {
private server: Server;
private toolHandlers: ToolHandlers; // Assigned in the constructor
constructor() {
// Get API key and default model from environment variables
const apiKey = process.env.OPENROUTER_API_KEY;
const defaultModel = process.env.OPENROUTER_DEFAULT_MODEL;
// Check if API key is provided
if (!apiKey) {
throw new Error('OPENROUTER_API_KEY environment variable is required');
}
// Initialize the server
this.server = new Server(
{
name: 'openrouter-multimodal-server',
version: '1.0.0',
},
{
capabilities: {
tools: {},
},
}
);
// Set up error handling
this.server.onerror = (error) => console.error('[MCP Error]', error);
// Initialize tool handlers
this.toolHandlers = new ToolHandlers(
this.server,
apiKey,
defaultModel
);
process.on('SIGINT', async () => {
await this.server.close();
process.exit(0);
});
}
async run() {
const transport = new StdioServerTransport();
await this.server.connect(transport);
console.error('OpenRouter Multimodal MCP server running on stdio');
console.error('Using API key from environment variable');
console.error('Note: To use OpenRouter Multimodal, add the API key to your environment variables:');
console.error(' OPENROUTER_API_KEY=your-api-key');
if (process.env.OPENROUTER_DEFAULT_MODEL) {
console.error(` Using default model: ${process.env.OPENROUTER_DEFAULT_MODEL}`);
} else {
console.error(' No default model set. You will need to specify a model in each request.');
}
}
}
const server = new OpenRouterMultimodalServer();
server.run().catch(console.error);

src/model-cache.ts Normal file

@@ -0,0 +1,167 @@
/**
* ModelCache - Caches OpenRouter model data to reduce API calls
*/
export class ModelCache {
private static instance: ModelCache;
private models: Record<string, any>;
private lastFetchTime: number;
private cacheExpiryTime: number; // in milliseconds (1 hour = 3600000)
private constructor() {
this.models = {};
this.lastFetchTime = 0;
this.cacheExpiryTime = 3600000; // 1 hour
}
/**
* Get singleton instance
*/
public static getInstance(): ModelCache {
if (!ModelCache.instance) {
ModelCache.instance = new ModelCache();
}
return ModelCache.instance;
}
/**
* Check if the cache is valid
*/
public isCacheValid(): boolean {
return (
Object.keys(this.models).length > 0 &&
Date.now() - this.lastFetchTime < this.cacheExpiryTime
);
}
/**
* Store all models
*/
public setModels(models: any[]): void {
this.models = {};
for (const model of models) {
this.models[model.id] = model;
}
this.lastFetchTime = Date.now();
}
/**
* Get all cached models
*/
public getAllModels(): any[] {
return Object.values(this.models);
}
/**
* Get a specific model by ID
*/
public getModel(modelId: string): any | null {
return this.models[modelId] || null;
}
/**
* Check if a model exists
*/
public hasModel(modelId: string): boolean {
return !!this.models[modelId];
}
/**
* Search models based on criteria
*/
public searchModels(params: {
query?: string;
provider?: string;
minContextLength?: number;
maxContextLength?: number;
maxPromptPrice?: number;
maxCompletionPrice?: number;
capabilities?: {
functions?: boolean;
tools?: boolean;
vision?: boolean;
json_mode?: boolean;
};
limit?: number;
}): any[] {
let results = this.getAllModels();
// Apply text search
if (params.query) {
const query = params.query.toLowerCase();
results = results.filter((model) =>
model.id.toLowerCase().includes(query) ||
(model.description && model.description.toLowerCase().includes(query)) ||
(model.provider && model.provider.toLowerCase().includes(query))
);
}
// Filter by provider
if (params.provider) {
results = results.filter((model) =>
model.provider && model.provider.toLowerCase() === params.provider!.toLowerCase()
);
}
// Filter by context length (hoist values into locals so the narrowed
// number type survives inside the filter callbacks under strict mode)
if (params.minContextLength) {
const minContext = params.minContextLength;
results = results.filter(
(model) => model.context_length >= minContext
);
}
if (params.maxContextLength) {
const maxContext = params.maxContextLength;
results = results.filter(
(model) => model.context_length <= maxContext
);
}
// Filter by price
if (params.maxPromptPrice) {
const maxPrompt = params.maxPromptPrice;
results = results.filter(
(model) =>
!model.pricing?.prompt || model.pricing.prompt <= maxPrompt
);
}
if (params.maxCompletionPrice) {
const maxCompletion = params.maxCompletionPrice;
results = results.filter(
(model) =>
!model.pricing?.completion ||
model.pricing.completion <= maxCompletion
);
}
// Filter by capabilities
if (params.capabilities) {
if (params.capabilities.functions) {
results = results.filter(
(model) => model.capabilities?.function_calling
);
}
if (params.capabilities.tools) {
results = results.filter((model) => model.capabilities?.tools);
}
if (params.capabilities.vision) {
results = results.filter((model) => model.capabilities?.vision);
}
if (params.capabilities.json_mode) {
results = results.filter((model) => model.capabilities?.json_mode);
}
}
// Apply limit
if (params.limit && params.limit > 0) {
results = results.slice(0, params.limit);
}
return results;
}
/**
* Reset the cache
*/
public resetCache(): void {
this.models = {};
this.lastFetchTime = 0;
}
}
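For context, a usage sketch of the cache as the search handler drives it (mirrors the flow in `src/tool-handlers/search-models.ts`; the `findVisionModels` wrapper is illustrative):

```typescript
import { ModelCache } from './model-cache.js';
import { OpenRouterAPIClient } from './openrouter-api.js';

// Refresh the singleton cache when it is empty or stale, then query it.
async function findVisionModels(client: OpenRouterAPIClient) {
  const cache = ModelCache.getInstance();
  if (!cache.isCacheValid()) {
    cache.setModels(await client.getModels());
  }
  return cache.searchModels({
    capabilities: { vision: true },
    limit: 5,
  });
}
```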

src/openrouter-api.ts Normal file

@@ -0,0 +1,130 @@
import axios, { AxiosError, AxiosInstance } from 'axios';
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
/**
* Client for interacting with the OpenRouter API
*/
export class OpenRouterAPIClient {
private apiKey: string;
private axiosInstance: AxiosInstance;
private retryCount: number = 3;
private retryDelay: number = 1000; // Initial delay in ms
constructor(apiKey: string) {
this.apiKey = apiKey;
this.axiosInstance = axios.create({
baseURL: 'https://openrouter.ai/api/v1',
headers: {
'Authorization': `Bearer ${this.apiKey}`,
'Content-Type': 'application/json',
'HTTP-Referer': 'https://github.com/stabgan/openrouter-mcp-multimodal',
'X-Title': 'OpenRouter MCP Multimodal Server'
},
timeout: 60000 // 60 seconds timeout
});
}
/**
* Get all available models from OpenRouter
*/
public async getModels(): Promise<any[]> {
try {
const response = await this.axiosInstance.get('/models');
return response.data.data;
} catch (error) {
this.handleRequestError(error);
return [];
}
}
/**
* Send a request to the OpenRouter API with retry functionality
*/
public async request(endpoint: string, method: string, data?: any): Promise<any> {
let lastError: Error | null = null;
let retries = 0;
while (retries <= this.retryCount) {
try {
const response = await this.axiosInstance.request({
url: endpoint,
method,
data
});
return response.data;
} catch (error) {
lastError = this.handleRetryableError(error, retries);
retries++;
if (retries <= this.retryCount) {
// Exponential backoff with jitter
const delay = this.retryDelay * Math.pow(2, retries - 1) * (0.5 + Math.random() * 0.5);
console.error(`Retrying in ${Math.round(delay)}ms (${retries}/${this.retryCount})`);
await new Promise(resolve => setTimeout(resolve, delay));
}
}
}
// If we get here, all retries failed
throw lastError || new Error('Request failed after multiple retries');
}
/**
* Handle retryable errors
*/
private handleRetryableError(error: any, retryCount: number): Error {
if (axios.isAxiosError(error)) {
const axiosError = error as AxiosError;
// Rate limiting (429) or server errors (5xx)
if (axiosError.response?.status === 429 || (axiosError.response?.status && axiosError.response.status >= 500)) {
console.error(`Request error (retry ${retryCount}): ${axiosError.message}`);
if (axiosError.response?.status === 429) {
console.error('Rate limit exceeded. Retrying with backoff...');
}
return new Error(`OpenRouter API error: ${axiosError.response?.status} ${axiosError.message}`);
}
// For other status codes, don't retry
if (axiosError.response) {
const responseData = axiosError.response.data as any;
const message = responseData?.error?.message || axiosError.message;
throw new McpError(ErrorCode.InternalError, `OpenRouter API error: ${message}`);
}
}
// Network errors should be retried
console.error(`Network error (retry ${retryCount}): ${error.message}`);
return new Error(`Network error: ${error.message}`);
}
/**
* Handle request errors
*/
private handleRequestError(error: any): never {
console.error('Error in OpenRouter API request:', error);
if (axios.isAxiosError(error)) {
const axiosError = error as AxiosError;
if (axiosError.response) {
const status = axiosError.response.status;
const responseData = axiosError.response.data as any;
const message = responseData?.error?.message || axiosError.message;
// Map HTTP statuses onto the MCP SDK's JSON-RPC error codes
if (status === 401 || status === 403) {
throw new McpError(ErrorCode.InvalidRequest, `Authentication error: ${message}`);
} else if (status === 429) {
throw new McpError(ErrorCode.InternalError, `Rate limit exceeded: ${message}`);
} else {
throw new McpError(ErrorCode.InternalError, `OpenRouter API error (${status}): ${message}`);
}
} else if (axiosError.request) {
throw new McpError(ErrorCode.InternalError, `Network error: ${axiosError.message}`);
}
}
throw new McpError(ErrorCode.InternalError, `Unknown error: ${error.message || 'No error message'}`);
}
}

src/tool-handlers.ts Normal file

@@ -0,0 +1,347 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import {
CallToolRequestSchema,
ErrorCode,
ListToolsRequestSchema,
McpError,
} from '@modelcontextprotocol/sdk/types.js';
import OpenAI from 'openai';
import { ModelCache } from './model-cache.js';
import { OpenRouterAPIClient } from './openrouter-api.js';
// Import tool handlers
import { handleChatCompletion, ChatCompletionToolRequest } from './tool-handlers/chat-completion.js';
import { handleSearchModels, SearchModelsToolRequest } from './tool-handlers/search-models.js';
import { handleGetModelInfo, GetModelInfoToolRequest } from './tool-handlers/get-model-info.js';
import { handleValidateModel, ValidateModelToolRequest } from './tool-handlers/validate-model.js';
import { handleAnalyzeImage, AnalyzeImageToolRequest } from './tool-handlers/analyze-image.js';
import { handleMultiImageAnalysis, MultiImageAnalysisToolRequest } from './tool-handlers/multi-image-analysis.js';
export class ToolHandlers {
private server: Server;
private openai: OpenAI;
private modelCache: ModelCache;
private apiClient: OpenRouterAPIClient;
private defaultModel?: string;
constructor(
server: Server,
apiKey: string,
defaultModel?: string
) {
this.server = server;
this.modelCache = ModelCache.getInstance();
this.apiClient = new OpenRouterAPIClient(apiKey);
this.defaultModel = defaultModel;
this.openai = new OpenAI({
apiKey: apiKey,
baseURL: 'https://openrouter.ai/api/v1',
defaultHeaders: {
'HTTP-Referer': 'https://github.com/stabgan/openrouter-mcp-multimodal',
'X-Title': 'OpenRouter MCP Multimodal Server',
},
});
this.setupToolHandlers();
}
private setupToolHandlers() {
this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
tools: [
// Chat Completion Tool
{
name: 'chat_completion',
description: 'Send a message to OpenRouter.ai and get a response',
inputSchema: {
type: 'object',
properties: {
model: {
type: 'string',
description: 'The model to use (e.g., "google/gemini-2.5-pro-exp-03-25:free", "undi95/toppy-m-7b:free"). If not provided, uses the default model if set.',
},
messages: {
type: 'array',
description: 'An array of conversation messages with roles and content',
minItems: 1,
maxItems: 100,
items: {
type: 'object',
properties: {
role: {
type: 'string',
enum: ['system', 'user', 'assistant'],
description: 'The role of the message sender',
},
content: {
oneOf: [
{
type: 'string',
description: 'The text content of the message',
},
{
type: 'array',
description: 'Array of content parts for multimodal messages (text and images)',
items: {
type: 'object',
properties: {
type: {
type: 'string',
enum: ['text', 'image_url'],
description: 'The type of content (text or image)',
},
text: {
type: 'string',
description: 'The text content (for text type)',
},
image_url: {
type: 'object',
description: 'The image URL object (for image_url type)',
properties: {
url: {
type: 'string',
description: 'URL of the image (can be a data URL with base64)',
},
},
required: ['url'],
},
},
required: ['type'],
},
},
],
},
},
required: ['role', 'content'],
},
},
temperature: {
type: 'number',
description: 'Sampling temperature (0-2)',
minimum: 0,
maximum: 2,
},
},
required: ['messages'],
},
maxContextTokens: 200000
},
// Image Analysis Tool
{
name: 'analyze_image',
description: 'Analyze an image using OpenRouter vision models',
inputSchema: {
type: 'object',
properties: {
image_path: {
type: 'string',
description: 'Path to the image file to analyze (must be an absolute path)',
},
question: {
type: 'string',
description: 'Question to ask about the image',
},
model: {
type: 'string',
description: 'OpenRouter model to use (e.g., "anthropic/claude-3.5-sonnet")',
},
},
required: ['image_path'],
},
},
// Multi-Image Analysis Tool
{
name: 'multi_image_analysis',
description: 'Analyze multiple images at once with a single prompt and receive detailed responses',
inputSchema: {
type: 'object',
properties: {
images: {
type: 'array',
description: 'Array of image objects to analyze',
items: {
type: 'object',
properties: {
url: {
type: 'string',
description: 'URL or data URL of the image (can be a file:// URL to read from local filesystem)',
},
alt: {
type: 'string',
description: 'Optional alt text or description of the image',
},
},
required: ['url'],
},
},
prompt: {
type: 'string',
description: 'Prompt for analyzing the images',
},
markdown_response: {
type: 'boolean',
description: 'Whether to format the response in Markdown (default: true)',
default: true,
},
model: {
type: 'string',
description: 'OpenRouter model to use (defaults to claude-3.5-sonnet if not specified)',
},
},
required: ['images', 'prompt'],
},
},
// Search Models Tool
{
name: 'search_models',
description: 'Search and filter OpenRouter.ai models based on various criteria',
inputSchema: {
type: 'object',
properties: {
query: {
type: 'string',
description: 'Optional search query to filter by name, description, or provider',
},
provider: {
type: 'string',
description: 'Filter by specific provider (e.g., "anthropic", "openai", "cohere")',
},
minContextLength: {
type: 'number',
description: 'Minimum context length in tokens',
},
maxContextLength: {
type: 'number',
description: 'Maximum context length in tokens',
},
maxPromptPrice: {
type: 'number',
description: 'Maximum price per 1K tokens for prompts',
},
maxCompletionPrice: {
type: 'number',
description: 'Maximum price per 1K tokens for completions',
},
capabilities: {
type: 'object',
description: 'Filter by model capabilities',
properties: {
functions: {
type: 'boolean',
description: 'Requires function calling capability',
},
tools: {
type: 'boolean',
description: 'Requires tools capability',
},
vision: {
type: 'boolean',
description: 'Requires vision capability',
},
json_mode: {
type: 'boolean',
description: 'Requires JSON mode capability',
}
}
},
limit: {
type: 'number',
description: 'Maximum number of results to return (default: 10)',
minimum: 1,
maximum: 50
}
}
},
},
// Get Model Info Tool
{
name: 'get_model_info',
description: 'Get detailed information about a specific model',
inputSchema: {
type: 'object',
properties: {
model: {
type: 'string',
description: 'The model ID to get information for',
},
},
required: ['model'],
},
},
// Validate Model Tool
{
name: 'validate_model',
description: 'Check if a model ID is valid',
inputSchema: {
type: 'object',
properties: {
model: {
type: 'string',
description: 'The model ID to validate',
},
},
required: ['model'],
},
},
],
}));
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
switch (request.params.name) {
case 'chat_completion':
return handleChatCompletion({
params: {
arguments: request.params.arguments as unknown as ChatCompletionToolRequest
}
}, this.openai, this.defaultModel);
case 'analyze_image':
return handleAnalyzeImage({
params: {
arguments: request.params.arguments as unknown as AnalyzeImageToolRequest
}
}, this.openai, this.defaultModel);
case 'multi_image_analysis':
return handleMultiImageAnalysis({
params: {
arguments: request.params.arguments as unknown as MultiImageAnalysisToolRequest
}
}, this.openai, this.defaultModel);
case 'search_models':
return handleSearchModels({
params: {
arguments: request.params.arguments as SearchModelsToolRequest
}
}, this.apiClient, this.modelCache);
case 'get_model_info':
return handleGetModelInfo({
params: {
arguments: request.params.arguments as unknown as GetModelInfoToolRequest
}
}, this.modelCache);
case 'validate_model':
return handleValidateModel({
params: {
arguments: request.params.arguments as unknown as ValidateModelToolRequest
}
}, this.modelCache);
default:
throw new McpError(
ErrorCode.MethodNotFound,
`Unknown tool: ${request.params.name}`
);
}
});
}
}

src/tool-handlers/analyze-image.ts Normal file

@@ -0,0 +1,116 @@
import path from 'path';
import { promises as fs } from 'fs';
import sharp from 'sharp';
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import OpenAI from 'openai';
import { ChatCompletionMessageParam } from 'openai/resources/chat/completions.js';
export interface AnalyzeImageToolRequest {
image_path: string;
question?: string;
model?: string;
}
export async function handleAnalyzeImage(
request: { params: { arguments: AnalyzeImageToolRequest } },
openai: OpenAI,
defaultModel?: string
) {
const args = request.params.arguments;
try {
// Validate image path
const imagePath = args.image_path;
if (!path.isAbsolute(imagePath)) {
throw new McpError(ErrorCode.InvalidParams, 'Image path must be absolute');
}
// Read image file
const imageBuffer = await fs.readFile(imagePath);
console.error(`Successfully read image buffer of size: ${imageBuffer.length}`);
// Get image metadata
const metadata = await sharp(imageBuffer).metadata();
console.error('Image metadata:', metadata);
// Calculate dimensions to keep base64 size reasonable
const MAX_DIMENSION = 800; // Larger than original example for better quality
const JPEG_QUALITY = 80; // Higher quality
let resizedBuffer = imageBuffer;
if (metadata.width && metadata.height) {
const largerDimension = Math.max(metadata.width, metadata.height);
if (largerDimension > MAX_DIMENSION) {
const resizeOptions = metadata.width > metadata.height
? { width: MAX_DIMENSION }
: { height: MAX_DIMENSION };
resizedBuffer = await sharp(imageBuffer)
.resize(resizeOptions)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
} else {
resizedBuffer = await sharp(imageBuffer)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
}
}
// Convert to base64
const base64Image = resizedBuffer.toString('base64');
// Select model
const model = args.model || defaultModel || 'anthropic/claude-3.5-sonnet';
// Prepare message with image
const messages: ChatCompletionMessageParam[] = [
{
role: 'user',
content: [
{
type: 'text',
text: args.question || "What's in this image?"
},
{
type: 'image_url',
image_url: {
url: `data:image/jpeg;base64,${base64Image}`
}
}
]
}
];
console.error('Sending request to OpenRouter...');
// Call OpenRouter API
const completion = await openai.chat.completions.create({
model,
messages,
});
return {
content: [
{
type: 'text',
text: completion.choices[0].message.content || '',
},
],
};
} catch (error) {
console.error('Error analyzing image:', error);
if (error instanceof McpError) {
throw error;
}
return {
content: [
{
type: 'text',
text: `Error analyzing image: ${error instanceof Error ? error.message : String(error)}`,
},
],
isError: true,
};
}
}

src/tool-handlers/chat-completion.ts Normal file

@@ -0,0 +1,135 @@
import OpenAI from 'openai';
import { ChatCompletionMessageParam } from 'openai/resources/chat/completions.js';
// Maximum context tokens
const MAX_CONTEXT_TOKENS = 200000;
export interface ChatCompletionToolRequest {
model?: string;
messages: ChatCompletionMessageParam[];
temperature?: number;
}
// Utility function to estimate token count (simplified)
function estimateTokenCount(text: string): number {
// Rough approximation: 4 characters per token
return Math.ceil(text.length / 4);
}
// Truncate messages to fit within the context window
function truncateMessagesToFit(
messages: ChatCompletionMessageParam[],
maxTokens: number
): ChatCompletionMessageParam[] {
const kept: ChatCompletionMessageParam[] = [];
let currentTokenCount = 0;
// Reserve the system message's tokens first if present
const systemMessage = messages[0]?.role === 'system' ? messages[0] : undefined;
if (systemMessage) {
currentTokenCount += estimateTokenCount(systemMessage.content as string);
}
// Add messages from the end, respecting the token limit
for (let i = messages.length - 1; i >= 0; i--) {
const message = messages[i];
// Skip the system message we've already accounted for
if (i === 0 && message.role === 'system') continue;
// For string content, estimate tokens directly
if (typeof message.content === 'string') {
const messageTokens = estimateTokenCount(message.content);
if (currentTokenCount + messageTokens > maxTokens) break;
kept.unshift(message);
currentTokenCount += messageTokens;
}
// For multimodal content (array), estimate tokens for text content
else if (Array.isArray(message.content)) {
let messageTokens = 0;
for (const part of message.content) {
if (part.type === 'text' && part.text) {
messageTokens += estimateTokenCount(part.text);
} else if (part.type === 'image_url') {
// Add a token cost estimate for images - this is a simplification
// Actual image token costs depend on resolution and model
messageTokens += 1000;
}
}
if (currentTokenCount + messageTokens > maxTokens) break;
kept.unshift(message);
currentTokenCount += messageTokens;
}
}
// Prepend the system message so it stays first in the result
return systemMessage ? [systemMessage, ...kept] : kept;
}
export async function handleChatCompletion(
request: { params: { arguments: ChatCompletionToolRequest } },
openai: OpenAI,
defaultModel?: string
) {
const args = request.params.arguments;
// Validate model selection
const model = args.model || defaultModel;
if (!model) {
return {
content: [
{
type: 'text',
text: 'No model specified and no default model configured in MCP settings. Please specify a model or set OPENROUTER_DEFAULT_MODEL in the MCP configuration.',
},
],
isError: true,
};
}
// Validate message array
if (args.messages.length === 0) {
return {
content: [
{
type: 'text',
text: 'Messages array cannot be empty. At least one message is required.',
},
],
isError: true,
};
}
try {
// Truncate messages to fit within context window
const truncatedMessages = truncateMessagesToFit(args.messages, MAX_CONTEXT_TOKENS);
const completion = await openai.chat.completions.create({
model,
messages: truncatedMessages,
temperature: args.temperature ?? 1,
});
return {
content: [
{
type: 'text',
text: completion.choices[0].message.content || '',
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `OpenRouter API error: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}
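To illustrate the truncation strategy above under the 4-characters-per-token estimate (a sketch; the message texts are made up, and `truncateMessagesToFit` is module-private):

```typescript
import { ChatCompletionMessageParam } from 'openai/resources/chat/completions.js';

// With maxTokens = 7, the system message (~3 tokens) is reserved first,
// then messages are added from the end until the budget runs out.
const history: ChatCompletionMessageParam[] = [
  { role: 'system', content: 'Be brief.' },                          // ~3 tokens
  { role: 'user', content: 'First question, long enough to drop.' }, // ~9 tokens
  { role: 'user', content: 'Second one.' },                          // ~3 tokens
];
// truncateMessagesToFit(history, 7) keeps the system message and 'Second one.'
```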

src/tool-handlers/get-model-info.ts Normal file

@@ -0,0 +1,54 @@
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import { ModelCache } from '../model-cache.js';
export interface GetModelInfoToolRequest {
model: string;
}
export async function handleGetModelInfo(
request: { params: { arguments: GetModelInfoToolRequest } },
modelCache: ModelCache
) {
const args = request.params.arguments;
try {
if (!modelCache.isCacheValid()) {
return {
content: [
{
type: 'text',
text: 'Model cache is empty or expired. Please call search_models first to populate the cache.',
},
],
isError: true,
};
}
const model = modelCache.getModel(args.model);
if (!model) {
throw new McpError(ErrorCode.InvalidParams, `Model '${args.model}' not found`);
}
return {
content: [
{
type: 'text',
text: JSON.stringify(model, null, 2),
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `Error retrieving model info: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}

src/tool-handlers/multi-image-analysis.ts Normal file

@@ -0,0 +1,168 @@
import fetch from 'node-fetch';
import sharp from 'sharp';
import { ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import OpenAI from 'openai';
export interface MultiImageAnalysisToolRequest {
images: Array<{
url: string;
alt?: string;
}>;
prompt: string;
markdown_response?: boolean;
model?: string;
}
async function fetchImageAsBuffer(url: string): Promise<Buffer> {
try {
// Handle data URLs
if (url.startsWith('data:')) {
const matches = url.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
if (!matches || matches.length !== 3) {
throw new Error('Invalid data URL');
}
return Buffer.from(matches[2], 'base64');
}
// Handle file URLs
if (url.startsWith('file://')) {
const filePath = url.replace('file://', '');
const fs = await import('fs/promises');
return await fs.readFile(filePath);
}
// Handle http/https URLs
const response = await fetch(url);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
return Buffer.from(await response.arrayBuffer());
} catch (error) {
console.error(`Error fetching image from ${url}:`, error);
throw error;
}
}
async function processImage(buffer: Buffer): Promise<string> {
try {
// Get image metadata
const metadata = await sharp(buffer).metadata();
// Calculate dimensions to keep base64 size reasonable
const MAX_DIMENSION = 800;
const JPEG_QUALITY = 80;
if (metadata.width && metadata.height) {
const largerDimension = Math.max(metadata.width, metadata.height);
if (largerDimension > MAX_DIMENSION) {
const resizeOptions = metadata.width > metadata.height
? { width: MAX_DIMENSION }
: { height: MAX_DIMENSION };
const resizedBuffer = await sharp(buffer)
.resize(resizeOptions)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
return resizedBuffer.toString('base64');
}
}
// If no resizing needed, just convert to JPEG
const jpegBuffer = await sharp(buffer)
.jpeg({ quality: JPEG_QUALITY })
.toBuffer();
return jpegBuffer.toString('base64');
} catch (error) {
console.error('Error processing image:', error);
throw error;
}
}
export async function handleMultiImageAnalysis(
request: { params: { arguments: MultiImageAnalysisToolRequest } },
openai: OpenAI,
defaultModel?: string
) {
const args = request.params.arguments;
try {
// Validate inputs
if (!args.images || args.images.length === 0) {
throw new McpError(ErrorCode.InvalidParams, 'At least one image is required');
}
if (!args.prompt) {
throw new McpError(ErrorCode.InvalidParams, 'A prompt is required');
}
// Prepare content array for the message
const content: Array<any> = [{
type: 'text',
text: args.prompt
}];
// Process each image
for (const image of args.images) {
try {
// Fetch and process the image
const imageBuffer = await fetchImageAsBuffer(image.url);
const base64Image = await processImage(imageBuffer);
// Add to content
content.push({
type: 'image_url',
image_url: {
url: `data:image/jpeg;base64,${base64Image}`
}
});
} catch (error) {
console.error(`Error processing image ${image.url}:`, error);
// Continue with other images if one fails
}
}
// If no images were successfully processed
if (content.length === 1) {
throw new Error('Failed to process any of the provided images');
}
// Select model
const model = args.model || defaultModel || 'anthropic/claude-3.5-sonnet';
// Make the API call
const completion = await openai.chat.completions.create({
model,
messages: [{
role: 'user',
content
}]
});
return {
content: [
{
type: 'text',
text: completion.choices[0].message.content || '',
},
],
};
} catch (error) {
console.error('Error in multi-image analysis:', error);
if (error instanceof McpError) {
throw error;
}
return {
content: [
{
type: 'text',
text: `Error analyzing images: ${error instanceof Error ? error.message : String(error)}`,
},
],
isError: true,
};
}
}

src/tool-handlers/search-models.ts Normal file

@@ -0,0 +1,68 @@
import { ModelCache } from '../model-cache.js';
import { OpenRouterAPIClient } from '../openrouter-api.js';
export interface SearchModelsToolRequest {
query?: string;
provider?: string;
minContextLength?: number;
maxContextLength?: number;
maxPromptPrice?: number;
maxCompletionPrice?: number;
capabilities?: {
functions?: boolean;
tools?: boolean;
vision?: boolean;
json_mode?: boolean;
};
limit?: number;
}
export async function handleSearchModels(
request: { params: { arguments: SearchModelsToolRequest } },
apiClient: OpenRouterAPIClient,
modelCache: ModelCache
) {
const args = request.params.arguments;
try {
// Refresh the cache if needed
if (!modelCache.isCacheValid()) {
const models = await apiClient.getModels();
modelCache.setModels(models);
}
// Search models based on criteria
const results = modelCache.searchModels({
query: args.query,
provider: args.provider,
minContextLength: args.minContextLength,
maxContextLength: args.maxContextLength,
maxPromptPrice: args.maxPromptPrice,
maxCompletionPrice: args.maxCompletionPrice,
capabilities: args.capabilities,
limit: args.limit || 10,
});
return {
content: [
{
type: 'text',
text: JSON.stringify(results, null, 2),
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `Error searching models: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}

src/tool-handlers/validate-model.ts Normal file

@@ -0,0 +1,50 @@
import { ModelCache } from '../model-cache.js';
export interface ValidateModelToolRequest {
model: string;
}
export async function handleValidateModel(
request: { params: { arguments: ValidateModelToolRequest } },
modelCache: ModelCache
) {
const args = request.params.arguments;
try {
if (!modelCache.isCacheValid()) {
return {
content: [
{
type: 'text',
text: 'Model cache is empty or expired. Please call search_models first to populate the cache.',
},
],
isError: true,
};
}
const isValid = modelCache.hasModel(args.model);
return {
content: [
{
type: 'text',
text: JSON.stringify({ valid: isValid }),
},
],
};
} catch (error) {
if (error instanceof Error) {
return {
content: [
{
type: 'text',
text: `Error validating model: ${error.message}`,
},
],
isError: true,
};
}
throw error;
}
}

tsconfig.json Normal file

@@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "es2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"strict": true,
"outDir": "dist",
"declaration": true,
"sourceMap": true,
"skipLibCheck": true
},
"include": ["src/**/*"]
}