Update dependencies and add comprehensive examples

This commit is contained in:
stabgan
2025-03-27 16:30:13 +05:30
parent 1fd46839ef
commit 436ac8d07f
6 changed files with 685 additions and 237 deletions

119
examples/README.md Normal file
View File

@@ -0,0 +1,119 @@
# OpenRouter MCP Server Examples
This directory contains example scripts demonstrating how to use the OpenRouter MCP Server for various tasks such as text chat, image analysis, and model searching.
## Prerequisites
Before running these examples, ensure you have:
1. Node.js 18 or later installed
2. An OpenRouter API key (get one from [OpenRouter](https://openrouter.ai))
3. Set up the environment variable:
```
OPENROUTER_API_KEY=your_api_key_here
```
You can create a `.env` file in the root directory with this variable.
## JavaScript Example
The `index.js` file demonstrates how to use the MCP server from Node.js:
1. Starting the MCP server
2. Connecting to the server
3. Simple text chat
4. Single image analysis
5. Multiple image analysis
6. Model search
### Running the JavaScript Example
```bash
# Install dependencies if you haven't already
npm install
# Run the example
npm run examples
```
## Python Example
The `python_example.py` script demonstrates how to use the MCP server from Python:
1. Connecting to the MCP server
2. Converting MCP tool definitions to OpenAI format
3. Interactive chat loop with tool calling
### Running the Python Example
```bash
# Install required Python packages
pip install mcp openai python-dotenv
# Run the example
python examples/python_example.py
```
## Using the MCP Server in Your Projects
To use the OpenRouter MCP Server in your own projects:
1. Install the package:
```bash
npm install @stabgan/openrouter-mcp-multimodal
```
2. Create a client connection using the MCP client libraries:
```javascript
import { ClientSession, StdioServerParameters } from '@modelcontextprotocol/sdk/client/index.js';
import { stdio_client } from '@modelcontextprotocol/sdk/client/stdio.js';
// Configure server
const serverConfig = {
command: 'npx',
args: ['-y', '@stabgan/openrouter-mcp-multimodal'],
env: { OPENROUTER_API_KEY: 'your_api_key_here' }
};
// Create connection
const serverParams = new StdioServerParameters(
serverConfig.command,
serverConfig.args,
serverConfig.env
);
const client = await stdio_client(serverParams);
const [stdio, write] = client;
// Initialize session
const session = new ClientSession(stdio, write);
await session.initialize();
```
3. Call tools:
```javascript
// Get available tools
const response = await session.list_tools();
console.log('Available tools:', response.tools.map(tool => tool.name).join(', '));
// Call a tool
const result = await session.call_tool('mcp_openrouter_chat_completion', {
messages: [
{ role: 'user', content: 'Hello, what can you do?' }
],
model: 'deepseek/deepseek-chat-v3-0324:free'
});
console.log('Response:', result.content[0].text);
```
## Available Tools
The OpenRouter MCP Server provides the following tools:
1. `mcp_openrouter_chat_completion` - Text chat with LLMs
2. `mcp_openrouter_analyze_image` - Analyze a single image
3. `mcp_openrouter_multi_image_analysis` - Analyze multiple images
4. `search_models` - Search for available models
5. `get_model_info` - Get details about a specific model
6. `validate_model` - Check if a model ID is valid
For detailed information about each tool's parameters, see the [main README](../README.md) file.

262
examples/index.js Normal file
View File

@@ -0,0 +1,262 @@
#!/usr/bin/env node
/**
* OpenRouter MCP Server Examples
*
* This script demonstrates how to use the OpenRouter MCP Server for various tasks:
* 1. Text chat with LLMs
* 2. Single image analysis
* 3. Multiple image analysis
* 4. Model search and selection
*/
import { ClientSession, StdioServerParameters } from '@modelcontextprotocol/sdk/client/index.js';
import { stdio_client } from '@modelcontextprotocol/sdk/client/stdio.js';
import OpenAI from 'openai';
import dotenv from 'dotenv';
import path from 'path';
import { fileURLToPath } from 'url';
import fs from 'fs';
import { exec } from 'child_process';
import { promisify } from 'util';
// Get the directory name of the current module (ESM has no built-in __dirname).
const __dirname = path.dirname(fileURLToPath(import.meta.url));
// Promisified child_process.exec so it can be awaited.
const execPromise = promisify(exec);
// Load environment variables from a .env file, if present.
dotenv.config();
// The OpenRouter API key is required for every example; fail fast without it.
const API_KEY = process.env.OPENROUTER_API_KEY;
if (!API_KEY) {
  console.error('Error: OPENROUTER_API_KEY environment variable is missing');
  console.error('Please set it in a .env file or in your environment');
  process.exit(1);
}
// OpenAI client pointed at the OpenRouter endpoint, for direct API calls if needed.
const openai = new OpenAI({
  apiKey: API_KEY,
  baseURL: 'https://openrouter.ai/api/v1',
  defaultHeaders: {
    'HTTP-Referer': 'https://github.com/stabgan/openrouter-mcp-multimodal',
    'X-Title': 'OpenRouter MCP Multimodal Examples',
  },
});
// Image file used by the image-analysis examples (expected in the repo root).
const testImage = path.join(__dirname, '..', 'test.png');
/**
 * Read an image file from disk and return its contents encoded as base64.
 *
 * @param {string} imagePath - path of the image file to read
 * @returns {Promise<string>} base64-encoded file contents
 * @throws re-throws any filesystem error after logging it
 */
async function imageToBase64(imagePath) {
  try {
    return fs.readFileSync(imagePath).toString('base64');
  } catch (error) {
    console.error(`Error reading image ${imagePath}: ${error.message}`);
    throw error;
  }
}
/**
 * Example 1: Resolve (and sanity-check) the MCP server entry point.
 *
 * Fix: the original ran the server with `await execPromise(command)`, which
 * only resolves when the child process EXITS — a stdio MCP server runs until
 * killed, so the examples hung here forever. It also interpolated the API key
 * into a shell command string. The stdio client created in
 * establishMcpSession() spawns its own server process with the key passed via
 * `env`, so this step only needs to locate the built server script.
 *
 * @returns {Promise<string>} absolute path to the built server script
 * @throws {Error} if the server script has not been built yet
 */
async function startMcpServer() {
  // Path to the project's main script
  const serverScriptPath = path.join(__dirname, '..', 'dist', 'index.js');
  console.log('Locating MCP server script...');
  if (!fs.existsSync(serverScriptPath)) {
    const error = new Error(
      `Server script not found at ${serverScriptPath} - run "npm run build" first`
    );
    console.error('Failed to start MCP server:', error.message);
    throw error;
  }
  console.log('MCP server started successfully!');
  return serverScriptPath;
}
/**
 * Example 2: Connect to the MCP server.
 *
 * @param {string} serverPath - path to the built server script
 * @returns {Promise<object>} an initialized MCP client session
 * @throws re-throws any connection failure after logging it
 */
async function connectToMcpServer(serverPath) {
  // Spawn the server with `node` directly and hand it the API key via env.
  const serverConfig = {
    command: 'node',
    args: [serverPath],
    env: {
      OPENROUTER_API_KEY: API_KEY,
    },
  };
  try {
    const session = await establishMcpSession(serverConfig);
    console.log('Connected to MCP server');
    return session;
  } catch (error) {
    console.error('Failed to connect to MCP server:', error.message);
    throw error;
  }
}
/**
 * Spawn the server as a subprocess over stdio and initialize an MCP session.
 *
 * @param {{command: string, args: string[], env: object}} serverConfig
 * @returns {Promise<object>} the initialized session
 */
async function establishMcpSession(serverConfig) {
  const { command, args, env } = serverConfig;
  const serverParams = new StdioServerParameters(command, args, env);
  // Open the stdio transport and destructure the read/write pair.
  const [stdio, write] = await stdio_client(serverParams);
  const session = new ClientSession(stdio, write);
  await session.initialize();
  // Report the tools the server advertises.
  const toolList = await session.list_tools();
  console.log('Available tools:', toolList.tools.map(tool => tool.name).join(', '));
  return session;
}
/**
 * Example 3: Simple text chat using the MCP server.
 *
 * @param {object} session - initialized MCP client session
 */
async function textChatExample(session) {
  console.log('\n--- Text Chat Example ---');
  const request = {
    messages: [
      { role: 'user', content: 'What is the Model Context Protocol (MCP) and how is it useful?' }
    ],
    model: 'deepseek/deepseek-chat-v3-0324:free'
  };
  try {
    const result = await session.call_tool('mcp_openrouter_chat_completion', request);
    console.log('Response:', result.content[0].text);
  } catch (error) {
    console.error('Text chat error:', error.message);
  }
}
/**
 * Example 4: Single-image analysis using the MCP server.
 *
 * Fix: the original called imageToBase64(testImage) and stored the result in
 * `base64Image`, but never used it — the tool receives a file path
 * (`image_path`) and reads the image itself — so the redundant disk
 * read/encode has been removed.
 *
 * @param {object} session - initialized MCP client session
 */
async function imageAnalysisExample(session) {
  console.log('\n--- Image Analysis Example ---');
  try {
    // Call the image analysis tool; it loads the image from the given path.
    const result = await session.call_tool('mcp_openrouter_analyze_image', {
      image_path: testImage,
      question: 'What can you see in this image? Please describe it in detail.'
    });
    console.log('Response:', result.content[0].text);
  } catch (error) {
    console.error('Image analysis error:', error.message);
  }
}
/**
 * Example 5: Multiple image analysis using the MCP server.
 *
 * @param {object} session - initialized MCP client session
 */
async function multiImageAnalysisExample(session) {
  console.log('\n--- Multiple Image Analysis Example ---');
  const request = {
    images: [
      { url: testImage }
    ],
    prompt: 'What can you see in this image? Please describe it in detail.',
    markdown_response: true
  };
  try {
    const result = await session.call_tool('mcp_openrouter_multi_image_analysis', request);
    console.log('Response:', result.content[0].text);
  } catch (error) {
    console.error('Multi-image analysis error:', error.message);
  }
}
/**
 * Example 6: Search available models.
 *
 * @param {object} session - initialized MCP client session
 */
async function searchModelsExample(session) {
  console.log('\n--- Search Models Example ---');
  const query = {
    query: 'free',
    capabilities: {
      vision: true
    },
    limit: 5
  };
  try {
    const result = await session.call_tool('search_models', query);
    console.log('Available free vision models:');
    // Print a 1-based ranked list of matching models.
    let rank = 1;
    for (const model of result.content[0].models) {
      console.log(`${rank}. ${model.id} - Context length: ${model.context_length}`);
      rank += 1;
    }
  } catch (error) {
    console.error('Search models error:', error.message);
  }
}
/**
 * Run all examples in sequence: resolve the server, connect, then exercise
 * each tool. Errors are logged rather than rethrown so the script exits
 * cleanly.
 */
async function runExamples() {
  try {
    const serverPath = await startMcpServer();
    const session = await connectToMcpServer(serverPath);
    // Run each example in order against the same session.
    const examples = [
      textChatExample,
      imageAnalysisExample,
      multiImageAnalysisExample,
      searchModelsExample,
    ];
    for (const example of examples) {
      await example(session);
    }
    console.log('\nAll examples completed successfully!');
  } catch (error) {
    console.error('Error running examples:', error.message);
  }
}
// Run the examples; surface any unexpected rejection instead of letting it go unhandled.
runExamples().catch(console.error);

187
examples/python_example.py Normal file
View File

@@ -0,0 +1,187 @@
#!/usr/bin/env python3
"""
OpenRouter MCP Server - Python Example
This script demonstrates how to use the OpenRouter MCP Server from Python,
for various tasks such as text chat and image analysis.
"""
import os
import sys
import json
import asyncio
import subprocess
from typing import Optional, Dict, Any, List
from contextlib import AsyncExitStack

from dotenv import load_dotenv

# Try to import MCP client libraries, show a helpful error if not available.
# NOTE(review): the import name is `mcp`; confirm the PyPI package name in the
# install hint below — the official MCP Python SDK is published as `mcp`,
# not `python-mcp`.
try:
    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client
except ImportError:
    print("Error: MCP client libraries not found. Please install them with:")
    print("pip install python-mcp")
    sys.exit(1)

# Try to import OpenAI, show a helpful error if not available
try:
    from openai import OpenAI
except ImportError:
    print("Error: OpenAI client not found. Please install it with:")
    print("pip install openai")
    sys.exit(1)

# Load environment variables from .env file
load_dotenv()

# Get API key from environment; fail fast with a helpful message if missing.
API_KEY = os.getenv("OPENROUTER_API_KEY")
if not API_KEY:
    print("Error: OPENROUTER_API_KEY environment variable is missing")
    print("Please create a .env file with OPENROUTER_API_KEY=your_key")
    sys.exit(1)

# Default model used for chat completions via OpenRouter.
MODEL = "anthropic/claude-3-5-sonnet"

# Configuration for spawning the MCP server as a subprocess over stdio.
SERVER_CONFIG = {
    "command": "npx",
    "args": ["-y", "@stabgan/openrouter-mcp-multimodal"],
    "env": {"OPENROUTER_API_KEY": API_KEY}
}
def convert_tool_format(tool):
    """Convert an MCP tool definition into the OpenAI function-tool format.

    Args:
        tool: An MCP tool object exposing ``name``, ``description`` and
            ``inputSchema`` (a JSON-Schema dict).

    Returns:
        dict: An OpenAI-style ``{"type": "function", "function": {...}}``
        tool specification.
    """
    schema = tool.inputSchema or {}
    return {
        "type": "function",
        "function": {
            "name": tool.name,
            "description": tool.description,
            "parameters": {
                "type": "object",
                # "required" (and "properties") are optional in JSON Schema;
                # the original indexed them directly and raised KeyError for
                # tools with no required parameters.
                "properties": schema.get("properties", {}),
                "required": schema.get("required", []),
            },
        },
    }
class MCPClient:
    """MCP client for interacting with the OpenRouter MCP server.

    Holds the stdio transport, the MCP session, an OpenAI client pointed at
    OpenRouter, and the running conversation history (``self.messages``),
    which accumulates across calls to :meth:`process_query`.
    """
    def __init__(self):
        # Active MCP session; populated by connect_to_server().
        self.session: Optional[ClientSession] = None
        # Owns the stdio transport and session lifetimes; closed in cleanup().
        self.exit_stack = AsyncExitStack()
        # OpenAI-compatible client routed through the OpenRouter endpoint.
        self.openai = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=API_KEY
        )
        # Conversation history shared across process_query() calls.
        self.messages = []
    async def connect_to_server(self, server_config):
        """Spawn the MCP server over stdio and initialize a session.

        Args:
            server_config: dict with ``command``, ``args`` and ``env`` keys,
                unpacked into ``StdioServerParameters``.

        Returns:
            The list of tool definitions advertised by the server.
        """
        server_params = StdioServerParameters(**server_config)
        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        self.stdio, self.write = stdio_transport
        self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))
        await self.session.initialize()
        # List available tools from the MCP server
        response = await self.session.list_tools()
        print("\nConnected to server with tools:", [tool.name for tool in response.tools])
        return response.tools
    async def process_query(self, query: str) -> str:
        """Send one user query through OpenRouter, executing at most one tool call.

        The user message and all intermediate assistant/tool messages are
        appended to ``self.messages``, so context carries over between calls.

        Returns:
            str: the final response text (prefixed with a tool-call marker
            line when a tool was invoked).
        """
        self.messages.append({
            "role": "user",
            "content": query
        })
        # Get available tools from the MCP server and convert to OpenAI format.
        response = await self.session.list_tools()
        available_tools = [convert_tool_format(tool) for tool in response.tools]
        # Make the initial OpenRouter API call with tool definitions
        response = self.openai.chat.completions.create(
            model=MODEL,
            tools=available_tools,
            messages=self.messages
        )
        self.messages.append(response.choices[0].message.model_dump())
        final_text = []
        content = response.choices[0].message
        # Process tool calls if any.
        # NOTE(review): only the FIRST tool call is executed; any additional
        # parallel tool calls in the same response are silently ignored.
        if content.tool_calls is not None:
            tool_name = content.tool_calls[0].function.name
            tool_args = content.tool_calls[0].function.arguments
            # Arguments arrive as a JSON string; empty/None means no arguments.
            tool_args = json.loads(tool_args) if tool_args else {}
            # Execute tool call
            try:
                result = await self.session.call_tool(tool_name, tool_args)
                final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")
            except Exception as e:
                print(f"Error calling tool {tool_name}: {e}")
                result = None
            # Add tool result to messages so the follow-up call can use it.
            self.messages.append({
                "role": "tool",
                "tool_call_id": content.tool_calls[0].id,
                "name": tool_name,
                "content": result.content if result else "Error executing tool call"
            })
            # Make a follow-up API call with the tool results
            response = self.openai.chat.completions.create(
                model=MODEL,
                max_tokens=1000,
                messages=self.messages,
            )
            final_text.append(response.choices[0].message.content)
        else:
            # No tool call: the assistant's direct reply is the whole answer.
            final_text.append(content.content)
        return "\n".join(final_text)
    async def chat_loop(self):
        """Run an interactive prompt loop until the user types 'quit' or 'exit'."""
        print("\nMCP Client Started!")
        print("Type your queries or 'quit' to exit.")
        while True:
            try:
                query = input("\nQuery: ").strip()
                if query.lower() in ['quit', 'exit']:
                    break
                result = await self.process_query(query)
                print("Result:")
                print(result)
            except Exception as e:
                # Keep the loop alive on per-query failures; just report them.
                print(f"Error: {str(e)}")
    async def cleanup(self):
        """Close the MCP session and stdio transport."""
        await self.exit_stack.aclose()
async def main():
    """Entry point: connect to the MCP server, then run the interactive chat loop."""
    mcp_client = MCPClient()
    try:
        await mcp_client.connect_to_server(SERVER_CONFIG)
        await mcp_client.chat_loop()
    finally:
        # Always release the stdio transport, even if connect or chat failed.
        await mcp_client.cleanup()


if __name__ == "__main__":
    asyncio.run(main())