Add OpenAI Deep Research MCP server
- FastMCP server with deep_research and deep_research_info tools
- OpenAI Responses API integration with background polling
- Configurable model via DEEP_RESEARCH_MODEL env var
- Default: o4-mini-deep-research (faster/cheaper)
- Optional FastAPI backend for standalone use
- Tested successfully: 80s query, 20 web searches, 4 citations
This commit is contained in:
29
backend/config.py
Normal file
29
backend/config.py
Normal file
@@ -0,0 +1,29 @@
|
||||
"""Configuration for Deep Research MCP Server.

Every setting is sourced from the process environment (a local ``.env``
file, if present, is loaded first via python-dotenv), with hardcoded
fallbacks where one makes sense.
"""

import os

from dotenv import load_dotenv

# Merge any .env file into os.environ before reading settings below.
load_dotenv()

# OpenAI API Key (required)
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")

# Hardcoded fallback model (cheaper/faster option)
DEFAULT_MODEL = "o4-mini-deep-research-2025-06-26"

# Available models for reference
AVAILABLE_MODELS = [
    "o4-mini-deep-research-2025-06-26",  # Faster, cheaper (DEFAULT)
    "o3-deep-research-2025-06-26",  # Thorough, ~$1+ per query
]

# Deep Research Model - configurable via Docker env, falls back to the
# hardcoded default. An empty DEEP_RESEARCH_MODEL counts as unset.
_model_override = os.environ.get("DEEP_RESEARCH_MODEL")
DEEP_RESEARCH_MODEL = _model_override if _model_override else DEFAULT_MODEL

# FastAPI service configuration
FASTAPI_HOST = os.environ.get("DEEP_RESEARCH_HOST", "0.0.0.0")
FASTAPI_PORT = int(os.environ.get("DEEP_RESEARCH_PORT", "8002"))

# Polling configuration
POLL_INTERVAL_SECONDS = float(os.environ.get("DEEP_RESEARCH_POLL_INTERVAL", "5.0"))
MAX_WAIT_MINUTES = int(os.environ.get("DEEP_RESEARCH_MAX_WAIT", "15"))
|
||||
Reference in New Issue
Block a user