Files
mcp-deep-research/backend/config.py
Krishna Kumar 9a6ac3fd2f Add OpenAI Deep Research MCP server
- FastMCP server with deep_research and deep_research_info tools
- OpenAI Responses API integration with background polling
- Configurable model via DEEP_RESEARCH_MODEL env var
- Default: o4-mini-deep-research (faster/cheaper)
- Optional FastAPI backend for standalone use
- Tested successfully: 80s query, 20 web searches, 4 citations
2025-12-30 16:00:37 -06:00

30 lines
958 B
Python

"""Configuration for Deep Research MCP Server.

All settings are read from environment variables (optionally populated
from a local .env file) with hardcoded fallbacks, so the module imports
cleanly even when no configuration is present.
"""
import os

from dotenv import load_dotenv

# Pull variables from a local .env file into os.environ (no-op if absent).
load_dotenv()

# OpenAI API key (required at runtime). May be None here if unset --
# callers are expected to validate before issuing API requests.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Hardcoded fallback model (the faster/cheaper option).
DEFAULT_MODEL = "o4-mini-deep-research-2025-06-26"

# Known model identifiers, for reference.
AVAILABLE_MODELS = [
    "o4-mini-deep-research-2025-06-26",  # Faster, cheaper (DEFAULT)
    "o3-deep-research-2025-06-26",  # Thorough, ~$1+ per query
]

# Deep Research model -- configurable via Docker env, falls back to the
# hardcoded default. Using `or` (rather than a getenv default) also treats
# an *empty* env var as unset.
DEEP_RESEARCH_MODEL = os.getenv("DEEP_RESEARCH_MODEL") or DEFAULT_MODEL

# FastAPI service configuration. The same `or` pattern is used throughout
# so an env var set to the empty string (e.g. a blank `VAR=` line in a
# compose/.env file) falls back to the default instead of crashing
# int()/float() at import time or yielding an empty bind address.
# NOTE(review): 0.0.0.0 binds all interfaces -- fine inside a container,
# but confirm this is intended for non-containerized deployments.
FASTAPI_HOST = os.getenv("DEEP_RESEARCH_HOST") or "0.0.0.0"
FASTAPI_PORT = int(os.getenv("DEEP_RESEARCH_PORT") or "8002")

# Polling configuration for long-running background research jobs:
# how often to poll (seconds) and the overall wait ceiling (minutes).
POLL_INTERVAL_SECONDS = float(os.getenv("DEEP_RESEARCH_POLL_INTERVAL") or "5.0")
MAX_WAIT_MINUTES = int(os.getenv("DEEP_RESEARCH_MAX_WAIT") or "15")