- FastMCP server exposing `deep_research` and `deep_research_info` tools
- OpenAI Responses API integration with background polling
- Model configurable via the `DEEP_RESEARCH_MODEL` env var (default: `o4-mini-deep-research`, the faster/cheaper option)
- Optional FastAPI backend for standalone use
- Tested successfully: an 80 s query performing 20 web searches and yielding 4 citations
30 lines · 958 B · Python
"""Configuration for Deep Research MCP Server."""

import os

from dotenv import load_dotenv

# Pull variables from a local .env file into os.environ, if one is present.
load_dotenv()

# OpenAI API key (required).
# NOTE(review): nothing validates this at import time — if the variable is
# unset the value is None and requests will fail later at the API call site.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Hardcoded fallback model (the cheaper/faster option).
DEFAULT_MODEL = "o4-mini-deep-research-2025-06-26"

# Known model identifiers, kept here for reference.
AVAILABLE_MODELS = [
    "o4-mini-deep-research-2025-06-26",  # Faster, cheaper (DEFAULT)
    "o3-deep-research-2025-06-26",  # Thorough, ~$1+ per query
]

# Active model: taken from the environment when set to a non-empty string,
# otherwise the hardcoded default above (configurable via Docker env).
_model_override = os.getenv("DEEP_RESEARCH_MODEL")
DEEP_RESEARCH_MODEL = _model_override if _model_override else DEFAULT_MODEL

# Bind address/port for the optional standalone FastAPI backend.
FASTAPI_HOST = os.getenv("DEEP_RESEARCH_HOST", "0.0.0.0")
FASTAPI_PORT = int(os.getenv("DEEP_RESEARCH_PORT", "8002"))

# Background-polling behaviour for long-running research requests.
POLL_INTERVAL_SECONDS = float(os.getenv("DEEP_RESEARCH_POLL_INTERVAL", "5.0"))
MAX_WAIT_MINUTES = int(os.getenv("DEEP_RESEARCH_MAX_WAIT", "15"))