Add content_blocks to council_query for structured UI display
- Add content_blocks array to the council_query return, carrying each model response and the synthesis result for rendering as UI cards (shape sketched below)
- Add MODEL_DISPLAY_NAMES mapping and get_display_name helper for user-friendly model names in the UI
- Update Python version to 3.13 for compatibility
- Refactor FastAPI endpoints for cleaner error handling
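For context while reviewing: the content_blocks added in this diff come out shaped roughly like this. The keys and "type" values are taken from the diff itself; the model IDs and response strings here are illustrative only.

content_blocks = [
    {
        "type": "council_response",              # one block per Stage 1 model answer
        "model": "openai/gpt-4o",                # illustrative model ID
        "model_display_name": "GPT-4o",
        "response": "<model answer text>",       # illustrative value
        "stage": 1,
    },
    {
        "type": "council_synthesis",             # single Stage 3 chairman block
        "model": "anthropic/claude-3.5-sonnet",  # illustrative model ID
        "model_display_name": "Claude 3.5 Sonnet",
        "response": "<synthesized final answer>",  # illustrative value
    },
]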
@@ -13,6 +13,39 @@ FASTAPI_URL = os.getenv("COUNCIL_FASTAPI_URL", "http://localhost:8001")
 
 # Create MCP server instance
 mcp = FastMCP("council")
+
+# Model display name mapping for human-readable names
+MODEL_DISPLAY_NAMES = {
+    "openai/gpt-4o": "GPT-4o",
+    "openai/gpt-4o-mini": "GPT-4o Mini",
+    "openai/gpt-4-turbo": "GPT-4 Turbo",
+    "openai/o1": "o1",
+    "openai/o1-mini": "o1 Mini",
+    "openai/o1-preview": "o1 Preview",
+    "anthropic/claude-3.5-sonnet": "Claude 3.5 Sonnet",
+    "anthropic/claude-3-opus": "Claude 3 Opus",
+    "anthropic/claude-3-haiku": "Claude 3 Haiku",
+    "google/gemini-pro": "Gemini Pro",
+    "google/gemini-pro-1.5": "Gemini Pro 1.5",
+    "google/gemini-2.0-flash-001": "Gemini 2.0 Flash",
+    "google/gemini-2.0-flash-thinking-exp": "Gemini 2.0 Flash Thinking",
+    "meta-llama/llama-3.1-405b-instruct": "Llama 3.1 405B",
+    "meta-llama/llama-3.1-70b-instruct": "Llama 3.1 70B",
+    "mistralai/mistral-large": "Mistral Large",
+    "deepseek/deepseek-chat": "DeepSeek Chat",
+    "deepseek/deepseek-r1": "DeepSeek R1",
+}
+
+
+def get_display_name(model_id: str) -> str:
+    """Get human-readable display name for a model ID."""
+    if model_id in MODEL_DISPLAY_NAMES:
+        return MODEL_DISPLAY_NAMES[model_id]
+    # Fallback: extract the model name from the ID
+    parts = model_id.split("/")
+    if len(parts) > 1:
+        return parts[-1].replace("-", " ").title()
+    return model_id
 
 # HTTP client for FastAPI communication
 _http_client: httpx.AsyncClient | None = None
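A quick sketch of how the helper behaves, assuming the module above is importable. The first ID is in the mapping; the second is a hypothetical ID (not in this commit) that exercises the fallback path; the third has no provider prefix.

assert get_display_name("openai/gpt-4o") == "GPT-4o"            # exact match in MODEL_DISPLAY_NAMES
assert get_display_name("qwen/qwen-2.5-72b") == "Qwen 2.5 72B"  # hypothetical ID: fallback title-cases the last segment
assert get_display_name("local-model") == "local-model"         # no "/" separator: returned unchanged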
@@ -83,6 +116,7 @@ async def council_query(
     Returns:
         Complete council response with all 3 stages and metadata including:
         - conversation_id: The conversation ID used
+        - content_blocks: Structured blocks for each model response (for UI rendering)
         - stage1: Individual model responses
         - stage2: Peer rankings with aggregate scores
         - stage3: Chairman's synthesized final answer
@@ -100,8 +134,35 @@ async def council_query(
         {"content": query}
     )
 
+    # Build content_blocks for structured UI rendering
+    content_blocks = []
+
+    # Add Stage 1 responses as individual blocks
+    stage1_results = result.get("stage1", [])
+    for resp in stage1_results:
+        model_id = resp.get("model", "unknown")
+        content_blocks.append({
+            "type": "council_response",
+            "model": model_id,
+            "model_display_name": get_display_name(model_id),
+            "response": resp.get("response", ""),
+            "stage": 1
+        })
+
+    # Add Stage 3 synthesis block
+    stage3_result = result.get("stage3", {})
+    if stage3_result:
+        model_id = stage3_result.get("model", "unknown")
+        content_blocks.append({
+            "type": "council_synthesis",
+            "model": model_id,
+            "model_display_name": get_display_name(model_id),
+            "response": stage3_result.get("response", "")
+        })
+
     return {
         "conversation_id": conversation_id,
         "content_blocks": content_blocks,
         **result
     }
 
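A minimal sketch of how a client might consume the new payload as cards. The block types and keys come from the diff above; the text-based rendering is a stand-in and not part of this commit.

def render_blocks(response: dict) -> None:
    """Print one text 'card' per content block (stand-in for real UI rendering)."""
    for block in response.get("content_blocks", []):
        title = block["model_display_name"]
        if block["type"] == "council_synthesis":
            title += " (final synthesis)"
        print(f"=== {title} ===")
        print(block.get("response", ""))
        print()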