#!/usr/bin/env python3
"""
LogicFrame MCP Server — Agent Baseline Layer v2
12 clean memory tools for AI agents.

Usage:
  openclaw mcp add logicframe -- python3 logicframe_mcp_server.py --key YOUR_API_KEY
"""

import json, os, sys, datetime
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.request import urlopen, Request
from urllib.error import HTTPError

# ── Config ─────────────────────────────────────────────────────────────────────
# Every setting is sourced from the environment so deployments never require
# code edits; defaults target a local development setup.
MEMORY_SERVER = os.environ.get("MEMORY_SERVER_URL", "http://localhost:8421")
# Primary key, with a legacy LF_API_KEY fallback (nested lookup keeps an
# explicitly-empty MEMORY_API_KEY from falling through to the legacy key).
API_KEY = os.environ.get("MEMORY_API_KEY", os.environ.get("LF_API_KEY", ""))
# Zero-knowledge encryption key — overrides local memory.key for remote deployments
ZK_ENCRYPTION_KEY = os.environ.get("ZK_ENCRYPTION_KEY")
# NOTE(review): hardcoded fallback secret — acceptable for local dev only;
# confirm production deployments always set CRON_SECRET in the environment.
CRON_SECRET = os.environ.get("CRON_SECRET", "themis-cron-2026")
DEFAULT_AGENT = os.environ.get("DEFAULT_AGENT_ID", "default")
# Optional telephony / voice-provider credentials (empty string when unused).
RETELL_API_KEY = os.environ.get("RETELL_API_KEY", "")
BLAND_API_KEY = os.environ.get("BLAND_API_KEY", "")
TWILIO_ACCOUNT_SID = os.environ.get("TWILIO_ACCOUNT_SID", "")
TWILIO_AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN", "")

# ── Tool Definitions ───────────────────────────────────────────────────────────

TOOLS = [

    # ── CORE ──────────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_log",
        "description": "Store a new memory. The agent's memory is permanent and searchable. Use skip_classification=true for high-speed bulk writes (skips entity extraction + classification).",
        "input_schema": {
            "type": "object",
            "properties": {
                "text":                  {"type": "string",  "description": "What to remember. Be specific — include who, what, when, why."},
                "category":              {"type": "string",  "description": "Type: general | client_identity | decision | preference | contract | correction | interaction | complaint", "default": "general"},
                "importance":           {"type": "number",  "description": "1-10 importance (default 7)", "default": 7},
                "tags":                 {"type": "array",   "items": {"type": "string"}, "description": "Optional tags"},
                "client_id":            {"type": "string",  "description": "Client ID (default: ed_creed)"},
                "agent_id":             {"type": "string",  "description": "Calling agent ID"},
                "source":               {"type": "string",  "description": "Source: voice_call | chat | email | meeting | document | manual | system"},
                "is_permanent":         {"type": "boolean", "description": "Never expires (auto-set for contracts/corrections)"},
                "skip_classification":  {"type": "boolean", "description": "Fast write — skip entity extraction and classification for high-speed bulk writes (e.g., streaming voice).", "default": False}
            },
            "required": ["text"]
        }
    },
    {
        "name": "logicframe_memory_log_verbatim",
        "description": "MemPalace-style verbatim memory storage. Stores raw conversation transcript AS-IS with speaker labels preserved - no extraction, no summarization. Use for conversation transcripts, call recordings, meeting notes. Wing/hall/room organize the Memory Palace structure.",
        "input_schema": {
            "type": "object",
            "properties": {
                "text":           {"type": "string",  "description": "Raw transcript with speaker labels"},
                "client_id":      {"type": "string",  "description": "Client ID"},
                "agent_id":       {"type": "string",  "description": "Calling agent ID"},
                "session_id":     {"type": "string",  "description": "Session ID"},
                "wing":           {"type": "string",  "description": "Memory Palace wing - person or project"},
                "hall":           {"type": "string",  "description": "Memory Palace hall - category"},
                "room":           {"type": "string",  "description": "Memory Palace room - specific topic"},
                "importance":     {"type": "number",  "description": "1-10 importance", "default": 7},
                "source":         {"type": "string",  "description": "voice_call|chat|meeting|conversation", "default": "conversation"}
            },
            "required": ["text"]
        }
    },
    {
        "name": "logicframe_memory_recall",
        "description": "Search memory for relevant entries. Use when the user asks about past events, clients, decisions.",
        "input_schema": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "What to search for (e.g. 'R Short Roofing proposal', 'why we chose VAPI')"},
                "limit": {"type": "number", "description": "Max entries to return (default 5)", "default": 5}
            },
            "required": ["query"]
        }
    },

    {
        "name": "logicframe_memory_context",
        "description": "Get full context about a client, project, or situation — synthesized narrative from all related memories.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entity":   {"type": "string", "description": "Entity name (client name, project, person)"},
                "depth":    {"type": "string", "enum": ["brief","full"], "description": "brief=3 memories, full=8 memories", "default": "full"}
            },
            "required": ["entity"]
        }
    },

    # ── THINK ─────────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_think",
        "description": "Reason about memories — ask 'why did this happen?' or 'what does this pattern mean?'. Returns analysis, not just facts.",
        "input_schema": {
            "type": "object",
            "properties": {
                "question":  {"type": "string", "description": "A question about the memories (e.g. 'Why has this client been declining?', 'What went wrong with this deal?')"},
                "entity":   {"type": "string", "description": "Optional: focus on a specific entity"},
                "depth":    {"type": "string", "enum": ["quick","deep"], "description": "quick=3 memories, deep=10 memories", "default": "quick"}
            },
            "required": ["question"]
        }
    },

    # ── PLAN ──────────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_plan",
        "description": "Surface action items, commitments, and follow-ups stored in memory. What did this client ask for? What's overdue?",
        "input_schema": {
            "type": "object",
            "properties": {
                "entity":     {"type": "string", "description": "Focus on a specific client/project (optional)"},
                "status":     {"type": "string", "enum": ["all","open","done","overdue"], "description": "Filter by status", "default": "open"},
                "lookback_days": {"type": "number", "description": "How many days back to search (default 30)", "default": 30}
            }
        }
    },

    # ── REVIEW ────────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_review",
        "description": "Weekly digest — what was stored, decided, and promised this week. Good for Monday morning briefings.",
        "input_schema": {
            "type": "object",
            "properties": {
                "days":  {"type": "number", "description": "Number of days to review (default 7)", "default": 7},
                "format": {"type": "string", "enum": ["brief","detailed"], "description": "brief=summary, detailed=full entries", "default": "brief"}
            }
        }
    },

    # ── EVOLVE ───────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_evolve",
        "description": "Trigger memory evolution — consolidates old memories into synthesized higher-level insights. Pro+ feature.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entity":    {"type": "string", "description": "Focus evolution on a specific entity (optional)"},
                "intensity": {"type": "string", "enum": ["gentle","standard","aggressive"], "description": "How aggressively to consolidate", "default": "standard"}
            }
        }
    },

    # ── ARCHIVE ──────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_archive",
        "description": "Archive old or stale entries — removes from active recall without deleting. Archived entries can be restored.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entry_id": {"type": "string", "description": "The entry_id to archive"},
                "reason":   {"type": "string", "description": "Why it's being archived (optional)"}
            },
            "required": ["entry_id"]
        }
    },

    # ── SNAPSHOT ─────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_snapshot",
        "description": "Full encrypted backup export — all memories as a downloadable JSON file. Good for disaster recovery.",
        "input_schema": {
            "type": "object",
            "properties": {
                "format": {"type": "string", "enum": ["json","markdown"], "description": "json or markdown format", "default": "json"},
                "entity": {"type": "string", "description": "Optional: export only a specific entity"}
            }
        }
    },

    # ── STATS ────────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_stats",
        "description": "Usage stats — total memories, this week, by category, storage used. Shows the health of the memory system.",
        "input_schema": {
            "type": "object",
            "properties": {}
        }
    },

    # ── SHARE ────────────────────────────────────────────────────────────────

    {
        "name": "logicframe_memory_share",
        "description": "Share a memory or insight with another agent in the organization. The receiving agent gets it in their context.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entry_id":    {"type": "string", "description": "Entry ID to share (get from recall results)"},
                "to_agent":   {"type": "string", "description": "Target agent ID (e.g. 'hal', 'voice-agent', 'support-bot')"},
                "note":        {"type": "string", "description": "Optional note to attach"}
            },
            "required": ["entry_id", "to_agent"]
        }
    },

    # ── SELF-HEAL ───────────────────────────────────────────────────────────

    {
        "name": "logicframe_self_heal",
        "description": "Run system diagnostics and auto-fix any issues found. Checks: memory server health, Qdrant status, Ollama models, VPS connectivity, backup freshness, disk space. Ed's law: fix problems immediately without being asked.",
        "input_schema": {
            "type": "object",
            "properties": {
                "full": {"type": "boolean", "description": "Run full diagnostic including Ollama models", "default": False}
            }
        }
    },

    # ── CAUSAL CHAINS ─────────────────────────────────────────────────────

    {
        "name": "logicframe_causal_chain",
        "description": "Get the causal chain for any memory entry — traces WHY a decision was made, what alternatives were considered, assumptions made, and tradeoffs evaluated. Core decision intelligence feature.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entry_id": {"type": "string", "description": "The memory entry ID to trace"}
            },
            "required": ["entry_id"]
        }
    },

    {
        "name": "logicframe_constraints_set",
        "description": "Store a business rule or constraint that persists across ALL agent interactions. Example: 'Never suggest Facebook posting', 'Always route sensitive data to Ollama', 'Bill by the hour'. Constraints are always enforced.",
        "input_schema": {
            "type": "object",
            "properties": {
                "constraint": {"type": "string", "description": "The constraint rule"},
                "category": {"type": "string", "description": "privacy|workflow|pricing|communication|other", "default": "workflow"}
            },
            "required": ["constraint"]
        }
    },

    {
        "name": "logicframe_constraints_list",
        "description": "List all active constraints for the current client. Returns all enforced business rules.",
        "input_schema": {"type": "object", "properties": {}}
    },

    # ── GAP DETECTION ─────────────────────────────────────────────────────

    {
        "name": "logicframe_gaps_detect",
        "description": "Detect knowledge gaps in the client's memory — what questions have been asked but never fully answered, what topics keep coming up without resolution, what decisions are missing context. Generates actionable items.",
        "input_schema": {
            "type": "object",
            "properties": {
                "topic": {"type": "string", "description": "Topic to analyze for gaps", "default": "general"}
            }
        }
    },

    # ── ANALOGICAL MEMORY ──────────────────────────────────────────────────

    {
        "name": "logicframe_analogy_find",
        "description": "Find past situations structurally similar to the current one. Ask: 'Has anything like this happened before?' Returns analogous memories with their outcomes — use past experience to inform current decisions.",
        "input_schema": {
            "type": "object",
            "properties": {
                "current_situation": {"type": "string", "description": "Describe the current situation"},
                "situation_type": {"type": "string", "description": "sales|support|technical|leadership|other", "default": "other"}
            },
            "required": ["current_situation", "situation_type"]
        }
    },

    # ── FEDERATED INTELLIGENCE ─────────────────────────────────────────────

    {
        "name": "logicframe_federation_submit",
        "description": "Submit an insight to the federated intelligence network — share learnings with other LogicFrame agents/organizations. Insights are anonymized and broadcast. Learn from what others have encountered.",
        "input_schema": {
            "type": "object",
            "properties": {
                "insight": {"type": "string", "description": "The insight or learning to share"},
                "category": {"type": "string", "description": "sales|technical|product|market|other"}
            },
            "required": ["insight"]
        }
    },

    {
        "name": "logicframe_federation_query",
        "description": "Query insights from the federated intelligence network — learn from what other LogicFrame agents have encountered. Returns anonymized insights relevant to your query.",
        "input_schema": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "What you need insight about"}
            },
            "required": ["query"]
        }
    },

    # ── CONTEXT PROFILES ─────────────────────────────────────────────────────────

    {
        "name": "logicframe_context_v2",
        "description": "Get context optimized for a specific consumer: voice_precall (under 400 tokens, fast), chat_session (under 800 tokens), human_briefing (comprehensive narrative), email_draft (follow-up focused). Same underlying data, different intelligence output per consumer.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":    {"type": "string"},
                "query":        {"type": "string"},
                "context_type":  {"type": "string", "enum": ["voice_precall", "chat_session", "human_briefing", "email_draft", "default"], "default": "default"}
            },
            "required": ["client_id", "query"]
        }
    },

    {
        "name": "logicframe_sentiment",
        "description": "Get per-client sentiment trajectory over time — trend (improving/stable/declining), current score, alert level. Sentiment tracked per-entry and trended automatically.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string"},
                "trend_window": {"type": "integer", "description": "Days to analyze", "default": 30}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_recall_v2",
        "description": "Enhanced recall — returns full entity details (persons, topics, emotions, requests, promises, key facts) plus all linked memory clusters. Use for deep research.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string"},
                "query":     {"type": "string"},
                "limit":     {"type": "integer", "default": 10}
            },
            "required": ["client_id", "query"]
        }
    },

    # ── CONTRADICTION ───────────────────────────────────────────────────────

    {
        "name": "logicframe_contradictions",
        "description": "Detect contradictions between memories — flags conflicting facts. Returns severity: HIGH (block until resolved), MEDIUM (flag), LOW (log). Prevents AI from acting on conflicting information.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string"},
                "query":     {"type": "string"}
            },
            "required": ["client_id", "query"]
        }
    },

    {
        "name": "logicframe_resolve",
        "description": "Mark an item as resolved or superseded. Keeps full audit trail. Updates resolution status so open items are always tracked.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":       {"type": "string", "description": "Client/tenant identifier"},
                "entry_id":        {"type": "string", "description": "Entry ID to resolve (get from recall results)"},
                # BUG FIX: enum value was "keep_ authoritative" (stray interior space) —
                # the intended "keep_authoritative" value could never pass schema validation.
                "resolution":      {"type": "string", "enum": ["supersede", "keep_authoritative"], "default": "supersede"},
                "resolution_note": {"type": "string", "description": "Optional note explaining the resolution"}
            },
            "required": ["client_id", "entry_id"]
        }
    },

    {
        "name": "logicframe_update_status",
        "description": "Update the lifecycle status of a request, promise, or complaint: open, in_progress, resolved.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entry_id":  {"type": "string",  "description": "Entry ID to update"},
                "status":   {"type": "string",  "description": "New status: open | in_progress | resolved", "enum": ["open", "in_progress", "resolved"]},
                "note":     {"type": "string",  "description": "Optional note"},
                "agent_id": {"type": "string",  "description": "Calling agent ID", "default": "themis"}
            },
            "required": ["entry_id", "status"]
        }
    },

    {
        "name": "logicframe_reclassify",
        "description": "Reclassify a memory entry — change its category, importance, or tier after the fact.",
        "input_schema": {
            "type": "object",
            "properties": {
                "entry_id":         {"type": "string",  "description": "Entry ID to reclassify"},
                "force_importance": {"type": "number",  "description": "New importance 0-10 (optional)"},
                "force_category":   {"type": "string",  "description": "New category (optional)"},
                "force_tier":       {"type": "string",  "description": "New tier: working | episodic | semantic | procedural (optional)"}
            },
            "required": ["entry_id"]
        }
    },

    {
        "name": "logicframe_summarize",
        "description": "Generate structured auto-summary from a conversation or session: what happened, what was decided, what was promised, what remains open. Creates summary memory and updates open items.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":    {"type": "string"},
                "conversation":  {"type": "string", "description": "Raw conversation text"},
                "session_type":  {"type": "string", "enum": ["voice", "chat", "email"], "default": "chat"}
            },
            "required": ["client_id", "conversation"]
        }
    },

    {
        "name": "logicframe_consolidate",
        "description": "Manually trigger nightly consolidation — groups related memories by topic, creates consolidated narrative with trajectory analysis, archives originals. Run before a big meeting or review.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string"}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_audit_verify",
        "description": "Verify the integrity of the audit chain and hash chain. Returns: chain_valid (true/false), total_entries, and failed_at_entry (null if clean). Run to confirm no tampering.",
        "input_schema": {"type": "object", "properties": {}}
    },

    {
        "name": "logicframe_intelligence_all",
        "description": "Full proactive intelligence report — escalation patterns, overdue promises, declining sentiment, stale open items, contradictions, and recommended actions with urgency levels.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string"},
                "urgency":   {"type": "string", "enum": ["all", "high", "medium"], "default": "all"}
            },
            "required": ["client_id"]
        }
    },

    # ── BOOTSTRAP ─────────────────────────────────────────────────────────

    {
        "name": "logicframe_bootstrap_synthesize",
        "description": "Onboard a new client or agent in seconds. Takes answers to 5 key questions, synthesizes into a complete context profile: business type, primary goals, biggest challenges, communication preferences, and decision-making style. Eliminates weeks of context-building.",
        "input_schema": {
            "type": "object",
            "properties": {
                "answers": {
                    "type": "object",
                    "description": "Answers to onboarding questions",
                    "properties": {
                        "name": {"type": "string"},
                        "industry": {"type": "string"},
                        "role": {"type": "string"}
                    },
                    "required": ["name"]
                },
                "agent_type": {"type": "string", "description": "sales_agent|support_agent|assistant|custom", "default": "assistant"}
            },
            "required": ["answers"]
        }
    },

    # ── INTELLIGENCE ──────────────────────────────────────────────────────

    {
        "name": "logicframe_intelligence",
        "description": "Run proactive intelligence analysis — detects escalation patterns (same topic contacted repeatedly), overdue promises, contradictions, and generates actionable alerts. Run after significant interactions.",
        "input_schema": {"type": "object", "properties": {}}
    },

    # ── DPO CORRECTIONS ─────────────────────────────────────────────────────

    {
        "name": "logicframe_correction_log",
        "description": "Log a correction — when Ed or a client corrects the AI's mistake. Feeds the DPO training pipeline. Every correction improves the model permanently. Log even small corrections — they compound.",
        "input_schema": {
            "type": "object",
            "properties": {
                "original":   {"type": "string", "description": "What the AI said or did wrong"},
                "corrected": {"type": "string", "description": "What it should have been"},
                "category":  {"type": "string", "enum": ["fact","style","behavior","bug","law","preference"], "description": "Type of correction", "default": "behavior"},
                "context":   {"type": "string", "description": "Optional context for the correction"}
            },
            "required": ["original", "corrected"]
        }
    },

    {
        "name": "logicframe_dpo_stats",
        "description": "Check DPO training status — how many correction pairs are ready, how many needed for training, whether training is recommended.",
        "input_schema": {"type": "object", "properties": {}}
    },

    {
        "name": "logicframe_dpo_run",
        "description": "Trigger DPO training run when 50+ correction pairs are ready. Training takes 15-20 hours on Mac Studio M3 Max.",
        "input_schema": {
            "type": "object",
            "properties": {
                "confirm": {"type": "string", "description": "Must be 'yes' to confirm training run"}
            },
            "required": ["confirm"]
        }
    },

    # ── ONBOARD ──────────────────────────────────────────────────────────────

    {
        "name": "logicframe_onboard",
        "description": "Start the 12-question client onboarding. Run once per new client to build their intelligence profile.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_name": {"type": "string", "description": "Name of the client or business"}
            },
            "required": ["client_name"]
        }
    },

    # ── HEALTH ───────────────────────────────────────────────────────────────

    {
        "name": "logicframe_health",
        "description": "Check if LogicFrame memory is connected and healthy.",
        "input_schema": {"type": "object", "properties": {}}
    },

    # ── AGENT BASELINE LAYER ─────────────────────────────────────────────────

    {
        "name": "logicframe_startup_check",
        "description": "Run on every agent boot. Checks if this user has been onboarded — if yes, returns their context for verification. If no memory exists, triggers automatic 12-question onboarding. This is the Agent Baseline Layer: boot → check memory → onboard or verify.",
        "input_schema": {
            "type": "object",
            "properties": {
                "user_id": {"type": "string", "description": "Telegram user ID or unique user identifier"}
            }
        }
    },

    {
        "name": "logicframe_log_onboarding_answer",
        "description": "Log a single onboarding answer. Call this after each of the 12 questions is answered. On question 12 completion, set complete=true to finalize onboarding.",
        "input_schema": {
            "type": "object",
            "properties": {
                "question_index": {"type": "number", "description": "Question number (0-11)"},
                "answer": {"type": "string", "description": "The user's answer to this question"},
                "complete": {"type": "boolean", "description": "True if this was the final question (12th)", "default": False}
            },
            "required": ["question_index", "answer"]
        }
    },

    # ── AGENT SYNC (Phase 4 Extension) ───────────────────────────────────────

    {
        "name": "logicframe_memory_sync_check",
        "description": "Check for memories stored by OTHER agents since a given timestamp. Agent-to-agent sync: a fresh agent can discover what other agents have done for the same client recently.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":         {"type": "string",  "description": "Client/tenant identifier (required)"},
                "since_timestamp":   {"type": "string",  "description": "ISO-8601 timestamp. If None, returns all memories from other agents."},
                "agent_id":          {"type": "string",  "description": "Your agent ID — memories from this agent are excluded (default: mcp_client)", "default": "mcp_client"},
                "limit":             {"type": "number",  "description": "Max memories to return (default 50)", "default": 50}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_memory_broadcast",
        "description": "Broadcast a memory marker so other agents know about it via memory_sync_check.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":       {"type": "string", "description": "Client/tenant identifier (required)"},
                "memory_entry_id": {"type": "string", "description": "ID of the memory being broadcast (required)"},
                "agent_id":        {"type": "string", "description": "Your agent ID (required)"},
                "note":            {"type": "string", "description": "Optional note explaining why this is being broadcast"}
            },
            "required": ["client_id", "memory_entry_id", "agent_id"]
        }
    },

    # ── DATA LOCK-IN / EXPORT ────────────────────────────────────────────────

    {
        "name": "logicframe_data_export",
        "description": "Export all memories for a client as a comprehensive JSON structure.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":        {"type": "string",  "description": "Client/tenant identifier (required)"},
                "include_deleted":  {"type": "boolean", "description": "Include soft-deleted entries (default: False)", "default": False}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_data_delete_request",
        "description": "Request deletion of all client data with a 30-day grace period. If confirm=True, data is permanently deleted after 30 days. SOFT delete — recoverable during grace period.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string",  "description": "Client/tenant identifier (required)"},
                "confirm":   {"type": "boolean", "description": "Must be True to schedule deletion (default: False)", "default": False}
            },
            "required": ["client_id"]
        }
    },

    # ── LEGAL / METADATA ─────────────────────────────────────────────────────

    {
        "name": "logicframe_legal_info",
        "description": "Get LogicFrame Terms of Service and Data Policy information.",
        "input_schema": {"type": "object", "properties": {}}
    },

    {
        "name": "logicframe_server_info",
        "description": "Get information about this MCP server: version, available tools, configuration.",
        "input_schema": {"type": "object", "properties": {}}
    },

    # ── CONVERSATION STATE (Auto-Resume Feature) ──────────────────────────────

    {
        "name": "logicframe_conversations_store",
        "description": "Store full conversation state for auto-resume. Call at the end of every conversation session.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":            {"type": "string",  "description": "Client/tenant identifier (required)"},
                "conversation_id":      {"type": "string",  "description": "Unique conversation ID (required)"},
                "conversation_summary": {"type": "string",  "description": "Brief summary of the conversation"},
                "final_agent_message":  {"type": "string",  "description": "Last agent message"},
                "pending_items":        {"type": "array",   "items": {"type": "string"}, "description": "Open threads or pending follow-ups"},
                "open_threads":         {"type": "array",   "items": {"type": "string"}, "description": "Open conversation threads"},
                "outcome":              {"type": "string",  "description": "Conversation outcome: resolved|escalated|pending|info"}
            },
            "required": ["client_id", "conversation_id"]
        }
    },

    {
        "name": "logicframe_conversations_resume",
        "description": "Retrieve stored conversation state for auto-resume. Call at the start of a new session.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":       {"type": "string", "description": "Client/tenant identifier (required)"},
                "conversation_id": {"type": "string", "description": "Specific conversation ID to resume (optional — resumes latest if omitted)"}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_conversations_timber",
        "description": "Store a timber-style summary of the conversation (key events, outcome, open threads).",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":            {"type": "string", "description": "Client/tenant identifier (required)"},
                "conversation_id":      {"type": "string", "description": "Conversation ID (required)"},
                "conversation_summary": {"type": "string", "description": "Brief summary"},
                "final_agent_message": {"type": "string", "description": "Last agent message"},
                "pending_items":        {"type": "array",  "items": {"type": "string"}},
                "open_threads":         {"type": "array",  "items": {"type": "string"}},
                "outcome":              {"type": "string", "description": "resolved|escalated|pending|info"}
            },
            "required": ["client_id", "conversation_id"]
        }
    },

    {
        "name": "logicframe_conversations_auto_resume_check",
        "description": "Check if auto-resume is needed. Call BEFORE processing any new message. Returns should_resume=true if last_message_time > 5 minutes ago.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string", "description": "Client/tenant identifier (required)"}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_conversations_delete",
        "description": "Delete stored conversation state.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id":       {"type": "string", "description": "Client/tenant identifier (required)"},
                "conversation_id": {"type": "string", "description": "Specific conversation ID to delete (optional — deletes latest if omitted)"}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_conversations_list",
        "description": "List all stored conversation IDs for a client.",
        "input_schema": {
            "type": "object",
            "properties": {
                "client_id": {"type": "string", "description": "Client/tenant identifier (required)"}
            },
            "required": ["client_id"]
        }
    },

    {
        "name": "logicframe_voice_ingest",
        "description": "Ingest a VAPI end-of-call report into voice memory. Stores utterances keyed by caller phone number.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key":     {"type": "string",  "description": "LogicMem API key"},
                "vapi_payload": {"type": "object", "description": "Full VAPI end-of-call-report JSON object"},
            },
            "required": ["api_key", "vapi_payload"]
        }
    },
    {
        "name": "logicframe_voice_recall",
        "description": "Recall prior call history for a caller. Use before a voice call to load context.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key":       {"type": "string",  "description": "LogicMem API key"},
                "caller_number": {"type": "string",  "description": "E.164 phone number, e.g. +19195551234"},
                "top_k":         {"type": "number",  "description": "Max utterances (default 5, max 20)", "default": 5},
            },
            "required": ["api_key", "caller_number"]
        }
    },
    {
        "name": "logicframe_voice_caller_delete",
        "description": "GDPR delete — remove all voice memories for a caller.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key":       {"type": "string", "description": "LogicMem API key"},
                "caller_number": {"type": "string", "description": "E.164 phone number"},
            },
            "required": ["api_key", "caller_number"]
        }
    },


    {
        "name": "vapi_list_assistants",
        "description": "List all VAPI voice assistants in your account.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "VAPI API key. Defaults to VAPI_API_KEY env var."}
            }
        }
    },
    {
        "name": "vapi_get_assistant",
        "description": "Get full configuration of a specific VAPI assistant.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "assistant_id": {"type": "string", "description": "The VAPI assistant ID."}
            },
            "required": ["assistant_id"]
        }
    },
    {
        "name": "vapi_create_assistant",
        "description": "Create a new VAPI voice assistant with model, voice, and webhook configuration.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "name": {"type": "string", "description": "Display name for the assistant."},
                "model": {"type": "string", "description": "Model, e.g. gpt-4o or gpt-4o-mini. Default: gpt-4o."},
                "voice": {"type": "string", "description": "Voice ID or preset like sarah, alloy, onyx. Default: sarah."},
                "voice_settings": {"type": "object", "description": "stability, similarity_boost, style, use_speaker_boost."},
                "server_url": {"type": "string", "description": "Your webhook server URL for call events."},
                "assistant_template": {"type": "string", "description": "System prompt / instructions for the assistant."},
                "end_call_phrases": {"type": "array", "items": {"type": "string"}, "description": "Phrases that trigger end of call."},
                "temperature": {"type": "number", "description": "Model temperature. Default: 0.7."},
                "max_duration_seconds": {"type": "number", "description": "Max call duration. Default: 600."}
            },
            "required": ["name"]
        }
    },
    {
        "name": "vapi_update_assistant",
        "description": "Update an existing VAPI assistant configuration.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "assistant_id": {"type": "string"},
                "name": {"type": "string"},
                "model": {"type": "string"},
                "voice": {"type": "string"},
                "server_url": {"type": "string"},
                "assistant_template": {"type": "string"},
                "end_call_phrases": {"type": "array", "items": {"type": "string"}},
                "temperature": {"type": "number"},
                "max_duration_seconds": {"type": "number"}
            },
            "required": ["assistant_id"]
        }
    },
    {
        "name": "vapi_delete_assistant",
        "description": "Delete a VAPI assistant by ID.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "assistant_id": {"type": "string"}
            },
            "required": ["assistant_id"]
        }
    },
    {
        "name": "vapi_list_calls",
        "description": "List recent VAPI calls with optional status and assistant filters.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "assistant_id": {"type": "string", "description": "Filter by assistant ID."},
                "status": {"type": "string", "description": "Filter: queued, ringing, in-progress, completed, ended."},
                "limit": {"type": "number", "description": "Max results. Default: 20, max: 100."}
            }
        }
    },
    {
        "name": "vapi_get_call",
        "description": "Get detailed info and transcript for a specific VAPI call.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "call_id": {"type": "string", "description": "The VAPI call ID."}
            },
            "required": ["call_id"]
        }
    },
    {
        "name": "vapi_initiate_outbound_call",
        "description": "Start an outbound VAPI call. The agent calls the destination number using the specified assistant.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "assistant_id": {"type": "string", "description": "VAPI assistant ID to use."},
                "phone_number_id": {"type": "string", "description": "VAPI phone number ID to call from (must be provisioned)."},
                "to_number": {"type": "string", "description": "Destination phone number in E.164 format, e.g. +19195551234."},
                "customer_caller_id": {"type": "string", "description": "Override caller ID (requires verified number)."},
                "noise_cancellation": {"type": "boolean", "description": "Enable noise cancellation. Default: true."}
            },
            "required": ["assistant_id", "to_number"]
        }
    },
    {
        "name": "vapi_list_phone_numbers",
        "description": "List all provisioned VAPI phone numbers (DIDs).",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "limit": {"type": "number", "description": "Max results. Default: 20."}
            }
        }
    },
    {
        "name": "vapi_provision_phone_number",
        "description": "Provision/buy a phone number in VAPI and assign to an assistant.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "phone_number": {"type": "string", "description": "Phone number in E.164, e.g. +19195551234."},
                "assistant_id": {"type": "string", "description": "Assign this assistant to the number."},
                "call_screening_enabled": {"type": "boolean", "description": "Enable call screening. Default: false."}
            },
            "required": ["phone_number"]
        }
    },
    {
        "name": "vapi_voice_ingest",
        "description": "Fetch a VAPI call transcript and store it in LogicMem voice memory. Call after each call ends.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "logicmem_api_key": {"type": "string", "description": "Your LogicMem API key."},
                "vapi_api_key": {"type": "string", "description": "VAPI API key. Defaults to VAPI_API_KEY env var."},
                "call_id": {"type": "string", "description": "VAPI call ID to ingest."}
            },
            "required": ["logicmem_api_key", "call_id"]
        }
    },
    {
        "name": "vapi_voice_recall",
        "description": "Retrieve caller history from LogicMem voice memory. Use before a call to personalize the greeting.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "logicmem_api_key": {"type": "string", "description": "Your LogicMem API key."},
                "caller_number": {"type": "string", "description": "Caller phone number in E.164 format."},
                "top_k": {"type": "number", "description": "Number of past utterances to return. Default: 5, max: 20."}
            },
            "required": ["logicmem_api_key", "caller_number"]
        }
    },
    {
        "name": "vapi_get_call_transcript",
        "description": "Get a formatted transcript for a VAPI call with speaker labels and timing.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "call_id": {"type": "string", "description": "The VAPI call ID."}
            },
            "required": ["call_id"]
        }
    },
    {
        "name": "vapi_hangup_call",
        "description": "Hang up an active VAPI call.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string"},
                "call_id": {"type": "string", "description": "The call ID to hang up."}
            },
            "required": ["call_id"]
        }
    },

    # ── Retell AI ───────────────────────────────────────────────────────────────

    {
        "name": "retell_list_agents",
        "description": "List all Retell AI agents in your Retell account.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key. Defaults to RETELL_API_KEY env var."}
            }
        }
    },
    {
        "name": "retell_get_agent",
        "description": "Get details of a specific Retell AI agent by agent ID.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key."},
                "agent_id": {"type": "string", "description": "The Retell agent ID."}
            },
            "required": ["agent_id"]
        }
    },
    {
        "name": "retell_create_agent",
        "description": "Create a new Retell AI agent.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key."},
                "name": {"type": "string", "description": "Agent name."},
                "model": {"type": "string", "description": "LLM model (e.g., gpt-4o, claude-3-5-sonnet-20241022)."},
                "voice": {"type": "object", "description": "Voice configuration."},
                "prompt": {"type": "string", "description": "System prompt for the agent."},
                "max_duration_seconds": {"type": "number", "description": "Max call duration."},
                "webhook_url": {"type": "string", "description": "Webhook URL for call events."}
            },
            "required": ["name"]
        }
    },
    {
        "name": "retell_update_agent",
        "description": "Update an existing Retell AI agent.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key."},
                "agent_id": {"type": "string", "description": "The agent ID to update."},
                "name": {"type": "string"},
                "model": {"type": "string"},
                "voice": {"type": "object"},
                "prompt": {"type": "string"},
                "max_duration_seconds": {"type": "number"},
                "webhook_url": {"type": "string"}
            },
            "required": ["agent_id"]
        }
    },
    {
        "name": "retell_list_calls",
        "description": "List Retell AI call records.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key."},
                "agent_id": {"type": "string", "description": "Filter by agent ID."},
                "limit": {"type": "number", "description": "Max results (default 20).", "default": 20}
            }
        }
    },
    {
        "name": "retell_get_call",
        "description": "Get details of a specific Retell call including transcript.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key."},
                "call_id": {"type": "string", "description": "The Retell call ID."}
            },
            "required": ["call_id"]
        }
    },
    {
        "name": "retell_initiate_call",
        "description": "Initiate an outbound call via Retell AI.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key."},
                "agent_id": {"type": "string", "description": "Retell agent ID to use."},
                "to_number": {"type": "string", "description": "Destination phone number (E.164)."},
                "from_number": {"type": "string", "description": "Caller ID number (E.164)."},
                "metadata": {"type": "object", "description": "Custom metadata to attach."}
            },
            "required": ["agent_id", "to_number"]
        }
    },
    {
        "name": "retell_hangup_call",
        "description": "Hang up an active Retell call.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Retell API key."},
                "call_id": {"type": "string", "description": "The call ID to hang up."}
            },
            "required": ["call_id"]
        }
    },
    {
        "name": "retell_voice_ingest",
        "description": "Ingest a Retell call transcript into LogicMem voice memory.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "retell_api_key": {"type": "string", "description": "Retell API key to fetch transcript."},
                "call_id": {"type": "string", "description": "Retell call ID."},
                "vapi_payload": {"type": "object", "description": "Optional full Retell call payload dict."}
            },
            "required": ["api_key"]
        }
    },
    {
        "name": "retell_voice_recall",
        "description": "Recall voice memories for a caller from previous Retell calls.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "caller_number": {"type": "string", "description": "Caller phone number (E.164 format, e.g. +19195551234)."},
                "top_k": {"type": "number", "description": "Number of recent utterances to return (default 5, max 20).", "default": 5}
            },
            "required": ["api_key", "caller_number"]
        }
    },

    # ── Bland AI ────────────────────────────────────────────────────────────────

    {
        "name": "bland_initiate_call",
        "description": "Initiate an outbound call via Bland AI.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Bland API key. Defaults to BLAND_API_KEY env var."},
                "to_number": {"type": "string", "description": "Destination phone number (E.164)."},
                "from_number": {"type": "string", "description": "Caller ID to display."},
                "model": {"type": "string", "description": "AI model to use."},
                "voice": {"type": "string", "description": "Voice ID."},
                "prompt": {"type": "string", "description": "System prompt."},
                "webhook_url": {"type": "string", "description": "Callback URL for call events."},
                "metadata": {"type": "object", "description": "Custom metadata."}
            },
            "required": ["to_number"]
        }
    },
    {
        "name": "bland_list_calls",
        "description": "List Bland AI outbound calls.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Bland API key."},
                "limit": {"type": "number", "description": "Max results.", "default": 20}
            }
        }
    },
    {
        "name": "bland_get_call",
        "description": "Get details of a specific Bland AI call.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Bland API key."},
                "call_id": {"type": "string", "description": "The Bland call ID."}
            },
            "required": ["call_id"]
        }
    },
    {
        "name": "bland_get_transcript",
        "description": "Get the transcript of a Bland AI call.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Bland API key."},
                "call_id": {"type": "string", "description": "The Bland call ID."}
            },
            "required": ["call_id"]
        }
    },
    {
        "name": "bland_cancel_call",
        "description": "Cancel a Bland AI call that is in progress or queued.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Bland API key."},
                "call_id": {"type": "string", "description": "The Bland call ID to cancel."}
            },
            "required": ["call_id"]
        }
    },
    {
        "name": "bland_list_numbers",
        "description": "List Bland AI phone numbers in your account.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Bland API key."}
            }
        }
    },
    {
        "name": "bland_voice_ingest",
        "description": "Ingest a Bland AI call transcript into LogicMem voice memory.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "bland_api_key": {"type": "string", "description": "Bland API key to fetch transcript."},
                "call_id": {"type": "string", "description": "Bland call ID."}
            },
            "required": ["api_key"]
        }
    },
    {
        "name": "bland_voice_recall",
        "description": "Recall voice memories for a caller from previous Bland calls.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "caller_number": {"type": "string", "description": "Caller phone number (E.164)."},
                "top_k": {"type": "number", "description": "Number of utterances (default 5, max 20).", "default": 5}
            },
            "required": ["api_key", "caller_number"]
        }
    },

    # ── Twilio ─────────────────────────────────────────────────────────────────

    {
        "name": "twilio_initiate_call",
        "description": "Initiate an outbound call via Twilio.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Twilio credentials (account_sid:auth_token). Defaults to TWILIO_ACCOUNT_SID/TWILIO_AUTH_TOKEN."},
                "to": {"type": "string", "description": "Destination phone number (E.164)."},
                "from_": {"type": "string", "description": "Twilio phone number to use as caller ID (E.164)."},
                "url": {"type": "string", "description": "TwiML URL for call handling."},
                "status_callback": {"type": "string", "description": "Webhook URL for call status callbacks."},
                "timeout": {"type": "number", "description": "How long to let the call ring (seconds).", "default": 60}
            },
            "required": ["to", "from_"]
        }
    },
    {
        "name": "twilio_list_calls",
        "description": "List recent Twilio calls on your account.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Twilio credentials."},
                "to": {"type": "string", "description": "Filter by called number."},
                "from_": {"type": "string", "description": "Filter by caller number."},
                "status": {"type": "string", "description": "Filter by status."},
                "limit": {"type": "number", "description": "Max results.", "default": 20}
            }
        }
    },
    {
        "name": "twilio_get_call",
        "description": "Get details of a specific Twilio call.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "Twilio credentials."},
                "call_sid": {"type": "string", "description": "The Twilio Call SID."}
            },
            "required": ["call_sid"]
        }
    },
    {
        "name": "twilio_voice_ingest",
        "description": "Ingest a Twilio call transcript into LogicMem voice memory.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "twilio_account_sid": {"type": "string", "description": "Twilio Account SID."},
                "twilio_auth_token": {"type": "string", "description": "Twilio Auth Token."},
                "call_sid": {"type": "string", "description": "Twilio Call SID."},
                "transcript_segments": {"type": "array", "description": "Array of {speaker, text, start_ms, end_ms} segments."}
            },
            "required": ["api_key"]
        }
    },
    {
        "name": "twilio_voice_recall",
        "description": "Recall voice memories for a caller from previous Twilio calls.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "caller_number": {"type": "string", "description": "Caller phone number (E.164)."},
                "top_k": {"type": "number", "description": "Number of utterances (default 5, max 20).", "default": 5}
            },
            "required": ["api_key", "caller_number"]
        }
    },

    # ── Voice Analytics ─────────────────────────────────────────────────────────

    {
        "name": "logicframe_voice_stats",
        "description": "Get voice memory statistics: total memories, unique callers, average recall latency, cache hit rate.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."}
            },
            "required": ["api_key"]
        }
    },
    {
        "name": "logicframe_voice_session",
        "description": "Retrieve a specific voice call session with all utterances and metadata.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "session_id": {"type": "string", "description": "The voice session ID to retrieve."}
            },
            "required": ["api_key", "session_id"]
        }
    },
    {
        "name": "logicframe_voice_caller_history",
        "description": "Get the full call history for a caller: all sessions, dates, duration, agents used.",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "caller_number": {"type": "string", "description": "Caller phone number (E.164)."},
                "limit": {"type": "number", "description": "Max sessions to return.", "default": 20}
            },
            "required": ["api_key", "caller_number"]
        }
    },
    {
        "name": "logicframe_voice_delete_session",
        "description": "Delete all voice memories for a specific session (GDPR/correction).",
        "input_schema": {
            "type": "object",
            "properties": {
                "api_key": {"type": "string", "description": "LogicMem API key."},
                "session_id": {"type": "string", "description": "The session ID to delete."}
            },
            "required": ["api_key", "session_id"]
        }
    },

]


# ── Onboarding Questions ───────────────────────────────────────────────────────

# Questions asked during a client's first-run onboarding flow. Presented in
# order; answers are intended to be stored as baseline memories so the agent
# has durable context (identity, goals, tools, rules) before normal use.
# NOTE(review): the consumer of this list is outside this view — presumably a
# tool handler below; confirm ordering/indexing assumptions there.
ONBOARDING_QUESTIONS = [
    "What is your company or organization called?",
    "What do you do — what problem do you solve for your customers?",
    "Who is your ideal customer or client?",
    "What is the #1 goal you're working toward right now?",
    "What does success look like for you in the next 30 days?",
    "Who are the key people involved in your business?",
    "What tools or platforms do you use daily? (e.g. GHL, VAPI, Stripe, Calendly)",
    "What information should I always prioritize and remember?",
    "What should I absolutely avoid or never do?",
    "How do you prefer to communicate? (text, calls, specifics)",
    "What is the most important rule or policy I should know about?",
    "What would you want me to remind you about without being asked?",
]

# ── API Helpers ────────────────────────────────────────────────────────────────

# ── Per-Bot API Key Resolution ─────────────────────────────────────────────────
# Each bot has its own memory_key in agents.json. Use that as Bearer token so
# the memory server knows exactly which bot is calling. No shared secrets.

# Identity of the bot making the current request; set per-request in do_POST
# (outside this view) and consumed by mem_post/mem_get as the default agent.
_CALLER_AGENT_ID: str | None = None   # set per-request in do_POST

# Per-bot registry file: maps agent_id -> {"memory_key": ..., ...}.
AGENTS_FILE = os.path.expanduser("~/LogicFrame/projects/agents.json")
# Lazily-populated cache of the parsed registry (filled by _load_agents()).
_AGENTS_CACHE: dict = {}

def _load_agents() -> dict:
    """Read and memoize the per-bot registry from AGENTS_FILE.

    The parsed JSON is cached in the module-level _AGENTS_CACHE after the
    first successful read. A missing registry file yields a fresh empty dict
    without touching the cache; an unreadable/corrupt file caches an empty
    mapping (best-effort — errors are deliberately swallowed).
    """
    global _AGENTS_CACHE
    if _AGENTS_CACHE:
        return _AGENTS_CACHE
    if not os.path.exists(AGENTS_FILE):
        return {}
    try:
        with open(AGENTS_FILE) as handle:
            _AGENTS_CACHE = json.load(handle)
    except Exception:
        # Corrupt or unreadable registry: degrade to an empty mapping.
        _AGENTS_CACHE = {}
    return _AGENTS_CACHE

def _get_agent_memory_key(agent_id: str) -> str | None:
    """Return the memory_key registered for *agent_id*, or None if absent."""
    entry = _load_agents().get(agent_id, {})
    return entry.get("memory_key")

def _zk_key() -> str | None:
    """Resolve the zero-knowledge encryption key, if one is configured.

    ZK_ENCRYPTION_KEY may hold either the key material itself or a path to a
    file containing it (useful for remote deployments that mount secrets as
    files).

    Returns:
        The key string (whitespace-stripped), or None when no key is set.
        Fix: the return annotation is now ``str | None`` — the original was
        annotated ``-> str`` but returns None on the unset path.
    """
    if not ZK_ENCRYPTION_KEY:
        return None
    # If the value names an existing file, read the key material from it.
    if os.path.isfile(ZK_ENCRYPTION_KEY):
        with open(ZK_ENCRYPTION_KEY, "rb") as f:
            return f.read().decode().strip()
    # Otherwise treat the value as the raw key string.
    return ZK_ENCRYPTION_KEY.strip()

def _auth_headers(agent_id: str = None) -> dict:
    """Build the auth headers for a request to the memory server.

    Priority:
    1. Localhost/127.0.0.1 server: ALWAYS X-Cron-Secret (Bearer fails with 401).
    2. Remote server with a known agent: per-bot key from agents.json as Bearer.
    3. Remote fallback: X-Cron-Secret.

    CRITICAL: the ZK key is for ENCRYPTION only — never send it as Bearer auth.
    """
    headers = {"Content-Type": "application/json"}

    local = ("localhost" in MEMORY_SERVER) or ("127.0.0.1" in MEMORY_SERVER)
    if local:
        headers["X-Cron-Secret"] = CRON_SECRET
        return headers

    # Remote memory server (Supabase/cloud): prefer the per-bot key.
    bot_key = _get_agent_memory_key(agent_id) if agent_id else None
    if bot_key:
        headers["Authorization"] = f"Bearer {bot_key}"
    else:
        headers["X-Cron-Secret"] = CRON_SECRET
    return headers

def mem_post(endpoint: str, payload: dict, agent_id: str = None) -> dict:
    """POST `payload` as JSON to the memory server; return the parsed reply.

    Falls back to the per-request caller agent id when none is given.
    On failure returns {"error": ...} instead of raising.
    """
    caller = agent_id if agent_id else _CALLER_AGENT_ID
    request = Request(
        f"{MEMORY_SERVER}{endpoint}",
        data=json.dumps(payload).encode(),
        headers=_auth_headers(caller),
        method="POST",
    )
    try:
        with urlopen(request, timeout=20) as resp:
            return json.loads(resp.read())
    except HTTPError as exc:
        return {"error": f"HTTP {exc.code}: {exc.read().decode()[:200]}"}
    except Exception as exc:
        return {"error": str(exc)}

def mem_get(endpoint: str, agent_id: str = None) -> dict:
    """GET from the memory server; returns parsed JSON or {"error": ...}."""
    caller = agent_id or _CALLER_AGENT_ID
    headers = dict(_auth_headers(caller))
    headers.pop("Content-Type", None)  # no body on a GET
    request = Request(f"{MEMORY_SERVER}{endpoint}", headers=headers)
    try:
        with urlopen(request, timeout=10) as resp:
            return json.loads(resp.read())
    except Exception as exc:
        return {"error": str(exc)}

# ── Tool Handlers ──────────────────────────────────────────────────────────────

def handle_health(_args: dict) -> dict:
    """Report memory-server health as a flat summary dict."""
    info = mem_get("/memory/health")
    if "error" in info:
        return {"healthy": False, "error": info["error"]}
    summary = {"healthy": True, "status": info.get("status", "ok")}
    for field in ("qdrant", "embedding", "encryption", "api_keys_active"):
        summary[field] = info.get(field, "?")
    return summary


def handle_memory_log(args: dict) -> dict:
    """Store a memory. Set skip_classification=true for high-speed bulk writes (skips entity extraction + classification)."""
    payload = {
        "text": args["text"],
        "client_id": args.get("client_id", DEFAULT_AGENT),
        "category": args.get("category", "general"),
        "importance": args.get("importance", 7),
        "skip_classification": args.get("skip_classification", False),
    }
    # Optional fields: only forwarded when present and truthy.
    for arg_key, payload_key in (("tags", "tags"), ("agent_id", "agent_id"), ("source", "source_type")):
        if args.get(arg_key):
            payload[payload_key] = args[arg_key]
    if args.get("is_permanent"):
        payload["force_permanent"] = True

    result = mem_post("/memory/log", payload)
    if "entry_id" not in result:
        return {"success": False, "error": result.get("error", "Unknown error")}
    return {
        "success":  True,
        "entry_id": result["entry_id"],
        "stored":   args["text"][:120],
        "category": payload["category"],
    }


def handle_memory_log_verbatim(args: dict) -> dict:
    """MemPalace-style verbatim storage - stores raw transcript AS-IS, no extraction."""
    text = args["text"]
    result = mem_post("/memory/log_verbatim", {
        "text": text,
        "client_id": args.get("client_id", DEFAULT_AGENT),
        "agent_id": args.get("agent_id", DEFAULT_AGENT),
        "session_id": args.get("session_id"),
        "wing": args.get("wing"),
        "hall": args.get("hall"),
        "room": args.get("room"),
        "importance": args.get("importance", 7),
        "source_type": args.get("source", "conversation"),
        "skip_privacy_filter": True,
    })
    if "entry_id" not in result:
        return {"success": False, "error": result.get("error", "Unknown error")}
    return {
        "success": True,
        "entry_id": result["entry_id"],
        "stored": text[:120],
        "text_length": len(text),
    }

def handle_memory_recall(args: dict) -> dict:
    """Hybrid (vector + BM25) recall; returns one formatted line per hit."""
    result = mem_post("/memory/recall", {
        "query": args["query"],
        "top_k": args.get("limit", 5),
        "skip_contradiction_check": True,
        "use_bm25": True,
        "bm25_weight": 0.3,
    })
    hits = result.get("results", [])
    if not hits:
        return {"results": [], "message": "No memory found for this query."}

    formatted = [
        f"[{h.get('category', 'general').upper()} | {h.get('timestamp', '')[:10]} | id:{h.get('id', '')[:8]}]: {h.get('text', '')[:300]}"
        for h in hits
    ]
    return {"results": formatted, "count": len(hits)}


def handle_memory_context(args: dict) -> dict:
    """Gather everything known about an entity into one synthesized context blob."""
    entity = args["entity"]
    top_k = 3 if args.get("depth") == "brief" else 8
    result = mem_post("/memory/recall", {
        "query": f"{entity} context history background",
        "top_k": top_k,
        "skip_contradiction_check": True,
        "use_bm25": True,
        "bm25_weight": 0.3,
    })
    hits = result.get("results", [])
    if not hits:
        return {"context": f"No memory found for '{entity}' yet.", "entries": 0}

    blocks = [
        f"[{h.get('timestamp', '')[:10]} · {h.get('category', 'general')}] {h.get('text', '')[:400]}"
        for h in hits
    ]
    synthesized = f"Everything I know about **{entity}**:\n\n" + "\n\n".join(blocks)
    # Cap the synthesized context so it stays prompt-friendly.
    return {"context": synthesized[:3000], "entries": len(hits)}


def handle_memory_think(args: dict) -> dict:
    """Reason about memories — why patterns exist, what they mean."""
    question = args["question"]
    top_k = 3 if args.get("depth") == "quick" else 10
    search = f"{args['entity']}: {question}" if args.get("entity") else question

    result = mem_post("/memory/recall", {
        "query": search,
        "top_k": top_k,
        "skip_contradiction_check": False,
        "use_bm25": True,
        "bm25_weight": 0.3,
    })
    entries = result.get("results", [])
    if not entries:
        return {"analysis": "Not enough memory to reason about this yet.", "question": question}

    facts = "\n".join(f"- {e.get('text','')[:300]}" for e in entries)
    analysis_prompt = (
        f"Question: {question}\n\n"
        f"Facts from memory:\n{facts}\n\n"
        f"Based on these facts, provide a clear reasoning answer. "
        f"State what likely happened, why, and what it suggests."
    )

    reasoned = mem_post("/memory/reason", {
        "query": analysis_prompt,
        "context": facts,
        "depth": "deep" if args.get("depth") == "deep" else "standard",
    })
    if "analysis" in reasoned:
        return {
            "question": question,
            "analysis": reasoned["analysis"][:2000],
            "facts_used": len(entries),
        }

    # Fallback — the reason endpoint gave nothing usable; return raw facts.
    return {
        "question": question,
        "analysis": f"Based on {len(entries)} memory entries:\n\n{facts[:1500]}",
        "facts_used": len(entries),
    }


def handle_memory_plan(args: dict) -> dict:
    """Surface commitments, promises, and follow-up items from memory."""
    seeds = ["promise", "follow up", "to do", "commitment", "action item", "next step"]
    if args.get("entity"):
        seeds = [f"{args['entity']} {s}" for s in seeds]

    result = mem_post("/memory/recall", {
        "query": " OR ".join(seeds),
        "top_k": 10,
        "skip_contradiction_check": True,
        "use_bm25": True,
        "bm25_weight": 0.3,
    })
    entries = result.get("results", [])
    status = args.get("status", "all")
    if not entries:
        return {"items": [], "message": "No action items found in memory.", "status": status}

    items = [
        f"[{e.get('timestamp', '')[:10]} · {e.get('category', 'general')} | id:{e.get('id', '')[:8]}]: {e.get('text', '')[:300]}"
        for e in entries
    ]
    return {
        "items": items,
        "count": len(items),
        "entity": args.get("entity", "all"),
        "status": status,
        "tip": "Use logicframe_memory_archive with an entry_id to archive a completed item.",
    }


def handle_memory_review(args: dict) -> dict:
    """Weekly or periodic digest — what happened, what was decided."""
    days = args.get("days", 7)
    result = mem_post("/memory/recall", {
        "query": "this week decision promise commitment update",
        "top_k": days * 3,
        "skip_contradiction_check": True,
        "use_bm25": True,
        "bm25_weight": 0.3,
    })
    entries = result.get("results", [])
    if not entries:
        return {"digest": "No memories recorded in this period.", "days": days}

    # Group entry snippets by category for the digest.
    grouped: dict = {}
    for entry in entries:
        grouped.setdefault(entry.get("category", "general"), []).append(entry.get("text", "")[:200])

    lines = [f"## Memory Review — last {days} days\n"]
    for cat, texts in sorted(grouped.items()):
        lines.append(f"\n### {cat.upper()} ({len(texts)} entries)")
        lines.extend(f"- {t}" for t in texts[:5])

    if args.get("format") == "detailed":
        return {"digest": "\n".join(lines), "categories": list(grouped.keys()), "total": len(entries)}

    # Brief format: one-line counts plus a truncated detail section.
    summary = f"**{len(entries)} memories** across {len(grouped)} categories. "
    summary += " | ".join(f"{cat}: {len(v)}" for cat, v in sorted(grouped.items()))
    return {"digest": summary, "detail": "\n".join(lines[:30]), "categories": list(grouped.keys()), "total": len(entries)}


def handle_memory_evolve(args: dict) -> dict:
    """Trigger memory evolution — consolidate and synthesize old memories."""
    intensity = args.get("intensity", "standard")
    result = mem_post("/memory/evolve", {
        "entity": args.get("entity"),
        "intensity": intensity,
    })
    if "error" in result:
        return {"error": result["error"], "tip": "Evolution requires Pro+ plan."}
    return {
        "evolved": True,
        "synthesized": result.get("synthesized", result.get("count", "?")),
        "intensity": intensity,
        "message": "Memory evolution complete. Old entries synthesized into higher-level insights.",
    }


def handle_memory_archive(args: dict) -> dict:
    """Archive an entry — remove from active recall without deleting."""
    entry_id = args["entry_id"]
    reason = args.get("reason", "")
    # Archiving is modeled as a status change, not a delete.
    result = mem_post("/memory/update-status", {
        "entry_id": entry_id,
        "status": "archived",
        "reason": reason,
    })
    if "error" in result:
        return {"archived": False, "error": result["error"]}
    return {
        "archived": True,
        "entry_id": entry_id,
        "reason": reason,
        "message": "Entry archived. Use recall to find it again if needed.",
    }


def handle_memory_snapshot(args: dict) -> dict:
    """Full backup export of all memories."""
    fmt = args.get("format", "json")
    result = mem_get("/memory/audit/export")
    if "error" not in result:
        return {
            "snapshot": json.dumps(result, indent=2)[:5000],
            "total": result.get("count", "?"),
            "format": fmt,
            "note": "Truncated — full export available via /memory/audit/export endpoint",
        }

    # Fallback — export endpoint unavailable; rebuild a snapshot via recall.
    recall = mem_post("/memory/recall", {"query": "*", "top_k": 1000, "use_bm25": True, "bm25_weight": 0.3})
    entries = recall.get("results", [])
    if fmt == "markdown":
        lines = ["# LogicFrame Memory Snapshot\n"]
        for e in entries:
            lines.append(f"\n## [{e.get('timestamp','')[:10]}] {e.get('category','general')}")
            lines.append(e.get("text", ""))
        return {"snapshot": "\n".join(lines), "total": len(entries), "format": "markdown"}

    export_data = {
        "exported_at": datetime.datetime.now().isoformat(),
        "total": len(entries),
        "format": fmt,
        "entries": entries,
    }
    return {"snapshot": json.dumps(export_data, indent=2), "total": len(entries), "format": "json"}


def handle_memory_stats(_args: dict) -> dict:
    """Usage statistics for the memory system."""
    stats = mem_get("/memory/stats")
    if "error" in stats:
        return {"error": stats["error"]}

    health = mem_get("/memory/health")
    healthy = health.get("status") == "healthy"
    return {
        "total_memories": stats.get("total", stats.get("count", "?")),
        "this_week": stats.get("this_week", "?"),
        "categories": stats.get("categories", {}),
        "encryption": health.get("encryption", "?"),
        "embedding": health.get("embedding", "?"),
        "api_keys_active": health.get("api_keys_active", "?"),
        "status": "healthy" if healthy else "degraded",
    }


def handle_memory_share(args: dict) -> dict:
    """Share a memory entry with another agent."""
    entry_id = args["entry_id"]
    to_agent = args["to_agent"]
    note = args.get("note", "")
    result = mem_post("/memory/shared/write", {
        "entry_id": entry_id,
        "to_agent": to_agent,
        "note": note,
        "from_agent": DEFAULT_AGENT,
    })
    if "error" in result:
        return {"shared": False, "error": result["error"]}
    return {
        "shared": True,
        "entry_id": entry_id,
        "to_agent": to_agent,
        "note": note,
        "message": f"Memory shared with {to_agent}.",
    }


def _run_self_heal(full: bool = False) -> dict:
    """Run local diagnostics and report what needs fixing.

    Checks: memory server health, Qdrant, VPS SSH reachability, disk usage,
    and nightly-backup freshness. With full=True also checks Ollama.

    Returns {"healthy": bool, "checks": [status lines], "fixes_needed": [issues]}.
    """
    import subprocess, os, time, urllib.request, json

    results = []   # human-readable status line per check
    fixes = []     # issues needing attention; empty list means healthy

    # 1. Memory server health
    try:
        req = urllib.request.Request("http://localhost:8421/memory/health")
        with urllib.request.urlopen(req, timeout=5) as r:
            d = json.loads(r.read())
            if d.get("status") != "healthy":
                fixes.append(f"Memory server status: {d.get('status')}")
            results.append(f"Memory server: {d.get('status')} ✅")
    except Exception as e:
        fixes.append(f"Memory server down: {e}")
        results.append(f"Memory server: DOWN — {e}")

    # 2. Qdrant vector store
    try:
        req = urllib.request.Request("http://localhost:6333/cluster")
        with urllib.request.urlopen(req, timeout=5) as r:
            json.loads(r.read())  # any parseable JSON reply counts as "up"
            results.append("Qdrant: up ✅")
    except Exception as e:
        fixes.append(f"Qdrant: {e}")
        results.append(f"Qdrant: {e}")

    # 3. VPS connectivity (BatchMode avoids hanging on a password prompt)
    try:
        r = subprocess.run(
            ["ssh", "-o", "BatchMode=yes", "-o", "ConnectTimeout=5", "72.60.66.215", "echo ok"],
            capture_output=True, timeout=8
        )
        if r.returncode == 0:
            results.append("VPS: reachable ✅")
        else:
            fixes.append("VPS SSH: unreachable")
            results.append("VPS: UNREACHABLE ❌")
    except Exception as e:
        fixes.append(f"VPS SSH: {e}")
        results.append(f"VPS SSH: {e}")

    # 4. Disk space — best-effort; parse the use% column of `df -h /`
    try:
        r = subprocess.run(["df", "-h", "/"], capture_output=True, text=True, timeout=5)
        line = r.stdout.strip().split("\n")[-1]
        parts = line.split()
        used_pct = parts[4] if len(parts) > 4 else "?"
        results.append(f"Disk: {used_pct} used")
    except Exception:
        pass  # disk check is informational only

    # 5. Nightly backup freshness (stale when older than 26h)
    try:
        snap_dir = os.path.expanduser("~/LogicFrame/backups/nightly")
        if os.path.isdir(snap_dir):
            latest = max([os.path.join(snap_dir, f) for f in os.listdir(snap_dir)], default=None, key=os.path.getmtime)
            if latest:
                # BUG FIX: was `os.time.time()` — `os` has no `time` attribute,
                # so this always raised and the check always reported a failure.
                age_h = (time.time() - os.path.getmtime(latest)) / 3600
                results.append(f"Latest backup: {age_h:.1f}h ago")
                if age_h > 26:
                    fixes.append(f"Backup stale: {age_h:.0f}h old")
        else:
            fixes.append("Backup directory missing")
    except Exception as e:
        fixes.append(f"Backup check: {e}")

    if full:
        # Extended check: Ollama model inventory
        try:
            req = urllib.request.Request("http://localhost:11434/api/tags")
            with urllib.request.urlopen(req, timeout=5) as r:
                models = json.loads(r.read()).get("models", [])
                results.append(f"Ollama: {len(models)} models loaded")
        except Exception as e:
            fixes.append(f"Ollama: {e}")
            results.append(f"Ollama: {e}")

    return {"healthy": len(fixes) == 0, "checks": results, "fixes_needed": fixes}


def _run_dpo_pipeline() -> dict:
    """Run the DPO correction pipeline — read JSONL corrections, export DPO pairs."""
    import subprocess
    proc = subprocess.run(
        ["/usr/bin/python3", "/Users/themis/LogicFrame/scripts/dpo_correction_pipeline.py"],
        capture_output=True,
        text=True,
        timeout=60,
    )
    return {"output": proc.stdout, "errors": proc.stderr, "returncode": proc.returncode}


def handle_self_heal(args: dict) -> dict:
    """Run self-heal diagnostics; full=True adds the extended checks."""
    return _run_self_heal(full=args.get("full", False))


def handle_dpo_stats(_args: dict) -> dict:
    """Return DPO pipeline status and correction count."""
    import os
    pairs_file = os.path.expanduser("~/LogicFrameLabs/data/corrections/dpo_pairs.jsonl")
    if not os.path.exists(pairs_file):
        return {"pairs": 0, "threshold": 50, "ready": False, "message": "No DPO pairs yet."}

    # Count JSONL lines — one DPO pair per line.
    with open(pairs_file) as fh:
        count = sum(1 for _ in fh)

    ready = count >= 50
    if ready:
        status = "training ready! Say confirm=yes to start"
    else:
        status = f"{50 - count} more until training ready"
    return {
        "pairs": count,
        "threshold": 50,
        "ready": ready,
        "message": f"{count}/50 pairs — {status}",
        "pipeline": "~/LogicFrame/scripts/dpo_correction_pipeline.py",
        "output_file": pairs_file,
    }


def handle_dpo_run(args: dict) -> dict:
    """Trigger DPO training if 50+ pairs are ready.

    Requires an explicit confirm='yes'. Launches the training script in the
    background (its own session) and returns immediately; progress goes to
    the log file.
    """
    if args.get("confirm", "").lower() != "yes":
        return {"error": "Must confirm with confirm='yes' to start training."}
    import os, subprocess
    pairs_file = os.path.expanduser("~/LogicFrameLabs/data/corrections/dpo_pairs.jsonl")
    if os.path.exists(pairs_file):
        with open(pairs_file) as f:
            count = sum(1 for _ in f)
        if count < 50:
            return {"error": f"Only {count} pairs — need 50 for training."}
    # Start training in background.
    # BUG FIX: Popen has no `detached` kwarg (was a TypeError at call time);
    # start_new_session=True is the correct way to detach on POSIX. The log
    # handle is closed after spawn — the child keeps its own inherited fd.
    with open("/Users/themis/LogicFrame/logs/dpo_training.log", "a") as log:
        subprocess.Popen(
            ["/bin/bash", "/Users/themis/LogicFrameLabs/scripts/start_training.sh"],
            stdout=log,
            stderr=subprocess.STDOUT,
            start_new_session=True,
        )
    return {"training_started": True, "log": "~/LogicFrame/logs/dpo_training.log", "note": "Check log for progress: tail -f ~/LogicFrame/logs/dpo_training.log"}


def handle_correction_log(args: dict) -> dict:
    """Log a correction to the DPO pipeline JSONL file."""
    import os, json
    from datetime import datetime, timezone

    original  = args.get("original", "").strip()
    corrected = args.get("corrected", "").strip()
    if not original or not corrected:
        return {"error": "Both 'original' and 'corrected' are required."}

    # Append the correction to today's JSONL file.
    corr_dir = os.path.expanduser("~/LogicFrame/data/corrections")
    os.makedirs(corr_dir, exist_ok=True)
    corr_file = os.path.join(corr_dir, f"corrections-{datetime.now().strftime('%Y-%m-%d')}.jsonl")
    record = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "category": args.get("category", "behavior"),
        "original": original,
        "corrected": corrected,
        "context": args.get("context", "").strip(),
        "source": "mcp",
    }
    with open(corr_file, "a") as fh:
        fh.write(json.dumps(record) + "\n")

    # Run the pipeline right away so the new correction is exported.
    _run_dpo_pipeline()

    return {
        "logged": True,
        "file": corr_file,
        "category": record["category"],
        "message": "Correction logged. Will feed into next DPO training run.",
    }


def _call(path: str, payload: dict = None, method: str = "POST") -> dict:
    """Call a memory-server endpoint and return the parsed JSON reply.

    NOTE(review): unlike mem_post/mem_get this helper hardcodes
    http://localhost:8421 and the cron secret instead of honoring
    MEMORY_SERVER — presumably intentional for local-only endpoints;
    confirm before relying on it in a remote deployment.
    """
    import urllib.request, json
    body = json.dumps(payload).encode() if payload else None
    request = urllib.request.Request(
        f"http://localhost:8421{path}",
        data=body,
        headers={"Content-Type": "application/json", "X-Cron-Secret": CRON_SECRET},
        method=method,
    )
    try:
        with urllib.request.urlopen(request, timeout=30) as reply:
            return json.loads(reply.read())
    except Exception as exc:
        return {"error": str(exc)}


def _resolve_entry(entry_id: str, resolution: str, note: str, client_id: str) -> dict:
    """Call /memory/resolve endpoint."""
    return _call("/memory/resolve", {
        "entry_id": entry_id,
        "resolution": resolution,
        "resolution_note": note or "",
        "client_id": client_id,
    })


def handle_context_v2(args: dict) -> dict:
    """Fetch the v2 assembled context block for a client."""
    payload = {
        "query": args.get("query", ""),
        "client_id": args.get("client_id", "ed_creed"),
        "context_type": args.get("context_type", "default"),
    }
    return _call("/memory/context/v2", payload)


def handle_sentiment(args: dict) -> dict:
    """Get sentiment trajectory for a client — trend over time (improving/declining/stable)."""
    cid = args.get("client_id", "ed_creed")
    # NOTE(review): cid goes into the query string unescaped — fine for plain
    # ids; confirm callers never pass URL metacharacters.
    return _call(f"/memory/sentiment?client_id={cid}", method="GET")


def handle_recall_v2(args: dict) -> dict:
    """Recall with confidence scoring — returns high/moderate/weak match counts and recommendation."""
    top_k = args.get("limit", args.get("top_k", 10))  # "limit" wins when both given
    payload = {
        "query": args.get("query", ""),
        "client_id": args.get("client_id", "ed_creed"),
        "top_k": top_k,
        "min_importance": args.get("min_importance", 0.0),
        "categories": args.get("categories"),
        "skip_contradiction_check": args.get("skip_contradiction_check", True),
        "use_bm25": True,
        "bm25_weight": 0.3,
    }
    return _call("/memory/recall/v2", payload)


def handle_reclassify(args: dict) -> dict:
    """Reclassify a memory entry — change its category, importance, or tier."""
    payload = {
        "entry_id": args.get("entry_id", ""),
        "force_importance": args.get("force_importance"),
        "force_category": args.get("force_category"),
        "force_tier": args.get("force_tier"),
    }
    return _call("/memory/reclassify", payload)


def handle_contradictions(args: dict) -> dict:
    """Scan memories for contradictions — returns HIGH/MED/LOW severity with resolution hints."""
    payload = {
        "client_id": args.get("client_id", "ed_creed"),
        "top_k": args.get("top_k", 50),
    }
    return _call("/memory/contradictions", payload)


def handle_resolve(args: dict) -> dict:
    """Resolve a contradiction — mark one entry as authoritative (keep) or superseded."""
    payload = {
        "entry_id": args.get("entry_id", ""),
        "resolution": args.get("resolution", "supersede"),  # "supersede" or "keep"
        "reason": args.get("reason", ""),
    }
    return _call("/memory/resolve", payload)


def handle_update_status(args: dict) -> dict:
    """Update status of a memory entry (open | in_progress | resolved)."""
    payload = {
        "entry_id": args.get("entry_id", ""),
        "status": args.get("status", "open"),  # open | in_progress | resolved
        "note": args.get("note", ""),
        "agent_id": args.get("agent_id", "themis"),
    }
    return _call("/memory/update-status", payload)


def handle_summarize(args: dict) -> dict:
    """Summarize an interaction transcript into memory."""
    transcript = args.get("transcript", "") or "No transcript available."
    return _call("/memory/summarize", {
        "transcript": transcript,
        "interaction_type": args.get("interaction_type", "chat"),
        "session_type": args.get("session_type", "chat"),
        "client_id": args.get("client_id", "ed_creed"),
    })


def handle_consolidate(args: dict) -> dict:
    """Trigger consolidation of a client's memories (params passed via query string)."""
    cid = args.get("client_id", "ed_creed")
    aid = args.get("agent_id", "themis")
    return _call(f"/memory/consolidate?client_id={cid}&agent_id={aid}")


def handle_audit_verify(_args: dict) -> dict:
    """Verify the audit log and the hash chain; return both reports."""
    return {
        "audit": _call("/memory/audit/verify"),
        "hash_chain": _call("/memory/hash-chain/verify"),
    }


def handle_intelligence_all(args: dict) -> dict:
    """Fetch all proactive-intelligence signals, optionally filtered by urgency."""
    cid = args.get("client_id", "ed_creed")
    level = args.get("urgency", "all")
    return _call(f"/memory/intelligence/all?client_id={cid}&urgency={level}", method="GET")


def handle_causal_chain(args: dict) -> dict:
    """Get causal chain for a memory entry."""
    entry_id = args.get("entry_id", "")
    if not entry_id:
        return {"error": "entry_id is required"}
    return _call("/memory/causal/chain", {"entry_id": entry_id})


def handle_constraints_set(args: dict) -> dict:
    """Store a business constraint.

    Requires 'constraint'; category defaults to "workflow", client_id to
    "ed_creed". (Removed a redundant second args.get("constraint") lookup.)
    """
    constraint = args.get("constraint", "")
    if not constraint:
        return {"error": "constraint is required"}
    return _call("/memory/constraints/write", {
        "constraint_text": constraint,
        "category": args.get("category", "workflow"),
        "client_id": args.get("client_id", "ed_creed"),
    })


def handle_constraints_list(_args: dict) -> dict:
    """List all active constraints (hardwired to client ed_creed)."""
    return _call("/memory/constraints/list?client_id=ed_creed", method="GET")


def handle_gaps_detect(args: dict) -> dict:
    """Detect knowledge gaps."""
    payload = {
        "query": args.get("topic", "general"),
        "limit": 5,
        "client_id": "ed_creed",
    }
    return _call("/memory/gaps/detect", payload)


def handle_analogy_find(args: dict) -> dict:
    """Find analogous past situations."""
    situation = args.get("current_situation", "")
    if not situation:
        return {"error": "current_situation is required"}
    return _call("/memory/analogy/find", {
        "current_situation": situation,
        "situation_type": args.get("situation_type", "other"),
    })


def handle_federation_submit(args: dict) -> dict:
    """Submit insight to federated network."""
    insight = args.get("insight", "")
    if not insight:
        return {"error": "insight is required"}
    payload = {
        "insight_pattern": insight,
        "category": args.get("category", "general"),
        "client_id": args.get("client_id", "ed_creed"),
        # Accept either "industry" or the longer "industry_vertical" key.
        "industry_vertical": args.get("industry", args.get("industry_vertical", "saas")),
        "company_size": args.get("company_size", "smb"),
        "use_case": args.get("use_case", "client_memory"),
        "outcome": args.get("outcome", "positive"),
        "outcome_description": args.get("outcome_description", insight[:200]),
    }
    return _call("/memory/federation/submit", payload)


def handle_federation_query(args: dict) -> dict:
    """Query federated network."""
    query = args.get("query", "")
    if not query:
        return {"error": "query is required"}
    payload = {"query": query, "limit": 5, "client_id": "ed_creed"}
    return _call("/memory/federation/query", payload)


def handle_bootstrap_synthesize(args: dict) -> dict:
    """Synthesize onboarding answers into context profile."""
    answers = args.get("answers", {})
    if not answers:
        return {"error": "answers is required"}
    return _call("/memory/bootstrap/synthesize", {
        "answers": answers,
        "agent_type": args.get("agent_type", "assistant"),
    })


def handle_intelligence(args: dict) -> dict:
    """Run proactive intelligence analysis — checks overdue promises, escalation, declining sentiment."""
    cid = args.get("client_id", "ed_creed")
    return _call(f"/memory/intelligence?client_id={cid}", method="GET")


def handle_onboard(args: dict) -> dict:
    """Start manual onboarding: log the start event and return the question list."""
    client = args.get("client_name", "the client")
    mem_post("/memory/log", {
        "text": f"Onboarding started for {client}",
        "client_id": DEFAULT_AGENT,
        "category": "client_identity",
        "importance": 9,
    })
    return {
        "onboarding_active": True,
        "client": client,
        "questions": ONBOARDING_QUESTIONS,
        "instruction": (
            "Ask each question one at a time. After each answer, call "
            "logicframe_memory_log tagged category=client_identity. "
            "After all 12, call logicframe_memory_evolve to synthesize the profile."
        ),
    }


def handle_startup_check(args: dict) -> dict:
    """
    Agent Baseline Layer — runs on EVERY boot.
    Checks if this user has memory. If no → trigger onboarding.
    If yes → return their context for verification.
    """
    user_id = args.get("user_id", "unknown")
    client_id = f"user_{user_id}" if user_id != "unknown" else DEFAULT_AGENT

    # Check if any meaningful memories exist for this user.
    # CONSISTENCY FIX: every other /memory/recall call in this file passes
    # "top_k" (see handle_memory_recall); this one passed "limit" — switched
    # to "top_k" for consistency. TODO(review): confirm the server contract.
    result = mem_post("/memory/recall", {
        "query": "onboarding complete profile client identity business",
        "client_id": client_id,
        "top_k": 3, "use_bm25": True, "bm25_weight": 0.3
    })

    memories = result.get("results", [])
    # Onboarded = any entry tagged onboarding/profile_complete, or whose text
    # mentions both "onboarding" and "complete".
    has_memory = any(
        any(tag in (e.get("tags") or []) for tag in ["onboarding", "profile_complete"])
        or ("onboarding" in (e.get("text") or "").lower() and "complete" in (e.get("text") or "").lower())
        for e in memories
    )

    if has_memory:
        # User has been onboarded — get full context
        context_result = mem_post("/memory/recall", {
            "query": "who is this user name business goals preferences tools",
            "client_id": client_id,
            "top_k": 10, "use_bm25": True, "bm25_weight": 0.3
        })
        return {
            "status": "onboarded",
            "message": "Memory found for this user. Present the context summary and ask if anything needs updating.",
            "context": [e.get("text", "")[:300] for e in context_result.get("results", [])],
            "action": "verify_context"
        }
    else:
        # No memory — trigger automatic onboarding
        mem_post("/memory/log", {
            "text": f"First contact from user {user_id} — onboarding triggered automatically",
            "client_id": client_id,
            "category": "general",
            "importance": 7,
            "tags": ["first_contact", "auto_onboarding"]
        })
        return {
            "status": "needs_onboarding",
            "message": "No memory found for this user. Start the 12-question onboarding immediately.",
            "questions": ONBOARDING_QUESTIONS,
            "action": "start_onboarding",
            "instruction": (
                "Greet the user warmly, then say: "
                "'Let me get to know you so I can be genuinely helpful. "
                "Can I ask you 12 quick questions? It'll take about 3 minutes "
                "and then I'll know everything I need to assist you properly.' "
                "Then ask questions one at a time, logging each answer with "
                "logicframe_memory_log (category=client_identity, importance=9). "
                "After question 12, call logicframe_memory_evolve to synthesize "
                "the profile and confirm onboarding complete."
            )
        }


def handle_log_onboarding_answer(args: dict) -> dict:
    """Log a single onboarding answer; finalize onboarding when `complete` is set.

    Args (in `args`):
        question_index: 0-based index into ONBOARDING_QUESTIONS.
        answer: the user's answer text.
        complete: True on the final answer — writes a permanent completion
            marker and triggers profile synthesis via /memory/evolve.

    Returns a dict describing progress (next question) or completion.
    """
    question_index = args.get("question_index", 0)
    answer = args.get("answer", "")
    complete = args.get("complete", False)
    # Derive the question count from the list instead of hard-coding 12,
    # so the progress text stays correct if the questionnaire changes.
    total = len(ONBOARDING_QUESTIONS)

    if question_index < total:
        question = ONBOARDING_QUESTIONS[question_index]
    else:
        # Out-of-range index: log with a generic label rather than failing.
        question = f"Question {question_index + 1}"

    mem_post("/memory/log", {
        "text": f"ONBOARDING Q{question_index + 1}: {question}\nANSWER: {answer}",
        "client_id": DEFAULT_AGENT,
        "category": "client_identity",
        "importance": 9,
        "tags": ["onboarding", f"q{question_index + 1}"]
    })

    if complete:
        # Permanent completion marker, then synthesize the user profile.
        mem_post("/memory/log", {
            "text": (
                "ONBOARDING COMPLETE — All 12 questions answered. "
                "Full user profile built. Agent is ready to assist with full context."
            ),
            "client_id": DEFAULT_AGENT,
            "category": "client_identity",
            "importance": 10,
            "tags": ["onboarding", "complete", "profile_built"]
        })
        mem_post("/memory/evolve", {
            "client_id": DEFAULT_AGENT,
            "trigger": "onboarding_complete"
        })
        return {
            "complete": True,
            "message": "Onboarding finished. You now have full context on this user. You're ready to assist."
        }

    next_index = question_index + 1
    if next_index < total:
        return {
            "complete": False,
            "logged": True,
            "next_question": ONBOARDING_QUESTIONS[next_index],
            "next_index": next_index,
            # Fix: was hard-coded "…/12" — now computed from the list length.
            "progress": f"Question {question_index + 1}/{total} complete. {total - next_index} remaining."
        }
    else:
        return {"complete": True, "logged": True, "message": "All questions answered."}


# ── Agent Sync Handlers ───────────────────────────────────────────────────────

def handle_memory_sync_check(args: dict) -> dict:
    """Check for memories from OTHER agents since a timestamp (agent-to-agent sync).

    Args (in `args`):
        client_id: tenant to query (defaults to DEFAULT_AGENT).
        since_timestamp: only return memories newer than this (optional).
        agent_id: identity of the asking agent.
        limit: max memories to return (default 50, applied client-side).
    """
    from urllib.parse import urlencode

    client_id = args.get("client_id", DEFAULT_AGENT)
    since_ts = args.get("since_timestamp")
    agent_id = args.get("agent_id", "mcp_client")
    limit = int(args.get("limit", 50))

    # Fix: URL-encode the query parameters for the Phase 4
    # /memory/shared/updates endpoint — raw ids/timestamps containing
    # reserved characters ('&', '#', '+', spaces) previously corrupted the URL.
    query = {"client_id": client_id, "agent_id": agent_id}
    if since_ts:
        query["since_timestamp"] = since_ts
    url = f"{MEMORY_SERVER}/memory/shared/updates?{urlencode(query)}"

    try:
        req = Request(url, headers=_auth_headers(agent_id))
        with urlopen(req, timeout=20) as resp:
            data = json.loads(resp.read())
        return {
            "status": "success",
            "client_id": client_id,
            "sync_agent_id": agent_id,
            "since_timestamp": since_ts,
            "total_new": data.get("total_new", 0),
            "memories": data.get("new_memories", [])[:limit],
        }
    except Exception as e:
        return {"error": str(e), "status": "error"}


def handle_memory_broadcast(args: dict) -> dict:
    """Write a sync-broadcast marker memory so other agents can discover it
    via memory_sync_check."""
    client_id = args.get("client_id", DEFAULT_AGENT)
    entry_ref = args.get("memory_entry_id", "")
    agent_id = args.get("agent_id", "unknown")
    note = args.get("note", "")

    payload = {
        "text": f"[SYNC BROADCAST] Memory {entry_ref} broadcast by {agent_id}. Note: {note}",
        "client_id": client_id,
        "category": "sync_broadcast",
        "importance": 5,
        "agent_id": agent_id,
        "source_type": "system",
        "metadata": {
            "type": "sync_broadcast",
            "memory_entry_id": entry_ref,
            "broadcast_by": agent_id,
        },
    }
    result = mem_post("/memory/log", payload)

    # Guard clause: surface the backend failure if no entry was created.
    if "entry_id" not in result:
        return {"error": result.get("error", "Failed to broadcast"), "status": "error"}

    return {
        "status": "success",
        "broadcast_entry_id": result["entry_id"],
        "memory_entry_id": entry_ref,
        "client_id": client_id,
        "agent_id": agent_id,
        "timestamp": result.get("timestamp", ""),
    }


def handle_data_export(args: dict) -> dict:
    """Export up to 500 memories for a client as a JSON-serializable dict.

    NOTE(review): `include_deleted` is accepted but not forwarded to the
    recall endpoint — confirm whether /memory/recall supports such a flag
    before wiring it through.
    """
    client_id = args.get("client_id", DEFAULT_AGENT)
    include_deleted = args.get("include_deleted", False)  # currently unused — see docstring

    result = mem_post("/memory/recall", {
        "query": "",
        "client_id": client_id,
        "limit": 500,
    })

    memories = result.get("results", []) if isinstance(result, dict) else []
    # Fix: sort the de-duplicated summaries so repeated exports of the same
    # data are deterministic (list(set(...)) ordering varies per run).
    categories = sorted({m.get("category") or "general" for m in memories})
    agents = sorted({m.get("agent_id") or "unknown" for m in memories})
    timestamps = [m.get("timestamp", "") for m in memories if m.get("timestamp")]

    export_ts = datetime.datetime.utcnow().isoformat() + "Z"
    return {
        "export_metadata": {
            "export_timestamp": export_ts,
            "client_id": client_id,
            "version": "1.0",
            "logicframe_version": "v2.1.0",
        },
        "memories": memories,
        "summary": {
            "total_count": len(memories),
            "categories": categories,
            "agents": agents,
            "date_range": {
                "oldest": min(timestamps, default=None),
                "newest": max(timestamps, default=None),
            }
        }
    }


def handle_data_delete_request(args: dict) -> dict:
    """Schedule deletion of all client data after a 30-day grace period.

    Without confirm=True this only returns a warning describing the
    consequences; nothing is scheduled.
    """
    client_id = args.get("client_id", DEFAULT_AGENT)

    if not args.get("confirm", False):
        consequences = [
            "All memories will be marked as pending_deletion",
            "Data will be accessible but flagged for removal",
            "After 30 days, data is PERMANENTLY deleted",
            "This action CANNOT be undone after confirmation",
        ]
        return {
            "status": "warning",
            "client_id": client_id,
            "message": "This will DELETE all data for this client after a 30-day grace period.",
            "details": {
                "what_happens": consequences,
                "to_confirm": "Call again with confirm=True",
            },
            "deletion_date": None,
        }

    deletion_date = (datetime.datetime.utcnow() + datetime.timedelta(days=30)).strftime("%Y-%m-%d")
    timestamp = datetime.datetime.utcnow().isoformat() + "Z"

    # Record the request as a permanent system memory so it survives
    # normal memory lifecycle operations.
    log_result = mem_post("/memory/log", {
        "text": f"[DELETION REQUEST] All data for client {client_id} scheduled for deletion on {deletion_date}. Requested at {timestamp}.",
        "client_id": client_id,
        "category": "system",
        "importance": 10,
        "agent_id": "system",
        "source_type": "system",
        "is_permanent": True,
        "metadata": {
            "type": "deletion_request",
            "deletion_date": deletion_date,
            "requested_at": timestamp,
            "status": "pending",
        }
    })

    return {
        "status": "confirmed",
        "client_id": client_id,
        "message": f"All data for {client_id} will be permanently deleted on {deletion_date}",
        "deletion_date": deletion_date,
        "request_entry_id": log_result.get("entry_id"),
        "recovery_available_until": deletion_date,
        "note": "Contact LogicFrame support to cancel this request before the deletion date.",
    }


def handle_legal_info(_args: dict) -> dict:
    """Return static Terms of Service and Data Policy information."""
    summary_text = (
        "LogicFrame stores memory and decision data on behalf of clients. "
        "Clients retain full ownership of their data. LogicFrame does not use "
        "client data to train external AI models. Data may be exported or "
        "deleted upon request (see data_delete_request)."
    )
    retention_text = (
        "Data is retained until explicitly deleted by the client or "
        "a deletion request is confirmed. Deletion requests have a 30-day grace period."
    )
    privacy_text = (
        "LogicFrame employs AES-256-GCM encryption, per-client tenant isolation, "
        "and role-based access controls."
    )
    return {
        "provider": "LogicFrame LLC",
        "tos_version": "1.0",
        "last_updated": "2026-04-01",
        "summary": summary_text,
        "data_retention": retention_text,
        "privacy": privacy_text,
        "contact": "edcreed@logicframe.io",
        "url": "https://logicframe.io/legal",
    }


def handle_server_info(_args: dict) -> dict:
    """Describe this MCP server: version, backend URL, tool names, features."""
    tool_names = [
        "logicframe_memory_log", "logicframe_memory_recall", "logicframe_memory_context",
        "logicframe_memory_think", "logicframe_memory_plan", "logicframe_memory_review",
        "logicframe_memory_evolve", "logicframe_memory_archive", "logicframe_memory_snapshot",
        "logicframe_memory_stats", "logicframe_memory_share",
        "logicframe_memory_sync_check", "logicframe_memory_broadcast",
        "logicframe_data_export", "logicframe_data_delete_request",
        "logicframe_legal_info", "logicframe_server_info",
        "logicframe_conversations_store", "logicframe_conversations_resume",
        "logicframe_conversations_timber", "logicframe_conversations_auto_resume_check",
        "logicframe_conversations_delete", "logicframe_conversations_list",
        "logicframe_onboard", "logicframe_health", "logicframe_startup_check",
        "logicframe_context_v2", "logicframe_recall_v2", "logicframe_contradictions",
        "logicframe_resolve", "logicframe_update_status", "logicframe_reclassify",
        "logicframe_summarize", "logicframe_sentiment", "logicframe_consolidate",
        "logicframe_audit_verify", "logicframe_self_heal", "logicframe_causal_chain",
        "logicframe_constraints_set", "logicframe_constraints_list", "logicframe_gaps_detect",
        "logicframe_analogy_find", "logicframe_federation_submit", "logicframe_federation_query",
        "logicframe_bootstrap_synthesize", "logicframe_intelligence", "logicframe_intelligence_all",
        "logicframe_correction_log", "logicframe_dpo_stats", "logicframe_dpo_run",
        "logicframe_log_onboarding_answer",
    ]
    feature_flags = {
        "agent_sync": True,
        "data_export": True,
        "deletion_requests": True,
        "legal_info": True,
        "auto_resume": True,
    }
    return {
        "name": "LogicFrame Memory MCP Server v2",
        "version": "2.1.0",
        "description": "Agent-to-agent memory sync, Auto-Resume conversation state, and data lock-in features",
        "memory_server": MEMORY_SERVER,
        "available_tools": tool_names,
        "features": feature_flags,
    }


# ── Conversation State Handlers (Auto-Resume — In-Memory) ──────────────────────
# NOTE: In-memory only — does NOT persist across server restarts.

# Aliased imports so these names don't collide with other module-level imports.
import threading as _conv_threading
from datetime import datetime as _conv_datetime

# Guards all reads/writes of _conv_store in case the server is run threaded.
_conv_store_lock = _conv_threading.Lock()
# Conversation-state records, keyed by "client_id:conversation_id" for a
# specific conversation AND by bare client_id for the client's latest state.
_conv_store: dict = {}   # key: client_id or "client_id:conversation_id"


def _conv_key(client_id: str, conversation_id: str = None) -> str:
    if conversation_id:
        return f"{client_id}:{conversation_id}"
    return client_id


def handle_conversations_store(args: dict) -> dict:
    """Persist full conversation state (in memory only) for later auto-resume."""
    client_id = args.get("client_id", DEFAULT_AGENT)
    conversation_id = args.get("conversation_id", "default")
    stamp = _conv_datetime.utcnow().isoformat() + "Z"

    record = {
        "client_id": client_id,
        "conversation_id": conversation_id,
        "last_topic": args.get("conversation_summary", ""),
        "last_agent_message": args.get("final_agent_message", ""),
        "pending_items": args.get("pending_items", []),
        "open_threads": args.get("open_threads", []),
        "outcome": args.get("outcome", "info"),
        "last_message_time": stamp,
        "timestamp": stamp,
    }

    with _conv_store_lock:
        # File under the composite key AND as the client's latest state.
        _conv_store[_conv_key(client_id, conversation_id)] = record
        _conv_store[client_id] = record

    return {"status": "stored", "client_id": client_id, "conversation_id": conversation_id}


def handle_conversations_resume(args: dict) -> dict:
    """Fetch stored conversation state so the agent can pick up where it left off."""
    client_id = args.get("client_id", DEFAULT_AGENT)
    conversation_id = args.get("conversation_id")
    lookup_key = _conv_key(client_id, conversation_id) if conversation_id else client_id

    with _conv_store_lock:
        record = _conv_store.get(lookup_key)

    if record is None:
        return {"status": "not_found", "client_id": client_id, "conversation_id": conversation_id}

    response = {
        "status": "found",
        "client_id": client_id,
        "conversation_id": record.get("conversation_id"),
    }
    # Copy the stored fields, falling back to the same defaults used at store time.
    for field, default in (
        ("last_topic", ""),
        ("last_agent_message", ""),
        ("pending_items", []),
        ("open_threads", []),
        ("outcome", "info"),
        ("last_message_time", ""),
    ):
        response[field] = record.get(field, default)
    return response


def handle_conversations_timber(args: dict) -> dict:
    """Store a timber-style summary — thin wrapper over handle_conversations_store."""
    payload = {
        "client_id": args.get("client_id", DEFAULT_AGENT),
        "conversation_id": args.get("conversation_id", "default"),
        "pending_items": args.get("pending_items", []),
        "open_threads": args.get("open_threads", []),
    }
    # Forward the summary fields with the same defaults as the store handler.
    for key, default in (
        ("conversation_summary", ""),
        ("final_agent_message", ""),
        ("outcome", "info"),
    ):
        payload[key] = args.get(key, default)
    return handle_conversations_store(payload)


def handle_conversations_auto_resume_check(args: dict) -> dict:
    """Report whether the agent should auto-resume (last message > 5 minutes ago)."""
    RESUME_AFTER_SECONDS = 300   # 5-minute auto-resume window

    client_id = args.get("client_id", DEFAULT_AGENT)
    with _conv_store_lock:
        record = _conv_store.get(client_id)

    if record is None:
        return {"should_resume": False, "reason": "no_previous_conversation", "client_id": client_id}

    # Best-effort parse of the stored ISO timestamp; any parse failure leaves
    # elapsed at 0 (i.e. do not resume).
    elapsed = 0.0
    try:
        stamp = record.get("last_message_time", "")
        parsed = _conv_datetime.fromisoformat(stamp.replace("Z", "+00:00"))
        elapsed = (_conv_datetime.utcnow() - parsed.replace(tzinfo=None)).total_seconds()
    except Exception:
        pass

    if elapsed > RESUME_AFTER_SECONDS:
        return {
            "should_resume": True,
            "elapsed_seconds": int(elapsed),
            "context": f"Previous: {record.get('last_topic', 'N/A')} | Last agent: {record.get('last_agent_message', 'N/A')}",
            "state": record,
            "client_id": client_id,
        }

    return {"should_resume": False, "elapsed_seconds": int(elapsed), "reason": "within_window", "client_id": client_id}


def handle_conversations_delete(args: dict) -> dict:
    """Remove stored conversation state, reporting whether anything was deleted."""
    client_id = args.get("client_id", DEFAULT_AGENT)
    conversation_id = args.get("conversation_id")
    target = _conv_key(client_id, conversation_id) if conversation_id else client_id

    with _conv_store_lock:
        # pop() with a sentinel default combines the membership test and removal.
        removed = _conv_store.pop(target, None) is not None

    return {
        "status": "deleted" if removed else "not_found",
        "client_id": client_id,
        "conversation_id": conversation_id,
    }


def handle_conversations_list(args: dict) -> dict:
    """List stored conversations belonging to a client (composite keys plus
    the client's bare "latest" key)."""
    client_id = args.get("client_id", DEFAULT_AGENT)
    prefix = f"{client_id}:"

    conversations = []
    with _conv_store_lock:
        for key, record in _conv_store.items():
            if key != client_id and not key.startswith(prefix):
                continue
            conversations.append({
                "conversation_id": record.get("conversation_id"),
                "last_topic": record.get("last_topic", ""),
                "last_message_time": record.get("last_message_time", ""),
                "outcome": record.get("outcome", "info"),
            })

    return {"client_id": client_id, "conversations": conversations}

# ── MCP HTTP Server ────────────────────────────────────────────────────────────

class MCPHandler(BaseHTTPRequestHandler):
    """HTTP front-end for the MCP protocol.

    Routes:
      GET  / , /mcp      -> server banner
      GET  /health       -> liveness probe
      GET  /tools        -> full tool schemas
      POST /tools/call, /initialize, /tools/list -> direct method endpoints
      POST /mcp , /      -> JSON-RPC style (method taken from the request body)
    """

    # Tool name -> module-level handler function name. Kept as strings and
    # resolved at call time from the __main__ namespace, because the handler
    # functions are defined AFTER this class body executes. Hoisted to a
    # class attribute so the table is built once, not on every request.
    _HANDLERS = {
        # Voice Memory
        "logicframe_voice_ingest": "handle_voice_ingest",
        "logicframe_voice_recall": "handle_voice_recall",
        "logicframe_voice_caller_delete": "handle_voice_caller_delete",
        # VAPI Platform
        "vapi_list_assistants": "handle_vapi_list_assistants",
        "vapi_get_assistant": "handle_vapi_get_assistant",
        "vapi_create_assistant": "handle_vapi_create_assistant",
        "vapi_update_assistant": "handle_vapi_update_assistant",
        "vapi_delete_assistant": "handle_vapi_delete_assistant",
        "vapi_list_calls": "handle_vapi_list_calls",
        "vapi_get_call": "handle_vapi_get_call",
        "vapi_initiate_outbound_call": "handle_vapi_initiate_outbound_call",
        "vapi_list_phone_numbers": "handle_vapi_list_phone_numbers",
        "vapi_provision_phone_number": "handle_vapi_provision_phone_number",
        "vapi_voice_ingest": "handle_vapi_voice_ingest",
        "vapi_voice_recall": "handle_vapi_voice_recall",
        "vapi_get_call_transcript": "handle_vapi_get_call_transcript",
        "vapi_hangup_call": "handle_vapi_hangup_call",
        # Retell AI
        "retell_list_agents": "handle_retell_list_agents",
        "retell_get_agent": "handle_retell_get_agent",
        "retell_create_agent": "handle_retell_create_agent",
        "retell_update_agent": "handle_retell_update_agent",
        "retell_list_calls": "handle_retell_list_calls",
        "retell_get_call": "handle_retell_get_call",
        "retell_initiate_call": "handle_retell_initiate_call",
        "retell_hangup_call": "handle_retell_hangup_call",
        "retell_voice_ingest": "handle_retell_voice_ingest",
        "retell_voice_recall": "handle_retell_voice_recall",
        # Bland AI
        "bland_initiate_call": "handle_bland_initiate_call",
        "bland_list_calls": "handle_bland_list_calls",
        "bland_get_call": "handle_bland_get_call",
        "bland_get_transcript": "handle_bland_get_transcript",
        "bland_cancel_call": "handle_bland_cancel_call",
        "bland_list_numbers": "handle_bland_list_numbers",
        "bland_voice_ingest": "handle_bland_voice_ingest",
        "bland_voice_recall": "handle_bland_voice_recall",
        # Twilio
        "twilio_initiate_call": "handle_twilio_initiate_call",
        "twilio_list_calls": "handle_twilio_list_calls",
        "twilio_get_call": "handle_twilio_get_call",
        "twilio_voice_ingest": "handle_twilio_voice_ingest",
        "twilio_voice_recall": "handle_twilio_voice_recall",
        # Voice Analytics
        "logicframe_voice_stats": "handle_logicframe_voice_stats",
        "logicframe_voice_session": "handle_logicframe_voice_session",
        "logicframe_voice_caller_history": "handle_logicframe_voice_caller_history",
        "logicframe_voice_delete_session": "handle_logicframe_voice_delete_session",
        # Memory Core
        "logicframe_health": "handle_health",
        "logicframe_memory_log": "handle_memory_log",
        "logicframe_memory_log_verbatim": "handle_memory_log_verbatim",
        "logicframe_memory_recall": "handle_memory_recall",
        "logicframe_memory_context": "handle_memory_context",
        "logicframe_memory_think": "handle_memory_think",
        "logicframe_memory_plan": "handle_memory_plan",
        "logicframe_memory_review": "handle_memory_review",
        "logicframe_memory_evolve": "handle_memory_evolve",
        "logicframe_memory_archive": "handle_memory_archive",
        "logicframe_memory_snapshot": "handle_memory_snapshot",
        "logicframe_memory_stats": "handle_memory_stats",
        "logicframe_memory_share": "handle_memory_share",
        "logicframe_onboard": "handle_onboard",
        # Agent Baseline Layer
        "logicframe_startup_check": "handle_startup_check",
        "logicframe_log_onboarding_answer": "handle_log_onboarding_answer",
        # Context Profiles + Recall Enhanced
        "logicframe_context_v2": "handle_context_v2",
        "logicframe_recall_v2": "handle_recall_v2",
        # Federation
        "logicframe_federation_submit": "handle_federation_submit",
        "logicframe_federation_query": "handle_federation_query",
        # Agent Sync / Data / Legal — fix: these tools were advertised by
        # handle_server_info but had no routing entries, so calling them
        # always returned "Unknown tool".
        "logicframe_memory_sync_check": "handle_memory_sync_check",
        "logicframe_memory_broadcast": "handle_memory_broadcast",
        "logicframe_data_export": "handle_data_export",
        "logicframe_data_delete_request": "handle_data_delete_request",
        "logicframe_legal_info": "handle_legal_info",
        "logicframe_server_info": "handle_server_info",
        # Conversation State (Auto-Resume) — same fix as above.
        "logicframe_conversations_store": "handle_conversations_store",
        "logicframe_conversations_resume": "handle_conversations_resume",
        "logicframe_conversations_timber": "handle_conversations_timber",
        "logicframe_conversations_auto_resume_check": "handle_conversations_auto_resume_check",
        "logicframe_conversations_delete": "handle_conversations_delete",
        "logicframe_conversations_list": "handle_conversations_list",
    }

    def log_message(self, *a):
        # Suppress BaseHTTPRequestHandler's default stderr access log.
        pass

    def send_json(self, data: dict, status=200):
        """Write `data` as a JSON HTTP response with permissive CORS."""
        body = json.dumps(data).encode()
        self.send_response(status)
        self.send_header("Content-Type", "application/json")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()
        self.wfile.write(body)

    def do_OPTIONS(self):
        # CORS preflight for browser-based MCP clients.
        self.send_response(200)
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
        self.send_header("Access-Control-Allow-Headers", "Content-Type, X-Agent-ID")
        self.end_headers()

    def do_GET(self):
        # Support /mcp path for Claude Code integration
        path = self.path.strip("/")
        if path in ("", "mcp"):
            self.send_json({
                "name":        "LogicFrame MCP Server v2",
                "version":     "2.0.0",
                "description": "39 tools: Memory + Intelligence + Decision + DPO",
                "tools":       [t["name"] for t in TOOLS],
            })
        elif path == "health":
            self.send_json({"status": "logicframe-mcp", "version": "2.0.0", "tools": len(TOOLS)})
        elif path == "tools":
            self.send_json({"tools": TOOLS})
        else:
            self.send_json({"error": "Not found"}, 404)

    def _read_body(self) -> bytes:
        """Read the POST body, supporting both Content-Length and chunked
        transfer encoding."""
        if self.headers.get("Transfer-Encoding", "").lower() == "chunked":
            body = b""
            while True:
                size_line = self.rfile.readline()
                # Fix: RFC 7230 allows ";name=value" chunk extensions after
                # the hex size — the previous int(line, 16) parse rejected
                # otherwise-valid requests that carried them.
                chunk_size = int(size_line.split(b";")[0].strip(), 16)
                if chunk_size == 0:
                    self.rfile.readline()  # consume trailing CRLF after last chunk
                    return body
                body += self.rfile.read(chunk_size)
                self.rfile.readline()  # consume trailing CRLF after chunk data
        length = int(self.headers.get("Content-Length", 0))
        return self.rfile.read(max(length, 0))

    def do_POST(self):
        try:
            data = json.loads(self._read_body().decode())
        except Exception:
            # Any framing/JSON failure is reported uniformly as a 400.
            self.send_json({"error": "Invalid request"}, 400)
            return

        # Extract calling bot's identity from header — enables per-bot API key auth
        global _CALLER_AGENT_ID
        _CALLER_AGENT_ID = self.headers.get("X-Agent-ID") or None

        method = self.path.strip("/")
        params = data.get("params", {})

        handler_map = {
            "tools/call":   self.handle_tool_call,
            "initialize":   self.handle_initialize,
            "tools/list":   self.handle_tools_list,
            "mcp":          self.handle_mcp,
            "":             self.handle_mcp,
        }

        handler = handler_map.get(method)
        if handler:
            handler(params, data)
        else:
            self.send_json({"error": f"Unknown method: {method}"}, 404)

    def handle_mcp(self, params, data):
        """MCP JSON-RPC: route based on the 'method' field in the body,
        so POST /mcp works with any JSON-RPC method."""
        rpc_method = data.get('method', '')
        handler_map = {
            'tools/list': self.handle_tools_list,
            'tools/call': self.handle_tool_call,
            'initialize': self.handle_initialize,
        }
        handler = handler_map.get(rpc_method)
        if handler:
            handler(params, data)
        else:
            self.send_json({'error': f'Unknown MCP method: {rpc_method}'}, 404)

    def do_DEBUG_GLOBALS(self):
        """Debug endpoint (custom HTTP method DEBUG_GLOBALS) to inspect what
        is visible in globals at request time."""
        g = globals()
        handler_keys = sorted([k for k in g.keys() if 'handle' in k.lower()])
        module_keys = sorted(g.keys())
        self.send_json({
            "total_globals": len(g),
            "module_keys": module_keys[:50],
            "handler_keys": handler_keys,
            "python_version": sys.version,
        })

    def handle_initialize(self, params, data):
        """MCP initialize handshake: report protocol version and capabilities."""
        self.send_json({
            "protocolVersion": "2024-11-05",
            "capabilities":    {"tools": {}},
            "serverInfo":      {"name": "logicframe-mcp", "version": "2.0.0"},
        })

    def handle_tools_list(self, params, data):
        """Return the full tool schema list."""
        self.send_json({"tools": TOOLS})

    def handle_tool_call(self, params, data):
        """Dispatch a tools/call request to the matching handler function."""
        name = params.get("name", "")
        args = params.get("arguments", {})

        handler_name = self._HANDLERS.get(name)
        if not handler_name:
            self.send_json({"error": f"Unknown tool: {name}"}, 404)
            return

        # Resolve at call time from the complete __main__ namespace: handler
        # functions are defined after this class body executes, so a table of
        # direct references captured here would not see them. Class-defined
        # handlers (if any) take precedence over module-level ones.
        main_ns = sys.modules['__main__'].__dict__
        handler_func = MCPHandler.__dict__.get(handler_name) or main_ns.get(handler_name)
        if not handler_func:
            self.send_json({"error": f"Handler '{handler_name}' not found"}, 500)
            return

        try:
            # handler_func is a plain function taking the arguments dict.
            result = handler_func(args)
            self.send_json({"content": [{"type": "text", "text": str(result)}]})
        except Exception as e:
            self.send_json({"error": f"Handler error: {str(e)}"}, 500)

# Third-party HTTP client used by the voice/platform handlers below.
import requests as _req

# Voice memory API endpoint and VAPI credential (both env-overridable).
VOICE_API = os.environ.get("VOICE_API_URL", "https://logicmem.io")
VAPI_API_KEY = os.environ.get("VAPI_API_KEY", "")

def _voice_call(method, path, api_key, json_data=None):
    """Make an authenticated request to the voice memory API; return parsed JSON.

    Fix: the original routed every non-GET verb through requests.post(), so
    the "DELETE" issued by handle_voice_caller_delete was actually sent as a
    POST. Dispatch on the real HTTP method instead.
    """
    headers = {"Content-Type": "application/json", "X-API-Key": api_key}
    url = f"{VOICE_API}{path}"
    if method == "GET":
        r = _req.get(url, headers=headers, timeout=15)
    elif method == "DELETE":
        r = _req.delete(url, headers=headers, timeout=15)
    else:
        r = _req.post(url, headers=headers, json=json_data, timeout=15)
    return r.json()

def handle_voice_ingest(args):
    """Forward a VAPI payload to the voice ingest endpoint."""
    api_key = args.get("api_key", "")
    if not api_key:
        return {"error": "api_key required"}
    response = _voice_call("POST", "/v1/voice/ingest", api_key, args.get("vapi_payload", {}))
    if response.get("error"):
        return {"error": response["error"]}
    return {"ok": True, "utterances_stored": response.get("utterances_stored", 0)}

def handle_voice_recall(args):
    """Recall stored utterances for a caller (top_k capped at 20)."""
    api_key = args.get("api_key", "")
    caller = args.get("caller_number", "")
    capped_k = min(int(args.get("top_k", 5)), 20)
    if not api_key or not caller:
        return {"error": "api_key and caller_number required"}

    response = _voice_call("POST", "/v1/voice/recall", api_key,
                           {"caller_number": caller, "top_k": capped_k})
    if response.get("error"):
        return {"error": response["error"]}

    utterances = response.get("utterances", [])
    if not utterances:
        # No history for this caller — return an empty, well-formed result.
        return {"caller_number": caller, "utterances": [], "count": 0}
    return {
        "caller_number": caller,
        "utterances": utterances,
        "count": len(utterances),
        "latency_ms": response.get("latency_ms", "?"),
        "cache_hit": response.get("cache_hit", False),
    }

def handle_voice_caller_delete(args):
    """Delete all stored voice memory for a caller.

    Fix: URL-encode the caller number before embedding it in the request
    path — raw values (containing '#', '?', '/', spaces) would otherwise
    corrupt the route.
    """
    from urllib.parse import quote

    api_key = args.get("api_key", "")
    caller = args.get("caller_number", "")
    if not api_key or not caller:
        return {"error": "api_key and caller_number required"}
    result = _voice_call("DELETE", f"/v1/voice/caller/{quote(caller, safe='')}", api_key)
    if result.get("error"):
        return {"error": result["error"]}
    return {"ok": True, "deleted": result.get("deleted", 0)}

# ── Retell AI Platform Handlers ────────────────────────────────────────────────

# Base URL for the Retell AI v2 REST API.
RETELL_BASE = "https://api.retell.ai/v2"

def _retell_headers(api_key):
    return {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

def _retell(method, path, api_key, json_data=None, params=None):
    """Issue a Retell API request; return parsed JSON on 200/201, otherwise
    an error dict with a truncated body and the HTTP status code."""
    import requests as _req
    request_kwargs = {"headers": _retell_headers(api_key), "timeout": 20}
    if json_data:
        request_kwargs["json"] = json_data
    if params:
        request_kwargs["params"] = params
    response = _req.request(method, f"{RETELL_BASE}{path}", **request_kwargs)
    if response.status_code not in (200, 201):
        return {"error": response.text[:300], "status_code": response.status_code}
    return response.json() if response.text else {"ok": True}


def handle_retell_list_agents(args):
    """List all Retell agents for the account."""
    api_key = args.get("api_key", "") or RETELL_API_KEY
    if not api_key:
        return {"error": "RETELL_API_KEY required - set env var or pass api_key"}
    agents = _retell("GET", "/agent", api_key)
    if not isinstance(agents, list):
        # Unexpected/error response shape — pass it through for diagnosis.
        return {"count": 0, "agents": [], "raw": agents}
    return {"count": len(agents), "agents": agents}


def handle_retell_get_agent(args):
    """Fetch a single Retell agent by id."""
    api_key = args.get("api_key", "") or RETELL_API_KEY
    agent_id = args.get("agent_id", "")
    if not (api_key and agent_id):
        return {"error": "api_key and agent_id required"}
    return _retell("GET", f"/agent/{agent_id}", api_key)


def handle_retell_create_agent(args):
    """Create a Retell agent from the non-empty fields of *args* (name required)."""
    key = args.get("api_key", "") or RETELL_API_KEY
    if not key:
        return {"error": "api_key required"}
    body = {}
    for field, value in args.items():
        if field != "api_key" and value not in (None, ""):
            body[field] = value
    if not body.get("name"):
        return {"error": "name is required"}
    created = _retell("POST", "/agent", key, body)
    if "error" in created and "status_code" in created:
        return created
    new_id = created.get("agent_id", created.get("id", ""))
    return {"ok": True, "agent": created, "agent_id": new_id}


def handle_retell_update_agent(args):
    """Patch an existing Retell agent with the non-empty fields of *args*."""
    key = args.get("api_key", "") or RETELL_API_KEY
    agent_id = args.get("agent_id", "")
    if not (key and agent_id):
        return {"error": "api_key and agent_id required"}
    body = {}
    for field, value in args.items():
        if field not in ("api_key", "agent_id") and value not in (None, ""):
            body[field] = value
    resp = _retell("PATCH", f"/agent/{agent_id}", key, body)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "agent": resp}


def handle_retell_list_calls(args):
    """List Retell calls, optionally filtered by agent_id and/or limit."""
    key = args.get("api_key", "") or RETELL_API_KEY
    if not key:
        return {"error": "api_key required"}
    filters = {name: args[name] for name in ("agent_id", "limit")
               if args.get(name) not in (None, "")}
    resp = _retell("GET", "/call", key, params=filters if filters else None)
    if isinstance(resp, list):
        return {"count": len(resp), "calls": resp}
    if "data" in resp:
        rows = resp["data"]
        return {"count": len(rows), "calls": rows}
    return {"count": 0, "calls": []}


def handle_retell_get_call(args):
    """Fetch full details for a single Retell call."""
    key = args.get("api_key", "") or RETELL_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    return _retell("GET", f"/call/{call_id}", key)


def handle_retell_initiate_call(args):
    """Start an outbound Retell call from agent_id to to_number."""
    key = args.get("api_key", "") or RETELL_API_KEY
    if not key:
        return {"error": "api_key required"}
    agent_id = args.get("agent_id", "")
    dest = args.get("to_number", "")
    if not (agent_id and dest):
        return {"error": "agent_id and to_number required"}
    body = {"agent_id": agent_id, "to_number": dest}
    for optional in ("from_number", "metadata"):
        if optional in args and args[optional] not in (None, ""):
            body[optional] = args[optional]
    resp = _retell("POST", "/call/outbound", key, body)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "call": resp,
            "call_id": resp.get("call_id", resp.get("id", ""))}


def handle_retell_hangup_call(args):
    """Terminate an in-progress Retell call."""
    key = args.get("api_key", "") or RETELL_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    resp = _retell("POST", f"/call/{call_id}/hangup", key)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "call_id": call_id}


def handle_retell_voice_ingest(args):
    """Store a Retell call transcript in LogicMem voice memory.

    Accepts either a ready transcript dict via ``vapi_payload`` or a
    ``call_id`` (plus a Retell key) to fetch one; the transcript is
    normalized to the VAPI end-of-call-report shape before ingest.
    """
    lm_key = args.get("api_key", "")  # LogicMem API key
    if not lm_key:
        return {"error": "api_key (LogicMem) required"}
    call_id = args.get("call_id", "")
    transcript_data = args.get("vapi_payload", None)  # Direct payload
    if not transcript_data and call_id:
        retell_key = args.get("retell_api_key", "") or RETELL_API_KEY
        if retell_key:
            transcript_data = _retell("GET", f"/call/{call_id}", retell_key)
            # Bug fix: surface a failed Retell lookup instead of silently
            # ingesting an empty transcript parsed from the error dict.
            if isinstance(transcript_data, dict) and "error" in transcript_data \
                    and "status_code" in transcript_data:
                return transcript_data
    if not transcript_data:
        return {"error": "vapi_payload (transcript dict) or call_id+retell_api_key required"}
    # Extract caller number and speaker turns from the Retell payload.
    customer = transcript_data.get("customer", {})
    caller = customer.get("number", "") if isinstance(customer, dict) else ""
    # Retell returns the conversation as a transcript_object array.
    transcript_obj = transcript_data.get("transcript_object", [])
    utterances = []
    for seg in transcript_obj:
        if isinstance(seg, dict):
            role = seg.get("role", "unknown")
            content = seg.get("content", "")
            if content:
                utterances.append({"role": role, "content": content})
    vapi_payload = {
        "type": "end-of-call-report",
        "call": {"id": call_id or transcript_data.get("call_id", "unknown")},
        "customer": {"number": caller},
        "messages": utterances
    }
    result = _voice_call("POST", "/v1/voice/ingest", lm_key, vapi_payload)
    if result.get("error"):
        return {"error": result["error"]}
    stored = result.get("utterances_stored", 0)
    return {"ok": True, "utterances_stored": stored}


def handle_retell_voice_recall(args):
    """Recall recent voice-memory utterances for a caller (Retell flows)."""
    key = args.get("api_key", "")
    number = args.get("caller_number", "")
    k = min(int(args.get("top_k", 5)), 20)  # cap recall depth at 20
    if not (key and number):
        return {"error": "api_key and caller_number required"}
    resp = _voice_call("POST", "/v1/voice/recall", key,
                       {"caller_number": number, "top_k": k})
    if resp.get("error"):
        return {"error": resp["error"]}
    hits = resp.get("utterances", [])
    if not hits:
        return {"caller_number": number, "utterances": [], "count": 0}
    return {
        "caller_number": number,
        "utterances": hits,
        "count": len(hits),
        "latency_ms": resp.get("latency_ms", "?"),
        "cache_hit": resp.get("cache_hit", False)
    }


# ── Bland AI Platform Handlers ────────────────────────────────────────────────

# Base URL for the Bland AI REST API (v1).
# NOTE(review): Bland's docs show a plain "authorization: <key>" header; the
# "Bland <key>" scheme used by _bland_headers below should be confirmed.
BLAND_BASE = "https://api.bland.ai/v1"

def _bland_headers(api_key):
    return {"Authorization": f"Bland {api_key}", "Content-Type": "application/json"}

def _bland(method, path, api_key, json_data=None, params=None):
    """Perform an HTTP request against the Bland API.

    Returns the decoded JSON body on 200/201 ({"ok": True} when the body is
    empty); otherwise a {"error", "status_code"} dict.
    """
    import requests as _req
    url = f"{BLAND_BASE}{path}"
    kwargs = {"headers": _bland_headers(api_key), "timeout": 20}
    # Bug fix: explicit None checks so falsy-but-valid values (e.g. an empty
    # dict payload) are still sent instead of being silently dropped.
    if json_data is not None:
        kwargs["json"] = json_data
    if params is not None:
        kwargs["params"] = params
    r = _req.request(method, url, **kwargs)
    if r.status_code in (200, 201):
        return r.json() if r.text else {"ok": True}
    return {"error": r.text[:300], "status_code": r.status_code}


def handle_bland_initiate_call(args):
    """Start an outbound Bland AI call to to_number."""
    key = args.get("api_key", "") or BLAND_API_KEY
    if not key:
        return {"error": "api_key required"}
    if not args.get("to_number", ""):
        return {"error": "to_number required"}
    body = {field: args[field]
            for field in ("to_number", "from_number", "model", "voice",
                          "prompt", "webhook_url", "metadata")
            if field in args and args[field] not in (None, "")}
    resp = _bland("POST", "/calls", key, body)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "call": resp,
            "call_id": resp.get("id", resp.get("call_id", ""))}


def handle_bland_list_calls(args):
    """List recent Bland calls (limit capped at 100)."""
    key = args.get("api_key", "") or BLAND_API_KEY
    if not key:
        return {"error": "api_key required"}
    query = {}
    requested = args.get("limit", 20)
    if requested:
        query["limit"] = min(int(requested), 100)
    resp = _bland("GET", "/calls", key, params=query)
    if isinstance(resp, list):
        return {"count": len(resp), "calls": resp}
    if "calls" in resp:
        found = resp["calls"]
        return {"count": len(found), "calls": found}
    return {"count": 0, "calls": []}


def handle_bland_get_call(args):
    """Fetch details for one Bland call by id."""
    key = args.get("api_key", "") or BLAND_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    return _bland("GET", f"/calls/{call_id}", key)


def handle_bland_get_transcript(args):
    """Fetch the transcript for a completed Bland call."""
    key = args.get("api_key", "") or BLAND_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    return _bland("GET", f"/calls/{call_id}/transcript", key)


def handle_bland_cancel_call(args):
    """Cancel an active or queued Bland call."""
    key = args.get("api_key", "") or BLAND_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    resp = _bland("POST", f"/calls/{call_id}/cancel", key)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "call_id": call_id}


def handle_bland_list_numbers(args):
    """List phone numbers owned by the Bland account."""
    key = args.get("api_key", "") or BLAND_API_KEY
    if not key:
        return {"error": "api_key required"}
    resp = _bland("GET", "/phone-numbers", key)
    if isinstance(resp, list):
        return {"count": len(resp), "numbers": resp}
    if "phone_numbers" in resp:
        nums = resp["phone_numbers"]
        return {"count": len(nums), "numbers": nums}
    return {"count": 0, "numbers": []}


def handle_bland_voice_ingest(args):
    """Fetch a Bland call transcript and store it in LogicMem voice memory.

    Requires ``api_key`` (LogicMem) and ``call_id``; the Bland key comes
    from ``bland_api_key`` or the BLAND_API_KEY env fallback.
    """
    lm_key = args.get("api_key", "")  # LogicMem key
    if not lm_key:
        return {"error": "api_key (LogicMem) required"}
    call_id = args.get("call_id", "")
    # Validate call_id before resolving the Bland key so missing-arg errors
    # are reported regardless of env configuration.
    if not call_id:
        return {"error": "call_id + bland_api_key required for transcript lookup"}
    bland_key = args.get("bland_api_key", "") or BLAND_API_KEY
    if not bland_key:
        return {"error": "call_id + bland_api_key required for transcript lookup"}
    transcript_data = _bland("GET", f"/calls/{call_id}/transcript", bland_key)
    # Bug fix: propagate a failed transcript fetch instead of silently
    # ingesting zero utterances parsed from the error dict.
    if "error" in transcript_data and "status_code" in transcript_data:
        return transcript_data
    # Bland returns: {transcript: [{role, content, start, end}]}
    utterances = []
    for seg in transcript_data.get("transcript", []):
        if isinstance(seg, dict):
            content = seg.get("content", "")
            if content:
                utterances.append({"role": seg.get("role", "unknown"),
                                   "content": content})
    vapi_payload = {
        "type": "end-of-call-report",
        "call": {"id": call_id},
        "customer": {"number": ""},
        "messages": utterances
    }
    result = _voice_call("POST", "/v1/voice/ingest", lm_key, vapi_payload)
    if result.get("error"):
        return {"error": result["error"]}
    return {"ok": True, "utterances_stored": result.get("utterances_stored", 0)}


def handle_bland_voice_recall(args):
    """Recall recent voice-memory utterances for a caller (Bland flows)."""
    key = args.get("api_key", "")
    number = args.get("caller_number", "")
    depth = min(int(args.get("top_k", 5)), 20)  # capped at 20
    if not (key and number):
        return {"error": "api_key and caller_number required"}
    resp = _voice_call("POST", "/v1/voice/recall", key,
                       {"caller_number": number, "top_k": depth})
    if resp.get("error"):
        return {"error": resp["error"]}
    found = resp.get("utterances", [])
    if not found:
        return {"caller_number": number, "utterances": [], "count": 0}
    return {
        "caller_number": number,
        "utterances": found,
        "count": len(found),
        "latency_ms": resp.get("latency_ms", "?"),
        "cache_hit": resp.get("cache_hit", False)
    }


# ── Twilio Platform Handlers ───────────────────────────────────────────────────

# Base URL for Twilio's REST API; "2010-04-01" is Twilio's date-based API version.
TWILIO_BASE = "https://api.twilio.com/2010-04-01"

def _twilio_headers(account_sid, auth_token):
    import base64
    credentials = f"{account_sid}:{auth_token}"
    encoded = base64.b64encode(credentials.encode()).decode()
    return {"Authorization": f"Basic {encoded}", "Content-Type": "application/x-www-form-urlencoded"}

def _twilio(method, path, account_sid, auth_token, data=None):
    """Perform a Twilio REST request and return the response as a flat dict.

    2xx bodies are first parsed as XML (tag -> text). The paths used in this
    file end in ".json", for which Twilio returns JSON — so a JSON fallback
    is attempted before giving up; otherwise {"raw": <truncated body>}.
    Non-2xx responses yield {"error", "status_code"}.
    """
    import requests as _req
    url = f"{TWILIO_BASE}{path}"
    kwargs = {"headers": _twilio_headers(account_sid, auth_token), "timeout": 20}
    if data:
        kwargs["data"] = data
    r = _req.request(method, url, **kwargs)
    if r.status_code not in (200, 201):
        return {"error": r.text[:300], "status_code": r.status_code}
    import xml.etree.ElementTree as ET
    try:
        root = ET.fromstring(r.text)
        return {child.tag: child.text for child in root}
    except ET.ParseError:
        # Bug fix: the previous bare `except:` swallowed everything and the
        # XML parse always failed for ".json" resources; parse JSON here so
        # callers get real fields instead of {"raw": ...}.
        try:
            parsed = json.loads(r.text)
            return parsed if isinstance(parsed, dict) else {"raw": r.text[:500]}
        except (ValueError, TypeError):
            return {"raw": r.text[:500]}


def _twilio_credentials(args):
    """Extract Twilio credentials from args or env."""
    key = args.get("api_key", "")
    if key and ":" in key:
        sid, token = key.split(":", 1)
        return sid, token
    sid = args.get("twilio_account_sid", "") or TWILIO_ACCOUNT_SID
    token = args.get("twilio_auth_token", "") or TWILIO_AUTH_TOKEN
    return sid, token


def handle_twilio_initiate_call(args):
    """Place an outbound Twilio call from ``from_`` to ``to``."""
    sid, token = _twilio_credentials(args)
    if not (sid and token):
        return {"error": "Twilio credentials required - set TWILIO_ACCOUNT_SID/TWILIO_AUTH_TOKEN or pass api_key (account_sid:auth_token)"}
    dest = args.get("to", "")
    origin = args.get("from_", "")
    if not (dest and origin):
        return {"error": "to and from_ (phone numbers) required"}
    form = {"To": dest, "From": origin, "Timeout": str(args.get("timeout", 60))}
    if args.get("url"):
        form["Url"] = args["url"]
    if args.get("status_callback"):
        form["StatusCallback"] = args["status_callback"]
    resp = _twilio("POST", f"/Accounts/{sid}/Calls.json", sid, token, form)
    if "error" in resp and resp.get("status_code"):
        return resp
    return {"ok": True, "call": resp, "call_sid": resp.get("Sid", "")}


def handle_twilio_list_calls(args):
    """List recent Twilio calls, optionally filtered by to/from/status.

    Caps the page size at 100 and returns {"count", "calls"}; on HTTP
    failure the error text (truncated) is included alongside empty results.
    """
    sid, token = _twilio_credentials(args)
    if not sid or not token:
        return {"error": "credentials required"}
    params = {}
    # Map lowercase arg names onto Twilio's capitalized query params.
    # (Fix: the old code looked up the same key twice — "To"/"From"/"Status"
    # lowercase to "to"/"from"/"status" with no underscores to strip.)
    for twilio_name in ("To", "From", "Status"):
        val = args.get(twilio_name.lower(), "")
        if val:
            params[twilio_name] = val
    params["PageSize"] = min(int(args.get("limit", 20)), 100)
    import requests as _req
    url = f"{TWILIO_BASE}/Accounts/{sid}/Calls.json"
    r = _req.get(url, headers=_twilio_headers(sid, token), params=params, timeout=20)
    if r.status_code == 200:
        calls = r.json().get("calls", [])
        return {"count": len(calls), "calls": calls}
    return {"count": 0, "calls": [], "error": r.text[:200]}


def handle_twilio_get_call(args):
    """Fetch details for one Twilio call by its SID."""
    sid, token = _twilio_credentials(args)
    if not (sid and token):
        return {"error": "credentials required"}
    call_sid = args.get("call_sid", "")
    if not call_sid:
        return {"error": "call_sid required"}
    return _twilio("GET", f"/Accounts/{sid}/Calls/{call_sid}.json", sid, token)


def handle_twilio_voice_ingest(args):
    """Normalize a Twilio transcript and store it in LogicMem voice memory.

    Expects ``api_key`` (LogicMem) plus ``transcript_segments``; when a
    ``call_sid`` and Twilio credentials are supplied, the caller number is
    looked up from Twilio first.
    """
    lm_key = args.get("api_key", "")  # LogicMem key
    if not lm_key:
        return {"error": "api_key (LogicMem) required"}
    caller_number = ""
    call_sid = args.get("call_sid", "")
    if call_sid:
        sid, token = _twilio_credentials(args)
        if sid and token:
            details = _twilio("GET", f"/Accounts/{sid}/Calls/{call_sid}.json", sid, token)
            caller_number = details.get("From", "")
    utterances = []
    for segment in args.get("transcript_segments", []):
        if not isinstance(segment, dict):
            continue
        text = segment.get("text", segment.get("content", ""))
        if text:
            speaker = segment.get("speaker", segment.get("role", "unknown"))
            utterances.append({"role": speaker, "content": text})
    payload = {
        "type": "end-of-call-report",
        "call": {"id": call_sid or "unknown"},
        "customer": {"number": caller_number},
        "messages": utterances
    }
    outcome = _voice_call("POST", "/v1/voice/ingest", lm_key, payload)
    if outcome.get("error"):
        return {"error": outcome["error"]}
    return {"ok": True, "utterances_stored": outcome.get("utterances_stored", 0)}


def handle_twilio_voice_recall(args):
    """Recall recent voice-memory utterances for a caller (Twilio flows)."""
    key = args.get("api_key", "")
    number = args.get("caller_number", "")
    depth = min(int(args.get("top_k", 5)), 20)  # capped at 20
    if not (key and number):
        return {"error": "api_key and caller_number required"}
    resp = _voice_call("POST", "/v1/voice/recall", key,
                       {"caller_number": number, "top_k": depth})
    if resp.get("error"):
        return {"error": resp["error"]}
    found = resp.get("utterances", [])
    if not found:
        return {"caller_number": number, "utterances": [], "count": 0}
    return {
        "caller_number": number,
        "utterances": found,
        "count": len(found),
        "latency_ms": resp.get("latency_ms", "?"),
        "cache_hit": resp.get("cache_hit", False)
    }


# ── Voice Analytics Handlers ──────────────────────────────────────────────────

def handle_logicframe_voice_stats(args):
    """Return aggregate voice-memory statistics for the account."""
    key = args.get("api_key", "")
    if not key:
        return {"error": "api_key required"}
    resp = _voice_call("GET", "/v1/voice/stats", key)
    return {"error": resp["error"]} if resp.get("error") else resp


def handle_logicframe_voice_session(args):
    """Fetch a single voice session by id."""
    key = args.get("api_key", "")
    session_id = args.get("session_id", "")
    if not (key and session_id):
        return {"error": "api_key and session_id required"}
    resp = _voice_call("GET", f"/v1/voice/session/{session_id}", key)
    return {"error": resp["error"]} if resp.get("error") else resp


def handle_logicframe_voice_caller_history(args):
    """Fetch call history for a caller (limit capped at 100)."""
    key = args.get("api_key", "")
    number = args.get("caller_number", "")
    page = min(int(args.get("limit", 20)), 100)
    if not (key and number):
        return {"error": "api_key and caller_number required"}
    resp = _voice_call("POST", "/v1/voice/caller/history", key,
                       {"caller_number": number, "limit": page})
    return {"error": resp["error"]} if resp.get("error") else resp


def handle_logicframe_voice_delete_session(args):
    """Delete one voice session and all its utterances."""
    key = args.get("api_key", "")
    session_id = args.get("session_id", "")
    if not (key and session_id):
        return {"error": "api_key and session_id required"}
    resp = _voice_call("DELETE", f"/v1/voice/session/{session_id}", key)
    if resp.get("error"):
        return {"error": resp["error"]}
    return {"ok": True, "deleted": True, "session_id": session_id}


# ── Main ───────────────────────────────────────────────────────────────────────

# ── EXTERNAL HANDLERS (moved before main block) ──────────────────────────
# ── VAPI Platform Handlers ─────────────────────────────────────────────────────

# Base URL for the VAPI voice-agent REST API.
# NOTE(review): the handlers below reference a module-level VAPI_API_KEY that
# is not defined in this section — confirm it is defined elsewhere in the file
# (alongside RETELL_API_KEY / BLAND_API_KEY), else empty-key calls NameError.
VAPI_BASE = "https://api.vapi.ai"

def _vapi_headers(api_key):
    return {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

def _vapi(method, path, api_key, json_data=None, params=None):
    """Perform an HTTP request against the VAPI API.

    Returns the decoded JSON body on 200/201 ({"ok": True} when the body is
    empty); otherwise a {"error", "status_code"} dict.
    """
    import requests as _req
    url = f"{VAPI_BASE}{path}"
    kwargs = {"headers": _vapi_headers(api_key), "timeout": 20}
    # Bug fix: explicit None checks so falsy-but-valid values (e.g. an empty
    # dict payload) are still sent instead of being silently dropped.
    if json_data is not None:
        kwargs["json"] = json_data
    if params is not None:
        kwargs["params"] = params
    r = _req.request(method, url, **kwargs)
    if r.status_code in (200, 201):
        return r.json() if r.text else {"ok": True}
    return {"error": r.text[:300], "status_code": r.status_code}


def handle_vapi_list_assistants(args):
    """List every VAPI assistant visible to the supplied (or env) API key."""
    # NOTE(review): VAPI_API_KEY must be defined at module level elsewhere
    # in this file — confirm it exists.
    key = args.get("api_key", "") or VAPI_API_KEY
    if not key:
        return {"error": "VAPI API key required - set VAPI_API_KEY env var or pass api_key"}
    resp = _vapi("GET", "/assistant", key)
    if not isinstance(resp, list):
        return {"count": 0, "assistants": [], "raw": resp}
    return {"count": len(resp), "assistants": resp}


def handle_vapi_get_assistant(args):
    """Fetch a single VAPI assistant by id."""
    key = args.get("api_key", "") or VAPI_API_KEY
    assistant_id = args.get("assistant_id", "")
    if not (key and assistant_id):
        return {"error": "api_key and assistant_id required"}
    return _vapi("GET", f"/assistant/{assistant_id}", key)


def handle_vapi_create_assistant(args):
    """Create a VAPI assistant from the non-empty fields of *args* (name required)."""
    key = args.get("api_key", "") or VAPI_API_KEY
    if not key:
        return {"error": "api_key required"}
    body = {}
    for field, value in args.items():
        if field != "api_key" and value not in (None, ""):
            body[field] = value
    if not body.get("name"):
        return {"error": "name is required"}
    created = _vapi("POST", "/assistant", key, body)
    if "error" in created and "status_code" in created:
        return created
    return {"ok": True, "assistant": created, "assistant_id": created.get("id", "")}


def handle_vapi_update_assistant(args):
    """Patch an existing VAPI assistant with the non-empty fields of *args*."""
    key = args.get("api_key", "") or VAPI_API_KEY
    assistant_id = args.get("assistant_id", "")
    if not (key and assistant_id):
        return {"error": "api_key and assistant_id required"}
    body = {}
    for field, value in args.items():
        if field not in ("api_key", "assistant_id") and value not in (None, ""):
            body[field] = value
    resp = _vapi("PATCH", f"/assistant/{assistant_id}", key, body)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "assistant": resp}


def handle_vapi_delete_assistant(args):
    """Delete a VAPI assistant by id.

    Returns {"ok": True, ...} on success or the API error dict on failure.
    """
    api_key = args.get("api_key", "") or VAPI_API_KEY
    aid = args.get("assistant_id", "")
    if not api_key or not aid:
        return {"error": "api_key and assistant_id required"}
    result = _vapi("DELETE", f"/assistant/{aid}", api_key)
    # Bug fix: the old code ignored the API result and reported success
    # even when the delete failed; propagate errors like the sibling handlers.
    if "error" in result and "status_code" in result:
        return result
    return {"ok": True, "deleted": True, "assistant_id": aid}


def handle_vapi_list_calls(args):
    """List VAPI calls with optional assistant_id / status / limit filters."""
    api_key = args.get("api_key", "") or VAPI_API_KEY
    if not api_key:
        return {"error": "api_key required"}
    # Bug fix: the old code seeded params with the api_key itself
    # ({k: v ... if k == "api_key"}), leaking the secret into the query
    # string of every request. Filters start empty instead.
    params = {}
    for f in ["assistant_id", "status", "limit"]:
        if f in args and args[f] not in (None, ""):
            params[f] = args[f]
    result = _vapi("GET", "/call", api_key, params=params)
    if isinstance(result, list):
        return {"count": len(result), "calls": result}
    if "data" in result:
        return {"count": len(result["data"]), "calls": result["data"]}
    return {"count": 0, "calls": []}


def handle_vapi_get_call(args):
    """Fetch full details for a single VAPI call."""
    key = args.get("api_key", "") or VAPI_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    return _vapi("GET", f"/call/{call_id}", key)


def handle_vapi_initiate_outbound_call(args):
    """Start an outbound VAPI call from assistant_id to to_number (E.164)."""
    key = args.get("api_key", "") or VAPI_API_KEY
    if not key:
        return {"error": "api_key required"}
    dest = args.get("to_number", "")
    assistant = args.get("assistant_id", "")
    if not dest:
        return {"error": "to_number (E.164) required"}
    if not assistant:
        return {"error": "assistant_id required"}
    body = {"assistant_id": assistant, "to": dest}
    for extra in ("phone_number_id", "customer_caller_id", "noise_cancellation"):
        if extra in args and args[extra] not in (None, ""):
            body[extra] = args[extra]
    resp = _vapi("POST", "/call", key, body)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "call": resp, "call_id": resp.get("id", ""),
            "status": resp.get("status", "")}


def handle_vapi_list_phone_numbers(args):
    """List phone numbers provisioned in the VAPI account."""
    key = args.get("api_key", "") or VAPI_API_KEY
    if not key:
        return {"error": "api_key required"}
    resp = _vapi("GET", "/phone-number", key,
                 params={"limit": args.get("limit", 20)})
    if isinstance(resp, list):
        return {"count": len(resp), "phone_numbers": resp}
    if "data" in resp:
        rows = resp["data"]
        return {"count": len(rows), "phone_numbers": rows}
    return {"count": 0, "phone_numbers": []}


def handle_vapi_provision_phone_number(args):
    """Provision a phone number in VAPI, optionally bound to an assistant."""
    key = args.get("api_key", "") or VAPI_API_KEY
    number = args.get("phone_number", "")
    if not (key and number):
        return {"error": "api_key and phone_number (E.164) required"}
    body = {"phone_number": number}
    for extra in ("assistant_id", "call_screening_enabled"):
        if extra in args and args[extra] not in (None, ""):
            body[extra] = args[extra]
    resp = _vapi("POST", "/phone-number", key, body)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "phone_number": resp, "id": resp.get("id", "")}


def handle_vapi_voice_ingest(args):
    """Fetch a VAPI call transcript and store it in LogicMem voice memory.

    Requires ``logicmem_api_key`` and ``call_id``; the VAPI key comes from
    ``vapi_api_key`` or the VAPI_API_KEY env fallback.
    """
    lm_key = args.get("logicmem_api_key", "")
    call_id = args.get("call_id", "")
    if not lm_key:
        return {"error": "logicmem_api_key required"}
    if not call_id:
        return {"error": "call_id required"}
    # Resolve the VAPI key only after validation, and reject an empty key
    # up front — the old code sent a blank bearer token to the API.
    vapi_key = args.get("vapi_api_key", "") or VAPI_API_KEY
    if not vapi_key:
        return {"error": "vapi_api_key required - pass it or set VAPI_API_KEY"}
    call_data = _vapi("GET", f"/call/{call_id}", vapi_key)
    if "error" in call_data and "status_code" in call_data:
        return call_data
    messages = call_data.get("messages", [])
    customer = call_data.get("customer", {}) or {}
    phone = customer.get("number", "") if isinstance(customer, dict) else str(customer)
    vapi_payload = {
        "type": "end-of-call-report",
        "call": {"id": call_id,
                 "started_at": call_data.get("started_at", ""),
                 "ended_at": call_data.get("ended_at", "")},
        "customer": {"number": phone},
        "assistant_id": call_data.get("assistant_id", ""),
        "messages": messages
    }
    voice_result = _voice_call("POST", "/v1/voice/ingest", lm_key, vapi_payload)
    if isinstance(voice_result, dict) and "error" in voice_result:
        return voice_result
    return {"ok": True, "utterances_stored": voice_result.get("utterances_stored", 0),
            "call_id": call_id, "caller": phone}


def handle_vapi_voice_recall(args):
    """Recall stored voice-memory utterances for a caller (VAPI flows)."""
    lm_key = args.get("logicmem_api_key", "")
    number = args.get("caller_number", "")
    depth = min(int(args.get("top_k", 5)), 20)  # capped at 20
    if not (lm_key and number):
        return {"error": "logicmem_api_key and caller_number required"}
    resp = _voice_call("POST", "/v1/voice/recall", lm_key,
                       {"caller_number": number, "top_k": depth})
    if isinstance(resp, dict) and "error" in resp:
        return resp
    return {
        "caller_number": number,
        "utterances": resp.get("utterances", []),
        "count": resp.get("count", 0),
        "latency_ms": resp.get("latency_ms", "?"),
        "cache_hit": resp.get("cache_hit", False)
    }


def handle_vapi_get_call_transcript(args):
    """Fetch a VAPI call and flatten its messages into a transcript list."""
    key = args.get("api_key", "") or VAPI_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    call_data = _vapi("GET", f"/call/{call_id}", key)
    if "error" in call_data and "status_code" in call_data:
        return call_data
    transcript = [{
        "speaker": msg.get("role", "unknown"),
        "content": msg.get("content", ""),
        "start_ms": msg.get("start_time", msg.get("start_ms", 0)),
        "end_ms": msg.get("end_time", msg.get("end_ms", 0)),
    } for msg in call_data.get("messages", [])]
    return {"call_id": call_id, "duration_ms": call_data.get("duration", 0),
            "transcript": transcript, "count": len(transcript)}


def handle_vapi_hangup_call(args):
    """Hang up an active VAPI call."""
    key = args.get("api_key", "") or VAPI_API_KEY
    call_id = args.get("call_id", "")
    if not (key and call_id):
        return {"error": "api_key and call_id required"}
    resp = _vapi("POST", f"/call/{call_id}/hang-up", key)
    if "error" in resp and "status_code" in resp:
        return resp
    return {"ok": True, "call_id": call_id, "status": "hung-up"}


if __name__ == "__main__":
    # Script entry point: announce configuration, list tools, serve forever.
    listen_port = int(os.getenv("MCP_PORT", "8422"))
    key_note = " ✅ ZK key set" if ZK_ENCRYPTION_KEY else "   local memory.key"
    print(f"LogicFrame MCP Server v2 starting on port {listen_port}")
    print(f"Memory server: {MEMORY_SERVER} [{key_note}]")
    print(f"Tools ({len(TOOLS)}):")
    for tool in TOOLS:
        print(f"  - {tool['name']}")
    httpd = HTTPServer(("0.0.0.0", listen_port), MCPHandler)
    print(f"\nReady: curl http://localhost:{listen_port}/health")
    httpd.serve_forever()
