"""
Claude Code Usage Dashboard — FastAPI Backend
Parses ~/.claude/ session data, serves stats via REST API.
Leaderboard: friends' dashboards auto-push stats here.
"""

import json
import os
from contextlib import asynccontextmanager
from datetime import datetime, timezone
from pathlib import Path

from typing import Optional

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel

from database import get_db, init_db
from parser import full_parse, estimate_cost_for_tokens, PRICING

# Leaderboard config — friends set these env vars
PLAYER_NAME = os.environ.get("PLAYER_NAME", "")  # display name pushed to the leaderboard; empty disables self-submission
LEADERBOARD_URL = os.environ.get("LEADERBOARD_URL", "")  # base URL of the central leaderboard server; empty disables pushing
LEADERBOARD_ONLY = os.environ.get("LEADERBOARD_ONLY", "").lower() in ("1", "true", "yes")  # serve only leaderboard endpoints; personal-stat APIs return 404


class PlayerStats(BaseModel):
    """Stats payload a player's dashboard POSTs to /api/leaderboard/submit.

    Field names mirror the columns of the `players` table (see _upsert_player).
    All totals default to zero/empty so a minimal submission still validates.
    """

    name: str  # unique player name — used as the upsert conflict key
    total_prompts: int = 0  # human prompts across all sessions
    total_api_calls: int = 0  # assistant API calls, including subagent calls
    total_active_hours: float = 0
    total_input_tokens: int = 0
    total_output_tokens: int = 0
    total_cache_read: int = 0
    total_cache_write: int = 0
    total_lines_written: int = 0
    total_cost: float = 0  # estimated cost in dollars
    total_sessions: int = 0
    total_projects: int = 0
    model_usage: dict = {}  # model name -> call count
    projects_data: list = []  # per-project breakdown dicts (shape built by _get_my_stats)
    earliest_session: Optional[str] = None  # ISO timestamp of first recorded message
    latest_session: Optional[str] = None  # ISO timestamp of most recent message


def _get_my_stats() -> dict:
    """Collect this machine's aggregate usage totals for leaderboard submission.

    Returns a dict matching the PlayerStats schema (minus `name`): overall
    totals, per-model call counts, and a per-project breakdown.
    """
    db = get_db()

    # Single aggregate row across every session.
    totals = db.execute("""
        SELECT COUNT(*) as total_sessions,
            COALESCE(SUM(human_prompts), 0) as total_prompts,
            COALESCE(SUM(api_calls + subagent_api_calls), 0) as total_api_calls,
            COALESCE(SUM(active_seconds), 0) as total_active_seconds,
            COALESCE(SUM(input_tokens), 0) as total_input_tokens,
            COALESCE(SUM(output_tokens), 0) as total_output_tokens,
            COALESCE(SUM(cache_read_tokens), 0) as total_cache_read,
            COALESCE(SUM(cache_write_tokens), 0) as total_cache_write,
            COALESCE(SUM(lines_written), 0) as total_lines_written,
            COALESCE(SUM(cost), 0) as total_cost,
            MIN(first_msg_at) as earliest_session,
            MAX(last_msg_at) as latest_session
        FROM sessions
    """).fetchone()

    project_count = db.execute("SELECT COUNT(*) as c FROM projects").fetchone()["c"]

    # Merge the per-session model-usage JSON blobs into one {model: calls} map.
    model_totals = {}
    for record in db.execute("SELECT model_usage FROM sessions").fetchall():
        for model_name, call_count in json.loads(record["model_usage"]).items():
            model_totals[model_name] = model_totals.get(model_name, 0) + call_count

    # Per-project breakdown
    project_rows = db.execute("""
        SELECT p.name, COUNT(s.id) as sessions,
            COALESCE(SUM(s.human_prompts), 0) as prompts,
            COALESCE(SUM(s.api_calls + s.subagent_api_calls), 0) as api_calls,
            COALESCE(SUM(s.active_seconds), 0) as active_seconds,
            COALESCE(SUM(s.input_tokens), 0) as input_tokens,
            COALESCE(SUM(s.output_tokens), 0) as output_tokens,
            COALESCE(SUM(s.cache_read_tokens), 0) as cache_read,
            COALESCE(SUM(s.cache_write_tokens), 0) as cache_write,
            COALESCE(SUM(s.lines_written), 0) as lines_written,
            COALESCE(SUM(s.cost), 0) as cost
        FROM projects p
        LEFT JOIN sessions s ON s.project_id = p.id
        GROUP BY p.id
        ORDER BY cost DESC
    """).fetchall()

    projects_data = []
    for proj in project_rows:
        projects_data.append({
            "name": proj["name"], "sessions": proj["sessions"], "prompts": proj["prompts"],
            "api_calls": proj["api_calls"], "active_hours": round(proj["active_seconds"] / 3600, 2),
            "input_tokens": proj["input_tokens"], "output_tokens": proj["output_tokens"],
            "cache_read": proj["cache_read"], "cache_write": proj["cache_write"],
            "lines_written": proj["lines_written"], "cost": round(proj["cost"], 2),
        })

    db.close()
    return {
        "total_prompts": totals["total_prompts"],
        "total_api_calls": totals["total_api_calls"],
        "total_active_hours": round(totals["total_active_seconds"] / 3600, 2),
        "total_input_tokens": totals["total_input_tokens"],
        "total_output_tokens": totals["total_output_tokens"],
        "total_cache_read": totals["total_cache_read"],
        "total_cache_write": totals["total_cache_write"],
        "total_lines_written": totals["total_lines_written"],
        "total_cost": round(totals["total_cost"], 2),
        "total_sessions": totals["total_sessions"],
        "total_projects": project_count,
        "model_usage": model_totals,
        "projects_data": projects_data,
        "earliest_session": totals["earliest_session"],
        "latest_session": totals["latest_session"],
    }


def _push_to_leaderboard():
    """Push this machine's stats to the central leaderboard server.

    No-op unless both LEADERBOARD_URL and PLAYER_NAME are set. Any failure is
    logged and swallowed — an unreachable leaderboard must never break the
    local dashboard.
    """
    if not LEADERBOARD_URL or not PLAYER_NAME:
        return
    try:
        import urllib.request

        stats = _get_my_stats()
        stats["name"] = PLAYER_NAME
        data = json.dumps(stats).encode()
        req = urllib.request.Request(
            f"{LEADERBOARD_URL.rstrip('/')}/api/leaderboard/submit",
            data=data,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        # Use the response as a context manager: a bare urlopen() leaves the
        # HTTP response (and its socket) unclosed.
        with urllib.request.urlopen(req, timeout=10):
            pass
        print(f"Leaderboard push OK → {LEADERBOARD_URL} as '{PLAYER_NAME}'")
    except Exception as e:
        print(f"Leaderboard push failed: {e}")


def _upsert_player(conn, name, stats):
    """Insert or update a player in the leaderboard table."""
    conn.execute("""
        INSERT INTO players (name, total_prompts, total_api_calls, total_active_hours,
            total_input_tokens, total_output_tokens, total_cache_read, total_cache_write,
            total_lines_written, total_cost, total_sessions, total_projects,
            model_usage, projects_data, earliest_session, latest_session, last_seen)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(name) DO UPDATE SET
            total_prompts=excluded.total_prompts, total_api_calls=excluded.total_api_calls,
            total_active_hours=excluded.total_active_hours,
            total_input_tokens=excluded.total_input_tokens, total_output_tokens=excluded.total_output_tokens,
            total_cache_read=excluded.total_cache_read, total_cache_write=excluded.total_cache_write,
            total_lines_written=excluded.total_lines_written, total_cost=excluded.total_cost,
            total_sessions=excluded.total_sessions, total_projects=excluded.total_projects,
            model_usage=excluded.model_usage, projects_data=excluded.projects_data,
            earliest_session=excluded.earliest_session, latest_session=excluded.latest_session,
            last_seen=excluded.last_seen
    """, (
        name, stats["total_prompts"], stats["total_api_calls"], stats["total_active_hours"],
        stats["total_input_tokens"], stats["total_output_tokens"],
        stats["total_cache_read"], stats["total_cache_write"],
        stats["total_lines_written"], stats["total_cost"],
        stats["total_sessions"], stats["total_projects"],
        json.dumps(stats.get("model_usage", {})),
        json.dumps(stats.get("projects_data", [])),
        stats["earliest_session"], stats["latest_session"],
        datetime.now(timezone.utc).isoformat(),
    ))
    conn.commit()


def _submit_self_to_leaderboard():
    """Submit own stats to the local leaderboard table (if PLAYER_NAME is set)."""
    if not PLAYER_NAME:
        return
    stats = _get_my_stats()
    conn = get_db()
    try:
        # _upsert_player commits internally; the previous extra commit here
        # was redundant.
        _upsert_player(conn, PLAYER_NAME, stats)
    finally:
        # Close even if the upsert raises, so the connection never leaks.
        conn.close()


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan: init DB, run an initial parse/publish, refresh hourly.

    In LEADERBOARD_ONLY mode no local session parsing happens; the instance
    only registers itself (when PLAYER_NAME is set) and serves the leaderboard.
    """
    import asyncio

    init_db()

    def _parse_and_publish():
        # Re-parse local session files, then publish stats to the local
        # leaderboard table and the remote leaderboard server.
        conn = get_db()
        try:
            full_parse(conn)
        finally:
            conn.close()  # don't leak the connection if parsing raises
        _submit_self_to_leaderboard()
        _push_to_leaderboard()

    if not LEADERBOARD_ONLY:
        _parse_and_publish()
    elif PLAYER_NAME:
        # Leaderboard-only mode still registers self if PLAYER_NAME is set
        _submit_self_to_leaderboard()

    async def auto_refresh():
        # Background loop: re-parse and re-publish once an hour.
        while True:
            await asyncio.sleep(3600)
            try:
                if not LEADERBOARD_ONLY:
                    _parse_and_publish()
                    print("Hourly auto-refresh complete")
            except Exception as e:
                print(f"Auto-refresh failed: {e}")

    task = asyncio.create_task(auto_refresh())
    yield
    # Cancel AND await the background task so shutdown doesn't leave a
    # dangling task (bare .cancel() only requests cancellation).
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass


app = FastAPI(title="Claude Dashboard", lifespan=lifespan)

# Wide-open CORS: friends' dashboards running on arbitrary origins POST their
# stats to /api/leaderboard/submit.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)


from fastapi.responses import JSONResponse

def _blocked():
    """Uniform 404 payload for endpoints disabled in leaderboard-only mode."""
    payload = {"error": "not available"}
    return JSONResponse(payload, status_code=404)


@app.get("/api/overview")
def overview():
    """Global stats across all projects."""
    # NOTE: the docstring above was previously placed after the guard below,
    # where it was a dead string expression rather than a docstring.
    if LEADERBOARD_ONLY:
        return _blocked()
    conn = get_db()
    try:
        row = conn.execute("""
            SELECT
                COUNT(*) as total_sessions,
                COALESCE(SUM(human_prompts), 0) as total_prompts,
                COALESCE(SUM(api_calls + subagent_api_calls), 0) as total_api_calls,
                COALESCE(SUM(active_seconds), 0) as total_active_seconds,
                COALESCE(SUM(wall_seconds), 0) as total_wall_seconds,
                COALESCE(SUM(input_tokens), 0) as total_input_tokens,
                COALESCE(SUM(output_tokens), 0) as total_output_tokens,
                COALESCE(SUM(cache_read_tokens), 0) as total_cache_read,
                COALESCE(SUM(cache_write_tokens), 0) as total_cache_write,
                COALESCE(SUM(lines_written), 0) as total_lines_written,
                COALESCE(SUM(cost), 0) as total_cost,
                MIN(first_msg_at) as earliest_session,
                MAX(last_msg_at) as latest_session
            FROM sessions
        """).fetchone()

        # Per-project rollup, most active first.
        projects = conn.execute("""
            SELECT p.id, p.name, COUNT(s.id) as session_count,
                COALESCE(SUM(s.human_prompts), 0) as prompts,
                COALESCE(SUM(s.api_calls + s.subagent_api_calls), 0) as api_calls,
                COALESCE(SUM(s.active_seconds), 0) as active_seconds,
                COALESCE(SUM(s.input_tokens), 0) as input_tokens,
                COALESCE(SUM(s.output_tokens), 0) as output_tokens,
                COALESCE(SUM(s.cache_read_tokens), 0) as cache_read,
                COALESCE(SUM(s.cache_write_tokens), 0) as cache_write,
                COALESCE(SUM(s.lines_written), 0) as lines_written,
                COALESCE(SUM(s.cost), 0) as cost
            FROM projects p
            LEFT JOIN sessions s ON s.project_id = p.id
            GROUP BY p.id
            ORDER BY active_seconds DESC
        """).fetchall()

        project_list = []
        for p in projects:
            project_list.append({
                "id": p["id"],
                "name": p["name"],
                "sessions": p["session_count"],
                "prompts": p["prompts"],
                "api_calls": p["api_calls"],
                "active_hours": round(p["active_seconds"] / 3600, 2),
                "input_tokens": p["input_tokens"],
                "output_tokens": p["output_tokens"],
                "cache_read": p["cache_read"],
                "cache_write": p["cache_write"],
                "lines_written": p["lines_written"],
                "cost": round(p["cost"], 2),
            })

        return {
            "total_sessions": row["total_sessions"],
            "total_prompts": row["total_prompts"],
            "total_api_calls": row["total_api_calls"],
            "total_active_hours": round(row["total_active_seconds"] / 3600, 2),
            "total_wall_hours": round(row["total_wall_seconds"] / 3600, 2),
            "total_input_tokens": row["total_input_tokens"],
            "total_output_tokens": row["total_output_tokens"],
            "total_cache_read": row["total_cache_read"],
            "total_cache_write": row["total_cache_write"],
            "total_lines_written": row["total_lines_written"],
            "total_cost": round(row["total_cost"], 2),
            "earliest_session": row["earliest_session"],
            "latest_session": row["latest_session"],
            "projects": project_list,
        }
    finally:
        # Close the connection even if a query raises.
        conn.close()


@app.get("/api/projects/{project_id}")
def project_detail(project_id: int):
    """Detailed stats for a single project: sessions, daily activity, recent prompts."""
    if LEADERBOARD_ONLY:
        return _blocked()
    conn = get_db()
    try:
        proj = conn.execute("SELECT * FROM projects WHERE id = ?", (project_id,)).fetchone()
        if not proj:
            return {"error": "Project not found"}

        sessions = conn.execute("""
            SELECT * FROM sessions WHERE project_id = ?
            ORDER BY first_msg_at DESC
        """, (project_id,)).fetchall()

        # Daily activity counts/tokens from the prompts table.
        daily = conn.execute("""
            SELECT DATE(timestamp) as day,
                COUNT(CASE WHEN type='human' THEN 1 END) as prompts,
                COUNT(CASE WHEN type='assistant' THEN 1 END) as api_calls,
                SUM(input_tokens) as input_tokens,
                SUM(output_tokens) as output_tokens,
                SUM(cache_read_tokens) as cache_read,
                SUM(cache_write_tokens) as cache_write
            FROM prompts
            WHERE project_id = ?
            GROUP BY DATE(timestamp)
            ORDER BY day
        """, (project_id,)).fetchall()

        # Daily cost for ALL days in one grouped query (the previous version
        # ran one query per day — an N+1 pattern). Parameter order must match
        # the CASE branches: haiku, sonnet, then the ELSE (opus) fallback.
        day_cost_rows = conn.execute("""
            SELECT DATE(timestamp) as day,
                SUM(
                    CASE
                        WHEN model LIKE '%haiku%' THEN
                            input_tokens * ? + output_tokens * ? +
                            cache_read_tokens * ? + cache_write_tokens * ?
                        WHEN model LIKE '%sonnet%' THEN
                            input_tokens * ? + output_tokens * ? +
                            cache_read_tokens * ? + cache_write_tokens * ?
                        ELSE
                            input_tokens * ? + output_tokens * ? +
                            cache_read_tokens * ? + cache_write_tokens * ?
                    END
                ) as cost
            FROM prompts
            WHERE project_id = ? AND type = 'assistant'
            GROUP BY DATE(timestamp)
        """, (
            PRICING["haiku"]["input"], PRICING["haiku"]["output"],
            PRICING["haiku"]["cache_read"], PRICING["haiku"]["cache_write"],
            PRICING["sonnet"]["input"], PRICING["sonnet"]["output"],
            PRICING["sonnet"]["cache_read"], PRICING["sonnet"]["cache_write"],
            PRICING["opus"]["input"], PRICING["opus"]["output"],
            PRICING["opus"]["cache_read"], PRICING["opus"]["cache_write"],
            project_id,
        )).fetchall()
        cost_by_day = {r["day"]: (r["cost"] or 0) for r in day_cost_rows}

        # Most recent human prompts for the activity feed.
        recent = conn.execute("""
            SELECT timestamp, type, model, content_preview,
                input_tokens, output_tokens, cache_read_tokens, cache_write_tokens
            FROM prompts
            WHERE project_id = ? AND type = 'human'
            ORDER BY timestamp DESC
            LIMIT 50
        """, (project_id,)).fetchall()

        session_list = []
        for s in sessions:
            session_list.append({
                "id": s["id"],
                "first_msg_at": s["first_msg_at"],
                "last_msg_at": s["last_msg_at"],
                "wall_hours": round(s["wall_seconds"] / 3600, 2),
                "active_hours": round(s["active_seconds"] / 3600, 2),
                "human_prompts": s["human_prompts"],
                "api_calls": s["api_calls"],
                "subagent_api_calls": s["subagent_api_calls"],
                "input_tokens": s["input_tokens"],
                "output_tokens": s["output_tokens"],
                "cache_read": s["cache_read_tokens"],
                "cache_write": s["cache_write_tokens"],
                # Guarded: tolerate rows without a lines_written column.
                "lines_written": s["lines_written"] if "lines_written" in s.keys() else 0,
                "model_usage": json.loads(s["model_usage"]),
                "cost": round(s["cost"], 2),
            })

        daily_list = []
        for d in daily:
            daily_list.append({
                "day": d["day"],
                "prompts": d["prompts"],
                "api_calls": d["api_calls"],
                "input_tokens": d["input_tokens"] or 0,
                "output_tokens": d["output_tokens"] or 0,
                # Days with no assistant rows have no cost row -> 0, matching
                # the previous per-day query's NULL-to-0 behavior.
                "cost": round(cost_by_day.get(d["day"], 0), 2),
            })

        return {
            "project": {"id": proj["id"], "name": proj["name"]},
            "sessions": session_list,
            "daily_activity": daily_list,
            "recent_prompts": [dict(r) for r in recent],
        }
    finally:
        conn.close()


@app.get("/api/vibe-stats/{project_name}")
def vibe_stats(project_name: str):
    """Pre-formatted vibe code stats for embedding in project pages."""
    if LEADERBOARD_ONLY:
        return _blocked()
    conn = get_db()
    proj = conn.execute("SELECT * FROM projects WHERE name = ?", (project_name,)).fetchone()
    if not proj:
        conn.close()
        return {"error": f"Project not found: {project_name}"}

    # All sessions, oldest first — the last element supplies `last_updated`.
    sessions = conn.execute("""
        SELECT * FROM sessions WHERE project_id = ? ORDER BY first_msg_at ASC
    """, (proj["id"],)).fetchall()

    # Session-level totals summed in Python.
    total_prompts = sum(s["human_prompts"] for s in sessions)
    total_api = sum(s["api_calls"] + s["subagent_api_calls"] for s in sessions)
    total_active = sum(s["active_seconds"] for s in sessions)
    total_input = sum(s["input_tokens"] for s in sessions)
    total_output = sum(s["output_tokens"] for s in sessions)
    total_cache_read = sum(s["cache_read_tokens"] for s in sessions)
    total_cache_write = sum(s["cache_write_tokens"] for s in sessions)
    # Guarded in case some rows lack a lines_written column — TODO confirm
    # whether older session rows actually omit it.
    total_lines = sum((s["lines_written"] if "lines_written" in s.keys() else 0) for s in sessions)
    total_cost = sum(s["cost"] for s in sessions)

    # Cost breakdown using per-model pricing from prompts table
    # NOTE: the positional parameters below bind top-to-bottom across the four
    # CASE expressions — cache_read (haiku/sonnet/opus), then cache_write,
    # then output, then input — and must stay in that exact order.
    breakdown_rows = conn.execute("""
        SELECT
            SUM(CASE
                WHEN model LIKE '%haiku%' THEN cache_read_tokens * ?
                WHEN model LIKE '%sonnet%' THEN cache_read_tokens * ?
                ELSE cache_read_tokens * ?
            END) as cache_read_cost,
            SUM(CASE
                WHEN model LIKE '%haiku%' THEN cache_write_tokens * ?
                WHEN model LIKE '%sonnet%' THEN cache_write_tokens * ?
                ELSE cache_write_tokens * ?
            END) as cache_write_cost,
            SUM(CASE
                WHEN model LIKE '%haiku%' THEN output_tokens * ?
                WHEN model LIKE '%sonnet%' THEN output_tokens * ?
                ELSE output_tokens * ?
            END) as output_cost,
            SUM(CASE
                WHEN model LIKE '%haiku%' THEN input_tokens * ?
                WHEN model LIKE '%sonnet%' THEN input_tokens * ?
                ELSE input_tokens * ?
            END) as input_cost
        FROM prompts
        WHERE project_id = ? AND type = 'assistant'
    """, (
        PRICING["haiku"]["cache_read"], PRICING["sonnet"]["cache_read"], PRICING["opus"]["cache_read"],
        PRICING["haiku"]["cache_write"], PRICING["sonnet"]["cache_write"], PRICING["opus"]["cache_write"],
        PRICING["haiku"]["output"], PRICING["sonnet"]["output"], PRICING["opus"]["output"],
        PRICING["haiku"]["input"], PRICING["sonnet"]["input"], PRICING["opus"]["input"],
        proj["id"],
    )).fetchone()

    # Breakdown costs are rounded to whole dollars (display-oriented), unlike
    # the 2-decimal rounding used elsewhere in this file.
    cost_breakdown = [
        {"label": "Cache reads", "tokens": total_cache_read,
         "cost": round(breakdown_rows["cache_read_cost"] or 0)},
        {"label": "Cache writes", "tokens": total_cache_write,
         "cost": round(breakdown_rows["cache_write_cost"] or 0)},
        {"label": "Output", "tokens": total_output,
         "cost": round(breakdown_rows["output_cost"] or 0)},
        {"label": "Input", "tokens": total_input,
         "cost": round(breakdown_rows["input_cost"] or 0)},
    ]
    # Bar widths: percentage relative to the largest bucket (not the total).
    max_cost = max((c["cost"] for c in cost_breakdown), default=1)
    for c in cost_breakdown:
        c["pct"] = round(c["cost"] / max_cost * 100) if max_cost else 0

    session_list = []
    for s in sessions:
        session_list.append({
            "first_msg_at": s["first_msg_at"],
            "last_msg_at": s["last_msg_at"],
            "active_hours": round(s["active_seconds"] / 3600, 2),
            "prompts": s["human_prompts"],
            "api_calls": s["api_calls"] + s["subagent_api_calls"],
            "cost": round(s["cost"], 2),
        })

    conn.close()
    return {
        "project": project_name,
        "prompts": total_prompts,
        "api_calls": total_api,
        "active_hours": round(total_active / 3600, 1),
        "total_tokens": total_input + total_output,
        "input_tokens": total_input,
        "output_tokens": total_output,
        "cache_read": total_cache_read,
        "cache_write": total_cache_write,
        "lines_written": total_lines,
        "cost": round(total_cost),
        "cost_per_prompt": round(total_cost / total_prompts, 2) if total_prompts else 0,
        "cost_per_line": round(total_cost / total_lines, 4) if total_lines else 0,
        "sessions": len(sessions),
        "cost_breakdown": cost_breakdown,
        "session_list": session_list,
        "last_updated": sessions[-1]["last_msg_at"] if sessions else None,
    }


@app.get("/api/my-stats")
def my_stats(name: str = "Anonymous"):
    """Export stats for leaderboard sharing.

    Dropped dead/shadowing local imports from the previous version: `socket`
    was never used, `datetime`/`timezone` shadowed the module-level imports,
    and `json` was re-imported inside the per-session loop.
    """
    if LEADERBOARD_ONLY:
        return _blocked()
    conn = get_db()
    try:
        row = conn.execute("""
            SELECT
                COUNT(*) as total_sessions,
                COALESCE(SUM(human_prompts), 0) as total_prompts,
                COALESCE(SUM(api_calls + subagent_api_calls), 0) as total_api_calls,
                COALESCE(SUM(active_seconds), 0) as total_active_seconds,
                COALESCE(SUM(input_tokens), 0) as total_input_tokens,
                COALESCE(SUM(output_tokens), 0) as total_output_tokens,
                COALESCE(SUM(cache_read_tokens), 0) as total_cache_read,
                COALESCE(SUM(cache_write_tokens), 0) as total_cache_write,
                COALESCE(SUM(lines_written), 0) as total_lines_written,
                COALESCE(SUM(cost), 0) as total_cost,
                MIN(first_msg_at) as earliest_session,
                MAX(last_msg_at) as latest_session
            FROM sessions
        """).fetchone()

        total_projects = conn.execute("SELECT COUNT(*) as c FROM projects").fetchone()["c"]

        # Aggregate model usage across all sessions
        all_sessions = conn.execute("SELECT model_usage FROM sessions").fetchall()
        model_totals = {}
        for s in all_sessions:
            for m, c in json.loads(s["model_usage"]).items():
                model_totals[m] = model_totals.get(m, 0) + c

        return {
            "name": name,
            "total_prompts": row["total_prompts"],
            "total_api_calls": row["total_api_calls"],
            "total_active_hours": round(row["total_active_seconds"] / 3600, 2),
            "total_input_tokens": row["total_input_tokens"],
            "total_output_tokens": row["total_output_tokens"],
            "total_cache_read": row["total_cache_read"],
            "total_cache_write": row["total_cache_write"],
            "total_lines_written": row["total_lines_written"],
            "total_cost": round(row["total_cost"], 2),
            "total_sessions": row["total_sessions"],
            "total_projects": total_projects,
            "model_usage": model_totals,
            "earliest_session": row["earliest_session"],
            "latest_session": row["latest_session"],
            "exported_at": datetime.now(timezone.utc).isoformat(),
        }
    finally:
        conn.close()


@app.post("/api/leaderboard/submit")
def leaderboard_submit(stats: PlayerStats):
    """Accept a stats payload pushed from a friend's dashboard and upsert it."""
    payload = stats.model_dump()
    conn = get_db()
    _upsert_player(conn, stats.name, payload)
    conn.close()
    return {"status": "ok", "player": stats.name}


@app.get("/api/leaderboard")
def leaderboard():
    """List every registered player's totals, most expensive first."""
    conn = get_db()
    rows = conn.execute("""
        SELECT * FROM players ORDER BY total_cost DESC
    """).fetchall()
    conn.close()

    players = []
    for player in rows:
        players.append({
            "name": player["name"],
            "total_prompts": player["total_prompts"],
            "total_api_calls": player["total_api_calls"],
            "total_active_hours": player["total_active_hours"],
            "total_input_tokens": player["total_input_tokens"],
            "total_output_tokens": player["total_output_tokens"],
            "total_cache_read": player["total_cache_read"],
            "total_cache_write": player["total_cache_write"],
            "total_lines_written": player["total_lines_written"],
            "total_cost": round(player["total_cost"], 2),
            "total_sessions": player["total_sessions"],
            "total_projects": player["total_projects"],
            "model_usage": json.loads(player["model_usage"]),
            "last_seen": player["last_seen"],
        })
    return players


@app.get("/api/leaderboard/{player_name}")
def leaderboard_player(player_name: str):
    """Return a player's per-project breakdown. Only available on personal dashboard."""
    if LEADERBOARD_ONLY:
        return _blocked()
    conn = get_db()
    row = conn.execute("SELECT * FROM players WHERE name = ?", (player_name,)).fetchone()
    conn.close()
    if not row:
        return JSONResponse({"error": "Player not found"}, status_code=404)
    return {
        "name": row["name"],
        "projects": json.loads(row["projects_data"] or "[]"),
        # NULL-guard added to match projects_data above — json.loads(None)
        # would raise a TypeError for rows with no stored model_usage.
        "model_usage": json.loads(row["model_usage"] or "{}"),
        "last_seen": row["last_seen"],
    }


@app.post("/api/refresh")
def refresh():
    """Re-parse all session files (incremental — skips unchanged files)."""
    if LEADERBOARD_ONLY:
        return _blocked()
    conn = get_db()
    try:
        result = full_parse(conn)
    finally:
        # Close the connection even when parsing raises.
        conn.close()
    _submit_self_to_leaderboard()
    _push_to_leaderboard()
    return result


@app.get("/api/timeline")
def timeline():
    """Most recent human prompts across every project, for the timeline view."""
    conn = get_db()
    prompt_rows = conn.execute("""
        SELECT p.timestamp, p.type, p.model, p.content_preview,
            p.input_tokens, p.output_tokens, p.cache_read_tokens, p.cache_write_tokens,
            pr.name as project_name
        FROM prompts p
        JOIN projects pr ON pr.id = p.project_id
        WHERE p.type = 'human'
        ORDER BY p.timestamp DESC
        LIMIT 200
    """).fetchall()
    conn.close()
    entries = [dict(row) for row in prompt_rows]
    return entries


# Serve frontend static files. Mounted last so /api/* routes above win; the
# mount is skipped entirely when the build directory doesn't exist.
if LEADERBOARD_ONLY:
    # Leaderboard-only mode: serve the standalone leaderboard page
    leaderboard_dir = Path(__file__).parent.parent / "leaderboard"
    if leaderboard_dir.exists():
        app.mount("/", StaticFiles(directory=str(leaderboard_dir), html=True), name="leaderboard")
else:
    # Full dashboard mode: serve the Vue app build
    frontend_dist = Path(__file__).parent.parent / "frontend" / "dist"
    if frontend_dist.exists():
        app.mount("/", StaticFiles(directory=str(frontend_dist), html=True), name="frontend")


if __name__ == "__main__":
    import uvicorn

    # PORT env var overrides the default dev port (3400).
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", 3400)))
