diff --git a/backend/api/core/cit_utils.py b/backend/api/core/cit_utils.py index 1e33ce13..5e0461ae 100644 --- a/backend/api/core/cit_utils.py +++ b/backend/api/core/cit_utils.py @@ -14,6 +14,7 @@ from fastapi.concurrency import run_in_threadpool from .config import settings +from ..services.cit_db_service import cits_dp_service def _is_postgres_configured() -> bool: @@ -94,5 +95,15 @@ async def load_sr_and_check( if not screening: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No screening database configured for this systematic review") + # Best-effort runtime schema evolution for agentic screening. + # CAN-SR uses per-upload screening tables, so we may need to add the + # validation columns to the specific table referenced by the SR. + try: + table_name = (screening or {}).get("table_name") or "citations" + await run_in_threadpool(cits_dp_service.ensure_step_validation_columns, table_name) + except Exception: + # Don't block requests if the DB isn't ready/configured. + pass + return sr, screening diff --git a/backend/api/screen/agentic_utils.py b/backend/api/screen/agentic_utils.py new file mode 100644 index 00000000..1250287b --- /dev/null +++ b/backend/api/screen/agentic_utils.py @@ -0,0 +1,95 @@ +"""backend.api.screen.agentic_utils + +Utilities for the GREP-Agent style "screening + critical" workflow. + +We keep this module small and dependency-free so routers can reuse the helpers +for title/abstract and fulltext pipelines. 
+""" + +from __future__ import annotations + +import re +from dataclasses import dataclass +from typing import Optional + + +@dataclass +class ParsedAgentXML: + answer: str + confidence: float + rationale: str + parse_ok: bool + + +_TAG_RE_CACHE: dict[str, re.Pattern[str]] = {} + + +def _tag_re(tag: str) -> re.Pattern[str]: + if tag not in _TAG_RE_CACHE: + _TAG_RE_CACHE[tag] = re.compile(rf"<{tag}>(.*?)", re.IGNORECASE | re.DOTALL) + return _TAG_RE_CACHE[tag] + + +def parse_agent_xml(text: str) -> ParsedAgentXML: + """Parse , , tags from model output.""" + + raw = (text or "").strip() + ans_m = _tag_re("answer").search(raw) + conf_m = _tag_re("confidence").search(raw) + rat_m = _tag_re("rationale").search(raw) + + answer = (ans_m.group(1).strip() if ans_m else "") + rationale = (rat_m.group(1).strip() if rat_m else "") + + conf_val = 0.0 + if conf_m: + try: + conf_val = float(conf_m.group(1).strip()) + except Exception: + conf_val = 0.0 + conf_val = max(0.0, min(1.0, conf_val)) + + parse_ok = bool(ans_m and conf_m) + return ParsedAgentXML(answer=answer, confidence=conf_val, rationale=rationale, parse_ok=parse_ok) + + +def resolve_option(raw_answer: str, options: list[str]) -> str: + """Resolve a model answer to one of the provided options (best-effort).""" + ans = (raw_answer or "").strip() + if not ans: + return ans + + # Exact match first + for opt in options or []: + if ans == opt: + return opt + + # Case-insensitive exact + ans_l = ans.lower() + for opt in options or []: + if ans_l == (opt or "").lower(): + return opt + + # Substring containment (mirrors existing CAN-SR JSON screening logic) + for opt in options or []: + if (opt or "").lower() in ans_l: + return opt + + return ans + + +def build_critical_options(*, all_options: list[str], screening_answer: str) -> list[str]: + """Forced alternatives: (all_options - {screening_answer}) + ["None of the above"].""" + base = [o for o in (all_options or []) if (o or "").strip()] + sa = (screening_answer or 
"").strip() + if sa: + base = [o for o in base if o.strip() != sa] + base.append("None of the above") + # stable unique + seen = set() + out = [] + for o in base: + if o not in seen: + seen.add(o) + out.append(o) + return out diff --git a/backend/api/screen/prompts.py b/backend/api/screen/prompts.py index ba7ac9d5..97861767 100644 --- a/backend/api/screen/prompts.py +++ b/backend/api/screen/prompts.py @@ -72,4 +72,149 @@ - Use sentence indices from the numbered full text for "evidence_sentences" - Use table numbers from the Tables section for "evidence_tables" - Use figure numbers from the Figures section for "evidence_figures" +""" + + +# --------------------------------------------------------------------------- +# Agentic screening (GREP-Agent style) prompt contracts +# --------------------------------------------------------------------------- + +# NOTE: +# CAN-SR historically used JSON output for screening. The agentic plan expects +# XML-tag parsing (, , ) so we can reuse a stable +# parsing contract across screening + critical steps. + +PROMPT_XML_TEMPLATE_TA = """ +You are a highly critical, helpful scientific evaluator completing an academic review. + +Task: +Answer the question "{question}" for the following citation. + +Citation: +{cit} + +Choose EXACTLY ONE of these options (exact text): +{options} + +Additional guidance: +{xtra} + +Output requirement: +Return ONLY the following XML tags (no Markdown, no extra prose): +... +... +... + +Confidence requirements: +- confidence is a float between 0 and 1 +- be conservative; do not overestimate confidence +""" + + +PROMPT_XML_TEMPLATE_TA_CRITICAL = """ +You are a critical reviewer double-checking another model's screening answer. + +Original question: +"{question}" + +Citation: +{cit} + +The first model answered: +"{screening_answer}" + +Now, you MUST choose from the following forced alternatives. +Rules: +- You are NOT allowed to choose the original answer. 
+- If you agree with the original answer, choose "None of the above". + +Forced alternatives (choose exactly one; exact text): +{options} + +Additional guidance: +{xtra} + +Output requirement: +Return ONLY the following XML tags (no Markdown, no extra prose): +<answer>...</answer> +<confidence>...</confidence> +<rationale>...</rationale> + +Confidence requirements: +- confidence is a float between 0 and 1 +- be conservative; do not overestimate confidence +""" + + +PROMPT_XML_TEMPLATE_FULLTEXT = """ +You are assisting with a scientific full-text screening task. + +Task: +Evaluate the question "{question}" against the paper content provided as numbered sentences (e.g., "[0] ...", "[1] ..."). + +Choose EXACTLY ONE of these options (exact text): +{options} + +Additional guidance: +{xtra} + +Full text (numbered sentences): +{fulltext} + +Tables (numbered): +{tables} + +Figures (numbered; captions correspond to images provided alongside this message): +{figures} + +Output requirement: +Return ONLY the following XML tags (no Markdown, no extra prose): +<answer>...</answer> +<confidence>...</confidence> +<rationale>...</rationale> + +Confidence requirements: +- confidence is a float between 0 and 1 +- be conservative; do not overestimate confidence +""" + + +PROMPT_XML_TEMPLATE_FULLTEXT_CRITICAL = """ +You are a critical reviewer double-checking another model's full-text screening answer. + +Original question: +"{question}" + +The first model answered: +"{screening_answer}" + +Now, you MUST choose from the following forced alternatives. +Rules: +- You are NOT allowed to choose the original answer. +- If you agree with the original answer, choose "None of the above". + +Forced alternatives (choose exactly one; exact text): +{options} + +Additional guidance: +{xtra} + +Full text (numbered sentences): +{fulltext} + +Tables (numbered): +{tables} + +Figures (numbered; captions correspond to images provided alongside this message): +{figures} + +Output requirement: +Return ONLY the following XML tags (no Markdown, no extra prose): +<answer>...</answer> +<confidence>...</confidence> +<rationale>...</rationale> 
+ +Confidence requirements: +- confidence is a float between 0 and 1 +- be conservative; do not overestimate confidence """ \ No newline at end of file diff --git a/backend/api/screen/router.py b/backend/api/screen/router.py index 10174b82..400ea9b0 100644 --- a/backend/api/screen/router.py +++ b/backend/api/screen/router.py @@ -19,13 +19,28 @@ # Import consolidated Postgres helpers if available (optional) from ..services.cit_db_service import cits_dp_service, snake_case_column, snake_case -from .prompts import PROMPT_JSON_TEMPLATE, PROMPT_JSON_TEMPLATE_FULLTEXT +from .prompts import ( + PROMPT_JSON_TEMPLATE, + PROMPT_JSON_TEMPLATE_FULLTEXT, + PROMPT_XML_TEMPLATE_TA, + PROMPT_XML_TEMPLATE_TA_CRITICAL, + PROMPT_XML_TEMPLATE_FULLTEXT, + PROMPT_XML_TEMPLATE_FULLTEXT_CRITICAL, +) +from .agentic_utils import build_critical_options, parse_agent_xml, resolve_option logger = logging.getLogger(__name__) router = APIRouter() +class AgentRunsQueryResponse(BaseModel): + sr_id: str + pipeline: str + citation_ids: List[int] + runs: List[Dict[str, Any]] + + def _normalize_int_list(v: Any) -> List[int]: if v is None: return [] @@ -85,6 +100,31 @@ class HumanClassifyRequest(BaseModel): explanation: Optional[str] = Field("", description="Optional free-text explanation from the human reviewer") confidence: Optional[float] = Field(None, ge=0.0, le=1.0, description="Optional confidence (0.0 - 1.0)") reviewer: Optional[str] = Field(None, description="Optional reviewer id or name") + + +class TitleAbstractRunRequest(BaseModel): + sr_id: str = Field(..., description="Systematic review id") + citation_id: int = Field(..., ge=1, description="Citation id (row id in the SR screening table)") + model: Optional[str] = Field(None, description="Model key/deployment to use") + temperature: float = Field(0.0, ge=0.0, le=1.0) + max_tokens: int = Field(1200, ge=64, le=4000) + prompt_version: Optional[str] = Field("v1", description="Prompt version tag for auditing") + + +class 
ValidateStepRequest(BaseModel): + sr_id: str = Field(..., description="Systematic review id") + citation_id: int = Field(..., ge=1, description="Citation id (row id in the SR screening table)") + step: str = Field("l1", description="Validation step: l1|l2|parameters") + + +class FulltextRunRequest(BaseModel): + sr_id: str = Field(..., description="Systematic review id") + citation_id: int = Field(..., ge=1, description="Citation id (row id in the SR screening table)") + model: Optional[str] = Field(None, description="Model key/deployment to use") + temperature: float = Field(0.0, ge=0.0, le=1.0) + max_tokens: int = Field(2000, ge=64, le=4000) + prompt_version: Optional[str] = Field("v1", description="Prompt version tag for auditing") + # _update_sync moved to backend.api.core.postgres.update_jsonb_column # Use run_in_threadpool(update_jsonb_column, ...) where needed. @@ -397,6 +437,630 @@ async def human_classify_citation( return {"status": "success", "sr_id": sr_id, "citation_id": citation_id, "column": col_name, "classification": classification_json} + +@router.post("/title-abstract/run") +async def run_title_abstract_agentic( + payload: TitleAbstractRunRequest, + current_user: Dict[str, Any] = Depends(get_current_active_user), +): + """Run orchestrated Title/Abstract screening + critical for one citation. + + Implements Phase 1 MVP endpoint from planning/agentic_implementation_plan. 
+ """ + + sr_id = str(payload.sr_id) + citation_id = int(payload.citation_id) + + try: + sr, screening = await load_sr_and_check(sr_id, current_user, srdb_service) + except HTTPException: + raise + except Exception as e: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Failed to load systematic review or screening: {e}", + ) + + table_name = (screening or {}).get("table_name") or "citations" + + # Ensure LLM client is available + if not azure_openai_client.is_configured(): + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Azure OpenAI client is not configured on the server", + ) + + # Load citation row + try: + row = await run_in_threadpool(cits_dp_service.get_citation_by_id, citation_id, table_name) + except RuntimeError as rexc: + raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to query screening DB: {e}") + + if not row: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Citation not found") + + # Build combined citation text (use SR include columns or fallback to title+abstract) + include_cols = [] + try: + include_cols = cits_dp_service.load_include_columns_from_criteria(sr) or [] + except Exception: + include_cols = [] + if not include_cols: + include_cols = ["title", "abstract"] + + citation_text = citations_router._build_combined_citation_from_row(row, include_cols) + + # Load L1 criteria + cp = sr.get("criteria_parsed") or sr.get("criteria") or {} + l1 = cp.get("l1") if isinstance(cp, dict) else None + questions = (l1 or {}).get("questions") if isinstance(l1, dict) else [] + possible = (l1 or {}).get("possible_answers") if isinstance(l1, dict) else [] + addinfos = (l1 or {}).get("additional_infos") if isinstance(l1, dict) else [] + questions = questions if isinstance(questions, list) else [] + possible = possible if 
isinstance(possible, list) else [] + addinfos = addinfos if isinstance(addinfos, list) else [] + + if not questions: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="SR has no L1 criteria questions configured") + + async def _call_llm(prompt: str) -> Tuple[str, Dict[str, Any], int]: + """Return (content, usage, latency_ms).""" + import time + + t0 = time.time() + messages = [{"role": "user", "content": prompt}] + resp = await azure_openai_client.chat_completion( + messages=messages, + model=payload.model, + max_tokens=payload.max_tokens, + temperature=payload.temperature, + stream=False, + ) + latency_ms = int((time.time() - t0) * 1000) + content = ((resp.get("choices") or [{}])[0].get("message") or {}).get("content") or "" + usage = resp.get("usage") or {} + return str(content), dict(usage), latency_ms + + results: List[Dict[str, Any]] = [] + user_email = str(current_user.get("email") or current_user.get("id") or "") + + for i, q in enumerate(questions): + if not isinstance(q, str) or not q.strip(): + continue + + opts = possible[i] if i < len(possible) and isinstance(possible[i], list) else [] + opts = [str(o) for o in opts if o is not None and str(o).strip()] + xtra = addinfos[i] if i < len(addinfos) and isinstance(addinfos[i], str) else "" + + if not opts: + # still return shape to UI + results.append( + { + "question": q, + "criterion_key": snake_case(q, max_len=56), + "error": "No options configured", + } + ) + continue + + options_listed = "\n".join(opts) + criterion_key = snake_case(q, max_len=56) + + # 1) screening + screening_prompt = PROMPT_XML_TEMPLATE_TA.format( + question=q, + cit=citation_text, + options=options_listed, + xtra=xtra or "", + ) + screening_raw, screening_usage, screening_latency = await _call_llm(screening_prompt) + screening_parsed = parse_agent_xml(screening_raw) + screening_answer = resolve_option(screening_parsed.answer, opts) + + try: + screening_run_id = await run_in_threadpool( + 
cits_dp_service.insert_screening_agent_run, + { + "sr_id": sr_id, + "table_name": table_name, + "citation_id": citation_id, + "pipeline": "title_abstract", + "criterion_key": criterion_key, + "stage": "screening", + "answer": screening_answer, + "confidence": screening_parsed.confidence, + "rationale": screening_parsed.rationale, + "raw_response": screening_raw, + "model": payload.model, + "prompt_version": payload.prompt_version, + "temperature": payload.temperature, + "latency_ms": screening_latency, + "input_tokens": screening_usage.get("prompt_tokens"), + "output_tokens": screening_usage.get("completion_tokens"), + }, + ) + except RuntimeError as rexc: + raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to persist screening run: {e}") + + # 2) critical + critical_opts = build_critical_options(all_options=opts, screening_answer=screening_answer) + critical_listed = "\n".join(critical_opts) + critical_prompt = PROMPT_XML_TEMPLATE_TA_CRITICAL.format( + question=q, + cit=citation_text, + screening_answer=screening_answer, + options=critical_listed, + xtra=xtra or "", + ) + critical_raw, critical_usage, critical_latency = await _call_llm(critical_prompt) + critical_parsed = parse_agent_xml(critical_raw) + critical_answer = resolve_option(critical_parsed.answer, critical_opts) + + disagrees = str(critical_answer).strip() != "None of the above" + + try: + critical_run_id = await run_in_threadpool( + cits_dp_service.insert_screening_agent_run, + { + "sr_id": sr_id, + "table_name": table_name, + "citation_id": citation_id, + "pipeline": "title_abstract", + "criterion_key": criterion_key, + "stage": "critical", + "answer": critical_answer, + "confidence": critical_parsed.confidence, + "rationale": critical_parsed.rationale, + "raw_response": critical_raw, + "model": payload.model, + "prompt_version": 
payload.prompt_version, + "temperature": payload.temperature, + "latency_ms": critical_latency, + "input_tokens": critical_usage.get("prompt_tokens"), + "output_tokens": critical_usage.get("completion_tokens"), + }, + ) + except RuntimeError as rexc: + raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to persist critical run: {e}") + + results.append( + { + "question": q, + "criterion_key": criterion_key, + "screening": { + "run_id": screening_run_id, + "answer": screening_answer, + "confidence": screening_parsed.confidence, + "rationale": screening_parsed.rationale, + "parse_ok": screening_parsed.parse_ok, + }, + "critical": { + "run_id": critical_run_id, + "answer": critical_answer, + "confidence": critical_parsed.confidence, + "rationale": critical_parsed.rationale, + "parse_ok": critical_parsed.parse_ok, + "disagrees": disagrees, + }, + } + ) + + return { + "status": "success", + "sr_id": sr_id, + "citation_id": citation_id, + "pipeline": "title_abstract", + "criteria": results, + } + + +@router.post("/validate") +async def validate_screening_step( + payload: ValidateStepRequest, + current_user: Dict[str, Any] = Depends(get_current_active_user), +): + """Mark a citation as validated for a given step. + + Phase 1 MVP uses step=l1 (Title/Abstract). This endpoint is written to be + forward-compatible with l2/parameters. 
+ """ + + sr_id = str(payload.sr_id) + citation_id = int(payload.citation_id) + step = (payload.step or "l1").lower().strip() + + if step not in {"l1", "l2", "parameters"}: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="step must be one of: l1, l2, parameters") + + try: + _sr, screening = await load_sr_and_check(sr_id, current_user, srdb_service) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to load SR: {e}") + + table_name = (screening or {}).get("table_name") or "citations" + + validated_by_col = f"{step}_validated_by" + validated_at_col = f"{step}_validated_at" + validated_by = str(current_user.get("email") or current_user.get("id") or "") + now_iso = datetime.utcnow().isoformat() + "Z" + + try: + # Ensure columns exist (best-effort; no-migrations philosophy) + await run_in_threadpool(cits_dp_service.create_column, validated_by_col, "TEXT", table_name) + await run_in_threadpool(cits_dp_service.create_column, validated_at_col, "TIMESTAMPTZ", table_name) + + u1 = await run_in_threadpool(cits_dp_service.update_text_column, citation_id, validated_by_col, validated_by, table_name) + u2 = await run_in_threadpool(cits_dp_service.update_text_column, citation_id, validated_at_col, now_iso, table_name) + except RuntimeError as rexc: + raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to update validation fields: {e}") + + if not (u1 and u2): + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Citation not found to update") + + return { + "status": "success", + "sr_id": sr_id, + "citation_id": citation_id, + "step": step, + "validated_by": validated_by, + "validated_at": now_iso, + } + + +@router.post("/fulltext/run") +async def run_fulltext_agentic( + payload: FulltextRunRequest, + 
current_user: Dict[str, Any] = Depends(get_current_active_user), +): + """Run orchestrated Fulltext screening + critical for one citation (L2).""" + + sr_id = str(payload.sr_id) + citation_id = int(payload.citation_id) + + try: + sr, screening = await load_sr_and_check(sr_id, current_user, srdb_service) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to load SR: {e}") + + table_name = (screening or {}).get("table_name") or "citations" + + if not azure_openai_client.is_configured(): + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Azure OpenAI client is not configured on the server", + ) + + # Load citation row + try: + row = await run_in_threadpool(cits_dp_service.get_citation_by_id, citation_id, table_name) + except RuntimeError as rexc: + raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to query screening DB: {e}") + + if not row: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Citation not found") + + # Ensure fulltext exists (CAN-SR source of truth: extracted DI/Grobid artifacts) + if not row.get("fulltext"): + # We don't have a direct SR id in the extract endpoint signature; it expects sr_id. + # We'll try best-effort to trigger extraction if fulltext_url exists. 
+ try: + from ..extract.router import extract_fulltext_from_storage + + await extract_fulltext_from_storage(sr_id, citation_id, current_user=current_user) # type: ignore + except Exception: + pass + + row = await run_in_threadpool(cits_dp_service.get_citation_by_id, citation_id, table_name) + + include_cols = [] + try: + include_cols = cits_dp_service.load_include_columns_from_criteria(sr) or [] + except Exception: + include_cols = [] + if not include_cols: + include_cols = ["title", "abstract"] + + citation_text = citations_router._build_combined_citation_from_row(row or {}, include_cols) + fulltext = (row or {}).get("fulltext") or citation_text + + # Tables/Figures context from row + tables_md_lines: List[str] = [] + figures_lines: List[str] = [] + images: List[Tuple[bytes, str]] = [] + + ft_tables = (row or {}).get("fulltext_tables") + if isinstance(ft_tables, str): + try: + ft_tables = json.loads(ft_tables) + except Exception: + ft_tables = None + if isinstance(ft_tables, list): + for item in ft_tables: + if not isinstance(item, dict): + continue + idx = item.get("index") + blob_addr = item.get("blob_address") + caption = item.get("caption") + if not idx or not blob_addr: + continue + try: + md_bytes, _ = await storage_service.get_bytes_by_path(blob_addr) + md_txt = md_bytes.decode("utf-8", errors="replace") + header = f"Table [T{idx}]" + (f" caption: {caption}" if caption else "") + tables_md_lines.extend([header, md_txt, ""]) + except Exception: + continue + + ft_figs = (row or {}).get("fulltext_figures") + if isinstance(ft_figs, str): + try: + ft_figs = json.loads(ft_figs) + except Exception: + ft_figs = None + if isinstance(ft_figs, list): + for item in ft_figs: + if not isinstance(item, dict): + continue + idx = item.get("index") + blob_addr = item.get("blob_address") + caption = item.get("caption") + if not idx or not blob_addr: + continue + figures_lines.append(f"Figure [F{idx}] caption: {caption or '(no caption)'} (see attached image F{idx})") + try: + 
img_bytes, _ = await storage_service.get_bytes_by_path(blob_addr) + if img_bytes: + images.append((img_bytes, "image/png")) + except Exception: + continue + + # Load L2 criteria + cp = sr.get("criteria_parsed") or sr.get("criteria") or {} + l2 = cp.get("l2") if isinstance(cp, dict) else None + questions = (l2 or {}).get("questions") if isinstance(l2, dict) else [] + possible = (l2 or {}).get("possible_answers") if isinstance(l2, dict) else [] + addinfos = (l2 or {}).get("additional_infos") if isinstance(l2, dict) else [] + questions = questions if isinstance(questions, list) else [] + possible = possible if isinstance(possible, list) else [] + addinfos = addinfos if isinstance(addinfos, list) else [] + + if not questions: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="SR has no L2 criteria questions configured") + + async def _call_llm(prompt: str) -> Tuple[str, Dict[str, Any], int]: + import time + + t0 = time.time() + # Use multimodal API when we have figure images + if images: + content = await azure_openai_client.multimodal_chat( + user_text=prompt, + images=images, + system_prompt=None, + model=payload.model, + max_tokens=payload.max_tokens, + temperature=payload.temperature, + ) + latency_ms = int((time.time() - t0) * 1000) + # multimodal_chat does not expose usage + return str(content), {}, latency_ms + + messages = [{"role": "user", "content": prompt}] + resp = await azure_openai_client.chat_completion( + messages=messages, + model=payload.model, + max_tokens=payload.max_tokens, + temperature=payload.temperature, + stream=False, + ) + latency_ms = int((time.time() - t0) * 1000) + content = ((resp.get("choices") or [{}])[0].get("message") or {}).get("content") or "" + usage = resp.get("usage") or {} + return str(content), dict(usage), latency_ms + + results: List[Dict[str, Any]] = [] + + for i, q in enumerate(questions): + if not isinstance(q, str) or not q.strip(): + continue + + opts = possible[i] if i < len(possible) and 
isinstance(possible[i], list) else [] + opts = [str(o) for o in opts if o is not None and str(o).strip()] + xtra = addinfos[i] if i < len(addinfos) and isinstance(addinfos[i], str) else "" + + if not opts: + results.append({"question": q, "criterion_key": snake_case(q, max_len=56), "error": "No options configured"}) + continue + + criterion_key = snake_case(q, max_len=56) + options_listed = "\n".join(opts) + + # 1) screening + screening_prompt = PROMPT_XML_TEMPLATE_FULLTEXT.format( + question=q, + options=options_listed, + xtra=xtra or "", + fulltext=fulltext, + tables="\n".join(tables_md_lines) if tables_md_lines else "(none)", + figures="\n".join(figures_lines) if figures_lines else "(none)", + ) + screening_raw, screening_usage, screening_latency = await _call_llm(screening_prompt) + screening_parsed = parse_agent_xml(screening_raw) + screening_answer = resolve_option(screening_parsed.answer, opts) + + try: + screening_run_id = await run_in_threadpool( + cits_dp_service.insert_screening_agent_run, + { + "sr_id": sr_id, + "table_name": table_name, + "citation_id": citation_id, + "pipeline": "fulltext", + "criterion_key": criterion_key, + "stage": "screening", + "answer": screening_answer, + "confidence": screening_parsed.confidence, + "rationale": screening_parsed.rationale, + "raw_response": screening_raw, + "model": payload.model, + "prompt_version": payload.prompt_version, + "temperature": payload.temperature, + "latency_ms": screening_latency, + "input_tokens": screening_usage.get("prompt_tokens"), + "output_tokens": screening_usage.get("completion_tokens"), + }, + ) + except RuntimeError as rexc: + raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to persist screening run: {e}") + + # 2) critical + critical_opts = build_critical_options(all_options=opts, screening_answer=screening_answer) + critical_listed = 
"\n".join(critical_opts) + critical_prompt = PROMPT_XML_TEMPLATE_FULLTEXT_CRITICAL.format( + question=q, + screening_answer=screening_answer, + options=critical_listed, + xtra=xtra or "", + fulltext=fulltext, + tables="\n".join(tables_md_lines) if tables_md_lines else "(none)", + figures="\n".join(figures_lines) if figures_lines else "(none)", + ) + critical_raw, critical_usage, critical_latency = await _call_llm(critical_prompt) + critical_parsed = parse_agent_xml(critical_raw) + critical_answer = resolve_option(critical_parsed.answer, critical_opts) + disagrees = str(critical_answer).strip() != "None of the above" + + try: + critical_run_id = await run_in_threadpool( + cits_dp_service.insert_screening_agent_run, + { + "sr_id": sr_id, + "table_name": table_name, + "citation_id": citation_id, + "pipeline": "fulltext", + "criterion_key": criterion_key, + "stage": "critical", + "answer": critical_answer, + "confidence": critical_parsed.confidence, + "rationale": critical_parsed.rationale, + "raw_response": critical_raw, + "model": payload.model, + "prompt_version": payload.prompt_version, + "temperature": payload.temperature, + "latency_ms": critical_latency, + "input_tokens": critical_usage.get("prompt_tokens"), + "output_tokens": critical_usage.get("completion_tokens"), + }, + ) + except RuntimeError as rexc: + raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to persist critical run: {e}") + + results.append( + { + "question": q, + "criterion_key": criterion_key, + "screening": { + "run_id": screening_run_id, + "answer": screening_answer, + "confidence": screening_parsed.confidence, + "rationale": screening_parsed.rationale, + "parse_ok": screening_parsed.parse_ok, + }, + "critical": { + "run_id": critical_run_id, + "answer": critical_answer, + "confidence": critical_parsed.confidence, + "rationale": 
critical_parsed.rationale, + "parse_ok": critical_parsed.parse_ok, + "disagrees": disagrees, + }, + } + ) + + return { + "status": "success", + "sr_id": sr_id, + "citation_id": citation_id, + "pipeline": "fulltext", + "criteria": results, + } + + +@router.get("/agent-runs/latest", response_model=AgentRunsQueryResponse) +async def get_latest_agent_runs( + sr_id: str, + pipeline: str, + citation_ids: str, + current_user: Dict[str, Any] = Depends(get_current_active_user), +): + """Fetch latest screening_agent_runs for a set of citations. + + Query params: + - sr_id: SR id + - pipeline: title_abstract | fulltext + - citation_ids: comma-separated citation ids + """ + + pipeline_norm = (pipeline or "").strip().lower() + if pipeline_norm in {"ta", "titleabstract", "title-abstract"}: + pipeline_norm = "title_abstract" + if pipeline_norm not in {"title_abstract", "fulltext"}: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="pipeline must be 'title_abstract' or 'fulltext'") + + raw_ids = [p.strip() for p in (citation_ids or "").split(",") if p.strip()] + if not raw_ids: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="citation_ids is required") + parsed_ids: List[int] = [] + for p in raw_ids: + try: + parsed_ids.append(int(p)) + except Exception: + continue + if not parsed_ids: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="citation_ids must be a comma-separated list of integers") + + try: + _sr, screening = await load_sr_and_check(sr_id, current_user, srdb_service) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to load SR: {e}") + + table_name = (screening or {}).get("table_name") or "citations" + + try: + rows = await run_in_threadpool( + cits_dp_service.list_latest_agent_runs, + sr_id=sr_id, + table_name=table_name, + citation_ids=parsed_ids, + pipeline=pipeline_norm, + ) + except RuntimeError as rexc: + 
raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=str(rexc)) + except Exception as e: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to query screening_agent_runs: {e}") + + return AgentRunsQueryResponse(sr_id=sr_id, pipeline=pipeline_norm, citation_ids=parsed_ids, runs=rows) + async def update_inclusion_decision( sr: Dict[str, Any], citation_id: int, diff --git a/backend/api/services/cit_db_service.py b/backend/api/services/cit_db_service.py index 85371835..1e3f28e4 100644 --- a/backend/api/services/cit_db_service.py +++ b/backend/api/services/cit_db_service.py @@ -13,8 +13,15 @@ can surface a 503 with an actionable message. """ from typing import Any, Dict, List, Optional, Tuple -import psycopg2 -import psycopg2.extras + +# psycopg2 is optional in some deploy/test contexts. +# Per module docstring contract: methods should raise RuntimeError when psycopg2 +# is unavailable so routers can surface a 503. +try: + import psycopg2 # type: ignore + import psycopg2.extras # type: ignore +except Exception: # pragma: no cover + psycopg2 = None import json import re import os @@ -22,6 +29,8 @@ import csv import urllib.parse as up import hashlib +from datetime import datetime +import uuid # Local settings import (for POSTGRES_ADMIN_DSN / DATABASE_URL usage) try: @@ -145,6 +154,297 @@ def __init__(self): # nothing stateful for now; keep class for ergonomics and easier testing pass + def _require_psycopg2(self) -> None: + if psycopg2 is None: + raise RuntimeError( + "psycopg2 is not installed. Install backend dependencies (requirements.txt) " + "or run with the docker backend image." + ) + + # ----------------------- + # Schema helpers + # ----------------------- + def table_exists(self, table_name: str = "citations") -> bool: + """Return True if a public table exists. + + NOTE: We intentionally use runtime schema evolution (ALTER TABLE ...) 
+ throughout CAN-SR, so callers need a safe way to check existence before + attempting to add columns. + """ + table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() + conn = None + try: + conn = postgres_server.conn + cur = conn.cursor() + cur.execute( + """ + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = %s + LIMIT 1 + """, + (table_name,), + ) + return cur.fetchone() is not None + except Exception: + _safe_rollback(conn) + raise + finally: + if conn: + pass + + def ensure_step_validation_columns(self, table_name: str = "citations") -> None: + """Ensure step-level validation columns exist for a screening table. + + CAN-SR uses per-upload screening tables, so we create these columns on + those tables (not just a single shared citations table). + + This is intentionally NOT backwards-compatible: it will eagerly add the + columns to whatever table is passed. + """ + if not self.table_exists(table_name): + return + + # L1 (Title/Abstract) + self.create_column("l1_validated_by", "TEXT", table_name=table_name) + self.create_column("l1_validated_at", "TIMESTAMPTZ", table_name=table_name) + + # L2 (Full Text) + self.create_column("l2_validated_by", "TEXT", table_name=table_name) + self.create_column("l2_validated_at", "TIMESTAMPTZ", table_name=table_name) + + # Parameters / extraction + self.create_column("parameters_validated_by", "TEXT", table_name=table_name) + self.create_column("parameters_validated_at", "TIMESTAMPTZ", table_name=table_name) + + def ensure_screening_agent_runs_table(self) -> None: + """Ensure the normalized agent-run storage table exists. + + We keep it in the shared Postgres DB (public schema). Because CAN-SR uses + per-upload screening tables (each with its own id sequence), we store + both the `sr_id` and the screening `table_name` alongside `citation_id`. 
+ """ + conn = None + try: + self._require_psycopg2() + conn = postgres_server.conn + cur = conn.cursor() + + cur.execute( + """ + CREATE TABLE IF NOT EXISTS screening_agent_runs ( + id TEXT PRIMARY KEY, + sr_id TEXT NOT NULL, + table_name TEXT NOT NULL, + citation_id INT NOT NULL, + pipeline TEXT NOT NULL, + criterion_key TEXT NOT NULL, + stage TEXT NOT NULL, + answer TEXT, + confidence DOUBLE PRECISION, + rationale TEXT, + raw_response TEXT, + model TEXT, + prompt_version TEXT, + temperature DOUBLE PRECISION, + top_p DOUBLE PRECISION, + seed INT, + latency_ms INT, + input_tokens INT, + output_tokens INT, + cost_usd DOUBLE PRECISION, + created_at TIMESTAMPTZ DEFAULT now() + ) + """ + ) + + # A couple of pragmatic indexes for common lookups. + cur.execute( + """ + CREATE INDEX IF NOT EXISTS idx_screening_agent_runs_citation + ON screening_agent_runs (sr_id, table_name, citation_id, pipeline) + """ + ) + cur.execute( + """ + CREATE INDEX IF NOT EXISTS idx_screening_agent_runs_criterion + ON screening_agent_runs (sr_id, pipeline, criterion_key, stage) + """ + ) + + conn.commit() + except Exception: + _safe_rollback(conn) + raise + finally: + if conn: + pass + + def ensure_agentic_screening_schema(self) -> None: + """One-call bootstrap for agentic screening. + + This is safe to call at startup (creates only global tables), and can + also be called by endpoints before use. + """ + self.ensure_screening_agent_runs_table() + + # ----------------------- + # Agent-run persistence + # ----------------------- + def insert_screening_agent_run(self, run: Dict[str, Any]) -> str: + """Insert a single screening_agent_runs row. + + Expected keys (most optional): + - sr_id, table_name, citation_id, pipeline, criterion_key, stage + - answer, confidence, rationale, raw_response + - model, prompt_version, temperature, top_p, seed + - latency_ms, input_tokens, output_tokens, cost_usd + + Returns the generated run id. 
+ """ + self._require_psycopg2() + self.ensure_screening_agent_runs_table() + + run_id = str(run.get("id") or uuid.uuid4()) + sr_id = str(run.get("sr_id") or "") + table_name = str(run.get("table_name") or "") + citation_id = int(run.get("citation_id") or 0) + pipeline = str(run.get("pipeline") or "") + criterion_key = str(run.get("criterion_key") or "") + stage = str(run.get("stage") or "") + + if not (sr_id and table_name and citation_id and pipeline and criterion_key and stage): + raise ValueError("insert_screening_agent_run missing required fields") + + conn = None + try: + conn = postgres_server.conn + cur = conn.cursor() + cur.execute( + """ + INSERT INTO screening_agent_runs ( + id, sr_id, table_name, citation_id, pipeline, criterion_key, stage, + answer, confidence, rationale, raw_response, + model, prompt_version, temperature, top_p, seed, + latency_ms, input_tokens, output_tokens, cost_usd, created_at + ) VALUES ( + %s, %s, %s, %s, %s, %s, %s, + %s, %s, %s, %s, + %s, %s, %s, %s, %s, + %s, %s, %s, %s, %s + ) + """, + ( + run_id, + sr_id, + table_name, + citation_id, + pipeline, + criterion_key, + stage, + run.get("answer"), + run.get("confidence"), + run.get("rationale"), + run.get("raw_response"), + run.get("model"), + run.get("prompt_version"), + run.get("temperature"), + run.get("top_p"), + run.get("seed"), + run.get("latency_ms"), + run.get("input_tokens"), + run.get("output_tokens"), + run.get("cost_usd"), + run.get("created_at") or datetime.utcnow().isoformat() + "Z", + ), + ) + conn.commit() + return run_id + except Exception: + _safe_rollback(conn) + raise + finally: + if conn: + pass + + def list_latest_agent_runs( + self, + *, + sr_id: str, + table_name: str, + citation_ids: List[int], + pipeline: str, + ) -> List[Dict[str, Any]]: + """Return latest agent runs per (citation_id, criterion_key, stage) for a set of citations. + + This is designed for list pages where we need to compute "needs validation" + without loading full raw responses. 
+ """ + self._require_psycopg2() + self.ensure_screening_agent_runs_table() + + sr_id = str(sr_id or "") + table_name = str(table_name or "") + pipeline = str(pipeline or "") + + ids: List[int] = [] + for i in citation_ids or []: + try: + ids.append(int(i)) + except Exception: + continue + if not (sr_id and table_name and pipeline and ids): + return [] + + conn = None + try: + conn = postgres_server.conn + cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + + # DISTINCT ON picks the first row per group according to ORDER BY. + cur.execute( + """ + SELECT DISTINCT ON (citation_id, criterion_key, stage) + id, + sr_id, + table_name, + citation_id, + pipeline, + criterion_key, + stage, + answer, + confidence, + rationale, + model, + prompt_version, + temperature, + top_p, + seed, + latency_ms, + input_tokens, + output_tokens, + cost_usd, + created_at + FROM screening_agent_runs + WHERE sr_id = %s + AND table_name = %s + AND pipeline = %s + AND citation_id = ANY(%s) + ORDER BY citation_id, criterion_key, stage, created_at DESC + """, + (sr_id, table_name, pipeline, ids), + ) + + rows = cur.fetchall() or [] + return [dict(r) for r in rows if r] + except Exception: + _safe_rollback(conn) + raise + finally: + if conn: + pass + # ----------------------- # Low level connection helpers # ----------------------- @@ -160,6 +460,7 @@ def create_column(self, col: str, col_type: str, table_name: str = "citations") col_type is the SQL type (e.g. TEXT, JSONB). """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -193,6 +494,7 @@ def update_jsonb_column( Update a JSONB column for a citation. Creates the column if needed. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -227,6 +529,7 @@ def update_text_column( Update a TEXT column for a citation. Creates the column if needed. 
""" table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -259,6 +562,7 @@ def update_bool_column( ) -> int: """Update a BOOLEAN column for a citation. Creates the column if needed.""" table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -284,6 +588,7 @@ def update_bool_column( def get_table_columns(self, table_name: str = "citations") -> List[Dict[str, str]]: """Return [{name, data_type, udt_name}] for table columns ordered by ordinal_position.""" table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -368,6 +673,7 @@ def copy_jsonb_if_empty( Intended for auto-filling human_* from llm_* while never overwriting. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -407,6 +713,7 @@ def dump_citations_csv(self, table_name: str = "citations") -> bytes: Uses Postgres COPY for correctness and performance. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -439,6 +746,7 @@ def dump_citations_csv_filtered(self, table_name: str = "citations") -> bytes: explicit scalar columns (selected/explanation/confidence/found/value/...). """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() # 1) Determine columns to export cols_meta = self.get_table_columns(table_name) @@ -596,6 +904,7 @@ def get_citation_by_id(self, citation_id: int, table_name: str = "citations") -> Return a dict mapping column -> value for the citation row, or None. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -638,6 +947,7 @@ def get_citations_by_ids( List[dict] rows. 
Missing ids are omitted. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() ids: List[int] = [] for i in citation_ids or []: try: @@ -691,6 +1001,7 @@ def backfill_human_decisions(self, criteria_parsed: Dict[str, Any], table_name: - undecided: any question missing/unanswered """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() cp = criteria_parsed or {} l1_qs = (cp.get("l1") or {}).get("questions") if isinstance(cp.get("l1"), dict) else None @@ -815,6 +1126,7 @@ def list_citation_ids(self, filter_step=None, table_name: str = "citations") -> Return list of integer primary keys (id) from citations table ordered by id. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -862,6 +1174,7 @@ def list_fulltext_urls(self, table_name: str = "citations") -> List[str]: Return list of fulltext_url values (non-null) from citations table. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -898,6 +1211,7 @@ def attach_fulltext( Creates columns if necessary. Returns rows modified (0/1). """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() # create columns if missing self.create_column("fulltext_url", "TEXT", table_name=table_name) # compute md5 @@ -929,6 +1243,7 @@ def get_column_value(self, citation_id: int, column: str, table_name: str = "cit Return the value stored in `column` for the citation row (or None). 
""" table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -968,6 +1283,7 @@ def set_column_value(self, citation_id: int, column: str, value: Any, table_name def drop_table(self, table_name: str, cascade: bool = True) -> None: """Drop a screening table in the shared database.""" table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -994,6 +1310,7 @@ def create_table_and_insert_sync( is per-upload (e.g. sr___citations) inside the shared DB. """ table_name = _validate_ident(table_name, kind="table_name") + self._require_psycopg2() conn = None try: conn = postgres_server.conn @@ -1009,6 +1326,15 @@ def create_table_and_insert_sync( col_defs.append('"fulltext_url" TEXT') col_defs.append('"fulltext" TEXT') col_defs.append('"fulltext_md5" TEXT') + + # Step-level validation fields (agentic screening plan) + col_defs.append('"l1_validated_by" TEXT') + col_defs.append('"l1_validated_at" TIMESTAMP WITH TIME ZONE') + col_defs.append('"l2_validated_by" TEXT') + col_defs.append('"l2_validated_at" TIMESTAMP WITH TIME ZONE') + col_defs.append('"parameters_validated_by" TEXT') + col_defs.append('"parameters_validated_at" TIMESTAMP WITH TIME ZONE') + col_defs.append('"created_at" TIMESTAMP WITH TIME ZONE DEFAULT now()') cols_sql = ", ".join(col_defs) diff --git a/backend/docker-compose.yml b/backend/docker-compose.yml index 3cca1cbd..1a8567a7 100644 --- a/backend/docker-compose.yml +++ b/backend/docker-compose.yml @@ -47,6 +47,9 @@ services: # POSTGRESQL - Database (Citations & Systematic Reviews) # ============================================================================= pgdb-service: + # IMPORTANT: pin to a major version. + # Using `postgres` (latest) can auto-upgrade across major versions (e.g., 16 -> 18) + # and break existing on-disk data without a pg_upgrade/backup-restore. 
image: postgres container_name: pgdb-service restart: unless-stopped @@ -57,7 +60,7 @@ services: ports: - "5432:5432" volumes: - - ./volumes/postgres:/var/lib/postgresql/data + - ./volumes/postgres:/var/lib/postgresql healthcheck: test: ["CMD-SHELL", "pg_isready -U admin -d postgres -h localhost"] interval: 30s diff --git a/backend/main.py b/backend/main.py index 606dc55f..379553c5 100644 --- a/backend/main.py +++ b/backend/main.py @@ -14,6 +14,7 @@ from api.core.config import settings from api.services.sr_db_service import srdb_service from api.services.user_db import user_db_service +from api.services.cit_db_service import cits_dp_service app = FastAPI( @@ -45,6 +46,15 @@ async def startup_event(): except Exception as e: print(f"⚠️ Failed to ensure SR table exists: {e}", flush=True) + # Agentic screening schema bootstrap (no migrations; runtime schema evolution) + try: + print("🤖 Ensuring agentic screening tables...", flush=True) + await run_in_threadpool(cits_dp_service.ensure_agentic_screening_schema) + print("✓ Agentic screening tables initialized", flush=True) + except Exception as e: + # Do not fail startup; allow deployments without Postgres / in degraded mode. 
+ print(f"⚠️ Failed to ensure agentic screening tables: {e}", flush=True) + # Procrastinate schema + run-all job tables try: from api.jobs.procrastinate_app import ( diff --git a/frontend/app/[lang]/can-sr/l1-screen/view/page.tsx b/frontend/app/[lang]/can-sr/l1-screen/view/page.tsx index 75721fb6..bb4dd22f 100644 --- a/frontend/app/[lang]/can-sr/l1-screen/view/page.tsx +++ b/frontend/app/[lang]/can-sr/l1-screen/view/page.tsx @@ -60,6 +60,16 @@ type CriteriaData = { possible_answers: string[][] } +type LatestAgentRun = { + citation_id: number + criterion_key: string + stage: 'screening' | 'critical' | string + answer?: string | null + confidence?: number | null + rationale?: string | null + created_at?: string +} + /* Main page component */ export default function CanSrL1ScreenPage() { const router = useRouter() @@ -91,6 +101,12 @@ export default function CanSrL1ScreenPage() { // Collapsible open state for LLM panels const [panelOpen, setPanelOpen] = useState>({}) + // Agentic runs (screening_agent_runs) for this citation + const [agentRuns, setAgentRuns] = useState([]) + const [loadingRuns, setLoadingRuns] = useState(false) + + const [validating, setValidating] = useState(false) + useEffect(() => { if (!srId || !citationId) { router.replace('/can-sr') @@ -159,6 +175,49 @@ export default function CanSrL1ScreenPage() { fetchCitationById(citationId) }, [srId, citationId]) + // Load latest agent runs for this citation (screening + critical per criterion) + useEffect(() => { + if (!srId || !citationId) return + const loadRuns = async () => { + setLoadingRuns(true) + try { + const headers = getAuthHeaders() + const res = await fetch( + `/api/can-sr/screen/agent-runs/latest?sr_id=${encodeURIComponent( + srId, + )}&pipeline=${encodeURIComponent('title_abstract')}&citation_ids=${encodeURIComponent( + String(citationId), + )}`, + { method: 'GET', headers }, + ) + const data = await res.json().catch(() => ({})) + if (res.ok && Array.isArray(data?.runs)) { + 
setAgentRuns(data.runs as LatestAgentRun[]) + } else { + setAgentRuns([]) + } + } catch { + setAgentRuns([]) + } finally { + setLoadingRuns(false) + } + } + loadRuns() + }, [srId, citationId]) + + const runsByCriterion = useMemo(() => { + const by: Record = {} + for (const r of agentRuns) { + const key = String((r as any)?.criterion_key || '') + if (!key) continue + if (!by[key]) by[key] = {} + const stage = String((r as any)?.stage || '') + if (stage === 'screening') by[key].screening = r + if (stage === 'critical') by[key].critical = r + } + return by + }, [agentRuns]) + // Load parsed criteria (L1) useEffect(() => { if (!srId) return @@ -438,6 +497,107 @@ export default function CanSrL1ScreenPage() { />
+ {/* Agentic summary + Validate */} +
+
+
+

Agentic results

+

+ Latest screening + critical runs per criterion. +

+
+
+ + + {citation?.l1_validated_by ? ( + + Validated by {String(citation.l1_validated_by)} + + ) : ( + Not validated + )} +
+
+ + {loadingRuns ? ( +
Loading agent runs…
+ ) : criteriaData?.questions?.length ? ( +
+ {criteriaData.questions.map((q, idx) => { + const criterionKey = q + ? q + .trim() + .toLowerCase() + .replace(/[^\w]+/g, '_') + .replace(/_+/g, '_') + .replace(/^_+|_+$/g, '') + .slice(0, 56) + : '' + + const r = runsByCriterion[criterionKey] || {} + const scr = r.screening + const crit = r.critical + + const critDisagrees = + crit && String((crit as any)?.answer || '').trim() !== '' && + String((crit as any)?.answer || '').trim() !== 'None of the above' + + return ( +
+
{q}
+
+
+
Screening
+
Answer: {String((scr as any)?.answer ?? '—')}
+
Confidence: {String((scr as any)?.confidence ?? '—')}
+
+
+
Critical
+
Answer: {String((crit as any)?.answer ?? '—')}
+
Confidence: {String((crit as any)?.confidence ?? '—')}
+ {critDisagrees ? ( +
Disagrees
+ ) : null} +
+
+
+ ) + })} +
+ ) : ( +
No criteria loaded yet.
+ )} +
+
{/* Workspace (left) */}
diff --git a/frontend/app/[lang]/can-sr/l2-screen/view/page.tsx b/frontend/app/[lang]/can-sr/l2-screen/view/page.tsx index 914634da..fef5b4f6 100644 --- a/frontend/app/[lang]/can-sr/l2-screen/view/page.tsx +++ b/frontend/app/[lang]/can-sr/l2-screen/view/page.tsx @@ -63,6 +63,16 @@ type CriteriaData = { additional_infos?: (string | null)[] // optional per-question extra guidance when available } +type LatestAgentRun = { + citation_id: number + criterion_key: string + stage: 'screening' | 'critical' | string + answer?: string | null + confidence?: number | null + rationale?: string | null + created_at?: string +} + /* Main page component */ export default function CanSrL2ScreenViewPage() { const router = useRouter() @@ -98,6 +108,11 @@ export default function CanSrL2ScreenViewPage() { // Hint text from Title/Abstract screening for L1 questions const [hintByIndex, setHintByIndex] = useState>({}) + // Agentic runs (screening_agent_runs) for this citation + const [agentRuns, setAgentRuns] = useState([]) + const [loadingRuns, setLoadingRuns] = useState(false) + const [validating, setValidating] = useState(false) + // Fulltext PDF viewer linkage const [fulltextCoords, setFulltextCoords] = useState(null) const [fulltextPages, setFulltextPages] = useState<{ width: number; height: number }[] | null>(null) @@ -218,6 +233,49 @@ export default function CanSrL2ScreenViewPage() { fetchCitationById(citationId) }, [srId, citationId]) + // Load latest agent runs for this citation (screening + critical per criterion) + useEffect(() => { + if (!srId || !citationId) return + const loadRuns = async () => { + setLoadingRuns(true) + try { + const headers = getAuthHeaders() + const res = await fetch( + `/api/can-sr/screen/agent-runs/latest?sr_id=${encodeURIComponent( + srId, + )}&pipeline=${encodeURIComponent('fulltext')}&citation_ids=${encodeURIComponent( + String(citationId), + )}`, + { method: 'GET', headers }, + ) + const data = await res.json().catch(() => ({})) + if (res.ok && 
Array.isArray(data?.runs)) { + setAgentRuns(data.runs as LatestAgentRun[]) + } else { + setAgentRuns([]) + } + } catch { + setAgentRuns([]) + } finally { + setLoadingRuns(false) + } + } + loadRuns() + }, [srId, citationId]) + + const runsByCriterion = useMemo(() => { + const by: Record = {} + for (const r of agentRuns) { + const key = String((r as any)?.criterion_key || '') + if (!key) continue + if (!by[key]) by[key] = {} + const stage = String((r as any)?.stage || '') + if (stage === 'screening') by[key].screening = r + if (stage === 'critical') by[key].critical = r + } + return by + }, [agentRuns]) + // Load parsed criteria (L1 + L2 merged, L1 first) useEffect(() => { if (!srId) return @@ -616,6 +674,115 @@ export default function CanSrL2ScreenViewPage() { />
+ {/* Agentic summary + Validate */} +
+
+
+

Agentic results

+

+ Latest screening + critical runs for L2/fulltext per criterion. +

+
+
+ + + {citation?.l2_validated_by ? ( + + Validated by {String(citation.l2_validated_by)} + + ) : ( + Not validated + )} +
+
+ + {loadingRuns ? ( +
Loading agent runs…
+ ) : criteriaData?.questions?.length ? ( +
+ {criteriaData.questions + .map((q, idx) => ({ q, idx })) + .filter(({ idx }) => sourceFlags[idx] === 'l2') + .map(({ q, idx }) => { + const criterionKey = q + ? q + .trim() + .toLowerCase() + .replace(/[^\w]+/g, '_') + .replace(/_+/g, '_') + .replace(/^_+|_+$/g, '') + .slice(0, 56) + : '' + + const r = runsByCriterion[criterionKey] || {} + const scr = r.screening + const crit = r.critical + + const critDisagrees = + crit && + String((crit as any)?.answer || '').trim() !== '' && + String((crit as any)?.answer || '').trim() !== 'None of the above' + + return ( +
+
{q}
+
+
+
Screening
+
Answer: {String((scr as any)?.answer ?? '—')}
+
Confidence: {String((scr as any)?.confidence ?? '—')}
+
+
+
Critical
+
Answer: {String((crit as any)?.answer ?? '—')}
+
Confidence: {String((crit as any)?.confidence ?? '—')}
+ {critDisagrees ? ( +
Disagrees
+ ) : null} +
+
+
+ ) + })} +
+ ) : ( +
No criteria loaded yet.
+ )} +
+
{/* Workspace (left) */}
diff --git a/frontend/app/api/can-sr/screen/agent-runs/latest/route.ts b/frontend/app/api/can-sr/screen/agent-runs/latest/route.ts new file mode 100644 index 00000000..4fb1b1b9 --- /dev/null +++ b/frontend/app/api/can-sr/screen/agent-runs/latest/route.ts @@ -0,0 +1,67 @@ +import { NextRequest, NextResponse } from 'next/server' +import { BACKEND_URL } from '@/lib/config' + +/** + * Proxy: GET /api/can-sr/screen/agent-runs/latest?sr_id=&pipeline=title_abstract&citation_ids=1,2,3 + * -> GET {BACKEND_URL}/api/screen/agent-runs/latest?sr_id=...&pipeline=...&citation_ids=... + */ + +export async function OPTIONS() { + return new Response(null, { + status: 204, + headers: { + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Methods': 'GET,OPTIONS', + 'Access-Control-Allow-Headers': 'Authorization, Content-Type', + }, + }) +} + +export async function GET(request: NextRequest) { + try { + const params = request.nextUrl.searchParams + const srId = params.get('sr_id') + const pipeline = params.get('pipeline') + const citationIds = params.get('citation_ids') + + if (!srId || !pipeline || !citationIds) { + return NextResponse.json( + { error: 'sr_id, pipeline, citation_ids are required' }, + { status: 400 }, + ) + } + + const authHeader = request.headers.get('authorization') + if (!authHeader) { + return NextResponse.json( + { error: 'Authorization header is required' }, + { status: 401 }, + ) + } + + const url = new URL(`${BACKEND_URL}/api/screen/agent-runs/latest`) + url.searchParams.set('sr_id', srId) + url.searchParams.set('pipeline', pipeline) + url.searchParams.set('citation_ids', citationIds) + + const res = await fetch(url.toString(), { + method: 'GET', + headers: { + Authorization: authHeader, + }, + }) + + const text = await res.text().catch(() => '') + let json: any = null + try { + json = text ? 
JSON.parse(text) : {} + } catch { + json = { detail: text || null } + } + + return NextResponse.json(json, { status: res.status }) + } catch (err: any) { + console.error('Agent runs latest proxy GET error:', err) + return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) + } +} diff --git a/frontend/app/api/can-sr/screen/validate/route.ts b/frontend/app/api/can-sr/screen/validate/route.ts new file mode 100644 index 00000000..5ea0e153 --- /dev/null +++ b/frontend/app/api/can-sr/screen/validate/route.ts @@ -0,0 +1,44 @@ +import { NextRequest, NextResponse } from 'next/server' +import { BACKEND_URL } from '@/lib/config' + +/** + * Proxy: POST /api/can-sr/screen/validate + * body: { sr_id, citation_id, step } + * -> POST {BACKEND_URL}/api/screen/validate + */ + +export async function POST(request: NextRequest) { + try { + const authHeader = request.headers.get('authorization') + if (!authHeader) { + return NextResponse.json( + { error: 'Authorization header is required' }, + { status: 401 }, + ) + } + + const body = await request.json().catch(() => ({})) + + const res = await fetch(`${BACKEND_URL}/api/screen/validate`, { + method: 'POST', + headers: { + Authorization: authHeader, + 'Content-Type': 'application/json', + }, + body: JSON.stringify(body), + }) + + const text = await res.text().catch(() => '') + let json: any = null + try { + json = text ? 
JSON.parse(text) : {} + } catch { + json = { detail: text || null } + } + + return NextResponse.json(json, { status: res.status }) + } catch (err: any) { + console.error('Validate proxy POST error:', err) + return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) + } +} diff --git a/frontend/components/can-sr/PagedList.tsx b/frontend/components/can-sr/PagedList.tsx index c0ab7a05..b0417b50 100644 --- a/frontend/components/can-sr/PagedList.tsx +++ b/frontend/components/can-sr/PagedList.tsx @@ -14,6 +14,15 @@ type CitationInfo = { pageview: string } +type LatestAgentRun = { + citation_id: number + criterion_key: string + stage: 'screening' | 'critical' | string + answer?: string | null + confidence?: number | null + created_at?: string +} + function getAuthHeaders(): Record { const token = getAuthToken() const tokenType = getTokenType() @@ -53,13 +62,19 @@ export default function PagedList({ ) const [showClassify, setShowClassify] = useState>({}) + // TA list controls + const [threshold, setThreshold] = useState(0.9) + const [filterMode, setFilterMode] = useState<'needs' | 'validated' | 'unvalidated' | 'all'>('needs') + + const [latestRunsByCitation, setLatestRunsByCitation] = useState>({}) + const fileInputRefs = useRef>({}) // --- paging --- useEffect(() => { const lp = Math.max(1, Math.ceil((citationIds?.length || 0) / pageSize)) setLastpage(lp) - setpage((prev) => Math.min(Math.max(1, prev), lp)) + setpage((prev: number) => Math.min(Math.max(1, prev), lp)) }, [citationIds, pageSize]) useEffect(() => { @@ -112,12 +127,102 @@ export default function PagedList({ if (row?.fulltext_url) nextShow[id] = true } - setLlmClassified((prev) => ({ ...prev, ...nextLlm })) - setHumanVerified((prev) => ({ ...prev, ...nextHuman })) - setShowClassify((prev) => ({ ...prev, ...nextShow })) + setLlmClassified((prev: Record) => ({ ...prev, ...nextLlm })) + setHumanVerified((prev: Record) => ({ ...prev, ...nextHuman })) + setShowClassify((prev: Record) => ({ 
...prev, ...nextShow })) + + // Fetch latest agent runs for this page (L1=title_abstract, L2=fulltext) + try { + const shouldFetchRuns = (screeningStep === 'l1' || screeningStep === 'l2') && pageIds.length + if (shouldFetchRuns) { + const pipeline = screeningStep === 'l2' ? 'fulltext' : 'title_abstract' + const r2 = await fetch( + `/api/can-sr/screen/agent-runs/latest?sr_id=${encodeURIComponent(srId)}&pipeline=${encodeURIComponent( + pipeline, + )}&citation_ids=${encodeURIComponent(pageIds.join(','))}`, + { method: 'GET', headers }, + ) + const j2 = await r2.json().catch(() => ({})) + if (r2.ok && Array.isArray(j2?.runs)) { + const grouped: Record = {} + for (const run of j2.runs as LatestAgentRun[]) { + const cid = Number((run as any)?.citation_id) + if (!Number.isFinite(cid)) continue + if (!grouped[cid]) grouped[cid] = [] + grouped[cid].push(run) + } + setLatestRunsByCitation((prev: Record) => ({ ...prev, ...grouped })) + } + } + } catch (e) { + // best-effort + } } fetchCitations() - }, [citationIds, page, pageSize, questions, srId]) + }, [citationIds, page, pageSize, questions, srId, screeningStep]) + + // Reset cached runs when switching steps (avoid mixing l1/l2 pipeline results) + useEffect(() => { + setLatestRunsByCitation({}) + }, [screeningStep]) + + const isValidatedForStep = (row: any): boolean => { + if (!row) return false + if (screeningStep === 'l1') return Boolean(row?.l1_validated_by) + if (screeningStep === 'l2') return Boolean(row?.l2_validated_by) + if (screeningStep === 'extract') return Boolean(row?.parameters_validated_by) + return false + } + + const computeNeedsValidation = (citationId: number, row: any): boolean => { + // If validated, it no longer “needs validation” + if (isValidatedForStep(row)) return false + + const runs = latestRunsByCitation[citationId] || [] + if (!runs.length) { + // No agent runs yet => should be in "unvalidated" but not necessarily "needs" + // We'll treat missing runs as "needs" so it's easy to find. 
+ return true + } + + // Group by criterion_key + const byKey: Record = {} + for (const r of runs) { + const key = String((r as any)?.criterion_key || '') + if (!key) continue + if (!byKey[key]) byKey[key] = [] + byKey[key].push(r) + } + + // Needs validation if ANY criterion is low confidence OR critical disagrees + for (const key of Object.keys(byKey)) { + const items = byKey[key] + const screening = items.find((x) => String((x as any)?.stage) === 'screening') + const critical = items.find((x) => String((x as any)?.stage) === 'critical') + + const conf = Number((screening as any)?.confidence) + if (Number.isFinite(conf) && conf < threshold) return true + + const criticalAns = String((critical as any)?.answer || '') + // In our critical prompt contract, agreement is "None of the above". + if (critical && criticalAns.trim() !== '' && criticalAns.trim() !== 'None of the above') return true + } + + return false + } + + const filteredCitationData = citationData.filter((row: any) => { + const id = Number(row?.id) + if (!Number.isFinite(id)) return false + const validated = isValidatedForStep(row) + const needs = computeNeedsValidation(id, row) + const unvalidated = !validated + if (filterMode === 'all') return true + if (filterMode === 'validated') return validated + if (filterMode === 'unvalidated') return unvalidated + if (filterMode === 'needs') return needs + return true + }) // NOTE: Previously we fetched each citation via /citations/get. // This is now replaced by a single /citations/batch call per page. 
@@ -156,7 +261,7 @@ export default function PagedList({ { method: 'POST', headers, body: JSON.stringify(bodyPayload) }, ) } - setLlmClassified((prev) => ({ ...prev, [id]: true })) + setLlmClassified((prev: Record) => ({ ...prev, [id]: true })) } const onChooseFile = (id: number) => { @@ -196,16 +301,56 @@ export default function PagedList({ { method: 'POST', headers, body: fd as any }, ) - setShowClassify((prev) => ({ ...prev, [id]: true })) + setShowClassify((prev: Record) => ({ ...prev, [id]: true })) } return (
+ {screeningStep === 'l1' || screeningStep === 'l2' ? ( +
+
+ + ) => { + const v = Number(e.target.value) + if (!Number.isFinite(v)) return + setThreshold(Math.max(0, Math.min(1, v))) + }} + className="w-24 rounded-md border border-gray-200 px-2 py-1 text-sm" + /> +
+ +
+ + +
+
+ ) : null} +
    - {citationData.map((data) => ( + {filteredCitationData.map((data: any) => (
  • Citation #{data.id}

    @@ -334,7 +479,7 @@ export default function PagedList({ setJumpPageInput(e.target.value)} + onChange={(e: React.ChangeEvent) => setJumpPageInput(e.target.value)} className="w-20 rounded-md border border-gray-200 px-2 py-1 text-sm" placeholder={String(page)} inputMode="numeric" diff --git a/frontend/package-lock.json b/frontend/package-lock.json index a8237aa6..ae2deb78 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1022,10 +1022,9 @@ } }, "node_modules/@next/env": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/env/-/env-16.1.6.tgz", - "integrity": "sha512-N1ySLuZjnAtN3kFnwhAwPvZah8RJxKasD7x1f8shFqhncnWZn4JMfg37diLNuoHsLAlrDfM3g4mawVdtAG8XLQ==", - "license": "MIT" + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/env/-/env-16.2.2.tgz", + "integrity": "sha512-LqSGz5+xGk9EL/iBDr2yo/CgNQV6cFsNhRR2xhSXYh7B/hb4nePCxlmDvGEKG30NMHDFf0raqSyOZiQrO7BkHQ==" }, "node_modules/@next/eslint-plugin-next": { "version": "15.5.9", @@ -1037,13 +1036,12 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-16.1.6.tgz", - "integrity": "sha512-wTzYulosJr/6nFnqGW7FrG3jfUUlEf8UjGA0/pyypJl42ExdVgC6xJgcXQ+V8QFn6niSG2Pb8+MIG1mZr2vczw==", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-16.2.2.tgz", + "integrity": "sha512-B92G3ulrwmkDSEJEp9+XzGLex5wC1knrmCSIylyVeiAtCIfvEJYiN3v5kXPlYt5R4RFlsfO/v++aKV63Acrugg==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "darwin" @@ -1053,13 +1051,12 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-16.1.6.tgz", - "integrity": "sha512-BLFPYPDO+MNJsiDWbeVzqvYd4NyuRrEYVB5k2N3JfWncuHAy2IVwMAOlVQDFjj+krkWzhY2apvmekMkfQR0CUQ==", + "version": "16.2.2", + "resolved": 
"https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-16.2.2.tgz", + "integrity": "sha512-7ZwSgNKJNQiwW0CKhNm9B1WS2L1Olc4B2XY0hPYCAL3epFnugMhuw5TMWzMilQ3QCZcCHoYm9NGWTHbr5REFxw==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "darwin" @@ -1069,13 +1066,12 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-16.1.6.tgz", - "integrity": "sha512-OJYkCd5pj/QloBvoEcJ2XiMnlJkRv9idWA/j0ugSuA34gMT6f5b7vOiCQHVRpvStoZUknhl6/UxOXL4OwtdaBw==", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-16.2.2.tgz", + "integrity": "sha512-c3m8kBHMziMgo2fICOP/cd/5YlrxDU5YYjAJeQLyFsCqVF8xjOTH/QYG4a2u48CvvZZSj1eHQfBCbyh7kBr30Q==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1085,13 +1081,12 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-16.1.6.tgz", - "integrity": "sha512-S4J2v+8tT3NIO9u2q+S0G5KdvNDjXfAv06OhfOzNDaBn5rw84DGXWndOEB7d5/x852A20sW1M56vhC/tRVbccQ==", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-16.2.2.tgz", + "integrity": "sha512-VKLuscm0P/mIfzt+SDdn2+8TNNJ7f0qfEkA+az7OqQbjzKdBxAHs0UvuiVoCtbwX+dqMEL9U54b5wQ/aN3dHeg==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1101,13 +1096,12 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-16.1.6.tgz", - "integrity": "sha512-2eEBDkFlMMNQnkTyPBhQOAyn2qMxyG2eE7GPH2WIDGEpEILcBPI/jdSv4t6xupSP+ot/jkfrCShLAa7+ZUPcJQ==", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-16.2.2.tgz", + "integrity": 
"sha512-kU3OPHJq6sBUjOk7wc5zJ7/lipn8yGldMoAv4z67j6ov6Xo/JvzA7L7LCsyzzsXmgLEhk3Qkpwqaq/1+XpNR3g==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1117,13 +1111,12 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-16.1.6.tgz", - "integrity": "sha512-oicJwRlyOoZXVlxmIMaTq7f8pN9QNbdes0q2FXfRsPhfCi8n8JmOZJm5oo1pwDaFbnnD421rVU409M3evFbIqg==", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-16.2.2.tgz", + "integrity": "sha512-CKXRILyErMtUftp+coGcZ38ZwE/Aqq45VMCcRLr2I4OXKrgxIBDXHnBgeX/UMil0S09i2JXaDL3Q+TN8D/cKmg==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1133,13 +1126,12 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-16.1.6.tgz", - "integrity": "sha512-gQmm8izDTPgs+DCWH22kcDmuUp7NyiJgEl18bcr8irXA5N2m2O+JQIr6f3ct42GOs9c0h8QF3L5SzIxcYAAXXw==", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-16.2.2.tgz", + "integrity": "sha512-sS/jSk5VUoShUqINJFvNjVT7JfR5ORYj/+/ZpOYbbIohv/lQfduWnGAycq2wlknbOql2xOR0DoV0s6Xfcy49+g==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "win32" @@ -1149,13 +1141,12 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "16.1.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-16.1.6.tgz", - "integrity": "sha512-NRfO39AIrzBnixKbjuo2YiYhB6o9d8v/ymU9m/Xk8cyVk+k7XylniXkHwjs4s70wedVffc6bQNbufk5v0xEm0A==", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-16.2.2.tgz", + "integrity": "sha512-aHaKceJgdySReT7qeck5oShucxWRiiEuwCGK8HHALe6yZga8uyFpLkPgaRw3kkF04U7ROogL/suYCNt/+CuXGA==", "cpu": [ "x64" ], - 
"license": "MIT", "optional": true, "os": [ "win32" @@ -2823,11 +2814,10 @@ } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.3.tgz", + "integrity": "sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==", "dev": true, - "license": "MIT", "dependencies": { "balanced-match": "^1.0.0" } @@ -3587,11 +3577,10 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", + "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", "dev": true, - "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -4863,11 +4852,10 @@ } }, "node_modules/flatted": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", - "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", - "dev": true, - "license": "ISC" + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz", + "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==", + "dev": true }, "node_modules/for-each": { "version": "0.3.5", @@ -7350,14 +7338,13 @@ } }, "node_modules/next": { - "version": "16.1.6", - "resolved": 
"https://registry.npmjs.org/next/-/next-16.1.6.tgz", - "integrity": "sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw==", - "license": "MIT", + "version": "16.2.2", + "resolved": "https://registry.npmjs.org/next/-/next-16.2.2.tgz", + "integrity": "sha512-i6AJdyVa4oQjyvX/6GeER8dpY/xlIV+4NMv/svykcLtURJSy/WzDnnUk/TM4d0uewFHK7xSQz4TbIwPgjky+3A==", "dependencies": { - "@next/env": "16.1.6", + "@next/env": "16.2.2", "@swc/helpers": "0.5.15", - "baseline-browser-mapping": "^2.8.3", + "baseline-browser-mapping": "^2.9.19", "caniuse-lite": "^1.0.30001579", "postcss": "8.4.31", "styled-jsx": "5.1.6" @@ -7369,15 +7356,15 @@ "node": ">=20.9.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "16.1.6", - "@next/swc-darwin-x64": "16.1.6", - "@next/swc-linux-arm64-gnu": "16.1.6", - "@next/swc-linux-arm64-musl": "16.1.6", - "@next/swc-linux-x64-gnu": "16.1.6", - "@next/swc-linux-x64-musl": "16.1.6", - "@next/swc-win32-arm64-msvc": "16.1.6", - "@next/swc-win32-x64-msvc": "16.1.6", - "sharp": "^0.34.4" + "@next/swc-darwin-arm64": "16.2.2", + "@next/swc-darwin-x64": "16.2.2", + "@next/swc-linux-arm64-gnu": "16.2.2", + "@next/swc-linux-arm64-musl": "16.2.2", + "@next/swc-linux-x64-gnu": "16.2.2", + "@next/swc-linux-x64-musl": "16.2.2", + "@next/swc-win32-arm64-msvc": "16.2.2", + "@next/swc-win32-x64-msvc": "16.2.2", + "sharp": "^0.34.5" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", @@ -7701,11 +7688,10 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", "dev": true, - "license": "MIT", "engines": { "node": 
">=8.6" }, @@ -8973,11 +8959,10 @@ } }, "node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", - "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, - "license": "MIT", "engines": { "node": ">=12" },