|
from fastapi import APIRouter, HTTPException |
|
from typing import Dict, Any |
|
import os |
|
import time |
|
from tasks.createBenchConfigFile import CreateBenchConfigTask |
|
from tasks.createBench import CreateBenchTask |
|
|
|
router = APIRouter(tags=["benchmark"])

# In-memory, per-process registries mapping session_id -> task instance.
# They let the polling endpoints (/config-logs, /benchmark-logs) retrieve a
# task started by an earlier request. NOTE(review): entries are never removed,
# so these grow for the lifetime of the process — confirm whether cleanup is
# handled elsewhere.
active_bench_tasks = {}

active_config_tasks = {}

# NOTE(review): this module-level dict appears unused — the endpoints below
# read `router.session_files` (presumably an attribute attached to the router
# by the upload module), not this name. Confirm before relying on it.
session_files = {}
|
|
|
@router.post("/generate-benchmark")
async def generate_benchmark(data: Dict[str, Any]):
    """
    Generate a benchmark configuration and run the ingestion process.

    Args:
        data: Request body; expected to contain a "session_id" key that was
            previously registered in ``router.session_files`` (mapping
            session_id -> uploaded file path).

    Returns:
        On success: ``{"status": "running", "config_path": ..., "logs": [...]}``.
        On failure: ``{"status": "error", "error": ..., "logs": [...]}``.
    """
    session_id = data.get("session_id")

    # NOTE(review): debug prints to stdout; consider the logging module.
    print(f"DEBUG: Session ID reçu: {session_id}")
    print(f"DEBUG: Session files disponibles: {list(router.session_files.keys())}")

    if not session_id or session_id not in router.session_files:
        # Fix: include "status" so every response from this endpoint shares a
        # consistent shape; the "error" key is kept for backward compatibility
        # with existing clients.
        return {"status": "error", "error": "Invalid or missing session ID"}

    file_path = router.session_files[session_id]
    all_logs = []

    try:
        # Build the benchmark configuration task for this session.
        config_task = CreateBenchConfigTask(session_uid=session_id)

        # Register it so /config-logs/{session_id} can poll its progress.
        active_config_tasks[session_id] = config_task

        # NOTE(review): run() is called synchronously inside an async handler;
        # if it blocks, it stalls the event loop — confirm run() is
        # non-blocking or offload it (e.g. run_in_executor / a thread).
        config_path = config_task.run(file_path=file_path)

        # Collect whatever the task logged so far for the response payload.
        all_logs.extend(config_task.get_logs())

        return {
            "status": "running",
            "config_path": config_path,
            "logs": all_logs
        }
    except Exception as e:
        # Surface the failure to the client together with any logs collected
        # before the error occurred.
        return {
            "status": "error",
            "error": str(e),
            "logs": all_logs
        }
|
|
|
@router.get("/config-logs/{session_id}")
async def get_config_logs(session_id: str):
    """
    Get the logs for a running configuration task.

    Side effect: the first time the configuration task is observed as
    completed (and no benchmark task exists yet for this session), this
    endpoint starts benchmark creation (CreateBenchTask) and registers it in
    ``active_bench_tasks`` so /benchmark-logs/{session_id} can poll it.

    Args:
        session_id: Session ID for the task

    Returns:
        Dictionary with logs and completion status

    Raises:
        HTTPException: 404 if no configuration task exists for session_id.
    """
    if session_id not in active_config_tasks:
        raise HTTPException(status_code=404, detail="Configuration task not found")

    config_task = active_config_tasks[session_id]
    logs = config_task.get_logs()
    is_completed = config_task.is_task_completed()

    # Chain the benchmark step: start it at most once per session, on the
    # first poll that sees the config task completed.
    if is_completed and session_id not in active_bench_tasks:
        try:
            # NOTE(review): path is hard-coded rather than reusing the
            # config_path returned by generate_benchmark — confirm the config
            # task always writes uploaded_files/<session_id>/config.yml.
            config_path_str = f"uploaded_files/{session_id}/config.yml"
            bench_task = CreateBenchTask(session_uid=session_id, config_path=config_path_str)

            # Register before run() so pollers can find the task immediately;
            # the membership check above also guarantees start-once semantics.
            active_bench_tasks[session_id] = bench_task

            logs.append("[INFO] Configuration file generated, starting benchmark creation")

            # NOTE(review): run() is invoked synchronously in an async
            # handler; if it blocks, the event loop stalls — confirm it
            # spawns its own worker.
            bench_task.run()
        except Exception as bench_error:
            # Report the failure via the polled logs instead of failing the
            # request, so the client keeps receiving log updates.
            error_msg = f"Error starting benchmark creation: {str(bench_error)}"
            logs.append(f"[ERROR] {error_msg}")

    return {
        "logs": logs,
        "is_completed": is_completed
    }
|
|
|
@router.get("/benchmark-logs/{session_id}")
async def get_benchmark_logs(session_id: str):
    """
    Return the accumulated logs of a benchmark task and its completion flag.

    Args:
        session_id: Identifier of the session whose benchmark task is polled.

    Returns:
        Dictionary with "logs" (list of log lines) and "is_completed" (bool).

    Raises:
        HTTPException: 404 when no benchmark task exists for the session.
    """
    # Guard clause: unknown sessions get a 404, mirroring /config-logs.
    if session_id not in active_bench_tasks:
        raise HTTPException(status_code=404, detail="Benchmark task not found")

    task = active_bench_tasks[session_id]
    return {
        "logs": task.get_logs(),
        "is_completed": task.is_task_completed(),
    }