from fastapi.responses import JSONResponse
from google import genai

# from app.models.moderation_data import ModerationData
from app.models.moderation_response import ModerationResponse
from app.models.schemas import ModerationData, ModerationRequest
from app.models.standard_response import StandardResponse


def to_json_response(data: StandardResponse) -> JSONResponse:
    """Serialise a StandardResponse into a FastAPI JSONResponse."""
    return JSONResponse(content=data.model_dump(), status_code=data.status)


def moderate_content(request: ModerationRequest) -> ModerationResponse:
    """Run the requested moderation checks and return a structured response."""
    flagged_for = {}
    scores = {}
    corrected_content = None
    original_content = request.content

    # Misinformation check: a reply starting with "False" carries the
    # corrected statement after the prefix.
    if "misinfo" in request.checkFor:
        correct_info = request.identify_misinfo()
        if correct_info.startswith("False"):
            flagged_for["misinfo"] = 1
            corrected_content = correct_info[6:]

    # Normalise the text before classification, then score each requested
    # category and flag the ones above the configured threshold.
    request.correct_typos()
    result = request.classify_moderation()
    result["misinfo"] = flagged_for.get("misinfo", 0)

    for category in request.checkFor:
        scores[category] = result[category]
        if result[category] > request.threshold:
            flagged_for[category] = result[category]

    # Only rewrite the content when something other than misinformation was
    # flagged; misinformation alone is surfaced via corrected_content.
    cleaned_content = request.content
    if flagged_for and list(flagged_for.keys()) != ["misinfo"]:
        cleaned_content = request.cleanse_content()

    payload = ModerationData(
        flagged_for=flagged_for,
        cleaned_content=cleaned_content,
        corrected_content=corrected_content,
        original_content=original_content,
        scores=scores,
    )
    return ModerationResponse(
        error=False, title="Cleaned", status=200, payload=payload
    )
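

# Illustrative wiring (a sketch, not part of this module): how moderate_content
# and to_json_response could be exposed through a FastAPI route. The router,
# path, and endpoint name below are assumptions, as is the premise that
# ModerationResponse is a StandardResponse subclass.
#
# from fastapi import APIRouter
#
# router = APIRouter()
#
# @router.post("/moderate")
# def moderate(request: ModerationRequest) -> JSONResponse:
#     result = moderate_content(request)
#     return to_json_response(result)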