# Previous version of this app, kept for reference: text generation with
# microsoft/phi-2 via the transformers pipeline. Superseded by the T5
# implementation below.
'''
from fastapi import FastAPI, Query
from pydantic import BaseModel
import cloudscraper
from bs4 import BeautifulSoup
from transformers import pipeline
import torch
import re
import os

# os.environ["HF_HOME"] = "/home/user/huggingface"
# os.environ["TRANSFORMERS_CACHE"] = "/home/user/huggingface"

app = FastAPI()

class ThreadResponse(BaseModel):
    question: str
    replies: list[str]

def clean_text(text: str) -> str:
    text = text.strip()
    text = re.sub(r"\b\d+\s*likes?,?\s*\d*\s*replies?$", "", text, flags=re.IGNORECASE).strip()
    return text

@app.get("/scrape", response_model=ThreadResponse)
def scrape(url: str = Query(...)):
    scraper = cloudscraper.create_scraper()
    response = scraper.get(url)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        comment_containers = soup.find_all('div', class_='post__content')
        if comment_containers:
            question = clean_text(comment_containers[0].get_text(strip=True, separator="\n"))
            replies = [clean_text(comment.get_text(strip=True, separator="\n")) for comment in comment_containers[1:]]
            return ThreadResponse(question=question, replies=replies)
    return ThreadResponse(question="", replies=[])

MODEL_NAME = "microsoft/phi-2"

# Load the text-generation pipeline once at startup.
text_generator = pipeline(
    "text-generation",
    model=MODEL_NAME,
    trust_remote_code=True,
    device=0 if torch.cuda.is_available() else -1,  # GPU if available, else CPU
)

class PromptRequest(BaseModel):
    prompt: str

@app.post("/generate")
async def generate_text(request: PromptRequest):
    # The pipeline expects a plain string prompt, so pass request.prompt directly.
    outputs = text_generator(
        request.prompt,
        max_new_tokens=512,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        num_return_sequences=1,
    )
    generated_text = outputs[0]['generated_text']

    # Optional: split reasoning from content if the model emits a </think> tag.
    if "</think>" in generated_text:
        reasoning_content = generated_text.split("</think>")[0].strip()
        content = generated_text.split("</think>")[1].strip()
    else:
        reasoning_content = ""
        content = generated_text.strip()

    return {
        "reasoning_content": reasoning_content,
        "generated_text": content,
    }
'''
from fastapi import FastAPI, Query
from pydantic import BaseModel
import cloudscraper
from bs4 import BeautifulSoup
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import re

app = FastAPI()
# --- Data Models ---
class ThreadResponse(BaseModel):
    question: str
    replies: list[str]

class PromptRequest(BaseModel):
    prompt: str

class GenerateResponse(BaseModel):
    reasoning_content: str
    generated_text: str
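# Example serialized forms of the two response models (illustrative values only):
#   ThreadResponse   -> {"question": "How do I ...?", "replies": ["Try ...", "Also ..."]}
#   GenerateResponse -> {"reasoning_content": "", "generated_text": "Wie geht es dir?"}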
# --- Utility Functions ---
def clean_text(text: str) -> str:
    """Trim whitespace and strip a trailing "<n> likes, <m> replies" footer."""
    text = text.strip()
    # Note: the original pattern used "replies?", which matches "replies" but not
    # "reply"; "(?:reply|replies)" covers both the singular and plural footers.
    text = re.sub(r"\b\d+\s*likes?,?\s*\d*\s*(?:reply|replies)$", "", text, flags=re.IGNORECASE).strip()
    return text
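# Quick illustration of what clean_text removes (made-up inputs, not from a real page):
#   clean_text("Great answer!\n12 likes, 3 replies")  ->  "Great answer!"
#   clean_text("No engagement footer here")           ->  "No engagement footer here"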
# --- Scraping Endpoint ---
@app.get("/scrape", response_model=ThreadResponse)
def scrape(url: str = Query(...)):
    """
    Scrape a forum thread page: the first div.post__content is treated as the
    question, every following one as a reply.
    """
    scraper = cloudscraper.create_scraper()
    response = scraper.get(url)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        comment_containers = soup.find_all('div', class_='post__content')
        if comment_containers:
            question = clean_text(comment_containers[0].get_text(strip=True, separator="\n"))
            replies = [clean_text(comment.get_text(strip=True, separator="\n")) for comment in comment_containers[1:]]
            return ThreadResponse(question=question, replies=replies)
    return ThreadResponse(question="", replies=[])
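# Example request (assumes the server is reachable on port 7860, the usual
# Hugging Face Spaces port; the thread URL is a placeholder):
#   curl "http://localhost:7860/scrape?url=https://example.com/some-thread"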
# --- Load T5-Large Model and Tokenizer (once, at startup) ---
tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-large")
model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-large")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# --- Core Generation Function Using T5 Prompting ---
def generate_text_with_t5(prompt: str) -> tuple[str, str]:
    """
    Accepts a prompt string that includes the T5 task prefix (e.g. "summarize: ..."),
    generates output text, and optionally extracts reasoning if present.
    Returns a tuple (reasoning_content, generated_text).
    """
    # Tokenize the input prompt, truncating to at most 512 tokens.
    inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=512, truncation=True).to(device)

    # Generate output tokens with beam search for quality.
    outputs = model.generate(
        inputs,
        max_length=512,
        num_beams=4,
        repetition_penalty=2.5,
        length_penalty=1.0,
        early_stopping=True,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Optional: split off reasoning if the prompt/model uses a </think> separator.
    if "</think>" in generated_text:
        reasoning_content, content = generated_text.split("</think>", 1)
        reasoning_content = reasoning_content.strip()
        content = content.strip()
    else:
        reasoning_content = ""
        content = generated_text.strip()

    return reasoning_content, content
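# Direct-call sketch (the prompt text is illustrative; any T5 task prefix works
# the same way):
#   reasoning, summary = generate_text_with_t5("summarize: " + article_text)
#   # reasoning is "" unless the output contains a </think> separator.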
# --- /generate Endpoint Using T5 Prompting ---
@app.post("/generate", response_model=GenerateResponse)
async def generate(request: PromptRequest):
    """
    Accepts a prompt string from the frontend, which should include the T5 task prefix,
    e.g. "summarize: {text to summarize}" or "translate English to German: {text}".
    Returns generated text and optional reasoning content.
    """
    reasoning_content, generated_text = generate_text_with_t5(request.prompt)
    return GenerateResponse(reasoning_content=reasoning_content, generated_text=generated_text)
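if __name__ == "__main__":
    # Local run sketch (assumes uvicorn is installed and this file is app.py; on
    # Hugging Face Spaces the runtime usually launches the server itself).
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request once the server is up (prompt text is illustrative):
#   curl -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "translate English to German: How are you?"}'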