from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Allow cross-origin requests from any frontend. Credentials are disabled,
# which keeps the wildcard origin valid under the CORS spec.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=False,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the tokenizer and model once at startup so every request reuses the
# same weights. The 7B checkpoint loads in fp32 here (roughly 28 GB of RAM);
# passing torch_dtype=torch.float16 to from_pretrained halves that on GPU.
model_name = "togethercomputer/RedPajama-INCITE-7B-Base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


class PromptRequest(BaseModel):
    prompt: str


@app.post("/api/generate-story")
async def generate_story(req: PromptRequest):
    prompt = req.prompt.strip()
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt must not be empty")
    # Truncate over-long prompts to the model's context window.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    # Nucleus sampling with a mild repetition penalty keeps the continuation
    # varied without degenerating into loops.
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        top_p=0.9,
        temperature=0.85,
        repetition_penalty=1.2,
    )
    story = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"story": story}