# NOTE(review): the following lines are page-scrape residue (Space status,
# file size, commit hashes, a line ruler) accidentally captured with the
# source — commented out so the module parses; safe to delete.
# Spaces:
# Running
# Running
# File size: 775 Bytes
# 80c3a84 6c0215b d5939d1 d15392d ad67d60 80c3a84 e9f3a9a 80c3a84 a0b62ab 80c3a84 ad67d60 80c3a84 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 |
import re

from fastapi import FastAPI
from llama_cpp import Llama
from pydantic import BaseModel
# FastAPI application instance; routes are registered on it below.
app = FastAPI()
# Load the GGUF-quantized Qwen3-0.6B model at import time so the first
# request does not pay the load cost.
# NOTE(review): from_pretrained downloads from the Hugging Face Hub on a
# cold cache — confirm network access at startup is acceptable here.
qwen3_gguf_llm = Llama.from_pretrained(
    repo_id="unsloth/Qwen3-0.6B-GGUF",
    filename="Qwen3-0.6B-BF16.gguf",
)
class PromptRequest(BaseModel):
    """Request body for the generation endpoint."""
    prompt: str  # raw user text, forwarded verbatim as the chat "user" message
class GenerateResponse(BaseModel):
    """Response body: the model's answer plus any separated reasoning text."""
    # NOTE(review): declared but never populated by the endpoint in this
    # file — presumably intended for Qwen3 "thinking" output; verify.
    reasoning_content: str = ""
    generated_text: str  # the assistant message content returned to the caller
@app.post("/generate/qwen3-0.6b-gguf", response_model=GenerateResponse)
async def generate_qwen3_gguf_endpoint(request: PromptRequest):
    """Generate a chat completion from the GGUF Qwen3-0.6B model.

    Sends the prompt as a single user message, then splits any leading
    ``<think>...</think>`` block Qwen3 emits into ``reasoning_content`` so
    ``generated_text`` carries only the final answer.
    """
    messages = [{"role": "user", "content": request.prompt}]
    response = qwen3_gguf_llm.create_chat_completion(messages=messages, max_tokens=256)
    # The chat-completion schema allows content to be None; coerce to ""
    # so the str-typed response model cannot fail validation.
    content = response['choices'][0]['message']['content'] or ""
    reasoning, answer = _split_reasoning(content)
    return GenerateResponse(reasoning_content=reasoning, generated_text=answer)


def _split_reasoning(text: str) -> tuple[str, str]:
    """Split a leading ``<think>...</think>`` block from *text*.

    Returns ``(reasoning, answer)``; ``reasoning`` is ``""`` and ``answer``
    is *text* unchanged when no think block is present.
    """
    match = re.match(r"\s*<think>(.*?)</think>\s*", text, flags=re.DOTALL)
    if match:
        return match.group(1).strip(), text[match.end():]
    return "", text
|