Update app.py
app.py CHANGED
@@ -1,26 +1,32 @@
 from fastapi import FastAPI
 from pydantic import BaseModel
 from llama_cpp import Llama
 
 app = FastAPI()
 
 # Load the model
 llm = Llama.from_pretrained(
     repo_id="unsloth/phi-4-GGUF",
     filename="phi-4-Q4_K_M.gguf",
 )
 
 # Define request model
 class ChatRequest(BaseModel):
     system_prompt: str
     query: str
 
 @app.post("/chat-p4q4")
 async def chat(request: ChatRequest):
-
-
-
-
-
-
-
+    try:
+        response = llm.create_chat_completion(
+            messages=[
+                {"role": "system", "content": request.system_prompt},
+                {"role": "user", "content": request.query},
+            ]
+        )
+        return {"response": response}
+    except Exception as e:
+        # Log the error or print it for debugging
+        print("Error during model inference:", e)
+        return {"error": str(e)}
+
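For reference, the new /chat-p4q4 endpoint can be exercised with a short client script. This is a minimal sketch, not part of the commit: it assumes the app is served locally (e.g. via `uvicorn app:app --port 8000`), and the URL and prompt strings are illustrative.

import requests

# Assumed local URL; adjust host/port to wherever the app is actually running.
url = "http://localhost:8000/chat-p4q4"

# Field names match the ChatRequest model in app.py; the values are illustrative.
payload = {
    "system_prompt": "You are a helpful assistant.",
    "query": "Summarize what Phi-4 is in one sentence.",
}

resp = requests.post(url, json=payload)

# On success the endpoint returns {"response": <llama-cpp chat completion dict>};
# on failure it returns {"error": "..."} instead.
print(resp.json())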