Pujan-Dev committed on
Commit
ccc7fea
·
1 Parent(s): 02706b2

feat: added img_classifier

Browse files
features/image_classifier/routes.py CHANGED
@@ -1,26 +1,25 @@
1
  from slowapi import Limiter
2
  from config import ACCESS_RATE
3
- from fastapi import APIRouter, File, Request, Depends, HTTPException, UploadFile
4
  from fastapi.security import HTTPBearer
5
- from slowapi import Limiter
6
  from slowapi.util import get_remote_address
7
  from .controller import Classify_Image_router
 
8
  router = APIRouter()
9
  limiter = Limiter(key_func=get_remote_address)
10
  security = HTTPBearer()
11
 
 
12
  @router.post("/analyse")
13
  @limiter.limit(ACCESS_RATE)
14
  async def analyse(
15
- request: Request,
16
- file: UploadFile = File(...),
17
- token: str = Depends(security)
18
  ):
19
  result = await Classify_Image_router(file) # await the async function
20
  return result
21
 
 
22
  @router.get("/health")
23
  @limiter.limit(ACCESS_RATE)
24
  def health(request: Request):
25
  return {"status": "ok"}
26
-
 
1
  from slowapi import Limiter
2
  from config import ACCESS_RATE
3
+ from fastapi import APIRouter, File, Request, Depends, UploadFile
4
  from fastapi.security import HTTPBearer
 
5
  from slowapi.util import get_remote_address
6
  from .controller import Classify_Image_router
7
+
8
# Shared route registry for the image-classifier feature.
router = APIRouter()
# Rate-limit per client, keyed on the remote address of the request.
limiter = Limiter(key_func=get_remote_address)
# HTTPBearer extracts the `Authorization: Bearer <token>` header for dependencies.
security = HTTPBearer()
11
 
12
+
13
@router.post("/analyse")
@limiter.limit(ACCESS_RATE)
async def analyse(
    request: Request, file: UploadFile = File(...), token: str = Depends(security)
):
    """Classify an uploaded image via the image-classifier controller.

    Args:
        request: Incoming request object; required positionally by the
            slowapi ``@limiter.limit`` decorator even though unused here.
        file: The uploaded image to classify.
        token: Bearer credentials extracted by ``HTTPBearer``.
            NOTE(review): the token is extracted but never validated in this
            handler — confirm verification happens elsewhere (middleware /
            dependency) or add an explicit check.

    Returns:
        The classification result produced by ``Classify_Image_router``.
    """
    result = await Classify_Image_router(file)  # await the async function
    return result
20
 
21
+
22
@router.get("/health")
@limiter.limit(ACCESS_RATE)
def health(request: Request):
    """Liveness probe.

    Args:
        request: Required positionally by the slowapi ``@limiter.limit``
            decorator; otherwise unused.

    Returns:
        A static ``{"status": "ok"}`` payload indicating the service is up.
    """
    return {"status": "ok"}
 
features/nepali_text_classifier/inferencer.py CHANGED
@@ -7,7 +7,9 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
7
 
8
  def classify_text(text: str):
9
  model, tokenizer = get_model_tokenizer()
10
- inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=512)
 
 
11
  inputs = {k: v.to(device) for k, v in inputs.items()}
12
 
13
  with torch.no_grad():
@@ -17,7 +19,7 @@ def classify_text(text: str):
17
  pred = torch.argmax(probs, dim=1).item()
18
  prob_percent = probs[0][pred].item() * 100
19
 
20
- return {"label": "Human" if pred == 0 else "AI", "confidence": round(prob_percent, 2)}
21
-
22
-
23
-
 
7
 
8
  def classify_text(text: str):
9
  model, tokenizer = get_model_tokenizer()
10
+ inputs = tokenizer(
11
+ text, return_tensors="pt", padding=True, truncation=True, max_length=512
12
+ )
13
  inputs = {k: v.to(device) for k, v in inputs.items()}
14
 
15
  with torch.no_grad():
 
19
  pred = torch.argmax(probs, dim=1).item()
20
  prob_percent = probs[0][pred].item() * 100
21
 
22
+ return {
23
+ "label": "Human" if pred == 0 else "AI",
24
+ "confidence": round(prob_percent, 2),
25
+ }