MantshaSaleem committed
Commit f3d19ef · verified · 1 Parent(s): bcb48d2

Create app.py

Files changed (1)
  app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from transformers import pipeline
+
+ app = FastAPI(
+     title="Hate Speech Detection API",
+     description="A simple API to classify text using the unitary/toxic-bert model.",
+     version="1.0.0"
+ )
+
+ # Load the toxicity classifier on CPU (device=-1)
+ classifier = pipeline(
+     "text-classification",
+     model="unitary/toxic-bert",
+     tokenizer="unitary/toxic-bert",
+     device=-1
+ )
+
+ # Pydantic model describing the request body
+ class TextInput(BaseModel):
+     text: str
+
+
+ @app.get("/")
+ def get_root():
+     return {"message": "Welcome to the Hate Speech Detection API!"}
+
+
+ @app.post("/predict")
+ def predict_toxicity(input: TextInput):
+     # The pipeline returns a list of {label, score} dicts; take the top result
+     classifier_result = classifier(input.text)
+     prediction = classifier_result[0]
+
+     # Treat the model's score as the probability of toxicity
+     label = "Toxic" if prediction["score"] > 0.5 else "Non-Toxic"
+     return {
+         "label": label,
+         "toxic-score": prediction["score"],
+         "non-toxic-score": 1 - prediction["score"],
+     }
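
A minimal way to try the committed app locally is to serve it with uvicorn (uvicorn app:app --port 8000) and then call the /predict endpoint. The client sketch below is illustrative and not part of the commit; it assumes the server is reachable at http://127.0.0.1:8000.

import requests

# POST a text sample to the /predict endpoint defined in app.py
resp = requests.post(
    "http://127.0.0.1:8000/predict",
    json={"text": "You are a wonderful person."},
)
# Expected shape: {"label": ..., "toxic-score": ..., "non-toxic-score": ...}
print(resp.json())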