Commit c9ad1bf
Parent(s): 848842c
Update app.py
app.py CHANGED
@@ -13,14 +13,15 @@ tokenizer=BertTokenizer.from_pretrained("./")
 
 def predict(text=None) -> dict:
     model.eval()
-    inputs = tokenizer(text, return_tensors="pt")
+    inputs = tokenizer(str(text), return_tensors="pt")
     input_ids = inputs["input_ids"].to(device)
     attention_mask = inputs["attention_mask"].to(device)
     model.to(device)
     token_logits = model(input_ids, attention_mask=attention_mask).logits
+    print(mask_token_index)
     mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
     mask_token_logits = token_logits[0, mask_token_index, :]
-    print(mask_token_logits
+    print(mask_token_logits)
     top_5_tokens = torch.topk(mask_token_logits, NUM_CLASSES, dim=1).indices[0].tolist()
     score = torch.nn.functional.softmax(mask_token_logits)[0]
     top_5_score = torch.topk(score, NUM_CLASSES).values.tolist()
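For reference, below is a minimal, self-contained sketch of the predict() flow as it stands after this commit. Everything outside the hunk is assumed rather than taken from app.py: the BertForMaskedLM model class, the NUM_CLASSES value, the device setup, and the final return statement are illustrative guesses, the checkpoint path "./" simply mirrors the hunk header, and the debug print calls added by the commit are omitted.

import torch
from transformers import BertForMaskedLM, BertTokenizer

# Assumed setup; app.py defines these elsewhere (values here are illustrative).
NUM_CLASSES = 5
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = BertTokenizer.from_pretrained("./")   # path taken from the hunk header
model = BertForMaskedLM.from_pretrained("./")     # assumed model class

def predict(text=None) -> dict:
    model.eval()
    inputs = tokenizer(str(text), return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    attention_mask = inputs["attention_mask"].to(device)
    model.to(device)
    token_logits = model(input_ids, attention_mask=attention_mask).logits

    # Positions of [MASK] in the input; the logits at those positions rank the vocabulary.
    mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
    mask_token_logits = token_logits[0, mask_token_index, :]

    # Top NUM_CLASSES candidate token ids and their softmax scores for the first mask.
    top_5_tokens = torch.topk(mask_token_logits, NUM_CLASSES, dim=1).indices[0].tolist()
    score = torch.nn.functional.softmax(mask_token_logits, dim=-1)[0]
    top_5_score = torch.topk(score, NUM_CLASSES).values.tolist()

    # Assumed return shape: decoded candidate token -> probability.
    return {tokenizer.decode([tok]).strip(): s for tok, s in zip(top_5_tokens, top_5_score)}

The sketch assumes the input actually contains the tokenizer's mask token, e.g. predict("The capital of France is [MASK]."); if no mask token is present, mask_token_index is empty and the subsequent indexing fails.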