kwanpon committed on
Commit 17c19fa · verified · 1 Parent(s): 7bb85d0

Update app.py

Files changed (1)
  1. app.py +7 -22
app.py CHANGED
@@ -1,5 +1,5 @@
-# import gradio as gr
-# from transformers import pipeline
+import torch
+import gradio as gr
 import pandas as pd
 from datasets import Dataset
 from transformers import (
@@ -8,8 +8,7 @@ from transformers import (
     TrainingArguments,
     Trainer
 )
-import torch
-import gradio as gr
+

 # load dataset
 df = pd.read_csv("dataset.csv")
@@ -34,7 +33,6 @@ training_args = TrainingArguments(
     logging_steps=10,
     save_strategy="no",
     learning_rate=2e-5,
-    # evaluation_strategy="no",
 )

 # train
@@ -44,33 +42,20 @@ trainer = Trainer(
     train_dataset=tokenized_dataset,
     tokenizer=tokenizer,
 )
-
 trainer.train()

-
-# inference function for Gradio
+# inference function for gradio
 def classify(text):
     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
         outputs = model(**inputs)
     probs = torch.softmax(outputs.logits, dim=1).numpy()[0]
     return {
-        "ไม่ใช่การจ้างงานรถขนส่ง": float(probs[0]),
-        "เป็นการจ้างงานรถขนส่ง": float(probs[1]),
+        "No": float(probs[0]),
+        "Yes": float(probs[1]),
     }

-# classifier = pipeline(
-#     "zero-shot-classification",
-#     model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
-# )
-
-# def classify(text, labels):
-#     labels = [label.strip() for label in labels.split(",")]
-#     result = classifier(text, candidate_labels=labels)
-#     return {label: round(score, 4) for label, score in zip(result["labels"], result["scores"])}
-
-
-# Gradio interface
+# gradio interface
 demo = gr.Interface(
     fn=classify,
     inputs=gr.Textbox(lines=3, label="ข้อความ"),
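
The hunks above skip the middle of app.py (roughly old lines 16-33: model and tokenizer loading plus dataset tokenization). For context, a minimal sketch of what that section typically looks like, assuming an AutoTokenizer/AutoModelForSequenceClassification pair with two labels and a "text" column in dataset.csv; the checkpoint name and column name are placeholders, not taken from this commit.

# Sketch only: the checkpoint and the "text" column are assumptions,
# since the commit does not show this part of the file.
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "distilbert-base-multilingual-cased"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

# df is the DataFrame read from dataset.csv earlier in the file
dataset = Dataset.from_pandas(df)

def tokenize(batch):
    # assumes the CSV provides the input text in a "text" column
    return tokenizer(batch["text"], truncation=True, padding=True)

tokenized_dataset = dataset.map(tokenize, batched=True)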
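The last hunk is cut off after the inputs= line, so the rest of the Gradio wiring is not visible. The commit swaps the Thai output labels "ไม่ใช่การจ้างงานรถขนส่ง" ("not a transport-vehicle hire job") and "เป็นการจ้างงานรถขนส่ง" ("is a transport-vehicle hire job") for "No"/"Yes"; the Textbox label "ข้อความ" means "text". Below is a minimal sketch of how a classify function returning a label-to-probability dict is usually connected to a gr.Label output and launched; the outputs, title, and launch call are assumptions, not the author's code.

# Sketch only: outputs, title, and the launch call are assumed, since the
# diff ends before they appear.
demo = gr.Interface(
    fn=classify,
    inputs=gr.Textbox(lines=3, label="ข้อความ"),  # "ข้อความ" = "text"
    outputs=gr.Label(num_top_classes=2),          # shows the "No"/"Yes" probabilities
    title="Transport-hire classifier",            # placeholder title
)

if __name__ == "__main__":
    demo.launch()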