Hugging Face Space (status: runtime error)
Commit: "Update app.py"
File changed: app.py
@@ -1,89 +1,93 @@
|
|
1 |
import os
|
2 |
from datasets import load_dataset
|
3 |
-
from transformers import
|
4 |
-
TrOCRProcessor,
|
5 |
-
VisionEncoderDecoderModel,
|
6 |
-
Seq2SeqTrainer,
|
7 |
-
Seq2SeqTrainingArguments,
|
8 |
-
default_data_collator,
|
9 |
-
)
|
10 |
|
11 |
# Check if model already exists
|
12 |
if os.path.exists("trained_model"):
|
13 |
print("β
Model already exists. Skipping training.")
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import sys

from datasets import load_dataset
from transformers import (
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    default_data_collator,
)

# Check if model already exists: skip the expensive fine-tuning run entirely
# when a previous run already saved a model to disk.
if os.path.exists("trained_model"):
    print("β Model already exists. Skipping training.")
    # sys.exit() instead of the bare exit() builtin: exit() is injected by the
    # `site` module for interactive use and may be absent (e.g. `python -S`).
    sys.exit()

print("π Starting training...")
|
# Load only 100 samples for faster CPU training.
ds = load_dataset("Azu/Handwritten-Mathematical-Expression-Convert-LaTeX", split="train[:100]")

# DEBUG: Inspect a few labels to see which raw shapes (str vs dict) occur.
print("\nπ Sample labels from dataset:")
# Guard against the split holding fewer than 5 rows — a bare range(5) would
# index past the end and raise.
for i in range(min(5, len(ds))):
    print(f"{i}: {ds[i]['label']} (type: {type(ds[i]['label'])})")

# Processor bundles the image feature extractor and the tokenizer.
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
|
21 |
+
|
22 |
+
# Safely extract label string from possible dict or str.
def safe_get_label(example):
    """Return the label of *example* as a plain string, or None.

    The dataset is inconsistent: a row's "label" may be a raw string or a
    dict carrying the string under its "latex" key. Anything else (missing
    key, unexpected type, dict without "latex") yields None.
    """
    label = example.get("label")
    if isinstance(label, str):
        return label
    if isinstance(label, dict):
        return label.get("latex")
    return None
|
31 |
+
|
32 |
+
def preprocess(example):
    """Convert one dataset row into model-ready features.

    Returns a dict with:
      pixel_values -- processed image tensor (first element of the batch dim)
      labels       -- token ids of the LaTeX label, padded/truncated to 128

    Rows with a missing or invalid label get None in both columns so the
    subsequent filter step can drop them.
    """
    label_str = safe_get_label(example)
    if not isinstance(label_str, str) or label_str.strip() == "":
        # BUG FIX: the original returned {} here, so invalid rows produced a
        # different column set than valid rows — datasets.map requires every
        # row to emit the same columns and errors out otherwise. Emit the
        # same columns with None values instead; the filter below removes them.
        return {"pixel_values": None, "labels": None}

    # Convert image to RGB — TrOCR's image processor expects 3-channel input.
    img = example["image"].convert("RGB")
    inputs = processor(images=img, return_tensors="pt")

    # Tokenize label to fixed-length ids for seq2seq training.
    labels = processor.tokenizer(
        label_str,
        truncation=True,
        padding="max_length",
        max_length=128,
    ).input_ids

    return {
        "pixel_values": inputs.pixel_values[0],
        "labels": labels,
    }
|
53 |
+
|
54 |
+
# Preprocess and filter: run every row through preprocess (dropping the raw
# columns), then discard rows whose labels could not be produced.
ds = ds.map(preprocess, remove_columns=["image", "label"])
ds = ds.filter(lambda row: "labels" in row and row["labels"] is not None)

# Check number of remaining examples — training on zero rows would only fail
# later inside the Trainer with a less helpful message.
print(f"β Total usable training samples: {len(ds)}")
if not len(ds):
    raise RuntimeError("β No usable training samples after preprocessing.")
|
62 |
+
|
63 |
+
# Model setup
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
# The decoder needs explicit start/pad token ids for generation and for
# masking padding positions out of the loss.
model.config.decoder_start_token_id = processor.tokenizer.cls_token_id
model.config.pad_token_id = processor.tokenizer.pad_token_id

# Small batch, single epoch: sized for a CPU-only Space.
training_args = Seq2SeqTrainingArguments(
    output_dir="trained_model",
    per_device_train_batch_size=2,
    num_train_epochs=1,
    learning_rate=5e-5,
    logging_steps=10,
    save_steps=500,
    fp16=False,  # no GPU, so no mixed precision
    push_to_hub=False,
)
|
78 |
+
|
79 |
+
trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=ds,
    tokenizer=processor.tokenizer,
    data_collator=default_data_collator,
)

trainer.train()
print("β Training completed")

# Save model: persist both the weights and the processor (image feature
# extractor + tokenizer) so inference can reload everything from one directory.
model.save_pretrained("trained_model")
processor.save_pretrained("trained_model")
print("β Model saved to trained_model/")
|