Spaces: prithivMLmods/core-OCR · Running on Zero

Update app.py

app.py CHANGED
@@ -66,8 +66,8 @@ model_g = Qwen2_5_VLForConditionalGeneration.from_pretrained(
 ).to(device).eval()
 #-----------------------------subfolder-----------------------------#

-# Load Perseus-Doc-vl-
-MODEL_ID_O = "prithivMLmods/Perseus-Doc-vl-
+# Load Perseus-Doc-vl-0712
+MODEL_ID_O = "prithivMLmods/Perseus-Doc-vl-0712"
 processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
 model_o = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_O,
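For orientation, a minimal standalone sketch of how the newly loaded processor_o / model_o pair can be queried with the standard transformers chat-template flow, reusing processor_o, model_o, and device defined above; the image path, prompt, and max_new_tokens are placeholders, not values taken from app.py.

from PIL import Image

image = Image.open("sample_page.png").convert("RGB")   # placeholder input image
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "OCR this page and return markdown."},
    ],
}]

# Build the chat prompt, then batch the text and image through the processor.
prompt = processor_o.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor_o(text=[prompt], images=[image], return_tensors="pt", padding=True).to(device)

# Generate, then decode only the newly produced tokens (prompt tokens trimmed off).
output_ids = model_o.generate(**inputs, max_new_tokens=1024)
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)]
print(processor_o.batch_decode(trimmed, skip_special_tokens=True)[0])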
@@ -116,7 +116,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "MonkeyOCR-Recognition":
         processor = processor_g
         model = model_g
-    elif model_name == "Perseus-Doc-vl-
+    elif model_name == "Perseus-Doc-vl-0712":
         processor = processor_o
         model = model_o
     else:
@@ -174,7 +174,7 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "MonkeyOCR-Recognition":
         processor = processor_g
         model = model_g
-    elif model_name == "Perseus-Doc-vl-
+    elif model_name == "Perseus-Doc-vl-0712":
         processor = processor_o
         model = model_o
     else:
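Both generate_image and generate_video repeat the same name-to-model dispatch. As an illustration only (not code from app.py), the elif chains are equivalent to a lookup table keyed by the Radio choices; only the handles visible in these hunks are filled in, and resolve_model is a hypothetical helper name.

# Illustration only; not part of app.py.
MODEL_REGISTRY = {
    "MonkeyOCR-Recognition": (processor_g, model_g),
    "Perseus-Doc-vl-0712": (processor_o, model_o),
    # docscopeOCR-7B-050425-exp / coreOCR-7B-050325-preview handles omitted here
}

def resolve_model(model_name: str):
    # Mirrors the elif chains above: unknown names fall through to an error.
    if model_name not in MODEL_REGISTRY:
        raise ValueError(f"Unknown model: {model_name}")
    return MODEL_REGISTRY[model_name]

processor, model = resolve_model("Perseus-Doc-vl-0712")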
@@ -290,14 +290,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         markdown_output = gr.Markdown(label="(Result.Md)")

         model_choice = gr.Radio(
-            choices=["docscopeOCR-7B-050425-exp", "MonkeyOCR-Recognition", "coreOCR-7B-050325-preview", "Perseus-Doc-vl-
+            choices=["docscopeOCR-7B-050425-exp", "MonkeyOCR-Recognition", "coreOCR-7B-050325-preview", "Perseus-Doc-vl-0712"],
             label="Select Model",
             value="docscopeOCR-7B-050425-exp"
         )
         gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/core-OCR/discussions)")
         gr.Markdown("> [docscopeOCR-7B-050425-exp](https://huggingface.co/prithivMLmods/docscopeOCR-7B-050425-exp): The docscopeOCR-7B-050425-exp model is a fine-tuned version of Qwen2.5-VL-7B-Instruct, optimized for Document-Level Optical Character Recognition (OCR), long-context vision-language understanding, and accurate image-to-text conversion with mathematical LaTeX formatting.")
         gr.Markdown("> [MonkeyOCR](https://huggingface.co/echo840/MonkeyOCR): MonkeyOCR adopts a Structure-Recognition-Relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
-        gr.Markdown("> [Perseus-Doc-vl-
+        gr.Markdown("> [Perseus-Doc-vl-0712](https://huggingface.co/prithivMLmods/Perseus-Doc-vl-0712): The Perseus-Doc-vl-0712 model is a fine-tuned version of Qwen2.5-VL-7B-Instruct, optimized for Document Retrieval, Content Extraction, and Analysis Recognition. Built on top of the Qwen2.5-VL architecture, this model enhances document comprehension capabilities")
         gr.Markdown("> [coreOCR-7B-050325-preview](https://huggingface.co/prithivMLmods/coreOCR-7B-050325-preview): The coreOCR-7B-050325-preview model is a fine-tuned version of Qwen2-VL-7B, optimized for Document-Level Optical Character Recognition (OCR), long-context vision-language understanding, and accurate image-to-text conversion with mathematical LaTeX formatting.")
         gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")

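The updated Radio choices above are what route a request into that dispatch. A simplified Gradio sketch of the wiring follows; this is not the Space's actual layout, and demo_fn merely stands in for generate_image.

import gradio as gr

def demo_fn(model_name, text, image):
    # In app.py this is where the (processor, model) pair is resolved and
    # generation runs; here the inputs are just echoed back.
    return f"**{model_name}** received: {text}"

with gr.Blocks() as sketch:
    model_choice = gr.Radio(
        choices=["docscopeOCR-7B-050425-exp", "MonkeyOCR-Recognition",
                 "coreOCR-7B-050325-preview", "Perseus-Doc-vl-0712"],
        value="docscopeOCR-7B-050425-exp",
        label="Select Model",
    )
    query = gr.Textbox(label="Query Input")
    image = gr.Image(type="pil", label="Image")
    result = gr.Markdown(label="(Result.Md)")
    gr.Button("Run").click(demo_fn, inputs=[model_choice, query, image], outputs=result)

if __name__ == "__main__":
    sketch.launch()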