Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -15,7 +15,6 @@ import cv2
|
|
| 15 |
|
| 16 |
from transformers import (
|
| 17 |
Qwen2VLForConditionalGeneration,
|
| 18 |
-
Glm4vForConditionalGeneration,
|
| 19 |
Qwen2_5_VLForConditionalGeneration,
|
| 20 |
AutoModelForImageTextToText,
|
| 21 |
AutoProcessor,
|
|
@@ -67,10 +66,10 @@ model_g = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
|
| 67 |
).to(device).eval()
|
| 68 |
#-----------------------------subfolder-----------------------------#
|
| 69 |
|
| 70 |
-
# Load GLM-4.1V-9B-Thinking
|
| 71 |
-
MODEL_ID_O = "THUDM/GLM-4.1V-9B-Thinking"
|
| 72 |
processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
|
| 73 |
-
model_o = Glm4vForConditionalGeneration.from_pretrained(
|
| 74 |
MODEL_ID_O,
|
| 75 |
trust_remote_code=True,
|
| 76 |
torch_dtype=torch.float16
|
|
@@ -117,7 +116,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
|
|
| 117 |
elif model_name == "MonkeyOCR-Recognition":
|
| 118 |
processor = processor_g
|
| 119 |
model = model_g
|
| 120 |
-
elif model_name == "GLM-4.1V-9B-Thinking":
|
| 121 |
processor = processor_o
|
| 122 |
model = model_o
|
| 123 |
else:
|
|
@@ -175,7 +174,7 @@ def generate_video(model_name: str, text: str, video_path: str,
|
|
| 175 |
elif model_name == "MonkeyOCR-Recognition":
|
| 176 |
processor = processor_g
|
| 177 |
model = model_g
|
| 178 |
-
elif model_name == "GLM-4.1V-9B-Thinking":
|
| 179 |
processor = processor_o
|
| 180 |
model = model_o
|
| 181 |
else:
|
|
@@ -291,14 +290,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
|
|
| 291 |
markdown_output = gr.Markdown(label="(Result.Md)")
|
| 292 |
|
| 293 |
model_choice = gr.Radio(
|
| 294 |
-
choices=["docscopeOCR-7B-050425-exp", "MonkeyOCR-Recognition", "coreOCR-7B-050325-preview", "GLM-4.1V-9B-Thinking"],
|
| 295 |
label="Select Model",
|
| 296 |
value="docscopeOCR-7B-050425-exp"
|
| 297 |
)
|
| 298 |
gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/core-OCR/discussions)")
|
| 299 |
-
gr.Markdown("> [GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking): GLM-4.1V-9B-Thinking, designed to explore the upper limits of reasoning in vision-language models. By introducing a thinking paradigm and leveraging reinforcement learning, the model significantly enhances its capabilities. It achieves state-of-the-art performance among 10B-parameter VLMs.")
|
| 300 |
gr.Markdown("> [docscopeOCR-7B-050425-exp](https://huggingface.co/prithivMLmods/docscopeOCR-7B-050425-exp): The docscopeOCR-7B-050425-exp model is a fine-tuned version of Qwen2.5-VL-7B-Instruct, optimized for Document-Level Optical Character Recognition (OCR), long-context vision-language understanding, and accurate image-to-text conversion with mathematical LaTeX formatting.")
|
| 301 |
gr.Markdown("> [MonkeyOCR](https://huggingface.co/echo840/MonkeyOCR): MonkeyOCR adopts a Structure-Recognition-Relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
|
|
|
|
| 302 |
gr.Markdown("> [coreOCR-7B-050325-preview](https://huggingface.co/prithivMLmods/coreOCR-7B-050325-preview): The coreOCR-7B-050325-preview model is a fine-tuned version of Qwen2-VL-7B, optimized for Document-Level Optical Character Recognition (OCR), long-context vision-language understanding, and accurate image-to-text conversion with mathematical LaTeX formatting.")
|
| 303 |
gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
|
| 304 |
|
|
|
|
| 15 |
|
| 16 |
from transformers import (
|
| 17 |
Qwen2VLForConditionalGeneration,
|
|
|
|
| 18 |
Qwen2_5_VLForConditionalGeneration,
|
| 19 |
AutoModelForImageTextToText,
|
| 20 |
AutoProcessor,
|
|
|
|
| 66 |
).to(device).eval()
|
| 67 |
#-----------------------------subfolder-----------------------------#
|
| 68 |
|
| 69 |
+
# Load Pollux-OCR-Caption-2B
|
| 70 |
+
MODEL_ID_O = "prithivMLmods/Pollux-OCR-Caption-2B"
|
| 71 |
processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
|
| 72 |
+
model_o = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
| 73 |
MODEL_ID_O,
|
| 74 |
trust_remote_code=True,
|
| 75 |
torch_dtype=torch.float16
|
|
|
|
| 116 |
elif model_name == "MonkeyOCR-Recognition":
|
| 117 |
processor = processor_g
|
| 118 |
model = model_g
|
| 119 |
+
elif model_name == "Pollux-OCR-Caption-2B":
|
| 120 |
processor = processor_o
|
| 121 |
model = model_o
|
| 122 |
else:
|
|
|
|
| 174 |
elif model_name == "MonkeyOCR-Recognition":
|
| 175 |
processor = processor_g
|
| 176 |
model = model_g
|
| 177 |
+
elif model_name == "Pollux-OCR-Caption-2B":
|
| 178 |
processor = processor_o
|
| 179 |
model = model_o
|
| 180 |
else:
|
|
|
|
| 290 |
markdown_output = gr.Markdown(label="(Result.Md)")
|
| 291 |
|
| 292 |
model_choice = gr.Radio(
|
| 293 |
+
choices=["docscopeOCR-7B-050425-exp", "MonkeyOCR-Recognition", "coreOCR-7B-050325-preview", "Pollux-OCR-Caption-2B"],
|
| 294 |
label="Select Model",
|
| 295 |
value="docscopeOCR-7B-050425-exp"
|
| 296 |
)
|
| 297 |
gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/core-OCR/discussions)")
|
|
|
|
| 298 |
gr.Markdown("> [docscopeOCR-7B-050425-exp](https://huggingface.co/prithivMLmods/docscopeOCR-7B-050425-exp): The docscopeOCR-7B-050425-exp model is a fine-tuned version of Qwen2.5-VL-7B-Instruct, optimized for Document-Level Optical Character Recognition (OCR), long-context vision-language understanding, and accurate image-to-text conversion with mathematical LaTeX formatting.")
|
| 299 |
gr.Markdown("> [MonkeyOCR](https://huggingface.co/echo840/MonkeyOCR): MonkeyOCR adopts a Structure-Recognition-Relation (SRR) triplet paradigm, which simplifies the multi-tool pipeline of modular approaches while avoiding the inefficiency of using large multimodal models for full-page document processing.")
|
| 300 |
+
gr.Markdown("> [Pollux-OCR-Caption-2B](https://huggingface.co/prithivMLmods/Pollux-OCR-Caption-2B): The Pollux-OCR-Caption-2B model is a fine-tuned version of Qwen2.5-VL architecture, optimized for Document Retrieval, Content Extraction, and Analysis Recognition. Built on top of the Qwen2.5-VL architecture.")
|
| 301 |
gr.Markdown("> [coreOCR-7B-050325-preview](https://huggingface.co/prithivMLmods/coreOCR-7B-050325-preview): The coreOCR-7B-050325-preview model is a fine-tuned version of Qwen2-VL-7B, optimized for Document-Level Optical Character Recognition (OCR), long-context vision-language understanding, and accurate image-to-text conversion with mathematical LaTeX formatting.")
|
| 302 |
gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
|
| 303 |
|