Space status: Running on Zero

Pull request #5 — "update app" by prithivMLmods (opened)

File changed: app.py
|
@@ -99,6 +99,7 @@ MODEL_ID_M = "prithivMLmods/docscopeOCR-7B-050425-exp"
|
|
| 99 |
processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
|
| 100 |
model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
| 101 |
MODEL_ID_M,
|
|
|
|
| 102 |
trust_remote_code=True,
|
| 103 |
torch_dtype=torch.float16
|
| 104 |
).to(device).eval()
|
|
@@ -108,6 +109,7 @@ MODEL_ID_X = "prithivMLmods/coreOCR-7B-050325-preview"
|
|
| 108 |
processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True)
|
| 109 |
model_x = Qwen2VLForConditionalGeneration.from_pretrained(
|
| 110 |
MODEL_ID_X,
|
|
|
|
| 111 |
trust_remote_code=True,
|
| 112 |
torch_dtype=torch.float16
|
| 113 |
).to(device).eval()
|
|
@@ -125,6 +127,7 @@ processor_g = AutoProcessor.from_pretrained(
|
|
| 125 |
|
| 126 |
model_g = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
| 127 |
MODEL_ID_G,
|
|
|
|
| 128 |
trust_remote_code=True,
|
| 129 |
subfolder=SUBFOLDER,
|
| 130 |
torch_dtype=torch.float16
|
|
@@ -136,6 +139,7 @@ MODEL_ID_O = "prithivMLmods/Camel-Doc-OCR-080125"
|
|
| 136 |
processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
|
| 137 |
model_o = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
| 138 |
MODEL_ID_O,
|
|
|
|
| 139 |
trust_remote_code=True,
|
| 140 |
torch_dtype=torch.float16
|
| 141 |
).to(device).eval()
|
|
@@ -214,7 +218,7 @@ css = """
|
|
| 214 |
"""
|
| 215 |
|
| 216 |
# Create the Gradio Interface
|
| 217 |
- with gr.Blocks(
|
| 218 |
gr.Markdown("# **core [OCR](https://huggingface.co/collections/prithivMLmods/multimodal-implementations)**", elem_id="main-title")
|
| 219 |
with gr.Row():
|
| 220 |
with gr.Column(scale=2):
|
|
@@ -249,4 +253,4 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
|
|
| 249 |
)
|
| 250 |
|
| 251 |
if __name__ == "__main__":
|
| 252 |
-     demo.queue(max_size=50).launch(mcp_server=True, ssr_mode=False, show_error=True)
|
|
|
|
| 99 |
processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
|
| 100 |
model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
| 101 |
MODEL_ID_M,
|
| 102 |
+     attn_implementation="flash_attention_2",
|
| 103 |
trust_remote_code=True,
|
| 104 |
torch_dtype=torch.float16
|
| 105 |
).to(device).eval()
|
|
|
|
| 109 |
processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True)
|
| 110 |
model_x = Qwen2VLForConditionalGeneration.from_pretrained(
|
| 111 |
MODEL_ID_X,
|
| 112 |
+     attn_implementation="flash_attention_2",
|
| 113 |
trust_remote_code=True,
|
| 114 |
torch_dtype=torch.float16
|
| 115 |
).to(device).eval()
|
|
|
|
| 127 |
|
| 128 |
model_g = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
| 129 |
MODEL_ID_G,
|
| 130 |
+     attn_implementation="flash_attention_2",
|
| 131 |
trust_remote_code=True,
|
| 132 |
subfolder=SUBFOLDER,
|
| 133 |
torch_dtype=torch.float16
|
|
|
|
| 139 |
processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True)
|
| 140 |
model_o = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
| 141 |
MODEL_ID_O,
|
| 142 |
+     attn_implementation="flash_attention_2",
|
| 143 |
trust_remote_code=True,
|
| 144 |
torch_dtype=torch.float16
|
| 145 |
).to(device).eval()
|
|
|
|
| 218 |
"""
|
| 219 |
|
| 220 |
# Create the Gradio Interface
|
| 221 |
+ with gr.Blocks() as demo:
|
| 222 |
gr.Markdown("# **core [OCR](https://huggingface.co/collections/prithivMLmods/multimodal-implementations)**", elem_id="main-title")
|
| 223 |
with gr.Row():
|
| 224 |
with gr.Column(scale=2):
|
|
|
|
| 253 |
)
|
| 254 |
|
| 255 |
if __name__ == "__main__":
|
| 256 |
+     demo.queue(max_size=50).launch(css=css, theme=steel_blue_theme, mcp_server=True, ssr_mode=False, show_error=True)
|