broadfield-dev committed
Commit 2f41c97 · verified · 1 Parent(s): 93a2bbb

Update app.py

Files changed (1)
    app.py (+17 -15)
app.py CHANGED
@@ -58,10 +58,15 @@ Describe the content of the sidebar, including any navigation, filters, or adver
 def load_model(model_name):
     """Loads the specified model and processor from Hugging Face."""
     model_id = MODEL_OPTIONS[model_name]
-    yield f"Status: Loading {model_name} model ({model_id})... Please wait.", gr.update(interactive=False)
+
+    # First yield: Update status to loading, disable button, clear previous model state.
+    # MUST return a value for all 4 outputs.
+    yield f"Status: Loading {model_name} model ({model_id})... Please wait.", gr.update(interactive=False), None, None
+
+    model = None
+    processor = None
 
     try:
-        # Specific loading instructions for the FP8 model
         if "FP8" in model_id:
             model = Qwen3VLForConditionalGeneration.from_pretrained(
                 model_id,
@@ -69,23 +74,22 @@ def load_model(model_name):
                 device_map="auto",
                 trust_remote_code=True
             )
-        else: # Standard loading for other models
+        else:
             model = Qwen3VLForConditionalGeneration.from_pretrained(
                 model_id,
                 device_map="auto",
                 trust_remote_code=True
             )
-
         processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
 
-        yield f"Status: {model_name} model loaded successfully.", gr.update(interactive=True)
+        # Final yield on success: Update status, enable button, and return the loaded model/processor.
+        # MUST return a value for all 4 outputs.
+        yield f"Status: {model_name} model loaded successfully.", gr.update(interactive=True), model, processor
 
     except Exception as e:
-        yield f"Status: Error loading model: {e}", gr.update(interactive=True)
-        model, processor = None, None
-
-    return model, processor
-
+        # Final yield on error: Update status, enable button, return None for model/processor.
+        # MUST return a value for all 4 outputs.
+        yield f"Status: Error loading model: {e}", gr.update(interactive=True), None, None
 
 # --- Playwright Screenshot Function ---
 def take_screenshot(url, max_dimension=1024, full_page_capture=True):
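Review note: load_model is now a generator, and every yield must supply one value per wired output (status text, button state, and the two gr.State holders for the model and processor). A minimal, self-contained sketch of how such a handler is typically wired up in Blocks; the component names, dropdown choices, and the stand-in load step are illustrative assumptions, not taken from this commit:

import gradio as gr

def load_model(model_name):
    # Each yield provides one value per output wired below:
    # (status_box, load_btn, model_state, processor_state).
    yield f"Status: Loading {model_name}...", gr.update(interactive=False), None, None
    model, processor = object(), object()  # stand-ins for the real from_pretrained calls
    yield f"Status: {model_name} loaded.", gr.update(interactive=True), model, processor

with gr.Blocks() as demo:
    model_state = gr.State(None)
    processor_state = gr.State(None)
    model_dropdown = gr.Dropdown(choices=["Demo model"], value="Demo model", label="Model")
    status_box = gr.Textbox(label="Status", interactive=False)
    load_btn = gr.Button("Load/Switch Model")
    load_btn.click(
        load_model,
        inputs=[model_dropdown],
        outputs=[status_box, load_btn, model_state, processor_state],
    )

if __name__ == "__main__":
    demo.launch()

With this wiring the intermediate yield disables the button while loading, and the final yield populates the two State components that process_and_generate later reads, which is why the old "return model, processor" could be dropped.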
@@ -122,7 +126,7 @@ def take_screenshot(url, max_dimension=1024, full_page_capture=True):
 # --- Inference Function ---
 def process_and_generate(model, processor, image_input, text_prompt, processing_size=512):
     if model is None or processor is None:
-        return "Error: Model is not loaded. Please select a model and click 'Load Model'."
+        return "Error: Model is not loaded. Please select a model and click 'Load/Switch Model'."
     if image_input is None or not text_prompt.strip():
         return "Please provide both an image and a text prompt."
 
@@ -142,7 +146,6 @@ def process_and_generate(model, processor, image_input, text_prompt, processing_
 
 # --- Gradio Interface ---
 with gr.Blocks() as demo:
-    # State components to hold the loaded model and processor
     model_state = gr.State(None)
     processor_state = gr.State(None)
 
@@ -172,10 +175,9 @@ with gr.Blocks() as demo:
 
     output_text = gr.Textbox(label="Model Output", lines=20, interactive=False, placeholder="Generated markdown will appear here...")
 
-    # --- UI Event Handlers ---
     def update_image(url, max_dimension, full_page_capture):
         path = take_screenshot(url, max_dimension, full_page_capture)
-        if "Error" in path: raise gr.Error(path)
+        if "Error" in str(path): raise gr.Error(path)
         return path
 
     def generate_markdown_with_loading(model, processor, image, user_prompt, processing_size, use_template):
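Review note: wrapping path in str() keeps the substring check safe if take_screenshot ever returns a non-string value (for example None) instead of a path or an "Error: ..." message. A small sketch of the pattern, with a hypothetical stub standing in for the real Playwright helper:

import gradio as gr

def take_screenshot_stub(url):
    # Hypothetical stand-in: the real helper returns either a saved image path
    # or a string beginning with "Error: ...".
    return "/tmp/screenshot.png" if url else "Error: no URL provided"

def update_image(url):
    path = take_screenshot_stub(url)
    # str() guards the check against non-string returns; gr.Error surfaces the
    # message in the UI instead of passing a bad path downstream.
    if "Error" in str(path):
        raise gr.Error(path)
    return path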
@@ -211,4 +213,4 @@ if __name__ == "__main__":
         print("Playwright installation complete.")
     except Exception as e:
         print(f"Could not install playwright dependencies: {e}")
-    demo.launch()
+    demo.launch(share=True)
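Review note: share=True asks Gradio to open a temporary public gradio.live tunnel in addition to the local server, which helps when the app runs on a remote machine. A minimal sketch; the extra keyword arguments are common launch options shown as assumptions, not part of this commit:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Screenshot-to-Markdown demo placeholder")

if __name__ == "__main__":
    # share=True requests the temporary public link; server_name and server_port
    # here are illustrative defaults, not taken from the diff.
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)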
 