CorneliusWang committed on
Commit
c99cd73
·
verified ·
1 Parent(s): 3a765bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -132
app.py CHANGED
@@ -7,57 +7,7 @@ import sys
7
  import os
8
  import subprocess
9
  from pathlib import Path
10
-
11
- # Monkey-patch to fix Gradio schema generation bug BEFORE importing gradio
12
- # Prevents TypeError: argument of type 'bool' is not iterable
13
- def _patch_gradio_schema_bug():
14
- """Patch Gradio's buggy schema generation."""
15
- try:
16
- from gradio_client import utils as gradio_client_utils
17
-
18
- # Patch get_type - the main buggy function
19
- if hasattr(gradio_client_utils, 'get_type'):
20
- _original_get_type = gradio_client_utils.get_type
21
-
22
- def _patched_get_type(schema):
23
- """Handle bool schemas that cause the bug."""
24
- if isinstance(schema, bool):
25
- return "bool"
26
- if schema is None:
27
- return "Any"
28
- if not isinstance(schema, dict):
29
- return "Any"
30
- try:
31
- return _original_get_type(schema)
32
- except TypeError as e:
33
- if "is not iterable" in str(e):
34
- return "Any"
35
- raise
36
-
37
- gradio_client_utils.get_type = _patched_get_type
38
-
39
- # Patch the wrapper function too
40
- if hasattr(gradio_client_utils, '_json_schema_to_python_type'):
41
- _original_json_to_type = gradio_client_utils._json_schema_to_python_type
42
-
43
- def _patched_json_to_type(schema, defs=None):
44
- """Catch errors in schema conversion."""
45
- try:
46
- return _original_json_to_type(schema, defs)
47
- except (TypeError, AttributeError) as e:
48
- if "is not iterable" in str(e):
49
- return "Any"
50
- raise
51
-
52
- gradio_client_utils._json_schema_to_python_type = _patched_json_to_type
53
- except (ImportError, AttributeError):
54
- pass
55
-
56
- # Apply patch BEFORE importing gradio
57
- _patch_gradio_schema_bug()
58
-
59
- # Now import gradio (patch will be in effect)
60
- import gradio as gr
61
 
62
  # Add project paths
63
  sys.path.insert(0, str(Path(__file__).parent))
@@ -67,31 +17,19 @@ sys.path.insert(0, str(Path(__file__).parent / "student_agent_dev"))
67
  def run_comparison(iterations: int, seed: int, use_deterministic: bool, device: str):
68
  """
69
  Run strategy comparison with LM Student.
70
-
71
- Args:
72
- iterations: Number of training iterations
73
- seed: Random seed (ignored if deterministic)
74
- use_deterministic: Use fixed seed=42
75
- device: 'cpu' or 'cuda' (GPU)
76
  """
77
 
78
  # Set device environment variable for subprocess
79
- # On Hugging Face Spaces with GPU, try to use CUDA
80
  if device == "cuda":
81
  try:
82
  import torch
83
- # Check if CUDA is available
84
  if torch.cuda.is_available():
85
  try:
86
- # Try to get device name to verify GPU works
87
  gpu_name = torch.cuda.get_device_name(0)
88
  gpu_count = torch.cuda.device_count()
89
  print(f"✅ GPU available: {gpu_name} (Count: {gpu_count})")
90
- # Keep device as "cuda"
91
  except Exception as e:
92
  print(f"⚠️ GPU detection failed: {e}")
93
- print(" Attempting to use CUDA anyway (may work)...")
94
- # Don't fallback immediately - let it try
95
  else:
96
  print("⚠️ CUDA not available, falling back to CPU")
97
  device = "cpu"
@@ -102,7 +40,7 @@ def run_comparison(iterations: int, seed: int, use_deterministic: bool, device:
102
  print(f"⚠️ GPU check error: {e}, falling back to CPU")
103
  device = "cpu"
104
 
105
- # Set environment variable for subprocess to pick up
106
  os.environ["CUDA_DEVICE"] = device
107
  print(f"🔧 Using device: {device}")
108
 
@@ -119,14 +57,13 @@ def run_comparison(iterations: int, seed: int, use_deterministic: bool, device:
119
  cmd.extend(["--seed", str(int(seed))])
120
 
121
  try:
122
- # Ensure environment variables are passed to subprocess
123
  env = os.environ.copy()
124
  env["CUDA_DEVICE"] = os.environ.get("CUDA_DEVICE", device)
125
 
126
  result = subprocess.run(
127
  cmd,
128
  cwd=str(Path(__file__).parent),
129
- env=env, # Pass environment variables
130
  capture_output=True,
131
  text=True,
132
  timeout=3600 # 1 hour timeout
@@ -135,13 +72,12 @@ def run_comparison(iterations: int, seed: int, use_deterministic: bool, device:
135
  stdout_text = result.stdout
136
  stderr_text = result.stderr
137
 
138
- # Combine outputs
139
  full_output = f"=== STDOUT ===\n{stdout_text}\n\n=== STDERR ===\n{stderr_text}"
140
 
141
  if result.returncode != 0:
142
  return f"❌ Error occurred:\n{full_output}", None
143
 
144
- # Find output plot (check multiple possible locations)
145
  plot_paths = [
146
  Path(__file__).parent / "teacher_agent_dev" / "comparison_all_strategies.png",
147
  Path(__file__).parent / "comparison_all_strategies.png",
@@ -157,7 +93,6 @@ def run_comparison(iterations: int, seed: int, use_deterministic: bool, device:
157
  if plot_path:
158
  return f"✅ Comparison complete!\n\n{stdout_text}", str(plot_path)
159
  else:
160
- # Return output even if plot not found (might still be useful)
161
  error_msg = f"⚠️ Plot not found at expected locations.\n"
162
  error_msg += f"Checked: {[str(p) for p in plot_paths]}\n\n"
163
  error_msg += f"Output:\n{full_output}"
@@ -174,8 +109,6 @@ def check_gpu():
174
  """Check if GPU is available on Hugging Face Spaces."""
175
  try:
176
  import torch
177
-
178
- # Check CUDA availability
179
  if torch.cuda.is_available():
180
  try:
181
  gpu_name = torch.cuda.get_device_name(0)
@@ -183,12 +116,9 @@ def check_gpu():
183
  cuda_version = torch.version.cuda
184
  return f"✅ GPU Available: {gpu_name} (Count: {gpu_count}, CUDA: {cuda_version})"
185
  except Exception as e:
186
- # GPU might be available but not immediately accessible
187
  return f"✅ GPU Detected (accessing: {str(e)[:50]}...)"
188
  else:
189
- # On Hugging Face Spaces, check environment
190
  if os.getenv("SPACE_ID"):
191
- # Check if GPU hardware is allocated
192
  hf_hardware = os.getenv("SPACE_HARDWARE", "unknown")
193
  if "gpu" in hf_hardware.lower() or "t4" in hf_hardware.lower() or "l4" in hf_hardware.lower():
194
  return f"⚠️ GPU Hardware ({hf_hardware}) allocated but not accessible yet. Try running anyway."
@@ -211,12 +141,9 @@ with gr.Blocks(title="MentorFlow - Strategy Comparison") as demo:
211
  3. **Teacher Strategy**: RL teacher agent learns optimal curriculum
212
 
213
  ## Usage
214
-
215
  1. Set parameters below
216
  2. Click "Run Comparison" to start training
217
  3. View results and generated plots
218
-
219
- **Note**: With LM Student, this will take 15-30 minutes for 500 iterations.
220
  """)
221
 
222
  # GPU Status
@@ -229,33 +156,10 @@ with gr.Blocks(title="MentorFlow - Strategy Comparison") as demo:
229
  # Parameters
230
  with gr.Row():
231
  with gr.Column():
232
- iterations = gr.Slider(
233
- minimum=50,
234
- maximum=500,
235
- value=100,
236
- step=50,
237
- label="Iterations",
238
- info="Number of training iterations (higher = longer runtime)"
239
- )
240
-
241
- seed = gr.Number(
242
- value=42,
243
- label="Random Seed",
244
- info="Seed for reproducibility (ignored if deterministic)"
245
- )
246
-
247
- use_deterministic = gr.Checkbox(
248
- value=True,
249
- label="Deterministic Mode",
250
- info="Use fixed seed=42 for reproducible results"
251
- )
252
-
253
- device = gr.Radio(
254
- choices=["cuda", "cpu"],
255
- value="cuda", # Default to GPU for HF Spaces with Nvidia 4xL4
256
- label="Device",
257
- info="GPU (cuda) recommended for Nvidia 4xL4, CPU fallback available"
258
- )
259
 
260
  with gr.Column():
261
  run_btn = gr.Button("🚀 Run Comparison", variant="primary", size="lg")
@@ -263,19 +167,9 @@ with gr.Blocks(title="MentorFlow - Strategy Comparison") as demo:
263
  # Output
264
  with gr.Row():
265
  with gr.Column(scale=1):
266
- output_text = gr.Textbox(
267
- label="Output",
268
- lines=15,
269
- max_lines=30,
270
- interactive=False
271
- )
272
-
273
  with gr.Column(scale=1):
274
- output_plot = gr.Image(
275
- label="Comparison Plot",
276
- type="filepath",
277
- height=500
278
- )
279
 
280
  # Run comparison
281
  run_btn.click(
@@ -284,21 +178,7 @@ with gr.Blocks(title="MentorFlow - Strategy Comparison") as demo:
284
  outputs=[output_text, output_plot],
285
  api_name="run_comparison"
286
  )
287
-
288
- gr.Markdown("""
289
- ## 📊 Understanding Results
290
-
291
- The comparison plot shows:
292
- - **Learning Curves**: How each strategy improves over time
293
- - **Difficult Question Performance**: Accuracy on hard questions
294
- - **Curriculum Diversity**: Topic coverage over time
295
- - **Learning Efficiency**: Iterations to reach target vs final performance
296
-
297
- The **Teacher Strategy** should ideally outperform Random and Progressive strategies.
298
- """)
299
 
300
  if __name__ == "__main__":
301
- # For Hugging Face Spaces
302
- # Monkey-patch above should fix schema bug, but upgrade to Gradio 5.x is recommended
303
- demo.launch()
304
-
 
7
  import os
8
  import subprocess
9
  from pathlib import Path
10
+ import gradio as gr # Import directly, no patch needed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  # Add project paths
13
  sys.path.insert(0, str(Path(__file__).parent))
 
17
  def run_comparison(iterations: int, seed: int, use_deterministic: bool, device: str):
18
  """
19
  Run strategy comparison with LM Student.
 
 
 
 
 
 
20
  """
21
 
22
  # Set device environment variable for subprocess
 
23
  if device == "cuda":
24
  try:
25
  import torch
 
26
  if torch.cuda.is_available():
27
  try:
 
28
  gpu_name = torch.cuda.get_device_name(0)
29
  gpu_count = torch.cuda.device_count()
30
  print(f"✅ GPU available: {gpu_name} (Count: {gpu_count})")
 
31
  except Exception as e:
32
  print(f"⚠️ GPU detection failed: {e}")
 
 
33
  else:
34
  print("⚠️ CUDA not available, falling back to CPU")
35
  device = "cpu"
 
40
  print(f"⚠️ GPU check error: {e}, falling back to CPU")
41
  device = "cpu"
42
 
43
+ # Set environment variable for subprocess
44
  os.environ["CUDA_DEVICE"] = device
45
  print(f"🔧 Using device: {device}")
46
 
 
57
  cmd.extend(["--seed", str(int(seed))])
58
 
59
  try:
 
60
  env = os.environ.copy()
61
  env["CUDA_DEVICE"] = os.environ.get("CUDA_DEVICE", device)
62
 
63
  result = subprocess.run(
64
  cmd,
65
  cwd=str(Path(__file__).parent),
66
+ env=env,
67
  capture_output=True,
68
  text=True,
69
  timeout=3600 # 1 hour timeout
 
72
  stdout_text = result.stdout
73
  stderr_text = result.stderr
74
 
 
75
  full_output = f"=== STDOUT ===\n{stdout_text}\n\n=== STDERR ===\n{stderr_text}"
76
 
77
  if result.returncode != 0:
78
  return f"❌ Error occurred:\n{full_output}", None
79
 
80
+ # Check multiple possible locations for the plot
81
  plot_paths = [
82
  Path(__file__).parent / "teacher_agent_dev" / "comparison_all_strategies.png",
83
  Path(__file__).parent / "comparison_all_strategies.png",
 
93
  if plot_path:
94
  return f"✅ Comparison complete!\n\n{stdout_text}", str(plot_path)
95
  else:
 
96
  error_msg = f"⚠️ Plot not found at expected locations.\n"
97
  error_msg += f"Checked: {[str(p) for p in plot_paths]}\n\n"
98
  error_msg += f"Output:\n{full_output}"
 
109
  """Check if GPU is available on Hugging Face Spaces."""
110
  try:
111
  import torch
 
 
112
  if torch.cuda.is_available():
113
  try:
114
  gpu_name = torch.cuda.get_device_name(0)
 
116
  cuda_version = torch.version.cuda
117
  return f"✅ GPU Available: {gpu_name} (Count: {gpu_count}, CUDA: {cuda_version})"
118
  except Exception as e:
 
119
  return f"✅ GPU Detected (accessing: {str(e)[:50]}...)"
120
  else:
 
121
  if os.getenv("SPACE_ID"):
 
122
  hf_hardware = os.getenv("SPACE_HARDWARE", "unknown")
123
  if "gpu" in hf_hardware.lower() or "t4" in hf_hardware.lower() or "l4" in hf_hardware.lower():
124
  return f"⚠️ GPU Hardware ({hf_hardware}) allocated but not accessible yet. Try running anyway."
 
141
  3. **Teacher Strategy**: RL teacher agent learns optimal curriculum
142
 
143
  ## Usage
 
144
  1. Set parameters below
145
  2. Click "Run Comparison" to start training
146
  3. View results and generated plots
 
 
147
  """)
148
 
149
  # GPU Status
 
156
  # Parameters
157
  with gr.Row():
158
  with gr.Column():
159
+ iterations = gr.Slider(minimum=50, maximum=500, value=100, step=50, label="Iterations")
160
+ seed = gr.Number(value=42, label="Random Seed")
161
+ use_deterministic = gr.Checkbox(value=True, label="Deterministic Mode")
162
+ device = gr.Radio(choices=["cuda", "cpu"], value="cuda", label="Device")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
 
164
  with gr.Column():
165
  run_btn = gr.Button("🚀 Run Comparison", variant="primary", size="lg")
 
167
  # Output
168
  with gr.Row():
169
  with gr.Column(scale=1):
170
+ output_text = gr.Textbox(label="Output", lines=15, max_lines=30, interactive=False)
 
 
 
 
 
 
171
  with gr.Column(scale=1):
172
+ output_plot = gr.Image(label="Comparison Plot", type="filepath", height=500)
 
 
 
 
173
 
174
  # Run comparison
175
  run_btn.click(
 
178
  outputs=[output_text, output_plot],
179
  api_name="run_comparison"
180
  )
 
 
 
 
 
 
 
 
 
 
 
 
181
 
182
  if __name__ == "__main__":
183
+ # Standard HF Spaces configuration
184
+ demo.launch(server_name="0.0.0.0", server_port=7860)