Update app.py
app.py CHANGED
@@ -73,15 +73,16 @@ def run_diva_vqa(video_path, is_finetune, train_data_name, test_data_name, netwo
 
 def toggle_finetune_visibility(train_dataset):
     """
-    when using
+    when using train dataset is lsvq_train
     """
     return gr.update(visible=(train_dataset == "lsvq_train"))
 
-def
-    ""
-
-
-
+def update_test_dataset(is_finetune, train_dataset, current_test_value):
+    if train_dataset != "lsvq_train":
+        msg = f"Intra-dataset experiment — test dataset is automatically set to **{train_dataset}**."
+        return gr.update(value=train_dataset, visible=False), gr.update(value=msg, visible=True)
+    else:
+        return gr.update(visible=is_finetune, value=current_test_value), gr.update(value="", visible=False)
 
 
 with gr.Blocks() as demo:
@@ -95,9 +96,10 @@ with gr.Blocks() as demo:
         "⚙️ This demo is currently running on <strong>Hugging Face ZeroGPU Space</strong>: Dynamic resources (NVIDIA A100)."
     )
 
-    with gr.Row():
+    with gr.Row():
         with gr.Column(scale=2):
             video_input = gr.Video(label="Upload a Video (e.g. mp4)")
+
             train_dataset = gr.Dropdown(
                 label="Train Dataset",
                 choices=["cvd_2014", "konvid_1k", "live_vqc", "lsvq_train", "youtube_ugc"],
@@ -111,6 +113,7 @@ with gr.Blocks() as demo:
                 value="konvid_1k",
                 visible=False
             )
+            mode_message = gr.Markdown("", visible=False)
             model_dropdown = gr.Dropdown(
                 label="Our Models",
                 choices=["diva-vqa", "diva-vqa_large"],
@@ -121,7 +124,6 @@ with gr.Blocks() as demo:
         with gr.Column(scale=1):
             output_box = gr.Textbox(label="Predicted Perceptual Quality Score (0–100)", lines=5)
 
-
     train_dataset.change(
         fn=toggle_finetune_visibility,
         inputs=train_dataset,
@@ -129,22 +131,21 @@ with gr.Blocks() as demo:
     )
 
     is_finetune_checkbox.change(
-        fn=
-        inputs=[is_finetune_checkbox, train_dataset],
-        outputs=test_dataset
+        fn=update_test_dataset,
+        inputs=[is_finetune_checkbox, train_dataset, test_dataset],
+        outputs=[test_dataset, mode_message]
     )
 
     train_dataset.change(
-        fn=
-        inputs=[is_finetune_checkbox, train_dataset],
-        outputs=test_dataset
+        fn=update_test_dataset,
+        inputs=[is_finetune_checkbox, train_dataset, test_dataset],
+        outputs=[test_dataset, mode_message]
    )
 
-
     run_button.click(
         fn=run_diva_vqa,
         inputs=[video_input, is_finetune_checkbox, train_dataset, test_dataset, model_dropdown],
         outputs=output_box
     )
 
-demo.launch()
+demo.launch()
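For context, the control flow this commit introduces can be exercised on its own. The sketch below is a minimal, standalone reduction of the new wiring, not the Space's full app.py: the checkbox label, the test-dataset choices, and the output of the first train_dataset.change() call (assumed to be the checkbox itself, which the hunk does not show) are placeholders, and run_diva_vqa with its video input is left out.

import gradio as gr

def toggle_finetune_visibility(train_dataset):
    # Show the fine-tune checkbox only for the lsvq_train training set.
    return gr.update(visible=(train_dataset == "lsvq_train"))

def update_test_dataset(is_finetune, train_dataset, current_test_value):
    if train_dataset != "lsvq_train":
        # Intra-dataset experiment: lock the test dataset to the train dataset
        # and surface an explanatory note instead of the dropdown.
        msg = f"Intra-dataset experiment — test dataset is automatically set to **{train_dataset}**."
        return gr.update(value=train_dataset, visible=False), gr.update(value=msg, visible=True)
    # lsvq_train: the test-dataset dropdown only appears when fine-tuning is ticked.
    return gr.update(visible=is_finetune, value=current_test_value), gr.update(value="", visible=False)

with gr.Blocks() as demo:
    train_dataset = gr.Dropdown(
        label="Train Dataset",
        choices=["cvd_2014", "konvid_1k", "live_vqc", "lsvq_train", "youtube_ugc"],
        value="konvid_1k",
    )
    # Placeholder label; the real app defines this checkbox elsewhere in app.py.
    is_finetune_checkbox = gr.Checkbox(label="Fine-tune on a target dataset", visible=False)
    # Placeholder choices; only value="konvid_1k" is visible in the hunk.
    test_dataset = gr.Dropdown(
        label="Test Dataset",
        choices=["cvd_2014", "konvid_1k", "live_vqc", "youtube_ugc"],
        value="konvid_1k",
        visible=False,
    )
    mode_message = gr.Markdown("", visible=False)

    # Assumed output of this event: the checkbox toggled by toggle_finetune_visibility.
    train_dataset.change(fn=toggle_finetune_visibility, inputs=train_dataset,
                         outputs=is_finetune_checkbox)
    is_finetune_checkbox.change(fn=update_test_dataset,
                                inputs=[is_finetune_checkbox, train_dataset, test_dataset],
                                outputs=[test_dataset, mode_message])
    train_dataset.change(fn=update_test_dataset,
                         inputs=[is_finetune_checkbox, train_dataset, test_dataset],
                         outputs=[test_dataset, mode_message])

demo.launch()

Returning a tuple of gr.update(...) values from one callback lets a single handler drive both the test-dataset dropdown and the mode_message note, which is why both .change() events in the diff list the same two outputs.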