Upload 31 files

- app.py +2 -2
- demo_test.py +1 -1
app.py
CHANGED

@@ -75,8 +75,8 @@ demo = gr.Interface(
     inputs=[
         gr.Video(label="Upload a Video (e.g. mp4)"),
         gr.Checkbox(label="Use Finetuning?", value=False),
-        gr.Dropdown(label="Train Dataset Name", choices=["konvid_1k", "youtube_ugc", "live_vqc", "lsvq_train"]),
-        gr.Dropdown(label="Test Dataset Name", choices=["konvid_1k", "youtube_ugc", "live_vqc", "lsvq"]),
+        gr.Dropdown(label="Train Dataset Name", choices=["konvid_1k", "youtube_ugc", "live_vqc", "lsvq_train"], value="lsvq_train"),
+        gr.Dropdown(label="Test Dataset Name", choices=["konvid_1k", "youtube_ugc", "live_vqc", "lsvq"], value="konvid_1k"),
         gr.Dropdown(label="Our Models", choices=["diva-vqa", "diva-vqa_large"], value="diva-vqa_large")
     ],
     outputs=gr.Textbox(label="Predicted Perceptual Quality Score (0–100)"),
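The change gives the two dataset dropdowns explicit defaults; without value=, a Gradio Dropdown starts unselected and the handler receives None until the user picks an option. For context, a minimal self-contained sketch of how the corrected inputs assemble into the interface (predict_quality and its body are placeholders here, not the app's real DIVA-VQA handler):

import gradio as gr

def predict_quality(video_path, finetune, train_set, test_set, model_name):
    # Placeholder: the real app runs the DIVA-VQA inference pipeline here.
    return f"{model_name} trained on {train_set}, tested on {test_set}: 72.5"

demo = gr.Interface(
    fn=predict_quality,
    inputs=[
        gr.Video(label="Upload a Video (e.g. mp4)"),
        gr.Checkbox(label="Use Finetuning?", value=False),
        # value= gives each dropdown a default, so the form no longer
        # submits None when the user leaves it untouched.
        gr.Dropdown(label="Train Dataset Name",
                    choices=["konvid_1k", "youtube_ugc", "live_vqc", "lsvq_train"],
                    value="lsvq_train"),
        gr.Dropdown(label="Test Dataset Name",
                    choices=["konvid_1k", "youtube_ugc", "live_vqc", "lsvq"],
                    value="konvid_1k"),
        gr.Dropdown(label="Our Models",
                    choices=["diva-vqa", "diva-vqa_large"],
                    value="diva-vqa_large"),
    ],
    outputs=gr.Textbox(label="Predicted Perceptual Quality Score (0–100)"),
)

if __name__ == "__main__":
    demo.launch()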
demo_test.py
CHANGED

@@ -51,7 +51,7 @@ def load_model(config, device, input_features=11520):
     else:
         model_path = os.path.join(config.save_path, f"wo_finetune/{config.train_data_name}_{config.network_name}_{config.model_name}_{config.select_criteria}"
                                   f"_trained_median_model_param.pth")
-
+    print("Loading model from:", model_path)
     state_dict = torch.load(model_path, map_location=device)
     fixed_state_dict = fix_state_dict(state_dict)
     try:
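The added print makes the resolved checkpoint path visible before torch.load runs, which helps debug missing-file errors in the Space logs. fix_state_dict is defined elsewhere in the repo; a common reason for such a helper is stripping the "module." prefix that nn.DataParallel adds to checkpoint keys. A minimal sketch under that assumption (load_checkpoint is a hypothetical stand-in for the repo's load_model, which also takes a config object):

import torch
from collections import OrderedDict

def fix_state_dict(state_dict):
    # Assumption: checkpoints were saved from an nn.DataParallel wrapper,
    # so every key carries a "module." prefix that a bare model rejects.
    fixed = OrderedDict()
    for key, value in state_dict.items():
        fixed[key.removeprefix("module.")] = value
    return fixed

def load_checkpoint(model_path, device):
    # Mirrors the diff: log the resolved path before loading, so a wrong
    # save_path or dataset name surfaces immediately in the logs.
    print("Loading model from:", model_path)
    state_dict = torch.load(model_path, map_location=device)
    return fix_state_dict(state_dict)

Passing map_location=device keeps checkpoints trained on CUDA loadable on the CPU-only runtime a sleeping Space wakes up on.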