Update app.py
app.py CHANGED

@@ -1,4 +1,5 @@
 import gradio as gr
+import numpy as np
 import pandas as pd
 
 from constants import *
@@ -11,6 +12,8 @@ def get_data(verified, dataset, ipc, label_type, metric_weights=None):
     label_type = [label_type]
 
     data = pd.read_csv("data.csv")
+    # filter out rows missing hlr or ior (drop NaNs)
+    data = data.dropna(subset=["hlr", "ior"])
     data["verified"] = data["verified"].apply(lambda x: bool(x))
     data["dataset"] = data["dataset"].apply(lambda x: DATASET_LIST[x])
     data["ipc"] = data["ipc"].apply(lambda x: IPC_LIST[x])
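
The new `dropna` filter is worth a brief note: a row missing either metric would otherwise carry NaN through the weighted-score arithmetic and sort unpredictably, so dropping it up front is the cleanest fix. A minimal sketch of the behavior, on a toy frame of my own rather than the leaderboard's real `data.csv`:

```python
import pandas as pd

# Toy stand-in: rows "B" and "C" are each missing one metric.
df = pd.DataFrame({"method": ["A", "B", "C"],
                   "hlr": [1.0, None, 3.0],
                   "ior": [0.5, 0.7, None]})

# dropna(subset=[...]) keeps only rows where every listed column is
# non-null, so unscorable entries never reach the ranking step.
print(df.dropna(subset=["hlr", "ior"]))  # only row "A" survives
```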
@@ -28,12 +31,13 @@ def get_data(verified, dataset, ipc, label_type, metric_weights=None):
     data["score"] = data[METRICS[0].lower()] * 0.0
     for i, metric in enumerate(METRICS):
         data["score"] += data[metric.lower()] * metric_weights[i] * METRICS_SIGN[i]
+    data["score"] = (np.exp(-0.01 * data["score"]) - np.exp(-1.0)) / (np.exp(1.0) - np.exp(-1.0))
     data = data.sort_values(by="score", ascending=False)
     data["ranking"] = range(1, len(data) + 1)
 
     for metric in METRICS:
-        data[metric.lower()] = data[metric.lower()].apply(lambda x: round(x,
-    data["score"] = data["score"].apply(lambda x: round(x,
+        data[metric.lower()] = data[metric.lower()].apply(lambda x: round(x, 3))
+    data["score"] = data["score"].apply(lambda x: round(x, 3))
 
     # formatting
     data["method"] = "[" + data["method"] + "](" + data["method_reference"] + ")"
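
The added rescale passes the raw weighted sum through a decreasing exponential. Assuming the weighted sum lies in [-100, 100] (my inference from the 0.01 factor and the e^±1 bounds in the expression; the source does not state the range), the output lands in [0, 1], and the mapping is monotonically decreasing, so smaller weighted sums come out with higher normalized scores. A standalone sketch:

```python
import numpy as np

# Exponential rescale as added in the diff. For raw values in
# [-100, 100] (an assumption, not stated in the source),
# exp(-0.01 * raw) spans [e**-1, e**1], so the affine shift below
# lands in [0, 1]. Note the map is monotonically *decreasing* in raw.
def rescale(raw):
    return (np.exp(-0.01 * raw) - np.exp(-1.0)) / (np.exp(1.0) - np.exp(-1.0))

print(rescale(np.array([-100.0, 0.0, 100.0])))  # -> [1.0, ~0.269, 0.0]
```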
@@ -41,9 +45,9 @@ def get_data(verified, dataset, ipc, label_type, metric_weights=None):
     data = data.drop(columns=["method_reference", "dataset", "ipc"])
     data = data[['ranking', 'method', 'verified', 'date', 'label_type', 'hlr', 'ior', 'score']]
     if label_type == "Hard Label":
-        data = data.rename(columns={"ranking": "Ranking", "method": "Method", "date": "Date", "label_type": "Label Type", "hlr": "HLR
+        data = data.rename(columns={"ranking": "Ranking", "method": "Method", "date": "Date", "label_type": "Label Type", "hlr": "HLR%↓", "ior": "IOR%↑", "score": "DDRS↑", "verified": "Verified"})
     else:
-        data = data.rename(columns={"ranking": "Ranking", "method": "Method", "date": "Date", "label_type": "Label Type", "hlr": "HLR
+        data = data.rename(columns={"ranking": "Ranking", "method": "Method", "date": "Date", "label_type": "Label Type", "hlr": "HLR%↓", "ior": "IOR%↑", "score": "DDRS↑", "verified": "Verified"})
     return data
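
One observation on this hunk: after the change, the two `rename` branches are character-for-character identical, so the `if label_type == "Hard Label"` split no longer has any effect here (and since `label_type` is wrapped in a list near the top of the function, the comparison against a bare string looks like it would always be False anyway, unless the variable is reassigned in the elided lines). If no branch-specific labels are planned, a single rename would do; a sketch, with a one-row stand-in frame since `data` is built elsewhere:

```python
import pandas as pd

# Both branches apply the same mapping, so one rename suffices.
COLUMN_LABELS = {
    "ranking": "Ranking", "method": "Method", "verified": "Verified",
    "date": "Date", "label_type": "Label Type",
    "hlr": "HLR%↓", "ior": "IOR%↑", "score": "DDRS↑",
}

data = pd.DataFrame([[1, "A", True, "2024-01-01", "Hard Label", 1.0, 2.0, 0.5]],
                    columns=["ranking", "method", "verified", "date",
                             "label_type", "hlr", "ior", "score"])
print(data.rename(columns=COLUMN_LABELS).columns.tolist())
```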
@@ -92,6 +96,13 @@ with gr.Blocks() as leaderboard:
         gr.Slider(label=f"Weight for HLR", minimum=0.0, maximum=1.0, value=0.5, interactive=True))
     adjust_btn = gr.Button("Adjust Weights")
 
+    with gr.Accordion("Metric Definitions", open=False):
+        gr.Markdown(METRIC_DEFINITION_INTRODUCTION, latex_delimiters=[
+            {'left': '$$', 'right': '$$', 'display': True},
+            {'left': '$', 'right': '$', 'display': False},
+            {'left': '\\(', 'right': '\\)', 'display': False},
+            {'left': '\\[', 'right': '\\]', 'display': True}
+        ])
 
     # metric_weights = [s.value for s in metric_sliders]
     metric_weights = [metric_sliders[0].value, 1.0 - metric_sliders[0].value]
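
For readers unfamiliar with the parameter: `latex_delimiters` on `gr.Markdown` takes a list of `{left, right, display}` dicts, and text between a matched pair is rendered as math, with `display` choosing block versus inline layout. A minimal runnable sketch; the markdown string is a placeholder of mine, not the `METRIC_DEFINITION_INTRODUCTION` constant from `constants.py`:

```python
import gradio as gr

# Each dict registers one delimiter pair; display=True means block math.
DELIMS = [
    {"left": "$$", "right": "$$", "display": True},
    {"left": "$", "right": "$", "display": False},
]

with gr.Blocks() as demo:
    with gr.Accordion("Metric Definitions", open=False):
        gr.Markdown(
            r"Inline math like $x_i$ and display math like $$\frac{1}{N}\sum_i x_i$$",
            latex_delimiters=DELIMS,
        )

if __name__ == "__main__":
    demo.launch()
```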