"""A gradio app that renders a static leaderboard. This is used for Hugging Face Space."""
import ast
import argparse
import glob
import pickle
import gradio as gr
import numpy as np
import pandas as pd
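# Module-level holder for the rendered leaderboard Markdown; refreshed in place
# by build_leaderboard_tab().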
leader_component_values = [None]
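# Trailing spaces force a Markdown line break when appended to a line.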
space = "   "
def make_default_md():
leaderboard_md = f"""
# NeurIPS LLM Merging Competition Leaderboard
[Website](https://llm-merging.github.io/index) | [Starter Kit (Github)](https://github.com/llm-merging/LLM-Merging) | [Discord](https://discord.com/invite/dPBHEVnV)
"""
return leaderboard_md
def make_arena_leaderboard_md(model_table_df):
total_models = len(model_table_df)
leaderboard_md = f"""
Validation performance is averaged across the validation benchmarks.
Final performance will be assessed at the end of the competition on a hidden test set, which may or may not be correlated with validation performance.
If you have made a submission but don't see your model below, or your model has no score yet, please be patient: our current setup requires us to evaluate every submission manually.
Higher values are better.
Total #models: **{total_models}**.{space}
"""
return leaderboard_md
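# Illustrative CSV layout (a sketch; the real file may carry more columns):
#   Model,Validation Score
#   my-merged-model,0.73
#   unscored-model,-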
def load_leaderboard_table_csv(filename, add_hyperlink=False):
    """Parse the leaderboard CSV into a list of row dicts, sorted by score (descending)."""
    with open(filename) as f:
        lines = f.readlines()
    heads = [v.strip() for v in lines[0].split(",")]
    rows = []
    for line in lines[1:]:
        row = [v.strip() for v in line.split(",")]
        item = {}
        for h, v in zip(heads, row):
            if h == "Validation Score":
                # "-" marks a submission that has not been scored yet.
                v = ast.literal_eval(v) if v != "-" else np.nan
            item[h] = v
        if add_hyperlink:
            # Render the model name as a (currently href-less) dotted-underline link.
            item["Model"] = f'<a target="_blank" style="color: var(--link-text-color); text-decoration: underline; text-decoration-style: dotted;">{item["Model"]}</a>'
        rows.append(item)

    def sort_key(m):
        # Sort unscored (NaN) entries to the bottom.
        score = m.get("Validation Score", np.nan)
        return -np.inf if np.isnan(score) else score

    rows.sort(key=sort_key, reverse=True)
    return rows
def get_full_table(model_table_df):
    """Build the [rank, model name, score] rows shown in the leaderboard Dataframe."""
    values = []
    for i in range(len(model_table_df)):
        rank = i + 1  # rows are already sorted by score
        model_name = model_table_df.iloc[i]["Model"]
        score = model_table_df.iloc[i]["Validation Score"]
        values.append([rank, model_name, score])
    return values
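# Map internal category keys to the display names and blurbs shown in the UI.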
key_to_category_name = {
"full": "Overall",
}
cat_name_to_explanation = {
"Overall": "Overall Questions",
}
def build_leaderboard_tab(leaderboard_table_file, show_plot=False):
    """Render the leaderboard tab and return its top-level Markdown component."""
    if leaderboard_table_file is None:  # Do live update
        default_md = "Loading ..."
    else:
        default_md = make_default_md()
md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown")
if leaderboard_table_file:
data = load_leaderboard_table_csv(leaderboard_table_file)
model_table_df = pd.DataFrame(data)
with gr.Tabs() as tabs:
arena_table_vals = get_full_table(model_table_df)
with gr.Tab("Full leaderboard", id=0):
md = make_arena_leaderboard_md(model_table_df)
leaderboard_markdown = gr.Markdown(md, elem_id="leaderboard_markdown")
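            # Static table: Rank | Model | Validation Score. The "markdown"
            # datatype lets the Model cell render the optional hyperlink HTML.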
display_df = gr.Dataframe(
headers=[
"Rank",
"πŸ€– Model / Submission Name",
"⭐ Validation Performance",
],
datatype=[
"number",
"markdown",
"number",
],
value=arena_table_vals,
elem_id="arena_leaderboard_dataframe",
height=700,
column_widths=[70, 190, 110],
wrap=True,
)
            gr.Markdown(
                "Note: .",
                elem_id="leaderboard_markdown",
            )
leader_component_values[:] = [default_md]
    if not show_plot:
        gr.Markdown(
            "## Submit your model [here]().",
            elem_id="leaderboard_markdown",
        )
with gr.Accordion(
"πŸ“ Citation",
open=True,
):
citation_md = """
### Citation
Please cite the following paper
"""
gr.Markdown(citation_md, elem_id="leaderboard_markdown")
gr.Markdown(acknowledgment_md)
return [md_1]
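# Custom CSS for the Blocks app; the element IDs below match the elem_id values used above.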
block_css = """
#notice_markdown {
    font-size: 104%;
}
#notice_markdown th {
display: none;
}
#notice_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#category_deets {
text-align: center;
padding: 0px;
padding-left: 5px;
}
#leaderboard_markdown {
    font-size: 104%;
}
#leaderboard_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#leaderboard_header_markdown {
font-size: 104%;
text-align: center;
display:block;
}
#leaderboard_dataframe td {
line-height: 0.1em;
}
#plot-title {
text-align: center;
display:block;
}
#non-interactive-button {
    display: inline-block;
    padding: 10px;
    background-color: #f7f7f7; /* Very light grey background */
    text-align: center;
    font-size: 26px; /* Larger text */
    border-radius: 0; /* Straight edges, no border radius */
    border: none; /* No visible border */
    user-select: none; /* Text inside the button is not selectable */
    pointer-events: none; /* The button is non-interactive */
}
footer {
    display: none !important;
}
.sponsor-image-about img {
margin: 0 20px;
margin-top: 20px;
height: 40px;
max-height: 100%;
width: auto;
float: left;
}
"""
acknowledgment_md = """
### Acknowledgment
We thank Hugging Face, sakana.ai, and arcee.ai for their generous [sponsorship](https://llm-merging.github.io/sponsors).
<div class="sponsor-image-about">
</div>
"""
def build_demo(leaderboard_table_file):
    """Assemble the Gradio Blocks app around the leaderboard tab."""
    text_size = gr.themes.sizes.text_lg
    theme = gr.themes.Base(text_size=text_size)
    theme.set(
        button_secondary_background_fill_hover="*primary_300",
        button_secondary_background_fill_hover_dark="*primary_700",
    )
with gr.Blocks(
title="LLM Merging Leaderboard",
theme=theme,
# theme = gr.themes.Base.load("theme.json"), # uncomment to use new cool theme
css=block_css,
) as demo:
leader_components = build_leaderboard_tab(
leaderboard_table_file, show_plot=True
)
return demo
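# CLI entry point: parse flags, locate the newest leaderboard CSV, and launch the app.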
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--share", action="store_true")
parser.add_argument("--host", default="0.0.0.0")
parser.add_argument("--port", type=int, default=7860)
args = parser.parse_args()
    # Pick the leaderboard CSV with the largest numeric suffix (the most recent).
    leaderboard_table_files = glob.glob("leaderboard_table_*.csv")
    leaderboard_table_files.sort(key=lambda x: int(x[len("leaderboard_table_"):-len(".csv")]))
    leaderboard_table_file = leaderboard_table_files[-1]
demo = build_demo(leaderboard_table_file)
demo.launch(share=args.share, server_name=args.host, server_port=args.port)