Muennighoff committed
Commit bd1cf3d • Parent(s): e2f398c

Update metrics
app.py CHANGED
@@ -1,8 +1,8 @@
 from datasets import load_dataset
 import gradio as gr
-import pandas as pd
 from huggingface_hub import HfApi, hf_hub_download
 from huggingface_hub.repocard import metadata_load
+import pandas as pd

 TASKS = [
     "BitextMining",
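Aside from moving the pandas import below the huggingface_hub imports, nothing changes in this hunk. For readers unfamiliar with the two huggingface_hub imports it touches, here is a minimal, self-contained sketch of how `hf_hub_download` and `metadata_load` are commonly combined to read a model card's metadata; the repo id is an illustrative placeholder, not taken from this commit:

```python
from huggingface_hub import hf_hub_download
from huggingface_hub.repocard import metadata_load

# Download a model's README and parse the YAML metadata block at its top.
# The repo id is a hypothetical example for illustration only.
readme_path = hf_hub_download(
    repo_id="sentence-transformers/all-MiniLM-L6-v2", filename="README.md"
)
metadata = metadata_load(readme_path)  # dict parsed from the card, or None
if metadata:
    print(metadata.get("tags", []))
```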
@@ -278,8 +278,8 @@ def get_mteb_average():
     # Approximation (Missing Bitext Mining & including some nans)
     NUM_SCORES = DATA_OVERALL.shape[0] * DATA_OVERALL.shape[1]

-
-    DATA_OVERALL.to_csv("overall.csv")
+    # Debugging:
+    # DATA_OVERALL.to_csv("overall.csv")

     DATA_OVERALL.insert(1, f"Average ({len(TASK_LIST_EN)} datasets)", DATA_OVERALL[TASK_LIST_EN].mean(axis=1, skipna=False))
     DATA_OVERALL.insert(2, f"Classification Average ({len(TASK_LIST_CLASSIFICATION)} datasets)", DATA_OVERALL[TASK_LIST_CLASSIFICATION].mean(axis=1, skipna=False))
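This hunk keeps the CSV dump around as a commented-out debugging aid rather than writing `overall.csv` on every run. The averages computed just below it rely on `mean(axis=1, skipna=False)`; here is a toy sketch (invented scores, not leaderboard data) of why the `skipna=False` choice matters:

```python
import pandas as pd

# Toy frame: one row per model, one column per task score.
scores = pd.DataFrame(
    {"TaskA": [70.0, 65.0], "TaskB": [60.0, None]},
    index=["model-1", "model-2"],
)

# With skipna=False, a model missing any task score gets NaN as its
# average instead of a flattering average over fewer tasks.
print(scores.mean(axis=1, skipna=False))  # model-1: 65.0, model-2: NaN
print(scores.mean(axis=1, skipna=True))   # model-1: 65.0, model-2: 65.0
```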
@@ -315,9 +315,10 @@ with block:
     gr.Markdown(f"""
     Massive Text Embedding Benchmark (MTEB) Leaderboard. To submit, refer to the <a href="https://github.com/embeddings-benchmark/mteb#leaderboard" target="_blank" style="text-decoration: underline">MTEB GitHub repository</a> 🤗

+    - **Total Datasets**: 56
+    - **Total Languages**: 117
     - **Total Scores**: >{NUM_SCORES}
     - **Total Models**: {len(DATA_OVERALL)}
-    - **Total Users**: TODO
     """)
     with gr.Tabs():
         with gr.TabItem("Overall"):
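The final hunk fills in the header stats: fixed dataset and language counts go in, and the unfinished "Total Users" line goes out. `NUM_SCORES` is `DATA_OVERALL.shape[0] * DATA_OVERALL.shape[1]`, which the code itself flags as an approximation (it misses Bitext Mining and counts some NaN cells), hence the `>` in the rendered text. A standalone sketch of such a stats block, with hypothetical placeholder values in place of the real `DATA_OVERALL`:

```python
import gradio as gr

# Hypothetical placeholders for values the app derives from DATA_OVERALL.
NUM_MODELS = 100
NUM_SCORES = NUM_MODELS * 56  # rows x columns of the score table (toy numbers)

with gr.Blocks() as block:
    # f-string interpolation fills the computed counts into the Markdown.
    gr.Markdown(f"""
    - **Total Datasets**: 56
    - **Total Languages**: 117
    - **Total Scores**: >{NUM_SCORES}
    - **Total Models**: {NUM_MODELS}
    """)

block.launch()
```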