Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
eduagarcia
committed on
Commit
•
4717ca8
1
Parent(s):
b4fc70b
fix eval_name for non main revision models
Browse files- .gitignore +1 -1
- src/leaderboard/read_evals.py +10 -5
- src/submission/submit.py +2 -2
- upload_initial_queue.py +4 -1
.gitignore
CHANGED
@@ -4,7 +4,7 @@ __pycache__/
|
|
4 |
.ipynb_checkpoints
|
5 |
*ipynb
|
6 |
.vscode/
|
7 |
-
|
8 |
hub/
|
9 |
modules/
|
10 |
original_results/
|
|
|
4 |
.ipynb_checkpoints
|
5 |
*ipynb
|
6 |
.vscode/
|
7 |
+
*.sh
|
8 |
hub/
|
9 |
modules/
|
10 |
original_results/
|
src/leaderboard/read_evals.py
CHANGED
@@ -58,19 +58,24 @@ class EvalResult:
|
|
58 |
# Precision
|
59 |
precision = Precision.from_str(config.get("model_dtype"))
|
60 |
num_params = round(config.get("model_num_parameters", 0) / 1_000_000_000, 2)
|
|
|
61 |
|
62 |
# Get model and org
|
63 |
org_and_model = config.get("model_name")
|
64 |
org_and_model = org_and_model.split("/", 1)
|
65 |
|
|
|
|
|
|
|
66 |
if len(org_and_model) == 1:
|
67 |
org = None
|
68 |
model = org_and_model[0]
|
69 |
-
result_key = f"{model}_{precision.value.name}"
|
70 |
else:
|
71 |
org = org_and_model[0]
|
72 |
model = org_and_model[1]
|
73 |
-
result_key = f"{org}_{model}_{precision.value.name}"
|
|
|
74 |
full_model = "/".join(org_and_model)
|
75 |
|
76 |
# Extract results available in this file (some results are split in several files)
|
@@ -123,7 +128,7 @@ class EvalResult:
|
|
123 |
results=results,
|
124 |
precision=precision,
|
125 |
model_sha=config.get("model_sha", ""),
|
126 |
-
revision=
|
127 |
json_filename=json_filename,
|
128 |
eval_time=config.get("total_evaluation_time_seconds", 0.0),
|
129 |
num_params=num_params
|
@@ -146,7 +151,7 @@ class EvalResult:
|
|
146 |
self.main_language = request.get("main_language", "?")
|
147 |
except Exception as e:
|
148 |
self.status = "FAILED"
|
149 |
-
print(f"Could not find request file for {self.org}/{self.model}, precision {self.precision.value.name}")
|
150 |
|
151 |
def update_with_dynamic_file_dict(self, file_dict):
|
152 |
self.license = file_dict.get("license", "?")
|
@@ -219,7 +224,7 @@ def get_request_file_for_model(requests_path, model_name, precision, revision):
|
|
219 |
|
220 |
if revision is None or revision == "":
|
221 |
revision = "main"
|
222 |
-
|
223 |
# Select correct request file (precision)
|
224 |
request_file = ""
|
225 |
request_files = sorted(request_files, reverse=True)
|
|
|
58 |
# Precision
|
59 |
precision = Precision.from_str(config.get("model_dtype"))
|
60 |
num_params = round(config.get("model_num_parameters", 0) / 1_000_000_000, 2)
|
61 |
+
revision = config.get("model_revision", "main")
|
62 |
|
63 |
# Get model and org
|
64 |
org_and_model = config.get("model_name")
|
65 |
org_and_model = org_and_model.split("/", 1)
|
66 |
|
67 |
+
prefix = f"{precision.value.name}"
|
68 |
+
if revision != "main":
|
69 |
+
prefix = f"{revision}_{prefix}"
|
70 |
if len(org_and_model) == 1:
|
71 |
org = None
|
72 |
model = org_and_model[0]
|
73 |
+
result_key = f"{model}_{prefix}"
|
74 |
else:
|
75 |
org = org_and_model[0]
|
76 |
model = org_and_model[1]
|
77 |
+
result_key = f"{org}_{model}_{prefix}"
|
78 |
+
|
79 |
full_model = "/".join(org_and_model)
|
80 |
|
81 |
# Extract results available in this file (some results are split in several files)
|
|
|
128 |
results=results,
|
129 |
precision=precision,
|
130 |
model_sha=config.get("model_sha", ""),
|
131 |
+
revision=revision,
|
132 |
json_filename=json_filename,
|
133 |
eval_time=config.get("total_evaluation_time_seconds", 0.0),
|
134 |
num_params=num_params
|
|
|
151 |
self.main_language = request.get("main_language", "?")
|
152 |
except Exception as e:
|
153 |
self.status = "FAILED"
|
154 |
+
print(f"Could not find request file for {self.org}/{self.model}, precision {self.precision.value.name}, revision {self.revision}")
|
155 |
|
156 |
def update_with_dynamic_file_dict(self, file_dict):
|
157 |
self.license = file_dict.get("license", "?")
|
|
|
224 |
|
225 |
if revision is None or revision == "":
|
226 |
revision = "main"
|
227 |
+
|
228 |
# Select correct request file (precision)
|
229 |
request_file = ""
|
230 |
request_files = sorted(request_files, reverse=True)
|
src/submission/submit.py
CHANGED
@@ -60,7 +60,7 @@ def add_new_eval(
|
|
60 |
return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")
|
61 |
|
62 |
# Does the model actually exist?
|
63 |
-
if revision == "":
|
64 |
revision = "main"
|
65 |
|
66 |
# Is the model on the hub?
|
@@ -150,7 +150,7 @@ def add_new_eval(
|
|
150 |
out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"
|
151 |
else:
|
152 |
rev_name = revision
|
153 |
-
if
|
154 |
rev_name = rev_name[:7]
|
155 |
out_path = f"{OUT_DIR}/{model_path}_eval_request_{rev_name}_{private}_{precision}_{weight_type}.json"
|
156 |
|
|
|
60 |
return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")
|
61 |
|
62 |
# Does the model actually exist?
|
63 |
+
if revision == "" or revision is None:
|
64 |
revision = "main"
|
65 |
|
66 |
# Is the model on the hub?
|
|
|
150 |
out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"
|
151 |
else:
|
152 |
rev_name = revision
|
153 |
+
if len(rev_name) > 12:
|
154 |
rev_name = rev_name[:7]
|
155 |
out_path = f"{OUT_DIR}/{model_path}_eval_request_{rev_name}_{private}_{precision}_{weight_type}.json"
|
156 |
|
upload_initial_queue.py
CHANGED
@@ -13,6 +13,8 @@ with open('initial_queue.jsonl', 'r', encoding='utf-8') as outfile:
|
|
13 |
if not line.startswith('//'):
|
14 |
data = json.loads(line)
|
15 |
model_id = f'{data["model"]}_eval_request_False_{data["precision"]}_{data["weight_type"]}'
|
|
|
|
|
16 |
model_path = os.path.join(EVAL_REQUESTS_PATH, model_id + '.json')
|
17 |
if not os.path.exists(model_path):
|
18 |
print(f"Adding new eval {model_id}")
|
@@ -24,7 +26,8 @@ with open('initial_queue.jsonl', 'r', encoding='utf-8') as outfile:
|
|
24 |
private=False,
|
25 |
weight_type=data["weight_type"],
|
26 |
model_type=data["model_type"],
|
27 |
-
|
|
|
28 |
)
|
29 |
print(message)
|
30 |
|
|
|
13 |
if not line.startswith('//'):
|
14 |
data = json.loads(line)
|
15 |
model_id = f'{data["model"]}_eval_request_False_{data["precision"]}_{data["weight_type"]}'
|
16 |
+
if data["revision"] != "main":
|
17 |
+
model_id = f'{data["model"]}_eval_request_{data["revision"]}_False_{data["precision"]}_{data["weight_type"]}'
|
18 |
model_path = os.path.join(EVAL_REQUESTS_PATH, model_id + '.json')
|
19 |
if not os.path.exists(model_path):
|
20 |
print(f"Adding new eval {model_id}")
|
|
|
26 |
private=False,
|
27 |
weight_type=data["weight_type"],
|
28 |
model_type=data["model_type"],
|
29 |
+
main_language=data["main_language"],
|
30 |
+
source="manual"
|
31 |
)
|
32 |
print(message)
|
33 |
|