FinancialSupport committed on
Commit
5bea8fb
1 Parent(s): 07fe2af

Update src/leaderboard/read_evals.py

Browse files
Files changed (1) hide show
  1. src/leaderboard/read_evals.py +203 -196
src/leaderboard/read_evals.py CHANGED
@@ -1,196 +1,203 @@
1
- import glob
2
- import json
3
- import math
4
- import os
5
- from dataclasses import dataclass
6
-
7
- import dateutil
8
- import numpy as np
9
-
10
- from src.display.formatting import make_clickable_model
11
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
12
- from src.submission.check_validity import is_model_on_hub
13
-
14
-
15
- @dataclass
16
- class EvalResult:
17
- """Represents one full evaluation. Built from a combination of the result and request file for a given run.
18
- """
19
- eval_name: str # org_model_precision (uid)
20
- full_model: str # org/model (path on hub)
21
- org: str
22
- model: str
23
- revision: str # commit hash, "" if main
24
- results: dict
25
- precision: Precision = Precision.Unknown
26
- model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
27
- weight_type: WeightType = WeightType.Original # Original or Adapter
28
- architecture: str = "Unknown"
29
- license: str = "?"
30
- likes: int = 0
31
- num_params: int = 0
32
- date: str = "" # submission date of request file
33
- still_on_hub: bool = False
34
-
35
- @classmethod
36
- def init_from_json_file(self, json_filepath):
37
- """Inits the result from the specific model result file"""
38
- with open(json_filepath) as fp:
39
- data = json.load(fp)
40
-
41
- config = data.get("config")
42
-
43
- # Precision
44
- precision = Precision.from_str(config.get("model_dtype"))
45
-
46
- # Get model and org
47
- org_and_model = config.get("model_name", config.get("model_args", None))
48
- org_and_model = org_and_model.split("/", 1)
49
-
50
- if len(org_and_model) == 1:
51
- org = None
52
- model = org_and_model[0]
53
- result_key = f"{model}_{precision.value.name}"
54
- else:
55
- org = org_and_model[0]
56
- model = org_and_model[1]
57
- result_key = f"{org}_{model}_{precision.value.name}"
58
- full_model = "/".join(org_and_model)
59
-
60
- still_on_hub, _, model_config = is_model_on_hub(
61
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
62
- )
63
- architecture = "?"
64
- if model_config is not None:
65
- architectures = getattr(model_config, "architectures", None)
66
- if architectures:
67
- architecture = ";".join(architectures)
68
-
69
- # Extract results available in this file (some results are split in several files)
70
- results = {}
71
- for task in Tasks:
72
- task = task.value
73
-
74
- # We average all scores of a given metric (not all metrics are present in all files)
75
- accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
76
- if accs.size == 0 or any([acc is None for acc in accs]):
77
- continue
78
-
79
- mean_acc = np.mean(accs) * 100.0
80
- results[task.benchmark] = mean_acc
81
-
82
- return self(
83
- eval_name=result_key,
84
- full_model=full_model,
85
- org=org,
86
- model=model,
87
- results=results,
88
- precision=precision,
89
- revision= config.get("model_sha", ""),
90
- still_on_hub=still_on_hub,
91
- architecture=architecture
92
- )
93
-
94
- def update_with_request_file(self, requests_path):
95
- """Finds the relevant request file for the current model and updates info with it"""
96
- request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
97
-
98
- try:
99
- with open(request_file, "r") as f:
100
- request = json.load(f)
101
- self.model_type = ModelType.from_str(request.get("model_type", ""))
102
- self.weight_type = WeightType[request.get("weight_type", "Original")]
103
- self.license = request.get("license", "?")
104
- self.likes = request.get("likes", 0)
105
- self.num_params = request.get("params", 0)
106
- self.date = request.get("submitted_time", "")
107
- except Exception:
108
- print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
109
-
110
- def to_dict(self):
111
- """Converts the Eval Result to a dict compatible with our dataframe display"""
112
- average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
113
- data_dict = {
114
- "eval_name": self.eval_name, # not a column, just a save name,
115
- AutoEvalColumn.precision.name: self.precision.value.name,
116
- AutoEvalColumn.model_type.name: self.model_type.value.name,
117
- AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
118
- AutoEvalColumn.weight_type.name: self.weight_type.value.name,
119
- AutoEvalColumn.architecture.name: self.architecture,
120
- AutoEvalColumn.model.name: make_clickable_model(self.full_model),
121
- AutoEvalColumn.revision.name: self.revision,
122
- AutoEvalColumn.average.name: average,
123
- AutoEvalColumn.license.name: self.license,
124
- AutoEvalColumn.likes.name: self.likes,
125
- AutoEvalColumn.params.name: self.num_params,
126
- AutoEvalColumn.still_on_hub.name: self.still_on_hub,
127
- }
128
-
129
- for task in Tasks:
130
- data_dict[task.value.col_name] = self.results[task.value.benchmark]
131
-
132
- return data_dict
133
-
134
-
135
- def get_request_file_for_model(requests_path, model_name, precision):
136
- """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
137
- request_files = os.path.join(
138
- requests_path,
139
- f"{model_name}_eval_request_*.json",
140
- )
141
- request_files = glob.glob(request_files)
142
-
143
- # Select correct request file (precision)
144
- request_file = ""
145
- request_files = sorted(request_files, reverse=True)
146
- for tmp_request_file in request_files:
147
- with open(tmp_request_file, "r") as f:
148
- req_content = json.load(f)
149
- if (
150
- req_content["status"] in ["FINISHED"]
151
- and req_content["precision"] == precision.split(".")[-1]
152
- ):
153
- request_file = tmp_request_file
154
- return request_file
155
-
156
-
157
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
158
- """From the path of the results folder root, extract all needed info for results"""
159
- model_result_filepaths = []
160
-
161
- for root, _, files in os.walk(results_path):
162
- # We should only have json files in model results
163
- if len(files) == 0 or any([not f.endswith(".json") for f in files]):
164
- continue
165
-
166
- # Sort the files by date
167
- try:
168
- files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
169
- except dateutil.parser._parser.ParserError:
170
- files = [files[-1]]
171
-
172
- for file in files:
173
- model_result_filepaths.append(os.path.join(root, file))
174
-
175
- eval_results = {}
176
- for model_result_filepath in model_result_filepaths:
177
- # Creation of result
178
- eval_result = EvalResult.init_from_json_file(model_result_filepath)
179
- eval_result.update_with_request_file(requests_path)
180
-
181
- # Store results of same eval together
182
- eval_name = eval_result.eval_name
183
- if eval_name in eval_results.keys():
184
- eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
185
- else:
186
- eval_results[eval_name] = eval_result
187
-
188
- results = []
189
- for v in eval_results.values():
190
- try:
191
- v.to_dict() # we test if the dict version is complete
192
- results.append(v)
193
- except KeyError: # not all eval values present
194
- continue
195
-
196
- return results
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import json
3
+ import math
4
+ import os
5
+ from dataclasses import dataclass
6
+
7
+ import dateutil
8
+ import numpy as np
9
+
10
+ from src.display.formatting import make_clickable_model
11
+ from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
12
+ from src.submission.check_validity import is_model_on_hub
13
+
14
+
15
+ @dataclass
16
+ class EvalResult:
17
+ """Represents one full evaluation. Built from a combination of the result and request file for a given run.
18
+ """
19
+ eval_name: str # org_model_precision (uid)
20
+ full_model: str # org/model (path on hub)
21
+ org: str
22
+ model: str
23
+ revision: str # commit hash, "" if main
24
+ results: dict
25
+ precision: Precision = Precision.Unknown
26
+ model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
27
+ weight_type: WeightType = WeightType.Original # Original or Adapter
28
+ architecture: str = "Unknown"
29
+ license: str = "?"
30
+ likes: int = 0
31
+ num_params: int = 0
32
+ date: str = "" # submission date of request file
33
+ still_on_hub: bool = False
34
+ author: str = ""
35
+
36
+ @classmethod
37
+ def init_from_json_file(self, json_filepath):
38
+ """Inits the result from the specific model result file"""
39
+ with open(json_filepath) as fp:
40
+ data = json.load(fp)
41
+
42
+ config = data.get("config")
43
+
44
+ # Precision
45
+ precision = Precision.from_str(config.get("model_dtype"))
46
+
47
+ # Get model and org
48
+ org_and_model = config.get("model_name", config.get("model_args", None))
49
+ org_and_model = org_and_model.split("/", 1)
50
+
51
+ if len(org_and_model) == 1:
52
+ org = None
53
+ model = org_and_model[0]
54
+ result_key = f"{model}_{precision.value.name}"
55
+ else:
56
+ org = org_and_model[0]
57
+ model = org_and_model[1]
58
+ result_key = f"{org}_{model}_{precision.value.name}"
59
+ full_model = "/".join(org_and_model)
60
+
61
+ still_on_hub, _, model_config = is_model_on_hub(
62
+ full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
63
+ )
64
+ architecture = "?"
65
+ if model_config is not None:
66
+ architectures = getattr(model_config, "architectures", None)
67
+ if architectures:
68
+ architecture = ";".join(architectures)
69
+
70
+ author = data.get("author", "")
71
+
72
+ # Extract results available in this file (some results are split in several files)
73
+ results = {}
74
+ for task in Tasks:
75
+ task = task.value
76
+
77
+ # We average all scores of a given metric (not all metrics are present in all files)
78
+ accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
79
+ if accs.size == 0 or any([acc is None for acc in accs]):
80
+ continue
81
+
82
+ mean_acc = np.mean(accs) * 100.0
83
+ results[task.benchmark] = mean_acc
84
+
85
+ return self(
86
+ eval_name=result_key,
87
+ full_model=full_model,
88
+ org=org,
89
+ model=model,
90
+ results=results,
91
+ precision=precision,
92
+ revision= config.get("model_sha", ""),
93
+ still_on_hub=still_on_hub,
94
+ architecture=architecture,
95
+ author=author
96
+
97
+ )
98
+
99
+ def update_with_request_file(self, requests_path):
100
+ """Finds the relevant request file for the current model and updates info with it"""
101
+ request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
102
+
103
+ try:
104
+ with open(request_file, "r") as f:
105
+ request = json.load(f)
106
+ self.model_type = ModelType.from_str(request.get("model_type", ""))
107
+ self.weight_type = WeightType[request.get("weight_type", "Original")]
108
+ self.license = request.get("license", "?")
109
+ self.likes = request.get("likes", 0)
110
+ self.num_params = request.get("params", 0)
111
+ self.date = request.get("submitted_time", "")
112
+ self.author = request.get("author", self.author)
113
+ except Exception:
114
+ print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
115
+
116
+ def to_dict(self):
117
+ """Converts the Eval Result to a dict compatible with our dataframe display"""
118
+ average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
119
+ data_dict = {
120
+ "eval_name": self.eval_name, # not a column, just a save name,
121
+ AutoEvalColumn.precision.name: self.precision.value.name,
122
+ AutoEvalColumn.model_type.name: self.model_type.value.name,
123
+ AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
124
+ AutoEvalColumn.weight_type.name: self.weight_type.value.name,
125
+ AutoEvalColumn.architecture.name: self.architecture,
126
+ AutoEvalColumn.model.name: make_clickable_model(self.full_model),
127
+ AutoEvalColumn.revision.name: self.revision,
128
+ AutoEvalColumn.average.name: average,
129
+ AutoEvalColumn.license.name: self.license,
130
+ AutoEvalColumn.likes.name: self.likes,
131
+ AutoEvalColumn.params.name: self.num_params,
132
+ AutoEvalColumn.still_on_hub.name: self.still_on_hub,
133
+ AutoEvalColumn.author.name: self.author,
134
+ }
135
+
136
+ for task in Tasks:
137
+ data_dict[task.value.col_name] = self.results[task.value.benchmark]
138
+
139
+ return data_dict
140
+
141
+
142
def get_request_file_for_model(requests_path, model_name, precision):
    """Selects the correct request file for a given model.

    Only considers runs tagged as FINISHED whose recorded precision matches.
    Returns "" when no candidate matches.
    """
    pattern = os.path.join(requests_path, f"{model_name}_eval_request_*.json")
    candidates = sorted(glob.glob(pattern), reverse=True)

    # The precision argument may be a dotted enum-style string; only the
    # last component is compared against the request file's "precision".
    wanted_precision = precision.split(".")[-1]

    # Every candidate is scanned without breaking, so the LAST match in
    # reverse-sorted order is the one returned.
    selected = ""
    for candidate in candidates:
        with open(candidate, "r") as fh:
            content = json.load(fh)
        if content["status"] in ["FINISHED"] and content["precision"] == wanted_precision:
            selected = candidate
    return selected
162
+
163
+
164
def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []

    for root, _, files in os.walk(results_path):
        # Model result folders are expected to contain only json files.
        if not files or any(not f.endswith(".json") for f in files):
            continue

        # Sort the files by date
        try:
            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
        except dateutil.parser._parser.ParserError:
            files = [files[-1]]

        model_result_filepaths.extend(os.path.join(root, f) for f in files)

    eval_results = {}
    for filepath in model_result_filepaths:
        # Build the result and enrich it with request-file metadata.
        eval_result = EvalResult.init_from_json_file(filepath)
        eval_result.update_with_request_file(requests_path)

        # Merge partial results belonging to the same eval run.
        existing = eval_results.get(eval_result.eval_name)
        if existing is not None:
            existing.results.update({k: v for k, v in eval_result.results.items() if v is not None})
        else:
            eval_results[eval_result.eval_name] = eval_result

    complete = []
    for candidate in eval_results.values():
        try:
            candidate.to_dict()  # we test if the dict version is complete
        except KeyError:  # not all eval values present
            continue
        complete.append(candidate)

    return complete