Update metrics/scrolls.py
metrics/scrolls.py CHANGED (+26 -6)
````diff
@@ -31,24 +31,44 @@ Returns: depending on the Scrolls subset, one or several of:
     "exact_match": Exact Match score
     "f1": F1 score
     "rouge": ROUGE score
+
+Use the following code to download the metric:
+```
+import os, shutil
+from huggingface_hub import hf_hub_download
+def download_metric():
+    scrolls_metric_path = hf_hub_download(repo_id="datasets/tau/scrolls", filename="metrics/scrolls.py")
+    updated_scrolls_metric_path = (
+        os.path.dirname(scrolls_metric_path) + os.path.basename(scrolls_metric_path).replace(".", "_") + ".py"
+    )
+    shutil.copy(scrolls_metric_path, updated_scrolls_metric_path)
+    return updated_scrolls_metric_path
+
+scrolls_metric_path = download_metric()
+```
+
 Examples:
     predictions = ["exact match example", "hello there", "general kenobi"]  # List[str]
     references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]]  # List[List[str]]
 
-    >>> scrolls_metric = datasets.load_metric(
+    >>> scrolls_metric = datasets.load_metric(scrolls_metric_path, 'gov_report')  # 'gov_report' or any of ["qmsum", "summ_screen_fd"]
     >>> results = scrolls_metric.compute(predictions=predictions, references=references)
     >>> print(results)
-    {'rouge/rouge1': 72.2222, 'rouge/rouge2': 33.3333, 'rouge/rougeL': 72.2222, 'rouge/rougeLsum': 72.2222, 'rouge/geometric_mean': 55.8136,
+    {'rouge/rouge1': 72.2222, 'rouge/rouge2': 33.3333, 'rouge/rougeL': 72.2222, 'rouge/rougeLsum': 72.2222, 'rouge/geometric_mean': 55.8136,
+     'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'scrolls_score': 55.8136,
+     'display_keys': ['rouge/rouge1', 'rouge/rouge2', 'rouge/rougeL'], 'display': [72.2222, 33.3333, 72.2222]}
 
-    >>> scrolls_metric = datasets.load_metric(
+    >>> scrolls_metric = datasets.load_metric(scrolls_metric_path, 'contract_nli')  # 'contract_nli' or "quality"
     >>> results = scrolls_metric.compute(predictions=predictions, references=references)
     >>> print(results)
-    {'exact_match': 33.3333, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667
+    {'exact_match': 33.3333, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'scrolls_score': 33.3333,
+     'display_keys': ['exact_match'], 'display': [33.3333]}
 
-    >>> scrolls_metric = datasets.load_metric(
+    >>> scrolls_metric = datasets.load_metric(scrolls_metric_path, 'narrative_qa')  # 'narrative_qa' or "qasper"
     >>> results = scrolls_metric.compute(predictions=predictions, references=references)
     >>> print(results)
-    {'f1': 72.2222, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667
+    {'f1': 72.2222, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'scrolls_score': 72.2222,
+     'display_keys': ['f1'], 'display': [72.2222]}
 """
 
 DATASET_TO_METRICS = {
````
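For reference, a minimal end-to-end sketch of how the downloaded metric is meant to be used, stitched together from the docstring pieces in the diff above. It assumes `datasets` and `huggingface_hub` are installed and uses the `'gov_report'` config from the first example; unlike the snippet in the diff, it builds the copy's destination path with `os.path.join`, and the comments about why the script is renamed are an interpretation, not something the commit states.

```python
import os
import shutil

import datasets
from huggingface_hub import hf_hub_download


def download_metric():
    # Fetch the SCROLLS metric script from the dataset repo on the Hub.
    scrolls_metric_path = hf_hub_download(repo_id="datasets/tau/scrolls", filename="metrics/scrolls.py")
    # Copy it under a new file name ("scrolls_py.py"), presumably so the local
    # metric script does not clash with the "scrolls" dataset script name.
    updated_scrolls_metric_path = os.path.join(
        os.path.dirname(scrolls_metric_path),
        os.path.basename(scrolls_metric_path).replace(".", "_") + ".py",
    )
    shutil.copy(scrolls_metric_path, updated_scrolls_metric_path)
    return updated_scrolls_metric_path


scrolls_metric_path = download_metric()

# 'gov_report' selects the ROUGE-based configuration; 'contract_nli',
# 'narrative_qa', etc. select Exact Match or F1, as in the docstring examples.
scrolls_metric = datasets.load_metric(scrolls_metric_path, "gov_report")

predictions = ["exact match example", "hello there", "general kenobi"]
references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]]

results = scrolls_metric.compute(predictions=predictions, references=references)
print(results["scrolls_score"])
```

In the `'gov_report'` example output, `scrolls_score` (55.8136) matches `rouge/geometric_mean`, so it appears to serve as the single aggregate number per subset, with `display_keys`/`display` listing the individual scores to report.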