Update files from the datasets library (from 1.6.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.0
glue.py
CHANGED
@@ -16,14 +16,12 @@
 # Lint as: python3
 """The General Language Understanding Evaluation (GLUE) benchmark."""
 
-from __future__ import absolute_import, division, print_function
 
 import csv
 import os
 import textwrap
 
 import numpy as np
-import six
 
 import datasets
 
@@ -453,7 +451,7 @@ class Glue(datasets.GeneratorBasedBuilder):
     ]
 
     def _info(self):
-        features = {text_feature: datasets.Value("string") for text_feature in six.iterkeys(self.config.text_features)}
+        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
         if self.config.label_classes:
             features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
         else:
@@ -565,7 +563,7 @@ class Glue(datasets.GeneratorBasedBuilder):
                             "is_acceptable": row[1],
                         }
 
-                    example = {feat: row[col] for feat, col in six.iteritems(self.config.text_features)}
+                    example = {feat: row[col] for feat, col in self.config.text_features.items()}
                     example["idx"] = n
 
                     if self.config.label_column in row:
@@ -579,7 +577,7 @@ class Glue(datasets.GeneratorBasedBuilder):
                         example["label"] = process_label(-1)
 
                     # Filter out corrupted rows.
-                    for value in six.itervalues(example):
+                    for value in example.values():
                         if value is None:
                             break
                     else:
@@ -588,12 +586,15 @@ class Glue(datasets.GeneratorBasedBuilder):
     def _generate_example_mrpc_files(self, mrpc_files, split):
         if split == "test":
             with open(mrpc_files["test"], encoding="utf8") as f:
+                # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
+                # the Quality key.
+                f.seek(3)
                 reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                 for n, row in enumerate(reader):
                     yield {
                         "sentence1": row["#1 String"],
                         "sentence2": row["#2 String"],
-                        "label":
+                        "label": int(row["Quality"]),
                         "idx": n,
                     }
         else:
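
A quick aside on why the new f.seek(3) call matters. This sketch is not part of the commit: the tiny in-memory TSV below is made up, and only the Quality / #1 String / #2 String column names mirror the real MRPC test file. The file ships with a UTF-8 byte-order mark, and because the script opens it with encoding="utf8" rather than "utf-8-sig", csv.DictReader would otherwise read the first header field as "\ufeffQuality", so row["Quality"] would fail. Skipping the first 3 bytes strips the BOM before the header is parsed.

import csv
import io

# Made-up sample that starts with the UTF-8 BOM, like msr_paraphrase_test.txt does.
raw = b"\xef\xbb\xbfQuality\t#1 String\t#2 String\n1\tfoo\tbar\n"

# Opened as plain utf8, the BOM survives as "\ufeff" glued onto the first header field.
f = io.TextIOWrapper(io.BytesIO(raw), encoding="utf8")
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
print("Quality" in next(reader))  # False: the key is actually "\ufeffQuality"

# Skipping the 3 BOM bytes first, as the updated script does, restores the expected key.
f = io.TextIOWrapper(io.BytesIO(raw), encoding="utf8")
f.seek(3)
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
print(int(next(reader)["Quality"]))  # 1

Opening the file with encoding="utf-8-sig" would have the same effect; the updated script keeps encoding="utf8" and skips the bytes explicitly instead.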