Update pclue.py
pclue.py
CHANGED
@@ -65,22 +65,19 @@ class Pclue(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": path+"/pclue/train.json",
-                    "split": "train",
+                    "filepath": path + "/pclue/train.json",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": path+"/pclue/test.json",
-                    "split": "test",
+                    "filepath": path + "/pclue/test.json",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": path+"/pclue/dev.json",
-                    "split": "val",
+                    "filepath": path + "/pclue/dev.json",
                 },
             ),
         ]
@@ -89,6 +86,5 @@ class Pclue(datasets.GeneratorBasedBuilder):
         """Yields examples."""
         with open(filepath, encoding="utf-8") as f:
             for idx, row in enumerate(f):
-                dd=json.loads(row)
-                yield idx, {"input":dd["input"],"target":dd["target"]}
-
+                dd = json.loads(row)
+                yield idx, {"input": dd["input"], "target": dd["target"]}
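
The "split" entries appear to have been dropped from gen_kwargs because the datasets library unpacks each gen_kwargs dict directly into _generate_examples, so its keys must match that method's parameter names. A minimal sketch of the contract after this change, assuming the generator now takes only filepath (its signature and the rest of the class body are not shown in the diff):

import json

import datasets


class Pclue(datasets.GeneratorBasedBuilder):
    # _info() and _split_generators() as in the diff above (elided here).

    def _generate_examples(self, filepath):
        """Yields examples."""
        # Each gen_kwargs dict built in _split_generators() is unpacked into
        # this call, so after this commit it must contain only "filepath".
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(f):  # one JSON object per line
                dd = json.loads(row)
                yield idx, {"input": dd["input"], "target": dd["target"]}

With the keys and the parameter list aligned, each split is generated by reading the corresponding JSON-lines file and yielding one (input, target) pair per line.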