Datasets: add plain_text field that combines text fields
euscrawl.py  +25 -7  CHANGED
@@ -53,9 +53,9 @@ _CITATION = """\
 _URL = "http://ixa.ehu.eus/euscrawl/files/euscrawl-v1-free-jsonl.tar.bz2"
 _FILEPATH = "euscrawl-v1-free-jsonl/euscrawl-v1.free.jsonl"
 KEYS = [
+    "plain_text",
     "title",
     "opening",
-    "heading",
     "text",
     "extra",
     "license",
@@ -64,6 +64,7 @@ KEYS = [
     "author",
     "type",
     "lang",
+    "heading",
     "category",
     "tags",
     "id",
@@ -83,20 +84,23 @@ class EusCrawl(datasets.GeneratorBasedBuilder):
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
+                    "plain_text": datasets.Value("string"),
                     "title": datasets.Value("string"),
                     "opening": datasets.Value("string"),
-                    "heading": datasets.Value("string"),
                     "text": datasets.Value("string"),
-                    "extra": datasets.Sequence(
-
-
-
+                    "extra": datasets.Sequence(
+                        {
+                            "title": datasets.Value("string"),
+                            "text": datasets.Value("string"),
+                        }
+                    ),
                     "license": datasets.Value("string"),
                     "source": datasets.Value("string"),
                     "url": datasets.Value("string"),
                     "author": datasets.Value("string"),
                     "type": datasets.Value("string"),
                     "lang": datasets.Value("string"),
+                    "heading": datasets.Value("string"),
                     "category": datasets.Sequence(datasets.Value("string")),
                     "tags": datasets.Sequence(datasets.Value("string")),
                     "id": datasets.Value("int32"),
@@ -128,5 +132,19 @@ class EusCrawl(datasets.GeneratorBasedBuilder):
             if filepath == _FILEPATH:
                 for id, line in enumerate(file):
                     data = json.loads(line)
-
+                    plain_text_lines = []
+                    plain_text_lines += data.get("title", "").splitlines()
+                    plain_text_lines += data.get("opening", "").splitlines()
+                    plain_text_lines += data.get("text", "").splitlines()
+                    plain_text_lines += [
+                        line
+                        for extra in data.get("extra", [])
+                        for line in extra["title"].splitlines()
+                        + extra["text"].splitlines()
+                    ]
+                    plain_text_lines = [
+                        line.strip() for line in plain_text_lines if line.strip()
+                    ]
+                    data["plain_text"] = "\n".join(plain_text_lines)
+                    # default to None if field is missing
                     yield id, {key: data.get(key, None) for key in KEYS}
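
For reference, below is a minimal, self-contained sketch of how the added plain_text assembly behaves on a single record. The sample field values are invented for illustration (they are not taken from the corpus), but the list-building, stripping, and joining steps mirror the added code above.

# Hypothetical EusCrawl-style record; the values are made up for illustration.
data = {
    "title": "Example title",
    "opening": "  Opening paragraph.  ",
    "text": "First body line.\n\nSecond body line.",
    "extra": [{"title": "Extra section title", "text": "Extra section text."}],
}

# Same assembly as the diff: collect lines from each text field...
plain_text_lines = []
plain_text_lines += data.get("title", "").splitlines()
plain_text_lines += data.get("opening", "").splitlines()
plain_text_lines += data.get("text", "").splitlines()
plain_text_lines += [
    line
    for extra in data.get("extra", [])
    for line in extra["title"].splitlines() + extra["text"].splitlines()
]
# ...then drop empty lines, strip surrounding whitespace, and join with newlines.
plain_text_lines = [line.strip() for line in plain_text_lines if line.strip()]
print("\n".join(plain_text_lines))
# Example title
# Opening paragraph.
# First body line.
# Second body line.
# Extra section title
# Extra section text.

The result is one non-empty, stripped line per text fragment, with the original blank lines discarded, which seems to be the intent of the strip-and-filter step in the commit.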