update

- SuperLim.py +6 -6
- data/DaLAJ.tar.xz +0 -0
- data/DaLAJ/dev.csv +0 -0
- data/DaLAJ/test.csv +0 -0
- data/DaLAJ/train.csv +0 -0
SuperLim.py
CHANGED
@@ -126,7 +126,7 @@ The Swedish Word-in-Context dataset provides a benchmark for evaluating distribu
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URL = "https://huggingface.co/datasets/AI-Sweden/SuperLim/resolve/main/data/"
 _TASKS = {
-    "dalaj": "DaLAJ
+    "dalaj": "DaLAJ",
     "sweana": "SweAna",
     "swediag": "SweDiag",
     "swefaq": "SweFaq",
@@ -211,15 +211,15 @@ class SuperLim(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         #urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(os.path.join(_URL,_TASKS[self.config.name]))
-        data_dir = os.path.join(data_dir,self.config.name)
+        #data_dir = dl_manager.download_and_extract(os.path.join(_URL,_TASKS[self.config.name]))
+        #data_dir = os.path.join(data_dir,self.config.name)

         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
+                    "filepath": os.path.join(_URL,_TASKS[self.config.name], "train.csv"),
                     "split": "train",
                 },
             ),
@@ -227,7 +227,7 @@ class SuperLim(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
+                    "filepath": os.path.join(_URL,_TASKS[self.config.name], "test.csv"),
                     "split": "test"
                 },
             ),
@@ -235,7 +235,7 @@ class SuperLim(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
+                    "filepath": os.path.join(_URL,_TASKS[self.config.name], "dev.csv"),
                     "split": "dev",
                 },
             ),
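In short, the commit stops downloading and extracting data/DaLAJ.tar.xz and instead points each split's filepath at the per-split CSVs now stored under data/DaLAJ/ on the Hub. Because those filepath values are remote URLs rather than local paths, _generate_examples (not shown in this diff) has to fetch them itself. Below is a minimal sketch of the more conventional pattern of routing the URLs through dl_manager.download so the generator works with cached local files; this is an illustration, not the commit's code, and the class name and empty feature schema are placeholders since the real DaLAJ columns are not visible in this diff.

```python
import csv
import os

import datasets

_URL = "https://huggingface.co/datasets/AI-Sweden/SuperLim/resolve/main/data/"
_TASKS = {"dalaj": "DaLAJ"}  # only the config touched by this commit


class SuperLimSketch(datasets.GeneratorBasedBuilder):
    """Hedged sketch, not the real SuperLim loader."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name="dalaj")]

    def _info(self):
        # Placeholder: the real DaLAJ feature schema is not shown in this diff.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        # download() accepts a nested structure of URLs and returns the same
        # structure with local cached paths, so _generate_examples can open()
        # ordinary files instead of remote URLs.
        urls = {
            split: os.path.join(_URL, _TASKS[self.config.name], f"{split}.csv")
            for split in ("train", "dev", "test")
        }
        paths = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": paths["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": paths["dev"], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": paths["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Yield one example per CSV row; keys must match the features in _info().
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(csv.DictReader(f)):
                yield idx, row
```

One side note: building URLs with os.path.join works here because the separator is "/" on POSIX systems, but on Windows it would insert backslashes into the URL; posixpath.join or plain string formatting avoids that.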
data/DaLAJ.tar.xz
DELETED
Binary file (207 kB)
data/DaLAJ/dev.csv
ADDED
The diff for this file is too large to render.
data/DaLAJ/test.csv
ADDED
The diff for this file is too large to render.
data/DaLAJ/train.csv
ADDED
The diff for this file is too large to render.
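With the per-split CSVs committed under data/DaLAJ/, the dalaj config can be loaded directly from the Hub. A quick usage check, assuming a datasets version that still executes repository loading scripts:

```python
from datasets import load_dataset

# "dalaj" is one of the configs declared in _TASKS in SuperLim.py.
dalaj = load_dataset("AI-Sweden/SuperLim", "dalaj")

print(dalaj)              # expected: a DatasetDict with train, validation and test splits
print(dalaj["train"][0])  # first training example
```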