Datasets:

Modalities:
Text
Formats:
parquet
Libraries:
Datasets
Dask
License:
albertvillanova HF staff committed on
Commit
9468830
1 Parent(s): 123c572

Convert dataset to Parquet (#2)

Browse files

- Convert dataset to Parquet (7d7bc1d2fc53afa7a061e131b4b8c749e3b9c6f0)
- Delete loading script (8182d28ec7b0485f8cf24981ed32f4bbdb4f4849)

README.md CHANGED
@@ -17,9 +17,9 @@ source_datasets:
17
  task_categories:
18
  - translation
19
  task_ids: []
20
- paperswithcode_id: null
21
  pretty_name: OpusFinlex
22
  dataset_info:
 
23
  features:
24
  - name: translation
25
  dtype:
@@ -27,13 +27,18 @@ dataset_info:
27
  languages:
28
  - fi
29
  - sv
30
- config_name: fi-sv
31
  splits:
32
  - name: train
33
- num_bytes: 610550215
34
  num_examples: 3114141
35
- download_size: 153886554
36
- dataset_size: 610550215
 
 
 
 
 
 
37
  ---
38
 
39
  # Dataset Card for [opus_finlex]
 
17
  task_categories:
18
  - translation
19
  task_ids: []
 
20
  pretty_name: OpusFinlex
21
  dataset_info:
22
+ config_name: fi-sv
23
  features:
24
  - name: translation
25
  dtype:
 
27
  languages:
28
  - fi
29
  - sv
 
30
  splits:
31
  - name: train
32
+ num_bytes: 610547719
33
  num_examples: 3114141
34
+ download_size: 269359572
35
+ dataset_size: 610547719
36
+ configs:
37
+ - config_name: fi-sv
38
+ data_files:
39
+ - split: train
40
+ path: fi-sv/train-*
41
+ default: true
42
  ---
43
 
44
  # Dataset Card for [opus_finlex]
fi-sv/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e74e58bf7aaffa2242f0fac85e109a5a93643bd3797c8197f6ba5f81f848921e
3
+ size 109010506
fi-sv/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2b28671cb4194a1373d340e68c19169d76c1bfa827031d0e1606c65e37c483e
3
+ size 160349066
opus_finlex.py DELETED
@@ -1,92 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
-
17
- import os
18
-
19
- import datasets
20
-
21
-
22
- _CITATION = """\
23
- J. Tiedemann, 2012, Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the \
24
- 8th International Conference on Language Resources and Evaluation (LREC 2012)"""
25
-
26
-
27
- _DESCRIPTION = """\
28
- The Finlex Data Base is a comprehensive collection of legislative and other judicial information of Finland, \
29
- which is available in Finnish, Swedish and partially in English. This corpus is taken from the Semantic Finlex serice \
30
- that provides the Finnish and Swedish data as linked open data and also raw XML files."""
31
-
32
-
33
- _HOMEPAGE = "http://opus.nlpl.eu/Finlex.php"
34
-
35
-
36
- _LICENSE = ""
37
-
38
-
39
- _URLs = {"train": "https://object.pouta.csc.fi/OPUS-Finlex/v2018/moses/fi-sv.txt.zip"}
40
-
41
-
42
- class OpusFinlex(datasets.GeneratorBasedBuilder):
43
- """TODO: Short description of my dataset."""
44
-
45
- VERSION = datasets.Version("1.0.0")
46
-
47
- BUILDER_CONFIGS = [datasets.BuilderConfig(name="fi-sv", version=VERSION)]
48
-
49
- def _info(self):
50
- return datasets.DatasetInfo(
51
- description=_DESCRIPTION,
52
- features=datasets.Features(
53
- {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
54
- ),
55
- supervised_keys=None,
56
- homepage="http://opus.nlpl.eu/Finlex.php",
57
- citation=_CITATION,
58
- )
59
-
60
- def _split_generators(self, dl_manager):
61
- """Returns SplitGenerators."""
62
- data_dir = dl_manager.download_and_extract(_URLs)
63
- return [
64
- datasets.SplitGenerator(
65
- name=datasets.Split.TRAIN,
66
- # These kwargs will be passed to _generate_examples
67
- gen_kwargs={
68
- "source_file": os.path.join(data_dir["train"], "Finlex.fi-sv.fi"),
69
- "target_file": os.path.join(data_dir["train"], "Finlex.fi-sv.sv"),
70
- "split": "train",
71
- },
72
- ),
73
- ]
74
-
75
- def _generate_examples(self, source_file, target_file, split):
76
- """This function returns the examples in the raw (text) form."""
77
- with open(source_file, encoding="utf-8") as f:
78
- source_sentences = f.read().split("\n")
79
- with open(target_file, encoding="utf-8") as f:
80
- target_sentences = f.read().split("\n")
81
-
82
- assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
83
- len(source_sentences),
84
- len(target_sentences),
85
- source_file,
86
- target_file,
87
- )
88
-
89
- source, target = tuple(self.config.name.split("-"))
90
- for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
91
- result = {"translation": {source: l1, target: l2}}
92
- yield idx, result