Datasets:
rcds
/

Modalities:
Text
ArXiv:
Libraries:
Datasets
License:
Joelito committed on
Commit
d6ca119
1 Parent(s): ef3166a

Create swiss_ruling_summarization.py

Browse files
Files changed (1) hide show
  1. swiss_ruling_summarization.py +183 -0
swiss_ruling_summarization.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """Dataset for the Swiss Ruling Summarization task."""
16
+
17
+
18
+ import csv
19
+ import json
20
+ import lzma
21
+ import os
22
+
23
+ import datasets
24
+ try:
25
+ import lzma as xz
26
+ except ImportError:
27
+ import pylzma as xz
28
+
29
+
30
# TODO: Add BibTeX citation — this is the unmodified dataset-script template
# placeholder, not a real citation for this dataset.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# Human-readable description shown on the dataset page.
_DESCRIPTION = """\
This dataset contains court decisions for the swiss ruling summarization task.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# Base download URL per builder configuration. The HuggingFace Datasets
# library doesn't host the data; it only points to the original files.
# NOTE(review): the URL points at the `swiss_court_view_generation` repo —
# confirm the summarization splits are really hosted there.
_URLS = {
    "full": "https://huggingface.co/datasets/rcds/swiss_court_view_generation/resolve/main/data"
}
57
+
58
+
59
class SwissRulingSummarization(datasets.GeneratorBasedBuilder):
    """Court decisions for the Swiss ruling summarization task.

    Each example pairs the full ruling text with its "regeste" (the official
    headnote/summary) plus court metadata: language, year, court, chamber,
    canton, region and law (sub-)area.
    """

    VERSION = datasets.Version("1.0.0")

    # A single configuration covering the whole dataset.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="This part of my dataset covers the whole dataset",
        )
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        """Return the dataset metadata (description and feature schema).

        The guard on ``self.config.name`` was removed: "full" is the only
        configuration, and with the guard any other name would have left
        ``features`` undefined and raised NameError.
        """
        features = datasets.Features(
            {
                "decision_id": datasets.Value("string"),
                "header": datasets.Value("string"),
                "regeste": datasets.Value("string"),  # the headnote/summary target
                "text": datasets.Value("string"),
                "law_area": datasets.Value("string"),
                "law_sub_area": datasets.Value("string"),
                "language": datasets.Value("string"),
                "year": datasets.Value("int32"),
                "court": datasets.Value("string"),
                "chamber": datasets.Value("string"),
                "canton": datasets.Value("string"),
                "region": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Download the three split archives and declare the splits.

        URLs are assembled with plain string formatting instead of
        ``os.path.join``, which would produce backslash separators on
        Windows and break the download URLs.
        """
        base_url = _URLS[self.config.name]
        split_files = {
            split: dl_manager.download(f"{base_url}/{split}.jsonl.xz")
            for split in ("train", "validation", "test")
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": split_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": split_files["validation"], "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": split_files["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one xz-compressed JSON-lines file.

        A truncated archive is tolerated: if at least one line was read
        before the LZMAError, the error is printed and generation stops;
        if nothing could be read at all, the error is re-raised.
        """
        line_counter = 0
        try:
            # lzma is always available in Python 3, so the pylzma fallback
            # alias is unnecessary; lzma.open also accepts a path directly,
            # removing the extra open(filepath, "rb") wrapper.
            with lzma.open(filepath, "rt", encoding="utf-8") as f:
                # `idx` instead of `id` — don't shadow the builtin.
                for idx, line in enumerate(f):
                    line_counter += 1
                    if not line:
                        continue
                    data = json.loads(line)
                    # The dead `config.name == "origin"` check was dropped:
                    # "full" is the only configuration defined above.
                    yield idx, {
                        "decision_id": data["decision_id"],
                        "header": data["header"],
                        "regeste": data["regeste"],
                        "text": data["text"],
                        "law_area": data["law_area"],
                        "law_sub_area": data["law_sub_area"],
                        "language": data["language"],
                        "year": data["year"],
                        "court": data["court"],
                        "chamber": data["chamber"],
                        "canton": data["canton"],
                        "region": data["region"],
                    }
        except lzma.LZMAError as e:
            print(split, e)
            if line_counter == 0:
                # Nothing usable was read — propagate with the original traceback.
                raise