# Page-header residue captured with this file (Hugging Face hub listing):
#   categories: Datasets / ArXiv; path: dynasent/dynasent.py;
#   contributor: frankaging; commit: 3bb3188 ("add round2").
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Dynabench.DynaSent"""
from __future__ import absolute_import, division, print_function
import json
import os
from collections import OrderedDict
import datasets
# Module-level logger, named after this module per the `datasets` convention.
logger = datasets.logging.get_logger(__name__)
# Dataset version string surfaced through each BuilderConfig.
_VERSION = datasets.Version("1.1.0") # v1.1 fixed for example uid.
# Number of Dynabench data-collection rounds this script knows how to load.
_NUM_ROUNDS = 2
# Short top-level description used for the dataset card.
_DESCRIPTION = """\
Dynabench.DynaSent is a Sentiment Analysis dataset collected using a
human-and-model-in-the-loop.
""".strip()
class DynabenchRoundDetails:
    """Plain metadata container for one Dynabench.DynaSent round.

    Holds the citation, description, homepage, license, download URL,
    feature schema and (optionally) a mapping from subset name to that
    subset's download details.
    """

    def __init__(
        self, citation, description, homepage, data_license, data_url,
        data_features, data_subset_map=None
    ):
        # Every constructor argument is stored verbatim as an instance
        # attribute of the same name.
        vars(self).update(
            citation=citation,
            description=description,
            homepage=homepage,
            data_license=data_license,
            data_url=data_url,
            data_features=data_features,
            data_subset_map=data_subset_map,
        )
# Provide the details for each round.
#
# Rounds 1 and 2 ship in the same dynasent-v1.1 release and share their
# citation, description, homepage, license and download URL, so those values
# are defined once here instead of being duplicated per round. The repeated
# feature sub-schemas (model probabilities, label distribution, metadata) are
# built by small helpers that return a fresh dict per call, so the two
# Features objects never share mutable state.
_DYNASENT_CITATION = """\
@article{
potts-etal-2020-dynasent,
title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},
author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus
and Kiela, Douwe},
journal={arXiv preprint arXiv:2012.15349},
url={https://arxiv.org/abs/2012.15349},
year={2020}
}
""".strip()
_DYNASENT_DESCRIPTION = """\
DynaSent is an English-language benchmark task for ternary
(positive/negative/neutral) sentiment analysis.
For more details on the dataset construction process,
see https://github.com/cgpotts/dynasent.
""".strip()
_DYNASENT_HOMEPAGE = "https://dynabench.org/tasks/3"
_DYNASENT_LICENSE = "CC BY 4.0"
_DYNASENT_DATA_URL = "https://github.com/cgpotts/dynasent/raw/main/dynasent-v1.1.zip"


def _ternary_prob_features():
    """Fresh feature dict for the in-the-loop model's class probabilities."""
    return {
        "negative": datasets.Value("float32"),
        "positive": datasets.Value("float32"),
        "neutral": datasets.Value("float32"),
    }


def _label_distribution_features():
    """Fresh feature dict for the per-label sequences of annotator responses."""
    # NOTE(review): values appear to be string identifiers per label —
    # confirm against the raw jsonl records.
    return {
        label: datasets.features.Sequence(datasets.Value("string"))
        for label in ("positive", "negative", "neutral", "mixed")
    }


def _metadata_features():
    """Fresh feature dict for the bookkeeping metadata attached to examples."""
    return {
        "split": datasets.Value("string"),
        "round": datasets.Value("int32"),
        "subset": datasets.Value("string"),
        "model_in_the_loop": datasets.Value("string"),
    }


_ROUND_DETAILS = {
    1: DynabenchRoundDetails(
        citation=_DYNASENT_CITATION,
        description=_DYNASENT_DESCRIPTION,
        homepage=_DYNASENT_HOMEPAGE,
        data_license=_DYNASENT_LICENSE,
        data_url=_DYNASENT_DATA_URL,
        data_features=datasets.Features(
            {
                "id": datasets.Value("string"),
                "hit_ids": datasets.features.Sequence(
                    datasets.Value("string")
                ),
                "sentence": datasets.Value("string"),
                "indices_into_review_text": datasets.features.Sequence(
                    datasets.Value("int32")
                ),
                "model_0_label": datasets.Value("string"),
                "model_0_probs": _ternary_prob_features(),
                "text_id": datasets.Value("string"),
                "review_id": datasets.Value("string"),
                "review_rating": datasets.Value("int32"),
                "label_distribution": _label_distribution_features(),
                "gold_label": datasets.Value("string"),
                "metadata": _metadata_features(),
            }
        ),
        data_subset_map=OrderedDict({
            "all": {
                "dir": "dynasent-v1.1",
                "file_prefix": "dynasent-v1.1-round01-yelp-",
                "model": "RoBERTa",
            }
        }),
    ),
    2: DynabenchRoundDetails(
        citation=_DYNASENT_CITATION,
        description=_DYNASENT_DESCRIPTION,
        homepage=_DYNASENT_HOMEPAGE,
        data_license=_DYNASENT_LICENSE,
        data_url=_DYNASENT_DATA_URL,
        data_features=datasets.Features(
            {
                "id": datasets.Value("string"),
                "hit_ids": datasets.features.Sequence(
                    datasets.Value("string")
                ),
                "sentence": datasets.Value("string"),
                "sentence_author": datasets.Value("string"),
                "has_prompt": datasets.Value("bool"),
                "prompt_data": {
                    "indices_into_review_text": datasets.features.Sequence(
                        datasets.Value("int32")
                    ),
                    "review_rating": datasets.Value("int32"),
                    "prompt_sentence": datasets.Value("string"),
                    "review_id": datasets.Value("string"),
                },
                "model_1_label": datasets.Value("string"),
                "model_1_probs": _ternary_prob_features(),
                "text_id": datasets.Value("string"),
                "label_distribution": _label_distribution_features(),
                "gold_label": datasets.Value("string"),
                "metadata": _metadata_features(),
            }
        ),
        data_subset_map=OrderedDict({
            "all": {
                "dir": "dynasent-v1.1",
                "file_prefix": "dynasent-v1.1-round02-dynabench-",
                "model": "RoBERTa",
            }
        }),
    ),
}
class DynabenchDynaSentConfig(datasets.BuilderConfig):
    """BuilderConfig for Dynabench.DynaSent datasets."""

    def __init__(self, round, subset='all', **kwargs):
        """BuilderConfig for Dynabench.DynaSent.

        Args:
            round: integer, the dynabench round to load.
            subset: string, the subset of that round's data to load or 'all'.
            **kwargs: keyword arguments forwarded to super.

        Raises:
            TypeError: if `round` is not an integer.
            ValueError: if `round` is outside [1, _NUM_ROUNDS].
        """
        # Validate with explicit raises rather than `assert`: assertions are
        # stripped when Python runs with -O, which would silently disable
        # this argument checking.
        if not isinstance(round, int):
            raise TypeError("round ({}) must be set and of type integer".format(round))
        if not 0 < round <= _NUM_ROUNDS:
            raise ValueError(
                "round (received {}) must be between 1 and {}".format(round, _NUM_ROUNDS)
            )
        super(DynabenchDynaSentConfig, self).__init__(
            name="dynabench.dynasent.r{}.{}".format(round, subset),
            description="Dynabench DynaSent dataset for round {}, showing dataset selection: {}.".format(round, subset),
            **kwargs,
        )
        self.round = round
        self.subset = subset
class DynabenchDynaSent(datasets.GeneratorBasedBuilder):
    """Dynabench.DynaSent dataset builder.

    One config exists per (round, subset) pair; each config exposes
    train/validation/test splits read from the round's jsonl shards.
    """

    BUILDER_CONFIG_CLASS = DynabenchDynaSentConfig
    BUILDER_CONFIGS = [
        DynabenchDynaSentConfig(
            version=_VERSION,
            round=round_num,
            subset=subset,
        )  # pylint:disable=g-complex-comprehension
        # `round_num` avoids shadowing the `round` builtin inside the
        # comprehension; the keyword argument name stays `round`.
        for round_num in range(1, _NUM_ROUNDS + 1)
        for subset in _ROUND_DETAILS[round_num].data_subset_map
    ]

    def _info(self):
        """Assemble DatasetInfo from this config's round details."""
        round_details = _ROUND_DETAILS[self.config.round]
        return datasets.DatasetInfo(
            description=round_details.description,
            features=round_details.data_features,
            homepage=round_details.homepage,
            citation=round_details.citation,
            supervised_keys=None,
        )

    @staticmethod
    def _get_filepath(dl_dir, round, subset, split):
        """Return the path of one extracted jsonl shard.

        E.g. <dl_dir>/dynasent-v1.1/dynasent-v1.1-round01-yelp-train.jsonl.
        """
        subset_details = _ROUND_DETAILS[round].data_subset_map[subset]
        return os.path.join(
            dl_dir,
            subset_details["dir"],
            subset_details["file_prefix"] + split + ".jsonl",
        )

    def _split_generators(self, dl_manager):
        """Download/extract the round's archive and describe its splits."""
        round_details = _ROUND_DETAILS[self.config.round]
        dl_dir = dl_manager.download_and_extract(round_details.data_url)
        model_in_the_loop = round_details.data_subset_map[self.config.subset]["model"]
        # (split name, on-disk file suffix, split label stored in metadata);
        # the on-disk "dev" files are surfaced as the "validation" split.
        split_specs = [
            (datasets.Split.TRAIN, "train", "train"),
            (datasets.Split.VALIDATION, "dev", "validation"),
            (datasets.Split.TEST, "test", "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": self._get_filepath(
                        dl_dir, self.config.round, self.config.subset, file_suffix
                    ),
                    "split": split_label,
                    "round": self.config.round,
                    "subset": self.config.subset,
                    "model_in_the_loop": model_in_the_loop,
                },
            )
            for split_name, file_suffix, split_label in split_specs
        ]

    def _generate_examples(self, filepath, split, round, subset, model_in_the_loop):
        """Yield (key, example) pairs in the raw (text) form.

        Records whose gold label is not strictly ternary (e.g. 'mixed' or
        no-majority labels) are skipped.
        """
        ternary_labels = ('positive', 'negative', 'neutral')  # Enforce to be the ternary version now.
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                d = json.loads(line)
                if d['gold_label'] not in ternary_labels:
                    continue
                # Bookkeeping fields shared by both rounds' examples.
                metadata = {
                    "split": split,
                    "round": round,
                    "subset": subset,
                    "model_in_the_loop": model_in_the_loop,
                }
                if round == 1:
                    # Construct DynaSent round-1 features.
                    yield d["text_id"], {
                        "id": d["text_id"],
                        # DynaSent Example.
                        "hit_ids": d["hit_ids"],
                        "sentence": d["sentence"],
                        "indices_into_review_text": d["indices_into_review_text"],
                        "model_0_label": d["model_0_label"],
                        "model_0_probs": d["model_0_probs"],
                        "text_id": d["text_id"],
                        "review_id": d["review_id"],
                        "review_rating": d["review_rating"],
                        "label_distribution": d["label_distribution"],
                        "gold_label": d["gold_label"],
                        # Metadata.
                        "metadata": metadata,
                    }
                elif round == 2:
                    # Construct DynaSent round-2 features. Prompt fields may
                    # be absent from the raw record; fill schema-compatible
                    # defaults ([] for indices, -1 for an unknown rating,
                    # "" for strings). When has_prompt is False, no prompt
                    # fields are read at all.
                    prompt_src = d["prompt_data"] if d["has_prompt"] else {}
                    prompt_data = {
                        "indices_into_review_text": prompt_src.get(
                            "indices_into_review_text", []
                        ),
                        "review_rating": prompt_src.get("review_rating", -1),
                        "prompt_sentence": prompt_src.get("prompt_sentence", ""),
                        "review_id": prompt_src.get("review_id", ""),
                    }
                    yield d["text_id"], {
                        "id": d["text_id"],
                        # DynaSent Example.
                        "hit_ids": d["hit_ids"],
                        "sentence": d["sentence"],
                        "sentence_author": d["sentence_author"],
                        "has_prompt": d["has_prompt"],
                        "prompt_data": prompt_data,
                        "model_1_label": d["model_1_label"],
                        "model_1_probs": d["model_1_probs"],
                        "text_id": d["text_id"],
                        "label_distribution": d["label_distribution"],
                        "gold_label": d["gold_label"],
                        # Metadata.
                        "metadata": metadata,
                    }