from pandas import read_csv
from datasets import GeneratorBasedBuilder, Value, Version, BuilderConfig, Features, DatasetInfo, SplitGenerator, Split

_DESCRIPTION = '''
This dataset contains anecdotes parsed from a few VK social network communities. The data can be useful for fine-tuning language generation models as well as for tasks of automatic humour analysis.
'''

_HOMEPAGE = 'https://huggingface.co/datasets/zeio/baneks'

_LICENSE = 'Apache License Version 2.0'
_URLS = {
    'censored': 'https://huggingface.co/datasets/zeio/baneks/resolve/main/censored.tsv',
    'default': 'https://huggingface.co/datasets/zeio/baneks/resolve/main/default.tsv',
    'inflated': 'https://huggingface.co/datasets/zeio/baneks/resolve/main/inflated.tsv'
}


class Baneks(GeneratorBasedBuilder):
    VERSION = Version('10.10.2023')

    BUILDER_CONFIGS = [
        BuilderConfig(name = 'censored', version = VERSION, description = 'No duplicates - entries with the same text are grouped and aggregated'),
        BuilderConfig(name = 'default', version = VERSION, description = 'Same as "censored", but censored words are replaced with inferred values for their initial form'),
        BuilderConfig(name = 'inflated', version = VERSION, description = 'Each entry corresponds to a post, minimal changes to the source data')
    ]

    DEFAULT_CONFIG_NAME = 'default'

    def _info(self):
        return DatasetInfo(
            description = _DESCRIPTION,
            features = Features({
                'text': Value('string'),
                'published': Value('string'),
                'id': Value('int32'),
                'n-likes': Value('int32'),
                'n-views': Value('float'),
                'accessed': Value('string'),
                'source': Value('string')
            }),
            homepage = _HOMEPAGE,
            license = _LICENSE
        )

    def _split_generators(self, dl_manager):
        name = self.config.name
        url = _URLS[name]

        return [
            SplitGenerator(
                name = Split.TRAIN,
                gen_kwargs = {
                    'path': dl_manager.download_and_extract(url)
                }
            )
        ]

    def _generate_examples(self, path: str):
        # Key each example by the zero-padded post id combined with its source community,
        # so ids repeated across communities do not collide
        for _, row in read_csv(path, sep = '\t').iterrows():
            yield f'{row["id"]:08d}-{row["source"]}', dict(row)
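

# Usage sketch (assuming this script is published as the `zeio/baneks` dataset on the Hugging Face Hub,
# as the homepage above suggests): any of the three configs can be loaded by name, for example
#
#     from datasets import load_dataset
#
#     dataset = load_dataset('zeio/baneks', 'censored')
#     print(dataset['train'][0]['text'])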