# multi-model-plant-genome-corpus / multi-model-plant-genome-corpus.py
from typing import List
import datasets
from Bio import SeqIO
import os
_CITATION = ""
_DESCRIPTION = """
Dataset made of the genomes of model plant species available on NCBI.
The default configuration "6kbp" yields chunks of 6,200 bp: 6,000 bp plus a 100 bp overlap on each side.
The chunks of DNA are cleaned and processed so that they contain only the letters A, T, C, G and N.
"""
_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/"
_LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/"
_CHUNK_LENGTHS = [6000,]
def filter_fn(char: str) -> str:
"""
    Replaces any letter other than the base nucleotides A, T, C and G with 'N'.
"""
if char in {'A', 'T', 'C', 'G'}:
return char
else:
return 'N'
def clean_sequence(seq: str) -> str:
"""
    Uppercase a chunk of DNA and restrict its alphabet to
    A, T, C, G and N.
"""
seq = seq.upper()
    seq = ''.join(map(filter_fn, seq))
return seq
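
# Illustrative example (the input string below is made up, not taken from the dataset):
# lowercase letters are uppercased and ambiguous IUPAC codes become 'N', e.g.
#     clean_sequence("acgtRYacgt")  ->  "ACGTNNACGT"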
class PlantMultiSpeciesGenomesConfig(datasets.BuilderConfig):
"""BuilderConfig for the Plant Multi Species Pre-training Dataset."""
def __init__(self, *args, chunk_length: int, overlap: int = 100, **kwargs):
"""BuilderConfig for the multi species genomes.
Args:
chunk_length (:obj:`int`): Chunk length.
            overlap (:obj:`int`): Overlap in base pairs between two consecutive chunks (defaults to 100).
**kwargs: keyword arguments forwarded to super.
"""
num_kbp = int(chunk_length/1000)
super().__init__(
*args,
name=f'{num_kbp}kbp',
**kwargs,
)
self.chunk_length = chunk_length
self.overlap = overlap
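
# Illustrative example: PlantMultiSpeciesGenomesConfig(chunk_length=6000) builds a
# config named "6kbp" with chunk_length=6000 and overlap=100, which is how
# BUILDER_CONFIGS below is populated from _CHUNK_LENGTHS.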
class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
"""Genomes from multiple plant species, filtered and split into chunks of consecutive nucleotides."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIG_CLASS = PlantMultiSpeciesGenomesConfig
BUILDER_CONFIGS = [PlantMultiSpeciesGenomesConfig(chunk_length=chunk_length) for chunk_length in _CHUNK_LENGTHS]
DEFAULT_CONFIG_NAME = "6kbp"
def _info(self):
features = datasets.Features(
{
"sequence": datasets.Value("string"),
"description": datasets.Value("string"),
"start_pos": datasets.Value("int32"),
"end_pos": datasets.Value("int32"),
}
)
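        # A yielded example therefore has the following shape (values are illustrative):
        #     {"sequence": "ACGT...N", "description": "<FASTA record description>",
        #      "start_pos": 0, "end_pos": 6200}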
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
filepaths_txt = dl_manager.download('plant_genome_file_names.txt')
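        # 'plant_genome_file_names.txt' is a manifest shipped alongside this script:
        # one genome file name per line, each resolved under the 'plant_genomes/'
        # directory before being downloaded and extracted.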
with open(filepaths_txt, 'r') as f:
filepaths = [os.path.join("plant_genomes", filepath.rstrip()) for filepath in f]
genome_files = dl_manager.download_and_extract(filepaths)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": genome_files, "chunk_length": self.config.chunk_length})
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, files, chunk_length):
key = 0
for file in files:
with open(file, 'rt') as f:
fasta_sequences = SeqIO.parse(f, 'fasta')
for record in fasta_sequences:
sequence, description = str(record.seq), record.description
# clean chromosome sequence
sequence = clean_sequence(sequence)
seq_length = len(sequence)
# split into chunks
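                    # Worked example with the default config (chunk_length=6000, overlap=100):
                    # a cleaned 20,000 bp record yields (20000 - 200) // 6000 = 3 chunks,
                    # each chunk_length + 2 * overlap = 6,200 bp long.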
num_chunks = (seq_length - 2 * self.config.overlap) // chunk_length
if num_chunks < 1:
continue
sequence = sequence[:(chunk_length * num_chunks + 2 * self.config.overlap)]
seq_length = len(sequence)
for i in range(num_chunks):
# get chunk
start_pos = i * chunk_length
end_pos = min(seq_length, (i+1) * chunk_length + 2 * self.config.overlap)
chunk_sequence = sequence[start_pos:end_pos]
# yield chunk
yield key, {
'sequence': chunk_sequence,
'description': description,
'start_pos': start_pos,
'end_pos': end_pos,
}
key += 1
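
# Usage sketch (the repository id below is an assumption based on this file's name
# and owner; adjust it to the actual Hub repo id):
#
#     from datasets import load_dataset
#     ds = load_dataset("suke-sho/multi-model-plant-genome-corpus", name="6kbp",
#                       trust_remote_code=True)
#     print(ds["train"][0]["start_pos"], ds["train"][0]["end_pos"])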