"repo_name","path","content","license" "rust-bio-tools","./rust-bio-tools/tests/lib.rs","use bio::io::fastq;\nuse rust_htslib::bam;\nuse rust_htslib::bam::Read;\nuse std::fs;\nuse std::process::Command;\n\n/// Compare an output file to the expected output and delete the output file.\nfn test_output(result: &str, expected: &str) {\n assert!(Command::new('cmp')\n .arg(result)\n .arg(expected)\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n fs::remove_file(result).unwrap();\n}\n\n/// Compare two fastq files, ignoring the name lines\n/// Reads are sorted by their sequence, which is not 100% robust\n/// if mutations/ sequencing errors are considered.\nfn compare_fastq(result: &str, expected: &str, strand: bool) {\n let result_reader = fastq::Reader::from_file(result).unwrap();\n let mut result_recs: Vec =\n result_reader.records().filter_map(Result::ok).collect();\n result_recs.sort_by_key(|x| x.seq().to_owned());\n let expected_reader = fastq::Reader::from_file(expected).unwrap();\n let mut expected_recs: Vec =\n expected_reader.records().filter_map(Result::ok).collect();\n expected_recs.sort_by_key(|x| x.seq().to_owned());\n assert_eq!(result_recs.len(), expected_recs.len());\n for (result, expected) in result_recs.iter().zip(expected_recs.iter()) {\n assert_eq!(result.seq(), expected.seq());\n assert_eq!(result.qual(), expected.qual());\n if strand {\n assert_eq!(result.desc(), expected.desc())\n }\n }\n}\n\nfn compare_bam(result: &str, expected: &str) {\n let mut result_reader = bam::Reader::from_path(result).unwrap();\n let mut result_recs: Vec =\n result_reader.records().filter_map(Result::ok).collect();\n result_recs.sort_by_key(|x| x.seq().as_bytes());\n let mut expected_reader = bam::Reader::from_path(expected).unwrap();\n let mut expected_recs: Vec =\n expected_reader.records().filter_map(Result::ok).collect();\n expected_recs.sort_by_key(|x| x.seq().as_bytes());\n for (result, expected) in result_recs.iter().zip(expected_recs.iter()) {\n assert_eq!(result.seq().as_bytes(), expected.seq().as_bytes());\n assert_eq!(result.qual(), expected.qual());\n }\n}\n\n#[test]\nfn fastq_split() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt fastq-split tests/A.fastq tests/B.fastq < tests/test.fastq')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/A.fastq', 'tests/expected/A.fastq');\n test_output('tests/B.fastq', 'tests/expected/B.fastq');\n}\n\n#[test]\nfn fastq_filter() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg(\n 'target/debug/rbt fastq-filter tests/ids.txt < tests/test.fastq > tests/filtered.fastq'\n )\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/filtered.fastq', 'tests/expected/B.fastq');\n}\n\n#[test]\nfn bam_depth() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt bam-depth tests/test.bam < tests/pos.txt > tests/depth.txt')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/depth.txt', 'tests/expected/depth.txt');\n}\n\n#[test]\nfn vcf_to_txt() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-to-txt --genotypes --fmt S --info T X SOMATIC < tests/test.vcf > tests/variant-table.txt')\n .spawn().unwrap().wait().unwrap().success());\n test_output(\n 'tests/variant-table.txt',\n 'tests/expected/variant-table.txt',\n );\n}\n\n#[test]\nfn vcf_to_txt_with_filter() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-to-txt --genotypes --fmt S --info T X SOMATIC --with-filter < 
tests/test-with-filter.vcf > tests/variant-table-with-filter.txt')\n .spawn().unwrap().wait().unwrap().success());\n test_output(\n 'tests/variant-table-with-filter.txt',\n 'tests/expected/variant-table-with-filter.txt',\n );\n}\n\n// FIXME: can't work out how to use should_panic macro\n//#[should_panic]\nfn vcf_to_txt_input_info_as_format() {\n assert!(String::from_utf8_lossy(\n &Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-to-txt --fmt T < tests/test.vcf')\n .output()\n .unwrap()\n .stderr\n )\n .contains(''Unable to find FORMAT \'T\' in the input file! Is \'T\' an INFO tag?''));\n}\n\n#[test]\nfn vcf_match() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-match -d 50 -l 20 tests/test3.vcf < tests/test2.vcf > tests/matching.bcf')\n .spawn().unwrap().wait().unwrap().success());\n test_output('tests/matching.bcf', 'tests/expected/matching.bcf');\n}\n\n#[test]\nfn vcf_match_same() {\n assert!(Command::new('bash').arg('-c')\n .arg('target/debug/rbt vcf-match -d 50 -l 20 tests/test4.vcf < tests/test4.vcf > tests/matching-same.bcf')\n .spawn().unwrap().wait().unwrap().success());\n test_output(\n 'tests/matching-same.bcf',\n 'tests/expected/matching-same.bcf',\n );\n}\n\n#[test]\nfn vcf_fix_iupac_alleles() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg(\n 'target/debug/rbt vcf-fix-iupac-alleles < tests/test-iupac.vcf > tests/iupac-fixed.bcf'\n )\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/iupac-fixed.bcf', 'tests/expected/iupac-fixed.bcf');\n}\n\n#[test]\nfn vcf_baf() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-baf < tests/test-freebayes.vcf > tests/baf.bcf')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/baf.bcf', 'tests/expected/baf.bcf');\n}\n\n#[test]\nfn test_vcf_report() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-report tests/ref.fa -v a=tests/report-test.vcf -v b=tests/report-test.vcf -b a:tumor=tests/test-report.bam -b b:tumor=tests/test-report.bam -- tests/test-vcf-report')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success()\n );\n let files1 = vec![\n (\n 'tests/test-vcf-report/indexes/index1.html',\n 'tests/expected/report/indexes/index1.html',\n ),\n (\n 'tests/test-vcf-report/genes/KRAS1.html',\n 'tests/expected/report/genes/KRAS1.html',\n ),\n ];\n\n let files2 = vec![\n (\n 'tests/test-vcf-report/details/a/ENST00000557334_5_c_35G_A.html',\n 'tests/expected/report/details/a/ENST00000557334_5_c_35G_A.html',\n ),\n (\n 'tests/test-vcf-report/details/b/ENST00000557334_5_c_35G_A.html',\n 'tests/expected/report/details/b/ENST00000557334_5_c_35G_A.html',\n ),\n ];\n\n for (result, expected) in files1 {\n // delete line 22 with timestamp and 15 with version\n // this may fail on OS X due to the wrong sed being installed\n assert!(Command::new('bash')\n .arg('-c')\n .arg('sed -i '22d;15d' '.to_owned() + result)\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output(result, expected)\n }\n for (result, expected) in files2 {\n // Delete line 35 with timestamp and 28 with version\n // This may fail on OS X due to the wrong sed being installed\n assert!(Command::new('bash')\n .arg('-c')\n .arg('sed -i '36d;29d' '.to_owned() + result)\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output(result, expected)\n }\n fs::remove_dir_all('tests/test-vcf-report').unwrap();\n}\n\n#[test]\nfn test_csv_report() {\n assert!(Command::new('bash')\n .arg('-c')\n 
.arg('target/debug/rbt csv-report tests/test_report.csv -- tests/test-csv-report')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n\n let result = 'tests/test-csv-report/data/index1.js';\n let expected = 'tests/expected/csv-report/data/index1.js';\n test_output(result, expected);\n\n fs::remove_dir_all('tests/test-csv-report').unwrap();\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_two_cluster() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments fastq --umi-len 3 -u --max-umi-dist 0 --max-seq-dist 2 tests/test-consensus.fastq tests/test-consensus.fastq /tmp/test-consensus.1.fastq /tmp/test-consensus.2.fastq')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/test-consensus.1.fastq',\n 'tests/expected/test-consensus.1.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test-consensus.2.fastq',\n 'tests/expected/test-consensus.2.fastq',\n false,\n );\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_single_cluster() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments fastq --umi-len 3 -u --max-umi-dist 2 --max-seq-dist 2 tests/test-consensus.fastq tests/test-consensus.fastq /tmp/test-consensus_single.1.fastq /tmp/test-consensus_single.2.fastq')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/test-consensus_single.1.fastq',\n 'tests/expected/test-consensus_single.1.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test-consensus_single.2.fastq',\n 'tests/expected/test-consensus_single.2.fastq',\n false,\n );\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_reads() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments fastq --umi-len 10 --max-umi-dist 0 --max-seq-dist 8 --insert-size 450 --std-dev 50 tests/overlapping-consensus.1.fastq tests/overlapping-consensus.2.fastq /tmp/test_overlapping-consensus.1.fastq /tmp/test_overlapping-consensus.2.fastq /tmp/test_overlapping-consensus.3.fastq')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/test_overlapping-consensus.1.fastq',\n 'tests/expected/test_overlapping-consensus.1.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test_overlapping-consensus.2.fastq',\n 'tests/expected/test_overlapping-consensus.2.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test_overlapping-consensus.3.fastq',\n 'tests/expected/test_overlapping-consensus.3.fastq',\n false,\n );\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_from_bam() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments bam tests/overlapping_consensus_marked.bam /tmp/bam_consensus_r1.fq /tmp/bam_consensus_r2.fq /tmp/bam_consensus_se.fq /tmp/overlapping_consensus_mapped.bam')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/bam_consensus_r1.fq',\n 'tests/expected/bam_consensus_r1.fq',\n true,\n );\n compare_fastq(\n '/tmp/bam_consensus_r2.fq',\n 'tests/expected/bam_consensus_r2.fq',\n true,\n );\n compare_fastq(\n '/tmp/bam_consensus_se.fq',\n 'tests/expected/bam_consensus_se.fq',\n true,\n );\n compare_bam(\n '/tmp/overlapping_consensus_mapped.bam',\n 'tests/expected/overlapping_consensus_mapped.bam',\n );\n}\n\n#[test]\nfn test_vcf_annotate_dgidb() {\n let exec_test = Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-annotate-dgidb tests/annotate_dgidb_test.vcf | bcftools view - | wc -l').output()\n .expect('failed to execute process');\n assert!(exec_test.status.success());\n 
assert_eq!(String::from_utf8(exec_test.stdout).unwrap().trim(), '65');\n}\n\n#[test]\nfn test_stats_fasta_file() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt sequence-stats < tests/stats.fasta > /tmp/result.fasta.stats')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n\n test_output(\n '/tmp/result.fasta.stats',\n 'tests/expected/result.fasta.stats',\n );\n}\n\n#[test]\nfn test_stats_fastq_file() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt sequence-stats -q < tests/stats.fastq > /tmp/result.fastq.stats')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n\n test_output(\n '/tmp/result.fastq.stats',\n 'tests/expected/result.fastq.stats',\n );\n}\n\n#[test]\nfn test_vcf_split() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-split tests/test-vcf-split.vcf /tmp/vcf-split1.bcf /tmp/vcf-split2.bcf')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n}\n\n#[test]\nfn test_vcf_split_chain() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-split tests/test-vcf-split-chain.vcf /tmp/vcf-split-chain1.bcf /tmp/vcf-split-chain2.bcf')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output(\n '/tmp/vcf-split-chain1.bcf',\n 'tests/expected/vcf-split-chain1.bcf',\n );\n test_output(\n '/tmp/vcf-split-chain2.bcf',\n 'tests/expected/vcf-split-chain2.bcf',\n );\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bam/depth.rs","//! Compute the depth of coverage in a BAM file for a list of reference sequences and positions.\n//!\n//! ## Input:\n//! A BAM file and a positions file.\n//! The positions file contains the name of one reference sequence and one position per line (tab separated).\n//! Example:\n//! ```\n//! 16 1\n//! 17 1\n//! 17 2\n//! 17 38\n//! 17 39\n//! ```\n//!\n//! Positions are read from stdin, the BAM file is the first argument.\n//!\n//! ## Output:\n//! Depth are written to stdout as tab-separated lines, similar to the positions input.\n//! Example:\n//! ```\n//! 16 1 0\n//! 17 1 5\n//! 17 2 5\n//! 17 38 14\n//! 17 39 13\n//! ```\n//!\n//! ## Usage:\n//!\n//! ```bash\n//! $ rbt bam-depth tests/test.bam < tests/pos.txt > tests/depth.txt\n//! ```\n//! 
Where `pos.txt` is a positions file, as described above.\n//!\n//!\nuse anyhow::Result;\nuse log::info;\nuse std::cmp;\nuse std::io;\n\nuse serde::Deserialize;\n\nuse rust_htslib::bam;\nuse rust_htslib::bam::{FetchDefinition, Read};\nuse std::path::Path;\n\n#[derive(Deserialize, Debug)]\nstruct PosRecord {\n chrom: String,\n pos: u32,\n}\n\npub fn depth>(\n bam_path: P,\n max_read_length: u32,\n include_flags: u16,\n exclude_flags: u16,\n min_mapq: u8,\n) -> Result<()> {\n let mut bam_reader = bam::IndexedReader::from_path(&bam_path)?;\n let bam_header = bam_reader.header().clone();\n let mut pos_reader = csv::ReaderBuilder::new()\n .has_headers(false)\n .delimiter(b'\t')\n .from_reader(io::stdin());\n let mut csv_writer = csv::WriterBuilder::new()\n .delimiter(b'\t')\n .from_writer(io::BufWriter::new(io::stdout()));\n\n for (i, record) in pos_reader.deserialize().enumerate() {\n let record: PosRecord = record?;\n\n // jump to correct position\n let tid = bam_header.tid(record.chrom.as_bytes()).unwrap() as i32;\n let start = cmp::max(record.pos as i64 - max_read_length as i64 - 1, 0);\n bam_reader.fetch(FetchDefinition::Region(\n tid,\n start as i64,\n start as i64 + (max_read_length * 2) as i64,\n ))?;\n\n // iterate over pileups\n let mut covered = false;\n for pileup in bam_reader.pileup() {\n let pileup = pileup?;\n covered = pileup.pos() == record.pos - 1;\n\n if covered {\n let depth = pileup\n .alignments()\n .filter(|alignment| {\n let record = alignment.record();\n let flags = record.flags();\n (!flags) & include_flags == 0\n && flags & exclude_flags == 0\n && record.mapq() >= min_mapq\n })\n .count();\n\n csv_writer.serialize((&record.chrom, record.pos, depth))?;\n break;\n } else if pileup.pos() > record.pos {\n break;\n }\n }\n if !covered {\n csv_writer.serialize((&record.chrom, record.pos, 0))?;\n }\n\n if (i + 1) % 100 == 0 {\n info!('{} records written.', i + 1);\n }\n }\n Ok(())\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bam/plot/plot_bam.rs","use crate::bcf::report::table_report::create_report_table::create_report_data;\nuse crate::bcf::report::table_report::create_report_table::manipulate_json;\nuse crate::common::Region;\nuse anyhow::Result;\nuse chrono::{DateTime, Local};\nuse itertools::Itertools;\nuse std::io;\nuse std::io::Write;\nuse std::path::Path;\nuse tera::{Context, Tera};\n\npub(crate) fn plot_bam + std::fmt::Debug>(\n bam_paths: &[P],\n fasta_path: P,\n region: &Region,\n max_read_depth: u32,\n) -> Result<()> {\n let mut plots = Vec::new();\n\n let Region { target, start, end } = region.clone();\n for bam_path in bam_paths {\n let content = create_report_data(&fasta_path, None, bam_path, region, max_read_depth)?;\n let visualization = manipulate_json(content, start, end)?;\n\n plots.push(visualization);\n }\n\n let bams = bam_paths\n .iter()\n .map(|b| b.as_ref().iter().last().unwrap().to_str().unwrap())\n .collect_vec();\n\n let mut templates = Tera::default();\n templates.add_raw_template('bam_plot.html.tera', include_str!('bam_plot.html.tera'))?;\n let mut context = Context::new();\n let local: DateTime = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n context.insert('plots', &plots);\n context.insert('bams', &bams);\n context.insert('chrom', &target);\n context.insert('start', &start);\n context.insert('end', &end);\n\n let html = templates.render('bam_plot.html.tera', &context)?;\n io::stdout().write_all(html.as_bytes())?;\n\n Ok(())\n}\n","mit" 
"rust-bio-tools","./rust-bio-tools/src/bam/plot/mod.rs","pub mod plot_bam;\n","mit" "rust-bio-tools","./rust-bio-tools/src/bam/anonymize_reads.rs","use anyhow::Result;\nuse bio::io::fasta;\nuse rand::prelude::{SliceRandom, ThreadRng};\nuse rand::seq::IteratorRandom;\nuse rust_htslib::bam;\nuse rust_htslib::bam::Read;\nuse std::collections::HashMap;\nuse std::ops::Range;\nuse std::path::Path;\nuse uuid::Uuid;\n\npub fn anonymize_reads + std::fmt::Debug>(\n bam: P,\n input_ref: P,\n output_bam: P,\n output_ref: P,\n chr: String,\n interval: Range,\n keep_only_pairs: bool,\n) -> Result<()> {\n let start = interval.start;\n let end = interval.end;\n let mut fasta_reader = fasta::IndexedReader::from_file(&input_ref)?;\n fasta_reader.fetch(&chr, start, end)?;\n let mut reference = Vec::new();\n reference.resize((end - start) as usize, 0);\n fasta_reader.read(&mut reference)?;\n let mut rng = rand::thread_rng();\n let alphabet = [b'A', b'C', b'G', b'T'];\n\n //Build artificial reference\n let mut artificial_reference = Vec::new();\n add_random_bases(end - start, &mut artificial_reference, &mut rng, &alphabet)?;\n let mut altered_bases = init_altered_bases(&reference, &artificial_reference)?;\n let mut fa_writer = fasta::Writer::to_file(output_ref)?;\n let ref_id = Uuid::new_v4().to_hyphenated().to_string();\n fa_writer.write(&ref_id, None, &artificial_reference)?;\n\n let mut bam_reader = bam::IndexedReader::from_path(bam)?;\n bam_reader.fetch((chr.as_bytes(), start, end + 1))?;\n\n let mut header = bam::Header::new();\n header.push_record(\n bam::header::HeaderRecord::new(b'SQ')\n .push_tag(b'SN', &ref_id)\n .push_tag(b'LN', &(end - start)),\n );\n let mut bam_writer = bam::Writer::from_path(output_bam, &header, bam::Format::Bam)?;\n let mate_in_range = |record: &bam::Record| -> bool {\n (record.mtid() == record.tid())\n && (record.mpos() >= (start as i64))\n && (record.mpos() < (end as i64))\n };\n for result in bam_reader.records() {\n let mut record = result?;\n if (record.pos() >= start as i64)\n && (record.cigar().end_pos() < end as i64)\n && (!keep_only_pairs || mate_in_range(&record))\n {\n record.cache_cigar();\n //Check if mate record end within region\n let artificial_seq = if record.is_unmapped() || record.seq_len() == 0 {\n let mut seq = Vec::new();\n add_random_bases(record.seq_len() as u64, &mut seq, &mut rng, &alphabet)?;\n seq\n } else {\n build_sequence(\n &mut altered_bases,\n &record,\n start as usize,\n &mut rng,\n &alphabet,\n )?\n };\n let artificial_record = build_record(&record, &artificial_seq, start as i64)?;\n bam_writer.write(&artificial_record)?;\n }\n }\n Ok(())\n}\n\nfn init_altered_bases(\n original_ref: &[u8],\n artificial_reference: &[u8],\n) -> Result>> {\n let mut altered_bases = HashMap::new();\n for (i, (artifical_base, original_base)) in artificial_reference\n .iter()\n .zip(original_ref.iter())\n .enumerate()\n {\n altered_bases\n .entry(i)\n .or_insert_with(HashMap::new)\n .insert(*original_base, *artifical_base);\n }\n Ok(altered_bases)\n}\n\nfn build_record(record: &bam::Record, artificial_seq: &[u8], offset: i64) -> Result {\n let mut artificial_record = bam::record::Record::new();\n artificial_record.set(\n record.qname(),\n Some(&record.cigar()),\n artificial_seq,\n record.qual(),\n );\n set_mandatory_fields(&mut artificial_record, record, offset)?;\n for aux_result in record.aux_iter() {\n let (tag, aux_field) = aux_result?;\n artificial_record.push_aux(tag, aux_field)?;\n }\n Ok(artificial_record)\n}\n\nfn build_sequence(\n altered_bases: &mut 
HashMap>,\n record: &bam::Record,\n offset: usize,\n rng: &mut ThreadRng,\n alphabet: &[u8],\n) -> Result> {\n let mut artificial_seq = Vec::new();\n let record_seq = record.seq().as_bytes();\n let mut record_pos = 0;\n let mut ref_pos = record.pos() as usize - offset;\n //Create random seq for leading softclips\n for cigar in record.cigar_cached().unwrap().iter() {\n match cigar.char() {\n 'S' => {\n add_random_bases(cigar.len() as u64, &mut artificial_seq, rng, alphabet)?;\n record_pos += cigar.len() as usize;\n }\n 'M' | 'X' | '=' => {\n (0..cigar.len()).for_each(|_| {\n let base_mappings = altered_bases.get(&ref_pos).unwrap().clone();\n let altered_base = *altered_bases\n .get_mut(&ref_pos)\n .unwrap()\n .entry(*record_seq.get(record_pos).unwrap())\n .or_insert_with(|| {\n *alphabet\n .iter()\n .filter(|&x| !base_mappings.values().any(|y| x == y))\n .choose(rng)\n .unwrap()\n });\n artificial_seq.push(altered_base);\n ref_pos += 1;\n record_pos += 1;\n });\n // Add reference bases except for mismatches\n }\n 'I' => {\n add_random_bases(cigar.len() as u64, &mut artificial_seq, rng, alphabet)?;\n record_pos += cigar.len() as usize;\n }\n 'D' | 'N' => {\n ref_pos += cigar.len() as usize;\n }\n _ => {}\n }\n }\n\n Ok(artificial_seq)\n}\n\nfn set_mandatory_fields(\n target_rec: &mut bam::Record,\n source_rec: &bam::Record,\n offset: i64,\n) -> Result<()> {\n target_rec.set_pos(source_rec.pos() - offset);\n target_rec.set_tid(0);\n let (mtid, mpos) = if source_rec.mtid() == -1 {\n (-1, -1)\n } else if source_rec.mtid() == source_rec.tid() {\n (0, source_rec.mpos() - offset)\n } else {\n (1, source_rec.mpos())\n };\n target_rec.set_mtid(mtid);\n target_rec.set_mpos(mpos);\n target_rec.set_flags(source_rec.flags());\n target_rec.set_insert_size(source_rec.insert_size());\n target_rec.set_mapq(source_rec.mapq());\n Ok(())\n}\n\nfn add_random_bases(\n length: u64,\n seq: &mut Vec,\n rng: &mut ThreadRng,\n alphabet: &[u8],\n) -> Result<()> {\n (0..length).for_each(|_| seq.push(*alphabet.choose(rng).unwrap()));\n Ok(())\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bam/collapse_reads_to_fragments/calc_consensus.rs","use crate::common::CalcConsensus;\nuse bio::io::fastq;\nuse bio::stats::probs::LogProb;\nuse bio_types::sequence::SequenceRead;\nuse bio_types::sequence::SequenceReadPairOrientation;\nuse derive_new::new;\nuse itertools::Itertools;\nuse rust_htslib::bam;\nuse rust_htslib::bam::record::Aux;\nuse std::collections::{HashMap, HashSet};\nuse std::ops::BitOrAssign;\n\nconst ALLELES: &[u8] = b'ACGT';\n\npub fn get_umi_string(rec: &bam::record::Record) -> String {\n let umi = match rec.aux(b'RX') {\n Ok(Aux::String(value)) => {\n format!(' RX:Z:{}', value)\n }\n _ => String::from(''),\n };\n umi\n}\n\n#[derive(Eq, PartialEq)]\nenum StrandObservation {\n None,\n Forward,\n Reverse,\n Both,\n}\n\nimpl BitOrAssign for StrandObservation {\n fn bitor_assign(&mut self, rhs: Self) {\n if let StrandObservation::None = self {\n *self = rhs;\n } else if *self != rhs {\n *self = StrandObservation::Both;\n }\n }\n}\n\n#[derive(new)]\npub struct CalcOverlappingConsensus<'a> {\n recs1: &'a [bam::Record],\n recs2: &'a [bam::Record],\n r1_vec: &'a [bool],\n r2_vec: &'a [bool],\n seqids: &'a [usize],\n uuid: &'a str,\n read_ids: &'a mut Option>>,\n}\n\nimpl<'a> CalcOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.r1_vec().len();\n let mut consensus_seq: Vec = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec = 
Vec::with_capacity(seq_len);\n let mut consensus_strand = b'SI:Z:'.to_vec();\n let read_orientations_opt = self.build_read_orientation_string();\n let mut consensus_lh = LogProb::ln_one();\n for i in 0..seq_len {\n match (\n self.recs1().len() == 1,\n self.map_read_pos(i, self.r1_vec()),\n self.map_read_pos(i, self.r2_vec()),\n ) {\n (true, Some(base_pos), None) => {\n let base = self.recs1()[0].seq().as_bytes()[base_pos];\n consensus_seq.push(base);\n consensus_qual.push(self.recs1()[0].qual()[base_pos] + 33);\n consensus_lh += Self::overall_allele_likelihood(self, &base, i);\n }\n (true, None, Some(base_pos)) => {\n let base = self.recs2()[0].seq().as_bytes()[base_pos];\n consensus_seq.push(base);\n consensus_qual.push(self.recs2()[0].qual()[base_pos] + 33);\n consensus_lh += Self::overall_allele_likelihood(self, &base, i);\n }\n _ => {\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec();\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n }\n };\n self.build_consensus_strand(&mut consensus_strand, consensus_seq[i], i);\n }\n let name = if self.read_ids.is_some() {\n Self::build_verbose_read_name(self.uuid(), self.seqids(), self.read_ids)\n } else {\n format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n )\n };\n if let Some(mut read_orientations) = read_orientations_opt {\n consensus_strand.append(&mut read_orientations)\n }\n let umi = get_umi_string(&self.recs1()[0]);\n let description = format!('{}{}', String::from_utf8(consensus_strand).unwrap(), umi);\n let consensus_rec =\n fastq::Record::with_attrs(&name, Some(&description), &consensus_seq, &consensus_qual);\n (consensus_rec, consensus_lh)\n }\n\n fn recs1(&self) -> &[bam::Record] {\n self.recs1\n }\n\n fn recs2(&self) -> &[bam::Record] {\n self.recs2\n }\n\n fn r1_vec(&self) -> &[bool] {\n self.r1_vec\n }\n\n fn r2_vec(&self) -> &[bool] {\n self.r2_vec\n }\n\n fn build_consensus_strand(&self, consensus_strand: &mut Vec, ref_base: u8, pos: usize) {\n let mut strand = StrandObservation::None;\n let rec1_pos = self.map_read_pos(pos, self.r1_vec());\n let rec2_pos = self.map_read_pos(pos, self.r2_vec());\n let mut strand_observation = |recs: &[bam::Record], rec_pos: Option| {\n if let Some(pos) = rec_pos {\n recs.iter().for_each(|rec| {\n if rec.base(pos) == ref_base {\n match rec.is_reverse() {\n true => strand |= StrandObservation::Reverse,\n false => strand |= StrandObservation::Forward,\n };\n }\n });\n }\n };\n strand_observation(self.recs1(), rec1_pos);\n strand_observation(self.recs2(), rec2_pos);\n match strand {\n StrandObservation::Forward => consensus_strand.push(b'+'),\n StrandObservation::Reverse => consensus_strand.push(b'-'),\n StrandObservation::Both => consensus_strand.push(b'*'),\n StrandObservation::None => consensus_strand.push(b'.'),\n }\n }\n fn build_read_orientation_string(&self) -> Option> {\n let mut read_orientations_set: HashSet<_> = self\n .recs1()\n .iter()\n .filter_map(|rec| match rec.read_pair_orientation() {\n SequenceReadPairOrientation::F2F1 => Some(b'F2F1,'),\n SequenceReadPairOrientation::F2R1 => Some(b'F2R1,'),\n SequenceReadPairOrientation::F1F2 => Some(b'F1F2,'),\n SequenceReadPairOrientation::R2F1 => Some(b'R2F1,'),\n SequenceReadPairOrientation::F1R2 => Some(b'F1R2,'),\n SequenceReadPairOrientation::R2R1 => Some(b'R2R1,'),\n SequenceReadPairOrientation::R1F2 => Some(b'R1F2,'),\n SequenceReadPairOrientation::R1R2 => 
Some(b'R1R2,'),\n SequenceReadPairOrientation::None => None,\n })\n .collect();\n let mut read_orientations_string = b' RO:Z:'.to_vec();\n read_orientations_set\n .drain()\n .for_each(|entry| read_orientations_string.extend_from_slice(entry));\n match read_orientations_string.pop() {\n Some(b',') => Some(read_orientations_string),\n Some(b':') => None,\n Some(_) => unreachable!(),\n None => unreachable!(),\n }\n }\n fn map_read_pos(&self, consensus_pos: usize, alignment_vec: &[bool]) -> Option {\n match alignment_vec[consensus_pos] {\n true => Some(\n alignment_vec[0..(consensus_pos + 1)]\n .iter()\n .filter(|&v| *v)\n .count()\n - 1,\n ),\n false => None,\n }\n }\n}\n\nimpl<'a> CalcConsensus<'a, bam::Record> for CalcOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, pos: usize) -> LogProb {\n let mut lh = LogProb::ln_one();\n let rec1_pos = self.map_read_pos(pos, self.r1_vec());\n let rec2_pos = self.map_read_pos(pos, self.r2_vec());\n for (rec1, rec2) in self.recs1().iter().zip(self.recs2()) {\n if let Some(pos) = rec1_pos {\n lh += Self::allele_likelihood_in_rec(\n allele,\n &rec1.seq().as_bytes(),\n rec1.qual(),\n pos,\n 0,\n );\n };\n if let Some(pos) = rec2_pos {\n lh += Self::allele_likelihood_in_rec(\n allele,\n &rec2.seq().as_bytes(),\n rec2.qual(),\n pos,\n 0,\n );\n };\n }\n lh\n }\n\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n\n#[derive(new)]\npub struct CalcNonOverlappingConsensus<'a> {\n recs: &'a [bam::Record],\n seqids: &'a [usize],\n uuid: &'a str,\n read_ids: &'a mut Option>>,\n}\n\nimpl<'a> CalcNonOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.recs()[0].seq().len();\n let mut consensus_seq: Vec = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec = Vec::with_capacity(seq_len);\n let mut consensus_strand = b'SI:Z:'.to_vec();\n let mut cigar_map = HashMap::new();\n for record in self.recs() {\n let cached_cigar = record.raw_cigar();\n if !cigar_map.contains_key(cached_cigar) {\n cigar_map.insert(cached_cigar, Vec::new());\n }\n cigar_map.get_mut(cached_cigar).unwrap().push(record);\n }\n\n // Potential workflow for different read lengths\n // compute consensus of all reads with max len\n // compute offset of all shorter reads\n // pad shorter reads\n // drop first consensus, compute consensus of full length reads and padded reads\n // ignore padded bases for consensus computation\n\n let mut consensus_lh = LogProb::ln_one();\n\n for i in 0..seq_len {\n // Maximum a-posteriori estimate for the consensus base.\n // Find the allele (theta \in ACGT) with the highest likelihood\n // given the bases at this position, weighted with their quality values\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec(); //Check this. 
See below\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n self.build_consensus_strand(&mut consensus_strand, consensus_seq[i], i);\n }\n let name = if self.read_ids.is_some() {\n Self::build_verbose_read_name(self.uuid(), self.seqids(), self.read_ids)\n } else {\n format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n )\n };\n let umi = get_umi_string(&self.recs()[0]);\n let description = format!('{}{}', String::from_utf8(consensus_strand).unwrap(), umi);\n let consensus_rec =\n fastq::Record::with_attrs(&name, Some(&description), &consensus_seq, &consensus_qual);\n (consensus_rec, consensus_lh)\n }\n pub fn recs(&self) -> &[bam::Record] {\n self.recs\n }\n fn build_consensus_strand(\n &self,\n consensus_strand: &mut Vec,\n ref_base: u8,\n current_pos: usize,\n ) {\n let mut strand = StrandObservation::None;\n self.recs().iter().for_each(|rec| {\n if rec.base(current_pos) == ref_base {\n match rec.is_reverse() {\n true => strand |= StrandObservation::Reverse,\n false => strand |= StrandObservation::Forward,\n };\n }\n });\n match strand {\n StrandObservation::Forward => consensus_strand.push(b'+'),\n StrandObservation::Reverse => consensus_strand.push(b'-'),\n StrandObservation::Both => consensus_strand.push(b'*'),\n StrandObservation::None => consensus_strand.push(b'.'),\n }\n }\n}\n\nimpl<'a> CalcConsensus<'a, bam::Record> for CalcNonOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb {\n let mut lh = LogProb::ln_one(); // posterior: log(P(theta)) = 1\n for rec in self.recs() {\n lh += Self::allele_likelihood_in_rec(allele, &rec.seq().as_bytes(), rec.qual(), i, 0);\n }\n lh\n }\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bam/collapse_reads_to_fragments/pipeline.rs","use super::calc_consensus::{CalcNonOverlappingConsensus, CalcOverlappingConsensus};\nuse super::unmark_record;\nuse anyhow::Result;\nuse bio::io::fastq;\nuse derive_new::new;\nuse rust_htslib::bam;\nuse rust_htslib::bam::record::Aux;\nuse rust_htslib::bam::Read;\nuse std::cmp::Ordering;\nuse std::collections::{BTreeMap, HashMap, HashSet};\nuse std::io;\nuse std::ops::Deref;\nuse std::ops::DerefMut;\nuse uuid::Uuid;\n\n#[derive(new)]\npub struct CallConsensusRead {\n bam_reader: bam::Reader,\n fq1_writer: fastq::Writer,\n fq2_writer: fastq::Writer,\n fq_se_writer: fastq::Writer,\n bam_skipped_writer: bam::Writer,\n verbose_read_names: bool,\n}\n\ntype Position = i64;\ntype GroupIDs = HashSet;\ntype RecordIDs = Vec;\n\n#[derive(Hash, PartialEq, Eq, Debug)]\npub enum RecordId {\n Regular(Vec),\n Split(Vec),\n}\n\n#[derive(Hash, PartialEq, Eq, Clone, Debug)]\npub enum GroupId {\n Regular(u32),\n Split(u32),\n}\n\n#[derive(new, Debug)]\npub struct GroupEndIndex {\n #[new(default)]\n group_pos: HashMap,\n #[new(default)]\n group_end_idx: BTreeMap,\n}\n\nimpl GroupEndIndex {\n ///Inserts a new group id at given position\n ///If position is already saved for the group id the group-end-index will be updated\n pub fn insert(&mut self, group_id: GroupId, end_pos: i64) -> Result<()> {\n let update_end_pos = match self.group_pos.get(&group_id) {\n Some(¤t_end_pos) => match current_end_pos < end_pos {\n true => {\n self.group_end_idx\n .get_mut(¤t_end_pos)\n .map(|group_ids| group_ids.remove(&group_id));\n true\n }\n false => false,\n },\n None => true,\n };\n if update_end_pos 
{\n self.group_pos.insert(group_id.clone(), end_pos);\n self.group_end_idx\n .entry(end_pos)\n .or_insert_with(HashSet::new)\n .insert(group_id);\n }\n Ok(())\n }\n\n pub fn cut_lower_group_ids(&mut self, current_pos: Option) -> Result> {\n let group_ids: Vec = self\n .group_end_idx\n .range(\n ..current_pos.unwrap_or(\n self.group_end_idx\n .iter()\n .next_back()\n .map_or(0, |(entry, _)| *entry)\n + 1,\n ),\n )\n .flat_map(|(_, group_ids)| group_ids.clone())\n .collect();\n group_ids.iter().for_each(|group_id| {\n self.group_pos.remove(group_id);\n });\n match current_pos {\n Some(pos) => self.group_end_idx = self.group_end_idx.split_off(&pos),\n None => self.group_end_idx.clear(),\n }\n Ok(group_ids)\n }\n}\n\nimpl CallConsensusRead {\n pub fn call_consensus_reads(&mut self) -> Result<()> {\n let mut group_end_idx = GroupEndIndex::new();\n let mut duplicate_groups: HashMap = HashMap::new();\n let mut record_storage: HashMap = HashMap::new();\n let mut current_chrom = None;\n let mut read_ids: Option>> = if self.verbose_read_names {\n Some(HashMap::new())\n } else {\n None\n };\n for (i, result) in self.bam_reader.records().enumerate() {\n let mut record = result?;\n if !record.is_unmapped() {\n let mut record_pos = None;\n match current_chrom == Some(record.tid()) {\n true => record_pos = Some(record.pos()),\n false => current_chrom = Some(record.tid()),\n }\n //Process completed duplicate groups\n calc_consensus_complete_groups(\n &mut group_end_idx,\n &mut duplicate_groups,\n record_pos,\n &mut record_storage,\n &mut self.fq1_writer,\n &mut self.fq2_writer,\n &mut self.fq_se_writer,\n &mut self.bam_skipped_writer,\n &mut read_ids,\n )?;\n }\n if record.is_unmapped() || record.is_mate_unmapped() {\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n continue;\n }\n if record.is_supplementary() {\n //TODO Supplementary Alignment\n continue;\n }\n record.cache_cigar();\n let duplicate_id_option = match record.aux(b'DI') {\n Ok(Aux::I8(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::I16(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::I32(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::U8(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::U16(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::U32(duplicate_id)) => Some(duplicate_id),\n Err(_) => None,\n _ => unreachable!('Invalid type for tag 'DI''),\n };\n let record_name = record.qname();\n read_ids.as_mut().map(|x| x.insert(i, record_name.to_vec()));\n //Check if record has duplicate ID\n match duplicate_id_option {\n //Case: duplicate ID exists\n Some(duplicate_id) => {\n let regular_id = RecordId::Regular(record_name.to_owned());\n let record_end_pos = record.cigar_cached().unwrap().end_pos() - 1;\n match record_storage.get_mut(®ular_id) {\n //Case: Right record\n Some(storage_entry) => {\n //For right record save end position and duplicate group ID\n let group_id_opt = match storage_entry {\n RecordStorage::PairedRecords {\n ref mut r1_rec,\n ref mut r2_rec,\n } => {\n let group_id = if cigar_has_softclips(r1_rec)\n || cigar_has_softclips(&record)\n {\n unmark_record(r1_rec)?;\n self.bam_skipped_writer.write(r1_rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n None\n } else {\n duplicate_groups\n .entry(GroupId::Regular(duplicate_id))\n .or_insert_with(Vec::new)\n .push(RecordId::Regular(record_name.to_owned()));\n r2_rec.get_or_insert(IndexedRecord {\n rec: record,\n rec_id: i,\n });\n Some(GroupId::Regular(duplicate_id))\n };\n group_id\n }\n // 
This arm is reached if a mate is mapped to another chromosome.\n // In that case a new duplicate and record ID is required\n RecordStorage::SingleRecord { rec } => {\n let group_id = if cigar_has_softclips(rec)\n || cigar_has_softclips(&record)\n {\n unmark_record(rec)?;\n self.bam_skipped_writer.write(rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n None\n } else {\n duplicate_groups\n .entry(GroupId::Split(duplicate_id))\n .or_insert_with(Vec::new)\n .push(RecordId::Split(record_name.to_owned()));\n record_storage.insert(\n RecordId::Split(record_name.to_owned()),\n RecordStorage::SingleRecord {\n rec: IndexedRecord {\n rec: record,\n rec_id: i,\n },\n },\n );\n Some(GroupId::Split(duplicate_id))\n };\n group_id\n }\n };\n if let Some(group_id) = group_id_opt {\n group_end_idx.insert(group_id, record_end_pos)?;\n } else {\n record_storage.remove(®ular_id);\n };\n }\n //Case: Left record or record w/o mate\n None => {\n if !record.is_paired() {\n //If right or single record save end position and duplicate group ID\n if cigar_has_softclips(&record) {\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n } else {\n duplicate_groups\n .entry(GroupId::Regular(duplicate_id))\n .or_insert_with(Vec::new)\n .push(RecordId::Regular(record_name.to_owned()));\n\n group_end_idx\n .insert(GroupId::Regular(duplicate_id), record_end_pos)?;\n record_storage.insert(\n RecordId::Regular(record_name.to_owned()),\n RecordStorage::SingleRecord {\n rec: IndexedRecord {\n rec: record,\n rec_id: i,\n },\n },\n );\n }\n } else {\n record_storage.insert(\n RecordId::Regular(record_name.to_owned()),\n RecordStorage::PairedRecords {\n r1_rec: IndexedRecord {\n rec: record,\n rec_id: i,\n },\n r2_rec: None,\n },\n );\n }\n }\n }\n }\n //Duplicate ID not existing\n //Record is written to bam file if it or its mate is unmapped\n //If record is right mate consensus is calculated\n //Else record is added to hashMap\n None => {\n match record_storage.get_mut(&RecordId::Regular(record_name.to_owned())) {\n //Case: Left record\n None => {\n if !record.is_paired() || record.tid() != record.mtid() {\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n } else {\n record_storage.insert(\n RecordId::Regular(record_name.to_owned()),\n RecordStorage::PairedRecords {\n r1_rec: IndexedRecord {\n rec: record,\n rec_id: i,\n },\n r2_rec: None,\n },\n );\n }\n }\n //Case: Left record already stored\n Some(_record_pair) => {\n let (rec_id, mut l_rec) = match record_storage\n .remove(&RecordId::Regular(record_name.to_owned()))\n .unwrap()\n {\n RecordStorage::PairedRecords { r1_rec, .. } => {\n (r1_rec.rec_id, r1_rec.into_rec())\n }\n RecordStorage::SingleRecord { .. 
} => unreachable!(),\n };\n if cigar_has_softclips(&l_rec) || cigar_has_softclips(&record) {\n unmark_record(&mut l_rec)?;\n self.bam_skipped_writer.write(&l_rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n } else {\n let alignment_vectors = calc_read_alignments(&l_rec, &record);\n match alignment_vectors {\n Some((r1_alignment, r2_alignment)) => {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n\n self.fq_se_writer.write_record(\n &CalcOverlappingConsensus::new(\n &[l_rec],\n &[record],\n &r1_alignment,\n &r2_alignment,\n &[rec_id, i],\n uuid,\n &mut read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n }\n None => {\n unmark_record(&mut l_rec)?;\n self.bam_skipped_writer.write(&l_rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n }\n };\n }\n }\n }\n }\n }\n }\n //Process remaining groups\n calc_consensus_complete_groups(\n &mut group_end_idx,\n &mut duplicate_groups,\n None,\n &mut record_storage,\n &mut self.fq1_writer,\n &mut self.fq2_writer,\n &mut self.fq_se_writer,\n &mut self.bam_skipped_writer,\n &mut read_ids,\n )?;\n Ok(())\n }\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn calc_consensus_complete_groups<'a, W: io::Write>(\n group_end_idx: &mut GroupEndIndex,\n duplicate_groups: &mut HashMap,\n end_pos: Option,\n record_storage: &mut HashMap,\n fq1_writer: &'a mut fastq::Writer,\n fq2_writer: &'a mut fastq::Writer,\n fq_se_writer: &'a mut fastq::Writer,\n bam_skipped_writer: &'a mut bam::Writer,\n read_ids: &'a mut Option>>,\n) -> Result<()> {\n let group_ids = group_end_idx.cut_lower_group_ids(end_pos)?;\n for group_id in group_ids {\n let cigar_groups =\n group_reads_by_cigar(duplicate_groups.remove(&group_id).unwrap(), record_storage)?;\n for cigar_group in cigar_groups.values() {\n match cigar_group {\n CigarGroup::PairedRecords {\n r1_recs,\n r2_recs,\n r1_seqids,\n r2_seqids,\n } => {\n let alignment_vectors = calc_read_alignments(&r1_recs[0], &r2_recs[0]);\n match alignment_vectors {\n Some((r1_alignment, r2_alignment)) => {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n let mut seqids = r1_seqids.clone();\n seqids.append(&mut r2_seqids.clone());\n fq_se_writer.write_record(\n &CalcOverlappingConsensus::new(\n r1_recs,\n r2_recs,\n &r1_alignment,\n &r2_alignment,\n &seqids,\n uuid,\n read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n }\n None => {\n // If reads do not overlap or CIGAR in overlapping region differs R1 and R2 are handled sepperatly\n if r1_recs.len() > 1 {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n fq1_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n r1_recs, r1_seqids, uuid, read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n fq2_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n r2_recs, r2_seqids, uuid, read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n } else {\n let mut r1_rec = r1_recs[0].clone();\n unmark_record(&mut r1_rec)?;\n bam_skipped_writer.write(&r1_rec)?;\n let mut r2_rec = r2_recs[0].clone();\n unmark_record(&mut r2_rec)?;\n bam_skipped_writer.write(&r2_rec)?;\n }\n }\n };\n }\n CigarGroup::SingleRecords { recs, seqids } => match recs.len().cmp(&1) {\n Ordering::Greater => {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n fq_se_writer.write_record(\n &CalcNonOverlappingConsensus::new(recs, seqids, uuid, read_ids)\n .calc_consensus()\n .0,\n )?;\n }\n _ => {\n let mut rec = recs[0].clone();\n unmark_record(&mut rec)?;\n bam_skipped_writer.write(&rec)?;\n }\n },\n }\n }\n }\n Ok(())\n}\n\nfn group_reads_by_cigar(\n 
record_ids: Vec,\n record_storage: &mut HashMap,\n) -> Result> {\n let mut cigar_groups: HashMap = HashMap::new();\n for rec_id in record_ids {\n let storage_entry = record_storage.remove(&rec_id).unwrap();\n storage_entry.add_to_group(&mut cigar_groups)?;\n }\n Ok(cigar_groups)\n}\n\nfn calc_read_alignments(\n r1_rec: &bam::Record,\n r2_rec: &bam::Record,\n) -> Option<(Vec, Vec)> {\n let r1_start = r1_rec.pos();\n let r1_end = r1_rec.cigar_cached().unwrap().end_pos();\n let r2_start = r2_rec.pos();\n let r2_end = r1_rec.cigar_cached().unwrap().end_pos();\n if r1_rec.tid() != r2_rec.tid() {\n None\n } else if r1_start <= r2_start {\n //Check if reads overlap\n if r1_end >= r2_start {\n let offset = r2_start - r1_start;\n calc_alignment_vectors(offset, r1_rec, r2_rec)\n } else {\n //Reads do not overlap\n None\n }\n } else {\n //R2 starts before R1\n if r2_end >= r1_start {\n let offset = r1_start - r2_start;\n calc_alignment_vectors(offset, r2_rec, r1_rec)\n } else {\n None\n }\n }\n}\n\nfn calc_alignment_vectors(\n mut offset: i64,\n r1_rec: &bam::Record,\n r2_rec: &bam::Record,\n) -> Option<(Vec, Vec)> {\n let mut r1_vec = Vec::new();\n let mut r2_vec = Vec::new();\n let mut r1_cigarstring = r1_rec\n .cigar_cached()\n .unwrap()\n .iter()\n .flat_map(|cigar| vec![cigar.char(); cigar.len() as usize])\n .collect::>()\n .into_iter();\n let mut r2_cigarstring = r2_rec\n .cigar_cached()\n .unwrap()\n .iter()\n .flat_map(|cigar| vec![cigar.char(); cigar.len() as usize])\n .collect::>()\n .into_iter();\n let mut r1_cigar = r1_cigarstring.next();\n let mut r2_cigar = match offset == 0 {\n true => r2_cigarstring.next(),\n false => None,\n };\n let mut intersection_entry_passed = false;\n loop {\n if r2_cigar == None {\n match r1_cigar {\n None => break,\n Some('M') | Some('X') | Some('=') | Some('D') | Some('N') => {\n offset -= 1;\n }\n Some('S') => unreachable!(),\n Some(_) => {}\n }\n match_single_cigar(&r1_cigar, &mut r1_vec, &mut r2_vec);\n r1_cigar = r1_cigarstring.next();\n if offset == 0 {\n r2_cigar = r2_cigarstring.next();\n }\n } else if r1_cigar == None {\n match_single_cigar(&r2_cigar, &mut r2_vec, &mut r1_vec);\n r2_cigar = r2_cigarstring.next();\n } else if r1_cigar != r2_cigar {\n if !intersection_entry_passed && r1_cigar == Some('I') {\n r1_vec.push(true);\n r2_vec.push(false);\n r1_cigar = r1_cigarstring.next();\n } else {\n return None;\n }\n } else {\n intersection_entry_passed = true; // Can this me somehow only be called once?!\n match (r1_cigar, r2_cigar) {\n (Some('M'), Some('M'))\n | (Some('X'), Some('X'))\n | (Some('='), Some('='))\n | (Some('I'), Some('I')) => {\n r1_vec.push(true);\n r2_vec.push(true);\n r1_cigar = r1_cigarstring.next();\n r2_cigar = r2_cigarstring.next();\n }\n (Some('D'), Some('D')) | (Some('H'), Some('H')) => {\n r1_cigar = r1_cigarstring.next();\n r2_cigar = r2_cigarstring.next();\n }\n (None, None) | (None, Some(_)) | (Some(_), None) | (Some(_), Some(_)) => {\n unreachable!()\n }\n };\n }\n }\n Some((r1_vec, r2_vec))\n}\n\nfn cigar_has_softclips(rec: &bam::Record) -> bool {\n for cigar_operation in rec.cigar_cached().unwrap().iter() {\n if let bam::record::Cigar::SoftClip(_) = cigar_operation {\n return true;\n }\n }\n false\n}\n\nfn match_single_cigar(cigar: &Option, first_vec: &mut Vec, second_vec: &mut Vec) {\n match cigar {\n Some('M') | Some('S') | Some('X') | Some('=') | Some('I') => {\n first_vec.push(true);\n second_vec.push(false);\n }\n Some(_) | None => {}\n };\n}\n\npub enum RecordStorage {\n PairedRecords {\n r1_rec: IndexedRecord,\n 
r2_rec: Option,\n },\n SingleRecord {\n rec: IndexedRecord,\n },\n}\n\nimpl RecordStorage {\n fn add_to_group(self, cigar_groups: &mut HashMap) -> Result<()> {\n let (r1_rec_entry, r1_rec_id, r2_rec_entry, r2_rec_id, cigar_tuple) = match self {\n RecordStorage::PairedRecords { r1_rec, r2_rec } => {\n let r1_rec_id = r1_rec.rec_id;\n let r1_rec_entry = r1_rec.into_rec();\n let r2_rec_unwrapped = r2_rec.unwrap();\n let r2_rec_id = r2_rec_unwrapped.rec_id;\n let r2_rec_entry = r2_rec_unwrapped.into_rec();\n let cigar_tuple = Cigar::Tuple {\n r1_cigar: r1_rec_entry.raw_cigar().to_vec(),\n r2_cigar: r2_rec_entry.raw_cigar().to_vec(),\n };\n if !cigar_groups.contains_key(&cigar_tuple) {\n cigar_groups.insert(\n cigar_tuple.clone(),\n CigarGroup::PairedRecords {\n r1_recs: Vec::new(),\n r2_recs: Vec::new(),\n r1_seqids: Vec::new(),\n r2_seqids: Vec::new(),\n },\n );\n }\n (\n r1_rec_entry,\n r1_rec_id,\n Some(r2_rec_entry),\n Some(r2_rec_id),\n cigar_tuple,\n )\n }\n RecordStorage::SingleRecord { rec } => {\n let rec_id = rec.rec_id;\n let rec_entry = rec.into_rec();\n let cigar_single = Cigar::Single {\n cigar: rec_entry.raw_cigar().to_vec(),\n };\n if !cigar_groups.contains_key(&cigar_single) {\n cigar_groups.insert(\n cigar_single.clone(),\n CigarGroup::SingleRecords {\n recs: Vec::new(),\n seqids: Vec::new(),\n },\n );\n }\n (rec_entry, rec_id, None, None, cigar_single)\n }\n };\n match cigar_groups.get_mut(&cigar_tuple) {\n Some(CigarGroup::PairedRecords {\n r1_recs,\n r2_recs,\n r1_seqids,\n r2_seqids,\n }) => {\n r1_recs.push(r1_rec_entry);\n r2_recs.push(r2_rec_entry.unwrap());\n r1_seqids.push(r1_rec_id);\n r2_seqids.push(r2_rec_id.unwrap());\n }\n Some(CigarGroup::SingleRecords { recs, seqids }) => {\n recs.push(r1_rec_entry);\n seqids.push(r1_rec_id);\n }\n None => unreachable!(),\n }\n\n Ok(())\n }\n}\n\npub struct IndexedRecord {\n rec: bam::Record,\n rec_id: usize,\n}\n\nimpl IndexedRecord {\n fn into_rec(self) -> bam::Record {\n self.rec\n }\n}\n\nimpl Deref for IndexedRecord {\n type Target = bam::Record;\n fn deref(&self) -> &bam::Record {\n &self.rec\n }\n}\n\nimpl DerefMut for IndexedRecord {\n fn deref_mut(&mut self) -> &mut Self::Target {\n &mut self.rec\n }\n}\n\npub enum CigarGroup {\n PairedRecords {\n r1_recs: Vec,\n r2_recs: Vec,\n r1_seqids: Vec,\n r2_seqids: Vec,\n },\n SingleRecords {\n recs: Vec,\n seqids: Vec,\n },\n}\n\n#[derive(Hash, PartialEq, Eq, Clone)]\npub enum Cigar {\n Tuple {\n r1_cigar: Vec,\n r2_cigar: Vec,\n },\n Single {\n cigar: Vec,\n },\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bam/collapse_reads_to_fragments/mod.rs","mod calc_consensus;\nmod pipeline;\n\nuse anyhow::Result;\nuse bio::io::fastq;\nuse log::info;\nuse pipeline::CallConsensusRead;\nuse rust_htslib::bam;\nuse rust_htslib::bam::{Format, Header, Read};\nuse std::path::Path;\n\npub fn call_consensus_reads_from_paths>(\n bam_in: P,\n fq1: P,\n fq2: P,\n fq_se: P,\n bam_skipped_out: P,\n verbose_read_names: bool,\n) -> Result<()> {\n info!('Reading input files:\n {}', bam_in.as_ref().display());\n info!(\n 'Writing forward consensus reads to:\n {}',\n fq1.as_ref().display()\n );\n info!(\n 'Writing reverse consensus reads to:\n {}',\n fq2.as_ref().display()\n );\n info!(\n 'Writing single end consensus reads to:\n {}',\n fq_se.as_ref().display()\n );\n info!(\n 'Writing skipped reads to:\n {}',\n bam_skipped_out.as_ref().display()\n );\n let bam_reader = bam::Reader::from_path(bam_in)?;\n let fq1_writer = fastq::Writer::to_file(fq1)?;\n let fq2_writer = 
fastq::Writer::to_file(fq2)?;\n let fq_se_writer = fastq::Writer::to_file(fq_se)?;\n let bam_skipped_writer = bam::Writer::from_path(\n bam_skipped_out,\n &Header::from_template(bam_reader.header()),\n Format::Bam,\n )?;\n CallConsensusRead::new(\n bam_reader,\n fq1_writer,\n fq2_writer,\n fq_se_writer,\n bam_skipped_writer,\n verbose_read_names,\n )\n .call_consensus_reads()\n}\n\npub fn unmark_record(record: &mut bam::record::Record) -> Result<()> {\n record.unset_duplicate();\n let _ = record.remove_aux(b'PG');\n let _ = record.remove_aux(b'DI');\n let _ = record.remove_aux(b'DS');\n Ok(())\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bam/mod.rs","//! Tools that work on BAM files\npub mod anonymize_reads;\npub mod collapse_reads_to_fragments;\npub mod depth;\npub mod plot;\n","mit" "rust-bio-tools","./rust-bio-tools/src/common.rs","use anyhow::Context;\nuse approx::relative_eq;\nuse bio::stats::probs::{LogProb, PHREDProb};\nuse bio_types::sequence::SequenceRead;\nuse itertools::Itertools;\nuse ordered_float::NotNaN;\nuse std::cmp;\nuse std::collections::HashMap;\nuse std::str::FromStr;\n\nconst PROB_CONFUSION: LogProb = LogProb(-1.0986122886681098); // (1 / 3).ln()\nconst ALLELES: &[u8] = b'ACGT';\n\npub trait CalcConsensus<'a, R: SequenceRead> {\n fn validate_read_lengths(recs: &[R]) -> bool {\n let reference_length = recs[0].len();\n recs.iter()\n .map(|rec| rec.len())\n .all(|len| len == reference_length)\n }\n /// Compute the likelihood for the given allele and read position.\n /// The allele (A, C, G, or T) is an explicit parameter,\n /// the position i is captured by the closure.\n ///\n /// Likelihoods are managed in log space.\n /// A matching base is scored with (1 - PHRED score), a mismatch\n /// with PHRED score + confusion constant.\n fn allele_likelihood_in_rec(\n allele: &u8,\n seq: &[u8],\n qual: &[u8],\n i: usize,\n offset: u8,\n ) -> LogProb {\n let q = LogProb::from(PHREDProb::from((qual[i] - offset) as f64));\n if *allele == seq[i].to_ascii_uppercase() {\n q.ln_one_minus_exp()\n } else {\n q + PROB_CONFUSION\n }\n }\n fn build_consensus_sequence(\n likelihoods: Vec,\n consensus_lh: &mut LogProb,\n consensus_seq: &mut Vec,\n consensus_qual: &mut Vec,\n offset: f64,\n ) {\n if relative_eq!(*likelihoods[0], *likelihoods[1])\n && relative_eq!(*likelihoods[1], *likelihoods[2])\n && relative_eq!(*likelihoods[2], *likelihoods[3])\n {\n consensus_seq.push(b'N');\n consensus_qual.push(offset as u8);\n } else {\n let (max_posterior, allele_lh) = likelihoods\n .iter()\n .enumerate()\n .max_by_key(|&(_, &lh)| NotNaN::new(*lh).unwrap())\n .unwrap();\n *consensus_lh += *allele_lh;\n let marginal = LogProb::ln_sum_exp(&likelihoods);\n // new base: MAP\n consensus_seq.push(ALLELES[max_posterior]);\n // new qual: (1 - MAP)\n let qual = (likelihoods[max_posterior] - marginal).ln_one_minus_exp();\n // Assume the maximal quality, if the likelihood is infinite\n let truncated_quality: f64 = if (*PHREDProb::from(qual)).is_infinite() {\n 93.0\n } else {\n *PHREDProb::from(qual)\n };\n // Truncate quality values to PHRED+33 range\n consensus_qual\n .push(cmp::min(93 + offset as u64, (truncated_quality + offset) as u64) as u8);\n }\n }\n fn build_verbose_read_name(\n uuid: &str,\n seq_ids: &[usize],\n read_ids: &Option>>,\n ) -> String {\n format!(\n '{}_consensus-read-from:{}',\n uuid,\n seq_ids\n .iter()\n .map(|i| String::from_utf8(\n read_ids\n .as_ref()\n .map(|x| x.get(i).unwrap())\n .unwrap()\n .to_vec()\n )\n .unwrap())\n .join(',')\n )\n }\n\n fn 
overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb;\n fn seqids(&self) -> &'a [usize];\n fn uuid(&self) -> &'a str;\n}\n\n#[derive(Debug, Clone)]\npub struct Region {\n pub(crate) target: String,\n pub(crate) start: u64,\n pub(crate) end: u64,\n}\n\nimpl FromStr for Region {\n type Err = anyhow::Error;\n\n fn from_str(s: &str) -> Result {\n let (target, range) = s.split_once(':').context('No ':' in region string')?;\n let (start, end) = range.split_once('-').context('No '-' in region string')?;\n let start = start.parse::()?;\n let end = end.parse::()?;\n Ok(Region {\n target: target.into(),\n start,\n end,\n })\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/fastq/split.rs","//! Split reads from stdin up into the given files.\n//!\n//! ## Usage:\n//!\n//! Distribute reads from `test.fastq` into the files `A.fastq` and `B.fastq`.\n//! ```bash\n//! $ rbt fastq-split A.fastq B.fastq < test.fastq\n//! ```\n//!\nuse anyhow::Result;\nuse bio::io::fastq;\nuse bio::io::fastq::FastqRead;\nuse log::info;\nuse std::io;\nuse std::path::Path;\n\npub fn split>(out_paths: &[P]) -> Result<()> {\n let mut reader = fastq::Reader::new(io::stdin());\n let mut writers = Vec::new();\n for path in out_paths {\n writers.push(fastq::Writer::to_file(path)?);\n }\n let mut record = fastq::Record::new();\n let mut i = 0;\n let mut j = 0;\n loop {\n reader.read(&mut record)?;\n if record.is_empty() {\n return Ok(());\n }\n writers[i].write_record(&record)?;\n i = (i + 1) % writers.len();\n j += 1;\n if j % 1000 == 0 {\n info!('{} records written.', j);\n }\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/fastq/collapse_reads_to_fragments/calc_consensus.rs","use crate::common::CalcConsensus;\nuse bio::io::fastq;\nuse bio::stats::probs::LogProb;\nuse derive_new::new;\nuse itertools::Itertools;\n\nconst ALLELES: &[u8] = b'ACGT';\n\n/// Compute a maximum likelihood fragment sequence for a collection of FASTQ reads.\n///\n/// For each position, compute the likelihood of each allele and\n/// choose the most likely one. Write the most likely allele i.e. base\n/// as sequence into the consensus sequence. The quality value is the\n/// likelihood for this allele, encoded in PHRED+33.\n/// //TODO Generalize as this is identical to BAM except Offset and cigar/writing to record\n#[derive(new)]\npub struct CalcNonOverlappingConsensus<'a> {\n recs: &'a [fastq::Record],\n seqids: &'a [usize],\n uuid: &'a str,\n verbose_read_names: bool,\n}\nimpl<'a> CalcNonOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.recs()[0].seq().len();\n let mut consensus_seq: Vec = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec = Vec::with_capacity(seq_len);\n\n // assert that all reads have the same length here\n assert!(\n Self::validate_read_lengths(self.recs()),\n 'Read length of FASTQ records {:?} differ. 
Cannot compute consensus sequence.',\n self.seqids()\n );\n\n // Potential workflow for different read lengths\n // compute consensus of all reads with max len\n // compute offset of all shorter reads\n // pad shorter reads\n // drop first consensus, compute consensus of full length reads and padded reads\n // ignore padded bases for consensus computation\n\n let mut consensus_lh = LogProb::ln_one();\n\n for i in 0..seq_len {\n // Maximum a-posteriori estimate for the consensus base.\n // Find the allele (theta \in ACGT) with the highest likelihood\n // given the bases at this position, weighted with their quality values\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec(); //Check this. See below\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n }\n\n let name = match self.verbose_read_names {\n true => format!(\n '{}_consensus-read-from:{}',\n self.uuid(),\n self.seqids().iter().map(|i| format!('{}', i)).join(',')\n ),\n false => format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n ),\n };\n\n (\n fastq::Record::with_attrs(&name, None, &consensus_seq, &consensus_qual),\n consensus_lh,\n )\n }\n\n pub fn recs(&self) -> &[fastq::Record] {\n self.recs\n }\n}\n\n//TODO Generalized as it is identical to BAM except Offset\nimpl<'a> CalcConsensus<'a, fastq::Record> for CalcNonOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb {\n let mut lh = LogProb::ln_one(); // posterior: log(P(theta)) = 1\n for rec in self.recs() {\n lh += Self::allele_likelihood_in_rec(allele, rec.seq(), rec.qual(), i, 33);\n }\n lh\n }\n\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n\n/// Compute a consensus sequence for a collection of paired-end FASTQ\n/// reads taking overlap into account.\n///\n/// For each position, compute the likelihood of each allele and\n/// choose the most likely one. Write the most likely allele i.e. base\n/// as sequence into the consensus sequence. The quality value is the\n/// likelihood for this allele, encoded in PHRED+33.\n#[derive(new)]\npub struct CalcOverlappingConsensus<'a> {\n recs1: &'a [fastq::Record],\n recs2: &'a [fastq::Record],\n overlap: usize,\n seqids: &'a [usize],\n uuid: &'a str,\n verbose_read_names: bool,\n}\n\n//TODO Generalize as this is identical to BAM except Offset and cigar/writing to record\nimpl<'a> CalcOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.recs1()[0].seq().len() + self.recs2()[0].seq().len() - self.overlap();\n let mut consensus_seq: Vec = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec = Vec::with_capacity(seq_len);\n\n // assert that all reads have the same length here\n assert!(\n Self::validate_read_lengths(self.recs1()),\n 'Read length of FASTQ forward records {:?} differ. Cannot compute consensus sequence.',\n self.seqids()\n );\n\n assert!(\n Self::validate_read_lengths(self.recs2()),\n 'Read length of FASTQ reverse records {:?} differ. 
Cannot compute consensus sequence.',\n self.seqids()\n );\n let mut consensus_lh = LogProb::ln_one();\n\n for i in 0..seq_len {\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec(); //This will be calculated every iteration\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n }\n let name = match self.verbose_read_names {\n true => format!(\n '{}_consensus-read-from:{}',\n self.uuid(),\n self.seqids().iter().map(|i| format!('{}', i)).join(',')\n ),\n false => format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n ),\n };\n (\n fastq::Record::with_attrs(&name, None, &consensus_seq, &consensus_qual),\n consensus_lh,\n )\n }\n\n fn recs1(&self) -> &[fastq::Record] {\n self.recs1\n }\n\n fn recs2(&self) -> &[fastq::Record] {\n self.recs2\n }\n\n fn overlap(&self) -> usize {\n self.overlap\n }\n}\n\nimpl<'a> CalcConsensus<'a, fastq::Record> for CalcOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb {\n let mut lh = LogProb::ln_one();\n for (rec1, rec2) in self.recs1().iter().zip(self.recs2()) {\n if i < rec1.seq().len() {\n lh += Self::allele_likelihood_in_rec(allele, rec1.seq(), rec1.qual(), i, 33);\n };\n if i >= rec1.seq().len() - self.overlap() {\n let rec2_i = i - (rec1.seq().len() - self.overlap());\n let rec2_seq = bio::alphabets::dna::revcomp(rec2.seq());\n let rec2_qual: Vec = rec2.qual().iter().rev().cloned().collect();\n lh += Self::allele_likelihood_in_rec(allele, &rec2_seq, &rec2_qual, rec2_i, 33);\n };\n }\n lh\n }\n\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/fastq/collapse_reads_to_fragments/pipeline.rs","use anyhow::Result;\nuse bio::io::fastq;\nuse bio::io::fastq::{FastqRead, Record};\nuse bio::stats::probs::LogProb;\nuse derive_new::new;\nuse ordered_float::NotNaN;\nuse rgsl::randist::gaussian::ugaussian_P;\nuse rocksdb::DB;\nuse std::io;\nuse std::io::Write;\nuse std::mem;\nuse std::process::{Command, Stdio};\nuse std::str;\nuse tempfile::tempdir;\nuse uuid::Uuid;\n\nuse super::calc_consensus::{CalcNonOverlappingConsensus, CalcOverlappingConsensus};\n\nconst HAMMING_THRESHOLD: f64 = 10.0;\n\n/// Interpret a cluster returned by starcode\nfn parse_cluster(record: csv::StringRecord) -> Result> {\n let seqids = &record[2];\n Ok(csv::ReaderBuilder::new()\n .delimiter(b',')\n .has_headers(false)\n .from_reader(seqids.as_bytes())\n .deserialize()\n .next()\n .unwrap()?)\n}\n\n/// Calculates the median hamming distance for all records by deriving the overlap from insert size\nfn median_hamming_distance(\n insert_size: usize,\n f_recs: &[fastq::Record],\n r_recs: &[fastq::Record],\n) -> Option {\n let distances = f_recs.iter().zip(r_recs).filter_map(|(f_rec, r_rec)| {\n // check if reads overlap within insert size\n if (insert_size < f_rec.seq().len()) | (insert_size < r_rec.seq().len()) {\n return None;\n }\n if insert_size >= (f_rec.seq().len() + r_rec.seq().len()) {\n return None;\n }\n let overlap = (f_rec.seq().len() + r_rec.seq().len()) - insert_size;\n let suffix_start_idx: usize = f_rec.seq().len() - overlap;\n Some(bio::alignment::distance::hamming(\n &f_rec.seq()[suffix_start_idx..],\n &bio::alphabets::dna::revcomp(r_rec.seq())[..overlap],\n ))\n });\n stats::median(distances)\n}\n\n/// as shown in http://www.milefoot.com/math/stat/pdfc-normaldisc.htm\nfn 
isize_pmf(value: f64, mean: f64, sd: f64) -> LogProb {\n LogProb((ugaussian_P((value + 0.5 - mean) / sd) - ugaussian_P((value - 0.5 - mean) / sd)).ln())\n}\n\n/// Used to store a mapping of read index to read sequence\n#[derive(Debug)]\nstruct FastqStorage {\n db: DB,\n}\n\nimpl FastqStorage {\n /// Create a new FASTQStorage using a Rocksdb database\n /// that maps read indices to read seqeunces.\n pub fn new() -> Result {\n // Save storage_dir to prevent it from leaving scope and\n // in turn deleting the tempdir\n let storage_dir = tempdir()?.path().join('db');\n Ok(FastqStorage {\n db: DB::open_default(storage_dir)?,\n })\n }\n\n #[allow(clippy::wrong_self_convention)]\n fn as_key(i: u64) -> [u8; 8] {\n unsafe { mem::transmute::(i) }\n }\n\n /// Enter a (read index, read sequence) pair into the database.\n pub fn put(&mut self, i: usize, f_rec: &fastq::Record, r_rec: &fastq::Record) -> Result<()> {\n Ok(self.db.put(\n &Self::as_key(i as u64),\n serde_json::to_string(&(f_rec, r_rec))?.as_bytes(),\n )?)\n }\n\n /// Retrieve the read sequence of the read with index `i`.\n pub fn get(&self, i: usize) -> Result<(fastq::Record, fastq::Record)> {\n Ok(serde_json::from_str(\n str::from_utf8(&self.db.get(&Self::as_key(i as u64))?.unwrap()).unwrap(),\n )?)\n }\n}\n\npub struct OverlappingConsensus {\n record: Record,\n likelihood: LogProb,\n}\n\npub struct NonOverlappingConsensus {\n f_record: Record,\n r_record: Record,\n likelihood: LogProb,\n}\n\npub trait CallConsensusReads<'a, R: io::Read + io::BufRead + 'a, W: io::Write + 'a> {\n /// Cluster reads from fastq readers according to their sequence\n /// and UMI, then compute a consensus sequence.\n ///\n /// Cluster the reads in the input file according to their sequence\n /// (concatenated p5 and p7 reads without UMI). Read the\n /// identified clusters, and cluster all reds in a cluster by UMI,\n /// creating groups of very likely PCR duplicates.\n /// Next, compute a consensus read for each unique read,\n /// i.e. a cluster with similar sequences and identical UMI,\n /// and write it into the output files.\n fn call_consensus_reads(&'a mut self) -> Result<()> {\n let spinner_style = indicatif::ProgressStyle::default_spinner()\n .tick_chars('⠁⠂⠄⡀⢀⠠⠐⠈ ')\n .template('{prefix:.bold.dim} {spinner} {wide_msg}');\n\n // cluster by umi\n // Note: If starcode is not installed, this throws a\n // hard to interpret error:\n // (No such file or directory (os error 2))\n // The expect added below should make this more clear.\n let mut umi_cluster = Command::new('starcode')\n .arg('--dist')\n .arg(format!('{}', self.umi_dist()))\n .arg('--seq-id')\n .arg('-s')\n .stdin(Stdio::piped())\n .stdout(Stdio::piped())\n .stderr(Stdio::piped())\n .spawn()\n .expect('Error in starcode call. Starcode might not be installed.');\n\n let mut f_rec = fastq::Record::new();\n let mut r_rec = fastq::Record::new();\n // init temp storage for reads\n let mut read_storage = FastqStorage::new()?;\n let mut i = 0;\n\n // prepare spinner for user feedback\n let pb = indicatif::ProgressBar::new_spinner();\n pb.set_style(spinner_style.clone());\n pb.set_prefix('[1/2] Clustering input reads by UMI using starcode.');\n\n loop {\n // update spinner\n pb.set_message(&format!(' Processed {:>10} reads', i));\n pb.inc(1);\n self.fq1_reader().read(&mut f_rec)?;\n self.fq2_reader().read(&mut r_rec)?;\n\n match (f_rec.is_empty(), r_rec.is_empty()) {\n (true, true) => break,\n (false, false) => (),\n (true, false) => {\n let error_message = format!('Given FASTQ files have unequal lengths. 
Forward file returned record {} as empty, reverse record is not: id:'{}' seq:'{:?}'.', i, r_rec.id(), str::from_utf8(r_rec.seq()));\n panic!('{}', error_message);\n }\n (false, true) => {\n let error_message = format!('Given FASTQ files have unequal lengths. Reverse file returned record {} as empty, forward record is not: id:'{}' seq:'{:?}'.', i, f_rec.id(), str::from_utf8(f_rec.seq()));\n panic!('{}', error_message);\n }\n }\n // extract umi for clustering\n let umi = if self.reverse_umi() {\n r_rec.seq()[..self.umi_len()].to_owned()\n } else {\n f_rec.seq()[..self.umi_len()].to_owned()\n };\n umi_cluster.stdin.as_mut().unwrap().write_all(&umi)?;\n umi_cluster.stdin.as_mut().unwrap().write_all(b'\n')?;\n // remove umi from read sequence for all further clustering steps\n if self.reverse_umi() {\n r_rec = self.strip_umi_from_record(&r_rec)\n } else {\n f_rec = self.strip_umi_from_record(&f_rec)\n }\n // store read sequences in an on-disk key value store for random access\n read_storage.put(i, &f_rec, &r_rec)?;\n i += 1;\n }\n umi_cluster.stdin.as_mut().unwrap().flush()?;\n drop(umi_cluster.stdin.take());\n pb.finish_with_message(&format!('Done. Analyzed {} reads.', i));\n\n // prepare user feedback\n let mut j = 0;\n let pb = indicatif::ProgressBar::new_spinner();\n pb.set_style(spinner_style);\n pb.set_prefix('[1/2] Clustering input reads by UMI using starcode.');\n // read clusters identified by the first starcode run\n // the first run clustered by UMI, hence all reads in\n // the clusters handled here had similar UMIs\n for record in csv::ReaderBuilder::new()\n .delimiter(b'\t')\n .has_headers(false)\n .from_reader(umi_cluster.stdout.as_mut().unwrap())\n .records()\n {\n // update spinner\n pb.inc(1);\n pb.set_message(&format!('Processed {:>10} cluster', j));\n let seqids = parse_cluster(record?)?;\n // cluster within in this cluster by read sequence\n let mut seq_cluster = Command::new('starcode')\n .arg('--dist')\n .arg(format!('{}', self.seq_dist()))\n .arg('--seq-id')\n .arg('-s')\n .stdin(Stdio::piped())\n .stdout(Stdio::piped())\n .stderr(Stdio::piped())\n .spawn()?;\n for &seqid in &seqids {\n // get sequences from rocksdb (key value store)\n let (f_rec, r_rec) = read_storage.get(seqid - 1).unwrap();\n // perform clustering using the concatenated read sequences\n // without the UMIs (remove in the first clustering step)\n seq_cluster\n .stdin\n .as_mut()\n .unwrap()\n .write_all(&[f_rec.seq(), r_rec.seq()].concat())?;\n seq_cluster.stdin.as_mut().unwrap().write_all(b'\n')?;\n }\n seq_cluster.stdin.as_mut().unwrap().flush()?;\n drop(seq_cluster.stdin.take());\n\n // handle each potential unique read, i.e. 
clusters with similar\n // UMI and similar sequence\n for record in csv::ReaderBuilder::new()\n .delimiter(b'\t')\n .has_headers(false)\n .from_reader(seq_cluster.stdout.as_mut().unwrap())\n .records()\n {\n let inner_seqids = parse_cluster(record?)?;\n // this is a proper cluster\n // calculate consensus reads and write to output FASTQs\n let mut f_recs = Vec::new();\n let mut r_recs = Vec::new();\n let mut outer_seqids = Vec::new();\n\n for inner_seqid in inner_seqids {\n let seqid = seqids[inner_seqid - 1];\n let (f_rec, r_rec) = read_storage.get(seqid - 1)?;\n f_recs.push(f_rec);\n r_recs.push(r_rec);\n outer_seqids.push(seqid);\n }\n self.write_records(f_recs, r_recs, outer_seqids)?;\n }\n\n match seq_cluster\n .wait()\n .expect('process did not even start')\n .code()\n {\n Some(0) => (),\n Some(s) => eprintln!('Starcode failed with error code {}', s),\n None => eprintln!('Starcode was terminated by signal'),\n }\n j += 1;\n }\n pb.finish_with_message(&format!('Done. Processed {} cluster.', j));\n Ok(())\n }\n fn strip_umi_from_record(&mut self, record: &Record) -> Record {\n let rec_seq = &record.seq()[self.umi_len()..];\n let rec_qual = &record.qual()[self.umi_len()..];\n Record::with_attrs(record.id(), record.desc(), rec_seq, rec_qual)\n }\n fn write_records(\n &mut self,\n f_recs: Vec,\n r_recs: Vec,\n outer_seqids: Vec,\n ) -> Result<()>;\n fn fq1_reader(&mut self) -> &mut fastq::Reader;\n fn fq2_reader(&mut self) -> &mut fastq::Reader;\n fn umi_len(&self) -> usize;\n fn seq_dist(&self) -> usize;\n fn umi_dist(&self) -> usize;\n fn reverse_umi(&self) -> bool;\n}\n\n/// Struct for calling non-overlapping consensus reads\n/// Implements Trait CallConsensusReads\n#[allow(clippy::too_many_arguments)]\n#[derive(new)]\npub struct CallNonOverlappingConsensusRead<'a, R: io::Read, W: io::Write> {\n fq1_reader: &'a mut fastq::Reader,\n fq2_reader: &'a mut fastq::Reader,\n fq1_writer: &'a mut fastq::Writer,\n fq2_writer: &'a mut fastq::Writer,\n umi_len: usize,\n seq_dist: usize,\n umi_dist: usize,\n reverse_umi: bool,\n verbose_read_names: bool,\n}\n\nimpl<'a, R: io::Read + io::BufRead, W: io::Write> CallConsensusReads<'a, R, W>\n for CallNonOverlappingConsensusRead<'a, R, W>\n{\n fn write_records(\n &mut self,\n f_recs: Vec,\n r_recs: Vec,\n outer_seqids: Vec,\n ) -> Result<()> {\n if f_recs.len() > 1 {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n self.fq1_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n &f_recs,\n &outer_seqids,\n uuid,\n self.verbose_read_names,\n )\n .calc_consensus()\n .0,\n )?;\n self.fq2_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n &r_recs,\n &outer_seqids,\n uuid,\n self.verbose_read_names,\n )\n .calc_consensus()\n .0,\n )?;\n } else {\n self.fq1_writer.write_record(&f_recs[0])?;\n self.fq2_writer.write_record(&r_recs[0])?;\n }\n Ok(())\n }\n\n fn fq1_reader(&mut self) -> &mut fastq::Reader {\n self.fq1_reader\n }\n\n fn fq2_reader(&mut self) -> &mut fastq::Reader {\n self.fq2_reader\n }\n\n fn umi_len(&self) -> usize {\n self.umi_len\n }\n\n fn seq_dist(&self) -> usize {\n self.seq_dist\n }\n\n fn umi_dist(&self) -> usize {\n self.umi_dist\n }\n\n fn reverse_umi(&self) -> bool {\n self.reverse_umi\n }\n}\n\n///Clusters fastq reads by UMIs and calls consensus for overlapping reads\n#[allow(clippy::too_many_arguments)]\n#[derive(new)]\npub struct CallOverlappingConsensusRead<'a, R: io::Read, W: io::Write> {\n fq1_reader: &'a mut fastq::Reader,\n fq2_reader: &'a mut fastq::Reader,\n fq1_writer: &'a mut fastq::Writer,\n 
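// fq1/fq2 writers receive the forward/reverse consensus pair when the non-overlapping\n // model is more likely; fq3_writer receives the single merged overlapping consensus read.\n 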
fq2_writer: &'a mut fastq::Writer,\n fq3_writer: &'a mut fastq::Writer,\n umi_len: usize,\n seq_dist: usize,\n umi_dist: usize,\n insert_size: usize,\n std_dev: usize,\n reverse_umi: bool,\n verbose_read_names: bool,\n}\n\nimpl<'a, R: io::Read, W: io::Write> CallOverlappingConsensusRead<'a, R, W> {\n fn isize_highest_probability(&mut self, f_seq_len: usize, r_seq_len: usize) -> f64 {\n if f_seq_len + f_seq_len < self.insert_size {\n self.insert_size as f64\n } else if f_seq_len + r_seq_len > self.insert_size + 2 * self.std_dev {\n (self.insert_size + 2 * self.std_dev) as f64\n } else {\n (f_seq_len + r_seq_len) as f64\n }\n }\n\n fn maximum_likelihood_overlapping_consensus(\n &mut self,\n f_recs: &[Record],\n r_recs: &[Record],\n outer_seqids: &[usize],\n uuid: &str,\n ) -> OverlappingConsensus {\n //Returns consensus record by filtering overlaps with lowest hamming distance.\n //For these overlaps(insert sizes) the consensus reads and their likelihoods are calculated.\n //The read with maximum likelihood will be returned.\n let insert_sizes = ((self.insert_size - 2 * self.std_dev)\n ..(self.insert_size + 2 * self.std_dev))\n .filter_map(|insert_size| {\n median_hamming_distance(insert_size, f_recs, r_recs)\n .filter(|&median_distance| median_distance < HAMMING_THRESHOLD)\n .map(|_| insert_size)\n });\n insert_sizes\n .map(|insert_size| {\n let overlap = (f_recs[0].seq().len() + r_recs[0].seq().len()) - insert_size;\n let (consensus_record, lh_isize) = CalcOverlappingConsensus::new(\n f_recs,\n r_recs,\n overlap,\n outer_seqids,\n uuid,\n self.verbose_read_names,\n )\n .calc_consensus();\n let likelihood = lh_isize\n + isize_pmf(\n insert_size as f64,\n self.insert_size as f64,\n self.std_dev as f64,\n );\n OverlappingConsensus {\n record: consensus_record,\n likelihood,\n }\n })\n .max_by_key(|consensus| NotNaN::new(*consensus.likelihood).unwrap())\n .unwrap()\n }\n\n fn maximum_likelihood_nonoverlapping_consensus(\n &mut self,\n f_recs: &[Record],\n r_recs: &[Record],\n outer_seqids: &[usize],\n uuid: &str,\n ) -> NonOverlappingConsensus {\n //Calculate non-overlapping consensus records and shared lh\n let (f_consensus_rec, f_lh) =\n CalcNonOverlappingConsensus::new(f_recs, outer_seqids, uuid, self.verbose_read_names)\n .calc_consensus();\n let (r_consensus_rec, r_lh) =\n CalcNonOverlappingConsensus::new(r_recs, outer_seqids, uuid, self.verbose_read_names)\n .calc_consensus();\n let overall_lh_isize = f_lh + r_lh;\n //Determine insert size with highest probability for non-overlapping records based on expected insert size\n let likeliest_isize =\n self.isize_highest_probability(f_recs[0].seq().len(), r_recs[0].seq().len());\n let overall_lh = overall_lh_isize\n + isize_pmf(\n likeliest_isize,\n self.insert_size as f64,\n self.std_dev as f64,\n );\n NonOverlappingConsensus {\n f_record: f_consensus_rec,\n r_record: r_consensus_rec,\n likelihood: overall_lh,\n }\n }\n}\n\nimpl<'a, R: io::Read + io::BufRead, W: io::Write> CallConsensusReads<'a, R, W>\n for CallOverlappingConsensusRead<'a, R, W>\n{\n fn write_records(\n &mut self,\n f_recs: Vec,\n r_recs: Vec,\n outer_seqids: Vec,\n ) -> Result<()> {\n //TODO Add deterministic uuid considering read ids\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n let ol_consensus =\n self.maximum_likelihood_overlapping_consensus(&f_recs, &r_recs, &outer_seqids, uuid);\n let non_ol_consensus =\n self.maximum_likelihood_nonoverlapping_consensus(&f_recs, &r_recs, &outer_seqids, uuid);\n match ol_consensus.likelihood > non_ol_consensus.likelihood 
{\n true => self.fq3_writer.write_record(&ol_consensus.record)?,\n false => {\n self.fq1_writer.write_record(&non_ol_consensus.f_record)?;\n self.fq2_writer.write_record(&non_ol_consensus.r_record)?;\n }\n }\n Ok(())\n }\n\n fn fq1_reader(&mut self) -> &mut fastq::Reader {\n self.fq1_reader\n }\n\n fn fq2_reader(&mut self) -> &mut fastq::Reader {\n self.fq2_reader\n }\n\n fn umi_len(&self) -> usize {\n self.umi_len\n }\n\n fn seq_dist(&self) -> usize {\n self.seq_dist\n }\n\n fn umi_dist(&self) -> usize {\n self.umi_dist\n }\n\n fn reverse_umi(&self) -> bool {\n self.reverse_umi\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/fastq/collapse_reads_to_fragments/mod.rs","//! Tool to merge sets of reads in paired FASTQ files that share the UMI and have similar read sequence.\n//! The result is a maximum likelihood fragment sequence per set.\n//!\n//! This tool takes two FASTQ files (forward and reverse)\n//! and returns two FASTQ files in which all PCR duplicates\n//! have been merged into a consensus read.\n//! Duplicates are identified by a Unique Molecular Identifier (UMI).\n//!\n//! ## Requirements:\n//!\n//! - starcode\n//!\n//!\n//! ## Usage:\n//!\n//! ```bash\n//! $ rbt collapse-reads-to-fragments fastq \\n//! \\n//! \\n//! \\n//! \\n//! -l \\n//! -D \ # See step 1 below\n//! -d \ # See step 2 below\n//! --umi-on-reverse # if the UMIs are part of the reverse reads\n//! ```\n//!\n//! ## Assumptions:\n//!\n//! - Reads are of equal length\n//! - UMI is the prefix of the reads\n//!\n//! ## Workflow:\n//!\n//! The main steps are:\n//!\n//! 1. Preparation\n//! 1. Remove UMI sequence from read (and save it for later use).\n//! 2. Concatenate forward and reverse sequence.\n//! ```text\n//! Forward Read: [================]\n//! Reverse Read: [(UMI)-----------]\n//! Sequence for clustering in step 3: [================-----------]\n//! ```\n//!\n//! 2. Cluster all reads by their UMIs using starcode.\n//! Each cluster generated in this step contains reads with similar UMIs.\n//! However, all PCR duplicates of a read are within one cluster, since they\n//! share a UMI sequence.\n//! The size of these clusters highly depends on the length of the used UMI.\n//!\n//! 2. For each cluster from step two:\n//! 1. Cluster reads by their concatenated sequences (without UMI) using starcode.\n//! 2. Each new cluster contains reads that have a similar UMI (from step 2)\n//! as well as similar sequences. Consequently, these sets of reads are\n//! likely to be PCR duplicates of each other.\n//!\n//! 3. For each cluster from step three: Compute a consensus sequence.\n//!\n//! At each position in the read, all bases and quality values are used\n//! to compute the base with Maximum a-posteriori probability (MAP).\n//!\n//! 1. For one position, compute the likelihood for the four alleles\n//! A, C, G, and T, incorporating the number of bases as well as\n//! their quality values.\n//! 2. Choose the allele with the largest likelihood for the consensus read.\n//! 3. Compute the quality value of the consensus read from the maximum posterior\n//! probability used to select the allele.\n//!\n//! 4. 
Write consensus reads to output file.\n//!\n//!\n//!\n// Since this is a binary crate, documentation needs to be compiled with this 'ancient incantation':\n// https://github.com/rust-lang/cargo/issues/1865#issuecomment-394179125\nmod calc_consensus;\nmod pipeline;\n\nuse anyhow::Result;\nuse bio::io::fastq;\nuse flate2::bufread::MultiGzDecoder;\nuse flate2::write::GzEncoder;\nuse flate2::Compression;\nuse log::info;\nuse pipeline::{CallConsensusReads, CallNonOverlappingConsensusRead, CallOverlappingConsensusRead};\nuse std::fs;\nuse std::io::{BufReader, BufWriter, Read, Write};\nuse std::path::Path;\n\n// TODO: reduce arguments for clippy to <= 7\n/// Build readers for the given input and output FASTQ files and pass them to\n/// `call_consensus_reads`.\n///\n/// The type of the readers (writers) depends on the file ending.\n/// If the input file names end with '.gz' a gzipped reader (writer) is used.\n#[allow(clippy::too_many_arguments)]\npub fn call_consensus_reads_from_paths + std::fmt::Debug>(\n fq1: P,\n fq2: P,\n fq1_out: P,\n fq2_out: P,\n fq3_out: Option
<P>
,\n umi_len: usize,\n seq_dist: usize,\n umi_dist: usize,\n reverse_umi: bool,\n verbose_read_names: bool,\n insert_size: Option,\n std_dev: Option,\n) -> Result<()> {\n match fq3_out {\n None => {\n info!(\n 'Reading input files:\n {}\n {}',\n fq1.as_ref().display(),\n fq2.as_ref().display()\n );\n info!(\n 'Writing output to:\n {}\n {}',\n fq1_out.as_ref().display(),\n fq2_out.as_ref().display()\n );\n\n fn reader>(\n path: P,\n ) -> Result>>> {\n let r: Box = if path.as_ref().ends_with('.gz') {\n Box::new(\n fs::File::open(&path)\n .map(BufReader::new)\n .map(MultiGzDecoder::new)?,\n )\n } else {\n Box::new(fs::File::open(&path).map(BufReader::new)?)\n };\n Ok(fastq::Reader::new(r))\n }\n\n fn writer>(path: P) -> Result>> {\n let w: Box = if path.as_ref().ends_with('.gz') {\n Box::new(\n fs::File::create(&path)\n .map(BufWriter::new)\n .map(|w| GzEncoder::new(w, Compression::default()))?,\n )\n } else {\n Box::new(fs::File::create(&path).map(BufWriter::new)?)\n };\n Ok(fastq::Writer::new(w))\n }\n\n CallNonOverlappingConsensusRead::new(\n &mut reader(fq1)?,\n &mut reader(fq2)?,\n &mut writer(fq1_out)?,\n &mut writer(fq2_out)?,\n umi_len,\n seq_dist,\n umi_dist,\n reverse_umi,\n verbose_read_names,\n )\n .call_consensus_reads()\n }\n Some(fq3_path) => {\n eprintln!(\n 'Reading input files:\n {}\n {}',\n fq1.as_ref().display(),\n fq2.as_ref().display()\n );\n eprintln!(\n 'Writing output to:\n {}\n {}\n {}',\n fq1_out.as_ref().display(),\n fq2_out.as_ref().display(),\n fq3_path.as_ref().display()\n );\n match (fq1.as_ref().ends_with('.gz'), fq2.as_ref().ends_with('.gz'), fq1_out.as_ref().ends_with('.gz'), fq2_out.as_ref().ends_with('.gz'), fq3_path.as_ref().ends_with('.gz')) {\n (false, false, false, false, false) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::from_file(fq1)?,\n &mut fastq::Reader::from_file(fq2)?,\n &mut fastq::Writer::to_file(fq1_out)?,\n &mut fastq::Writer::to_file(fq2_out)?,\n &mut fastq::Writer::to_file(fq3_path)?,\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n (true, true, false, false, false) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::new(fs::File::open(fq1).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut fastq::Reader::new(fs::File::open(fq2).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut fastq::Writer::to_file(fq1_out)?,\n &mut fastq::Writer::to_file(fq2_out)?,\n &mut fastq::Writer::to_file(fq3_path)?,\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n (false, false, true, true, true) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::from_file(fq1)?,\n &mut fastq::Reader::from_file(fq2)?,\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq1_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq2_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq3_path)?, Compression::default())),\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n (true, true, true, true, true) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::new(fs::File::open(fq1).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut fastq::Reader::new(fs::File::open(fq2).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut 
fastq::Writer::new(GzEncoder::new(fs::File::create(fq1_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq2_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq3_path)?, Compression::default())),\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n _ => panic!('Invalid combination of files. Each pair of files (input and output) need to be both gzipped or both not zipped.')\n }\n }\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/fastq/mod.rs","//! Tools that work on FASTQ files\npub mod collapse_reads_to_fragments;\npub mod filter;\npub mod split;\n","mit" "rust-bio-tools","./rust-bio-tools/src/fastq/filter.rs","//! Filter reads matching names in a text file into a new FASTQ file.\n//!\n//! ## Usage:\n//!\n//! Extract the read with identifier `A` from `test.fastq` into a new file `filtered.fastq`\n//! ```bash\n//! $ cat ids.txt\n//! A\n//!\n//! $ cat test.fastq\n//! @A\n//! ACTCTATCTA\n//! +\n//! !!!!!!!!!!\n//! @B\n//! CTCTATCTCTA\n//! +\n//! !!!!!!!!!!!\n//!\n//! $ rbt fastq-filter ids.txt < test.fastq > filtered.fastq\n//!\n//! $ cat filtered.fastq\n//! @A\n//! ACTCTATCTA\n//! +\n//! !!!!!!!!!!\n//! ```\n//!\nuse anyhow::Result;\nuse bio::io::fastq;\nuse bio::io::fastq::FastqRead;\nuse std::collections::HashSet;\nuse std::fs::File;\nuse std::io::{self, BufRead, BufReader};\nuse std::iter::FromIterator;\nuse std::path::Path;\n\npub fn filter>(ids_path: P) -> Result<()> {\n let mut reader = fastq::Reader::new(io::stdin());\n let mut writer = fastq::Writer::new(io::stdout());\n let f = File::open(ids_path)?;\n let f = BufReader::new(f);\n let ids =\n HashSet::::from_iter(f.lines().filter_map(Result::ok).collect::>());\n\n let mut record = fastq::Record::new();\n\n loop {\n reader.read(&mut record)?;\n if record.is_empty() {\n return Ok(());\n }\n if !ids.contains(record.id()) {\n writer.write_record(&record)?;\n }\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/sequences_stats.rs","//! Compute statics on sequences from stdin:\n//! - min: length of shortest sequence\n//! - max: length of longest sequence\n//! - average: average length of sequence\n//! - median: median length of sequence\n//! - nb_reads: number of reads\n//! - nb_bases: number of bases\n//! - n50: N50 of sequences\n//!\n//! Output is in yaml format\n//!\n//! ## Usage:\n//!\n//! ```\n//! $ rbt sequences-stats < A.fasta\n//! $ rbt sequences-stats -q < A.fastq\n//! 
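# each command prints YAML stats to stdout: min, max, average, median, number of reads, number of bases, n50\n//! 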
```\n\nuse anyhow::{bail, Result};\nuse bio::io::{fasta, fastq};\nuse std::io;\nuse thiserror::Error;\n\npub fn stats(fastq: bool) -> Result<()> {\n let mut lengths = if fastq {\n fastq_lengths()\n } else {\n fasta_lengths()\n };\n\n if lengths.is_empty() {\n bail!(InputError::NoSequence);\n }\n // Sort lengths one time\n lengths.sort_unstable();\n\n let nb_bases = lengths.iter().sum::();\n\n println!(\n 'min: {min}\nmax: {max}\naverage: {average}\nmediane: {mediane}\nnumber of reads: {nb_reads}\nnumber of bases: {nb_bases}\nn50: {n50}',\n min = lengths[0], // First element is the minimal element\n max = lengths[lengths.len() - 1], // last element is the maximal element\n average = average(&lengths),\n mediane = median(&lengths),\n nb_reads = lengths.len(),\n nb_bases = nb_bases,\n n50 = n50(&lengths, nb_bases),\n );\n\n Ok(())\n}\n\nfn fasta_lengths() -> Vec {\n let reader = fasta::Reader::new(io::stdin());\n\n let mut lengths = Vec::new();\n\n let mut records = reader.records();\n while let Some(Ok(record)) = records.next() {\n lengths.push(record.seq().len());\n }\n\n lengths\n}\n\npub fn fastq_lengths() -> Vec {\n let reader = fastq::Reader::new(io::stdin());\n\n let mut lengths = Vec::new();\n\n let mut records = reader.records();\n while let Some(Ok(record)) = records.next() {\n lengths.push(record.seq().len());\n }\n\n lengths\n}\n\nfn n50(numbers: &[usize], nb_bases_total: usize) -> usize {\n let mut acc = 0;\n for val in numbers.iter() {\n acc += *val;\n if acc > nb_bases_total / 2 {\n return *val;\n }\n }\n\n numbers[numbers.len() - 1]\n}\n\nfn average(numbers: &[usize]) -> f64 {\n numbers.iter().sum::() as f64 / numbers.len() as f64\n}\n\nfn median(data: &[usize]) -> f64 {\n match data.len() {\n 0 => 0.0,\n 1 => data[0] as f64,\n len if len % 2 == 0 => {\n let v1 = data[(len / 2) - 1];\n let v2 = data[len / 2];\n (v1 + v2) as f64 / 2.0\n }\n len => data[len / 2] as f64,\n }\n}\n\n#[derive(Error, Debug)]\npub enum InputError {\n #[error('stdin didn't contain any sequence')]\n NoSequence,\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/cli.rs","use crate::common::Region;\nuse std::path::PathBuf;\nuse structopt::StructOpt;\n\n#[derive(StructOpt)]\n#[structopt(\n about = 'A set of ultra-fast command line utilities for bioinformatics tasks based on Rust-Bio.',\n author = 'Johannes Köster ',\n name = 'Rust-Bio-Tools'\n)]\npub(crate) struct Rbt {\n #[structopt(long, short, help = 'Verbose output.')]\n pub(crate) verbose: bool,\n\n #[structopt(subcommand)]\n pub(crate) cmd: Command,\n}\n\n#[derive(StructOpt)]\npub(crate) enum Command {\n /// Split FASTQ file from STDIN into N chunks.\n ///\n /// Example:\n /// rbt fastq-split A.fastq B.fastq < test.fastq\n #[structopt(author = 'Johannes Köster ')]\n FastqSplit {\n #[structopt(parse(from_os_str), help = 'File name(s) for the chunks to create.')]\n chunks: Vec,\n },\n /// Remove records from a FASTQ file (from STDIN), output to STDOUT.\n ///\n /// Example:\n /// rbt fastq-filter ids.txt < test.fastq > filtered.fastq\n #[structopt(author = 'Erik Clarke ')]\n FastqFilter {\n #[structopt(parse(from_os_str))]\n /// File with list of record IDs to remove, one per line.\n ids: PathBuf,\n },\n\n /// Print depth of BAM or CRAM file at given positions from STDIN (tab separated: chrom, pos).\n ///\n /// Usage:\n /// $ rbt bam-depth test.bam < pos.txt > depth.txt\n ///\n /// The positions file contains the name of one reference sequence and one position per line (tab separated).\n /// Example:\n ///\n /// 16 1\n /// 17 38\n /// 17 39\n ///\n /// 
Depths are written to stdout as tab-separated lines, similar to the positions input.\n /// Example:\n ///\n /// 16 1 0\n /// 17 38 14\n /// 17 39 13\n #[structopt(author = 'Johannes Köster ')]\n BamDepth {\n /// Path to indexed BAM file.\n #[structopt(parse(from_os_str))]\n bam_path: PathBuf,\n\n /// Maximum read length to consider. This affects the speed of the involved pileup.\n /// Reads longer than this length can be missed when calculating the depth.\n #[structopt(long, short, default_value = '1000')]\n max_read_length: u32,\n\n /// Skip reads with mask bits unset [].\n #[structopt(long = 'incl-flags', short, default_value = '0')]\n include_flags: u16,\n\n /// Skip reads with mask bits set [UNMAP, SECONDARY, QCFAIL, DUP].\n #[structopt(long = 'excl-flags', short, default_value = '1796')]\n exclude_flags: u16,\n\n /// Minimum mapping quality.\n #[structopt(long, short = 'q', default_value = '0')]\n min_mapq: u8,\n },\n\n /// Convert any IUPAC codes in alleles into Ns (in order to comply with VCF 4 specs).\n /// Reads VCF/BCF from STDIN and writes BCF to STDOUT.\n ///\n /// Example:\n /// rbt vcf-fix-iupac-alleles < test.vcf > fixed.bcf\n #[structopt(author = 'Johannes Köster ')]\n VcfFixIupacAlleles {},\n\n /// Convert VCF/BCF file from STDIN to tab-separated TXT file at STDOUT.\n /// INFO and FORMAT tags have to be selected explicitly.\n ///\n /// Example:\n /// rbt vcf-to-txt --genotypes --fmt S --info T X SOMATIC < test.vcf > variant-table.txt\n ///\n /// The resulting table can be e.g. parsed with PANDAS in Python:\n ///\n /// pd.read_table('variants.txt', header=[0, 1])\n #[structopt(author = 'Johannes Köster ')]\n VcfToTxt {\n /// Select INFO tags\n #[structopt(long, short, value_name = 'NAME')]\n info: Vec,\n\n /// Select FORMAT tags.\n #[structopt(long = 'fmt', short, value_name = 'NAME')]\n format: Vec,\n\n /// Display genotypes.\n #[structopt(long, short)]\n genotypes: bool,\n\n /// Include FILTER field.\n #[structopt(long)]\n with_filter: bool,\n },\n\n /// Annotate for each variant in a VCF/BCF at STDIN whether it is contained in a\n /// given second VCF/BCF. 
The matching is fuzzy for indels and exact for SNVs.\n /// Results are printed as BCF to STDOUT, with an additional INFO tag MATCHING.\n /// The two vcfs do not have to be sorted.\n ///\n /// Example:\n /// rbt vcf-match dbsnp.vcf < calls.vcf | bcftools view\n #[structopt(author = 'Johannes Köster ')]\n VcfMatch {\n /// VCF/BCF file to match against.\n #[structopt(parse(from_os_str))]\n vcf: PathBuf,\n\n /// Maximum distance between centres of two indels considered to match.\n #[structopt(long, short = 'd', value_name = 'INT', default_value = '20')]\n max_dist: u32,\n\n /// Maximum difference between lengths of two indels.\n #[structopt(long, short = 'l', value_name = 'INT', default_value = '10')]\n max_len_diff: u32,\n },\n\n /// Annotate b-allele frequency for each single nucleotide variant and sample.\n ///\n /// Example:\n /// rbt vcf-baf < calls.bcf > annotated.bcf\n #[structopt(\n author = 'Johannes Köster , Jan Forster '\n )]\n VcfBaf {},\n\n /// Looks for interacting drugs in DGIdb and annotates them for every gene in every record.\n ///\n /// Example:\n /// rbt vcf-annotate-dgidb input.vcf > output.vcf\n #[structopt(author = 'Felix Mölder ')]\n VcfAnnotateDgidb {\n /// VCF/BCF file to be extended by dgidb drug entries\n #[structopt()]\n vcf: String,\n\n /// Url prefix for requesting interaction drugs by gene names.\n #[structopt(\n long,\n short = 'p',\n default_value = 'http://dgidb.org/api/v2/interactions.json?genes='\n )]\n api_path: String,\n\n /// Info field name to be used for annotation.\n #[structopt(long, short = 'f', default_value = 'dgiDB_drugs')]\n field: String,\n\n /// A list of data sources included in query. If omitted all sources are considered.\n /// A list of all sources can be found at http://dgidb.org/api/v2/interaction_sources.json\n #[structopt(long, short = 's', value_name = 'STR')]\n datasources: Option>,\n\n /// Number of genes to submit per api request. A lower value increases the number of api requests in return.\n /// Too many requests could be rejected by the DGIdb server.\n #[structopt(long, short = 'g', default_value = '500')]\n genes_per_request: usize,\n },\n\n /// Creates report from a given csv file containing a table with the given data\n /// Examples:\n /// With current directory as default ouput path:\n /// rbt csv-report path/to/table.csv --rows-per-page 100 --sort-column 'p-value' --sort-order ascending\n #[structopt(author = 'Felix Wiegand ')]\n CsvReport {\n /// CSV file including the data for the report.\n #[structopt()]\n csv_path: String,\n\n /// Sets the numbers of rows of each table per page. Default is 100.\n #[structopt(long, short = 'r', default_value = '100')]\n rows_per_page: u32,\n\n /// Column that the data should be sorted by.\n #[structopt(long, short = 'c')]\n sort_column: Option,\n\n /// Order the data ascending or descending. Default is descending.\n #[structopt(long, short = 'o', default_value = 'descending', possible_values = &['ascending','descending'])]\n sort_order: String,\n\n /// Change the separator of the csv file to tab or anything else. 
Default is ','.\n #[structopt(long, short = 's', default_value = ',')]\n separator: char,\n\n /// Configure a custom formatter function for each column by providing a file containing a javascript object with csv column title as the key and a format function as the value.\n /// More information on the formatting functions and how to use them here: https://bootstrap-table.com/docs/api/column-options/#formatter.\n #[structopt(long, short = 'f')]\n formatter: Option,\n\n /// Pins the table until the given column such that scrolling to the right does not hide the given column and those before.\n #[structopt(long, short = 'p')]\n pin_until: Option,\n\n /// Relative output path for the report files. Default value is the current directory.\n #[structopt(default_value = '.')]\n output_path: String,\n },\n\n #[structopt(verbatim_doc_comment)]\n /// Creates a html file with a vega visualization of the given bam region that is then written to stdout.\n ///\n /// EXAMPLE:\n /// rbt plot-bam -b input.bam -g 2:132424-132924 -r input.fa > plot.html\n #[structopt(\n author = 'Felix Wiegand ',\n usage = 'rbt plot-bam [OPTIONS] --bam-path ... --reference --region > plot.html'\n )]\n PlotBam {\n /// BAM file to be visualized.\n #[structopt(long, short = 'b', required = true, parse(from_os_str))]\n bam_path: Vec,\n\n /// Path to the reference fasta file.\n #[structopt(long, short = 'r', parse(from_os_str))]\n reference: PathBuf,\n\n /// Chromosome and region for the visualization. Example: 2:132424-132924\n #[structopt(long, short = 'g')]\n region: Region,\n\n /// Set the maximum rows that will be shown in the alignment plots.\n #[structopt(long, short = 'd', default_value = '500')]\n max_read_depth: u32,\n },\n\n /// Creates report from a given VCF file including a visual plot\n /// for every variant with the given BAM and FASTA file.\n /// The VCF file has to be annotated with VEP, using the options --hgvs and --hgvsg.\n ///\n /// Examples:\n /// With current directory as default ouput path:\n /// rbt vcf-report fasta.fa --vcfs a=a.vcf b=b.vcf --bams a:sample1=a.bam b:sample1=b.bam\n /// With custom directory as default ouput path:\n /// rbt vcf-report fasta.fa --vcfs a=a.vcf b=b.vcf --bams a:sample1=a.bam b:sample1=b.bam -- my/output/path/\n /// With custom info tags in table report:\n /// rbt vcf-report fasta.fa --vcfs a=a.vcf b=b.vcf --bams a:sample1=a.bam b:sample1=b.bam --info PROB_SOMATIC PROB_GERMLINE\n #[structopt(\n author = 'Johannes Köster , Felix Wiegand '\n )]\n VcfReport {\n /// FASTA file containing the reference genome for the visual plot\n #[structopt()]\n fasta: String,\n\n /// VCF files to include (multi-sample). Group is the name that will be used in the oncoprint. There needs to be one corresponding BAM file for each sample of a VCF/BCF file. Please only use VCF/BCF files annotated by VEP.\n #[structopt(long, short = 'v', value_name = 'GROUP=VCF_FILE')]\n vcfs: Vec,\n\n /// VCF files to include (multi-sample). Group is the name that will be used in the oncoprint. There needs to be one corresponding BAM file for each sample of a VCF/BCF file. Please only use VCF/BCF files annotated by VEP.\n #[structopt(long, short = 'b', value_name = 'GROUP:SAMPLE=BAM_FILE')]\n bams: Vec,\n\n /// Set the maximum number of cells in the oncoprint per page. Lowering max-cells should improve the performance of the plots in the browser. 
Default value is 1000.\n #[structopt(long, short = 'c', default_value = '1000')]\n cells: u32,\n\n /// Set the maximum lines of reads that will be shown in the alignment plots. Default value is 500.\n #[structopt(long, short = 'd', default_value = '500')]\n max_read_depth: u32,\n\n /// Add custom values from the info field to each variant as a data attribute to access them via the custom javascript. Multiple fields starting with the same prefix can be added by placing '*' at the end of a prefix.\n #[structopt(long, short = 'i', value_name = 'INFO_TAG')]\n infos: Option>,\n\n /// Add custom values from the format field to each variant as a data attribute to access them via the custom javascript. All given format values will also be inserted into the main table.\n #[structopt(long, short = 'f', value_name = 'FORMAT_TAG')]\n formats: Option>,\n\n /// Add multiple keys from the info field of your vcf to the plots of the first and second stage of the report.\n #[structopt(long, value_name = 'PLOT_INFO')]\n plot_info: Option>,\n\n /// Change the default javascript file for the table-report to a custom one to add own plots or tables to the sidebar by appending these to an empty div in the HTML template.\n #[structopt(long, short = 'j', value_name = 'JS_FILE_PATH')]\n custom_js_template: Option,\n\n /// Add one or multiple js file (e.g. libraries) for usage in the custom-js-file. The ordering of the arguments will be the same as they will be imported.\n #[structopt(long, short = 'l', value_name = 'JS_FILE_PATH')]\n custom_js_files: Option>,\n\n /// Add a TSV file that contains one or multiple custom values for each sample for the oncoprint. First column has to be the sample name, followed by one or more columns with custom values. Make sure you include one row for each given sample.\n #[structopt(long, short = 't', value_name = 'TSV_FILE_PATH')]\n tsv: Option,\n\n /// Sets the number of threads used to build the table reports.\n #[structopt(long, default_value = '0')]\n threads: usize,\n\n /// Set the name of the annotation field generated by VEP.\n #[structopt(long, short = 'a', default_value = 'ANN')]\n annotation_field: String,\n\n /// Relative output path for the report files. Default value is the current directory.\n #[structopt(default_value = '.')]\n output_path: String,\n },\n\n /// Split a given VCF/BCF file into N chunks of approximately the same size. Breakends are kept together.\n /// Output type is always BCF.\n ///\n /// Example:\n /// rbt vcf-split input.bcf output1.bcf output2.bcf output3.bcf ... outputN.bcf\n #[structopt(author = 'Johannes Köster ')]\n VcfSplit {\n #[structopt(parse(from_os_str), help = 'Input VCF/BCF that shall be splitted.')]\n input: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n help = 'BCF files to split into. Breakends are kept together. 
Each file will contain approximately the same number of records.'\n )]\n output: Vec,\n },\n\n /// Tool to predict maximum likelihood fragment sequence from FASTQ or BAM files.\n ///\n /// Requirements:\n /// - starcode\n #[structopt(\n author = 'Johannes Köster , Henning Timm , Felix Mölder '\n )]\n CollapseReadsToFragments {\n #[structopt(subcommand)]\n cmd: CollapseReadsToFragmentsSubcommand,\n },\n\n /// Tool to build artifical reads from real BAM files with identical properties.\n #[structopt(author = 'Felix Mölder ')]\n BamAnonymize {\n #[structopt(parse(from_os_str), help = 'Input BAM file')]\n bam: PathBuf,\n #[structopt(parse(from_os_str), help = 'Input reference as fasta file')]\n input_ref: PathBuf,\n #[structopt(parse(from_os_str), help = 'Output BAM file with artificial reads')]\n output_bam: PathBuf,\n #[structopt(\n parse(from_os_str),\n help = 'Output fasta file with artificial reference'\n )]\n output_ref: PathBuf,\n #[structopt(help = 'chromosome name')]\n chr: String,\n #[structopt(help = '1-based start position')]\n start: u64,\n #[structopt(help = '1-based exclusive end position')]\n end: u64,\n #[structopt(\n long,\n short = 'p',\n help = 'Only simulates reads whos mates are both in defined range.'\n )]\n keep_only_pairs: bool,\n },\n\n /// Tool to compute stats on sequence file (from STDIN), output is in YAML with fields:\n /// - min: length of shortest sequence\n /// - max: length of longest sequence\n /// - average: average length of sequence\n /// - median: median length of sequence\n /// - nb_reads: number of reads\n /// - nb_bases: number of bases\n /// - n50: N50 of sequences\n ///\n /// Example:\n /// rbt sequence-stats < test.fasta\n /// rbt sequence-stats -q < test.fastq\n #[structopt(author = 'Pierre Marijon ')]\n SequenceStats {\n #[structopt(\n long,\n short = 'q',\n help = 'Flag to indicate the sequence in stdin is in fastq format.'\n )]\n fastq: bool,\n },\n}\n\n#[derive(StructOpt)]\npub enum CollapseReadsToFragmentsSubcommand {\n /// Tool to merge sets of reads from paired FASTQ files that share the UMI and have similar read sequence. 
The result is a maximum likelihood fragment sequence per set with base quality scores improved accordingly.\n ///\n /// Takes two FASTQ files (forward and reverse) and returns two FASTQ files in which all PCR duplicates have been merged into a consensus read.\n /// Duplicates are identified by a Unique Molecular Identifier (UMI).\n ///\n /// Assumptions:\n /// - Reads are of equal length\n /// - UMI is the prefix of the reads\n ///\n /// Example:\n /// rbt collapse-reads-to-fragments fastq \\n /// reads_1.fq reads_2.fq \ # input files\n /// merged_1.fq merged_2.fq \ # output files\n /// -l 13 \ # length of UMI\n /// -d 1 \ # max hamming distance of UMIs within a cluster\n /// -D 2 \ # max hamming distance of sequences within a cluster\n /// --umi-on-reverse # UMI is the prefix of the reverse read\n #[structopt(\n author = 'Johannes Köster , Henning Timm , Felix Mölder '\n )]\n Fastq {\n #[structopt(parse(from_os_str), help = 'Input FASTQ file with forward reads.')]\n fq1: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Input FASTQ file with reverse reads.')]\n fq2: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with forward reads')]\n consensus_fq1: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with reverse reads')]\n consensus_fq2: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n requires_all(&['insert-size', 'std-dev']),\n help = 'Output FASTQ file for overlapping consensus reads (Required for calculating overlapping consensus only)'\n )]\n consensus_fq3: Option,\n\n #[structopt(\n long,\n short = 'd',\n default_value = '1',\n help = 'Maximum hamming distance between the UMIs of any pair of reads in the same cluster.'\n )]\n max_umi_dist: usize,\n\n #[structopt(\n long,\n short = 'l',\n default_value = '8',\n help = 'Length of UMI in read.'\n )]\n umi_len: usize,\n\n #[structopt(long, short = 'D', possible_values = &['1','2','3','4','5','6','7','8'], default_value = '2', help = 'Maximum hamming distance between the sequences of any pair of reads in the same cluster.')]\n max_seq_dist: usize,\n\n #[structopt(long, short = 'u', help = 'Set if UMI is on reverse read')]\n umi_on_reverse: bool,\n\n #[structopt(\n long,\n help = 'Add list of reads that were merged for each consensus read. Note that this can yield very long FASTQ name lines which cannot be handled by some tools.'\n )]\n verbose_read_names: bool,\n\n #[structopt(\n long,\n short = 'i',\n requires = 'consensus-fq3',\n help = 'Expected insert size of sequenced fragment (Required for calculating overlapping consensus only)'\n )]\n insert_size: Option,\n\n #[structopt(\n long,\n short = 's',\n requires = 'consensus-fq3',\n help = 'Standard deviation of expected insert size. Defines search space of the most likely overlap. 
(Required for calculating overlapping consensus only)'\n )]\n std_dev: Option,\n },\n\n /// Tool to merge sets of PCR duplicate reads from a BAM file into one maximum likelihood fragment sequence each with accordingly improved base quality scores.\n ///\n /// Takes a BAM file and returns a BAM file in which all PCR duplicates have been merged into a consensus read.\n /// Duplicates must be marked by Picard Tools using the TAG_DUPLICATE_SET_MEMBERS option.\n ///\n /// Assumptions:\n /// - Reads are of equal length\n /// - Reads are marked by Picard Tools\n #[structopt(author = 'Felix Mölder ')]\n Bam {\n #[structopt(parse(from_os_str), help = 'Input BAM file with marked duplicates')]\n bam: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with forward reads')]\n consensus_fq1: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with reverse reads')]\n consensus_fq2: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n help = 'Output FASTQ file for overlapping consensus reads.'\n )]\n consensus_fq_se: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n help = 'Output FASTQ file for overlapping consensus reads.'\n )]\n skipped_bam: PathBuf,\n\n #[structopt(\n long,\n help = 'Add list of reads that were merged for each consensus read. Note that this can yield very long FASTQ name lines which cannot be handled by some tools.'\n )]\n verbose_read_names: bool,\n },\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/csv/report.rs","use crate::bcf::report::oncoprint::WriteErr;\nuse anyhow::Context as AnyhowContext;\nuse anyhow::Result;\nuse chrono::{DateTime, Local};\nuse derive_new::new;\nuse itertools::Itertools;\nuse lz_str::compress_to_utf16;\nuse serde_derive::Serialize;\nuse serde_json::json;\nuse std::collections::{HashMap, HashSet};\nuse std::convert::TryInto;\nuse std::fs;\nuse std::fs::File;\nuse std::io::{Read, Write};\nuse std::path::Path;\nuse std::str::FromStr;\nuse tera::{Context, Tera};\nuse xlsxwriter::*;\n\ntype LookupTable = HashMap>>;\n\n#[allow(clippy::too_many_arguments)]\npub(crate) fn csv_report(\n csv_path: &str,\n output_path: &str,\n rows_per_page: usize,\n separator: char,\n sort_column: Option<&str>,\n ascending: Option,\n formatter: Option<&str>,\n pin_until: Option<&str>,\n) -> Result<()> {\n let mut rdr = csv::ReaderBuilder::new()\n .delimiter(separator as u8)\n .from_path(csv_path)?;\n\n let header = rdr.headers()?.clone();\n let titles = header.iter().collect_vec();\n let mut table = Vec::new();\n let mut numeric = HashMap::new();\n let mut non_numeric = HashMap::new();\n let mut integer = HashMap::new();\n for res in rdr.records() {\n let row = res?;\n let mut table_entry = HashMap::new();\n for (i, tile) in titles.iter().enumerate() {\n table_entry.insert(tile.to_string(), row[i].to_owned());\n match f32::from_str(&row[i]) {\n Ok(_) => {\n let num = numeric.entry(tile.to_owned()).or_insert_with(|| 0);\n *num += 1;\n if i32::from_str(&row[i]).is_ok() {\n let int = integer.entry(tile.to_owned()).or_insert_with(|| 0);\n *int += 1;\n }\n }\n _ => {\n let no_num = non_numeric.entry(tile.to_owned()).or_insert_with(|| 0);\n *no_num += 1;\n }\n }\n }\n table.push(table_entry);\n }\n\n let mut is_numeric = HashMap::new();\n for title in &titles {\n let is_num = match (numeric.get(title), non_numeric.get(title)) {\n (Some(num), Some(no_num)) => num > no_num,\n (Some(_), None) => true,\n _ => false,\n };\n is_numeric.insert(title.to_owned(), is_num);\n }\n\n let mut is_integer = HashMap::new();\n for title in &titles {\n let is_int = 
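// integer heuristic: values parsing as i32 must outnumber non-numeric values\n // (same majority rule as is_numeric above)\n 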
match (integer.get(title), non_numeric.get(title)) {\n (Some(num), Some(no_num)) => num > no_num,\n (Some(_), None) => true,\n _ => false,\n };\n is_integer.insert(title.to_owned(), is_int);\n }\n\n let mut plot_data = HashMap::new();\n let mut num_plot_data = HashMap::new();\n let mut reasonable_plot = titles.iter().map(|t| (*t, true)).collect::>();\n\n for title in &titles {\n match is_numeric.get(title) {\n Some(true) => {\n let plot = num_plot(&table, title.to_string());\n num_plot_data.insert(title, plot);\n }\n Some(false) => {\n if let Some(plot) = nominal_plot(&table, title.to_string()) {\n plot_data.insert(title, plot);\n } else {\n plot_data.insert(title, vec![]);\n reasonable_plot.insert(title, false);\n }\n }\n _ => unreachable!(),\n };\n }\n\n match (sort_column, ascending) {\n (Some(column), Some(true)) => table.sort_by(|a, b| {\n match (\n f32::from_str(a.get(column).unwrap()),\n f32::from_str(b.get(column).unwrap()),\n ) {\n (Ok(float_a), Ok(float_b)) => float_a.partial_cmp(&float_b).unwrap(),\n _ => a.get(column).cmp(&b.get(column)),\n }\n }),\n (Some(column), Some(false)) => table.sort_by(|a, b| {\n match (\n f32::from_str(a.get(column).unwrap()),\n f32::from_str(b.get(column).unwrap()),\n ) {\n (Ok(float_a), Ok(float_b)) => float_b.partial_cmp(&float_a).unwrap(),\n _ => a.get(column).cmp(&b.get(column)),\n }\n }),\n (_, _) => {}\n }\n\n let wb = Workbook::new(&(output_path.to_owned() + '/report.xlsx'));\n let mut sheet = wb.add_worksheet(Some('Report'))?;\n for (i, title) in titles.iter().enumerate() {\n sheet.write_string(0, i.try_into()?, title, None)?;\n }\n\n for (i, row) in table.iter().enumerate() {\n for (c, title) in titles.iter().enumerate() {\n sheet.write_string(\n (i + 1).try_into()?,\n c.try_into()?,\n row.get(*title).unwrap(),\n None,\n )?;\n }\n }\n\n wb.close()?;\n\n let pages = if table.len() % rows_per_page == 0 && !table.is_empty() {\n (table.len() / rows_per_page) - 1\n } else {\n table.len() / rows_per_page\n };\n\n let plot_path = output_path.to_owned() + '/plots/';\n fs::create_dir(Path::new(&plot_path)).context(WriteErr::CantCreateDir {\n dir_path: plot_path.to_owned(),\n })?;\n\n for (n, title) in titles.iter().enumerate() {\n let mut templates = Tera::default();\n templates.add_raw_template('plot.js.tera', include_str!('plot.js.tera'))?;\n let mut context = Context::new();\n match is_numeric.get(title) {\n Some(true) => {\n context.insert(\n 'table',\n &json!(num_plot_data.get(title).unwrap()).to_string(),\n );\n context.insert('num', &true);\n }\n Some(false) => {\n context.insert('table', &json!(plot_data.get(title).unwrap()).to_string());\n context.insert('num', &false);\n }\n _ => unreachable!(),\n }\n context.insert('title', &title);\n context.insert('index', &n.to_string());\n let js = templates.render('plot.js.tera', &context)?;\n\n let file_path = plot_path.to_owned() + 'plot_' + &n.to_string() + '.js';\n let mut file = fs::File::create(file_path)?;\n file.write_all(js.as_bytes())?;\n }\n\n let index_path = output_path.to_owned() + '/indexes/';\n fs::create_dir(Path::new(&index_path)).context(WriteErr::CantCreateDir {\n dir_path: index_path.to_owned(),\n })?;\n\n let data_path = output_path.to_owned() + '/data/';\n fs::create_dir(Path::new(&data_path)).context(WriteErr::CantCreateDir {\n dir_path: data_path.to_owned(),\n })?;\n\n let mut prefixes = make_prefixes(\n table\n .clone()\n .into_iter()\n .map(|hm| {\n hm.into_iter()\n .filter(|(k, _)| !is_numeric.get(k.as_str()).unwrap())\n .collect()\n })\n .collect(),\n titles\n .clone()\n 
.into_iter()\n .filter(|e| !is_numeric.get(e).unwrap())\n .collect(),\n rows_per_page,\n );\n\n let bin = make_bins(\n table\n .clone()\n .into_iter()\n .map(|hm| {\n hm.into_iter()\n .filter(|(k, _)| {\n *is_numeric.get(k.as_str()).unwrap() && !is_integer.get(k.as_str()).unwrap()\n })\n .collect()\n })\n .collect(),\n titles\n .clone()\n .into_iter()\n .filter(|e| *is_numeric.get(e).unwrap() && !is_integer.get(e).unwrap())\n .collect(),\n rows_per_page,\n );\n\n let int_bin = make_bins_for_integers(\n table\n .clone()\n .into_iter()\n .map(|hm| {\n hm.into_iter()\n .filter(|(k, _)| *is_integer.get(k.as_str()).unwrap())\n .collect()\n })\n .collect(),\n titles\n .clone()\n .into_iter()\n .filter(|e| *is_integer.get(e).unwrap())\n .collect(),\n rows_per_page,\n );\n\n for (k, v) in bin.into_iter().chain(int_bin) {\n prefixes.insert(k, v);\n }\n\n let prefix_path = output_path.to_owned() + '/prefixes/';\n fs::create_dir(Path::new(&prefix_path)).context(WriteErr::CantCreateDir {\n dir_path: prefix_path.to_owned(),\n })?;\n\n for (n, title) in titles.iter().enumerate() {\n if let Some(prefix_table) = prefixes.get(title.to_owned()) {\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'prefix_table.html.tera',\n include_str!('prefix_table.html.tera'),\n )?;\n let mut context = Context::new();\n context.insert('title', title);\n context.insert('index', &n.to_string());\n context.insert('table', prefix_table);\n context.insert('numeric', is_numeric.get(title).unwrap());\n let html = templates.render('prefix_table.html.tera', &context)?;\n\n let file_path = output_path.to_owned() + '/prefixes/col_' + &n.to_string() + '.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n\n let title_path = prefix_path.to_owned() + '/col_' + &n.to_string() + '/';\n fs::create_dir(Path::new(&title_path)).context(WriteErr::CantCreateDir {\n dir_path: title_path.to_owned(),\n })?;\n\n for (prefix, values) in prefix_table {\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'lookup_table.html.tera',\n include_str!('lookup_table.html.tera'),\n )?;\n let mut context = Context::new();\n context.insert('title', title);\n context.insert('values', values);\n context.insert('index', &n.to_string());\n let html = templates.render('lookup_table.html.tera', &context)?;\n\n let file_path = title_path.to_owned() + prefix + '.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n }\n }\n }\n\n let formatter_object = if let Some(f) = formatter {\n let mut file_string = ''.to_string();\n let mut custom_file =\n File::open(f).context('Unable to open given file for formatting colums')?;\n custom_file\n .read_to_string(&mut file_string)\n .context('Unable to read string from formatting file')?;\n\n Some(file_string)\n } else {\n None\n };\n\n let pinned_columns = if let Some(col) = pin_until {\n titles.iter().position(|&r| r == col).context(\n 'Given value for --pin-until did not match any of the columns of your csv file',\n )? 
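// position() is 0-based, so add 1 to include the named column itself in the pinned count\n 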
+ 1\n } else {\n 0\n };\n\n let mut templates = Tera::default();\n templates.add_raw_template('csv_report.js.tera', include_str!('csv_report.js.tera'))?;\n let mut context = Context::new();\n context.insert('titles', &titles);\n context.insert('num', &is_numeric);\n context.insert('formatter', &formatter_object);\n context.insert('pinned_columns', &pinned_columns);\n context.insert('pin', &pin_until.is_some());\n\n let js = templates.render('csv_report.js.tera', &context)?;\n\n let file_path = output_path.to_owned() + '/js/csv_report.js';\n let mut file = fs::File::create(file_path)?;\n file.write_all(js.as_bytes())?;\n\n if table.is_empty() {\n let mut templates = Tera::default();\n templates.add_raw_template('csv_report.html.tera', include_str!('csv_report.html.tera'))?;\n templates.add_raw_template('data.js.tera', include_str!('data.js.tera'))?;\n let mut context = Context::new();\n context.insert('table', &table);\n context.insert('titles', &titles);\n context.insert('current_page', &1);\n context.insert('pages', &1);\n let local: DateTime = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n context.insert('is_reasonable', &reasonable_plot);\n\n let data: Vec> = Vec::new();\n\n context.insert(\n 'data',\n &json!(compress_to_utf16(&json!(data).to_string())).to_string(),\n );\n\n let js = templates.render('data.js.tera', &context)?;\n let js_file_path = output_path.to_owned() + '/data/index1.js';\n let mut js_file = fs::File::create(js_file_path)?;\n js_file.write_all(js.as_bytes())?;\n\n let html = templates.render('csv_report.html.tera', &context)?;\n let file_path = output_path.to_owned() + '/indexes/index1.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n } else {\n for (i, current_table) in table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n\n let mut templates = Tera::default();\n templates\n .add_raw_template('csv_report.html.tera', include_str!('csv_report.html.tera'))?;\n templates.add_raw_template('data.js.tera', include_str!('data.js.tera'))?;\n let mut context = Context::new();\n context.insert('table', ¤t_table);\n context.insert('titles', &titles);\n context.insert('current_page', &page);\n context.insert('pages', &(pages + 1));\n let local: DateTime = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n context.insert('is_reasonable', &reasonable_plot);\n\n let mut data = Vec::new();\n for row in current_table {\n let mut r = Vec::new();\n for title in &titles {\n r.push(row.get(*title).unwrap())\n }\n data.push(r);\n }\n\n context.insert(\n 'data',\n &json!(compress_to_utf16(&json!(data).to_string())).to_string(),\n );\n\n let html = templates.render('csv_report.html.tera', &context)?;\n let js = templates.render('data.js.tera', &context)?;\n\n let file_path = output_path.to_owned() + '/indexes/index' + &page.to_string() + '.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n\n let js_file_path = output_path.to_owned() + '/data/index' + &page.to_string() + '.js';\n let mut js_file = fs::File::create(js_file_path)?;\n js_file.write_all(js.as_bytes())?;\n }\n }\n Ok(())\n}\n\nfn num_plot(table: &[HashMap], column: String) -> Vec {\n let mut values = Vec::new();\n let mut nan = 0;\n for row in table {\n match f32::from_str(row.get(&column).unwrap()) {\n Ok(val) => values.push(val.to_owned()),\n _ => 
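// Illustrative sketch (hypothetical helper, not from this crate): the report
// stores the page count minus one and renders it as `pages + 1`, so an exactly
// full last page does not produce an empty trailing page.
fn page_count_minus_one(rows: usize, rows_per_page: usize) -> usize {
    if rows % rows_per_page == 0 && rows != 0 {
        rows / rows_per_page - 1
    } else {
        rows / rows_per_page
    }
}

fn main() {
    assert_eq!(page_count_minus_one(100, 25) + 1, 4); // four exactly full pages
    assert_eq!(page_count_minus_one(101, 25) + 1, 5); // a fifth, partially filled page
    assert_eq!(page_count_minus_one(0, 25) + 1, 1);   // empty tables still get one page
}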
nan += 1,\n }\n }\n let min = values.iter().fold(f32::INFINITY, |a, &b| a.min(b));\n let max = values.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));\n let bins = 20;\n let step = (max - min) / bins as f32;\n let mut binned_data = HashMap::new();\n let mut bin_borders = HashMap::new();\n for val in values {\n for i in 0..bins {\n let lower_bound = min + i as f32 * step;\n let upper_bound = lower_bound + step;\n let bin_name = String::from('bin') + &i.to_string();\n bin_borders.insert(bin_name.to_owned(), (lower_bound, upper_bound));\n let entry = binned_data.entry(bin_name.to_owned()).or_insert_with(|| 0);\n if ((i < (bins - 1) && val < upper_bound) || (i < bins && val <= upper_bound))\n && val >= lower_bound\n {\n *entry += 1;\n }\n }\n }\n if nan > 0 {\n bin_borders.insert(\n String::from('bin') + &bins.to_string(),\n (f32::NAN, f32::NAN),\n );\n binned_data.insert(String::from('bin') + &bins.to_string(), nan);\n }\n let mut plot_data = Vec::new();\n for (name, v) in binned_data {\n let (lower_bound, upper_bound) = bin_borders.get(&name).unwrap();\n let plot_record = BinnedPlotRecord {\n bin_start: *lower_bound,\n value: v,\n bin_end: *upper_bound,\n };\n plot_data.push(plot_record);\n }\n plot_data\n}\n\nfn nominal_plot(table: &[HashMap], column: String) -> Option> {\n let values = table\n .iter()\n .map(|row| row.get(&column).unwrap().to_owned())\n .filter(|s| !s.is_empty())\n .collect_vec();\n\n let mut count_values = HashMap::new();\n for v in values {\n let entry = count_values.entry(v.to_owned()).or_insert_with(|| 0);\n *entry += 1;\n }\n\n let mut plot_data = count_values\n .iter()\n .map(|(k, v)| PlotRecord {\n key: k.to_owned(),\n value: *v,\n })\n .collect_vec();\n\n if plot_data.len() > 10 {\n let unique_values: HashSet<_> = count_values.iter().map(|(_, v)| v).collect();\n if unique_values.len() <= 1 {\n return None;\n };\n plot_data.sort_by(|a, b| b.value.cmp(&a.value));\n plot_data = plot_data.into_iter().take(10).collect();\n }\n\n Some(plot_data)\n}\n\nfn make_prefixes(\n table: Vec>,\n titles: Vec<&str>,\n rows_per_page: usize,\n) -> LookupTable {\n let mut title_map = HashMap::new();\n for (i, partial_table) in table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n let prefix_len = 3;\n for (index, row) in partial_table.iter().enumerate() {\n for key in &titles {\n let value = &row[key.to_owned()].trim().to_owned();\n if !value.is_empty() {\n let entry = value.split_whitespace().take(1).collect_vec()[0];\n if entry.len() >= prefix_len {\n let prefix = entry.chars().take(prefix_len).collect::();\n let prefix_map = title_map\n .entry(key.to_string())\n .or_insert_with(HashMap::new);\n let values = prefix_map.entry(prefix).or_insert_with(Vec::new);\n values.push((value.to_owned(), page, index));\n }\n }\n }\n }\n // write stuff to output map with page like so: HashMap>>\n }\n title_map\n}\n\nfn make_bins(\n table: Vec>,\n titles: Vec<&str>,\n rows_per_page: usize,\n) -> LookupTable {\n let mut title_map = HashMap::new();\n for title in titles {\n let mut values = Vec::new();\n for row in &table {\n if let Ok(val) = f32::from_str(row.get(title).unwrap()) {\n values.push(val.to_owned())\n }\n }\n let min = values.iter().fold(f32::INFINITY, |a, &b| a.min(b));\n let max = values.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));\n let bins = 20;\n let step = (max - min) / bins as f32;\n let mut bin_data = HashMap::new();\n for val in values {\n for i in 0..bins {\n let lower_bound = min + i as f32 * step;\n let upper_bound = lower_bound + step;\n let bin_name = 
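// Illustrative sketch (hypothetical helper, not from this crate): make_prefixes
// above indexes non-numeric columns by the first three characters of a cell's
// first whitespace-separated token, mapping each prefix to the cell value and
// its (page, row) coordinates so the report's lookup can jump to the right page.
use std::collections::HashMap;

fn prefix_index(column: &[&str], rows_per_page: usize) -> HashMap<String, Vec<(String, usize, usize)>> {
    let prefix_len = 3;
    let mut index: HashMap<String, Vec<(String, usize, usize)>> = HashMap::new();
    for (i, chunk) in column.chunks(rows_per_page).enumerate() {
        let page = i + 1;
        for (row, value) in chunk.iter().enumerate() {
            let value = value.trim();
            if let Some(token) = value.split_whitespace().next() {
                if token.len() >= prefix_len {
                    let prefix: String = token.chars().take(prefix_len).collect();
                    index.entry(prefix).or_default().push((value.to_owned(), page, row));
                }
            }
        }
    }
    index
}

fn main() {
    let index = prefix_index(&["missense variant", "mismatch", "stop gained"], 2);
    assert_eq!(index["mis"].len(), 2); // both page-1 rows share the prefix "mis"
    assert_eq!(index["sto"], vec![("stop gained".to_owned(), 2, 0)]);
}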
lower_bound.to_string() + '-' + &upper_bound.to_string();\n let entry = bin_data\n .entry(bin_name.to_owned())\n .or_insert_with(HashSet::new);\n if ((i < (bins - 1) && val < upper_bound) || (i < bins && val <= upper_bound))\n && val >= lower_bound\n {\n entry.insert(val.to_string());\n }\n }\n }\n\n let mut value_on_page = HashMap::new();\n for (i, partial_table) in table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n for (index, row) in partial_table.iter().enumerate() {\n if let Ok(val) = f32::from_str(row.get(title).unwrap()) {\n let entry = value_on_page\n .entry(val.to_string())\n .or_insert_with(HashSet::new);\n entry.insert((page, index));\n }\n }\n // write stuff to output map with page like so: HashMap>>\n }\n let mut bin_map = HashMap::new();\n for (bin, values) in bin_data {\n for v in values {\n let entry = bin_map.entry(bin.to_string()).or_insert_with(Vec::new);\n for (page, index) in value_on_page.get(&v).unwrap() {\n entry.push((v.to_string(), *page, *index));\n }\n }\n }\n title_map.insert(title.to_string(), bin_map);\n }\n\n title_map\n}\n\nfn make_bins_for_integers(\n table: Vec>,\n titles: Vec<&str>,\n rows_per_page: usize,\n) -> LookupTable {\n let mut title_map = HashMap::new();\n for title in titles {\n let mut values = Vec::new();\n for row in &table {\n if let Ok(val) = i32::from_str(row.get(title).unwrap()) {\n values.push(val.to_owned())\n }\n }\n let min = *values.iter().min().unwrap();\n let max = *values.iter().max().unwrap();\n let bins = 20;\n let step = if max - min <= 20 {\n 1\n } else {\n (max - min) / bins\n };\n let mut bin_data = HashMap::new();\n for val in values {\n for i in 0..bins {\n let lower_bound = min + i * step;\n let upper_bound = if i == bins { max } else { lower_bound + step };\n let bin_name = lower_bound.to_string() + '-' + &upper_bound.to_string();\n let entry = bin_data\n .entry(bin_name.to_owned())\n .or_insert_with(HashSet::new);\n if ((i < (bins - 1) && val < upper_bound) || (i < bins && val <= upper_bound))\n && val >= lower_bound\n {\n entry.insert(val.to_string());\n }\n }\n }\n\n let mut value_on_page = HashMap::new();\n for (i, partial_table) in table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n for (index, row) in partial_table.iter().enumerate() {\n if let Ok(val) = i32::from_str(row.get(title).unwrap()) {\n let entry = value_on_page\n .entry(val.to_string())\n .or_insert_with(HashSet::new);\n entry.insert((page, index));\n }\n }\n // write stuff to output map with page like so: HashMap>>\n }\n let mut bin_map = HashMap::new();\n for (bin, values) in bin_data {\n for v in values {\n let entry = bin_map.entry(bin.to_string()).or_insert_with(Vec::new);\n for (page, index) in value_on_page.get(&v).unwrap() {\n entry.push((v.to_string(), *page, *index));\n }\n }\n }\n title_map.insert(title.to_string(), bin_map);\n }\n\n title_map\n}\n\n#[derive(new, Serialize, Debug, Clone)]\nstruct PlotRecord {\n key: String,\n value: u32,\n}\n\n#[derive(new, Serialize, Debug, Clone)]\nstruct BinnedPlotRecord {\n bin_start: f32,\n bin_end: f32,\n value: u32,\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/csv/mod.rs","//! Tools that work on CSV files.\npub mod report;\n","mit" "rust-bio-tools","./rust-bio-tools/src/main.rs","//! 
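// Illustrative sketch (hypothetical helper, not from this crate): num_plot,
// make_bins and make_bins_for_integers above all split a numeric column's value
// range into 20 equal-width bins. Note that in the original condition the
// `i < bins` arm always holds inside the loop, so a value lying exactly on a
// bin border satisfies both neighbouring bins; the sketch shows the half-open
// reading that the `i < (bins - 1)` arm suggests: [lower, upper) for every bin
// except the last, which also keeps the maximum.
fn bin_of(val: f32, min: f32, max: f32, bins: usize) -> Option<usize> {
    let step = (max - min) / bins as f32;
    (0..bins).find(|&i| {
        let lower = min + i as f32 * step;
        let upper = lower + step;
        let last = i == bins - 1;
        val >= lower && (val < upper || (last && val <= upper))
    })
}

fn main() {
    assert_eq!(bin_of(0.0, 0.0, 10.0, 20), Some(0));
    assert_eq!(bin_of(4.9, 0.0, 10.0, 20), Some(9));
    assert_eq!(bin_of(10.0, 0.0, 10.0, 20), Some(19)); // the maximum lands in the last bin
}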
Documentation for Rust Bio Tools\nuse anyhow::{Context, Result};\nuse itertools::Itertools;\nuse log::LevelFilter;\nuse rayon::prelude::*;\nuse std::collections::HashMap;\nuse std::fs;\nuse std::path::Path;\nuse structopt::StructOpt;\n\nuse cli::Command::*;\n\npub mod bam;\npub mod bcf;\nmod cli;\npub mod common;\npub mod csv;\npub mod fastq;\npub mod sequences_stats;\n\nfn main() -> Result<()> {\n let args = cli::Rbt::from_args();\n\n fern::Dispatch::new()\n .format(|out, message, _| out.finish(format_args!('{}', message)))\n .level(if args.verbose {\n LevelFilter::Debug\n } else {\n LevelFilter::Info\n })\n .chain(std::io::stderr())\n .apply()\n .unwrap();\n\n match args.cmd {\n FastqSplit { chunks } => {\n fastq::split::split(&chunks.iter().map(|p| p.to_str().unwrap()).collect_vec())?\n }\n FastqFilter { ids } => fastq::filter::filter(&ids).unwrap(),\n BamDepth {\n bam_path,\n max_read_length,\n include_flags,\n exclude_flags,\n min_mapq,\n } => bam::depth::depth(\n &bam_path,\n max_read_length,\n include_flags,\n exclude_flags,\n min_mapq,\n )?,\n VcfToTxt {\n info,\n format,\n genotypes,\n with_filter,\n } => bcf::to_txt::to_txt(\n info.iter().map(|s| s as &str).collect_vec().as_slice(),\n format.iter().map(|s| s as &str).collect_vec().as_slice(),\n genotypes,\n with_filter,\n )?,\n VcfMatch {\n vcf,\n max_dist,\n max_len_diff,\n } => bcf::match_variants::match_variants(vcf, max_dist, max_len_diff)?,\n VcfBaf {} => bcf::baf::calculate_baf()?,\n VcfFixIupacAlleles {} => bcf::fix_iupac_alleles::fix_iupac_alleles()?,\n VcfAnnotateDgidb {\n vcf,\n api_path,\n field,\n datasources,\n genes_per_request,\n } => bcf::annotate_dgidb::annotate_dgidb(\n vcf,\n api_path,\n &*field,\n datasources.as_deref(),\n genes_per_request,\n )?,\n CsvReport {\n csv_path,\n rows_per_page,\n sort_column,\n sort_order,\n separator,\n formatter,\n pin_until,\n output_path,\n } => {\n if !Path::new(&output_path).exists() {\n fs::create_dir_all(Path::new(&output_path))?;\n }\n bcf::report::embed_js(&output_path, false, None, vec![])?;\n bcf::report::embed_css(&output_path, false)?;\n bcf::report::embed_html(&output_path)?;\n\n let order = match sort_order.as_str() {\n 'ascending' => Some(true),\n 'descending' => Some(false),\n _ => None,\n };\n\n csv::report::csv_report(\n &csv_path,\n &output_path,\n rows_per_page as usize,\n separator,\n sort_column.as_deref(),\n order,\n formatter.as_deref(),\n pin_until.as_deref(),\n )?\n }\n PlotBam {\n bam_path,\n reference,\n region,\n max_read_depth,\n } => bam::plot::plot_bam::plot_bam(&bam_path, reference, ®ion, max_read_depth)?,\n VcfReport {\n fasta,\n vcfs,\n bams,\n cells,\n max_read_depth,\n infos,\n formats,\n plot_info,\n custom_js_template,\n custom_js_files,\n tsv,\n threads,\n annotation_field,\n output_path,\n } => {\n let mut sample_calls = HashMap::new();\n let mut bam_paths = HashMap::new();\n if !Path::new(&output_path).exists() {\n fs::create_dir(Path::new(&output_path)).context(format!(\n 'Couldn't create output directory at {}. 
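// Illustrative sketch (hypothetical helper, not from this crate): the
// --sort-order value handled above collapses to an Option<bool> that the csv
// report interprets as ascending, descending, or no sorting at all.
fn sort_order_flag(raw: &str) -> Option<bool> {
    match raw {
        "ascending" => Some(true),
        "descending" => Some(false),
        _ => None,
    }
}

fn main() {
    assert_eq!(sort_order_flag("ascending"), Some(true));
    assert_eq!(sort_order_flag("descending"), Some(false));
    assert_eq!(sort_order_flag("none"), None);
}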
Please make sure the path exists.',\n output_path\n ))?;\n }\n let js_files_vec = custom_js_files\n .clone()\n .map_or_else(Vec::new, |values| values.into_iter().collect());\n let js_file_names = if let Some(files) = custom_js_files {\n files\n .iter()\n .map(|f| {\n f.split('/')\n .collect_vec()\n .pop()\n .unwrap_or_else(|| {\n panic!('Unable to extract file name from path: {:?}', f)\n })\n .to_owned()\n })\n .collect()\n } else {\n vec![]\n };\n bcf::report::embed_js(\n &output_path,\n true,\n custom_js_template.as_deref(),\n js_files_vec,\n )?;\n bcf::report::embed_css(&output_path, true)?;\n bcf::report::embed_html(&output_path)?;\n let detail_path = output_path.to_owned() + '/details/';\n fs::create_dir(Path::new(&detail_path))?;\n for vcf in vcfs {\n let v: Vec<_> = vcf.split('=').collect();\n match sample_calls.insert(v[0].to_owned(), v[1].to_owned()) {\n None => {}\n _ => panic!('Found duplicate sample name {}. Please make sure the provided sample names are unique.', v[0].to_owned())\n }\n }\n for bam in bams {\n let b: Vec<_> = bam.split('=').collect();\n let c: Vec<_> = b[0].split(':').collect();\n let rec = bam_paths.entry(c[0].to_owned()).or_insert_with(Vec::new);\n rec.push((c[1].to_owned(), b[1].to_owned()))\n }\n\n rayon::ThreadPoolBuilder::new()\n .num_threads(threads)\n .build_global()?;\n\n sample_calls.par_iter().for_each(|(sample, sample_call)| {\n bcf::report::table_report::table_report(\n sample_call,\n &fasta,\n bam_paths\n .get(sample)\n .unwrap_or_else(|| panic!('No bam provided for sample {}', sample)),\n &output_path,\n sample,\n infos.clone(),\n formats.clone(),\n max_read_depth,\n js_file_names.clone(),\n &annotation_field,\n )\n .unwrap_or_else(|e| {\n panic!('Failed building table report for sample {}. {}', sample, e)\n });\n });\n\n bcf::report::oncoprint::oncoprint(\n &sample_calls,\n &output_path,\n cells,\n tsv.as_deref(),\n plot_info,\n &annotation_field,\n )?\n }\n VcfSplit { input, output } => bcf::split::split(input, output.as_ref())?,\n CollapseReadsToFragments { cmd } => match cmd {\n cli::CollapseReadsToFragmentsSubcommand::Fastq {\n fq1,\n fq2,\n consensus_fq1,\n consensus_fq2,\n consensus_fq3,\n umi_len,\n max_seq_dist,\n max_umi_dist,\n umi_on_reverse,\n verbose_read_names,\n insert_size,\n std_dev,\n } => fastq::collapse_reads_to_fragments::call_consensus_reads_from_paths(\n fq1,\n fq2,\n consensus_fq1,\n consensus_fq2,\n consensus_fq3,\n umi_len,\n max_seq_dist,\n max_umi_dist,\n umi_on_reverse,\n verbose_read_names,\n insert_size,\n std_dev,\n )?,\n cli::CollapseReadsToFragmentsSubcommand::Bam {\n bam,\n consensus_fq1,\n consensus_fq2,\n consensus_fq_se,\n skipped_bam,\n verbose_read_names,\n } => bam::collapse_reads_to_fragments::call_consensus_reads_from_paths(\n bam,\n consensus_fq1,\n consensus_fq2,\n consensus_fq_se,\n skipped_bam,\n verbose_read_names,\n )?,\n },\n BamAnonymize {\n bam,\n input_ref,\n output_bam,\n output_ref,\n chr,\n start,\n end,\n keep_only_pairs,\n } => bam::anonymize_reads::anonymize_reads(\n bam,\n input_ref,\n output_bam,\n output_ref,\n chr,\n start - 1..end - 1,\n keep_only_pairs,\n )?,\n SequenceStats { fastq } => sequences_stats::stats(fastq)?,\n }\n Ok(())\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/split.rs","use std::collections::HashMap;\nuse std::path::Path;\n\nuse anyhow::Context;\nuse anyhow::Result;\nuse itertools::Itertools;\nuse rust_htslib::bcf;\nuse rust_htslib::bcf::Read;\n\npub fn split>(input_bcf: P, output_bcfs: &[P]) -> Result<()> {\n let n_records = 
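// Illustrative sketch (hypothetical helpers, not from this crate): the
// vcf-report arguments handled above are plain "sample=path" pairs for VCFs and
// "sample:track=path" triples for BAMs, grouped per sample.
use std::collections::HashMap;

fn parse_vcf_arg(arg: &str) -> (String, String) {
    let (sample, path) = arg.split_once('=').expect("expected sample=path");
    (sample.to_owned(), path.to_owned())
}

fn parse_bam_arg(arg: &str) -> (String, (String, String)) {
    let (key, path) = arg.split_once('=').expect("expected sample:track=path");
    let (sample, track) = key.split_once(':').expect("expected sample:track");
    (sample.to_owned(), (track.to_owned(), path.to_owned()))
}

fn main() {
    let mut sample_calls = HashMap::new();
    let (sample, vcf) = parse_vcf_arg("a=tests/report-test.vcf");
    // Duplicate sample names are rejected in the real code; insert returns None here.
    assert!(sample_calls.insert(sample, vcf).is_none());

    let mut bam_paths: HashMap<String, Vec<(String, String)>> = HashMap::new();
    let (sample, track) = parse_bam_arg("a:tumor=tests/test-report.bam");
    bam_paths.entry(sample).or_default().push(track);

    assert_eq!(sample_calls["a"], "tests/report-test.vcf");
    assert_eq!(bam_paths["a"][0].0, "tumor");
}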
bcf::Reader::from_path(input_bcf.as_ref())\n .context('error reading input VCF/BCF')?\n .records()\n .fold(0_u64, |count, _| count + 1);\n let mut reader = bcf::Reader::from_path(input_bcf).context('error reading input VCF/BCF')?;\n let header = bcf::Header::from_template(reader.header());\n let mut bnd_cache = HashMap::new();\n\n let chunk_size = n_records / output_bcfs.len() as u64;\n\n let mut writers = output_bcfs\n .iter()\n .map(|path| {\n bcf::Writer::from_path(path, &header, false, bcf::Format::Bcf)\n .context('error creating output VCF/BCF')\n })\n .collect::>>()?;\n\n for (rec, i) in reader.records().zip(0..) {\n let rec = rec?;\n\n let mut chunk = i / (chunk_size + 1);\n if rec.is_bnd() {\n if let Some(group) = BreakendGroup::from(&rec) {\n let event_chunk = match group {\n BreakendGroup::Event(id) => bnd_cache.entry(id).or_insert(chunk),\n BreakendGroup::Mates(ids) => {\n let ids = ids.clone();\n bnd_cache.entry(ids.concat()).or_insert(chunk)\n }\n };\n chunk = *event_chunk;\n }\n };\n let writer = &mut writers[chunk as usize];\n writer.write(&rec)?;\n }\n\n Ok(())\n}\n\n#[derive(Eq, PartialEq, Hash, Clone, Debug)]\nenum BreakendGroup {\n Event(Vec),\n Mates(Vec>),\n}\n\nimpl BreakendGroup {\n fn from(rec: &bcf::Record) -> Option {\n if let Some(event) = rec.event() {\n Some(BreakendGroup::Event(event))\n } else if let Some(mut mates) = rec.mateids() {\n let id = rec.id();\n mates.push(id);\n mates.sort();\n Some(BreakendGroup::Mates(mates))\n } else {\n None\n }\n }\n}\n\ntype Id = Vec;\n\ntrait BndRecord {\n fn is_bnd(&self) -> bool;\n fn event(&self) -> Option;\n fn mateids(&self) -> Option>;\n}\n\nimpl BndRecord for bcf::Record {\n fn is_bnd(&self) -> bool {\n self.info(b'SVTYPE').string().map_or(false, |entries| {\n entries.map_or(false, |entries| entries[0] == b'BND')\n })\n }\n\n fn event(&self) -> Option {\n if let Ok(Some(event)) = self.info(b'EVENT').string() {\n Some(event[0].to_owned())\n } else {\n None\n }\n }\n\n fn mateids(&self) -> Option> {\n match self.info(b'MATEID').string() {\n Ok(Some(s)) => Some(s.clone().into_iter().map(|v| v.to_vec()).collect_vec()),\n _ => None,\n }\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/report/table_report/alignment_reader.rs","extern crate rust_htslib;\n\nuse self::rust_htslib::bam::FetchDefinition;\nuse crate::bcf::report::table_report::fasta_reader::read_fasta;\nuse crate::common::Region;\nuse anyhow::Result;\nuse itertools::Itertools;\nuse rust_htslib::bam::record::CigarStringView;\nuse rust_htslib::{bam, bam::Read};\nuse serde::Serialize;\nuse std::collections::HashMap;\nuse std::path::Path;\n\n#[derive(Serialize, Clone, Debug, PartialEq)]\npub enum Marker {\n A,\n T,\n C,\n G,\n N,\n Deletion,\n Insertion,\n Match,\n Pairing,\n}\n\n#[derive(Clone, Debug)]\npub struct Alignment {\n sequence: String,\n pos: i64,\n length: u16,\n flags: Vec,\n name: String,\n cigar: CigarStringView,\n paired: bool,\n mate_pos: i64,\n tid: i32,\n mate_tid: i32,\n mapq: u8,\n aux: HashMap,\n}\n\n#[derive(Serialize, Clone, Debug, PartialEq)]\npub struct AlignmentNucleobase {\n pub marker_type: Marker,\n pub bases: String,\n pub start_position: f64,\n pub end_position: f64,\n pub flags: Vec,\n pub name: String,\n pub read_start: u32,\n pub read_end: u32,\n pub mapq: u8,\n pub cigar: String,\n aux: HashMap,\n}\n\n#[derive(Serialize, Clone, Debug, PartialEq)]\npub struct AlignmentMatch {\n pub marker_type: Marker,\n pub start_position: f64,\n pub end_position: f64,\n pub flags: Vec,\n pub name: String,\n pub read_start: u32,\n pub 
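// Illustrative sketch (hypothetical helper, not from this crate): vcf-split
// deals records out in order, `chunk = i / (chunk_size + 1)`, but pins every
// member of a breakend group (same EVENT tag, or the sorted record ID plus its
// MATEIDs) to the chunk of the first member seen, so mates never end up in
// different output files.
use std::collections::HashMap;

fn assign_chunks(groups: &[Option<&str>], n_records: u64, n_outputs: u64) -> Vec<u64> {
    let chunk_size = n_records / n_outputs;
    let mut pinned: HashMap<String, u64> = HashMap::new();
    groups
        .iter()
        .zip(0u64..)
        .map(|(group, i)| {
            let chunk = i / (chunk_size + 1);
            match group {
                Some(id) => *pinned.entry((*id).to_owned()).or_insert(chunk),
                None => chunk,
            }
        })
        .collect()
}

fn main() {
    // Six records over two outputs: record 4 is the mate of record 0 and is
    // pulled back into chunk 0 instead of starting chunk 1.
    let groups = [Some("bnd1"), None, None, None, Some("bnd1"), None];
    assert_eq!(assign_chunks(&groups, 6, 2), vec![0, 0, 0, 0, 0, 1]);
}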
read_end: u32,\n pub mapq: u8,\n pub cigar: String,\n aux: HashMap,\n}\n\npub fn decode_flags(code: u16) -> Vec {\n let flags_map = vec![\n 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800,\n ];\n let mut read_map = Vec::new();\n\n for flag in flags_map {\n if (flag & code) == flag {\n read_map.push(flag);\n }\n }\n\n read_map\n}\n\npub fn read_indexed_bam>(path: P, region: &Region) -> Result> {\n let mut bam = bam::IndexedReader::from_path(&path)?;\n let chrom = ®ion.target;\n let (from, to) = (region.start, region.end);\n let tid = bam.header().tid(chrom.as_bytes()).unwrap() as i32;\n\n let mut alignments: Vec = Vec::new();\n\n bam.fetch(FetchDefinition::Region(tid, from as i64, to as i64))?;\n\n for r in bam.records() {\n let rec = r?;\n let a = make_alignment(&rec);\n alignments.push(a);\n }\n\n Ok(alignments)\n}\n\nfn make_alignment(record: &bam::Record) -> Alignment {\n let has_pair = record.is_paired();\n\n let mate_pos = record.mpos();\n\n let tid = record.tid();\n let mtid = record.mtid();\n\n //Cigar String\n let cigstring = record.cigar();\n\n //Position\n let pos = record.pos();\n\n //Länge\n let le = record.seq().len() as u16;\n\n let aux: HashMap = record\n .aux_iter()\n .map(|r| r.unwrap())\n .map(|(r, v)| (String::from_utf8(r.to_owned()).unwrap(), aux_to_string(v)))\n .collect();\n\n let seq = record.seq().as_bytes();\n let sequenz = String::from_utf8(seq).unwrap();\n\n //Flags\n let flgs = record.flags();\n let flag_string = decode_flags(flgs);\n\n //Name\n let n = record.qname().to_owned();\n let name = String::from_utf8(n).unwrap();\n\n Alignment {\n sequence: sequenz,\n pos,\n length: le,\n cigar: cigstring,\n flags: flag_string,\n name,\n paired: has_pair,\n mate_pos,\n tid,\n mate_tid: mtid,\n mapq: record.mapq(),\n aux,\n }\n}\n\npub fn make_nucleobases + std::fmt::Debug>(\n fasta_path: P,\n region: &Region,\n snippets: Vec,\n) -> Result<(Vec, Vec)> {\n let mut bases: Vec = Vec::new();\n let mut matches: Vec = Vec::new();\n\n let ref_bases = read_fasta(fasta_path, region, false)?;\n let (from, to) = (region.start, region.end);\n\n for snippet in snippets {\n let mut cigar_offset: i64 = 0;\n let mut read_offset: i64 = 0;\n let base_string = snippet.sequence.clone();\n let char_vec: Vec = base_string.chars().collect();\n\n let mut soft_clip_begin = true;\n\n let temp_snippet = snippet.clone();\n\n if temp_snippet.paired\n && (temp_snippet.pos + temp_snippet.length as i64) < temp_snippet.mate_pos\n && temp_snippet.tid == temp_snippet.mate_tid\n {\n let pairing = AlignmentMatch {\n marker_type: Marker::Pairing,\n start_position: (temp_snippet.pos + temp_snippet.length as i64) as f64 - 0.5,\n end_position: temp_snippet.mate_pos as f64 - 0.5,\n flags: temp_snippet.flags.clone(),\n name: temp_snippet.name.clone(),\n read_start: temp_snippet.pos as u32,\n read_end: (temp_snippet.mate_pos + 100) as u32,\n mapq: snippet.mapq,\n cigar: snippet.cigar.to_string(),\n aux: snippet.aux.clone(),\n };\n\n matches.push(pairing);\n }\n\n for c in snippet.cigar.iter() {\n let mut match_count = 0;\n let mut match_start = 0;\n let mut match_ending = false;\n\n match c {\n rust_htslib::bam::record::Cigar::Match(c) => {\n for _i in 0..rust_htslib::bam::record::Cigar::Match(*c).len() {\n let snip = snippet.clone();\n let b = char_vec[cigar_offset as usize];\n\n if snip.pos + read_offset >= from as i64\n && snip.pos + read_offset < to as i64\n {\n let ref_index = snip.pos + read_offset - from as i64;\n let ref_base = &ref_bases[ref_index as usize];\n\n if 
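// Worked example (standalone, not from this crate): decode_flags keeps every
// SAM FLAG bit that is set in the record's flag word. For flag word 0x63
// (paired, proper pair, mate reverse strand, first in pair) the decoded list is
// exactly those four bits.
fn decode_flags(code: u16) -> Vec<u16> {
    [0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800]
        .into_iter()
        .filter(|&flag| flag & code == flag)
        .collect()
}

fn main() {
    assert_eq!(decode_flags(0x63), vec![0x1, 0x2, 0x20, 0x40]);
    assert_eq!(decode_flags(0x4), vec![0x4]); // unmapped, nothing else set
}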
ref_base.get_marker_type() == b {\n // Create long rule while bases match\n if match_count == 0 {\n match_start = snip.pos as i64 + read_offset;\n }\n match_count += 1;\n match_ending = true;\n\n //m = Marker::Match; // Match with reference fasta\n } else {\n let (mtch, base) = make_markers(\n snip.clone(),\n b,\n read_offset,\n match_start,\n match_count,\n );\n if let Some(m) = mtch {\n matches.push(m)\n }\n bases.push(base);\n\n match_count = 0;\n match_start = 0;\n\n match_ending = false;\n }\n }\n cigar_offset += 1;\n read_offset += 1;\n }\n\n if match_ending {\n let mtch =\n end_mismatch_detection(snippet.clone(), match_start, match_count);\n matches.push(mtch);\n }\n\n soft_clip_begin = false;\n }\n rust_htslib::bam::record::Cigar::Ins(c) => {\n let snip = snippet.clone();\n let p: f64 = snip.pos as f64 + read_offset as f64 - 0.5;\n let m: Marker = Marker::Insertion;\n let rs;\n let re;\n\n let mut b = String::from('');\n for _i in 0..rust_htslib::bam::record::Cigar::Ins(*c).len() {\n let char = char_vec[cigar_offset as usize];\n b.push(char);\n\n cigar_offset += 1;\n }\n\n if snip.paired && snip.tid == snip.mate_tid {\n if snip.pos < snip.mate_pos {\n re = snip.mate_pos + 100;\n rs = snip.pos;\n } else {\n rs = snip.mate_pos;\n re = snip.pos + snip.length as i64;\n }\n } else {\n rs = snip.pos;\n re = snip.pos + snip.length as i64;\n }\n\n let base = AlignmentNucleobase {\n marker_type: m,\n bases: b,\n start_position: p as f64 + 0.7,\n end_position: p as f64 + 1.3,\n flags: snip.flags,\n name: snip.name,\n read_start: rs as u32,\n read_end: re as u32,\n mapq: snippet.mapq,\n cigar: snippet.cigar.to_string(),\n aux: snippet.aux.clone(),\n };\n\n if from as f64 <= (base.start_position + 0.5)\n && (base.start_position + 0.5) <= to as f64\n {\n bases.push(base);\n }\n\n soft_clip_begin = false;\n }\n rust_htslib::bam::record::Cigar::Del(c) => {\n for _i in 0..rust_htslib::bam::record::Cigar::Del(*c).len() {\n let snip = snippet.clone();\n let marker = Marker::Deletion;\n let position = snip.pos as i64 + read_offset;\n let flags = snip.flags;\n let name = snip.name;\n let read_start;\n let read_end;\n let empty_bases = String::from('');\n\n if snip.paired && snip.tid == snip.mate_tid {\n if snip.pos < snip.mate_pos {\n read_end = snip.mate_pos + 100;\n read_start = snip.pos;\n } else {\n read_start = snip.mate_pos;\n read_end = snip.pos + snip.length as i64;\n }\n } else {\n read_start = snip.pos;\n read_end = snip.pos + snip.length as i64;\n }\n\n let base = AlignmentNucleobase {\n marker_type: marker,\n bases: empty_bases,\n start_position: position as f64 + 0.5,\n end_position: position as f64 + 1.5,\n flags,\n name,\n read_start: read_start as u32,\n read_end: read_end as u32,\n mapq: snippet.mapq,\n cigar: snippet.cigar.to_string(),\n aux: snippet.aux.clone(),\n };\n\n read_offset += 1;\n\n if from as f64 <= (base.start_position + 0.5)\n && (base.start_position + 0.5) <= to as f64\n {\n bases.push(base);\n }\n }\n\n soft_clip_begin = false;\n }\n rust_htslib::bam::record::Cigar::SoftClip(c) => {\n for _i in 0..rust_htslib::bam::record::Cigar::SoftClip(*c).len() {\n let snip = snippet.clone();\n let b = char_vec[cigar_offset as usize];\n\n if snip.pos + read_offset >= from as i64\n && snip.pos + read_offset < to as i64\n {\n let ref_index = snip.pos + read_offset - from as i64;\n let ref_base = &ref_bases[ref_index as usize];\n\n if ref_base.get_marker_type() == b {\n // Create long rule while bases match\n if match_count == 0 {\n match_start = snip.pos as i64 + read_offset;\n }\n 
match_count += 1;\n match_ending = true;\n } else {\n let (mtch, base) = make_markers(\n snip.clone(),\n b,\n read_offset,\n match_start,\n match_count,\n );\n if let Some(m) = mtch {\n matches.push(m)\n }\n bases.push(base);\n\n match_count = 0;\n match_start = 0;\n\n match_ending = false;\n }\n }\n\n cigar_offset += 1;\n if !soft_clip_begin {\n read_offset += 1;\n }\n }\n\n if match_ending {\n let mtch =\n end_mismatch_detection(snippet.clone(), match_start, match_count);\n matches.push(mtch);\n }\n\n soft_clip_begin = false;\n }\n _ => {\n soft_clip_begin = false;\n }\n }\n }\n }\n Ok((bases, matches))\n}\n\nfn make_markers(\n snip: Alignment,\n base: char,\n read_offset: i64,\n match_start: i64,\n match_count: i64,\n) -> (Option, AlignmentNucleobase) {\n let marker: Marker = match base {\n // Mismatch\n 'A' => Marker::A,\n 'T' => Marker::T,\n 'C' => Marker::C,\n 'N' => Marker::N,\n 'G' => Marker::G,\n _ => Marker::Deletion,\n };\n\n let position = snip.pos as i64 + read_offset;\n let flags = snip.flags;\n let name = snip.name;\n\n let read_start: i64;\n let read_end: i64;\n\n if snip.paired && snip.tid == snip.mate_tid {\n if snip.pos < snip.mate_pos {\n read_end = snip.mate_pos + 100;\n read_start = snip.pos;\n } else {\n read_start = snip.mate_pos;\n read_end = snip.pos + snip.length as i64;\n }\n } else {\n read_start = snip.pos;\n read_end = snip.pos + snip.length as i64;\n }\n\n let mut mtch = None;\n\n if match_count > 0 {\n // First mismatch detection must lead to new creation of all previous matches\n mtch = Some(AlignmentMatch {\n marker_type: Marker::Match,\n start_position: match_start as f64 + 0.5,\n end_position: (match_start + match_count - 1) as f64 + 1.5,\n flags: flags.clone(),\n name: name.clone(),\n read_start: read_start as u32,\n read_end: read_end as u32,\n mapq: snip.mapq,\n cigar: snip.cigar.to_string(),\n aux: snip.aux.clone(),\n });\n }\n\n let base = AlignmentNucleobase {\n marker_type: marker,\n bases: base.to_string(),\n start_position: position as f64 + 0.5,\n end_position: position as f64 + 1.5,\n flags,\n name,\n read_start: read_start as u32,\n read_end: read_end as u32,\n mapq: snip.mapq,\n cigar: snip.cigar.to_string(),\n aux: snip.aux,\n };\n (mtch, base)\n}\n\nfn end_mismatch_detection(snip: Alignment, match_start: i64, match_count: i64) -> AlignmentMatch {\n let f = snip.flags;\n let n = snip.name;\n\n let rs: i64;\n let re: i64;\n\n if snip.paired && snip.tid == snip.mate_tid {\n if snip.pos < snip.mate_pos {\n re = snip.mate_pos + 100;\n rs = snip.pos;\n } else {\n rs = snip.mate_pos;\n re = snip.pos + snip.length as i64;\n }\n } else {\n rs = snip.pos;\n re = snip.pos + snip.length as i64;\n }\n\n AlignmentMatch {\n marker_type: Marker::Match,\n start_position: match_start as f64 + 0.5,\n end_position: (match_start + match_count - 1) as f64 + 1.5,\n flags: f,\n name: n,\n read_start: rs as u32,\n read_end: re as u32,\n mapq: snip.mapq,\n cigar: snip.cigar.to_string(),\n aux: snip.aux,\n }\n}\n\nfn aux_to_string(aux: rust_htslib::bam::record::Aux) -> String {\n match aux {\n rust_htslib::bam::record::Aux::Char(c) => String::from_utf8(vec![c]).unwrap(),\n rust_htslib::bam::record::Aux::I8(i) => i.to_string(),\n rust_htslib::bam::record::Aux::U8(i) => i.to_string(),\n rust_htslib::bam::record::Aux::I16(i) => i.to_string(),\n rust_htslib::bam::record::Aux::U16(i) => i.to_string(),\n rust_htslib::bam::record::Aux::I32(i) => i.to_string(),\n rust_htslib::bam::record::Aux::U32(i) => i.to_string(),\n rust_htslib::bam::record::Aux::Float(i) => 
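// Illustrative sketch (hypothetical helper, not from this crate): while the
// code above walks a CIGAR string it maintains two cursors, one into the read
// sequence (cigar_offset) and one along the reference (read_offset). Per base,
// match operations advance both, insertions only the sequence cursor, deletions
// only the reference cursor, and a soft clip advances the reference cursor only
// when it is not the leading clip.
fn cursor_deltas(op: char, len: i64, leading_soft_clip: bool) -> (i64, i64) {
    // returns (sequence delta, reference delta)
    match op {
        'M' => (len, len),
        'I' => (len, 0),
        'D' => (0, len),
        'S' if leading_soft_clip => (len, 0),
        'S' => (len, len),
        _ => (0, 0),
    }
}

fn main() {
    // 3S5M2I4M: the leading clip and the insertion consume sequence only, so
    // the sequence cursor ends at 14 while the reference cursor ends at 9.
    let ops = [('S', 3, true), ('M', 5, false), ('I', 2, false), ('M', 4, false)];
    let (seq, reference) = ops.iter().fold((0, 0), |(s, r), &(op, len, lead)| {
        let (ds, dr) = cursor_deltas(op, len, lead);
        (s + ds, r + dr)
    });
    assert_eq!((seq, reference), (14, 9));
}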
i.to_string(),\n rust_htslib::bam::record::Aux::Double(i) => i.to_string(),\n rust_htslib::bam::record::Aux::String(s) => s.to_owned(),\n rust_htslib::bam::record::Aux::HexByteArray(i) => i.to_string(),\n rust_htslib::bam::record::Aux::ArrayI8(a) => a.iter().join(','),\n rust_htslib::bam::record::Aux::ArrayU8(a) => a.iter().join(','),\n rust_htslib::bam::record::Aux::ArrayU16(a) => a.iter().join(','),\n rust_htslib::bam::record::Aux::ArrayI16(a) => a.iter().join(','),\n rust_htslib::bam::record::Aux::ArrayU32(a) => a.iter().join(','),\n rust_htslib::bam::record::Aux::ArrayI32(a) => a.iter().join(','),\n rust_htslib::bam::record::Aux::ArrayFloat(a) => a.iter().join(','),\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/report/table_report/mod.rs","mod alignment_reader;\npub mod create_report_table;\nmod fasta_reader;\nmod static_reader;\n\nuse crate::bcf::report::oncoprint::WriteErr;\nuse crate::bcf::report::table_report::create_report_table::make_table_report;\nuse anyhow::{Context, Result};\nuse std::fs;\nuse std::path::Path;\n\n#[allow(clippy::too_many_arguments)]\npub fn table_report(\n vcf: &str,\n fasta: &str,\n bam: &[(String, String)],\n output_path: &str,\n sample: &str,\n info_strings: Option>,\n format_strings: Option>,\n max_read_depth: u32,\n js_files: Vec,\n annotation_field: &str,\n) -> Result<()> {\n let detail_path = output_path.to_owned() + '/details/' + sample;\n fs::create_dir(Path::new(&detail_path)).context(WriteErr::CantCreateDir {\n dir_path: detail_path.to_owned(),\n })?;\n\n let plot_path = detail_path + '/plots/';\n fs::create_dir(Path::new(&plot_path)).context(WriteErr::CantCreateDir {\n dir_path: plot_path.to_owned(),\n })?;\n\n make_table_report(\n Path::new(vcf),\n Path::new(fasta),\n bam,\n info_strings,\n format_strings,\n sample.to_owned(),\n output_path,\n max_read_depth,\n js_files,\n annotation_field,\n )\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/report/table_report/create_report_table.rs","use crate::bcf::report::table_report::fasta_reader::{get_fasta_lengths, read_fasta};\nuse crate::bcf::report::table_report::static_reader::{get_static_reads, Variant};\nuse crate::common::Region;\nuse anyhow::anyhow;\nuse anyhow::Context as AnyhowContext;\nuse anyhow::Result;\nuse chrono::{DateTime, Local};\nuse itertools::Itertools;\nuse jsonm::packer::{PackOptions, Packer};\nuse log::warn;\nuse lz_str::compress_to_utf16;\nuse rust_htslib::bcf::header::{HeaderView, TagType};\nuse rust_htslib::bcf::{HeaderRecord, Read, Record};\nuse rustc_serialize::json::Json;\nuse serde::Serialize;\nuse serde_json::{json, Value};\nuse std::collections::{BTreeMap, HashMap, HashSet};\nuse std::fs::File;\nuse std::io::Write;\nuse std::path::Path;\nuse tera::{Context, Tera};\n\n#[derive(Serialize, Clone, Debug, PartialEq)]\npub enum VariantType {\n Deletion,\n Insertion,\n Duplicate,\n Inversion,\n Variant,\n}\n\n#[derive(Serialize, Clone, Debug, PartialEq)]\npub struct Report {\n id: String,\n name: String,\n position: i64,\n reference: String,\n var_type: VariantType,\n alternatives: Option,\n ann: Option>>,\n format: Option>>,\n info: Option>>,\n json_format: Option,\n json_info: Option,\n vis: BTreeMap,\n}\n\n#[allow(clippy::too_many_arguments)]\npub(crate) fn make_table_report(\n vcf_path: &Path,\n fasta_path: &Path,\n bam_sample_path: &[(String, String)],\n infos: Option>,\n formats: Option>,\n sample: String,\n output_path: &str,\n max_read_depth: u32,\n js_files: Vec,\n annotation_field: &str,\n) -> Result<()> {\n // HashMap>, Vec\n // let mut reports = 
HashMap::new();\n let mut ann_indices = HashMap::new();\n let mut vcf = rust_htslib::bcf::Reader::from_path(&vcf_path).unwrap();\n let header = vcf.header().clone();\n let header_records = header.header_records();\n let ann_field_description: Vec<_> = get_ann_description(header_records, annotation_field)?;\n let samples: Vec<_> = header\n .samples()\n .iter()\n .map(|s| std::str::from_utf8(s).map(|s| s.to_owned()))\n .collect::>()?;\n\n for (i, field) in ann_field_description.iter().enumerate() {\n ann_indices.insert(field, i);\n }\n\n let reference_lengths = get_fasta_lengths(fasta_path)?;\n\n for v in vcf.records() {\n let mut variant = v.unwrap();\n\n let n = header.rid2name(variant.rid().unwrap()).unwrap().to_owned();\n let i = variant.id();\n\n let chrom = String::from_utf8(n).unwrap();\n\n let id = String::from_utf8(i).unwrap();\n\n let pos = variant.pos();\n let end_pos = match variant.info(b'END').integer() {\n Ok(Some(end_pos)) => {\n // Subtraction of 0.5 because of the 0-based positioning in the whole plot\n let end_pos = end_pos[0] as f64 - 0.5; // -1 due to 0-basing, + 0.5 du to end pos\n Some(end_pos)\n }\n _ => None,\n };\n\n let (info_tags, json_info_tags) = if infos.is_some() {\n let mut info_map = HashMap::new();\n for tag in infos.clone().unwrap() {\n if tag.chars().last().unwrap().eq(&'*') {\n let prefix_tags = header\n .header_records()\n .iter()\n .filter_map(|header_record| match header_record {\n HeaderRecord::Info { key: _, values } => {\n if values['ID'].starts_with(&tag[..(tag.len() - 1)]) {\n Some(values['ID'].to_owned())\n } else {\n None\n }\n }\n _ => None,\n })\n .collect::>();\n for prefix_tag in prefix_tags {\n read_tag_entries(&mut info_map, &mut variant, &header, &prefix_tag)?;\n }\n } else {\n read_tag_entries(&mut info_map, &mut variant, &header, &tag)?;\n }\n }\n (\n Some(info_map.clone()),\n Some(serde_json::to_string(&json!(info_map))?),\n )\n } else {\n (None, None)\n };\n\n let (format_tags, json_format_tags) = if formats.is_some() {\n let mut format_map = BTreeMap::new();\n for tag in formats.clone().unwrap() {\n let (tag_type, _) = header.format_type(tag.as_bytes())?;\n match tag_type {\n TagType::String => {\n let values = variant.format(tag.as_bytes()).string()?;\n for (i, v) in values.clone().into_iter().enumerate() {\n let value = String::from_utf8(v.to_owned())?;\n let entry = format_map\n .entry(tag.to_owned())\n .or_insert_with(BTreeMap::new);\n entry.insert(samples[i].clone(), json!(value));\n }\n }\n TagType::Float => {\n let values = variant.format(tag.as_bytes()).float()?;\n for (i, v) in values.iter().enumerate() {\n let value = v.to_vec();\n let entry = format_map\n .entry(tag.to_owned())\n .or_insert_with(BTreeMap::new);\n entry.insert(samples[i].clone(), json!(value));\n }\n }\n TagType::Integer => {\n let values = variant.format(tag.as_bytes()).integer()?;\n for (i, v) in values.iter().enumerate() {\n let value = v.to_vec();\n let entry = format_map\n .entry(tag.to_owned())\n .or_insert_with(BTreeMap::new);\n entry.insert(samples[i].clone(), json!(value));\n }\n }\n _ => {}\n }\n }\n (\n Some(format_map.clone()),\n Some(serde_json::to_string(&json!(format_map))?),\n )\n } else {\n (None, None)\n };\n\n let alleles: Vec<_> = variant\n .alleles()\n .into_iter()\n .map(|allele| allele.to_owned())\n .collect();\n\n let mut annotations = Vec::new();\n\n let mut alterations = Vec::new();\n let mut hgvsgs = Vec::new();\n\n if let Some(ann) = variant.info(annotation_field.as_bytes()).string()? 
{\n for entry in ann.iter() {\n let fields = entry.split(|c| *c == b'|').collect_vec();\n\n let get_field = |field: &str| {\n let field: String = std::str::from_utf8(\n fields[*ann_indices\n .get(&field.to_owned())\n .context({\n format!(\n 'No field named {} found. Please only use VEP-annotated VCF-files.',\n field\n )\n })\n .unwrap()],\n ).unwrap().to_string();\n field\n };\n\n let alteration = if !get_field('HGVSp').is_empty() {\n get_field('HGVSp')\n } else if !get_field('HGVSg').is_empty() {\n get_field('HGVSg')\n } else {\n continue;\n };\n\n let allele = if !get_field('Allele').is_empty() {\n get_field('Allele')\n } else {\n continue;\n };\n\n if !get_field('HGVSg').is_empty() {\n hgvsgs.push((allele, get_field('HGVSg')));\n }\n\n alterations.push(alteration);\n\n let mut ann_strings = Vec::new();\n for f in fields {\n let attr = String::from_utf8(f.to_owned())?;\n ann_strings.push(attr);\n }\n annotations.push(ann_strings);\n }\n }\n\n if !alleles.is_empty() {\n let ref_vec = alleles[0].to_owned();\n let ref_allele = String::from_utf8(ref_vec)?;\n\n let len: u8 = ref_allele.len() as u8;\n\n for allel in alleles.iter().skip(1) {\n let alt = allel.as_slice();\n let var_string = String::from('Variant');\n let var_type: VariantType;\n let alternatives: Option;\n let end_position: f64;\n let plot_start_position;\n\n let hgvsg: String = if alleles.len() > 2 {\n if let Some(hgvsg) = hgvsgs\n .iter()\n .find(|(a, _)| *a == std::str::from_utf8(allel).unwrap())\n {\n hgvsg.0.to_owned()\n } else {\n warn!('Found variant {} at position {}:{} without HGVSg field for every given allele.', &id, &chrom, &pos + 1);\n continue;\n }\n } else {\n let mut unique_hgsvgs = hgvsgs.iter().map(|(_, b)| b).unique().collect_vec();\n if unique_hgsvgs.len() > 1 {\n warn!('Found variant {} at position {}:{} with multiple HGVSg values and only one alternative allele.', &id, &chrom, &pos + 1);\n }\n if let Some(hgvsg) = unique_hgsvgs.pop() {\n hgvsg.to_owned()\n } else {\n warn!(\n 'Found variant {} at position {}:{} with no HGVSg value.',\n &id,\n &chrom,\n &pos + 1\n );\n continue;\n }\n };\n\n match alt {\n b'' => {\n var_type = VariantType::Deletion;\n alternatives = None;\n end_position = end_pos.unwrap();\n plot_start_position = pos as f64 - 0.5;\n }\n b'' => {\n var_type = VariantType::Inversion;\n let rev: String = ref_allele.chars().rev().collect();\n alternatives = Some(rev.clone());\n end_position = end_pos.unwrap();\n plot_start_position = pos as f64 - 0.5;\n }\n b'' => {\n var_type = VariantType::Duplicate;\n let dup: String = [ref_allele.clone(), ref_allele.clone()].concat();\n alternatives = Some(dup.clone());\n end_position = end_pos.unwrap();\n plot_start_position = pos as f64 - 0.5;\n }\n _ => {\n let mut alt_allele = String::from('');\n\n for c in alt {\n if *c as char != '<' && *c as char != '>' {\n alt_allele.push(*c as char);\n }\n }\n\n match alt_allele.len() {\n a if a < ref_allele.len() => {\n plot_start_position = pos as f64 + 0.5; // start position + 1 due to alignment with deletions from bam (example: ref: ACTT alt: A -> deletion is just CTT)\n end_position = pos as f64 - 0.5 + len as f64;\n var_type = VariantType::Deletion;\n alternatives = Some(alt_allele.clone());\n }\n a if a > ref_allele.len() => {\n plot_start_position = pos as f64;\n end_position = pos as f64 + len as f64;\n var_type = VariantType::Insertion;\n alternatives = Some(alt_allele.clone());\n }\n _ => {\n plot_start_position = pos as f64 - 0.5;\n end_position = pos as f64 - 0.5 + len as f64;\n var_type = 
VariantType::Variant;\n alternatives = Some(alt_allele.clone());\n }\n }\n }\n }\n\n let var = Variant {\n marker_type: var_string,\n reference: ref_allele.clone(),\n alternatives,\n start_position: plot_start_position + 1.0,\n end_position: end_position + 1.0,\n row: -1,\n var_type,\n };\n\n let mut visualizations = BTreeMap::new();\n\n for (sample, bam) in bam_sample_path {\n let bam_path = Path::new(bam);\n let fasta_length = *reference_lengths.get(&chrom).unwrap();\n let visualization: String;\n if pos < 75 {\n let content = create_report_data(\n fasta_path,\n Some(var.clone()),\n bam_path,\n &Region {\n target: chrom.clone(),\n start: 0,\n end: end_position as u64 + 75,\n },\n max_read_depth,\n )?;\n visualization = manipulate_json(content, 0, end_position as u64 + 75)?;\n } else if end_position as i64 + 75 >= fasta_length as i64 {\n let content = create_report_data(\n fasta_path,\n Some(var.clone()),\n bam_path,\n &Region {\n target: chrom.clone(),\n start: pos as u64 - 75,\n end: fasta_length - 1,\n },\n max_read_depth,\n )?;\n visualization =\n manipulate_json(content, pos as u64 - 75, fasta_length - 1)?;\n } else {\n let content = create_report_data(\n fasta_path,\n Some(var.clone()),\n bam_path,\n &Region {\n target: chrom.clone(),\n start: pos as u64 - 75,\n end: end_position as u64 + 75,\n },\n max_read_depth,\n )?;\n visualization =\n manipulate_json(content, pos as u64 - 75, end_position as u64 + 75)?;\n }\n\n visualizations.insert(sample.to_owned(), visualization.to_string());\n }\n\n let report_data = Report {\n id: id.clone(),\n name: chrom.clone(),\n position: pos + 1,\n reference: ref_allele.clone(),\n var_type: var.var_type,\n alternatives: var.alternatives,\n ann: Some(annotations.clone()),\n format: format_tags.clone(),\n info: info_tags.clone(),\n json_format: json_format_tags.clone(),\n json_info: json_info_tags.clone(),\n vis: visualizations,\n };\n\n let escaped_hgvsg = escape_hgvsg(&hgvsg);\n let detail_path = Path::new(&output_path)\n .join(Path::new('details'))\n .join(Path::new(sample.as_str()));\n let local: DateTime = Local::now();\n\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'table_report.html.tera',\n include_str!('report_table.html.tera'),\n )?;\n let mut context = Context::new();\n context.insert('ann_description', &json!(ann_field_description).to_string());\n context.insert('variant', &report_data);\n context.insert('hgvsg', &hgvsg);\n context.insert('escaped_hgvsg', &escaped_hgvsg);\n context.insert('description', &ann_field_description);\n context.insert('sample', &sample);\n context.insert('js_imports', &js_files);\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n\n let html = templates.render('table_report.html.tera', &context)?;\n let filepath = detail_path.join(Path::new(&escaped_hgvsg).with_extension('html'));\n let mut file = File::create(filepath)?;\n file.write_all(html.as_bytes())?;\n\n let mut templates = Tera::default();\n templates.add_raw_template('plot.js.tera', include_str!('plot.js.tera'))?;\n\n let plot_path = detail_path\n .join(Path::new('plots'))\n .join(Path::new(&escaped_hgvsg).with_extension('js'));\n\n let mut plot_context = Context::new();\n plot_context.insert('variant', &report_data);\n let plot_html = templates.render('plot.js.tera', &plot_context)?;\n let mut plot_file = File::create(plot_path)?;\n plot_file.write_all(plot_html.as_bytes())?;\n }\n }\n }\n Ok(())\n}\n\nfn escape_hgvsg(hgvsg: &str) -> String {\n 
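// Illustrative sketch (hypothetical helper, not from this crate): each variant
// is plotted with roughly 75 bp of context on both sides, with the window
// clamped to the start of the contig and to its last base (fasta_length - 1),
// mirroring the three branches above (coordinates simplified to integers).
fn plot_window(pos: u64, end_position: u64, fasta_length: u64) -> (u64, u64) {
    if pos < 75 {
        (0, end_position + 75)
    } else if end_position + 75 >= fasta_length {
        (pos - 75, fasta_length - 1)
    } else {
        (pos - 75, end_position + 75)
    }
}

fn main() {
    assert_eq!(plot_window(10, 11, 10_000), (0, 86));              // clamped at the contig start
    assert_eq!(plot_window(500, 501, 10_000), (425, 576));         // full symmetric window
    assert_eq!(plot_window(9_990, 9_991, 10_000), (9_915, 9_999)); // clamped at the contig end
}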
hgvsg.replace('.', '_').replace('>', '_').replace(':', '_')\n}\n\npub(crate) fn get_ann_description(\n header_records: Vec,\n annotation_field: &str,\n) -> Result> {\n for rec in header_records {\n if let rust_htslib::bcf::HeaderRecord::Info { key: _, values } = rec {\n if values.get('ID').unwrap() == annotation_field {\n let description = values.get('Description').unwrap();\n let fields: Vec<_> = description.split('|').collect();\n let mut owned_fields = Vec::new();\n for mut entry in fields {\n entry = entry.trim();\n entry = entry.trim_start_matches(\n &'\'Consequence annotations from Ensembl VEP. Format: ',\n );\n entry = entry.trim_end_matches(&'\'');\n entry = entry.trim();\n owned_fields.push(entry.to_owned());\n }\n return Ok(owned_fields);\n }\n }\n }\n Err(anyhow!(\n 'Could not find any annotations by VEP. Please only use VEP-annotated VCF-files.'\n ))\n}\n\npub(crate) fn read_tag_entries(\n info_map: &mut HashMap>,\n variant: &mut Record,\n header: &HeaderView,\n tag: &str,\n) -> Result<()> {\n let (tag_type, _) = header.info_type(tag.as_bytes())?;\n match tag_type {\n TagType::String => {\n if let Some(values) = variant.info(tag.as_bytes()).string()? {\n for v in values.iter() {\n let value = String::from_utf8(Vec::from(v.to_owned()))?;\n let entry = info_map.entry(tag.to_owned()).or_insert_with(Vec::new);\n entry.push(json!(value));\n }\n }\n }\n TagType::Float => {\n if let Some(values) = variant.info(tag.as_bytes()).float()? {\n for v in values.iter() {\n let entry = info_map.entry(tag.to_owned()).or_insert_with(Vec::new);\n entry.push(json!(v));\n }\n }\n }\n TagType::Integer => {\n if let Some(values) = variant.info(tag.as_bytes()).integer()? {\n for v in values.iter() {\n let entry = info_map.entry(tag.to_owned()).or_insert_with(Vec::new);\n entry.push(json!(v));\n }\n }\n }\n _ => {}\n }\n Ok(())\n}\n\npub(crate) fn create_report_data + std::fmt::Debug>(\n fasta_path: P,\n variant: Option,\n bam_path: P,\n region: &Region,\n max_read_depth: u32,\n) -> Result {\n let mut data = Vec::new();\n\n for f in read_fasta(&fasta_path, region, true)? 
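// Worked example (standalone, not from this crate's tests): escape_hgvsg turns
// an HGVSg/HGVSp string into a file-system-safe name for the per-variant detail
// page by replacing '.', '>' and ':' with underscores.
fn escape_hgvsg(hgvsg: &str) -> String {
    hgvsg.replace('.', "_").replace('>', "_").replace(':', "_")
}

fn main() {
    assert_eq!(
        escape_hgvsg("ENST00000557334.5:c.35G>A"),
        "ENST00000557334_5_c_35G_A"
    );
}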
{\n let nucleobase = json!(f);\n data.push(nucleobase);\n }\n\n let (bases, matches) = get_static_reads(\n bam_path,\n fasta_path,\n region,\n max_read_depth,\n variant.as_ref(),\n )?;\n\n for b in bases {\n let base = json!(b);\n data.push(base);\n }\n\n for m in matches {\n let mat = json!(m);\n data.push(mat);\n }\n\n if variant.is_some() {\n data.push(json!(variant));\n }\n\n Ok(Json::from_str(&json!(data).to_string()).unwrap())\n}\n\n/// Inserts the json containing the genome data into the vega specs.\n/// It also changes keys and values of the json data for the vega plot to look better and compresses the json with jsonm.\npub(crate) fn manipulate_json(data: Json, from: u64, to: u64) -> Result {\n let json_string = include_str!('vegaSpecs.json');\n\n let mut vega_specs: Value = serde_json::from_str(json_string)?;\n let values: Value = serde_json::from_str(&data.to_string())?;\n let mut values = json!({'values': values, 'name': 'fasta'});\n\n let v = values['values'].as_array().unwrap().clone();\n\n let mut rows = HashSet::new();\n\n for (i, _) in v.iter().enumerate() {\n let k = v[i]['marker_type'].clone().as_str().unwrap().to_owned();\n let r = v[i]['row'].clone().as_i64().unwrap();\n rows.insert(r);\n\n if k == 'A' || k == 'T' || k == 'G' || k == 'C' || k == 'U' || k == 'N' {\n values['values'][i]['base'] = values['values'][i]['marker_type'].clone();\n } else if k == 'Deletion'\n || k == 'Match'\n || k == 'Pairing'\n || k == 'Duplicate'\n || k == 'Inversion'\n {\n values['values'][i]['typ'] = values['values'][i]['marker_type'].clone();\n } else if k == 'Insertion' {\n values['values'][i]['typ'] = values['values'][i]['marker_type'].clone();\n values['values'][i]['inserts'] = values['values'][i]['bases'].clone();\n }\n }\n\n vega_specs['width'] = json!(700);\n vega_specs['height'] = json!(core::cmp::max(10 * rows.len() + 60, 203));\n let domain = json!([from, to]);\n\n vega_specs['scales'][0]['domain'] = domain;\n vega_specs['data'][1] = values;\n\n let mut packer = Packer::new();\n packer.set_max_dict_size(100000);\n let options = PackOptions::new();\n let packed_specs = packer.pack(&vega_specs, &options)?;\n\n Ok(json!(compress_to_utf16(&packed_specs.to_string())).to_string())\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/report/table_report/fasta_reader.rs","use crate::common::Region;\nuse anyhow::Context;\nuse anyhow::Result;\nuse bio::io::fasta;\nuse itertools::Itertools;\nuse serde::Serialize;\nuse std::collections::HashMap;\nuse std::path::Path;\n\npub fn read_fasta + std::fmt::Debug>(\n path: P,\n region: &Region,\n compensate_0_basing: bool,\n) -> Result> {\n let mut reader = fasta::IndexedReader::from_file(&path).unwrap();\n let index =\n fasta::Index::with_fasta_file(&path).context('error reading index file of input FASTA')?;\n let _sequences = index.sequences();\n\n let mut seq: Vec = Vec::new();\n\n reader.fetch(®ion.target, region.start, region.end)?;\n reader.read(&mut seq)?;\n\n let mut fasta = Vec::new();\n let mut ind = region.start;\n if compensate_0_basing {\n ind += 1;\n }\n for a in seq {\n let base = char::from(a);\n let marker = base.to_uppercase().collect_vec().pop().unwrap();\n let b = Nucleobase {\n position: ind,\n marker_type: marker,\n row: 0,\n repeat: base.is_lowercase(),\n };\n fasta.push(b);\n ind += 1;\n }\n\n Ok(fasta)\n}\n\npub fn get_fasta_lengths(path: &Path) -> Result> {\n let index = fasta::Index::with_fasta_file(&path).context('error reading input FASTA')?;\n let sequences = index.sequences();\n Ok(sequences\n .iter()\n .map(|s| 
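// Illustrative sketch (hypothetical helper, not from this crate): the vega spec
// patched in manipulate_json gets a fixed width of 700 px and a height that
// grows with the number of distinct read rows but never drops below 203 px.
fn plot_height(rows: usize) -> usize {
    core::cmp::max(10 * rows + 60, 203)
}

fn main() {
    assert_eq!(plot_height(5), 203);  // shallow pile-up: minimum height
    assert_eq!(plot_height(30), 360); // deep pile-up: taller plot
}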
(s.name.to_owned(), s.len))\n .collect())\n}\n\n#[derive(Serialize, Clone, Debug, PartialEq)]\npub struct Nucleobase {\n position: u64,\n marker_type: char,\n row: u8,\n repeat: bool,\n}\n\nimpl Nucleobase {\n pub fn get_marker_type(&self) -> char {\n self.marker_type\n }\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/report/table_report/static_reader.rs","use crate::bcf::report::table_report::alignment_reader::{\n make_nucleobases, read_indexed_bam, AlignmentMatch, AlignmentNucleobase,\n};\nuse crate::bcf::report::table_report::create_report_table::VariantType;\nuse crate::common::Region;\nuse anyhow::Result;\nuse rand::rngs::StdRng;\nuse rand::seq::IteratorRandom;\nuse rand_core::SeedableRng;\nuse serde::Serialize;\nuse std::collections::{BTreeMap, HashSet};\nuse std::path::Path;\n\n#[derive(Serialize, Clone, Debug)]\npub struct StaticAlignmentMatch {\n #[serde(flatten)]\n alignment: AlignmentMatch,\n row: u16,\n}\n\n#[derive(Serialize, Clone)]\npub struct StaticAlignmentNucleobase {\n #[serde(flatten)]\n nucleobase: AlignmentNucleobase,\n row: u16,\n}\n\n#[derive(Serialize, Clone)]\npub struct Variant {\n pub(crate) marker_type: String,\n pub(crate) reference: String,\n pub(crate) alternatives: Option,\n pub(crate) start_position: f64,\n pub(crate) end_position: f64,\n pub(crate) row: i8,\n pub(crate) var_type: VariantType,\n}\n\nfn calc_rows(\n reads: Vec,\n matches: Vec,\n max_read_depth: u32,\n variant: Option<&Variant>,\n) -> (Vec, Vec) {\n let mut row_ends = vec![0; 10000];\n\n let mut read_names: BTreeMap = BTreeMap::new();\n\n let mut reads_wr: Vec = Vec::new();\n let mut matches_wr: Vec = Vec::new();\n\n let mut max_row = 0;\n\n for r in matches {\n let overlaps = if let Some(variant_entry) = variant {\n r.read_start < variant_entry.start_position as u32\n && r.read_end > variant_entry.end_position as u32\n } else {\n true\n };\n\n if overlaps {\n let mut row: u16 = 0;\n\n if read_names.contains_key(&r.name) {\n row = *read_names.get(&r.name).unwrap();\n } else {\n for (i, _) in row_ends.iter().enumerate().take(10000).skip(1) {\n if r.read_start > row_ends[i] {\n if i > max_row {\n max_row = i;\n }\n row = i as u16;\n row_ends[i] = r.read_end;\n read_names.insert(r.name.clone(), i as u16);\n break;\n }\n }\n }\n\n let base = StaticAlignmentMatch {\n alignment: r.clone(),\n row,\n };\n\n matches_wr.push(base);\n }\n }\n\n for r in reads {\n let overlaps = if let Some(variant_entry) = variant {\n r.read_start < variant_entry.start_position as u32\n && r.read_end > variant_entry.end_position as u32\n } else {\n true\n };\n\n if overlaps {\n let mut row: u16 = 0;\n\n if read_names.contains_key(&r.name) {\n row = *read_names.get(&r.name).unwrap();\n } else {\n for (i, _) in row_ends.iter().enumerate().take(10000).skip(1) {\n if r.read_start > row_ends[i] {\n if i > max_row {\n max_row = i;\n }\n row = i as u16;\n row_ends[i] = r.read_end;\n read_names.insert(r.name.clone(), i as u16);\n break;\n }\n }\n }\n\n let base = StaticAlignmentNucleobase {\n nucleobase: r.clone(),\n row,\n };\n\n reads_wr.push(base);\n }\n }\n\n if max_row > max_read_depth as usize {\n let mut rng = StdRng::seed_from_u64(42);\n let random_rows: HashSet<_> = (0..max_row as u32)\n .choose_multiple(&mut rng, max_read_depth as usize)\n .into_iter()\n .collect();\n reads_wr = reads_wr\n .into_iter()\n .filter(|b| random_rows.contains(&(b.row as u32)))\n .collect();\n matches_wr = matches_wr\n .into_iter()\n .filter(|b| random_rows.contains(&(b.row as u32)))\n .collect();\n }\n\n (reads_wr, 
matches_wr)\n}\n\npub fn get_static_reads + std::fmt::Debug>(\n path: P,\n fasta_path: P,\n region: &Region,\n max_read_depth: u32,\n variant: Option<&Variant>,\n) -> Result<(Vec, Vec)> {\n let alignments = read_indexed_bam(path, region)?;\n let (msm, m) = make_nucleobases(fasta_path, region, alignments)?;\n Ok(calc_rows(msm, m, max_read_depth, variant))\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/report/oncoprint.rs","use std::collections::HashMap;\nuse std::io::Write;\nuse std::{fs, str};\n\nuse derive_new::new;\nuse itertools::Itertools;\nuse lazy_static::lazy_static;\nuse regex::Regex;\nuse serde_derive::Serialize;\nuse tera::{self, Context, Tera};\n\nuse crate::bcf::report::table_report::create_report_table::get_ann_description;\nuse crate::bcf::report::table_report::create_report_table::read_tag_entries;\nuse anyhow::Context as AnyhowContext;\nuse anyhow::Result;\nuse chrono::{DateTime, Local};\nuse core::cmp::max;\nuse jsonm::packer::{PackOptions, Packer};\nuse log::warn;\nuse lz_str::compress_to_utf16;\nuse rust_htslib::bcf::{self, Read};\nuse serde_json::{json, Value};\nuse std::fs::File;\nuse std::path::Path;\nuse std::str::FromStr;\nuse thiserror::Error;\n\nlazy_static! {\n static ref HGVSP_PROTEIN_RE: Regex = Regex::new(r'ENSP[0-9]+(\.[0-9]+)?:').unwrap();\n}\n\npub fn oncoprint(\n sample_calls: &HashMap,\n output_path: &str,\n max_cells: u32,\n tsv_data_path: Option<&str>,\n plot_info: Option>,\n annotation_field: &str,\n) -> Result<()> {\n let mut data = HashMap::new();\n let mut gene_data = HashMap::new();\n let mut impact_data = Vec::new();\n let mut gene_impact_data = HashMap::new();\n let mut existing_var_data = Vec::new();\n let mut gene_existing_var_data = HashMap::new();\n let mut consequence_data = Vec::new();\n let mut gene_consequence_data = HashMap::new();\n let mut clin_sig_data = Vec::new();\n let mut gene_clin_sig_data = HashMap::new();\n let mut af_data = Vec::new();\n let mut gene_af_data = HashMap::new();\n let mut unique_genes = HashMap::new();\n let mut plot_info_data = HashMap::new();\n let mut gene_plot_info_data = HashMap::new();\n let mut remove_existing_variation = true;\n\n let tsv_data = if let Some(tsv) = tsv_data_path {\n Some(make_tsv_records(tsv.to_owned())?)\n } else {\n None\n };\n\n // Check every VCF file for presence of CLIN_SIG\n let mut clin_sig_present = HashMap::new();\n for (sample, path) in sample_calls.iter().sorted() {\n let bcf_reader = bcf::Reader::from_path(path)?;\n let header_records = bcf_reader.header().header_records();\n let ann_fields: Vec<_> = get_ann_description(header_records, annotation_field)?;\n clin_sig_present.insert(\n sample.to_owned(),\n ann_fields.contains(&'CLIN_SIG'.to_owned()),\n );\n }\n\n // Check wether any of the VCF files contain CLIN_SIG at all\n let cs_present_folded = clin_sig_present.iter().fold(false, |b, (_, c)| b || *c);\n\n for (sample, path) in sample_calls.iter().sorted() {\n let mut genes = HashMap::new();\n let mut impacts = HashMap::new();\n let mut gene_impacts = HashMap::new();\n let mut existing_variations = HashMap::new();\n let mut gene_existing_variations = HashMap::new();\n let mut consequences = HashMap::new();\n let mut gene_consequences = HashMap::new();\n let mut clin_sigs = HashMap::new();\n let mut gene_clin_sigs = HashMap::new();\n let mut ann_indices = HashMap::new();\n let mut pi_data = HashMap::new();\n let mut gene_pi_data = HashMap::new();\n\n let mut bcf_reader = bcf::Reader::from_path(path)?;\n let header = bcf_reader.header().clone();\n let mut sample_names = 
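// Illustrative sketch (hypothetical helper, not from this crate): calc_rows
// above stacks reads greedily, putting a read into the first row (rows 1..)
// whose current right end lies left of the read's start; mates reuse the row
// already recorded for their read name. If more rows than --max-read-depth are
// used, rows are afterwards downsampled with a fixed seed (42) so reports stay
// reproducible.
use std::collections::BTreeMap;

fn stack_reads(reads: &[(&str, u32, u32)]) -> Vec<(String, u16)> {
    let mut row_ends = vec![0u32; 10_000];
    let mut row_of: BTreeMap<String, u16> = BTreeMap::new();
    let mut out = Vec::new();
    for (name, start, end) in reads {
        let row = *row_of.entry((*name).to_owned()).or_insert_with(|| {
            let row = (1..row_ends.len()).find(|&i| *start > row_ends[i]).unwrap_or(0);
            row_ends[row] = *end;
            row as u16
        });
        out.push(((*name).to_owned(), row));
    }
    out
}

fn main() {
    let stacked = stack_reads(&[("r1", 100, 200), ("r2", 150, 250), ("r3", 300, 400)]);
    // r2 overlaps r1 and moves to row 2; r3 starts after r1 ends and reuses row 1.
    assert_eq!(
        stacked,
        vec![("r1".to_owned(), 1), ("r2".to_owned(), 2), ("r3".to_owned(), 1)]
    );
}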
Vec::new();\n for s in header.samples() {\n sample_names.push(String::from_utf8(s.to_owned())?);\n }\n let header_records = header.header_records();\n let ann_fields: Vec<_> = get_ann_description(header_records, annotation_field)?;\n\n for (i, field) in ann_fields.iter().enumerate() {\n ann_indices.insert(field, i);\n }\n\n let clin_sig_pres = clin_sig_present.get(sample).unwrap();\n\n for res in bcf_reader.records() {\n let mut gene_data_per_record = HashMap::new();\n let mut record = res?;\n let pos = record.pos();\n let alleles = record\n .alleles()\n .into_iter()\n .map(|allele| allele.to_owned())\n .collect_vec();\n let alt_alleles = &alleles[1..];\n let ref_allele = alleles[0].to_owned();\n\n let mut info_map = HashMap::new();\n if plot_info.is_some() {\n for tag in &plot_info.clone().unwrap() {\n read_tag_entries(&mut info_map, &mut record, &header, tag)?;\n }\n }\n\n let allele_frequencies = record\n .format(b'AF')\n .float()?\n .iter()\n .map(|s| s.to_vec())\n .collect_vec();\n\n let ann = record.info(annotation_field.as_bytes()).string()?;\n if let Some(ann) = ann {\n for alt_allele in alt_alleles {\n let variant = if alt_allele == b'' {\n 'BND'\n } else if alt_allele == b'' {\n 'DUP'\n } else if alt_allele == b''\n || alt_allele.len() == 1 && ref_allele.len() > 1\n {\n 'DEL'\n } else if alt_allele == b''\n || alt_allele.len() > 1 && ref_allele.len() == 1\n {\n 'INS'\n } else if alt_allele.len() == 1 && ref_allele.len() == 1 {\n 'SNV'\n } else if alt_allele == b'' {\n 'INV'\n } else if alt_allele.len() == ref_allele.len() {\n 'MNV'\n } else {\n 'Replacement'\n };\n\n for entry in ann.iter() {\n let fields: Vec<_> = entry.split(|c| *c == b'|').collect();\n\n let get_field = |field: &str| {\n str::from_utf8(\n fields[*ann_indices.get(&field.to_owned()).unwrap_or_else(|| panic!('No field named {} found. Please only use VEP-annotated VCF-files.', field))],\n )\n };\n\n let mut impact = get_field('IMPACT')?;\n let clin_sig = if *clin_sig_pres {\n get_field('CLIN_SIG')?\n } else {\n ''\n };\n let gene = if !get_field('SYMBOL')?.is_empty() {\n get_field('SYMBOL')?\n } else if !get_field('Gene')?.is_empty() {\n get_field('Gene')?\n } else if !get_field('Feature')?.is_empty() {\n get_field('Feature')?\n } else if !get_field('HGVSg')?.is_empty() {\n get_field('HGVSg')?\n } else {\n continue;\n };\n let dna_alteration = get_field('HGVSg')?;\n let canonical =\n if let Some(index) = ann_indices.get(&'CANONICAL'.to_owned()) {\n str::from_utf8(fields[*index])? 
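The variant-type labels above are derived from the ALT allele: symbolic alleles (presumably the VCF spellings <BND>, <DUP>, <DEL>, <INS>, <INV>, which the extraction stripped) map directly, otherwise the REF/ALT lengths decide between SNV, MNV, insertion, deletion and replacement. A self-contained sketch of that classification over byte slices; the function name is illustrative.

```rust
/// Classify a single ALT allele relative to the REF allele, mirroring the
/// SNV / MNV / INS / DEL / Replacement logic used for the oncoprint.
fn classify(ref_allele: &[u8], alt_allele: &[u8]) -> &'static str {
    if alt_allele == b"<BND>" {
        "BND"
    } else if alt_allele == b"<DUP>" {
        "DUP"
    } else if alt_allele == b"<INV>" {
        "INV"
    } else if alt_allele == b"<DEL>" || (alt_allele.len() == 1 && ref_allele.len() > 1) {
        "DEL"
    } else if alt_allele == b"<INS>" || (alt_allele.len() > 1 && ref_allele.len() == 1) {
        "INS"
    } else if alt_allele.len() == 1 && ref_allele.len() == 1 {
        "SNV"
    } else if alt_allele.len() == ref_allele.len() {
        "MNV"
    } else {
        "Replacement"
    }
}

fn main() {
    assert_eq!(classify(b"A", b"T"), "SNV");
    assert_eq!(classify(b"AT", b"A"), "DEL");
    assert_eq!(classify(b"A", b"ATT"), "INS");
    assert_eq!(classify(b"AT", b"GC"), "MNV");
    assert_eq!(classify(b"G", b"<DUP>"), "DUP");
}
```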
== 'YES'\n } else {\n false\n };\n let protein_alteration = get_field('HGVSp')?;\n let consequence = get_field('Consequence')?;\n let existing_var = get_field('Existing_variation')?;\n\n let gene_rec = unique_genes.entry(gene.to_owned()).or_insert_with(Vec::new);\n gene_rec.push(sample.to_owned());\n\n let rec = genes\n .entry(gene.to_owned())\n .or_insert_with(|| Record::new(sample.to_owned(), gene.to_owned()));\n\n // Data for second stage including whether the record is marked as canonical or not.\n let gene_entry = gene_data_per_record\n .entry(gene.to_owned())\n .or_insert_with(&Vec::new);\n gene_entry.push((\n SecondStageRecord {\n sample: rec.sample.clone(),\n alteration: if protein_alteration.is_empty() {\n dna_alteration.to_owned()\n } else {\n protein_alteration.to_owned()\n },\n variant: variant.to_owned(),\n dna_alt: dna_alteration.to_owned(),\n },\n canonical,\n &pos,\n ));\n\n rec.variants.push(variant.to_owned());\n\n let imp_rec = impacts.entry(gene.to_owned()).or_insert_with(Vec::new);\n if impact.is_empty() {\n impact = 'unknown';\n }\n imp_rec.push(BarPlotRecord::new(gene.to_owned(), impact.to_owned()));\n\n let ev_rec = existing_variations\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n let gene_ev_rec = gene_existing_variations\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n\n let alt = if protein_alteration.is_empty() {\n dna_alteration\n } else {\n protein_alteration\n };\n\n if plot_info.is_some() {\n for key in plot_info.clone().unwrap() {\n let info = info_map.get(&key.clone());\n if info.is_none() {\n let e = pi_data.entry(key.clone()).or_insert_with(HashMap::new);\n let rec = e.entry(gene.to_owned()).or_insert_with(Vec::new);\n rec.push(BarPlotRecord::new(\n gene.to_owned(),\n 'unknown'.to_string(),\n ));\n let e2 = gene_pi_data\n .entry(key.clone())\n .or_insert_with(HashMap::new);\n let rec2 = e2.entry(gene.to_owned()).or_insert_with(Vec::new);\n rec2.push(BarPlotRecord::new(\n alt.to_owned(),\n 'unknown'.to_string(),\n ));\n } else {\n for val in info.unwrap() {\n let value = if val == &json!('') || val == &json!('.') {\n 'unknown'.to_string()\n } else {\n val.to_string().trim_matches('\'').to_owned()\n };\n let e =\n pi_data.entry(key.clone()).or_insert_with(HashMap::new);\n let rec = e.entry(gene.to_owned()).or_insert_with(Vec::new);\n rec.push(BarPlotRecord::new(\n gene.to_owned(),\n value.clone(),\n ));\n let e2 = gene_pi_data\n .entry(key.clone())\n .or_insert_with(HashMap::new);\n let rec2 =\n e2.entry(gene.to_owned()).or_insert_with(Vec::new);\n rec2.push(BarPlotRecord::new(\n alt.to_owned(),\n value.clone(),\n ));\n }\n }\n }\n }\n\n let split_ev = existing_var.split('&').collect_vec();\n for ex_var in split_ev {\n let mut ev: String =\n ex_var.chars().filter(|c| !c.is_digit(10)).collect();\n if ev.is_empty() {\n ev = String::from('unknown');\n } else {\n remove_existing_variation = false;\n }\n ev_rec.push(BarPlotRecord::new(gene.to_owned(), ev.clone()));\n gene_ev_rec.push(BarPlotRecord::new(alt.to_owned(), ev));\n }\n\n let gene_imp_rec =\n gene_impacts.entry(gene.to_owned()).or_insert_with(Vec::new);\n gene_imp_rec.push(BarPlotRecord::new(alt.to_owned(), impact.to_owned()));\n\n let split_consequences: Vec<_> = consequence.split('&').collect();\n\n let cons_rec = consequences.entry(gene.to_owned()).or_insert_with(Vec::new);\n\n let gene_cons_rec = gene_consequences\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n\n for mut c in split_consequences {\n if c.is_empty() {\n c = 'unknown';\n }\n 
cons_rec.push(BarPlotRecord::new(gene.to_owned(), c.to_owned()));\n gene_cons_rec.push(BarPlotRecord::new(alt.to_owned(), c.to_owned()));\n }\n\n let gene_clin_sig_rec = gene_clin_sigs\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n\n let clin_rec = clin_sigs.entry(gene.to_owned()).or_insert_with(Vec::new);\n let sigs: Vec<_> = clin_sig.split('&').collect();\n for mut s in sigs {\n if s.is_empty() {\n s = 'unknown';\n }\n s = s.trim_matches('_');\n clin_rec.push(BarPlotRecord::new(gene.to_owned(), s.to_owned()));\n gene_clin_sig_rec\n .push(BarPlotRecord::new(alt.to_owned(), s.to_owned()));\n }\n\n for (i, name) in sample_names.iter().enumerate() {\n for frequency in &allele_frequencies[i] {\n let af = AlleleFrequency {\n sample: sample.to_owned() + ':' + name,\n key: gene.to_owned(),\n allele_frequency: *frequency,\n };\n\n af_data.push(af);\n\n let gene_af = AlleleFrequency {\n sample: sample.to_owned() + ':' + name,\n key: alt.to_owned(),\n allele_frequency: *frequency,\n };\n\n let f =\n gene_af_data.entry(gene.to_owned()).or_insert_with(Vec::new);\n f.push(gene_af);\n }\n }\n }\n }\n }\n // Filter records marked with canonical. Keep all if no one is marked.\n for (k, record_tuples) in &gene_data_per_record {\n let rec = gene_data.entry(k.to_owned()).or_insert_with(&Vec::new);\n let filter_canonical = record_tuples\n .iter()\n .filter(|(_, canonical, _)| *canonical)\n .collect_vec();\n match filter_canonical.len() {\n 0 => {\n rec.extend(record_tuples.iter().map(|(r, _, _)| r.clone()));\n }\n 1 => rec.extend(filter_canonical.iter().map(|(r, _, _)| r.clone())),\n _ => {\n rec.extend(filter_canonical.iter().map(|(r, _, _)| r.clone()));\n let alterations = filter_canonical\n .iter()\n .map(|(r, _, _)| r.alteration.clone())\n .collect_vec();\n let positions = filter_canonical.iter().map(|(_, _, p)| p).collect_vec();\n warn!('Found more than one transcript in gene {} annotated as canonical! 
The corresponding alterations are {:?}, located at {:?}', &k, &alterations, &positions);\n }\n }\n }\n }\n\n for gene in genes.keys().sorted() {\n let record = genes.get(gene).unwrap();\n // data for first stage\n let entry = data.entry(gene.to_owned()).or_insert_with(&Vec::new);\n entry.push(FinalRecord::from(record));\n\n // data for first stage impact\n let impact = impacts.get(gene).unwrap();\n let final_impacts = make_final_bar_plot_records(impact);\n impact_data.push(final_impacts);\n\n let ex_var = existing_variations.get(gene).unwrap();\n let final_evs = make_final_bar_plot_records(ex_var);\n existing_var_data.push(final_evs);\n\n for (tag, map) in pi_data.clone() {\n if let Some(t_data) = map.get(gene) {\n let final_data = make_final_bar_plot_records(t_data);\n let e = plot_info_data.entry(tag).or_insert_with(Vec::new);\n e.push(final_data);\n }\n }\n\n let consequence = consequences.get(gene).unwrap();\n let final_consequences = make_final_bar_plot_records(consequence);\n consequence_data.push(final_consequences);\n\n // data for first stage clin_sig\n let cls = clin_sigs.get(gene).unwrap();\n let final_clin_sig = make_final_bar_plot_records(cls);\n clin_sig_data.push(final_clin_sig);\n\n // data for second stage impact\n let gene_impact = gene_impacts.get(gene).unwrap();\n let final_gene_impacts = make_final_bar_plot_records(gene_impact);\n let e = gene_impact_data\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n e.push(final_gene_impacts);\n\n for (tag, map) in gene_pi_data.clone() {\n if let Some(t_data) = map.get(gene) {\n let final_data = make_final_bar_plot_records(t_data);\n let m = gene_plot_info_data.entry(tag).or_insert_with(HashMap::new);\n let e = m.entry(gene.to_owned()).or_insert_with(Vec::new);\n e.push(final_data);\n }\n }\n\n let gene_evs = gene_existing_variations.get(gene).unwrap();\n let final_gene_evs = make_final_bar_plot_records(gene_evs);\n let v = gene_existing_var_data\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n v.push(final_gene_evs);\n\n // data for second stage consequence\n let gene_consequence = gene_consequences.get(gene).unwrap();\n let final_gene_consequences = make_final_bar_plot_records(gene_consequence);\n let g = gene_consequence_data\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n g.push(final_gene_consequences);\n\n // data for second stage clin_sig\n let gene_clin_sig = gene_clin_sigs.get(gene).unwrap();\n let final_gene_clin_sigs = make_final_bar_plot_records(gene_clin_sig);\n let f = gene_clin_sig_data\n .entry(gene.to_owned())\n .or_insert_with(Vec::new);\n f.push(final_gene_clin_sigs);\n }\n }\n\n let gene_path = output_path.to_owned() + '/genes/';\n fs::create_dir(Path::new(&gene_path)).context(WriteErr::CantCreateDir {\n dir_path: gene_path.to_owned(),\n })?;\n\n let gene_plots_path = output_path.to_owned() + '/genes/plots/';\n fs::create_dir(Path::new(&gene_plots_path)).context(WriteErr::CantCreateDir {\n dir_path: gene_plots_path.to_owned(),\n })?;\n\n let mut gene_templates = Tera::default();\n gene_templates.add_raw_template('genes.html.tera', include_str!('genes.html.tera'))?;\n gene_templates.add_raw_template('plots.js.tera', include_str!('plots.js.tera'))?;\n\n let gene_specs: Value = serde_json::from_str(include_str!('gene_specs.json'))?;\n\n let page_size = max_cells as usize / sample_calls.len();\n\n // create html for second stage\n for (gene, data) in gene_data {\n let gene_data: Vec<_> = data.iter().sorted().collect();\n let mut alterations = HashMap::new();\n for rec in &gene_data {\n let 
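Canonical filtering above works per record and gene: if at least one transcript is annotated CANONICAL, the non-canonical ones are dropped, otherwise all are kept, and more than one canonical transcript triggers a warning. The same selection rule in isolation, with records reduced to (alteration, canonical) tuples; names are illustrative.

```rust
/// Keep the canonical transcript(s) if any is flagged CANONICAL=YES,
/// otherwise keep everything.
fn select_canonical<'a>(records: &[(&'a str, bool)]) -> Vec<&'a str> {
    let canonical: Vec<&str> = records.iter().filter(|r| r.1).map(|r| r.0).collect();
    if canonical.is_empty() {
        records.iter().map(|r| r.0).collect()
    } else {
        if canonical.len() > 1 {
            eprintln!("more than one transcript flagged as canonical: {:?}", canonical);
        }
        canonical
    }
}

fn main() {
    let recs = [("p.G12D", true), ("c.35G>A", false)];
    assert_eq!(select_canonical(&recs), vec!["p.G12D"]);
}
```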
entry = alterations.entry(&rec.alteration).or_insert_with(Vec::new);\n entry.push(rec.sample.to_owned());\n }\n let mut sort_alterations = HashMap::new();\n for (alteration, mut samples) in alterations {\n samples.sort();\n samples.dedup();\n sort_alterations.insert(alteration, samples.len());\n }\n\n let impact_data = gene_impact_data.get(&gene).unwrap();\n let final_impact: Vec<_> = impact_data.iter().flatten().sorted().collect();\n let existing_var_data = gene_existing_var_data.get(&gene).unwrap();\n let final_ev: Vec<_> = existing_var_data.iter().flatten().sorted().collect();\n let mut inf_plot_data = HashMap::new();\n for (tag, data) in &gene_plot_info_data {\n if let Some(d) = data.get(&gene) {\n let final_data: Vec<_> = d.iter().flatten().sorted().collect();\n inf_plot_data.insert(tag.to_owned(), final_data.clone());\n }\n }\n let consequence_data = gene_consequence_data.get(&gene).unwrap();\n let final_consequence: Vec<_> = consequence_data.iter().flatten().sorted().collect();\n let clin_sig_data = gene_clin_sig_data.get(&gene).unwrap();\n let final_clin_sig: Vec<_> = clin_sig_data.iter().flatten().sorted().collect();\n let allele_frequency_data = gene_af_data.get(&gene).unwrap();\n\n let sorted_impacts = order_by_impact(final_impact.clone());\n let sorted_clin_sigs = order_by_clin_sig(final_clin_sig.clone());\n let mut order = Vec::new();\n for (alt, sample_count) in sort_alterations {\n let impact_order = sorted_impacts.get(alt).unwrap();\n let clin_sig_order = sorted_clin_sigs.get(alt).unwrap();\n order.push((alt.to_owned(), sample_count, impact_order, clin_sig_order))\n }\n\n order.sort_by(|(g1, c1, i1, cs1), (g2, c2, i2, cs2)| {\n c2.cmp(c1) // first order by different sample occurrences\n .then(i2.cmp(i1)) // then by impact\n .then(cs2.cmp(cs1)) // then by clin_sig\n .then(g2.cmp(g1)) // lastly by alteration name for consistency\n });\n\n let ordered_alts: Vec<_> = order.iter().map(|(x, _, _, _)| x).collect();\n\n let pages = if order.len() % page_size == 0 {\n (order.len() / page_size) - 1\n } else {\n order.len() / page_size\n };\n\n for i in 0..pages + 1 {\n let current_alterations = if i != pages {\n &order[(i * page_size)..((i + 1) * page_size)] // get genes for current page\n } else {\n &order[(i * page_size)..] 
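Page layout: page_size is max_cells divided by the number of samples, and the sorted alteration (or gene) list is then cut into page_size chunks, with a full final chunk not spilling over into an extra empty page. A standalone sketch of that arithmetic:

```rust
/// Split `items` into pages of at most `page_size` entries, the way the
/// report splits genes/alterations across index pages.
fn paginate<T: Clone>(items: &[T], page_size: usize) -> Vec<Vec<T>> {
    assert!(page_size > 0);
    // Last zero-based page index: a full final page does not start a new one.
    let last_page = if !items.is_empty() && items.len() % page_size == 0 {
        items.len() / page_size - 1
    } else {
        items.len() / page_size
    };
    (0..=last_page)
        .map(|i| {
            if i != last_page {
                items[i * page_size..(i + 1) * page_size].to_vec() // full page
            } else {
                items[i * page_size..].to_vec() // last page takes the remainder
            }
        })
        .collect()
}

fn main() {
    let genes: Vec<_> = (1..=7).collect();
    assert_eq!(paginate(&genes, 3), vec![vec![1, 2, 3], vec![4, 5, 6], vec![7]]);
    assert_eq!(paginate(&genes[..6], 3).len(), 2);
}
```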
// get genes for last page\n };\n\n if !current_alterations.is_empty() {\n let mut sorted_alterations = Vec::new();\n for (g, _, _, _) in current_alterations {\n sorted_alterations.push(g);\n }\n\n let page = i + 1;\n\n let page_data: Vec<_> = gene_data\n .iter()\n .filter(|entry| sorted_alterations.contains(&&entry.alteration))\n .sorted()\n .collect();\n\n let impact_page_data: Vec<_> = final_impact\n .iter()\n .filter(|entry| sorted_alterations.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let ev_page_data: Vec<_> = final_ev\n .iter()\n .filter(|entry| sorted_alterations.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let consequence_page_data: Vec<_> = final_consequence\n .iter()\n .filter(|entry| sorted_alterations.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let clin_sig_page_data: Vec<_> = final_clin_sig\n .iter()\n .filter(|entry| sorted_alterations.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let af_page_data: Vec<_> = allele_frequency_data\n .iter()\n .filter(|entry| sorted_alterations.contains(&&entry.key))\n .collect();\n\n let mut info_page_data = HashMap::new();\n if plot_info.is_some() {\n for (tag, data) in inf_plot_data.clone() {\n info_page_data.insert(\n tag,\n data.into_iter()\n .filter(|entry| sorted_alterations.contains(&&entry.record.key))\n .collect_vec(),\n );\n }\n }\n\n let order: Vec<_> = ordered_alts\n .iter()\n .filter(|gene| sorted_alterations.contains(gene))\n .collect();\n\n let samples: Vec<_> = page_data.iter().map(|r| r.sample.clone()).collect();\n let unique_samples = samples.iter().unique().count();\n\n let mut specs = gene_specs.clone();\n\n let mut values = if cs_present_folded {\n json!({ 'main': page_data, 'impact': impact_page_data, 'ev': ev_page_data, 'consequence': consequence_page_data, 'clin_sig': clin_sig_page_data, 'allele_frequency': af_page_data})\n } else {\n json!({ 'main': page_data, 'impact': impact_page_data, 'ev': ev_page_data, 'consequence': consequence_page_data, 'allele_frequency': af_page_data})\n };\n\n if plot_info.is_some() {\n for (tag, data) in info_page_data {\n values[tag] = json!(data);\n }\n }\n\n if let Some(ref tsv) = tsv_data {\n for (title, data) in tsv {\n values[title] = json!(data);\n }\n }\n\n specs['datasets'] = values;\n // Set allele frequency heatmap width according to number of samples\n specs['vconcat'][1]['hconcat'][5]['width'] = json!(max(unique_samples * 20, 60));\n if !cs_present_folded || remove_existing_variation {\n let hconcat = specs['vconcat'][1]['hconcat'].as_array_mut().unwrap();\n match (!cs_present_folded, remove_existing_variation) {\n (true, true) => {\n hconcat.remove(6);\n hconcat.remove(4)\n }\n (true, false) => hconcat.remove(4),\n (false, true) => hconcat.remove(6),\n (_, _) => unreachable!(),\n };\n specs['vconcat'][1]['hconcat'] = json!(hconcat);\n }\n\n if plot_info.is_some() {\n let info_specs: Value = serde_json::from_str(include_str!('info_specs.json'))?;\n let highlight_specs: Value =\n serde_json::from_str(include_str!('highlight_specs.json'))?;\n let hconcat = specs['vconcat'][1]['hconcat'].as_array_mut().unwrap();\n for tag in plot_info_data.keys() {\n let mut tag_specs = info_specs.clone();\n tag_specs['data'] = json!({ 'name': tag });\n let highlight_name = 'highlight_'.to_string() + tag;\n tag_specs['selection'] = json!({ &highlight_name: highlight_specs });\n tag_specs['encoding']['color']['title'] = json!(tag);\n tag_specs['encoding']['x']['title'] = json!(tag);\n 
tag_specs['encoding']['fillOpacity']['condition']['selection'] =\n json!(highlight_name);\n hconcat.push(tag_specs);\n }\n\n specs['vconcat'][1]['hconcat'] = json!(hconcat);\n }\n\n if let Some(ref tsv) = tsv_data {\n let tsv_specs: Value = serde_json::from_str(include_str!('tsv_specs.json'))?;\n\n for title in tsv.keys() {\n let mut tsv_plot = tsv_specs.clone();\n tsv_plot['data'] = json!({ 'name': title });\n tsv_plot['encoding']['color']['title'] = json!(title);\n let vconcat = specs['vconcat'].as_array_mut().unwrap();\n vconcat.insert(1, tsv_plot);\n specs['vconcat'] = json!(vconcat);\n }\n }\n\n let mut packer = Packer::new();\n let options = PackOptions::new();\n let packed_gene_specs = packer.pack(&specs, &options)?;\n let mut context = Context::new();\n let oncoprint = json!(compress_to_utf16(&serde_json::to_string(\n &packed_gene_specs\n )?))\n .to_string();\n context.insert('oncoprint', &oncoprint);\n context.insert('gene', &gene);\n context.insert('samples', &unique_samples);\n context.insert('current_page', &page);\n context.insert('pages', &(pages + 1));\n context.insert('order', &serde_json::to_string(&json!(order))?);\n let local: DateTime = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n let html = gene_templates.render('genes.html.tera', &context)?;\n let js = gene_templates.render('plots.js.tera', &context)?;\n let filepath = gene_path.clone() + &gene + &page.to_string() + '.html';\n let js_filepath = gene_plots_path.clone() + &gene + &page.to_string() + '.js';\n let mut file = File::create(filepath)?;\n let mut js_file = File::create(js_filepath)?;\n file.write_all(html.as_bytes())?;\n js_file.write_all(js.as_bytes())?;\n }\n }\n }\n\n // only keep recurrent entries\n let data: Vec<_> = data\n .values()\n .filter(|entry| !entry.is_empty())\n .flatten()\n .sorted()\n .collect();\n let mut sort_genes = HashMap::new();\n\n // remove duplicate samples and calculate order for oncoprint\n for (gene, mut samples) in unique_genes {\n samples.sort();\n samples.dedup();\n sort_genes.insert(gene.to_owned(), samples.len());\n }\n\n let impact_data: Vec<_> = impact_data.iter().flatten().collect();\n let ev_data: Vec<_> = existing_var_data.iter().flatten().collect();\n let consequence_data: Vec<_> = consequence_data.iter().flatten().collect();\n let clin_sig_data: Vec<_> = clin_sig_data.iter().flatten().collect();\n let mut i_plot_data = HashMap::new();\n if plot_info.is_some() {\n for (tag, data) in plot_info_data.clone() {\n i_plot_data.insert(tag, data.into_iter().flatten().collect_vec());\n }\n }\n\n let sorted_impacts = order_by_impact(impact_data.clone());\n let sorted_clin_sigs = order_by_clin_sig(clin_sig_data.clone());\n let mut v = Vec::new();\n\n for (gene, sample_count) in sort_genes {\n let impact_order = sorted_impacts.get(&gene).unwrap();\n let clin_sig_order = sorted_clin_sigs.get(&gene).unwrap();\n v.push((gene, sample_count, impact_order, clin_sig_order))\n }\n\n v.sort_by(|(g1, c1, i1, cs1), (g2, c2, i2, cs2)| {\n c2.cmp(c1) // first order by different sample occurrences\n .then(i2.cmp(i1)) // then by impact\n .then(cs2.cmp(cs1)) // then by clin_sig\n .then(g2.cmp(g1)) // lastly by gene name for consistency\n });\n let ordered_genes: Vec<_> = v.iter().map(|(x, _, _, _)| x).collect();\n\n let pages = if v.len() % page_size == 0 {\n (v.len() / page_size) - 1\n } else {\n v.len() / page_size\n };\n\n let index_path = output_path.to_owned() + '/indexes';\n 
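Each rendered page embeds its Vega-Lite spec as a packed, compressed string: the spec is memoized with jsonm's Packer, serialized with serde_json, and compressed with lz-str's compress_to_utf16 so the browser can inflate it again with lz-string and jsonm. A minimal sketch of that pipeline using the same calls as the code above, with error handling simplified to anyhow::Result:

```rust
use anyhow::Result;
use jsonm::packer::{PackOptions, Packer};
use lz_str::compress_to_utf16;
use serde_json::{json, Value};

/// Pack a JSON value with jsonm and compress it so it can be inlined into a
/// template and inflated client-side.
fn pack_and_compress(spec: &Value) -> Result<String> {
    let mut packer = Packer::new();
    let options = PackOptions::new();
    let packed = packer.pack(spec, &options)?;
    Ok(json!(compress_to_utf16(&serde_json::to_string(&packed)?)).to_string())
}

fn main() -> Result<()> {
    let spec = json!({ "datasets": { "main": [{ "gene": "KRAS", "variant": "SNV" }] } });
    let embedded = pack_and_compress(&spec)?;
    println!("{} characters to embed", embedded.len());
    Ok(())
}
```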
fs::create_dir(Path::new(&index_path)).context(WriteErr::CantCreateDir {\n dir_path: index_path.to_owned(),\n })?;\n\n let prefixes = make_prefixes(ordered_genes.clone(), page_size);\n let prefix_path = output_path.to_owned() + '/prefixes/';\n fs::create_dir(Path::new(&prefix_path)).context(WriteErr::CantCreateDir {\n dir_path: prefix_path.to_owned(),\n })?;\n\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'prefix_table.html.tera',\n include_str!('prefix_table.html.tera'),\n )?;\n let mut context = Context::new();\n context.insert('table', &prefixes);\n let html = templates.render('prefix_table.html.tera', &context)?;\n\n let file_path = output_path.to_owned() + '/prefixes/prefixes.html';\n let mut file = File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n\n let gene_path = prefix_path + '/genes/';\n fs::create_dir(Path::new(&gene_path)).context(WriteErr::CantCreateDir {\n dir_path: gene_path.to_owned(),\n })?;\n\n for (prefix, values) in prefixes {\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'lookup_table.html.tera',\n include_str!('lookup_table.html.tera'),\n )?;\n let mut context = Context::new();\n context.insert('values', &values);\n let html = templates.render('lookup_table.html.tera', &context)?;\n\n let file_path = gene_path.to_owned() + &prefix + '.html';\n let mut file = File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n }\n\n for i in 0..pages + 1 {\n let current_genes = if i != pages {\n &v[(i * page_size)..((i + 1) * page_size)] // get genes for current page\n } else {\n &v[(i * page_size)..] // get genes for last page\n };\n\n if !current_genes.is_empty() {\n let mut sorted_genes = Vec::new();\n for (g, _, _, _) in current_genes {\n sorted_genes.push(g);\n }\n\n let page = i + 1;\n\n let page_data: Vec<_> = data\n .iter()\n .filter(|entry| sorted_genes.contains(&&entry.gene))\n .sorted()\n .collect();\n\n let impact_page_data: Vec<_> = impact_data\n .iter()\n .filter(|entry| sorted_genes.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let ev_page_data: Vec<_> = ev_data\n .iter()\n .filter(|entry| sorted_genes.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let consequence_page_data: Vec<_> = consequence_data\n .iter()\n .filter(|entry| sorted_genes.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let clin_sig_page_data: Vec<_> = clin_sig_data\n .iter()\n .filter(|entry| sorted_genes.contains(&&entry.record.key))\n .sorted()\n .collect();\n\n let af_page_data: Vec<_> = af_data\n .iter()\n .filter(|entry| sorted_genes.contains(&&entry.key))\n .collect();\n\n let mut info_page_data = HashMap::new();\n if plot_info.is_some() {\n for (tag, data) in i_plot_data.clone() {\n info_page_data.insert(\n tag,\n data.into_iter()\n .filter(|entry| sorted_genes.contains(&&entry.record.key))\n .collect_vec(),\n );\n }\n }\n\n let order: Vec<_> = ordered_genes\n .iter()\n .filter(|gene| sorted_genes.contains(gene))\n .collect();\n\n let samples: Vec<_> = page_data.iter().map(|r| r.sample.clone()).collect();\n let unique_samples = samples.iter().unique().count();\n\n let mut vl_specs: Value = serde_json::from_str(include_str!('report_specs.json'))?;\n\n let mut values = if cs_present_folded {\n json!({ 'main': page_data, 'impact': impact_page_data, 'ev': ev_page_data, 'consequence': consequence_page_data, 'clin_sig': clin_sig_page_data, 'allele_frequency': af_page_data})\n } else {\n json!({ 'main': page_data, 'impact': impact_page_data, 'ev': ev_page_data, 'consequence': 
consequence_page_data, 'allele_frequency': af_page_data})\n };\n\n if plot_info.is_some() {\n for (tag, data) in info_page_data {\n values[tag] = json!(data);\n }\n }\n\n if let Some(ref tsv) = tsv_data {\n for (title, data) in tsv {\n values[title] = json!(data);\n }\n }\n\n vl_specs['datasets'] = values;\n if !cs_present_folded || remove_existing_variation {\n let hconcat = vl_specs['vconcat'][1]['hconcat'].as_array_mut().unwrap();\n match (!cs_present_folded, remove_existing_variation) {\n (true, true) => {\n hconcat.remove(6);\n hconcat.remove(4)\n }\n (true, false) => hconcat.remove(4),\n (false, true) => hconcat.remove(6),\n (_, _) => unreachable!(),\n };\n vl_specs['vconcat'][1]['hconcat'] = json!(hconcat);\n }\n\n if plot_info.is_some() {\n let info_specs: Value = serde_json::from_str(include_str!('info_specs.json'))?;\n let highlight_specs: Value =\n serde_json::from_str(include_str!('highlight_specs.json'))?;\n let hconcat = vl_specs['vconcat'][1]['hconcat'].as_array_mut().unwrap();\n for tag in plot_info_data.keys() {\n let mut tag_specs = info_specs.clone();\n tag_specs['data'] = json!({ 'name': tag });\n let highlight_name = 'highlight_'.to_string() + tag;\n tag_specs['selection'] = json!({ &highlight_name: highlight_specs });\n tag_specs['encoding']['color']['title'] = json!(tag);\n tag_specs['encoding']['x']['title'] = json!(tag);\n tag_specs['encoding']['fillOpacity']['condition']['selection'] =\n json!(highlight_name);\n hconcat.push(tag_specs);\n }\n\n vl_specs['vconcat'][1]['hconcat'] = json!(hconcat);\n }\n\n if let Some(ref tsv) = tsv_data {\n let tsv_specs: Value = serde_json::from_str(include_str!('tsv_specs.json'))?;\n\n for title in tsv.keys() {\n let mut tsv_plot = tsv_specs.clone();\n tsv_plot['data'] = json!({ 'name': title });\n tsv_plot['encoding']['color']['title'] = json!(title);\n let vconcat = vl_specs['vconcat'].as_array_mut().unwrap();\n vconcat.insert(1, tsv_plot);\n vl_specs['vconcat'] = json!(vconcat);\n }\n }\n\n let mut packer = Packer::new();\n let options = PackOptions::new();\n let packed_specs = packer.pack(&vl_specs, &options)?;\n let mut templates = Tera::default();\n templates.add_raw_template('report.html.tera', include_str!('report.html.tera'))?;\n templates.add_raw_template('plots.js.tera', include_str!('plots.js.tera'))?;\n let mut context = Context::new();\n let data = json!(compress_to_utf16(&serde_json::to_string(&packed_specs)?)).to_string();\n context.insert('oncoprint', &data);\n context.insert('current_page', &page);\n context.insert('pages', &(pages + 1));\n context.insert('order', &serde_json::to_string(&json!(order))?);\n context.insert('samples', &unique_samples);\n let local: DateTime = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n\n let html = templates.render('report.html.tera', &context)?;\n let js = templates.render('plots.js.tera', &context)?;\n\n let index = format!('{}/index{}.html', index_path, page);\n let js_index = format!('{}/plot{}.js', index_path, page);\n let mut file = File::create(index)?;\n let mut js_file = File::create(js_index)?;\n file.write_all(html.as_bytes())?;\n js_file.write_all(js.as_bytes())?;\n }\n }\n\n // Add index1 when no variants are found\n let index1_path = output_path.to_owned() + '/indexes/index1.html';\n if !Path::new(&index1_path).exists() {\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'empty_report.html',\n include_str!('html/empty_report.html.tera'),\n )?;\n let 
mut context = Context::new();\n let local: DateTime = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n let no_variants = templates.render('empty_report.html', &context)?;\n let mut file = File::create(index1_path)?;\n file.write_all(no_variants.as_bytes())?;\n }\n\n Ok(())\n}\n\n#[derive(new, Debug, Clone)]\nstruct Record {\n sample: String,\n gene: String,\n #[new(default)]\n variants: Vec,\n}\n\n#[derive(Serialize, Debug, PartialEq, PartialOrd, Clone)]\nstruct AlleleFrequency {\n sample: String,\n key: String,\n allele_frequency: f32,\n}\n\n#[derive(new, Serialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]\nstruct BarPlotRecord {\n key: String,\n value: String,\n}\n\n#[derive(new, Serialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]\nstruct TsvRecord {\n sample: String,\n value: String,\n}\n\n#[derive(Serialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]\nstruct Counter {\n count: u32,\n #[serde(flatten)]\n record: BarPlotRecord,\n}\n\n// Record for the first stage of the report\n#[derive(Serialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]\nstruct FinalRecord {\n sample: String,\n gene: String,\n variants: String,\n}\n\n// Record for the second stage of the report\n#[derive(Serialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]\nstruct SecondStageRecord {\n sample: String,\n alteration: String,\n variant: String,\n dna_alt: String,\n}\n\n#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]\nenum Impact {\n Unknown,\n Low,\n Modifier,\n Moderate,\n High,\n}\n\nimpl FromStr for Impact {\n type Err = ();\n\n fn from_str(s: &str) -> Result {\n match s {\n 'HIGH' => Ok(Impact::High),\n 'MODERATE' => Ok(Impact::Moderate),\n 'MODIFIER' => Ok(Impact::Modifier),\n 'LOW' => Ok(Impact::Low),\n _ => Ok(Impact::Unknown),\n }\n }\n}\n\n#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]\nenum ClinSig {\n Unknown,\n NotProvided,\n Other,\n Benign,\n BenignLikelyBenign,\n LikelyBenign,\n Protective,\n UncertainSignificance,\n ConflictingInterpretationsOfPathogenicity,\n Association,\n Affects,\n DrugResponse,\n RiskFactor,\n LikelyPathogenic,\n LikelyPathogenicPathogenic,\n Pathogenic,\n}\n\nimpl FromStr for ClinSig {\n type Err = ();\n\n fn from_str(s: &str) -> Result {\n match s {\n 'pathogenic' => Ok(ClinSig::Pathogenic),\n 'likely_pathogenic/pathogenic' => Ok(ClinSig::LikelyPathogenicPathogenic),\n 'likely_pathogenic' => Ok(ClinSig::LikelyPathogenic),\n 'risk_factor' => Ok(ClinSig::RiskFactor),\n 'drug_response' => Ok(ClinSig::DrugResponse),\n 'affects' => Ok(ClinSig::Affects),\n 'association' => Ok(ClinSig::Association),\n 'uncertain_significance' => Ok(ClinSig::UncertainSignificance),\n 'conflicting_interpretations_of_pathogenicity' => {\n Ok(ClinSig::ConflictingInterpretationsOfPathogenicity)\n }\n 'protective' => Ok(ClinSig::Protective),\n 'likely_benign' => Ok(ClinSig::LikelyBenign),\n 'benign/likely_benign' => Ok(ClinSig::BenignLikelyBenign),\n 'benign' => Ok(ClinSig::Benign),\n 'other' => Ok(ClinSig::Other),\n 'not_provided' => Ok(ClinSig::NotProvided),\n _ => Ok(ClinSig::Unknown),\n }\n }\n}\n\nimpl From<&Record> for FinalRecord {\n fn from(record: &Record) -> Self {\n FinalRecord {\n sample: record.sample.to_owned(),\n gene: record.gene.to_owned(),\n variants: record.variants.iter().sorted().unique().join('/'),\n }\n }\n}\n\nfn make_tsv_records(tsv_path: String) -> Result>> {\n let mut tsv_values = HashMap::new();\n let mut rdr = csv::ReaderBuilder::new()\n .delimiter(b'\t')\n 
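Impact and ClinSig derive Ord, so the declaration order of their variants (least to most severe) is exactly the ranking later used by order_by_impact and order_by_clin_sig, and FromStr maps unrecognized strings to the lowest rank. A small usage sketch with the Impact enum:

```rust
use std::str::FromStr;

/// Variant order defines severity: later variants compare greater.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Impact {
    Unknown,
    Low,
    Modifier,
    Moderate,
    High,
}

impl FromStr for Impact {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "HIGH" => Impact::High,
            "MODERATE" => Impact::Moderate,
            "MODIFIER" => Impact::Modifier,
            "LOW" => Impact::Low,
            _ => Impact::Unknown, // anything unexpected ranks lowest
        })
    }
}

fn main() {
    let mut impacts: Vec<Impact> = ["LOW", "HIGH", "garbage", "MODERATE"]
        .iter()
        .map(|s| s.parse().unwrap())
        .collect();
    impacts.sort();
    // Sorted ascending by severity; compare b.cmp(a) for "most severe first".
    assert_eq!(impacts.last(), Some(&Impact::High));
    println!("{:?}", impacts);
}
```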
.from_path(tsv_path)?;\n\n let header = rdr.headers()?.clone();\n let titles: Vec<_> = header.iter().skip(1).collect();\n for res in rdr.records() {\n let row = res?;\n let sample = row[0].to_owned();\n for (i, value) in row.iter().skip(1).enumerate() {\n let rec = tsv_values\n .entry(titles[i].to_owned())\n .or_insert_with(Vec::new);\n let entry = TsvRecord {\n sample: sample.clone(),\n value: value.to_owned(),\n };\n rec.push(entry);\n }\n }\n Ok(tsv_values)\n}\n\nfn make_final_bar_plot_records(records: &[BarPlotRecord]) -> Vec {\n let mut count_map = HashMap::new();\n for i in records {\n let r = count_map.entry((&i.key, &i.value)).or_insert_with(|| 0);\n *r += 1;\n }\n\n let mut res = Vec::new();\n\n for ((alt, imp), count) in count_map {\n let plot_rec = BarPlotRecord {\n key: alt.to_owned(),\n value: imp.to_owned(),\n };\n let record = Counter {\n count,\n record: plot_rec,\n };\n\n res.push(record);\n }\n\n res\n}\n\nfn order_by_impact(impacts: Vec<&Counter>) -> HashMap> {\n let mut order = HashMap::new();\n let mut order_tuples = HashMap::new();\n for c in impacts {\n let impact = Impact::from_str(&c.record.value).unwrap();\n let rec = order_tuples\n .entry(c.record.key.to_owned())\n .or_insert_with(Vec::new);\n rec.push((impact, c.count))\n }\n\n for v in order_tuples.values_mut() {\n v.sort_by(|(i1, a), (i2, b)| b.cmp(a).then(i2.cmp(i1)))\n }\n\n for (k, v) in order_tuples {\n let removed_count = v.into_iter().map(|(x, _)| x).collect();\n order.insert(k, removed_count);\n }\n order\n}\n\nfn order_by_clin_sig(clin_sigs: Vec<&Counter>) -> HashMap> {\n let mut order = HashMap::new();\n let mut order_tuples = HashMap::new();\n for c in clin_sigs {\n let impact = ClinSig::from_str(&c.record.value).unwrap();\n let rec = order_tuples\n .entry(c.record.key.to_owned())\n .or_insert_with(Vec::new);\n rec.push((impact, c.count))\n }\n\n for v in order_tuples.values_mut() {\n v.sort_by(|(c1, a), (c2, b)| b.cmp(a).then(c2.cmp(c1)));\n }\n\n for (k, v) in order_tuples {\n let removed_count = v.into_iter().map(|(x, _)| x).collect();\n order.insert(k, removed_count);\n }\n order\n}\n\nfn make_prefixes(\n genes: Vec<&String>,\n rows_per_page: usize,\n) -> HashMap> {\n let mut prefix_map = HashMap::new();\n for (i, partial_table) in genes.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n for gene in partial_table {\n let entry = prefix_map.entry(gene.to_string()).or_insert_with(Vec::new);\n entry.push((gene.to_owned(), page));\n }\n }\n prefix_map\n}\n\n#[derive(Error, Debug)]\npub enum WriteErr {\n #[error('could not create directory at {dir_path}')]\n CantCreateDir { dir_path: String },\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/report/mod.rs","use crate::bcf::report::oncoprint::WriteErr;\nuse anyhow::{Context, Result};\nuse itertools::Itertools;\nuse std::fs;\nuse std::fs::File;\nuse std::io::{Read, Write};\nuse std::path::Path;\n\npub mod oncoprint;\npub mod table_report;\n\npub fn embed_js(\n output_path: &str,\n vcf_report: bool,\n custom_table_report_js: Option<&str>,\n custom_js_files: Vec,\n) -> Result<()> {\n let js_path = output_path.to_owned() + '/js/';\n fs::create_dir(Path::new(&js_path)).context(WriteErr::CantCreateDir {\n dir_path: js_path.to_owned(),\n })?;\n let mut files = vec![\n (\n 'bootstrap.bundle.min.js',\n include_str!('js/bootstrap.bundle.min.js'),\n ),\n ('jquery.min.js', include_str!('js/jquery.min.js')),\n ('popper.min.js', include_str!('js/popper.min.js')),\n ('lz-string.min.js', include_str!('js/lz-string.min.js')),\n (\n 
'bootstrap-table.min.js',\n include_str!('js/bootstrap-table.min.js'),\n ),\n ('vega.min.js', include_str!('js/vega.min.js')),\n ('vega-lite.min.js', include_str!('js/vega-lite.min.js')),\n ('vega-embed.min.js', include_str!('js/vega-embed.min.js')),\n ];\n let vcf_report_files = vec![\n ('jsonm.min.js', include_str!('js/jsonm.min.js')),\n ('table-report.js', include_str!('js/table-report.js')),\n ('report.js', include_str!('js/report.js')),\n ('gene-report.js', include_str!('js/gene-report.js')),\n ];\n if vcf_report {\n files.extend(vcf_report_files.iter());\n if let Some(path) = custom_table_report_js {\n let mut file_string = String::new();\n let mut custom_file = File::open(path).context('Unable to open custom JS file')?;\n custom_file\n .read_to_string(&mut file_string)\n .context('Unable to read string')?;\n let mut out_file = File::create(js_path.to_owned() + 'table-report.js')?;\n out_file.write_all(file_string.as_bytes())?;\n } else {\n files.push(('table-report.js', include_str!('js/table-report.js')))\n }\n }\n for (name, file) in files {\n let mut out_file = File::create(js_path.to_owned() + name)?;\n out_file.write_all(file.as_bytes())?;\n }\n\n for file in custom_js_files {\n let file_name = file\n .split('/')\n .collect_vec()\n .pop()\n .context(format!('Unable to extract file name from path: {}', file))?;\n let mut file_string = String::new();\n let mut custom_file = File::open(&file).context('Unable to open JS file')?;\n custom_file\n .read_to_string(&mut file_string)\n .context('Unable to read string')?;\n let mut out_file = File::create(js_path.to_owned() + file_name)?;\n out_file.write_all(file_string.as_bytes())?;\n }\n\n Ok(())\n}\n\npub fn embed_css(output_path: &str, vcf_report: bool) -> Result<()> {\n let css_path = output_path.to_owned() + '/css/';\n fs::create_dir(Path::new(&css_path)).context(WriteErr::CantCreateDir {\n dir_path: css_path.to_owned(),\n })?;\n let mut files = vec![\n ('bootstrap.min.css', include_str!('css/bootstrap.min.css')),\n (\n 'bootstrap-table.min.css',\n include_str!('css/bootstrap-table.min.css'),\n ),\n ];\n let vcf_report_files = vec![('oncoprint.css', include_str!('css/oncoprint.css'))];\n let csv_report_files = vec![('csv_report.css', include_str!('../../csv/csv_report.css'))];\n if vcf_report {\n files.extend(vcf_report_files.iter());\n } else {\n files.extend(csv_report_files.iter());\n }\n for (name, file) in files {\n let mut out_file = File::create(css_path.to_owned() + name)?;\n out_file.write_all(file.as_bytes())?;\n }\n Ok(())\n}\n\npub fn embed_html(output_path: &str) -> Result<()> {\n let files = vec![('index.html', include_str!('html/index.html'))];\n for (name, file) in files {\n let out_path = output_path.to_owned() + '/' + name;\n let mut out_file = File::create(&out_path).context(WriteErr::CantCreateDir {\n dir_path: out_path.to_owned(),\n })?;\n out_file.write_all(file.as_bytes())?;\n }\n Ok(())\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/match_variants.rs","//! Annotate for each variant in a VCF/BCF at STDIN whether it is contained in a given second VCF/BCF.\n//!\n//! The matching is fuzzy for indels and exact for SNVs.\n//! Results are printed as BCF to STDOUT, with an additional INFO tag MATCHING.\n//! The two vcfs do not have to be sorted.\n//!\n//! ## Usage:\n//! ```bash\n//! rbt vcf-match -d 50 -l 20 tests/test3.vcf < tests/test2.vcf > tests/matching.bcf\n//! 
```\n//!\nuse anyhow::{bail, Result};\nuse itertools::Itertools;\nuse log::{info, warn};\nuse rust_htslib::bcf;\nuse rust_htslib::bcf::{Format, Read};\nuse std::collections::{btree_map, BTreeMap, HashMap};\nuse std::path::Path;\nuse std::str;\nuse thiserror::Error;\n\npub struct VarIndex {\n inner: HashMap, BTreeMap>>,\n max_dist: u64,\n}\n\nimpl VarIndex {\n pub fn new(mut reader: bcf::Reader, max_dist: u64) -> Result {\n let mut inner: HashMap, BTreeMap>> = HashMap::new();\n let mut i = 0;\n let mut rec = reader.empty_record();\n loop {\n match reader.read(&mut rec) {\n Some(Ok(())) => (),\n None => break,\n Some(Err(e)) => bail!(e),\n };\n if let Some(rid) = rec.rid() {\n let chrom = reader.header().rid2name(rid)?;\n let recs = inner.entry(chrom.to_owned()).or_insert_with(BTreeMap::new);\n recs.entry(rec.pos() as u64)\n .or_insert_with(Vec::new)\n .push(Variant::new(&mut rec, &mut i)?);\n //recs.insert(rec.pos(), Variant::new(&mut rec, &mut i)?);\n } else {\n // skip records without rid\n let alt_count = rec.alleles().len() as u32 - 1;\n i += alt_count;\n }\n }\n\n Ok(VarIndex { inner, max_dist })\n }\n\n pub fn range(&self, chrom: &[u8], pos: u64) -> Option>> {\n self.inner\n .get(chrom)\n .map(|recs| recs.range(pos.saturating_sub(self.max_dist)..pos + self.max_dist))\n }\n}\n\npub fn match_variants>(matchbcf: P, max_dist: u32, max_len_diff: u32) -> Result<()> {\n let mut inbcf = bcf::Reader::from_stdin()?;\n let mut header = bcf::Header::from_template(inbcf.header());\n\n header.push_record(\n format!('##INFO==0 points to the i-th variant in the VCF/BCF (counting each \\n alternative allele separately). For indels, matching is fuzzy: distance of centres <= {}, difference of \\n lengths <= {}\'>', max_dist, max_len_diff).as_bytes()\n );\n let mut outbcf = bcf::Writer::from_path(&'-', &header, false, Format::Bcf)?;\n let index = VarIndex::new(bcf::Reader::from_path(matchbcf)?, max_dist as u64)?;\n\n let mut rec = inbcf.empty_record();\n let mut i = 0;\n loop {\n match inbcf.read(&mut rec) {\n Some(Ok(())) => (),\n None => break,\n Some(Err(e)) => bail!(e),\n };\n outbcf.translate(&mut rec);\n\n if let Some(rid) = rec.rid() {\n let chrom = inbcf.header().rid2name(rid)?;\n let pos = rec.pos();\n\n let var = Variant::new(&mut rec, &mut i)?;\n let matching = var\n .alleles\n .iter()\n .map(|a| {\n if let Some(range) = index.range(chrom, pos as u64) {\n for v in range.flat_map(|(_, idx_vars)| idx_vars) {\n if let Some(id) = var.matches(v, a, max_dist, max_len_diff) {\n return id as i32;\n }\n }\n }\n -1\n })\n .collect_vec();\n\n rec.push_info_integer(b'MATCHING', &matching)?;\n }\n outbcf.write(&rec)?;\n\n if (i) % 1000 == 0 {\n info!('{} variants written.', i);\n }\n }\n info!('{} variants written.', i);\n\n Ok(())\n}\n\n#[derive(Debug)]\npub struct Variant {\n id: u32,\n pos: u64,\n alleles: Vec,\n}\n\nimpl Variant {\n pub fn new(rec: &mut bcf::Record, id: &mut u32) -> Result {\n let pos = rec.pos();\n\n let svlens = if let Ok(Some(svlens)) = rec.info(b'SVLEN').integer() {\n Some(svlens.iter().map(|l| l.abs() as u32).collect_vec())\n } else {\n None\n };\n let svtype = if let Ok(Some(svtype)) = rec.info(b'SVTYPE').string() {\n Some(svtype[0].to_owned())\n } else {\n None\n };\n let end = if let Ok(Some(end)) = rec.info(b'END').integer() {\n Some(end[0] as u32)\n } else {\n None\n };\n let inslen = if let Ok(Some(inslen)) = rec.info(b'INSLEN').integer() {\n Some(inslen[0] as u32)\n } else {\n None\n };\n let alleles = rec.alleles();\n let refallele = alleles[0];\n\n let _alleles: Vec = if 
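VarIndex answers "which indexed variants lie within max_dist of this position?" by keeping one BTreeMap per chromosome keyed on position and querying it with a half-open range. A reduced sketch over plain std types, with a String payload standing in for the Variant records:

```rust
use std::collections::{BTreeMap, HashMap};

/// Position index: chromosome -> (position -> records at that position).
struct PosIndex {
    inner: HashMap<String, BTreeMap<u64, Vec<String>>>,
    max_dist: u64,
}

impl PosIndex {
    fn insert(&mut self, chrom: &str, pos: u64, record: String) {
        self.inner
            .entry(chrom.to_owned())
            .or_insert_with(BTreeMap::new)
            .entry(pos)
            .or_insert_with(Vec::new)
            .push(record);
    }

    /// All records within +/- max_dist of `pos`, in positional order.
    fn nearby(&self, chrom: &str, pos: u64) -> Vec<&String> {
        self.inner
            .get(chrom)
            .map(|recs| {
                recs.range(pos.saturating_sub(self.max_dist)..pos + self.max_dist)
                    .flat_map(|(_, v)| v)
                    .collect()
            })
            .unwrap_or_default()
    }
}

fn main() {
    let mut index = PosIndex { inner: HashMap::new(), max_dist: 50 };
    index.insert("chr1", 100, "del_100".to_owned());
    index.insert("chr1", 500, "snv_500".to_owned());
    assert_eq!(index.nearby("chr1", 120), vec![&"del_100".to_owned()]);
    assert!(index.nearby("chr1", 300).is_empty());
}
```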
let Some(svtype) = svtype {\n vec![if svtype == b'INS' {\n match (svlens, inslen) {\n (Some(svlens), _) => VariantType::Insertion(svlens[0] as u64),\n (None, Some(inslen)) => VariantType::Insertion(inslen as u64),\n _ => {\n warn!('Unsupported variant INS without SVLEN or INSLEN');\n VariantType::Unsupported\n }\n }\n } else if svtype == b'DEL' {\n let svlen = match (svlens, end) {\n (Some(svlens), _) => svlens[0] as u64,\n (None, Some(end)) => end as u64 - 1 - pos as u64,\n _ => {\n bail!(MatchError::MissingTag {\n tag: 'SVLEN or END'.to_owned()\n });\n }\n };\n VariantType::Deletion(svlen)\n } else {\n warn!('Unsupported variant {}', str::from_utf8(&svtype)?);\n VariantType::Unsupported\n }]\n } else {\n let mut _alleles = Vec::with_capacity(alleles.len() - 1);\n for (i, a) in alleles[1..].iter().enumerate() {\n _alleles.push(if a == b'' {\n if let Some(ref svlens) = svlens {\n VariantType::Deletion(svlens[i] as u64)\n } else {\n bail!(MatchError::MissingTag {\n tag: 'SVLEN'.to_owned()\n });\n }\n } else if a.len() < refallele.len() {\n VariantType::Deletion((refallele.len() - a.len()) as u64)\n } else if a.len() > refallele.len() {\n VariantType::Insertion((a.len() - refallele.len()) as u64)\n } else if a.len() == 1 {\n VariantType::Snv(a[0])\n } else {\n warn!(\n 'Unsupported variant {} -> {}',\n str::from_utf8(refallele)?,\n str::from_utf8(a)?\n );\n VariantType::Unsupported\n });\n }\n _alleles\n };\n let var = Variant {\n id: *id,\n pos: pos as u64,\n alleles: _alleles,\n };\n *id += alleles.len() as u32 - 1;\n Ok(var)\n }\n\n pub fn centerpoint(&self, allele: &VariantType) -> u64 {\n match *allele {\n VariantType::Snv(_) => self.pos,\n VariantType::Insertion(_) => self.pos,\n VariantType::Deletion(len) => (self.pos as f64 + len as f64 / 2.0) as u64,\n VariantType::Unsupported => panic!('Unsupported variant.'),\n }\n }\n\n pub fn matches(\n &self,\n other: &Variant,\n allele: &VariantType,\n max_dist: u32,\n max_len_diff: u32,\n ) -> Option {\n if allele.is_unsupported() {\n return None;\n }\n for (j, b) in other.alleles.iter().enumerate() {\n if b.is_unsupported() {\n continue;\n }\n let dist = (self.centerpoint(allele) as i32 - other.centerpoint(b) as i32).abs() as u32;\n match (allele, b) {\n (&VariantType::Snv(a), &VariantType::Snv(b)) => {\n if a == b && dist == 0 {\n return Some(other.id(j));\n }\n }\n (&VariantType::Insertion(l1), &VariantType::Insertion(l2))\n | (&VariantType::Deletion(l1), &VariantType::Deletion(l2)) => {\n if (l1 as i32 - l2 as i32).abs() as u32 <= max_len_diff && dist <= max_dist {\n return Some(other.id(j));\n }\n }\n // TODO: for now, ignore complex variants\n _ => continue,\n }\n }\n None\n }\n\n pub fn id(&self, allele: usize) -> u32 {\n self.id + allele as u32\n }\n}\n\n#[derive(Debug)]\npub enum VariantType {\n Snv(u8),\n Insertion(u64),\n Deletion(u64),\n Unsupported,\n}\n\nimpl VariantType {\n pub fn is_unsupported(&self) -> bool {\n matches!(self, &VariantType::Unsupported)\n }\n}\n\n#[derive(Error, Debug)]\npub enum MatchError {\n #[error('missing tag {tag}')]\n MissingTag { tag: String },\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/mod.rs","//! Tools that work on VCF and BCF files.\npub mod annotate_dgidb;\npub mod baf;\npub mod fix_iupac_alleles;\npub mod match_variants;\npub mod report;\npub mod split;\npub mod to_txt;\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/to_txt.rs","//! Create a variant table from a VCF file.\n//!\n//! ## Usage:\n//! ```bash\n//! 
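Matching above is exact for SNVs and fuzzy for indels: two insertions or two deletions match when their centerpoints are at most max_dist apart and their lengths differ by at most max_len_diff. A compact sketch of that predicate, with the enum reduced to the relevant variants and illustrative names:

```rust
#[derive(Clone, Copy)]
enum Allele {
    Snv { pos: u64, base: u8 },
    Insertion { pos: u64, len: u64 },
    Deletion { pos: u64, len: u64 },
}

impl Allele {
    /// Centre of the affected interval; SNVs and insertions are points,
    /// deletions span `len` bases starting at `pos`.
    fn centerpoint(&self) -> u64 {
        match *self {
            Allele::Snv { pos, .. } | Allele::Insertion { pos, .. } => pos,
            Allele::Deletion { pos, len } => (pos as f64 + len as f64 / 2.0) as u64,
        }
    }

    fn matches(&self, other: &Allele, max_dist: u64, max_len_diff: u64) -> bool {
        let dist = self.centerpoint().abs_diff(other.centerpoint());
        match (*self, *other) {
            (Allele::Snv { base: a, .. }, Allele::Snv { base: b, .. }) => a == b && dist == 0,
            (Allele::Insertion { len: l1, .. }, Allele::Insertion { len: l2, .. })
            | (Allele::Deletion { len: l1, .. }, Allele::Deletion { len: l2, .. }) => {
                l1.abs_diff(l2) <= max_len_diff && dist <= max_dist
            }
            _ => false, // complex / mixed variant types are not matched
        }
    }
}

fn main() {
    let a = Allele::Deletion { pos: 1000, len: 20 };
    let b = Allele::Deletion { pos: 1010, len: 25 };
    assert!(a.matches(&b, 50, 20));
    assert!(!a.matches(&Allele::Snv { pos: 1000, base: b'A' }, 50, 20));
}
```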
$ rbt vcf-to-txt --genotypes --fmt S --info T X SOMATIC < tests/test.vcf > tests/variant-table.txt\n//! ```\n//!\nuse anyhow::{bail, Result};\nuse derive_new::new;\nuse itertools::Itertools;\nuse rust_htslib::bcf;\nuse rust_htslib::bcf::record::Numeric;\nuse rust_htslib::bcf::Read;\nuse std::io;\nuse std::io::Write;\nuse std::str;\nuse thiserror::Error;\n\n#[derive(new)]\npub struct Writer {\n inner: io::BufWriter,\n #[new(value = '0')]\n field_count: usize,\n}\n\nimpl Writer {\n fn write_integer(&mut self, value: i32) -> Result<()> {\n let fmt = if value.is_missing() {\n ''.to_owned()\n } else {\n format!('{}', value)\n };\n self.write_field(fmt.as_bytes())\n }\n\n fn write_float(&mut self, value: f32) -> Result<()> {\n let fmt = if value.is_missing() {\n ''.to_owned()\n } else {\n format!('{}', value)\n };\n self.write_field(fmt.as_bytes())\n }\n\n fn write_flag(&mut self, value: bool) -> Result<()> {\n self.write_field(format!('{}', value).as_bytes())\n }\n\n fn write_field(&mut self, value: &[u8]) -> Result<()> {\n if self.field_count > 0 {\n self.inner.write_all(b'\t')?;\n }\n self.inner.write_all(value)?;\n self.field_count += 1;\n Ok(())\n }\n\n fn newline(&mut self) -> Result<()> {\n self.inner.write_all(b'\n')?;\n self.field_count = 0;\n Ok(())\n }\n}\n\nconst HEADER_COMMON: &[u8] = b'VARIANT';\n\npub fn to_txt(\n info_tags: &[&str],\n format_tags: &[&str],\n show_genotypes: bool,\n show_filter: bool,\n) -> Result<()> {\n let mut reader = bcf::Reader::from_stdin()?;\n let mut writer = Writer::new(io::BufWriter::new(io::stdout()));\n\n let mut common_n = 5 + info_tags.len();\n if show_filter {\n common_n += 1\n }\n writer.write_field(HEADER_COMMON)?;\n for _ in 1..common_n {\n writer.write_field(HEADER_COMMON)?;\n }\n let show_samples = show_genotypes || !format_tags.is_empty();\n if show_samples {\n for sample in reader.header().samples() {\n writer.write_field(sample)?;\n for _ in 1..format_tags.len() + show_genotypes as usize {\n writer.write_field(sample)?;\n }\n }\n }\n writer.newline()?;\n writer.write_field(b'CHROM')?;\n writer.write_field(b'POS')?;\n writer.write_field(b'REF')?;\n writer.write_field(b'ALT')?;\n writer.write_field(b'QUAL')?;\n if show_filter {\n writer.write_field(b'FILTER')?;\n }\n for name in info_tags {\n writer.write_field(name.as_bytes())?;\n }\n if show_samples {\n for _ in 0..reader.header().sample_count() {\n if show_genotypes {\n writer.write_field(b'GT')?;\n }\n for name in format_tags {\n writer.write_field(name.as_bytes())?;\n }\n }\n }\n writer.newline()?;\n let mut rec = reader.empty_record();\n loop {\n match reader.read(&mut rec) {\n Some(Ok(())) => (),\n None => break,\n Some(Err(e)) => bail!(e),\n };\n let alleles = rec\n .alleles()\n .into_iter()\n .map(|a| a.to_owned())\n .collect_vec();\n for (i, allele) in alleles[1..].iter().enumerate() {\n writer.write_field(reader.header().rid2name(rec.rid().unwrap())?)?;\n writer.write_integer(rec.pos() as i32 + 1)?;\n writer.write_field(&alleles[0])?;\n writer.write_field(allele)?;\n match rec.qual() {\n q if q.is_missing() => writer.write_field(b'')?,\n q => writer.write_float(q)?,\n }\n\n if show_filter {\n if rec.has_filter('.'.as_bytes()) {\n writer.write_field(b'')?\n } else if rec.has_filter('PASS'.as_bytes()) {\n writer.write_field(b'PASS')?\n } else {\n let mut filters = Vec::new();\n for (i, filter) in rec.filters().enumerate() {\n if i != 0 {\n filters.push(b';');\n }\n filters.extend_from_slice(&reader.header().id_to_name(filter));\n }\n writer.write_field(&filters)?;\n }\n }\n\n for 
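The Writer above produces the tab-separated table by prefixing every field after the first with a tab and resetting the field counter on newline. The same pattern, stripped of the VCF specifics, over any io::Write sink; a sketch, not the tool's API:

```rust
use std::io::{self, Write};

/// Minimal tab-separated writer: fields on one row are joined by tabs,
/// `newline` ends the row and resets the separator state.
struct TsvWriter<W: Write> {
    inner: W,
    field_count: usize,
}

impl<W: Write> TsvWriter<W> {
    fn new(inner: W) -> Self {
        TsvWriter { inner, field_count: 0 }
    }

    fn write_field(&mut self, value: &[u8]) -> io::Result<()> {
        if self.field_count > 0 {
            self.inner.write_all(b"\t")?;
        }
        self.inner.write_all(value)?;
        self.field_count += 1;
        Ok(())
    }

    fn newline(&mut self) -> io::Result<()> {
        self.inner.write_all(b"\n")?;
        self.field_count = 0;
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let mut w = TsvWriter::new(io::BufWriter::new(io::stdout()));
    for field in [b"CHROM" as &[u8], b"POS", b"REF", b"ALT"] {
        w.write_field(field)?;
    }
    w.newline()?;
    w.write_field(b"chr1")?;
    w.write_field(b"12345")?;
    w.write_field(b"A")?;
    w.write_field(b"T")?;
    w.newline()
}
```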
name in info_tags {\n let _name = name.as_bytes();\n if let Ok((tag_type, tag_length)) = rec.header().info_type(_name) {\n let get_idx = || match tag_length {\n bcf::header::TagLength::Fixed(_) => Ok(0),\n bcf::header::TagLength::AltAlleles => Ok(i),\n bcf::header::TagLength::Alleles => Ok(i + 1),\n bcf::header::TagLength::Variable => Ok(0),\n _ => Err(Box::new(ParseError::UnsupportedTagLength)),\n };\n\n match tag_type {\n bcf::header::TagType::Flag => {\n writer.write_flag(rec.info(_name).flag()?)?;\n }\n bcf::header::TagType::Integer => {\n let i = get_idx()?;\n if let Some(values) = rec.info(_name).integer()? {\n writer.write_integer(values[i])?;\n } else {\n writer.write_field(b'')?;\n }\n }\n bcf::header::TagType::Float => {\n let i = get_idx()?;\n if let Some(values) = rec.info(_name).float()? {\n writer.write_float(values[i])?;\n } else {\n writer.write_field(b'')?;\n }\n }\n bcf::header::TagType::String => {\n let i = get_idx()?;\n if let Some(values) = rec.info(_name).string()? {\n writer.write_field(values[i])?;\n } else {\n writer.write_field(b'')?;\n }\n }\n }\n } else {\n // tag undefined, write NA\n writer.write_field(b'')?;\n }\n }\n\n let genotypes = if show_genotypes {\n let genotypes = rec.genotypes()?;\n\n Some(\n (0..reader.header().sample_count() as usize)\n .map(|s| format!('{}', genotypes.get(s)))\n .collect_vec(),\n )\n } else {\n None\n };\n\n for s in 0..reader.header().sample_count() as usize {\n if let Some(ref genotypes) = genotypes {\n writer.write_field(genotypes[s].as_bytes())?;\n }\n for name in format_tags {\n let _name = name.as_bytes();\n if let Ok((tag_type, tag_length)) = reader.header().format_type(_name) {\n let i = match tag_length {\n bcf::header::TagLength::Fixed(_) => 0,\n bcf::header::TagLength::AltAlleles => i,\n bcf::header::TagLength::Alleles => i + 1,\n _ => bail!(ParseError::UnsupportedTagLength),\n };\n\n match tag_type {\n bcf::header::TagType::Flag => {\n panic!('Unable to find FORMAT \'{0}\' in the input file! 
Is \'{0}\' an INFO tag?', name);\n }\n bcf::header::TagType::Integer => {\n writer.write_field(\n format!('{}', rec.format(_name).integer()?[s][i]).as_bytes(),\n )?;\n }\n bcf::header::TagType::Float => {\n writer.write_field(\n format!('{}', rec.format(_name).float()?[s][i]).as_bytes(),\n )?;\n }\n bcf::header::TagType::String => {\n writer.write_field(rec.format(_name).string()?[s])?;\n }\n }\n } else {\n // tag undefined, write NA\n writer.write_field(b'')?;\n }\n }\n }\n writer.newline()?;\n }\n }\n\n Ok(())\n}\n\n#[derive(Error, Debug)]\npub enum ParseError {\n #[error('currently, only R, A, and 1 are supported multiplicities of tags')]\n UnsupportedTagLength,\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/annotate_dgidb.rs","use anyhow::Result;\nuse itertools::Itertools;\nuse regex::Regex;\nuse rust_htslib::bcf;\nuse rust_htslib::bcf::{Format, Read};\nuse serde::{Deserialize, Serialize};\nuse std::collections::{HashMap, HashSet};\nuse std::path::Path;\nuse std::str;\n\n#[derive(Serialize, Deserialize, Debug)]\nstruct Dgidb {\n #[serde(rename = 'matchedTerms')]\n matched_terms: Vec,\n}\n\n#[derive(Serialize, Deserialize, Debug)]\nstruct MatchedTerm {\n #[serde(rename = 'geneName')]\n gene_name: String,\n interactions: Vec,\n}\n\n#[derive(Serialize, Deserialize, Debug)]\nstruct Interaction {\n #[serde(rename = 'drugName')]\n drug_name: String,\n #[serde(rename = 'interactionTypes')]\n interaction_types: Vec,\n}\n\npub fn annotate_dgidb, T: AsRef>(\n vcf_path: P,\n api_path: String,\n field_name: &str,\n datasources: Option<&[T]>,\n genes_per_request: usize,\n) -> Result<()> {\n let datasources = datasources.map(|d| d.iter().map(|s| s.as_ref()).collect());\n let gene_drug_interactions =\n request_interaction_drugs(vcf_path.as_ref(), api_path, datasources, genes_per_request)?;\n modify_vcf_entries(vcf_path.as_ref(), gene_drug_interactions, field_name)\n}\n\ntype Interactions = HashMap)>>;\n\nfn request_interaction_drugs>(\n vcf_path: P,\n api_path: String,\n datasources_opt: Option>,\n genes_per_request: usize,\n) -> Result> {\n let mut genes = collect_genes(vcf_path)?;\n let datasources = if let Some(entries) = datasources_opt {\n let mut b = String::from('&interaction_sources=');\n b.push_str(entries.join(',').as_str());\n b\n } else {\n String::new()\n };\n if genes.is_empty() {\n return Ok(None);\n }\n let mut gene_drug_interactions: HashMap)>> = HashMap::new();\n for gene_slice in genes.drain().collect_vec().chunks(genes_per_request) {\n let mut slice_api_path = api_path.clone();\n slice_api_path.push_str(gene_slice.join(',').as_str());\n slice_api_path.push_str(datasources.as_str());\n let res: Dgidb = reqwest::get(&slice_api_path)?.json()?;\n\n for term in res.matched_terms {\n if !term.interactions.is_empty() {\n gene_drug_interactions.insert(\n term.gene_name,\n term.interactions\n .iter()\n .map(|interaction| {\n (\n interaction.drug_name.clone(),\n interaction.interaction_types.clone(),\n )\n })\n .collect(),\n );\n }\n }\n }\n Ok(Some(gene_drug_interactions))\n}\n\nfn collect_genes>(vcf_path: P) -> Result> {\n let mut total_genes = HashSet::new();\n let mut reader = bcf::Reader::from_path(vcf_path)?;\n for result in reader.records() {\n let mut rec = result?;\n let genes_opt = extract_genes(&mut rec)?;\n if let Some(genes) = genes_opt {\n for gene in genes {\n total_genes.insert(gene);\n }\n }\n }\n Ok(total_genes)\n}\n\nfn extract_genes(rec: &mut bcf::Record) -> Result + '_>> {\n let annotation = rec.info(b'ANN').string()?;\n match annotation {\n Some(transcripts) => 
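The DGIdb response is deserialized into the structs above (matchedTerms, geneName, drugName, interactionTypes). A sketch of parsing such a payload from a JSON string with the same serde renames; the sample payload is illustrative, not a real API response:

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Dgidb {
    #[serde(rename = "matchedTerms")]
    matched_terms: Vec<MatchedTerm>,
}

#[derive(Deserialize, Debug)]
struct MatchedTerm {
    #[serde(rename = "geneName")]
    gene_name: String,
    interactions: Vec<Interaction>,
}

#[derive(Deserialize, Debug)]
struct Interaction {
    #[serde(rename = "drugName")]
    drug_name: String,
    #[serde(rename = "interactionTypes")]
    interaction_types: Vec<String>,
}

fn main() -> serde_json::Result<()> {
    // Illustrative payload with the shape the annotate-dgidb code expects.
    let payload = r#"{
        "matchedTerms": [
            { "geneName": "BRAF",
              "interactions": [ { "drugName": "VEMURAFENIB", "interactionTypes": ["inhibitor"] } ] }
        ]
    }"#;
    let parsed: Dgidb = serde_json::from_str(payload)?;
    for term in &parsed.matched_terms {
        for interaction in &term.interactions {
            println!("{}|{}|{}", term.gene_name, interaction.drug_name,
                     interaction.interaction_types.join(","));
        }
    }
    Ok(())
}
```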
Ok(Some(transcripts.clone().into_iter().map(|transcript| {\n str::from_utf8(transcript.split(|c| *c == b'|').nth(3).unwrap())\n .unwrap()\n .to_owned()\n }))),\n None => Ok(None),\n }\n}\n\nfn modify_vcf_entries>(\n vcf_path: P,\n gene_drug_interactions_opt: Option,\n field_name: &str,\n) -> Result<()> {\n let mut reader = bcf::Reader::from_path(vcf_path)?;\n let mut header = bcf::header::Header::from_template(reader.header());\n header.push_record(format!('##INFO=', field_name).as_bytes());\n let mut writer = bcf::Writer::from_stdout(&header, true, Format::Bcf)?;\n match gene_drug_interactions_opt {\n None => {\n for result in reader.records() {\n let mut rec = result?;\n writer.translate(&mut rec);\n writer.write(&rec)?;\n }\n }\n Some(gene_drug_interactions) => {\n for result in reader.records() {\n let mut rec = result?;\n writer.translate(&mut rec);\n let genes = extract_genes(&mut rec)?.map(|genes| genes.collect_vec());\n if let Some(mut genes) = genes {\n genes.sort();\n genes.dedup();\n let field_entries = build_dgidb_field(&gene_drug_interactions, genes);\n let field_entries: Vec<&[u8]> =\n field_entries.iter().map(|v| v.as_slice()).collect();\n rec.push_info_string(field_name.as_bytes(), &field_entries[..])?;\n }\n writer.write(&rec)?;\n }\n }\n }\n Ok(())\n}\n\nfn build_dgidb_field(\n gene_drug_interactions: &HashMap)>>,\n genes: Vec,\n) -> Vec> {\n let mut field_entries: Vec> = Vec::new();\n let re = Regex::new(r'\s\(\w+\)').unwrap();\n for gene in genes.iter() {\n match gene_drug_interactions.get(gene) {\n Some(drug_interactions) => {\n drug_interactions\n .iter()\n .for_each(|(drug, interaction_types)| {\n if !interaction_types.is_empty() {\n interaction_types.iter().for_each(|interaction_type| {\n field_entries.push(\n format!(\n '{g}|{d}|{t}',\n g = gene,\n d = re.replace(drug, ''),\n t = interaction_type\n )\n .as_bytes()\n .to_vec(),\n )\n })\n } else {\n field_entries.push(\n format!('{g}|{d}|.', g = gene, d = re.replace(drug, ''))\n .as_bytes()\n .to_vec(),\n )\n }\n });\n }\n None => field_entries.push(format!('{g}|.|.', g = gene).as_bytes().to_vec()),\n }\n }\n field_entries\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/fix_iupac_alleles.rs","use anyhow::Result;\nuse bio::alphabets::dna::n_alphabet;\nuse itertools::Itertools;\nuse rust_htslib::bcf::{self, Format, Read};\n\npub fn fix_iupac_alleles() -> Result<()> {\n let mut inbcf = bcf::Reader::from_stdin()?;\n let mut outbcf = bcf::Writer::from_stdout(\n &bcf::Header::from_template(inbcf.header()),\n false,\n Format::Bcf,\n )?;\n let valid_alphabet = n_alphabet();\n\n for res in inbcf.records() {\n let mut rec = res?;\n\n let alleles = rec.alleles();\n if !alleles.iter().all(|allele| valid_alphabet.is_word(*allele)) {\n let fixed = alleles\n .into_iter()\n .map(|allele| {\n allele\n .iter()\n .map(|base| {\n if valid_alphabet.is_word(&[*base]) {\n *base\n } else {\n b'N'\n }\n })\n .collect_vec()\n })\n .collect_vec();\n\n rec.set_alleles(&fixed.iter().map(|allele| allele.as_slice()).collect_vec())?;\n }\n\n outbcf.write(&rec)?;\n }\n\n Ok(())\n}\n","mit" "rust-bio-tools","./rust-bio-tools/src/bcf/baf.rs","//! Compute the B-allele frequencies for a given VCF file.\n//!\n//! ## Usage:\n//! ```bash\n//! $ rbt vcf-baf < tests/test-freebayes.vcf > tests/baf.\n//! 
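fix_iupac_alleles rewrites any base outside the N-alphabet (A, C, G, T, N in either case), for example IUPAC ambiguity codes like R or Y, to N before writing the record back out. The same substitution over plain byte slices, without the htslib plumbing; the real code uses bio::alphabets::dna::n_alphabet for the membership test:

```rust
/// Replace every base that is not A, C, G, T or N (upper- or lower-case)
/// with 'N', mirroring what rbt vcf-fix-iupac-alleles does per allele.
fn fix_allele(allele: &[u8]) -> Vec<u8> {
    allele
        .iter()
        .map(|base| if b"ACGTNacgtn".contains(base) { *base } else { b'N' })
        .collect()
}

fn main() {
    assert_eq!(fix_allele(b"ACGT"), b"ACGT");
    // IUPAC codes such as R (A/G) and Y (C/T) are collapsed to N.
    assert_eq!(fix_allele(b"ARYT"), b"ANNT");
}
```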
```\n//!\nuse anyhow::Result;\nuse itertools::repeat_n;\nuse itertools::Itertools;\nuse rust_htslib::bcf;\nuse rust_htslib::bcf::record::Numeric;\nuse rust_htslib::bcf::{Format, Read};\nuse std::f32;\n\npub fn calculate_baf() -> Result<()> {\n let mut reader = bcf::Reader::from_stdin()?;\n\n let mut header = bcf::Header::from_template(reader.header());\n header.push_record(b'##FORMAT=');\n\n let mut writer = bcf::Writer::from_stdout(&header, false, Format::Bcf)?;\n\n for record in reader.records() {\n let mut record = record?;\n\n let allele_lens = record.alleles().iter().map(|a| a.len()).collect_vec();\n let mut bafs = Vec::new();\n {\n let ref_depths = record\n .format(b'RO')\n .integer()?\n .clone()\n .into_iter()\n .map(|d| d.to_owned())\n .collect_vec();\n let alt_depths = record.format(b'AO').integer()?;\n\n for (sample_ref_depth, sample_alt_depth) in ref_depths.iter().zip(alt_depths.iter()) {\n if allele_lens[0] != 1 || sample_ref_depth[0].is_missing() {\n bafs.extend(repeat_n(f32::missing(), allele_lens.len() - 1));\n } else {\n let total_depth = sample_ref_depth[0] + sample_alt_depth.iter().sum::();\n bafs.extend(allele_lens[1..].iter().zip(sample_alt_depth.iter()).map(\n |(alen, d)| {\n if *alen == 1 {\n *d as f32 / total_depth as f32\n } else {\n f32::missing()\n }\n },\n ));\n };\n }\n }\n writer.translate(&mut record);\n record.push_format_float(b'BAF', &bafs)?;\n writer.write(&record)?;\n }\n\n Ok(())\n}\n","mit"
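Per sample, the B-allele frequency written above is the alt observation count divided by the total depth (RO plus the sum of AO over all alt alleles), and non-SNV alleles are reported as missing. A plain-arithmetic sketch of that formula, with f32::NAN standing in for htslib's missing float:

```rust
/// B-allele frequencies for one sample: one value per ALT allele.
/// `allele_lens[0]` is the REF length; only length-1 (SNV) alleles get a BAF.
fn sample_bafs(allele_lens: &[usize], ref_depth: i32, alt_depths: &[i32]) -> Vec<f32> {
    if allele_lens[0] != 1 {
        return vec![f32::NAN; alt_depths.len()];
    }
    let total_depth = ref_depth + alt_depths.iter().sum::<i32>();
    allele_lens[1..]
        .iter()
        .zip(alt_depths)
        .map(|(alen, d)| {
            if *alen == 1 && total_depth > 0 {
                *d as f32 / total_depth as f32
            } else {
                f32::NAN
            }
        })
        .collect()
}

fn main() {
    // REF "A" (len 1), ALTs "T" and "TTG": RO = 30, AO = [10, 5].
    let bafs = sample_bafs(&[1, 1, 3], 30, &[10, 5]);
    assert!((bafs[0] - 10.0 / 45.0).abs() < 1e-6);
    assert!(bafs[1].is_nan()); // non-SNV allele -> missing
}
```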