import gzip
import logging
import os
import re
import shutil
import ssl
import urllib.request
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List

import datasets
import pandas as pd
from datasets import DatasetInfo
from pyfaidx import Fasta
from tqdm import tqdm

ssl._create_default_https_context = ssl._create_unverified_context


"""
--------------------------------------------------------------------------------------------
Reference Genome URLs:
--------------------------------------------------------------------------------------------
"""
H38_REFERENCE_GENOME_URL = (
    "https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz"
)

"""
--------------------------------------------------------------------------------------------
Task Specific Handlers:
--------------------------------------------------------------------------------------------
"""

logger = logging.getLogger("multi_omics_transcript_expression")
logger.setLevel("INFO")

LABELS_V1 = [
    "Adipose Tissue",
    "Adrenal Gland",
    "Bladder",
    "Blood",
    "Blood Vessel",
    "Brain",
    "Breast",
    "Cervix Uteri",
    "Colon",
    "Esophagus",
    "Fallopian Tube",
    "Heart",
    "Kidney",
    "Liver",
    "Lung",
    "Muscle",
    "Nerve",
    "Ovary",
    "Pancreas",
    "Pituitary",
    "Prostate",
    "Salivary Gland",
    "Skin",
    "Small Intestine",
    "Spleen",
    "Stomach",
    "Testis",
    "Thyroid",
    "Uterus",
    "Vagina",
]

LABELS_V2 = [
    "Adipose_Subcutaneous",
    "Adipose_Visceral (Omentum)",
    "Adrenal Gland",
    "Artery_Aorta",
    "Artery_Coronary",
    "Artery_Tibial",
    "Bladder",
    "Brain_Amygdala",
    "Brain_Anterior cingulate cortex (BA24)",
    "Brain_Caudate (basal ganglia)",
    "Brain_Cerebellar Hemisphere",
    "Brain_Cerebellum",
    "Brain_Cortex",
    "Brain_Frontal Cortex (BA9)",
    "Brain_Hippocampus",
    "Brain_Hypothalamus",
    "Brain_Nucleus accumbens (basal ganglia)",
    "Brain_Putamen (basal ganglia)",
    "Brain_Spinal cord (cervical c-1)",
    "Brain_Substantia nigra",
    "Breast_Mammary Tissue",
    "Cells_Cultured fibroblasts",
    "Cells_EBV-transformed lymphocytes",
    "Cervix_Ectocervix",
    "Cervix_Endocervix",
    "Colon_Sigmoid",
    "Colon_Transverse",
    "Esophagus_Gastroesophageal Junction",
    "Esophagus_Mucosa",
    "Esophagus_Muscularis",
    "Fallopian Tube",
    "Heart_Atrial Appendage",
    "Heart_Left Ventricle",
    "Kidney_Cortex",
    "Kidney_Medulla",
    "Liver",
    "Lung",
    "Minor Salivary Gland",
    "Muscle_Skeletal",
    "Nerve_Tibial",
    "Ovary",
    "Pancreas",
    "Pituitary",
    "Prostate",
    "Skin_Not Sun Exposed (Suprapubic)",
    "Skin_Sun Exposed (Lower leg)",
    "Small Intestine_Terminal Ileum",
    "Spleen",
    "Stomach",
    "Testis",
    "Thyroid",
    "Uterus",
    "Vagina",
    "Whole Blood",
]

LABELS_LIGHT = [
    "Adipose Tissue",
    "Brain",
    "Heart",
    "Liver",
    "Lung",
    "Muscle",
    "Pancreas",
    "Skin",
]


class GenomicLRATaskHandler(ABC):
    """
    Abstract base class for Genomic LRA task handlers. Each handler encapsulates
    the dataset info, file downloads, and example generation for a single task.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        pass

    @abstractmethod
    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the task.
        """
        pass

    def split_generators(
        self, dl_manager, cache_dir_root
    ) -> List[datasets.SplitGenerator]:
        """
        Downloads required files using dl_manager and separates them by split.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"handler": self, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"handler": self, "split": "test"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # "valid" matches the split names documented in _generate_examples
                # ('train', 'test', 'valid').
                gen_kwargs={"handler": self, "split": "valid"},
            ),
        ]

    @abstractmethod
    def generate_examples(self, split):
        """
        A generator that yields examples for the specified split.
        """
        pass

    @staticmethod
    def hook(t):
        """
        Wraps a tqdm progress bar `t` in a reporthook suitable for
        urllib.request.urlretrieve.
        """
        last_b = [0]

        def inner(b=1, bsize=1, tsize=None):
            """
            b : int, optional
                Number of blocks transferred so far [default: 1].
            bsize : int, optional
                Size of each block (in tqdm units) [default: 1].
            tsize : int, optional
                Total size (in tqdm units). If None (the default), the total
                is left unchanged.
            """
            if tsize is not None:
                t.total = tsize
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b

        return inner
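
    # Note on the reporthook above (illustrative numbers): if urlretrieve calls
    # inner(b=10, bsize=8192) after a previous call with b=4, the progress bar
    # advances by (10 - 4) * 8192 = 49152 bytes, and last_b is updated to 10 so
    # the next call only adds the newly transferred blocks.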

    def download_and_extract_gz(self, file_url, cache_dir_root):
        """
        Downloads and extracts a gz file into the given cache directory.
        Returns the full path of the extracted file.
        Args:
            file_url: URL of the gz file to be downloaded and extracted.
            cache_dir_root: Directory to extract the file into.
        """
        file_fname = Path(file_url).stem
        file_complete_path = os.path.join(cache_dir_root, "downloads", file_fname)

        if not os.path.exists(file_complete_path):
            if not os.path.exists(file_complete_path + ".gz"):
                os.makedirs(os.path.dirname(file_complete_path), exist_ok=True)
                with tqdm(
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    miniters=1,
                    desc=file_url.split("/")[-1],
                ) as t:
                    urllib.request.urlretrieve(
                        file_url, file_complete_path + ".gz", reporthook=self.hook(t)
                    )
            with gzip.open(file_complete_path + ".gz", "rb") as file_in:
                with open(file_complete_path, "wb") as file_out:
                    shutil.copyfileobj(file_in, file_out)
        return file_complete_path
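
    # Usage sketch (the concrete handler, cache path, and download size are
    # illustrative; the hg38 archive is roughly 1 GB):
    #
    #     handler = TranscriptExpressionHandler(light_version=True)
    #     fasta_path = handler.download_and_extract_gz(
    #         H38_REFERENCE_GENOME_URL, cache_dir_root="/tmp/genomic_lra_cache"
    #     )
    #     genome = Fasta(fasta_path, one_based_attributes=False)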


class TranscriptExpressionHandler(GenomicLRATaskHandler):
    """
    Handler for the Transcript Expression task.
    """

    DEFAULT_LENGTH = 200_000
    DEFAULT_FILTER_OUT_LENGTH = 196_608

    def __init__(
        self,
        sequence_length: int = DEFAULT_LENGTH,
        filter_out_sequence_length: int = DEFAULT_FILTER_OUT_LENGTH,
        expression_method: str = "read_counts_old",
        light_version: bool = False,
        **kwargs,
    ):
        """
        Creates a new handler for the Transcript Expression Prediction Task.
        Args:
            sequence_length: Length of the sequence extracted around the TSS_CAGE
                start site.
            filter_out_sequence_length: Examples whose window of this length around
                the start site would extend beyond the chromosome boundaries are
                filtered out. Must be >= sequence_length.
            expression_method: Identifier of the expression quantification method
                (not used directly by this handler).
            light_version: If True, uses a smaller subset of tissues and fewer
                samples per split.
        """
        self.reference_genome = None
        self.coordinate_csv_file = None
        self.labels_csv_file = None
        self.light_version = light_version
        self.sequence_length = sequence_length
        self.filter_out_sequence_length = filter_out_sequence_length

        if self.filter_out_sequence_length is not None:
            assert isinstance(self.filter_out_sequence_length, int)
            assert (
                self.sequence_length <= self.filter_out_sequence_length
            ), f"{self.sequence_length=} > {self.filter_out_sequence_length=}"
        assert isinstance(self.sequence_length, int)
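
    # Configuration sketch (values are illustrative): the handler below keeps the
    # default filter_out_sequence_length of 196_608, which satisfies the asserts
    # above because 2_048 <= 196_608; a sequence_length larger than 196_608 would
    # raise an AssertionError instead.
    #
    #     handler = TranscriptExpressionHandler(sequence_length=2_048, light_version=True)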

    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the Transcript Expression dataset. Each example
        includes a genomic sequence and a list of label values.
        """
        features = datasets.Features(
            {
                # input DNA sequence
                "DNA": datasets.Value("string"),
                # expression labels and normalization statistics
                "labels": datasets.Sequence(datasets.Value("float32")),
                "m_t": datasets.Sequence(datasets.Value("float32")),
                "sigma_t": datasets.Sequence(datasets.Value("float32")),
                "m_g": datasets.Sequence(datasets.Value("float32")),
                "sigma_g": datasets.Sequence(datasets.Value("float32")),
                "labels_name": datasets.Sequence(datasets.Value("string")),
                # transcript-level metadata and companion sequences
                "chromosome": datasets.Value(dtype="string"),
                "RNA": datasets.Value("string"),
                "five_prime_utr": datasets.Value("string"),
                "coding_sequence": datasets.Value("string"),
                "three_prime_utr": datasets.Value("string"),
                "Protein": datasets.Value("string"),
                "transcript_id": datasets.Value("string"),
                "gene_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
        )

    def split_generators(self, dl_manager, cache_dir_root):
        """
        Separates files by split and stores filenames in instance variables.
        The Transcript Expression dataset requires the hg38 reference genome, a
        coordinate CSV file, and a label CSV file to be saved.
        """

        reference_genome_file = self.download_and_extract_gz(
            H38_REFERENCE_GENOME_URL, cache_dir_root
        )
        self.reference_genome = Fasta(reference_genome_file, one_based_attributes=False)

        self.df_csv_file = dl_manager.download_and_extract(
            "transcript_expression/GTEx_final.csv"
        )
        self.normalization_values_csv_file = dl_manager.download_and_extract(
            "transcript_expression/normalization_values.csv"
        )

        return super().split_generators(dl_manager, cache_dir_root)

    def generate_examples(self, split):
        """
        A generator which produces examples for the given split, each with a sequence
        and the corresponding labels. The sequences are padded to the correct sequence
        length and standardized before being returned.
        """
        df = pd.read_csv(self.df_csv_file)
        df = df.loc[df["chr"] != "chrMT"]

        labels_name = LABELS_LIGHT if self.light_version else LABELS_V1

        split_df = df.loc[df["split"] == split]

        if self.light_version:
            split_df = split_df.sample(n=min(1000, len(split_df)), random_state=42)

        norm_values_df = pd.read_csv(self.normalization_values_csv_file)

        label_columns = [f"m_t_{tissue}" for tissue in labels_name]
        m_t = norm_values_df[label_columns].to_numpy().reshape(-1)

        label_columns = [f"sigma_t_{tissue}" for tissue in labels_name]
        sigma_t = norm_values_df[label_columns].to_numpy().reshape(-1)

        label_columns = [f"m_g_{tissue}" for tissue in labels_name]
        m_g = norm_values_df[label_columns].to_numpy().reshape(-1)

        label_columns = [f"sigma_g_{tissue}" for tissue in labels_name]
        sigma_g = norm_values_df[label_columns].to_numpy().reshape(-1)

        key = 0
        for idx, coordinates_row in split_df.iterrows():
            negative_strand = coordinates_row["strand"] == "-"

            # The start site corresponds to the "end" coordinate on the negative
            # strand and the "start" coordinate on the positive strand; subtract 1
            # to convert to 0-based indexing.
            if negative_strand:
                start = coordinates_row["end"] - 1
            else:
                start = coordinates_row["start"] - 1

            chromosome = coordinates_row["chr"]
            labels_row = coordinates_row[labels_name]
            padded_sequence = pad_sequence(
                chromosome=self.reference_genome[chromosome],
                start=start,
                sequence_length=self.sequence_length,
                negative_strand=negative_strand,
                filter_out_sequence_length=self.filter_out_sequence_length,
            )
            if padded_sequence:
                yield key, {
                    "transcript_id": coordinates_row["transcript_id_gtex"],
                    "gene_id": coordinates_row["gene_id_gtex"],
                    "labels_name": labels_name,
                    "labels": labels_row.to_numpy(),
                    "m_t": m_t,
                    "sigma_t": sigma_t,
                    "m_g": m_g,
                    "sigma_g": sigma_g,
                    "DNA": standardize_sequence(padded_sequence),
                    "chromosome": re.sub("chr", "", chromosome),
                    "RNA": coordinates_row["RNA"],
                    "five_prime_utr": coordinates_row["5UTR"],
                    "coding_sequence": coordinates_row["CDS"],
                    "three_prime_utr": coordinates_row["3UTR"],
                    "Protein": coordinates_row["Protein"],
                }
                key += 1
        logger.info(f"Filtering out {len(split_df) - key} elements from the dataset")


"""
--------------------------------------------------------------------------------------------
Dataset loader:
--------------------------------------------------------------------------------------------
"""

_DESCRIPTION = """
Dataset for benchmarking genomic deep learning models.
"""


class GenomicsLRAConfig(datasets.BuilderConfig):
    """
    BuilderConfig for the Transcript Expression task.
    """

    def __init__(self, *args, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to the task handler.
        """
        super().__init__()
        self.handler = TranscriptExpressionHandler(**kwargs)


class GenomicsLRATasks(datasets.GeneratorBasedBuilder):
    """
    Tasks to annotate the human genome.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = GenomicsLRAConfig

    def _info(self) -> DatasetInfo:
        return self.config.handler.get_info(description=_DESCRIPTION)

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """
        Downloads data files and organizes them into train/test/validation splits.
        """
        return self.config.handler.split_generators(dl_manager, self._cache_dir_root)

    def _generate_examples(self, handler, split):
        """
        Reads data files and yields examples.
        Args:
            handler: The handler for the current task.
            split: A string in ['train', 'test', 'valid'].
        """
        yield from handler.generate_examples(split)


"""
--------------------------------------------------------------------------------------------
Global Utils:
--------------------------------------------------------------------------------------------
"""


def standardize_sequence(sequence: str):
    """
    Standardizes the sequence by replacing all unknown characters with N and
    converting it to uppercase.
    Args:
        sequence: genomic sequence to standardize
    """
    pattern = "[^ATCG]"
    sequence = sequence.upper()
    sequence = re.sub(pattern, "N", sequence)
    return sequence
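

# For example, standardize_sequence("acGt-ryN") returns "ACGTNNNN": the input is
# upper-cased first, then every character outside A, C, G, T is replaced with N.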


def pad_sequence(
    chromosome,
    start,
    sequence_length,
    negative_strand=False,
    filter_out_sequence_length=None,
):
    """
    Extracts a sequence of length sequence_length centered on the given start
    index. Returns None if the padded window (or the optional filter window)
    would extend beyond the chromosome boundaries.
    Args:
        chromosome: Chromosome from a pyfaidx-extracted Fasta.
        start: Start index of the original sequence.
        sequence_length: Desired sequence length. If the sequence length is odd,
            the remainder is added to the end of the sequence.
        negative_strand: If True, returns the reverse complement of the sequence.
        filter_out_sequence_length: If given, additionally checks that a window of
            this length fits within the chromosome and returns None otherwise.
    """

    pad = sequence_length // 2
    end = start + pad + (sequence_length % 2)
    start = start - pad

    if filter_out_sequence_length is not None:
        filter_out_pad = filter_out_sequence_length // 2
        filter_out_end = start + filter_out_pad + (filter_out_sequence_length % 2)
        filter_out_start = start - filter_out_pad

        if filter_out_start < 0 or filter_out_end >= len(chromosome):
            return

    if start < 0 or end >= len(chromosome):
        return

    if negative_strand:
        return chromosome[start:end].reverse.complement.seq
    return chromosome[start:end].seq
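

"""
--------------------------------------------------------------------------------------------
Usage sketch:
--------------------------------------------------------------------------------------------
"""
# A minimal sketch of loading this dataset through the `datasets` library. The
# script path, argument values, and availability of the relative CSV paths used in
# split_generators are assumptions for illustration; building the full dataset also
# downloads the hg38 genome (roughly 1 GB). Depending on the installed `datasets`
# version, trust_remote_code may be required for script-based loaders.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "multi_omics_transcript_expression.py",  # path to this loader script
        sequence_length=2_048,
        light_version=True,
        trust_remote_code=True,
    )
    example = dataset["train"][0]
    print(example["chromosome"], example["transcript_id"], len(example["DNA"]))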