Commit
·
dfced34
1
Parent(s):
3e877aa
first commit
Browse files
.gitattributes
CHANGED
@@ -53,3 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
53 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
53 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
56 |
+
transcript_expression/expression_values_v1.csv filter=lfs diff=lfs merge=lfs -text
|
57 |
+
transcript_expression/expression_values_v2.csv filter=lfs diff=lfs merge=lfs -text
|
58 |
+
transcript_expression/transcript_coordinates.csv filter=lfs diff=lfs merge=lfs -text
|
multi_omics_transcript_expression.py
ADDED
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gzip
import logging
import os
import re
import shutil
# NOTE: plain `import urllib` does NOT import the `urllib.request` submodule;
# the code below calls urllib.request.urlretrieve, so import it explicitly.
import urllib.request
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List

import datasets
import pandas as pd
from datasets import DatasetInfo
from pyfaidx import Fasta
from tqdm import tqdm
|
16 |
+
|
17 |
+
"""
|
18 |
+
--------------------------------------------------------------------------------------------
|
19 |
+
Reference Genome URLS:
|
20 |
+
-------------------------------------------------------------------------------------------
|
21 |
+
"""
|
22 |
+
H38_REFERENCE_GENOME_URL = (
|
23 |
+
"https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/" "hg38.fa.gz"
|
24 |
+
)
|
25 |
+
|
26 |
+
"""
|
27 |
+
--------------------------------------------------------------------------------------------
|
28 |
+
Task Specific Handlers:
|
29 |
+
-------------------------------------------------------------------------------------------
|
30 |
+
"""
|
31 |
+
|
32 |
+
logger = logging.getLogger("multi_omics_bulk_rna")
|
33 |
+
logger.setLevel("INFO")
|
34 |
+
|
35 |
+
|
36 |
+
class GenomicLRATaskHandler(ABC):
    """
    Abstract base class for the Genomic LRA task handlers.

    Concrete handlers implement the dataset metadata (`get_info`) and example
    generation (`generate_examples`); shared split wiring and download helpers
    live here.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        pass

    @abstractmethod
    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the task.
        """
        pass

    def split_generators(
        self, dl_manager, cache_dir_root
    ) -> List[datasets.SplitGenerator]:
        """
        Downloads required files using dl_manager and separates them by split.

        Default implementation: a train and a test split, each produced by this
        handler's `generate_examples(split)`.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"handler": self, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"handler": self, "split": "test"}
            ),
        ]

    @abstractmethod
    def generate_examples(self, split):
        """
        A generator that yields examples for the specified split.
        """
        pass

    @staticmethod
    def hook(t):
        """
        Wraps a tqdm progress bar `t` into a `reporthook` callable suitable for
        `urllib.request.urlretrieve`.
        """
        last_b = [0]  # blocks reported on the previous call (closure state)

        def inner(b=1, bsize=1, tsize=None):
            """
            b : int, optional
                Number of blocks just transferred [default: 1].
            bsize : int, optional
                Size of each block (in tqdm units) [default: 1].
            tsize : int, optional
                Total size (in tqdm units). If [default: None] remains unchanged.
            """
            if tsize is not None:
                t.total = tsize
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b

        return inner

    def download_and_extract_gz(self, file_url, cache_dir_root):
        """
        Downloads and extracts a gz file into the given cache directory. Returns the full file path
        of the extracted gz file. Both the .gz and the extracted file are
        cached: existing files are not re-downloaded or re-extracted.
        Args:
            file_url: url of the gz file to be downloaded and extracted.
            cache_dir_root: Directory to extract file into.
        """
        file_fname = Path(file_url).stem
        download_dir = os.path.join(cache_dir_root, "downloads")
        # Fix: urlretrieve cannot create intermediate directories, so ensure
        # the downloads directory exists before writing into it.
        os.makedirs(download_dir, exist_ok=True)
        file_complete_path = os.path.join(download_dir, file_fname)
        gz_path = file_complete_path + ".gz"

        if not os.path.exists(file_complete_path):
            if not os.path.exists(gz_path):
                with tqdm(
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    miniters=1,
                    desc=file_url.split("/")[-1],
                ) as t:
                    urllib.request.urlretrieve(
                        file_url, gz_path, reporthook=self.hook(t)
                    )
            with gzip.open(gz_path, "rb") as file_in:
                with open(file_complete_path, "wb") as file_out:
                    shutil.copyfileobj(file_in, file_out)
        return file_complete_path
|
122 |
+
|
123 |
+
|
124 |
+
class TranscriptExpressionHandler(GenomicLRATaskHandler):
    """
    Handler for the Transcript Expression task.

    Yields one example per transcript: the hg38 genomic window centered on the
    transcript position, plus per-column label values from the coordinate csv.
    """

    # Default window length yielded per example (196608 is the Enformer input
    # length).  Fix: the original values were swapped (200000 / 196608), which
    # made default construction fail its own
    # `sequence_length <= filter_out_sequence_length` assertion.
    DEFAULT_LENGTH = 196608
    # Examples whose centered window of this length falls outside the
    # chromosome are dropped; must be >= DEFAULT_LENGTH.
    DEFAULT_FILTER_OUT_LENGTH = 200000

    def __init__(
        self,
        sequence_length: int = DEFAULT_LENGTH,
        filter_out_sequence_length: int = DEFAULT_FILTER_OUT_LENGTH,
        **kwargs,
    ):
        """
        Creates a new handler for the Transcript Expression Prediction Task.
        Args:
            sequence_length: Length of the sequence around the transcript start site.
            filter_out_sequence_length: If not None, examples whose centered
                window of this length runs off the chromosome are filtered out.
        Instance Vars:
            reference_genome: The Fasta extracted reference genome.
            coordinate_csv_file: The csv file that stores the coordinates and filename of the target
            labels_csv_file: The csv file that stores the labels with one sample per row.
            sequence_length: Sequence length for this handler.
        """
        self.reference_genome = None
        self.coordinate_csv_file = None
        self.labels_csv_file = None
        self.sequence_length = sequence_length
        self.filter_out_sequence_length = filter_out_sequence_length

        # Validate eagerly so a bad configuration fails at construction time.
        assert isinstance(sequence_length, int)
        if filter_out_sequence_length is not None:
            assert isinstance(filter_out_sequence_length, int)
            assert (
                sequence_length <= filter_out_sequence_length
            ), f"{sequence_length=} > {filter_out_sequence_length=}"

    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the Transcript Expression dataset. Each
        example includes a genomic sequence and a list of label values.
        """
        features = datasets.Features(
            {
                # DNA sequence
                "DNA": datasets.Value("string"),
                # list of expression values in each tissue
                "labels": datasets.Sequence(datasets.Value("float32")),
                "labels_name": datasets.Sequence(datasets.Value("string")),
                # chromosome number
                "chromosome": datasets.Value(dtype="string"),
                "RNA": datasets.Value("string"),
                "Protein": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=description,
            # This defines the different columns of the dataset and their types
            features=features,
        )

    def split_generators(self, dl_manager, cache_dir_root):
        """
        Separates files by split and stores filenames in instance variables.
        Requires the reference hg38 genome, the coordinate csv file, and the
        label csv file to be saved.
        """
        # Manually download the reference genome since there are difficulties when streaming
        reference_genome_file = self.download_and_extract_gz(
            H38_REFERENCE_GENOME_URL, cache_dir_root
        )
        self.reference_genome = Fasta(reference_genome_file, one_based_attributes=False)

        # NOTE(review): this commit adds files under `transcript_expression/`
        # (transcript_coordinates.csv, expression_values_v1/v2.csv), but the
        # paths below point at `bulk_rna_expression/` — confirm these resolve
        # in the dataset repository.
        self.coordinate_csv_file = dl_manager.download_and_extract(
            "bulk_rna_expression/transcript_coordinates.csv"
        )

        self.labels_csv_file = dl_manager.download_and_extract(
            "bulk_rna_expression/rna_expression_values.csv"
        )

        return super().split_generators(dl_manager, cache_dir_root)

    def generate_examples(self, split):
        """
        A generator which produces examples for the given split, each with a sequence
        and the corresponding labels. The sequences are padded to the correct sequence
        length and standardized before returning.
        """
        coordinates_df = pd.read_csv(self.coordinate_csv_file)
        labels_name = coordinates_df.columns[2:]
        coordinates_split_df = coordinates_df[coordinates_df["split"] == split]

        key = 0
        for _, coordinates_row in coordinates_split_df.iterrows():
            # -1 since vcf coords are 1-based
            start = coordinates_row["position"] - 1

            chromosome = coordinates_row["chr"]
            # Fix: the original `coordinates_row.loc[idx].values[2:]` indexed
            # the row Series by its own positional label (a KeyError at
            # runtime); the labels are simply the remaining values of the row,
            # matching `labels_name = coordinates_df.columns[2:]` above.
            labels_row = coordinates_row.values[2:]
            padded_sequence = pad_sequence(
                chromosome=self.reference_genome[chromosome],
                start=start,
                sequence_length=self.sequence_length,
                negative_strand=coordinates_row["strand"] == "-",
                filter_out_sequence_length=self.filter_out_sequence_length,
            )
            if padded_sequence:
                yield key, {
                    "labels_name": labels_name,
                    "labels": labels_row,
                    "DNA": standardize_sequence(padded_sequence),
                    "chromosome": re.sub("chr", "", chromosome),
                    "RNA": coordinates_row["RNA"],
                    "Protein": coordinates_row["Protein"],
                }
                key += 1
        logger.info(
            f"filtering out {len(coordinates_split_df)-key} "
            f"elements from the dataset"
        )
|
247 |
+
|
248 |
+
|
249 |
+
"""
|
250 |
+
--------------------------------------------------------------------------------------------
|
251 |
+
Dataset loader:
|
252 |
+
-------------------------------------------------------------------------------------------
|
253 |
+
"""
|
254 |
+
|
255 |
+
_DESCRIPTION = """
|
256 |
+
Dataset for benchmark of genomic deep learning models.
|
257 |
+
"""
|
258 |
+
|
259 |
+
|
260 |
+
|
261 |
+
# define dataset configs
|
262 |
+
class GenomicsLRAConfig(datasets.BuilderConfig):
    """
    BuilderConfig that instantiates and holds the task handler driving
    dataset info, split generation, and example generation.
    """

    def __init__(self, *args, task_name: str, **kwargs):  # type: ignore
        """BuilderConfig for the location tasks dataset.
        Args:
            task_name: NOTE(review): accepted (keyword-only, required) but
                never used or stored — confirm whether it should select a
                handler.
            **kwargs: keyword arguments forwarded to super.
                NOTE(review): despite the line above, kwargs are actually
                forwarded to TranscriptExpressionHandler, and `*args` are
                dropped — super().__init__() is called with no arguments.
        """
        super().__init__()
        self.handler = TranscriptExpressionHandler(**kwargs)
|
274 |
+
|
275 |
+
|
276 |
+
# DatasetBuilder
|
277 |
+
class GenomicsLRATasks(datasets.GeneratorBasedBuilder):
    """
    Tasks to annotate human genome.

    Thin DatasetBuilder that delegates all real work to the task handler
    stored on its config.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = GenomicsLRAConfig

    def _info(self) -> DatasetInfo:
        """Dataset metadata comes from the configured task handler."""
        return self.config.handler.get_info(description=_DESCRIPTION)

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """
        Downloads data files and organizes it into train/test/val splits
        """
        return self.config.handler.split_generators(dl_manager, self._cache_dir_root)

    def _generate_examples(self, handler, split):
        """
        Read data files and create examples(yield)
        Args:
            handler: The handler for the current task
            split: A string in ['train', 'test', 'valid']
        """
        for example in handler.generate_examples(split):
            yield example
|
304 |
+
|
305 |
+
|
306 |
+
"""
|
307 |
+
--------------------------------------------------------------------------------------------
|
308 |
+
Global Utils:
|
309 |
+
-------------------------------------------------------------------------------------------
|
310 |
+
"""
|
311 |
+
|
312 |
+
|
313 |
+
def standardize_sequence(sequence: str):
    """
    Normalize a genomic sequence: uppercase it, then replace every character
    that is not one of A, T, C, G with N.
    Args:
        sequence: genomic sequence to standardize
    """
    # Uppercase first so lowercase a/c/g/t survive the substitution.
    return re.sub("[^ATCG]", "N", sequence.upper())
|
326 |
+
|
327 |
+
|
328 |
+
def pad_sequence(
    chromosome,
    start,
    sequence_length,
    negative_strand=False,
    filter_out_sequence_length=None,
):
    """
    Extends a given sequence to length sequence_length, centered on `start`.
    If padding to the given length falls outside the chromosome, returns None.
    Args:
        chromosome: Chromosome from pyfaidx extracted Fasta.
        start: Start index of original sequence; the returned window is
            centered on it.
        sequence_length: Desired sequence length. If sequence length is odd, the
            remainder is added to the end of the sequence.
        negative_strand: If negative_strand, returns the reverse compliment of the sequence
        filter_out_sequence_length: If not None, additionally require a window
            of this length centered on `start` to lie fully inside the
            chromosome; otherwise return None.
    """
    center = start  # keep the original center before `start` is shifted
    pad = sequence_length // 2
    end = center + pad + (sequence_length % 2)
    start = center - pad

    if filter_out_sequence_length is not None:
        filter_out_pad = filter_out_sequence_length // 2
        # Fix: the original derived this window from the already-shifted
        # `start`, centering the filter window `pad` bases upstream of the
        # sequence window.  Both windows must share the same center.
        filter_out_end = center + filter_out_pad + (filter_out_sequence_length % 2)
        filter_out_start = center - filter_out_pad

        if filter_out_start < 0 or filter_out_end >= len(chromosome):
            return None

    if start < 0 or end >= len(chromosome):
        return None

    if negative_strand:
        return chromosome[start:end].reverse.complement.seq
    return chromosome[start:end].seq
|
transcript_expression/expression_values_v1.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8928bee330ed539c90ad9b564f34cabead155e705f88d60495d480d023332578
|
3 |
+
size 111402901
|
transcript_expression/expression_values_v2.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:324538a478fa8a5ff441203888b27938551ae498b47c069867c983c1bfbd41f8
|
3 |
+
size 194364132
|
transcript_expression/transcript_coordinates.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:256d406ada1bea51a6cd42b8100de7e10d436e71f72fd2be5ac1f75e9baebe5f
|
3 |
+
size 6423912
|