Dataset schema:
content: string (39 to 9.28k characters)
sha1: string (40 characters)
id: int64 (8 to 710k)
import time

def dt_to_timestamp(datetime_):
    """Convert :class:`datetime` to UNIX timestamp."""
    return time.mktime(datetime_.timetuple())
7448f3576f4de3129bb9bae1c8f3a951b27f24c4
329,298
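A quick usage sketch for the `dt_to_timestamp` entry above (the example date is illustrative); note that `time.mktime` interprets the datetime in the machine's local timezone:

    from datetime import datetime
    dt_to_timestamp(datetime(2020, 1, 1))  # seconds since the epoch, e.g. 1577836800.0 when local time is UTC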
def compare_content(fpath1, fpath2):
    """Tell if the content of both fpaths is equal.

    This does not check modification times, just internal bytes.
    """
    with open(fpath1, 'rb') as fh1:
        with open(fpath2, 'rb') as fh2:
            while True:
                data1 = fh1.read(65536)
                data2 = fh2.read(65536)
                if data1 != data2:
                    return False
                if not data1:
                    return True
52bc4a2dcde79bd09e8d175937805b944dc16487
499,058
import pathlib

def get_directory_contents(path: pathlib.Path):
    """Iterates through a directory and returns a set of its contents."""
    contents = set()
    for filename in path.glob('**/*'):
        # Remove the original parent directories to get just the relative path.
        contents.add(filename.relative_to(path))
    return contents
a6e5e8bb2a4894de4b0ef54a1f2f727ec68c86a5
111,393
def stringify_sdg_number(sdg):
    """Converts integer to string and zero pads to 2 digits.

    Used for saving and loading individual goal data.

    Args:
        sdg (int): Typically 1 - 17

    Returns:
        (str): e.g. '06'
    """
    return str(sdg).zfill(2)
a2d854ca6265d28de34fcaf8a0a792f36ba203ae
444,279
from pathlib import Path

def filename_for_plot(directory, zarr_path: str, qualifier: str = "") -> str:
    """Get a suitable output filename for a plot produced from a Zarr path."""
    basename = Path(zarr_path).stem.replace(
        ".zarr", ""
    )  # handle ".zarr" and ".zarr.zip"
    path = Path(directory, f"{basename}{qualifier}.png")
    return str(path)
8361b4fc037c3bf3a28da1c0f9101b1dbac5d6e7
569,760
def rnn_args_from_config(rnn_config):
    """
    Takes a Config object corresponding to RNN settings
    (for example `config.algo.rnn` in BCConfig) and extracts
    rnn kwargs for instantiating rnn networks.
    """
    return dict(
        rnn_hidden_dim=rnn_config.hidden_dim,
        rnn_num_layers=rnn_config.num_layers,
        rnn_type=rnn_config.rnn_type,
        rnn_kwargs=dict(rnn_config.kwargs),
    )
54cf542122036510c70fe7a53a47dd724880a912
697,242
def flatten_request_header(header):
    """
    Transform a dict representing header parameters into a flat string of
    comma separated parameters suitable for inserting into the actual headers.
    """
    flattened_header = ''
    if isinstance(header, dict):
        contents = []
        for content_key, content_val in header.items():
            contents.append('%s="%s"' % (content_key, content_val))
        flattened_header = ','.join(contents)
    else:
        flattened_header = str(header)
    return flattened_header
e6a3b97e502ab1afbb6c6eef4241ba45ec00e7ef
446,585
def add_slash(url):
    """Adds a trailing slash for consistency in urls."""
    if not url.endswith('/'):
        url = url + '/'
    return url
8ac722da072ec0bf83efdb0a290f46195203bd42
454,624
def format_timedelta(timedelta_obj):
    """Helper function to format timedelta to a human-readable time string."""
    if timedelta_obj:
        timedelta_string = '%02d:%02d:%02d' % (
            int(timedelta_obj.total_seconds() // 3600),
            (timedelta_obj.seconds // 60) % 60,
            timedelta_obj.seconds % 60
        )
        return timedelta_string
    return None
f10ddb22d682fcb6c1f406246f0819451b87a136
585,086
import six

def as_bytes(bytes_or_text, encoding='utf-8'):
    """Converts bytes or unicode to `bytes`, using utf-8 encoding for text.

    # Arguments
        bytes_or_text: A `bytes`, `str`, or `unicode` object.
        encoding: A string indicating the charset for encoding unicode.

    # Returns
        A `bytes` object.

    # Raises
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    if isinstance(bytes_or_text, six.text_type):
        return bytes_or_text.encode(encoding)
    elif isinstance(bytes_or_text, bytes):
        return bytes_or_text
    else:
        raise TypeError('Expected binary or unicode string, got %r' %
                        (bytes_or_text,))
b973821db5060c6f8209642024ed71f023b40714
271,248
def mailverif(mail):
    """
    Check whether a string contains exactly one @.

    The string is invalid if the @ is at the end of the string or if
    there is more than one @ in the string.

    >>> mailverif("Alex@gmail.com")
    True
    >>> mailverif("Alex@gmail.@com")
    False
    >>> mailverif("Alexgmail.com")
    False
    >>> mailverif("Alexgmail.com@")
    False
    """
    arroba = mail.count("@")
    if arroba != 1 or mail.rfind("@") == (len(mail) - 1):
        return False
    else:
        return True
6c7ff162eda514bc4c7e7edd173ff9e5f9f552a4
335,852
import math

def eoq(demand_in_units, cost_of_ordering, cost_of_carrying):
    """Return the Economic Order Quantity (EOQ) for a product.

    Args:
        demand_in_units (int):
        cost_of_ordering (float):
        cost_of_carrying (float):

    Returns:
        Economic Order Quantity or EOQ (float).
    """
    return math.sqrt(((demand_in_units * cost_of_ordering) * 2) /
                     cost_of_carrying)
97fe8880876bf785157303451a566acedbbfcc3a
363,196
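A worked example for the EOQ entry above, with illustrative numbers: an annual demand of 1,000 units, a 10.0 ordering cost, and a 0.5 per-unit carrying cost:

    eoq(1000, 10.0, 0.5)  # sqrt((1000 * 10.0 * 2) / 0.5) = sqrt(40000.0) = 200.0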
def standard_error(sample_size, successes):
    """
    Calculates the standard error of a sample proportion.

    Formula: σp = sqrt [ p(1 - p) / n ], with:
    p = proportion of successes in sample (successes / sample size)

    :param sample_size: the size of the sample
    :param successes: the number of successes on the given sample.
    :return: the standard error on the sample proportion -> σp
    """
    p = successes / sample_size
    return (p * (1 - p) / sample_size) ** 0.5
77cbde0689dec5e2432043362337b925c5ea7296
107,536
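A worked example for `standard_error` above: with 50 successes in a sample of 100, p = 0.5, so the standard error is sqrt(0.5 * 0.5 / 100):

    standard_error(100, 50)  # sqrt(0.25 / 100) = 0.05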
def load_ids(filename):
    """Loads document ids from a newline separated list of filenames."""
    # Use a context manager so the file handle is closed after reading.
    with open(filename, "r") as fin:
        return set(line.strip() for line in fin)
dd8df0007a57be9995ef62d8a4032f4afc9395da
426,396
from typing import List

def count_freq_keywords(keywords: List[str]) -> List[tuple]:
    """
    Returns the count of each unique keyword of a list of keywords.

    Parameters:
        keywords (List[str]): list with all keywords as strings.

    Returns:
        a list of tuples of the form (keyword, count).
    """
    unique_keywords = set(keywords)
    return [(keyword, keywords.count(keyword)) for keyword in unique_keywords]
5c28beebc425ef4575e39ae315f78ed776e3f5fd
250,534
def label_to_color_image(label, colormap=None):
    """Adds color defined by the dataset colormap to the label.

    Args:
        label: A 2D array with integer type, storing the segmentation label.
        colormap: A colormap for visualizing segmentation results.

    Returns:
        result: A 2D array with floating type. The element of the array
            is the color indexed by the corresponding element in the input
            label to the dataset color map.

    Raises:
        ValueError: If label is not of rank 2 or its value is larger than
            color map maximum entry.
    """
    if label.ndim != 2:
        raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))
    if colormap is None:
        raise ValueError('Expect a valid colormap.')
    return colormap[label]
7dd66d4350e5f82bfe705fc35c7f02abaa4ebee9
380,507
import torch

def list2vec(z1_list):
    """Convert list of tensors to a vector."""
    bsz = z1_list[0].size(0)
    return torch.cat([elem.reshape(bsz, -1, 1) for elem in z1_list], dim=1)
41ad6557f92b5fe602dd1fd2ff81bd20155517d5
627,202
def normalize(tensor, mean, std):
    """Normalize a ``torch.tensor``.

    Args:
        tensor (torch.tensor): tensor to be normalized.
        mean (list): the mean of BGR
        std (list): the std of BGR

    Returns:
        Tensor: Normalized tensor.
    """
    for t, m, s in zip(tensor, mean, std):
        t.sub_(m).div_(s)
    return tensor
2dea96d14fd52898bd967725d8805d1ab10ea7cd
691,793
def else_while(n, numbers):
    """
    A function illustrating the use of else with a loop. This function will
    determine if n is in the iterable numbers.

    :param n: The thing to search for.
    :param numbers: An iterable to search over.
    :return: True if the thing is in the iterable, False otherwise.
    """
    # Loop over the numbers
    for e in numbers:
        # If we've found it, break out of the loop.
        if e == n:
            break
    else:
        # The else clause runs if we exited the loop normally (ie, didn't break)
        return False
    # Otherwise, if we execute break, then we get to here.
    return True
400a4bd48fd04577707793e12e7782e55fbdacab
250,867
def get_boxes(warp_sudoku_board):
    """
    Splits image into 81 small boxes.

    :param warp_sudoku_board: OpenCV image
    :return: 9x9 2D list; each cell contains 2D numpy array
    """
    temp = [None for i in range(9)]
    boxes = [temp.copy() for i in range(9)]
    board_height = warp_sudoku_board.shape[0]
    board_width = warp_sudoku_board.shape[1]
    for y in range(9):
        for x in range(9):
            x1 = x * board_width // 9
            x2 = (x + 1) * board_width // 9
            y1 = y * board_height // 9
            y2 = (y + 1) * board_height // 9
            boxes[y][x] = warp_sudoku_board[y1:y2, x1:x2]
    return boxes
c6e00b320f59540754e9d75ca527b3e41790298a
439,500
def get_tree_root_element(tree_object):
    """
    Return the root of the tree_object.

    :param tree_object: ElementTree instance for the xml_file
    :return: Root object of the ElementTree instance
    """
    return tree_object.getroot()
58fc44541dfa0f934958e8bf2f005eff8bc8876e
119,097
def get_file_contents(filename):
    """Returns file content as string."""
    with open(filename, 'r') as srcfile:
        return srcfile.read()
5417b3301fe48b72c45172ec88a4f0cd8b2f3006
594,364
import six

def make(*args):
    """Return the plug built up from the given arguments.

    Args:
        *args (str | int): Token(s) to build the plug name from.

    Returns:
        str
    """
    parts = []
    for arg in args:
        if isinstance(arg, int):
            parts[-1] = "{}[{}]".format(parts[-1], arg)
        elif isinstance(arg, six.string_types) and len(arg) == 1:
            parts[-1] = "{}{}".format(parts[-1], arg)
        else:
            parts.append(arg)
    return ".".join(parts)
eb87f2b8facf801c2fb5085f7e6a9627567e2419
451,718
from pathlib import Path
import re

def normalise_nci_symlinks(input_path: Path) -> Path:
    """
    If it's an NCI lustre path, always use the symlink (`/g/data`) rather
    than specific drives (eg. `/g/data2`).

    >>> normalise_nci_symlinks(Path('/g/data2/v10/some/dataset.tar')).as_posix()
    '/g/data/v10/some/dataset.tar'
    >>> normalise_nci_symlinks(Path('/g/data1a/v10/some/dataset.tar')).as_posix()
    '/g/data/v10/some/dataset.tar'
    >>> # Don't change other paths!
    >>> normalise_nci_symlinks(Path('/g/data/v10/some/dataset.tar')).as_posix()
    '/g/data/v10/some/dataset.tar'
    >>> normalise_nci_symlinks(Path('/Users/testuser/unrelated-path.yaml')).as_posix()
    '/Users/testuser/unrelated-path.yaml'
    """
    match = re.match(r'^/g/data[0-9a-z]+/(.*)', str(input_path))
    if not match:
        return input_path
    [offset] = match.groups()
    return Path('/g/data/' + offset)
9dde3bfc5750ac9bd50680a6ab50b3db9699dd09
655,538
import torch

def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks.

    The masks should be in format [N, H, W] where N is the number of masks,
    (H, W) are the spatial dimensions.

    Returns a [N, 4] tensor, with the boxes in xyxy format.
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]

    y = torch.arange(0, h, dtype=torch.float)
    x = torch.arange(0, w, dtype=torch.float)
    y, x = torch.meshgrid(y, x)

    x_mask = masks * x.unsqueeze(0)
    x_max = x_mask.flatten(1).max(-1)[0]
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = masks * y.unsqueeze(0)
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
0f76285e9c398b48545842c7bd17acf7da589638
223,365
from typing import List
import collections
import csv
import copy

def read_csv(csv_fpath: str, delimiter="\t") -> List[collections.OrderedDict]:
    """Copy the data out of a csv file, as a list of OrderedDicts.

    Args:
        csv_fpath: string representing path to a csv file.

    Returns:
        rows: list of OrderedDicts. key is column name, value is entry at
            (row, column) of csv file.
    """
    rows = []
    with open(csv_fpath, "r") as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=delimiter)
        for row in csv_reader:
            rows += [copy.deepcopy(row)]
    return rows
f2bdbf7317cd0d0db9abb6aceef2a8b33c0cfc11
249,264
from typing import List
from typing import Dict

def content_check(data: List[Dict[str, str]], check: str, json_file: str,
                  msg: str) -> bool:
    """
    Checks whether a dictionary contains a column specific to expected data
    and returns a corresponding boolean value. This avoids writing files of
    an invalid gallery type.

    :param data: list containing poster dictionary items.
    :param check: name of the column that is supposed to be found in the
        dictionary items.
    :param json_file: name of the file containing the data
    :param msg: String displayed in command line print.
    :return: True if the column was found in the data, False otherwise.
    """
    if not data or check not in data[0].keys():
        print(f"'{json_file}' does not seem to be a valid {msg} file ...")
        print("Aborting ...")
        return False
    return True
e4ce5389fae39fb36743d81ca5171534202678ec
661,370
def line_search_bisection(f, bound, accuracy):
    """
    Maximize c so that the constraint is fulfilled.

    This algorithm assumes continuity of f; that is, there exists a fixed
    value c, such that f(x) is False for x < c and True otherwise. This holds
    true, for example, for the level sets that we consider.

    Parameters
    ----------
    f: callable
        A function that takes a scalar value and returns True if the
        constraint is fulfilled, False otherwise.
    bound: iterable
        Interval within which to search
    accuracy: float
        The interval up to which the algorithm shall search

    Returns
    -------
    c: list
        The interval in which the optimum lies.
    """
    # Break if lower bound does not fulfill constraint
    if not f(bound[0]):
        return None

    # Break if bound was too small
    if f(bound[1]):
        bound[0] = bound[1]
        return bound

    # Improve bound until accuracy is achieved
    while bound[1] - bound[0] > accuracy:
        mean = (bound[0] + bound[1]) / 2
        if f(mean):
            bound[0] = mean
        else:
            bound[1] = mean

    return bound
123d58b3782d8e70dd3f965ca1b9940f0b32c9a1
591,586
def check_detection_overlap(gs, dd):
    """
    Evaluates if two detections overlap.

    Parameters
    ----------
    gs: list
        Gold standard detection [start, stop]
    dd: list
        Detector detection [start, stop]

    Returns
    -------
    overlap: bool
        Whether two events overlap.
    """
    overlap = False

    # dd stop in gs + (dd inside gs)
    if (dd[1] >= gs[0]) and (dd[1] <= gs[1]):
        overlap = True
    # dd start in gs + (dd inside gs)
    if (dd[0] >= gs[0]) and (dd[0] <= gs[1]):
        overlap = True
    # gs inside dd
    if (dd[0] <= gs[0]) and (dd[1] >= gs[1]):
        overlap = True

    return overlap
af59564c268de9cf0d30d98a0fdba6cd0bac1232
620,394
def longueur_bit(n: int) -> int:
    """
    Description:
        Returns the number of bits used in the binary representation of an
        integer.

    Parameters:
        n: {int} -- The integer whose binary representation length we want.

    Returns:
        {int} -- The number of bits used in the binary representation of
        the number.

    Example:
        >>> longueur_bit(1)
        1
        >>> longueur_bit(8)
        4
        >>> longueur_bit(512)
        10

        1 => 1, 8 => 1000, 512 => 1000000000 (decimal => binary)
    """
    nbBit = 0
    while n > 0:
        nbBit += 1
        n = n >> 1
    return nbBit
b793a84b69127e08a5d20aa9a069f69aa98efed4
189,482
def svg_fill_rule(obj):
    """Returns an SVG style string describing the fill rule.

    'fill-rule:nonzero' or 'fill-rule:evenodd'
    """
    if obj.fillrule == 'winding':
        return "fill-rule:nonzero"
    else:
        return "fill-rule:evenodd"
390e678708def972a03ed6745867cf7f6a3ac901
426,485
def to_camel(s):
    """Convert an underscored title into camel case.

    'PARENT_ORGANISATION_ID' => 'parentOrganisationId'
    """
    bits = [(x.lower() if i == 0 else x.title())
            for (i, x) in enumerate(s.split("_"))]
    return "".join(bits)
34400be6a346d886b2fca562b737b7811b871af1
694,831
def parse_args(parser):
    """
    Parse commandline arguments.
    """
    parser.add_argument('-i', '--input', type=str, default="",
                        help='full path to the input text (phrases separated '
                             'by new line); if not provided then use default text')
    parser.add_argument('-o', '--output', required=True,
                        help='output folder to save audio (file per phrase)')
    parser.add_argument('--tacotron2', type=str, default="",
                        help='full path to the Tacotron2 model checkpoint file')
    parser.add_argument('--mel-file', type=str, default="",
                        help='set if using mel spectrograms instead of Tacotron2 model')
    parser.add_argument('--waveglow', required=True,
                        help='full path to the WaveGlow model checkpoint file')
    parser.add_argument('--old-waveglow', action='store_true',
                        help='set if WaveGlow checkpoint is from GitHub.com/NVIDIA/waveglow')
    parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
    parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
                        help='Sampling rate')
    parser.add_argument('--fp16-run', action='store_true',
                        help='inference in fp16')
    parser.add_argument('--log-file', type=str, default='nvlog.json',
                        help='Filename for logging')
    parser.add_argument('--include-warmup', action='store_true',
                        help='Include warmup')
    return parser
b6ae40526f7dff6a5f716ffbce15faf3e823773a
197,689
def timeticks_to_str(ticks):
    """Return "days, hours, minutes, seconds and ms" string from ticks."""
    days, rem1 = divmod(ticks, 24 * 60 * 60 * 100)
    hours, rem2 = divmod(rem1, 60 * 60 * 100)
    minutes, rem3 = divmod(rem2, 60 * 100)
    seconds, milliseconds = divmod(rem3, 100)
    ending = 's' if days > 1 else ''
    days_fmt = '{} day{}, '.format(days, ending) if days > 0 else ''
    return '{}{:-02}:{:-02}:{:-02}.{:-02}'.format(days_fmt, hours, minutes,
                                                  seconds, milliseconds)
e625ad82e212f265cb01ffe34713f9853a880565
542,392
def calculate_dynamics(pos_vector, vel_vector, accel_vector):
    """Calculate the effort vector for a custom 2-DoF SCARA."""
    m1 = 1.0   # kg weight of first body
    r1 = 0.35  # distance to cg
    d1 = 0.5   # full link length
    i1 = (1. / 8) * m1 * d1 ** 2
    m2 = 1.0
    r2 = 0.35
    i2 = (1. / 8) * m2 * 0.5 ** 2
    A = i1 + m1 * r1 ** 2
    B = i2 + m2 * r2 ** 2
    # Simple friction model only looking at j1 torque
    friction = 0  # np.tanh(vel_vector[1]*10)*0.8
    # Flip joint torques because joints are flipped
    tau2 = B * (-accel_vector[0] + accel_vector[1]) + friction
    tau1 = A * accel_vector[0] - tau2
    return [tau1, tau2]
b23a54004cd31812d63918b66e8931f269541fe8
561,203
def padding_mask(seq_k, seq_q):
    """For masking out the padding part of the keys sequence.

    Args:
        seq_k: Keys tensor, with shape [B, L_k]
        seq_q: Query tensor, with shape [B, L_q]

    Returns:
        A masking tensor, with shape [B, L_q, L_k]
    """
    len_q = seq_q.size(1)
    # `PAD` is 0
    pad_mask = seq_k.eq(0)
    pad_mask = pad_mask.unsqueeze(1).expand(-1, len_q, -1)  # shape [B, L_q, L_k]
    return pad_mask
22f84e5b2e7b0ef31c0aa36771f50de110a1fa9d
639,269
def err2str(error: Exception, /, *, msg_only: bool = False) -> str:
    """Return string of error."""
    if msg_only:
        return str(error)
    return f"{error.__class__.__name__} - {str(error)}"
56a8210e8c1f95963fd127cdcf243f5b23ec3cca
226,534
import re

def is_ext_code(s: str) -> bool:
    """
    Returns True if `s` appears to be a single extended 256 escape code.
    """
    return re.match('^\033\\[((38)|(48));5;\\d{1,3}m$', s) is not None
8010b9400dafd60003ec59c8d269ba76bace0350
400,030
def composekey(*keys):
    """Compose a sequence of keys into one key.

    Example: composekey("attr1.attr2", "attr3") == "attr1.attr2.attr3"
    """
    keys = [key.split() for key in keys]
    composites = [[]]
    for alternatives in keys:
        composites = [com + [alt] for alt in alternatives for com in composites]
    return " ".join(".".join(key) for key in composites)
4695523c2ea670c929389db29ce9e7c3e962b3b2
198,972
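The whitespace-splitting inside `composekey` means each argument may carry space-separated alternatives, and the result is the cross product of dotted paths. A small sketch assuming the function above is in scope:

    composekey("attr1.attr2", "attr3")  # 'attr1.attr2.attr3'
    composekey("a b", "c")              # 'a.c b.c' -- both alternatives crossed with 'c'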
def extract_dict(path):
    """Read a dictionary from file (reads at most the first 4096 bytes)."""
    with open(path, 'rb') as f:
        dict_bytes = f.read(4096)
    return dict_bytes
17995949d50e974edef96c3a393570d5d34e98e2
348,101
def mod_div(n, d, m):
    """Returns (n/d) mod m.

    Works because the modular multiplicative inverse of d is equal to
    d^(m-2) mod m as long as m is prime.
    """
    inverse = pow(d, m - 2, m)
    return (n * inverse) % m
68054b52a5884adbc85073e60e69f7739aa6a164
377,894
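A worked example for `mod_div` above, using the prime modulus 7: the inverse of 4 mod 7 is pow(4, 5, 7) = 2, since 4 * 2 = 8 ≡ 1 (mod 7):

    mod_div(10, 4, 7)  # (10 * 2) % 7 = 6; check: 4 * 6 = 24 ≡ 3 ≡ 10 (mod 7)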
import re

def Char_Tokenizer(sentence, boundary_chars, tokenized_chars):
    """
    Separates boundary_chars from the boundary of a word and tokenized_chars
    from any part of the string.
    """
    tok_sentence = sentence
    # separates boundary_chars when they're found at word boundary
    for curr_char in boundary_chars.split():
        tok_sentence = re.sub(r"(\W|^)(" + curr_char + r"+)(\w)",
                              r"\1 \2 \3", tok_sentence)
        tok_sentence = re.sub(r"(\w)(" + curr_char + r"+)(\W|$)",
                              r"\1 \2 \3", tok_sentence)
    # separates all tokenized_chars
    trans_table = dict((ord(char), " " + char + " ") for char in tokenized_chars)
    tok_sentence = tok_sentence.translate(trans_table)
    return tok_sentence
e2798047fdf92cf1548a78027e6eebc4eb84b689
180,873
def locations_of_substring(string, substring):
    """Return a list of locations of a substring."""
    substring_length = len(substring)

    def recurse(locations_found, start):
        location = string.find(substring, start)
        if location != -1:
            return recurse(locations_found + [location],
                           location + substring_length)
        else:
            return locations_found

    return recurse([], 0)
e276de58aceb3a84f19773d4ec0b37be948ee5c3
604,304
def _pearson_corr(mat_X, mat_Y):
    """Pearson's correlation between every column in mat_X and mat_Y.

    Args
    ----
    mat_X (N,M1): np.ndarray
    mat_Y (N,M2): np.ndarray

    Returns
    -------
    mat_corr (M1,M2): np.ndarray
        Correlation matrix
    """
    # Reshape 1-D inputs to column vectors
    if len(mat_X.shape) == 1:
        mat_X = mat_X.reshape([-1, 1])
    if len(mat_Y.shape) == 1:
        mat_Y = mat_Y.reshape([-1, 1])

    mat_X = (mat_X - mat_X.mean(axis=0)) / mat_X.std(axis=0).clip(min=1e-8)
    mat_Y = (mat_Y - mat_Y.mean(axis=0)) / mat_Y.std(axis=0).clip(min=1e-8)
    mat_corr = mat_X.T.dot(mat_Y) / mat_X.shape[0]

    if mat_corr.shape[1] == 1:
        return mat_corr.reshape([-1])
    else:
        return mat_corr
41249c15c213a73e6228875661088b0803e1d1b8
396,614
import re
import inspect

def extract_code_blocks_from_md(docstr):
    """
    Extract code blocks from markdown content.
    """
    code_blocks = []
    pat = re.compile(r"```i?python[23]?(.*?)```", re.MULTILINE + re.DOTALL)
    for cbit in pat.finditer(docstr):
        code_blocks.append(inspect.cleandoc(cbit.group()))
    # logger.info('extracted %d code blocks.', len(code_blocks))
    return code_blocks
e6d3d8f5fd85c7f33bbc024f6cde32e1c23e4ea8
498,444
import json

def format_rpc_response(data, exception=None):
    """
    Formats a response from a RPC Manager.
    It provides the data and/or a serialized exception so it can be
    re-created by the caller.

    :param Any data: A JSON Serializable object.
    :param Exception exception: An Exception object
    :return str: JSON Response.
    """
    exception_data = None
    if exception:
        args = exception.__getargs__() if hasattr(exception, "__getargs__") else exception.args
        kwargs = exception.__getkwargs__() if hasattr(exception, "__getkwargs__") else {}
        if kwargs is None:
            kwargs = {}
        try:
            module = exception.__module__
        except AttributeError:
            module = None
        exception_data = {
            'exception': type(exception).__name__,
            'message': str(exception),
            'args': args,
            'kwargs': kwargs,
            'module': module,
        }

    return json.dumps({
        'data': data,
        'exception': exception_data
    })
c900e2512fd486c91789ab4312883061553a2fb1
704,379
def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1):
    """Extract the target frame from 2D pose results, and pad the sequence to
    a fixed length.

    Args:
        pose_results (List[List[Dict]]): Multi-frame pose detection results
            stored in a nested list. Each element of the outer list is the
            pose detection results of a single frame, and each element of the
            inner list is the pose information of one person, which contains:
                keypoints (ndarray[K, 2 or 3]): x, y, [score]
                track_id (int): unique id of each person, required when
                    ``with_track_id==True``
                bbox ((4, ) or (5, )): left, right, top, bottom, [score]
        frame_idx (int): The index of the frame in the original video.
        causal (bool): If True, the target frame is the last frame in a
            sequence. Otherwise, the target frame is in the middle of a
            sequence.
        seq_len (int): The number of frames in the input sequence.
        step (int): Step size to extract frames from the video.

    Returns:
        List[List[Dict]]: Multi-frame pose detection results stored in a
            nested list with a length of seq_len.
        int: The target frame index in the padded sequence.
    """
    if causal:
        frames_left = seq_len - 1
        frames_right = 0
    else:
        frames_left = (seq_len - 1) // 2
        frames_right = frames_left
    num_frames = len(pose_results)

    # get the padded sequence
    pad_left = max(0, frames_left - frame_idx // step)
    pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step)
    start = max(frame_idx % step, frame_idx - frames_left * step)
    end = min(num_frames - (num_frames - 1 - frame_idx) % step,
              frame_idx + frames_right * step + 1)
    pose_results_seq = [pose_results[0]] * pad_left + \
        pose_results[start:end:step] + [pose_results[-1]] * pad_right
    return pose_results_seq
429f0296630897f3eff227bb661dbaa572bd7d2b
492,114
from typing import Dict
from typing import Any

def prefix_keys(dict_: Dict[str, Any], *, prefix: str,
                sep: str = "/") -> Dict[str, Any]:
    """Prepend the prefix to all keys of the dict."""
    return {f"{prefix}{sep}{key}": value for key, value in dict_.items()}
a80c8fc1d20222dc60d1be734e242a57d2aa66f9
539,504
def get_lonlat(iindex, jindex, grd, Cpos='rho'):
    """
    lon, lat = get_lonlat(iindex, jindex, grd)

    return the longitude (degree east) and latitude (degree north)
    for grid point (iindex, jindex)
    """
    if Cpos == 'u':
        lon = grd.hgrid.lon_u[:, :]
        lat = grd.hgrid.lat_u[:, :]
    elif Cpos == 'v':
        lon = grd.hgrid.lon_v[:, :]
        lat = grd.hgrid.lat_v[:, :]
    elif Cpos == 'rho':
        lon = grd.hgrid.lon_rho[:, :]
        lat = grd.hgrid.lat_rho[:, :]
    elif Cpos == 'psi':
        lon = grd.hgrid.lon_psi[:, :]
        lat = grd.hgrid.lat_psi[:, :]
    else:
        raise ValueError('%s bad position. Cpos must be rho, psi, u or v.' % Cpos)

    return lon[jindex, iindex], lat[jindex, iindex]
f3ae66c0a230d0b33409f98738d2c127c8b46232
466,035
def enumerate_options(course_list):
    """Given a course_list, generates a prompt string which enumerates the
    courses the user may download.

    Args:
        course_list (list): a list of Course objects

    Returns:
        str: a string detailing the options a user has (courses to fetch
            files from, and how to exit)
    """
    options = "Courses:\n"
    for i, course in enumerate(course_list):
        options += f"\t{i+1}: {course.get_name()}\n"
    options += "Enter a course number, or type \"q\" to quit: "
    return options
d0cf025f30760f1b16e06c66ffa394d4a876adb6
106,528
import random

def epsilon_greedy(variant_vals, eps=0.1):
    """Epsilon-greedy algorithm implementation on Variant model values.

    Parameters
    ----------
    variant_vals : list
        A list of dictionary mappings of Variant field values for a given
        Campaign object. Required ``Variant`` fields are ``code``
        ``impressions`` ``conversions`` ``conversion_rate`` ``html_template``.
        For example::

            Campaign.objects.get(code='xxx').values(
                'code', 'impressions', 'conversions', 'conversion_rate',
                'html_template'
            )

    eps : float
        Exploration parameter. Values between 0.0 and 1.0. Defaults to 0.1

    Returns
    -------
    selected_variant : dict
        The selected dictionary mapping of Variant fields from
        ``variant_vals`` list, as chosen by the epsilon_greedy algorithm
    """
    if random.random() < eps:
        # If random number < eps, exploration is chosen over exploitation
        selected_variant = random.sample(list(variant_vals), 1)[0]
    else:
        # If random number >= eps, exploitation is chosen over exploration
        best_conversion_rate = 0.0
        selected_variant = None
        for var in variant_vals:
            if var['conversion_rate'] > best_conversion_rate:
                best_conversion_rate = var['conversion_rate']
                selected_variant = var
            if var['conversion_rate'] == best_conversion_rate:
                # Break tie - randomly choose between current and best
                selected_variant = random.sample([var, selected_variant], 1)[0]
    return selected_variant
617f98f05e83b2076ab2b7726e71d22489f8566d
325,751
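A minimal usage sketch for the epsilon-greedy entry above; the variant dicts are hypothetical stand-ins for the Django queryset values described in the docstring:

    variants = [
        {'code': 'A', 'impressions': 100, 'conversions': 10,
         'conversion_rate': 0.10, 'html_template': 'a.html'},
        {'code': 'B', 'impressions': 100, 'conversions': 12,
         'conversion_rate': 0.12, 'html_template': 'b.html'},
    ]
    choice = epsilon_greedy(variants, eps=0.1)  # usually variant B, occasionally a random pick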
def get_boundary_from_response(response):
    """
    Parses the response header and returns the boundary.

    :param response: response containing the header that contains the boundary
    :return: a binary string of the boundary
    """
    # Read only the first value with key 'content-type' (duplicate keys are allowed)
    content = response.headers.pop('content-type')[0]
    # Find the start and end index of the boundary
    b_start = content.find(b'boundary=')
    b_end = content[b_start:].find(b';')

    # Separate out boundary
    if b_end == -1:
        # If the end point is not found, just go to the end of the content string
        boundary = content[b_start + 9:]
    else:
        boundary = content[b_start + 9:b_start + b_end]
    return boundary
66a0112598b2210cca1a2210f6af963dfee641f7
5,553
def dict_to_json_keys(pydict: dict) -> dict:
    """Convert a dict whose keys use the Python naming style (snake_case) to
    a dict whose keys are in JSON style (camelCase).

    :param pydict: dict whose keys are strings in Python style format
    :returns: dict
    """
    d = {}
    for key in pydict.keys():
        new_key = key.title().replace('_', '')
        new_key = new_key[:1].lower() + new_key[1:]
        d[new_key] = pydict[key]
    return d
cf13453d16a3e90bbd974d7c2352221165765f5c
616,820
from typing import List

def construct_index_name(table: str, columns: List[str]) -> str:
    """
    Constructs the name of an index.
    """
    return f"{table}_" + "_".join(columns)
95186af705e8224c8fcef289af42742984a46d33
279,576
def get_task_runs(task, task_runs_object):
    """
    Return a list of all associated task runs for a task.
    """
    output_list = []
    task_id = task['id']
    for tr in task_runs_object:
        if task_id == tr['task_id']:
            output_list.append(tr)
    return output_list
ca3dd0b660b1c2c9c06dd487f7015b2a4e19c57b
538,491
def BUTIA_COUNT(parent, r):
    """Get the number of boards connected."""
    return parent.robot.getButiaCount()
6dde132cdf7609a3cb3a157242b30b9ec851ce75
532,872
def any_in(a, b):
    """Checks if 'x in b' is true for any element x of a."""
    return any(x in b for x in a)
bf74a6b0b4e23f280ce4622ba94e2b006f5196cc
120,534
def get_local_as(node, vrf="default"):
    """Discover the local AS of the node."""
    cmd = "show bgp instance"
    instance_data = node.enable([cmd])[0]["result"]["vrfs"][vrf]
    return f"AS{instance_data['localAs']}"
c2555dba93f8e4608665a3aa004928c17b30cd36
129,883
from typing import List

def _onemax(x: List[int]) -> float:
    """onemax(x) is the most classical case of discrete functions, adapted to
    minimization. It is originally designed for lists of bits. It just counts
    the number of 1s, and returns len(x) - number of ones. It also works in
    the continuous case, but then discretizes the input domain by
    ]0.5, 1.5] --> 1 and 0 everywhere else.
    """
    return len(x) - sum(1 if int(round(w)) == 1 else 0 for w in x)
48b3b1faa7ffbc75ded8fcf14aad11ff618a5d4a
627,263
from typing import Dict
from typing import Any

def _get_max_attempts(config: Dict[str, Any]) -> int:
    """Retrieves the max attempts from the config.

    Args:
        config: a dictionary with the platform configuration.

    Returns:
        The number of attempts.
    """
    return config['queue_config']['retry_config']['max_attempts']
ccfb2b9528f18d8d59d10b72876b6328a51de63d
392,410
def _strip_frbr_section_from_pnx(pnx_xml):
    """
    We must strip the frbr section from a PNX record before re-inserting it.
    """
    for frbr_section in pnx_xml.findall('frbr'):
        pnx_xml.remove(frbr_section)
    return pnx_xml
bcc3f45a0cf8f787602c9b3d861ad8c69d2c198d
630,132
from pathlib import Path
import dill
import time
import traceback

def load_session(path, use_backup=True):
    """
    Loads a session.

    :param path: The session folder (labelled .ses) containing the
        session.dill and session.back file
    :param use_backup: If True, will attempt to load backup if main fails
    :return:
    """
    target = Path(path) / "session.dill"
    try:
        with open(target, "rb") as fp:
            return dill.load(fp)[0]
    except Exception as e:
        if use_backup:
            print(f"failed to load {target.name}. Reason:")
            target = target.with_suffix(".back")
            time.sleep(0.1)  # Get nicer print statements
            traceback.print_exc()
            time.sleep(0.1)  # Get nicer print statements
            print("Trying to load backup")
            with open(target, "rb") as fp:
                return dill.load(fp)[0]
        else:
            raise e
ad285cb3d377f5de54d6813885c90bc07902835f
269,530
import requests

def api_get_course(srcdb, crn, code):
    """
    Given a CU Boulder course code and CRN, return the API response for
    details on that course (including its sections).
    """
    url = "https://classes.colorado.edu/api/?page=fose&route=details"
    data = {
        "group": "code:{}".format(code),
        "key": "crn:{}".format(crn),
        "srcdb": srcdb,
    }
    resp = requests.post(url, json=data)
    resp.raise_for_status()
    return resp.json()
e8260aefaa5e4f2dd4a919bbc3af6cdce675f8fb
600,432
def make_string_literal(string):
    """Make python string literal out of a given string."""
    return "'{}'".format(string.replace("'", "\\'"))
6bf9694f8a2d2a872ea48f862898715dbf7d1d49
525,354
def gather_lists(list_):
    """
    Concatenate all the sublists of list_ and return the result.

    @param list[list[object]] list_: list of lists to concatenate
    @rtype: list[object]

    >>> gather_lists([[1, 2], [3, 4, 5]])
    [1, 2, 3, 4, 5]
    >>> gather_lists([[6, 7], [8], [9, 10, 11]])
    [6, 7, 8, 9, 10, 11]
    """
    new_list = []
    for l in list_:
        new_list += l
    return new_list
    # equivalent to...
    # return sum([l for l in list_], [])
f1da7a201cade349a331cf38651f0ce40fff9a45
526,202
def plot_number_of_structures_per_kinase_pdb_pair(structures):
    """
    Plot the number of structures that have x structures per kinase-PDB ID pair.

    Parameters
    ----------
    structures : pandas.DataFrame
        Structures DataFrame from opencadd.databases.klifs module.

    Returns
    -------
    matplotlib.pyplot.axis
        Plot axis.
    """
    kinase_pdb_pair_sizes = structures.groupby(
        ["kinase.klifs_name", "structure.pdb_id"]).size()
    ax = kinase_pdb_pair_sizes.plot(
        kind="hist",
        title="Number of structures per kinase-PDB pair",
        bins=kinase_pdb_pair_sizes.max(),
    )
    ax.set_xlabel("Number of structures per kinase-PDB pair")
    ax.set_ylabel("Number of kinase-PDB pairs")
    return ax
405a8581da0c3d2a8c32133d90a06e2f09692cbd
192,854
def remove_empty_terms(pot):
    """
    Remove terms from the potential that do not have a value associated
    with them.
    """
    return {k: v for k, v in pot.items() if v is not None}
1566b00853fcb9662f2b6af3da34f7ca2adc03e4
203,984
import re

def preprocess(text_string):
    """
    Accepts a text string and replaces:
    1) urls with URLHERE
    2) lots of whitespace with one instance
    3) mentions with MENTIONHERE

    This allows us to get standardized counts of urls and mentions
    without caring about specific people mentioned.
    """
    space_pattern = r'\s+'
    giant_url_regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
                       r'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    mention_regex = r'@[\w\-]+'
    parsed_text = re.sub(space_pattern, ' ', text_string)
    # Substitute placeholder tokens so urls and mentions can still be counted
    parsed_text = re.sub(giant_url_regex, 'URLHERE', parsed_text)
    parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)
    return parsed_text
6bc82ab98c194b6c7accc777aaa78d23efc12364
680,683
def _norm_hsl2rgb(h, s, l):
    """Convert HSL to RGB colours.

    This function assumes the input has been sanitised and does no validation
    on the input parameters.

    This calculation has been adapted from Wikipedia:
    https://en.wikipedia.org/wiki/HSL_and_HSV#To_RGB

    :param h: Hue
    :param s: Saturation
    :param l: Lightness
    :return: A tuple containing R, G, B colours
    """
    C = (1 - abs(2 * l - 1)) * s
    m = l - C / 2
    h_ = h / 60.0  # H' is not necessarily an integer
    X = C * (1 - abs(h_ % 2 - 1))
    r, g, b = 0, 0, 0
    if 0 <= h_ <= 1:
        r, g, b = C, X, 0
    elif 1 < h_ <= 2:
        r, g, b = X, C, 0
    elif 2 < h_ <= 3:
        r, g, b = 0, C, X
    elif 3 < h_ <= 4:
        r, g, b = 0, X, C
    elif 4 <= h_ <= 5:
        r, g, b = C, 0, X
    elif 5 < h_ <= 6:
        r, g, b = X, 0, C
    return r + m, g + m, b + m
1964f3647ee8763508ad39f0c6604694f2486c04
183,705
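Two spot checks for the HSL conversion above, assuming the function is in scope (h in degrees, s and l in [0, 1]):

    _norm_hsl2rgb(0, 1.0, 0.5)    # (1.0, 0.0, 0.0) -- pure red
    _norm_hsl2rgb(120, 1.0, 0.5)  # (0.0, 1.0, 0.0) -- pure green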
def educational_building(data):
    """Extract educational buildings.

    :param Dataframe data: Pandas Dataframe of building dataset
    :return: DataFrame containing educational buildings
    :rtype: DataFrame
    """
    education = data.loc[["kindergarten", "school", "university"], :]
    listOfString_edu = ['educational' for i in range(len(education))]
    education.insert(1, "buildings", listOfString_edu, True)
    educational = education.reset_index(drop=True)
    return educational
102d9d55326f688ad68b8f31a2cf7398efbeb878
407,532
def fibUnderN(n):
    """
    This function returns a list of fibonacci numbers under n.

    --param n : integer
    --return list : all fibs under n
    """
    fibs = [0, 1]
    ctr = 2
    while (fibs[-1] + fibs[-2]) < n:
        fibs.append(fibs[ctr - 1] + fibs[ctr - 2])
        ctr += 1
    return fibs.copy()
cf6244c19bdd62381ea9a3004d533a389a3806e9
401,758
def preconvert_preinstanced_type(value, name, type_):
    """
    Converts the given `value` to an acceptable value by the wrapper.

    Parameters
    ----------
    value : `Any`
        The value to convert.
    name : `str`
        The name of the value.
    type_ : ``PreinstancedBase`` instance
        The preinstanced type.

    Returns
    -------
    value : ``PreinstancedBase`` instance

    Raises
    ------
    TypeError
        If `value` was not given as `type_` instance, neither as
        `type_.value`'s type's instance.
    ValueError
        If there is no preinstanced object for the given `value`.
    """
    value_type = value.__class__
    if (value_type is not type_):
        value_expected_type = type_.VALUE_TYPE
        if value_type is value_expected_type:
            pass
        elif issubclass(value_type, value_expected_type):
            value = value_expected_type(value)
        else:
            raise TypeError(f'`{name}` can be passed as {type_.__name__} or as '
                            f'{value_expected_type.__name__} instance, got '
                            f'{value_type.__name__}.')

        try:
            value = type_.INSTANCES[value]
        except LookupError:
            raise ValueError(f'There is no predefined `{name}` for the '
                             f'following value: {value!r}.') from None

    return value
257948a0ebed93dcb8772d74c8378b9f5a42af36
81,701
def is_anagram(word1, word2):
    """Checks whether two words are anagrams.

    word1: string or list
    word2: string or list

    returns: boolean
    """
    return sorted(word1) == sorted(word2)
dd1417f0ced25149e029ec87c0064fbaed7156c9
654,615
def orSearch(inverseIndex, query):
    """
    Input: an inverse index, as created by makeInverseIndex, and a list of
    words to query
    Output: the set of document ids that contain _any_ of the specified words
    """
    rsetlist = [inverseIndex[word] for word in query if word in inverseIndex]
    # set.union(*[]) raises TypeError, so handle the no-match case explicitly.
    if not rsetlist:
        return set()
    results = set.union(*rsetlist)
    return results
c0f18e999ab1a75aa5bbb16f2a39dbbbe3943f2a
554,814
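A small usage sketch for `orSearch` above, with a hypothetical inverse index mapping words to sets of document ids:

    inverse_index = {'cat': {1, 2}, 'dog': {2, 3}}
    orSearch(inverse_index, ['cat', 'dog'])  # {1, 2, 3}
    orSearch(inverse_index, ['bird'])        # set() -- guard avoids set.union(*[])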
def square_area(side):
    """
    2. Function with one input and one output

    This function demonstrates how a function returns a processed output
    based on the received input.

    This function calculates the area of a square.

    side: the side of the square, must be a positive number
    area: the area of the square, must be a positive number
    """
    area = side * side
    return area
bec6587fb3de8d638ff3e1de8b3ca40a067923d0
484,607
import random

def get_label_indices(num_labels, sample_label):
    """
    Function to get sample label indices for a given number of labels and a
    sampling policy.

    :param num_labels: int number of labels
    :param sample_label: method for sampling the labels
    :return: list of labels defined by the sampling method.
    """
    if sample_label == "sample":  # sample a random label
        return [random.randrange(num_labels)]
    elif sample_label == "first":  # use the first label
        return [0]
    elif sample_label == "all":  # use all labels
        return list(range(num_labels))
    else:
        raise ValueError("Unknown label sampling policy %s" % sample_label)
267cc559386d94f6e4b0eea1576d70074422704d
330,159
def auc(truth, recommend):
    """Area under the ROC curve (AUC).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.

    Returns:
        float: AUC.
    """
    tp = correct = 0.
    for r in recommend:
        if r in truth:
            # keep track of the number of true positives placed before
            tp += 1.
        else:
            correct += tp

    # number of all possible tp-fp pairs
    pairs = tp * (recommend.size - tp)

    # if there is no TP (or no FP), it's meaningless for this metric
    # (i.e., AUC = 0.5)
    if pairs == 0:
        return 0.5

    return correct / pairs
eb2f137927cb733729bf3f896dc4d1f897235c43
268,359
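A worked example for `auc` above with illustrative numpy arrays: items 1 and 3 are relevant, and the ranking places them at positions 1 and 3:

    import numpy as np
    truth = np.array([1, 3])
    recommend = np.array([1, 2, 3, 4])
    auc(truth, recommend)  # correct = 3 ordered tp-fp pairs out of pairs = 2 * 2, so 0.75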
def _get_aliased_variables(role_name, dimension_variables):
    """
    Returns list of variable DTOs for a virtual/aliased Dimension "Role".

    Keyword Parameters:
    role_name  -- String, representing name of the OLAP dimension alias
        (Role) for which variables are to be returned.
    dimension_variables  -- list of 'variable' dto, representing the fields
        associated with the base Dimension which Role is an alias to.

    >>> from pprint import pprint
    >>> role_name = 'tuesdays_foo_dim'
    >>> var1 = { 'id': 1, 'column':'flavor', 'title':'Widget flavor'
    ...         ,'python_type': 'float', 'table': 'foo_dim'}
    >>> var2 = { 'id': 2, 'column':'brand', 'title':'Widget brand'
    ...         ,'python_type': 'str', 'table': 'foo_dim'}
    >>> aliased = _get_aliased_variables( role_name, [var1, var2])
    >>> pprint( aliased)
    [{'column': 'flavor',
      'id': 1,
      'python_type': 'float',
      'table': 'tuesdays_foo_dim',
      'title': 'Widget flavor'},
     {'column': 'brand',
      'id': 2,
      'python_type': 'str',
      'table': 'tuesdays_foo_dim',
      'title': 'Widget brand'}]
    """
    aliased_variables = []
    for dimension_variable in dimension_variables:
        new_variable = dict(dimension_variable)
        new_variable['table'] = role_name
        aliased_variables.append(new_variable)
    return aliased_variables
623a1b105e65d4135895aa7ce2ce810db797e0a7
581,509
from typing import Counter

def get_mode(counter: Counter) -> int:
    """
    Helper function to return the count of the most common element from an
    instance of Counter().

    :param counter: collections.Counter instance
    """
    mode = counter.most_common(1)
    if not mode:
        return 0
    # if mode is not empty it will be a list containing a
    # single tuple from which we want the second element
    return mode[0][1]
2095db6bae8d8ecb93ce4078f1692ecfe3dd38a4
299,793
def find_nth(s, x, n):
    """
    Find the nth occurrence of a substring in a string.

    Takes: string where to search, substring, nth occurrence.
    """
    # Start at -len(x) so the first search begins at index 0; starting at -1
    # would skip matches at the start of the string whenever len(x) > 1.
    i = -len(x)
    for _ in range(n):
        i = s.find(x, i + len(x))
        if i == -1:
            break
    return i
b54998db817272ec534e022a9f04ec8d350b08fb
5,859
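A few spot checks for the fixed `find_nth` above:

    find_nth("abcabcabc", "abc", 2)  # 3 -- the old start index of -1 would have missed the match at 0
    find_nth("abcabcabc", "abc", 4)  # -1 -- only three occurrences exist
    find_nth("aaaa", "aa", 2)        # 2 -- occurrences are counted without overlap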
def smallest(upper, lower):
    """Returns smallest of the two values."""
    val = min(upper, lower)
    return val, val
83e6bbb7d550bf84dde43e748078dfcfa229fcbe
237,304
def transition_model(corpus, page, damping_factor):
    """
    Return a probability distribution over which page to visit next,
    given a current page.

    With probability `damping_factor`, choose a link at random
    linked to by `page`. With probability `1 - damping_factor`, choose
    a link at random chosen from all pages in the corpus.
    """
    probability_distribution = dict()
    if page is None or len(corpus.get(page)) == 0:
        # probabilities are all the same
        probability = float(1) / len(corpus.keys())
        for key in corpus.keys():
            probability_distribution.update({key: probability})
    else:
        # probabilities are weighted
        probability = float(1 - damping_factor) / len(corpus.keys())
        linked_pages = corpus.get(page)
        linked_probability = float(damping_factor) / len(linked_pages)
        for key in corpus.keys():
            if linked_pages.issuperset({key}):
                probability_distribution.update(
                    {key: probability + linked_probability})
            else:
                probability_distribution.update({key: probability})
    return probability_distribution
8305a40304e2c9cd62f1dbba5721e2319abcab11
286,793
def partition_by_adjacent_tiles(tile_ids, dimension=2):
    """
    Partition a set of tile ids into sets of adjacent tiles.

    For example, if we're requesting a set of four tiles that form a
    rectangle, then those four tiles will become one set of adjacent tiles.
    Non-contiguous tiles are not grouped together.

    Parameters
    ----------
    tile_ids: [str,...]
        A list of tile_ids (e.g. xyx.0.0.1) identifying the tiles
        to be retrieved
    dimension: int
        The dimensionality of the tiles

    Returns
    -------
    tile_lists: [tile_ids, tile_ids]
        A list of tile lists, all of which have tiles that are within 1
        position of another tile in the list
    """
    tile_id_lists = []

    for tile_id in sorted(
        tile_ids, key=lambda x: [int(p) for p in x.split(".")[2 : 2 + dimension]]
    ):
        tile_id_parts = tile_id.split(".")

        # exclude the zoom level in the position
        # because the tiles should already have been partitioned
        # by zoom level
        tile_position = list(map(int, tile_id_parts[2:4]))

        added = False

        for tile_id_list in tile_id_lists:
            # iterate over each group of adjacent tiles
            for ct_tile_id in tile_id_list:
                ct_tile_id_parts = ct_tile_id.split(".")
                ct_tile_position = list(map(int, ct_tile_id_parts[2 : 2 + dimension]))

                far_apart = False

                # iterate over each dimension and see if this tile is close
                for p1, p2 in zip(tile_position, ct_tile_position):
                    if abs(int(p1) - int(p2)) > 1:
                        # too far apart, can't be part of the same group
                        far_apart = True

                if not far_apart:
                    # no position was too far
                    tile_id_list += [tile_id]
                    added = True
                    break
            if added:
                break
        if not added:
            tile_id_lists += [[tile_id]]

    return tile_id_lists
c0c37fd4b15a8d1e661412bf5d4eb9c887ba6f98
577,443
def subsetindex(full, subset):
    """
    Get the indices of the subset of a list.
    """
    if isinstance(subset, str):
        subset = [subset]

    idx = []
    for s in subset:
        idx += [i for i, x in enumerate(full) if x == s]
    return idx
1d8edf2dc270755bbf831aed1539395623c2acd8
92,552
def _is_epub(file_bytes: bytes) -> bool:
    """
    Decide if a file is an epub file.
    From https://github.com/h2non/filetype.py (MIT license)
    """
    # An epub is a zip archive (PK\x03\x04) whose first entry is an
    # uncompressed "mimetype" file containing "application/epub+zip".
    return (len(file_bytes) > 57 and
            file_bytes[0:4] == b'PK\x03\x04' and
            file_bytes[30:58] == b'mimetypeapplication/epub+zip')
55a536e71964af4d00789c21b42b81bb02487249
289,252
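A usage sketch for `_is_epub` above; the path is hypothetical, and only the header bytes are needed:

    with open("book.epub", "rb") as fh:
        header = fh.read(64)
    print(_is_epub(header))  # True for a well-formed epub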
import torch

def tensor_linspace(start, end, steps=10):
    """
    Vectorized version of torch.linspace.

    Inputs:
    - start: Tensor of any shape
    - end: Tensor of the same shape as start
    - steps: Integer

    Returns:
    - out: Tensor of shape start.size() + (steps,), such that
      out.select(-1, 0) == start, out.select(-1, -1) == end,
      and the other elements of out linearly interpolate between
      start and end.
    """
    assert start.size() == end.size()
    view_size = start.size() + (1,)
    w_size = (1,) * start.dim() + (steps,)
    out_size = start.size() + (steps,)

    start_w = torch.linspace(1, 0, steps=steps).to(start)
    start_w = start_w.view(w_size).expand(out_size)
    end_w = torch.linspace(0, 1, steps=steps).to(start)
    end_w = end_w.view(w_size).expand(out_size)

    start = start.contiguous().view(view_size).expand(out_size)
    end = end.contiguous().view(view_size).expand(out_size)

    out = start_w * start + end_w * end
    return out
50342f991879036bce0c93c76762b8056d8f0705
312,194
def replace_correction(series, **kwargs):
    """Corrects replacing values.

    Parameters
    ----------
    series: pd.Series
        The pandas series
    **kwargs:
        Arguments as per pandas replace function

    Returns
    -------
    pd.Series
        The corrected series.

    See Also
    --------
    Examples: :ref:`sphx_glr__examples_correctors_plot_replace.py`.
    """
    return series.replace(**kwargs)
107a664411bfa3b04911857baa125878658492d8
369,136
def no_blanks(string):
    """Removes all the blanks in string.

    :param string: A string to remove blanks from
    :type string: str
    :returns: the same string with all blank characters removed
    """
    return string.replace("\n", "").replace("\t", "").replace(" ", "")
9de5f0fa6bb81b346ea1deed0477ec8e48338e19
419,409
def getvalue(s):
    """
    getvalue() takes a string like <aaa>bbbbb<cc> and returns bbbbb
    """
    p = s.find('>')
    s = s[p + 1:]
    s = s[::-1]  # string reverse
    p = s.find('<')
    s = s[p + 1:]
    s = s[::-1]  # string reverse, again
    return s
1f0c03ea4f06b1bd71635d921aeebf7e9c1a1014
571,923
def split_out_internet_rules(rule_list):
    """Separate rules targeting the Internet versus normal rules."""
    normal_rules = filter(lambda x: x.target_zone != 'internet', rule_list)
    internet_rules = filter(lambda x: x.target_zone == 'internet', rule_list)
    return list(normal_rules), list(internet_rules)
aa838ef7655658b3255c127f392c536bceb5a3bd
30,661
def floatify(scalar):
    """
    Useful to make a float from strings compatible with fortran.

    Args:
        scalar (str, float): When a string representing a float that might be
            given in fortran notation, otherwise it might be a floating point

    Returns:
        float. The value associated to scalar as a floating point number

    Example:
        >>> # this would be the same with "1.e-4" or with 0.0001
        >>> floatify('1.d-4')
        0.0001
    """
    if isinstance(scalar, str):
        return float(scalar.replace('d', 'e').replace('D', 'E'))
    else:
        return scalar
9df5deaf619fe39cd90fc0f100bf0e588ca4d780
66,543
import json

def read_file(file_path, is_json=False):
    """
    Reads the content of a file.

    :param file_path: path of the file
    :param is_json: True if it's a json file
    :return: file's content
    """
    with open(file_path, 'r') as infile:
        if is_json:
            content = json.load(infile)
        else:
            content = infile.read()
    return content
c01d56b0a63b6f616824ae653182566b0b22eda9
125,339
def hash_point_pair(p1, p2):
    """Helper function to generate a hash from two time/frequency points."""
    # The third component is the time delta between the two points;
    # the original expression p2[1] - p2[1] was always zero.
    return hash((p1[0], p2[0], p2[1] - p1[1]))
8220fcd48d065707908e7c884d04745c2fc6cdcb
417,212
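A usage sketch for the fixed `hash_point_pair` above, assuming each point is a (frequency, time) tuple as the delta in the fix implies; the values are illustrative:

    p1 = (440, 0.50)  # hypothetical spectrogram peak
    p2 = (880, 0.75)
    h = hash_point_pair(p1, p2)  # combines both frequencies with the 0.25 time delta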
def asInteger(epsg):
    """Convert EPSG code to integer."""
    return int(epsg)
18a14944f5f29ec09585757f0edc912b896a12ba
701,720
import torch

def normalized_state_to_tensor(state, building):
    """
    Transforms a state dict to a pytorch tensor.

    The function ensures the correct ordering of the elements according to
    the list building.global_state_variables. It expects a **normalized**
    state as input.
    """
    ten = [[state[sval] for sval in building.global_state_variables]]
    return torch.tensor(ten)
4aea246f388f941290d2e4aeb6da16f91e210caa
6,420
def guess_age_group(swimmer_age: int) -> tuple[int, int]:
    """Guess the age group from a swimmer's age.

    Args:
        swimmer_age (int): The swimmer's age.

    Returns:
        tuple[int, int]: The age group in terms of (age_min, age_max).
    """
    if swimmer_age <= 8:
        # Probably 8&U
        return 0, 8
    elif 9 <= swimmer_age <= 10:
        # Probably 9-10
        return 9, 10
    elif 11 <= swimmer_age <= 12:
        # Probably 11-12
        return 11, 12
    elif 13 <= swimmer_age <= 14:
        # Probably 13-14
        return 13, 14
    else:
        # Probably open
        return 0, 109
8f44ad7217ab2d4d273860e732bc68cb2218838c
63,593
def ACF_brute(df_cov, lag):
    """
    Brute force calculate the ACF from coverage data.

    Args:
        df_cov: a dataframe of coverage data
        lag: the lag for which to calculate the ACF
    """
    # Assign columns and index
    df_cov.columns = ['chrom', 'pos', 'depth']
    df_cov.index = df_cov.pos

    # Normalize the depth
    df_cov.depth = df_cov.depth / df_cov.depth.mean()

    # Brute force find the positions that are $lag distance apart
    df_lag = df_cov.loc[df_cov.pos + lag, :].dropna()
    df_start = df_cov.loc[df_lag.pos - lag, :]

    # Calc ACF
    n = len(df_lag)
    mu_lag = df_lag.depth.mean()
    mu_start = df_start.depth.mean()
    ACF = (1. / n) * ((df_lag.depth - mu_lag) * (df_start.depth - mu_start)).sum()
    return ACF
9d17f8a6799165e6e63b55361d33db672ff3b205
348,731
def unionRect(rect1, rect2):
    """Determine union of bounding rectangles.

    Args:
        rect1: First bounding rectangle, expressed as tuples
            ``(xMin, yMin, xMax, yMax)``.
        rect2: Second bounding rectangle.

    Returns:
        The smallest rectangle in which both input rectangles are fully
        enclosed.
    """
    (xMin1, yMin1, xMax1, yMax1) = rect1
    (xMin2, yMin2, xMax2, yMax2) = rect2
    xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2),
                              max(xMax1, xMax2), max(yMax1, yMax2))
    return (xMin, yMin, xMax, yMax)
1c89439efb082159400acd25396cbf43d7ea1ddf
675,530
from functools import reduce

def chomp_empty_strings(strings, c, reverse=False):
    """
    Given a list of strings, some of which are the empty string "", replace
    the empty strings with c and combine them with the closest non-empty
    string on the left or "" if it is the first string.

    Examples:
    for c="_"
    ['hey', '', 'why', '', '', 'whoa', '', ''] -> ['hey_', 'why__', 'whoa__']
    ['', 'hi', '', "I'm", 'bob', '', ''] -> ['_', 'hi_', "I'm", 'bob__']
    ['hi', "i'm", 'a', 'good', 'string'] -> ['hi', "i'm", 'a', 'good', 'string']

    Some special cases are:
    [] -> []
    [''] -> ['']
    ['', ''] -> ['_']
    ['', '', '', ''] -> ['___']

    If reverse is true, empty strings are combined with closest non-empty
    string on the right or "" if it is the last string.
    """
    def _rev(l):
        return [s[::-1] for s in l][::-1]

    if reverse:
        return _rev(chomp_empty_strings(_rev(strings), c))
    if not len(strings):
        return strings
    if sum(map(len, strings)) == 0:
        return [c * (len(strings) - 1)]

    class _Chomper:
        def __init__(self, c):
            self.c = c

        def __call__(self, x, y):
            # x is the list up to now; y is the next item in the list.
            # x should be [""] initially, and then empty strings are
            # filtered out at the end.
            if len(y) == 0:
                return x[:-1] + [x[-1] + self.c]
            else:
                return x + [y]

    return list(filter(len, reduce(_Chomper(c), strings, [""])))
918bc2b3972140398eee0b90e250cccf0333c8ce
214,643
def remove_additional_whitespace(tokens):
    """
    Removes additional whitespaces.

    :param tokens: A list of tokens
    :return: A comparable list of tokens to the input but with additional
        whitespaces removed
    """
    cleaned_tokens = []
    for token in tokens:
        token = token.replace(' ', '')
        cleaned_tokens.append(token)
    return cleaned_tokens
096f0ae0d88d21159d4bc7349fd1b7d8eb5ebfe7
72,286