content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import base64


def get_content_b64(filepath):
    """Read a file and return its content as a base64-encoded string.

    :param filepath: path of the file to read
    :return: base64 representation of the file's bytes, decoded as UTF-8
    """
    with open(filepath, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode('utf-8')
b95ad844796425a9b5e112b5d1801dd2c8ee28c3
38,066
import ast


def isConstant(x):
    """Determine whether the provided AST node is a constant literal.

    Bug fix: the original checked ``type(x) in [ast.Num, ast.Str,
    ast.Bytes, ast.NameConstant]``, but those node classes were
    deprecated in Python 3.8 (the parser emits ``ast.Constant`` instead,
    so the type test never matched) and removed in Python 3.12.

    :param x: an ``ast.AST`` node
    :return: True when ``x`` is a constant node, else False
    """
    return isinstance(x, ast.Constant)
232946a37e8bce7c093d6f8a446e11019cfc798c
38,069
def get_instances_not_in_output(possible_instances, stdout_output): """return instances where their id is not in the stdout output""" return [instance for instance in possible_instances if instance['id'] not in stdout_output]
540663c38511c42f95466da06cb05153ce15a872
38,070
import re


def is_url(url: str):
    """Check whether ``url`` looks like an HTTP(S) link.

    Returns the ``re.Match`` object on success, ``None`` otherwise.
    Pattern taken from https://stackoverflow.com/a/17773849/8314159
    """
    pattern = (
        r"(https?://(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]"
        r"[a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?://(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}"
        r"|www\.[a-zA-Z0-9]+\.[^\s]{2,})"
    )
    return re.search(pattern, url)
c58be1bc1775024ef412ff15529de7a780bde5ea
38,078
def _make_dataset(
        df, label_column_name, left_value_column_name,
        right_value_column_name, standing_out_label_name_list):
    """Make the required dataset from the data frame.

    Parameters
    ----------
    df : pandas.DataFrame
        The target data frame.
    label_column_name : str
        Column name of the label.
    left_value_column_name : str
        Column name of the value on the left.
    right_value_column_name : str
        Column name of the value on the right.
    standing_out_label_name_list : list of str
        List of label names to make it stand out.

    Returns
    -------
    dataset : list of dicts
        The generated data set. Each dict has the keys ``label`` (str),
        ``left``/``right`` (int or float) and ``isStandingOutData``
        (int, 0 or 1). Non-standing-out rows come first, then the
        standing-out ones, each group in data-frame order.
    """
    renamed = df.rename(columns={
        label_column_name: 'label',
        left_value_column_name: 'left',
        right_value_column_name: 'right',
    })
    plain_rows = []
    standing_out_rows = []
    # Single pass: route each row into one of the two groups so the
    # standing-out rows end up last (drawn on top), as before.
    for _, row in renamed.iterrows():
        record = {
            'label': row['label'],
            'left': row['left'],
            'right': row['right'],
        }
        if row['label'] in standing_out_label_name_list:
            record['isStandingOutData'] = 1
            standing_out_rows.append(record)
        else:
            record['isStandingOutData'] = 0
            plain_rows.append(record)
    return plain_rows + standing_out_rows
3eb220101354ea919369dfeb1317310f18a4df97
38,085
from pathlib import Path
import logging


def evaluate_implementation(callback_file: Path, function_name: str) -> bool:
    """Check whether ``function_name`` occurs in the text of ``callback_file``.

    Logs at INFO level when found and at ERROR level when not.

    :param callback_file: source file to scan
    :param function_name: name to look for
    :return: True when the name occurs in the file's text
    """
    source = callback_file.read_text()
    if function_name in source:
        logging.info(f"Found '{function_name}' in '{callback_file}'.")
        return True
    logging.error(f"Did not find '{function_name}' in '{callback_file}'.")
    return False
3ad087dc2db09aeb78874c9b934a553f7457e511
38,088
def _get_user_id(data):
    """Return the user ID which this event was generated by.

    Event payload schema highly depends on event type, so both the
    ``user`` and ``user_id`` keys are tried, in that order.

    :param data: event payload
    :type data: dict
    :rtype: non empty string or None
    """
    return data.get('user') or data.get('user_id') or None
4f4b418da99bdd6ae99e628a30fc0c02c2cfe4ab
38,089
def validate_number(num, low, high):
    """Validate that ``num`` parses as an integer within ``[low, high]``.

    :param num: value (typically user-entered string) to validate
    :param low: inclusive lower bound
    :param high: inclusive upper bound
    :return: True when ``int(num)`` lies within the bounds, else False
    """
    try:
        value = int(num)
    except ValueError:
        return False
    return low <= value <= high
ffe695934b09c8c6e34a3ea1880f473efb3a36c9
38,090
def has_path(coll, path):
    """Check whether a chain of keys/indexes exists in nested ``coll``.

    :param coll: nested dict/list-like collection
    :param path: iterable of keys or indexes to follow in order
    :return: True when every step resolves; False on a missing key/index
    """
    node = coll
    for step in path:
        try:
            node = node[step]
        except (KeyError, IndexError):
            return False
    return True
9904656c367c466dc6fcf352a828e7601a5ce6d2
38,097
def headers(questions):
    """Generate the CSV header row for supplier applications.

    The row starts with four fixed Digital Marketplace columns, followed
    by one column per question formatted ``<position>:<question id>``,
    where position is the 1-based index of the question in ``questions``
    (e.g. ``43:SQ3-1d`` — SQ3-1d is the old question id, 43 its number
    on the supplier application). Order is important.

    :param questions: ordered array of question ids
    :return: array of strings representing headers
    """
    fixed_columns = [
        'Digital Marketplace ID',
        'Digital Marketplace Name',
        'Digital Marketplace Duns number',
        'State of Declaration',
    ]
    question_columns = [
        "{}:{}".format(position, question)
        for position, question in enumerate(questions, start=1)
    ]
    return fixed_columns + question_columns
81f7da32157ea4bbc2467ccf112f97c7f68fe688
38,100
import logging


def scope_logger(cls):
    """Class decorator that attaches a class-local logger as ``cls.log``.

    The logger is named ``<module>.<ClassName>``.

    Example:
        >>> @scope_logger
        >>> class Test:
        >>>     def __init__(self):
        >>>         self.log.info("class instantiated")
        >>> t = Test()
    """
    logger_name = '{0}.{1}'.format(cls.__module__, cls.__name__)
    cls.log = logging.getLogger(logger_name)
    return cls
84e00f8f668accd362d4fc29016fd2ec95f0bcef
38,102
def cvtInt(intstr):
    """Convert an integer string into an integer.

    Parameters:
        intstr - integer string, format: "[0-9]+"

    Return Value:
        The converted integer value on success, ``None`` on error.
    """
    try:
        return int(intstr)
    except (SyntaxError, NameError, TypeError, ValueError):
        return None
907a73c358d6de3231caffc610afaf3f032613ab
38,104
def add_variable_to_dataset(dataset, var_name):
    """Add ``var_name`` to ``dataset`` only if it doesn't already exist.

    Matching is case sensitive: "radius" and "Radius" are two different
    variable names.

    :return: the Variable object associated with the variable name
    """
    if var_name in dataset.variable_names:
        return dataset.variable(var_name)
    return dataset.add_variable(var_name)
109359e3f986ea08c1166fd72a76d878238f6a13
38,106
import unicodedata


def filter_non_printable(string_to_filter):
    """Filter a string by removing non-printable (format-control) chars.

    Bug fix: the original tested ``unicodedata.category(c) in set('Cf')``,
    i.e. membership in ``{'C', 'f'}``; two-letter category codes such as
    ``'Cf'`` never matched that set, so nothing was ever filtered. The
    comparison is now against the ``'Cf'`` category code itself.

    :param string_to_filter: input text
    :return: text with Unicode category-``Cf`` characters removed
    """
    return ''.join(
        c for c in string_to_filter
        if unicodedata.category(c) != 'Cf'
    )
dc9a3513d29ea8891a952b879b31bcb0752f4bb0
38,107
def _type_to_template(qname, subject_type, predicate, object_type):
    """
    >>> _type_to_template(lambda x: "q:"+x, "subject", "pred", "object")
    'rdf/q:subject/q:pred/q:object.html'
    >>> _type_to_template(lambda x: "q:"+x, "subject", "pred", None)
    'rdf/q:subject/q:pred/rdf:Resource.html'
    >>> _type_to_template(lambda x: "q:"+x, "subject", None, "object")
    'rdf/q:subject/q:object.html'
    >>> _type_to_template(lambda x: "q:"+x, "subject", None, None)
    'rdf/q:subject.html'
    >>> _type_to_template(lambda x: "q:"+x, None, None, None)
    'rdf/rdf:Resource.html'
    """
    # Missing types fall back to the generic rdf:Resource qname.
    subject_qname = qname(subject_type) if subject_type else "rdf:Resource"
    object_qname = qname(object_type) if object_type else "rdf:Resource"
    if predicate:
        parts = [subject_qname, qname(predicate), object_qname]
    elif object_type:
        parts = [subject_qname, object_qname]
    else:
        parts = [subject_qname]
    return "rdf/{}.html".format("/".join(parts))
1d7243071cca7226947bfd763d00b7be50620c15
38,108
def __normalize_str(name):
    """Strip all non-alphanumeric characters from ``name`` and lowercase it."""
    alnum_only = [ch for ch in name if ch.isalnum()]
    return ''.join(alnum_only).lower()
bf174967de9d8aa812fd78ad9a18e6f076bc137f
38,113
def without_ends(string: str) -> str:
    """Return ``string`` with its first and last characters removed.

    >>> without_ends('abc')
    'b'
    """
    trimmed = string[1:]
    return trimmed[:-1]
eedc605702d67a22341a10a1df3a719b78b6174d
38,115
async def healthcheck():
    """Health-check endpoint: return the API status payload."""
    status = {"ping": "pong"}
    return status
cad4018cda809dcc3713b5a104bc25acf4830936
38,117
def vbar(vmodel, event_depth, station_elevation):
    """
    Calculates the average velocity between source and receiver
    for a given velocity model. Only need the difference in the
    vertical axis as sines of angles will cancel.

    Parameters
    ----------
    vmodel : pandas DataFrame
        Contains the velocity model, ordered by deepest layer first.
    event_depth : float
        Depth of event. Units should be consistent with station_elevation.
    station_elevation : float
        Elevation of event. Units should be consistent with depth of event.

    Returns
    -------
    vbar : float
        Average velocity within the model.
    """
    # Accumulates sum(distance_in_layer * layer_velocity); divided by the
    # total vertical span at the end to give a thickness-weighted mean.
    average = 0.0
    for i, layer in vmodel.iterrows():
        # NOTE(review): assumes a default RangeIndex so iloc[i+1] is the
        # next (shallower) row, and that column 0 holds depth and
        # column 2 velocity — TODO confirm against the model format.
        layer_top = vmodel.iloc[i+1][0]
        layer_bottom = layer[0]
        # If the station sits inside this layer, clip the layer top to
        # the station elevation so only the span below it counts.
        if station_elevation < layer_bottom and station_elevation > layer_top:
            layer_top = station_elevation
        # -100.0 appears to act as a sentinel terminating the model;
        # stop accumulating there — presumably the top of the model.
        if layer_top == -100.0:
            break
        if event_depth <= layer_top:
            # Event is shallower than this layer; no contribution.
            continue
        elif event_depth > layer_top and event_depth <= layer_bottom:
            # Handle interpolated distance
            dist_in_layer = abs(event_depth - layer_top)
        else:
            # Handle full layer
            dist_in_layer = abs(layer_bottom - layer_top)
        average += dist_in_layer * layer[2]
    return average / (event_depth - station_elevation)
ce10c0148d9b292b9a825a49bc983b7c81d975b2
38,119
def is_weekend(data):
    """Add a binary ``is_weekend`` column to a pandas DataFrame.

    Args:
        data - a pandas DataFrame containing a 'created_time' column of
            datetime objects

    Returns:
        The same DataFrame with an additional 'is_weekend' column:
        1 when created_time falls on Friday, Saturday or Sunday
        (``weekday()`` in {4, 5, 6}), else 0.
    """
    weekend_days = {4, 5, 6}
    # Single column assignment replaces the original per-row chained
    # indexing (data['is_weekend'][idx] = 1), which pandas flags with
    # SettingWithCopyWarning and which may silently fail to write through.
    data['is_weekend'] = [
        1 if ts.weekday() in weekend_days else 0
        for ts in data['created_time']
    ]
    return data
1dd00dda7e41031bc7bac462618124ca8fd133bd
38,122
def route(url, endpoint=None, methods=None):
    """
    A decorator that apply a route to the view or action.

    :param str url: The url rule.
    :param str endpoint: The endpoint.
    :param list methods: A list of http methods.
    :return: A function.
    """
    if not url:
        raise ValueError('url cannot be empty.')

    def decorator(func):
        # Lazily create the per-function route registry on first use.
        existing = getattr(func, 'routes', None)
        if not existing:
            existing = []
            func.routes = existing
        existing.append((url, endpoint, methods))
        return func

    return decorator
18b5c3fb287e8f21e8a8eeeac8643f123df75b7f
38,123
import string
import secrets


def rand_token(length: int = 25,
               chars: str = string.ascii_uppercase + string.digits) -> str:
    """Generate a cryptographically random token.

    Does not check for duplicates yet. A length of 25 should give us
    8.082812775E38 keys.

    length: - length of token to generate
    chars:  - characters used in seeding of token
    """
    picks = [secrets.choice(chars) for _ in range(length)]
    return "".join(picks)
1cecef09eca30dee6bb607d7e5ce359d977e8beb
38,127
def word_overlap(left_words, right_words):
    """Returns the Jaccard similarity between two sets.

    Note
    ----
    The topics are considered sets of words, and not distributions.

    Parameters
    ----------
    left_words : set
        The set of words for first topic
    right_words : set
        The set of words for the other topic

    Returns
    -------
    jaccard_similarity : float
    """
    shared = left_words & right_words
    combined = left_words | right_words
    return len(shared) / len(combined)
3baa3ec5605bef4658815ac4d546539c480ae4b5
38,133
def convert_weight(val, old_scale="kg", new_scale="pound"):
    """
    Convert from a weight scale to another one among kg, gram, and pound.

    Parameters
    ----------
    val: float or int
        Value of the weight to be converted expressed in the original scale.
    old_scale: str
        Original scale from which the weight value will be converted.
        Supported scales are Kilogram ['Kilogram', 'kilogram', 'kg'],
        Gram ['Gram', 'gram', 'gr'] or Pound ['Pound', 'pound', 'pd'].
    new_scale: str
        New scale to which the weight value will be converted.
        Same supported scales as ``old_scale``.

    Raises
    -------
    AttributeError
        If either of the scales is not one of the supported ones.

    Returns
    -------
    res: float
        Value of the converted weight expressed in the new scale.
    """
    # Normalize through kilograms as the intermediate scale.
    scale_old = old_scale.lower()
    if scale_old in ('kilogram', 'kg'):
        in_kg = val
    elif scale_old in ('gram', 'gr'):
        in_kg = val / 1000.0
    elif scale_old in ('pound', 'pd'):
        in_kg = 0.4535924 * val
    else:
        raise AttributeError(
            f'{old_scale} is unsupported. kg, gr, and pound are supported')
    scale_new = new_scale.lower()
    if scale_new in ('kilogram', 'kg'):
        return in_kg
    if scale_new in ('gram', 'gr'):
        return 1000 * in_kg
    if scale_new in ('pound', 'pd'):
        return in_kg / 0.4535924
    raise AttributeError(
        f'{new_scale} is unsupported. kg, gr, and pound are supported')
d38fed48ac998b8c21b8dd25fb497479d04e899a
38,138
def is_chinese(target_str):
    """Determine whether the string contains any Chinese character.

    Args:
        target_str (str): target string

    Returns:
        bool: True if any character falls in the CJK Unified Ideographs
        range (U+4E00 .. U+9FFF), else False.
    """
    return any('\u4e00' <= ch <= '\u9fff' for ch in target_str)
ee75d9ea6d6e396964f511c10eaeeaf96a836aaa
38,141
def format_record(record):
    """Format float values to high precision, not in exponential form.

    Floats become 15-decimal fixed-point strings; all other entries are
    passed through unchanged.
    """
    formatted = []
    for value in record:
        formatted.append(f'{value:.15f}' if isinstance(value, float) else value)
    return formatted
a4b6b25ac429129d843ef59b25d9ae9fc7531969
38,144
import math def inverseJukesCantor(d): """Takes a substitution distance and calculates the number of expected changes per site (inverse jukes cantor) d = -3/4 * log(1 - 4/3 * p) exp(-4/3 * d) = 1 - 4/3 * p 4/3 * p = 1 - exp(-4/3 * d) p = 3/4 * (1 - exp(-4/3 * d)) """ assert d >= 0.0 return 0.75 * (1 - math.exp(-d * 4.0/3.0))
648f091d2a8daf0b41cf939c007e50f6c0eef52a
38,145
def cnvtol(self, lab="", value="", toler="", norm="", minref="", **kwargs):
    """Sets convergence values for nonlinear analyses.

    APDL Command: CNVTOL

    Parameters
    ----------
    lab
        Valid convergence label. If STAT, list the status of the
        currently specified criteria.
    value
        Typical reference value for the specified convergence label (Lab).
    toler
        Tolerance; defaults depend on the label (e.g. 0.005 (0.5%) for
        force and moment, 1.0E-4 for DVOL, 0.05 for displacement/HDSP).
    norm
        Norm selection: 2 = L2 norm (default, except for Lab = U),
        1 = L1 norm, 0 = infinite norm (default for Lab = U).
    minref
        Minimum value allowed for the program-calculated reference value.
        Negative disables the minimum. Used only if VALUE is blank.

    Notes
    -----
    Usually not needed because the default convergence criteria are
    sufficient for most nonlinear analyses. Setting a criterion for one
    label does not affect any other label. GUI "Delete" operations can
    write CNVTOL with a blank Lab, VALUE = -1 and an integer TOLER; such
    log-file forms are for batch/-/INPUT use only. Convergence norms may
    be tracked graphically while solving via the /GST command (on by
    default for interactive sessions, off for batch). Also valid in
    PREP7. See the ANSYS CNVTOL command reference for full details.
    """
    cmd = f"CNVTOL,{lab},{value},{toler},{norm},{minref}"
    return self.run(cmd, **kwargs)
632a895db755cdf23c88b1df88700cb9c0529f69
38,147
from typing import Any

# Sentinel distinguishing "key absent" from keys holding falsy values.
_FIND_KEY_MISSING = object()


def _find_key_search(d, key):
    # Depth-first search; returns _FIND_KEY_MISSING when the key is absent.
    if key in d:
        return d[key]
    for child in d.values():
        if not isinstance(child, dict):
            continue
        val = _find_key_search(child, key)
        if val is not _FIND_KEY_MISSING:
            return val
    return _FIND_KEY_MISSING


def find_key(d: dict, key: str) -> Any:
    """Finds a key nested arbitrarily deeply inside a dict.

    Principally useful since the structure of NRPE relation data is not
    completely reliable.

    Bug fix: the original tested the recursive result with ``if val:``
    and therefore skipped nested keys holding falsy values (0, "",
    False); a sentinel is now used so any present value is returned.
    Returns ``None`` only when the key is absent.
    """
    val = _find_key_search(d, key)
    return None if val is _FIND_KEY_MISSING else val
e6b176450d25ea1e194019c7d4bdb85d500488ae
38,149
def gather_results(detectors):
    """Execute ``compute`` on each detector and collect its output.

    After ``d.compute()`` runs, ``d.getDetectorOutput()`` is stored
    under the key ``d.getName()`` in the returned dict.

    :param detectors: iterable of detector objects
    :return: dict mapping detector name to detector output
    """
    collected = {}
    for detector in detectors:
        detector.compute()
        collected[detector.getName()] = detector.getDetectorOutput()
    return collected
10366cc7880474f54d093c5f4bd8c11b4b454aab
38,150
def _find_label_rows(sheet):
    """Search excel column A for cells containing the substring 'Label'.

    :param sheet: xlrd-style sheet exposing ``nrows`` and ``cell_value``
    :return: list of zero-indexed row numbers whose column-A cell
        contains 'Label'
    """
    return [
        row for row in range(sheet.nrows)
        if "Label" in sheet.cell_value(row, 0)
    ]
5515874500c5ef514df02019e745d609b0474b2f
38,151
import hashlib


def hash_all(strs, digest=None):
    """Returns a hex hash of the concatenation of all the strings in strs.

    If a hashlib message digest is not supplied a new sha1 message
    digest is used.

    :param strs: iterable of byte strings to feed into the digest
    :param digest: optional pre-built hashlib digest to update
    :return: hex digest string
    """
    if digest is None:
        digest = hashlib.sha1()
    for chunk in strs:
        digest.update(chunk)
    return digest.hexdigest()
585496aaae534d24cba512482765aeb9250ef6b8
38,156
import socket
import struct


def parse_ipv6(addr):
    """Return the 128-bit integer representation of the given IPv6 address."""
    packed = socket.inet_pton(socket.AF_INET6, addr)
    high, low = struct.unpack('!QQ', packed)
    return (high << 64) | low
6456d02ae7b4b5eadd2666fad7308ef5547b98dc
38,157
def get_protein(chain):
    """Get protein residues (residues that are not hetero-residues).

    A residue is kept when the hetero flag in its ``full_id`` — field
    ``full_id[3][0]`` — is a plain space.
    """
    protein = []
    for residue in chain.get_residues():
        if residue.full_id[3][0] == ' ':
            protein.append(residue)
    return protein
f5d20e0a7edf15f3a90a4b2c3f889211cb44d87d
38,159
import zlib


def text_decompress(text) -> str:
    """Decompress zlib-compressed text.

    Args:
        text (str or bytes): text to decompress; a ``str`` is assumed to
            be already decompressed and is returned unchanged.

    Returns:
        str: the decompressed text

    Note:
        ``isinstance`` replaces the fragile ``type(text).__name__ ==
        "bytes"`` comparison — same behavior for str/bytes, and robust
        to bytes subclasses.
    """
    if isinstance(text, bytes):
        return zlib.decompress(text).decode()
    return text
933d8fe5ac4f615b60831551deee80fc429171ac
38,160
def is_rect_intersection(minlat, maxlat, minlon, maxlon, latitude, longitude):
    """Check whether a point lies inside a lat/lon bounding rectangle.

    Any bound given as ``None`` is treated as unconstrained; bounds and
    coordinates are compared after conversion to ``float``.

    :param: minlat : the minimum rectangular latitude
    :type: float
    :param: maxlat : the maximum rectangular latitude
    :type: float
    :param: minlon : the minimum rectangular longitude
    :type: float
    :param: maxlon : the maximum rectangular longitude
    :type: float
    :param: latitude : the latitude of the point to check
    :type: float
    :param: longitude : the longitude of the point to check
    :type: float
    """
    if minlat is not None and float(minlat) > float(latitude):
        return False
    if minlon is not None and float(minlon) > float(longitude):
        return False
    if maxlat is not None and float(maxlat) < float(latitude):
        return False
    if maxlon is not None and float(maxlon) < float(longitude):
        return False
    return True
39f872e74a9cf6d77521a5a5666bf8701662ba0c
38,161
from datetime import datetime
import time


def rddToFileName(prefix, suffix, timestamp):
    """
    Return string prefix-time(.suffix)

    >>> rddToFileName("spark", None, 12345678910)
    'spark-12345678910'
    >>> rddToFileName("spark", "tmp", 12345678910)
    'spark-12345678910.tmp'
    """
    # A datetime is converted to epoch milliseconds first.
    if isinstance(timestamp, datetime):
        seconds = time.mktime(timestamp.timetuple())
        timestamp = int(seconds * 1000) + timestamp.microsecond // 1000
    name = "{}-{}".format(prefix, timestamp)
    if suffix is not None:
        name = "{}.{}".format(name, suffix)
    return name
ecf09d0fc16b23b892635197c87495ad8aa42bdf
38,163
def group_com(mat_A, mat_B):
    """Compute the group commutator A B A^dagger B^dagger for two matrices A, B."""
    a_dagger = mat_A.T.conj()
    b_dagger = mat_B.T.conj()
    return mat_A @ mat_B @ a_dagger @ b_dagger
b84170a21fc85f7a3dc68014483137804661f3c0
38,165
def compare_dict_keys(dict_a, dict_b, compare_keys):
    """Compare two dictionaries on the specified keys.

    Only keys of ``dict_a`` that are also in ``compare_keys`` are
    checked; each must be present in ``dict_b`` with an equal value.
    """
    for key in dict_a:
        if key in compare_keys and dict_a[key] != dict_b[key]:
            return False
    return True
00de8cc97f8b56608575570150a97038ed61b997
38,170
def get_index(sheet, *names):
    """Return the column index of the first matching header name.

    Assumes row 1 of ``sheet`` is the header row. Matching is done with
    leading and trailing whitespace stripped, case insensitive.

    :raises AttributeError: when none of ``names`` is found
    """
    header = [(cell.value or "").strip().lower() for cell in sheet[1]]
    for candidate in names:
        normalized = candidate.strip().lower()
        if normalized in header:
            return header.index(normalized)
    raise AttributeError(f"None of {names} found in header {header}")
45af5638cc66fb51a4addbf8ecec27ffadd2cf09
38,174
def _hex_to_triplet(h):
    """Convert a hexadecimal color string to a triplet of int8 integers.

    A leading ``#`` is accepted and stripped.
    """
    digits = h[1:] if h.startswith('#') else h
    return tuple(int(digits[i:i + 2], 16) for i in (0, 2, 4))
e84b3de0d94eda11a63390cfd0448708bd69cc66
38,180
def unique_color_from_identifier(identifier):
    """Return unique color as RGB tuple.

    Useful for creating PNG images where each color is used as an
    identifier.

    Raises TypeError if the identifier is not an integer.
    Raises ValueError if the identifier is not in the range 0 to 16777215
    inclusive.

    :param identifier: positive integer in range from 0 to 16777215 inclusive
    :raises: TypeError, ValueError
    :returns: RGB tuple
    """
    if not isinstance(identifier, int):
        raise TypeError("Identifier is not an integer {}".format(identifier))
    if identifier < 0:
        raise ValueError("Negative identifier not allowed")
    if identifier >= 256 * 256 * 256:
        raise ValueError("Identifier {} >= {}".format(identifier, 256 * 256 * 256))
    # Decode the identifier as a base-256 number: red, green, blue.
    remainder, blue = divmod(identifier, 256)
    red, green = divmod(remainder, 256)
    return (red, green, blue)
dcf3555c95e6799c1d9042c2342d9181e44d56cd
38,186
def get_recall(rec, tru):
    """Recommendation recall: |{R & P}|/|P|

    (R - recommended products, P - relevant products). Returns 0 when
    there are no relevant products.
    """
    if not tru:
        return 0
    return len(rec & tru) / len(tru)
83ec9f53a43a8d4f0b2d6174457cd8a7937a1bed
38,189
import json


def load_config(config_files):
    """Load and merge JSON configuration files.

    The latter configs overwrite the previous configs.

    :param config_files: iterable of file paths
    :return: merged configuration dict
    """
    merged = {}
    for path in config_files:
        with open(path, 'rt') as handle:
            merged.update(json.load(handle))
    return merged
4a61ca063bf8147a0f2576cddc8bf438b33f8792
38,192
from typing import Optional


def askyn(question: str, default: Optional[bool] = None) -> bool:
    """
    Asks a yes or no question and returns a bool.
    REF: https://gist.github.com/garrettdreyfus/8153571
    """
    # Append a prompt suffix, capitalizing the default choice if any.
    if default is None:
        question += " [y/n]: "
    elif default:
        question += " [Y/n]: "
    else:
        question += " [y/N]: "

    # Keep asking until the reply is recognized.
    while True:
        reply = input(question).lower().strip()
        if reply == "" and default is not None:
            return default
        if reply in ("y", "yes"):
            return True
        if reply in ("n", "no"):
            return False
        print(f"Unrecognized answer: '{reply}'")
f038ffce000e5c39d707dd61d3f1967195df4e6d
38,195
import torch


def box_center_to_corners(b):
    """
    Converts a set of oriented bounding boxes from centered
    representation (x_c, y_c, w, h, theta) to corner representation
    (x0, y0, ..., x3, y3).

    Arguments:
        b (Tensor[N, 6]): boxes to be converted. They are expected to be
            in (x_c, y_c, w, h, c, s) format.
            * c, s: unnormalized cos, sin

    Returns:
        c (Tensor[N, 8]): converted boxes in (x0, y0, ..., x3, y3)
            format, where the corners are sorted counterclockwise.
    """
    # print(b.shape)
    x_c, y_c, w, h, c, s = b.unbind(-1)  # [N,]
    # print(x_c.shape)
    # Map s from [0, 1] to [-1, 1]; presumably the upstream network
    # emits a squashed sin value — TODO confirm against the caller.
    s = 2 * s - 1
    # center = torch.stack([x_c, y_c], dim=-1).repeat(1, 4)  # [N, 8]
    # Repeat the (x_c, y_c) center once per corner coordinate pair.
    center = torch.stack([x_c, y_c, x_c, y_c, x_c, y_c, x_c, y_c], dim=-1)
    # Half extents along each axis.
    dx = 0.5 * w
    dy = 0.5 * h
    # Small epsilons guard against a zero-norm (c, s) pair before the
    # normalization below.
    c = c + 1e-5
    s = s + 1e-5
    cos = c / ((c ** 2 + s ** 2).sqrt() + 1e-10)
    sin = s / ((c ** 2 + s ** 2).sqrt() + 1e-10)
    # Half-extent offsets rotated by the box angle.
    dxcos = dx * cos
    dxsin = dx * sin
    dycos = dy * cos
    dysin = dy * sin
    # Per-corner offsets relative to the center, counterclockwise order.
    dxy = [
        -dxcos + dysin,
        -dxsin - dycos,
        dxcos + dysin,
        dxsin - dycos,
        dxcos - dysin,
        dxsin + dycos,
        -dxcos - dysin,
        -dxsin + dycos,
    ]
    return center + torch.stack(dxy, dim=-1)
0ab937e31fc8c2e67748b5d791a7061fa0fb70fe
38,196
def make_iterator(it):
    """Create an iterator from the given iterable."""
    iterator = iter(it)
    return iterator
ec683c4d109fd9afedc57b1a9476080400930122
38,199
from pathlib import Path


def get_datapath_base(data_type: str, filename: str) -> Path:
    """Return the path to a test data file under ``../data/<data_type>/``.

    Bug fix: the ``filename`` parameter was previously ignored and a
    placeholder name was hardcoded into the returned path; the requested
    filename is now used.

    :param data_type: sub-directory under the data folder
    :param filename: name of the data file
    :return: path of the data file relative to this module's parent
    """
    base = Path(__file__).resolve(strict=True).parent
    return base.joinpath(f"../data/{data_type}/{filename}")
9bccb92b1c4a5dbaa625b2fa52b3c77163104c11
38,200
from typing import Collection


def find_valid_words(dictionary: Collection[str], candidates: Collection[str]) -> Collection[str]:
    """Finds valid words from 'candidates' as found in the given words list.

    dictionary: the list to be used as a dictionary. Only strings in the
        dictionary are considered valid words
    candidates: strings to be tested for validity
    """
    valid = set(dictionary)
    return valid.intersection(set(candidates))
14706ca99787869d0eee1c77a929bc1f34cf2238
38,201
def matchingTest(vector):
    """
    input: a list of corr coeff scores from moments
    output: match or not
    criteria: all > 0.9 - yes
              all > 0.8, all but one > 0.9, four > .99 - yes
              else - no
    """
    above_99 = sum(1 for v in vector if v > 0.99)
    above_90 = sum(1 for v in vector if v > 0.90)
    above_80 = sum(1 for v in vector if v > 0.80)
    if above_90 == 6:
        return True
    return above_90 == 5 and above_99 >= 4 and above_80 == 6
bfc0bd830948cb0c5f4f6342c699ef280d0c4480
38,202
def drop_column(df, columns_to_drop):
    """Removes columns from a DataFrame in place (via ``del df[name]``).

    Args:
        df (`pandas.DataFrame`): The dataframe to drop columns on
        columns_to_drop (:type:`list` of :type:`str`): A list of the
            columns to remove

    Returns:
        `pandas.DataFrame`: `df` with the provided columns removed
    """
    for column in columns_to_drop:
        del df[column]
    return df
1eadbf301aff80752c93ca4393910dfa19a76b3a
38,203
def add_commas(num: int) -> str:
    """Adds commas to an integer in the international number format

    - 1000 -> 1,000
    - 100000 -> 100,000
    - 1000000 -> 1,000,000

    Args:
        num (int): The number

    Returns:
        str: The number with commas
    """
    return f"{num:,}"
9f63a6389df5b46ebbe0dc7489d4967919da18d7
38,210
from typing import Iterable
from typing import List
import locale


def sorted_locale(iterable: Iterable[str], *, reverse: bool = False) -> List[str]:
    """Sort a list of strings according to the current locale.

    Parameters
    ----------
    iterable : iterable of str
        A list of strings.
    reverse : bool, default=False
        If ``True``, reverse the sorted result.

    Returns
    -------
    sorted_list : list
        The sorted list of strings.
    """
    # locale.strxfrm converts each string to a locale-aware sort key.
    return sorted(iterable, key=locale.strxfrm, reverse=reverse)
e5092e7343989757ffe5fad9bfaf26637b6b5834
38,212
from math import sin, cos, sqrt, atan, radians


def calc_distance(position_start: tuple, position_end: tuple) -> float:
    """Calculates the distance between two positions in format (lat, lon) """
    # Ellipsoid flattening and equatorial radius.
    # NOTE(review): f matches WGS-84 (1/298.257223563), but a = 6378173
    # differs from the WGS-84 semi-major axis of 6378137 m — confirm
    # whether this constant is intentional.
    f = 1 / 298.257223563
    a = 6378173
    # Mean latitude (F), half the latitude difference (G) and half the
    # longitude difference (l), all in radians.
    F = radians((position_start[0] + position_end[0]) / 2.0)
    G = radians((position_start[0] - position_end[0]) / 2.0)
    l = radians((position_start[1] - position_end[1]) / 2.0)
    # Intermediate spherical terms; presumably the Andoyer/Lambert
    # flattening-corrected great-circle method — TODO confirm against a
    # geodesy reference.
    S = sin(G) ** 2 * cos(l) ** 2 + cos(F) ** 2 * sin(l) ** 2
    C = cos(G) ** 2 * cos(l) ** 2 + sin(F) ** 2 * sin(l) ** 2
    w = atan(sqrt(S / C))
    # Coincident points: short-circuit to avoid division by zero below.
    if float(w) == 0.0:
        return 0.0
    # Spherical distance D, then first-order flattening corrections.
    D = 2 * w * a
    T = sqrt(S * C) / w
    H_1 = (3 * T - 1) / (2 * C)
    H_2 = (3 * T + 1) / (2 * S)
    return D * (1 + f * H_1 * sin(F) ** 2 * cos(G) ** 2 -
                f * H_2 * cos(F) ** 2 * sin(G) ** 2)
c330e7e0e45f7643c7c14c6b066a854a97bd196a
38,215
def strip_schema_version(json_dict):
    """Returns the given JSON dict after stripping its schema version out

    :param json_dict: The JSON dict
    :type json_dict: dict
    :returns: The JSON dict with its schema version stripped out
    :rtype: dict
    """
    json_dict.pop('version', None)
    return json_dict
2c5e7b5bfb401e1adef5479f0d787c2788d1d735
38,220
def sum_square_difference(ceiling):
    """Compute the difference between the sum of squares and the square
    of the sum of the natural numbers up to and including the provided
    ceiling.
    """
    total = 0
    total_of_squares = 0
    for number in range(ceiling + 1):
        total += number
        total_of_squares += number ** 2
    return total ** 2 - total_of_squares
5898969697c2c8500dda0d0ef9ca5f3e7125ff77
38,225
import fnmatch


def fnmatch_all(names, patterns):
    """Determine whether all strings in `names` match at least one of
    the `patterns`, which should be shell glob expressions.
    """
    return all(
        any(fnmatch.fnmatch(name, pattern) for pattern in patterns)
        for name in names
    )
52be9c216d222fed331a836d529bc1399ba8e9b4
38,227
def read_seq(handle):
    """
    Read sequence from plain text file (no format).
    Used for importing reference sequence. All lines are stripped and
    concatenated.

    :param handle: iterable of text lines
    :return: str, sequence
    """
    return ''.join(line.strip() for line in handle)
063f9e5300093537d81ed6ee8eb96579ae0dfcf5
38,230
def split(n):
    """
    Split string or Array into two halves

    >>> split("hello")
    ['he', 'llo']
    >>> split([1,2,3,1,2,4])
    [[1, 2, 3], [1, 2, 4]]
    """
    midpoint = len(n) // 2
    return [n[:midpoint], n[midpoint:]]
ab132de4077bbc390a8b4f2f38c5154ecd75d579
38,231
def parse_tile(tile_string):
    """Parse a hex-grid direction string into (east-west, north-south)
    coordinates.

    >>> parse_tile("esew")
    (1, -1)
    >>> parse_tile("esewnw")
    (0, 0)
    >>> parse_tile("nwwswee")
    (0, 0)
    """
    ew = 0
    ns = 0
    moves = list(tile_string)
    while moves:
        direction = moves.pop(0)
        if direction == 'w':
            ew -= 1
        elif direction == 'e':
            ew += 1
        elif direction == 's':
            # 's' is always followed by 'e' or 'w'; southeast shifts east.
            ns -= 1
            if moves.pop(0) == 'e':
                ew += 1
        elif direction == 'n':
            # 'n' is always followed by 'e' or 'w'; northwest shifts west.
            ns += 1
            if moves.pop(0) == 'w':
                ew -= 1
        else:
            raise RuntimeError("")
    return ew, ns
7b6ee9725a65e88f3110c288000fb8d9626826b7
38,236
def parse_storage_mappings(storage_mappings):
    """
    Given the 'storage_mappings' API field, returns a tuple with the
    'default' option, the 'backend_mappings' and 'disk_mappings'.
    """
    # NOTE: the 'storage_mappings' property is Nullable:
    if storage_mappings is None:
        return None, {}, {}
    backend_mappings = {}
    for mapping in storage_mappings.get("backend_mappings", []):
        backend_mappings[mapping['source']] = mapping['destination']
    disk_mappings = {}
    for mapping in storage_mappings.get("disk_mappings", []):
        disk_mappings[mapping['disk_id']] = mapping['destination']
    return storage_mappings.get("default"), backend_mappings, disk_mappings
ea182c91ff5e2fe1e9a7a7066071a618eb039a5f
38,239
def bbox3d2result(bboxes, scores, labels, attrs=None):
    """Convert detection results to a dict of cpu tensors.

    Args:
        bboxes (torch.Tensor): Bounding boxes with shape (N, 5).
        scores (torch.Tensor): Scores with shape (N, ).
        labels (torch.Tensor): Labels with shape (N, ).
        attrs (torch.Tensor, optional): Attributes with shape (N, ).
            Defaults to None.

    Returns:
        dict[str, torch.Tensor]: Bounding box results in cpu mode.

            - boxes_3d (torch.Tensor): 3D boxes.
            - scores_3d (torch.Tensor): Prediction scores.
            - labels_3d (torch.Tensor): Box labels.
            - attrs_3d (torch.Tensor, optional): Box attributes.
    """
    result_dict = {
        'boxes_3d': bboxes.to('cpu'),
        'scores_3d': scores.cpu(),
        'labels_3d': labels.cpu(),
    }
    if attrs is not None:
        result_dict['attrs_3d'] = attrs.cpu()
    return result_dict
d31481229e17bc25d4f1d6fe5b9ac757b194357e
38,242
import pathlib


def _load_file_contents(path: pathlib.Path) -> str:
    """Return the full text contents of a file."""
    with path.open("r") as handle:
        return handle.read()
ca90bcc6f346e69f10323388b8bc2faf49e2553d
38,244
def recordCounter(x):
    """Simple record sizer: every record counts as 1, regardless of content."""
    return 1
a9b735c34b7978a866de6ffcf268875940664160
38,249
import hashlib


def sha256(byte_array) -> bytes:
    """
    Perform a SHA256 operation on the input.

    :param byte_array: data to hash.
    :type byte_array: bytearray or bytes
    :return: hashed data
    :rtype: bytes
    """
    hasher = hashlib.sha256()
    hasher.update(byte_array)
    return hasher.digest()
9017ccfa9f548502fcebdc61aedec85924907225
38,251
def get_first_and_last_line(fname):
    """
    Get the first and last line of the file.

    Since the common_crawl files are alphabetical, we can use this
    information to determine the alphabetic range of entries covered by the
    file. This information will later be used to limit the zone comparison
    to only those zones that would be within that alphabetic range. This is
    a speed improvement.

    :param fname: The filename to examine for the first and last lines
    :return: Two strings representing the first and last lines, respectively.
    """
    with open(fname, "rb") as fh:
        first = fh.readline()
        # Find the last line by scanning an exponentially growing tail
        # window.  The window is clamped to the file size so seek() never
        # targets a point before the start of the file (the previous
        # implementation raised OSError on files shorter than 10 bytes and
        # looped forever doubling the offset on single-line files).
        fh.seek(0, 2)
        size = fh.tell()
        window = 10
        while True:
            fh.seek(max(size - window, 0))
            lines = fh.readlines()
            if len(lines) > 1 or window >= size:
                # Either the window contains at least two lines, or it
                # already covers the whole file (single-line/empty file).
                last = lines[-1] if lines else first
                break
            window *= 2
    # Convert bytes back to strings before returning.
    return (first.decode("utf-8"), last.decode("utf-8"))
6c69957698cf9357c0c223ec55ba01ecf1aff6ea
38,252
def obtain_points(func, theta_0, theta_1, min_x, max_x, step=0.1):
    """Sample func(x, theta_0, theta_1) over [min_x, max_x].

    Returns a tuple (x_values, y_values) of the sampled x positions and
    the corresponding function values.
    """
    xs = []
    ys = []
    current = min_x
    # NOTE: x advances by repeated float addition, so accumulated
    # rounding error may exclude max_x itself for some step values.
    while current <= max_x:
        xs.append(current)
        ys.append(func(current, theta_0, theta_1))
        current += step
    return (xs, ys)
a603660d60a8fdc8c99b5fb05c756afe972dd1ab
38,256
def create_aln_expr(id, start=None, stop=None):
    """
    Create an alignment expression, such as ``n2[5:8]`` or ``tw1``
    given an id, and start/stop range.

    :param id: ID with which to align
    :type id: str
    :param start: Range at which to start
    :type start: int
    :param stop: Range at which to stop
    :type stop: int
    :raises ValueError: if exactly one of *start*/*stop* is given
        (both or neither are required).
    """
    if start is None and stop is None:
        return id
    elif start is not None and stop is not None:
        return '%s[%d:%d]' % (id, start, stop)
    else:
        # ValueError is a subclass of Exception, so existing callers
        # catching the previous bare Exception still work.
        raise ValueError('Invalid alignment expression request')
e06c4e65beffc1ec14bd4b16e34e6f14b22f2576
38,257
import torch


def ellip_gaussian2D(radius,
                     sigma_x,
                     sigma_y,
                     dtype=torch.float32,
                     device='cpu'):
    """Generate 2D ellipse gaussian kernel.

    Args:
        radius (tuple(int)): Ellipse radius (radius_x, radius_y) of gaussian
            kernel.
        sigma_x (int): X-axis sigma of gaussian function.
        sigma_y (int): Y-axis sigma of gaussian function.
        dtype (torch.dtype, optional): Dtype of gaussian tensor.
            Default: torch.float32.
        device (str, optional): Device of gaussian tensor. Default: 'cpu'.

    Returns:
        h (Tensor): Gaussian kernel with a
            ``(2 * radius_y + 1) * (2 * radius_x + 1)`` shape.
    """
    radius_x, radius_y = radius
    # Row vector of x offsets and column vector of y offsets; they
    # broadcast to the full 2D grid below.
    x = torch.arange(
        -radius_x, radius_x + 1, dtype=dtype, device=device).view(1, -1)
    y = torch.arange(
        -radius_y, radius_y + 1, dtype=dtype, device=device).view(-1, 1)

    gaussian = torch.exp(-(x * x) / (2 * sigma_x * sigma_x) -
                         (y * y) / (2 * sigma_y * sigma_y))
    # Zero-out numerically negligible tail values.
    gaussian[gaussian < torch.finfo(gaussian.dtype).eps * gaussian.max()] = 0
    return gaussian
97caa00f535321b4c7831c251d3f0d6aaf3d2e32
38,262
import heapq def _simple_chooser(queue, remaining): """Default contraction chooser that simply takes the minimum cost option. """ cost, k1, k2, k12 = heapq.heappop(queue) if k1 not in remaining or k2 not in remaining: return None # candidate is obsolete return cost, k1, k2, k12
5bb92184767ba68247b124d4a935ea9dab327f96
38,263
import torch


def btranspose(tensor: torch.Tensor) -> torch.Tensor:
    """Batch-wise transpose.

    Assumes the tensor has exactly 3 dimensions: [batch, features, samples];
    the last two are swapped.

    :raises ValueError: if the tensor is not 3-dimensional.
    """
    if tensor.dim() != 3:
        raise ValueError("The given shape is not supported.")
    return tensor.transpose(1, 2)
4b238672a2cfca33abb86116949acd6d392434f0
38,266
import re


def opensearch_clean(f):
    """
    Some opensearch clients send along optional parameters from the
    opensearch description when they're not needed. For example:

        state={openoni:state?}

    These can cause search results not to come back, and even can cause
    Solr's query parsing to throw an exception, so it's best to remove
    them when present.
    """
    def wrapper(request, **kwargs):
        cleaned = request.GET.copy()
        # Iterate over a snapshot so we can pop while scanning.
        for key, value in list(cleaned.items()):
            # Keep the exact type check: subclasses of str (e.g. framework
            # string types) are intentionally not matched, as before.
            if type(value) == str and re.match(r'^\{.+\?\}$', value):
                cleaned.pop(key)
        request.GET = cleaned
        return f(request, **kwargs)
    return wrapper
862bf8cbb9a2629949746a92b78b3b23bdfd7c49
38,273
def key2num(key):
    """Translate a MIDI key name to a number.

    Major keys map to 0-11, minor keys to 20-31, and the literal
    string "(null)" to -1.  Raises KeyError for unknown names.
    """
    # NOTE(review): "minB#" is spelled inconsistently with the other
    # minor keys ("B#min" would match the pattern) — confirm upstream.
    mapping = {
        "C": 0, "Db": 1, "D": 2, "Eb": 3, "E": 4, "F": 5,
        "Gb": 6, "G": 7, "Ab": 8, "A": 9, "Bb": 10, "B": 11,
        "Cb": 11, "C#": 1, "D#": 3, "F#": 6, "G#": 8, "A#": 10,
        "B#": 0,
        "Cmin": 20, "Dbmin": 21, "Dmin": 22, "Ebmin": 23, "Emin": 24,
        "Fmin": 25, "Gbmin": 26, "Gmin": 27, "Abmin": 28, "Amin": 29,
        "Bbmin": 30, "Bmin": 31, "Cbmin": 31, "C#min": 21, "D#min": 23,
        "F#min": 26, "G#min": 28, "A#min": 30, "minB#": 20,
        "(null)": -1,
    }
    return mapping[key]
a11a22a62c94c84a946df710e39d2d874f3bf343
38,274
import re


def check_for_function(function: str, data: str) -> bool:
    """
    Checks for a function in javascript code

    function: the name of the function
    data: the javascript code

    returns: Whether the code contains the function
    """
    # Use non-consuming lookarounds instead of [^a-zA-Z] character
    # classes: the old pattern required a character on BOTH sides of the
    # name, so a function at the very start or end of `data` was missed.
    # re.escape guards against regex metacharacters in the name.
    pattern = f'(?<![a-zA-Z]){re.escape(function)}(?![a-zA-Z])'
    return re.search(pattern, data) is not None
671b3554a70407d447cac26b27f542812cbba97c
38,276
def user_discrim(user):
    """
    Return the user's username and disc
    in the format <username>#<discriminator>
    """
    return "{}#{}".format(user.name, user.discriminator)
22866ad0c23a23bfbd7460844a9582916970991c
38,280
import configparser
import json


def write_config_to_file(config_dict, ini_fpath):
    """Write a configuration dict to an ini file.

    Every value is JSON-encoded and stored in the DEFAULT section.

    :param config_dict: (Dict) config to write
    :param ini_fpath: (str) fpath to ini file
    :return: (str) ini_file written to
    """
    parser = configparser.ConfigParser()
    defaults = {}
    for key, value in config_dict.items():
        defaults[key] = json.dumps(value)
    parser["DEFAULT"] = defaults
    with open(ini_fpath, "w") as ini_file:
        parser.write(ini_file)
    return ini_fpath
8b9c9c64e08afe64bc4fc6f9570ea84f53b9f72c
38,281
import json


def model_comment(comment_type, text, other=None):
    """
    Print a model comment. This is a base function for some functions
    implemented below but sometimes it is necessary to use it directly.

    :param comment_type: Comment type string.
    :param text: Comment text.
    :param other: Additional existing dictionary with some data.
    :return: String with the model comment.
    """
    # Reuse the caller-supplied dict when it is a non-empty dict; note
    # that this mutates it in place, as the original contract did.
    if isinstance(other, dict) and other:
        comment = other
    else:
        comment = {}

    comment['type'] = comment_type.upper()
    if text:
        comment['comment'] = text

    return "/* LDV {} */".format(json.dumps(comment))
23b7278bd9bcf1dbe0b41e908bcd41bd792789f1
38,284
def fetch(spec, **kwargs):
    """
    Fetches a file on the local filesystem into memory.

    *spec* must contain a 'path' key; the file's raw bytes are returned.
    """
    with open(spec['path'], 'rb') as source:
        contents = source.read()
    return contents
84442c8df0efa0fa095b2d8282ea9c0d4cd2995f
38,286
from typing import Union
import torch


def reshape_list(flat_list: list, size: Union[torch.Size, tuple]) -> list:
    """
    Reshape a (nested) list to a given shape.

    Note: elements are consumed from *flat_list* in place (pop(0)).

    Args:
        flat_list: (nested) list to reshape
        size: shape to reshape to

    Returns:
        list: reshaped list
    """
    count = size[0]
    if len(size) == 1:
        # Innermost dimension: take the next `count` elements directly.
        return [flat_list.pop(0) for _ in range(count)]
    # Recurse over the remaining dimensions for each outer slot.
    return [reshape_list(flat_list, size[1:]) for _ in range(count)]
c9c9b09bb0d91ed3229f0b5e5b28130ac2d93377
38,287
def prod(*x):
    """
    Returns the product of elements, just like built-in function `sum`.

    Example:
    ----------
    >>> prod([5, 2, 1, 4, 2])
    80
    """
    # A single list argument is treated as the sequence itself.
    items = x[0] if len(x) == 1 and isinstance(x[0], list) else x
    result = 1
    for item in items:
        # NOTE(review): items without __mul__ are silently skipped —
        # confirm this filtering is intentional.
        if hasattr(item, "__mul__"):
            result *= item
    return result
94569594eab0c6823733d76d69d59d21a6b4d96b
38,289
def hello(bot, update):
    """
    Greet the user with their first name and Telegram ID.
    """
    sender = update.message.from_user
    greeting = 'Hello {}, your Telegram ID is {}'.format(
        sender.first_name, sender.id)
    return update.message.reply_text(greeting)
e5567d6748f202093bc44c514b38d6d32c162d4b
38,291
def create_price_sqm(data):
    """Create price per square meter feature.

    Adds a 'Prezzo_per_m2' column to *data* in place and returns it.
    """
    data['Prezzo_per_m2'] = data['Prezzo'].div(data['Superficie'])
    return data
6a50084f69f233374ffa4512de2653f37f749467
38,293
def convert_num(mode, num):
    """Converts a number in any given number scale

    Example:
    `convert_num("100K", 600000)` returns 6

    Args:
    - mode: (string) the scale for the conversion
        ("100K", "M", "10M", "100M", "B")
    - num: the number to be converted

    Returns:
    the converted number (unchanged for an unknown mode)
    """
    divisors = {
        "100K": 100000,
        "M": 1000000,
        "10M": 10000000,
        "100M": 100000000,
        "B": 1000000000,
    }
    num = int(num)
    if mode in divisors:
        # int() truncates toward zero, matching the original behaviour.
        num = int(num / divisors[mode])
    return num
461d5cf0d35e43509db3cffebf5487ab85470545
38,294
import re


def stripms(stamp):
    """
    Given ISO 8601 datestamp, strip out any milliseconds in representation
    using a regular expression safe for either stamps with or stamps
    without milliseconds included.
    """
    parts = stamp.split('.')
    if len(parts) == 1:
        # No millisecond part; return the stamp untouched.
        return stamp
    match = re.search('([0-9]*)([+-].*)', parts[1])
    if not match:
        # No timezone offset: the date/time part alone suffices.
        return parts[0]
    # Re-attach the offset (group 2) to the date/time part.
    return '{}{}'.format(parts[0], match.group(2))
937136fc563f0b6084521ddd912a21bade2166cf
38,296
def get_parent(vmfs):
    """
    From a set of VMFs, determines which one has the lowest map version
    number, and is therefore the parent.
    """
    # Consume via an iterator so subscripting/slicing is never needed.
    iterator = iter(vmfs)
    parent = next(iterator)
    for vmf in iterator:
        if vmf.revision < parent.revision:
            parent = vmf
    return parent
bae83d2e02edc835c533873994285870246c7623
38,298
def format_parameter(*args, **kwargs):
    """Format a parameter string

    >>> format_parameter(ex=['example', 'one'])
    '"ex=example:one"'
    >>> format_parameter('one', 'two', 'three')
    'one:two:three'

    You can mix the arguments and keyword arguments.
    """
    parts = [str(value) for value in args if value is not None]
    for key, value in kwargs.items():
        try:
            if not value:
                parts.append(key)
            else:
                parts.append("=".join([key, value]))
        except TypeError:
            # value is a non-string sequence: join its items with ':'.
            parts.append("=".join([key, ':'.join(kwargs[key])]))
    joined = ':'.join(parts)
    # Keyword parameters are quoted as a whole.
    return '"%s"' % joined if kwargs else joined
77c84e3edb22de0a4cec58b9bcbeb1ef67ee2d5e
38,304
import torch


def prep_tensor_for_vis(x):
    """Prepare tensor for visualization

    If only has one channel, concatenate to produce 3 channels
    Clone, detach and pass to cpu before clamping between 0 and 1

    Parameters
    ----------
    x: torch.FloatTensor
        Tensor with image (CHW)

    Returns
    ----------
    torch.FloatTensor
        3HW detached tensor on cpu clamped between 0 and 1
    """
    if x.shape[0] == 1:
        # Replicate the single channel three times.
        x = torch.cat([x, x, x], 0)
    return torch.clamp(x.clone().detach().cpu(), 0., 1.)
20426f0c3aef6f467ccc0b7b1bca26b30eb3bba9
38,307
from datetime import datetime


def is_datetime(string):
    """
    Check if a string can be converted to a datetime object.

    :param string: the string
    :return: True if the string matches the "%Y-%m-%d %H.%M.%S" format,
        False otherwise
    """
    try:
        datetime.strptime(string, "%Y-%m-%d %H.%M.%S")
    except (ValueError, TypeError):
        # ValueError: wrong format; TypeError: input is not a string.
        # Narrowed from a broad `except Exception` so unrelated errors
        # are no longer silently swallowed.
        return False
    return True
d1fa368d1b7ac45b85661bd4b72771d088c4ac6f
38,309
def _compare_properties(sub_set: list, set_: list) -> bool: """ Check for a subset in a set of properties. Parameters ---------- sub_set : list The smaller set that should be contained in the 'set'. schema : dict The set for which to check if 'sub_set' is a part of. Returns ------- bool True is 'sub_set' is a subset of the 'set'. False otherwise. """ for sub_set_property in sub_set: found = False for set_property in set_: if sub_set_property["name"] == set_property["name"]: found = True break if not found: return False return True
a13a29b4cb0b1728277c237b40bd4c5567beb001
38,310
def calculate_accuracy(combined_decisions, Y_test_1):
    """Calculate the percentage accuracy of a combined decisions array.

    Args:
        combined_decisions: iterable of (index, decision) pairs — the
            predicted values for the combined model.
        Y_test_1: true values, indexable by each pair's index.

    Returns:
        Percentage (0-100) of decisions matching the true values;
        0.0 for an empty decision list (previously this raised
        ZeroDivisionError).
    """
    total_decisions = len(combined_decisions)
    if total_decisions == 0:
        # Guard against division by zero on empty input.
        return 0.0
    correct_decisions = sum(
        1 for index, decision in combined_decisions
        if decision == Y_test_1[index])
    return correct_decisions / total_decisions * 100
7494ef3bc017e628f9621f803c27bd2c77ccff2b
38,312
import requests


def load_mta_archived_feed(feed='gtfs', timestamp='2014-09-17-09-31'):
    """
    Returns archived GTFS data for a particular time_assigned.

    Parameters
    ----------
    feed: {'gtfs', 'gtfs-l', 'gtfs-si'}
        Archival data is provided in these three rollups. The first one
        covers 1-6 and the S, the second covers the L, and the third,
        the Staten Island Railway.
    timestamp: str
        The time_assigned associated with the data rollup. The files are
        time stamped at 01, 06, 11, 16, 21, 26, 31, 36, 41, 46, 51, and
        56 minutes after the hour, so only these times will be valid.
    """
    url = "https://datamine-history.s3.amazonaws.com/{0}-{1}".format(
        feed, timestamp)
    return requests.get(url)
d1d38854dbd35f2c30342b9958d0640541162dd1
38,317
from typing import Dict from typing import Any def _setdefault(dictionary: Dict[str, Any], key: str, value: Any) -> Dict[str, Any]: """Sets the default value of `key` to `value` if necessary. Args: dictionary: the dictionary to add a default for. key: The key to add a default for. value: The default value to add if key is missing. Returns: Either dictionary or a copy wiht the default value added. """ if key in dictionary: return dictionary else: return dict(dictionary, **{key: value})
d989b167769aaf6674027b68f5ec0463fea5251d
38,318
def _no_op(*args, **kwargs): """No operation.""" return None
fa389f2d8aae0e38dd414b0294a3da948e0c9595
38,319
def set_size(self, mode):
    """Calculates the number of samples in the dataset partition *mode*."""
    partition = self.data_index[mode]
    return len(partition)
8337089875f70d0d4db68f1ff67bd29705774747
38,321
def is_derivatized(monosaccharide):
    """Tests whether any of the substituents attached to `monosaccharide`
    were added by derivatization.

    Parameters
    ----------
    monosaccharide : Monosaccharide
        The object to test

    Returns
    -------
    bool
    """
    return any(sub._derivatize
               for _, sub in monosaccharide.substituents())
f77cc01e14939b94652bb9d19f7d0729201f35bb
38,322
def gradient_summand(weights, lp):
    """Calculates the gradient summand for a given weight and `LabeledPoint`.

    Note:
        `DenseVector` behaves similarly to a `numpy.ndarray` and they can
        be used interchangably within this function.  For example, they both
        implement the `dot` method.

    Args:
        weights (DenseVector): An array of model weights (betas).
        lp (LabeledPoint): The `LabeledPoint` for a single observation.

    Returns:
        DenseVector: An array of values the same length as `weights`.
        The gradient summand.
    """
    residual = weights.dot(lp.features) - lp.label
    return residual * lp.features
b34de095fb762aa933570ae58744772205ded5de
38,325
def schedule(intervals):
    """Schedule the maximum number of compatible intervals.

    This uses the greedy interval scheduling algorithm to find (schedule)
    the maximum number of compatible (non-overlapping) intervals.

    Args:
        intervals: list of intervals, each of which is a tuple (x,y) in
            which x is a tuple of the form (start, end) and y is a
            reference to an object represented by the interval.

    Returns:
        list of the objects corresponding to the chosen intervals
        (i.e., the 'y' for each chosen element)
    """
    # Greedy rule: always take the compatible interval that finishes
    # earliest, so sort by endpoint first.
    ordered = sorted(intervals, key=lambda item: item[0][1])

    chosen = []
    last_finish = None
    for interval, obj in ordered:
        # Compatible iff nothing chosen yet, or this interval starts at
        # or after the finish of the last chosen one.
        if last_finish is None or interval[0] >= last_finish:
            chosen.append(obj)
            last_finish = interval[1]
    return chosen
19480ff5b24070e53e9243066c3f34f8a91d1e98
38,328
def parse_hgnc_line(line, header):
    """Parse an hgnc formated line

    Args:
        line(list): A list with hgnc gene info
        header(list): A list with the header info

    Returns:
        hgnc_info(dict): A dictionary with the relevant info
    """
    hgnc_gene = {}
    # Split the raw tab-separated record and pair each field with its
    # column name from the header row.
    line = line.rstrip().split('\t')
    raw_info = dict(zip(header, line))
    # Skip all genes that have status withdrawn
    # (an empty dict is returned in that case).
    if 'Withdrawn' in raw_info['status']:
        return hgnc_gene
    hgnc_symbol = raw_info['symbol']
    hgnc_gene['hgnc_symbol'] = hgnc_symbol
    # hgnc_id fields look like "HGNC:<number>" — keep only the number.
    hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[-1])
    hgnc_gene['description'] = raw_info['name']
    # We want to have the current symbol as an alias
    aliases = set([hgnc_symbol, hgnc_symbol.upper()])
    # We then need to add both the previous symbols and
    # alias symbols.  Multi-valued fields are quoted and '|'-separated.
    previous_names = raw_info['prev_symbol']
    if previous_names:
        for alias in previous_names.strip('"').split('|'):
            aliases.add(alias)
    alias_symbols = raw_info['alias_symbol']
    if alias_symbols:
        for alias in alias_symbols.strip('"').split('|'):
            aliases.add(alias)
    hgnc_gene['previous_symbols'] = list(aliases)
    # We need the ensembl_gene_id to link the genes with ensembl
    hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')
    # Only the first OMIM id of a possibly multi-valued field is kept.
    omim_id = raw_info.get('omim_id')
    if omim_id:
        hgnc_gene['omim_id'] = int(omim_id.strip('"').split('|')[0])
    else:
        hgnc_gene['omim_id'] = None
    # NOTE(review): the chained assignment stores the raw string first and
    # then overwrites it below — the local `entrez_id` alone would suffice.
    entrez_id = hgnc_gene['entrez_id'] = raw_info.get('entrez_id')
    if entrez_id:
        hgnc_gene['entrez_id'] = int(entrez_id)
    else:
        hgnc_gene['entrez_id'] = None
    # These are the primary transcripts according to HGNC
    ref_seq = raw_info.get('refseq_accession')
    if ref_seq:
        hgnc_gene['ref_seq'] = ref_seq.strip('"').split('|')
    else:
        hgnc_gene['ref_seq'] = []
    uniprot_ids = raw_info.get('uniprot_ids')
    if uniprot_ids:
        # NOTE(review): strip('""') is equivalent to strip('"') — the
        # argument is a set of characters, not a literal string.
        hgnc_gene['uniprot_ids'] = uniprot_ids.strip('""').split('|')
    else:
        hgnc_gene['uniprot_ids'] = []
    ucsc_id = raw_info.get('ucsc_id')
    if ucsc_id:
        hgnc_gene['ucsc_id'] = ucsc_id
    else:
        hgnc_gene['ucsc_id'] = None
    vega_id = raw_info.get('vega_id')
    if vega_id:
        hgnc_gene['vega_id'] = vega_id
    else:
        hgnc_gene['vega_id'] = None
    return hgnc_gene
9b0c373a107782d81b818b258e9c273e89b9ec12
38,329
def get_integer(message, minimum, maximum):
    """Retrieves an integer value prompted from console, in such a way
    that `minimum <= value <= maximum`.
    """
    while True:
        try:
            value = int(input(message))
            if minimum <= value <= maximum:
                return value
            # Out-of-range values funnel into the same retry message
            # as non-numeric input.
            raise ValueError()
        except ValueError:
            print('Please enter an integer value between {} and {}'
                  .format(minimum, maximum))
b3c0708a17b03c66555dfc41e12b2b33cab0914c
38,337
def get_smallest_entry(visited, distance):
    """
    Return the position of the unvisited node with the smallest distance.

    Returns None if no unvisited node with a known (non-None) distance
    is left.
    """
    smallest = None
    smallest_entry = None
    for i in range(len(visited)):
        if visited[i] or distance[i] is None:
            continue
        # Test for "no candidate yet" BEFORE comparing: the original
        # evaluated `distance[i] < smallest` first, which raises
        # TypeError on Python 3 while `smallest` is still None.
        if smallest is None or distance[i] < smallest:
            smallest_entry = i
            smallest = distance[i]
    return smallest_entry
5097ab5587c495a7fa8c14f173f1109bea55cb4a
38,345