content: string (39 to 9.28k chars) | sha1: string (40 chars) | id: int64 (8 to 710k)
def empty_str(in_str):
    """
    Simple helper to return True if the passed string reference is None
    or '' or all whitespace
    """
    if in_str is not None and not isinstance(in_str, str):
        raise TypeError('Arg must be None or a string type')
    return in_str is None or len(in_str.strip()) == 0
068d86797f6b19b77133eb8f1c5b28d3bd007592
521,072
def chunks(l, n):
    """ Successive n-sized chunks from l. """
    res = []
    for i in range(0, len(l), n):
        assert len(l[i:i + n]) == n
        res += [l[i:i + n]]
    return res
148467d681e545487ea1a52c3b4d548726c77f6c
21,815
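A minimal usage sketch of chunks (not part of the original row); note that the assert requires len(l) to be an exact multiple of n:

    assert chunks([1, 2, 3, 4, 5, 6], 2) == [[1, 2], [3, 4], [5, 6]]
    # chunks([1, 2, 3], 2) would fail the assert: the tail chunk has length 1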
def CalcE(U):
    """Return the result of the non-linear variable E = 0.5U**2."""
    return 0.5 * U * U
850ccc66737e127bc64a8b9940e84dadc22edf86
471,750
def step_function(x: float, step_coordinate: float = 0,
                  value_1: float = 0, value_2: float = 1) -> float:
    """Simple step function.

    Parameters
    ----------
    x : float
        Coordinate.
    step_coordinate : float
        Step coordinate.
    value_1 : float
        Function returns value_1 if x < step_coordinate.
    value_2 : float
        Function returns value_2 if x >= step_coordinate.

    Returns
    -------
    float
    """
    return value_1 if x < step_coordinate else value_2
0e42385a349e17ee6f4981f33ad3cdbee89f68ad
124,196
def get_total_mnsp_capacity_violation(model):
    """Get total MNSP capacity violation"""
    return sum(v.value for v in model.V_CV_MNSP_CAPACITY.values())
2b84d4525e8692c0d31052b90e57c1c6ecda0cfa
264,205
import zipfile

def unzipfile(zip_file_path: str, unzipped_dir_path: str) -> bool:
    """
    unzip file
    :param zip_file_path: path to zipfile
    :param unzipped_dir_path: directory path to unzip
    :return: bool, if success
    """
    # Use a context manager so the archive handle is closed afterwards.
    with zipfile.ZipFile(zip_file_path, 'r') as z:
        z.extractall(path=unzipped_dir_path)
    return True
4fbafaaefca06b640837bef4ddb3aa07a732ece7
579,880
def weightedAverage(fractions, weight_sum):
    """Computes an RMSE-weighted fractional cover image.

    Args:
        fractions: a multi-band ee.Image object with a 'weight' band.
        weight_sum: a single-band ee.Image object with the global weight sum.

    Returns:
        weighted: scaled fractional cover Image.
    """
    # harmonize band info
    band_names = list(fractions.bandNames().getInfo())
    band_names.pop(band_names.index("weight"))
    band_range = list(range(len(band_names)))

    scaler = fractions.select(["weight"]).divide(weight_sum)
    weighted = fractions.select(band_range, band_names).multiply(scaler)
    return weighted
f6c705385a61fec8cd04bb12a10fbcc64c003f2e
276,723
import re

def is_ticker_symbol(value):
    """Checks whether a string is a possibly valid stock ticker symbol.

    Examples: 'GOOG', 'BRK/B', '263750 KS', 'EMBRAC B SS'

    :param value: A string to evaluate.
    :returns: True if string is in the form of a valid stock ticker symbol.
    """
    # Wrap in bool() so the function returns True/False as documented,
    # rather than a Match object or None.
    return bool(re.match(r'^[A-Z0-9/]{1,7}(\s+[ABC])?(\s+[A-Z]{1,3})?$', value))
a43f030845b2a5e611fd5410f8eb541a7d6d5295
614,129
def create_last_n_1_bits_mask(n):
    """ Return a binary mask with the last n bits set to 1 """
    if n < 0:
        raise ValueError("n cannot be a negative number")
    if n == 0:
        return 0
    # (1 << n) - 1 sets exactly the lowest n bits;
    # the original (2 << n) - 1 set n + 1 bits.
    return (1 << n) - 1
aa37cbfc0bdb290d9b00e6e2dcf484d207bb0f24
431,429
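A quick sanity check of the corrected mask function (a sketch; the expected values follow directly from the definition of an n-bit mask):

    assert create_last_n_1_bits_mask(0) == 0b0
    assert create_last_n_1_bits_mask(1) == 0b1
    assert create_last_n_1_bits_mask(4) == 0b1111  # 15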
from functools import reduce

def dict_item(dictionary: dict, path: str):
    """Extracts dictionary item by the given path.

    Args:
        path: path to dictionary item. May point to nested elements. In such
            case dot is used to notate children nodes. E.g. "name.first".

    Returns:
        Dictionary element value.
    """
    return reduce(lambda d, key: d[key], path.split('.'), dictionary)
00d41ea11455fc58b8d6e2c76c037b6b408e498f
315,348
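A short usage sketch of dict_item with a nested path (hypothetical data):

    person = {"name": {"first": "Ada", "last": "Lovelace"}}
    assert dict_item(person, "name.first") == "Ada"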
def get_difference_rate(actual, expected):
    """Calculate difference rate between actual value and expected value."""
    return actual if expected == 0 else (actual - expected) / expected
ee24d97a66898653024845be9a605482b848016b
426,222
def format_keys(keys):
    """
    Converts keys like findHighlightForeground to find_highlight_foreground.
    """
    for key in keys:
        formatted_key = ''.join(
            [f"_{c.lower()}" if c.isupper() else c for c in key.text])
        key.text = formatted_key
    return keys
b7877b5dc1dfe54f93fc43719cc4b074282fc1cc
113,952
from collections import OrderedDict

def order_recommended_skills(skills_list):
    """This function is used to keep unique skills in order"""
    # collections.OrderedDict is the runtime class that provides fromkeys();
    # typing.OrderedDict is intended for annotations only.
    recommended_skills = list(OrderedDict.fromkeys(skills_list))
    return recommended_skills
8580f75e6838a18d5498489c4b1015f9056507e8
191,039
def to_api_doc_repr(regdoc):
    """ Transform RegistryDocument

    {
        "_id": "bts",
        "@context": { ... },
        "@graph": [ ... ]
    }
    with meta property -> { "url": "http..." }

    to

    {
        "url": "http...",
        "namespace": "bts",
        "source": {
            "@context": { ... },
            "@graph": [ ... ]
        }
    }
    """
    api_doc = {}
    api_doc['url'] = regdoc.meta.url if 'url' in regdoc.meta else None
    api_doc['namespace'] = regdoc.pop('_id', None)
    if regdoc:
        api_doc['source'] = regdoc
    return api_doc
f7854dd7adeed25c0ad15ddba8443f063cb40fe2
477,759
from typing import List

def format_tags(tags: str, arg="--tag") -> List[str]:
    """
    Takes a comma separated list of tags. Splits them and prefixes each tag
    with --tag. Use the output as the command line argument for the restic cli.

    Example: foo,bar,test becomes --tag foo --tag bar --tag test
    """
    if not tags:
        return []
    tags = tags.strip()
    splitTags = tags.split(",")
    output = []
    for tag in splitTags:
        tag = tag.strip()
        if tag:
            output.extend([arg, tag])
    return output
d129859470e94a67266bb76450053377a56f2fb3
380,723
import requests

def get_mac_addr(userkey, mac):
    """
    Function to get the Mac Address Info requested from macaddress.io
    Makes a request to macaddress.io via their API using Headers

    Parameters:
        userkey (str): User key to access the macaddress.io API
        mac (str): Mac address to lookup

    Returns:
        Response: Returns Response object from lookup
    """
    session = requests.Session()
    session.headers.update({'X-Authentication-Token': userkey,
                            'Content-Type': 'application/json'})
    url = "https://api.macaddress.io/v1"
    params = {"search": mac, "output": "json"}
    # Issue the request through the session so its headers are applied.
    response_data = session.get(url, params=params)
    return response_data
3e6e5a8b9b2889a6f788295fdd6e7c06a09e5c14
135,633
def create_query_token_func(session, model_class):
    """Create a ``query_token`` function that can be used in resource protector.

    :param session: SQLAlchemy session
    :param model_class: TokenCredential class
    """
    def query_token(client_id, oauth_token):
        q = session.query(model_class)
        return q.filter_by(
            client_id=client_id, oauth_token=oauth_token).first()
    return query_token
a25d876ef1dd5f7548741f27e584724f27447506
44,077
def bare_spectrum(sweep, subsys, which=-1, **kwargs):
    """
    Plots energy spectrum of bare system `subsys` for given ParameterSweep `sweep`.

    Parameters
    ----------
    sweep: ParameterSweep
    subsys: QuantumSystem
    which: int or list(int), optional
        default: -1, signals to plot all wavefunctions within the truncated
        Hilbert space; int > 0: plot wavefunctions 0..int-1; list(int): plot
        specific wavefunctions
    **kwargs: dict
        standard plotting option (see separate documentation)

    Returns
    -------
    fig, axes
    """
    subsys_index = sweep.get_subsys_index(subsys)
    specdata = sweep.bare_specdata_list[subsys_index]
    if which is None:
        which = subsys.truncated_dim
    return specdata.plot_evals_vs_paramvals(which=which, **kwargs)
a441f5e838092aff6b7328c25d18357570d59c35
667,769
def row_number(index):
    """ get row number of the 0x10 byte row containing the given index """
    return index // 0x10
09de6ba20c208d1b184450fd2f021878a3ad849a
212,970
def legendre_symbol(a, p):
    """Compute the Legendre symbol."""
    if a % p == 0:
        return 0
    ls = pow(a, (p - 1) // 2, p)
    return -1 if ls == p - 1 else ls
50869b86c42629c49ffb36db606bab616b21ea8a
121,608
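A worked example of Euler's criterion as implemented above (a sketch: 2 is a quadratic residue mod 7 since 3**2 = 9 ≡ 2, while 3 is not):

    assert legendre_symbol(2, 7) == 1    # 2 is a square mod 7
    assert legendre_symbol(3, 7) == -1   # 3 is a non-residue mod 7
    assert legendre_symbol(14, 7) == 0   # 14 ≡ 0 mod 7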
def _extract_protocol_layers(deserialized_data):
    """
    Removes unnecessary values from packets dictionaries.
    :param deserialized_data: Deserialized data from tshark.
    :return: List of filtered packets in dictionary format.
    """
    packets_filtered = []
    for packet in deserialized_data:
        packets_filtered.append(packet["_source"]["layers"])
    return packets_filtered
3c3a899909c5278b29ffb402ccb4d8dde24fce3a
1,682
import networkx

def compute_dominance_frontier(graph, domtree):
    """
    Compute a dominance frontier based on the given post-dominator tree.

    This implementation is based on figure 2 of the paper "An Efficient Method
    of Computing Static Single Assignment Form" by Ron Cytron et al.

    :param graph:   The graph where we want to compute the dominance frontier.
    :param domtree: The dominator tree
    :returns:       A dict of dominance frontier
    """
    df = {}

    # Perform a post-order search on the dominator tree
    for x in networkx.dfs_postorder_nodes(domtree):
        if x not in graph:
            # Skip nodes that are not in the graph
            continue

        df[x] = set()

        # local set
        for y in graph.successors(x):
            if x not in domtree.predecessors(y):
                df[x].add(y)

        # up set
        if x is None:
            continue

        for z in domtree.successors(x):
            if z is x:
                continue
            if z not in df:
                continue
            for y in df[z]:
                if x not in list(domtree.predecessors(y)):
                    df[x].add(y)

    return df
a7afc34286a17d8a8085930d121a06579f363ac8
219,211
def MergeBounds(bounds1, bounds2):
    """Merge two rectangles to create minimum rectangle that contains both.

    Args:
        bounds1: List of lower left and upper right boundary of first rectangle.
        bounds2: List of lower left and upper right boundary of second rectangle.

    Returns:
        List of lower left and upper right boundary containing both rectangles.
    """
    new_bounds = []
    # Lower-left corner: take the minimum of each coordinate.
    new_bounds.append(min(bounds1[0], bounds2[0]))
    new_bounds.append(min(bounds1[1], bounds2[1]))
    # Upper-right corner: take the maximum of each coordinate.
    new_bounds.append(max(bounds1[2], bounds2[2]))
    new_bounds.append(max(bounds1[3], bounds2[3]))
    return new_bounds
3f1917f9f6167866fa8e47935ad8aed9662d95c0
292,124
def load_secret(path: str = "../.secret") -> str:
    """Loading secret file

    :param path: Path to secret file. Defaults to "../.secret".
    :type path: str
    :returns: Secret key
    :rtype: str
    """
    with open(path, "r+") as file:
        return file.readline().strip()
3e0da3bd90c4789e63a87aa3d7ffa5bdf6869d19
429,243
def _get_dpv(statvar: dict, config: dict) -> list:
    """A function that goes through the statvar dict and the config and
    returns a list of properties to ignore when generating the dcid.

    Args:
        statvar: A dictionary of prop:values of the statvar
        config: A dict which expects the keys to be the column name and value
            to be another dict. This dict maps column values to key-value
            pairs of a statvar. See scripts/fbi/hate_crime/config.json for an
            example. In this function, the '_DPV_' key is used to identify
            dependent properties.

    Returns:
        A list of properties to ignore when generating the dcid.
    """
    ignore_props = []
    for spec in config['_DPV_']:
        if spec['cprop'] in statvar:
            dpv_prop = spec['dpv']['prop']
            dpv_val = spec['dpv']['val']
            if dpv_val == statvar.get(dpv_prop, None):
                ignore_props.append(dpv_prop)
    return ignore_props
301624c4b02d05aac3da9e1853bc376991741520
525,225
def transform_and_filter_result(result, camp_ids_to_filter):
    """Transforms the result of get_campaign_performance_per_period(), and
    only includes campaign IDs that are in the list of camp_ids_to_filter"""
    final_result = list()
    for x in result[0][0]:
        if x["campaignId"] in camp_ids_to_filter:
            result_per_id = list()
            # Use a distinct loop variable so the outer `result` argument
            # is not shadowed.
            for day_result in x["results"]:
                result_per_id_per_day = dict()
                # The resulting dict can be modified
                # if you need different items in it for your reporting
                result_per_id_per_day["campaign_id"] = x["campaignId"]
                result_per_id_per_day["date_from"] = day_result.get("metadata").get("fromDate")
                result_per_id_per_day["date_to"] = day_result.get("metadata").get("toDate")
                result_per_id_per_day["impressions"] = day_result.get("metrics").get("impressions")
                result_per_id_per_day["clicks"] = day_result.get("metrics").get("clicks")
                result_per_id_per_day["conversions"] = day_result.get("metrics").get("conversions")
                result_per_id_per_day["spend"] = day_result.get("metrics").get("spend")
                result_per_id.append(result_per_id_per_day)
            final_result.append(result_per_id)
    return final_result
09c0c057faf512ebb77172fe51498f91f4cb6c4b
190,744
def parse_int(value):
    """Casts value to integer if possible, otherwise returns None"""
    try:
        return int(value)
    except (ValueError, TypeError):
        return None
c84e776f67a2526f7f1d8258c3afabb1a94207df
440,253
def sentence_to_bigrams(sentence):
    """
    Add start '<s>' and stop '</s>' tags to the sentence and tokenize it into
    a list of lower-case words (sentence_tokens) and bigrams (sentence_bigrams)
    :param sentence: string
    :return: list, list
        sentence_tokens: ordered list of words found in the sentence
        sentence_bigrams: a list of ordered two-word tuples found in the sentence
    """
    sentence_tokens = ['<s>'] + sentence.lower().split() + ['</s>']
    sentence_bigrams = []
    for i in range(len(sentence_tokens) - 1):
        sentence_bigrams.append((sentence_tokens[i], sentence_tokens[i + 1]))
    return sentence_tokens, sentence_bigrams
8fd3425517a31114c9b6560001fba21d5b62ef19
606,798
def urljoin(*args: str) -> str:
    """
    Join an array of strings using a forward-slash representing an url.

    :param args: list of strings to join
    :return: joined strings
    """
    return "/".join(map(lambda x: str(x).rstrip('/'), args))
f4dbfa29a6ad15c83032edc7cab3e9e745fae5d1
394,013
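A brief usage sketch; note that only trailing slashes are stripped from each segment:

    assert urljoin("https://example.com/", "api/", "v1") == "https://example.com/api/v1"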
def bmatrix(arr):
    """
    Converts a numpy array (matrix) to a LaTeX bmatrix

    Args:
        arr: Array

    Returns:
        LaTeX bmatrix as a string

    Raises:
        ValueError: If the array has more than two dimensions
    """
    if len(arr.shape) > 2:
        raise ValueError('bmatrix can at most display two dimensions')
    lines = str(arr).replace('[', '').replace(']', '').splitlines()
    latex_str = [r'\begin{bmatrix}']
    latex_str += ['  ' + ' & '.join(line.split()) + r'\\' for line in lines]
    latex_str += [r'\end{bmatrix}']
    return '\n'.join(latex_str)
5382c8c623f7e4e2114949c254b8516c3fb798b3
666,962
def rotate_matrix(A, m, n):
    """ Rotates the given m by n matrix A 90 degrees clockwise. """
    B = [[0] * m for i in range(n)]
    for i in range(m):
        for j in range(n):
            B[j][m - i - 1] = A[i][j]
    return B
3115a659ff3afe0c448e6736c5d3b165dc18d2bd
584,244
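A small check of the clockwise rotation (a sketch; rows of A become columns of the result, read bottom-up):

    A = [[1, 2, 3],
         [4, 5, 6]]                                            # 2 x 3
    assert rotate_matrix(A, 2, 3) == [[4, 1], [5, 2], [6, 3]]  # 3 x 2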
def get_ecs(definition):
    """Extract EC numbers from a KO definition.

    Parameters
    ----------
    definition : str
        KO definition.

    Returns
    -------
    list of str
        Extracted EC numbers.

    Examples
    --------
    K00930 acetylglutamate kinase [EC:2.7.2.8]
    K02618 oxepin-CoA hydrolase [EC:3.3.2.12 1.2.1.91]
    K09866 aquaporin-4
    """
    if definition.endswith(']'):
        idx = definition.find(' [EC:')
        if idx > 0:
            return definition[idx + 5:-1].split()
    # No EC block found: return an empty list rather than an implicit None.
    return []
68d56816aa0afd9a523d0f64be99bcebe012deaf
317,335
def char(cA, cB):
    """Returns the appropriate single qubit pauli character when merging."""
    if cA == "I":
        return cB
    return cA
977f9acb4d476aee70c20301b952f1118b3d5349
591,283
from typing import Tuple

def split_segment_id(segment_id: str) -> Tuple[float, float, int]:
    """Split a segment ID to segment begin, segment end, and a running number.

    Args:
        segment_id (str): Segment ids are in the form
            session-001-2015-START-END[NUMBER] or
            session-001-2015-START-END-NUMBER

    Returns:
        Tuple[float, float, int]: start, end, and number
    """
    if "[" in segment_id:
        _, begin, end = segment_id.rsplit("-", 2)
        end, number = end.split("[")
        number = number.replace("]", "")
    else:
        _, begin, end, number = segment_id.rsplit("-", 3)
    return float(begin) / 100.0, float(end) / 100.0, int(number)
9c878b25836e993e114c6af9eb6c576580d16b36
68,735
import pathlib

def create_reporting_fnames(qc_db_path, num_res, inst_name, raw_fname):
    """
    Creates the filenames for the graphical report and Excel output,
    based on the relative path, which depends on the QC database path qc_db_path.

    num_res: the number of the latest results fetched from the database
    inst_name: instrument name
    raw_fname: raw file name

    returns the dictionary: {'excel_out': full path, 'graph_out': full path}
    """
    qc_db_pobj = pathlib.Path(qc_db_path)
    excel_name = inst_name + '_latest_' + str(num_res) + '_results.xlsx'
    excel_path = qc_db_pobj.parents[1].joinpath(excel_name)
    png_name = 'qc_plot_' + raw_fname[:-4] + '.png'
    png_path = qc_db_pobj.parents[1].joinpath(png_name)
    return {'excel_out': excel_path, 'graph_out': png_path}
c8a8b8d625c05062a49578c18b918fc406bc88af
297,953
def create_path(wire):
    """
    Takes a list of tuples containing instructions as to the wire path.
    e.g. ('R', 124) -> Right 124 units

    Generates and returns a list of tuples representing the coordinates
    that a wire travels along on its path.
    """
    path = []
    position = [0, 0]
    path.append(position[:])
    for i in range(len(wire)):
        if wire[i][0] == 'U':
            for j in range(wire[i][1]):
                position[0] += 1
                path.append(position[:])
        elif wire[i][0] == 'D':
            for j in range(wire[i][1]):
                position[0] -= 1
                path.append(position[:])
        elif wire[i][0] == 'R':
            for j in range(wire[i][1]):
                position[1] += 1
                path.append(position[:])
        elif wire[i][0] == 'L':
            for j in range(wire[i][1]):
                position[1] -= 1
                path.append(position[:])
    return path
04c9229bf8f8d4c51f9b209c0e9d01a5c33a6e69
422,021
def _get_env_var(repository_ctx, name, default=None):
    """Returns a value from an environment variable."""
    for key, value in repository_ctx.os.environ.items():
        if name == key:
            return value
    return default
b58fdb7a807ca00db0f3a6336617950e2a9e492a
565,173
def common_base_current_gain(spec=None, i_c=0, i_e=1, beta=0):
    """
    The low frequency common-base current gain of a bipolar junction transistor.

    Parameters
    ----------
    spec : string, required
        Specifies calculation type. Choose 'beta' for calculations based on
        common-emitter current gain. The default is None.
    i_c : float, optional
        Collector current. The default is 0.
    i_e : float, optional
        Emitter current. The default is 1.
    beta : float, optional
        Common-emitter current gain. The default is 0.

    Returns
    -------
    Float value corresponding to the low freq common-base current gain
    """
    if spec == 'beta':
        alpha_dc = beta / (beta + 1)
    else:
        alpha_dc = i_c / i_e
    return alpha_dc
96b2862921d9dd16266e132d382c920768b9557a
556,798
import json

def getConfigNames(xen, session, logger, ref):
    """
    Return all config names associated with the given disk by reading the
    disk's other config in Xen.

    :param logger: A logger used for logging possible errors.
    :type logger: seealso:: :class:`logging:Logger`
    :param ref: Xen reference of the disk.
    :type ref: str
    :returns: An array with config names that reference this disk
    :rtype: [str]
    """
    result = xen.VDI.get_other_config(session, ref)
    if result['Status'] == "Failure":
        # Use {0} (not {1}) so str.format finds its single positional argument,
        # and return an empty list to honour the documented return type.
        logger.warning("Error while getting other config of VDI. Error: {0}."
                       .format(str(result['ErrorDescription'])))
        return []
    otherConfig = result['Value']
    if "configs" in otherConfig:
        return json.loads(otherConfig['configs'])
    else:
        return []
d108c718b244163a23ee974ca456714fdca88ee9
646,279
def group_mols_by_container_index(mol_lst):
    """Take a list of MyMol.MyMol objects, and place them in lists according
    to their associated contnr_idx values. These lists are accessed via a
    dictionary, where the keys are the contnr_idx values themselves.

    :param mol_lst: The list of MyMol.MyMol objects.
    :type mol_lst: list
    :return: A dictionary, where keys are contnr_idx values and values are
        lists of MyMol.MyMol objects
    :rtype: dict
    """
    # Make the dictionary.
    grouped_results = {}
    for mol in mol_lst:
        if mol is None:
            # Ignore molecules that are None.
            continue
        idx = mol.contnr_idx
        if idx not in grouped_results:
            grouped_results[idx] = []
        grouped_results[idx].append(mol)

    # Remove redundant entries.
    for key in list(grouped_results.keys()):
        grouped_results[key] = list(set(grouped_results[key]))

    return grouped_results
f8b1c1fb405516b1cca375155cd29f9fbdf93380
437,924
def shift_substitution(sub_current: dict, num_places: int) -> dict:
    """
    Edit entries in the substitution dictionary by shifting the output
    characters by num_places through the alphabet (wrapping around)
    """
    for cipherchar in sub_current:
        plainchar = sub_current[cipherchar]
        num = ord(plainchar)
        if 97 <= num <= 122:  # only shift lowercase a-z
            num = ((num - 97 + num_places) % 26) + 97
            plainchar = chr(num)
        sub_current[cipherchar] = plainchar
    return sub_current
c7fb51851ee0fe6fdaf7bd418ce542ad442eb526
345,032
def getDatasetName(token, channel_list, colors, slice_type):
    """Return a dataset name given the token, channel, colors and slice_type"""
    if colors is not None:
        channel_list = ["{}:{}".format(a, b)
                        for a, b in zip(channel_list, colors)]
    return "{}-{}-{}".format(token, ','.join(channel_list), slice_type)
98ee171091a9e48bf62d346c3c2d0bf0a6d27390
441,092
from typing import Any
from typing import Dict

def aligned_train_configs(config1: Dict[str, Any], config2: Dict[str, Any]) -> bool:
    """
    Check whether two training configurations are aligned, i.e. a training run
    with ``config1`` that was interrupted can be resumed with ``config2``.
    """
    aligned_settings = [
        "env_name",
        "lr_schedule_type",
        "initial_lr",
        "final_lr",
        "normalize_transition",
        "normalize_first_n",
        "architecture_config",
        "evaluation_freq",
        "evaluation_episodes",
        "time_limit",
    ]

    equal = True
    for setting in aligned_settings:
        equal = equal and config1[setting] == config2[setting]
    return equal
929cb1b1a65965cc76bd60652e9494c772a22d4a
501,628
def final(fs):
    """Returns the final loss value, normalized by the initial value."""
    return fs[-1] / fs[0]
a49831d1ee343e6a9a7b050f36ed1eb1b5f767a9
539,464
from collections import OrderedDict

def unique(seq):
    """Return list of unique elements preserving original order."""
    # collections.OrderedDict is the runtime class; typing.OrderedDict is
    # meant for annotations only.
    return list(OrderedDict.fromkeys(seq))
cde7ccde5461682633fef039190634ba9aa59d23
592,213
import math

def _fitfunc1(a, color, FeH, err):
    """Fitting function of *θ* = 5040/|Teff| vs color index and [Fe/H].

    This function calculates *θ* as a function of (color, [Fe/H]) using the
    following relation:

    .. math::
        \\theta = a_0 + a_1X + a_2X^2 + a_3X\\mathrm{[Fe/H]}
            + a_4\\mathrm{[Fe/H]} + a_5\\mathrm{[Fe/H]}^2

    where *X* denotes the value of color index. The partial derivatives of
    *θ* with respect to *X* and [Fe/H] are:

    .. math::
        \\frac{\\partial\\,\\theta}{\\partial\\,X} =
            a_1 + 2a_2X + a_3\\mathrm{[Fe/H]} \\\\
        \\frac{\\partial\\,\\theta}{\\partial\\,\\mathrm{[Fe/H]}} =
            a_3X + a_4 + 2a_5\\mathrm{[Fe/H]}

    The uncertainty of *θ* is calculated as the quadratic sum of the
    uncertainties caused by Δ\\ *X* and Δ[Fe/H], and the standard deviation
    of the fitted relation:

    .. math::
        \\Delta\\theta = \\sqrt{
            \\left(\\frac{\\partial\\,\\theta}{\\partial\\,X}\\Delta X\\right)^2
            + \\left(\\frac{\\partial\\,\\theta}{\\partial\\,\\mathrm{[Fe/H]}}
              \\Delta\\mathrm{[Fe/H]}\\right)^2
            + \\sigma^2(\\theta)
        }

    Args:
        a (list or tuple): Coefficients.
        color (tuple): Value of color index and its uncertainty.
        FeH (tuple): Metallicity [Fe/H] and its uncertainty.
        err (float): Standard deviation of *θ*.

    Returns:
        tuple: A tuple containing:

            * *float*: *θ* = 5040/|Teff|
            * *float*: Δ\\ *θ*
    """
    color, color_err = color
    FeH, FeH_err = FeH
    theta = a[0] + a[1]*color + a[2]*color**2 + a[3]*color*FeH + a[4]*FeH + \
            a[5]*FeH**2
    dc = a[1] + 2*a[2]*color + a[3]*FeH
    dm = a[3]*color + a[4] + 2*a[5]*FeH
    dtheta = math.sqrt(err**2 + (dc*color_err)**2 + (dm*FeH_err)**2)
    return theta, dtheta
54c564fa76ca06f1cee6aedffba6e4e5f91d969e
254,504
def get_html_lang_attribute(language_code: str) -> str:
    """
    Return the HTML lang attribute for a given language code,
    e.g. "en-us" -> "en", "en" -> "en"
    """
    try:
        pos = language_code.index("-")
    except ValueError:
        # no "-" in language_code
        return language_code
    return language_code[:pos]
e2afc1037e66a46467a8724f4b2248cb1b32a867
135,447
def frequencies(variant_obj):
    """Add frequencies in the correct way for the template

    This function converts the raw annotations to something better to visualize.
    GnomAD is mandatory and will always be shown.

    Args:
        variant_obj(scout.models.Variant)

    Returns:
        frequencies(list(tuple)): A list of frequencies to display
    """
    if variant_obj["category"] == "sv":
        freqs = {
            "gnomad_frequency": {"display_name": "GnomAD", "link": None},
            "clingen_cgh_benign": {
                "display_name": "ClinGen CGH (benign)",
                "link": None,
            },
            "clingen_cgh_pathogenic": {
                "display_name": "ClinGen CGH (pathogenic)",
                "link": None,
            },
            "clingen_ngi": {"display_name": "ClinGen NGI", "link": None},
            "clingen_mip": {"display_name": "ClinGen MIP", "link": None},
            "swegen": {"display_name": "SweGen", "link": None},
            "decipher": {"display_name": "Decipher", "link": None},
            "thousand_genomes_frequency": {"display_name": "1000G", "link": None},
            "thousand_genomes_frequency_left": {
                "display_name": "1000G(left)",
                "link": None,
            },
            "thousand_genomes_frequency_right": {
                "display_name": "1000G(right)",
                "link": None,
            },
        }
    else:
        freqs = {
            "gnomad_frequency": {
                "display_name": "GnomAD",
                "link": variant_obj.get("gnomad_link"),
            },
            "thousand_genomes_frequency": {
                "display_name": "1000G",
                "link": variant_obj.get("thousandg_link"),
            },
            "max_thousand_genomes_frequency": {
                "display_name": "1000G(max)",
                "link": variant_obj.get("thousandg_link"),
            },
            "exac_frequency": {
                "display_name": "ExAC",
                "link": variant_obj.get("exac_link"),
            },
            "max_exac_frequency": {
                "display_name": "ExAC(max)",
                "link": variant_obj.get("exac_link"),
            },
        }

    frequency_list = []
    for freq_key in freqs:
        display_name = freqs[freq_key]["display_name"]
        value = variant_obj.get(freq_key)
        link = freqs[freq_key]["link"]
        # Always add gnomad
        if freq_key == "gnomad_frequency":
            # If gnomad not found search for exac
            if not value:
                value = variant_obj.get("exac_frequency")
            value = value or "NA"
            frequency_list.append((display_name, value, link))
            continue
        if value:
            frequency_list.append((display_name, value, link))

    return frequency_list
10357697b5b3429a07455aea2e22c327c91f18a5
530,116
import re

def parse_config_str(config_str: str):
    """
    Args:
        config_str (str): configuration string of dash-separated key-value
            pairs, where a missing numeric value marks a boolean flag.

    ### Examples:
    >>> input_1: 'rand-re0.25'
    >>> output_1: {'rand': True, 're': 0.25}
    >>> input_2: 'baseline'
    >>> output_2: {'baseline': True}
    """
    configs = dict()
    for kv_pair in config_str.split('-'):
        result = re.split(r'(\d.*)', kv_pair)
        if len(result) == 1:
            # No digits: the pair is a bare flag.
            k = result[0]
            configs[k] = True
        else:
            assert len(result) == 3 and result[2] == ''
            k, v, _ = result
            configs[k] = float(v)
    return configs
bcbf3d0a1f4cb3123f8ec9b08a11a1383650627a
90,061
def adjust_returns_for_slippage(returns, turnover, slippage_bps):
    """Apply a slippage penalty for every dollar traded.

    Parameters
    ----------
    returns : pd.Series
        Time series of daily returns.
    turnover: pd.Series
        Time series of daily total of buys and sells divided by portfolio
        value. See txn.get_turnover.
    slippage_bps: int/float
        Basis points of slippage to apply.

    Returns
    -------
    pd.Series
        Time series of daily returns, adjusted for slippage.
    """
    slippage = 0.0001 * slippage_bps
    # Only include returns in the period where the algo traded.
    trim_returns = returns.loc[turnover.index]
    return trim_returns - turnover * slippage
d445bf566f5c228ffda793089d7bfe23f3897df2
20,869
import json

def read_json(file_name):
    """
    Reads the JSON file with name file_name and returns its contents.

    :param file_name: The name of the JSON file
    :return: The content of the JSON file
    """
    # Open via a context manager so the file handle is closed after loading.
    with open(file_name, 'r') as f:
        return json.load(f)
97ca0b219f0b4dfba1ebe6d5afc69ed8f80575c9
245,183
def modelled_anomaly(ssh_m, ssh_tides):
    """Calculates the modelled ssh anomaly by finding the difference between
    a simulation with all forcing and a simulation with tides only.

    :arg ssh_m: An array of modelled ssh
    :type ssh_m: numpy array

    :arg ssh_tides: Array tides only simulation
    :type ssh_tides: numpy array

    :returns: anom: the difference between all_forcing and tidesonly
    """
    anom = ssh_m - ssh_tides
    return anom
0713f676543e97df211961886512b6837db831c6
512,734
import time

def prediction_timer(model, samples):
    """
    Times how long a model takes to make predictions on samples.
    """
    start_time = time.perf_counter()
    model.predict(samples)
    end_time = time.perf_counter()

    total_time = end_time - start_time
    time_per_pred = total_time / len(samples)
    return total_time, time_per_pred
8f5651f9744b54401baff986ef23a260da8b4d4c
64,887
def qualified_name(object_instance):
    """Return the fully qualified type name of an object.

    :param object_instance: Object instance.
    :return: Fully qualified name string.
    """
    if hasattr(object_instance, '__module__'):
        return object_instance.__module__ + '.' + type(object_instance).__name__
    else:
        return type(object_instance).__name__
f3b909b08603391c1661c05fa2ba34e848483a6a
649,702
from typing import ByteString

def os2ip(x: ByteString) -> int:
    """
    OctetString to Integer Primitive:
    https://tools.ietf.org/html/rfc8017#section-4.2
    """
    return int.from_bytes(x, "big")
773b09cc775b7f9f77f0fc5843b254bfa8fbed66
532,600
def beta_model_derivative(r3d_kpc, n0, r_c, beta):
    """Compute the derivative of a beta model dn/dr

    Parameters
    ----------
    - r3d_kpc: array of radius in kpc
    - r_c : core radius parameter
    - n_0 : normalization
    - beta : slope of the profile

    Outputs
    -------
    - beta model derivative profile as a function of the input radius vector
    """
    return -3.0*n0*beta*r3d_kpc*(1 + (r3d_kpc/r_c)**2)**(-3.0*beta/2.0 - 1.0)/r_c**2
69d65ac3389dc7c838c84d0cbde34aa5c99a9397
542,308
def k_fold_boundaries(values, folds):
    """Take a list of values and number of folds, return equally spaced
    boundaries as tuples"""
    return [
        (int((i / folds) * len(values)), int(((i + 1) / folds) * len(values)))
        for i in range(folds)
    ]
8b8b3fe1e2b191e538fb5c96a58de01f59ef66a8
39,892
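For instance, splitting 10 values into 3 folds (a quick check of the boundary arithmetic; note the integer truncation of 10/3 and 20/3):

    assert k_fold_boundaries(list(range(10)), 3) == [(0, 3), (3, 6), (6, 10)]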
from typing import Callable

def get_pagination_request_result(limit: int, page: int, max_page_size: int,
                                  client_request: Callable, **kwargs) -> dict:
    """
    Perform API request for pagination utility.

    Args:
        limit (int): The number of results to retrieve.
        page (int): The page number of the results to retrieve.
        max_page_size (int): API maximum page size limitation.
        client_request (Callable): API Client function.

    Returns:
        dict: API response from GCP.
    """
    offset = (page - 1) * limit
    page_token = None
    # Guard against a zero step: page == 1 means there are no pages to skip,
    # and range() with step 0 would raise ValueError.
    if offset > 0:
        steps = max_page_size if offset > max_page_size else offset
        for i in range(0, offset, steps):
            response = client_request(limit=steps, page_token=page_token, **kwargs)
            page_token = response.get('nextPageToken')
            if not page_token:
                return {}
    return client_request(limit=limit, page_token=page_token, **kwargs)
0d3580f17914132441665c67fe2ab2f25cf22bd5
560,196
def quantile_turnover(quantile_factor, quantile, period=1):
    """
    Computes the proportion of names in a factor quantile that were
    not in that quantile in the previous period.

    Parameters
    ----------
    quantile_factor : pd.Series
        DataFrame with date, asset and factor quantile.
    quantile : int
        Quantile on which to perform turnover analysis.
    period: int, optional
        Number of days over which to calculate the turnover.

    Returns
    -------
    quant_turnover : pd.Series
        Period by period turnover for that quantile.
    """
    quant_names = quantile_factor[quantile_factor == quantile]
    quant_name_sets = quant_names.groupby(level=['date']).apply(
        lambda x: set(x.index.get_level_values('asset')))
    name_shifted = quant_name_sets.shift(period)
    new_names = (quant_name_sets - name_shifted).dropna()
    quant_turnover = new_names.apply(
        lambda x: len(x)) / quant_name_sets.apply(lambda x: len(x))
    quant_turnover.name = quantile
    return quant_turnover
6c7b2afdd4c4f0a2dbf38064d2d8664a25370ca2
709,979
import re

def get_desc_ga(filename):
    """ Get the gathering threshold (GA) from the DESC file. """
    with open(filename, 'r') as f:
        for line in f:
            if not line.startswith('GA'):
                continue
            parts = re.split(r'\s+', line.strip())
            return float(parts[-1])
b870bd3a6bd42c74c31ac4ac305ee6c568ca0a31
400,310
from functools import reduce

def _parse_address(full_address):
    """Parse a string containing a range address.

    Parse an Excel range address into a sheet name and a range specification.
    Example: ``_parse_address("'my sheet'!A1:X100")`` would return
    ``("my_sheet", "A1:X100")``.
    """
    splitaddr = full_address.split("!")
    if len(splitaddr) == 1:
        sheet_name = None
        address = splitaddr[0]
    elif len(splitaddr) == 2:
        characters_to_strip = list("'<>=")
        sheet_name = reduce(
            lambda x, y: x.strip(y), characters_to_strip, splitaddr[0]
        )
        sheet_name = sheet_name.split("]")[-1]
        address = splitaddr[1].strip(">")
    else:
        raise ValueError(
            "Invalid address in _parse_address: "
            'cannot contain multiple "!" characters'
        )
    return (sheet_name, address)
b9da87e80ad65054bf3fec00d8fe18fc1780914d
414,715
def get_wdl_boolean_string(boolean):
    """ WDL expects `true` or `false` strings for `read_boolean`;
    Python `str` doesn't work """
    return str(boolean).lower()
0482aa1f3d4234859fa03cbe053eb97dc5c09479
108,694
def _make_url(addr):
    """Create a URL that will be recognized by urlparse."""
    return addr if str(addr).startswith("//") else "//{}".format(addr)
1a7fb0c7b3ccb65b764a92b4ecff8516acd7e15e
388,316
def rescale(X, x_min, x_max):
    """
    Rescales the input array linearly to the range [x_min, x_max].

    This method is often used in the context of making maps with
    matplotlib.pyplot.imshow, which expects arrays in the [0, 1] range.

    :param X: numpy.ndarray
        This is the input array to be rescaled.
    :param x_min: float or int
        The lower boundary for the new array to have.
    :param x_max: float or int
        The upper boundary for the new array to have.
    :return: numpy.ndarray
        The array, linearly rescaled to the range [x_min, x_max].
    """
    nom = (X - X.min(axis=0)) * (x_max - x_min)
    denom = X.max(axis=0) - X.min(axis=0)
    return x_min + nom / denom
6f002651a48519e1f2a82365ba1c03f2e6a7c828
652,937
def number_of_nodes(G, t=None):
    """Return the number of nodes in the t snapshot of a dynamic graph.

    Parameters
    ----------
    G : Graph object
        DyNetx graph object

    t : snapshot id (default=None)
        If None return the number of nodes in the flattened graph.

    Returns
    -------
    nnodes : int
        The number of nodes in the graph.

    See Also
    --------
    order
        which is identical

    Examples
    --------
    >>> G = dn.DynGraph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_path([0, 1, 2], t=0)
    >>> dn.number_of_nodes(G, 0)
    3
    """
    return G.number_of_nodes(t)
37aa6dc256f72e471664cc334b1cefe2608bab69
264,675
def sgn(x):
    """ Returns sign of number """
    if x == 0:
        return 0.
    elif x > 0:
        return 1.
    else:
        return -1.
d0fc9115fc5b99f38e5913a100c003cb4b895169
205,707
import math

def ac15_defl(w, l, E, I, x):
    """Deflection at x - Beam fixed at both ends - Uniformly dist. loads

    Calculates the deflection in the beam at any location, x, along the beam
    due to a uniformly distributed load.

    d = w*math.pow(x,2)/(24.0*E*I)*math.pow(l-x,2)

    Args:
        w (float): uniformly distributed load
        l (float): length of beam between supports
        E (float): modulus of elasticity
        I (float): section modulus
        x (float): distance along beam from left support

    Returns:
        d (tuple(float, str)): deflection at x

    Notes:
        1. Consistent units are the responsibility of the user.
    """
    d = w*math.pow(x, 2)/(24.0*E*I)*math.pow(l-x, 2)
    text = (f'd = w*math.pow(x,2)/(24.0*E*I)*math.pow(l-x,2) \n' +
            f'd = {w:.3f}*math.pow({x:.2f},2)/(24.0*{E:.1f}*{I:.1f})'
            f'*math.pow({l:.2f}-{x:.2f},2) \n' +
            f'd = {d:.3f}')
    return d, text
cb2dbd005bee6c92d789a3a35d335514a48d2228
155,111
def logging_lvl_to_kaldi_lvl(lvl: int) -> int:
    """Convert logging level to kaldi level"""
    if lvl >= 10:
        lvl = max(-3, (lvl - 20) // -10)
    else:
        lvl = 11 - lvl
    return lvl
49063875491479b1c986d58e97781c83d4a74b2c
620,937
def string_to_bool(s, default=False):
    """ Turns a string into a bool, giving a default value preference. """
    if len(str(s).strip()) == 0:
        return default
    if default:
        # Default True: only an explicit 'F...' string turns it off.
        return s[0].upper() != "F"
    else:
        # Default False: only an explicit 'T...' string turns it on.
        return s[0].upper() == "T"
e80de5c80844da6bfa3533ce66c680175db2886d
312,779
def _get_header(request_type):
    """Returns header str for talking with cosmos

    :param request_type: name of specified request (ie uninstall-request)
    :type request_type: str
    :returns: header information
    :rtype: str
    """
    return ("application/vnd.dcos.package.{}+json;"
            "charset=utf-8;version=v1").format(request_type)
9cb0f296455b78ef522a2797d7a07046853d11c8
679,296
import math

def minimum_shock_angle(m):
    """
    Calculates the shock angle for which the deflection angle is zero

    Input:
        m - Mach number
    """
    return math.asin(1 / float(m))
6467e3512314aa1fa55966f0388a3b94ffed5060
106,513
def first_relationship_that_matches(end_def, end_def_type, end_def_name,
                                    relationship_typedefs):
    """
    Find the first relationship type that matches the end_def number, type
    and name from the provided typedefs.

    :param str end_def: Either 'endDef1' or 'endDef2'
    :param str end_def_type: The type within the end_def.
    :param str end_def_name:
        The name of the relationship attribute applied to the end def's
        entity. (e.g. columns, table, columnLineages, query)
    :param list(dict) relationship_typedefs:
        A list of dictionaries that follow the relationship type defs.
    :raises ValueError:
        A relationship dict was not found in the provided typedefs.
    :return: The matching relationship type definition.
    :rtype: dict
    """
    output = None
    for typedef in relationship_typedefs:
        if ((end_def in typedef) and
                (typedef[end_def]["type"] == end_def_type) and
                (typedef[end_def]["name"] == end_def_name)):
            output = typedef
            # Stop at the first match, as the function name and docstring
            # promise; without the break the last match would win.
            break

    if output is None:
        raise ValueError(
            "Unable to find a relationship type that matches: {endDef} "
            "with type {end_def_type} and the name {end_def_name} from "
            "the {num_defs} provided."
            .format(
                endDef=end_def,
                end_def_type=end_def_type,
                end_def_name=end_def_name,
                num_defs=len(relationship_typedefs)
            )
        )
    return output
c81afca6232d831cc866314ddf5c375782936f7a
272,608
def check_tag(s, l):
    """Checks whether string s is contained in any of the strings in list l."""
    for i in l:
        if s in i:
            return True
    return False
87d151bd5ee6355770f7e64f9ee78544f75d4bf2
92,968
def atomic_number(a):
    """ Atomic number of atom """
    return a.GetAtomicNum()
985cd18af8b6659fc4a32439810f4cce12966582
417,195
def try_int(val):
    """Tries to convert val to int. Raises ValueError upon failure.

    In contrast to builtin int, this function returns 0 for an empty string.
    """
    try:
        return int(val)
    except ValueError:
        if len(val) == 0:
            return 0
        else:
            raise ValueError("Cannot convert to integer: {}".format(val))
447392fd68cd4e6a57ea39c06707697632b9edbb
543,929
def get_hosts_from_group(group):
    """
    Return hosts from Inventory by given group

    :param group: Name of the group
    :return: Hosts
    """
    return group['hosts']
21aac2ef49d2a87a270e94886495085baf2bacdd
317,995
def getEpisodeNumInStr(episode_num: str) -> str:
    """
    Change the string representation of a one-digit int to two digits
    (e.g. "3" -> "03")
    """
    if int(episode_num) >= 10:
        return episode_num
    else:
        return '0' + episode_num
11cf25a36bc5759c4eb070e5d3750cbdb4e69b6a
497,742
def normalize_angle_deg(angle: float) -> float:
    """ Given an angle in degrees, normalises it into (-180, 180] """
    # ATTRIBUTION: https://github.com/Gor-Ren/gym-jsbsim
    new_angle = angle % 360
    if new_angle > 180:
        new_angle -= 360
    return new_angle
2b81037a4b8cfa1b770ff18232a103c7537a4961
433,255
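A couple of spot checks on the wrap-around behaviour (values follow directly from Python's modulo arithmetic):

    assert normalize_angle_deg(190) == -170   # 190 % 360 = 190 > 180 -> 190 - 360
    assert normalize_angle_deg(-190) == 170   # -190 % 360 = 170 in Python
    assert normalize_angle_deg(360) == 0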
import time

def make_side_effect(messages, delay=None):
    """Make a side effect from a list of messages, optionally adding a delay."""
    msg_queue = list(reversed(messages))
    sleep_delay = delay

    def side_effect(*args, **kwargs):
        if sleep_delay is not None:
            time.sleep(sleep_delay)
        return msg_queue.pop()

    return side_effect
35ca33afd2587eb4e6883a3f8eda2a1b205691b8
267,639
from typing import Any
from typing import Callable
from typing import Iterator
from typing import Optional
import inspect

def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
    """Iterates a response JSON yielding data point by point.

    Can be used with both regular and chunked responses. By default, returns
    just a plain list of values representing each point, without column names,
    or other metadata.

    In case a specific format is needed, an optional ``parser`` argument can
    be passed. ``parser`` is a function/callable that takes data point values
    and, optionally, a ``meta`` keyword argument holding a dictionary with all
    or a subset of the following keys:
    ``{'columns', 'name', 'tags', 'statement_id'}``.

    Sample parser functions:

    .. code:: python

        # Function with optional meta argument
        def parser(*x, meta):
            return dict(zip(meta['columns'], x))

        # Namedtuple (callable)
        from collections import namedtuple
        parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])

    :param resp: Dictionary containing parsed JSON
        (output from InfluxDBClient.query)
    :param parser: Optional parser function/callable
    :return: Generator object
    """
    for statement in resp['results']:
        if 'series' not in statement:
            continue
        for series in statement['series']:
            if parser is None:
                return (x for x in series['values'])
            elif 'meta' in inspect.signature(parser).parameters:
                meta = {k: series[k] for k in series if k != 'values'}
                meta['statement_id'] = statement['statement_id']
                return (parser(*x, meta=meta) for x in series['values'])
            else:
                return (parser(*x) for x in series['values'])
    return iter([])
000c2c873ab38378bb42945ed3304213b254061a
704,064
def _to_gj_point(obj):
    """
    Dump an Esri JSON Point to GeoJSON Point.

    :param dict obj: An EsriJSON-like `dict` representing a Point.
    :returns: GeoJSON representation of the Esri JSON Point
    """
    if obj.get("x", None) is None or obj.get("y", None) is None:
        return {'type': 'Point', 'coordinates': ()}
    return {'type': 'Point', 'coordinates': (obj.get("x"), obj.get("y"))}
8ee9299be34fe7402eb350589a58a5fdb471c20a
32,205
def extract_csv_links(text):
    """Get a list of csv links from the download link response text"""
    links = text.replace("\r", "").split("\n")
    # Filter instead of list.remove(""), which raises ValueError
    # when no empty entry is present.
    links = [link for link in links if link]
    return links
e512a785759b4a80d903975ab7309b892c5c22c5
648,622
import re

def minor_fixes(text):
    """Fix some minor errors before processing the page."""
    # Drop empty sections.
    text = re.sub(r"^==.*?==\n+(==.*?==)$", "\\1", text, flags=re.M)
    return text
f4dbce64a29eb8010a7bedf3e28624dfd2664ee5
328,941
def exp(decay_const, epoch):
    """
    Applies exponential decay as a function of the number of training epochs.

    Args:
        decay_const (float): Constant used in the exponential decay function
        epoch (int): Current training epoch

    Returns:
        (float) Updated scheduled sampling rate
    """
    return decay_const ** epoch
47f94967a861d805da47e0131cd716798fe87ffb
203,506
def monorepo_simple_clang_remote_git_dir(monorepo_test_fixture) -> str:
    """
    Return a path to the git directory which acts as the remote for the
    'clang' split repo.
    """
    return monorepo_test_fixture.clang_split_remote_path
2cd9c08c6a6041dc80de39f46f0c44c76ed826c2
380,751
def calculate_frequencies(tokens: list) -> dict:
    """
    Calculates frequencies of given tokens
    :param tokens: a list of tokens without stop words
    :return: a dictionary with frequencies
    e.g. tokens = ['weather', 'sunny', 'man', 'happy']
    --> {'weather': 1, 'sunny': 1, 'man': 1, 'happy': 1}
    """
    if not isinstance(tokens, list):
        return {}
    if len(tokens) > 0 and not isinstance(tokens[0], str):
        return {}
    set_words = set(tokens)
    dict_freq = {word: tokens.count(word) for word in set_words}
    return dict_freq
8db30ca04e21be98ddea72a3bd67899ac17e886a
456,987
def file_to_ints(input_file):
    """
    Input:  A file containing one number per line
    Output: An int iterable
    Blank lines and lines starting with '#' are ignored
    """
    ints = []
    with open(input_file) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith('#'):
                ints.append(int(line))
    return ints
cb091f3d48a6770dc2f37529b9243038dbd44411
688,916
def calculate_protein_weight(protein, mass_table):
    """
    Given a protein sequence as string and a mass table as dictionary (with
    amino acids as keys and their respective weights as values), calculate
    the molecular weight of the protein by summing up the weight of each
    amino acid in the protein.
    """
    total_weight = 0
    for amino_acid in protein:
        weight = mass_table[amino_acid]
        total_weight += weight
    return round(total_weight, 3)
e46c111694b5d9b89523bd1c1cde4ca10664ffe2
543,776
def _profile_tag_from_conditions(conditions):
    """
    Given a list of conditions, return the profile tag of the
    device rule if there is one
    """
    for c in conditions:
        if c['kind'] == 'device':
            return c['profile_tag']
    return None
ca9a640ecfe52714ba901eeaf952e7c18a5a4210
98,819
def sort_relative(item):
    """ Helper to sort by relative error """
    return abs(item.error_rel)
97c41872e3b2212eaaf6b5ff9b381952b6ddef83
300,546
import re

def check_for_title(line):
    """
    Check the current line for whether it reveals the title of a new entry.

    :param str line: the line to check
    :return: tuple (the entry title, the entry type) or (None, None)
    """
    re_title = re.compile(
        '^(?P<title>.+) \\((?P<type>EPHEMERA OBJECT|SPELL|INCANTATION|'
        'OBJECT OF POWER|CONJURATION|INVOCATION|ENCHANTMENT|RITUAL|'
        'CHARACTER SECRETS|HOUSE SECRETS|FORTE ABILITY)\\)$')
    m = re_title.match(line)
    if m:
        return m.group('title'), m.group('type')
    return None, None
73d7b73d29e51b87a37810d434582c975c6d0c07
47,015
import math

def angle_between(first, second) -> float:
    """Find the angle of a line between the two points"""
    dy = second.y - first.y
    dx = second.x - first.x
    theta = math.atan2(dy, dx)
    if theta < 0:
        theta += 2 * math.pi
    return theta
46832fc1643e6746cc237915cc4bfc2bf9fa4a4b
153,167
def MakeFieldString(field):
    """Represents OptWrapper as a string prepending '(opt)' for optional fields.

    Args:
        field: an OptWrapper for a field

    Returns:
        a string representation of the field
    """
    field_str = ''
    if field.optional:
        field_str += '(opt) '
    field_str += '{0}/{1}{2}'.format(field.field.namespace,
                                     field.field.field,
                                     field.field.increment)
    return field_str
9d18ab11778f38e5db11dd106169600a05b4f897
143,657
def get_output_attribute(out, attribute_name, cuda_device, reduction="sum"):
    """
    This function handles processing/reduction of output for both
    DataParallel or non-DataParallel situations. For the case of multiple
    GPUs, this function will sum all values for a certain output attribute
    in various batches together.

    Parameters
    ----------
    :param out: Dictionary, output of model during forward pass
    :param attribute_name: str
    :param cuda_device: list or int
    :param reduction: (string, optional) reduction to apply to the output.
        Default: 'sum'.
    """
    if isinstance(cuda_device, list):
        if reduction == "sum":
            return out[attribute_name].sum()
        elif reduction == "mean":
            return out[attribute_name].sum() / float(len(out[attribute_name]))
        else:
            raise ValueError("invalid reduction type argument")
    else:
        return out[attribute_name]
c09ff6a3dd4ae2371b1bbec12d4617e9ed6c6e1e
706,948
def validateUnits(cmodel, layers):
    """Validate model units.

    Args:
        cmodel (dict): Sub-dictionary from config for specific model.
        layers (dict): Dictionary of file names for all input layers.

    Returns:
        dict: Model units.
    """
    units = {}
    for key in cmodel['layers'].keys():
        if 'units' in cmodel['layers'][key]:
            units[key] = cmodel['layers'][key]['units']
        else:
            raise Exception('No unit string configured for layer %s' % key)
    return units
002b023c7b0b9ed04128c6246dfa2837101cfeb5
492,373
def is_valid_username(username):
    """Ensure username meets requirements. Returns True if successful."""
    return bool(username and username.isalnum() and len(username) <= 30)
0eca6ce5fe448e3056532404ab9fe415541fc130
663,060
import math

def one_to_one_matches(matches: dict):
    """
    A filter that takes a dict of column matches and returns a dict of
    1 to 1 matches. The filter works in the following way: at first it gets
    the median similarity of the set of the values and removes all matches
    that have a similarity lower than that. Then, from what remains, it
    matches columns from the highest similarity to the lowest until every
    column has at most one match.

    Parameters
    ----------
    matches : dict
        The ranked list of matches

    Returns
    -------
    dict
        The ranked list of matches after the 1 to 1 filter
    """
    set_match_values = set(matches.values())

    if len(set_match_values) < 2:
        return matches

    matched = dict()
    for key in matches.keys():
        matched[key[0]] = False
        matched[key[1]] = False

    # Sets are unordered: sort before indexing so the median is well defined.
    median = sorted(set_match_values)[math.ceil(len(set_match_values) / 2)]

    matches1to1 = dict()
    for key in matches.keys():
        if (not matched[key[0]]) and (not matched[key[1]]):
            similarity = matches.get(key)
            if similarity >= median:
                matches1to1[key] = similarity
                matched[key[0]] = True
                matched[key[1]] = True
            else:
                break
    return matches1to1
a630d717c4c14e84289fb11588b79a7a79e6854e
123,495
def basic_ttr(n_terms, n_words):
    """
    Type-token ratio (TTR) computed as t/w, where t is the number of unique
    terms/vocab, and w is the total number of words.
    (Chotlos 1944, Templin 1957)
    """
    if n_words == 0:
        return 0
    return n_terms / n_words
3d56fd414d6d462c722a2d29bd15bb7ef8bf7559
17,621
from pathlib import Path

def stem(p: str) -> str:
    """ Strip all extensions from a filename, e.g. foo.test.golden.fidl -> foo. """
    while Path(p).stem != p:
        p = Path(p).stem
    return p
466e5e2bd63130f8114d6f25ef64ce1787f6472f
111,339
def find_entity(entity_dict, entity_type, name):
    """
    Find an entity by its type and name.

    :param dict entity_dict: dictionary of parsed entities
    :param str entity_type: entity type to search for
    :param str name: entity name to search for
    """
    return entity_dict.get(entity_type, {}).get(name)
6d62729b9871d93e1f36c6a31a433f73c3fccf08
543,748