Dataset columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k). Each record below is listed in the order content, sha1, id.
def basic_name_formatter(name):
    """Basic formatter turning '_' into ' ' and capitalising."""
    return name.replace('_', ' ').capitalize()
4b87af7df7bfc23c8c63d958e29a4fe614780fa8
33,295
def no_inference(csp, var, assignment, removals):
    """If we do not implement an inference algorithm, just return that
    everything is ok."""
    return True
6bea7afca66a73a5236ef7667e857b746b4c95a4
33,297
def geojson_to_polygons(geojson):
    """
    Convert geojson as generated by geojs's annotation layer.

    :param geojson: geojson record.
    :returns: an array of polygons, each of which is an array of points.
    """
    polys = []
    for feature in geojson['features']:
        if feature.get('geometry', {}).get('type') == 'Polygon':
            polys.append(feature['geometry']['coordinates'])
    return polys
6b82742335841897511640fc9323c5e4895bf367
33,302
def int_or_none(arg):
    """Return None if the argument is None or the string 'none'; otherwise
    return the argument converted to int."""
    if arg is None or str(arg).lower() == 'none':
        return None
    return int(arg)
651168240209540966ced78a5a2642a5979022fd
33,305
def pluralize(count, singular, plural):
    """Return singular or plural based on count."""
    return singular if count == 1 else plural
752b744ea80571e2a01332dd5a050ee4919d5637
33,307
def column_exists(df, col):
    """Check whether the column name exists in the DataFrame.

    Args:
        df (:obj:`DataFrame`): Pandas DataFrame.
        col (str): Column name.

    Returns:
        bool: True if it exists, False if it does not.
    """
    if col and (col not in df.columns):
        print("The specified column `{0!s}` was not found in the input file"
              .format(col))
        return False
    else:
        return True
18a1d5c9e1b40bdab38332e9aee17911d1fe153e
33,309
def get_file_name(file_url):
    """Extracts the file name from the file path url."""
    file_name = None
    # The original test `if file_url.find('/')` was buggy: find() returns -1
    # (truthy) when '/' is absent and 0 (falsy) when the url starts with '/'.
    if '/' in file_url:
        file_name = file_url.rsplit('/', 1)[1]
    return file_name
3d62696506ad27e8d84ba8d2a8c17b1ff6706553
33,321
from pathlib import Path
import shutil


def copy_assembly(src, dst):
    """
    Copy the directory passed to src to the dst directory and return the
    destination path. An exception is raised if the dst directory already
    exists.

    Args:
        src (str): The path of the directory that is copied.
        dst (str): The path of the destination directory.

    Returns:
        The path of the destination directory.
    """
    dst = Path(dst)
    if dst.exists() and dst.is_dir():
        raise Exception('destination folder already exists')
    src = Path(src)
    shutil.copytree(src, dst)
    return dst
66334040447907c99cbcd7e3dab884c5fd5a0da7
33,322
def _prune_dockerfile(string, comment_char="#"):
    """Remove comments, empty lines, and the last layer (serialized to JSON)."""
    json_removed = '\n\n'.join(string.split('\n\n')[:-1])
    return '\n'.join(row for row in json_removed.split('\n')
                     if not row.startswith(comment_char) and row)
484b1113dff432c56c23a36da507063561ec4a90
33,323
def eq_operator(one: object, another: object) -> bool:
    """
    Compare whether one value equals another.

    Function equivalent to the equals (`__eq__`) operator.

    :param one: One value to compare
    :param another: Another value to compare
    :returns: `True` if `one == another`.
    """
    return one == another
118ed13401b5d3b9a0446197c1f056e3e60c08c4
33,324
def need_attention(status_msg):
    """Return True if a repo status is not exactly the same as that of the remote."""
    msg = ["not staged", "behind", "ahead", "Untracked"]
    return any(each in status_msg for each in msg)
c0a28b32313ce4c6c0b079c27a7274545bf48a80
33,326
def initMyGraph(ctor):
    """Constructs and returns a hard-coded sample graph. ctor must be a
    callable that gets the number of vertices and creates a graph with the
    given number of vertices and with no edges."""
    g = ctor(5)
    g.addEdge(0, 1)
    g.addEdge(1, 0)
    g.addEdge(1, 1)
    g.addEdge(1, 2)
    g.addEdge(4, 0)
    g.addEdge(4, 2)
    return g
4fecca553d2b6d8accadd2f1ce6ccb2a1baed2d7
33,331
def doc_wrapper(squad_para, title=""):
    """
    This function wraps paragraphs into a document.

    :param squad_para: paragraphs in SQuAD format.
    :param title: the title of the paragraphs.
    :return: wrap of title and paragraphs
    """
    squad_doc = {
        'title': title,
        'paragraphs': [squad_para]
    }
    return squad_doc
bcc513bbaa2ba885d242009eaae72e7c5b04aea3
33,337
def smart_round_format(number, precision):
    """
    Args:
        number (float):
        precision (int):

    Returns:
        str:

    Examples:
        >>> smart_round_format(258.658, 2)
        '258.66'
        >>> smart_round_format(0.258658, 2)
        '0.26'
        >>> smart_round_format(0.0000258658, 2)
        '2.59e-05'
    """
    if number >= 0.1:
        # was hard-coded to round(number, 2), ignoring `precision`
        return str(round(number, precision))
    else:
        return ('{:0.' + str(precision) + 'e}').format(number)
9bc2cac03892e868a83e90a0df987692ad2d0a1e
33,340
import pickle


def get_earlier_cpds(month):
    """
    Finds all compounds which were inputted into SureChemBL prior to or
    equal to a given month.

    Args:
        month (string): Month, in the form YYYY-MM

    Returns:
        pandas dataframe: dataframe containing SureChemBL patent id, month
        of first entry, and igraph index
    """
    # Read in master compound-date-index dataframe
    agave_fp = "Data/Cpd_Data/master_cpd_date_index_df.p"
    # drive_fp = "G:/Shared drives/SureChemBL_Patents/Cpd_Data/master_cpd_date_index_df.p"
    df = pickle.load(file=open(agave_fp, "rb"))

    # #Small dataframe analysis
    # check_indicies(df)

    return df[df["Month"] <= month]
b6f7c976d523f3c308eb647bb31851b99a8b7856
33,341
def function_example(point_cloud, bool_flag=False):
    """
    Returns the given point cloud, with the potential to raise errors if the
    input is too large.

    Parameters
    ----------
    point_cloud : (n,3) numpy array
        Array containing the x-y-z coords of a 3d point cloud.
    bool_flag : boolean, optional
        A boolean flag to toggle error checking.

    Returns
    -------
    out : (n,3) numpy array
        Array containing the x-y-z coords of a 3d point cloud.

    Raises
    ------
    ValueError
        If the length of our point cloud exceeds 10,000.
    """
    # was `if bool and ...`, which tests the builtin `bool` (always truthy)
    # instead of the flag, so error checking could never be disabled
    if bool_flag and len(point_cloud) > 10000:
        raise ValueError('length of point_cloud cannot exceed 10,000 points')
    out = point_cloud
    return out
d4449ea8d9ea9db61a712679236ddcb903c41adc
33,343
def make_aa_to_codon_backtable(codontable):
    """Convert a codontable for use with convert_seq_to_seqspace().

    Returns a codontable (dict) in the format
    `{'F': ['TTT', 'TTC'], 'L': ['TTA', ...`

    Parameters
    ----------
    codontable
        dict of codons in the format `{'TTT': 'F', 'TTC': 'F', 'TTA': 'L', ...`
    """
    aa_list = list(set(codontable.values()))
    backtable = {key: [] for key in aa_list}
    for codon, aa in codontable.items():
        backtable[aa] = backtable[aa] + [codon]
    return backtable
3bd8327e639a11742a1914bf7234769b3f240e6e
33,344
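A quick usage sketch for the make_aa_to_codon_backtable record above; the three-entry table here is a truncated, hypothetical codon table, not a full genetic code:

# Inverting a (truncated, hypothetical) forward codon table.
forward = {'TTT': 'F', 'TTC': 'F', 'TTA': 'L'}
backtable = make_aa_to_codon_backtable(forward)
print(backtable)  # {'F': ['TTT', 'TTC'], 'L': ['TTA']}  (key order may vary)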
def fitness(bits):
    """
    Gets a numeric value corresponding to the fitness of a given solution.

    :param bits: A solution
    :return: A fitness value
    """
    return sum(bits)
ecc86a1e91298f6ecd1aebb3796afdfbf9ab628d
33,345
def calc_eval_metrics(aa_match_binary_list, orig_total_num_aa, pred_total_num_aa):
    """
    Calculate evaluation metrics using amino acid matches.

    Parameters
    ----------
    aa_match_binary_list : list of lists
        List of amino acid matches in each predicted peptide
    orig_total_num_aa : int
        Number of amino acids in the original peptide sequences
    pred_total_num_aa : int
        Number of amino acids in the predicted peptide sequences

    Returns
    -------
    aa_precision : float
        Number of correct aa predictions divided by all predicted aa
    aa_recall : float
        Number of correct aa predictions divided by all original aa
    pep_recall : float
        Number of correct peptide predictions divided by all original peptides
    """
    correct_aa_count = sum(sum(pred_tuple[0])
                           for pred_tuple in aa_match_binary_list)
    aa_recall = correct_aa_count / (orig_total_num_aa + 1e-8)
    aa_precision = correct_aa_count / (pred_total_num_aa + 1e-8)
    pep_recall = sum(pred_tuple[1] for pred_tuple in aa_match_binary_list) / (
        len(aa_match_binary_list) + 1e-8)
    return aa_precision, aa_recall, pep_recall
c0c843a3bc26587bdd6607dde4ad958e01fe38a2
33,351
import math


def find_first_divisor(N):
    """Finds the first divisor of N.

    Args:
        N: an integer to be factored

    Returns:
        an integer representing the first divisor of N
    """
    for i in range(2, int(math.sqrt(N)) + 1):
        if N % i == 0:
            return i
    return N
8ea0aa52341ce3d11ef27e7eafd6665a93e201ca
33,352
def token_value(token):
    """Get the value from a token."""
    return token.value
32b0697328b30df5c36e88db426b78e495be3c72
33,355
def to_dict_index(df):
    """
    Pandas dataframe to dictionary (index method)

    Parameters
    ---------------
    df
        Dataframe

    Returns
    --------------
    dict
        dict like {index -> {column -> value}}
    """
    return df.to_dict('index')
b30a84b581c2ea7392958a3b0752a3126d360c61
33,356
def proxied_attribute(local_attr, proxied_attr, doc):
    """Create a property that proxies attribute ``proxied_attr`` through
    the local attribute ``local_attr``.
    """
    def fget(self):
        return getattr(getattr(self, local_attr), proxied_attr)

    def fset(self, value):
        setattr(getattr(self, local_attr), proxied_attr, value)

    def fdel(self):
        delattr(getattr(self, local_attr), proxied_attr)

    return property(fget, fset, fdel, doc)
0a751a980db6de45bbafd12a24aeb743284750a3
33,358
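A small usage sketch for the proxied_attribute record above; the Engine/Car classes are hypothetical:

# Hypothetical classes demonstrating the proxying property.
class Engine:
    def __init__(self):
        self.rpm = 0

class Car:
    def __init__(self):
        self.engine = Engine()
    # Car.rpm reads/writes/deletes Car.engine.rpm
    rpm = proxied_attribute('engine', 'rpm', doc="Proxy for engine.rpm")

car = Car()
car.rpm = 3000
print(car.engine.rpm)  # 3000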
import six
import re


def listsearch(query, item):
    """Return match with query on an item from the list of input/output files.

    The lists of input and output files can consist either of file names
    (strings), or lists containing the filename and the hash of the file
    (if hashing is enabled). This function provides a transparent interface
    to these two options.

    Parameters:
        query : str
            The search query
        item : str or list containing two strings
            A file name or a list containing a file name and hash

    Returns:
        boolean
    """
    fh = ''
    if not isinstance(item, six.string_types):
        fh = item[1]
        item = item[0]
    return bool(re.search(query, item) or re.search(query, fh))
143debe27f3a206021aa42272da08763a8cae425
33,360
def get_type_qualname(cls):
    """Get a string uniquely identifying the supplied class."""
    if isinstance(cls, str):
        return cls
    if cls.__module__ == "__main__":
        return cls.__qualname__
    return f"{cls.__module__}.{cls.__qualname__}"
968346708d2f0f8be6c92d84e3d65e4050904f86
33,363
def split_indices(a: int, b: int, val_split=0.15, test_split=0.15):
    """
    Calculate the necessary indices for splitting a dataset into training,
    validation and test set in a diversified manner.

    :param a: First index.
    :param b: Last index.
    :param val_split: Float describing the fraction used for validation.
    :param test_split: Float describing the fraction used for testing.
    :return: A 7-tuple of integers with the following values:
        a: First index for training data
        val1: Starting index of first validation split
        test1: Starting index of first testset split
        data: Second index for training data
        val2: Starting index of second validation split
        test2: Starting index of second testset split
        b: Last index of the data
    """
    half = int((b - a) / 2)
    val_len = int(half * val_split)
    test_len = int(half * test_split)
    val1 = a + half - val_len - test_len
    test1 = a + half - test_len
    data = a + half
    val2 = b - val_len - test_len
    test2 = b - test_len
    return a, val1, test1, data, val2, test2, b
6054e171d67405c2ba2bd653a2f9e08e8781c4f4
33,364
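A worked example for the split_indices record above, under its default 15%/15% fractions:

# For a = 0, b = 1000: half = 500, val_len = test_len = 75.
a, val1, test1, data, val2, test2, b = split_indices(0, 1000)
print(val1, test1, data, val2, test2)  # 350 425 500 850 925
# i.e. two train blocks (0-349, 500-849), two validation blocks
# (350-424, 850-924), and two test blocks (425-499, 925-999).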
import math


def calc_spec_smh(T, specs):
    """Calculate standard-state entropies minus enthalpies for all species.

    Parameters
    ----------
    T : float
        Temperature of gas mixture.
    specs : list of SpecInfo
        List of species.

    Returns
    -------
    spec_smh : list of float
        List of species' standard-state entropies minus enthalpies.
    """
    spec_smh = []

    Tlog = math.log(T)
    T2 = T * T
    T3 = T2 * T
    T4 = T3 * T

    Thalf = T / 2.0
    T2 = T2 / 6.0
    T3 = T3 / 12.0
    T4 = T4 / 20.0

    for sp in specs:
        if T <= sp.Trange[1]:
            smh = (sp.lo[0] * (Tlog - 1.0) + sp.lo[1] * Thalf +
                   sp.lo[2] * T2 + sp.lo[3] * T3 + sp.lo[4] * T4 -
                   (sp.lo[5] / T) + sp.lo[6])
        else:
            smh = (sp.hi[0] * (Tlog - 1.0) + sp.hi[1] * Thalf +
                   sp.hi[2] * T2 + sp.hi[3] * T3 + sp.hi[4] * T4 -
                   (sp.hi[5] / T) + sp.hi[6])
        spec_smh.append(smh)

    return spec_smh
c773a0807fa5b4b199e3db775f6b455d15b643be
33,384
import requests


def get_resolution(pdb_id):
    """Get the resolution for a PDB id, or None if it doesn't have one."""
    ret = requests.post(
        "https://data.rcsb.org/graphql",
        json={
            "query": f"""
            {{
                entry(entry_id: "{pdb_id}") {{
                    pdbx_vrpt_summary {{
                        PDB_resolution
                    }}
                }}
            }}
            """
        },
    )
    data = ret.json()
    resol = data["data"]["entry"]["pdbx_vrpt_summary"]["PDB_resolution"]
    return resol
76249898e0b159235e8c9e5a3e6ede2c23c0e565
33,388
def calculate_deployment_wait_time(test_obj, failure_count=0):
    """Calculates wait time based potentially on a number of factors.
    If we need an exponential backoff this is the place.

    Possibilities:
        deploy_results.avg_response_time, deploy_results.last_response_time
        failure_count
        outstanding_deployments
        current_scale

    Max response time is 10s; as we approach that, bad things happen.

    Returns time in seconds to wait.
    """
    deploy_results = test_obj.deploy_results
    wait_time = 1
    if deploy_results.last_response_time < 1:
        wait_time = 1
    elif deploy_results.last_response_time > 8:
        wait_time = 5

    if failure_count > 3 and failure_count < 7:
        wait_time = wait_time + 5
    elif failure_count >= 7:  # was `> 7`, which left failure_count == 7 with no backoff bump
        wait_time = wait_time + 10
    return wait_time
d6f195c3963686e5ad955c1770587de374893b09
33,389
import gzip
import json


def load_gzip(file):
    """
    Read a gzip file.

    :param file: file to read
    :return: the dictionary contained in the gzip file
    """
    with gzip.open(file, "rb") as f:
        data = f.read()
        d = json.loads(data)
    return d
10b2639b30fd90a06c9182ad15a0beb5c09e8efd
33,393
import csv


def _csv_sniff(f):
    """
    Sniff using csv module whether or not a csv file (csv or gz) has a header.

    Arguments:
        f (filehandle) : filehandle of the file to be read
    """
    sniff_size = 2**20 - 1
    dialect = csv.Sniffer().sniff(f.read(sniff_size))
    f.seek(0)
    has_header = csv.Sniffer().has_header(f.read(sniff_size))
    f.seek(0)
    return dialect, has_header
f03f684d00fff20ac0f8c17fbbd4811c32aae9aa
33,394
def _arch_file_filter(arch_members, fname=None):
    """Filter func returning the archive members with name 'fname'
    (as a list, since the name may match more than one member)."""
    return [f for f in arch_members if f.name == fname]
6a19c3b8f7d2faa410a21922b5a5f6f3e0fd380c
33,397
def prepend_dollar_sign_to_ticker(tickers, prefix):
    """Add a dollar sign character to the beginning of each ticker in the list."""
    # Parameters renamed from `list`/`str`, which shadowed the builtins.
    prefix += '{0}'
    return [prefix.format(t) for t in tickers]
d192e5450dc7d7793a7daf06a8578433fefda35e
33,398
def banner() -> None:
    """Print the ASCII art stored in src/interface/art.txt."""
    with open("src/interface/art.txt", "r") as file:
        print(f"[red]{file.read()}[/]")
5e3e0dab46e7e6e33fad90a516490151c80f3719
33,399
def linear_interpolate(a, b, v1, v2, i):
    """Linear interpolation."""
    if v1 == v2:
        return a
    else:
        return a + (b - a) * (i - v1) / (v2 - v1)
dd686797f5311ff08ef5c0f7bb3642344ce8c705
33,401
def cat_from(strlist, v, suf):
    """
    Concatenate sublist of strings.

    :param strlist: list of strings
    :param v: sublist starting position
    :param suf: glue string
    :return: concatenated string
    """
    # return ''.join(map(lambda s: s + suf, strlist[v:]))
    return suf.join(strlist[v:])
1c4e2c0a4e7e8861c477bfd75e23c36a8ec8c370
33,405
def p2roundup(val: int, align: int) -> int:
    """
    Round up `val` to the next `align` boundary.
    """
    return ((val - 1) | (align - 1)) + 1
639793ab502297ecdfbf243084ccba31fdcc2a31
33,411
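The p2roundup record above relies on a classic bit trick that only works when align is a power of two; a short sanity check:

# OR-ing (val - 1) with (align - 1) sets all low bits, and +1 then
# carries up to the next multiple of `align` (a power of two).
print(p2roundup(5, 4))   # 8
print(p2roundup(8, 4))   # 8 — already-aligned values are unchanged
print(p2roundup(1, 16))  # 16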
import inspect


def list_functions(module):
    """
    List top-level function name & function.

    :param module: the module
    :return: dict of all functions
    """
    return dict(inspect.getmembers(module, inspect.isfunction))
5c0b97101de64c4c48db92209547c9e1c2675a29
33,412
import re


def has_drm_match(ocr_result, drm):
    """
    Checks if a drm matches the ocr_result format.

    Args:
        ocr_result (str): OCR result string;
        drm (dict): DRM dict object for parsing the OCR string.

    Returns:
        (bool): Returns True if the DRM identifier matches with the OCR
        result string.
    """
    id_regexps = drm["identifiers"]
    for id_regexp in id_regexps:
        regexp = re.compile(id_regexp, re.IGNORECASE)
        if not re.search(regexp, ocr_result):
            return False
    return True
2f0f067bee08ce9a309ccacd76c07fb81681675f
33,422
def forward_segment(text, dic):
    """
    Chinese word segmentation by forward maximum matching.

    :param text: the text to segment
    :param dic: the dictionary of known words
    :return: list of words
    """
    word_list = []
    i = 0
    while i < len(text):
        longest_word = text[i]
        for j in range(i + 1, len(text) + 1):
            word = text[i:j]
            if (word in dic) and (len(word) > len(longest_word)):
                longest_word = word
        # forward scan: words found earlier come earlier in the result
        word_list.append(longest_word)
        i += len(longest_word)
    return word_list
24c1158551563e82ea856de1322b65512d1aceaa
33,427
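A tiny sketch of the greedy matching in the forward_segment record above, using a hypothetical Latin-alphabet dictionary:

# Forward maximum matching always takes the longest dictionary word
# starting at the current position, then jumps past it.
dic = {'ab', 'abc', 'cd'}
print(forward_segment('abcd', dic))  # ['abc', 'd'] — greedy, not globally optimal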
def wulffmaker_gamma(energy):
    """
    Returns the string to be used for the Wulffmaker default gamma values.

    Arguments
    ---------
    energy: iterable
        Any iterable that holds the surface energies

    Returns
    -------
    str
        String to be copied to wulffmaker for the surface energies.
    """
    gamma_string = "pickGamma[i_] :=\n"
    gamma_string += "Which[\n"
    # the original initialized idx and incremented it inside the loop;
    # enumerate(..., start=1) produces the same 1-based indices
    for idx, value in enumerate(energy, start=1):
        gamma_string += "i=={},\n".format(idx)
        gamma_string += "{:.4f},\n".format(value)
    gamma_string += "True,\n"
    gamma_string += "1]"
    return gamma_string
756bd972cab96ef143303c3084e293d0c4e82c28
33,430
def check_repetitive(combination, df_main):
    """Check to avoid repetitive combinations.

    Parameters
    ----------
    combination:
        combination that should be checked
    df_main: pandas.DataFrame
        source dataframe

    Returns
    -------
    boolean:
        True: the combination does not yet exist in df_main
        False: it already exists
    """
    comparison_df = df_main.merge(combination, indicator=True, how='outer')
    if 'both' in comparison_df._merge.unique():
        return False
    else:
        return True
8d6a40e2e233c5b04c27fee0db851bde52694cfd
33,442
def result_pks(response, cast=None):
    """
    Returns ids from wagtail admin search result.

    :param cast: cast pks to a type, default int
    :param response: webtest response
    :return: ids list
    """
    cast = cast or int
    result_rows = response.lxml.xpath('.//tr[@data-object-pk]/@data-object-pk')
    return [cast(r) for r in result_rows]
c46373733cf1451ccb7dbadd842726445112c9f2
33,450
from typing import Tuple


def format_xml_property(
    metric_type: str, summary_method: str, metric_value: float
) -> Tuple[str, float]:
    """
    Formats metric summary into XML name-value tuple in the form of
    (metric_type[summary_method], metric_value)
    ex: (cpu_util[avg], 88.23)
    """
    return f"{metric_type}[{summary_method}]", metric_value
f26bfdefae85e220b9f4285a1122501ee7b7329a
33,451
import re


def split(text):
    """
    Split text into arguments, accounting for multi-word arguments which
    are double quoted.
    """
    # Cleanup text
    text = text.strip()
    text = re.sub(r'\s+', ' ', text)  # collapse multiple spaces

    space, quote, parts = ' ', '"', []
    part, quoted = '', False
    for char in text:
        # Encountered beginning double quote
        if char == quote and quoted is False:
            quoted = True
            continue
        # Encountered the ending double quote
        if char == quote and quoted is True:
            quoted = False
            parts.append(part.strip())
            part = ''
            continue
        # Found space while quoted
        if char == space and quoted is True:
            part += char
            continue
        # Found space but not quoted
        if char == space:
            if part:
                parts.append(part)
                part = ''
            continue
        # Found any other character
        part += char

    if part:
        parts.append(part.strip())
    return parts
061f76fefc7888b16cda87a1f6f4219bf5a74af6
33,452
def get_conn_args(database="mydb", user="user", password="password", host="mydb", port="5432"):
    """
    Get arguments for the connection to the PostgreSQL database server.

    Parameters
    ----------
    database: str
        String of the database (default: "mydb")
    user: str
        String of the user (default: "user")
    password: str
        String of the password (default: "password")
    host: str
        String of the host (default: "mydb"), "mydb" in docker and
        "localhost" in local
    port: str
        String of the port (default: 5432)

    Returns
    -------
    conn_args: dict
        Dictionary for the connection to the PostgreSQL database server
    """
    conn_args = {
        "database": database,
        "user": user,
        "password": password,
        "host": host,
        "port": port
    }
    return conn_args
e6898449371a477bdc103e8011ccf53f922a7705
33,454
def thresholdForIdentity(identity, colors):
    """
    Get the best identity threshold for a specific identity value.

    @param identity: A C{float} nucleotide identity.
    @param colors: A C{list} of (threshold, color) tuples, where threshold
        is a C{float} and color is a C{str} to be used as a cell background.
        This is as returned by C{parseColors}.
    @return: The first C{float} threshold that the given identity is at
        least as big as.
    """
    for threshold, _ in colors:
        if identity >= threshold:
            return threshold
    raise ValueError('This should never happen! Last threshold is not 0.0?')
34d4f84ae7339fda68c2bf3fbaa1a1ef7cbd6e95
33,455
def __prefixNumber(num, leading):
    """
    Prefixes "num" with zeroes until it is `leading` + 1 characters long.
    """
    length = int(leading) + 1
    num = str(num)
    while len(num) < length:
        num = '0' + num
    return num
55a3745a993ffd75b0186d918dbf2a2b74771f2e
33,460
import requests
from bs4 import BeautifulSoup


def scrap_page(slug):
    """
    Scrape the documentation page body text.

    :param slug: Documentation URL slug eg. 'the-lead-page'
    :return: Documentation page body text if it exists
    """
    url = f"https://help.close.com/docs/{slug}"
    response = requests.get(url)
    document = BeautifulSoup(response.text, 'html.parser')
    containers = document.select('#content-container')
    if containers:
        return containers[0].get_text()
c11279d6292d9a0f7171a8bc5ed94b1f5cdc363d
33,463
def stations_by_river(stations):
    """For Task1D: return a dictionary that maps rivers to the names of the
    stations on them."""
    dict_1d = {}
    for station in stations:
        if station.river in dict_1d:
            dict_1d[station.river].append(station.name)
        else:
            dict_1d[station.river] = [station.name]
    return dict_1d
f3fe723813552b6bdf40410c700aab7a66a7b894
33,468
def Shift(xs, shift):
    """Adds a constant to a sequence of values.

    Args:
        xs: sequence of values
        shift: value to add

    Returns:
        sequence of numbers
    """
    return [x + shift for x in xs]
e3e3f8b32c0cc4633ef09bbd1dbb2ef197ed79e6
33,471
def decode_story(id2word, result):
    """
    :param id2word: vocab
    :param result: (batch_size, story_size, seq_length)
    :return: out: a list of stories. the size of the list is batch_size
    """
    batch_size, story_size, seq_length = result.size()
    out = []
    for i in range(batch_size):
        txt = ''
        for j in range(story_size):
            for k in range(seq_length):
                vocab_id = result[i, j, k]
                if vocab_id != 2:  # 2 is presumably the end-of-sequence token id
                    txt = txt + ' ' + id2word[int(vocab_id.item())]
                else:
                    break
        out.append(txt)
    return out
0ad315bfde06904a1ca2590f32781361e75f1edd
33,472
def get_computed_response_parameter_number(response):
    """
    Extract the number of parameters from the Dialogflow response;
    fallback: 0.
    """
    try:
        return len(response.query_result.parameters)
    except Exception:  # was a bare except
        return 0
4a469c2a543662d4b1826c3cba309d216db0e831
33,473
from typing import Dict
from typing import Any


def _get_nested_metadata(dataset: Dict[str, Any], prefix: str) -> Dict[str, Any]:
    """Generate a metadata dictionary using flattened metadata keys.

    Args:
        dataset: dictionary containing the dataset keys and values. Keys
            are flattened.
        prefix: common prefix of the metadata fields.

    Returns:
        Nested dictionary with the episode metadata.
        If the dataset contains:
            {
                'metadata/v1/v2': 1,
                'metadata/v3': 2,
            }
        and prefix='metadata', it returns:
            {
                'v1': {
                    'v2': 1,
                },
                'v3': 2,
            }
        It assumes that the flattened metadata keys are well-formed.
    """
    episode_metadata = {}
    for k in dataset.keys():
        if f'{prefix}/' not in k:
            continue
        keys = k.split('/')[1:]
        nested_dict = episode_metadata
        leaf_value = dataset[k]
        for index, nested_key in enumerate(keys):
            if index == (len(keys) - 1):
                nested_dict[nested_key] = leaf_value
            else:
                if nested_key not in nested_dict:
                    nested_dict[nested_key] = {}
                nested_dict = nested_dict[nested_key]
    return episode_metadata
6c2e2b430cc8a977c5cf2136ad456f3f533afe22
33,478
import base64


def str2b64(string: str) -> str:
    """
    Encode a string as base64.

    :param string: source string
    :return: base64-encoded string
    """
    return base64.b64encode(string.encode("utf8")).decode()
2d6c0fc14df29426c64c91690db63bb49f5807da
33,481
def vvo2max(vo2max):
    """
    Calculates velocity (kilometers/hour) at a specified VO2Max
    (mL/(kg*min)).

    args:
        vo2max (float): VO2Max, given in mL/(kg * min)

    Returns:
        float: kilometers / hour
    """
    return vo2max / 3.5
a90c8fc20710782d991731324483d37e7cbf3a54
33,484
import numbers


def get_dtype(item):
    """
    Attempt to get the datatype from an item.

    >>> get_dtype(1)
    'float32'
    >>> get_dtype(True)
    'bool'
    >>> get_dtype(1.1)
    'float32'
    >>> get_dtype([1])
    'float32'
    >>> get_dtype([[[1, 2, 3]]])
    'float32'
    >>> get_dtype(np.array([True, False, True], dtype=bool))
    'bool'
    """
    if hasattr(item, "dtype"):
        return item.dtype.name
    elif isinstance(item, bool):
        return "bool"
    elif isinstance(item, str):
        return "str"
    elif isinstance(item, numbers.Real):
        return "float32"
    else:
        try:
            return get_dtype(item[0])
        except Exception:  # was a bare except
            return None
9f908527af2c508c331f692c86f214d30c6b2dc0
33,487
import torch


def quat_to_rmat(quaternion: torch.Tensor) -> torch.Tensor:
    """Converts quaternion(s) to rotation matrix.

    The quaternion should be in (w, x, y, z) format.

    Args:
        quaternion (torch.Tensor): a tensor containing a quaternion to be
            converted. The tensor can be of shape (*, 4).

    Return:
        torch.Tensor: the rotation matrix of shape (*, 3, 3).
    """
    if not isinstance(quaternion, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError(
            "Input must be a tensor of shape (*, 4). Got {}".format(
                quaternion.shape))

    # unpack the normalized quaternion components
    w, x, y, z = torch.chunk(quaternion, chunks=4, dim=-1)

    # compute the actual conversion
    tx: torch.Tensor = 2.0 * x
    ty: torch.Tensor = 2.0 * y
    tz: torch.Tensor = 2.0 * z
    twx: torch.Tensor = tx * w
    twy: torch.Tensor = ty * w
    twz: torch.Tensor = tz * w
    txx: torch.Tensor = tx * x
    txy: torch.Tensor = ty * x
    txz: torch.Tensor = tz * x
    tyy: torch.Tensor = ty * y
    tyz: torch.Tensor = tz * y
    tzz: torch.Tensor = tz * z
    one: torch.Tensor = torch.tensor(1., device=quaternion.device)

    matrix: torch.Tensor = torch.stack([
        one - (tyy + tzz), txy - twz, txz + twy,
        txy + twz, one - (txx + tzz), tyz - twx,
        txz - twy, tyz + twx, one - (txx + tyy)
    ], dim=-1)

    shape = quaternion.shape[:-1] + (3, 3)
    return matrix.view(shape)
83f86b41842843b316c2b9ca13f5c1d1eab38dd9
33,489
def citem2higher(citem):
    """This gets the higher representation of a given :class:``citem``,
    that is, a :class:``cinsn_t`` or :class:``cexpr_t``.

    :param citem: a :class:``citem`` object
    :type citem: :class:``citem``
    """
    if citem.is_expr():
        return citem.cexpr
    return citem.cinsn
5055467250308a01cfc82ea980b63066cfdecb5b
33,491
def parse_modified(full_dict, ignored_keys=('#', '?')):
    """
    Extract 'staged' and 'modified' counts from Git status lines.

    Arguments
    ---------
    full_dict: dict
        full meta data dictionary
    ignored_keys: iterable
        keys that should not contribute towards the staged and modified
        counts (e.g., branch meta data or untracked files)

    Returns
    -------
    list
        a list of two counts: [num_staged, num_modified]
    """
    counts = [0, 0]
    for k in full_dict:
        if k not in ignored_keys:
            values = [x.split()[1].index('.') for x in full_dict[k]]
            for v in values:
                counts[v] += 1
    return counts
d8d05a3afd5c57d8f2e10bb2992a0dfd1b7a0363
33,493
import re


def _add_folders_to_path(path_line, folders_to_add_list):
    """Ensures that the given list of folders are inside the given line
    (for the PATH environment variable).

    Args:
        path_line(str): line in /etc/environment for the PATH environment
            variable
        folders_to_add_list(list of str): list of strings, where each string
            is a folder that must be present in the PATH environment
            variable. This list is assumed to be free of duplicate folders.

    Returns:
        (bool, str): The boolean value indicates if the line for the PATH
            environment variable has been modified; true means modified,
            false means not modified. The string value is the new line for
            the PATH environment variable, with all the folders in the
            `folders_to_add_list`.
    """
    PATH_STRING_TEMPLATE = "PATH=\"{folders}\""
    # strip off surrounding quotes
    match_obj = re.match(r"""^PATH=['"]*(.*)$""", path_line)
    existing_folders_line = None
    if match_obj is None:
        return (True, PATH_STRING_TEMPLATE.format(
            folders=":".join(folders_to_add_list)
        ),)
    else:
        # strip trailing quotes.
        # We cannot just add a ['"]* to the above regex due to how greedy
        # matching works. It's possible to use non-greedy pattern matching
        # but we do not do that here.
        existing_folders_line = re.sub(r"""['"]*$""", "", match_obj.group(1))

    # obtain existing folders
    existing_folders_list = [
        folder for folder in existing_folders_line.split(":")
        if folder.strip() != ""
    ]
    existing_folders_set = set(existing_folders_list)
    path_line_modified = False
    for folder in folders_to_add_list:
        if folder not in existing_folders_set:
            path_line_modified = True
            existing_folders_list.append(folder)
            existing_folders_set.add(folder)

    return (path_line_modified, PATH_STRING_TEMPLATE.format(
        folders=":".join(existing_folders_list)
    ),)
d7d1940c4794f483eebcf6c70ae120de222c7034
33,498
def tuple_to_m_list(tup: tuple, c: str = '') -> str:
    """Return a string representation of a tuple to be used as an NBT list
    in Minecraft."""
    if type(tup[0]) is float:
        return '[' + ', '.join(tuple('{:f}'.format(i) + c for i in tup)) + ']'
    else:
        return '[' + ', '.join(tuple(str(i) + c for i in tup)) + ']'
7d4346e288d7751a6e710b605aed0a1fc50521c7
33,499
def get_distance(cell1, cell2):
    """Calculates the distance between two cells."""
    return abs(cell1.position - cell2.position)
e1845663c40a97bc6e4bcf56867cc9268632fa17
33,500
def convert(stat):
    """Convert byte value to pretty string."""
    size = 1024 * 1024 * 1024 * 1024 * 1024
    for mag in ['P', 'T', 'G', 'M', 'K']:
        if stat > size:
            return "%10.2f %s" % (float(stat) / size, mag)
        size = size / 1024
    return "%10d " % stat
619e228fa652dd876a42958e39bb5dd9c000c020
33,504
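A few sample values run through the byte pretty-printer above:

print(convert(1536))         # '      1.50 K'
print(convert(3 * 1024**3))  # '      3.00 G'
print(convert(512))          # '       512 '  — below 1K, printed as a plain count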
import torch


def parse_alignment(line):
    """
    Parses a single line from the alignment file.

    Args:
        line (str): String containing the alignment of the format:
            <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
            <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.

    Returns:
        torch.IntTensor: packed alignments of shape (2 * m).
    """
    alignments = line.strip().split()
    parsed_alignment = torch.IntTensor(2 * len(alignments))
    for idx, alignment in enumerate(alignments):
        src_idx, tgt_idx = alignment.split("-")
        parsed_alignment[2 * idx] = int(src_idx)
        parsed_alignment[2 * idx + 1] = int(tgt_idx)
    return parsed_alignment
4f2953b331eb81bffb3fc7cda19fe3d96826cadb
33,509
def update_dict(old_dict, values):
    """Update a dictionary without changing the original object."""
    new_dict = old_dict.copy()
    new_dict.update(values)
    return new_dict
c1246e1849904ea23864d5e11f734497c6ff1e09
33,511
def c2st_rfi(acc_prop, acc_base, M_prop, M_base, g):
    """
    Args:
        acc_prop (float): Proposed model accuracy.
        acc_base (float): Baseline model accuracy.
        M_prop (int): Number of parameters for proposed model.
        M_base (int): Number of parameters for baseline model.
        g (function): Scalar-valued function.
    """
    delta_prop = g(acc_prop - 0.5)
    delta_base = g(acc_base - 0.5)
    return 1 - (M_prop / M_base) * (delta_prop / delta_base)
383c89bb6488c8a3564a950990e275b02d887133
33,512
def render_template(template, context):
    """
    Generate an HTML test report.

    Args:
        template (Template): Jinja2 Template object containing the template
            to render
        context (dict): the context to pass to the template

    Returns:
        str: the contents of the rendered template
    """
    return template.render(context)
5cf0b16855a62439b6b4b76f383bea684fb9b4ec
33,514
import hashlib


def create_base_url(softwareversion):
    """
    Make the root URL for production server files.

    :param softwareversion: Software version to hash.
    :type softwareversion: str
    """
    # Hash software version
    swhash = hashlib.sha1(softwareversion.encode('utf-8'))
    hashedsoftwareversion = swhash.hexdigest()

    # Root of all urls
    baseurl = "http://cdn.fs.sl.blackberry.com/fs/qnx/production/{0}".format(
        hashedsoftwareversion)

    return baseurl
cda1283505f5e31208f39e12d422ccb99826c702
33,515
def num2char(numpair, key):
    """Takes in a numpair like '34' and returns the character at row 3
    (i.e. the 4th row, counting from zero) and column 4 (the 5th column)
    of the key."""
    row_num = int(numpair[0])
    column_num = int(numpair[1])
    return key[row_num][column_num]
a86cf1ef327d2f1fbedf8b661e812c4d486fb3ac
33,521
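A short sketch of the num2char record above with a hypothetical 5x5 Polybius-style key square:

# Hypothetical key square; rows and columns are 0-indexed.
key = [
    'ABCDE',
    'FGHIK',
    'LMNOP',
    'QRSTU',
    'VWXYZ',
]
print(num2char('12', key))  # 'H' — row 1, column 2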
def draw_labels(ax,
                labels,
                positions,
                scores=None,
                class_names=None,
                color='w',
                font_size=8,
                scales=None,
                horizontal_alignment='left'):
    """Draw labels on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        labels (ndarray): The labels with the shape of (n, ).
        positions (ndarray): The positions to draw each labels.
        scores (ndarray): The scores for each labels.
        class_names (list[str]): The class names.
        color (list[tuple] | matplotlib.color): The colors for labels.
        font_size (int): Font size of texts. Default: 8.
        scales (list[float]): Scales of texts. Default: None.
        horizontal_alignment (str): The horizontal alignment method of
            texts. Default: 'left'.

    Returns:
        matplotlib.Axes: The result axes.
    """
    for i, (pos, label) in enumerate(zip(positions, labels)):
        label_text = class_names[
            label] if class_names is not None else f'class {label}'
        if scores is not None:
            label_text += f'|{scores[i]:.02f}'
        text_color = color[i] if isinstance(color, list) else color
        font_size_mask = font_size if scales is None else font_size * scales[i]
        ax.text(
            pos[0],
            pos[1],
            f'{label_text}',
            bbox={
                'facecolor': 'black',
                'alpha': 0.8,
                'pad': 0.7,
                'edgecolor': 'none'
            },
            color=text_color,
            fontsize=font_size_mask,
            verticalalignment='top',
            horizontalalignment=horizontal_alignment)
    return ax
1d8551bce6421e4e697dc33667a51111a0d2c9d4
33,522
import base64


def basic_auth_creds(request):
    """
    Extract any HTTP Basic authentication credentials for the request.

    Returns a tuple with the HTTP Basic access authentication credentials
    ``(username, password)`` if provided, otherwise ``None``.

    :param request: the request object
    :type request: pyramid.request.Request

    :returns: a tuple of (username, password) or None
    :rtype: tuple or NoneType
    """
    try:
        authtype, value = request.authorization
    except TypeError:
        # no authorization header
        return None

    if authtype.lower() != 'basic':
        return None

    try:
        user_pass_bytes = base64.standard_b64decode(value)
    except TypeError:
        # failed to decode
        return None

    try:
        # See the lengthy comment in the tests about why we assume UTF-8
        # encoding here.
        user_pass = user_pass_bytes.decode('utf-8')
    except UnicodeError:
        # not UTF-8
        return None

    try:
        username, password = user_pass.split(':', 1)
    except ValueError:
        # not enough values to unpack
        return None

    return (username, password)
285e9732d77ebc5cd0ac96f8579d743b7bf527f0
33,528
def bottom_index_iter(shape):
    """Iterator for the bottom boundary indices of a structured grid."""
    return range(0, shape[1])
ee952e8c5129a4f504414756361ce55e0f85a095
33,531
def qit(fmpp, f0pp, fmf0=4.88):
    """Calculate qIt.

    qIt = (fmf0 / ((fmpp / f0pp) - 1)) - 1

    :param fmpp: Fm''
    :param f0pp: F0''
    :param fmf0: Fv/Fm (default: 4.88)
    :returns: qIt (float)
    """
    return (fmf0 / ((fmpp / f0pp) - 1)) - 1
4fc18ff9604b7514ff4bd8f6df0c8640b7514fbe
33,534
def map_msg_extent(msg):
    """
    Returns the extent of the map in world coordinates.

    :param msg: (nav_msgs.OccupancyMap|gmapping.doubleMap) A map message.
    :return: (list) The extents of the map in world coordinates
        [x0, x1, y0, y1]
    """
    w = msg.info.width
    h = msg.info.height

    # Set the plot's extension in world coordinates for meaningful plot ticks
    delta = msg.info.resolution
    x0 = msg.info.origin.position.x
    y0 = msg.info.origin.position.y
    x1 = x0 + w * delta
    y1 = y0 + h * delta

    extent = [x0, x1, y0, y1]
    return extent
0e597bd77a7c64bea2a689718574944307facf1b
33,537
def set_invenio(ctx, production):
    """Add Invenio details (api urls, communities) to the context object.

    Parameters
    ----------
    ctx: click context obj
        Api details
    production: bool
        If True use the production api, if False use the sandbox

    Returns
    -------
    ctx: click context obj
        Api details with url and community added
    """
    if production:
        base_url = 'https://oneclimate.dmponline.cloud.edu.au/api'
    else:
        base_url = 'https://test.dmponline.cloud.edu.au/api'
    ctx.obj['url'] = f'{base_url}/records'
    ctx.obj['deposit'] = f'{base_url}/records'
    ctx.obj['communities'] = f'{base_url}/communities'
    return ctx
f34d0c2806f9a8594d4cc07037daa6288686f786
33,538
def map_range(
    x: float, in_min: float, in_max: float, out_min: float, out_max: float
) -> float:
    """
    Maps a number from one range to another. Somewhat similar to the Arduino
    ``map()`` function, but returns a floating point result, and constrains
    the output value to be between ``out_min`` and ``out_max``. If ``in_min``
    is greater than ``in_max`` or ``out_min`` is greater than ``out_max``,
    the corresponding range is reversed, allowing, for example, mapping a
    range of 0-10 to 50-0.

    :param float in_min: Start value of input range.
    :param float in_max: End value of input range.
    :param float out_min: Start value of output range.
    :param float out_max: End value of output range.
    :return: Returns value mapped to new range.
    :rtype: float
    """
    in_range = in_max - in_min
    in_delta = x - in_min
    if in_range != 0:
        mapped = in_delta / in_range
    elif in_delta != 0:
        mapped = in_delta
    else:
        mapped = 0.5
    mapped *= out_max - out_min
    mapped += out_min
    if out_min <= out_max:
        return max(min(mapped, out_max), out_min)
    return min(max(mapped, out_max), out_min)
d85affb68b711236fcf455876c5fd6f8f3d9940c
33,540
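Sample calls for the map_range record above, including clamping and a reversed output range:

print(map_range(5, 0, 10, 0, 100))   # 50.0
print(map_range(15, 0, 10, 0, 100))  # 100.0 — clamped to the output range
print(map_range(2, 0, 10, 50, 0))    # 40.0 — reversed output range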
def flatten_nested_lists(activation_maps):
    """Flattens a nested list of depth 3 in a row major order.

    Args:
        activation_maps: list of list of list of z3.ExprRef with dimensions
            (channels, activation_map_size, activation_map_size),
            activation maps.

    Returns:
        list of z3.ExprRef.
    """
    flattened_activation_maps = []
    for activation_map in activation_maps:
        for activation_map_row in activation_map:
            flattened_activation_maps.extend(activation_map_row)
    return flattened_activation_maps
7eee52d809dbc659f94623634814ccaacd575183
33,541
def recursive_conditional_map(xr, f, condition):
    """Walks recursively through iterable data structure ``xr``. Applies
    ``f`` on objects that satisfy ``condition``."""
    return tuple(f(x) if condition(x)
                 else recursive_conditional_map(x, f, condition)
                 for x in xr)
807a9ce5ac42cd10ad7cfbcb42f1912bb7fca1a0
33,543
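A small sketch of the recursive walker above, squaring the integers of a nested structure:

# Square every int in an arbitrarily nested structure; non-matching
# levels are rebuilt as tuples.
nested = [1, (2, [3, 4]), 5]
result = recursive_conditional_map(nested, lambda x: x * x,
                                   lambda x: isinstance(x, int))
print(result)  # (1, (4, (9, 16)), 25)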
import queue


def get_messages_from_queue(mp_queue, timeout=0.01):
    """Safely get all messages from a multiprocessing queue.

    Args:
        mp_queue (queue): a multiprocess Queue instance
        timeout (float): seconds to block other processes out from the queue

    Returns:
        list: List of messages or an empty list if there weren't any
    """
    msgs = []
    # According to the python docs
    # https://docs.python.org/2/library/multiprocessing.html
    # after putting an object on an empty queue there may be an
    # infinitesimal delay before the queue's empty() method returns False
    #
    # We've actually run into this (SPT-1354) so we'll first kick the
    # tires with a get() and then see if there are more using empty()
    try:
        msgs.append(mp_queue.get(True, timeout))
    except queue.Empty:
        pass
    else:
        while not mp_queue.empty():
            msgs.append(mp_queue.get_nowait())
    return msgs
1d9aa7f404f87206d48c881d4ed50dcde8b8006a
33,544
def filter_sql_query(log):
    """Extract SQL statements from log."""
    sqls = []
    in_sql = False
    for q in log:
        if 'sqlalchemy.engine.base.Engine' in q:
            # Remove '2017-11-22 15:17:14,810 INFO [sqlalchemy.engine.base.Engine] '
            q = q[61:].strip()
            sqls.append(q)
            in_sql = True
        elif in_sql and not q.startswith('2017-'):
            # Rest of previous SQL query: append to previous sql
            sqls[-1] = sqls[-1] + ' ' + q.strip()
        else:
            in_sql = False
    return sqls
b0340b73408ee7dce3ebbcb34f75ac225114d611
33,551
from datetime import datetime


def parse_iso(timestamp: str) -> datetime:
    """A function to convert the ISO 8601 timestamp to :class:`datetime`.

    Parameters
    ----------
    timestamp: str
        The ISO 8601 timestamp to be converted.

    Returns
    -------
    datetime
        The converted :class:`datetime` timestamp.
    """
    return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f%z')
54a313a1100d67b3411452aa60f79826bce8ab66
33,558
def mplayer_cmd(song, volume=100):
    """Defines a command string to launch mplayer."""
    return "mplayer %s -volume %.3f" % (song, volume)
e33e2439d43d7becf30a0e355fe238687635fc53
33,560
def parse_fasta_header(header):
    """
    Parses a FASTA format header (with or without the initial '>') and
    returns a tuple of sequence id and sequence name/description.

    If NCBI SeqID format (gi|gi-number|gb|accession etc.) is detected, the
    first id in the list is used as the canonical id (see
    http://www.ncbi.nlm.nih.gov/books/NBK21097/#A631 ).
    """
    if header[0] == '>':
        header = header[1:]
    tokens = header.split('|')
    # check to see if we have an NCBI-style header
    if header.find("|") != -1 and len(tokens[0]) <= 3:
        # "gi|ginumber|gb|accession bla bla" becomes "gi|ginumber"
        seqid = "%s|%s" % (tokens[0], tokens[1].split()[0])
        name = tokens[-1:][0].strip()
    # otherwise just split on spaces & hope for the best
    else:
        tokens = header.split()
        seqid = tokens[0]
        name = header[0:-1].strip()
    return seqid, name
ee51e9aab6ae75d7d0a59ff3e2adc6211e2cf897
33,562
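Two sample headers run through the parser above:

print(parse_fasta_header('>gi|129295|sp|P01013 OVAX_CHICK GENE X PROTEIN'))
# ('gi|129295', 'P01013 OVAX_CHICK GENE X PROTEIN')
print(parse_fasta_header('>seq1 hypothetical protein'))
# ('seq1', 'seq1 hypothetical protei')
# note: in the non-NCBI branch, name is header[0:-1], which drops the last
# character — apparently written for lines that still carry a trailing newline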
import torch


def pack(inputs, is_tensor=False):
    """Pack the inputs into a tuple if they were a single tensor."""
    single = torch.is_tensor(inputs)
    outputs = (inputs,) if single else inputs
    return (outputs, single) if is_tensor else outputs
14c1f7c16e0871d1fa7ec265e4db062cdc62b82d
33,570
def convert_signature_id(sigid):
    """Standardize the signature ID to XXX-XXX if info is available."""
    escaped_sigid = sigid.replace(' ', '').replace('-', '').upper()
    if len(escaped_sigid) == 6:
        return "%s-%s" % (escaped_sigid[:3], escaped_sigid[3:])
    else:
        return sigid.upper()
63f0af55415d58a7db4791f716e35a6dbacc8e89
33,572
def get_srid(crs):
    """Returns the SRID for the provided CRS definition.

    The CRS can be defined in the following formats
    - urn:ogc:def:crs:EPSG::4326
    - EPSG:4326
    - 4326
    """
    if ':' in crs:
        srid = crs.split(':')[-1]
    else:
        srid = crs
    return int(srid)
7780ed484ddb7d653b99198f15e1d8178e60aced
33,575
def find_matching_paren(string, index, lparen='(', rparen=')'):
    """Find the closing paren corresponding to the open paren at <index>
    in <string>. Optionally, can provide other characters to match on.

    If found, returns the index of the matching parenthesis. If not found,
    returns -1.
    """
    if not string[index] == lparen:
        raise ValueError("Character at index %d is '%s'. Expected '%s'"
                         % (index, string[index], lparen))
    index += 1
    count = 1
    while index < len(string) and count > 0:
        while index < len(string) and string[index] not in (lparen, rparen):
            index += 1
        if index >= len(string):
            # ran off the end without finding a match (the flattened
            # original could raise IndexError / loop here)
            break
        if string[index] == lparen:
            count += 1
        elif string[index] == rparen:
            count -= 1
        if count == 0:
            return index
        index += 1  # step past the paren just processed
    return -1
3216a020403eb33f557f3f032f0986e078384421
33,580
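Quick checks for the paren matcher above (against the repaired version of the record):

s = 'f(g(x), h(y))'
print(find_matching_paren(s, 1))   # 12 — the ')' closing the '(' at index 1
print(find_matching_paren(s, 3))   # 5
print(find_matching_paren('(unclosed', 0))  # -1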
import json


def from_json(text):
    """Parse text as json."""
    # If `text` is a byte string, decode it as utf-8.
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    return json.loads(text)
3374c29a5e36dd096f4cb1c3d6692bb9af242386
33,583
def is_success(code):
    """Return whether the status code is in the list considered successful."""
    okay = [200, 202]
    return code in okay
442ac0b94a14afe31be26cee454f7a4e69e46d7c
33,589
def exponential_smoothing(series, alpha):
    """
    Exponential smoothing weights all of the observations, while
    exponentially decreasing the weights as we move further back in time.
    The exponentiality is hidden in the recursiveness of the function:
    y_hat[n] = alpha * y[n] + (1 - alpha) * y_hat[n - 1]

    :param series: dataframe with time stamps
    :param alpha: float [0.0, 1.0], smoothing parameter. The smaller alpha
        is, the more influence the previous observations have, and the
        smoother the series is
    :return: exponentially smoothed series; predicts one observation in
        the future
    """
    result = [series[0]]  # first value is same as series
    for n in range(1, len(series)):
        result.append(alpha * series[n] + (1 - alpha) * result[n - 1])
    return result
0d62b329daea56355ba81e0bff5cac9b65858877
33,590
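A worked run of the smoother above with alpha = 0.5:

series = [10, 20, 30, 40]
print(exponential_smoothing(series, 0.5))  # [10, 15.0, 22.5, 31.25]
# Each output is halfway between the new observation and the previous
# smoothed value.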
def _iterate_over_nested_obj(obj, key):
    """Iterates over a nested object looking for ``key``.

    It recurses into two types of data:
    * list
    * dict
    For any other data type it returns None.

    Args:
        obj (any type): object to process
        key (str, int): key to find in object
    """
    if isinstance(obj, dict):
        if obj.get(key):
            return obj[key]
    elif isinstance(obj, list):
        for item in obj:
            value = _iterate_over_nested_obj(item, key)
            if value:
                return value
        return None
    else:
        return None
7b6352d6f24a8753b700c2c6491455460d5a4274
33,593
def ExperimentTemplate() -> str:
    """A template with Markdown syntax.

    :return: str with Markdown template
    """
    return """
Experiment
==========

Any [markdown code](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) can be used to describe this experiment.
For instance, you can find the automatically generated used settings of this run below.

Current Settings
----------------

| Argument | Value |
| -------- | ----- |
"""
302d41a33dc9bfebfdca53980a87c8c77e8f475a
33,600
def encode_bool(value: bool) -> bytes:
    """Encodes a boolean."""
    return bytes([int(value)])
fdfc695dcede9df5e79df8789837cd30a1deb0f8
33,603
def std_label(value):
    """Given Crystal specific uppercase species names, returns the
    capitalized versions."""
    labels = []
    for label in value:
        labels.append(label.lower().capitalize())
    return labels
ffad92d9ed272bf9fabcc650e2e9b1e967d00246
33,614
import hashlib


def compute_hash(filepath: str) -> str:
    """Compute an MD5 hash for a filepath string."""
    h = hashlib.md5()
    h.update(filepath.encode())
    return h.hexdigest()
ac311fd236a5250402231e506387a2a42073af3e
33,618
def add_pem_headfoot(public_key):
    """
    Return string, representing PEM text for a public key.

    Keyword Parameters:
    public_key -- String, representing the public key text

    >>> add_pem_headfoot('foo')
    '-----BEGIN PUBLIC KEY-----\\nfoo\\n-----END PUBLIC KEY-----'
    """
    preamble = "-----BEGIN PUBLIC KEY-----\n"
    suffix = "\n-----END PUBLIC KEY-----"
    return preamble + public_key + suffix
dcb093f942d47f6a11bd2dc82441ba6e82f55b01
33,619
import functools
import operator


def prod(it):
    """Product of an iterable."""
    return functools.reduce(operator.mul, it)
e20df189d56656f680782579759c5080e6cb75c8
33,620
def extract_dict_key(dataframe, column, key, new_column=None, separator='.'):
    """
    Extract values of ``key`` into ``new_column``. If key is missing,
    ``None`` is added to the column.

    .. code-block:: python

        >>> df = DataFrame({
        ...    'trial_num': [1, 2, 1, 2],
        ...    'subject': [1, 1, 2, 2],
        ...    'samples': [
        ...        {'A': 1, 'B': 2, 'C': None},
        ...        {'A': 3, 'B': 4, 'C': 5},
        ...        {'A': 6, 'B': 7, 'C': None},
        ...        None,
        ...    ]
        ... })
        >>> df.pipe(extract_dict_key, 'samples', key='A')
           trial_num  subject  samples.A                      samples
        0          1        1          1  {'A': 1, 'B': 2, 'C': None}
        1          2        1          3     {'A': 3, 'B': 4, 'C': 5}
        2          1        2          6  {'A': 6, 'B': 7, 'C': None}
        3          2        2        NaN                          NaN

    :param dataframe: The DataFrame object to work on.
    :type dataframe: :class:`DataFrame <pandas.DataFrame>`
    :param str column: The name of the column which should be extracted.
    :param str key: Key that should be extracted.
    :param str new_column: Name of the new column. By default, ``column``
        will be applied as prefix to ``key``.
    :param str separator: The separator between ``column`` and ``key`` if
        ``new_column`` is not specified.

    :returns: The extracted DataFrame
    :rtype: :class:`DataFrame <pandas.DataFrame>`
    """
    new_column = (new_column or '{}{}{}'.format(column, separator, key)) \
        if new_column != "" else key
    dataframe.loc[:, new_column] = dataframe[column].apply(
        lambda x: x.get(key) if isinstance(x, dict) else x
    ).rename(new_column)
    return dataframe
5567a3e8e837e45b851779942e2c9729d4fe856c
33,622
def compression(path):
    """Based on filename, is compression being used?"""
    compress = None
    ext = path.suffix
    if ext == ".gz":
        compress = True
    elif ext == ".dil":
        compress = False
    else:
        raise Exception(f"invalid file extension [{ext}], must be .dil or .dil.gz")
    return compress
37d556c149bf15876b352979e00f12033c40b796
33,623