content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def get_dparse2_supported_file_name(file_name):
    """Return `file_name` if it names a dependency file dparse2 supports, else None.

    :param file_name: path or file name string to check.
    :return: the unchanged `file_name` when it ends with a supported
        dependency-file name, otherwise None.
    """
    # str.endswith accepts a tuple of suffixes, so one call replaces the
    # manual loop.  This is kludgy but the upstream data structure and API
    # need the original name back rather than a boolean.
    supported_suffixes = (
        'Pipfile.lock',
        'Pipfile',
        'conda.yml',
        'setup.cfg',
    )
    if file_name.endswith(supported_suffixes):
        return file_name
    return None
ce4e1120f6d7ddc403d8eb351df72bb5b8d6e9c0
228,561
import re def _is_valid_frequency(value: str) -> bool: """Returns true if the frequency passed is an instance of string, is not None, and adheres to the regex patterns for frequency strings. """ pattern = r"^\d+[smhdwM]$" return isinstance(value, str) and re.match(pattern, value) is not None
b450ae2b18f06d006b3693bc6281cf793ebeae29
612,438
def filename_handler_null(fname):
    """Identity filename handler: returns its input untouched.

    Args:
        fname (str): A file name.

    Returns:
        str: The same file name, unmodified.
    """
    return fname
36393198ef8aa6723e175e41c3493c81f64e08f0
470,235
def parse_bm_alleles_col(_str):
    """Parse the 'alleles' column of a biomart response.

    An empty string yields an empty list; otherwise the value is split on
    '/' into its individual alleles.
    """
    return [] if _str == '' else _str.split('/')
31e562608781e14decca447073b45bae1f20c8f4
691,562
import __main__ as main


def is_ipython_interpreter() -> bool:  # pragma: no cover
    """Return if we are in an IPython interpreter or not."""
    # NOTE(review): presumably IPython exposes an ``__IPYTHON__`` marker
    # reachable from the running __main__ module, while plain CPython does
    # not -- confirm against the IPython version in use.
    return hasattr(main, '__IPYTHON__')
a06509e767641a56fbe10e777dab174165ed9af5
350,801
def _map_relation(c, language='any'): """ Map related concept or collection, leaving out the relations. :param c: the concept or collection to map :param string language: Language to render the relation's label in :rtype: :class:`dict` """ label = c.label(language) return { 'id': c.id, 'type': c.type, 'uri': c.uri, 'label': label.label if label else None }
ee8d82eaf2e1153b943d1851c85c81546c427f16
550,377
def get_counts_by_tract(mf_with_tract, output_col_name):
    """Count mortgage foreclosures per Census tract per year.

    Parameters
    ----------
    mf_with_tract : pandas df
        Mortgage foreclosure data, one row per foreclosed property, with
        'year' and 'GEOID' columns.
    output_col_name : str
        Name for the resulting count column
        (e.g. "num_mortgage_foreclosures" or "lien-foreclosures").

    Returns
    -------
    pandas df
        Dataframe with columns ['year', 'GEOID', output_col_name].
    """
    # Name the count column directly instead of renaming afterwards.
    per_tract_year = mf_with_tract.groupby(["year", "GEOID"]).size()
    return per_tract_year.to_frame(output_col_name).reset_index()
e4d204889ab67e28e1abf90cc0618fd23fedaef7
114,573
def get_azure_users(azure, users):
    """
    Get list of user data from Azure Active Directory

    Parameters
    ----------
    az: object
        flask-dance Azure object
    users: list
        email addresses

    Returns
    -------
    list
        Azure user principal names (should match email)
    """
    employees = []
    # break the user list down, only allowed to have 15 filters
    n = 15
    users_list = [users[i * n:(i + 1) * n] for i in range((len(users) + n - 1) // n)]
    # loop through users and add to query
    for u1 in users_list:
        # Build one Graph query per batch, OR-ing a startswith filter
        # per email address.
        query_url = "/v1.0/users?$select=userPrincipalName&$filter="
        count = 0
        for u2 in u1:
            if count == 0:
                query_url += "startswith(userPrincipalName,'{}')".format(
                    u2['email'])
            else:
                query_url += " or startswith(userPrincipalName,'{}')".format(
                    u2['email'])
            count += 1
        azure_resp = azure.get(query_url)
        # NOTE(review): assert is stripped under ``python -O``; a failed
        # request would then fall through silently -- consider raising.
        assert azure_resp.ok
        # process the response into a list of employees
        payload = azure_resp.json()
        for user in payload["value"]:
            employees.append(user['userPrincipalName'].lower())
    return employees
6708421e6c4e93a047c597cf721a7fe225578c7b
474,633
def match_with_gaps(my_word, other_word):
    """
    my_word: string with _ characters, current guess of secret word
    other_word: string, regular English word
    returns: boolean, True if all the actual letters of my_word match the
        corresponding letters of other_word, or the letter is the special
        symbol _ , and my_word and other_word are of the same length;
        False otherwise:
    """
    # Guesses are often written as "a_ _ le"; collapse "_ " into "_" so
    # lengths line up with the plain candidate word.
    my_word = my_word.replace('_ ', '_')
    # Letters already revealed (guessed) in earlier positions.
    other_word_letters = []
    # Letters hidden behind a '_' -- known NOT to have been guessed yet.
    non_other_word_letters = []
    if len(my_word) != len(other_word):
        return False
    for index, letter in enumerate(my_word):
        other_letter = other_word[index]
        if letter == '_':
            non_other_word_letters.append(other_letter)
            # A letter revealed elsewhere cannot also be hidden here.
            if other_letter in other_word_letters:
                return False
        else:
            other_word_letters.append(other_letter)
            # Revealed letters must match exactly and must not appear in
            # any hidden position seen so far.
            if letter != other_letter or letter in non_other_word_letters:
                return False
    return True
0e4907caed4931b9d85fa8571c476d88691ee1a0
312,993
def _get_json_schema_node_id(fully_qualified_name: str) -> str: """Returns the reference id (i.e. HTML fragment id) for a schema.""" return 'json-%s' % (fully_qualified_name,)
eafdacc1e7c4f2feabcd5b486fb264d33332266d
26,701
def calculate_rtu_inter_char(baudrate):
    """Return the Modbus RTU inter-character delay (seconds) for a baudrate.

    At 19200 baud or below the delay is the 11-bit character time; faster
    links use a fixed 0.5 ms.
    """
    if baudrate > 19200:
        return 0.0005
    return 11.0 / baudrate
dcda292631a335e69ad209812aee684876081ca7
166,314
def split_part_key(key):
    """
    Split the standard part key.  Only returns port if present in key.

    Parameters
    ----------
    key : str
        Standard part key as hpn:rev

    Returns
    -------
    tuple
        hpn, rev, [,port]
    """
    parts = key.split(":")
    hpn, rev = parts[0], parts[1]
    if len(parts) == 2:
        return hpn, rev
    return hpn, rev, parts[2]
dfd59f64a442b98fb744d78110a8eba600500d5f
317,951
import six


def utf8(string):
    """Convert a Unicode or utf-8 encoded string to a native Python str.

    This function's name is misleading: while on Python 2 it really returns
    a utf-8-encoded string, on Python 3 it returns a native Unicode Python
    str.

    This function is robust to incorrectly-encoded utf-8 strings, and will
    replace undecodable sequences with the Unicode U+FFFD REPLACEMENT
    CHARACTER.
    """
    if isinstance(string, bytes):
        # 'replace' substitutes U+FFFD for malformed byte sequences.
        unicode_string = string.decode('utf-8', errors='replace')
    else:
        unicode_string = six.text_type(string)  # to handle QString
    # ensure_str yields bytes on Py2, str on Py3.
    return six.ensure_str(unicode_string, 'utf-8')
aa696782d03fc8e96046e47e8595c007ba446bfe
573,635
def get_end_linenumber(text):
    """Utility to get the last line's number in a Tk text widget.

    Tk indices look like "12.3" (line.column); the index of the character
    just before the trailing implicit newline ('end-1c') therefore starts
    with the last line number.
    """
    last_index = text.index('end-1c')
    return int(float(last_index))
8768a6ce777087ee1730b41efd9a41180672c33b
638,889
import re


def stat_check(string):
    """Validate a filter value for fields like atime, mtime, ctime.

    These should be integer values, optionally prefixed by a single '+' or
    '-', with no spaces.

    Eg: 1 5 -10 +20
    Invalid: 1.5 abc $#@ + 5 a5

    Return: the string passed in.
    Raises: ValueError if the check doesn't pass.
    """
    # Bugfix: the old character class ``[+,-]`` also accepted a literal
    # comma, so ",5" passed validation.  fullmatch additionally stops
    # ``$`` from matching just before a trailing newline.
    if re.fullmatch(r"[+-]?\d+", string):
        return string
    raise ValueError("Integers only, optionally prefixed with + or -")
2d40873d3838d817cfd0932f436fa8eeb2cdc1b8
622,302
def find_seq_rec(block, name, case_sensitive=True):
    """Given part of a sequence ID, find the first matching record.

    Raises ValueError when no record's 'id' contains ``name``.
    """
    if case_sensitive:
        def matches(rec):
            return name in rec['id']
    else:
        needle = name.upper()
        def matches(rec):
            return needle in rec['id'].upper()
    for rec in block['sequences']:
        if matches(rec):
            return rec
    raise ValueError("No sequence ID matches %s" % repr(name))
58d590cda5a427416ade3756edfff7b1eec6546e
522,370
import re


def parse_show_core_dump(raw_result):
    """
    Parse the show core-dump output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show core-dump \
        in a dictionary of the form:

     ::

        {
            0:{
                'instance_id': 1202,
                'timestamp': '2016-04-19 08:12:11',
                'crash_reason': 'Segmentation Fault',
                'daemon_name': 'ops-fand'
            }
        }
    """
    # Daemon core-dump rows: name, instance id, a crash-reason column of
    # up to 30 chars, then an 18-20 char timestamp.
    show_re = (
        r'\s*(?P<daemon_name>\S+)'
        r'\s*(?P<instance_id>\S+)'
        r'\s*(?P<crash_reason>.{1,30})'
        r'\s*(?P<timestamp>[0-9\s:-]{18,20})'
    )
    # Kernel rows only carry the daemon name and timestamp.
    show_re_kernel = (
        r'\s*(?P<daemon_name>\S+)'
        r'\s*(?P<timestamp>[0-9\s:-]{18,20})'
    )
    if "No core dumps are present" in raw_result:
        return {}
    result = {}
    core_dump_count = 0
    coredumps = raw_result.splitlines()
    for line in coredumps:
        if("Total number of core dumps" in line):
            # Summary footer marks the end of the table.
            break
        elif("=====" in line or "Crash Reason" in line):
            # Separator / header lines carry no data.
            continue
        else:
            if "kernel" in line:
                re_result = re.match(show_re_kernel, line)
            else:
                re_result = re.match(show_re, line)
            assert re_result
            coredump_result = re_result.groupdict()
            if "kernel" in line:
                # Kernel dumps have no reason/id columns; fill defaults.
                coredump_result['crash_reason'] = 'unknown'
                coredump_result['instance_id'] = '1'
            for key, value in coredump_result.items():
                if value is not None:
                    if value.isdigit():
                        coredump_result[key] = int(value)
                    else:
                        coredump_result[key] = value.strip()
            result[core_dump_count] = coredump_result
            core_dump_count += 1
    return result
124ed157bbb4cc8a60a4b05af94dc3b85489fcfd
553,580
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
    """Merge segments A and B, add [CLS] and [SEP] and build tokentypes.

    Segment A, the leading [CLS], and its trailing [SEP] get token type 0;
    segment B and its trailing [SEP] get token type 1.  The B-side [SEP]
    is appended only when tokens_b is non-empty.
    """
    # [CLS] + segment A + [SEP], all token type 0.
    tokens = [cls_id] + list(tokens_a) + [sep_id]
    tokentypes = [0] * len(tokens)
    # Segment B, token type 1.
    tokens += list(tokens_b)
    tokentypes += [1] * len(tokens_b)
    if tokens_b:
        # Closing [SEP] for segment B.
        tokens.append(sep_id)
        tokentypes.append(1)
    return tokens, tokentypes
0f72f261ff1e0ee2d304321cd0bbc0af9c662b4b
688,190
def extract_files_to_lint(file_diffs):
    """Grab only files out of a list of FileDiffs that have a ACMRT status."""
    # Guard: None or an empty list means nothing to lint.
    if not file_diffs:
        return []
    return [diff.name for diff in file_diffs if diff.status in b'ACMRT']
3594bd942f132f7c815df3423eaef6c0de882588
82,123
def copy_dict_or_new(original: dict) -> dict:
    """Return a shallow copy of ``original``, or a new empty dict when it is None."""
    return {} if original is None else dict(original)
d3315b39fe26e97aca1ebf4fa320f87852348bbc
443,128
def get_message(
    items: list[str], sort: bool = False, oxford: bool = True, separator: str = ","
) -> str:
    """Build a "You are bringing ..." sentence from a list of items.

    >>> get_message(["chips", "pie", "soda"], separator=";")
    'You are bringing chips; pie; and soda.'
    >>> get_message(["chips", "pie", "soda"], oxford=False)
    'You are bringing chips, pie and soda.'
    >>> get_message([])
    Traceback (most recent call last):
    ValueError: Empty list.

    :param items: things being brought; must be non-empty.
    :param sort: sort the items alphabetically before joining.
    :param oxford: include the separator before the final "and" when there
        are more than two items.
    :param separator: separator placed between listed items.
    :raises ValueError: if ``items`` is empty.
    """
    if not items:
        raise ValueError("Empty list.")
    # Bugfix: sort a copy instead of mutating the caller's list in place
    # (the old ``items.sort() if sort else items`` both mutated and
    # discarded its value).
    if sort:
        items = sorted(items)
    if len(items) > 1:
        connect = f"{separator if oxford and len(items) > 2 else ''} and "
    else:
        connect = ""
    return f"You are bringing {f'{separator} '.join(items[:-1])}{connect}{items[-1]}."
78c81340c7cfa62497bf4554b6dc0dd66c083e06
344,320
def format_datetime(session):
    """Convert the date/datetime on ``session.data`` to its string form.

    Uses ISO-8601 when the field's ``date_format`` option is 'iso8601',
    otherwise treats the option as a strftime format.  The formatted
    string is written back onto ``session.data`` and returned; a None
    value is returned untouched.
    """
    if session.data is None:
        return session.data
    fmt = session.field.opts.date_format
    if fmt == 'iso8601':
        formatted = session.data.isoformat()
    else:
        formatted = session.data.strftime(fmt)
    session.data = formatted
    return session.data
53a99843e47dde6b82cb48e77fd553bbf65dd646
698,881
def conv_F2C(f):
    """Convert Fahrenheit to Celsius (using the 0.555556 approximation of 5/9)."""
    offset = f - 32.0
    return offset * 0.555556
24daec5ea411dd1eb793193b8b6c42f85236d813
393,900
import re


def decodetype(type_):
    """Decode a type string into (read, prio, write, update) flags.

    >>> decodetype('r')
    (True, None, False, False)
    >>> decodetype('w')
    (False, None, True, False)
    >>> decodetype('u')
    (False, None, False, True)
    """
    read, prio = False, None
    m = re.match(r"(r)([1-9]?)", type_)
    if m:
        read = m.group(1) is not None
        if m.group(2):
            prio = int(m.group(2))
    write = "w" in type_
    # Update is implied by any extra character when the type is not a read.
    update = not read and len(type_) > (1 if write else 0)
    return read, prio, write, update
26fb491c6759cef4c38c2832afa330070229f104
415,354
def is_part_of(L1, L2):
    """Return True if *L2* contains all elements of *L1*, False otherwise."""
    # all() with a generator replaces the manual loop; it short-circuits
    # the same way and is trivially True for an empty L1.
    return all(item in L2 for item in L1)
1518489ffab2ea2efdc8aeb04b94359f3279f08d
345,577
import pathlib


def get_fixture_path(filename: str) -> pathlib.Path:
    """Get path of a fixture (under the 'fixtures' dir next to this file)."""
    return pathlib.Path(__file__).parent / "fixtures" / filename
ec058b5204778a39912bf7cb5a3022945f0f8fb5
333,464
import torch


def right_shift_to_zero(x, bits):
    """Right shift with quantization towards zero implementation.

    Parameters
    ----------
    x : torch.int32 or torch.int64
        input tensor.
    bits : int
        number of bits to shift.

    Returns
    -------
    torch.int32 or torch.int64
        right shift to zero result.
    """
    if x.dtype not in (torch.int32, torch.int64):
        raise Exception(
            f'Expected torch.int32 or torch.int64 data, found {x.dtype}.'
        )
    # Shift the magnitude and restore the sign so negative values round
    # toward zero instead of toward -inf (a plain >> floors).
    sign = 2 * (x > 0) - 1
    shifted = (sign * x) >> bits
    # Cast back: the intermediate may promote to torch.int64.
    return (sign * shifted).to(x.dtype)
c83ffdec97dfc57c7f44e14467495b7105cb969a
580,119
import hashlib


def hash_file(filename, hash_type, as_hex=True):
    """
    Hash a file using SHA1 or MD5

    :param filename:
    :param hash_type: 'sha1' or 'md5'
    :param as_hex: True to return a string of hex digits
    :return: The hash of the requested file
    """
    hasher = getattr(hashlib, hash_type)()
    with open(filename, 'rb') as file_to_hash:
        # Stream the file in 1024-byte chunks; read() yields b'' at EOF.
        for chunk in iter(lambda: file_to_hash.read(1024), b''):
            hasher.update(chunk)
    return hasher.hexdigest() if as_hex else hasher.digest()
2e81d506880dd88371f4b764634cd525ee2b762d
535,072
def get_estimated_max_weight(weight, num_reps):
    """Return the estimated max weight for the given weight and reps,
    rounded to two decimal places via string formatting."""
    estimate = weight * (1 + num_reps / 30.0)
    return float(f"{estimate:.2f}")
28a608ee34173b66008f2ba2d7a761ca7b758739
620,857
def dot(a, b):
    """Compute the dot product between the two vectors a and b."""
    total = 0
    for x, y in zip(a, b):
        total += x * y
    return total
93c727467a62ec3ce93a0b612624cb0eab57b120
593,135
def dataclass_param(request):
    """Parametrize over both implementations of the @dataclass decorator."""
    current = request.param
    return current
4954d0fe17b620f592d1899af885d1a221f8ad24
568,908
import math


def prime_check(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)).

    A number is prime if it has exactly two factors: 1 and itself.

    >>> prime_check(2)
    True
    >>> prime_check(27)
    False
    >>> prime_check(563)
    True
    """
    # Negatives, 0, 1 and all even numbers above 2 are not primes.
    if number < 2 or (number > 2 and number % 2 == 0):
        return False
    # 2 and 3 are primes.
    if number < 4:
        return True
    # Only odd divisors up to sqrt(number) need checking.
    return all(number % divisor for divisor in range(3, math.isqrt(number) + 1, 2))
6e67b379262437f5eb04473c9259d98997566a3e
622,937
def doprefix(site_url):
    """Return the protocol prefix to prepend to ``site_url`` if needed.

    :return: '' when the url already names a scheme, else 'http://'.
    """
    # startswith accepts a tuple of prefixes, replacing the chained ``or``.
    if site_url.startswith(("http://", "https://")):
        return ""
    return "http://"
fc53d4f9f113be79b68becec8b7332c96d779127
123,624
from typing import Union


def num_cvt(string: str) -> Union[int, float]:
    """Convert a numeric string to int or float, tolerating thousands commas.

    >>> num_cvt("2007")
    2007
    >>> num_cvt("3.14")
    3.14
    >>> num_cvt("1,234")
    1234
    """
    for convert in (int, float):
        try:
            return convert(string)
        except ValueError:
            pass
    # Last resort: strip thousands separators and parse as int.
    return int(string.replace(",", ""))
d99019a5bbd5a6490adf85758c14b815a0c8a3b3
568,690
def convert_alt_list_to_string(alt):
    """
    The ALT field in the VCF file is represented as a list; convert it to
    a comma separated string.
    """
    # join + generator replaces the manual append loop.
    return ','.join(str(var) for var in alt)
11aef24df506094d711eb6e8e644a1272d461c78
73,123
from pathlib import Path
import yaml


def yaml_to_dict(filepath):
    """Get a dictionary from a yaml file.

    :param str filepath: the file path to the yaml file.
    :return: dictionary representation of the yaml file.
    :rtype: dict
    """
    # safe_load refuses arbitrary YAML tags, so untrusted files cannot
    # instantiate Python objects.
    with Path(filepath).open('r') as f:
        return yaml.safe_load(f)
411435694fd8bc2f107decace982a46854d5b437
75,625
def should_include(page, target_name):
    """Report whether a given page should be part of the given target.

    A page with no "targets" key is never included; otherwise membership
    of ``target_name`` in its targets decides.
    """
    # Collapses the redundant if/else chains into one boolean expression.
    return "targets" in page and target_name in page["targets"]
e3b69ca957ad152a783a177e0cc59355c4e54c27
358,219
def float_to_text(value, sig):
    """
    Convert float to text string for computing hash.
    Preserve up to N significant digits given by sig.

    :param value: the float value to convert
    :param sig: how many significant digits to output
    """
    if value == 0:
        # Normalize -0.0 so it renders (and hashes) identically to 0.0.
        value = 0.
    return f'{value:.{sig}g}'
2f43348ce9f434dc62064f84b8d61e572a02d7cc
608,798
def is_integer(variable):
    """Check whether ``variable`` is exactly of type int.

    Note: isinstance is deliberately NOT used -- subclasses such as bool
    must not count as integers here, matching the original strict check.
    """
    # ``is`` compares type-object identity; the idiomatic form of the
    # old ``type(variable) == int``.
    return type(variable) is int
d8f13deab6aea7fbd3b2b546a43284ae5b787f15
416,293
def _models_save_all_function_name(model): """Returns the name of the function to save a list of models as existing records""" return '{}_save_all_existing'.format(model.get_table_name())
14cae95731dcb0e42ebd7b2be31d3a7294d9a120
583,609
def empty_dropper(item):
    """Recursively clean empty strings out of nested list/dict data.

    Dict values equal to '' become None; list elements equal to '' are
    dropped entirely; a bare '' becomes None.  Everything else passes
    through unchanged.
    """
    if isinstance(item, dict):
        return {key: empty_dropper(value) for key, value in item.items()}
    if isinstance(item, list):
        return [empty_dropper(value) for value in item if value != '']
    if item == '':
        return None
    return item
98d56a74cd5a5fe36f17371dd42c4a3eaaf5729f
148,888
def find_tri_div(n):
    """Find the smallest triangular number with more than `n` divisors.

    :param n: divisor-count threshold; values below 1 return 1.
    :return: the first triangular number whose divisor count exceeds n.
    """
    if n < 1:
        return 1
    # Start with the 2nd triangular number.
    tri, num = 3, 2
    while True:
        if _count_divisors(tri) > n:
            return tri
        num += 1
        tri += num


def _count_divisors(value):
    """Count the divisors of ``value`` by trial division up to sqrt(value)."""
    count = 0
    i = 1
    while i * i <= value:
        if value % i == 0:
            # i and value // i pair up; a perfect square root counts once.
            # (Bugfix: the old loop counted the square root twice, so e.g.
            # 36 was credited with 10 divisors instead of 9.)
            count += 1 if i * i == value else 2
        i += 1
    return count
628b1e7a62a5dd438be8ebcdeb5a67800808d024
127,712
def get_tip_labels(tree_or_node):
    """Returns a `set` of tip labels for a node or tree.

    Tries the tree-level ``leaf_node_iter`` first, falling back to the
    node-level ``leaf_iter`` when the former is absent.
    """
    try:
        leaves = tree_or_node.leaf_node_iter()
    except AttributeError:
        leaves = tree_or_node.leaf_iter()
    return {leaf.taxon.label for leaf in leaves}
2d6edfb1e7bf9671cd3b78b73aa006339b21ee0c
82,942
def v2_subscriptions_response(subscriptions_response):
    """Define a fixture that returns a V2 subscriptions response."""
    # Upgrade the first subscription's system version in place.
    system = subscriptions_response["subscriptions"][0]["location"]["system"]
    system["version"] = 2
    return subscriptions_response
59319dd263672bb37b68a996fb9b713160bc8449
309,070
def get_min_dist_node(dist, visited):
    """
    Get the unvisited node with the minimum distance.

    Args:
        dist (dict): maps node -> list whose first element is the minimum
            distance and second is the closest node.
        visited (array): nodes already visited.

    Returns:
        (string): unvisited node with minimum distance.
    """
    # min() over (distance, node) pairs replaces the manual
    # build-list / index / lookup dance; ties break on node name the
    # same way tuple ordering did before.
    candidates = [(dist[node][0], node) for node in dist if node not in visited]
    return min(candidates)[1]
b13bfd4b5c0203ae6b4629100cd8afba4be51450
158,273
def my_split(text, delimiter=' '):
    """Split a string at each occurrence of the delimiter.

    Each occurrence of the delimiter ends the current partition; only
    non-empty partitions are collected, so consecutive delimiters produce
    no empty entries.  A blank space (' ') is the default delimiter.

    Parameters
    ----------
    text : str
        The string to split.
    delimiter : str, optional
        The delimiter between partitions (default ' ').

    Return
    ------
    splitted : list
        The non-empty partitions, in order.
    """
    splitted = []
    current = ''
    for ch in text:
        if ch == delimiter:
            if current:
                splitted.append(current)
            current = ''
        else:
            current += ch
    if current:
        splitted.append(current)
    return splitted
b0e357b2efeb828ceda5959a8f46a4b0bb03cafe
218,547
def find_cms_attribute(attrs, name):
    """
    Find and return CMS attribute values of a given type.

    :param attrs: The :class:`.cms.CMSAttributes` object.
    :param name: The attribute type as a string (as defined in ``asn1crypto``).
    :return: The values associated with the requested type, if present.
    :raise KeyError: Raised when no such type entry could be found in the
        :class:`.cms.CMSAttributes` object.
    """
    matching = (attr['values'] for attr in attrs if attr['type'].native == name)
    for values in matching:
        return values
    raise KeyError(f'Unable to locate attribute {name}.')
7ce3d9c12ca919b7b35539f1a85ddfe4a96cb449
489,638
def seconds_between(left, right):
    """Calculate the seconds between two datetime values.

    Args:
        left (time): Greater time value
        right (time): Smaller time value

    Returns:
        float: total difference between the two values in seconds.
    """
    delta = left - right
    return delta.total_seconds()
071a01b98ca310d008df43b5cb8c56bd76ddcb78
414,582
def count(geo):
    """Return the number of atoms in the geometry (its length)."""
    return len(geo)
73efc9a1c32a9c8abc1f4fcda38b2cf1cb23fd26
452,146
def _transpose_list(list_of_lists): """Transpose a list of lists.""" return list(map(list, zip(*list_of_lists)))
854c8cea5ef7f54bf4c41bb505980474c8daba28
486,919
def gps_to_utc(gpssec):
    """
    Convert GPS seconds to UTC seconds.

    Parameters
    ----------
    gpssec: int
        Time in GPS seconds.

    Returns
    -------
    Time in UTC seconds.

    Notes
    -----
    The code is ported from Offline.

    Examples
    --------
    >>> gps_to_utc(0)  # Jan 6th, 1980
    315964800
    """
    kSecPerDay = 24 * 3600
    # Offset from the Unix epoch (1 Jan 1970) to the GPS epoch
    # (6 Jan 1980): 10 years, 7 of which contribute leap days plus the
    # 5 days into January.
    kUTCGPSOffset0 = (10 * 365 + 7) * kSecPerDay
    # (gps_second_of_insertion, cumulative_leap_seconds) pairs; day counts
    # are relative to the GPS epoch.
    kLeapSecondList = (
        ((361 + 0 * 365 + 0 + 181) * kSecPerDay + 0, 1),    # 1 JUL 1981
        ((361 + 1 * 365 + 0 + 181) * kSecPerDay + 1, 2),    # 1 JUL 1982
        ((361 + 2 * 365 + 0 + 181) * kSecPerDay + 2, 3),    # 1 JUL 1983
        ((361 + 4 * 365 + 1 + 181) * kSecPerDay + 3, 4),    # 1 JUL 1985
        ((361 + 7 * 365 + 1) * kSecPerDay + 4, 5),          # 1 JAN 1988
        ((361 + 9 * 365 + 2) * kSecPerDay + 5, 6),          # 1 JAN 1990
        ((361 + 10 * 365 + 2) * kSecPerDay + 6, 7),         # 1 JAN 1991
        ((361 + 11 * 365 + 3 + 181) * kSecPerDay + 7, 8),   # 1 JUL 1992
        ((361 + 12 * 365 + 3 + 181) * kSecPerDay + 8, 9),   # 1 JUL 1993
        ((361 + 13 * 365 + 3 + 181) * kSecPerDay + 9, 10),  # 1 JUL 1994
        ((361 + 15 * 365 + 3) * kSecPerDay + 10, 11),       # 1 JAN 1996
        ((361 + 16 * 365 + 4 + 181) * kSecPerDay + 11, 12), # 1 JUL 1997
        ((361 + 18 * 365 + 4) * kSecPerDay + 12, 13),       # 1 JAN 1999
        # DV: 2000 IS a leap year since it is divisible by 400,
        # ie leap years here are 2000 and 2004 -> leap days = 6
        ((361 + 25 * 365 + 6) * kSecPerDay + 13, 14),       # 1 JAN 2006
        ((361 + 28 * 365 + 7) * kSecPerDay + 14, 15),       # 1 JAN 2009
        ((361 + 31 * 365 + 8 + 181) * kSecPerDay + 15, 16), # 1 JUL 2012
        ((361 + 34 * 365 + 8 + 181) * kSecPerDay + 16, 17), # 1 JUL 2015
        ((361 + 36 * 365 + 9) * kSecPerDay + 17, 18)        # 1 JAN 2017
    )
    # Walk the table newest-first: the first insertion at or before the
    # given GPS second fixes the cumulative leap-second count.
    leapSeconds = 0
    for x in reversed(kLeapSecondList):
        if gpssec >= x[0]:
            leapSeconds = x[1]
            break
    return gpssec + kUTCGPSOffset0 - leapSeconds
342c479df8d4c864592494e8d50452e9f02c04d5
18,804
import random


def sample(max_index, size=40000, split=8000):
    """Sample ``size`` distinct indices below ``max_index`` and split them.

    The last ``split`` sampled indices become the test set; the rest are
    the train set.  Returns (train, test) lists.
    """
    picked = random.sample(range(max_index), size)
    return picked[:-split], picked[-split:]
aa650781c35d0b73b9edc441f0ebcd4577fa5821
587,201
def toHexString(value):
    """
    Return a clean hex (no ``0x`` prefix) string for a given integer.
    """
    # Bugfix: f-string 'x' formatting replaces ``str(hex(value))[2:]``,
    # which mangled negative numbers (hex(-255) -> '-0xff' -> 'xff') and
    # wrapped an already-str value in a redundant str() call.
    return f'{value:x}'
9d02cd3cbc469c25c0235de658cf69bb8861d260
589,555
def default_zero(input):
    """
    Return ``input`` rounded to 2 decimals, replacing anything Falsey
    (such as Nones or empty strings) with 0.0.
    """
    value = input if input else 0.0
    return round(value, 2)
d5c06c9f0449277e5fc2b8083335fa7e35595305
40,256
def gpm2m3_s(gpm):
    """Convert flow from gpm to m^3/s (1 gpm = 6.3090196e-05 m^3/s)."""
    conversion = 6.3090196e-05
    return conversion * gpm
84feddeca5bffe56eb17842a97b2a436c916a2db
263,941
import random


def rollDice(dice1=True) -> int:
    """Return a random number between 1 and 6, or between 1 and 12 when
    ``dice1`` is False."""
    if dice1 is True:
        sides = [1, 2, 3, 4, 5, 6]
    else:
        sides = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    return random.choice(sides)
24c74f727e3b620d173182c09bee38d5c29a8af6
138,874
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    if object.__module__ == modname:
        return object.__name__
    return f'{object.__module__}.{object.__name__}'
af4e05b0adaa9c90bb9946edf1dba67a40e78323
766
def divide(n, iterable):
    """Divide the elements from *iterable* into *n* parts, maintaining order.

    >>> [list(c) for c in divide(3, [1, 2, 3, 4, 5, 6, 7])]
    [[1, 2, 3], [4, 5], [6, 7]]

    When the iterable is shorter than *n*, the trailing parts are empty:

    >>> [list(c) for c in divide(5, [1, 2, 3])]
    [[1], [2], [3], [], []]

    This function will exhaust the iterable before returning and may
    require significant storage.  If order is not important, see
    :func:`distribute`, which does not first pull the iterable into
    memory.
    """
    if n < 1:
        raise ValueError('n must be at least 1')
    # Sequences support slicing directly; anything else is pulled into
    # memory once.
    try:
        iterable[:0]
    except TypeError:
        seq = tuple(iterable)
    else:
        seq = iterable
    q, r = divmod(len(seq), n)
    # The first r parts get q + 1 elements each; the rest get q.
    sizes = [q + 1 if i < r else q for i in range(n)]
    parts = []
    start = 0
    for size in sizes:
        parts.append(iter(seq[start:start + size]))
        start += size
    return parts
649b057378e74bb94e5d085d1dd583db677c6616
347,662
import re


def find_alignment_gaps(alignment):
    """Return the positions (as a list) of gap characters '-' in a sequence."""
    return [match.start() for match in re.finditer("-", alignment)]
813d1fe587cec1094475395b9057c820e49557ab
530,699
def fillnan(series, value, **kwargs):
    """Fill NaN values in ``series`` with ``value``.

    NOTE: extra ``**kwargs`` are accepted but ignored -- they are not
    forwarded to ``fillna``.
    """
    filled = series.fillna(value)
    return filled
c7be865ec91f0fc5c385c6ba836697a949300e9f
308,373
import numbers


def is_numeric(value):
    """
    Test if a value is numeric (any :class:`numbers.Number`).

    :param value: input value
    :return: boolean value.
    """
    numeric = isinstance(value, numbers.Number)
    return numeric
297df40e3682f7427d7bae7493d43d013d9523c1
358,522
def unique_fname(full_path: str) -> str:
    """Get unique file name for given full path to MELD data file.

    The return format is '[dialog]_[utterance]'.

    :param full_path: full path to MELD .mp4 data file
    :return: unique id of data file (only unique within dataset directory)
    """
    stem = full_path.split('/')[-1].split('.')[0]
    for fragment in ('dia', 'utt', 'final_videos_test'):
        stem = stem.replace(fragment, '')
    return stem
29bffc8a2028ac126709fe17d9e2e1d2914bf769
26,658
def pastis_matrix_measurements(nseg):
    """Calculate the total number of measurements needed for a PASTIS matrix
    with nseg segments.

    :param nseg: int, total number of segments
    :return: int, total number of measurements (the triangular number
        nseg * (nseg + 1) / 2).
    """
    # Integer floor division avoids the float round-trip of the old
    # ``int((nseg**2 + nseg) / 2)``, which could lose precision for very
    # large nseg.
    return nseg * (nseg + 1) // 2
133cb69837651ac3e6d0891179b365367fe848cc
694,508
import time


def convert_to_minutes(seconds):
    """Format an amount of seconds as an 'MM:SS' string."""
    broken_down = time.gmtime(seconds)
    return time.strftime("%M:%S", broken_down)
51fe1a52f02a60fb3440f035dbefd85410601ff9
329,564
def filter_for_sprl(c):
    """
    Given a BIDSFile object, filter for sprl type file
    """
    try:
        acquisition = c.entities["acquisition"]
    except KeyError:
        # No acquisition entity at all -> not a sprl file.
        return False
    return "sprlcombined" in acquisition
6f05313701ecc01512fedf05709e5e13629c467d
21,920
from typing import Tuple def _unmerge_points( board_points: Tuple[int, ...] ) -> Tuple[Tuple[int, ...], Tuple[int, ...]]: """Return player and opponent board positions starting from their respective ace points.""" player: Tuple[int, ...] = tuple( map( lambda n: 0 if n < 0 else n, board_points, ) ) opponent: Tuple[int, ...] = tuple( map( lambda n: 0 if n > 0 else -n, board_points[::-1], ) ) return player, opponent
25965e023030266cc92e6b1456483204ad2c863a
703,206
import re


def remove_punctuation(string, hyphens_are_separators=True, keep_commas=False):
    """Remove punctuation from a string (regex: drop everything but word
    characters and whitespace) and return the result.

    Keyword arguments:
    string -- the string to remove punctuation from
    hyphens_are_separators -- (optional) replace hyphens with a space first,
        so hyphenated words split instead of concatenating (default True)
    keep_commas -- (optional) don't delete commas if true (default False)
    """
    if hyphens_are_separators:
        string = re.sub(r"\-", " ", string)
    pattern = r"[^\w\s,]" if keep_commas else r"[^\w\s]"
    return re.sub(pattern, "", string)
0cb9184d748fba00ed0a683a2328ddab3bd61a8a
589,003
def compare(a, b):
    """Return -1/0/1 if a is less/equal/greater than b."""
    if a == b:
        return 0
    if a > b:
        return 1
    if a < b:
        return -1
    # Unorderable pairs (e.g. NaN) fall through, as in the original chain.
    return None
88fcfb93da232f13047bd04528f462da322fd967
283,015
def binary_count(a: int) -> int:
    """Count the 1-bits in the binary representation of a non-negative int.

    For example 25 = 0b11001, which contains three 1-bits.

    >>> binary_count(25)
    3
    >>> binary_count(36)
    2
    >>> binary_count(16)
    1
    >>> binary_count(58)
    4
    >>> binary_count(4294967295)
    32
    >>> binary_count(0)
    0
    >>> binary_count(-10)
    Traceback (most recent call last):
        ...
    ValueError: Angka harus positif
    >>> binary_count(0.3)
    Traceback (most recent call last):
        ...
    TypeError: Input harus berupa tipe 'int'
    """
    # Bugfix: check the type BEFORE the sign so that a negative float
    # raises TypeError (as documented) rather than ValueError.
    if isinstance(a, float):
        raise TypeError("Input harus berupa tipe 'int'")
    if a < 0:
        raise ValueError("Angka harus positif")
    return bin(a).count("1")
d8c882be9ccc0fa4d2cc0be134a7579c5df572bb
246,701
def is_success(status):
    """
    Check if HTTP status code is successful (2xx).

    :param status: http status code
    :returns: True if status is successful, else False
    """
    at_least_200 = status >= 200
    at_most_299 = status <= 299
    return at_least_200 and at_most_299
5d28acbc98ebd5b5baaadca1668a29e649c8c1d2
317,119
def _jsnp_unescape(jsn_s): """ Parse and decode given encoded JSON Pointer expression, convert ~1 to / and ~0 to ~. .. note:: JSON Pointer: http://tools.ietf.org/html/rfc6901 >>> _jsnp_unescape("/a~1b") '/a/b' >>> _jsnp_unescape("~1aaa~1~0bbb") '/aaa/~bbb' """ return jsn_s.replace('~1', '/').replace('~0', '~')
36260a38b42119e1f78a2cdd2c3f3994fae8ae47
487,297
import bz2


def decompress(data):
    """
    Decompress bz2-compressed ``data`` via an incremental decompressor.
    """
    decompressor = bz2.BZ2Decompressor()
    return decompressor.decompress(data)
18dee25febbc1e3ce9e709d19129169e60fb928a
369,071
from typing import List
from typing import Dict
import heapq


def top_anomalies(points: List[Dict], n=10) -> List[Dict]:
    """Returns top n anomalies according to severity."""
    def severity_of(point):
        return point["severity"]
    return heapq.nlargest(n, points, key=severity_of)
a7ed3495070e3c8b913137736b1cb841c93443db
118,093
import json


def read_file(file_path, is_json=False):
    """
    Read a file.

    Parameters
    ----------
    file_path : str
        Path that the file should be read from.
    is_json : boolean
        Flag representing if the file is JSON.

    Returns
    -------
    The parsed JSON object when ``is_json`` is True; otherwise the raw
    bytes of the file.
    """
    if is_json:
        with open(file_path, 'r') as metadata_file:
            return json.load(metadata_file)
    # Bugfix: the old code leaked the file handle on this branch
    # (``open(...).read()`` with no close).
    with open(file_path, 'rb') as raw_file:
        return raw_file.read()
12da18361ceb98e76dcb14aefdb48f0688f7e145
379,821
def read_list_from_file(filename):
    """
    Read in a list from a file, assuming every line is a separate entry.

    Args:
        filename - the name of the file that contains the list
    Returns:
        list of stripped lines
    """
    with open(filename, 'r') as inputfile:
        return [line.strip() for line in inputfile]
b4fa66d5f1f7f485ba1d5295557307fa43089a83
365,724
def bdp_B(bw_Mbps, rtt_us):
    """Calculate the bandwidth-delay product in bytes.

    The bandwidth (Mbps) is converted to bytes/second and the RTT
    (microseconds) to seconds before multiplying.
    """
    bytes_per_sec = bw_Mbps / 8. * 1e6
    rtt_sec = rtt_us / 1e6
    return bytes_per_sec * rtt_sec
b2c5a23cab22812b3f6bd2cf184b2bc0d21ced02
274,737
def floatnan(s):
    """Convert a string to float; return NaN on conversion error."""
    try:
        result = float(s)
    except ValueError:
        result = float('NaN')
    return result
b618013ea76346e248b21ed0fc5f0b6a3cbbab95
622,270
def print_perf(start, end):
    """Print and return the time between start and end (floats in secs)
    as milliseconds."""
    elapsed_ms = (end - start) * 1000
    print(f'{elapsed_ms:.2f} milliseconds elapsed')
    return elapsed_ms
796b38069e25d77697601fdedb9a86409d12eaca
168,804
import itertools


def contains_peroxide(structure, relative_cutoff=1.2):
    """
    Determine whether a structure contains peroxide anions.

    Args:
        structure: Input structure.
        relative_cutoff: The peroxide bond distance is 1.49 Angstrom.
            Relative_cutoff * 1.49 stipulates the maximum distance two O
            atoms must be to each other to be considered a peroxide.

    Returns:
        Boolean indicating if structure contains a peroxide anion.
    """
    max_dist = relative_cutoff * 1.49
    # Collect the sites whose species include oxygen.
    o_sites = [
        site for site in structure
        if "O" in [sp.symbol for sp in site.species_and_occu.keys()]
    ]
    # Any O-O pair closer than the cutoff counts as a peroxide.
    return any(
        first.distance(second) < max_dist
        for first, second in itertools.combinations(o_sites, 2)
    )
5db51989f823b3a6cc113267371459e8cf37d920
649,296
import requests


def getSTCNItem(
        itemLocations: list,
        holdingArchive: str = "Amsterdam, Rijksmuseum Research Library",
        ENDPOINT: str = "http://data.bibliotheken.nl/sparql"):
    """
    Find the STCN item for a given itemLocation and holdingArchive.

    Args:
        itemLocations (list): The itemLocations of the item to search for.
        holdingArchive (str, optional): The holdingArchive of the item to
            search for.
        ENDPOINT (str, optional): The SPARQL endpoint to use.

    Returns:
        List of dicts with with items
    """
    books = []
    for itemLocation in itemLocations:
        # One SPARQL query per shelfmark; the archive match is a substring
        # test so partial archive names still work.
        query = f"""
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX schema: <http://schema.org/>
        SELECT * WHERE {{
            ?book a schema:IndividualProduct ;
                schema:itemLocation ?shelfmark ;
                schema:holdingArchive ?archive .
            FILTER(?shelfmark = "{itemLocation}")
            FILTER(CONTAINS(?archive, "{holdingArchive}"))
        }}
        """
        response = requests.get(
            ENDPOINT,
            headers={"Accept": "application/sparql-results+json"},
            params={"query": query},
        )
        payload = response.json()
        # Flatten each SPARQL binding into a plain {variable: value} dict.
        for binding in payload["results"]["bindings"]:
            books.append({key: cell["value"] for key, cell in binding.items()})
    return books
4520d115a2fd2dc4397f0030109e400d9d8a1317
129,925
def add_iam_binding(policy, member, role):
    """Adds binding to given policy.

    Args:
        policy: Policy.
        member: Account, e.g. user:joe@doe.com, serviceAccount:..., etc.
        role: Role

    Returns:
        True if binding was added. False, if binding was already present in
        policy.
    """
    # Find the first existing binding for this role, if any.
    existing = next(
        (binding for binding in policy['bindings'] if binding['role'] == role),
        None)
    if existing is None:
        # Role not present yet: create a fresh binding for it.
        policy['bindings'].append({'role': role, 'members': [member]})
        return True
    if member in existing['members']:
        # Member is already bound to role. Nothing further to do.
        return False
    existing['members'].append(member)
    return True
90769af439ee4a054365fece170fd7887edbec6c
252,968
def tokens_to_text(token_list):
    """Convert a list of binja tokens to plain text

    Mostly useful for testing
    """
    pieces = [token.text for token in token_list]
    return ''.join(pieces)
9aae7abd351578f52b37de28ae886ffa3fee06a7
590,764
def _flatten_array_to_str(array): """ Helper function to reduce an array to a string to make it immutable. Args: array (array): Array to make to string Returns: string: String where each value from the array is appended to each other """ s = "" for i in array: s += str(i) return s
229bb0f3908717d87964c24cb2e13f357727c837
619,820
def skeaf_code(aiida_local_code_factory):
    """Get a skeaf code.

    NOTE(review): presumably a pytest fixture built on the AiiDA
    ``aiida_local_code_factory`` fixture -- confirm the ``@pytest.fixture``
    decorator at the definition site.  ``diff`` stands in for the real
    skeaf executable so tests do not require it to be installed.
    """
    return aiida_local_code_factory(executable="diff", entry_point="skeaf")
7aa3b904796ad6f38e1abfda96959269732c3f93
596,051
def _maybe_correct_vars(vars): """Change vars from string to singleton tuple of string, if necessary.""" if isinstance(vars, str): return (vars,) else: return vars
1aa46b03988f06a3697b703991c64899e173d0eb
40,453
import re


def is_valid_remote_clone_id(remote_clone_job_id):
    """
    Validates a remote clone job ID, also known as the remote clone job
    "job_name".  A valid remote clone job name should look like:
    dstrclone-00000001 - It should end with 8 hexadecimal characters in
    lower case.

    :type remote_clone_job_id: str
    :param remote_clone_job_id: The remote clone job name to be validated.

    :rtype: bool
    :return: True or False depending on whether remote_clone_id passes
        validation.
    """
    if remote_clone_job_id is None:
        return False
    # Either source or destination prefix, then exactly 8 lowercase hex chars.
    return bool(re.match(r'^(src|dst)rclone-[0-9a-f]{8}$', remote_clone_job_id))
186700e080bc372679d14b7af6ed8e83dcc4f04b
620,099
def get_streamer_id(video_str):
    """Gets the streamer ID based on the name of the video

    :param video_str: String name of the video
    :return: String containing the streamers ID
    """
    first = video_str.find('_')
    if first == -1:
        # No delimiter at all: matches the original loop falling through
        # and implicitly returning None.
        return None
    second = video_str.find('_', first + 1)
    if second == -1:
        # Opening delimiter never closed: also None, as in the original.
        return None
    return video_str[first + 1:second]
f2cc61d55d401d727a753c833bd85a2916effe43
553,018
import torch


def get_cyclic_scheduler(optimizer, batches_per_epoch, base_lr=10e-6, max_lr=0.1, step_size_factor=5):
    """
    Get cyclic scheduler.

    Note that the resulting optimizer's step function is called after each batch!

    :param optimizer: optimizer
    :type optimizer: torch.optim.Optimizer
    :param batches_per_epoch: number of batches per epoch
    :type batches_per_epoch: int
    :param base_lr: base learning rate
    :type base_lr: float
    :param max_lr: max learning rate
    :type max_lr: float
    :param step_size_factor: step size in multiples of batches
    :type step_size_factor: int
    :return: scheduler
    :rtype: torch.optim.lr_scheduler.LRScheduler
    """
    # Up-phase length in batches; the down-phase defaults to the same length.
    step_size_up = batches_per_epoch * step_size_factor
    return torch.optim.lr_scheduler.CyclicLR(
        optimizer,
        base_lr=base_lr,
        max_lr=max_lr,
        step_size_up=step_size_up,
    )
826837231f5ec5e9db159ee2ff00a2f49e265d60
252,619
def polynomial_3_differential(x, a, b, c, d):
    """Polynomial order 3 where f(x) = a + b * x + c * x**2 + d * x**3

    Differential(f(x) = b + 2.0 * c * x**1 + 3.0 * d * x**2
    """
    # ``a`` drops out of the derivative; kept in the signature for symmetry
    # with the polynomial itself.
    linear_term = 2.0 * c * x
    quadratic_term = 3.0 * d * x ** 2
    return b + linear_term + quadratic_term
40bd4df94b956cd7e7145f850446840a913b09fc
432,914
def robot_point_creator(dict_point):
    """Convert the robot-shape configuration dictionary into point lists.

    Args:
        dict_point (dict): shape configuration; each value is a mapping
            with 'x' and 'y' keys, in the robot's own frame.

    Returns:
        list: ``[xs, ys]`` coordinate lists, with the first point repeated
        at the end so the outline is closed.
    """
    xs = [dict_point[name]['x'] for name in dict_point]
    ys = [dict_point[name]['y'] for name in dict_point]
    # Close the polygon by repeating the first vertex.
    xs.append(xs[0])
    ys.append(ys[0])
    return [xs, ys]
33ce0607ec7c59c903c63050abeafe4f8dcd1259
68,364
import re


def split_revisions(version_string):
    """
    Returns a list of revisions pulled from the version string.
    Revisions are separated by -, _, or ~
    """
    separator_pattern = r'[-_~]'
    return re.split(separator_pattern, version_string)
d0d49d540ee770826fc3e615ec361203d8f0d65b
509,299
def abs_list(in_list):
    """
    Return abs() of each element in in_list, e.g. [2, 3+4j, -6] -> [2, 5, 6]
    """
    return list(map(abs, in_list))
ea3721ad8cc334d1a049756e4dcff55a936dd0fe
367,276
def guess_module_name(fct):
    """
    Guesses the module name based on a function.

    @param fct function
    @return module name
    """
    parts = fct.__module__.split('.')
    # Code under a top-level 'src' folder: report the package below it.
    if parts[0] == 'src':
        return parts[1]
    return parts[0]
e7153ad73178840108c60167fc155abf32edca61
574,256
def register(client, username, password, confirm_password):
    """ Fires a post request to the register route """
    # Test helper: submits the registration form through the test client and
    # follows any redirect so the caller receives the final response page.
    # NOTE(review): assumes the /register view reads exactly these form
    # field names -- confirm against the application's form definition.
    return client.post("/register", data=dict(
        username = username,
        password = password,
        confirm_password = confirm_password
    ), follow_redirects=True)
f5557a6c9aee7af58bbccb6e085bdf81d12a5d7b
405,665
def gather_types(input_step, varname):
    """
    Given and input step, return a SPARQL fragment to gather the types for the step
    :param input_step:
    :return: SPARQL fragment as string
    """
    obj = input_step['object']
    if obj['literal']:
        # Literal objects carry no rdf:type, so there is nothing to gather.
        return ''
    return ' ?' + obj['name'] + ' a ?' + varname + ' . '
b8e07817ee160b05b5e97db9a8e7dc334e3dcf95
312,109
import colorsys


def hsv_to_rgb(h, s, v):
    """
    Convert an HSV tuple to an RGB hex string.

    h -- hue (0-360)
    s -- saturation (0-100)
    v -- value (0-255)

    Returns a hex RGB string, i.e. #123456.
    """
    # Normalise each component to the 0-1 range that colorsys expects.
    fractions = colorsys.hsv_to_rgb(h / 360, s / 100, v / 255)
    red, green, blue = (int(channel * 255) for channel in fractions)
    return '#{:02X}{:02X}{:02X}'.format(red, green, blue)
48bb7e31f16b6c435094aa990bb4d02f06cc37f7
696,673
import requests


def _request_limit_reached(exception):
    """
    Checks if exception was raised because of too many executed requests. (This is a temporal solution and will be
    changed in later package versions.)

    :param exception: Exception raised during download
    :type exception: Exception
    :return: `True` if exception is caused because too many requests were executed at once and `False` otherwise
    :rtype: bool
    """
    # HTTP 429 "Too Many Requests" is the server-side rate-limit signal.
    return isinstance(exception, requests.HTTPError) and \
        exception.response.status_code == requests.status_codes.codes.TOO_MANY_REQUESTS
67280ea48cce3238d0c574ec8ad1b13719df4990
25,197
def CreateHyperspectralImageRectangular(embedding, array_size, coordinates, scale=True):
    """Fill a hyperspectral image from n-dimensional embedding of
    high-dimensional imaging data by rescaling each channel from 0-1
    (optional).

    This function assumes that the data can be automatically reshaped into
    a rectangular array.

    NOTE(review): ``coordinates`` is accepted but never used by the body,
    so no masking of background pixels actually happens -- confirm whether
    that is intentional.

    Parameters
    ----------
    embedding: Pandas DataFrame
        Indicates embedding coordinates from UMAP or another method.
    array_size: tuple
        Indicates size of image.
    coordinates: 1-indexed list of tuples
        Indicates pixel coordinates of image (currently unused).
    scale: Bool (Default: True)
        Rescale pixel intensities on the range of 0-1.

    Returns
    -------
    im: array
        Reconstructed image.
    """
    n_channels = embedding.shape[1]
    # Reshape the flat embedding row-major into (rows, cols, channels).
    image = embedding.values.reshape((array_size[0], array_size[1], n_channels))
    if scale:
        # Min-max scale each channel independently onto [0, 1].
        for channel in range(image.shape[2]):
            channel_data = image[:, :, channel]
            low = channel_data.min()
            span = channel_data.max() - low
            image[:, :, channel] = (channel_data - low) / span
    return image
3760c420a512859efd60c0e34c204a4dae3f0cb6
451,680
import re def _compute_replacement(dependency_version_map, androidx_repository_url, line): """Computes output line for build.gradle from build.gradle.template line. Replaces {{android_repository_url}}, {{androidx_dependency_version}} and {{version_overrides}}. Args: dependency_version_map: An "dependency_group:dependency_name"->dependency_version mapping. androidx_repository_url: URL of the maven repository. line: Input line from the build.gradle.template. """ line = line.replace('{{androidx_repository_url}}', androidx_repository_url) if line.strip() == '{{version_overrides}}': lines = ['versionOverrideMap = [:]'] for dependency, version in dependency_version_map.items(): lines.append(f"versionOverrideMap['{dependency}'] = '{version}'") return '\n'.join(lines) match = re.search(r'\'(\S+):{{androidx_dependency_version}}\'', line) if not match: return line dependency = match.group(1) version = dependency_version_map.get(dependency) if not version: raise Exception(f'Version for {dependency} not found.') return line.replace('{{androidx_dependency_version}}', version)
1c5031e3b0ab483bf8cf06661cab1170fc0bf2c9
402,524
def get_unique_ops_names(all_ops):
    """
    Find unique op names.

    Params:
        all_ops: list, of dictionary of all operations.

    Return:
        set of unique op names.
    """
    return {op['name'] for op in all_ops}
c731ace82342b0dd5c1fb71be5221d7845ff1657
374,878