content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def fmt_price(amt):
    """Return *amt* formatted as a price string: thousands separators, 2 decimals."""
    return f'{amt:,.2f}'
03c36c1d297f26d173bb5d325e9be45c406ac8c7
677,659
def get_surface_specs(surface):
    """Build the OpenAPI specification dict for a SED surface.

    Args:
        surface (:obj:`Surface`): surface whose specifications are extracted

    Returns:
        :obj:`dict` with schema `SedSurface`
    """
    # Required fields: the surface id plus the ids of its three data generators.
    specs = {
        '_type': 'SedSurface',
        'id': surface.id,
        'xDataGenerator': surface.x_data_generator.id,
        'yDataGenerator': surface.y_data_generator.id,
        'zDataGenerator': surface.z_data_generator.id,
    }
    # 'name' is optional; only emitted when truthy (non-empty).
    if surface.name:
        specs['name'] = surface.name
    return specs
c742d1a64868cd3e9a5a38487fa45aef710fc14d
231,216
from typing import Iterable
from typing import Any


def null(it: Iterable[Any]) -> bool:
    """Return True if the iterable yields no elements, False otherwise.

    Unlike ``not list(it)``, this consumes at most one element instead of
    materializing the whole iterable (important for large generators).

    Args:
        it: Iterable object

    Examples:
        null([]) -> True
        null(range(100)) -> False
    """
    for _ in it:
        return False
    return True
c4fdf053bbc0ea37e64f895af577608161a8ef5c
91,578
def describe_list_indices(full_list):
    """Describe the indices of the given list.

    Parameters
    ----------
    full_list : list
        The list of items to order. Items must be hashable.

    Returns
    -------
    unique_elements : list
        The unique elements of the list, in first-appearance order.
    element_indices : dict
        For each unique element, the list of all indices at which it
        appears in the original list.
    """
    unique_elements = []
    element_indices = {}
    # Membership is tested against the dict (O(1)) instead of scanning
    # unique_elements (O(n) per item, O(n^2) overall) — same result, since
    # the dict's keys are exactly the unique elements.
    for i, item in enumerate(full_list):
        if item in element_indices:
            element_indices[item].append(i)
        else:
            unique_elements.append(item)
            element_indices[item] = [i]
    return unique_elements, element_indices
664bcfd63dd0d5d5114ce24a4c7b2850b61364c5
12,447
def arrowheadCoordsForSide(s):
    """Path for an upwards-pointing arrowhead built on an equilateral triangle
    with side length *s*; the origin is at (0, 0).

    Returns a 1-tuple containing the path (a tuple of (x, y) points).
    """
    half_height = 3 ** 0.5 / 2
    path = (
        (0, 0),
        (s / 2, half_height * s / 4),
        (s, 0),
        (s / 2, half_height * s),
    )
    return (path,)
7dd7cb20f76d374c73bcebb69c7bfae09ea6cc86
320,680
from collections import OrderedDict


def map_layers(weight):
    """Rename legacy layer names in a state dict for the bls17 model.

    The pre-trained weights provided for bls17 were trained with different
    layer names; this maps every 'z' in a key to 'w'.

    Args:
        weight: state dict (mapping of layer name -> tensor).

    Returns:
        collections.OrderedDict with renamed keys, insertion order preserved.
    """
    # Fixes: the original imported OrderedDict from ``typing`` (a deprecated
    # generic alias) instead of ``collections``; also str.replace is already
    # a no-op when 'z' is absent, so no conditional is needed.
    return OrderedDict((k.replace('z', 'w'), v) for k, v in weight.items())
21f7e0a9a9447dcffda0c2f60adedc41bc7a17a0
381,540
def functional_border_sizes(border_size):
    """Calculate border sizing used in process to gen user specified border size

    If border_size is negative then a stand-in border size is used to allow
    better keypoint tracking (i think... ?); negative border is then applied
    at end.

    :param border_size: user supplied border size
    :return: (border_size, neg_border_size) tuple of functional border sizes

    >>> functional_border_sizes(100)
    (100, 0)
    >>> functional_border_sizes(-10)
    (100, 110)
    """
    # Non-negative request: use it as-is, nothing to trim afterwards.
    if border_size >= 0:
        return border_size, 0
    # Negative request: work with a 100px stand-in border and record how
    # much must be removed at the end (100 plus the requested amount).
    return 100, 100 + abs(border_size)
3ffd428aed3680c736192ef4568eea8916995ea7
103,554
def upper_bound(arr, value, first, last):
    """Find the upper bound of the value in the array.

    The upper bound is the first element in arr[first:last] that is
    strictly larger than value.

    Args:
        arr: input array, sorted ascending within [first, last)
        value: target value
        first: starting point of the search, inclusive
        last: ending point of the search, exclusive

    Returns:
        index (int): if index == last, the upper bound does not exist;
        otherwise arr[index] > value.
    """
    # bisect_right implements exactly this contract (first index in
    # [lo, hi) whose element is > value), in C, so prefer it over the
    # hand-rolled binary search.
    from bisect import bisect_right
    return bisect_right(arr, value, first, last)
fb66b3d5deedc67156843f18e5b7b60ed58b617e
621,078
def wants_metadata(request):
    """Determine if a client request wants metadata included in the response.

    Checks whether the request's Accept header carries a 'metadata=true'
    qualifier attached to the MIME-type.

    :param request: HTTPServerRequest or equivalent from client
    :return: True if metadata is requested, False otherwise.
    """
    if "Accept" not in request.headers:
        return False
    result = False
    # Everything after the first ';' is a list of qualifiers.
    for qualifier in request.headers["Accept"].split(';')[1:]:
        if '=' not in qualifier:
            continue
        pieces = qualifier.split('=')
        if pieces[0].strip() == "metadata":
            # Later qualifiers overwrite earlier ones, matching a full scan.
            result = str(pieces[1]).strip().lower() == 'true'
    return result
b6ee44ff4ff07ee3a5bdd881e5bb1ec62ef95481
579,889
def kml_start(params):
    """Define basic kml header string.

    :param params: sequence whose first two items are the document name
        and description.
    :return: the opening KML ``<Document>`` fragment.
    """
    header = '''
<Document>
    <name>%s</name>
    <open>1</open>
    <description>%s</description>
'''
    name, description = params[0], params[1]
    return header % (name, description)
c2fa4c1eeff086dfc3baa41ecd067634920b25b1
707,975
def get_max(arr: list) -> int:
    """Return the maximum value in a list.

    Args:
        arr (list): the non-empty list to search.

    Returns:
        int: the largest value found.
    """
    best = arr[0]
    for candidate in arr[1:]:
        if candidate > best:
            best = candidate
    return best
ddd007e509f79bcdd155906220413dec9d81dd21
661,971
def build_features_ba_less_pa_over_cl(df):
    """Add six ((bill amount - pay amount) / credit limit) ratio features.

    Takes the wrangled data DataFrame, computes
    (BILL_AMTi - PAY_AMTi) / LIMIT_BAL for i in 1..6, and returns a NEW
    DataFrame containing the six new features named
    'ba_less_pa_over_cl_1'..'ba_less_pa_over_cl_6' alongside the original
    columns. The input DataFrame is not modified (the docstring always
    promised a new DataFrame; previously the input was mutated in place).
    """
    df = df.copy()
    bill_amount_column_list = ['BILL_AMT%d' % i for i in range(1, 7)]
    pay_amount_column_list = ['PAY_AMT%d' % i for i in range(1, 7)]
    for i, (ba, pa) in enumerate(zip(bill_amount_column_list,
                                     pay_amount_column_list), 1):
        df['ba_less_pa_over_cl_' + str(i)] = (df[ba] - df[pa]) / df['LIMIT_BAL']
    return df
ee00cca2c41061a0726c0f3534a5e2100e0d2e74
346,460
def to_int(s, fallback=0):
    """Try to cast a value to an int; return *fallback* if it cannot be cast.

    :param s: value to convert (str, number, or anything int() accepts)
    :param fallback: value returned when conversion fails (default 0)
    :return: int(s), or fallback on ValueError/TypeError
    """
    # Fixes: the two except branches were identical — merge them; the
    # docstring had the conversion direction backwards ("int to a string").
    try:
        return int(s)
    except (ValueError, TypeError):
        return fallback
c1fcdf6b8a9140139e9e28336a827d736a561d88
626,527
def endpointId_to_entityId(endpointId: str) -> str:
    """Use for converting Alexa endpoint to HA entity id.

    Only the first underscore is swapped for a dot (the domain/object
    separator); any further underscores are kept.
    """
    domain, sep, remainder = endpointId.partition("_")
    if not sep:
        return endpointId
    return f"{domain}.{remainder}"
2290f9c58488172d34a49d1b2756f4bd60c73915
100,853
def update_dict(origin, extras):
    """Update the content of ORIGIN with the content of EXTRAS.

    Neither input is modified; a new dict is returned in which keys from
    EXTRAS override those from ORIGIN.

    :param origin: the dictionary to update
    :type origin: dict
    :param extras: the dictionary to update ORIGIN with
    :type extras: dict
    :rtype: dict
    """
    return {**origin, **extras}
92905992dcac3e23e5f93863de2e78508abc3f0c
412,149
def formatCoreEvent(eventComp):
    """Formats Core event components for use as "vmkperf start [event]".

    :param eventComp: indexable sequence of event components. Indices used
        here: [1] event number (processing order), [3] event name,
        [4] eventSel, [5] unitMask, [6] modifiers, [7] cmask.
        NOTE(review): indices 0 and 2 are unused by this function — the
        full layout is assumed from the caller; verify against the
        producer of eventComp.
    :return: the assembled vmkperf event argument string.
    """
    eventN = eventComp[1]      # Place in which the event was processed
    name = eventComp[3]        # Name of the event
    eventSel = eventComp[4]    # eventSel of the event
    unitMask = eventComp[5]    # unitMask of the event
    modifiers = eventComp[6]   # modifiers of the event
    cmask = eventComp[7]       # cmask of the event
    # Determines which event components were provided with the event, and
    # constructs the event string accordingly.
    event = eventN
    # With neither eventSel nor unitMask available, identify by name instead.
    if eventSel is None and unitMask is None:
        event = name
    if eventSel is not None:
        event = '%s -e%s' % (event, eventSel)
    if unitMask is not None:
        event = '%s -u%s' % (event, unitMask)
    if modifiers is not None:
        event = '%s -d%s' % (event, modifiers)
    if cmask is not None:
        event = '%s -c%s' % (event, cmask)
    return event
db5c7cedb561205069d9b44a7a745e15f0f470bb
213,285
def dict_contain(dict_a, dict_b):
    """Test if all the key:value pairs of dict_b are in dict_a.

    Arguments:
    dict_a -- The dictionary
    dict_b -- The sub-dictionary

    Return:
    True if all the key:value pairs of dict_b are in dict_a,
    False otherwise
    """
    # Fix: iteritems() is Python 2 only and raises AttributeError on
    # Python 3 — use items().
    if len(dict_b) > len(dict_a):
        return False
    for k, v in dict_b.items():
        if not (k in dict_a and dict_a[k] == v):
            return False
    return True
6f44f1bb80c641362bca52fed3fe343a5d9bcdfc
623,845
import torch


def decorate_batch(batch, device='cpu'):
    """Decorate the input batch with a proper device.

    Parameters
    ----------
    batch : torch.Tensor | list | dict
        The input batch; lists and dicts may contain non-tensor objects,
        which are passed through unchanged.
    device : str, optional
        'cpu' or 'cuda'

    Raises
    ------
    Exception
        If the top-level batch is not a tensor, dict, or list.

    Return
    ------
    torch.Tensor | list | dict
        Same structure as the input batch, with tensors moved to *device*.
    """
    if isinstance(batch, torch.Tensor):
        batch = batch.to(device)
        return batch
    elif isinstance(batch, dict):
        # NOTE: dicts are updated in place (and returned), whereas lists
        # below are rebuilt into a new list object.
        for key, value in batch.items():
            if isinstance(value, torch.Tensor):
                batch[key] = value.to(device)
            elif isinstance(value, dict) or isinstance(value, list):
                # Recurse into nested containers.
                batch[key] = decorate_batch(value, device)
            # retain other value types in the batch dict
        return batch
    elif isinstance(batch, list):
        new_batch = []
        for value in batch:
            if isinstance(value, torch.Tensor):
                new_batch.append(value.to(device))
            elif isinstance(value, dict) or isinstance(value, list):
                new_batch.append(decorate_batch(value, device))
            else:
                # retain other value types in the batch list
                new_batch.append(value)
        return new_batch
    else:
        raise Exception('Unsupported batch type {}'.format(type(batch)))
a0bd4a5dff0b5cf6e304aede678c5d56cb93d1dd
6,286
def select_3_with_duplicates(items):
    """Generate selections of 3 items allowing duplicates.

    Each selection is the concatenation items[i] + items[j] + items[k]
    with i <= j <= k, in lexicographic index order.
    """
    n = len(items)
    return [
        items[i] + items[j] + items[k]
        for i in range(n)
        for j in range(i, n)
        for k in range(j, n)
    ]
40535727b33cf7b4595368281603b847781366d7
227,259
def snap_key_to_snapnum(snap_key):
    """Given the name of a snapshot key, finds the associated snapshot number.

    This is necessary because the 0th snapshot key may not be snapshot 000
    and there could be missing snapshots. This function searches backwards
    for a group of digits that identify the snapshot number. If there are
    numbers outside of this cluster they will be disregarded and a warning
    printed. For example, for the key "Sn3p_018" the function returns 18 and
    prints a warning that digits outside the trailing cluster were ignored.

    Parameters
    ----------
    snap_key: String.
        The name of the snapshot key.

    Returns
    ----------
    snapnum: Integer.
        The snapshot number that corresponds to the snapshot key.

    Examples
    ----------
    >>> snap_key_to_snapnum('Snap_018')
    18
    >>> snap_key_to_snapnum('018_Snap')
    18
    """
    snapnum = ""
    reached_numbers = None
    for letter in reversed(snap_key):  # Go backwards through the key.
        if letter.isdigit():
            # A digit after the flag was turned off means a second, separate
            # digit cluster: warn, keep the cluster found so far, and stop.
            if reached_numbers == False and len(snapnum):
                print("--WARNING--")
                print("For Snapshot key '{0}' there were numbers that were not"
                      " clustered together at the end of the key.\nWe assume "
                      "the snapshot number corresponding to this key is {1}; "
                      "please check that this is correct."
                      .format(snap_key, int(snapnum[::-1])))
                break
            # When a number is found, we concatenate it with the others and
            # flag that we have encountered a cluster of numbers.
            snapnum = "{0}{1}".format(snapnum, letter)
            reached_numbers = True
        else:
            # When we reach something that's not a number, turn flag off.
            reached_numbers = False
    snapnum = snapnum[::-1]  # We searched backwards so flip the string around.
    return int(snapnum)
9b30f0efc978e50beaf50427eb4aac35dcb1f964
632,335
def get_one_mask_layer(mask_layers, layer_name):
    """Return the first matching layer definition that needs suppressing.

    Args:
        mask_layers (list): The layers that need to be suppressed.
        layer_name (str): The name of the target layer.

    Returns:
        Union[MaskLayerDes, None]: the first entry whose layer_name is a
        substring of *layer_name* and that is not yet initialized, or None.
    """
    return next(
        (candidate for candidate in mask_layers
         if candidate.layer_name in layer_name and not candidate.inited),
        None,
    )
25621b95601a9ceee68397146fc6902a25b562c1
292,553
def create_subtoken_map(tokens_len, indices):
    """Creates subtoken map by grouping specified indices.

    Tokens falling inside any (start, end) span from *indices* share one id;
    tokens outside spans each get a fresh id.

    :param tokens_len: total number of tokens to map.
    :param indices: iterable of (start, end) pairs marking entity spans.
        NOTE(review): the membership test below is ``start <= i < end``,
        i.e. end-exclusive — confirm this matches the span producer.
    :return: list of group ids, one per token position.
    """
    if len(indices) == 0:
        return list(range(tokens_len))  # No entities, nothing to do
    k = 0  # Tracks new tokens with named entities grouped
    j = 0  # Tracks which named entity we are processing
    subtoken_map = {}
    sorted_indices = sorted(indices, key=lambda x: x[0])
    curr_start, curr_end = sorted_indices[j]
    for i in range(tokens_len):
        if i > curr_end and j < len(sorted_indices) - 1:
            # Move up to next named entity
            j += 1
            curr_start, curr_end = sorted_indices[j]
        if curr_start <= i < curr_end:
            # Inside a span: reuse the current group id (k is not advanced).
            subtoken_map[i] = k
        else:
            # Outside any span: assign the current id, then advance it.
            subtoken_map[i] = k
            k += 1
    return list(subtoken_map.values())
b158cbb4ad0cdb7907693bebb4590e88b50e7fd2
325,916
import base64


def encode_message(string: str) -> str:
    """Encode string in base64.

    Args:
        string (str): String to encode.

    Returns:
        str: Base64-encoded string.
    """
    raw = string.encode('utf-8')
    encoded = base64.b64encode(raw)
    return encoded.decode("utf-8")
ff6869b0a6c4781a1c5e53116f771c919d4e1784
293,698
def is_aws_managed_policy(policy_arn):
    """Return True for AWS Managed Policies (not customer-created ones).

    Detection works by looking for "iam::aws" in the ARN. Trick from:
    https://gist.github.com/0xdabbad00/4ed4a7a56bbb93d70505a709de227414#file-grab-sh-L21
    """
    # The membership test is already a bool — return it directly instead of
    # the if/else True/False boilerplate.
    return 'iam::aws' in policy_arn
825a44f879adfe50a1d509d8e0b340277216ac0b
557,008
import math


def write_troe_dt(lang, rxn, beta_0minf, E_0minf, k0kinf):
    """Writes section of line for temperature partial derivative of Troe falloff.

    Parameters
    ----------
    lang : str
        Programming language, {'c', 'cuda'}.
        NOTE(review): *lang* (and *k0kinf*) are not referenced in this body —
        presumably kept for interface parity with sibling writers; confirm.
    rxn : `ReacInfo`
        Reaction of interest; pressure dependence expressed with Troe falloff.
    beta_0minf : float
        Low-pressure limit temperature exponent minus high-pressure value.
    E_0minf : float
        Low-pressure limit activation energy minus high-pressure value.
    k0kinf : float
        Low-pressure limit reaction coefficient divided by high-pressure value.

    Returns
    -------
    jline : str
        Line fragment with Troe temperature derivative.
    """
    # Constants are baked into the emitted code as 16-digit literals;
    # adjacent string literals concatenate before .format() is applied.
    jline = (' + (((1.0 / '
             '(Fcent * (1.0 + A * A / (B * B)))) - '
             'lnF_AB * ('
             '-{:.16e}'.format(0.67 / math.log(10.0)) +
             ' * B + '
             '{:.16e} * '.format(1.1762 / math.log(10.0)) +
             'A) / Fcent)'
             ' * ({:.16e}'.format(-(1.0 - rxn.troe_par[0]) / rxn.troe_par[1]) +
             ' * exp(T / '
             '{:.16e}) - '.format(-rxn.troe_par[1]) +
             '{:.16e} * '.format(rxn.troe_par[0] / rxn.troe_par[2]) +
             'exp(T / '
             '{:.16e})'.format(-rxn.troe_par[2])
             )
    # Optional fourth Troe parameter (T**) contributes an extra exp term.
    if len(rxn.troe_par) == 4 and rxn.troe_par[3] != 0.0:
        jline += (' + ({:.16e} / '.format(rxn.troe_par[3]) +
                  '(T * T)) * exp('
                  '{:.16e} / T)'.format(-rxn.troe_par[3])
                  )
    jline += '))'
    jline += (' - lnF_AB * ('
              '{:.16e}'.format(1.0 / math.log(10.0)) +
              ' * B + '
              '{:.16e}'.format(0.14 / math.log(10.0)) +
              ' * A) * '
              '({:.16e} + ('.format(beta_0minf) +
              '{:.16e} / T) - 1.0) / T'.format(E_0minf)
              )
    return jline
7ddc29aaa5223a27ba9f29b591d4674d9f93b38b
220,382
def get_egress_lossless_buffer_size(host_ans):
    """Get egress lossless buffer size of a switch.

    Args:
        host_ans: Ansible host instance of the device.

    Returns:
        Total switch buffer size in bytes (int), or None when the running
        config has no BUFFER_POOL table or no 'egress_lossless_pool' entry.
    """
    # Pull the running configuration facts from the device via Ansible.
    config_facts = host_ans.config_facts(host=host_ans.hostname,
                                         source="running")['ansible_facts']
    if "BUFFER_POOL" not in config_facts.keys():
        return None
    buffer_pools = config_facts['BUFFER_POOL']
    profile_name = 'egress_lossless_pool'
    if profile_name not in buffer_pools.keys():
        return None
    egress_lossless_pool = buffer_pools[profile_name]
    # 'size' is stored as a string in config facts; convert to int.
    return int(egress_lossless_pool['size'])
a2bd17d4d8b522f80e4f3749feb29aa63fd80184
674,017
def getGussVariables(project_vars: dict) -> tuple:
    """Collect the GUSS-tool activation parameters from project_variables.csv.

    Args:
        project_vars (dict): project variables from project_variables.csv.

    Raises:
        Exception: GUSS should be "yes" or "no" in project_variables.csv
        Exception: GUSS_parallel should be "yes" or "no" in project_variables.csv
        Exception: GUSS_parallel_threads must be an integer in project_variables.csv

    Returns:
        tuple: (guss, guss_parallel, guss_parallel_threads) — activation of
        GUSS, whether to run it in parallel, and the number of CPUs to use.
    """
    guss_flag = project_vars["GUSS"].lower()
    if guss_flag not in ("yes", "no"):
        raise Exception('GUSS should be "yes" or "no"')
    guss = guss_flag == "yes"

    parallel_flag = project_vars["GUSS_parallel"].lower()
    if parallel_flag not in ("yes", "no"):
        raise Exception('GUSS_parallel should be "yes" or "no"')
    guss_parallel = parallel_flag == "yes"

    threads_value = project_vars["GUSS_parallel_threads"]
    if isinstance(threads_value, int):
        guss_parallel_threads = threads_value
    elif threads_value.isdigit():
        guss_parallel_threads = int(threads_value)
    else:
        raise Exception("GUSS_parallel_threads must be an integer")

    return guss, guss_parallel, guss_parallel_threads
04a6f39aa0f5ce9f6325ccd37dbb5d2bf358bc8f
430,985
import math def _rad2deg(value): """ Convert radian to decimal degrees """ return math.degrees(value)
90a9b4339125807853b4c0ad23a45f86195ec64c
533,825
def private_byte_prefix(is_test):
    """WIF prefix: b'\\x80' for the main network, b'\\xef' for testnet."""
    if is_test:
        return b'\xef'
    return b'\x80'
7f719715e9905726cc470443e90a85a60fec7dda
281,558
import six


def to_unicode(s):
    """Return the object as unicode (only matters for Python 2.x).

    If s is already Unicode, return s as is. Otherwise, assume that s is
    UTF-8 encoded, and convert to Unicode.

    :param (basestring) s: a str, unicode or other basestring object
    :return (unicode): the object as unicode
    :raises ValueError: if s is not a string type at all
    """
    if not isinstance(s, six.string_types):
        raise ValueError("{} must be str or unicode.".format(s))
    if not isinstance(s, six.text_type):
        # Python 2 byte string: decode assuming UTF-8.
        s = six.text_type(s, 'utf-8')
    return s
a3689042dbdc1f6873f11c6efce2c5620a137a29
563,016
def rel_err(v_theo: float, v_exp: float) -> float:
    """Computes the relative error (in percent) between v_theo and v_exp.

    Args:
        v_theo (float): Theoretical value. Must be non-zero.
        v_exp (float): Experimental value.

    Returns:
        float: Relative error between the values, always >= 0.

    Raises:
        ZeroDivisionError: if v_theo is 0.
    """
    # Fix: the denominator must be |v_theo| as well, otherwise a negative
    # theoretical value yields a negative "relative error".
    return abs(v_theo - v_exp) / abs(v_theo) * 100
0348d49115ad105853f70e3c22d2fcf300cb21e2
181,919
def bin_to_gray(bin_list):
    """Convert from binary coding to gray coding (big endian).

    Gray code is bin XOR (bin >> 1): gray[0] = bin[0] and
    gray[i] = bin[i] XOR bin[i-1] for i >= 1.

    Examples
    ========

    >>> bin_to_gray('111')
    '100'
    >>> bin_to_gray('10')
    '11'

    See Also
    ========
    gray_to_bin
    """
    # Fix: the old loop XORed bin[i] with the *gray* bit b[i-1] (and b[-1]
    # on the first pass), which only coincidentally worked for some inputs
    # (e.g. '111') and failed for others (e.g. '10' -> '10' instead of '11').
    bits = [bin_list[0]]
    for i in range(1, len(bin_list)):
        bits.append(str(int(bin_list[i]) ^ int(bin_list[i - 1])))
    return ''.join(bits)
7ad89f7f58db25ac44420b8d8e9539cce2752ef9
587,858
def _compressKerningPhase2(kerning): """ >>> kerning = { ... ("A", 100) : set(["A", "Aacute"]), ... ("Aacute", 100) : set(["A", "Aacute"]), ... ("A", 200) : set(["Agrave"]), ... ("Agrave", 200) : set(["A"]), ... ("A", 300) : set(["Adieresis"]), ... } >>> expected = { ... (("A", "Aacute"), 100) : set(["A", "Aacute"]), ... (tuple(["Agrave"]), 200) : set(["A"]), ... (tuple(["A"]), 200) : set(["Agrave"]), ... (tuple(["Adieresis"]), 300) : set(["A"]), ... } >>> result = _compressKerningPhase2(kerning) >>> result == expected True """ # create a dict of form {(glyph2s, value) : set(glyph1s)} compressed = {} for (glyph1, value), glyph2List in kerning.items(): k = (tuple(sorted(glyph2List)), value) if k not in compressed: compressed[k] = set() compressed[k].add(glyph1) return compressed
585a0db38023150eba78189900a237d2e86bed75
419,216
def as_bare_id(string):
    """Return an id stripped from its leading checksum algorithm prefix
    ('sha256:') if present; falsy inputs are returned unchanged.
    """
    if not string:
        return string
    prefix = 'sha256:'
    if string.startswith(prefix):
        return string[len(prefix):]
    return string
79680bacf5b67e4cf5883d06d599552ec264f88d
326,256
def comp_normalize(comp_dict, target=1.0):
    """Normalize gas composition saved in a dictionary.

    Parameters
    ----------
    comp_dict : dict
        Gas composition in the measurement volume stored in a dictionary.
        The sum of its values must be non-zero.
    target : float, optional
        Normalization factor, by default 1.0.

    Returns
    -------
    dict
        Normalized gas composition stored in a dictionary.
    """
    scale = target / sum(comp_dict.values())
    return {species: fraction * scale for species, fraction in comp_dict.items()}
a2954e46f880db1284a67e927782022bc292b85e
448,267
def create_bm_dictionary(name, federate_count, core_type, real_time, cpu_time, threads):
    """Create a results dictionary for a single benchmark run.

    Args:
        name (str): The name of the benchmark, e.g. BMecho_singleCore.
        federate_count (int): The number of federates.
        core_type (str): The name of the core type.
        real_time (float): The human-interpreted (wall-clock) time.
        cpu_time (float): The CPU time.
        threads (int): The number of threads.

    Returns:
        dict: A dictionary of the benchmark results.
    """
    # The two branches only differed in the name/run_name string — compute
    # that once and share the rest of the dict.
    if name == "BMecho_singleCore":
        run_name = "{}/{}/iterations:1/real_time".format(name, federate_count)
    else:
        run_name = "{}/{}Core/{}/real_time".format(name, core_type, federate_count)
    return {
        "name": run_name,
        "run_name": run_name,
        "run_type": "iteration",
        "repetitions": 1,
        "repetitions_index": 1,
        "threads": threads,
        "iterations": 1,
        "real_time": real_time,
        "cpu_time": cpu_time,
        "time_unit": "s",
    }
afacd2cce6695ca56a2f6346b72bb405b5cbcb7f
251,387
from typing import List


def remove_existing_nodes_from_new_node_list(new_nodes, current_nodes) -> List[str]:
    """Return a list of nodes minus the nodes (and masters) already in the
    inventory (groups 'nodes' and 'masters')."""
    # Build a set once for O(1) membership tests instead of O(len(current))
    # per node; order of new_nodes is preserved.
    existing = set(current_nodes)
    return [node for node in new_nodes if node not in existing]
e87e29ee0e9592922e568a1eec1f63d5065b7bc5
654,323
import re


def extract_year(string: str) -> int:
    """Extract a four-digit valid year (1900-2099) from a string.

    If there are multiple four-digit valid years in the string, the first
    occurrence is returned.

    Parameters
    ----------
    string
        String to extract the year from.

    Returns
    -------
    int
        Four-digit integer representing the year.

    Raises
    ------
    Exception
        If the string contains no valid year.
    """
    # re.search stops at the first match; the old findall scanned the whole
    # string and built a throwaway list just to take element 0.
    match = re.search(r"(?:19|20)\d{2}", string)
    if match is None:
        raise Exception("The string does not have any valid year.")
    return int(match.group())
beb3c03bdc1405f8bca959ebd357cadacd0548e8
202,202
def is_very_long(password):
    """A password counts as very long when it has 13 or more characters."""
    return not len(password) < 13
7be381aa079c5b70a2fce82143c70b6242a6c5ec
75,447
import codecs


def bytes_to_str(b):
    """
    Converts bytes into a hex-encoded string.

    Args:
        b (bytes): bytes to encode

    Returns:
        h (str): lowercase hex-encoded string corresponding to b.
    """
    # bytes.hex() produces the identical lowercase hex string without the
    # codecs encode/decode round-trip.
    return b.hex()
d58f3ab0387aa5567f3033b1de231d1376e216ce
213,316
def reversed_hit(locus_list, decoy_string):
    """Check whether any protein locus is a reversed (decoy) entry.

    :param locus_list: iterable of locus objects exposing an ``ID`` string.
    :param decoy_string: marker substring identifying decoy IDs.
    :return: True if any locus ID contains the decoy marker, else False.
    """
    # any() short-circuits on the first hit; the old loop always scanned
    # the entire list even after finding a decoy.
    return any(decoy_string in locus.ID for locus in locus_list)
dd14dc02e4dcd070a4aebfc5a2d9aa59d7785bd7
196,075
def calculate_batch(batch_size, length):
    """
    Calculate the batch size for the data of given length.

    Parameters
    ----------
    batch_size : int, float, default=None
        Batch size for training. Must be one of:
            - int : Use `batch_size`.
            - float : Use `batch_size * n_samples`.
            - None : Use `n_samples`.
    length : int
        Length of the data to be batched.

    Returns
    -------
    batch : int
        Actual batch size.

    Raises
    ------
    ValueError
        If batch_size is not None, a valid int in (0, length], or a
        float in (0, 1].
    """
    if batch_size is None:
        return length
    elif isinstance(batch_size, int) and 0 < batch_size <= length:
        return batch_size
    elif isinstance(batch_size, float) and 0 < batch_size <= 1:
        return int(batch_size * length)
    else:
        # Fix: ValueError was previously given TWO arguments, so e.args was
        # a 2-tuple and str(e) showed a tuple instead of one message.
        raise ValueError(
            "Batch size must be None, an int less than %d, "
            "or a float within (0,1]" % length
        )
5615c6973cff66b3ca35ababf3584075bef1db1a
167,137
def DropWikiSuffix(wiki_filename):
    """Removes the .wiki suffix (if any) from the given filename."""
    suffix = '.wiki'
    if wiki_filename.endswith(suffix):
        return wiki_filename[:-len(suffix)]
    return wiki_filename
96ec5a3c81ee31c5a6c7f049ec83b5e3c9d4f1b8
578,667
def gcd_steps(a, b):
    """Return the number of Euclidean-algorithm steps needed for GCD(a, b).

    Each step applies GCD(a, b) = GCD(b, a mod b); GCD(a, 0) is a.
    """
    steps = 0
    while b:
        a, b = b, a % b
        steps += 1
    return steps
768d52c795c8c8eb20f4adfaf26f10da12962534
45,827
import decimal


def round_half_up(money):
    """
    Explicitly round a decimal to 2 places half up, as should be used for money.

    >>> exponent = decimal.Decimal('0.01')
    >>> should_not_be_one = decimal.Decimal('1.005')
    >>> should_not_be_one.quantize(exponent)
    Decimal('1.00')
    >>> round_half_up(should_not_be_one)
    Decimal('1.01')
    """
    cents = decimal.Decimal('0.01')
    return money.quantize(cents, rounding=decimal.ROUND_HALF_UP)
e87199531ae45be4a17359751c68a5a15a8edecd
122,623
def extract_teams(summary):
    """
    Split the summary "teamA - teamB" to get the two team names.

    Teams whose own name contains a '-' (the known prefixes below) keep
    their hyphen intact and a (name_a, name_b) tuple is returned; otherwise
    the raw two-part split (a list) is returned.
    """
    hyphenated_prefixes = (
        "AS SAINT-ÉTIENNE",
        "AS SAINT-?TIENNE",
        "PARIS SAINT-GERMAIN"
    )
    if summary.startswith(hyphenated_prefixes):
        a1, a2, b = summary.split('-', 2)
        return "%s-%s" % (a1, a2), b
    return summary.split('-', 1)
a0dfb0612c18479570806dcd04f9d5a9f263bfb4
291,866
def get_processable_layers(layers):
    """Return computable layers from a list of layers, along with their types.

    Type is inferred from keywords in the layer's (lowercased) name, since
    the actual layer class cannot be relied upon. Extend the keyword checks
    here when adding compatibility for more layer kinds.
    """
    processable = []
    for layer in layers:
        lowered = layer.name.lower()
        # Order matters: 'conv' wins over 'caps', 'primary'+'caps' over 'caps'.
        if "conv" in lowered:
            kind = "CONVOLUTIONAL"
        elif "primary" in lowered and "caps" in lowered:
            kind = "PRIMARY_CAPS"
        elif "caps" in lowered:
            kind = "DENSE_CAPS"
        elif "mask" in lowered:
            kind = "MASK"
        else:
            continue
        processable.append([lowered, kind])
    return processable
ac77e821afc58b0d3bdbb0a23035b2ca48eb3fb0
91,990
def lremove(s, prefix):
    """Remove *prefix* from the start of string *s*, if present."""
    if s.startswith(prefix):
        return s[len(prefix):]
    return s
b19235b643c3c0074e08f2b939353f05b6d0f630
683,303
def add_to_dict_of_levels(dict_levels, c, cur_level, index):
    """
    Record *index* under the key "<level>_<char>" and return the dict.

    Example resulting dict: {'1_,': [14], '2_,': [9], '0_ ': [3], '1_ ': [12]}
        comma of level 1: position 14
        comma of level 2: position 9
        space of level 0: position 3
        space of level 1: position 12
    """
    key = str(cur_level) + '_' + c
    dict_levels.setdefault(key, []).append(index)
    return dict_levels
26f6b142a05ed1eedd3508413c1f48a423c5b22a
209,233
def get_sorted_exons(session, transcript, protein_coding=False):
    """
    Retrieve all Exons associated with input Transcript and return in sorted
    order. Option to filter on protein coding Exons only.

    Parameters
        session: A session connected to the DB where the Exons and
            Transcripts live.
            NOTE(review): *session* is not used in this body — presumably
            kept so ORM relationships stay bound; confirm with callers.
        transcript (Transcript): Transcript for which the Exons will be
            retrieved.
        protein_coding (Bool): If True, only protein coding Exons will be
            returned.

    Returns
        List of (filtered) Exons, sorted by order of appearance in the
        protein, so for Transcripts from the reverse strand, order will be
        descending. Implicitly returns None when the gene's strand is
        neither "fw" nor "rv".
    """
    exons = []
    for exon in transcript.exons:
        # Exons without a CDS are skipped when filtering to protein coding.
        if protein_coding and not exon.cds:
            continue
        exons.append(exon)
    if transcript.gene.strand == "fw":
        return list(sorted(exons, key=lambda x : x.begin))
    elif transcript.gene.strand == "rv":
        return list(sorted(exons, key=lambda x : x.begin, reverse=True))
    # NOTE(review): falls through to None for any other strand value —
    # confirm that callers handle this.
3f99f20a3bf2c1f956d4fb3ca361721b877c9b6d
237,604
def serialize_profile(user_profile):
    """
    Serializes an user profile object.

    :param user_profile: user profile object; must expose bio, description,
        resume, full_name, mail, birth_date (a date/datetime-like object)
        and avatar attributes
    :return: dictionary with the user profile info; birth_date is rendered
        as a 'DD-MM-YYYY' string
    """
    return {
        'bio': user_profile.bio,
        'description': user_profile.description,
        'resume': user_profile.resume,
        'full_name': user_profile.full_name,
        'mail': user_profile.mail,
        # strftime requires birth_date to be date/datetime-like.
        'birth_date': user_profile.birth_date.strftime("%d-%m-%Y"),
        'avatar': user_profile.avatar,
    }
4eb1f88c197117c9dd31ab3090d541c0ae7b65bb
20,941
def removeSpaces(string):
    """Returns a new string with spaces removed from the original string

    >>> string = '1 173'
    >>> removeSpaces(string)
    '1173'
    """
    # str.replace does this in one C-level pass; the comprehension built a
    # character list and re-joined it.
    return string.replace(' ', '')
ce00687c43ce521c14b578105bd9412c31b9817a
28,019
from pathlib import Path
import json


def get_secrets_list(env: str) -> dict[str, set]:
    """Create a list of available app secrets, without their values.

    Reads ./configuration/default.json and ./configuration/<env>.json
    (relative to the current working directory), and unions their
    "secrets" entries.

    :param env: environment name selecting the "<env>.json" config file.
    :return: {"AVAILABLE_SECRETS": set of secret key names}.
    :raises FileNotFoundError: if either config file is missing.
    :raises KeyError: if a config file has no "secrets" entry.
    """
    path = (Path() / "configuration").resolve()
    default_file = json.loads((path / "default.json").read_text())
    env_file = json.loads((path / f"{env}.json").read_text())
    config_keys: set[str] = set()
    # update() with two iterables unions both files' secret names.
    config_keys.update(default_file["secrets"], env_file["secrets"])
    return {"AVAILABLE_SECRETS": config_keys}
f31905458529d1dae8ad6da7a4a60acc8aa9f3d8
647,170
def get_edge_name(edge):
    """Separates the edge name from its abbreviation.

    The true edge name is everything before the final '_' character, so for
    'PROCESS_OF_PpoP' we keep 'PROCESS_OF' with its underscores intact.
    Returns '' when the edge contains no underscore (matching the previous
    split/join behavior).
    """
    # rpartition splits once at the LAST '_' without building the full
    # split list; [0] is '' when no separator is found.
    return edge.rpartition('_')[0]
2c7f569877857a81debd0d1237f1957b0f58091a
207,952
from typing import List import math def _percentile_index(histogram: List[int], select_percentile: float = 0.95) -> int: """ Given an array of elements where each ith element refer to ith frequency, finds the index of the percentile indicated by percentile. :param histogram: Array of frequencies of integers. Value of ith element should refer to the frequency of i. :param select_percentile: Percentile to use for selecting the number of seeds. For example, 0.5 will take the median number of seeds. :return: The median value of the values. """ curr_total = 0 median_pos = math.ceil(sum(histogram) * select_percentile) for i in range(len(histogram)): if curr_total + histogram[i] >= median_pos: return i curr_total += histogram[i] return len(histogram)
89eabb7af8c216fe69b5c7b3f04b34cf0d749915
579,368
def tree_to_list(tree):
    """
    Method joins tree leaves and label in one list.

    Parameters
    ----------
    tree : tree
        nltk tree instance (anything exposing leaves() -> iterable of str
        and label() -> str)

    Returns
    -------
    tree_list : list
        two-element list: [space-joined leaves, label]
    """
    return [" ".join(tree.leaves()), tree.label()]
f22948e018ed3b5bb8a5c89f014c2becd60000e3
193,119
from typing import List
from typing import Tuple
import copy


def convert_scene_for_placing(opt, scene: List) -> Tuple:
    """Converts the scene into a modified scene for placing, with the
    following steps:
        1. Denote the (x, y) location of the destination object as the
           placing destination.
        2. Remove the destination object from the scene list.

    Args:
        opt: options object providing scene_place_src_idx and
            scene_place_dst_idx (indices into *scene*).
        scene: The original scene (list of dicts with at least 'position').

    Returns:
        new_scene: The scene modified for placing.
        place_dst_xy: The (x, y) placing destination for the placing task.
        place_dest_object: An imaginary object copying the source object's
            shape attributes, positioned at the destination (z = 0.0,
            height 0.005), used to visualize the placing destination.
    """
    # Make sure that removing the destination object from the scene won't modify the
    # source object index.
    assert opt.scene_place_src_idx < opt.scene_place_dst_idx

    # Remove the destination object from the scene.
    new_scene = copy.deepcopy(
        scene[: opt.scene_place_dst_idx] + scene[opt.scene_place_dst_idx + 1 :]
    )

    # Use the location of the destination object as the (x, y) placing destination.
    place_dst_xy = scene[opt.scene_place_dst_idx]["position"][:2]

    # Construct an imaginary object with the same shape attribute as the source object
    # to visualize the placing destination.
    place_dest_object = copy.deepcopy(scene[opt.scene_place_src_idx])
    place_dest_object["position"] = place_dst_xy + [0.0]
    place_dest_object["height"] = 0.005
    return new_scene, place_dst_xy, place_dest_object
9fab3940ec215dc0b70720b3a3ccb6e588be80d5
467,926
import click


def demander_mouvement(mvmts_possibles: str):
    """Ask the player which move they want to make (interactive).

    Args:
        mvmts_possibles (str): Moves the player is allowed to make,
            a subset of "HBGD".

    Returns:
        str: Move chosen by the player.
            H : go up
            B : go down
            G : go left
            D : go right
    """
    mouvement = ""
    # Mapping from move codes to their French display labels.
    mouvements_to_fr = {"H": 'Monter', "B": "Descendre", "G": "Aller à gauche", "D": "Aller à droite"}
    # Loop until a valid, allowed move is entered.
    while mouvement == "" or mouvement not in "HBGD":
        print('Tu peux :')
        for mvmt in mvmts_possibles:
            print(f"- {mouvements_to_fr[mvmt]}")
        click.echo('Où aller ? [ZQSD / Flèches] ', nl=False)
        c = click.getchar()
        click.echo()
        # The '\xe0?' two-character sequences are Windows arrow-key
        # scancodes as returned by click.getchar().
        if (c == '\xe0H' or c.upper() == "Z") and "H" in mvmts_possibles:
            mouvement = "H"
        elif (c == '\xe0K' or c.upper() == "Q") and "G" in mvmts_possibles:
            mouvement = "G"
        elif (c == '\xe0P' or c.upper() == "S") and "B" in mvmts_possibles:
            mouvement = "B"
        elif (c == '\xe0M' or c.upper() == "D") and "D" in mvmts_possibles:
            mouvement = "D"
        else:
            click.echo('Entrée invalide ou mouvement impossible :(')
    return mouvement
40f06c023e6792bf1b752c2181fcd87a3a0b722f
659,711
import locale


def sort_func(row1, row2):
    """Compare two podcast rows for alphabetical ordering by title.

    Rows whose podcast title is ``None`` sort after titled rows.

    Parameters
    ----------
    row1 : Gtk.ListBoxRow
        The first row
    row2 : Gtk.ListBoxRow
        The second row

    Returns
    -------
    int
        Negative if row1 sorts before row2, 0 if equal, positive otherwise.
    """
    title1 = row1.podcast.title
    title2 = row2.podcast.title
    if title1 is None:
        return 1
    if title2 is None:
        return -1
    return locale.strcoll(title1, title2)
450ef53d8f9e89315e23ed428e6ec03126469075
521,282
import re from typing import List def parse_input(s) -> List[tuple]: """ Parses the search input query and creates a data structure: ( column_to_search_in, string_to_search, should we filter filter for quoted string) :param s: input string to parse :return: the list of the search terms to perform and where """ # implementation for 'AND' combined_queries = s.split(' AND ') queries_to_perform = [] # find content operators (e.g. "title:") regex = r"([a-z]+):([a-zA-Z0-9 _]+( |$))" for query in combined_queries: matches = re.finditer(regex, query, re.MULTILINE) for match in matches: query = list(match.groups()) # match 0 is the column # match 1 is the string to query queries_to_perform.append((query[0], query[1], False)) # assumption: quoted queries are not combined with search operators if not queries_to_perform: if s.startswith('"') and s.endswith('"'): s.replace('"', '') # remove quotes queries_to_perform.append(('content', s, True)) else: queries_to_perform.append(('content', s, False)) return queries_to_perform
8b7b388e82d69723875a2281a21e625fce34c063
65,471
from operator import contains


def project_contains_keywords(project, keywords):
    """Return True if project contains at least one of the keywords.

    Inspects the scripts of the project itself and of every sprite and
    backdrop in ``project["children"]``.
    """
    # The project node is searched alongside its children.
    nodes = project["children"] + [project]
    return any(
        contains(script, keywords)
        for node in nodes
        for script in node.get("scripts", [])
    )
00a2749d54a189d5ff4b6bbe2833e97f3ba89aba
657,015
def merge_l_t(l, lt):
    """Merge a list of 3-tuples ``lt`` into the three sub-lists of ``l``.

    Each tuple's second element is appended to ``l[0]``, its third to
    ``l[1]``, and its first to ``l[2]``.  For example:

    [('a', 'b', 'c'), ('a', 'd', 'e')] -> [['b', 'd'], ['c', 'e'], ['a', 'a']]

    (Doc fix: the previous docstring example showed an ordering the code
    does not actually produce.)
    """
    for t in lt:
        l[0].append(t[1])
        l[1].append(t[2])
        l[2].append(t[0])
    return l
145c09bfbe1d351e7044a36622414fde610b8c1d
674,917
def _plot_ids(ax, x, y, size, marker='.', color=None, **kwargs):
    """Plot points on *ax* and return the color actually used.

    Uses ``ax.plot`` rather than ``ax.scatter`` because it is faster.
    """
    # plot() returns a list of line artists; draw one and report the
    # color matplotlib resolved for it.
    artists = ax.plot(
        x,
        y,
        color=color,
        markersize=size,
        marker=marker,
        linestyle='None',
        **kwargs,
    )
    return artists[0].get_color()
7e4dda2315005f576aff6d2d2746dd4dc3f8f807
71,983
def string_to_int_list(line) -> list[int]:
    """
    Given a line containing integers, return a list of ints.

    Integers may be separated by commas or by whitespace.

    Return list[int]: A list of integers from the string.
    >>> string_to_int_list('22 13 17 11 0')
    [22, 13, 17, 11, 0]
    >>> string_to_int_list('22,13,17,11,0')
    [22, 13, 17, 11, 0]
    """
    stripped = line.strip()
    # split(None) splits on any whitespace run, matching split().
    separator = ',' if ',' in stripped else None
    return [int(token) for token in stripped.split(separator)]
0a31a7e3b320effb1851d1f37e9fb640c78d0c72
653,751
def scale_array(arr, s):
    """Return a new list with each element of *arr* multiplied by *s*.

    Parameters:
    1. arr: a numeric array or list
    2. s: scaling factor, real number
    """
    scaled = []
    for value in arr:
        scaled.append(value * s)
    return scaled
3b7cbec86a73c8e5f99f08ce8eb091ce6aecf4ac
688,072
import math


def mass_flow(row):
    """Calculate the mass flow for a row of column parameters.

    Multiplies column length, column width, the circular cross-sectional
    area (pi/4 * width^2), porosity, and influent pore volumes.
    NOTE(review): the width enters the product both directly and via the
    area term — confirm this is intentional.
    """
    area = math.pi / 4 * row.column_width ** 2
    return (
        row.column_length
        * row.column_width
        * area
        * row.porosity
        * row.influent_pore_volumes
    )
df060bd46934cde1aba7f6cf4e148eac142cabf7
276,869
from pathlib import Path


def default_conf(testdatadir):
    """Return a validated configuration suitable for most tests."""
    exchange = {
        "name": "bittrex",
        "enabled": True,
        "key": "key",
        "secret": "secret",
        "pair_whitelist": ["ETH/BTC", "LTC/BTC", "XRP/BTC", "NEO/BTC"],
        "pair_blacklist": ["DOGE/BTC", "HOT/BTC"],
    }
    bid_strategy = {
        "ask_last_balance": 0.0,
        "use_order_book": False,
        "order_book_top": 1,
        "check_depth_of_market": {
            "enabled": False,
            "bids_to_ask_delta": 1,
        },
    }
    ask_strategy = {
        "use_order_book": False,
        "order_book_min": 1,
        "order_book_max": 1,
    }
    return {
        "max_open_trades": 1,
        "stake_currency": "BTC",
        "stake_amount": 0.001,
        "fiat_display_currency": "USD",
        "ticker_interval": '5m',
        "dry_run": True,
        "minimal_roi": {"40": 0.0, "30": 0.01, "20": 0.02, "0": 0.04},
        "stoploss": -0.10,
        "unfilledtimeout": {"buy": 10, "sell": 30},
        "bid_strategy": bid_strategy,
        "ask_strategy": ask_strategy,
        "exchange": exchange,
        "telegram": {"enabled": True, "token": "token", "chat_id": "0"},
        "datadir": str(testdatadir),
        "initial_state": "running",
        "db_url": "sqlite://",
        "user_data_dir": Path("user_data"),
        "verbosity": 3,
    }
0ceb6b8ecc9aca14b122e5ecf8f84da2fe4d9f74
332,095
def flatten_dict_data(data, fun="{}/{}".format):
    """Flatten nested dictionary data into a one-layer dictionary.

    :param data: Possibly nested dictionary; non-dict values pass through.
    :param fun: Two-argument callable joining parent and child keys
        (default joins with ``/``).
    :return: Flat dictionary, or *data* unchanged if it is not a dict.
    """
    if not isinstance(data, dict):
        return data
    ret = {}
    for key in data:
        # Bug fix: propagate *fun* into the recursion; previously nested
        # levels always fell back to the default "/" joiner.
        flattened = flatten_dict_data(data[key], fun)
        if isinstance(flattened, dict):
            for sub_key in flattened:
                ret[fun(key, sub_key)] = flattened[sub_key]
        else:
            ret[key] = flattened
    return ret
edf69fb32375dbaa635049a3f675061408427083
224,761
def app_model_or_none(raw_model):
    """
    Transforms `raw_model` to its application model.
    Returns None when `raw_model` is None.
    """
    if raw_model is None:
        return None
    return raw_model.get_app_model()
fdaa53876e4d0eba6260b4d8b5ecf90c5e8a33aa
104,390
import random


def text_loader(caption, tokenizer, max_length, training, drop_rate=0, transform_cnt=1):
    """Tokenize a caption, optionally replicating/dropping it for training.

    args:
        caption: str
        tokenizer: str, transformers.AutoTokenizer (or compatible callable)
        max_length: int, max num of tokens
        training: bool
        drop_rate: float, default 0, rate to use empty caption
        transform_cnt: int, default 1, >= 1 if self-supervised
    returns:
        tokens['input_ids']: tensor, [transform_cnt, max_length]
        tokens['attention_mask']: tensor, [transform_cnt, max_length], 0-padding
    """
    if not training:
        text_batch = [caption]
    elif caption == '':
        text_batch = ['' for _ in range(transform_cnt)]
    else:
        # With probability drop_rate, replace the caption with an empty
        # string for each transform.
        text_batch = [
            '' if random.random() < drop_rate else caption
            for _ in range(transform_cnt)
        ]
    tokens = tokenizer(text_batch, return_tensors='pt',
                       padding='max_length', truncation=True,
                       max_length=max_length)
    return tokens['input_ids'], tokens['attention_mask']
7332d0d6c44732ef0a3be577be0b54f201e7ffff
254,669
def _CropAndResizeGradBoxesShape(op):
    """Shape function for CropAndResizeGradBoxes.

    The gradient with respect to the boxes has the same shape as the
    boxes input, which is ``op.inputs[2]``.
    """
    boxes_input = op.inputs[2]
    return [boxes_input.get_shape()]
b3f9e2e5649dd819bca515b1431555686ca39f7c
364,737
def convert_weka_to_py_date_pattern(p):
    """
    Converts the date format pattern used by Weka to the date format
    pattern used by Python's datetime.strftime().
    """
    # https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
    # https://www.cs.waikato.ac.nz/ml/weka/arff.html
    # Token translation table; replacements are case-sensitive
    # ('MM' is month, 'mm' is minute).
    replacements = (
        ('yyyy', r'%Y'),
        ('MM', r'%m'),
        ('dd', r'%d'),
        ('HH', r'%H'),
        ('mm', r'%M'),
        ('ss', r'%S'),
    )
    for weka_token, py_token in replacements:
        p = p.replace(weka_token, py_token)
    return p
2028f3c22ee8d34f9c78aa285bf63c4bbdfe5428
242,116
def get_attributes(obj, names):
    """Return attributes dictionary with keys from `names`.

    Object is queried for each attribute name, if it doesn't have this
    attribute, default value None will be returned.

    >>> class Class:
    ...     pass
    >>> obj = Class()
    >>> obj.attr = True
    >>> obj.value = 13
    >>> obj.string = "Hello"
    >>> d = get_attributes(obj, ['attr', 'string', 'other'])
    >>> d == {'attr': True, 'string': "Hello", 'other': None}
    True
    """
    return {name: getattr(obj, name, None) for name in names}
603a0411e6ac2d602343306234e0a71fbb3b2f9a
624,116
def in_facebook_app(request):
    """
    Determine if the given request points to a page within the Facebook app.

    A request is considered in-app when its path is under ``/fb``.
    """
    fb_prefix = '/fb'
    return request.path.startswith(fb_prefix)
d256953f78ef8dd02f3cf3c3e1f9c006dec94cdb
613,465
import string


def is_hex(color):
    """
    Method to check if a color code is hexadecimal (HEX)

    This method returns the value True if the input argument corresponds
    to an hexadecimal color code.

    Parameters
    ----------
    color: :obj: Color code

    Returns
    -------
    bool
        True if the input color code takes the hexadecimal (HEX) form:
        exactly six hexadecimal digits, optionally prefixed with '#'.

    Examples
    --------
    >>> from uibcdf_stdlib.colors import is_hex
    >>> is_hex('#008080')
    True
    >>> is_hex([0.0, 0.5, 0.5])
    False
    """
    if not isinstance(color, str):
        return False
    digits = color[1:] if color.startswith('#') else color
    # Bug fix: the original only checked the string length, so values such
    # as 'zzzzzz' or '#00808' were wrongly accepted as HEX codes.
    return len(digits) == 6 and all(c in string.hexdigits for c in digits)
8b8dafa1d59ceba27f423a5dc592c9936b85df54
573,391
from datetime import datetime


def now(includetime=True):
    """Get current time in ISO 8601 format in string format

    Args:
        includetime(bool): includes current hour and minutes. Default is True.

    Returns:
        str: the current date (and time) as a string, formatted
        "%Y-%m-%dT%H:%M" or "%Y-%m-%d".
    """
    fmt = "%Y-%m-%dT%H:%M" if includetime else "%Y-%m-%d"
    return datetime.now().strftime(fmt)
9c43e06d5f20f0dfa9d6c7251dfc0297096c858e
346,427
def _colorize(t):
    """Convert (r, g, b) triple to "#RRGGBB" string

    For use with ``visualize(color=...)``

    Examples
    --------
    >>> _colorize((255, 255, 255))
    '#FFFFFF'
    >>> _colorize((0, 32, 128))
    '#002080'
    """
    channels = t[:3]
    n = len(channels)
    # Pack the channels into one integer (base-256 positional), then
    # render as zero-padded uppercase hex.
    packed = sum(value * 256 ** (n - pos - 1) for pos, value in enumerate(channels))
    digits = hex(int(packed))[2:].upper()
    return "#" + digits.zfill(6)
d986e76722113bbd94d4de7195d6d76aecced92e
529,041
def words_before_index(text, idx):
    """Counts the number of words in the text before idx.

    Scans backwards from *idx* to the nearest space, then counts the
    space-separated words in that prefix.  Returns 0 when the scan
    reaches position 0 without finding a space.
    """
    pos = idx
    while text[pos] != ' ':
        pos -= 1
        if pos <= 0:
            return 0
    return len(text[:pos].split(' '))
d9f36d6d70efc9c0a4699f2c5bb0e39645d1ec34
174,901
import string
import secrets


def GenerateRandomString(length=30):
    """Make a unique string that is difficult to guess.

    Vowels are excluded from the alphabet (consonants in both cases plus
    digits).

    Args:
        length: number of characters in the generated string.

    Returns:
        str: the random string.
    """
    valid_chars = ''.join(set(string.ascii_lowercase) - set('aeiou'))
    valid_chars += valid_chars.upper() + string.digits
    # Security fix: the docstring promises a hard-to-guess value, so draw
    # characters from the cryptographically secure `secrets` module
    # instead of the predictable `random` PRNG.
    return ''.join(secrets.choice(valid_chars) for _ in range(length))
9fb74d32f9f30d198e34e662cdcab1b5003f0453
461,753
def make_type_entity_id(type_id=None, entity_id=None):
    """
    Assemble a type_id and entity_id and return a composite identifier.

    If the entity Id is blank, ignore the supplied type id

    >>> make_type_entity_id(type_id="type_id", entity_id="entity_id") == "type_id/entity_id"
    True
    >>> make_type_entity_id(type_id="type_id", entity_id="") == ""
    True
    """
    assert type_id is not None, "make_type_entity_id: no type id (%s, %s)"%(type_id, entity_id)
    assert entity_id is not None, "make_type_entity_id: no entity id (%s, %s)"%(type_id, entity_id)
    return (type_id + "/" + entity_id) if entity_id != "" else ""
8de40843e2bc35431333a2ea21947e5cd91d2db2
23,732
def merge_mappings(target, other, function=lambda x, y: x + y):
    """
    Merge two mappings into a single mapping.

    The set of keys in both mappings must be equal; values for each key
    are combined with *function* (addition by default).
    """
    assert set(target) == set(other), 'keys must match'
    merged = {}
    for key, value in target.items():
        merged[key] = function(value, other[key])
    return merged
b2cb4c27c83f00b6d8ce2431c00223bef825c33d
537,579
def make_text(text, i18n_texts=None):
    """
    make text.
    reference
    - `Common Message Property <https://developers.worksmobile.com/jp/document/100500801?lang=en>`_

    :return: text content.
    """
    content = {"type": "text", "text": text}
    # Attach translations only when they were supplied.
    if i18n_texts is not None:
        content["i18nTexts"] = i18n_texts
    return content
36d53f8190ecce5147c3ae9920e8c84c3ce57b5a
611,028
import re


def get_links(html):
    """ Return a list of links from an html string

    Matches the quoted value of href=, link= and src= attributes.
    """
    pattern = r'(?:href|link|src)=["\'](.*?)["\']'
    return re.findall(pattern, html)
184b20816d14c7c4dec49f021514ee4cc4b57a96
335,634
def is_list_obj(obj):
    """Return True if obj is a list or tuple."""
    return isinstance(obj, list) or isinstance(obj, tuple)
2146505e63e143086a5780ee2e15b95fbfb5d480
505,146
def parse_fos(response, paper_id):
    """Parse the fields of study of a paper from a MAG API response.

    Args:
        response (json): Response from MAG API in JSON format. Contains
            all paper information (the "F" key holds fields of study).
        paper_id (int): Paper ID.

    Returns:
        paper_with_fos (:obj:`list` of :obj:`dict`): Matching fields of
            study and paper IDs.
        fields_of_study (:obj:`list` of :obj:`dict`): One dictionary per
            field of study with its id, display name and normalized name.
    """
    # mag_fields_of_study
    fields_of_study = [
        {"id": fos["FId"], "name": fos["DFN"], "norm_name": fos["FN"]}
        for fos in response["F"]
    ]
    # mag_paper_fields_of_study
    paper_with_fos = [
        {"field_of_study_id": fos["FId"], "paper_id": paper_id}
        for fos in response["F"]
    ]
    return paper_with_fos, fields_of_study
a31b6a32012054f123380836384bb000580da73c
531,537
def reportnulls(df):
    """
    Takes a data frame, counts the null values in each column and
    returns the counts sorted from highest to lowest.
    """
    per_column_nulls = df.isnull().sum()
    return per_column_nulls.sort_values(ascending=False)
a3dc20feeaaf0f3467de76812531f1d0b791dc01
9,810
from datetime import datetime


def parse_doc_timestamp(timestamp: str):
    """
    Parse the timestamp of an elasticsearch document.

    Accepts ISO timestamps with or without fractional seconds (always
    with a trailing 'Z').
    """
    try:
        return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
    except ValueError:
        # Fall back to the second-resolution format.
        return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
0715e7a7345fceed3055594d3f8e12e6772fc6f0
296,987
def deal_with_out_boundary(img):
    """
    Clamp out-of-range pixel values in place.

    :param img: input image (boolean-indexable array).
    :return: the same image with values clamped to [0, 255].
    """
    too_high = img > 255.
    too_low = img < 0.
    img[too_high] = 255.
    img[too_low] = 0.
    return img
9dc8549183adab719b7e9b557e9b99b8f91b18b8
665,638
def generate_arn(service, arn_suffix, region=None):
    """Returns a formatted arn for AWS.

    Keyword arguments:
    service -- the AWS service
    arn_suffix -- the majority of the arn after the initial common data
    region -- the region (can be None for region free arns)
    """
    prefix = "arn:aws"
    region_qualified = region if region else ""
    return f"{prefix}:{service}:{region_qualified}:{arn_suffix}"
53dcf55c3fb15784770d1c2d62375d1e750469f8
706,229
def percent(count, total):
    """Return a percent value, or 0 if undefined.

    Arguments may be float, int, or str.
    """
    if count and total:
        return 100 * float(count) / float(total)
    return 0
06805a4622db36d019df711df250e3e14e8cebf0
122,391
def show_seconds(value, show_units):
    """Return nicely formatted seconds.

    Returns "unknown" for None; "<value> s" when show_units is truthy;
    otherwise the value unchanged.
    """
    if value is None:
        return "unknown"
    return f"{value} s" if show_units else value
24aed8018571c233aaf94079aeee6cff34f63e26
647,436
def write_uint8(data, value, index):
    """ Write 8bit value into data string at index and return new string """
    decoded = data.decode('utf-8')  # This line is added to make sure both Python 2 and 3 works
    # Each byte is two hex characters, so the byte at `index` occupies
    # characters [index*2, index*2 + 2).
    head = decoded[:index * 2]
    tail = decoded[index * 2 + 2:]
    return '{}{:02x}{}'.format(head, value, tail)
2ec2fa0cae31fde71db15f00e3e5ecee4e56835f
473,187
import base64


def encode_to_b16(inp: str) -> bytes:
    """
    Encodes a given utf-8 string into base-16.

    >>> encode_to_b16('Hello World!')
    b'48656C6C6F20576F726C6421'
    >>> encode_to_b16('HELLO WORLD!')
    b'48454C4C4F20574F524C4421'
    >>> encode_to_b16('')
    b''
    """
    utf8_bytes = inp.encode("utf-8")
    return base64.b16encode(utf8_bytes)
b47a6cc599c3e201012d556f8e795a3427a25ebc
327,556
def recall_neg(y, y_pred):
    """Compute recall of the negative (0) class.

    Fraction of samples with y == 0 that were also predicted as 0.
    """
    negatives = y == 0
    hits = (y[negatives] == y_pred[negatives]).sum()
    return hits / float(negatives.sum())
16fd1a07031ab7094801099b078c26ce793ba31e
401,569
def find_count(substring, string):
    """Find the number of (possibly overlapping) occurrences of
    substring in string."""
    counter = 0
    pos = string.find(substring)
    # str.find returns -1 when there is no further occurrence.
    while pos != -1:
        counter += 1
        pos = string.find(substring, pos + 1)
    return counter
4cf562045c20d8acbfe372bc315c50f089925663
634,035
from typing import List
import re
import yaml


def parse_multidicts(config: str) -> List[dict]:
    """Parse multiple YAML dictionaries from string, return list of them.

    Lines beginning at column 0 start a new YAML document; comment lines
    and whitespace-only lines are skipped.
    """
    data = ""
    for line in config.split("\n"):
        if line.startswith("#") or re.fullmatch(r"^\s+$", line):
            # Skip comments and blank lines.
            # (Bug fix: removed the leftover `print("IGNORE", line)` debug
            # statement that spammed stdout on every skipped line.)
            continue
        elif re.match(r"^\S", line):
            # A non-indented line starts a new YAML document.
            data += "\n--- \n"
        data += line + "\n"
    res = list(yaml.safe_load_all(data))
    return res
fbb5b8cee0346eb6257578665ebbbcb88539f645
574,150
def xy_to_im_coords(xy, im_size):
    """
    Args:
        xy [batch_size, 2] in [-1, 1] x [-1, 1]
        im_size

    Returns:
        im_coords [batch_size, 2] in [0, im_size] x [0, im_size]
    """
    im_coords = xy.clone()
    # Rescale from [-1, 1] to [0, 1] first.
    x_frac = (xy[:, 0] + 1) / 2
    y_frac = (xy[:, 1] + 1) / 2
    im_coords[:, 0] = x_frac * im_size
    # The y axis is flipped (image origin at the top).
    im_coords[:, 1] = im_size - y_frac * im_size
    return im_coords
270085038c18dad1306199fedaf30d5826710a5a
612,507
async def prefix_wrapper_regex(re_pattern, message):
    """
    Parse the prefix from a message using a compiled regex pattern.

    This function is a coroutine.

    Parameters
    ----------
    re_pattern : compiled regex pattern
        Pattern matching the prefix at the start of the content.
    message : ``Message``
        The received message to parse the prefix from.

    Returns
    -------
    prefix : `None`, `str`
        The prefix used by the user. Returned as `None` if parsing failed.
    end : `int`
        The start of the content after the prefix. Returned as `-1` if
        parsing failed.
    """
    content = message.content
    if content is not None:
        parsed = re_pattern.match(content)
        if parsed is not None:
            return parsed.group(0), parsed.end()
    return None, -1
047d02b4544dd1edd1685e5e83944688ec5478e5
223,996
def make_testitem(nodeid, keywords, location):
    """Return JSON-serializable test item.

    Only the line number from ``location`` is kept.  The outcome defaults
    to 'passed' and is overridden later in case of failure.
    """
    item = dict(nodeid=nodeid, lineno=location[1], outcome='passed')
    # Keywords are included only when present.
    if keywords:
        item['keywords'] = keywords
    return item
cfcf0a4233f8ef73cce889cf4b674c47936deb09
111,461
def _final_is_inside_doc(line, index_of_final, doc_start_symbol):
    """Checks whether the `final` modifier is inside a doc defined by starting symbol.

    Doc starting symbols can be, for example, `//` for the ordinary comment
    and `*` for the Javadoc.

    Args:
        line: the line to check.
        index_of_final: the index where the `final` keyword is located in the line.
        doc_start_symbol: the symbol defining where the code ends and the doc starts.

    Returns:
        bool: `True` if the `final` modifier is inside a doc and `False` otherwise.
    """
    doc_start = line.find(doc_start_symbol)
    # find() returns -1 exactly when the symbol is absent from the line.
    if doc_start == -1:
        return False
    return index_of_final > doc_start
49fe526aab1e3e4655b4c2520dee22970ce078eb
501,804