Dataset columns:
    content: string (length 39 to 14.9k)
    sha1: string (length 40)
    id: int64 (0 to 710k)
def get_buildings(expo, tax, ds):
    """
    Helper method to get the buildings value.
    """
    expo_tax = expo[expo.Taxonomy == tax]
    expo_ds = expo_tax[expo_tax.Damage == ds]
    return expo_ds.Buildings.iloc[0]
59f99854cad80d5037cad09b96cf0254923f0376
34,264
def stat_ahead_behind(git, local_ref, remote_ref):
    """ Returns a tuple (ahead, behind) describing how far from the remote
    ref the local ref is.
    """
    behind = 0
    ahead = 0
    (ret, out) = git.call("rev-list", "--left-right",
                          "%s..%s" % (remote_ref, local_ref),
                          raises=False)
    if ret == 0:
        ahead = len(out.split())
    (ret, out) = git.call("rev-list", "--left-right",
                          "%s..%s" % (local_ref, remote_ref),
                          raises=False)
    if ret == 0:
        behind = len(out.split())
    return ahead, behind
7275a43a94bf8bc9d4971aa79a198cdedca39031
34,265
def bpas_log_snr_new(file_name: str, mode: str = "INIT"):
    """
    Retrieve ``new`` SNR values from bandpass log file.

    Parameters
    ----------
    file_name : str
        Log file name.
    mode : str, optional
        Bandpass stage. The default is 'INIT', that is, all stages.

    Returns
    -------
    res : dict
        Return dictionary {obs: snr}.
    """
    res = {}
    in_bpass = False
    bpas_mode_line = f"PIMA_BPASS_{mode}"

    with open(file_name) as file:
        for line in file:
            if bpas_mode_line in line:
                in_bpass = True
                continue
            if in_bpass and line.startswith("Obs:"):
                cols = line.split()
                obs = int(cols[1])
                snr_new_ind = cols.index("new:") + 1
                try:
                    res[obs] = float(cols[snr_new_ind])
                except ValueError:
                    res[obs] = 0.0

    return res
2a323067ace8b8f2965be5ac69d16206a5b93cf3
34,266
import re


def _remove_phrasing(ipt_txt, phrase, replacement):
    """
    Removes the given phrase and replaces it with the replacement.

    :param ipt_txt: string to change
    :param phrase: unwanted phrase to be removed
    :param replacement: replacement phrase
    :return: string with the phrase replaced with replacement
    """
    a = re.search(phrase, ipt_txt)
    it = 0
    while a and it <= 50:
        # Note: a.group() is re-used as a pattern here, so matches containing
        # regex metacharacters may not be replaced as expected.
        ipt_txt = re.sub(a.group(), replacement, ipt_txt)
        a = re.search(phrase, ipt_txt)
        it += 1
    if it > 50:
        raise Exception('While loop failed to terminate for phrase: ' + str(phrase))
    return ipt_txt
9b36a46f28c222d44b7b5290e0c02dded81562f7
34,270
from typing import Tuple
from typing import Optional


def get_dbt_prefix_config(config) -> Tuple[Optional[str], Optional[str]]:
    """
    Return (bucket, prefix) tuple
    """
    if config.dbtPrefixConfig:
        return (
            config.dbtPrefixConfig.dbtBucketName,
            config.dbtPrefixConfig.dbtObjectPrefix,
        )
    return None, None
762e8afc8b40db49d8fa115b33a0cfc69ea038ac
34,271
def isUsdCrate(path):
    """ Check if a file is a USD crate file by reading in the first line of
    the file. Doesn't check the file extension.

    :Parameters:
        path : `str`
            USD file path
    :Returns:
        If the USD file is a crate (binary) file.
    :Rtype:
        `bool`
    """
    # Open in binary mode: crate files are binary and may not decode as text.
    with open(path, "rb") as f:
        return f.readline().startswith(b"PXR-USDC")
aa6bc6a94552f49b83f2a17239a061d7f0672950
34,272
def xml_escape(str):
    """Replaces chars ' " < > & with xml safe counterparts"""
    if str is None:
        return None
    str = str.replace("&", "&amp;")
    str = str.replace("'", "&apos;")
    str = str.replace("\"", "&quot;")
    str = str.replace("<", "&lt;")
    str = str.replace(">", "&gt;")
    return str
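A quick usage check; note the replacement order matters, since "&" must be escaped first so the later entities are not double-escaped:

assert xml_escape("a < b & 'c'") == "a &lt; b &amp; &apos;c&apos;"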
8bec0fc289be43e84fba847d4d51bccb44df2839
34,273
def makeSquare(x, y, w, h):
    """
    Convert a rectangle ROI to square.

    @param x: left-most column
    @param y: top-most row
    @param w: width of region
    @param h: height of region
    @return: x, y, w, h of the new ROI
    """
    c_x = x + w // 2
    c_y = y + h // 2
    sz = max(w, h)
    x = c_x - sz // 2
    y = c_y - sz // 2
    return int(x), int(y), int(sz), int(sz)
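A worked example: a 100x60 rectangle at (10, 20) has center (60, 50), so the 100x100 square is re-anchored at (10, 0):

assert makeSquare(10, 20, 100, 60) == (10, 0, 100, 100)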
c6b9cae46fbcb929511802ef125a808a81087e7f
34,275
def make_relative(path: str, root_path: str) -> str:
    """Make path relative with respect to a root directory

    Arguments:
        path (str): current path.
        root_path (str): root directory path.

    Returns:
        str: relative path.
    """
    r_path = path.replace(root_path, '')
    if r_path:
        if r_path[0] == '/':
            r_path = r_path[1:]
    return r_path
04e98d971a3b1ee0a698f4b082c8cb3529b60185
34,276
def count_missense_per_gene(lines):
    """ count the number of missense variants in each gene.
    """
    counts = {}
    for x in lines:
        x = x.split("\t")
        gene = x[0]
        consequence = x[3]
        if gene not in counts:
            counts[gene] = 0
        if consequence != "missense_variant":
            continue
        counts[gene] += 1
    return counts
b20b7deca5b48af30378ee40ed03763069a45768
34,278
import datetime as dt


def make_dates_ordinal(df, dates_column):
    """
    This function converts the dates of a DataFrame column to integers, in
    order to easily fit the data to a regression model.

    More specifically, the function toordinal() returns the proleptic
    Gregorian ordinal of a date. In simple terms, datetime.toordinal()
    returns the day count from the date 01/01/01.

    Though the Gregorian calendar was not followed before October 1582,
    several computer systems follow the Gregorian calendar even for dates
    that come before October 1582. Python's date class does the same.

    Args:
        df - Pandas DataFrame
        dates_column - column of DataFrame, input as a string. All values in
            the column must be of type datetime64[ns].

    Output:
        Processed DataFrame is returned.
    """
    # Applies datetime.toordinal() to the desired column of the DataFrame.
    df[dates_column] = df[dates_column].map(dt.datetime.toordinal)

    # Returns processed DataFrame
    return df
43b84eca8c9329d6c7598a506c2703122f2fb634
34,284
def generate_filename(in_path, out_dir, bitrate, encoder):
    """
    Create a new filename based on the original video file and test bitrate.

    Parameters
    ----------
    in_path : str
        Full path of input video.
    out_dir : str
        Directory of output video.
    bitrate : int
        Video bitrate in kbit/s.
    encoder : str
        Encoder for FFmpeg to use.

    Returns
    -------
    out_path : str
        Full path of new output video.
    """
    if in_path.count('.') >= 2:
        raise Exception('Filename has multiple full stops')

    out_video = in_path.split('/')[-1].replace('.', f'_{encoder}_{int(bitrate)}.')
    out_path = out_dir + out_video

    return out_path
6e8edb9a0bfd7e1c8ae779bd5c546f0f50389415
34,286
def preconvert_bool(value, name):
    """
    Converts the given `value` to an acceptable boolean by the wrapper.

    Parameters
    ----------
    value : `int`
        The value to convert.
    name : `str`
        The name of the value.

    Returns
    -------
    value : `bool`

    Raises
    ------
    TypeError
        If `value` was not passed as `int` instance.
    ValueError
        If `value` was passed as an `int` instance, but not as `0` or `1`.
    """
    if (type(value) is bool):
        pass
    elif isinstance(value, int):
        if (value not in (0, 1)):
            raise ValueError(f'`{name}` was given as `int` instance, but neither as `0` nor `1`, got {value!r}.')
        value = bool(value)
    else:
        raise TypeError(f'`{name}` can be `bool` or `int` instance as `0` or `1`, got {value.__class__.__name__}.')

    return value
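A brief usage sketch showing the accepted and rejected inputs:

assert preconvert_bool(True, 'flag') is True
assert preconvert_bool(1, 'flag') is True
assert preconvert_bool(0, 'flag') is False
# preconvert_bool(2, 'flag') raises ValueError; preconvert_bool('yes', 'flag') raises TypeError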
d326f4d414a01f156e7ba1793d8f5a667a0e8b72
34,287
def _compare(data, k, threshold, sign=True):
    """
    Obtain indicator vector for the samples with k-th feature > threshold

    :param data: array-like of shape (n_sample, n_feat)
    :param k: int
        Index of feature in question
    :param threshold: float
        Threshold for the comparison
    :param sign: bool
        Flag, if False, return indicator of the complement
    :return: array-like of shape (n_samples,)
    """
    if sign:
        return data[:, k] > threshold
    else:
        return data[:, k] <= threshold
730582aa42dec9087de0d4869af42b3b47dd83e0
34,290
def get_common_subpeptides(list1, list2):
    """Get intersection of two sub-peptide chain lists (chain names)"""
    common_list = [sub_peptide for sub_peptide in list1 if sub_peptide in list2]
    return common_list
fa088fa56c2e77cb252af26d7cc848e99bc5dd04
34,296
def trace_driven_cache_hit_ratio(workload, cache, warmup_ratio=0.25):
    """Compute cache hit ratio of a cache under an arbitrary trace-driven
    workload.

    Parameters
    ----------
    workload : list or array
        List of URLs or content identifiers extracted from a trace. This
        list only needs to contain content identifiers and not timestamps
    cache : Cache
        Instance of a cache object
    warmup_ratio : float, optional
        Ratio of requests of the workload used to warm up the cache (i.e.
        whose cache hit/miss results are discarded)

    Returns
    -------
    cache_hit_ratio : float
        The cache hit ratio
    """
    if warmup_ratio < 0 or warmup_ratio > 1:
        raise ValueError("warmup_ratio must be comprised between 0 and 1")
    n = len(workload)
    cache_hits = 0
    n_warmup = int(warmup_ratio * n)
    n_req = 0
    for content in workload:
        if cache.get(content):
            if n_req >= n_warmup:
                cache_hits += 1
        else:
            cache.put(content)
        n_req += 1
    return cache_hits / (n - n_warmup)
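A minimal sketch of a cache object satisfying the get/put interface the function assumes (a toy unbounded cache, purely for illustration; a real Cache implementation would also evict):

class DictCache:
    """Toy unbounded cache with the get/put interface used above."""
    def __init__(self):
        self._store = set()

    def get(self, content):
        return content in self._store

    def put(self, content):
        self._store.add(content)

hit_ratio = trace_driven_cache_hit_ratio(['a', 'b', 'a', 'a'], DictCache(), warmup_ratio=0.25)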
f19da290fc4edf5284e8e52ff14ef60b2c59956b
34,299
def post_process_weird(system_mentions):
    """ Removes all mentions which are "mm", "hmm", "ahem", "um", "US" or
    "U.S.".

    Args:
        system_mentions (list(Mention)): A list of system mentions.

    Returns:
        list(Mention): the filtered list of mentions.
    """
    return sorted(
        [mention for mention in system_mentions
         if " ".join(mention.attributes["tokens"]).lower()
            not in ["mm", "hmm", "ahem", "um"]
         and " ".join(mention.attributes["tokens"]) != "US"
         and " ".join(mention.attributes["tokens"]) != "U.S."]
    )
4cc8198ff29768178859b6115d28dd958f8599be
34,311
def getCluster(self):
    """Get the HDInsight cluster object"""
    # Get all the HDInsight clusters
    hdinsight_clusters = self.hdinsight_client.clusters.list()

    # Get current cluster
    for hdinsight_cluster in hdinsight_clusters:
        if hdinsight_cluster.name == self.params["CLUSTER_DNS_NAME"]:
            return hdinsight_cluster
    return None
a6dbd63c1e85db8893d32cbe12c01b5d9df5b350
34,315
import requests


def get_remaining_rate_limit(api_key: str) -> int:
    """
    Returns your remaining rate limit by making a request to
    :ref:`Apod <extensions/apod:Apod>` and getting the header
    ``X-RateLimit-Remaining``, which is returned on every API response.

    For example, if you are using an API key different from ``DEMO_KEY``,
    you have a default hourly rate limit of 1,000 requests, according to the
    `Portal <https://api.nasa.gov/>`_. So, if you make 2 requests, your
    remaining rate limit will be equal to 998.

    **Example**

    .. code-block:: python3

        from nasawrapper.utils import get_remaining_rate_limit

        remaining = get_remaining_rate_limit("DEMO_KEY")
        print(remaining)
    """
    headers = requests.get(f"https://api.nasa.gov/planetary/apod?api_key={api_key}").headers
    return int(headers["X-RateLimit-Remaining"])
39a1b49ca9148a655cc90e25f8a1b8027f4821b5
34,318
import six


def is_iterable(x):
    """Determine whether ``x`` is a non-string iterable"""
    if isinstance(x, six.string_types):
        return False
    return hasattr(x, "__iter__")
38e1d8955c6114bd1dfa2b7b6acdbed7572b5212
34,319
def _is_fileobj(obj):
    """ Is `obj` a file-like object?"""
    return hasattr(obj, 'read') and hasattr(obj, 'write')
8d09dce78239fe134116e641d4cf5934ffc173ec
34,321
def ends_with_blank_line(block):
    """ Returns true if block ends with a blank line,
    descending if needed into lists and sublists."""
    if block.last_line_blank:
        return True
    if block.t in ['List', 'ListItem'] and block.children:
        return ends_with_blank_line(block.children[-1])
    else:
        return False
2b079351e69ad288ce17b9d42b5774814a97aed3
34,322
def clause_time(verb, api):
    """Look for an adjunctive Time element in the clause."""
    L, F = api.L, api.F
    clause_atom = L.u(verb, 'clause_atom')[0]
    time_phs = [
        p for p in L.d(clause_atom, 'phrase')
        if F.function.v(p) == 'Time'
    ]
    data = {'has_time': 0}
    if time_phs:
        data['has_time'] = 1
    return data
8a4ca0ac32970089e9a4d19dc029a91dfcc1512b
34,324
import pip
import site


def install_package(package):
    """Install a pip package to the user's site-packages directory."""
    # Note: pip.main() was removed from pip's public API in pip 10;
    # this approach only works with older pip versions.
    exitval = pip.main(['install', '--user', package])
    if exitval == 0:
        # Reload sys.path to make sure the user's site-packages are in sys.path
        site.main()
    return exitval == 0
3c9781a290e84a2414ba28ea737a774e41dc49e1
34,327
def get_hit_table(hit):
    """Create context for a single hit in the search.

    Args:
        hit (Dict): a dictionary representing a single hit in the search.

    Returns:
        (dict). The hit context.
        (list). The headers of the hit.
    """
    table_context = {
        '_index': hit.get('_index'),
        '_id': hit.get('_id'),
        '_type': hit.get('_type'),
        '_score': hit.get('_score'),
    }
    headers = ['_index', '_id', '_type', '_score']
    if hit.get('_source') is not None:
        for source_field in hit.get('_source').keys():
            table_context[str(source_field)] = hit.get('_source').get(str(source_field))
            headers.append(source_field)
    return table_context, headers
50a6d4b2304381b7cd9977213fb64f36d3485e80
34,328
def scaler(scale):
    """Create a function that scales by a specific value."""
    def inner(val):
        return val * scale
    return inner
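Since scaler returns a closure that captures scale, each call yields an independent scaling function; a quick check:

double = scaler(2)
triple = scaler(3)
assert double(5) == 10
assert triple(5) == 15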
b2e9a8efb5f0aff079fbfaf8c5326978a6990660
34,329
def _remove_empty_events(sse):
    """
    Given a sequence of synchronous events (SSE) `sse` consisting of a pool
    of pixel positions and associated synchronous events (see below), returns
    a copy of `sse` where all empty events have been removed.

    `sse` must be provided as a dictionary of type

    .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},

    where each `i`, `j` is an integer and each `S` is a set of neuron IDs.

    Parameters
    ----------
    sse : dict
        A dictionary of pixel positions `(i, j)` as keys, and sets `S` of
        synchronous events as values (see above).

    Returns
    -------
    sse_new : dict
        A copy of `sse` where all empty events have been removed.
    """
    sse_new = sse.copy()
    for pixel, link in sse.items():
        if link == set([]):
            del sse_new[pixel]
    return sse_new
0596d43cc75fdd040c5096e3ddb81277b48d7456
34,332
def remove_comments(s):
    """removes the comments starting with # in the text."""
    pos = s.find("#")
    if pos == -1:
        return s
    return s[0:pos].strip()
47e44d12f35c7b254f3f4ec001c630b5175b1e32
34,335
import re


def rmspecialchars(value):
    """
    Remove any special characters except period (.) and negative (-) from
    numeric values

    Parameters
    ----------
    value : string
        String value to remove any existing characters from

    Returns
    -------
    value : string
        String value without any special characters

    Examples
    --------
    >>> import helpers
    >>> helpers.rmspecialchars(value="*6.5_")
    '6.5'
    >>> helpers.rmspecialchars(value="ICE")
    'ICE'
    >>> helpers.rmspecialchars(value="-4.2")
    '-4.2'
    >>> helpers.rmspecialchars(value="")
    ''
    >>> helpers.rmspecialchars(value="%&!@#8.32&#*;")
    '8.32'
    """
    value = re.sub("[^A-Za-z0-9.-]+", "", value)
    return value
26de451a5cfef33f9384ce13bda9e495ae81fc5d
34,342
import re


def remove_urls(text):
    """Remove urls from text"""
    # Raw string avoids the invalid escape sequence warning for '\S' and '\.'
    return re.sub(r'(https:|http:|www\.)\S*', '', text)
db7c2fa5e96ee525aa8c2d17ddf2888a354958f7
34,347
def get_config_rules_statuses(config):
    """Retrieves compliance statuses for all of the AWS Config rules.

    Args:
        config: boto3 config client object

    Returns:
        List of AWS Config rule compliance statuses
    """
    config_rules = []
    paginator = config.get_paginator("describe_compliance_by_config_rule")
    for page in paginator.paginate():
        config_rules += page["ComplianceByConfigRules"]
    return config_rules
7c3ec7281d06966fb97193274fc62465e8910d08
34,348
def check_params(params, field_list):
    """
    Helper to validate params.

    Use this in function definitions if they require specific fields to be
    present.

    :param params: structure that contains the fields
    :type params: ``dict``

    :param field_list: list of dict representing the fields
                       [{'name': str, 'required': True/False, 'type': cls}]
    :type field_list: ``list`` of ``dict``

    :return: True or raises ValueError
    :rtype: ``bool`` or `class:ValueError`
    """
    for d in field_list:
        if not d['name'] in params:
            if 'required' in d and d['required'] is True:
                raise ValueError(("%s is required and must be of type: %s" %
                                  (d['name'], str(d['type']))))
        else:
            if not isinstance(params[d['name']], d['type']):
                raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
                    d['name'], str(d['type']), params[d['name']],
                    type(params[d['name']]))))
            if 'values' in d:
                if params[d['name']] not in d['values']:
                    raise ValueError(("%s must be one of: %s" % (
                        d['name'], ','.join(d['values']))))
            if isinstance(params[d['name']], int):
                if 'min' in d:
                    if params[d['name']] < d['min']:
                        raise ValueError(("%s must be greater than or equal to: %s" % (
                            d['name'], d['min'])))
                if 'max' in d:
                    if params[d['name']] > d['max']:
                        raise ValueError("%s must be less than or equal to: %s" % (
                            d['name'], d['max']))
    return True
8ce0ad0123c3dfed278564b6cd7fdb8545fdc7a7
34,349
def resolve_attr(obj, path):
    """A recursive version of getattr for navigating dotted paths.

    Args:
        obj: An object for which we want to retrieve a nested attribute.
        path: A dot separated string containing zero or more attribute names.

    Returns:
        The attribute referred to by obj.a1.a2.a3...

    Raises:
        AttributeError: If there is no such attribute.
    """
    if not path:
        return obj
    head, _, tail = path.partition('.')
    head_obj = getattr(obj, head)
    return resolve_attr(head_obj, tail)
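A small usage sketch (the Engine/Car classes are hypothetical, just to show the dotted-path traversal):

class Engine:
    horsepower = 150

class Car:
    engine = Engine()

car = Car()
assert resolve_attr(car, 'engine.horsepower') == 150
assert resolve_attr(car, '') is car  # empty path returns the object itself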
3951e8436b2e51f7f2815ea0ed2bda35a8fc08ce
34,350
def column_to_list(data, index):
    """
    Function to collect one column (feature) from a list of samples into a
    new list, preserving order.

    Arguments:
        data: the data sample.
        index: the index of the column in the sample.

    Returns:
        A list containing only the data of the column given by index.
    """
    column_list = []
    # Hint: you can use a for loop to iterate over the samples, pick the
    # feature by its index, and append it to a list
    for line in data:
        column_list.append(line[index])
    return column_list
55c8201e72234af57289d546078953aa07f3f1bb
34,353
def selection_sort(nums: list[float]) -> list[float]:
    """Sorts a list in-place using the Selection Sort approach.

    Time complexity: O(n^2) for best, worst, and average.
    Space complexity: total O(n), auxiliary O(1).

    Args:
        nums: A list of numbers.

    Returns:
        The sorted list.
    """
    for pivot in range(0, len(nums) - 1):
        smallest = pivot
        # Find smallest value, then swap it with the pivot
        for target in range(pivot + 1, len(nums)):
            if nums[target] < nums[smallest]:
                smallest = target
        nums[pivot], nums[smallest] = nums[smallest], nums[pivot]
    return nums
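A quick sanity check; note the sort is in-place, so the input list itself is mutated:

data = [5.0, 1.0, 4.0, 2.0, 3.0]
assert selection_sort(data) == [1.0, 2.0, 3.0, 4.0, 5.0]
assert data == [1.0, 2.0, 3.0, 4.0, 5.0]  # original list was modified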
c96c12e7361e6b617528b9cc632b4003963ea8ab
34,361
import itertools


def get_pwdist_indices(sequences):
    """
    From a list of sequences, get index tuples (i, j) with i <= j covering
    one triangle of the pairwise-comparison matrix (diagonal included).

    Parameters
    ----------
    sequences : list of strings
        list of (likely amino acid) strings

    Returns
    -------
    ind_tuples : list
        list of tuples (i, j)
    """
    ind_tuples = []
    L = len(sequences)
    for i, j in itertools.product(list(range(L)), list(range(L))):
        if i <= j:
            ind_tuples.append((i, j))
    return ind_tuples
f9ab2bbcc4481f898ff83963ad595240b95ad2b2
34,363
def shot_direct_neighbors(graph, reconstruction, shot_id):
    """Reconstructed shots sharing reconstructed points with a given shot."""
    neighbors = set()
    for track_id in graph[shot_id]:
        if track_id in reconstruction.points:
            for neighbor in graph[track_id]:
                if neighbor in reconstruction.shots:
                    neighbors.add(neighbor)
    return neighbors
9b7ff5689aa32c30b85d62e535feab15ca6916e9
34,365
def fix_iso(job):
    """
    Add couple xyz to the fix_ensemble inside LAMMPS

    Args:
        job (LAMMPS): Lammps job object

    Returns:
        LAMMPS: Return updated job object
    """
    job.input.control["fix___ensemble"] = (
        job.input.control["fix___ensemble"] + " couple xyz"
    )
    return job
376bfb69abc2d59f42e766f4c9bda198468046ee
34,366
def get_version_v2(uri):
    """
    Canned response nova v2 version.

    Cf: http://developer.openstack.org/api-ref-compute-v2.1.html
        #listVersionsv2.1
    """
    return {"version": {"status": "SUPPORTED",
                        "updated": "2011-01-21T11:33:21Z",
                        "links": [{"href": uri, "rel": "self"},
                                  {"href": "http://docs.openstack.org/",
                                   "type": "text/html",
                                   "rel": "describedby"}],
                        "min_version": "",
                        "version": "",
                        "media-types": [{
                            "base": "application/json",
                            "type": "application/vnd.openstack.compute+json;version=2"
                        }],
                        }
            }
a34b5a0e1ee3e055dd3e559e9e4a79f611654414
34,368
def read_iba_file(file_path):
    """
    :param file_path: absolute file path
    :return: a list of strings, each item being a row in the text file
    :rtype: list
    """
    # Use a context manager so the file handle is closed after reading.
    with open(file_path, 'r') as f:
        text = f.read()
    return text.replace('\r', '').split('\n')
f536f2b2d520799fe4f81825055315ed3a1f9d6d
34,370
def format_messages(messages_dict):
    """
    Formats input messages in Polymer format to native Android format.
    This means replacing hyphens with underscores in keys and escaping
    apostrophes in values.
    """
    formatted_messages = {}
    for k, v in messages_dict.items():
        formatted_messages[k.replace("-", "_")] = v.replace("'", "\\'")
    return formatted_messages
00fe6bfb76ce8e146a16a3bc3f108fc2d1277908
34,372
from functools import reduce


def getAllTextWords(texts):
    """ Returns a set of all the words in the given texts """
    return reduce(lambda x, y: x | set(y), texts.values(), set())
52e475b180031b0abf67bad3fe148b9509baaed1
34,375
def get_tensor_dependencies(tensor):
    """
    Utility method to get all dependencies (including placeholders) of a
    tensor (backwards through the graph).

    Args:
        tensor (tf.Tensor): The input tensor.

    Returns:
        Set of all dependencies (including needed placeholders) for the
        input tensor.
    """
    dependencies = set()
    dependencies.update(tensor.op.inputs)
    for sub_op in tensor.op.inputs:
        dependencies.update(get_tensor_dependencies(sub_op))
    return dependencies
1c92b9fda9e5ca563bc43638f74f09c0bc16f264
34,378
def _get_vals_wo_None(iter_of_vals):
    """ Returns a list of values without Nones. """
    return [x for x in iter_of_vals if x is not None]
e020ec4049217c5656c74bed6d20bfcdb8a89e78
34,379
def read_rescue(spark):
    """
    Reads animal rescue CSV data from HDFS

    Args:
        spark (SparkSession): active Spark session

    Returns:
        spark DataFrame: animal rescue data
    """
    return spark.read.csv("/training/animal_rescue.csv",
                          header=True, inferSchema=True)
6260b6914e2b1d8747791a1b2531793cd3c82781
34,384
def to_bool(val):
    """Conservative boolean cast - don't cast lists and objects to True,
    just existing booleans and strings."""
    if val is None:
        return None
    if val is True or val is False:
        return val
    if isinstance(val, str):
        if val.lower() == 'true':
            return True
        elif val.lower() == 'false':
            return False
        raise ValueError("Could not convert string {val} to boolean. Expecting string to either say 'true' or 'false' (not case-sensitive).".format(val=val))
    raise ValueError("Could not convert {val} to boolean. Expect either boolean or string.".format(val=val))
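A few illustrative calls covering the accepted and rejected inputs:

assert to_bool('True') is True
assert to_bool('FALSE') is False
assert to_bool(None) is None
# to_bool('yes') and to_bool([1, 2]) both raise ValueError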
21934eb7ab28d53e2c415da988eee2cd3af2d2cc
34,386
def node_value(node, input_values, neuron_outputs):  # PROVIDED BY THE STAFF
    """
    Given
     * a node (as an input or as a neuron),
     * a dictionary mapping input names to their values, and
     * a dictionary mapping neuron names to their outputs
    returns the output value of the node.
    This function does NOT do any computation; it simply looks up
    values in the provided dictionaries.
    """
    if isinstance(node, str):
        # A string node (either an input or a neuron)
        if node in input_values:
            return input_values[node]
        if node in neuron_outputs:
            return neuron_outputs[node]
        raise KeyError("Node '{}' not found in either the input values or neuron outputs dictionary.".format(node))

    if isinstance(node, (int, float)):
        # A constant input, such as -1
        return node

    raise TypeError("Node argument is {}; should be either a string or a number.".format(node))
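A small lookup example (the node names are hypothetical):

inputs = {'x': 3.0}
outputs = {'n1': 0.25}
assert node_value('x', inputs, outputs) == 3.0
assert node_value('n1', inputs, outputs) == 0.25
assert node_value(-1, inputs, outputs) == -1  # constants pass through unchanged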
4088211ce025b6e868c1a9ecffdd7955d5adf168
34,389
def check_domain_whitelist(string, whitelist):
    """
    Returns True if a white listed domain appears in the string, otherwise
    returns False.

    :param string: A string possibly containing a domain name.
    :param whitelist: A list of strings containing white listed domains.
    :return: Bool
    """
    for i in whitelist:
        if i in string:
            return True
    return False
e92376a001fee8365ade4b829dd5daa7079063d8
34,394
def summarize_reduced_diffs(reduced_diffs):
    """
    Print a human-readable summary of the relevant reduced diff data
    """
    buf = ""

    ### General summary
    if 'sum_data_units_read_gibs' not in reduced_diffs:
        # 2**-40 converts bytes to TiB
        read_gibs = reduced_diffs.get('sum_data_units_read_bytes', 0) * 2.0**(-40)
        write_gibs = reduced_diffs.get('sum_data_units_written_bytes', 0) * 2.0**(-40)
    else:
        read_gibs = reduced_diffs.get('sum_data_units_read_gibs', 0)
        write_gibs = reduced_diffs.get('sum_data_units_written_gibs', 0)

    buf += "Read:    %10.2f TiB, %10.2f MOps\n" % (
        read_gibs,
        reduced_diffs.get('sum_host_read_commands', 0) / 1000000.0)
    buf += "Written: %10.2f TiB, %10.2f MOps\n" % (
        write_gibs,
        reduced_diffs.get('sum_host_write_commands', 0) / 1000000.0)
    buf += "WAF:     %+10.4f\n" % reduced_diffs.get('max_write_amplification_factor', 0)

    return buf
8e3be22d11a3ae9f011edfa0fb28fdb859c7a980
34,395
import torch


def copy_model_to_gpu(model, loss=None):
    """
    Copies a model and (optional) loss to GPU and enables cudnn benchmarking.
    For multi-GPU training, wrap the model in DistributedDataParallel for
    distributed training.
    """
    if not torch.backends.cudnn.deterministic:
        torch.backends.cudnn.benchmark = True
    model = model.cuda()

    if loss is not None:
        loss = loss.cuda()
        return model, loss
    else:
        return model
4fa61832eacdf3ab055e931fd9f952406f4aeca4
34,397
def get_strings(filename):
    """
    Read strings from files generated by an IDAPython script and store
    them in a list for further processing.
    """
    list_strings = []
    # 'U' (universal newlines) mode was removed in Python 3.11; plain 'r'
    # already handles newline translation.
    with open(filename, 'r') as f:
        list_strings = [line[:-1] for line in f.readlines()]
    return list_strings
1f01bce01bd601e9bf25c8673fdcc97443384719
34,398
from typing import Any
import hashlib


def hex(data: Any) -> str:
    """Get sha512."""
    if isinstance(data, str):
        data = data.encode("utf-8")
    return hashlib.sha512(data).hexdigest()
aad95dbcf69245d41b23115fd022f99340de060d
34,406
import difflib


def compare_configs(cfg1, cfg2):
    """
    This function, using the unified diff function, will compare two config
    files and identify the changes. '+' or '-' will be prepended in front of
    the lines with changes.

    :param cfg1: old configuration file path and filename
    :param cfg2: new configuration file path and filename
    :return: text with the configuration lines that changed. The return will
        include the configuration for the sections that include the changes
    """
    # open the old and new configuration files
    with open(cfg1, 'r') as f1:
        old_cfg = f1.readlines()
    with open(cfg2, 'r') as f2:
        new_cfg = f2.readlines()

    # compare the two specified config files {cfg1} and {cfg2}
    d = difflib.unified_diff(old_cfg, new_cfg, n=9)

    # create a diff_list that will include all the lines that changed
    # create a diff_output string that will collect the generator output
    # from the unified_diff function
    diff_list = []
    diff_output = ''

    for line in d:
        diff_output += line
        if line.find('Current configuration') == -1:
            if line.find('Last configuration change') == -1:
                if (line.find('+++') == -1) and (line.find('---') == -1):
                    if (line.find('-!') == -1) and (line.find('+!') == -1):
                        if line.startswith('+'):
                            diff_list.append('\n' + line)
                        elif line.startswith('-'):
                            diff_list.append('\n' + line)

    # process the diff_output to select only the sections between '!'
    # characters for the sections that changed, replace the empty '+' or '-'
    # lines with space
    diff_output = diff_output.replace('+!', '!')
    diff_output = diff_output.replace('-!', '!')
    diff_output_list = diff_output.split('!')

    all_changes = []

    for changes in diff_list:
        for config_changes in diff_output_list:
            if changes in config_changes:
                if config_changes not in all_changes:
                    all_changes.append(config_changes)

    # create a config_text string with all the sections that include changes
    config_text = ''
    for items in all_changes:
        config_text += items

    return config_text
e0ad56de3f601a4f04347036106eb4de112afa83
34,410
def _limit_description_to_key(description):
    """Translate between the description of the Text widget and the
    corresponding key and value pos in the fitarg dictionary.

    Parameters
    ----------
    description : str
        The string describing the widget

    Returns
    -------
    key : string
        The key in the fitarg dictionary
    attr : (0, 1)
        The entry position in the value of the fitarg dictionary
    """
    # Splits parameter name and _min or _max off
    # attr is either min or max
    key, attr = description.split('_')

    # Add limit so it is the correct key for self.fitarg
    key = 'limit_' + key

    if attr == 'min':
        attr = 0
    elif attr == 'max':
        attr = 1
    else:
        raise NotImplementedError(
            "Oops, there is something wrong: "
            "attr was %s but min/max was expected" % attr
        )
    return key, attr
2fe559dbce1c61ac0c298e3132ca188b52a4b9e2
34,413
import warnings
import collections
import re


def read_quants_gianninas(fobj):
    """Read and parse custom file format of physical stellar parameters from
    Gianninas et al 2014, [1]_.

    Parameters
    ----------
    fobj : file object
        An opened file object to the text file with parameters.
        Example file format:
        line 0: 'Name SpT Teff errT log g errg '...
        line 1: '========== ===== ======= ====== ===== ====='...
        line 2: 'J1600+2721 DA6.0 8353. 126. 5.244 0.118'...

    Returns
    -------
    dobj : collections.OrderedDict
        Ordered dictionary with parameter field names as keys and
        parameter field quantities as values.

    Examples
    --------
    >>> with open('path/to/file.txt', 'r') as fobj:
    ...     dobj = read_quants_gianninas(fobj)

    References
    ----------
    .. [1] http://adsabs.harvard.edu/abs/2014ApJ...794...35G
    """
    # Read in lines of file and use second line (line number 1, 0-indexed)
    # to parse fields. Convert string values to floats. Split specific values
    # that have mixed types (e.g. '1.000 Gyr').
    lines = []
    for line in fobj:
        lines.append(line.strip())
    if len(lines) != 3:
        warnings.warn(
            ("\n" +
             "File has {num_lines} lines. File is expected to only have 3 lines.\n" +
             "Example file format:\n" +
             "line 0: 'Name SpT Teff errT log g'...\n" +
             "line 1: '========== ===== ======= ====== ====='...\n" +
             "line 2: 'J1600+2721 DA6.0 8353. 126. 5.244'...").format(
                num_lines=len(lines)))
    dobj = collections.OrderedDict()
    for mobj in re.finditer('=+', lines[1]):
        key = lines[0][slice(*mobj.span())].strip()
        value = lines[2][slice(*mobj.span())].strip()
        try:
            value = float(value)
        except ValueError:
            try:
                value = float(value.rstrip('Gyr'))
            except ValueError:
                pass
        if key == 'og L/L':
            key = 'log L/Lo'
        dobj[key] = value
    return dobj
0b0750686cb127c81d80566a7d6e40d48570fd72
34,414
import math


def getCosineSetSim(concepts_1: set, concepts_2: set):
    """ Returns Cosine Set Similarity for the given concept sets """
    intersection = len(concepts_1.intersection(concepts_2))
    return intersection / (math.sqrt(len(concepts_2) * len(concepts_1)))
f6c03fa83d55c19f6d5953ef84a4d9233262386d
34,420
import functools


def dec_busy(func):
    """
    Decorator to set the amp/lcd controller state to busy while executing
    function.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        self.busy = True
        try:
            # try/finally ensures the busy flag is cleared even if func raises
            output = func(self, *args, **kwargs)
        finally:
            self.busy = False
        return output
    return wrapper
730a7cbe34f8323098aca441142c1c3c5fd1788c
34,421
def is_run(part):
    """
    >>> is_run([4])
    False
    >>> is_run([1, 2, 3])
    True
    >>> is_run([3, 2, 1])
    False
    """
    if len(part) != 3:
        return False
    return part[0] + 2 == part[1] + 1 == part[2]
a7719c1a02a8a8a573e2bc69052711ec551b100f
34,422
def form_tree_definition(proj, formname):
    """Turn the form (instrument) definition in the project xml file into a
    python dictionary tree.

    In the XML structure items are defined in different places and there are
    various places where 'ref' items have to be looked up to find 'def'.
    Turn it into a nice neat:

    FormDef
        ItemGroup(s)
            Item(s)
    """
    """ Form Def
    <FormDef OID="Form.ob_template" Name="Ob Template" Repeating="No" redcap:FormName="ob_template">
        <ItemGroupRef ItemGroupOID="ob_template.timestamp_template" Mandatory="No"/>
        ...
    </FormDef>
    <ItemGroupDef OID="ob_template.timestamp_template" Name="Ob Template" Repeating="No">
        <ItemRef ItemOID="timestamp_template" Mandatory="No" redcap:Variable="timestamp_template"/>
        <ItemRef ItemOID="title_measurements_template" Mandatory="No" redcap:Variable="title_measurements_template"/>
        ...
    </ItemGroupDef>
    <ItemGroupDef OID="ob_template.supqn_tired_template" Name="Confirm and Submit" Repeating="No">
        ...
    </ItemGroupDef>
    ...
    """
    tree = {}
    xpath = r"./Study/MetaDataVersion/FormDef/[@OID='%s']" % formname
    root = proj.find(xpath, proj.nsmap)
    tree = {**tree, **root.attrib}
    # tree['ItemGroupRef'] = []
    # tree['ItemGroupDef'] = []
    tree['ItemGroup'] = []
    # Iterate over the element directly; getchildren() was deprecated and
    # removed from ElementTree in Python 3.9.
    for igr in root:
        igr_oid = igr.attrib['ItemGroupOID']
        igd_xpath = r"./Study/MetaDataVersion/ItemGroupDef/[@OID='%s']" % igr_oid
        igd = proj.find(igd_xpath, proj.nsmap)
        itemgroup = {**igr.attrib, **igd.attrib}
        itemgroup['Item'] = []
        for itemref in igd:
            item_oid = itemref.attrib['ItemOID']
            itmd_xpath = r"./Study/MetaDataVersion/ItemDef/[@OID='%s']" % item_oid
            itm = proj.find(itmd_xpath, proj.nsmap)
            item = {**itm.attrib}
            itemgroup['Item'].append(item)
        tree['ItemGroup'].append(itemgroup)
    return tree
d1a2eb638eed9443af11ec933942f2179e20c79b
34,423
def parse_one_line(line):
    """
    Get one line of a file in this format
    16:17:266:2864 3:4:194:2443
    and return a couple of lists
    [16, 17, 266, 2864], [3, 4, 194, 2443]
    """
    line_trial, line_success = line.split(' ')
    one_trial = [int(s) for s in line_trial.split(':')]
    one_success = [int(s) for s in line_success.split(':')]
    return one_trial, one_success
8397e9fa742f42af5348d0bab0d459e98991b95f
34,426
def get_employee_sick_leave_days(employee, month):
    """Returns the sick leave days for the given employee and month."""
    return month.get_employee_sick_leave_days(employee)
171a17828c834718e79267bd1524194c66904bf7
34,429
import requests


def get_tnstats(url, cookie):
    """
    Retrieve Tunneled Node's aggregated statistics

    :param url: base url
    :param cookie: Cookie value
    :return: TN Statistics JSON
    :Example:

    result = get_tnstats(base_url, sessionid)
    """
    header = {'cookie': cookie}
    get_tn_stats = requests.get(url + "tunneled_node_server/ports/aggregate_statistics",
                                headers=header, verify=False, timeout=2)
    return get_tn_stats.json()
2b182029d5e7fabebcb8df5fdd5ce42bf83ae1cd
34,432
import re


def normalize(text: str) -> str:
    """
    Replace all whitespaces in the text with a single space.
    For example "  foo   bar " is converted to "foo bar".
    """
    return re.sub(r"\s+", " ", text).strip()
b8015d6004b57ff13c9d78acac4ff479ea327a11
34,438
def check_digit13(firsttwelvedigits):
    """Check sum ISBN-13."""
    # minimum checks
    if len(firsttwelvedigits) != 12:
        return None
    try:
        int(firsttwelvedigits)
    except Exception:  # pragma: no cover
        return None
    # checksum
    val = sum(
        (i % 2 * 2 + 1) * int(x) for i, x in enumerate(firsttwelvedigits))
    thirteenthdigit = 10 - int(val % 10)
    if thirteenthdigit == 10:
        thirteenthdigit = '0'
    return str(thirteenthdigit)
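A worked example with the well-known ISBN 978-0-306-40615-7: the alternating 1/3 weights give 9+21+8+0+3+0+6+12+0+18+1+15 = 93, and 10 - (93 % 10) = 7:

assert check_digit13("978030640615") == "7"
assert check_digit13("12345") is None  # wrong length is rejected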
434cb3e71ecba49a0e5a13c73a6808c8c5e80446
34,446
import torch


def batch_colout(X: torch.Tensor, p_row: float, p_col: float) -> torch.Tensor:
    """Applies ColOut augmentation to a batch of images, dropping the same
    random rows and columns from all images in a batch.

    Args:
        X: Batch of images of shape (N, C, H, W).
        p_row: Fraction of rows to drop (drop along H).
        p_col: Fraction of columns to drop (drop along W).

    Returns:
        torch.Tensor: Input batch tensor with randomly dropped columns and rows.
    """
    # Get the dimensions of the image
    row_size = X.shape[2]
    col_size = X.shape[3]

    # Determine how many rows and columns to keep
    kept_row_size = int((1 - p_row) * row_size)
    kept_col_size = int((1 - p_col) * col_size)

    # Randomly choose indices to keep. Must be sorted for slicing
    kept_row_idx = sorted(torch.randperm(row_size)[:kept_row_size].numpy())
    kept_col_idx = sorted(torch.randperm(col_size)[:kept_col_size].numpy())

    # Keep only the selected row and columns
    X_colout = X[:, :, kept_row_idx, :]
    X_colout = X_colout[:, :, :, kept_col_idx]
    return X_colout
1ff4311127749afc9eb56797b76d18c706ebf378
34,450
def paginate(query, page, count):
    """
    Returns the items given the count and page specified

    :param query:
    :param page:
    :param count:
    """
    return query.paginate(page, count)
ce6bc412de90b9333c8febbf75a35b2abfa3e0ae
34,453
def _get_s3_presigned_put_url(s3_client, bucket, filepath, md5sum, lifetime_sec):
    """
    Creates a pre-signed URL for S3-like backends, e.g. Minio.

    Note that since our production object storage backend is GCS, we do not
    enforce or require any Content-MD5 value.

    :param: s3_client: an initialized S3 client. We will use this to create
        the presigned PUT url.
    :param: bucket: the bucket where the user can PUT their object.
    :param: filepath: the file path inside the bucket that the user can PUT
        their object.
    :param: md5sum: the base64-encoded MD5sum of the object the user is
        planning to PUT. This is ignored for this function and added solely
        to maintain API compatibility with other private presigned URL
        functions.
    :param: lifetime_sec: how long before the presigned URL expires, in
        seconds.
    """
    # S3's PUT Object parameters:
    # https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
    method = "put_object"
    fields = {
        "Bucket": bucket,
        "Key": filepath,
    }

    response = s3_client.generate_presigned_url(
        ClientMethod=method,
        Params=fields,
        ExpiresIn=lifetime_sec,
    )

    return response
d020281a0ce4d71eb3492d5f8a7a693b35d03c32
34,454
def reverse_bits(n):
    """
    Reverses the bits in an integer.

    :param n: an integer value.
    :return: the integer value of the reversed bits.
    """
    # bin(n) yields e.g. '0b110'; reversing gives '011b0', and slicing off
    # the trailing 'b0' leaves the reversed bit string '011'.
    return int(bin(n)[::-1][0:-2], 2)
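Tracing one call: bin(6) is '0b110'; reversed it is '011b0'; dropping the last two characters leaves '011', which parses to 3:

assert reverse_bits(6) == 3   # 0b110 -> 0b011
assert reverse_bits(1) == 1
assert reverse_bits(8) == 1   # 0b1000 -> 0b0001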
e19979b487c84ab4a7925ca64aaebeddd2fb0655
34,463
from math import isnan


def clamp(x: float, lower=0., upper=1.) -> float:
    """
    Clamps a float to within a range (default [0, 1]).
    """
    if x <= lower:
        return lower
    elif x >= upper:
        return upper
    elif isnan(x):
        raise FloatingPointError('clamp is undefined for NaN')
    return x
e1af0a40b4f0e9ecb0f917faa4b8ab5f4c9b2ac5
34,467
import socket


def get_free_ports(n):
    """Based on https://gist.github.com/dbrgn/3979133"""
    ports = []
    sockets = []
    for i in range(n):
        s = socket.socket()
        s.bind(('', 0))
        port = s.getsockname()[1]
        ports.append(port)
        sockets.append(s)
    for s in sockets:
        s.close()
    return ports
eff425d1c1c10267271bbf821de7294c58d03765
34,471
import random


def rand_trial(tgt_num, min_itv, max_itv):
    """
    Generate a list of numbers which would be used to make sure that there
    are some filler sentence(s) between any 2 target sentences.

    Parameters
    ----------
    tgt_num: Int
        The number of target sentences.
    min_itv: Int
        The minimum number of filler sentences between 2 target sentences.
    max_itv: Int
        The maximum number of filler sentences between 2 target sentences,
        max_itv >= 1. Note that "tgt_num*max_itv <= the number of filler
        sentences".

    Returns
    ----------
    li : List
        "li" contains 2 types of numbers, one is the number zero, the other
        is numbers that are greater than or equal to one. Zero indicates a
        target sentence. Numbers greater than or equal to 0 indicate the
        number of filler sentences between 2 targets.
        e.g. "[2, 0, 1, 0, 1, 0, 2, 0, 2, 0]" would help generate a trial
        list in which 2 filler sentences are at the beginning of the trial
        list, then 1 target sentence, then 1 filler sentence, etc.
    """
    li = []
    for i in range(tgt_num):
        # Randomly choose the interval between 2 target sentences
        rand_itv = random.randint(min_itv, max_itv)
        li.append(rand_itv)
        li.append(0)
    return li
8d16b943804556c80d84a9050ce89cc2e4cf82dc
34,474
def qual_class_name(cls):
    """ Returns the fully qualified class name (module + class name). """
    return cls.__module__ + "." + cls.__name__
ff433a30616851fdb0951fba4295230f9fa01907
34,478
def WDM_suppression(m, m_c, a_wdm, b_wdm, c_wdm):
    """
    Suppression function from Lovell et al. 2020

    :return: the factor that multiplies the CDM halo mass function to give
        the WDM halo mass function

    dN/dm (WDM) = dN/dm (CDM) * WDM_suppression

    where WDM_suppression is (1 + (a_wdm * m_c / m)^b_wdm)^c_wdm
    """
    ratio = a_wdm * m_c / m
    factor = 1 + ratio ** b_wdm
    return factor ** c_wdm
e9ad362bc63be58a465d0e11a0bfb45ad3f1bc2d
34,483
def remove_chars(value, char_list=''):
    """
    Remove specific chars from a string

    :param value: Value to be formatted
    :type value: String
    :param char_list: String containing the characters you want to remove
    :type char_list: String
    :returns: String without the given characters
    :rtype: String

    Example:

    >>> remove_chars('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse id lacus rhoncus, varius lorem vel, congue quam.', '.,:;-_/*')
    'Lorem ipsum dolor sit amet consectetur adipiscing elit Suspendisse id lacus rhoncus varius lorem vel congue quam'
    """
    return ''.join(ch for ch in value if ch not in char_list)
2090ad14efff19974af8aa7bd68cdee11536e056
34,485
def rule(index):
    """Convert decimal index to binary rule list."""
    return [int(x) for x in list(format(index, '08b'))]
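For instance, index 30 (the classic Rule 30 cellular automaton) expands to its 8-bit binary form 00011110:

assert rule(30) == [0, 0, 0, 1, 1, 1, 1, 0]
assert rule(255) == [1, 1, 1, 1, 1, 1, 1, 1]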
33b167503e9caac338d747240dcc07b0d40bc057
34,487
def get_sections_url(request):
    """
    A fixture that returns URL for two different section endpoints:

    - /v1/hearing/<hearing id>/sections/
    - /v1/section/?hearing=<hearing id>
    """
    return {
        'nested': lambda hearing: '/v1/hearing/%s/sections/' % hearing.id,
        'root': lambda hearing: '/v1/section/?hearing=%s' % hearing.id
    }[request.param]
1ecae93851fe50f26e6f6bb0fc0ae6f386ddb626
34,490
import torch


def charbonnier_loss(pred: torch.Tensor,
                     target: torch.Tensor,
                     q: float = 0.2,
                     eps: float = 0.01) -> torch.Tensor:
    """Generalized Charbonnier loss function between output and ground truth.

    The loss function is

    .. math::
        loss = ((u - u_{gt})^2 + (v - v_{gt})^2 + eps)^q

    Generalized Charbonnier loss was used in LiteFlowNet when fine tuning,
    with eps=0.01 q=0.2.

    Args:
        pred (torch.Tensor): output flow map from flow_estimator
            shape(B, 2, H, W).
        target (torch.Tensor): ground truth flow map shape(B, 2, H, W).
        q (float): the exponent in charbonnier loss.
        eps (float): small constant for numerical stability when fine-tuning
            model. Defaults to 0.01.

    Returns:
        Tensor: loss map with the shape (B, H, W).
    """
    assert pred.shape == target.shape, \
        (f'pred shape {pred.shape} does not match target '
         f'shape {target.shape}.')

    diff = torch.add(pred, -target)

    loss_map = (torch.sum(diff * diff, dim=1) + eps)**q  # shape (B, H, W).

    return loss_map
481de25b3d379f0c5f6c6f79f353166bc1bc1194
34,500
from typing import Optional


def format_op_params(params: Optional[list]) -> str:
    """format operation params nicely for display"""
    if params is not None:
        return "(" + ", ".join(map(str, params)) + ")" if len(params) > 0 else ""
    return ""
4620011e1f12ac0b0165e591eaf50ed539142cdf
34,504
import json


def build_address_map(tilegrid_file):
    """
    Loads the tilegrid and generates a map (baseaddr, offset) -> tile name(s).

    Parameters
    ----------
    tilegrid_file:
        The tilegrid.json file.

    Returns
    -------
    A dict with lists of tile names.
    """
    address_map = {}

    # Load tilegrid
    with open(tilegrid_file, "r") as fp:
        tilegrid = json.load(fp)

    # Loop over tiles
    for tile_name, tile_data in tilegrid.items():

        # No bits or bits empty
        if "bits" not in tile_data:
            continue
        if not len(tile_data["bits"]):
            continue

        bits = tile_data["bits"]

        # No bus
        if "CLB_IO_CLK" not in bits:
            continue

        bus = bits["CLB_IO_CLK"]

        # Make the address as integers
        baseaddr = int(bus["baseaddr"], 16)
        offset = int(bus["offset"])
        address = (baseaddr, offset,)

        # Add tile to the map
        if address not in address_map:
            address_map[address] = []
        address_map[address].append(tile_name)

    return address_map
c5debba6159f847267b228cefdbe3ae88015c887
34,506
def get_word(word_type):
    """Get a word from a user and return that word."""
    # The lower() function converts the string to lowercase before testing it
    if word_type.lower() == 'adjective':
        # Use 'an' in front of 'adjective'
        a_or_an = 'an'
    else:
        # Otherwise, use 'a' in front of 'noun' or 'verb'
        a_or_an = 'a'
    return input('Enter a word that is {0} {1}: '.format(a_or_an, word_type))
44447b8478e857956b5a328c514d632a8ba90bb3
34,509
import io
import array


def part1(stdin: io.TextIOWrapper, stderr: io.TextIOWrapper) -> int:
    """Find two numbers that sum to 2020, then multiply them."""
    numbers = array.array('i')
    iterations = 0

    for line in stdin:
        iterations += 1
        number = int(line.strip())
        pair = 2020 - number
        try:
            numbers.index(pair)
            stderr.write(f"{number} + {pair} in {iterations} iterations\n")
            return number * pair
        except ValueError:
            numbers.append(number)

    raise Exception("No matches found.")
cd0b88df8d6dbb7cd6aefff70a4fe68a43ee4c8c
34,515
from string import printable


def unprintable(mystring):
    """return only the unprintable characters of a string"""
    return ''.join(
        character for character in mystring
        if character not in printable
    )
bb48580d525d1e829f5b4b33cd4c0e540aa3a21a
34,516
def can_review(user):
    """Checks if a user can review a translation"""
    return user.permissions['perm_review']
ab0dcb8cb0372c1421e6b9171e01e2be7113e3f1
34,519
def select_frames(frames, frames_per_video):
    """
    Select a certain number of frames determined by the number
    (frames_per_video)

    :param frames: list of frames
    :param frames_per_video: number of frames to select
    :return: selection of frames
    """
    step = len(frames) // frames_per_video
    if step == 0:
        step = 1
    first_frames_selection = frames[::step]
    final_frames_selection = first_frames_selection[:frames_per_video]
    return final_frames_selection
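For example, picking 3 frames out of 10 uses a stride of 10 // 3 = 3 and then truncates to the requested count:

frames = list(range(10))
assert select_frames(frames, 3) == [0, 3, 6]   # [0, 3, 6, 9] truncated to 3
assert select_frames([1, 2], 5) == [1, 2]      # step falls back to 1 when too few frames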
d00ca8381e919cb38858d236186c4ebbd2b8a064
34,520
def doc_freqs(docs) -> dict:
    """
    Takes in a list of spacy Doc objects and returns a dictionary of
    frequencies for each token over all the documents.

    E.g. {"Aarhus": 20, "the": 2301, ...}
    """
    res_dict = {}
    for doc in docs:
        # create empty list to check whether token appears multiple times in doc
        duplicates = []
        for token in doc:
            if token.text not in duplicates:
                # if token is not in dict; add token as key and 1 as value
                if token.text not in res_dict:
                    res_dict[token.text] = 1
                # if the token is already in dict; add 1 to the value of that token
                else:
                    res_dict[token.text] += 1
                duplicates.append(token.text)
    return res_dict
2bdeaf2150b4c2ecb8f240984d6d5f7eb210c585
34,524
def get_pubkey(elem) -> str:
    """Returns the 'key' attribute from a tag"""
    return elem.get('key')
2f11661dee73d858dbd8c37b5045442c192f8799
34,525
def round_tat(num):
    """Changes the turnaround time from a float into natural language
    hours + minutes."""
    remain = num % 1
    if num.is_integer():
        if int(num) == 1:
            return str(int(num)) + " hour."
        else:
            return str(int(num)) + " hours."
    else:
        if num < 1:
            return str(int(remain * 60)) + " minutes."
        elif num - remain == 1:
            return str(int(num - remain)) + " hour and " + \
                str(int(remain * 60)) + " minutes."
        else:
            return str(int(num - remain)) + " hours and " + \
                str(int(remain * 60)) + " minutes."
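A few sample conversions:

assert round_tat(1.0) == "1 hour."
assert round_tat(0.5) == "30 minutes."
assert round_tat(1.5) == "1 hour and 30 minutes."
assert round_tat(2.25) == "2 hours and 15 minutes."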
cccf14355b7fc090df855a45b88e9b8562449f9a
34,530
def score_pop(
    population_value, min_population_acceptable=10, max_population_acceptable=30
):
    """
    :param population_value: population value to be scored
    :param min_population_acceptable: minimum population value in the scoring
        range (saturates at 0)
    :param max_population_acceptable: maximum population value in the scoring
        range (saturates at 1)
    :return: a score value between 0 and 1
    """
    if population_value > max_population_acceptable:
        score_population = 1.0
    elif population_value < min_population_acceptable:
        score_population = 0.0
    else:
        # linear scaling between 0 and 1 for between min and max acceptable
        score_population = (population_value - min_population_acceptable) / (
            max_population_acceptable - min_population_acceptable
        )
    return score_population
074837e892cea618705139513ea03889aa76fac3
34,532
def _first_upper(k):
    """Returns string k, with the first letter being upper-case"""
    return k[0].upper() + k[1:]
f28bfa9f6457c4c0b3d75704e44486f31f247831
34,535
def has_samples(args):
    """Returns whether there's some kind of sample option in the command"""
    return args.sample or args.samples or args.sample_tag
47728a442d90538e72f47b72882ba22dd102d61a
34,537
def graph_order(graph):
    """Get graph order."""
    vertices = graph.get_vertices()
    order = len(vertices.keys())
    return order
1ea6fea10bdd80033a67fac1e5326f6a80239c58
34,538
import json


def make_json(data_dict, simple=None):
    """Make well formatted JSON for insertion into cascade word docs.

    JSON will be enclosed by '$' like: '${"key":"value"}$'
    JSON will be on one line (simple) if it contains only one key/value pair,
    or if the argument simple==True
    """
    if simple is None:
        # Default to simple as long as the JSON contains only one item and
        # that item is not a dict.
        simple = False
        if len(data_dict) <= 1:
            for key in data_dict:
                if not isinstance(data_dict[key], dict):
                    simple = True
    if simple:
        return '${}$'.format(json.dumps(data_dict, separators=(', ', ':')))
    return '${}$'.format(json.dumps(data_dict, indent=4,
                                    separators=(',', ':'))).replace('${\n    ', '${')
c7b8400995ed105f88de4bb6c3e22c5b17aedd4a
34,546
import collections


def get_sorted_transitive_dependencies(top, deps_func):
    """Gets the list of all transitive dependencies in sorted order.

    There should be no cycles in the dependency graph (crashes if cycles
    exist).

    Args:
        top: A list of the top level nodes
        deps_func: A function that takes a node and returns a list of its
            direct dependencies.
    Returns:
        A list of all transitive dependencies of nodes in top, in order (a
        node will appear in the list at a higher index than all of its
        dependencies).
    """
    # Find all deps depth-first, maintaining original order in the case of ties.
    deps_map = collections.OrderedDict()

    def discover(nodes):
        for node in nodes:
            if node in deps_map:
                continue
            deps = deps_func(node)
            discover(deps)
            deps_map[node] = deps

    discover(top)
    return list(deps_map.keys())
7e758410c785e7f1b6df0dbd2a3571a402b95641
34,547
def r1_p_r2(R1, R2):
    """ Calculate the Resistance of a parallel connection """
    return R1 * R2 / (R1 + R2)
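This is the familiar product-over-sum form of 1 / (1/R1 + 1/R2); two equal resistors in parallel halve the resistance:

assert r1_p_r2(100, 100) == 50.0
assert r1_p_r2(100, 50) == 100 * 50 / 150  # about 33.33 ohms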
3c98e8a24020e76b008d151a2611fa85856b8417
34,549
def enum2str(enumType, enum):
    """
    Translates a pokerth_pb2 enum type to a string.

    :param enumType: enum type class
    :param enum: the enum element of the type
    :return: identifier string of enum
    """
    return [k for k, v in enumType.items() if v == enum][0]
46de0fcd78f2e8b450ede050679f9e776b5a0bf9
34,550
def get_data_from_context(context):
    """Get the django paginator data object from the given *context*.

    The context is a dict-like object. If the context key ``endless`` is
    not found, an exception is raised.
    """
    try:
        return context['endless']
    except KeyError:
        raise Exception('Cannot find endless data in context.')
304fd11f75ec72f703e03e7a8431c613a3648f47
34,555
def window_width(g_core: float, t_in: float, t_out: float, g: float,
                 t_r: float, g_r: float) -> float:
    """
    Calculates the winding window width, where:

        * g_core - is the distance between the core and the inner winding in [mm]
        * t_in - is the thickness of the inner winding in [mm]
        * t_out - is the thickness of the outer winding in [mm]
        * g - is the main gap in [mm]
        * t_r - is the width of the regulating winding in [mm]
        * g_r - is the distance between the outer winding and the regulating
          winding in [mm]

    g is considered as a phase distance at the end of the windings
    """
    return round(g_core + t_in + t_out + g + t_r + g_r + g, 1)
fd86eeb816c75b8e7d940d5321259c62abc0ec50
34,556
def build_key_name(app_name, os_name, file_name):
    """
    Creates key using app name, os and filename

    :param app_name: app name
    :param os_name: OS the app is written for
    :param file_name: the name of the file
    :return: S3 bucket key for given app/os/filename combination
    """
    return (app_name.replace(" ", "").lower() + "/" +
            os_name.replace(" ", "").lower() + "/" +
            file_name)
845e6c2734ec105c6a9bbcd5032bc7569063c297
34,558
import re


def name_in_string(string: str, name: str) -> bool:
    """Checks if string contains name.

    Args:
        string (str): input searchable string
        name (str): input name

    Examples:
        >>> assert name_in_string("Across the rivers", "chris")
        >>> assert not name_in_string("Next to a lake", "chris")
        >>> assert not name_in_string("A crew that boards the ship", "chris")
        >>> assert name_in_string("thomas", "Thomas")
    """
    # The characters of `name` must appear in order (case-insensitively),
    # though not necessarily adjacently, in `string`.
    return bool(re.compile(f'(?i){".*".join(name)}').search(string))
168abc4ebfd078a2d9220bcea3c0efd2e0e79091
34,559
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple


def _validate_reply(
    reply: Dict[str, List[str]], performatives_set: Set[str]
) -> Tuple[bool, str]:
    """
    Evaluate whether the reply structure in a protocol specification is valid.

    :param reply: Reply structure of a dialogue.
    :param performatives_set: set of all performatives in the dialogue.

    :return: Boolean result, and associated message.
    """
    performatives_set_2 = performatives_set.copy()
    for performative in reply.keys():
        if performative not in performatives_set_2:
            return (
                False,
                "Performative '{}' specified in \"reply\" is not defined in the protocol's speech-acts.".format(
                    performative,
                ),
            )
        performatives_set_2.remove(performative)

    if len(performatives_set_2) != 0:
        return (
            False,
            "No reply is provided for the following performatives: {}".format(
                performatives_set_2,
            ),
        )

    return True, "Reply structure is valid."
2243e60edd6497a7f699676be5fe765711db4134
34,563