content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def is_quote(code, idx=0):
    """Return True if ``code[idx]`` is an unescaped double-quote.

    A quote is escaped only when preceded by an ODD number of
    backslashes: ``\\"`` is escaped, but ``\\\\"`` is not (there the
    backslash escapes the previous backslash, not the quote).

    :param code: string to inspect
    :param idx: position to test (defaults to 0)
    :return: bool
    """
    if not (0 <= idx < len(code) and code[idx] == '"'):
        return False
    # Count the run of backslashes immediately preceding the quote;
    # an even count means the quote itself is not escaped.
    backslashes = 0
    scan = idx - 1
    while scan >= 0 and code[scan] == '\\':
        backslashes += 1
        scan -= 1
    return backslashes % 2 == 0
054354ea0372df436d4c2c8a0c52ec98c2b16986
623,947
import click


def option_output_file(required: bool = False):
    """Build the shared ``--output-file`` click option decorator.

    :param required: whether the option must be supplied by the user
    :return: a ``click.option`` decorator configured for a writable file
    """
    return click.option(
        "--output-file",
        "--of",
        "output_file",
        type=click.File("w"),
        metavar="string",
        required=required,
        help="name of file to write to",
    )
346d25d615f3b2aab0e5af590ecae83b14762ad0
333,770
def format_POS(token, light, flat):
    """Build the part-of-speech output dict for a single token.

    :param token: spaCy ``Token``-like object (needs ``text``, ``lemma_``,
        ``ent_type_``, ``tag_``, ``pos_``, ``dep_`` attributes)
    :param light: drop the "lemma" and "NE" fields when True
    :param flat: drop the "arc" and "modifiers" fields when True
    :return: dict describing the token
    """
    node = {
        "word": token.text,
        "lemma": token.lemma_,  # trigger
        "NE": token.ent_type_,  # trigger
        "POS_fine": token.tag_,
        "POS_coarse": token.pos_,
        "arc": token.dep_,
        "modifiers": [],
    }
    if light:
        del node["lemma"]
        del node["NE"]
    if flat:
        del node["arc"]
        del node["modifiers"]
    return node
e28085669ec0188d45bb6cce1ae2b163490f4bcd
414,921
import ast def _build_st_write_call(nodes): """Build AST node for `__streamlit__._transparent_write(*nodes)`.""" return ast.Call( func=ast.Attribute( attr="_transparent_write", value=ast.Name(id="__streamlit__", ctx=ast.Load()), ctx=ast.Load(), ), args=nodes, keywords=[], kwargs=None, starargs=None, )
e6263af7378ed69019fbd3090ccc6c3ffca44b74
381,066
def _round_pow_2(value): """Round value to next power of 2 value - max 16""" if value > 8: return 16 if value > 4: return 8 if value > 2: return 4 return value
35458a3a894df8fc579d027f72a6328ba8b8061d
674,973
import gc
from typing import List


def get_child_ids(obj: object) -> List[int]:
    """Return the ids of the objects directly referenced by *obj*.

    Children are enumerated with :func:`gc.get_referents`.

    :param obj: any object
    :return: list of ``id()`` values, one per referent
    """
    return list(map(id, gc.get_referents(obj)))
09e205293ada98080a23196ad74d8e0c65fad1b6
81,383
def is_valid_esd_json(data: dict, is_train_document: bool = False) -> bool:
    """Validate that a parsed JSON dict follows the ESD input format.

    :param data: the parsed JSON data (``json.loads`` of the input file)
    :param is_train_document: whether the document is a train/test
        document (default False); those additionally require "abstract"
    :return: True when the data is a valid input file
    :raises ValueError: when a required field is missing or malformed
    """
    if 'text' not in data:
        raise ValueError('The "text" field is not found in the data.')
    if is_train_document and 'abstract' not in data:
        raise ValueError('The "abstract" field is not found in the train/test data.')
    if 'entities' not in data:
        raise ValueError('The "entities" field is not found in the train/test data.')
    if type(data['entities']) != list:
        raise ValueError('The "entities" field should be a list.')
    for entity in data['entities']:
        if 'salience' not in entity:
            raise ValueError('All entities should have a "salience" field.')
        if 'entity' not in entity:
            raise ValueError('All entities should have an "entity" field.')
    return True
7175c3b1b5512ef0ec51e2e4e569981f1578b38f
221,644
import re


def identify_entity(identity):
    """Classify an OpsGenie identity string as an id, username or name.

    *identity* is expected to look like ``"<identity>-<type>"`` (the
    type follows the LAST dash).

    :return: ``{<form_key>: form, "type": type}`` where form_key is one
        of "username"/"id"/"name", or ``None`` when the string cannot
        be split into identity and type.
    """
    parts = identity.rsplit("-", 1)
    if len(parts) != 2:
        print("Skipping {} because it's not formed as '<identiy>-<type>'".format(identity))
        return None
    form, type_ = parts
    email_pattern = re.compile(r"^[^@]+@[^@]+$")
    id_pattern = re.compile(r"^[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}$")
    if type_ == "user" and email_pattern.match(form):
        # e.g. "trinity@opsgenie.com-user"
        form_key = "username"
    elif id_pattern.match(form):
        # e.g. "4513b7ea-3b91-438f-b7e4-e3e54af9147c-<type>"
        form_key = "id"
    else:
        # Anything else is treated as a plain name.
        form_key = "name"
    return {form_key: str(form), "type": str(type_)}
0b6c9619ca2ff37fd3acbc4493ea2a9aa72b1980
338,275
def compute_files_to_download(client_hashes, server_hashes):
    """Decide which files to fetch from the server and which to delete.

    A file is downloaded when it is missing on the client or its md5
    differs from the server's; it is marked for deletion when the
    server no longer has it.

    :param client_hashes: dict of {filename: md5-string} on the client
    :param server_hashes: dict of {filename: md5-string} on the server
    :return: [to_dload, to_delete] filename lists
    """
    # Missing on client -> .get() returns None, which never equals a hash.
    to_dload = [
        name for name, digest in server_hashes.items()
        if client_hashes.get(name) != digest
    ]
    to_delete = [name for name in client_hashes if name not in server_hashes]
    return [to_dload, to_delete]
f67220d82852edbba751ca794457adfa347483be
328,443
def is_iri(string):
    """Heuristically test whether *string* looks like an IRI.

    Used for finding type IRIs in the schema. Only http(s) URLs are
    recognised because that is all the schema contains.
    """
    return string.startswith('http')
c503d36555ce46b6fcecb4d151f3ef7d43818a6d
621,030
def IsAnyScopeFlagSpecified(flag_values):
    """Returns True if any scope related flags are present, False otherwise."""
    return any(
        scope in flag_values and flag_values[scope].present
        for scope in ('zone', 'region', 'global')
    )
dbba18123ac65158a2050225eca096d715125efc
426,133
def get_count(self):
    """Return the stored "count" value, defaulting to 1 when absent."""
    return self.get("count", 1)
643064b29fff0b65a39f2eefb4f35d7468db09ae
50,298
from typing import Dict, Iterable, Union

import torch


def get_params_to_average(params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]):
    """Collect the parameters that need averaging.

    Accepts either ``model.parameters()`` output or
    ``optimizer.param_groups``; entries whose ``.grad`` is ``None`` are
    filtered out.

    Args:
        params: The parameters of a model or parameter groups of an optimizer.

    Raises:
        NotImplementedError: for unsupported element types.
    """
    selected = []
    for entry in params:
        if isinstance(entry, torch.nn.Parameter):
            # model.parameters() style input
            if entry.grad is not None:
                selected.append(entry)
        elif isinstance(entry, dict):
            # optimizer.param_groups style input
            selected.extend(p for p in entry["params"] if p.grad is not None)
        else:
            raise NotImplementedError(f"Parameter input of type {type(entry)} is not supported")
    return selected
e467ab0ec789868d09da8c13f18d3d7430891549
321,937
def element_junction_tuples(junction_elements=True, branch_elements=True, res_elements=False):
    """Utility function: tuples of element names and their junction columns.

    :param junction_elements: include junction elements (sink, source, ...)
    :param branch_elements: include branch elements (pipe, pump, ...)
    :param res_elements: additionally include the "res_" result tables
    :return: set of (element_name, column_name) tuples
    """
    ejts = set()
    if junction_elements:
        ejts.update((elm, "junction") for elm in ("sink", "source", "ext_grid"))
    if branch_elements:
        branches = ("pipe", "valve", "pump", "circ_pump_mass",
                    "circ_pump_pressure", "heat_exchanger")
        for elm in branches:
            ejts.add((elm, "from_junction"))
            ejts.add((elm, "to_junction"))
    if res_elements:
        # valves have no result table; materialise the list first so we
        # do not mutate the set while iterating it
        no_res = ("valve",)
        ejts.update(("res_" + elm, col) for elm, col in list(ejts) if elm not in no_res)
    return ejts
2f7927ecc04d9df24581ec10fdfa41a27d08776f
327,761
def depth_name(depth_index):
    """Return the depth-map filename for *depth_index* (e.g. 0-48).

    The index is zero-padded to four digits; this matches the original
    two-branch formatting for 0-99 and extends naturally beyond.
    """
    return 'depth_map_{:04d}.pfm'.format(depth_index)
8602c03ecf6e505437f550f60e3135c76e8d92e6
388,882
def line_range(lines, ind1, comment_flag='#'):
    """Find the end of a range of data lines within a line list.

    Starting after index *ind1*, scan for the next comment line (a line
    beginning with *comment_flag*, '#' by default); lines that are not
    comments are assumed to be data lines.

    Parameters
    ----------
    lines : list of input lines (e.g. from readlines())
    ind1 : starting index in the list of lines
    comment_flag : optional string that marks comment lines

    Returns
    -------
    n1 : index of the next comment line, or ``len(lines)`` when no
         later comment line exists (so the result always bounds a range)
    """
    flag_len = len(comment_flag)
    # slice-compare is equivalent to the original substring test because
    # a string can only "contain" an equal-length string by equality
    return next(
        (idx for idx in range(ind1 + 1, len(lines))
         if lines[idx][:flag_len] == comment_flag),
        len(lines),
    )
6e845f3d44c4093e8e403eaf41317cf14b0299c4
695,543
def parse_bool(obj):
    """Return true if the object represents a truth value, false otherwise.

    For bool and numeric objects, uses Python's built-in bool function.
    For str objects (including str subclasses, which the original
    ``type(obj) is str`` check missed), checks the lowercased string
    against a list of possible truth values.

    Args:
        obj: object to determine boolean value of.

    Returns:
        Boolean value per Python truthiness for non-strings; for
        strings, True iff the value is one of the accepted truth words.
    """
    if isinstance(obj, str):
        # accepted truthy strings, any case
        return obj.lower() in ("true", "1", "yes", "t", "on")
    return bool(obj)
f8115f725871eed1d6cfd6020bd40fb9bdbcd11e
361,721
def generate_state(td_count: int, node_count: int, records_count: int, ttl: int):
    """Utility method to generate a state dict of traffic directors.

    :param td_count: number of td entries to create
    :param node_count: nodes per entry
    :param records_count: records per entry
    :param ttl: ttl stored on every entry
    :return: dict keyed by td FQDN
    """
    state = {}
    for i in range(td_count):
        fqdn = f"td{i}.example.com"
        state[fqdn] = {
            "name": fqdn,
            "nodes": [f"node{n}" for n in range(node_count)],
            "records": [
                {"hostname": f"rec{r}", "weight": 100}
                for r in range(records_count)
            ],
            "ttl": ttl,
        }
    return state
ee4acb68f282926c301000e1f14f791a5b7eb8a9
268,512
def mimic_dict(filename):
    """Return a mimic dict mapping each word to the list of words following it.

    The file is lower-cased and split on whitespace; the empty-string
    key holds the word that starts the text. The file handle is closed
    via a context manager even if reading raises.
    """
    with open(filename, "r") as handle:
        words = handle.read().lower().split()
    followers = {}
    previous = ''
    for word in words:
        followers.setdefault(previous, []).append(word)
        previous = word
    return followers
de7225dd2bbdfae887a7d5477bd4403ba8dd5f5f
235,068
def unpack_vlq(data):
    """Return the first VLQ number and byte offset from a list of bytes.

    Each byte contributes its low 7 bits; a clear high bit terminates
    the number. Raises IndexError when the data ends mid-number.
    """
    value, offset = 0, 0
    while True:
        byte = data[offset]
        offset += 1
        value = (value << 7) | (byte & 0x7f)
        if byte & 0x80 == 0:
            return value, offset
b459327c50d806e5c4bd251fa3f849a8680b7f27
109,857
def remove_duplicates(original_list):
    """Return a copy of *original_list* with duplicates removed.

    The first occurrence of each item is kept and order is preserved.
    Membership is tested against a list, so unhashable items work too.

    :param original_list: list that may contain duplicate items
    :return: list without duplicates
    """
    unique = []
    for element in original_list:
        if element in unique:
            continue
        unique.append(element)
    return unique
08726db4b84f44a93e7a948247bbaeec30a3fc20
159,910
def count_valid_passports(passports, validation_method):
    """Count the passports accepted by *validation_method*.

    :param passports: list of passports
    :param validation_method: callable returning truthy for valid ones
    :return: int
    """
    return sum(1 for passport in passports if validation_method(passport))
e9559cdb19b445964b5cea9212eb554fc124e740
207,945
import re


def trim_stopwords(s, stop_words):
    """Case-insensitive removal of stop phrases/words from a string

    >>> trim_stopwords('Depártment de Testing Test royale', ['depártment de', 'royale'])
    'Testing Test'
    """
    for stop in stop_words:
        if ' ' in stop:
            # Phrase: remove as a unit. re.escape keeps regex
            # metacharacters in the phrase from being interpreted.
            s = re.sub(re.escape(stop), '', s, flags=re.IGNORECASE)
        else:
            # Individual word: filter into a new list. (The original
            # popped from the list while iterating it, which skipped
            # the word following each removal.)
            s = ' '.join(w for w in s.split() if w.lower() != stop)
    return s.strip()
21b161ace4dd0ea288719856c03156fc8b08ec3a
46,592
import re def normalize_keys(dict_, lowercase=True, separator='_'): """ Recoursively changes keys to their normalized version: - replaces any special symbol by `separator` - lowercases (if necessary). Example: In [1]: input_ = {"Content-Type": "text/html", ...: "Last-Modified": { ...: "Day-Of-Week": "Sat", ...: "Day": 4, ...: "Month": "Apr" ...: } ...: } Out[1]: {'content_type': 'text/html', 'last_modified': {'day_of_week': 'Sat', 'day': 4, 'month': 'Apr'}} """ normalized = {} for key, val in dict_.items(): new_key = re.sub('[^A-Za-z0-9]+', separator, key) new_key = new_key.lower() if lowercase else new_key if isinstance(val, dict): val = normalize_keys(val, lowercase, separator) normalized[new_key] = val return normalized
a15a3f4ccfe860af2ca7e758b293b297f8b0c3b3
687,036
import yaml


def load_conf(conf_path: str) -> dict:
    """Load the configuration from the given YAML file.

    :param conf_path: path to the configuration file
    :return: configuration loaded as dict
    """
    with open(conf_path, 'rt') as in_fd:
        return yaml.load(in_fd, Loader=yaml.FullLoader)
d9c72069ccd3f40b71f661acdd0db0a6eb9b8675
118,394
def total_duration_parser(line):
    """Parse lines of the form ``Total duration: 5248.89s``.

    :param line: string to parse
    :return: float duration in seconds, or 0.0 when unparseable
    """
    try:
        return float(line.rstrip('s')[len('Total duration:'):].strip())
    except Exception:
        # Malformed/unexpected input deliberately degrades to 0 seconds.
        # ``except Exception`` (not bare ``except``) still lets
        # KeyboardInterrupt/SystemExit propagate.
        return 0.
30cc8da46293654b8d4001dbd708160ba208bacb
30,367
def _dynamic_inputs_creation(dynamic_io_settings): """Creates a list of inputs names, supplied to the Task class.""" parameters = dynamic_io_settings["TaskSettings"]["Parameters"] if parameters["ModelType"] == "LDA": return [f"TF({parameters['DataSource']})"] elif parameters["ModelType"] == "NMF": return [f"TFIDF({parameters['DataSource']})"]
1a1dfb8e71a3ce00015cdff6253350f7bdeeb482
433,894
def get_f_min(f_max, cents_per_value, v_min, v_max):
    """Return the minimum frequency fitting a cents-per-value scale.

    Cents are a logarithmic unit of tone intervals
    (https://en.wikipedia.org/wiki/Cent_(music)).

    Parameters
    ----------
    f_max : float
        Maximum frequency.
    cents_per_value : float
        A y scale parameter in units of cents/y value.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        Minimum frequency.
    """
    total_cents = (v_max - v_min) * cents_per_value
    return f_max / (2 ** (total_cents / 1200))
c2e92d0f2aa63f8553d85d9fb0bdcf79156deff1
663,533
import math


def fractionalPart(floater):
    """Return the fractional part of a number (fractionalPart(1.5) == .5)."""
    # float() guards against being handed an int (or other numeric).
    fractional, _integral = math.modf(float(floater))
    return fractional
85cd53af31f33052b612a3d6196bcd9f6deb58a8
85,514
import hashlib


def compute_md5_hash(file, buf_size=65536):
    """Utility method to generate a md5 hash of file.

    Reads the already-open file object in *buf_size* chunks so large
    files never sit in memory whole.
    """
    digest = hashlib.md5()
    while chunk := file.read(buf_size):
        digest.update(chunk)
    return digest.hexdigest()
fe01ee97a7b85a7365c9f9a9540d0e304cdaf24a
189,955
import re


def fmt_cmd_template(cmd):
    """Collapse whitespace runs in the cmd template *cmd* so it can be
    used e.g. for an os.system call.
    """
    # Raw string: "\s" in a plain literal is an invalid escape
    # (DeprecationWarning, SyntaxWarning on newer Pythons).
    return re.sub(r"\s+", " ", cmd)
4020f9ae36e5714c1c41a5d608fb719e3f125b06
112,992
def Not(query):
    """The negation of a query"""
    return f'(NOT {query})'
9bddebbd3c3c8e4c2e8e74b6c87049f0e4706dcb
635,044
import re


def template_to_regex(template):
    """Convert a string template to a parsable regular expression.

    Given a data_path_format string template, parse the template into
    a parsable regular expression string for extracting each %VAR%
    variable.

    Supported %VAR% variables:
      * %EXPERIMENT_ID% - experiment ID (str)
      * %INSTRUMENT_SAT% - instrument/satellite ID (str)
      * %DATA_TYPE% - data type (str)
      * %YEAR% / %YEAR4% - full year (int, 4 digits)
      * %YEAR2% - last two digits of year (int, 2 digits)
      * %MONTH% / %MONTH2% - integer month (int, 2 digits)
      * %DAY% / %DAY2% - integer day (int, 2 digits)
      * %HOUR% / %HOUR2% - integer hour (int, 2 digits)

    Args:
        template (str): A string with the data_path_format template to
            be converted to a parsable regular expression.

    Returns:
        tuple: Tuple with the first element being a parsable regular
        expression string, and the second element being a list of the
        detected %VAR%s, in order found (from left to right).
    """
    # Initialize the final template regex string
    template_final = ""
    # Initialize the final matching group list
    matching_groups = []

    # Define the variables to replace, with their corresponding regex
    # capturing patterns.
    regex_replace_dict = {
        "%EXPERIMENT_ID%":  r'(.*)',
        "%INSTRUMENT_SAT%": r'(.*)',
        "%DATA_TYPE%":      r'(.*)',
        "%YEAR%":           r'(\d{4})',
        "%YEAR4%":          r'(\d{4})',
        "%YEAR2%":          r'(\d{2})',
        "%MONTH%":          r'(\d{2})',
        "%MONTH2%":         r'(\d{2})',
        "%DAY%":            r'(\d{2})',
        "%DAY2%":           r'(\d{2})',
        "%HOUR%":           r'(\d{2})',
        "%HOUR2%":          r'(\d{2})',
    }

    # Search for %VAR% variables with a %VAR% matching pattern.
    # NOTE(review): the trailing lazy group '(.*?)' always matches the
    # empty string, so match[2] is '' and any literal text AFTER the
    # last %VAR% never reaches the output regex — confirm intended.
    matches = re.findall(r'(.*?)(%.*?%)(.*?)', template)

    # Loop through each match!
    for match in matches:
        # Grab the %VAR% part in the match.
        # (match returns a tuple with 3 elements - the misc string on
        # the left side, the %VAR% part in the middle, and the misc
        # string on the right side)
        template_part = match[1]

        # Check to see if this %VAR% is in our replacement table!
        # (An unrecognised %VAR% falls through unchanged and is NOT
        # recorded as a matching group.)
        if template_part in regex_replace_dict.keys():
            # Add it to the matching group list for future indexing
            # reference!
            matching_groups.append(template_part)

            # Then make the variable to regex replacement.
            template_part = template_part.replace(template_part, regex_replace_dict[template_part])

        # Finally, assemble the string back together.
        template_final += re.escape(match[0]) + template_part + re.escape(match[2])

    # Return the regex template and the list of matching groups!
    return (template_final, matching_groups)
b24378fab8eb80568b5dcdaaaba80d602c28bcab
668,631
import csv


def get_usernames_from_csv(filename):
    """Return a list of usernames (first CSV column).

    Lines starting with '#' are treated as comments and skipped.
    """
    with open(filename, 'r') as csvfile:
        data_lines = (line for line in csvfile if not line.startswith('#'))
        return [record[0] for record in csv.reader(data_lines)]
f51ea5d0c5ae50c1edc31c00438ab26cc7cc6224
123,881
def _add_reciprocal_relations(triples_df): """Add reciprocal relations to the triples Parameters ---------- triples_df : Dataframe Dataframe of triples Returns ------- triples_df : Dataframe Dataframe of triples and their reciprocals """ # create a copy of the original triples to add reciprocal relations df_reciprocal = triples_df.copy() # swap subjects and objects cols = list(df_reciprocal.columns) cols[0], cols[2] = cols[2], cols[0] df_reciprocal.columns = cols # add reciprocal relations df_reciprocal.iloc[:, 1] = df_reciprocal.iloc[:, 1] + "_reciprocal" # append to original triples triples_df = triples_df.append(df_reciprocal) return triples_df
8ca96fc2162d80041c21db8e6b81718781784ffe
29,307
def get_key(dict, value):
    """
    Return the first key in the dictionary "dict" that contains the
    received value "value".

    Parameters
    ==========
    dict: Dict[Any, Any]
        Dictionary to be used.
    value: Any
        Value to be found in the dictionary.

    Raises
    ======
    ValueError
        If no key maps to "value".
    """
    # Single pass over the items instead of materialising two parallel
    # lists and calling .index(). (The parameter shadows the builtin
    # ``dict``; the name is kept for keyword-caller compatibility.)
    for key, val in dict.items():
        if val == value:
            return key
    raise ValueError("%r is not in the dictionary values" % (value,))
fd0c3f2d0421941e2320cbc2b83593ab64dab23f
267,420
def normalize_color(color):
    """Gets a 3-tuple of RGB ints and return a 3-tuple of unity floats"""
    red, green, blue = color[0], color[1], color[2]
    return (red / 255.0, green / 255.0, blue / 255.0)
c242f7467c9125a7cb189ecf2b0a690352d39525
318,959
def index_containing_substring(vars_list, var):
    """
    Return the index of the first string in vars_list that contains the
    substring var, or -1 when no element contains it.

    :param vars_list: list of strings.
    :param var: string.
    :return: integer.
    """
    return next((i for i, s in enumerate(vars_list) if var in s), -1)
805832afba21e52a4eb22304e09b77071ed21b03
228,904
import json


def name2dict(name) -> dict:
    """Convert a JSON-ish, single-quoted key/value string to a dict.

    Example:
        >>> name2dict("'penPair':'z1r','serif':'1'")
        {'penPair': 'z1r', 'serif': '1'}

    Args:
        name: str like ``"'penPair':'z1r'"``, or None (yields {}).
    """
    if name is None:
        return {}
    return json.loads('{' + name.replace("'", '"') + '}')
85895a49062f4bd458be8721ba9d69c2327f5ba1
337,395
def last_power_2(n: int) -> int:
    """Return the largest power of 2 less than or equal to n."""
    exponent = n.bit_length() - 1
    return 1 << exponent
b89602d9568388648a72595d58e485dccbd59d20
508,902
def char2cid(char, char2id_dict, OOV="<oov>"):
    """
    Transform single character to character index.

    :param char: a character
    :param char2id_dict: a dict mapping characters to indexes
    :param OOV: token representing Out-of-Vocabulary characters
    :return: int index of the character (or of the OOV token)
    """
    key = char if char in char2id_dict else OOV
    return char2id_dict[key]
4a872cb12f11ed8ba2f3369749a3a2f356b7b97e
696,081
def _escapeWildCard(klassContent): """ >>> _escapeWildCard('') '' >>> _escapeWildCard(':*') '' >>> _escapeWildCard(':Object') ':Object' """ return klassContent.replace(':*', '')
537b3969dabb46c3a093dacc3ba49a58833f8c18
701,336
def time_per_line(desired_wpm, words_per_line):
    """Seconds available per line at the target reading speed.

    Args:
        desired_wpm (int): the target words-per-minute value you wish
            to achieve
        words_per_line (int): how many words per line your test book
            contains on average

    Returns:
        seconds (float)
    """
    # fraction of a minute one line takes: words_per_line / (3 * wpm)
    fraction_of_minute = words_per_line / (3 * desired_wpm)
    return fraction_of_minute * 60
7a9eeb90f6b684bf9c83ad24c119cd8e2f4fd202
330,568
def sum_of_multiples(below, multiples):
    """Returns sum of multiples below given maximum.

    Each multiple is counted once even when it is shared by several
    bases (a set collects the distinct values).
    """
    seen = set()
    for base in multiples:
        seen.update(range(base, below, base))
    return sum(seen)
8f9a16c406c9c7fdc352a9b462ca3acfc6b76b1e
582,464
def _get_header_info(line): """ Get number of sequences and length of sequence """ header_parts = line.split() num_seqs, length = list(map(int, header_parts[:2])) is_interleaved = len(header_parts) > 2 return num_seqs, length, is_interleaved
be7fc522fb8d195af6e45c93e42867aecbd23fb6
20,026
def flatten(sequence):
    """Given a sequence possibly containing nested lists or tuples,
    flatten the sequence to a single non-nested list of primitives.

    >>> flatten((('META.INSTRUMENT.DETECTOR', 'META.SUBARRAY.NAME'), ('META.OBSERVATION.DATE', 'META.OBSERVATION.TIME')))
    ['META.INSTRUMENT.DETECTOR', 'META.SUBARRAY.NAME', 'META.OBSERVATION.DATE', 'META.OBSERVATION.TIME']
    """
    result = []
    for item in sequence:
        if isinstance(item, (list, tuple)):
            result.extend(flatten(item))
        else:
            result.append(item)
    return result
6ca3fe470757dc4081c4387d917d5e285c2a3f06
12,560
import re


def preprocess(sent):
    """
    Substitute multiple whitespace characters with one space, drop soft
    hyphens, and strip non latin-1 symbols.

    :param sent: string to process
    :return: the cleaned string, encodable as latin-1
    """
    sent = re.sub(r"\s+", " ", sent)
    sent = sent.replace("\xad", "")
    return sent.encode('latin-1', 'ignore').decode('latin-1')
121f2e48c2d1bbf5613c360bb94814fa9169479e
531,447
import hashlib


def get_digest(value):
    """Return a hashlib digest algorithm from a string.

    Non-string values are returned unchanged (assumed to already be a
    digest constructor); strings are matched case-insensitively against
    the supported algorithm names.

    :raises ValueError: for unsupported algorithm names
    """
    if not isinstance(value, str):
        return value
    name = value.lower()
    if name not in ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'):
        raise ValueError("Invalid digest algorithm: %s" % name)
    return getattr(hashlib, name)
d513a81b62a81a2c479f713bb3fd97595268132b
564,425
def reverce_8bit(data_8bit: int) -> int:
    """Reverse the bit order of an 8-bit value.

    Classic swap ladder: swap adjacent bits, then bit pairs, then the
    two nibbles.

    Parameters
    ----------
    data_8bit : int
        8-bit value (0-255).

    Returns
    -------
    int
        The bit-reversed value.
    """
    swapped = ((data_8bit & 0b01010101) << 1) | ((data_8bit & 0b10101010) >> 1)
    swapped = ((swapped & 0b00110011) << 2) | ((swapped & 0b11001100) >> 2)
    return ((swapped & 0b00001111) << 4) | (swapped >> 4)
3dcdf58f771848284c58897ea7e2f4727d518e61
171,791
def solution(n: int = 1000) -> int:
    """
    Returns ∑ r_max for 3 <= a <= n as explained above

    >>> solution(10)
    300
    >>> solution(100)
    330750
    >>> solution(1000)
    333082500
    """
    total = 0
    for a in range(3, n + 1):
        total += 2 * a * ((a - 1) // 2)
    return total
c6c6e990bf5aaac8dc809aa82429a805e7241f3c
617,251
from typing import Counter, Hashable, Iterable


def major_vote(all_votes: Iterable[Iterable[Hashable]]) -> Iterable[Hashable]:
    """
    For the given iterable of object iterations, return an iterable of
    the most common object at each position of the inner iterations.

    E.g.: for [[1, 2], [1, 3], [2, 3]] the return value would be [1, 3]
    as 1 and 3 are the most common objects at the first and second
    positions respectively.

    :param all_votes: an iterable of object iterations
    :return: the most common objects in the iterations (the major vote)
    """
    winners = []
    for position_votes in zip(*all_votes):
        winners.append(Counter(position_votes).most_common()[0][0])
    return winners
5a4705f1c6fc401ac1629239c2929cdc0b67e28d
674,876
def get_international(start_country, end_country):
    """True when the trip is international (from Canada to USA or vice versa),
    i.e. the start and end countries differ."""
    return not (start_country == end_country)
eafefad5d8a0558f13a86e682ca68dcce5c47b40
522,284
def Bps_to_mbps(speed):
    """
    Utility function to convert Bytes per second to Megabits per second.
    """
    megabytes_per_second = speed / 1000 / 1000
    return 8 * megabytes_per_second
ca4126135e3d22f5667da5068d96df620b0418dc
373,840
def prompt_vialnumber(first_vial, last_vial):
    """Prompt the user for a vial number within [first_vial, last_vial].

    Re-prompts iteratively (the original recursed per bad input, which
    can exhaust the stack on persistent bad input) until a valid
    integer in range is entered.
    """
    while True:
        raw = input('Vial? ')
        try:
            number = int(raw)
            if first_vial <= number <= last_vial:
                return number
            raise ValueError('Number out of range')
        except ValueError:
            print('Please enter an integer between %g and %g' % (first_vial, last_vial))
c2198d6a3f17928faf922419af75598533863962
148,948
def forecastTimes(hrStr, d):
    """For wind forecast, create a string showing which forecast this
    is and its valid times.

    Args:
        hrStr (str): hour of forecast (one of ``06``, ``12``, ``24``)
        d (dict): wind message carrying 'for_use_from_time' and
            'for_use_to_time' datetime values.

    Returns:
        str: winds-times string, e.g. ``06 11/02-11/09`` (day/hour).
    """
    start = d['for_use_from_time']
    end = d['for_use_to_time']
    return '{} {:02d}/{:02d}-{:02d}/{:02d}'.format(
        hrStr, start.day, start.hour, end.day, end.hour)
5635e5463af0f7fa3ea7b42d0892a8f26ea4a56d
417,280
def get_machines_by_vnet_interface_name(config, ifname):
    """
    Returns a list of machines that use a particular VNet interface.

    :param dict config: The config generated by get_config()
    :param str ifname: The interface to check for (e.g. "vnet3")
    :return: list of VNet machine names using that interface
    """
    # Parse the whole trailing number instead of only the last character
    # (the original int(ifname[-1]) broke for "vnet10" and above).
    digits = ''
    for ch in reversed(ifname):
        if not ch.isdigit():
            break
        digits = ch + digits
    iface_number = int(digits)
    machines = []
    for machine_name, machine_data in config["machines"].items():
        for interface_data in machine_data["interfaces"].values():
            if int(interface_data["bridge"]) == iface_number:
                machines.append(machine_name)
    return machines
9f26b01243664f0af596db2eaf0d067d481076e6
82,788
def is_subset(l1, l2):
    """Checks whether list l1 is a subset of list l2"""
    for element in l1:
        if element not in l2:
            return False
    return True
8189d161947b95097887e5df234dd1c71b3b71a3
592,902
def downgrader(version):
    """
    A decorator for marking a method as a downgrader to an older
    version of a given object. Note that downgrader methods are
    implicitly class methods. Also note that downgraders take a single
    argument--a dictionary of attributes--and must return a dictionary.
    Downgraders may modify the argument in place, if desired.

    :param version: The version number the downgrader returns the
                    attributes for. Must be provided.

    :returns: A decorator.

    :raises TypeError: when *version* is not a positive integer.
    """
    def decorator(func):
        # Save the version to downgrade to
        func.__vers_downgrader__ = version
        return func

    # Sanity-check the version number before handing back the decorator.
    # (``six.integer_types`` was only needed for Python 2 long support;
    # plain ``int`` is equivalent on Python 3, so the six shim is gone.)
    if not isinstance(version, int) or version < 1:
        raise TypeError("Invalid downgrader version number %r" % version)

    return decorator
e136829783f2e002ee741fed7acf210d6235c535
655,984
import random


def Ui(lo, hi):
    """Uniformly distributed integer, inclusive limits."""
    # randint(a, b) is documented as randrange(a, b + 1)
    return random.randrange(lo, hi + 1)
f2cd9020ff8297367c387cd14b27cf50e75bb59f
655,555
import base64


def b64str_to_img_stream(src, urlsafe=False):
    """
    Decode a Base64 string of image to the image file's byte stream.

    :param src: The Base64 string you want to decode.
    :param urlsafe: Trigger using URL-Safe format. Must be consistent
        with what you generate with.
    :return: Decoded byte stream of image.
    """
    raw = src.encode('utf-8')
    decoder = base64.urlsafe_b64decode if urlsafe else base64.standard_b64decode
    return decoder(raw)
5c1de8227c66c68701ddd4bffd31f87941f7f9a9
403,000
def _MasterToBotsToDeprecatedDict(suites): """Makes a dictionary listing masters, bots and deprecated for tests. Args: suites: A collection of test suite Test entities. All of the keys in this set should have the same test suite name. Returns: A dictionary mapping master names to bot names to deprecated. """ def MasterName(key): return key.pairs()[0][1] def BotName(key): return key.pairs()[1][1] result = {} for master in {MasterName(s.key) for s in suites}: bot = {} for suite in suites: if MasterName(suite.key) == master: bot[BotName(suite.key)] = suite.deprecated result[master] = bot return result
3ce07ed17ab826a9a08ac0d43e9440002a6cb25e
145,184
def get_messages(soup):
    """Parses the messages from the Soup object.

    Every message contributes two <p> tags; only the odd-indexed ones
    carry the message text, so the even-indexed tags are dropped.
    """
    message_tags = soup.find_all('p')[1::2]
    return [tag.text for tag in message_tags]
1918c4d1116db06248f26690f322fde7c3a0a385
168,718
import torch


def n_step_returns(q_values, rewards, kls, discount=0.99):
    """
    Calculates all n-step returns.

    Args:
        q_values (torch.Tensor): the Q-value estimates at each time step [time_steps+1, batch_size, 1]
        rewards (torch.Tensor): the rewards at each time step [time_steps, batch_size, 1]
        kls (torch.Tensor): the scaled kl divergences at each time step [time_steps, batch_size, 1]
        discount (float): the temporal discount factor

    Returns:
        torch.Tensor: the n-step returns [time_steps+1, batch_size, 1]
    """
    discounts = torch.cat([(discount * torch.ones_like(q_values[:1])) ** i
                           for i in range(rewards.shape[0])], 0)
    # Work on a copy: the original code mutated the caller's ``rewards``
    # tensor in place via the sliced assignment below.
    rewards = rewards.clone()
    rewards[1:] = rewards[1:] - kls[:-1]
    discounted_returns = torch.cumsum(discounts * rewards, dim=0)
    terminal_values = discount * discounts * (q_values[1:] - kls)
    return torch.cat([q_values[:1], discounted_returns + terminal_values], dim=0)
3bbd6026046328dc8ef63ab3e871f6c47636cb80
5,010
import json


def format(obj):  # pylint: disable=W0622
    """Output object as json."""
    return json.dumps(obj)
2a41cd3aebf9ad329298de388d38c2290fa1fc2b
207,229
def get_pb_id_from_deploy_id(deploy_id: str) -> str:
    """Get processing block ID from deployment ID.

    This assumes that all deployments associated with a processing
    block of ID `[type]-[date]-[number]` have a deployment ID of the
    form `[type]-[date]-[number]-*`.
    """
    # maxsplit keeps everything after the third dash in one chunk,
    # which the [:3] slice then drops
    return '-'.join(deploy_id.split('-', 3)[:3])
0ae109a57dd28260206c8d81a5545e1b1803d278
383,942
def load_voxel_params(param):
    """
    Based on the lidar range and the voxel resolution, calculate the anchor
    box and target resolution.

    Parameters
    ----------
    param : dict
        Original loaded parameter dictionary.

    Returns
    -------
    param : dict
        Modified parameter dictionary with the voxel sizes and the grid
        extents `anchor_args['W'/'H'/'D']` filled in.
    """
    anchor_args = param['postprocess']['anchor_args']
    lidar_range = anchor_args['cav_lidar_range']
    vw, vh, vd = param['preprocess']['args']['voxel_size']

    anchor_args['vw'] = vw
    anchor_args['vh'] = vh
    anchor_args['vd'] = vd

    # Grid extent in voxels along each axis.
    anchor_args['W'] = int((lidar_range[3] - lidar_range[0]) / vw)
    anchor_args['H'] = int((lidar_range[4] - lidar_range[1]) / vh)
    anchor_args['D'] = int((lidar_range[5] - lidar_range[2]) / vd)
    param['postprocess'].update({'anchor_args': anchor_args})

    # Sometimes we just want to visualize the data without running a model.
    if 'model' in param:
        for key in ('W', 'H', 'D'):
            param['model']['args'][key] = anchor_args[key]

    return param
6e364e23ecc450ed6d12879ba50217897adcfc4d
106,110
def indent(txt, indent_level):
    """Prefix every line of *txt* with ``indent_level`` spaces.

    >>> indent('foo', 2)
    '  foo'
    """
    # Use a distinct local name so the function name is not shadowed.
    prefix = " " * indent_level
    return "\n".join(prefix + line for line in txt.splitlines())
b0b48e304cfafd65c8d8f7be6b056cf755951cdf
632,369
def bin_to_dec(l):
    """Convert a list "l" of 1s and 0s (most significant bit first) into
    its decimal value."""
    bit_string = "".join(str(bit) for bit in l)
    return int(bit_string, 2)
8876bfd35bc2cf14026455b833a1244b6bb424b6
398,284
def str2hexstr(md5sum):
    """Return the hex representation of a string (two hex digits per
    character code point)."""
    return "".join(format(ord(ch), "02x") for ch in md5sum)
e2c01d5cb05775c846d223ca90c5a78797907f60
295,514
def sans_virgule(x):
    """
    Return the number x with its decimal part dropped (truncation toward
    zero). E.g. -2.5 becomes -2.

    Arguments:
        x (float): A decimal number.
    """
    truncated = int(x)
    return truncated
d095d336d11c9884a4149633fcd1c82c5dfa4a93
215,753
def conversion(amount, rates, output_currency=None):
    """
    Convert *amount* using the given conversion rates.

    When *output_currency* is given only that currency is returned;
    otherwise the conversion is made for all supported currencies.

    Parameters
    ----------
    amount : float
        Amount of money to be converted.
    rates : dict
        Conversion rates keyed by currency code.
    output_currency : str, optional
        Currency for which conversion is made.

    Returns
    -------
    dict
        Converted amounts (rounded to 2 decimals) keyed by currency code.
    """
    converted = {}
    for currency, rate in rates.items():
        converted[currency] = round(rate * amount, 2)
    if output_currency:
        return {output_currency: converted[output_currency]}
    return converted
ad9f506d3597b22d54e612fe0ee572ee019732ef
82,835
def merge_short_sentences(lst: list) -> list:
    """Merge subsequent sentences that consist of only a single word.

    A sentence counts as "short" when it contains no space; each short
    sentence is concatenated onto the preceding run of short sentences.
    """
    merged = []
    previous_was_short = False
    for sentence in lst:
        is_short = ' ' not in sentence
        if is_short and previous_was_short:
            merged[-1] += sentence
        else:
            merged.append(sentence)
        previous_was_short = is_short
    return merged
9b0f77c009c250c58e732c93fc04532c8cdfb5b0
154,175
from typing import List

def filter_prefix(str_list: List[str], prefix: str) -> list:
    """
    Keep only the strings that start with the given prefix.

    :param str_list: list of strings to filter
    :param prefix: prefix to filter on
    :return: list of filtered strings
    """
    return list(filter(lambda entry: entry.startswith(prefix), str_list))
b22ad728bbdfd58a7b4f52117dcb3bb24fde0ead
257,615
def components(issue):
    """Get the component names of an issue, or [] when it has none."""
    fields = issue.fields
    if not hasattr(fields, 'components'):
        return []
    return [component.name for component in fields.components]
6d3daf700c63d344a69348f3e60dc1e85b059995
216,817
import uuid

def random_name(prefix: str = "") -> str:
    """Generate a random name: *prefix* followed by 8 random hex chars."""
    suffix = uuid.uuid4().hex[:8]
    return f"{prefix}{suffix}"
b4d2d8f271069f973357a5e38f1a31b6c40fe4ea
326,533
def tab_fibonacci(n):
    """
    Fibonacci via tabulation: start with the smallest subproblems and
    build larger values bottom-up, so the running time is O(n).
    """
    n = int(n)
    if n in (0, 1):
        return n
    prev, curr = 0, 1
    # n - 2 accumulation steps, then one final addition.
    for _ in range(2, n):
        prev, curr = curr, prev + curr
    return prev + curr
1a0ee4870d326a6c3e370913da48f853efaac4cb
400,818
import requests

def is_pkg_available(pkg_name: str, channel: str = "conda-forge") -> bool:
    """Verify if the package is available on Anaconda for a specific channel.

    :param pkg_name: Package name
    :param channel: Anaconda channel
    :return: Return True if the package is present on the given channel
    """
    url = f"https://anaconda.org/{channel}/{pkg_name}/files"
    status = requests.get(url=url, allow_redirects=False).status_code
    return status == 200
aa69523b2e73df5fd84d9c6d77f79166e76cdb0a
255,649
import torch
import random

def one_hot_tensor(n: int) -> torch.Tensor:
    """Sample a one-hot vector of length n, returned as a torch Tensor."""
    hot_index = random.randrange(n)
    vec = torch.zeros(n)
    vec[hot_index] = 1.0
    return vec
d94b3d39237b2227eea4f9310d994721716eb6d2
106,343
def get_factors_(number, printed=False):
    """Get the factors of a number.

    When *printed* is True the factors are printed (and nothing is
    returned); otherwise the list of factors is returned.
    """
    factors = [d for d in range(1, number + 1) if number % d == 0]
    if printed:
        print(factors)
    else:
        return factors
b7bb2cdc8f667fa31f8bf3555bc1cc985d984532
488,984
def Sqrt(x):
    """Square root function."""
    return pow(x, 0.5)
e726dfad946077826bcc19f44cd6a682c3b6410c
13,774
def get_user_identity(event: dict) -> str:
    """
    Get user identity from a CloudTrail event; "Unknown" when the event
    carries no user ARN.
    """
    identity = event['userIdentity']
    if 'arn' not in identity:
        return "Unknown"
    return identity['arn'].rsplit(':', 1)[-1]
b9418ddb4616b5062bf0dd2c3df75906e478e55b
587,810
import pkg_resources

def riptide_assets_dir() -> str:
    """
    Path to the assets directory of riptide_lib.
    """
    assets_path = pkg_resources.resource_filename('riptide', 'assets')
    return assets_path
6c82e359d46bb6a82cbf01559574c07629acc23b
41,040
def get_data_files(data_dir):
    """
    Retrieve the list of data files to train/test with from a txt file.

    Args:
        data_dir (string): the path to the txt file, one file name per line

    Returns:
        list of str: the whitespace-stripped lines of the file
    """
    # Use a context manager so the file handle is closed promptly;
    # the original left the handle to be collected by the GC.
    with open(data_dir) as f:
        return [line.strip() for line in f]
58e0096f94c367b59ea2c4bf9b2f7bd2b0f53c1a
287,574
def get_base(x):
    """Returns b | b ** i == x[i], or None"""
    seq = tuple(x)
    # A geometric table must start at 1 and have at least two entries.
    if len(seq) < 2 or seq[0] != 1:
        return None
    base = seq[1]
    if all(base ** i == value for i, value in enumerate(seq)):
        return base
    return None
52e822acc22eb368dfba730b9b7f1923174caf9f
354,734
def not_null(value):
    """A validation function that checks that a value isn't None."""
    # `is not None` already yields a bool; the `True if … else False`
    # ternary was redundant.
    return value is not None
fddea3681dc6f4d1ae1c393464223d5ace4b1bab
413,432
def fix_logger_name(logger, method_name, event_dict):
    """
    Captured stdlib logging messages have logger=feedhq.logging and
    logger_name=original_logger_name. Overwrite logger with correct name.
    """
    try:
        original_name = event_dict.pop('logger_name')
    except KeyError:
        return event_dict
    event_dict['logger'] = original_name
    return event_dict
e950da34b68c51d2b5c1349c04226e45b3b52523
159,476
def softmax_backward(Y, softmax_out):
    """
    Backward pass through softmax: dL/dZ = softmax_out - Y.

    Y: labels of training data. shape: (vocab_size, m)
    softmax_out: output out of softmax. shape: (vocab_size, m)
    """
    grad = -(Y - softmax_out)
    assert grad.shape == softmax_out.shape
    return grad
cbae57b45ef6ea6b8cf25aa9919b03aa5aef048d
390,392
def common_errors_remark(common_errors):
    """Generate remark for common errors and issues detection.

    Returns an HTML list item suggesting the next action, or an empty
    string when detection ran and nothing failed.
    """
    if not common_errors["display_results"]:
        return "<li>setup common errors detection tool</li>"
    if common_errors["failed"] != 0:
        return "<li>fix common errors</li>"
    return ""
be4103f83cc05e85772e565630a088b17edec1ec
603,390
def mapPoint(number, start1, stop1, start2, stop2):
    """
    Linearly remap *number* from the range [start1, stop1] to the range
    [start2, stop2], preserving its relative position.

    @:param number: is the mapped number
    @:param start1: is the lowest value of the range in which number is
    @:param stop1: is the highest value of the range in which number is
    @:param start2: is the lowest value of the target range
    @:param stop2: is the highest value of the target range
    @:return the calculated number
    """
    ratio = (number - start1) / (stop1 - start1)
    return start2 + ratio * (stop2 - start2)
f37324113a391ed9574cda57c413d58b8b01b53d
298,457
def banner() -> None:
    """
    Print an ascii art banner, wrapped in red markup.
    """
    with open("src/interface/art.txt", "r") as art_file:
        art = art_file.read()
    return print(f"[red]{art}[/]")
5e3e0dab46e7e6e33fad90a516490151c80f3719
33,399
def solution_b(puzzle, stop=2020):
    """
    This next solution is even faster, about 25%, thanks to Gravitar64.
    https://github.com/Gravitar64/Advent-of-Code-2020

    Counting the turns from len(puzzle) instead of len(puzzle) + 1 makes
    everything so easy and nice!
    """
    last_seen = {}
    for turn, number in enumerate(puzzle, 1):
        last_seen[number] = turn
    current = puzzle[-1]
    for turn in range(len(puzzle), stop):
        previous_turn = last_seen.get(current, turn)
        last_seen[current] = turn
        current = turn - previous_turn
    return current
1bb5058840d67aec9d0fe06eed06f4b25596b662
352,937
def parse_splits(chunk_lines):
    """
    Parse splits from a chunk_lines (as list) and return a list of splits.

    :param chunk_lines: list

    Note:
    -----
    Assume split infos have 3 lines where a line which starts with 'S'
    begin split infos and the next 2 lines begin with '$' and 'E' in
    that order.
    """
    splits = []
    current = {}
    for line in chunk_lines:
        prefix, data = line[0], line[1:]
        if prefix == 'S':
            current["category"] = data
        elif prefix == 'E':
            current["memo"] = data
        elif prefix == '$':
            # '$' closes the current split record.
            current["amount"] = data
            splits.append(current)
            current = {}
    return splits
25440e1c0ccb284934f08a0aa5f3c37221c6eb4f
492,407
from typing import List
from typing import Dict

def get_bad_images(images: List[Dict], tag_whitelist: List[str]) -> List[Dict]:
    """Filter a list of images for non-whitelisted tags or no tags."""
    flagged = []
    for image in images:
        tag = image.get("imageTag", "")
        digest = image.get("imageDigest", "")
        if not tag:
            if digest:
                # Untagged but identifiable by digest.
                flagged.append({"imageDigest": digest})
            else:
                print("Found entirely blank image")
        elif tag not in tag_whitelist:
            flagged.append({"imageTag": tag})
    return flagged
33925df6a1e093f4756a35d25dde4b5948fda566
471,299
import calendar

def month_bound_from_date(d):
    """Get the first and last date in d's month, as a (first, last) tuple."""
    last_day = calendar.monthrange(d.year, d.month)[1]
    return d.replace(day=1), d.replace(day=last_day)
71524f9b274d89bf81dfacb848291e0e45b460a6
433,275
def compute_n_steps(control_timestep, physics_timestep, tolerance=1e-8):
    """Returns the number of physics timesteps in a single control timestep.

    Args:
      control_timestep: Control time-step, should be an integer multiple of
        the physics timestep.
      physics_timestep: The time-step of the physics simulation.
      tolerance: Optional tolerance value for checking if `physics_timestep`
        divides `control_timestep`.

    Returns:
      The number of physics timesteps in a single control timestep.

    Raises:
      ValueError: If `control_timestep` is smaller than `physics_timestep` or
        if `control_timestep` is not an integer multiple of `physics_timestep`.
    """
    if control_timestep < physics_timestep:
        raise ValueError(
            'Control timestep ({}) cannot be smaller than physics timestep ({}).'.
            format(control_timestep, physics_timestep))
    ratio = control_timestep / physics_timestep
    if abs(ratio - round(ratio)) > tolerance:
        raise ValueError(
            'Control timestep ({}) must be an integer multiple of physics timestep '
            '({})'.format(control_timestep, physics_timestep))
    return int(round(ratio))
56fe329a7dc546262f25f388fdb28da75428e638
295,075
def AppendSolution(df, func, **kwargs):
    """
    Append a per-row "solution" column to the dataframe.

    Parameters
    ----------
    df: pd.DataFrame
        A dataframe containing the extracted properties for the edges/nodes.
    func: function
        A function that takes a dataframe row and returns a solution.

    Returns
    -------
    df: pd.DataFrame
        The same dataframe with a "solution" column added.
    """
    solutions = df.apply(lambda row: func(row), axis=1, **kwargs)
    df.loc[:, "solution"] = solutions
    return df
2138d27b8157cd031232a135f11f25653db66db0
316,872
def complete_cell(self):
    """
    Return a cell where atoms have been translated to complete all
    molecules of the cell

    Returns
    -------
    out_cell : Mol object
        The new untruncated cell
    full_mol_l : list of Mol objects
        Each molecule in the untruncated cell
    """
    full_mol_l = []
    remaining = self.copy()
    # Repeatedly complete the molecule containing atom 0 of what is left,
    # then strip that molecule's atoms out of the remaining cell.
    while len(remaining) != 0:
        # complete_mol(0) presumably returns (completed molecule, cell with
        # translated atoms) -- TODO confirm against Mol.complete_mol.
        full_mol, cell = remaining.complete_mol(0)
        full_mol_l.append(full_mol)
        remaining = cell
        # Remove every atom of the completed molecule from the pool so the
        # loop eventually terminates.
        for atom in full_mol:
            if atom in remaining:
                remaining.remove(atom)
    # Conveniently, remaining is now an empty Mol: reuse it as the output
    # container and refill it with every completed molecule.
    out_cell = remaining
    for mol in full_mol_l:
        out_cell.extend(mol)
    return out_cell, full_mol_l
ad87c167f32035f99b59e4c0c832f5d0877e7523
235,044
import math

def computeTelescopeTransmission(pars, offAxis):
    """
    Compute tel. transmission (0 < T < 1) for a given set of parameters
    as defined by the MC model and for a given off-axis angle.

    Parameters
    ----------
    pars: list of float
        Parameters of the telescope transmission. Len(pars) should be 4.
    offAxis: float
        Off-axis angle in deg.

    Returns
    -------
    float
        Telescope transmission.
    """
    deg_to_rad = math.pi / 180.0
    # pars[1] == 0 selects the constant-transmission model.
    if pars[1] == 0:
        return pars[0]
    t = math.sin(offAxis * deg_to_rad) / (pars[3] * deg_to_rad)
    return pars[0] / (1.0 + pars[2] * t ** pars[4])
50b2e2908726b8a77bc83a2821cf760b7475300b
708,732
def update_processing_mask(mask, index, window=None):
    """ Update the persistent processing mask.

    Because processes apply the mask first, index values given are in
    relation to that. So we must apply the mask to itself, then update
    the boolean values.

    The window slice object is to catch when it is in relation to some
    window of the masked values. So, we must mask against itself, then
    look at a subset of that result.

    A new array is returned; the caller's mask is left untouched.

    Args:
        mask: 1-d boolean ndarray, current mask being used
        index: int/list/tuple of index(es) to be excluded from processing,
            or boolean array
        window: slice object identifying a further subset of the mask

    Returns:
        1-d boolean ndarray
    """
    # NOTE: `mask[:]` on an ndarray returns a *view*, so the write-back
    # below used to mutate the caller's mask in place. `.copy()` gives a
    # genuinely independent array, matching the stated intent.
    new_mask = mask.copy()
    sub_mask = new_mask[new_mask]

    if window:
        sub_mask[window][index] = False
    else:
        sub_mask[index] = False

    new_mask[new_mask] = sub_mask

    return new_mask
b5360dbce829f66d5ccf9de3a74984bf16b6e921
237,582
def GetBrowserScoreKeyName(suite, browser_instance):
    """Key name generator for browser score model.

    Args:
      suite: TestSuite Entity.
      browser_instance: Browser Entity.

    Returns:
      BrowserScore key name as string.
    """
    suite_name = suite.key().name()
    browser_name = browser_instance.key().name()
    return '{}_{}'.format(suite_name, browser_name)
608eb61cdf8597bba9e9a93ddaebcc5bff234d22
187,597