Dataset schema:

content: string (lengths 39 to 14.9k)
sha1: string (length 40)
id: int64 (0 to 710k)
def _compute_all_mask(ds):
    """Computes a mask that is true everywhere"""
    da_mask = ds.z < ds.z + 1
    da_mask.attrs["long_name"] = "All data mask"
    return da_mask
93f04dde3a20c504b017d30ceb654d3c62bce798
31,859
import logging


def obr_repr(obj):
    """Return obj representation if possible."""
    try:
        return repr(obj)
    # pylint: disable-msg=broad-except
    except Exception as e:
        logging.error(e)
        return 'String Representation not found'
8a95574dc0d8b18fe1c7f8c040b167654930380e
31,865
def generate_playlist_url(playlist_id: str) -> str:
    """Takes a playlist ID and generates the playlist URL.

    Example: https://www.youtube.com/playlist?list=PLGhvWnPsCr59gKqzqmUQrSNwl484NPvQY
    """
    return f'https://www.youtube.com/playlist?list={playlist_id}'
f6f234e3f3eb061e37d573e35aff7c77637b0952
31,866
def int_check(self, value, key):
    """
    Cast a value as an integer

    :param value: The value to cast as int
    :param key: The value name
    :return: The value as an integer, otherwise an error message
    """
    try:
        # Convert once instead of converting twice as before.
        return int(value), ''
    except (TypeError, ValueError):
        return None, 'Error: %s value must be an integer' % key
4529cc80104dd075539f6e846dddd8814756e116
31,873
def get_organizer_emails(form):
    """Get up to 15 organizer emails from an input form."""
    return form.getlist("organizer")[:15]
129b21b7fe0e3c9d12c0c363facce2cc669baa60
31,875
def find_dependency_in_spec(spec, ref):
    """Utility to return the dict corresponding to the given ref
    in a dependency build spec document fragment
    """
    for item in spec:
        if item['ref'] == ref:
            return item
e7fa516b5d7d88ec68cdd5e5f5994f37f6ca05ce
31,876
def encoding(fields, use_original_field_names=False):
    """Convert fields structure to encoding maps for values and field names.

    Expects a fields dictionary structure, returns a tuple of two dictionaries
    where the keys are the field names (either original or new depending on the
    use_original_field_names parameter).

    Resulting dictionaries map the values for each field and field names.
    """
    encoding_map = {}
    encoding_field_names = {}
    for original_field_name, f in fields.items():
        if "encoding" in f:
            if f["encoding"].get("expand", True):
                if use_original_field_names:
                    key = original_field_name
                else:
                    key = f.get("name", original_field_name)
                encoding_field_names[key] = f["encoding"].get("name", f"{key}_")
                encoding_map[key] = f["encoding"].get("map", {})
    return encoding_map, encoding_field_names
4789733018eebbf7e9f7287359ebc6c56c5182f6
31,881
def getMatches(file, regex):
    """Returns a list of all passages in `file` matching `regex`"""
    # Use a context manager so the file handle is closed after reading.
    with open(file, "r") as source:
        return regex.findall(source.read())
17a791998d9bf2f2ba8aad9e15e6a395785b78c2
31,884
from typing import Tuple


def ext_gcd(a: int, b: int) -> Tuple[int, int, int]:
    """Extended Euclidean algorithm

    solve ax + by = gcd(a, b)

    Parameters
    ----------
    a
    b

    Returns
    -------
    (d, x, y) s.t. ax + by = gcd(a, b)
    """
    if b == 0:
        return a, 1, 0
    d, y, x = ext_gcd(b, a % b)
    return d, x, y - (a // b) * x
a4c1c7f682d13ceab6bb3d06b855c61a9a88d9f8
31,886
def parse_zone_id(full_zone_id: str) -> str:
    """Parse the returned hosted zone id and return only the ID itself."""
    return full_zone_id.split("/")[2]
1dbcbcee8dbd09b24d22957d7e598e1f5692910f
31,888
import math


def distance(A, B):
    """Finds the distance between two points

    @parameter A: point #1
    @parameter B: point #2
    @returns: distance between the points A and B
    """
    return math.sqrt((A[0] - B[0]) ** 2 + (A[1] - B[1]) ** 2)
e189723dafbb984656ae28b2824f4bbebbffa99c
31,891
def get_results(soup):
    """Gets the results div from the HTML soup

    @param {BeautifulSoup} soup Google Scholar BeautifulSoup instance
    @return {BeautifulSoup[]} List of Google Scholar results as BeautifulSoup instances
    """
    return soup.find_all('div', class_='gs_r gs_or gs_scl')
efdcc2a5b827a84840868e250894c8f144ae0743
31,895
def has_ordered_sublist(lst, sublist):
    """
    Determines whether the passed list contains, in the specified order,
    all the elements in sublist.
    """
    sub_idx = 0
    max_idx = len(sublist) - 1
    for x in lst:
        if x == sublist[sub_idx]:
            sub_idx += 1
            if sub_idx > max_idx:
                return True
    return sub_idx > max_idx
4aa72edc8b4020bc49d266cddccd70e18c641ab0
31,899
from typing import Dict, List, Tuple


def convert(day_input: List[str]) -> Tuple[Dict[str, List[range]], List[int], List[List[int]]]:
    """Converts the input into a tuple with:

    1. A dictionary with the fields, where for each field the value is a list
       of the valid ranges, each represented as a range from the min to the
       max valid value
    2. The values for your ticket, a list of ints
    3. A list with the values for the other tickets
    """
    iter_in = iter(day_input)

    # Convert fields
    fields = {}
    for line in iter_in:
        if line == '':
            break
        key, vals = line.split(': ')
        fields[key] = [range(int(v.split('-')[0]), int(v.split('-')[1]) + 1)
                       for v in vals.split(' or ')]

    while next(iter_in) != 'your ticket:':
        continue
    our = [int(n) for n in next(iter_in).split(',')]

    while next(iter_in) != 'nearby tickets:':
        continue
    tickets = [[int(n) for n in line.split(',')] for line in iter_in]

    return (fields, our, tickets)
7ef01443251595891c4adcd147dd0487b8b2fedf
31,913
def sample_approx(approx, draws=100, include_transformed=True):
    """Draw samples from variational posterior.

    Parameters
    ----------
    approx: :class:`Approximation`
        Approximation to sample from
    draws: `int`
        Number of random samples.
    include_transformed: `bool`
        If True, transformed variables are also sampled. Default is True.

    Returns
    -------
    trace: class:`pymc.backends.base.MultiTrace`
        Samples drawn from variational posterior.
    """
    return approx.sample(draws=draws, include_transformed=include_transformed)
2ff6eab14a5d42f0a9b2d531bcccee1245964f72
31,915
def find_hosts(ipa_client, pattern=None):
    """Returns list of matching hosts from IPA.

    If no pattern is provided, returns all hosts.
    """
    if pattern is None:
        pattern = ''
    return ipa_client.get_hosts(pattern=pattern)
38dd7a8e499af6372c9243c0cf91fe1c7e7f3d9b
31,921
def union_list(cmp_lists):
    """
    Get the union of two or more lists. Empty lists are supported.

    :param cmp_lists: A list of the lists to union; it must contain at least two lists.
    :return: result: The union of the lists.
    """
    result = list(set().union(*cmp_lists))
    return result
013d7e45a1ea56bcd09fe8ed332d8d69962774fb
31,923
def replace_word(word_array, dict_of_words_to_replace):
    """
    Given an array of words, replace any words matching a key in
    dict_of_words_to_replace with its corresponding value.

    :param word_array: The array of words to check.
    :param dict_of_words_to_replace: The dictionary of words to replace paired with their replacements.
    :return: The new array of words.
    """
    new_word_array = []
    for word in word_array:
        if word in dict_of_words_to_replace:
            new_word_array.extend(dict_of_words_to_replace[word])
        else:
            new_word_array.append(word)
    return new_word_array
5e763a8f0af48b93c0eeeec4414e411dd4e2d69b
31,924
import hashlib


def md5sum(targetfile):
    """md5sum a file. Return the hex digest."""
    digest = hashlib.md5()
    with open(targetfile, 'rb') as f:
        chunk = f.read(1024)
        # f.read() returns b'' at EOF; the original compared against the str ''
        # which is never equal to bytes, so the loop would never terminate.
        while chunk:
            digest.update(chunk)
            chunk = f.read(1024)
    return digest.hexdigest()
0a599cbdcfcb1061a6577c3db2947f4f08425fae
31,928
def check_if_only_decoys(sequences):
    """Check if the sequences to consolidate are composed only of decoys"""
    only_decoys = True
    for sequence in sequences:
        if 'decoy' not in sequence.split()[2]:
            only_decoys = False
            break
    return only_decoys
aeb65a261bcea6db80cd7a81b566463ad3d0414f
31,934
def normalised_cooperation(cooperation, turns, repetitions):
    """
    The per-turn normalised cooperation matrix for a tournament of n repetitions.

    Parameters
    ----------
    cooperation : list
        The cooperation matrix (C)
    turns : integer
        The number of turns in each round robin.
    repetitions : integer
        The number of repetitions in the tournament.

    Returns
    -------
    list
        A matrix (N) such that:

            N = C / t

        where t is the total number of turns played in the tournament.
    """
    turns = turns * repetitions
    return [
        [1.0 * element / turns for element in row] for row in cooperation]
9d840592df4f538e30cc961aa6a51934a351006c
31,939
def truncate_string_end(string, length=40):
    """
    If a string is longer than "length" then snip off the end and replace
    it with an ellipsis.
    """
    if len(string) <= length:
        return string
    return f"{string[:length - 3]}..."
d1dfb8ea9ce82fd27777f02b5f01d35c1af4dfa8
31,943
import requests
import json


def get_todays_percent_change_of_symbol(symbol):
    """
    Fetch today's change in percent for the given symbol name.

    :param symbol: ticker name which will be queried
    :return: todaysChangePercent
    """
    p_auth = "Zay2cQZwZfUTozLiLmyprY4Sr3uK27Vp"
    query = ("https://api.polygon.io/v2/snapshot/locale/us/markets/stocks/tickers/"
             + symbol + "?&apiKey=" + p_auth)
    print(query)
    response = requests.get(query)
    json_data = json.loads(response.text)
    print(json_data)
    try:
        change = json_data["ticker"]["todaysChangePerc"]
        return change
    except KeyError:
        return None
16273218c5197171426071399151ce2c16d6c106
31,944
import re


def get_single_junction_overhang(cigar):
    """
    Returns the number of reads left/right of a junction as indicated
    by the LEFTMOST N in a cigar string.
    Returns -1, -1 for reads that don't span junctions.

    :param cigar: string
    :return left: int
    :return right: int
    """
    cigar_overhang_regex = r"(\d+)M[\d]+N(\d+)M"

    overhangs = re.findall(cigar_overhang_regex, cigar)
    if overhangs:
        return int(overhangs[0][0]), int(overhangs[0][1])
    else:
        return -1, -1
96331b12ba05eb13ae783aab76589a5556fb1166
31,947
from typing import Sequence


def get_problem_type_input_args() -> Sequence[str]:
    """Return ``typing.get_args(ProblemTypeInput)``."""
    return ("trivial-gcd", "nontrivial-gcd", "trivial-factor", "nontrivial-factor")
937759fde8ebeb0cf0666f83e7e3741720eac324
31,955
def selection_sort(numbs: list) -> list:
    """
    Go through the list from left to right and search for the minimum. Once the
    minimum is found, swap it with the element at the start of the unsorted
    subarray. Repeat the same procedure on the subarray that goes from the
    element after 'i' to the end, until the array is sorted (i.e. when the
    subarray is composed of only one element).

    :param numbs: The array to be sorted
    :return: The sorted array
    """
    for i in range(len(numbs)):
        minimum = i
        for j in range(i + 1, len(numbs)):
            if numbs[minimum] > numbs[j]:
                minimum = j
        numbs[i], numbs[minimum] = numbs[minimum], numbs[i]
    return numbs
1d1a693a83c30753bb2829a18d7fd65d7f42b90d
31,957
import hashlib


def generate_sha256_hash(string_to_hash: str):
    """Return the sha256 hash of the string."""
    return hashlib.sha256(string_to_hash.encode()).hexdigest()
7691b6ec1939f30cd7f35861abb8cef33feab448
31,960
import re


def similar_strings(s1, s2):
    """
    Return true if at least half of the words in s1 are also in s2.

    >>> assert similar_strings('1 2 3', '2 1 4')
    >>> assert not similar_strings('1 2 3', '5 1 4')
    """
    w1 = set(re.split(r'\W+', s1))
    w2 = set(re.split(r'\W+', s2))
    threshold = len(w1) // 2 + 1
    return len(w1 & w2) >= threshold
c0d11d96e9d55b5774b718ba27f7382ac8460cf5
31,968
import torch


def to_tensor(data, dtype=None):
    """Convert the data to a torch tensor.

    Args:
        data (array like): data for the tensor. Can be a list, tuple,
            numpy ndarray, scalar, and other types.
        dtype (torch.dtype): dtype of the converted tensors.
    Returns:
        A tensor of dtype
    """
    if not torch.is_tensor(data):
        # as_tensor reuses the underlying data store of numpy array if possible.
        data = torch.as_tensor(data, dtype=dtype).detach()
    return data
d51fe5fcae8a32134eab771b302c08c609dda63a
31,970
def split_pair_insertion_rules(rule: str) -> tuple[str, str]:
    """Split pair insertion rule of the form 'XY -> Z' and return it
    in the form 'XY', 'XZ'.
    """
    mapping = rule.split(" -> ")
    return mapping[0], mapping[0][0] + mapping[1]
eb84b743e5b42791d5872e4c7a6ac5d91e98829a
31,971
def itoa(value, alphabet, padding=None):
    """
    Converts an int value to a str, using the given alphabet.

    Padding can be computed as: ceil(log of max_val base alphabet_len)
    """
    if value < 0:
        raise ValueError("Only non-negative numbers are allowed")
    elif value == 0:
        return alphabet[0]

    result = ""
    base = len(alphabet)
    while value:
        value, rem = divmod(value, base)
        result = alphabet[rem] + result
    if padding:
        fill = max(padding - len(result), 0)
        result = (alphabet[0] * fill) + result
    return result
506c9b809651eb8fc0fc3fcf5dc97a56d6350bf7
31,973
import re


def condense(input_string):
    """
    Trims leading and trailing whitespace between tags in an html document.

    Args:
        input_string: A (possibly unicode) string representing HTML.

    Returns:
        A (possibly unicode) string representing HTML.

    Raises:
        TypeError: Raised if input_string isn't a unicode string or string.
    """
    try:
        assert isinstance(input_string, str)
    except AssertionError:
        raise TypeError
    removed_leading_whitespace = re.sub(r'>\s+', '>', input_string).strip()
    removed_trailing_whitespace = re.sub(r'\s+<', '<', removed_leading_whitespace).strip()
    return removed_trailing_whitespace
ee3a73f6d17914eaa2079111c79151833d3648f2
31,979
import math


def complex_abs_impl(context, builder, sig, args):
    """
    abs(z) := hypot(z.real, z.imag)
    """
    def complex_abs(z):
        return math.hypot(z.real, z.imag)

    return context.compile_internal(builder, complex_abs, sig, args)
f5cf9bb164bd89233c2aef6e3201fcb75bef8b37
31,980
import base64
import requests


def get_as_base64(url):
    """
    Encode the response of a url in base64.

    Args:
        url (str): The API URL to fetch

    Returns:
        The base64-encoded content (bytes)
    """
    return base64.b64encode(requests.get(url).content)
5924636566f9b501a5b865e9ce0ba6bc5672dd20
31,990
def render_command_exception(e):
    """
    Return a formatted string for an external-command-related exception.

    Parameters:
        e: the exception to render
    """
    if isinstance(e, OSError):
        return 'Details: [Errno {0}] {1}'.format(e.errno, e.strerror)
    else:
        return 'Details: {0}'.format(e)
c2dbbaf3b634d41aebfe551064d4b9870f0d4131
31,991
def _translate(num_time):
    """Translate a numeric time in seconds to a 'mm:ss.ss' string."""
    minutes, remain_time = num_time // 60, num_time % 60
    str_minute, str_second = map(str, (round(minutes), round(remain_time, 2)))
    str_second = (str_second.split('.')[0].rjust(2, '0'),
                  str_second.split('.')[1].ljust(2, '0'))
    str_time = str_minute.rjust(2, '0') + f':{str_second[0]}.{str_second[1]}'
    return str_time
af49424d3b9c6dfb660955b604c33e9a31e1c3de
31,992
def basic_word_sim(word1, word2):
    """
    Simple measure of similarity: Number of letters in common / max length
    """
    return sum(1 for c in word1 if c in word2) / max(len(word1), len(word2))
ed9f8b79efefd9cca673f681a976120aaf8e9fe1
31,993
def fix_nested_filter(query, parent_key):
    """
    Fix the invalid 'filter' in the Elasticsearch queries

    Args:
        query (dict): An Elasticsearch query
        parent_key (any): The parent key

    Returns:
        dict: An updated Elasticsearch query with filter replaced with query
    """
    if isinstance(query, dict):
        if 'filter' in query and parent_key == 'nested':
            copy = dict(query)
            if 'query' in copy:
                raise Exception("Unexpected 'query' found")
            copy['query'] = copy['filter']
            del copy['filter']
            return copy
        else:
            return {
                key: fix_nested_filter(value, key) for key, value in query.items()
            }
    elif isinstance(query, list):
        return [
            fix_nested_filter(piece, key) for key, piece in enumerate(query)
        ]
    else:
        return query
691f5f39720c8c608ab6e9828da67f625b3849f0
31,995
import hashlib


def hash_file_at_path(filepath: str, algorithm: str) -> str:
    """Return str containing lowercase hash value of file at a file path"""
    block_size = 64 * 1024
    hasher = getattr(hashlib, algorithm)()
    with open(filepath, "rb") as file_handler:
        while True:
            data = file_handler.read(block_size)
            if not data:
                break
            hasher.update(data)
    return hasher.hexdigest()
6c69ddb0fbb15890fd2d616808e00db04e8b14b3
31,996
import math


def apparent_to_absolute(d_pc, mag):
    """
    Converts apparent magnitude to absolute magnitude, given a distance to the object in pc.

    INPUTS
        d_pc: Distance to the object in parsecs.
        mag: Apparent magnitude.

    OUTPUT
        absMag: Absolute magnitude.
    """
    absMag = mag - 5.0 * math.log10(d_pc / 10.0)
    return absMag
7348730a7d932c8eb55bf5145e17186164d37cb8
31,997
def get_vocabulary(dataset, min_word_count=0):
    """
    Filter out words in the questions that are <= min_word_count and create
    a vocabulary from the filtered words.

    :param dataset: The VQA dataset
    :param min_word_count: The minimum number of counts the word needs in order to be included
    :return:
    """
    counts = {}
    print("Calculating word counts in questions")
    for d in dataset:
        for w in d["question_tokens"]:
            counts[w] = counts.get(w, 0) + 1

    vocab = [w for w, n in counts.items() if n > min_word_count]
    # cw = sorted([(n, w) for w, n in counts.items() if n > min_word_count], reverse=True)
    # print('\n'.join(map(str, cw[:20])))

    # Add the 'UNK' token
    vocab.append('UNK')  # UNK has its own ID
    return vocab
1ab05b1ba4df9251ce6077f9e3bc20b590bafe93
31,998
def n_subsystems(request):
    """Number of qubits or modes."""
    return request.param
28ed56cc26e4bfa1d607bf415b3a7611eb030a27
31,999
def parse_ehdn_info_for_locus(ehdn_profile, locus_chrom, locus_start, locus_end,
                              margin=700, motifs_of_interest=None):
    """Extract info related to a specific locus from an ExpansionHunterDenovo profile.

    NOTE: Here "irr" refers to "In-Repeat Read" (see [Dolzhenko 2020] for details).

    Args:
        ehdn_profile (dict): dictionary representing the data from an EHdn str_profile.json file.
            See https://github.com/Illumina/ExpansionHunterDenovo/blob/master/documentation/05_Computing_profiles.md
            for a description of the ExpansionHunterDenovo str_profile output file.
        locus_chrom (str): locus chromosome
        locus_start (int): locus start coord
        locus_end (int): locus end coord
        margin (int): when looking for anchored-IRR regions, include regions this many base pairs
            away from the locus. This should be approximately the fragment-length or slightly larger
            (700 is a reasonable value for Illumina short read data).
        motifs_of_interest (set): optionally, a set of motifs to include in the results even if EHdn
            found only paired-IRRs and no anchored IRRs near the given locus.

    Return:
        List of dictionaries where each dictionary represents a region and has this schema:
        {
            "region": "chr18:52204909-52204910",  # EHdn region containing anchored IRRs
            "repeat_unit": "CAG",
            "n_anchored_regions_for_this_repeat_unit": 3,  # number of anchored regions for this locus
            "anchored_irr_count_for_this_repeat_unit_and_region": 5,  # number of IRRs found
            "total_anchored_irr_count_for_this_repeat_unit": 10,  # number of IRRs found across all regions that have this same repeat unit
            "paired_irr_count_for_this_repeat_unit": 5,  # number of paired IRRs
            "total_irr_count_for_this_repeat_unit_and_region": 7.5,  # anchored_irr_count_for_this_repeat_unit_and_region plus the total_anchored_irr_count_for_this_repeat_unit weighted by the fraction of anchored IRRs at the locus of interest vs. all other loci
            "sample_read_depth": 30.6,  # overall sample coverage computed by EHdn
        }
    """
    locus_chrom = locus_chrom.replace("chr", "")
    sample_read_depth = ehdn_profile.pop("Depth")
    sample_read_length = ehdn_profile.pop("ReadLength")

    records = []
    for repeat_unit, irr_counts in ehdn_profile.items():  # contains keys: IrrPairCounts, RegionsWithIrrAnchors
        total_anchored_irr_count = irr_counts.get("AnchoredIrrCount", 0)
        irr_pair_count = irr_counts.get("IrrPairCount", 0)
        anchored_irr_regions = irr_counts.get("RegionsWithIrrAnchors", {})
        for region, read_count in anchored_irr_regions.items():
            chrom, start_and_end = region.split(":")
            chrom = chrom.replace("chr", "")
            start, end = map(int, start_and_end.split("-"))
            overlaps_locus = ((chrom == locus_chrom)
                              and (end >= locus_start - margin)
                              and (start <= locus_end + margin))
            if not overlaps_locus:
                continue

            records.append({
                "region": region,
                "repeat_unit": repeat_unit,
                "n_anchored_regions_for_this_repeat_unit": len(anchored_irr_regions),
                "anchored_irr_count_for_this_repeat_unit_and_region": read_count,
                "total_anchored_irr_count_for_this_repeat_unit": total_anchored_irr_count,
                "paired_irr_count_for_this_repeat_unit": irr_pair_count,
                "total_irr_count_for_this_repeat_unit_and_region":
                    read_count + irr_pair_count * read_count / float(total_anchored_irr_count),
                "sample_read_depth": sample_read_depth,
            })
            break
        else:
            # If none of the regions associated with `repeat_unit` overlap the specified locus,
            # and no record was appended (the loop didn't exit with the `break` statement), then
            # check if `repeat_unit` is a known repeat unit and if there are irr_pairs
            if motifs_of_interest and repeat_unit in motifs_of_interest:
                records.append({
                    "region": None,
                    "repeat_unit": repeat_unit,
                    "n_anchored_regions_for_this_repeat_unit": 0,
                    "anchored_irr_count_for_this_repeat_unit_and_region": 0,
                    "total_anchored_irr_count_for_this_repeat_unit": total_anchored_irr_count,
                    "paired_irr_count_for_this_repeat_unit": irr_pair_count,
                    "total_irr_count_for_this_repeat_unit_and_region": irr_pair_count,
                    "sample_read_depth": sample_read_depth,
                })

    return records, sample_read_depth, sample_read_length
93eec9ae09b03539112dfb6715c2e831293e4036
32,000
def preorder_traversal_iterative(root):
    """
    Return the preorder traversal of nodes' values.

    - Worst Time complexity: O(n)
    - Worst Space complexity: O(n)

    :param root: root node of given binary tree
    :type root: TreeNode or None
    :return: preorder traversal of nodes' values
    :rtype: list[int]
    """
    # base case
    if root is None:
        return []

    # use a stack to traverse
    result = []
    stack = [root]
    while len(stack) != 0:
        root = stack.pop()
        result.append(root.val)
        if root.right is not None:
            stack.append(root.right)
        if root.left is not None:
            stack.append(root.left)
    return result
202ebfa1e5ebb7a9f2632c66e9b7fe24f0041746
32,004
def lbm_lbm2grains(lbm_lbm):
    """lbm/lbm -> grains"""
    return lbm_lbm * 7000.
82de5e1a6bfdd9956c8719183d95822bad551c92
32,005
def reformat_timezone_offset(in_date_string):
    """
    Reformats the datetime string to get rid of the colon in the timezone offset

    :param in_date_string: The datetime string (str)
    :return: The reformatted string (str)
    """
    out_data = in_date_string
    if ":" == in_date_string[-3:-2]:
        out_data = out_data[:-3] + out_data[-2:]
    return out_data
279fbf00ff51f0926f3d284faee66a43327298e4
32,006
import random


def select_starting_point(map):
    """Return at random one possible starting point [row_index, column_index]"""
    starting_points = []
    for row_idx, row in enumerate(map):
        for col_idx, col in enumerate(row):
            if col == 's':
                starting_points.append([row_idx, col_idx])
    return random.choice(starting_points)
eb7ae107aeba2e846913b85fce73991da08b3565
32,012
def check_freq(dict_to_check, text_list):
    """
    Checks each given word's frequency in a list of posting strings.

    Params:
        dict_to_check: (dict) a dict of word strings to check frequency for, format:
            {'languages': ['Python', 'R'..], 'big data': ['AWS', 'Azure'...], ..}
        text_list: (list) a list of posting strings to search in

    Returns:
        freq: (dict) frequency counts
    """
    freq = {}

    # Join the text together and convert words to lowercase
    text = ' '.join(text_list).lower()

    for category, skill_list in dict_to_check.items():
        # Initialize each category as a dictionary
        freq[category] = {}
        for skill in skill_list:
            if len(skill) == 1:
                # pad single-letter skills such as "R" with spaces
                skill_name = ' ' + skill.lower() + ' '
            else:
                skill_name = skill.lower()
            freq[category][skill] = text.count(skill_name)
    return freq
c36813b876ff62b26c5caecd58dcafdd0bfc6ded
32,014
def parse_package_arg(name, arg):
    """Make a command-line argument string specifying whether and which version of a package to install.

    Args:
        name: The name of the package.
        arg: True if the package is required, False if the package is not
            required, or a string containing a version number if a specific
            version of the package is required.

    Returns:
        A string which can be used as an argument to the virtualenv command.
    """
    if arg is True:
        option = ""
    elif arg is False:
        option = f"--no-{name}"
    else:
        option = f"--{name}={arg}"
    return option
360cd93c96ab06f55ef8145b32c7c074d9abf349
32,015
def fetch_diagnoses(cursor):
    """Returns list of diagnoses"""
    cursor.execute("""SELECT * FROM diagnosis""")
    return cursor.fetchall()
d653451503cdd2dccc96ccd0ad79a4488be88521
32,017
def factorial(n):
    """Returns the factorial of a number."""
    fact = 1.0
    if n > 1:
        fact = n * factorial(n - 1)
    return fact
7c20382a053cc3609fa041d1920d23b884b8aa0b
32,018
def parse_copy_startup_config_running_config(raw_result):
    """
    Parse the 'copy startup-config running-config' command raw output.

    :param str raw_result: copy startup-config running-config raw result string.
    :rtype: dict
    :return: The parsed result of the copy startup-config running-config:

     ::

        {
            'status': 'success',
            'reason': 'Copied startup-config to running-config'
        }
    """
    if (
        "Copy in progress " in raw_result
        and "Success" in raw_result
    ):
        return {
            "status": "success",
            "reason": "Copied startup-config to running-config"
        }

    if (
        "Copy in progress " in raw_result
        and "ERROR: Copy failed" in raw_result
    ):
        return {
            "status": "failed",
            "reason": "Copy startup-config to running-config failed"
        }
02f71846d2ba5b80469aac64f586f17eb135513a
32,020
def fizzbuzz() -> list:
    """Return Fizz Buzz from 1 to 100.

    Return a list of numbers from 1 to 100, replacing multiples of three with
    Fizz, multiples of five with Buzz and multiples of both with FizzBuzz.
    """
    fizzbuzz_list = []
    for num in range(1, 101):
        if num % 3 == 0 and num % 5 == 0:
            fizzbuzz_list.append("FizzBuzz")
        elif num % 3 == 0:
            fizzbuzz_list.append("Fizz")
        elif num % 5 == 0:
            fizzbuzz_list.append("Buzz")
        else:
            fizzbuzz_list.append(num)
    return fizzbuzz_list
fb068b55a331d836ea2fa68d910714fb242f9318
32,022
def get_graph_element_name(elem):
    """Obtain the name or string representation of a graph element.

    If the graph element has the attribute "name", return name. Otherwise, return
    a __str__ representation of the graph element. Certain graph elements, such as
    `SparseTensor`s, do not have the attribute "name".

    Args:
        elem: The graph element in question.

    Returns:
        If the attribute 'name' is available, return the name. Otherwise, return
        str(elem).
    """
    if hasattr(elem, "attr"):
        val = elem.attr("name")
    else:
        val = elem.name if hasattr(elem, "name") else str(elem)
    return val
fa91db4237ba5d89bd475deb1ee04e3307098c93
32,025
import tqdm


def get_unique_value_used(all_sampled_adj_freq):
    """
    Description
    -----------
    Get the set of coordinates for which calculation is needed to compute
    the mean and the standard deviation

    Parameters
    ----------
    all_sampled_adj_freq : list
        list of sampled adjacency matrices

    Returns
    -------
    list
        list of coordinates
    """
    tuples_set = set()
    for sampled_adj in tqdm.tqdm(all_sampled_adj_freq):
        i_list = sampled_adj.nonzero()[0].tolist()
        j_list = sampled_adj.nonzero()[1].tolist()
        for i, j in zip(i_list, j_list):
            tuples_set.update([(i, j)])
    return list(tuples_set)
c61d21d1e7d85fd2568a4375bd221f59fa0576fb
32,034
def getCriticalElementIdx(puck):
    """Returns the index of the most critical element

    :param puck: 2d array defining puckFF or puckIFF for each element and layer
    """
    # identify critical element
    layermax = puck.max().argmax()
    return puck.idxmax()[layermax], layermax
d786775a08b7eedb7c5eeb2fbb6b63a4a2d75d32
32,036
def get_idx(prefix, itf):
    """
    Gets the index of an interface string

    >>> get_idx('et', 'et12')
    12
    >>> get_idx('ap', 'ap32')
    32
    """
    return int(itf[len(prefix):])
0a4b1e49ad6c0a7e9a2a9ae1903480a3bf73d70e
32,042
def split_to_and_since_delong(df):
    """Split the frame into time periods that DeLong analyzed and those since his article.

    :param df: The frame to split
    :return: Tuple with (to_delong, since_delong)
    """
    # The two index lists are complements of each other, split at June 2004.
    # (The original compared `d.year is 2004`, an identity check that is False
    # for large ints, and dropped pre-2004 dates with months >= 6.)
    to_delong_index = [d for d in df.index
                       if d.year < 2004 or (d.year == 2004 and d.month < 6)]
    since_delong_index = [d for d in df.index
                          if d.year > 2004 or (d.year == 2004 and d.month >= 6)]
    return df.loc[to_delong_index], df.loc[since_delong_index]
9037acca6037ba4888b5ec1b3c8099e0216022d7
32,045
import logging


def calc_voltage_extremes(volt):
    """Calculate the extreme values in the ECG data.

    This function takes the volt list as input, which is the magnitude of
    the ECG data, and finds the extreme values using the max() and min()
    functions. The max and min values are returned as a tuple.

    Args:
        volt (list): list of ECG voltage magnitudes

    Returns:
        tuple: (min, max)
    """
    logging.info('Finding max and min ECG values')
    maximum = max(volt)
    minimum = min(volt)
    ans = (minimum, maximum)
    return ans
aa6e3fa75c15fdea87052f357b885398dd6adcd4
32,046
import typing
from typing import TypeGuard

import asyncio


def is_async_iterator(obj: typing.Any) -> TypeGuard[typing.AsyncIterator[object]]:
    """Determine if the object is an async iterator or not."""
    return asyncio.iscoroutinefunction(getattr(obj, "__anext__", None))
160d1dd2d5f1c9d6d2637e6006ae0ef268c7810f
32,050
import math


def round_up_to_multiple(x, base):
    """Round up the given number to the nearest multiple of the given base number."""
    return math.ceil(float(x) / float(base)) * base
09ef5da40f94bd713bfab3cd5e610f952bf0bf92
32,070
def get(self, url, **kwargs):
    r"""Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    kwargs.setdefault('allow_redirects', True)
    return self.request('GET', url, **kwargs)
8b63a8e62d8185048448f93e6511b8069bc295c6
32,071
def rotate(items: list, k: int) -> list:
    """Rotate a list by k elements."""
    items = items[k:] + items[:k]
    return items
c649f79d4ce3d501a9042288e6f83369a7899a84
32,073
from typing import Any, Dict


def _get_fp_len(fp_params: Dict[str, Any]) -> int:
    """
    Return the length of the fingerprint with the given parameters.

    Parameters
    ----------
    fp_params : Dict[str, Any]
        Parameters to get the fingerprint length from

    Returns
    -------
    int
        The fingerprint length belonging to the given fingerprint parameters
    """
    if 'nBits' in fp_params:
        return fp_params['nBits']
    if 'fpSize' in fp_params:
        return fp_params['fpSize']
    return 166
944e952ad07fa0fa5ea11d5bff8e46b98c1ab87e
32,078
def flatesteam_feed(Q_feed, r_steam):
    """
    Calculates the flow rate of steam of the boiler.

    Parameters
    ----------
    Q_feed : float
        The heat load of the feed heat exchanger, [W] [J/s]
    r_steam : float
        The heat of vaporization of the distillate, [J/kg]

    Returns
    -------
    flatesteam_feed : float
        The flow rate of steam of the feed heat exchanger, [kg/s]

    References
    ----------
    Дытнерский, formula 2.3, p. 45
    """
    return Q_feed / r_steam
ad4a0aa995c9333d70b8fbd003bb03f8bb231018
32,081
def convert_to_ids(dataset, vocabulary):
    """Convert tokens to integers.

    :param dataset: a 2-d array, contains sequences of tokens
    :param vocabulary: a map from tokens to unique ids
    :returns: a 2-d array, contains sequences of unique ids (integers)
    """
    return [[vocabulary[token] for token in sample] for sample in dataset]
153094a0fcc57880193a441fde0927010b583d19
32,088
def getColumnLocations(columnNumber):
    """
    Return a list of all nine locations in a column.

    :param int columnNumber: Column
    :return: List of tuples
    :rtype: list
    """
    return [(row, columnNumber) for row in range(9)]
8a7876284327c52badc15ba26a28856018790341
32,089
from typing import Dict

import torch


def clone_tensors(tensors: Dict[int, torch.Tensor]) -> Dict[int, torch.Tensor]:
    """
    Clones all tensors in dictionary.

    Args:
        tensors (dict): Dictionary of tensors with integer keys.

    Returns:
        New dictionary with cloned tensors.
    """
    return {idx: tensor.clone() for idx, tensor in tensors.items()}
af5ae8f368c450d34ec412bd769abe03d77fd257
32,091
def lstripw(string, chars):
    """Strip matching leading characters from words in string"""
    return " ".join([word.lstrip(chars) for word in string.split()])
17b7bffd3e6f5e02cc184c1976eeedd93ebb4f3e
32,092
def is_folder_url(url_parts):
    """
    Determine if the given URL points to a folder or a file:

    if URL looks like:
    - www.site.com/
    - www.site.com

    then ==> Return True

    if URL looks like:
    - www.site.com/index.php
    - www.site.com/index.php?id=1&name=bb
    - www.site.com/index.php/id=1&name=bb

    then ==> Return False

    :param url_parts: Parsed URL to test.
    :type url_parts: ParsedURL

    :return: True if it's a folder, False otherwise.
    :rtype: bool
    """
    return url_parts.path.endswith('/') and url_parts.query_char != '/'
c5ba46005e6c82cbbcb2ef914947b5a154bdd3b0
32,094
def formatear_camino(pila):
    """Convert a list of cities into a string separated by ->"""
    return " -> ".join(map(str, pila))
3b85be818e8202e1b3626ce2020d91dd687e5383
32,095
import re


def check_word(word, string):
    """
    Check if the word exists in a string, using a word boundary for the search.

    word: the word to be searched for
    string: the string to perform the operation on
    """
    regexStr = re.search(r'(\b%s\b)' % word, string)
    if regexStr is not None:
        return True
    return False
da0f559b714bff6ec7a41a892e4c313a4eef13c0
32,096
def GetSettingTemplate(setting):
    """Create the template that will resolve to a setting from xcom.

    Args:
        setting: (string) The name of the setting.

    Returns:
        A templated string that resolves to a setting from xcom.
    """
    return ('{{ task_instance.xcom_pull(task_ids="generate_workflow_args"'
            ').%s }}') % setting
8d8c1c7b58d91b1d0a9561fa504887e725416fae
32,097
import inspect
import re


def getargspec(fn):
    """
    Similar to Python 2's :py:func:`inspect.getargspec` but:
    - In Python 3 uses ``getfullargspec`` to avoid ``DeprecationWarning``.
    - For builtin functions like ``torch.matmul`` or ``numpy.matmul``, falls back to
      attempting to parse the function docstring, assuming torch-style or numpy-style.
    """
    assert callable(fn)
    try:
        args, vargs, kwargs, defaults, _, _, _ = inspect.getfullargspec(fn)
    except TypeError:
        # Fall back to attempting to parse a PyTorch/NumPy-style docstring.
        match = re.match(r"\s*{}\(([^)]*)\)".format(fn.__name__), fn.__doc__)
        if match is None:
            raise
        parts = re.sub(r"[\[\]]", "", match.group(1)).split(", ")
        args = [a.split("=")[0] for a in parts if a not in ["/", "*"]]
        if not all(re.match(r"^[^\d\W]\w*\Z", arg) for arg in args):
            raise
        vargs = None
        kwargs = None
        defaults = ()  # Ignore defaults.
    return args, vargs, kwargs, defaults
3aa76a3915e42e9f90f0326c37b94d434eed588a
32,103
import platform


def format_build_command(command):
    """Format a command string so that it runs in the Android build environment.

    Args:
        command: Command to format.

    Returns:
        Command modified to run in the Android build environment.
    """
    environment = []
    if platform.system() == 'Darwin':
        environment.append('export BUILD_MAC_SDK_EXPERIMENTAL=1')
    return ' && '.join(environment + ['source ./build/envsetup.sh', command])
4a0eb92d85f99c01c14a94b8df4fd996d9c23ba2
32,104
def create_additive_function(increment):
    """
    Return the addition of a fixed value as a function.

    :param increment: float
        value that the returned function increments by
    :return: function
        function that can increment by the value parameter when called
    """
    return lambda value: value + increment
b7432eaa11dcea49bb98ec2c6d3e0cc9dd979145
32,108
def get_dense_network_shapes(n_layers, hidden_size, n_features, n_outputs):
    """
    Helper function to generate the input/output shapes for the layers of a
    densely connected network.

    :param n_layers: Number of hidden layers in the network
    :param hidden_size: How many hidden neurons to use
    :param n_features: Number of features in the original input
    :param n_outputs: Output size/number of target variables
    :return:
    """
    shapes = {'input': (n_features, hidden_size),
              'hidden': [],
              'output': (hidden_size * (n_layers + 1) + n_features, n_outputs)}
    for i in range(n_layers):
        shapes['hidden'].append((hidden_size * (i + 1) + n_features, hidden_size))
    return shapes
ea5e74fcdc3fe0b923f1377e202284f0576bff87
32,112
def list_type_check(lst, data_type):
    """
    Checks if each element of lst has a given type.

    :param lst: List = list of objects
    :param data_type: type = expected type for the objects
    :return: bool = true if all objects in list have the type data_type
    """
    return all(type(e) == data_type for e in lst)
e0c774ddf09a843e5f2f52f7cbf1e332f3f862ad
32,113
import collections


def characters(info, error, otext, tFormats, *tFeats):
    """Computes character data.

    For each text format, a frequency list of the characters in that format is made.

    Parameters
    ----------
    info: function
        Method to write informational messages to the console.
    error: function
        Method to write error messages to the console.
    otext: iterable
        The data of the *otext* feature.
    tFormats:
        Dictionary keyed by text format and valued by the tuple of features
        used in that format.
    tFeats: iterable
        Each tFeat is the name and the data of a text feature,
        i.e. a feature used in text formats.

    Returns
    -------
    dict
        Keyed by format valued by a frequency dict, which is itself keyed by
        single characters and valued by the frequency of that character in the
        whole corpus when rendered with that format.
    """
    charFreqsByFeature = {}
    for (tFeat, data) in tFeats:
        freqList = collections.Counter()
        for v in data.values():
            freqList[v] += 1
        charFreq = collections.defaultdict(lambda: 0)
        for (v, freq) in freqList.items():
            for c in str(v):
                charFreq[c] += freq
        charFreqsByFeature[tFeat] = charFreq

    charFreqsByFmt = {}
    for (fmt, tFeatures) in sorted(tFormats.items()):
        charFreq = collections.defaultdict(lambda: 0)
        for tFeat in tFeatures:
            thisCharFreq = charFreqsByFeature[tFeat]
            for (c, freq) in thisCharFreq.items():
                charFreq[c] += freq
        charFreqsByFmt[fmt] = sorted(x for x in charFreq.items())

    return charFreqsByFmt
d8e4cf16a3df05c18394483fc008fb453b6ab352
32,116
import xml.dom.minidom


def format_xml(xml_str: str, exceptions: bool = False):
    """
    Formats XML document as human-readable plain text.

    :param xml_str: str (Input XML str)
    :param exceptions: Raise exceptions on error
    :return: str (Formatted XML str)
    """
    try:
        return xml.dom.minidom.parseString(xml_str).toprettyxml()
    except Exception:
        if exceptions:
            raise
        return xml_str
517dcd73dfeebaeb4828be2e57e3ab02042001fd
32,126
def sub(x, y):
    """
    Returns the difference x-y

    Parameter x: The value to subtract from
    Precondition: x is a number

    Parameter y: The value to subtract
    Precondition: y is a number
    """
    return x - y
9c7d9fcef236dff3e5d4b9840c082cbeacc9c7e5
32,129
def excel_column_label(n):
    """
    Excel's column counting convention, counting from A at n=1
    """
    def inner(n):
        if n <= 0:
            return []
        div, mod = divmod(n - 1, 26)
        return inner(div) + [mod]
    return "".join(chr(ord("A") + i) for i in inner(n))
1555dcc33420d107c9aa74ce4d7f0395ae6b3029
32,130
def find_data_source_url(a_name, url_prefs):
    """Return the url prefix for data source name, or None."""
    for row in url_prefs:
        if row[0] == a_name:
            return row[1]
    return None
d91960040d4e572ff4c882a53f6ce66460253d9c
32,135
def tile(tensor, dim, repeat):
    """Repeat each element `repeat` times along dimension `dim`"""
    # We will insert a new dim in the tensor and torch.repeat it
    # First we get the repeating counts
    repeat_dims = [1] * len(tensor.size())
    repeat_dims.insert(dim + 1, repeat)
    # And the final dims (the original hardcoded a factor of 2 here,
    # which only worked for repeat == 2)
    new_dims = list(tensor.size())
    new_dims[dim] = repeat * tensor.size(dim)
    # Now unsqueeze, repeat and reshape
    return tensor.unsqueeze(dim + 1).repeat(*repeat_dims).view(*new_dims)
a8386c5ed8d6f89f226d64271a8fbddbf0ead543
32,138
def get_lowest_bits(n, number_of_bits):
    """Returns the lowest "number_of_bits" bits of n."""
    mask = (1 << number_of_bits) - 1
    return n & mask
086a48a359984bf950e44e49648bfcac05382c84
32,140
def search_step(f, x_k, alf, p_k):
    """
    This function performs an optimization step given a step length and step direction

    INPUTS:
        f < function > : objective function f(x) -> f
        x_k < tensor > : current best guess for f(x) minimum
        alf < float > : step length
        p_k < tensor > : step direction

    OUTPUTS:
        x_(k+1) < tensor > : new best guess for f(x) minimum
        f_(k+1) < tensor > : function evaluated at new best guess
    """
    x_k1 = x_k + alf * p_k
    f_k1 = f(x_k1)
    return x_k1, f_k1
51d634ef8a6196a884a0c2ec855fb785acf65db5
32,141
def split_str(string):
    """Split string in half to return two strings"""
    split = string.split(' ')
    return ' '.join(split[:len(split) // 2]), ' '.join(split[len(split) // 2:])
01268b6c47a4181c7a2e04cacf7651a8c0c81c50
32,143
from typing import Tuple


def get_default_span_details(scope: dict) -> Tuple[str, dict]:
    """Default implementation for get_default_span_details

    Args:
        scope: the asgi scope dictionary

    Returns:
        a tuple of the span name, and any attributes to attach to the span.
    """
    span_name = (
        scope.get("path", "").strip()
        or f"HTTP {scope.get('method', '').strip()}"
    )
    return span_name, {}
6177c4f32c5837752cce9c8b346350b480bfdcd2
32,144
def process_name_strings(language_data, df):
    """
    Returns a dictionary of names for each of the different items specified in the
    DataFrame (df). The key denotes the row_number of each item. A nested dictionary
    is the value, with each language code as the key, and the value the name. If a
    language does not have a name/translation, None is provided in its place. The
    default language (typically EN) is specified as 'default'. This should be used
    as a fallback.
    """
    return_object = {}
    included_languages = []

    def map_to_dict(return_object, language_code, string_dict):
        """Nested function that takes data from the DataFrame and puts it into the return_object."""
        for row_number, name in string_dict.items():
            if row_number not in return_object.keys():
                return_object[row_number] = {}
            name = name.strip()
            if name == '':
                return_object[row_number][language_code] = None
            else:
                return_object[row_number][language_code] = name

    # Iterate over each of the columns, extracting the "name " fields.
    # Determines what languages we have available in the DataFrame.
    for column in df.columns:
        if column.startswith('name'):
            included_languages.append(column.lower().split()[-1])

    # Build up the dictionary to return.
    for language_code in language_data:
        try:
            string_name = df[f'name {language_code}']
            map_to_dict(return_object, language_code, string_name.to_dict())
            if language_data[language_code]['DEFAULT']:
                map_to_dict(return_object, 'default', string_name.to_dict())
        except KeyError:
            continue

    # Fill in the blanks - check each language we should be providing support
    # for is mentioned. If not, specify None in its place.
    for row_number in return_object.keys():
        for language_code in language_data:
            if language_code not in return_object[row_number].keys():
                return_object[row_number][language_code] = None

    return return_object
f77fd8a83c524f0bca8b0c8c15c2216437492e1f
32,150
def convert_list_type(x, type=int):
    """Convert elements in list to given type."""
    return list(map(type, x))
36957a24aaeff11cedd2dcb0715c757b2c627083
32,151
def getExperimentAccuracy(list_of_results):
    """Returns a simple accuracy for the list of Result objects."""
    num_correct, num_total = 0, 0
    for result in list_of_results:
        if result.isCorrect():
            num_correct += 1
        num_total += 1
    return num_correct / num_total
8276e06a41a1105700232ed1ccfb38bd2b3d5063
32,152
import json


def parser(chunks):
    """
    Parse each data chunk into a dictionary; catch failures and return
    suitable defaults.
    """
    dictionaries = []
    for chunk in chunks:
        try:
            dictionaries.append(json.loads(chunk))
        except ValueError:
            dictionaries.append({'unparsed': chunk})
    return dictionaries
385b73026c079b635b6e33b35bfd8f5ebb453f64
32,154
def vlan_range_expander(all_vlans):
    """
    Function for expanding list of allowed VLANs on trunk interface.
    Example: `1-4096` -> range(1, 4097). Can be used when trying to figure out
    whether certain VLAN is allowed or not. Reverse function is
    ``vlan_range_shortener``.

    :param all_vlans: Either list (`["1-10", "15", "20-30"]`) or string
        (`"1-10,15,20-30"`) of VLAN range.
    :return: List of VLAN numbers (integers).
    """
    if isinstance(all_vlans, list):
        pass
    elif isinstance(all_vlans, str):
        all_vlans = all_vlans.split(",")
    elif isinstance(all_vlans, int):
        all_vlans = [str(all_vlans)]

    full_list = []
    for vlan in all_vlans:
        if "-" in vlan:
            temp = vlan.split("-")
            full_list = full_list + list(range(int(temp[0]), int(temp[1]) + 1))
        else:
            full_list.append(int(vlan))
    return full_list
5224436c8bf10509df6d8ad52321e7dd9214792a
32,163
import torch


def to_cuda(data):
    """Put input data onto the CUDA device."""
    if isinstance(data, tuple):
        return [d.cuda() for d in data]
    elif isinstance(data, torch.Tensor):
        return data.cuda()
    raise RuntimeError
af9820bccbce3369357bf7c5b853efe3e88e052a
32,164
from typing import Optional

import ast


def get_version_using_ast(contents: bytes) -> Optional[str]:
    """Extract the version from the given file, using the Python AST."""
    tree = ast.parse(contents)

    # Only need to check the top-level nodes, and not recurse deeper.
    version: Optional[str] = None
    for child in tree.body:
        # Look for a simple string assignment to __version__
        if (
            isinstance(child, ast.Assign)
            and len(child.targets) == 1
            and isinstance(child.targets[0], ast.Name)
            and child.targets[0].id == "__version__"
            and isinstance(child.value, ast.Str)
        ):
            version = child.value.s
            break

    return version
690d4d04fd17263b90fa51a982519f685f9622a4
32,167
def _arg_scope_func_key(op):
    """Returns a key that can be used to index arg_scope dictionary."""
    return getattr(op, '_key_op', str(op))
b713202ef1d53650996041bd15655b58f348423a
32,168
def _maybe_convert_to_int(value):
    """Returns the int representation contained by string |value| if it
    contains one. Otherwise returns |value|.
    """
    try:
        return int(value)
    except ValueError:
        return value
2e67c4a8f6aa3ef5a0f982c85127f37b60f979ad
32,174
def get_centroid_idx(centroids, c):
    """
    Returns the index of a given centroid c. Assumes that centroids is an
    ndarray of shape (k, d) where k is the number of centroids and d is the
    number of dimensions.
    """
    return centroids.tolist().index(c.tolist())
08eae6aaa3ac7933c5f8bca08a9c1c75da26daf0
32,176
def undo_pad(data, pad_size):
    """Remove padding from the edges of images.

    Parameters
    ----------
    data : array-like
        padded image
    pad_size : array-like
        amount of padding in every direction of the image

    Returns
    -------
    data : array-like
        unpadded image
    """
    if pad_size.ndim == 1 and data.ndim != 1:
        raise ValueError("Dimensions do not match")
    if data.ndim != pad_size.shape[0]:
        raise ValueError("Dimensions do not match")
    start = pad_size[:, 0].astype(int)
    end = (data.shape - pad_size[:, 1]).astype(int)
    coords = list(zip(start, end))
    slices = tuple(slice(coord[0], coord[1]) for coord in coords)
    data = data[slices]
    return data
873feb3cf4daaf6153dfe87662ba10b531ba222f
32,178
def df_to_vega(df):
    """
    Convert a Pandas dataframe to the format Vega-Lite expects.
    """
    return [row[1].to_dict() for row in df.reset_index().iterrows()]
6ecbddde38cfc1420370c70a48161e21efd79980
32,183