Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
import base64


def base64encode(data):
    """
    Return bytes encoded as base64.

    :param bytes data: Binary data to encode.
    :return: Base64 encoded string.
    :rtype: str
    """
    return base64.b64encode(data).decode('utf-8')
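A quick usage sketch (illustrative input, not from the original snippet):

>>> base64encode(b'hello')
'aGVsbG8='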
455ba57e13980d8d144b1a319d3d85872648e0af
36,770
def format_channels(channels, maxdevices=4, numperhat=2):
    """
    This function converts a list of channels to be used (e.g. [0,1,3])
    into a list of lists [[0,1], [1], [None], [None]]

    Args:
        channels (list): List containing channels to use
        maxdevices (int): Maximum number of hats on device
        numperhat (int): Total number of channels per hat

    Returns:
        chans (list): List of lists describing channels in correct format
    """
    chans = []
    for i in range(maxdevices):
        chans.append([None])
    for i in channels:
        if i.channel % 2 == 0:
            ind = i.channel // numperhat
            chans[ind].append(0)
            if chans[ind].count(None) > 0:
                chans[ind].remove(None)
        else:
            ind = (i.channel - 1) // numperhat
            chans[ind].append(1)
            if chans[ind].count(None) > 0:
                chans[ind].remove(None)
    return chans
40b8fe9f37be0a5e900b756687589d49d86b60fb
36,774
def fscore_from_sed_eval_metrics(sed_eval_metrics):
    """Extract class-wise and averaged fscores, precisions and recalls
    from a sed_eval metrics object.

    Args:
        sed_eval_metrics:

    Returns:
        fscore (dict of float): fscore values
        precision (dict of float): precision values
        recall (dict of float): recall values
    """
    f = {}
    p = {}
    r = {}
    sed_eval_results_classwise = sed_eval_metrics.results_class_wise_metrics()
    for key in sed_eval_results_classwise:
        f[key] = sed_eval_results_classwise[key]['f_measure']['f_measure']
        p[key] = sed_eval_results_classwise[key]['f_measure']['precision']
        r[key] = sed_eval_results_classwise[key]['f_measure']['recall']
    sed_eval_results_macro = sed_eval_metrics.results_class_wise_average_metrics()
    f['macro_average'] = sed_eval_results_macro['f_measure']['f_measure']
    p['macro_average'] = sed_eval_results_macro['f_measure']['precision']
    r['macro_average'] = sed_eval_results_macro['f_measure']['recall']
    sed_eval_results_micro = sed_eval_metrics.results_overall_metrics()
    f['micro_average'] = sed_eval_results_micro['f_measure']['f_measure']
    p['micro_average'] = sed_eval_results_micro['f_measure']['precision']
    r['micro_average'] = sed_eval_results_micro['f_measure']['recall']
    return f, p, r
20beb5153182ad305a42b943da264f19e7694522
36,779
def concat(*args):
    """
    This function joins all args into a single string

    :param args:
    :return: concatenated string
    """
    return ''.join(args)
49080bd237610788c90d6aca2d6616c43de5144e
36,780
import datetime as _datetime


def _get_last_day(datetime):
    """Return the start of the day before 'datetime',
    e.g. _get_last_day(April 1st) will return March 31st
    """
    datetime = datetime - _datetime.timedelta(days=1)
    return _datetime.datetime(
        year=datetime.year,
        month=datetime.month,
        day=datetime.day,
        tzinfo=datetime.tzinfo
    )
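For example (illustrative date, assuming the function above is in scope):

>>> _get_last_day(_datetime.datetime(2021, 4, 1, 15, 30))
datetime.datetime(2021, 3, 31, 0, 0)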
01fbabcbac0d3dc4bab7cb93fa1fde39b0d16b8f
36,781
def player_bs_l(list, value, step=0, silent=False):
    """Player (B)inary (S)earch (L)ast name

    Search, stop and return object once it matches parameters

    :param list: Set to look for
    :param value: Value to match
    :param step: Number of steps the search has already taken
    :param silent: Keep this function from printing the steps
    :return: None if not found, the matching object otherwise
    """
    found = None
    step += 1
    if not silent:
        print("Step count: " + str(step))
    if len(list) >= 1:
        middle = len(list) // 2
        if list[middle].last == value:
            found = list[middle]
        elif list[middle].last < value:
            # Exclude the middle element so the search space always shrinks
            # (recursing on list[middle:] could loop forever, and a
            # single-element list would never be checked).
            found = player_bs_l(list[middle + 1:], value, step, silent)
        else:
            found = player_bs_l(list[:middle], value, step, silent)
    return found
31e8ea8943fcc37e7c00a58ce526b982ca57a942
36,784
import logging


def get_logger(name):
    """
    Get or create a logger with the specified name.

    Recommended usage: get_logger(__name__)
    """
    return logging.getLogger(name)
d28588d8bc2a370a1711e81ccc42c51a81a1c8b2
36,785
def kl_gauss(x, y, sig2=1.):
    """Kullback-Leibler divergence for Gaussian distributions."""
    return (x - y) ** 2 / (2 * sig2)
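For two Gaussians with shared variance sig2, KL(N(x, sig2) || N(y, sig2)) = (x - y)**2 / (2 * sig2), which is exactly what the function computes; a quick check with made-up values:

>>> kl_gauss(2.0, 0.0)
2.0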
caf6912e347192e85e3f4d6336d4b6bcb9d6468f
36,789
def parse_fasta(filename: str):
    """
    Parse a FASTA file.

    Opens the file, reads its content, and returns the header and sequence of
    each separate block. Blocks are denoted with a '>' header before each
    sequence.
    """
    header: list = []
    sequence: list = []
    head = None
    seq = ""
    # Use a context manager so the file handle is closed after reading.
    with open(filename, 'r') as stuff:
        for line in stuff:
            if line.startswith('>'):
                header.append(line[1:-1])
                if head:
                    sequence.append(seq)
                    seq = ""
                head = line[1:]
            else:
                seq += line.rstrip()
    sequence.append(seq)
    return header, sequence
93f785e6059e24c75139ffb0118ce3a3b3c1f793
36,797
def merge_components(local_component, target_component):
    """
    Find resulting component from merging the first (local) component into
    the second. The resulting component will maintain the parent identifier
    of the target component.
    """
    local_bounds, local_center, local_size, local_parent = local_component
    target_bounds, target_center, target_size, target_parent = target_component
    merged_bounds = [
        min(local_bounds[0], target_bounds[0]),
        max(local_bounds[1], target_bounds[1]),
        min(local_bounds[2], target_bounds[2]),
        max(local_bounds[3], target_bounds[3]),
        min(local_bounds[4], target_bounds[4]),
        max(local_bounds[5], target_bounds[5]),
    ]
    merged_size = local_size + target_size
    # use weighted averaging to find center. the center point is not
    # guaranteed to occur at a position containing the component
    # (e.g. if it is a "C" shape)
    merged_center = (
        local_center * local_size + target_center * target_size
    ) / merged_size
    return merged_bounds, merged_center, merged_size, target_parent
bdb9b437d0981f46b676c427034b7716c8727560
36,799
def get_number_of_ratings(ratings):
    """Gets the total number of ratings represented by the given ratings
    object.

    Args:
        ratings: dict. A dict whose keys are '1', '2', '3', '4', '5' and whose
            values are nonnegative integers representing frequency counts.

    Returns:
        int. The total number of ratings given.
    """
    return sum(ratings.values())
f6096357bbb380760c5b0087d1f63d27d958de30
36,801
def tag(*tags):
    """Select a (list of) tag(s)."""
    vtag = [t for t in tags]
    return {"tag": vtag}
29b48da2ed4b56ff20feb1bc0fd0455cfbc7f8dc
36,807
def mat333mult(a, b):
    """
    Multiply a 3x3 matrix with a 3x1 matrix.

    Parameters
    ----------
    a : tuple of tuple of float
        3x3 matrix
    b : tuple of float
        3x1 matrix

    Returns
    -------
    res : list of float
        3x1 matrix
    """
    res = [0, 0, 0]
    r3 = range(3)
    for i in r3:
        res[i] = sum([a[i][j] * b[j] for j in r3])
    return res
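A sanity check with the identity matrix (illustrative values):

>>> mat333mult(((1, 0, 0), (0, 1, 0), (0, 0, 1)), (1, 2, 3))
[1, 2, 3]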
f37803b028453f209b0a2e86f8b1b372962af47d
36,808
def zero(_):
    """
    Return always 0.

    :param _: anything
    :return: 0
    """
    return 0
f43560651de15f3aa1633ef31d5eed425f30015c
36,810
import torch


def mse(y_pred, y_true, masks=None):
    """Compute mean square error (MSE) loss with masks.

    Parameters
    ----------
    y_pred : :obj:`torch.Tensor`
        predicted data
    y_true : :obj:`torch.Tensor`
        true data
    masks : :obj:`torch.Tensor`, optional
        binary mask that is the same size as `y_pred` and `y_true`; by placing
        0 entries in the mask, the corresponding dimensions will not
        contribute to the loss term, and will therefore not contribute to
        parameter updates

    Returns
    -------
    :obj:`torch.Tensor`
        mean square error computed across all dimensions
    """
    if masks is not None:
        return torch.mean(((y_pred - y_true) ** 2) * masks)
    else:
        return torch.mean((y_pred - y_true) ** 2)
03354acc9538ebae85d83d28fc9c46a726c1dc92
36,813
def set_target(dataframe, target):
    """
    :param dataframe: Full dataset
    :param target: Name of classification column
    :return x: Predictors dataset
    :return y: Classification dataset
    """
    x = dataframe.drop(target, axis=1)
    y = dataframe[target]
    return x, y
767b212d54295ad8681731583a849c48ca766400
36,818
import math


def isprime_ver2(n):
    """
    Returns True if n is prime, False otherwise.

    * Only check up to the square root of n.

    # We only need to search up to the square root of n because if a number N
    # is not prime, it can be factored into 2 factors A and B: N = A * B.
    # If both A and B were > squareroot(N), A*B would be greater than N.
    # Therefore, at least one of those factors must be less than or equal to
    # the square root of N. This is why we only need to test for factors less
    # than or equal to the square root.
    # Mathematical representation: if N = A*B and A <= B then A*A <= A*B = N
    """
    if n < 2:
        return False
    i = 2
    limit = math.sqrt(n)
    while i <= limit:
        if n % i == 0:
            return False
        i += 1
    return True
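A couple of illustrative checks:

>>> isprime_ver2(97)
True
>>> isprime_ver2(91)  # 7 * 13
False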
093a8298ee1b5a38a87abdf4e5d6c7814039377f
36,825
import re


def _remove_command(text, command):
    """Removes '\\command{*}' from the string 'text'.

    Regex expression used to match balanced parentheses taken from:
    https://stackoverflow.com/questions/546433/regular-expression-to-match-balanced-parentheses/35271017#35271017
    """
    return re.sub(r'\\' + command + r'\{(?:[^}{]+|\{(?:[^}{]+|\{[^}{]*\})*\})*\}',
                  '', text)
b5a4284aac7fc28e5eb4f57295fd078f1931a849
36,827
def aerocom_n(x, bc, oc, so2, nh3):
    """ERFari linear in emissions including nitrate

    Inputs
    ------
    x : obj:`numpy.array`
        Time series of aerosol emissions
    bc : float
        Radiative efficiency of black carbon, W m**-2 (TgC yr**-1)**-1
    oc : float
        Radiative efficiency of organic carbon, W m**-2 (TgC yr**-1)**-1
    so2 : float
        Radiative efficiency of sulfate (expressed as SO2 emissions),
        W m**-2 (TgSO2 yr**-1)**-1
    nh3 : float
        Radiative efficiency of nitrate (expressed as NH3 emissions),
        W m**-2 (TgNH3 yr**-1)**-1

    Returns
    -------
    res : obj:`numpy.array`
        Time series of ERFari
    """
    return bc*x[0] + oc*x[1] + so2*x[2] + nh3*x[3]
7a5b69a0b00a3840f4d974d47443af353bcfa06e
36,828
def perm(n, m):
    """
    permutation: nPm

    >>> perm(5, 2)  # 5*4
    20
    """
    f = 1
    for i in range(n - m + 1, n + 1):
        f *= i
    return f
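Another illustrative check: 6P3 multiplies 4*5*6.

>>> perm(6, 3)
120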
5152906fa75bfef6141d59ca7e7d5d51ddd6b4dd
36,830
def get_item(dict, key):
    """Get an item from a mapping."""
    return dict.get(key)
ba7e54f68c788319890deef6428f8969b042aed9
36,831
def extract_nothing(fileobj, keywords, comment_tags, options):
    """Pseudo extractor that does not actually extract anything, but simply
    returns an empty list.
    """
    return []
db941abdf86c344863c56acf12312f97261e383f
36,842
def mod_inverse(a, m):
    """Return a^-1 mod m (modular inverse)"""
    def egcd(a, b):
        """Return an extended greatest common divisor for a, b"""
        if a == 0:
            return b, 0, 1
        g, y, x = egcd(b % a, a)
        return g, x - y * (b // a), y

    g, x, y = egcd(a, m)
    if g != 1:
        raise ValueError("No modular inverse for: a={:d}, m={:d}".format(a, m))
    return x % m
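An illustrative check: 3 * 5 = 15 ≡ 1 (mod 7).

>>> mod_inverse(3, 7)
5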
765d84ddaa2416f62d42ac45ecfff360a0579fa8
36,844
import re


def parseExcludeAgentCases(spec):
    """
    Parses "exclude-agent-cases" from the spec into a list of pairs of agent
    pattern and case pattern list.
    """
    # dict.has_key() was removed in Python 3; use the `in` operator instead.
    if "exclude-agent-cases" in spec:
        ee = spec["exclude-agent-cases"]
        pats1 = []
        for e in ee:
            # Raw strings avoid invalid '\.' escape sequences.
            s1 = "^" + e.replace('.', r'\.').replace('*', '.*') + "$"
            p1 = re.compile(s1)
            pats2 = []
            for z in ee[e]:
                s2 = "^" + z.replace('.', r'\.').replace('*', '.*') + "$"
                p2 = re.compile(s2)
                pats2.append(p2)
            pats1.append((p1, pats2))
        return pats1
    else:
        return []
a0af6e00d1ff5fe099aa5a4e70beccc4ed72a1fa
36,846
def construct_parameters(**kwargs):
    """Translates data to a format suitable for Zabbix API

    Args:
        **kwargs: Arguments passed to the module.

    Returns:
        A dictionary of arguments in a format that is understandable
        by Zabbix API.
    """
    if kwargs['mappings'] is None:
        return dict(
            name=kwargs['name']
        )
    return dict(
        name=kwargs['name'],
        mappings=[
            dict(
                value=mapping['value'],
                newvalue=mapping['map_to']
            ) for mapping in kwargs['mappings']
        ]
    )
2e92756a61325932e57179e1b0df624a90b6dda8
36,850
from typing import Union
from typing import Pattern
import re


def compile_regex(regex: Union[str, Pattern[str]], flags: int = 0) -> Pattern[str]:
    """Compile the regex string/object into an object with the given flags."""
    if isinstance(regex, Pattern):
        if regex.flags == flags:
            return regex
        regex = regex.pattern
    return re.compile(regex, flags)
b6aaad538bd7c802d9969e38580c4feb349657a8
36,852
def __diff_strings(str1, str2):
    """
    Compare two strings and return the substrings where they differ
    (e.g. "ABC/def" and "ABC/ddd" would return "ef" and "dd")
    """
    len1 = len(str1)
    len2 = len(str2)
    minlen = min(len1, len2)
    diff = None
    for idx in range(minlen):
        if str1[idx] != str2[idx]:
            diff = idx
            break
    if diff is not None:
        # Slice from the first differing index, matching the docstring
        # example (slicing from diff-1 would include one matching character).
        return str1[diff:], str2[diff:]
    if len1 == len2:
        return "", ""
    return str1[minlen:], str2[minlen:]
a831513e14aa703d4e15859b493772c484863437
36,853
def reason_is_ne(field: str, expected, got) -> str:
    """
    Create a string that describes two values being unequal

    Args:
        field: the name of the mismatched field
        expected: the expected value
        got: the actual value
    """
    return f'{field} mismatch: expected {expected}, got {got}'
2b2a641e97d5e48db1b6b90a9ae501157912c95b
36,854
from typing import Optional


def safe_language_tag(name: Optional[str]) -> str:
    """Convert language names to tags that are safe to use for identifiers
    and file names.

    Args:
        name: Name to convert to a safe name. Can be `None`.

    Returns:
        A safe string to use for identifiers and file names.
    """
    if name is None:
        return ""
    name = name.lower()
    return {"c++": "cpp", "objective-c": "objc"}.get(name, name)
ef128910a8b17d41f165147e5ac7eea82677a1d5
36,855
def filter_connections(connections, annotations):
    """
    Keep connections if they were assigned the 'Equal' label or
    if they were not annotated.

    :param list connections: List of candidate connections.
    :param list annotations: List of annotations from the prodigy db-out command.
    :returns: Filtered connections.
    :rtype: list
    """
    # 1 corresponds to the equal label.
    annotated_idxs = [ann["_input_hash"] for ann in annotations]
    not_annotated_idxs = [i for i in range(len(connections))
                          if i not in annotated_idxs]
    equal_idxs = [ann["_input_hash"] for ann in annotations
                  if ann["answer"] == "accept" and 1 in ann["accept"]]
    keep_idxs = equal_idxs + not_annotated_idxs
    cnxs = [connections[i] for i in keep_idxs]
    return cnxs
36bb0f6b5c99062c9334a0b1ef14d18f18b2737b
36,857
def select_workflow(gi, folder_id, workflow_names, sample, run, lh):
    """
    Select a workflow (either single or paired) based on the number of
    datasets contained in the current data library folder.
    """
    workflow_name = None
    # Get the number of datasets within the folder.
    folder_contents_dict = gi.folders.show_folder(folder_id)
    num_datasets = folder_contents_dict['item_count']
    if num_datasets == 1:
        workflow_name = workflow_names['SINGLE']
    elif num_datasets == 2:
        workflow_name = workflow_names['PAIRED']
    if workflow_name:
        lh.write('Selected workflow named %s for sample %s of run %s\n'
                 % (workflow_name, sample, run))
    return workflow_name, num_datasets
e5e659ab8b2dabd53456d01ed82c2430f4543da4
36,860
import inspect


def is_exception(obj):
    """Check if an object is an exception."""
    return inspect.isclass(obj) and issubclass(obj, Exception)
b6e84dbef8b55740d4c7caf13cfdc5efb9c4e47a
36,861
import aiohttp


async def request_url(url: str, session: aiohttp.ClientSession) -> dict:
    """
    Requests an abuseipdb api url and returns its data.

    :param url: str, abuseipdb api url
    :param session: aiohttp.ClientSession, client session with api key in header
    :return: dict, data returned by the api
    """
    async with session.get(url) as response:
        if response.status == 200:
            return await response.json(encoding="utf-8")
        else:
            return {}
d47e7610a81690e3ed737a7c895104ecf18f4a06
36,866
def prefix(s1, s2):
    """
    Return the length of the common prefix of s1 and s2
    """
    sz = len(s2)
    for i in range(sz):
        # s1 is indexed cyclically, so a repeating s1 can match a prefix of
        # s2 that is longer than s1 itself.
        if s1[i % len(s1)] != s2[i]:
            return i
    return sz
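An illustrative check (first mismatch at index 3):

>>> prefix("hello", "help")
3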
ea8766c65e8640e7d0c25003389a62058b7117f9
36,867
import re


def FirstTextMatch(node_list, search_regex):
    """Find the first Node in node_list which matches search_regex.

    Args:
        node_list: A container of Node objects.
        search_regex: A regular expression to match with the Node object.

    Returns:
        The first Node object in node_list which matches search_regex,
        or None if none were found.
    """
    regex = re.compile(search_regex)
    for node in node_list:
        if regex.search(node.text):
            return node
    return None
eb773eb0b5ce783f4df589ecf785c520a33d4750
36,868
def flatten_single(x, begin_axis=1):
    """
    Flatten a tensor in all dimensions from @begin_axis onwards.

    Args:
        x (torch.Tensor): tensor to flatten
        begin_axis (int): which axis to flatten from

    Returns:
        y (torch.Tensor): flattened tensor
    """
    fixed_size = x.size()[:begin_axis]
    _s = list(fixed_size) + [-1]
    return x.reshape(*_s)
53899640bf8ee5e6b732e4e58257ae33b8285466
36,876
def velocity_from_transition_matrix(P, x, deltat):
    """Estimate velocity field from transition matrix
    (i.e. compute expected displacements)

    :param P: transition matrix
    :param x: input data -- `N` points of `M` dimensions in the form of a
        matrix with dimensions `(N, M)`
    :param deltat: timestep for which `P` was calculated.
    """
    return (P @ x - x) / deltat
8f4f3bd6c130dacc5e8ec7431c801d03a4a0db00
36,886
def get_cheat_sheet(cheat_sheet):
    """converts a cheat sheet from .json to string to display

    Parameters
    ----------
    :param dictionary cheat_sheet: dictionary that stores the content
        of given cheat sheet.
    :return: a str representation of a cheat sheet.
    """
    sheet = []
    separator = '\n'
    for data_type in cheat_sheet:
        sheet.append(f'__**{data_type}**__')
        for method in cheat_sheet[data_type]:
            method_description = cheat_sheet[data_type][method]
            sheet.append(f'**{method}** - {method_description}')
        sheet.append('')
    return separator.join(sheet)
b8d401e0c73c0f103cf0b3f404a2150b7bc28a36
36,887
def get_holding_data(holdings, stimulus_data, total_duration, default_holding):
    """Extract holding data from StepProtocol json dict and add amplitude
    to a holding list.

    Args:
        holdings (list): list of holding amplitudes (nA) to be updated
        stimulus_data (dict): stimulus dict from protocol json file
            containing holding data
        total_duration (float): total duration of the step (ms)
        default_holding (float): default value for the custom holding entry

    Returns:
        a tuple containing

        - float: delay of the holding stimulus (ms)
        - float: duration of the holding stimulus (ms)
    """
    if "holding" in stimulus_data:
        holding = stimulus_data["holding"]
        # amp can be None in e.g. Rin recipe protocol
        if holding["amp"] is not None and holding["amp"] != default_holding:
            holdings.append(holding["amp"])
        hold_step_delay = holding["delay"]
        hold_step_duration = holding["duration"]
    else:
        hold_step_delay = 0.0
        hold_step_duration = total_duration
    return hold_step_delay, hold_step_duration
af7d870ba4a85a7767af9d8b73d5befe60519f8d
36,889
def has_errors(build):
    """Checks if there are errors present.

    Args:
        build: the whole build object

    Returns:
        True if has errors, else False
    """
    # Compare against 0 so the function returns a bool, as documented,
    # rather than the raw length.
    return "errors" in build and len(build["errors"]) > 0
c5934d3c34f0248f20330f3ec2afb94572d624a1
36,894
def _segment_less_than(a: str, b: str) -> bool:
    """Return True if a is logically less than b."""
    max_len = max(len(a), len(b))
    return a.rjust(max_len) < b.rjust(max_len)
37446b4160dd4a99fa445126545b8fb0014e5814
36,896
from pathlib import Path


def get_theme_base_dirs_from_settings(theme_base_dirs=None):
    """
    Return base directories that contain all the themes.

    Example:
        >> get_theme_base_dirs_from_settings(['/edx/app/ecommerce/ecommerce/themes'])
        ['/edx/app/ecommerce/ecommerce/themes']

    Args:
        theme_base_dirs (list of str): Paths to themes base directories.

    Returns:
        (List of Paths): Base theme directory paths
    """
    theme_base_dirs_paths = []
    if theme_base_dirs:
        theme_base_dirs_paths.extend(
            [Path(theme_base_dir) for theme_base_dir in theme_base_dirs])
    return theme_base_dirs_paths
2b0be9a3a65e8ec5356c667ab3ffe223f34245fe
36,907
import re


def get_transcripts(transcript_file):
    """
    Parses FusionInspector transcript file and returns dictionary of sequences

    :param str transcript_file: path to transcript FASTA
    :return: de novo assembled transcripts
    :rtype: dict
    """
    with open(transcript_file, 'r') as fa:
        transcripts = {}
        regex_s = r"(?P<ID>TRINITY.*)\s(?P<fusion>.*--.*):(?P<left_start>\d+)-(?P<right_start>\d+)"
        regex = re.compile(regex_s)
        while True:
            # Usually the transcript is on one line
            try:
                info = next(fa)
                seq = next(fa)
                assert info.startswith('>')
                m = regex.search(info)
                if m:
                    transcripts[m.group('ID')] = seq.strip()
            except StopIteration:
                break
            except AssertionError:
                print("WARNING: Malformed fusion transcript file")
    return transcripts
b5804760ecf2ac2e80d734108882b7917e6409f8
36,910
import json


def fetch_credentials(service_name, creds_file="service-credentials.json"):
    """Fetch credentials for cloud services from file.

    Params
    ======
    - service_name: a Watson service name, e.g. "discovery" or "conversation"
    - creds_file: file containing credentials in JSON format

    Returns
    =======
    creds: dictionary containing credentials for specified service
    """
    with open(creds_file, "r") as f:
        creds = json.load(f)
    return creds[service_name]
44d658af0dc72aec90cec51093fc5c891553223b
36,912
def Quoted(s):
    """Return the string s within quotes."""
    return '"' + str(s) + '"'
20ddfdd5d815f71f092307438edb808681cecdb8
36,914
def required_index(a):
    """
    Helper function to take a list of index lists and return whether it
    needs to be included as an index in demultiplexing.
    """
    return len(set(tuple(a_i) for a_i in a)) != 1
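Illustrative checks: identical index lists need no demultiplexing index, differing ones do.

>>> required_index([['A', 'C'], ['A', 'C']])
False
>>> required_index([['A'], ['C']])
True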
fb3893c17ce75ae0085c7c6eccf20aab8ac653ee
36,923
def krueger12_eta(lpc):
    """Ratio of 56Ni to total iron-peak yield from Krueger+ 2012

    Fitting formula for K12 central density results. Based on looking at
    iron-peak yields from Khokhlov, Mueller & Hoflich 1993, I assign a flat
    prior eta = 0.9 below a central density of 1e+9 g/cm^3. Could probably
    do better by incorporating other results e.g. from the MPA group
    (Seitenzahl+ 2013).

    Input lpc (log10 of central density), output eta = MNi/(MNi + MFe).
    """
    pc9 = 1e-9 * 10**lpc
    return min(0.95, 0.95 - 0.05*pc9), max(0.025, 0.03*pc9)
96f87a9c490b0ad0feff6859399977bc58f6b48a
36,928
import math


def calculateFuelForMass(mass):
    """calculate the fuel for a given mass"""
    return math.floor(int(mass) / 3) - 2
a7605515b99b2c3c57be239ea3e07da0014a4dad
36,932
import hashlib


def compute_password_digest(message):
    """Helper method to compute the message digest for the given string."""
    # sha224 operates on bytes, so encode the string first (the original
    # passed the str directly, which raises TypeError on Python 3).
    return hashlib.sha224(message.encode('utf-8')).hexdigest()
71d7dd895b998e7b8d9698ec5db7970de4c2cbc6
36,936
def compose(x, *funcs):
    """
    takes an initial value and a list of functions and composes those
    functions on the initial value

    :param x: object to call the composed functions on
    :param funcs: list of functions to compose
    :return: final output of composition
    """
    for func in funcs:
        x = func(x)
    return x
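An illustrative composition, applied left to right:

>>> compose(3, lambda x: x + 1, lambda x: x * 2)
8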
11f7db802ff9345d2330b9081f5045fd7131d6c1
36,937
import operator


def select_survivors(snakes, num_survivors, survival_thresh):
    """
    Picks the survivors that stay for next generation of snakes

    params:
        snakes: list, current generation of snakes of class Snake
        num_survivors: int, how many survivors there should be
        survival_thresh: float, selection probability threshold that
            survivors must meet

    returns:
        list of survivors of class Snake, list of tuples of reverse sorted
        selection probabilities and the indices of the associated snakes
    """
    survivors = []
    select_probs = dict()
    for i in range(len(snakes)):
        select_probs[str(i)] = snakes[i].select_prob
    sorted_probs = sorted(select_probs.items(), key=operator.itemgetter(1),
                          reverse=True)
    count = 0
    for i in range(len(sorted_probs)):
        if survival_thresh <= sorted_probs[i][1]:
            if count < num_survivors:
                survivors.append(snakes[int(sorted_probs[i][0])])
                snakes[int(sorted_probs[i][0])].selected = True
                count += 1
    return survivors, sorted_probs
b5df43703936c1f3130d42ab5fe7596aef6c834d
36,939
def _calculate_rydeberg_spectra(_n1: int, _n2: int, _R: float) -> float:
    """Calculates the spectra from the Rydberg formula.

    Decorators:
        nb.njit

    Arguments:
        _n1 {int} -- First principal quantum number.
        _n2 {int} -- Second principal quantum number.
        _R {float} -- Rydberg constant.

    Returns:
        float -- Calculated inverse wavelength (the Rydberg formula
        gives 1/lambda).
    """
    return _R * (1.0 / _n1 ** 2 - 1.0 / _n2 ** 2)
57de01915ca5148b9344be099e37ab02a973a381
36,945
def get_boundary_polyhedra(polyhedra, boundary_x=0, boundary_width=0.5,
                           verbose=True, z_lim=[0, 100]):
    """
    get indices of polyhedra at boundary (assumed to be parallel to x-axis)

    Parameter
    ---------
    polyhedra: dict
        dictionary of all polyhedra
    boundary_x: float
        position of boundary in Angstrom
    boundary_width: float
        width of boundary where center of polyhedra are considered in Angstrom
    verbose: boolean
        optional
    z_lim: list
        upper and lower limit of polyhedra to plot

    Returns
    -------
    boundary_polyhedra: list
        list of polyhedra at boundary
    """
    boundary_polyhedra = []
    for key, polyhedron in polyhedra.items():
        center = polyhedron['vertices'].mean(axis=0)
        # Compare against boundary_width instead of a hard-coded 0.5, so the
        # parameter actually takes effect.
        if abs(center[0] - boundary_x) < boundary_width and (z_lim[0] < center[2] < z_lim[1]):
            boundary_polyhedra.append(key)
            if verbose:
                print(key, polyhedron['length'], center)
    return boundary_polyhedra
43402802aba553500ee292cb237d611c1ea58ae0
36,948
def assign_node_names(G, parcellation):
    """
    Modify nodal attribute "name" for nodes of G, inplace.

    Parameters
    ----------
    G : :class:`networkx.Graph`
    parcellation : list
        ``parcellation[i]`` is the name of node ``i`` in ``G``

    Returns
    -------
    :class:`networkx.Graph`
        graph with nodal attributes modified
    """
    # Assign anatomical names to the nodes
    for i, node in enumerate(G.nodes()):
        # networkx 2.x exposes node attributes via G.nodes (G.node was removed)
        G.nodes[i]['name'] = parcellation[i]
    # G.graph['parcellation'] = True
    return G
7e948ba751458fe5f527fbd494a4957c3a81ff81
36,949
def floodFill(points, startx, starty):
    """
    Returns a set of the (x, y) points of a filled in area.

    `points` is an iterable of (x, y) tuples of an arbitrary shape.
    `startx` and `starty` mark the starting point (likely inside the
    arbitrary shape) to begin filling from.

    >>> drawPoints(polygon(5, 5, 4, 5))
    ,,,O,,,
    ,,O,O,,
    ,O,,,O,
    O,,,,,O
    O,,,,,O
    O,,,,,O
    ,O,,,O,
    ,OOOOO,
    >>> pentagonOutline = list(polygon(5, 5, 4, 5))
    >>> floodFill(pentagonOutline, 5, 5)
    {(7, 3), (4, 7), (4, 8), (5, 6), (6, 6), (7, 7), (6, 2), (5, 1), (3, 7), (2, 5), (8, 5), (5, 8), (6, 7), (3, 3), (5, 5), (7, 6), (4, 4), (6, 3), (3, 6), (3, 4), (8, 6), (6, 4), (5, 4), (2, 6), (4, 5), (5, 2), (7, 5), (4, 2), (6, 5), (5, 3), (3, 5), (6, 8), (4, 6), (5, 7), (3, 8), (7, 4), (4, 3), (7, 8), (2, 4), (8, 4)}
    >>> drawPoints(floodFill(pentagonOutline, 5, 5))
    ,,,O,,,
    ,,OOO,,
    ,OOOOO,
    OOOOOOO
    OOOOOOO
    OOOOOOO
    ,OOOOO,
    ,OOOOO,
    """
    # Note: We're not going to use recursion here because 1) recursion is
    # overrated 2) on a large enough shape it would cause a stackoverflow
    # 3) flood fill doesn't strictly need recursion because it doesn't require
    # a stack and 4) recursion is overrated.
    allPoints = set(points)  # Use a set because the look ups will be faster.

    # Find the min/max x/y values to get the "boundaries" of this shape, to
    # prevent an infinite loop.
    minx = miny = maxx = maxy = None
    for bpx, bpy in points:
        if minx is None:
            # This is the first point, so set all the min/max to it.
            minx = maxx = bpx
            miny = maxy = bpy
            continue
        if bpx < minx:
            minx = bpx
        if bpx > maxx:
            maxx = bpx
        if bpy < miny:
            miny = bpy
        if bpy > maxy:
            maxy = bpy

    pointsToProcess = [(startx, starty)]
    while pointsToProcess:
        x, y = pointsToProcess.pop()

        # Process point to the right of x, y.
        if x + 1 < maxx and (x + 1, y) not in allPoints:
            pointsToProcess.append((x + 1, y))
            allPoints.add((x + 1, y))
        # Process point to the left of x, y.
        if x - 1 > minx and (x - 1, y) not in allPoints:
            pointsToProcess.append((x - 1, y))
            allPoints.add((x - 1, y))
        # Process point below x, y.
        if y + 1 < maxy and (x, y + 1) not in allPoints:
            pointsToProcess.append((x, y + 1))
            allPoints.add((x, y + 1))
        # Process point above x, y.
        if y - 1 > miny and (x, y - 1) not in allPoints:
            pointsToProcess.append((x, y - 1))
            allPoints.add((x, y - 1))
    return allPoints
a0d139b6736ae4c701b700d0bebb385948925849
36,955
import re


def normalize_prometheus_label(str):
    """
    Prometheus labels must match /[a-zA-Z_][a-zA-Z0-9_]*/ and so we should
    coerce our data to it. Source: https://prometheus.io/docs/concepts/data_model/

    Every invalid character will be made to be an underscore `_`.
    """
    # Replace every character outside [a-zA-Z0-9_] with an underscore. (The
    # original pattern nested brackets inside a character class and did not
    # match what the docstring describes.)
    return re.sub(r'[^a-zA-Z0-9_]', "_", str, 0)
e851df78d3a7807e9cbaf6415f8059a56ff1d0bc
36,962
def time_delta_seconds(t1, t2):
    """Returns the number of seconds between two datetime.time objects"""
    t1_s = (t1.hour * 60 * 60 + t1.minute * 60 + t1.second)
    t2_s = (t2.hour * 60 * 60 + t2.minute * 60 + t2.second)
    return max([t1_s, t2_s]) - min([t1_s, t2_s])
ba251d741b1c81810a4ac1b4152038f771c23485
36,968
def url_gen_dicter(inlist, filelist):
    """
    Prepare name:URL dicts for a given pair of names and URLs.

    :param inlist: List of dictionary keys (OS/radio platforms)
    :type inlist: list(str)

    :param filelist: List of dictionary values (URLs)
    :type filelist: list(str)
    """
    pairs = {title: url for title, url in zip(inlist, filelist)}
    return pairs
36d4e066524903f7a5a19f5ca402502561d7ff52
36,970
def fix_pc_references(s):
    """Translate references to the current program counter from ca65 to ASM6.

    ca65 uses * for PC; ASM6 uses $. Only references at the start or end of
    an expression or of a parenthesized subexpression get translated. But
    that should be enough for our use case, as the source code can use (*)
    to produce ($) in the translation.
    """
    if s.startswith('*'):
        s = '$' + s[1:]
    if s.endswith('*'):
        # Replace the trailing *, not the leading character.
        s = s[:-1] + '$'
    return s.replace('(*', '($').replace('*)', '$)')
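Illustrative translations (made-up expressions):

>>> fix_pc_references('*+2')
'$+2'
>>> fix_pc_references('(*+2)')
'($+2)'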
bee0e8bbf130136d72b30fc444bb75dde3c2e0d2
36,975
def ns_svg(item_name):
    """Prepends the svg xml-namespace to the item name."""
    return '{http://www.w3.org/2000/svg}' + item_name
bad2c5fec183b44e3a04a620b134e233b7810dd0
36,976
def named_copy(variable, new_name):
    """Clones a variable and sets a new name to the clone."""
    result = variable.copy()
    result.name = new_name
    return result
14b84aedff4495bb480a98b4c9117b9ea7c03db8
36,980
def isScoreFile(f):
    """Checks whether file 'f' is a compressed MusicXML score file."""
    return "score" in f and "analysis_on" not in f and f.endswith("mxl")
4a1b4ea9013486ba125f68db87b1b63d4228665d
36,983
def _map_object(result):
    """Return a more human friendly pyVmomi object, by creating a mapping
    of the objects name to the literal object.

    :Returns: Dictionary

    :param result: A series of pyVmomi objects, like vim.Network or vim.Datastore
    :type result: List
    """
    return {x.name: x for x in result}
2bdbf2f357f51748a8a02780a54b656118e19e94
36,985
def reshape_to_ND(arr, N):
    """
    Adds dimensions to arr until it is dimension N
    """
    ND = len(arr.shape)
    if ND > N:
        raise ValueError("array is larger than {} dimensional, given shape {}"
                         .format(N, arr.shape))
    extra_dims = (N - ND) * (1,)
    return arr.reshape(arr.shape + extra_dims)
c0d75c02e8a11091206a0cef72416853d6671dea
36,986
import requests


def get_text_by_id(id):
    """Get text from Gutenberg based on id.

    Project Gutenberg sets a restriction on the way that text on their site
    must be downloaded. This function does not honor the restriction, so the
    function should be used with care.

    Parameters
    ----------
    id : int or str
        Identifier.

    Returns
    -------
    text : str or None
        The text. Returns None if the page does not exist.

    References
    ----------
    https://www.gutenberg.org/wiki/Gutenberg:Information_About_Robot_Access_to_our_Pages
    """
    url = "http://www.gutenberg.org/ebooks/{id}.txt.utf-8".format(id=id)
    response = requests.get(url)
    # Honor the documented contract: return None for missing pages and a
    # str (not raw bytes) otherwise.
    if not response.ok:
        return None
    return response.text
9e5edbb0d9182be8b2440236ee3af77ddcf78384
36,990
def is_before(d1, d2):
    """
    Return True if d1 is strictly before d2.

    :param datetime.date d1: date 1
    :param datetime.date d2: date 2
    :return: True if d1 is before d2.
    :rtype: bool
    """
    return d1 < d2
194026215e35ce026142e7d0734aaa3ef6b4d254
36,993
import re


def normalize_profession(profession):
    """
    Normalize a profession so that it can be mapped to the text

    NOTE: Currently, we only return the last token
    """
    # Remove punctuations. Pass re.UNICODE via flags=; as a positional
    # argument it would be interpreted as the count parameter of re.sub.
    profession = re.sub(r'[^\w\s]', " ", profession.decode("utf-8"),
                        flags=re.UNICODE)
    profession = profession.split()[-1]  # only return the last token
    profession = profession.lower()  # turn into lower case
    return profession
e6f0a9f9b7f4d6679f6e849d601f4d4567c47637
36,996
import re


def regex_or(list_of_strings):
    """Compile a regex matching any of the strings provided."""
    re_str = "(" + "|".join(list_of_strings) + ")"
    return re.compile(re_str, re.IGNORECASE)
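An illustrative use; the compiled pattern matches case-insensitively:

>>> bool(regex_or(['cat', 'dog']).search('Walking the Dog'))
True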
5967c3bd54025cc37a8ffb44197c927040a45075
36,999
def get_dictionary(filename="c06d"):
    """Return a dictionary of the words and their pronunciations from the
    CMU Pronouncing Dictionary.

    Each pronunciation will be a string.
    """
    pro = dict()  # initialize word-pronunciation dictionary
    fin = open(filename)
    for line in fin:
        if line[0] == '#':
            continue  # Like a soft break; jump straight back to top of loop
        # Kind of a shortcut for assigning a list to multiple items.
        # The same end result as the two commands
        #     word = line.split(" ")[0]
        #     pronounce = line.split(" ")[1]
        # The first space on each line of data is between the word
        # and its pronunciation.
        [word, pronounce] = line.split(" ", 1)
        pro[word] = pronounce.strip()
    return pro
1ea495e175cf4d87a33ff835d32042ad45a821d4
37,002
def serialize_event_person(person):
    """Serialize EventPerson to JSON-like object."""
    return {'_type': 'EventPerson',
            'id': person.id,
            'email': person.email,
            'name': person.display_full_name,
            'firstName': person.first_name,
            'familyName': person.last_name,
            'title': person.title,
            'affiliation': person.affiliation,
            'phone': person.phone,
            'address': person.address,
            'user_id': person.user_id}
d26cd8a49b7330a786c70ea80664f7e27d836aa7
37,006
def get_undecided_variable(problem):
    """
    Return one variable that is still unset in the problem
    """
    for variable, domain in problem['variables'].items():
        if len(domain) > 1:  # Undecided if more than 1 value possible
            return variable
b741f1733f5b20b1dfe389812ce1fe4feedd968c
37,007
def create_slurm_options_string(slurm_options: dict, srun: bool = False):
    """
    Convert a dictionary with sbatch_options into a string that can be used
    in a bash script.

    Parameters
    ----------
    slurm_options: Dictionary containing the sbatch options.
    srun: Construct options for an srun command instead of an sbatch script.

    Returns
    -------
    slurm_options_str: sbatch option string.
    """
    if srun:
        option_structure = " {prepend}{key}={value}"
    else:
        option_structure = "#SBATCH {prepend}{key}={value}\n"

    slurm_options_str = ""
    for key, value_raw in slurm_options.items():
        prepend = '-' if len(key) == 1 else '--'
        if key in ['partition', 'p'] and isinstance(value_raw, list):
            value = ','.join(value_raw)
        else:
            value = value_raw
        slurm_options_str += option_structure.format(prepend=prepend,
                                                     key=key, value=value)
    return slurm_options_str
26245b9f253f65775749358e4c69f76dcff733a7
37,008
from typing import Optional
from typing import List
from typing import Tuple
from typing import Set


def parse_node_types(node_type_specs: Optional[str]) -> List[Tuple[Set[str], Optional[float]]]:
    """
    Parse a specification for zero or more node types.

    Takes a comma-separated list of node types. Each node type is a
    slash-separated list of at least one instance type name (like 'm5a.large'
    for AWS), and an optional bid in dollars after a colon.

    Raises ValueError if a node type cannot be parsed.

    Inputs should look something like this:

    >>> parse_node_types('c5.4xlarge/c5a.4xlarge:0.42,t2.large')
    [({'c5.4xlarge', 'c5a.4xlarge'}, 0.42), ({'t2.large'}, None)]

    :param node_type_specs: A string defining node types

    :returns: a list of node types, where each type is the set of
              instance types, and the float bid, or None.
    """
    # Collect together all the node types
    parsed = []

    if node_type_specs:
        # Some node types were actually specified
        for node_type_spec in node_type_specs.split(','):
            try:
                # Types are comma-separated
                # Then we have the colon and the bid
                parts = node_type_spec.split(':')

                if len(parts) > 2:
                    # Only one bid allowed
                    raise ValueError(f'Could not parse node type "{node_type_spec}": multiple bids')

                # Instance types are slash-separated within an equivalence
                # class
                instance_types = set(parts[0].split('/'))

                for instance_type in instance_types:
                    if instance_type == '':
                        # No empty instance types allowed
                        raise ValueError(f'Could not parse node type "{node_type_spec}": empty instance type')

                # Build the node type tuple
                parsed.append((instance_types, float(parts[1]) if len(parts) > 1 else None))
            except Exception as e:
                if isinstance(e, ValueError):
                    raise
                else:
                    raise ValueError(f'Could not parse node type "{node_type_spec}"')

    return parsed
5df63b7e715f35aaf176c69cf3d294991d2dc0a3
37,013
import yaml


def get_target_directory_from_config_file(cfg_src):
    """
    Gets download directory from specified section in a configuration file.
    """
    # reading complete configuration
    with open(cfg_src, 'r') as yml_file:
        cfg = yaml.safe_load(yml_file)

    try:
        tgt_dir = cfg['downloading']['tgt_dir']
    except KeyError as e:
        print(
            "Unable to retrieve parameter '%s' "
            "from configuration file." % e.args[0])
        return
    except Exception:
        print("Unable to read configuration file")
        return

    return tgt_dir
873a23d609702feea026d2e7f5ef98b1895239ff
37,015
def check_not_errors_tcp(requets_raw, response_raw, consulta):
    """
    Check whether the frame contains errors, TCP format.

    :param requets_raw: frame used to make the request
    :type requets_raw: bytes
    :param response_raw: response frame
    :type response_raw: bytes
    :return: True if there are no errors
    :rtype: bool
    """
    OK = 1

    # the function code and id do not match the request
    if requets_raw[:5] != response_raw[:5]:
        OK = 0

    # the data length does not match what the responding slave reports
    if len(response_raw[6:]) != (int.from_bytes(response_raw[4:6], "big")):
        OK = 0

    return OK
e6fb81fa94c1d33ccaee3a2d93e20ab3d3077d06
37,016
def check_search_query(query: str) -> bool:
    """Checking for a valid search query."""
    if query.startswith('/'):
        return False
    return True
5921ebb2e98a6b1fae44022b7598518555674e14
37,017
import io


def readmodifierrules(filename):
    """Read a file containing heuristic rules for marking modifiers.

    Example line: ``S *-MOD``, which means that for an S constituent, any
    child with the MOD function tag is a modifier. A default rule can be
    specified by using * as the first label, which always matches (in
    addition to another matching rule, if any). If none of the rules
    matches, a non-terminal is assumed to be a complement.
    """
    modifierrules = {}
    with io.open(filename, encoding='utf8') as inp:
        for line in inp:
            line = line.strip().upper()
            if line and not line.startswith("%"):
                label, modifiers = line.split(None, 1)
                if label in modifierrules:
                    raise ValueError('duplicate rule for %r (each label'
                                     ' should occur at most once in the file)'
                                     % label)
                modifierrules[label] = modifiers.split()
    return modifierrules
a27420c682a1c8095fe0bd9b63ddcb8b63c9bf74
37,019
def num_spike_powers(FWHM):
    """
    num_spike_powers(FWHM):
        Return the (approx.) number of powers from a triangular spike
        pulse profile which are greater than one half the power
        perfect sinusoidal pulse profile.  Both the spike and the
        sine are assumed to have an area under one full pulse of 1 unit.
        Note:  A gaussian profile gives almost identical numbers of
        high powers as a spike profile of the same width.  This
        expression was determined using a least-squares fit.
        (Errors get large as FWHM -> 0).
        'FWHM' is the full width at half-max of the spike.
        (0.0 < FWHM <= 0.5)
    """
    return -3.95499721563e-05 / FWHM**2 + 0.562069634689 / FWHM - \
        0.683604041138
49bdca3d3e10f0aaddfff167a62ded0c35d694e9
37,023
import math


def out_size(dim_in, k, s, p, d):
    """Calculates the resulting size after a convolutional layer.

    Args:
        dim_in (int): Input dimension size.
        k (int): Kernel size.
        s (int): Stride of convolution.
        p (int): Padding (of input).
        d (int): Dilation
    """
    return math.floor((dim_in + 2 * p - d * (k - 1) - 1) / s + 1)
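A sanity check with made-up layer parameters: a 3x3 kernel with stride 1, padding 1 and dilation 1 preserves the input size.

>>> out_size(32, 3, 1, 1, 1)
32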
755d5769953b68f4c772332381576f95aed2022e
37,025
def strbool(x):
    """
    Return a string representation of the specified boolean for an XML
    document.

    >>> strbool(False)
    '0'
    >>> strbool(True)
    '1'
    """
    return '1' if x else '0'
dd9b6a406c0cc45d01aae1b17a4ceb2c8b78c140
37,030
from pathlib import Path


def get_file_path(filepath):
    """Return the path of the config file if exists.

    :param filepath: The path of the file.
    :type filepath: str|Path
    :return: Path
    :raises: FileNotFoundError if file path does not exist
    """
    if type(filepath) is str:
        real_filepath = Path(filepath)
    elif isinstance(filepath, Path):
        real_filepath = filepath
    else:
        real_filepath = None
    if real_filepath is None or not real_filepath.exists():
        raise FileNotFoundError(f"{filepath} does not exist")
    return real_filepath
6597ba2dd51a3304862207504f81e7aae35c8a17
37,031
def get_filenames_of_set(train_set):
    """
    This function reads the names of the files that we are going to use as
    our training set.

    :param train_set: the file which contains the names of the training set
    :return: content - an array containing the name of each file
    """
    # read the names of the files that we are going to use as our training set
    with open(train_set) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    return content
0da5e7321606b1a863b081cace61ecb5f92cd83f
37,039
def querystr(d):
    """Create a query string from a dict"""
    if d:
        return '?' + '&'.join(
            ['%s=%s' % (name, val) for name, val in d.items()])
    else:
        return ''
17cca6005f8ce685d40de7bc1cbe42a6640fba0c
37,043
def encode_bin(word):
    """
    Encode a binary vector into an integer.
    """
    return sum(a * 2**i for (i, a) in enumerate(word))
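The vector is read little-endian, i.e. index 0 holds the least significant bit; an illustrative check:

>>> encode_bin([1, 0, 1])
5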
81b3acf0aaeb8ec5118ec340dc9b77df57956971
37,046
def find_closest_level(levels, elevation):
    """Find the level closest to the given elevation."""
    closest = None
    difference = float("inf")
    for level in levels:
        level_difference = abs(level.Elevation - elevation)
        if level_difference < difference:
            closest = level
            difference = level_difference
    return closest
182538dd929516a1fd3581968fc0061fc7472963
37,049
import torch


def select_action_ddpg(state, actor):
    """Selects action using actor."""
    actor.eval()
    with torch.no_grad():
        action = actor.forward(state).item()
    return action
f2e8476cabe6c6448cd522438a8cd69d788237ec
37,051
def playlist_transform(s, t, compareType="Song"):
    """
    Computes the edit distance for two playlists s and t, and prints the
    minimal edits required to transform playlist s into playlist t.

    Inputs:
        s: 1st playlist (format: list of (track name, artist, genre) triples)
        t: 2nd playlist (format: list of (track name, artist, genre) triples)
        compareType: String indicating the type of comparison to make.
            "Song" (default): songs in a playlist are considered equivalent
                if the (song name, artist, genre) triples match.
            "Genre": songs in a playlist are considered equivalent if the
                same genre is used.
            "Artist": songs in a playlist are considered equivalent if the
                same artist is used.

    Output:
        The minimum edit distance and the minimal edits required to transform
        playlist s into playlist t.
    """
    if compareType == "Song":
        type = 0
    elif compareType == "Artist":
        type = 1
    else:
        type = 2
    A, B = [], []
    s, t = [" "] + s, [" "] + t
    A.append(range(len(s) + 1))
    B.append(range(len(t) + 1))
    for i in range(len(s)):  # appends index from s to A
        A.append([i])
        B.append([4])
    for j in range(len(t)):  # appends index from t to A
        A.append([j])
        B.append([3])
    for i in range(1, len(s)):
        for j in range(1, len(t)):
            if type == 0:  # if equal to a SONG
                if s[i] == t[j]:  # if songs are equal
                    c_match = A[i-1][j-1]
                    match = True
                else:
                    c_match = A[i-1][j-1] + 1
                    match = False
            else:  # ARTIST or GENRE
                if s[i][type] == t[j][type]:
                    c_match = A[i-1][j-1]
                    match = True
                else:
                    c_match = A[i-1][j-1] + 1
                    match = False
            insert = A[i][j-1] + 1
            delete = A[i-1][j] + 1
            minimum = min(c_match, insert, delete)
            if minimum == c_match:
                if match:
                    B[i].append(1)  # do not change
                else:
                    B[i].append(2)  # change s[i] to t[j]
            elif minimum == insert:
                B[i].append(3)  # insert t[j]
            else:
                B[i].append(4)  # remove s[i]
            A[i].append(minimum)
    x = len(s) - 1
    y = len(t) - 1
    listt = []
    while x >= 0 or y >= 0:  # Printing out of operations
        if x == 0 and y == 0:
            break
        if B[x][y] == 1:
            a = "Leave " + str(s[x]) + " unaltered"
            listt.insert(0, a)
            x -= 1
            y -= 1
        elif B[x][y] == 2:
            b = "Change " + str(s[x]) + " to " + str(t[y])
            listt.insert(0, b)
            x -= 1
            y -= 1
        elif B[x][y] == 3:
            c = "Insert " + str(t[y])
            listt.insert(0, c)
            y -= 1
        elif B[x][y] == 4:
            d = "Remove " + str(s[x])
            listt.insert(0, d)
            x -= 1
    for k in range(0, len(listt)):
        print(listt[k])
    return A[len(s)-1][len(t)-1]
390239d78349b82176ed1a54828ad302df1b585e
37,053
def flatten_dictlist(dictlist):
    """
    Turns a list of dictionaries into a single dictionary.

    :param dictlist: List of dictionaries.
    :type dictlist: list
    :return: Flattened dictionary.
    :rtype: dict

    :Example:

    >>> dictlist = [{"a": 1}, {"b": 2, "a": 3}, {"c": 4}]
    >>> flatten_dictlist(dictlist)
    {'a': 3, 'b': 2, 'c': 4}
    """
    new_dict = {}
    for dict_ in dictlist:
        new_dict.update(dict_)
    return new_dict
86eb654965cd43bef6924a8aaa9873a7b36bc8f4
37,056
import math


def _j_s(query_len: int, known_len: int, d_g_x: float, temp: float) -> float:
    """Estimate the free energy of length query_len based on one of
    length known_len.

    The Jacobson-Stockmayer entry extrapolation formula is used for bulges,
    hairpins, etc that fall outside the 30nt upper limit for pre-calculated
    free-energies. See SantaLucia and Hicks (2004).

    Args:
        query_len: Length of element without known free energy value
        known_len: Length of element with known free energy value (d_g_x)
        d_g_x: The free energy of the element known_len
        temp: Temperature in Kelvin

    Returns:
        float: The free energy for a structure of length query_len
    """
    gas_constant = 1.9872e-3
    return d_g_x + 2.44 * gas_constant * temp * math.log(query_len / float(known_len))
77dbc59e63d58e2f8a294411f85b7f1ae18ada08
37,058
def _return_empty_tuple(*_):
    """
    Return empty tuple
    """
    return ()
079f59f698f988a92763fb9093b7fb90fc2febc9
37,069
def _get_key_and_indices(maybe_key_with_indices):
    """Extracts key and indices from key in format 'key_name[index0][index1]'."""
    patterns = maybe_key_with_indices.split('[')
    if len(patterns) == 1:
        return (maybe_key_with_indices, None)
    # For each index ensure that the brackets are closed and extract number
    indices = []
    for split_pattern in patterns[1:]:
        # Remove surrounding whitespace.
        split_pattern = split_pattern.strip()
        if split_pattern[-1] != ']':
            raise ValueError(
                'ParameterName {} has bad format. Supported format: key_name, '
                'key_name[index0], key_name[index0][index1], ...'.format(
                    maybe_key_with_indices))
        try:
            indices.append(int(split_pattern[:-1]))
        except ValueError:
            raise ValueError(
                'Only integer indexing allowed for ParameterName. '
                'Faulty specification: {}'.format(maybe_key_with_indices))
    return patterns[0], indices
ffc065c60da419b73b1283e06a69098eeac19fbb
37,070
def padTo(n, seq, default=None):
    """
    Pads a sequence out to n elements, filling in with a default value if it
    is not long enough.

    If the input sequence is longer than n, raises ValueError.

    Details, details:
    This returns a new list; it does not extend the original sequence.
    The new list contains the values of the original sequence, not copies.
    """
    if len(seq) > n:
        raise ValueError("%d elements is more than %d." % (len(seq), n))

    blank = [default] * n
    blank[:len(seq)] = list(seq)
    return blank
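An illustrative call:

>>> padTo(5, [1, 2])
[1, 2, None, None, None]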
d15030acec1144f94ceb1af8faa41f3d05db5621
37,078
import re


def sanitize(name, space_allowed=False, replace_with_character="_"):
    """sanitizes string to remove unwanted characters

    Args:
        name ([type]): name to sanitize
        space_allowed (bool, optional): identify if space allowed in
            sanitized string. Defaults to False.
        replace_with_character (str, optional): replacement character.
            Defaults to "_".

    Returns:
        string: sanitized string
    """
    if space_allowed:
        sanitized_name = re.sub(
            r"([^\sa-zA-Z0-9._-])", replace_with_character, name
        )
    else:
        sanitized_name = re.sub(
            r"([^a-zA-Z0-9._-])", replace_with_character, name
        )
    return sanitized_name
b1f85efab9e96fc1125223895aaa023770f839a7
37,080
def make_candidate_numbers(candidate_count):
    """Return an iterable of candidate numbers."""
    return range(1, candidate_count + 1)
36bc2cb8f63556e1bf4622f420219be1992b1053
37,083
def _path(source, target, parent, path):
    """
    This function finds the path from source to the target according to the
    parent dictionary. It must be used for the shortest_path_faster function.

    :param source: Float
        Id of the start node
    :param target: Float
        Id of the goal node
    :param parent: Dictionary
        The value of each key is the parent node (predecessor node).
    :param path: list
        The list contains the id of the nodes of the path from source to
        the target.
    :return: list
        The list contains the id of the nodes of the path from source to
        the target.
    """
    if len(path) == 0:
        path.append(target)
    if target == source:
        pass
    elif parent.get(target) is None:
        print("Target cannot be reached")
        return False
    else:
        path.append(parent.get(target))
        _path(source, parent.get(target), parent, path)
    return path[::-1]
3c44a5ba53a1fa05b19b1da875273bb2d64ca521
37,085
def good_astrom_func(table):
    """Require the star to have good astrometry."""
    return table['ruwe'] < 1.4
580dbf5e1095384277bb561a8e54fe3429ad9810
37,086
import yaml


def yaml_dump(object, **kwargs):
    """
    Give the yaml representation of an object as a unicode string.

    :param object: The object to get the unicode YAML representation from.
    :returns: A unicode string.
    """
    encoding = 'utf-8'
    result = yaml.dump(
        object,
        encoding=encoding,
        allow_unicode=True,
        **kwargs
    )
    return result.decode(encoding)
12c6028d60a262081b430a3cfd2e433f5e2b234f
37,089
def format_file_size(v):
    """Format file size into a human friendly format"""
    if abs(v) > 10**12:
        return '%.2f TB' % (v / 10**12)
    elif abs(v) > 10**9:
        return '%.2f GB' % (v / 10**9)
    elif abs(v) > 10**6:
        return '%.2f MB' % (v / 10**6)
    elif abs(v) > 10**3:
        return '%.2f kB' % (v / 10**3)
    else:
        return '%d B' % v
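Illustrative outputs:

>>> format_file_size(1536000)
'1.54 MB'
>>> format_file_size(512)
'512 B'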
b56c9bd78ceee77cbe2dccb982380dbaf447ee43
37,091
from typing import List
import random


def generate_random_adjacent_matrix(size: int) -> List[List[int]]:
    """
    This function generates a square adjacency matrix, with different
    proportions for 1 and 0 entries. As it is a square matrix, the number
    of columns is always the same as the number of rows.

    :param int size: The size of the matrix; for example 4 will generate
        a 4x4 matrix.
    :rtype: List[List[int]]
    """
    # 75% chance of a 1 and 25% chance of a 0 for each entry.
    choices = [0] * 25 + [1] * 75
    return [[random.choice(choices) for x in range(size)] for y in range(size)]
04114888f52cfcfea9a9eff8e7da09f83bf809df
37,092
def build_s3_url(filenames, bucket):
    """
    convert filenames to AWS S3 URLs

    params:
        bucket: string, AWS S3 bucket name
        filenames: list of strings, AWS S3 filenames
    """
    s3_urls = []
    for f in filenames:
        s3_urls.append('https://{}.s3.amazonaws.com/{}'.format(bucket, f))
    return s3_urls
74dde98752adbaf72a1739add7816120053b019f
37,094