Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k).
import json


def get_metadata(bf):
    """
    Return the metadata of a BIDSFile

    Parameters
    ----------
    bf : BIDSFile object

    Returns
    -------
    Dictionary containing the metadata
    """
    filename = bf.path.replace('.' + bf.get_entities()['extension'], '')
    with open(filename + '.json', 'r') as handle:
        return json.load(handle)
6d7503bd1da892a0e8d06c25fd95fb6b3fa51d95
45,129
import random


def random_message(length):
    """
    Generates a block of random bytes of a user-specified length.
    """
    return bytes([random.randint(0, 255) for x in range(length)])
569b98033307bab548dbf636e2be12a2e4e846e4
45,130
import math


def df2idf(docfreq, totaldocs, log_base=2.0, add=0.0):
    """
    Compute default inverse-document-frequency for a term with document frequency `doc_freq`::

      idf = add + log(totaldocs / doc_freq)
    """
    return add + math.log(1.0 * totaldocs / docfreq, log_base)
983b31e2b15471091f482445f624c2d412b20df7
45,131
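A minimal usage sketch for df2idf above; the numbers are illustrative, not from any dataset:

# A term appearing in 10 of 1000 documents, default log base 2 and add=0.0:
idf = df2idf(docfreq=10, totaldocs=1000)
print(round(idf, 4))  # 6.6439, i.e. log2(100)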
def build_class(names):
    """
    Format ZLabels class file.

    >>> names = ['Label.AColleague', 'Label.ZipPostalCode']
    >>> output = build_class(names)
    >>> print(output)
    @isTest
    private class ZLabels {
        private static List<String> labels = new List<String> {
            Label.AColleague,
            Label.ZipPostalCode
        };
    }
    <BLANKLINE>
    """
    header = '@isTest\nprivate class ZLabels {\n    private static List<String> labels = new List<String> {\n'
    footer = '    };\n}\n'
    content = ''.join('        ' + n + ',' + '\n' for n in names)
    content = content[:-2] + '\n'
    return header + content + footer
1b8c51fc27567861d70556b978f95b514d0163b4
45,138
def sqlite_db_tables(c=None):
    """
    List the tables of a sqlite database.

    How do I list all tables/indices contained in an SQLite database:
    https://www.sqlite.org/faq.html#q7
    """
    db_tables = []
    if c is not None:
        c.execute('select name from sqlite_master where type = "table" and name NOT LIKE "%sqlite_%"')
        rows = c.fetchall()
        # indexing rows by name requires the connection's row_factory to
        # return mapping-like rows (e.g. sqlite3.Row)
        for r in rows:
            db_tables.append(r['name'])
    return db_tables
e0b98eccbbe0a197d00385d115208ac3bf7cf31c
45,140
def parse_x12_major_version(x12_implementation_version) -> str:
    """
    Parses the x12 major version from an implementation version string.
    If the version is invalid, an empty string is returned.

    Example:
        x = parse_x12_major_version("005010X279A1")
        print(x)
        # prints 5010

        x = parse_x12_major_version("00501")
        print(x)
        # prints ""

    :param x12_implementation_version: The X12 implementation version typically conveyed in ST03
    :returns: The x12 major version or an empty string
    """
    if x12_implementation_version is None or len(x12_implementation_version) < 6:
        return ""
    return x12_implementation_version[2:6]
ea7163825ada4d5453ca9e7dcbf915009f41f236
45,150
import re


def generateTestKey(test_name):
    """
    Generate a test 'key' for a given test name.
    This must not have illegal chars as it will be used for dict lookup in a template.
    Tests must be named such that they will have unique keys.
    """
    key = test_name.strip().lower()
    key = key.replace(" ", "")
    # Remove any characters that cannot be used to represent a variable
    key = re.sub(r'[^a-zA-Z0-9]', '', key)
    return key
52fe95fa7f1625f9a01d8e119efdce4231fc5d76
45,151
def deltawords(num, arg):
    """An adverb to come after the word 'improved' or 'slipped'"""
    delta = abs(num - arg)
    # We only pick out changes over 10%; over 30% in 9 months is unheard of.
    if delta == 0:
        word = "not at all"
    elif delta < 10:
        word = "slightly"
    elif delta < 20:
        word = "moderately"
    elif delta < 30:
        word = "considerably"
    else:
        word = "massively"
    return word
354fd7f2f5e029cbbe6d1d0160fcf6c26cf4acab
45,154
def is_alt(chrom):
    """
    Check if chromosome is an ALT
    """
    return chrom.endswith("_alt")
88a61db7ca7cbb4fe6533f7af8d625674d336232
45,155
def inConvergenceCorridor(d_struct, d_gc, BS_d_struct, BS_d_gc):
    """
    Check if a solution's qualities are within the convergence corridor
    """
    struct_var = ((BS_d_struct / float(4)) + 3) * 4
    gc_var = (BS_d_gc + 1 / float(100) * 5) + BS_d_gc + 1
    return d_struct <= struct_var and d_gc <= gc_var
a86bd86ad259c9695fa5c4983a4715a39f7fd53b
45,158
def find_root(cfg):
    """
    Find a root node for the given cfg: a node that no edge points to.
    """
    toVisit = set()
    for adj in cfg['adjacency']:
        for n in adj:
            toVisit.add(n['id'])
    for i in range(len(cfg['nodes'])):
        if cfg['nodes'][i]['id'] not in toVisit:
            return i
    return 0
54436087cd5793771d457d349f1362e7bc6052f5
45,160
def basis(u, cumul_var, p=0.5):
    """Return the minimum number of basis vectors from matrix U such that
    they account for at least p percent of total variance.

    Hint: Do the singular values really represent the variance?

    Args:
        u: (M, M) numpy array containing principal components.
           For example, i'th vector is u[:, i]
        cumul_var: (N, ) numpy array, variance along the principal components.

    Returns:
        v: (M, D) numpy array, contains M principal components from N
           containing at most p (percentile) of the variance.
    """
    d = 0
    var_cm = 0
    # Clamp p away from zero so the loop runs at least once and d is at least 1.
    p = max(p, 1e-8)
    while var_cm / cumul_var[-1] < p and d < len(cumul_var):
        var_cm = cumul_var[d]
        d += 1
    v = u[:, :d]
    return v
7c25674bddc0f6a9156ed79a6e26dab4b09b3112
45,162
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, blue, and green values

    Input:
        pixels (List[Pixel]): list of pixels to be averaged
    Returns:
        rgb (List[int]): list of average red, green, blue values across pixels respectively

    Assumes you are returning in the order: [red, green, blue]
    """
    avg_red = 0
    avg_green = 0
    avg_blue = 0
    # Walk over the pixels list one by one, accumulating each pixel's RGB values.
    for i in range(len(pixels)):
        avg_red += pixels[i].red
        avg_green += pixels[i].green
        avg_blue += pixels[i].blue
    # Compute the averages.
    avg_red /= len(pixels)
    avg_green /= len(pixels)
    avg_blue /= len(pixels)
    rgb_list = [int(avg_red), int(avg_green), int(avg_blue)]
    return rgb_list
a20c875db1c99e82c449cef0dadc6df138f6f556
45,163
def str2_bool(v):
    """
    Convert a string to a bool where appropriate. If the value received is
    already of type bool, it is returned unchanged. If it is a string with
    one of the values "yes", "true", "t", or "1" (case-insensitive), True is
    returned; otherwise False.
    """
    if type(v) is bool:
        return v
    return v.lower() in ("yes", "true", "t", "1")
bfd12b9983b6da4039f76127c1b7fbb9aa0cb841
45,176
def process_issue_results(data):
    """Process the data returned by the issues GraphQL request.

    Args:
        data: The data returned

    Returns:
        issues: A list of dicts; each dict is the data for some of the results
    """
    edges = data.get("data").get("repository").get("issues").get("edges")
    issues = []
    for e in edges:
        issues.append(e["node"])
    return issues
42a9aae67df7dc824c495983e63b88163c269c36
45,177
import io


def remove_multiple_newlines_in_txt(txt: str) -> str:
    """
    This function will remove multiple, sequential newlines in text (str) data.

    :param txt: a str containing the text to be cleaned.
    :return: a str containing the text with multiple, sequential newlines removed.
    """
    clean_txt = ''
    # Convert the text string into a buffer so that we can read lines.
    txt_buffer = io.StringIO(txt)
    # Initialize to True so that on our first pass through the loop we'll keep the first line.
    last_line = True
    next_line = txt_buffer.readline()
    while next_line:
        stripped_next_line = next_line.strip()
        # Did our previous line have content? (Strings in Python are "falsey", so '' will not pass.)
        if last_line:
            # Yes (or this is the first line)... keep the current line.
            clean_txt += next_line
        else:
            # No, the previous line was blank... keep the current line only if it has content.
            if stripped_next_line:
                clean_txt += next_line
        # Remember the (stripped) current line and grab the next one.
        last_line = stripped_next_line
        next_line = txt_buffer.readline()
    return clean_txt
9b1be808c4253b0f2b58b1985b10417a65b7cdeb
45,184
import importlib


def _import_class(cls: str):
    """Take a string FQP and return the imported class or identifier.

    `cls` is of the form "package.module.klass".
    """
    mod, name = cls.rsplit(".", maxsplit=1)
    module = importlib.import_module(mod)
    return getattr(module, name)
b2b3cddf49b88f99b35c6720d12d96dfa007441c
45,196
def check_var_constraints(var_constraints, rule_walks):
    """
    Check variable constraints of the rule.

    Parameters:
        var_constraints (list): variable constraints from the rule
        rule_walks (pd.DataFrame): all walks matching the rule

    Returns:
        rule_walks (pd.DataFrame): all walks matching the rule including the variable constraints
    """
    for const in var_constraints:
        for i in range(len(const) - 1):
            rule_walks = rule_walks[
                rule_walks["entity_" + str(const[i])]
                == rule_walks["entity_" + str(const[i + 1])]
            ]
    return rule_walks
b379526ee6a288b2a7de1ca224edf19be698acc9
45,221
from typing import Iterator
from typing import Tuple
from typing import Any


def _duplicate_avoiding_dict(pairs: Iterator[Tuple[Any, Any]]):
    """
    The default output_type of CollectionParser.delimited_pairwise.
    Returns a dict from key-value pairs while ensuring there are no duplicate keys.
    """
    ret = {}
    for k, v in pairs:
        if k in ret:
            raise ValueError(f'duplicate key {k}')
        ret[k] = v
    return ret
5be688e4e509997c4cf10403d7b7599b07cf83e5
45,222
def flatten_tree(tree: dict, full: bool = False) -> dict:
    """
    Flatten an execution tree to make it easier to read.

    Task trees are often a single node nested several levels deep. These trees
    may be collapsed into a list. The execution order is the same, but it's
    easier for a human to read.

    Before:
        - foo
          - bar
            - xoo

    After:
        - xoo
        - bar
        - foo

    Before:
        - foo
          - xar
          - bar
            - xoo

    After:
        - foo
          - xar
          - xoo
          - bar

    :param tree: Tree to flatten
    :param full: Flatten tree into single list
    :return: flattened task list
    """

    def flatten_node(node: dict) -> list:
        """
        Flatten a single node. Always return a list for consistency, even when
        returning a single node.

        :param node: node to flatten
        :return: flattened node
        """
        node = node.copy()
        num_dependencies = len(node["dependencies"])
        if num_dependencies == 0:
            # no dependencies: nothing to flatten, return as-is
            return [node]
        elif full or num_dependencies == 1:
            # flatten dependencies: flatten into single list that includes parent & child
            flattened = []
            for dependency in node["dependencies"]:
                flattened_child = flatten_node(dependency)
                flattened.extend(flattened_child)

            # clear dependencies, since they are now siblings
            # this node is added last since it runs after dependencies
            node["dependencies"] = []
            flattened.append(node)
            return flattened
        else:
            # multiple dependencies: do not flatten into parent.
            #
            # Any dependencies that are flattened need to be merged with other dependencies.
            # Dependency nodes should either be a single node, or a list of nodes
            dependencies = []
            for dependency in node["dependencies"]:
                flattened = flatten_node(dependency)
                dependencies.extend(flattened)
            node["dependencies"] = dependencies
            return [node]

    root = flatten_node(tree)
    if len(root) > 1:
        # if root's dependencies were flattened into it, then the returned list will have all of
        # those dependencies. Create a new root node to contain them all. This keeps the structure
        # consistent-ish for downstream consumers. They still have to special case this node, but
        # it should be a little simpler since all nodes are of a similar shape
        return {"name": None, "dependencies": root}
    else:
        # a single node, unpack it and return as root.
        return root[0]
77b133b80d70256643e22d1b778c1bdecb00badf
45,232
import pathlib


def mzcompose_location(mz_root: str) -> pathlib.Path:
    """Return the absolute path to mzcompose.

    MZ_ROOT is expected to be set via pyactivate.
    """
    return pathlib.Path(mz_root, "bin", "mzcompose")
80de24e6d4ea4bde9e846b2f7e942ed64a0d0e3f
45,234
from typing import Dict
from typing import Tuple


def apply(dfg: Dict[Tuple[str, str], int]) -> Dict[Tuple[str, str], float]:
    """
    Computes a causal graph based on a directly follows graph according to the heuristics miner

    Parameters
    ----------
    dfg: :class:`dict` directly follows relation, should be a dict of the form
        (activity, activity) -> num of occ.

    Returns
    -------
    :return: dictionary containing all causal relations as keys (with a value
        between -1 and 1 indicating how strongly the relation holds)
    """
    causal_heur = {}
    for (f, t) in dfg:
        if (f, t) not in causal_heur:
            rev = dfg[(t, f)] if (t, f) in dfg else 0
            causal_heur[(f, t)] = float((dfg[(f, t)] - rev) / (dfg[(f, t)] + rev + 1))
            causal_heur[(t, f)] = -1 * causal_heur[(f, t)]
    return causal_heur
c022aa8da1d5436f62b000619959a14db75672b2
45,235
def center(x, y, canvas_w, canvas_h, object_w, object_h):
    """
    Returns a positional tuple that will centre a surface on another surface.
    """
    # Do some math and return a positional tuple for use in the outer scope
    return (x + canvas_w // 2 - object_w // 2,
            y + canvas_h // 2 - object_h // 2)
91603a33d381b8de5074e7b522e257c2b20e177b
45,237
def shape_to_strides(shape):
    """
    Constructs strides from shape (for objects with no special strides).
    """
    strides = []
    curstride = 1
    for s in reversed(shape):
        strides.append(curstride)
        curstride *= s
    return list(reversed(strides))
8bea7683eeff0cf4f8c528aa59058cb19ff48d7c
45,240
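A usage sketch for shape_to_strides; for a row-major (C-order) layout the last axis is contiguous:

print(shape_to_strides([2, 3, 4]))  # [12, 4, 1] -- element strides, not byte strides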
def is_segmentable(partic_id):
    """
    A function that returns True if the participant's interview clip is not
    in the manually identified set of troubled clips. The clips below were
    not segmentable due to excessive static, proximity to the virtual
    interviewer, volume levels, etc.
    """
    troubled = set(['P300', 'P305', 'P306', 'P308', 'P315', 'P316', 'P343',
                    'P354', 'P362', 'P375', 'P378', 'P381', 'P382', 'P385',
                    'P387', 'P388', 'P390', 'P392', 'P393', 'P395', 'P408',
                    'P413', 'P421', 'P438', 'P473', 'P476', 'P479', 'P490',
                    'P492'])
    return partic_id not in troubled
58859f155d315311f353d31df4dcee47bd21ceb6
45,242
def quote(env, args):
    """Returns its argument; stops evaluation:
    (quote (1 2 3)) = (1 2 3)
    """
    if len(args) > 1:
        raise ValueError(
            "Function quote expects one argument, got: '{}'".format(args)
        )
    else:
        return args.car()
4b652a61b535dc36cd5449ad609c07aac82e4615
45,243
def int_to_bytes(num, lens):
    """
    Convert an int to bytes.

    :param num: the integer to convert
    :param lens: the byte length of the target bytes object
    :return: a bytes object
    """
    int_bytes = int(num).to_bytes(lens, byteorder='big')
    return int_bytes
2dee2d30ba5fb93cd9f8b74a0dc16e9c0ca20dad
45,245
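A usage sketch for int_to_bytes with illustrative values:

print(int_to_bytes(258, 2))  # b'\x01\x02', since 258 == 0x0102 big-endian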
import select


def read_ready(*read_fds, timeout=None):
    """Returns a list of file descriptors that are ready to be read.

    Args:
        *read_fds (int): Integers that refer to the file descriptors.
        timeout (float): A timeout before returning an empty list if no file
            descriptor is ready. None waits until at least one file
            descriptor is ready.
    """
    return [] if not read_fds else select.select(read_fds, [], [], timeout)[0]
ac0005928ca836c08f5064f34bb7093c695e2220
45,246
def solar_geometric_mean_longitude(julian_century):
    """Returns the Solar Geometric Mean Longitude for the given Julian Century, julian_century."""
    solar_geometric_mean_longitude = (
        280.46646 + julian_century * (36000.76983 + julian_century * 0.0003032)
    ) % 360
    return solar_geometric_mean_longitude
ef414d97cf620be1a367e3b3dc2b8ba8c7aa5a68
45,247
import logging


def get_logger(name, level, fmt=None):
    """
    Get logger from logging with given name, level and format, without
    calling logging.basicConfig (importing paddle disables any basicConfig
    setting made after the import, so a handler is configured directly).

    Args:
        name (str): The logger name.
        level (logging.LEVEL): The base level of the logger
        fmt (str): Format of logger output

    Returns:
        logging.Logger: logging logger with given settings

    Examples:
        .. code-block:: python

            logger = log_helper.get_logger(__name__, logging.INFO,
                                           fmt='%(asctime)s-%(levelname)s: %(message)s')
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.StreamHandler()
    if fmt:
        formatter = logging.Formatter(fmt=fmt)
        handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
ef8bc7864f0369b95dd7d08c4ef40cd5916a7d3a
45,248
from typing import List


def ks_filter_ascending(features: List[dict]) -> List[dict]:
    """
    Filter out ascending path/rows from a STAC query
    """
    return [
        f for f in features
        if int(f['properties']['landsat:wrs_path']) < 100
        and int(f['properties']['landsat:wrs_row']) < 100
    ]
e55b80ee10cdc786d8418836532ca1cdfe7adfda
45,250
import json


def get_ignore_info(request):
    """Extract a dict of residues to ignore from the post request"""
    ignore_dict = request.POST.get("ignore")
    return json.loads(ignore_dict)
a200378b959b4d5b5b560df6e4d101909c086894
45,253
def export(varname):
    """Returns "export varname={formatted value}\\n" """
    var = str(globals()[varname])
    frmted_var = var.strip("'\"").replace("\"", "\\\"").replace(r"\n", r"\\n")
    return f"export {varname}=\"{frmted_var}\"\n"
0c0e0d09cfbc8784fc4ce7e265b5be4e534dc4ff
45,254
import secrets


def _make_token(num_bytes=16):
    """
    Creates a cryptographically-secure, URL-safe string (for Python 3.6+)
    """
    return secrets.token_urlsafe(num_bytes)
6b23ff243380619d92a0e36da14fc1b5a64ca519
45,255
def properties_to_dict(filepath):
    """
    Convert Java .properties file to a dict.
    Only include non-commented lines.
    """
    out = {}
    with open(filepath) as prop_file:
        for line in prop_file.readlines():
            line = line.strip()
            if line and (not line.startswith('#')):
                # split on the first '=' only, so values may contain '='
                k, v = line.split('=', 1)
                out[k.strip()] = v.strip()
    return out
d425bfa4a3e3d10b8a3625232d55a098b914db03
45,257
def assert_equals(source_dict, expected_dict):
    """
    Check equality with expected values in dictionary keys.

    Args:
        - source_dict (dict): the dict containing the keys/values that should
          conform to expected values
        - expected_dict (dict): the dict containing keys of the `source_dict`,
          with expected values of those keys as values

    Returns:
        list: empty list if all checks succeeded, list of error message str
        if some checks failed
    """
    error_tplt = "Expected value '{}' for key '{}', got '{}'"

    errors = [
        error_tplt.format(value, key, source_dict.get(key))
        for key, value in expected_dict.items()
        if source_dict.get(key) != value
    ]

    return errors
884d3bbb46d32f52fc758bffd8e7f4787ceb4b5f
45,269
def is_end_of_file(empty_line_count):
    """
    Checks whether reading of file reaches bottom of file
    """
    return empty_line_count > 5
c6ba5121180c7bb10beed460c10ed285e7aedecb
45,271
def approx_pretty_size(total_bytes) -> str:
    """
    Return a humane and pretty size approximation.

    This looks silly below 1KB but I'm OK with that.
    Don't call this with negative total_bytes or your pet hamster will go bald.

    >>> approx_pretty_size(50)
    '1KB'
    >>> approx_pretty_size(2000)
    '2KB'
    >>> approx_pretty_size(2048)
    '2KB'
    >>> approx_pretty_size(3000000)
    '3MB'
    >>> approx_pretty_size(4000000000)
    '4GB'
    >>> approx_pretty_size(-314)
    Traceback (most recent call last):
        ...
    ValueError: total_bytes may not be negative
    """
    if total_bytes < 0:
        raise ValueError("total_bytes may not be negative")
    for scale, _unit in ((1024 * 1024 * 1024, "GB"), (1024 * 1024, "MB"), (1024, "KB")):
        div, rem = divmod(total_bytes, scale)
        if div > 0:
            if rem > 0:
                div += 1  # always round up
            break
    else:
        div, _unit = 1, "KB"
    return f"{div:d}{_unit}"
dc9a15fed28e0bb9d5ca9d51d2f15a10887c70ea
45,275
import string


def prettify_permission_name(perm_name: str) -> str:
    """Takes an internal D.py permission name (such as send_tts_messages) and converts it
    to a prettified form suitable for showing to users (send_tts_messages -> Send TTS Messages)"""
    # Capitalize the permission names and replace underscores with spaces.
    pretty_perm_name = string.capwords(f"{perm_name}".replace('_', ' '))
    # Make sure that we capitalize the TTS acronym properly.
    pretty_perm_name = "Send TTS Messages" if pretty_perm_name == "Send Tts Messages" else pretty_perm_name
    return pretty_perm_name
fea3a27a9f3a9f1c80641b571705a3d43a4678d3
45,283
def c_to_f(temp):
    """
    Converts Celsius to Fahrenheit.
    """
    return temp * 9 / 5 + 32
49071c9f52b47e3ae2d03133d68e63071ac8eb00
45,284
def prio_dscp_map(duthosts, rand_one_dut_hostname):
    """
    This fixture reads the QOS parameters from SONiC DUT, and creates
    priority vs. DSCP priority port map

    Args:
        duthosts (pytest fixture) : list of DUTs
        rand_one_dut_hostname (pytest fixture): DUT hostname

    Returns:
        Priority vs. DSCP map (dictionary, key = priority).
        Example: {0: [0], 1: [1], 2: [2], 3: [3], 4: [4] ....}
    """
    duthost = duthosts[rand_one_dut_hostname]
    config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']

    if "DSCP_TO_TC_MAP" not in config_facts.keys():
        return None

    dscp_to_tc_map_lists = config_facts["DSCP_TO_TC_MAP"]
    if len(dscp_to_tc_map_lists) != 1:
        return None

    # dict views are not indexable in Python 3, so materialize the keys first
    profile = list(dscp_to_tc_map_lists.keys())[0]
    dscp_to_tc_map = dscp_to_tc_map_lists[profile]

    result = {}
    for dscp in dscp_to_tc_map:
        tc = int(dscp_to_tc_map[dscp])
        result.setdefault(tc, []).append(int(dscp))
    return result
9538063704fc0a09599f7a8577781d0900e92404
45,289
def lerp(value1, value2, amt):
    """Calculates a number between two numbers at a specific increment.

    The amt parameter is the amount to interpolate between the two values
    where 0.0 is equal to the first point, 0.1 is very near the first point,
    0.5 is half-way in between, etc.
    """
    return value1 + amt * (value2 - value1)
b94f21d3c6f646102f74560c815a3304a741e391
45,290
def split_folds(X, y, fold_series, test_fold):
    """Take a dataset whose observations have been grouped into folds,
    then perform a train-test split.

    X, y: feature and target DataFrames.
    fold_series: Series containing the fold numbers of the observations.
    test_fold: Integer, the fold number that will be used as the test fold.

    Returns: tuple of four DataFrames
    """
    if fold_series.dtype != "int64":
        raise AttributeError("The fold list does not purely contain integers.")

    test_mask = (fold_series == test_fold)
    X_train = X.loc[~test_mask].copy()
    y_train = y.loc[~test_mask].copy()
    X_test = X.loc[test_mask].copy()
    y_test = y.loc[test_mask].copy()
    return X_train, X_test, y_train, y_test
38051e584c427ffe77273fbbcdd764b6fe432b2f
45,296
def ffs(x: int) -> int:
    """Find first set - returns the index, counting from 0 (from the right),
    of the least significant set bit in `x`.
    """
    return (x & -x).bit_length() - 1
1d9fef75b58bba59e0ddb442115d1f5c62dd7844
45,298
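A usage sketch for ffs; `x & -x` isolates the lowest set bit in two's complement, and `bit_length() - 1` turns it into an index:

print(ffs(0b10100))  # 2: the lowest set bit is at index 2
print(ffs(1))        # 0
print(ffs(0))        # -1: no bit set, since (0 & -0) has bit_length 0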
def all_services(request):
    """Return all test services."""
    return request.param
165df889ce0e2729aaed431fbdc1020b6d3cf034
45,305
def _reformat_mspass_error(
    mserr, prefix_message, suffix_message="Some requested metrics may not be computed"
):
    """
    Helper for below used to reformat a message from ccore functions that
    throw a MsPASSError. Needed to produce rational messages from different
    error metric calculations.

    :param mserr: MsPASSError object caught with a try: except: block
    :param prefix_message: string that becomes the first part of the revised
        message posted.
    :param suffix_message: string that becomes a third line of the revised
        message posted. Note this string is always preceded by a newline so
        do not put a newline in this arg unless you want a blank line.

    :return: expanded message string
    """
    log_message = "FD_snr_estimator: error in computing an snr metric"
    log_message += prefix_message
    log_message += mserr.message
    log_message += "\n"
    log_message += suffix_message
    return log_message
469755c1251e98ab1ab15708956e0d3bba048627
45,306
def get_largest_mol(mol_list):
    """
    Given a list of rdkit mol objects, returns mol object containing the
    largest num of atoms. If multiple containing largest num of atoms,
    picks the first one.

    Args:
        mol_list(list): a list of rdkit mol object.

    Returns:
        the largest mol.
    """
    num_atoms_list = [len(m.GetAtoms()) for m in mol_list]
    largest_mol_idx = num_atoms_list.index(max(num_atoms_list))
    return mol_list[largest_mol_idx]
f8d2692c34c5a49ecdeb6c3f3c5345091651c182
45,309
def patternhost(pattern, user):
    """
    Given a 'something-%s-example.org' format, return that with %s replaced
    (once) by the username in question.
    """
    return pattern % user
96127b71b701f2e112bced8fd7e299001bdefea7
45,311
def daylight_saving(m, wd, d):
    """
    Assuming that DST starts on the last Sunday of March and
    DST ends on the last Sunday of October.

    Input: m: month, wd: week day, d: day of month are int() type;
           month, week day and day of month count start from 1
    Output:
        1 if clock needs to shift 1 hour forward
        -1 if clock needs to shift 1 hour backward
        0 does not need to adjust clock
    """
    if m == 3:
        if wd == 7 and 31 >= d > 24:
            return 1
    elif m == 10:
        if wd == 7 and 31 >= d > 24:
            return -1
    # No shift needed. (This also covers March/October days outside the last
    # Sunday, which previously fell through and returned None.)
    return 0
e40f0c2539a3c06279a9ea200f0faed11c5ea50d
45,313
import importlib


def dynamic_load(module_or_member):
    """
    Dynamically loads a class or member of a class.

    If ``module_or_member`` is something like ``"a.b.c"``, will perform
    ``from a.b import c``. If ``module_or_member`` is something like ``"a"``
    will perform ``import a``.

    :param module_or_member: the name of a module or member of a module to import.
    :return: the returned entity, be it a module or member of a module.
    """
    parts = module_or_member.split(".")
    if len(parts) > 1:
        name_to_import = parts[-1]
        module_to_import = ".".join(parts[:-1])
    else:
        name_to_import = None
        module_to_import = module_or_member
    module = importlib.import_module(module_to_import)
    if name_to_import:
        to_return = getattr(module, name_to_import)
        if not to_return:
            raise AttributeError("{} has no attribute {}".format(module, name_to_import))
        return to_return
    else:
        return module
882a9b4c68cd7d34a6ff5d914b7b819ad62156ae
45,315
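A usage sketch for dynamic_load against the standard library:

sqrt = dynamic_load("math.sqrt")  # behaves like: from math import sqrt
math_mod = dynamic_load("math")   # behaves like: import math
print(sqrt(9.0), math_mod.pi)     # 3.0 3.141592653589793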
def GetId(datas: bytes, length: int):
    """
    Extract the substation address (ID) from a data packet.

    :param datas: the data packet
    :param length: the address length in bytes
    :return: the address as an int, or 0 if the frame type is unknown
    """
    if datas[0] == 0x10:
        # fixed-length frame
        return int.from_bytes(datas[2:2 + length], byteorder='little')
    elif datas[0] == 0x68:
        # variable-length frame
        return int.from_bytes(datas[5:5 + length], byteorder='little')
    else:
        return 0
e660a516a9418c0755467e7889d3d53dd60a5a5b
45,322
def is_filename_char(x):
    """Return True if x is an acceptable filename character."""
    if x.isalnum():
        return True
    if x in ['-', '_']:
        return True
    return False
bdf9ec319e16e11db8af3dcdabc1c16dbc8c317d
45,324
def carousel(app):
    """Fixture to get the carousel widget of the test app."""
    return app.carousel
8229756e385e285d3dcc6a122d6e73a5b263fc70
45,327
def filter_df_by_cluster(df, clusters, number):
    """
    Select only the members of a defined cluster.

    :param df: pandas dataframe with samples/subjects as index and features as columns.
    :param dict clusters: clusters dictionary from get_dendrogram function if
        div_clusters option was True.
    :param int number: cluster number (key).
    :return: Pandas dataframe with all the features (columns) and
        samples/subjects belonging to the defined cluster (index).
    """
    return df[df.index.isin(clusters[number])]
3100b9ca59c24c102d6445babc0a247254cdd5a9
45,331
import itertools


def get_strategy_map(strategy_list, teams):
    """
    Creates a strategy map, with all the possible strategy profiles on the game.

    :return: A map with all the possible strategy profiles according to the
        players and strategies available.
    """
    strategy_maps = []
    strategy_profiles = list(itertools.product(strategy_list, repeat=teams))
    for profile in strategy_profiles:
        strategy_map = {'name': '', 'map': {}}
        # To keep the order preferred by Gambit
        for index, strategy in enumerate(reversed(list(profile))):
            strategy_name = strategy.name
            strategy_map['name'] += strategy_name + "_"
            strategy_map['map'][index] = strategy
        strategy_map['name'] = strategy_map['name'][:-1]
        strategy_maps.append(strategy_map)
    return strategy_maps
a4178f2b6768116f4f0fd85b9c76db3e368689d5
45,337
from io import StringIO


def get_q2_comment_lines(md_file_loc):
    """Returns a list of line numbers in the file that start with "#q2:".

    These lines should be skipped when parsing the file outside of Q2 (i.e.
    in pandas). I guess we could also ostensibly use these lines' types here
    eventually, but for now we just skip them.

    Notes:
        - The line numbers are 0-indexed (so they can easily be thrown in to
          pandas.read_csv() as the skiprows parameter)
        - This doesn't check the first line of the file (assumed to be the
          header)
        - This stops checking lines once it gets to the first non-header line
          that doesn't start with "#q2:". Currently, "#q2:types" is the only
          Q2 "comment directive" available, but ostensibly this could detect
          future Q2 comment directives.
        - This checks if md_file_loc is of type StringIO. If so, this will
          handle it properly (iterating over it directly); otherwise, this
          assumes that md_file_loc is an actual filename, and this will open
          it using open(). (I realize that ideally this wouldn't have to do
          any type checking, but it's either this or do a bunch of weird
          refactoring to get my test code working.)
    """

    def iterate_over_file_obj_lines(file_obj):
        q2_lines = []
        line_num = 0
        for line in file_obj:
            # Don't check for a #q2: comment on the first line of the file,
            # since the first line (should) define the file header.
            if line_num > 0:
                if line.startswith("#q2:"):
                    q2_lines.append(line_num)
                else:
                    # We assume that all #q2: lines will occur at the start of
                    # the file. Once we've reached a line that doesn't start
                    # with "#q2:", we stop checking.
                    break
            line_num += 1
        return q2_lines

    if type(md_file_loc) == StringIO:
        q2_lines = iterate_over_file_obj_lines(md_file_loc)
        # HACK: Allow us to read through this StringIO again --
        # https://stackoverflow.com/a/27261215/10730311
        # Note that we're only ever bothering with StringIOs here during test
        # code, so this weirdness should be ignored during normal operation of
        # Qurro.
        md_file_loc.seek(0)
        return q2_lines
    else:
        with open(md_file_loc, "r") as md_file_obj:
            return iterate_over_file_obj_lines(md_file_obj)
a9f5b62ba5ce1de57214f587528f8f8819827f70
45,339
def list_found_duplicates(in_list):
    """
    Check list for duplicate entries. Return True if duplicates found,
    and False if no duplicates found.

    >>> in_list = ["hallo", "hello"]
    >>> list_found_duplicates(in_list)
    False
    >>> in_list = ["hallo", "hello", "hollo", "hello"]
    >>> list_found_duplicates(in_list)
    True
    """
    if len(set(in_list)) == len(in_list):
        return False
    else:
        return True
bcdfa07c7a4931baa3522487b015e4ed703ebe0e
45,341
def kappa(A: float, B: float, C: float):
    """
    Calculate Ray's asymmetry parameter for a given set of A, B, and C
    rotational constants. This parameter determines how asymmetric a molecule
    is by setting a range between two limits: the prolate (-1) and the
    oblate (+1) limits.

    Parameters
    ----------
    A, B, C: float
        Rotational constant in MHz for each respective axis

    Returns
    -------
    kappa: float
        Ray's asymmetry parameter
    """
    return (2 * B - A - C) / (A - C)
f2628858582645a43ffbe706d1f838196c6d4f20
45,342
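A usage sketch for kappa with illustrative constants; a prolate symmetric top (B == C) sits at the -1 limit, an oblate one (A == B) at +1:

print(kappa(10000.0, 2000.0, 2000.0))  # -1.0 (prolate limit)
print(kappa(4000.0, 4000.0, 1000.0))   # 1.0 (oblate limit)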
def mark_coverage(percentage):
    """Return a mark from A to F based on the passed tests percentage.

    :param percentage: Percentage of passed unit tests.
    :type percentage: float
    :return: Mark from A to F.
    :rtype: str
    """
    mark_table = {
        "A": (90, 101),
        "B": (80, 90),
        "C": (70, 80),
        "D": (60, 70),
        # upper bound of 60 so that 59 maps to "F" instead of falling through
        "F": (0, 60),
    }
    for mark, mark_range in mark_table.items():
        if int(percentage) in range(*mark_range):
            return mark
2adae1e628219d073bb8cfaeb707fea0a1d0882d
45,343
def remove_fx_variables(cube):
    """
    Remove fx variables present as cell measures or ancillary variables in
    the cube containing the data.

    Parameters
    ----------
    cube: iris.cube.Cube
        Iris cube with data and cell measures or ancillary variables.

    Returns
    -------
    iris.cube.Cube
        Cube without cell measures or ancillary variables.
    """
    if cube.cell_measures():
        for measure in cube.cell_measures():
            cube.remove_cell_measure(measure.standard_name)
    if cube.ancillary_variables():
        for variable in cube.ancillary_variables():
            cube.remove_ancillary_variable(variable.standard_name)
    return cube
09427f418ac25e25e7fa658f17b2e5c346c9b7eb
45,344
import math


def _get_dct_norm_factor(n, inorm, dct_type=2):
    """Normalization factors for DCT/DST I-IV.

    Parameters
    ----------
    n : int
        Data size.
    inorm : {'none', 'sqrt', 'full'}
        When `inorm` is 'none', the scaling factor is 1.0 (unnormalized).
        When `inorm` is 'sqrt', scaling by ``1/sqrt(d)`` as needed for an
        orthogonal transform is used. When `inorm` is 'full', normalization
        by ``1/d`` is applied. The value of ``d`` depends on both `n` and
        the `dct_type`.
    dct_type : {1, 2, 3, 4}
        Which type of DCT or DST is being normalized?

    Returns
    -------
    fct : float
        The normalization factor.
    """
    if inorm == 'none':
        return 1
    delta = -1 if dct_type == 1 else 0
    d = 2 * (n + delta)
    if inorm == 'full':
        fct = 1 / d
    elif inorm == 'sqrt':
        fct = 1 / math.sqrt(d)
    else:
        raise ValueError('expected inorm = "none", "sqrt" or "full"')
    return fct
b6f8e07b6d708f78d616f4a357e94ec3249676b4
45,347
import uuid


def is_string_uuid(val):
    """
    Checks if the given string is a valid UUID

    :param val: str
    :return: boolean
    """
    if val and isinstance(val, str):
        try:
            uuid.UUID(val)
            return True
        except ValueError:
            pass
    return False
573192cdb1b3a5001309b72bdf49e524984ccbe0
45,353
from typing import Counter


def most_common(values):
    """Returns the most common value in the given list."""
    # parameter renamed from `list` to avoid shadowing the built-in
    counts = Counter(values)
    return counts.most_common(1)[0][0]
c8ed106c7b726849a687f729f1b808b95b5c3454
45,358
def _get_type(obj):
    """Return the type of an instance (old and new-style)"""
    return getattr(obj, '__class__', None) or type(obj)
e354a397e573f89a998ee6a25b1971d1d8cd578b
45,363
def compute_all_squares(of_range):
    """Compute the squares of all the natural numbers in a range."""
    # Register the list of squares.
    squares = []
    for i in range(of_range):
        squares.append(i * i)
    return squares
3c5cc90f7538393889f571a5e541c38945399123
45,365
def channel_reshape(x, channel_shape):
    """(B, *, H, W) to (B, custom, H, W)"""
    return x.reshape((x.shape[0],) + channel_shape + x.shape[-2:])
7d08dc4fc20686f9797a1e36ddaedbf9ef990a0c
45,369
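A usage sketch for channel_reshape; it works with any array exposing .shape and .reshape, and NumPy is used here purely for illustration:

import numpy as np

x = np.zeros((2, 12, 5, 5))     # (B, C, H, W)
y = channel_reshape(x, (3, 4))  # split the 12 channels into a (3, 4) block
print(y.shape)                  # (2, 3, 4, 5, 5)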
def load_lfw_identities(lfw_identities_files):
    """
    input:
        lfw_identities_files - path to the identity file (in the format of
        identity_CelebA.txt):
            image_name_1 person_id_1
            ...
            image_name_n person_id_n

    output:
        identity_info - dictionary of the list of image names per id
            identity_info[person_id] -> (image_name_1, ..., image_name_n)
        image_info - dictionary mapping image id to person id
            image_info[image_id] -> person_id
    """
    identity_info = dict()
    image_info = dict()

    with open(lfw_identities_files) as identities:
        lines = identities.readlines()
        for idx, identity in enumerate(lines):
            identity = identity.rstrip().lstrip().split()
            # we have 2 infos per line: image name and identity id
            if len(identity) != 2:
                continue

            image_name = identity[0]
            image_id = idx
            identity_id = int(identity[1])

            if identity_id not in identity_info:
                identity_info[identity_id] = []
            identity_info[identity_id].append(image_name)

            image_info[image_id] = identity_id

    return identity_info, image_info
0e529331676297aaa3d1a1efa246948779d7d39a
45,374
def confirm_action(msg):
    """
    Prompts user for confirmation of action.
    """
    print(" [*] {}".format(msg))
    # lowercase the answer so "Y"/"N" are accepted as well
    prompt = input(" [*] Y/N? ").lower()
    if prompt in ["yes", "y"]:
        return True
    elif prompt in ["no", "n"]:
        return False
    else:
        print(" [-] Please answer with y/n")
        return confirm_action(msg)
1baf657efd150639dd01a75a414d1d6f7fb961a0
45,384
from typing import List
from typing import Tuple


def minimum_bounding_box(coords: List[Tuple[float, float, float]],
                         buffer: float = 10.) -> Tuple[Tuple, Tuple]:
    """Calculate the minimum bounding box for a list of coordinates

    Parameters
    ----------
    coords : List[Tuple[float, float, float]]
        a list of tuples corresponding to x-, y-, and z-coordinates
    buffer : float (Default = 10.)
        the amount of buffer to add to the minimum bounding box

    Returns
    -------
    center: Tuple[float, float, float]
        the x-, y-, and z-coordinates of the center of the minimum bounding box
    size: Tuple[float, float, float]
        the x-, y-, and z-radii of the minimum bounding box
    """
    xs, ys, zs = zip(*coords)
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)
    min_z, max_z = min(zs), max(zs)

    center_x = (max_x + min_x) / 2
    center_y = (max_y + min_y) / 2
    center_z = (max_z + min_z) / 2

    size_x = (max_x - center_x) + buffer
    size_y = (max_y - center_y) + buffer
    size_z = (max_z - center_z) + buffer

    center = center_x, center_y, center_z
    size = size_x, size_y, size_z
    return center, size
462628c0c3ae7373d09c16b3895330aa9185c9c4
45,385
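A usage sketch for minimum_bounding_box with three illustrative points and the default 10.0 buffer:

center, size = minimum_bounding_box([(0., 0., 0.), (4., 2., 0.), (2., 6., 8.)])
print(center)  # (2.0, 3.0, 4.0)
print(size)    # (12.0, 13.0, 14.0) -- half-extents plus the buffer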
def should_run_crawl(
    force_crawl: bool, skip_crawl: bool, searching_path: bool, crawled_recently: bool
) -> bool:
    """
    Check whether to run the crawl.

    Always crawl if force_crawl is True. Otherwise, never crawl if skip_crawl
    is True. Assuming neither of the above are true, then crawl if
    searching_path is True or crawled_recently is False.

    :param force_crawl: Always crawl if True.
    :param skip_crawl: Never crawl if True, unless force_crawl is also True.
    :param searching_path: If the above are both false, then crawl if we're searching a path.
    :param crawled_recently: If all the above are False, then crawl if this is also False,
        as we haven't crawled this path recently.
    :return: True if we should crawl.
    """
    if force_crawl:
        return True
    elif skip_crawl:
        return False
    elif searching_path:
        return True
    elif not crawled_recently:
        return True
    return False
de29d392923e2cc34d8987cbd5b55bb25b621552
45,386
def gcd_float(numbers, tol=1e-8):
    """
    Returns the greatest common divisor for a sequence of numbers.
    Uses a numerical tolerance, so can be used on floats.

    Args:
        numbers: Sequence of numbers.
        tol: Numerical tolerance

    Returns:
        (int) Greatest common divisor of numbers.
    """

    def pair_gcd_tol(a, b):
        """Calculate the Greatest Common Divisor of a and b.

        Unless b==0, the result will have the same sign as b (so that when
        b is divided by it, the result comes out positive).
        """
        while b > tol:
            a, b = b, a % b
        return a

    n = numbers[0]
    for i in numbers:
        n = pair_gcd_tol(n, i)
    return n
bf84dc323a84413d49989300e135f66d52d334e8
45,389
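A usage sketch for gcd_float; the tolerance absorbs the rounding error of float modulo:

print(round(gcd_float([0.9, 0.3, 0.6]), 8))  # 0.3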
from typing import Tuple


def split_reg_path(reg_path: str) -> Tuple[str, str, str]:
    """Splits a full registry path into hive, key, and path.

    Examples
    --------
    >>> split_reg_path("\\REGISTRY\\MACHINE\\SYSTEM\\ControlSet001\\Control\\ComputerName")
    ('REGISTRY', 'ComputerName', 'MACHINE\\SYSTEM\\ControlSet001\\Control')

    Parameters
    ----------
    reg_path : str
        The full registry key

    Returns
    -------
    Tuple[str, str, str]
        Hive, registry key, and registry key path
    """
    # RegistryKey Node Creation
    hive = reg_path.split("\\")[0]
    reg_key_path = "\\".join(reg_path.split("\\")[1:-1])
    reg_key = reg_path.split("\\")[-1]
    return (hive, reg_key, reg_key_path)
545329fc5a4dacde35c4040e5becff972e67a897
45,393
def clean_string_input(value: str) -> str:
    """Converts a string to lower case and removes leading and trailing white spaces.

    Parameters
    ----------
    value: str
        The user input string.

    Returns
    -------
    str
        value.lower().strip()
    """
    return value.lower().strip()
66b6c1a84e6c98c0ae2c9eae655f565ba65a6213
45,394
import math


def get_color_table_size(num_colors):
    """Total values in the color table is 2**(1 + int(result, base=2)).

    The result is a three-bit value (represented as a string with ones or
    zeros) that will become part of a packed byte encoding various details
    about the color table, used in the Logical Screen Descriptor block.
    """
    nbits = max(math.ceil(math.log(num_colors, 2)), 2)
    return '{:03b}'.format(int(nbits - 1))
a61a1a73ebdb2f0b4ed263329f820daa45e1ce3b
45,396
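A usage sketch for get_color_table_size; 16 colors need 4 bits, and the packed field stores bits minus one:

print(get_color_table_size(16))  # '011' -> 2 ** (1 + 0b011) == 16 table entries
print(get_color_table_size(3))   # '001' -- the minimum of 2 bits is enforced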
import dis


def _get_opcodes(codeobj):
    """_get_opcodes(codeobj) -> [opcodes]

    Extract the actual opcodes as a list from a code object

    >>> c = compile("[1 + 2, (1,2)]", "", "eval")
    >>> _get_opcodes(c)
    [100, 100, 23, 100, 100, 102, 103, 83]
    """
    i = 0
    opcodes = []
    s = codeobj.co_code
    while i < len(s):
        # Indexing bytes yields an int on Python 3 but a str on Python 2,
        # where ord() is still needed.
        byte = s[i]
        code = byte if isinstance(byte, int) else ord(byte)
        opcodes.append(code)
        # NOTE: the 1- or 3-byte stepping (and the doctest values) match the
        # bytecode layout of Python < 3.6; from 3.6 on, every instruction is
        # 2 bytes wide.
        if code >= dis.HAVE_ARGUMENT:
            i += 3
        else:
            i += 1
    return opcodes
c48b80a6d742ad46c2cb034c43b15f19af9c5d3b
45,397
def _normalise_options(options):
    """
    Return a sequence of (value, label) pairs for all options where each
    option can be a scalar value or a (value, label) tuple.
    """
    out = []
    if hasattr(options, '__call__'):
        options = options()
    for option in options:
        if isinstance(option, tuple):
            out.append(option)
        else:
            out.append((option, str(option)))
    return out
624c78695643aac0e7a26583e5196f6c28446ce4
45,400
import json


def decode_response(res):
    """Parse a WebSuccess or WebError response."""
    decoded_dict = json.loads(res.data.decode('utf-8'))
    return (decoded_dict['status'], decoded_dict['message'], decoded_dict['data'])
e0e8b74ce31d6db6d77f91730b2a921562911afe
45,406
def set_new_methods(**kwargs):
    """
    Semantics: Set a set of new methods to a class, any quantity of methods.

    Args:
        **kwargs: name of method given by key, body by value.

    Returns:
        It returns the new class
    """
    def wrapper(cls):
        for key in kwargs:
            setattr(cls, key, kwargs[key])
        return cls
    return wrapper
e9e6c4e51f15bf058f64361d185749c8d845b860
45,418
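A usage sketch for the set_new_methods decorator; Box, greet and area are made-up names for illustration:

@set_new_methods(greet=lambda self: "hi", area=lambda self: self.w * self.h)
class Box:
    def __init__(self, w, h):
        self.w, self.h = w, h

b = Box(2, 3)
print(b.greet(), b.area())  # hi 6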
def model_key(row, headers, use_problem=True):
    """
    Generate a unique key for models that make them identifiable.
    """
    model = ["ico_encoder", "article_encoder", "attn", "cond_attn",
             "tokenwise_attention", "pretrain_attention", "tune_embeddings",
             "no_pretrained_word_embeddings"]
    problem = ["article_sections", "data_config"]

    key = ""
    for id_ in model:
        key += row[headers[id_]] + "|||"

    if use_problem:
        for id_ in problem:
            key += row[headers[id_]] + "|||"

    return key
6b8c66205e4a3184697f51aebede5ab71abe3736
45,434
def bytes2text(bs):
    """
    Converts bytes (or array-like of bytes) to text.

    :param bs: Bytes or array-like of bytes.
    :return: Converted text.
    """
    if type(bs) in (list, tuple) and len(bs) > 0:
        if isinstance(bs[0], bytes):
            return b''.join(bs).decode(errors='ignore').strip()
        if isinstance(bs[0], str):
            return ''.join(bs).strip()
        else:
            raise TypeError
    elif isinstance(bs, bytes):
        return bs.decode(errors='ignore').strip()
    else:
        return ''
a145a05211a20eb4787f9d217ba7f4226eb96125
45,435
import torch


def get_accuracy_with_logits(y_true: torch.Tensor, y_pred: torch.Tensor) -> float:
    """
    Calculate accuracy with logits

    :param y_true: torch.Tensor of ints containing true labels. shape = (N,)
    :param y_pred: torch.Tensor of predicted logits. shape = (N, N_classes)
    :return acc: float which equals to accuracy score
    """
    return (y_true == y_pred.argmax(dim=1)).float().mean().item()
e975dad6efdd9221d565f6e7abfee40af20d77f1
45,442
def is_non_empty_string(str_):
    """Checks if a string is not an empty string, True if not empty."""
    return bool(str_ != '')
b911322a33cb9387c86648bdb7c5be4f6b7f21a1
45,444
def graph_input_anssel(si0, si1, sj0, sj1, se0, se1, y, f0=None, f1=None,
                       s0=None, s1=None, kw=None, akw=None):
    """
    Produce Keras task specification from vocab-vectorized sentences.
    The resulting 'gr' structure is our master dataset container, as well
    as something that Keras can recognize as Graph model input.

    * si0, si1: Words as indices in vocab; 0 == not in vocab
    * sj0, sj1: Words as indices in GloVe; 0 == not in glove
      (or in vocab too, which is preferred; never si0>0 and si1>0 at once)
    * se0, se1: Words as embeddings (based on sj; 0 for nonzero-si)
    * y: Labels
    * f0, f1: NLP flags (word class, overlaps, ...)
    * s0, s1: Words as strings
    * kw, akw: Scalars for auxiliary pair scoring (overlap scores
      in yodaqa dataset)

    To get unique word indices, sum si0+sj1.
    """
    gr = {'si0': si0, 'si1': si1,
          'sj0': sj0, 'sj1': sj1,
          'score': y}
    if se0 is not None:
        gr['se0'] = se0
        gr['se1'] = se1
    if f0 is not None:
        gr['f0'] = f0
        gr['f1'] = f1
    if s0 is not None:
        # This is useful for non-neural baselines
        gr['s0'] = s0
        gr['s1'] = s1
    if kw is not None:
        # yodaqa-specific keyword weight counters
        gr['kw'] = kw
        gr['akw'] = akw
    return gr
ef3627ebbc4b5fbc9b2c8e02e2cc4aa683ffd298
45,446
def test_file(test_dir):
    """
    Returns path to the file with `filename` and `content` at `test_dir`.
    """
    def wrapper(filename: str, content: str):
        file = test_dir / filename
        file.write_text(content, encoding="utf-8")
        return file
    return wrapper
e4e9d500ba7c5227add47833bdc8d3be3a8d4252
45,447
def get_slope(x, y):
    """Calculate slope by taking first and last values."""
    return (y[-1] - y[0]) / (x[-1] - x[0])
92f00bd246e27dae51552dbe546ae108abc17e40
45,453
def s3_get_bucket_versioning(s3_obj, bucketname, s3_client=None):
    """
    Boto3 client based Get Bucket Versioning function

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        s3_client: Any s3 client resource

    Returns:
        dict : GetBucketVersioning response
    """
    if s3_client:
        return s3_client.get_bucket_versioning(Bucket=bucketname)
    else:
        return s3_obj.s3_client.get_bucket_versioning(Bucket=bucketname)
58017d4a414929d06e4da4c8cac83e6a02ef9975
45,454
from typing import Union
from typing import List


def append_forward_slash_path(
    paths: Union[List[str], str]
) -> Union[List[str], str, None]:
    """
    Returns the input string(s), in the same format as they were passed in,
    with a minimum of one forward slash at the end, given that no forward
    slash exists at the end.

    :param paths: one or more paths to add a forward slash to.
    :return:
    """
    if paths is None:
        return None
    if isinstance(paths, str):
        if paths[-1] != "/":
            paths += "/"
        return paths
    else:
        output = [path + ("/" if path[-1] != "/" else "") for path in paths]
        return output
e5726067818674457bd5da1b39ba7e47dc91c4d5
45,459
import copy


def merge_config(base_config, new_config, in_place=False):
    """Recursively merges configurations."""
    if base_config is None:
        return new_config
    if not in_place:
        base_config = copy.deepcopy(base_config)
    if new_config is None:
        return base_config

    # Merge a configuration file.
    for key, new_value in new_config.items():  # iteritems() is Python 2 only
        base_value = base_config.get(key)
        if isinstance(base_value, dict):
            assert isinstance(new_value, dict), \
                "New value must be dict: {} - {}".format(key, new_value)
            # Recurse.
            value = merge_config(base_value, new_value, in_place=True)
        else:
            # Overwrite.
            value = new_value
        base_config[key] = value
    return base_config
9c26679c72559a82c46e99b61b1b5f8395258e9d
45,463
def div_growth_rateGm(curr_earn, cap_emp, dt):
    """
    Calculates the growth rate of a dividend using the dividend growth rate
    valuation model where the dividend is paid yearly.

    parameters:
    -----------
    curr_earn = current earnings
    cap_emp = capital employed
    dt = current dividend
    """
    roi = curr_earn / cap_emp
    b = (curr_earn - dt) / curr_earn
    g = (roi * b) * 100
    return round(g, 4)
f81bb60fe4c0f903ad308d444c578e1447bbb598
45,464
import re


def cigarToLen(cigar):
    """
    Calculate sequence length from CIGAR string
    """
    # Split "cigar" on capital letters
    spans = re.split('[A-Z]', cigar)
    ops = re.split('[0-9]+', cigar)
    length = 0  # renamed from `len` to avoid shadowing the built-in
    del spans[-1]
    del ops[0]
    for i, span in enumerate(spans):
        # only M (match), I (insertion) and S (soft clip) consume read bases
        if ops[i] in ["M", "I", "S"]:
            length += int(span)
    return length
fa7c2b0633a349cc3295519bffcff9965c6ae704
45,466
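A usage sketch for cigarToLen; only M, I and S operations consume read bases, so the D run is ignored:

print(cigarToLen("5S10M2I3D"))  # 17 == 5 + 10 + 2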
import torch


def mask_out(tensor, start_ind, end_ind, value, dim=1):
    """Set the elements before start_ind and after end_ind (both inclusive) to the value."""
    if dim != 1:
        raise NotImplementedError

    batch_size, time = list(tensor.shape)[:2]
    # (oleg) This creates the indices every time, but doesn't seem to affect the speed a lot.
    inds = torch.arange(time, device=tensor.device, dtype=start_ind.dtype).expand(batch_size, -1)
    mask = (inds >= end_ind[:, None]) | (inds <= start_ind[:, None])
    tensor[mask] = value
    return tensor, mask
1326220320679c32d728ce727d174e00065eaa0a
45,467
def interaction_strength(idx: int, mol_map: dict, acceptor_exp: float) -> float:
    """Calculate interaction strength for atom with index `idx`."""
    acceptor_strength = mol_map[idx]["sa"]
    num_lp = mol_map[idx]["num_lp"]
    if num_lp != 0:
        return acceptor_strength * (num_lp ** acceptor_exp)
    return 0.0
382b69e56002a24691fdc9e8b6943eeeeee7293a
45,471
from json import JSONDecodeError, loads
from typing import Any


def decode_attrs(attrs: Any) -> Any:
    """Try to decode attributes as json if possible, otherwise return the
    attrs as they are.

    Parameters
    ----------
    attrs: Any
        an object containing attributes

    Returns
    ----------
    Any
        either the attributes as json or the attributes as received
    """
    try:
        attrs = loads(attrs)
    except (JSONDecodeError, TypeError):
        pass
    # Return in all cases. (The original only returned inside the except
    # branch, so a successful decode implicitly returned None.)
    return attrs
e49f344769b75f3d37a0ce86d84ac55f681f8156
45,477
def calculate_score(hazard_list, dictionary):
    """This function will take the hazard codes from a list and grab the
    respective hazard scores from a dictionary.
    """
    score_list = []
    for code in hazard_list:
        # Some of the codes end with a colon from extracting from jsons.
        # Remove them here if present.
        if code.endswith(':'):
            # removes the last character from the item, which will be the colon
            code = code[:-1]
        # loop through the dictionary
        for i in dictionary['Code']:
            # if code is present in dictionary
            if code == dictionary['Code'][i]:
                # append the hazard score to the score list
                score_list.append(dictionary['Hazard Score'][i])
    return score_list
e0fe7d995ca263f47624929c84f1a87a506257ed
45,478
def verify(self, case='', level='', **kwargs):
    """Enter the verification run mode.

    .. note::
       This command is only valid at the ``/BEGIN`` level, obtained
       with ``mapdl.finish()``.

    Parameters
    ----------
    case : str, optional
        Optional title of the verification manual file.  Also accepts
        ``'OFF'`` to disable the verification run mode.
    level : int, optional
        Verification level ranging from 1 to 6 defaulting to 4.

    Returns
    -------
    str
        The MAPDL response to the command.

    Examples
    --------
    Enter the verification routine with the default option.

    >>> mapdl.finish()
    >>> mapdl.verify('VM1')
    '*** VERIFICATION RUN - CASE VM1 *** OPTION= 4'
    """
    return self.run(f'/VERIFY,{case},{level}', **kwargs)
0b25f240a6bc3bb551613727c32524ebc663f5ee
45,480
import unicodedata
import re


def _key_from_url(url: str) -> str:
    """
    Convert a URL str to one valid for use as a file name or dict key.  URL
    Protocols are removed entirely.  The returned string will have characters
    in the set [a-zA-Z.-_].

    Parameters
    ----------
    url : str
        A URL string

    Returns
    -------
    str
        A filename-safe string

    Example
    -------
    >>> _key_from_url('http://test.alyx.internationalbrainlab.org/')
    'test.alyx.internationalbrainlab.org'
    """
    url = unicodedata.normalize('NFKC', url)  # Ensure ASCII
    url = re.sub('^https?://', '', url).strip('/')  # Remove protocol and trailing slashes
    url = re.sub(r'[^.\w\s-]', '_', url.lower())  # Convert non word chars to underscore
    return re.sub(r'[-\s]+', '-', url)
3e9f85fe38e68966f00f8d2a1fb0b3752d9efb75
45,487
def calculate_slasher_snapshot_difference(client, snapshot_old, snapshot_new):
    """
    Calculates the difference between two slasher snapshots.

    Parameters
    ----------
    client : ``Client``
        The respective client.
    snapshot_old : `None` or `tuple` of (`dict` of (`int`, `list` of `tuple` \
            (`bool`, ``SlasherApplicationCommand``)) items, `None` or `set` of ``ComponentCommand``)
        An old snapshot taken.
    snapshot_new : `None` or `tuple` of (`dict` of (`int`, `list` of `tuple` \
            (`bool`, ``SlasherApplicationCommand``)) items, `None` or `set` of ``ComponentCommand``)
        A new snapshot.

    Returns
    -------
    snapshot_difference : `None` or `tuple` (`tuple` (`set` of ``SlasherApplicationCommand``, `set` of \
            ``SlasherApplicationCommand``), `tuple` (`None` or `set` of ``ComponentCommand``, `None` or \
            `set` of ``ComponentCommand``)
        The difference between the two snapshots.
    """
    if (snapshot_old is None) and (snapshot_new is None):
        return None

    if snapshot_old is None:
        application_command_snapshot_old = None
        component_command_snapshot_old = None
    else:
        application_command_snapshot_old, component_command_snapshot_old = snapshot_old

    if snapshot_new is None:
        application_command_snapshot_new = None
        component_command_snapshot_new = None
    else:
        application_command_snapshot_new, component_command_snapshot_new = snapshot_new

    if (application_command_snapshot_old is not None) or (application_command_snapshot_new is not None):
        added_application_commands = []
        removed_application_commands = []

        guild_ids = set()
        if (application_command_snapshot_old is not None):
            guild_ids.update(application_command_snapshot_old.keys())

        if (application_command_snapshot_new is not None):
            guild_ids.update(application_command_snapshot_new.keys())

        for guild_id in guild_ids:
            local_added_application_commands = []
            local_removed_application_commands = []

            if (application_command_snapshot_new is not None):
                try:
                    new_changes = application_command_snapshot_new[guild_id]
                except KeyError:
                    pass
                else:
                    for added, command in new_changes:
                        if added:
                            local_added_application_commands.append(command)
                        else:
                            # collect the removal (the original called `.remove`
                            # on the still-empty list here, which would always
                            # raise ValueError)
                            local_removed_application_commands.append(command)

            if (application_command_snapshot_old is not None):
                try:
                    old_changes = application_command_snapshot_old[guild_id]
                except KeyError:
                    pass
                else:
                    for added, command in old_changes:
                        if added:
                            try:
                                local_added_application_commands.remove(command)
                            except ValueError:
                                local_removed_application_commands.append(command)
                        else:
                            try:
                                local_removed_application_commands.remove(command)
                            except ValueError:
                                local_added_application_commands.append(command)

            added_application_commands.extend(local_added_application_commands)
            removed_application_commands.extend(local_removed_application_commands)

        if (not added_application_commands):
            added_application_commands = None

        if (not removed_application_commands):
            removed_application_commands = None

        if (added_application_commands is None) and (removed_application_commands is None):
            application_command_difference = None
        else:
            if client.running and client.application.id:
                slasher = getattr(client, 'slasher', None)
                if (slasher is not None):
                    slasher.sync()

            application_command_difference = added_application_commands, removed_application_commands
    else:
        application_command_difference = None

    if (component_command_snapshot_old is None) or (component_command_snapshot_new is None):
        removed_component_commands = component_command_snapshot_old
        added_component_commands = component_command_snapshot_new
    else:
        removed_component_commands = component_command_snapshot_old - component_command_snapshot_new
        added_component_commands = component_command_snapshot_new - component_command_snapshot_old

    if (not removed_component_commands):
        removed_component_commands = None

    if (not added_component_commands):
        added_component_commands = None

    if (added_component_commands is None) and (removed_component_commands is None):
        component_command_difference = None
    else:
        component_command_difference = (removed_component_commands, added_component_commands)

    if (application_command_difference is None) and (component_command_difference is None):
        snapshot_difference = None
    else:
        snapshot_difference = (application_command_difference, component_command_difference)

    return snapshot_difference
8e16f3943bcf7651496a7e6e6a2c2e7f59c6fb37
45,490
from typing import Tuple


def summarize_location(location: Tuple[str, str, str, str, str, str]) -> Tuple[str, str, str]:
    """
    Get manuscript location and summarize for usage in citavi

    Args:
        location (tuple): metadata of manuscript's location

    Returns:
        tuple: summary of metadata of manuscript's location
    """
    location_list = list(location)
    for i in range(len(location_list)):
        if not location_list[i]:
            location_list[i] = ""
            continue
        location_list[i] = location_list[i] + ", "

    try:
        settlement = location_list[1] + location_list[0]
        settlement = settlement[:-2]
    except Exception:
        settlement = "unknown"
    try:
        archive = location_list[2] + location_list[3] + location_list[4]
        archive = archive[:-2]
    except Exception:
        archive = "unknown"

    signature = location_list[5]
    signature = signature[:-2]

    return settlement, archive, signature
fa6ed40a4ddc4510ea919773d9b68458c5edc738
45,501
def route_format(t_route_dest):
    """Shorten destinations to save space."""
    # Add more route name formatting of your choice to save display space.
    # If there's a substitution in the dictionary, use it, otherwise return the original.
    return {"TO GRANVILLE": "GRANVILLE",
            "COMM'L-BDWAY STN": "COM-BW STN"}.get(t_route_dest, t_route_dest)
17af8f25dc17c82292a23255676d4584bf44b89e
45,506
import pickle


def load_pkl(file):
    """Loads data from pickle

    Parameters
    ----------
    file : str

    Returns
    -------
    data
    """
    with open(file, 'rb') as f:
        data = pickle.load(f)
    return data
ff7a9cefa4231c9dee6030b8fd14b72e55a11548
45,510