Dataset schema. Each record below is printed as three lines: a content line (a Python function), a sha1 line (its hash), and an id line.

content: string, 39 to 14.9k characters
sha1: string, 40 characters
id: int64, 0 to 710k
def convert_quat_wxyz_to_xyzw(quat):
    """Convert quaternion from wxyz to xyzw representation."""
    return quat[..., [1, 2, 3, 0]]
ed02298c4f7ff61297fdcb6f40aa394045b9a812
40,233
def yaml(path, _pod):
    """Retrieves a yaml file from the pod."""
    return _pod.read_yaml(path)
ca610b1b916a83a535e578e3b6f102c36f0ed4af
40,234
def snake_case_to_title(string):
    """
    A convenience function that converts a snake-cased function name into title case

    :param str string: A string encoded using snake case
    :return: A converted string
    """
    return string.replace('_', ' ').title()
9229ebbfbe48fd13da17616a9b81dae01cfcb303
40,238
def 取最大数(数值列表):
    """
    Takes a list of values to compare, e.g. (1, 2, 3), and returns the largest number in it.

    :param 数值列表: (1, 2, 3)
    :return: 3
    """
    return max(数值列表)
111eba5c9c37e7656ca410447e1ac5a3f0041ea7
40,239
def ucc(graph):
    """Identify connected components in undirected graph.

    Examples:
        >>> ucc(graph)
        {1: ['A', 'B', 'C', 'E', 'D', 'F', 'G'], 2: ['H', 'I', 'J'], 3: ['K']}
    """
    visited = []
    # Group each vertex with a component.
    components = {}
    # Component marker
    num_cc = 0
    for v in graph:
        if v not in visited:
            num_cc += 1
            components[num_cc] = [v]
            # BFS
            q = [v]
            visited.append(v)
            while q:
                current = q.pop(0)
                for a in graph[current]:
                    if a not in visited:
                        visited.append(a)
                        q.append(a)
                        components[num_cc].append(a)
    return components
60688d8c4bc025e4cb12bbfff08c07cc29a611bd
40,247
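A minimal usage sketch for the ucc record above, assuming the adjacency-list dict shape implied by the graph[current] lookups; the sample graph is invented:

graph = {
    'A': ['B'], 'B': ['A', 'C'], 'C': ['B'],   # one component
    'H': ['I'], 'I': ['H'],                    # a second component
    'K': [],                                   # an isolated vertex
}
print(ucc(graph))  # {1: ['A', 'B', 'C'], 2: ['H', 'I'], 3: ['K']}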
import datetime


def epoch_to_datetime(value):
    """
    Converts epoch(unix) time into python datetime
    """
    # return datetime.datetime.fromtimestamp(value)
    return datetime.datetime.fromtimestamp(value).strftime('%c')
5e725f3b6e43828fc08e9fb68ba3ce6db13ae8b6
40,248
import random


def random_number(bits):
    """Generate a random integer that will cleanly fit in a number of bits."""
    upper, lower = 2**bits - 1, 2**(bits - 1)
    return random.randint(lower, upper)
89e7f167bc7af35b7193a7c148f863fd1e185a74
40,251
def check_ALL_DS(DS_ES_X_Map):
    """
    ES used with ALL as DS can not be used with any other DS.
    This function checks if this is true.
    """
    ES_with_ALL = [row[1] for row in DS_ES_X_Map if row[0] == "ALL"]
    # An ES is in violation if it also appears in a row whose DS is not "ALL".
    ES_without_ALL = [ES for ES in ES_with_ALL
                      for row in DS_ES_X_Map
                      if row[0] != "ALL" and row[1] == ES]
    return len(ES_without_ALL) == 0
58b2f2fd4a4a1f20bba74aa6150d91169a4a9695
40,252
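A small check of the rule in the check_ALL_DS record above, assuming each row of DS_ES_X_Map is a (DS, ES) pair as the row[0]/row[1] indexing suggests; the rows are invented:

ok_map = [("ALL", "es1"), ("ds2", "es2")]    # "es1" only ever appears with "ALL"
bad_map = [("ALL", "es1"), ("ds2", "es1")]   # "es1" also used with "ds2": violation
print(check_ALL_DS(ok_map))   # True
print(check_ALL_DS(bad_map))  # False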
def default_zero(input):
    """
    Helper function that returns the input rounded to two decimal places,
    replacing anything Falsey (such as Nones or empty strings) with 0.0.
    """
    return round(input if input else 0.0, 2)
d5c06c9f0449277e5fc2b8083335fa7e35595305
40,256
def unquote_colors(context):
    """
    URL unquote colors from context.
    """
    for k, v in context.items():
        if len(v) == 9 and v.startswith("%23"):
            context[k] = "#" + v[3:]
    return context
040fa15b3f480db7762128b08d0e053a83697f50
40,258
def split_name(name):
    """
    Split a name into two names. If there is only one name, the last name will
    be empty. If there are more than two, the extra names will be appended to
    the last name.

    Args:
        name (str): A name to split into first name, last name

    Returns:
        tuple: (first, last)
    """
    if name is None:
        return "", ""
    names = name.split(maxsplit=1)
    if len(names) == 0:
        return "", ""
    else:
        return names[0], " ".join(names[1:])
e399cc06f780a2c0139de93c1070a50f4224b38d
40,260
def _GetAllBuildersInConfig(trybot_config):
    """Returns a list of all variable builders referenced in config."""
    all_config_builders = set()
    for builders in trybot_config.values():
        for builder in builders.values():
            waterfall_builder = builder.get('waterfall_trybot')
            flake_builder = builder.get('flake_trybot')
            all_config_builders.add(waterfall_builder)
            all_config_builders.add(flake_builder)
    return list(all_config_builders)
1796a221aebcb9724568e49bde781bc568f867a1
40,262
def filter_process_by_name(name):
    """Filter process by process name."""
    return name in ["cp", "tee", "date", "Null", "recon-all"]
2b33d14e7e1bd3e6e09ce8af97a90e01d10dbe59
40,263
import time


def _CalculatePastDate(days_ago, now=None):
    """Calculates the timestamp N days ago from now."""
    if now is None:
        now = int(time.time())
    ts = now - days_ago * 24 * 60 * 60
    return ts
1a0affad807e1380dbb61a093cbc956dd3e86107
40,267
def assert_uniqueness_clause(property: str, node: str = 'node') -> str:
    """
    Returns the *part* of a statement that ensures a property of a node is unique.

    Parameters
    ----------
    property : str
        Name of the meant-to-be-unique property
    node : str, optional
        Name of the node (coming from other statement)

    Returns
    -------
    out: str
        Neo4j statement
    """
    return f"ASSERT {node}.`{property}` IS UNIQUE"
2433dfe24df0b58264387310b7875ce78ea7b3ac
40,268
def findLinker(seq, linker):
    """
    Match the linker in the read sequence.
    """
    pos = -1
    for i in range(len(seq) - 9):
        seed = seq[i:i + 9]
        if linker.startswith(seed):
            pos = i
            break
    return pos
93e767552d289a004eb10385d15e46ad90e785f3
40,269
def split_lengths_for_ratios(nitems, *ratios):
    """Return the lengths of the splits obtained when splitting nitems by the given ratios"""
    lengths = [int(ratio * nitems) for ratio in ratios]
    i = 1
    while sum(lengths) != nitems and i < len(ratios):
        lengths[-i] += 1
        i += 1
    assert sum(lengths) == nitems, f'{sum(lengths)} != {nitems}\n{ratios}'
    return lengths
8d2ccdd028afe74309d955e3a3a6741c87411b0b
40,271
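A quick worked example for the split_lengths_for_ratios record above; the 80/10/10 split is an illustrative choice:

# int() truncation can leave the lengths short of nitems, so the loop
# tops up splits from the end until the total matches.
print(split_lengths_for_ratios(100, 0.8, 0.1, 0.1))  # [80, 10, 10]
print(split_lengths_for_ratios(101, 0.8, 0.1, 0.1))  # [80, 10, 11]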
def filter_corpus_category(corpus, category):
    """Returns only corrections with a defined category from the full corpus format.

    Args:
        corpus (dict): Corpus dictionary, loaded with 'load_enelvo_format_full'.
        category (str): Selected category.

    Returns:
        list: A list of tuples with format (noisy_word, correction) if noisy_word
        belongs to ``category``.
    """
    corrs = []
    for i in corpus:
        for e in corpus[i]["errs"]:
            if e["cat"] == category:
                corrs.append((e["word"], e["corr"]))
    return corrs
e0fd35aaad9d0af429e21686fe56fcfdcf651533
40,274
def to_date_obj(date):
    """
    Transforms a date into an obj
    """
    obj = {}
    obj['year'] = date.year
    obj['month'] = date.month
    obj['day'] = date.day
    obj['hour'] = date.hour
    obj['minute'] = date.minute
    obj['second'] = date.second
    obj['microsecond'] = date.microsecond
    obj['tzinfo'] = str(date.tzinfo)
    return obj
34fcc0234a76c4644dc13ada139d11c0559ba4ea
40,276
def read(name):
    """
    Read file in local current working directory and return the contents

    :param name: The name of the file
    :type name: string
    :returns: string -- Contents of file
    """
    with open(name) as f:
        return f.read()
243f3e0c5818e3e498d3be14a5dea0a59495e417
40,278
import pickle


def load_df(path):
    """Loads and returns an object from a pickle file in path

    Parameters:
        path (string): Path where the pickle file resides

    Returns:
        object: Object in pickle file
    """
    with open(path, 'rb') as infile:
        return pickle.load(infile)
4399e897ef7ae9d7a2342548f0ad15819ded331e
40,285
def getStratifiedSampleBandPoints(image, region, bandName, **kwargs):
    """
    Function to perform stratified sampling of an image over a given region,
    using ee.Image.stratifiedSample(image, region, bandName, **kwargs)

    Args:
        image (ee.Image): an image to sample
        region (ee.Geometry): the geometry over which to sample
        bandName (String): the bandName to select for stratification

    Returns:
        An ee.FeatureCollection of sampled points along with coordinates
    """
    dargs = {
        'numPoints': 1000,
        'classBand': bandName,
        'region': region
    }
    dargs.update(kwargs)
    stratified_sample = image.stratifiedSample(**dargs)
    return stratified_sample
c5a8ae26f8b4a76dc2d12e48524698f2d582d08e
40,287
def z2lin(array):
    """calculate linear values from an array of dBs"""
    return 10**(array/10.)
e0ed221666398c9ca8488fd20f5e3b0711ad6a7c
40,288
def compute_jaccard_similarity(site_a, site_b):
    """
    Compute the Jaccard similarity between two given ActiveSite instances.

    Input: two ActiveSite instances
    Output: the similarity between them (a floating point number)
    """
    a = [i.type for i in site_a.residues]
    b = [i.type for i in site_b.residues]
    intersection = len(set(a) & set(b))
    union = len(set(a) | set(b))
    similarity = float(intersection) / float(union)
    return similarity
1e3f83e3a98c3e7f658a22f75a5f377efd6529d5
40,289
def images_path(instance, filename):
    """
    Returns path where free parking place images will be stored.
    """
    parking_name = instance.owning_parking.name
    if not parking_name:
        parking_name = '.'
    return 'parking_places/{0}/{1}'.format(parking_name.replace(' ', '_'), filename)
6ea3eb2f8ffbd2af3f729fd7c425a707cccf3c32
40,295
def stringifyPlayMove(pos, modifiers, name="game"):
    """
    A utility method for automating getting code for playing moves. Helpful
    for generating code for testing particular cases of moves made. Can be
    used with things like the GUI or moves made by the QModels to replicate
    exact board states without having to manually place every piece.
    Parameters work in the same way as are passed to Game.play and Game.canPlay

    :param pos: The position of the piece to move
    :param modifiers: A 3-tuple of the modifiers (left, forward, jump) for the piece to move
    :param name: The variable name of the game that this play method call will use
    :return A string representing the code used to make the move
    """
    return name + ".play(" + str(pos) + ", " + str(modifiers) + ")"
2a902ce02627be707664f125346f21d14f333e02
40,298
from collections import Counter


def get_piece(turn, grid):
    """Counts the current piece on the grid

    :param turn: "X" or "O"
    :param grid: A 2-dimensional 7x7 list
    :return: Number of pieces of "turn" on the grid
    """
    grid_combined = []
    for row in grid:
        grid_combined += row
    counter = Counter(grid_combined)
    return counter[turn]
1c7fda238ddba6d2620b5dfdee1b4173f606ede6
40,302
def get_distance(p1, p2):
    """It finds the minimum distance between two Points

    Parameters
    ----------
    p1 : shapely geometric object
        The first point
    p2 : shapely geometric object
        The second point

    Returns
    -------
    list
        Returns the minimum distance. The value follows the geometric object
        projection.
    """
    dist = 5000
    try:
        dist = min(dist, p1.distance(p2))
    except TypeError as err:
        print(f'{err}')
    return [dist]
2bcfdc62b25e286d1a1d46c27f058e8da3e722e9
40,305
def is_proxy(obj):
    """Return True if `obj` is an array proxy"""
    try:
        return obj.is_proxy
    except AttributeError:
        return False
871c163b30ccc1b31b435c9baac5c0d6063d271e
40,308
def is_key_all_values_equal(list_of_dict, key, value):
    """Check if all values of a key are equal to the specified value."""
    for d in list_of_dict:
        if d[key] != value:
            return False
    return True
c8e303ffd3f9de4f065ba0bd52d67e1a6c1f8708
40,309
def celsius_to_fahrenheit(celsius_temp):
    """Calculate fahrenheit temperature from celsius

    PARAMETERS
    ----------
    celsius_temp : float
        A temperature in degrees Celsius

    RETURNS
    -------
    temperature : float
    """
    # apply formula
    return (celsius_temp * (9/5)) + 32
c6674816a463d022da8863e0f8fea78dd57c1a22
40,314
from functools import reduce


def merge_dicts(list_of_dicts):
    """
    This will merge a list of dicts. Any duplicate keys will end up with the
    last value seen.
    """
    return reduce(lambda a, d: a.update(d) or a, list_of_dicts, {})
68c2d67c97f2276c31b4932c34e31034b1fe3213
40,317
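A one-line illustration for the merge_dicts record above (sample dicts invented): dict.update returns None, so `a.update(d) or a` evaluates to the accumulator dict, which lets reduce thread a single dict through the list.

print(merge_dicts([{'a': 1}, {'b': 2}, {'a': 3}]))  # {'a': 3, 'b': 2}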
def filename_flag(filename, flag):
    """Add a string to filenames to indicate how they've been processed."""
    # Insert '-<flag>' before the final extension, e.g. 'scan.fits' -> 'scan-<flag>.fits'.
    # (The original dot-splitting loop dropped interior dots and mangled
    # extensionless names, so rpartition is used instead.)
    base, dot, ext = filename.rpartition('.')
    if not dot:
        return filename + '-' + flag
    return base + '-' + flag + '.' + ext
f3cd6016244cf050b732e273be8dea4f94693677
40,327
from pathlib import Path
from typing import Union


def is_valid_file(filepath: Union[str, Path, None]) -> bool:
    """check if the passed filepath points to a real file"""
    if filepath is None:
        return False
    return Path(filepath).exists()
dbe6713938ac335d38d4df6a1f80b6595a65969f
40,329
def shorten_record_name(record_name):
    """Return the first part of the record (which can be None, comet or
    comet.connection)
    """
    if record_name is None:
        return record_name
    return record_name.split(".", 1)[0]
b84c86b22153f403909e86aa452d6f2c7eea32ca
40,330
import hashlib


def digest(key: str) -> str:
    """Get the hash digest for the key."""
    return hashlib.sha256(key.encode()).hexdigest()
82c426ce7f396ac6c5bce38ffe83ba802bb7ed83
40,333
import functools
import warnings


def deprecated(custom_msg=None, new_func_name=None):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used.

    We use this decorator instead of any deprecation library because all
    libraries raise a DeprecationWarning, but since by default this warning is
    hidden, we use this decorator to manually activate DeprecationWarning and
    turn it off after the warning has been emitted.
    """
    def wrap(func):
        @functools.wraps(func)
        def wrapped_method(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)  # Turn off filter
            msg = 'Call to deprecated function "{}".'.format(func.__name__)
            if new_func_name:
                msg += ' Please, use "{}" function instead.'.format(new_func_name)
            if custom_msg:
                msg = custom_msg
            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter('ignore', DeprecationWarning)  # Reset filter
            return func(*args, **kwargs)
        return wrapped_method
    return wrap
119774d6e0eb093b0e9039411ca62b824d6fb4e0
40,334
def coalesce_dates(dates):
    """
    Coalesces all date pairs into combined date pairs that makes it easy to
    find free time gaps.

    >>> from date_collapse import coalesce_dates
    >>> dates = [(1,4),(2,8),(12,16),(16,21)]
    >>> cdates = coalesce_dates(dates)
    >>> print(cdates)
    [(1, 8), (12, 21)]
    >>> dates = [(1,4),(2,8),(8,10),(12,16),(16,21),(21,31)]
    >>> cdates = coalesce_dates(dates)
    >>> print(cdates)
    [(1, 10), (12, 31)]
    """
    parsed_dates = []
    for date in dates:
        parsed_dates.extend([(date[0], 1), (date[1], -1)])
    parsed_dates.sort(key=lambda d: d[0])

    count = 0
    coalesced = []
    current_block = [None, None]
    for date in parsed_dates:
        if count == 0:
            if not coalesced or (coalesced[-1][1] != date[0]):
                current_block = [date[0], None]
            else:
                coalesced.pop()
        count += date[1]
        if count == 0:
            current_block[1] = date[0]
            coalesced.append((current_block[0], current_block[1]))
    return coalesced
161ef92c6c8946a277e11504cb3dee1082582123
40,337
def zscore(rate, mean, std):
    """Calculates the Z-score from the mean and std."""
    zscore = (rate - mean) / std
    return zscore
f122034a930301182a85db67457ba18b76ceeea0
40,342
def apply_process(sequence, number_of_times, process):
    """Apply process function to sequence number_of_times."""
    if isinstance(sequence, int):
        sequence = [int(num) for num in str(sequence)]
    for _ in range(number_of_times):
        sequence = process(sequence)
    return sequence
6db3657360c4dfdb6f38d9241429f66656913135
40,345
def apply_1overt_decay(params, t):
    """
    Implements the mathematical form: a = a0 / (1 + k*t).

    Args:
        params: parameters for the annealing
        t: iteration number (or you can use number of epochs)

    Returns:
        Updated learning rate
    """
    a0 = params['lr0']  # initial learning rate
    k = params['k']  # decay factor
    return a0 * 1. / (1 + k*t)
867e43cfe9733d66e469d9223d8b6e2521ca3362
40,347
import re


def _SplitFreqRange(freq_range):
    """Splits a `freq_range` str in a list of numerical (fmin, fmax) tuples."""
    try:
        fmin, fmax = re.split(',|-', freq_range.strip())
        return [(float(fmin), float(fmax))]
    except AttributeError:
        # `freq_range` is not a str: treat it as an iterable of range strings.
        freq_ranges = []
        for one_range in freq_range:
            fmin, fmax = re.split(',|-', one_range.strip())
            freq_ranges.append((float(fmin), float(fmax)))
        return freq_ranges
db3c7fc2d2a3576ab07b5acdbae9308408a04575
40,349
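A short illustration for the _SplitFreqRange record above (values invented). A single string yields one tuple; a list of strings has no .strip() method, so it triggers the AttributeError fallback and each element is parsed in turn.

print(_SplitFreqRange('10-20'))             # [(10.0, 20.0)]
print(_SplitFreqRange(['10,20', '30-40']))  # [(10.0, 20.0), (30.0, 40.0)]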
def _create_key_val_str(input_dict):
    """
    Returns string of format {'key': val, 'key2': val2}
    Function is called recursively for nested dictionaries

    :param input_dict: dictionary to transform
    :return: (str) reformatted string
    """
    def list_to_str(input_list):
        """
        Convert all list items to string.
        Function is called recursively for nested lists
        """
        converted_list = []
        for item in sorted(input_list, key=lambda x: str(x)):
            if isinstance(item, dict):
                item = _create_key_val_str(item)
            elif isinstance(item, list):
                item = list_to_str(item)
            converted_list.append(str(item))
        list_str = ", ".join(converted_list)
        return "[" + list_str + "]"

    items_list = []
    for key in sorted(input_dict.keys(), key=lambda x: str(x)):
        val = input_dict[key]
        if isinstance(val, dict):
            val = _create_key_val_str(val)
        elif isinstance(val, list):
            val = list_to_str(input_list=val)
        items_list.append("{}: {}".format(key, val))
    key_val_str = "{{{}}}".format(", ".join(items_list))
    return key_val_str
4313054d7afd46b216fabe226530c75466fee527
40,350
import re


def filter_tests(filters, test_ids):
    """Filter test_ids by the test_filters.

    :param list filters: A list of regex filters to apply to the test_ids. The
        output will contain any test_ids which have a re.search() match for any
        of the regexes in this list. If this is None all test_ids will be
        returned.
    :param list test_ids: A list of test_ids that will be filtered
    :return: A list of test ids.
    """
    if filters is None:
        return test_ids
    _filters = list(map(re.compile, filters))

    def include(test_id):
        for pred in _filters:
            if pred.search(test_id):
                return True
        return False

    return list(filter(include, test_ids))
d8ca31fddb052dde7eaaa21c777e2963e705a598
40,353
def choose_best(ordered_choices, possible, check, default=None):
    """
    Select the best xref from several possible xrefs given the ordered list of
    xref database names.

    This function will iterate over each database name and select all xrefs
    that come from the first (most preferred) database. This uses the check
    function to see if the database contains the correct information because
    this doesn't always just check based upon the database names or xref, but
    also the rna_type (in some cases). Using a function gives a lot of
    flexibility in how we select the acceptable xrefs.

    Parameters
    ----------
    ordered_choices : list
        A list of several possible xref database names. These should be in the
        order in which they are preferred.
    possible : list
        The list of xrefs to find the best for.
    check : callable
        A callable object to see if given xref and database name match.
    default : obj, None
        The default value to return if we cannot find a good xref.

    Returns
    -------
    selected : obj
        The list of xrefs which are 'best' given the choices. If there is no
        good xref the default value is returned.
    """
    for choice in ordered_choices:
        found = [entry for entry in possible if check(choice, entry)]
        if found:
            return (choice, found)
    return (None, default)
d10d8d1d527fc2a04603a8f4a6c8e10bb5768bdd
40,357
def format_time_to_HMS(num_seconds):
    """
    Formats 'num_seconds' in H:MM:SS format.

    If the argument is a string, then it checks for a colon. If it has a
    colon, the string is returned untouched. Otherwise it assumes seconds and
    converts to an integer before changing to H:MM:SS format.
    """
    if type(num_seconds) == type(''):
        if ':' in num_seconds:
            return num_seconds

    secs = int(num_seconds)

    nhrs = secs // 3600
    secs = secs % 3600
    nmin = secs // 60
    nsec = secs % 60

    hms = str(nhrs) + ':'
    if nmin < 10:
        hms += '0'
    hms += str(nmin) + ':'
    if nsec < 10:
        hms += '0'
    hms += str(nsec)

    return hms
2cd24911976f9d502458043b022a3007e1a1611b
40,361
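A few spot checks for the format_time_to_HMS record above (inputs invented):

print(format_time_to_HMS(3661))       # '1:01:01'
print(format_time_to_HMS('3661'))     # '1:01:01' (string without a colon is converted)
print(format_time_to_HMS('2:15:00'))  # '2:15:00' (already formatted, returned untouched)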
def afri_16(b8a, b11):
    """
    Aerosol Free Vegetation Index 1.6
    (Karnieli, Kaufman, Remer, and Wald, 2001).

    .. math::
        AFRI_16 = b8a - 0.66 * (b11 / b8a) + 0.66 * b11

    :param b8a: NIR narrow.
    :type b8a: numpy.ndarray or float
    :param b11: SWIR 1.
    :type b11: numpy.ndarray or float
    :returns AFRI_16: Index value

    .. Tip::
        Karnieli, A., Kaufman, Y. J., Remer, L., Wald, A. 2001.
        AFRI - aerosol free vegetation index. Remote Sensing of Environment
        77, 10-21. doi:10.1016/S0034-4257(01)00190-0.
    """
    AFRI_16 = b8a - 0.66 * (b11 / b8a) + 0.66 * b11
    return AFRI_16
0d0da371a4fea948032b2dfd1bb89a55ffe19680
40,364
def reserved(num):
    """Return reserved bytes - zeros"""
    return bytearray(num)
3e76b61a3c71179d3b3c8573420b60431f711d31
40,372
def episode_player_url(episode):
    """Return the player URL for the given episode code"""
    player_url = 'http://www.bbc.co.uk/radio/player/{}'
    return player_url.format(episode)
99400b8d0b0a8ed8bcd73e788ecad0e19764cc33
40,375
import torch


def build_sparse_adjacent_matrix(edges: list, n: int, device=None, dtype=torch.float, undirectional=True):
    """
    Return adjacency matrix

    :param edges: list of edges, for example (st, ed)
    :param n: number of vertices
    :param device:
    :param dtype:
    :param undirectional: make adjacency matrix un-directional
    :return: the sparse adjacent matrix
    """
    i = torch.tensor(list(zip(*edges)))
    v = torch.ones(i.shape[1], dtype=dtype)
    sparse = torch.sparse_coo_tensor(i, v, (n, n))
    if device is not None:
        sparse = sparse.to(device)
    a = sparse.to_dense()
    if undirectional:
        ud_a = ((a > 0) | (a.transpose(-2, -1) > 0)).to(dtype)
        a = ud_a
    return a
bffcea8b65cd3c94c8b0da33aad3dc4108ce0519
40,384
def oz_to_g(oz):
    """Convert ounces to grams"""
    return oz * 28.34952
0c547b3b95964e25ace4d00d9c491f367282b89f
40,393
def lam2f(l):
    """
    Computes the photon frequency in Hz

    Parameters
    ----------
    l : float
        Photon wavelength in m

    Returns
    -------
    f : float
        Frequency in Hz
    """
    f = 299792458/l
    return f
5e6d5745c1a19f4b2a8def3fbdca707e60634019
40,395
def _mangle(cls, name):
    """
    Given a class and a name, apply python name mangling to it

    :param cls: Class to mangle with
    :param name: Name to mangle
    :return: Mangled name
    """
    return f"_{cls.__name__}__{name}"
8b789a03b2f25c71bc661cc1eb394650087128b9
40,397
from typing import Dict, List, Optional


def aggregate_statuses(statuses: List[Dict], dc_voltage=False) -> Optional[Dict]:
    """Aggregates inverter statuses for use for PVOutput.org uploads.

    Does some rounding and integer conversion.

    Args:
        statuses: List of inverter statuses as returned by Inverter.status().
        dc_voltage: If True, aggregates DC voltage instead of AC voltage.

    Returns:
        Dictionary of keyword arguments for add_status() or None if no
        inverter has operation mode normal.
    """
    def avg(items):
        """Calculates average."""
        i = list(items)
        return sum(i) / len(i)

    # Calculate values for each inverter separately
    values = []
    for s in statuses:
        # Filter systems with normal operating mode
        if s['operation_mode'] != "Normal":
            continue

        # Calculate voltage
        if dc_voltage:
            # Takes average of PV1 and PV2 voltage
            voltage = avg([s['pv1_voltage'], s['pv2_voltage']])
        elif 'grid_voltage_r_phase' in s:
            # For three-phase inverters, take average voltage of all three phases
            voltage = avg([s['grid_voltage_r_phase'],
                           s['grid_voltage_s_phase'],
                           s['grid_voltage_t_phase']])
        else:
            # For one phase inverter, pick the grid voltage
            voltage = s['grid_voltage']

        values.append({
            'energy_gen': int(s['energy_today'] * 1000),
            'power_gen': int(s['output_power']),
            'temp': s['internal_temperature'],
            'voltage': voltage,
        })

    # Aggregate values of all inverters
    if not values:
        return None
    return {
        'energy_gen': sum(v['energy_gen'] for v in values),
        'power_gen': sum(v['power_gen'] for v in values),
        'temp': round(avg(v['temp'] for v in values), 1),
        'voltage': round(avg(v['voltage'] for v in values), 1),
    }
8432ec1f14c3e96360934df456e655eb08553f37
40,401
def traverse(fileName, steepness=(1, 3), start=0):
    """Count collisions of a traversal of a slope.

    Parameters:
        fileName: of tree-map
        start: index of start position
        steepness: tuple of vertical speed (plummet) and horizontal speed (veer)."""
    width = 0
    collisions = 0
    position = start
    progress = 0
    plummet, veer = steepness
    with open(fileName) as inFile:
        for line in inFile:
            if width == 0:  # firstLine
                width = len(line) - 1  # don't count newline
            progress %= plummet
            if progress == 0:
                if line[position] == '#':
                    collisions += 1
                position += veer
                position %= width
            progress += 1
    return collisions
45b13edb3726b19d0df3e347dd177390568a54c2
40,403
def match_host(host, domainlist):
    """Return True if host matches an entry in given domain list."""
    if not host:
        return False
    for domain in domainlist:
        if domain.startswith('.'):
            if host.endswith(domain):
                return True
        elif host == domain:
            return True
    return False
099ea605da3734433a564815c1eb58d7d58dfd5a
40,404
def isthaichar(ch: str) -> bool:
    """
    Check if a character is a Thai character

    :param str ch: input character
    :return: True or False
    """
    ch_val = ord(ch)
    if ch_val >= 3584 and ch_val <= 3711:
        return True
    return False
e50f78105c3db03dc4ee8bac7735a1d809d53656
40,406
def get_prop_architecture(typology_df, architecture_DB):
    """
    This function obtains every building's architectural properties based on
    the construction and renovation years.

    :param typology_df: DataFrame containing each building's construction and
        renovation categories for each building component based on the
        construction and renovation years
    :type typology_df: DataFrame
    :param architecture_DB: DataFrame containing the archetypal architectural
        properties for each use type, construction and renovation year
    :type architecture_DB: DataFrame
    :return prop_architecture_df: DataFrame containing the architectural
        properties of each building in the area
    :rtype prop_architecture_df: DataFrame
    """
    # create prop_architecture_df based on the construction categories and
    # archetype architecture database
    prop_architecture_df = typology_df.merge(architecture_DB, left_on='STANDARD', right_on='STANDARD')
    return prop_architecture_df
3dc4bfe88783c2a20a12a8951db789b3cdcfd460
40,410
def sets_to_contingency(a, b, N):
    """
    Creates a contingency table from two sets.

    params:
        a, b: sets to compare
        N: total number of possible items

    returns:
        (f11, f10, f01, f00) tuple of contingency table entries:
        f11 = # of items both in a and b
        f10 = # of items only in a
        f01 = # of items only in b
        f00 = # of items not in either a or b
    """
    f11 = len(a.intersection(b))
    f10 = len(a) - f11
    f01 = len(b) - f11
    f00 = N - (f11 + f10 + f01)
    return (f11, f10, f01, f00)
3d782fb47899c6e401034750dddfaffc98f9afc2
40,412
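A sanity check for the sets_to_contingency record above (sets invented): the four cells must partition the N-item universe.

f11, f10, f01, f00 = sets_to_contingency({1, 2, 3}, {2, 3, 4}, 10)
print((f11, f10, f01, f00))         # (2, 1, 1, 6)
print(f11 + f10 + f01 + f00 == 10)  # True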
def uniq_list(inlist):
    """Remove duplicate elements from a list"""
    inset = set(inlist)
    return list(inset)
7699d42cbfad14f2479c8cf133113c62ae236ab4
40,413
from typing import Any, Dict


def get_field(data: Dict[str, Dict[str, Any]], key: str) -> Any:
    """
    Get a field from nested dictionary, with the field denoted with
    dot-separated keys. For example, "a.b.c" -> data['a']['b']['c']
    """
    keys = key.split(".")
    while keys:
        data = data[keys.pop(0)]
    return data
efd342e3badde4e83d3ad344a2188b7fb49b6d04
40,414
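A one-line demonstration for the get_field record above (the nested dict is invented):

print(get_field({'a': {'b': {'c': 42}}}, 'a.b.c'))  # 42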
def canonicalize_node_size(node):
    """
    Given a node description from the GCE API returns the canonical
    butter format.
    """
    return {
        "type": node.name,
        # Memory is returned in "MB"
        "memory": int(node.ram * 1000 * 1000),
        "cpus": float(node.extra["guestCpus"]),
        "storage": node.disk * 1024,
        "location": node.extra["zone"].name
    }
3bc9655234b6f7141eb3b311b3aecd9ba5ec1b99
40,420
def mix_images(background_img, foreground_img):
    """paste an image on top of another image

    Args:
        background_img: pillow image in background
        foreground_img: pillow image in foreground

    Returns:
        pillow image
    """
    background_img = background_img.convert('RGBA')
    foreground_img = foreground_img.convert('RGBA')
    img_w, img_h = foreground_img.size
    bg_w, bg_h = background_img.size
    # Center the foreground on the background
    offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
    background_img.paste(foreground_img, offset, mask=foreground_img)
    return background_img
b86834041c42891b54795b72588f33f1c41e320a
40,421
def _decode_instance(encoded_data, decoded_objects, data_to_decode):
    """Decode a data structure

    Args:
        encoded_data (:obj:`dict`, :obj:`list`, or scalar): data structure with encoded objects
        decoded_objects (:obj:`dict`): dictionary that maps the unique ids of encoded
            objects to dictionaries that represent the decoded objects
        data_to_decode (:obj:`list`): list of tuples of data structures that still need
            to be decoded. The first element represents the data structure that needs to
            be decoded. The second element represents the object that will represent the
            decoded data structure.

    Returns:
        :obj:`dict`, :obj:`list`, or scalar: decoded data structure
    """
    if isinstance(encoded_data, dict) and '__type' in encoded_data:
        obj_type = encoded_data.get('__type')
        obj = decoded_objects.get(encoded_data['__id'], None)
        if obj is None:
            obj = {'__type': obj_type}
            decoded_objects[encoded_data['__id']] = obj
            data_to_decode.append((encoded_data, obj))
    elif isinstance(encoded_data, list):
        obj = []
        data_to_decode.append((encoded_data, obj))
    elif isinstance(encoded_data, dict):
        obj = {}
        data_to_decode.append((encoded_data, obj))
    else:
        obj = encoded_data
    return obj
8e9cb5502aded89cc04268b3098cff9e25fb1a91
40,422
import bisect


def find_min_revision_index(revisions_list, revision):
    """Find the min index for bisection. Find largest revision <= the given revision."""
    # bisect_left partitions |revisions_list| into 2 such that:
    #   all(val < revision for val in a[:index])
    #   all(val >= revision for val in a[index:])
    index = bisect.bisect_left(revisions_list, revision)
    if index < len(revisions_list) and revisions_list[index] == revision:
        return index
    if index > 0:
        return index - 1
    # No revisions <= given revision.
    return None
5a664b74613394a7376a5d2e54333dbf66e83b2c
40,423
def users_to_names(users):
    """Convert a list of Users to a list of user names (str)."""
    return [u.display_name if u is not None else '' for u in users]
881b6717e11d88971ef307fd6b128f9d83d0868c
40,424
def sensibleBulk(Tw, Ta, S, rhoa=1.2, Ch=1.5e-3, cpa=1004.67):
    """
    Sensible heat flux from water using the bulk exchange formulation

    Inputs:
        Tw - Water temp [C]
        Ta - Air temp [C]
        S - Wind speed magnitude [m s^-1]
        rhoa - air density [kg m^-3]
        Ch - Stanton number
        cpa - Specific heat of air [J kg-1 K-1]
    """
    return -rhoa*cpa*Ch*S*(Tw-Ta)
ec7bf965a58704c7cbd5e099f6771fa40698c4e8
40,429
def triage_hashes(hash_map):
    """Triage hash map in pair of names to keep and to remove in that order.

    Three cases:
    0. size zero regardless of hash => remove
    1. unique hash => keep
    2. hash matching two entries => keep both
    3. hash with more than two entries => keep first and last, rest remove
    """
    keep, remove = [], []
    for info in hash_map.values():
        if info[0][1] == 0:
            remove.extend(name for name, _ in info)
        else:
            if len(info) == 1:
                keep.extend(name for name, _ in info)
            else:
                first, last = info[0][0], info[-1][0]
                keep.extend([first, last])
                remove.extend(name for name, _ in info[1:-1])
    return keep, remove
0d2cb2f6cbff3436b780ac45eb1db0c3b7753488
40,432
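An illustrative call for the triage_hashes record above, assuming hash_map maps a digest to a list of (name, size) pairs, as the info[0][1] and (name, _) patterns imply; digests and names are invented:

hash_map = {
    'd41d8cd9': [('empty_a.txt', 0), ('empty_b.txt', 0)],          # case 0: remove
    'aaaa1111': [('unique.txt', 10)],                              # case 1: keep
    'bbbb2222': [('one.txt', 5), ('mid.txt', 5), ('two.txt', 5)],  # case 3: keep ends
}
keep, remove = triage_hashes(hash_map)
print(keep)    # ['unique.txt', 'one.txt', 'two.txt']
print(remove)  # ['empty_a.txt', 'empty_b.txt', 'mid.txt']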
import re


def remove_outer_parens(s):
    """
    If there are outer parens when we don't need them, get rid of them.
    Only one set.

    >>> a = "(1910-1920)"
    >>> remove_outer_parens(a)
    '1910-1920'
    """
    ret_val = s
    m = re.match(r"\s*\((?P<inside_data>.*)\)\s*", s)
    if m:
        ret_val = m.group("inside_data")
    return ret_val
353aa7adb19dd6e521c038acac3dabac72030325
40,434
import re


def expNumRe(text):
    """
    expand numeric regular expression to list
    e.g. 'n[01-03],n1[0-1]': ['n01','n02','n03','n10','n11']
    e.g. 'n[09-11].com': ['n09.com','n10.com','n11.com']
    """
    explist = []
    for regex in text.split(','):
        regex = regex.strip()
        r = re.match(r'(.*)\[(\d+)-(\d+)\](.*)', regex)
        if r:
            h = r.group(1)
            d1 = r.group(2)
            d2 = r.group(3)
            t = r.group(4)
            convert = lambda d: str(('%0' + str(min(len(d1), len(d2))) + 'd') % d)
            # Compare numerically; comparing the digit strings directly can
            # mis-order values of different widths.
            if int(d1) > int(d2):
                d1, d2 = d2, d1
            explist.extend([h + convert(c) + t for c in range(int(d1), int(d2)+1)])
        else:
            # keep original value if not matched
            explist.append(regex)
    return explist
cd42ba0fca726c69a0a3b4335373317467cc9463
40,435
import shutil


def process_java_resources(target, source, env):
    """Copy resource file into .resources dir."""
    shutil.copy2(str(source[0]), str(target[0]))
    return None
3ee5194703956d43187a0c4f802c3ee4c132c18a
40,440
def function_paths(func, tags):
    """Paths to all source files in tags defining a function func."""
    return sorted([tag['file'] for tag in tags if tag['symbol'] == func])
39931421d8220bd9aa74dc9d5813d29e7e686b5c
40,441
def float_to_string(value: float, replacement: str = "0,00") -> str:
    """
    Converts a float to a properly formatted string value
    """
    return ("%.2f" % value).replace('.', ',') if value is not None else replacement
95570ff4fcb78911c9f9a66f5559aea7fa73bbee
40,442
import glob
import re


def _find_cpt_base(cpt_base):
    """
    Find checkpoint file base name in current directory

    :param str cpt_base: Start of checkpoint file name that ends with a number
        of one to three digits followed by '.cpt'
    :return: The base name of the checkpoint files (everything but the number and ".cpt")
    :rtype: str
    """
    possible_matches = glob.glob(cpt_base + "*.cpt")
    for f_name in possible_matches:
        match = re.match(r"({}.*?)\d{}\.cpt".format(cpt_base, "{1,3}"), f_name)
        if match:
            return match.group(1)
    raise ValueError(
        "No checkpoint file name found based on the base "
        "name {}.".format(cpt_base)
    )
765bc409c49ffdc9d0574fab93a4e3a8e5660ab2
40,448
def _as_bytes(s):
    """
    Used to ensure string is treated as bytes

    Args:
        s (str): string to convert to bytes

    Returns:
        byte-encoded string

    Example:
        >>> str(_as_bytes('Hello, World').decode())  # Duck typing to check for byte-type object
        'Hello, World'
    """
    if isinstance(s, bytes):
        return s
    return bytes(s, encoding='latin_1')
fb5c2d09a1a1d930e05142ec4644554979156170
40,452
def _maybe_correct_vars(vars):
    """Change vars from string to singleton tuple of string, if necessary."""
    if isinstance(vars, str):
        return (vars,)
    else:
        return vars
1aa46b03988f06a3697b703991c64899e173d0eb
40,453
def decrementing_pattern(size: int) -> bytes:
    """
    Return `size` bytes with a pattern of decrementing byte values.
    """
    ret = bytearray(size)
    for i in range(size - 1, -1, -1):
        ret[i] = i & 0xff
    return bytes(ret)
b33981468c9e23ae09582e873547147609ebd2e2
40,459
import json
from typing import List


def analyze_apache_logs(input_file: str, http_response_code_threshold=0.5) -> List:
    """
    Analyze parsed Apache access log file to find malicious activity.

    :param input_file: Apache access log file (JSON format)
    :param http_response_code_threshold: HTTP response code ratio under which to flag as malicious
    :return: list of malicious logs
    """
    malicious_logs = []
    http_response_ratios = {}

    with open(input_file, 'r') as f:
        logs = json.load(f)

    # look for specific message types and count number of HTTP 200 response codes versus error codes
    for log in logs:
        if 'Nmap Scripting Engine' in log['user_agent']:
            mal_data = {'category': 'NMAP Scanning',
                        'client_ip': log['client_ip'],
                        'datetime': log['datetime']}
            malicious_logs.append(mal_data)
        if log['client_ip'] not in http_response_ratios:
            http_response_ratios[log['client_ip']] = {'200': 0, 'error': 0}
        if log['response_code'] != '200':
            http_response_ratios[log['client_ip']]['error'] += 1
        else:
            http_response_ratios[log['client_ip']]['200'] += 1
        http_response_ratios[log['client_ip']]['datetime'] = log['datetime']

    # process HTTP response code ratios and append to malicious logs if ratio is under given threshold
    for k, v in http_response_ratios.items():
        http_200 = v['200']
        http_error = v['error']
        total = http_200 + http_error
        ratio = http_200 / total
        if ratio < http_response_code_threshold:
            v['ratio'] = ratio
            v['category'] = 'Web Directory Enumeration'
            tmp_dict = {'category': 'Web Directory Enumeration',
                        'client_ip': k,
                        'datetime': v['datetime']}
            malicious_logs.append(tmp_dict)

    return malicious_logs
2255e9ca7c43f93d28f61e6e25687d3b5f61ebd8
40,460
def build_lookup_dict_snmp_trap(list_content):
    """
    Build key/value lookup dict specifically for SNMP Traps which use
    "server-ip" + "version"

    :param list_content: List of dicts to derive lookup structs from
    :return: lookup dict
    """
    lookup_dict = {}
    for item in list_content:
        item_server_ip = item.get('server_ip')
        item_version = item.get('version')
        item_id = item.get('id')
        if item_server_ip and item_version and item_id is not None:
            lookup_dict["{0}+{1}".format(item_server_ip, item_version)] = item_id
    return lookup_dict
42b38ff7cd26cd5c785f474d131c67c303fbe1ce
40,463
import math


def format_hash(hash_str: str, hash_len: int, hash_seg_len: int, hash_sep: str) -> str:
    """
    Format a hash string: keep only hash_len chars from it, and break it up
    into segments of len hash_seg_len, using the hash_sep as separator.

    Ex:
    >>> format_hash('abcdef1232567890', 8, 2, '-')
    'ab-cd-ef-12'
    """
    hash_str = hash_str[:hash_len]
    if hash_seg_len >= hash_len:
        return hash_str
    num_segs = math.ceil(len(hash_str) / hash_seg_len)
    return hash_sep.join(hash_str[hash_seg_len * i: (hash_seg_len * i + hash_seg_len)]
                         for i in range(num_segs))
2e7866fcc871bab1c1758403bc198a10c54c1334
40,468
import importlib


def package_is_installed(package_name):
    """Return true iff package can be successfully imported."""
    try:
        importlib.import_module(package_name)
        return True
    except Exception:
        return False
eb0c279bd85aae209331d4e6677fb19c31ab037e
40,470
import difflib
from typing import List


def get_list_difference(list1: List[str], list2: List[str]) -> List[str]:
    """
    Return list of elements that help turn list1 into list2.

    This should be a "minimal" list of differences based on changing one list
    into the other.

    >>> get_list_difference(["a", "b"], ["b", "a"])
    ['b']
    >>> get_list_difference(["a", "b"], ["a", "c", "b"])
    ['c']
    >>> get_list_difference(["a", "b", "c", "d", "e"], ["a", "c", "b", "e"])
    ['c', 'd']

    The last list happens because the matcher asks to insert 'c', then delete
    'c' and 'd' later.
    """
    diff = set()
    matcher = difflib.SequenceMatcher(None, list1, list2)  # None means skip junk detection
    for code, left1, right1, left2, right2 in matcher.get_opcodes():
        if code != "equal":
            diff.update(list1[left1:right1])
            diff.update(list2[left2:right2])
    return sorted(diff)
e09e87148f4827a766afe42d8878e2eeeb4f3127
40,473
def _lane_detail_to_ss(fcid, ldetail):
    """Convert information about a lane into Illumina samplesheet output."""
    return [fcid, ldetail["lane"], ldetail["name"], ldetail["genome_build"],
            ldetail["bc_index"], ldetail["description"], "N", "", "",
            ldetail["project_name"]]
cad5549b67a9147685416e9982b219ed29577190
40,474
def nv_compute_capability(dev):
    """If *dev* is an Nvidia GPU :class:`pyopencl.Device`, return a tuple
    *(major, minor)* indicating the device's compute capability.
    """
    try:
        return (dev.compute_capability_major_nv, dev.compute_capability_minor_nv)
    except Exception:
        return None
eb7b6a9386b1f80019e94a0ed697f795d4474d79
40,477
def make_mmi_cmd(fa):
    """Return a minimap2 cmd string to build mmi index."""
    return 'minimap2 -x map-pb -d {fa}.mmi {fa}'.format(fa=fa)
8f03063cb3abcfee66ad364f788f4851c955d0b9
40,481
def _dict_keys_get(d, keys):
    """Recursively get values from d using `__getitem__`"""
    for k in keys:
        d = d[k]
    return d
d83dfce489ecff1b53eb7434e12d615aaf76def8
40,482
def _remove_tokens(tokenized_docs, counts, min_counts, max_counts):
    """
    Words with count < min_counts or count > max_counts will be removed.
    """
    total_tokens_count = sum(
        count for token, count in counts.most_common()
    )
    print('total number of tokens:', total_tokens_count)

    unknown_tokens_count = sum(
        count for token, count in counts.most_common()
        if count < min_counts or count > max_counts
    )
    print('number of tokens to be removed:', unknown_tokens_count)

    keep = {}
    for token, count in counts.most_common():
        keep[token] = count >= min_counts and count <= max_counts

    return [(i, [t for t in doc if keep[t]]) for i, doc in tokenized_docs]
2ef618c0ef7c7180c1426ca99f67ee98862813c8
40,486
def _get_cmd_tree(subcmds):
    """Convert flat list of subcmd objects into hierarchical dictionary
    {'command name': {'subcommand name 1': subcmd1, 'subcommand name 2': subcmd2}}
    """
    cmds = {}
    for sub_cmd in subcmds:
        cmd_dict = cmds.setdefault(sub_cmd.cmd, {})
        cmd_dict[sub_cmd.name] = sub_cmd
    return cmds
76f44db545d298b94f9eb2323a5ade280b5f0380
40,490
def _key_split(matchobj):
    """Expands a {key a+b+c} syntax into <span class="key">a</span> + ...

    More explicitly, it takes a regex matching {key ctrl+alt+del} and returns:
    <span class="key">ctrl</span> + <span class="key">alt</span> + <span class="key">del</span>
    """
    keys = [k.strip() for k in matchobj.group(1).split('+')]
    return ' + '.join(['<span class="key">%s</span>' % key for key in keys])
519aa2512967aabf266df604280c07d85575a291
40,494
def is_3d(ds, v):
    """Check if xr.DataArray has 3 dimensions."""
    dims = ds[v].dims
    return len(dims) == 3
e095561f47f9daeeb57be327a8af93bb4ac2c2f4
40,496
def flat_ind_zp_so3(l, m, n, b):
    """
    The SO3 spectrum consists of matrices f_hat^l of size (2l+1, 2l+1) for
    l=0, ..., L_max = b - 1. These can be stored in a zero-padded array A of
    shape (b, 2b, 2b) with axes l, m, n with zero padding around the center of
    the last two axes. If we flatten this array A we get a vector v of size
    4b^3. This function gives the flat index in this array v corresponding to
    element (l, m, n)

    The zero-based 3D index of (l, m, n) in A is (l, b + m, b + n).
    The corresponding flat index is i = l * 4b^2 + (b + m) * 2b + b + n

    :param l, m, n: spectral indices
    :return: index of (l, m, n) in flat zero-padded vector
    """
    return l * 4 * (b ** 2) + (b + m) * 2 * b + b + n
f1e9327e33ae31fce28c33d18c2c49b72adafb22
40,498
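A numeric check of the index formula in the flat_ind_zp_so3 record above (values invented): with b = 2, element (l, m, n) = (1, 0, 1) sits at 3D index (1, 2, 3) in the (2, 4, 4) array, so the flat index is 1*16 + 2*4 + 3 = 27.

print(flat_ind_zp_so3(1, 0, 1, 2))  # 27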
import warnings


def process_interaction_params(parameters):
    """Formats and completes interaction parameters.

    Interaction parameters are combined into a dictionary passed as a single
    argument.

    Parameters
    ----------
    color: tuple
        Format (r,g,b).
    headheight: float
        Height of interaction head.
    headwidth: float
        Width of interaction head.
    zorder: int
        Matplotlib zorder value of the interaction.
    direction: string
        Determines what side of the construct the interaction is drawn on;
        'forward' or 'reverse'.
    linewidth: float
        Determines the width of lines used to draw the interaction.
    heightskew: float
        Skews the total height of the interaction.
    sending_length_skew: float
        Skews the length of the origin line of the interaction.
    receiving_length_skew: float
        Skews the length of the receiving line of the interaction.
    distance_from_baseline: float
        Skews the distance between the interaction and the baseline.
    """
    final_parameters = {'color': (0, 0, 0),
                        'headheight': 7.0,
                        'headwidth': 7.0,
                        'zorder': 0,
                        'direction': 'forward',
                        'linewidth': 1.0,
                        'heightskew': 10.0,
                        'sending_length_skew': 0.0,
                        'receiving_length_skew': 0.0,
                        'distance_from_baseline': 10.0}
    if parameters is None:
        return final_parameters

    # Collate interaction parameters
    for key in parameters:
        if key in final_parameters:
            final_parameters[key] = parameters[key]
        else:
            warnings.warn(f"""'{key}' is not a valid interaction parameter.""")

    # Amplify zorder to ensure all drawings composing the interaction can be grouped on Z axis
    final_parameters['zorder'] *= 100
    return final_parameters
9665eb8ae602e3436609eda4920772c006a82127
40,500
import torch


def kl_loss_full(mean, var, mean_prior, var_prior):
    """
    KL divergence of two multivariate normal distributions.

    :param mean: mean of distribution 1
    :param var: covariance of distribution 1
    :param mean_prior: mean of distribution 2
    :param var_prior: covariance of distribution 2
    :return: KL divergence of distribution 1 and 2
    """
    mvn = torch.distributions.MultivariateNormal(loc=mean, covariance_matrix=var)
    prior = torch.distributions.MultivariateNormal(loc=mean_prior, covariance_matrix=var_prior)
    return torch.distributions.kl_divergence(mvn, prior).mean()
b3d7e01a37445b354f47daabb0159745d438b1df
40,504
def xml_tree_equivalence(e1, e2):
    """
    Rough XML comparison function based on https://stackoverflow.com/a/24349916/1294458.

    This is necessary to provide some sort of structural equivalence of a
    generated XML tree; however there is no XML deserialisation implementation
    yet. A naive text comparison fails because it seems it enforces ordering,
    which seems to vary between python versions etc. Strictly speaking, I
    think, only the *leaf-list* element mandates ordering.. this function uses
    simple sorting on tag name, which I think, should maintain the relative
    order of these elements.
    """
    if e1.tag != e2.tag:
        return False
    if e1.text != e2.text:
        return False
    if e1.tail != e2.tail:
        return False
    if e1.attrib != e2.attrib:
        return False
    if len(e1) != len(e2):
        return False
    # list(element) replaces the getchildren() call, which was removed in Python 3.9.
    e1_children = sorted(list(e1), key=lambda x: x.tag)
    e2_children = sorted(list(e2), key=lambda x: x.tag)
    if len(e1_children) != len(e2_children):
        return False
    return all(xml_tree_equivalence(c1, c2) for c1, c2 in zip(e1_children, e2_children))
bdd135de65e0ecdf9f6d9d22f03b4b5dc06c476c
40,505
def flip_check(intron, flip_dict):
    """
    Checks an intron against a dictionary of introns with scores that did not
    survive boundary switching. If present, the score resulting from the
    boundary switch will be returned, otherwise None.
    """
    name = intron.get_name()
    if name in flip_dict:
        return flip_dict[name]
    else:
        return None
9b1a392a102c757ccf4938fe0bf3ea6ae4955238
40,507
def correct_title(title):
    """
    Return properly formatted job title.
    """
    # Make sure title is a string
    title = str(title)
    if "grad student" in title.lower():
        return "Grad student"
    # We will group all professors together
    if "professor" in title.lower():
        return "Professor"
    else:
        return "Research staff"
c1ddd3392bc0e1c310d21a2e61086526a086b240
40,508
def _mk_key(srev_info):
    """Returns the key for a SignedRevInfo object."""
    return (srev_info.rev_info().isd_as(), srev_info.rev_info().p.ifID)
780fb59859b514e0f6d4699cb1e6fef1d6e14042
40,511
def get_channels(ifo, plottype):
    """Get a list of channels to plot for a given IFO.

    Plot Type must be either 'irigb' or 'duotone'.
    """
    if plottype == "irigb":
        return ['{}:CAL-PCAL{}_IRIGB_OUT_DQ'.format(ifo, arm) for arm in ['X', 'Y']]
    elif plottype == "duotone":
        return ['{}:CAL-PCAL{}_FPGA_DTONE_IN1_DQ'.format(ifo, arm) for arm in ['X', 'Y']]
    else:
        raise ValueError("Must specify 'irigb' or 'duotone' for plottype.")
d0c46f2a4f39b4a9eeb79a7ea9b43e994c95ad24
40,514
def use_tpu(tpu_cores: int, tpu_resource: str, tf_version: str):
    """An operator that configures GCP TPU spec in a container op.

    Args:
        tpu_cores: Required. The number of cores of TPU resource. For example,
            the value can be '8', '32', '128', etc. Check more details at:
            https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.
        tpu_resource: Required. The resource name of the TPU resource. For
            example, the value can be 'v2', 'preemptible-v1', 'v3' or
            'preemptible-v3'. Check more details at:
            https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.
        tf_version: Required. The TensorFlow version that the TPU nodes use.
            For example, the value can be '1.12', '1.11', '1.9' or '1.8'.
            Check more details at:
            https://cloud.google.com/tpu/docs/supported-versions.
    """
    def _set_tpu_spec(task):
        task.add_pod_annotation('tf-version.cloud-tpus.google.com', tf_version)
        task.add_resource_limit('cloud-tpus.google.com/{}'.format(tpu_resource), str(tpu_cores))
        return task
    return _set_tpu_spec
ce5b9869512119966d59f1c0fb5ec53eeee53237
40,517
import json
from pathlib import Path
from typing import Any


def _save_to_json(data: Any, path: Path) -> Path:
    """
    Save data in JSON format.

    Parameters
    ----------
    data : Any
        The data to save
    path : Path
        The destination path

    Returns
    -------
    Path
        The path the data was saved to
    """
    with open(path, "w") as f:
        json.dump(data, f)
    return path
d3fb406bc4767e2e5ce88b83126e68b0906850b6
40,520