Columns: content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k)
def field_name(component):
    """Return the name for the FormField built for the given component."""
    return 'comp-' + component.name
84ed4617b3843d80c5cf899eca02cc2692e1413c
89,966
def parse_wildcards(props):
    """Pull out the wildcard attributes from the Component props.

    Parameters
    ----------
    props: dict
        Dictionary with {propName: propMetadata} structure

    Returns
    -------
    list
        List of Dash valid wildcard prefixes
    """
    list_of_valid_wildcard_attr_prefixes = []
    for wildcard_attr in ["data-*", "aria-*"]:
        if wildcard_attr in props:
            list_of_valid_wildcard_attr_prefixes.append(wildcard_attr[:-1])
    return list_of_valid_wildcard_attr_prefixes
6751e178de74748f3cbb3032888dd57850c75086
602,668
def search_lhn(city):
    """Search for an existing LHN within city. Returns True if an LHN is found.

    Parameters
    ----------
    city : object
        City object of pyCity_calc

    Returns
    -------
    has_lhn : bool
        Defines whether an LHN system exists in the city
    """
    has_lhn = False
    for edge in city.edges(data=True):
        if 'network_type' in edge[2]:
            if (edge[2]['network_type'] == 'heating'
                    or edge[2]['network_type'] == 'heating_and_deg'):
                has_lhn = True
                break
    return has_lhn
37e435918d93179d8ffb11e476da99798a7ca701
595,162
def _remove_empty_events(sse):
    """
    Given a sequence of synchronous events (SSE) `sse` consisting of a pool of
    pixel positions and associated synchronous events (see below), returns a
    copy of `sse` where all empty events have been removed.

    `sse` must be provided as a dictionary of type

    .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},

    where each `i`, `j` is an integer and each `S` is a set of neuron IDs.

    Parameters
    ----------
    sse : dict
        A dictionary of pixel positions `(i, j)` as keys, and sets `S` of
        synchronous events as values (see above).

    Returns
    -------
    sse_new : dict
        A copy of `sse` where all empty events have been removed.
    """
    sse_new = sse.copy()
    for pixel, link in sse.items():
        if link == set([]):
            del sse_new[pixel]
    return sse_new
0596d43cc75fdd040c5096e3ddb81277b48d7456
34,332
def get_log_info(prefix='', rconn=None):
    """Return info log as a list of log strings, newest first.
    On failure, returns an empty list."""
    if rconn is None:
        return []
    # get data from redis
    try:
        logset = rconn.lrange(prefix + "log_info", 0, -1)
    except Exception:
        return []
    if logset:
        loglines = [item.decode('utf-8') for item in logset]
    else:
        return []
    loglines.reverse()
    return loglines
d333fbeaff754e352a0b84c10f4d28e148badfa0
7,816
import json


def _read_notebook_data_dict(notebook_path: str) -> dict:
    """
    Read a dictionary of notebook data.

    Parameters
    ----------
    notebook_path : str
        Path of target notebook.

    Returns
    -------
    notebook_data_dict : dict
        A dictionary of notebook data.
    """
    with open(notebook_path, 'r') as f:
        notebook_data_str: str = f.read()
    notebook_data_dict: dict = json.loads(notebook_data_str)
    return notebook_data_dict
c74fabb3ad1ff7d0e5d002791b1aef08a353199a
39,241
def join_paths(*args):
    """Join paths without duplicating separators.

    This is roughly equivalent to Python's `os.path.join`.

    Args:
        *args (:obj:`list` of :obj:`str`): Path components to be joined.

    Returns:
        :obj:`str`: The concatenation of the input path components.
    """
    result = ""
    for part in args:
        if part.endswith("/"):
            # Strip the trailing separator so it is not duplicated below
            # (the original `part = part[-1]` kept only the slash itself).
            part = part[:-1]
        if part == "" or part == ".":
            continue
        result += part + "/"
    return result[:-1]
b48f8805c59e6a7038121a85f828ff3f8e792e29
602,035
def single_cond_prob_to_str(grid_idx, val, num_indices=6):
    """Generate the string representing the probability: Pr(o_i = val)

    ex: For Pr(o_2 = 1), we'd have the string '21'
    NB we're 1-indexing here
    """
    assert grid_idx >= 1 and grid_idx <= num_indices
    return str(grid_idx) + str(val)
462b35da49ae54b1fef9853243e902871abc76f3
567,998
def wavelength_pm(voltage_kV):
    """
    Calculates the relativistic electron wavelength in picometers
    based on the microscope accelerating voltage

    Parameters
    ----------
    voltage_kV: float
        microscope operating voltage in kilo electronVolts

    Returns
    -------
    wavelength: float
        relativistic electron wavelength in picometers

    :Authors:
        Debangshu Mukherjee <mukherjeed@ornl.gov>
    """
    m = 9.109383 * (10 ** (-31))  # mass of an electron
    e = 1.602177 * (10 ** (-19))  # charge of an electron
    c = 299792458  # speed of light
    h = 6.62607 * (10 ** (-34))  # Planck's constant
    voltage = voltage_kV * 1000
    numerator = (h ** 2) * (c ** 2)
    denominator = (e * voltage) * ((2 * m * (c ** 2)) + (e * voltage))
    wavelength = (10 ** 12) * ((numerator / denominator) ** 0.5)  # in picometers
    return wavelength
cd8009db22e7d7e6113a611208a49ff5c7387842
245,728
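A quick sanity check of wavelength_pm above; the textbook value for a 200 kV microscope is roughly 2.51 pm:

print(round(wavelength_pm(200), 2))  # ≈ 2.51 pm at 200 kV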
import six


def construct_mirror_name(volume):
    """Constructs MirrorView name for volume."""
    return 'mirror_' + six.text_type(volume.id)
75ec30c8e5cf204f525301ea0fd988222c1d1cf5
39,870
import codecs

import yaml


def parse_yaml_or_json(path):
    """
    Return parsed YAML or JSON for a path to a file.
    """
    # yaml.safe_load parses JSON as well, since JSON is (nearly) a subset of YAML.
    with codecs.open(path, mode='r', encoding='utf-8') as infile:
        doc = yaml.safe_load(infile)
    return doc
6902c3fcc67e636c35fc9576106943e8bd8d2541
655,883
def sort(df, sort_column, ascending=True):
    """
    Sorts a given DataFrame by a given column and returns it.

    :param df: (pandas DataFrame) the data to be sorted.
    :param sort_column: (string) describes which column on the given
        DataFrame to sort. Raises KeyError if the column does not exist
        in the DataFrame.
    :param ascending: (boolean, optional) if True, sorts the given DataFrame
        in ascending order on the given column
    :return df: (pandas DataFrame) the data, now sorted on the given sort column.
    """
    if sort_column not in df.columns:
        raise KeyError("The sorting column is not present in the given DataFrame.")
    return df.sort_values(sort_column, ascending=ascending)
b2bc3d2deefbd5134c0294824d7ab2cf83fc4549
616,258
import torch


def compute_mean_range(x: torch.Tensor, print_values=False):
    """
    Compute mean and range of values over dataset
    (for inputs / outputs standardization)
    """
    Max, _ = torch.max(x, dim=0)
    Min, _ = torch.min(x, dim=0)
    Mean = (Max + Min) / 2.0
    Range = (Max - Min) / 2.0
    if print_values:
        print("Standardization enabled.")
        print("Mean:", Mean.shape, "-->", Mean)
        print("Range:", Range.shape, "-->", Range)
    # .any() correctly detects near-constant features; the original
    # `.nonzero().sum() > 0` missed a small range located at index 0.
    if (Range < 1e-6).any():
        print(
            "[Warning] Normalization: the following features have a range of values < 1e-6:",
            (Range < 1e-6).nonzero(),
        )
    Range[Range < 1e-6] = 1.0
    return Mean, Range
c1b14e0c253366122a7daa777a8d1f7b8f8d4259
562,890
from collections import Counter


def build_vocab(train_corpus):
    """
    Build a vocabulary with word frequencies for an entire corpus.

    Returns: {word : (ID, frequency)}
    """
    vocab = Counter()
    for line in train_corpus:
        for token in line:
            vocab.update([token])
    return {word: (i, freq) for i, (word, freq) in enumerate(vocab.items())}
1404a5219df07999a1d40045eb02ac7fd651b354
412,591
import functools


def handle_nulls(function):
    """
    Decorator to return null if any of the input arguments are null.
    """
    @functools.wraps(function)  # preserve the wrapped function's metadata
    def inner(*args, **kwargs):
        if any(arg is None for arg in args):
            return None
        return function(*args, **kwargs)
    return inner
f8dc8f8620aae74d90d21fb084306a807f627e98
371,528
import requests


def edit_link(token, link, title):
    """
    Edits an already existing Bitly link's title.

    Args:
        token (str): Bitly access token.
        link (str): Shortened URL to be edited by Bitly.
        title (str): Updated Bitly link title.

    Returns:
        Bitly status information and the returned Bitly link on success.
    """
    # Passing the query string via `params` lets requests URL-encode the
    # link and title properly, unlike the original string formatting.
    r = requests.get(
        "https://api-ssl.bitly.com/v3/user/link_edit",
        params={"access_token": token, "link": link, "edit": "title", "title": title},
    )
    return r.json()
3fbdc07f0e2d7b54787295250654ffffbf97054b
681,083
def service_level(orders_received, orders_delivered):
    """Return the inventory management service level metric, based on the
    percentage of received orders delivered.

    Args:
        orders_received (int): Orders received within the period.
        orders_delivered (int): Orders successfully delivered within the period.

    Returns:
        Percentage (float) of orders received that were delivered within
        the period.
    """
    return (orders_delivered / orders_received) * 100
8132596dbb8e845aa116bb2451ef89761897b5db
675,713
def pull_partition_status(partition):
    """
    Retrieve the partition operational status as fast as possible and
    return it.
    """
    parts = partition.manager.cpc.partitions.list(
        filter_args={'name': partition.name})
    if len(parts) != 1:
        raise AssertionError()
    this_part = parts[0]
    actual_status = this_part.get_property('status')
    return actual_status
074d2c436c5e7b6cb2d7579305a4ffc678b53d77
591,398
def getMovingWindowSegments(data, windowSize):
    """
    Given a 1D list of data, slide a window along to create individual
    segments and return a list of lists (each of length windowSize)
    """
    # +1 so the final window ending at the last element is included
    # (the original len(data) - windowSize dropped the last segment).
    segmentCount = len(data) - windowSize + 1
    segments = [None] * segmentCount
    for i in range(segmentCount):
        segments[i] = data[i:i + windowSize]
    return segments
99c885ff00f8a94669d85ec926af18f4ccdf31e6
526,404
def decode_material(encoded_material, vec_to_elem_dict):
    """Input:
        - encoded_material: vector
        - vec_to_elem_dict: dict mapping vector indexes to elements
    Returns:
        - decoded material (dictionary with stoich coeffs)
    """
    decoded_material = {vec_to_elem_dict[index]: val
                        for index, val in enumerate(encoded_material)}
    return decoded_material
c40f8858cf92dee3a09d1968f4a8a6e4457fa5a8
395,363
def get_required_capacity_types_from_database(conn, scenario_id):
    """
    Get the required type modules based on the database inputs for the
    specified scenario_id. Required modules are the unique set of generator
    capacity types in the scenario's portfolio.

    :param conn: database connection
    :param scenario_id: int, user-specified scenario ID
    :return: List of the required type modules
    """
    c = conn.cursor()
    # Use a bound parameter (as the second query already does) rather than
    # string formatting, which is vulnerable to SQL injection.
    project_portfolio_scenario_id = c.execute(
        """SELECT project_portfolio_scenario_id
           FROM scenarios
           WHERE scenario_id = ?""",
        (scenario_id,)
    ).fetchone()[0]

    required_capacity_type_modules = [
        p[0] for p in c.execute(
            """SELECT DISTINCT capacity_type
               FROM inputs_project_portfolios
               WHERE project_portfolio_scenario_id = ?""",
            (project_portfolio_scenario_id,)
        ).fetchall()
    ]

    return required_capacity_type_modules
bdd4f101465c55b712eb3f54797bbf213ed50b80
695,517
def get_shapes_F1(F1):
    """
    Get a string showing the shapes of F1.

    Parameters
    ----------
    F1: list of multidimensional np.arrays (each element holds the samples
        for each station-polarization).

    Returns
    -------
    out : str
    """
    return str([F1[i].shape for i in range(len(F1))])
b8c40ad7ba688612ae82056b8ae060f776c8c21b
97,091
def moderate(func):
    """Map [0,1] -> [0,1] with bias towards 0.5.

    For instance this is needed to dampen extremes.
    """
    def f(x):
        return 1/2 + 4 * (func(x) - 1/2)**3
    return f
541deeefacb0559053d8f3f204036910c12ad415
364,403
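A short usage sketch of moderate above, applied to the identity function: 0.5 and the endpoints are preserved, while intermediate values are pulled toward 0.5:

damped = moderate(lambda x: x)
print(damped(0.5))            # 0.5 (fixed point)
print(damped(1.0))            # 1.0 (endpoints preserved)
print(round(damped(0.9), 3))  # ≈ 0.756, pulled toward 0.5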
import math


def get_approx_flame_length(head_fire_intensity: float):
    """
    Returns an approximation of flame length (in meters).
    Formula used is a field-use approximation of
    L = (I / 300)^(1/2), where L is flame length in m
    and I is Fire Intensity in kW/m
    """
    return math.sqrt(head_fire_intensity / 300)
5728ba488c8535ea27df31526b9e67fd9d01c6aa
667,874
import torch


def clip_tensor(x, lb=0., ub=1.):
    """
    Clip a tensor to be within lb and ub.

    :param x:
    :param lb: lower bound (scalar)
    :param ub: upper bound (scalar)
    :return: clipped version of x
    """
    return torch.clamp(x, min=lb, max=ub)
ce6d53080285bf53c118f6f3cc9cc22830d792d1
75,918
def rectCenter(rect):
    """Determine rectangle center.

    Args:
        rect: Bounding rectangle, expressed as tuples
            ``(xMin, yMin, xMax, yMax)``.

    Returns:
        A 2D tuple representing the point at the center of the rectangle.
    """
    (xMin, yMin, xMax, yMax) = rect
    return (xMin + xMax) / 2, (yMin + yMax) / 2
4b1acca6aa11b293daee91de38fd7ebed7a7371e
514,038
def find_instance_in_args(obj, args):
    """Find the first instance of the given object type in args.

    Args:
        obj(type): type of object to look for.
        args(iterable): arguments to search for the given object type.

    Returns:
        obj. instance of the obj type found in args.
    """
    # Raises StopIteration if no argument of the given type is present.
    return next(filter(lambda arg: isinstance(arg, obj), args))
b5e7c9562c240eb22a9d04bc556f7080b32436c9
188,310
def extremum(a, b, c) -> tuple:
    """
    Returns the (x, y) coordinates of the extremum of the curve given by the
    polynomial, f(x | a, b, c). The extremum can refer to either a maximum or
    minimum value. When 'a' is negative, the max or top of the curve is
    returned. Otherwise, the min is returned. The value of the x-coordinate
    can be thought of as the midpoint of the curve.
    """
    # Check params.
    if a == 0:
        raise ValueError("Argument Value Error:\tThe parameter, 'a', in a "
                         "polynomial, f(x) = ax^2 + bx + c, cannot be equal to zero.")
    x = -b / (2 * a)
    return (x, a * x**2 + b * x + c)
fde4bd46492ea096c71d9d3091903592e6fca3d4
360,985
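A worked example for extremum above: f(x) = x^2 - 4x + 3 has its vertex at x = -b/(2a) = 2, where f(2) = -1:

print(extremum(1, -4, 3))  # (2.0, -1.0)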
def replicate(lst, n):
    """
    Replicate each of the elements of lst a given number of times.

    For example, for lst = [a, b, c] and n = 3, the returned list is
    [a, a, a, b, b, b, c, c, c].
    """
    # Works for arbitrary element types; the original string-concatenation
    # helper only worked for single-character strings.
    return [val for val in lst for _ in range(n)]
d8c78f8c0737704ec1969d540e5d89b990ea8340
64,142
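Usage sketch for replicate above:

print(replicate([1, 2], 3))  # [1, 1, 1, 2, 2, 2]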
import logging


def is_valid_dataset(platform):
    """Filters out datasets that can't be used because they are missing
    required data such as the release date or an original price. Other
    required data includes the name and abbreviation of the platform.
    """
    # Check the name first, since the other messages refer to it.
    if 'name' not in platform or not platform['name']:
        logging.warning(u"No platform name found for given dataset")
        return False
    if 'release_date' not in platform or not platform['release_date']:
        logging.warning(u"{0} has no release date".format(platform['name']))
        return False
    if 'original_price' not in platform or not platform['original_price']:
        logging.warning(u"{0} has no original price".format(platform['name']))
        return False
    if 'abbreviation' not in platform or not platform['abbreviation']:
        logging.warning(u"{0} has no abbreviation".format(platform['name']))
        return False
    return True
715187e707e6e070bb170ebceedd5e05bd9124c9
680,070
def none_low_med_high_to_value(scale_value):
    """
    This method will transform a string value from the None / Low / Med / High
    scale to its confidence integer representation.

    The scale for this confidence representation is the following:

    .. list-table:: None, Low, Med, High to STIX Confidence
        :header-rows: 1

        * - None/Low/Med/High
          - STIX Confidence Value
        * - Not Specified
          - Not Specified
        * - None
          - 0
        * - Low
          - 15
        * - Med
          - 50
        * - High
          - 85

    Args:
        scale_value (str): A string value from the scale. Accepted strings
            are "None", "Low", "Med" and "High". Argument is case sensitive.

    Returns:
        int: The numerical representation corresponding to values in the
        None / Low / Med / High scale.

    Raises:
        ValueError: If `scale_value` is not within the accepted strings.
    """
    if scale_value == 'None':
        return 0
    elif scale_value == 'Low':
        return 15
    elif scale_value == 'Med':
        return 50
    elif scale_value == 'High':
        return 85
    else:
        raise ValueError("STIX Confidence value cannot be determined for %s"
                         % scale_value)
fb9daf699e2b1f9280290ad2cf37564cd1e440f2
130,702
def convert2voxels(x_um_rw, imExtends, voxelSize):
    """
    Converting from real world um coordinates to 0 origin voxel.

    :param x_um_rw: coordinates in real world frame, dimensions in um
    :param imExtends (list of lists): the first list are the initial extends
        of the image, and the second list the final ones. Dimensions are um
        and they are used to localize the image in the real world frame
    :param voxelSize: voxel size
    :return: coordinates in 0 origin frame, dimensions in voxels
    """
    # First we bring the coordinates origin to 0
    x_um_0 = x_um_rw - imExtends[0]
    # And then we transform the dimensions to voxels
    X_voxel_0 = x_um_0 / voxelSize
    return X_voxel_0
123414615e40bb41802b8f5f072bb994f859f3d7
696,197
# NOTE: `Type` below is expected to be an enum/mapping from type names to
# DB-API type codes defined alongside this function; the original
# `from typing import Type` import appears to be an extraction artifact,
# since typing.Type[...] would not perform such a lookup.
from typing import Type


def get_description_from_payload(payload):
    """
    Return description from a single row.

    We only return the name, type (inferred from the data) and if the values
    can be NULL. String columns in Druid are NULLable. Numeric columns are
    NOT NULL.
    """
    return [
        (
            col['label'],               # name
            Type[col['type'].upper()],  # type_code
            None,                       # [display_size]
            None,                       # [internal_size]
            None,                       # [precision]
            None,                       # [scale]
            True,                       # [null_ok]
        )
        for col in payload['table']['cols']
    ]
50a2124b39fcc881e73408bb8d69a573805fa7f5
596,143
from itertools import chain


def get_words(words):
    """Splits out a parameter set into individual words.

    First, it splits the input parameters by space. Then, it chains them
    together. Then, it joins them together delimited by , and splits them
    out once again. The end result is that words can be provided as
    individual parameters, or a single comma separated string or a single
    space separated string or some combination thereof.

    Args:
        words: The word list to split out

    Returns:
        A set (hash set) of words which were in the provided input
    """
    # First, convert all arguments to strings
    words = [i if isinstance(i, str)
             else (','.join(i) if hasattr(i, '__iter__') else str(i))
             for i in words]
    # Split on spaces, then chain the resulting arrays together.
    # Join into a single string delimited by comma.
    words = ','.join(chain.from_iterable(i.split() for i in words))
    # Split out the joined string by comma (to also split any single words
    # which contained a comma). Ignore empty strings.
    return set(i for i in words.split(',') if i)
bd5c26c6525460ea0127fd0c73b8f0b049370894
163,065
def validate_token_sequence(token_sequence: str) -> bool:
    """Returns True, if `token_sequence` is properly formed.

    Token sequences are strings or words which are separated by single
    blanks with no leading or trailing blank.
    """
    # No leading blank, no trailing blank, and no run of two blanks anywhere
    # (the double-space search string matches the "single blanks" contract).
    return (token_sequence[:1] != ' ' and token_sequence[-1:] != ' '
            and token_sequence.find('  ') < 0)
9419ce9c217d2cf2fa84b3fd34e5c6ac9b6de66f
108,060
async def get_unbuilt_count(db, ref_id: str) -> int:
    """
    Return a count of unbuilt history changes associated with a given `ref_id`.

    :param db: the application database client
    :param ref_id: the id of the ref to count unbuilt changes for
    :return: the number of unbuilt changes
    """
    return await db.history.count_documents({
        "reference.id": ref_id,
        "index.id": "unbuilt"
    })
6f055c81ef90bc68b314085fcb9c653a5d91caed
488,171
def get_all_index_in_list(L, item):
    """Get all the indices of matching items in the list.

    :param L: list
    :param item: item to be found
    :return: the indices of all matching items in the list
    """
    return [index for (index, value) in enumerate(L) if value == item]
3396daad5304ced92baa13f0ca15aab5b904d95b
483,093
def transaction_id(request):
    """
    Extract the transaction id from the given request.

    :param IRequest request: The request we are trying to get the
        transaction id for.

    :returns: A string transaction id.
    """
    return request.responseHeaders.getRawHeaders('X-Response-Id')[0]
61329f18658f5d3fcce756cb2ac816e3e89ccd62
84,288
def unique_preserve_order(lst):
    """
    Deduplicate lst without changing the order. Return a new list.

    The elements of lst are not required to be hashable.
    """
    new_lst = []
    for item in lst:
        if item not in new_lst:
            new_lst.append(item)
    return new_lst
4ddbf118947d7e1b6b1619cee7fd9193f60f02fb
510,157
def remove(bowl_a, bowl_b):
    """Return bowl_b without the "ingredients" of bowl_a."""
    for ingredient in bowl_a:
        # If an ingredient is in both bowl_a and bowl_b, remove it.
        if ingredient in bowl_b:
            bowl_b.remove(ingredient)
    return bowl_b
5480c6680f9cac9fd033dedacc14da5bc58fc551
88,905
def filter_image_only(f):
    """Filter only png files through."""
    return f.endswith('png')
2177bded7d41b7c6800a44e7678d6dae10c7064d
68,903
import math


def mean_and_std(values):
    """Compute mean and standard deviation."""
    size = len(values)
    mean = sum(values) / size
    s = 0.0
    for v in values:
        s += (v - mean) ** 2
    std = math.sqrt((1.0 / (size - 1)) * s)
    return mean, std
962bbf6de83afe0dd7c52132fe5ebaeb26397cb8
437,968
def convertConfigDict(origDict, sep="."):
    """
    For each key in the dictionary of the form <section>.<option>, a separate
    dictionary is formed for every "key" - <section>, while the <option>s
    become the keys for the inner dictionary.
    Returns a dictionary of dictionaries.
    """
    result = {}
    for keys in origDict:
        tempResult = result
        parts = keys.split(sep)
        for part in parts[:-1]:
            tempResult = tempResult.setdefault(part, {})
        tempResult[parts[-1]] = origDict[keys]
    return result
da4952ee883cd3d2d0d01e64f9055c22a277a16f
676,817
def get_translation_cache_key(translated_model, master_id, language_code):
    """
    The low-level function to get the cache key for a translation.
    """
    # Always cache the entire object, as this already produces
    # a lot of queries. Don't go for caching individual fields.
    return 'parler.{0}.{1}.{2}.{3}'.format(
        translated_model._meta.app_label, translated_model.__name__,
        master_id, language_code)
0da47b111ab3aad2e59679b29eb65b2325ef24d1
253,607
def simple_score(correlation, sharpe, drawdown, alpha, sensitivity, out=None):
    """
    Calculate a simple score on a scale of 1 to 10 based on the given
    metrics. Each metric is given 2 points. If alpha is zero, then the score
    is zero since you have made no positive returns.

    correlation
        correlation of strategy, between 1 and -1
    sharpe
        sharpe ratio
    drawdown
        max drawdown percentage
    alpha
        excess returns
    sensitivity
        price sensitivity based on open=high or open=low prices
    out
        output format. returns score if None else the list of points
        for any other argument
    """
    # A list to hold points for each of the metrics
    points = [0, 0, 0, 0, 0]
    correlation = abs(correlation)
    drawdown = abs(drawdown)
    if correlation < 0.1:
        points[0] = 2
    else:
        points[0] = 2 * (1 - correlation)
    if sharpe > 0:
        points[1] = min(2, sharpe)
    if abs(drawdown) < 0.05:
        points[2] = 2
    else:
        points[2] = max(0, 2 - ((drawdown - 0.05) * 0.25 * 100))
    if alpha > 0:
        points[3] = min(2, alpha * 100)
    if sensitivity < 0.1:
        points[4] = 2
    else:
        points[4] = max(0, (0.3 - sensitivity) * 10)
    if out == "list":
        return points
    else:
        return 0 if alpha <= 0 else sum(points)
2dd8bdfec46e571afd7f8013bfbb574ad304d8a6
400,895
def bw_silver(x, y):
    """
    Returns the Silverman bandwidth factor.

    Parameters
    ----------
    x : array_like
        Color from the CMD.
    y : array_like
        Magnitude.

    Returns
    -------
    bw : float
        Silverman's bandwidth factor.
    """
    d = 2  # dimensionality of the (color, magnitude) data
    n = len(x)
    bw = (n * (d + 2) / 4) ** (-1 / (d + 4))
    return bw
5c891df2bdad0d3d4a952b7fa52a24aa5819380f
365,096
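A worked value for bw_silver above: with d = 2 the factor reduces to n^(-1/6), so 1000 samples give about 0.316 (y is part of the signature but does not enter the calculation):

print(round(bw_silver(list(range(1000)), None), 3))  # ≈ 0.316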
def is_number(val):
    """
    Parameters
    ----------
    val: str, arbitrary input

    Returns
    -------
    bool, True if val is interpretable as a float and False else
    """
    try:
        float(val)
        return True
    except ValueError:
        return False
b20c034093c82a274c44d5b0525d4f6c8ce3fbfe
513,509
def user_says_yes(
    msg: str = "\nDo you want to delete log files for this source? (y/n) ",
) -> bool:
    """Asks the user to enter either "y" or "n" to confirm. Returns boolean."""
    choice = None
    while choice is None:
        user_input = input(msg)
        if user_input.lower() == "y":
            choice = True
        elif user_input.lower() == "n":
            choice = False
        else:
            print('Please enter either "y" or "n".')
    return choice
f984eb2524a84efe50330ac993a8b73802d15903
132,648
def get_clouds_landsat(file):
    """Purpose: read the cloud cover level from a Landsat metadata file.

    Parameters
    ----------
    file: path
        path to metadata file

    Returns
    -------
    cloud level
    """
    with open(file, "r") as f:
        lines = f.readlines()
    cloud = None
    for line in lines:
        if "CLOUD_COVER_LAND" in line:
            cloud = line.split("=")[1].replace("\n", "").strip()
    return cloud
cd90fe65b25d45ffc1ffe9c2de993463379a831f
340,207
def test_function_attributes(cache):
    """Simple tests for attribute preservation."""

    def tfunc(a, b):
        """test function docstring."""
        return a + b

    cfunc = cache()(tfunc)
    assert cfunc.__doc__ == tfunc.__doc__
    assert hasattr(cfunc, 'cache_info')
    assert hasattr(cfunc, 'cache_clear')
    assert hasattr(cfunc, '__wrapped__')
591e0148a5fbeb2cece1114f728a34ed6c453b9c
671,644
from pathlib import Path
from typing import List


def get_regions_data(path: Path) -> List[dict]:
    """Gets base data for a region in a page.

    :param path: Path to the region directory.
    :return: List of dictionaries holding base region data.
    """
    regions = list()
    for region in sorted(path.glob("./*.offset")):
        region_dict = dict()
        with region.open("r") as file:
            (x, y) = file.read().split(",")
            region_dict["name"] = str(region.stem)
            region_dict["offset"] = (int(x), int(y))
        regions.append(region_dict)
    return regions
b5e4d10f58815eaec23104ec37ac6c9beaa45c43
676,050
def example_function(myinput: int) -> int:
    """
    Returns the given parameter without change.

    This function is just for demo purposes and should be removed when the
    template is used for an actual project.

    Args:
        myinput: The parameter to return.

    Returns:
        The given parameter without change.
    """
    return myinput
9c599f24075b837b64facbcef0eadf07462f1454
52,269
def is_pip_include_dependency(package: str) -> bool:
    """Returns True if the given package name (as used in a Conda environment
    file) relies on PIP includes, in the format "-r requirements.txt".

    :param package: The name of the PIP dependency to check.
    :return: True if the package name is a PIP include statement.
    """
    return package.strip().startswith("-r ")
bc31dc61eedf56e1a9acb555fcd5808ed2d5c9ce
534,762
def text_to_string(filename, useEncoding):
    """Read a text file and return a string."""
    with open(filename, encoding=useEncoding, errors='ignore') as infile:
        return infile.read()
f879bb747699496204820b74944fd563658a7117
707,080
def match_probability(h, bands, similarity):
    """
    Returns the probability that two items will match, given the number of
    integers in a minhash signature, the number of bands dividing the
    signature matrix, and the desired similarity.

    Args:
        h (int): Number of integers in the minhash signature.
        bands (int): Number of bands dividing the signature matrix.
        similarity (float): Desired Jaccard similarity.

    Returns:
        float: The match probability.
    """
    # With r = h / bands rows per band: P = 1 - (1 - s^r)^bands
    return 1.0 - (1.0 - similarity ** (h / bands)) ** bands
f1c3dd3288b514de5df212eae5c7f1fa606d427b
505,830
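A usage sketch of the S-curve computed by match_probability above, with a 128-integer signature split into 32 bands (4 rows per band): similar pairs are almost certainly candidates while dissimilar ones rarely are:

print(round(match_probability(128, 32, 0.8), 3))  # ≈ 1.0
print(round(match_probability(128, 32, 0.3), 3))  # ≈ 0.229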
def add_colons(df, id_name='', col_types={}):
    """
    Adds the colons to column names before neo4j import (presumably removed
    by `remove_colons` to make queryable). User can also specify a name for
    the ':ID' column and data types for property columns.

    :param df: DataFrame, the neo4j import data without colons in it
        (e.g. to make it queryable).
    :param id_name: String, name for the id property. If importing a CSV
        into neo4j without this property, Neo4j may use its own internal
        ids, losing this property.
    :param col_types: dict, data types for other columns in the form of
        column_name:data_type
    :return: DataFrame, with neo4j compatible column headings
    """
    reserved_cols = ['id', 'label', 'start_id', 'end_id', 'type']

    # Get the reserved column names that need to be changed
    to_change = [c for c in df.columns if c.lower() in reserved_cols]
    if not to_change:
        raise ValueError("Neo4j reserved columns (['id', 'label', 'start_id', "
                         "'end_id', 'type']) not found in DataFrame")

    # Add any column names that need to be typed
    to_change += [c for c in df.columns if c in col_types.keys()]

    change_dict = {}
    for name in to_change:
        # Reserved column names go after the colon
        if name.lower() in reserved_cols:
            if name.lower() == 'id':
                new_name = id_name + ':' + name.upper()
            else:
                new_name = ':' + name.upper()
        else:
            # Data types go after the colon, while names go before.
            new_name = name + ':' + col_types[name].upper()
        change_dict.update({name: new_name})

    return df.rename(columns=change_dict)
d7bca92e939c7ca109cd66841bb2a45d2fdbeac0
62,001
def dequeue(queue):
    """Pop and return the first element of the queue, or None if it is empty."""
    return None if not queue else queue.pop(0)
ad8382c79a230a97e25b975bce7fbb67c65938c3
475,497
def is_nonterminal(subtree):
    """True if this subtree does not consist of a single terminal node
    (i.e., a word or an empty node)."""
    return isinstance(subtree, list)
0c5ddacc9ea85504af338ff649b068cce827e7e2
259,380
import logging


def allocate_colors(matches):
    """For the array of matching colors, build a new array that represents
    their relative allocations.

    As a side effect, modifies `matches` to add the percentages to each color.
    """
    # Allocate the colors by percentage to individual tiles
    total_points = sum([int(m.num_points) for m in matches])

    # Derive the percentage representation in the color set, throwing out any
    # that are too rarely represented to be displayable in our 10x10 grid
    for m in matches:
        logging.info("Total points {} / num points {} for {}".format(
            total_points, m.num_points, m.tile))
        alloc = int(m.num_points / total_points * 100)
        m.freq = alloc
    for m in matches[:]:
        if m.freq == 0:
            matches.remove(m)

    # Sometimes the sums don't total 100 because we dropped out low
    # representation items, so add those back
    sum_counts = sum(x.freq for x in matches)
    remainder = 100 - sum_counts
    matches.sort(key=lambda x: x.freq)
    matches.reverse()
    matches[-1].freq += remainder
    return matches
8748a19718439d62a8d567ce91ef4f3c774c0cad
162,155
import importlib


def load_object(path):
    """
    Load an object given its absolute object path, and return it.
    """
    if not isinstance(path, str):
        if callable(path):
            return path
        else:
            raise TypeError("Unexpected argument type, expected string "
                            "or object, got: %s" % type(path))

    try:
        dot = path.rindex('.')
    except ValueError:
        raise ValueError(f"Error loading object '{path}': not a full path")

    module, name = path[:dot], path[dot + 1:]
    mod = importlib.import_module(module)

    try:
        obj = getattr(mod, name)
    except AttributeError:
        # Fall back to treating the full path as a module itself.
        obj = importlib.import_module(path)
        # raise NameError(f"Module '{module}' doesn't define any object named '{name}'")

    return obj
6c2bdf7575dba7fa8284ba445ae703fc51089b92
520,735
import json


def get_service_set(schema_path: str) -> set:
    """
    Get the set of all services present in a schema.

    Args:
        schema_path: schema file path

    Returns:
        service_set: set of services in file
    """
    service_set = set()
    # The `with` block closes the file; no explicit f.close() is needed.
    with open(schema_path, encoding="UTF-8") as f:
        schema = json.load(f)
        for service in schema:
            service_set.add(service["service_name"])
    return service_set
8e0d51f2af7133ba92451c3fa1d4f9cd4cb4a997
609,961
def non_private_tags_in_dicom_dataset(ds):
    """Return all non-private tags from a DICOM dataset."""
    non_private_tags = []
    for elem in ds:
        if not elem.tag.is_private and not (
            # Ignore retired Group Length elements
            elem.tag.element == 0 and elem.tag.group > 6
        ):
            non_private_tags.append(elem.tag)
    return non_private_tags
5a69e3a8d3936462210e43cfd7a17fcc90e2d9d7
491,911
def ReadInFile(file):
    """
    Read the file and return the content of the file as a list.
    """
    with open(file, "r") as f:
        return f.readlines()
ff339de1029afd947f4da7023a7e1a557d2d0f4e
565,423
import sqlite3


def create_or_open_db(filename):
    """
    Create (1st time) or open database.

    :argument:
        filename: str
            database file
    :returns:
        conn: sqlite3 database connection
        cursor: sqlite3 database cursor
    """
    conn = sqlite3.connect(filename)
    cursor = conn.cursor()
    sql = """
    CREATE TABLE IF NOT EXISTS note (
        id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
        bibkey TEXT UNIQUE NOT NULL,
        author TEXT NOT NULL,
        genre TEXT,
        thesis TEXT,
        hypothesis TEXT,
        method TEXT,
        finding TEXT,
        comment TEXT,
        img_linkstr TEXT
    );"""
    cursor.execute(sql)
    sql = """
    CREATE TABLE IF NOT EXISTS tags (
        bibkey TEXT NOT NULL,
        tag TEXT NOT NULL
    );"""
    cursor.execute(sql)
    # create fts5 virtual table for full text search
    sql = """
    CREATE VIRTUAL TABLE IF NOT EXISTS fts USING fts5(
        author, thesis, hypothesis, method, finding, comment,
        content="note", content_rowid="id"
    );"""
    cursor.execute(sql)
    # create triggers to keep the fts index in sync with the note table
    cursor.execute("""
        CREATE TRIGGER IF NOT EXISTS tbl_ai AFTER INSERT ON note BEGIN
            INSERT INTO fts(rowid, author, thesis, hypothesis, method, finding, comment)
            VALUES (new.id, new.author, new.thesis, new.hypothesis, new.method, new.finding, new.comment);
        END;""")
    cursor.execute("""
        CREATE TRIGGER IF NOT EXISTS tbl_ad AFTER DELETE ON note BEGIN
            INSERT INTO fts(fts, rowid, author, thesis, hypothesis, method, finding, comment)
            VALUES ('delete', old.id, old.author, old.thesis, old.hypothesis, old.method, old.finding, old.comment);
        END;""")
    cursor.execute("""
        CREATE TRIGGER IF NOT EXISTS tbl_au AFTER UPDATE ON note BEGIN
            INSERT INTO fts(fts, rowid, author, thesis, hypothesis, method, finding, comment)
            VALUES ('delete', old.id, old.author, old.thesis, old.hypothesis, old.method, old.finding, old.comment);
            INSERT INTO fts(rowid, author, thesis, hypothesis, method, finding, comment)
            VALUES (new.id, new.author, new.thesis, new.hypothesis, new.method, new.finding, new.comment);
        END;""")
    conn.commit()
    return conn, cursor
df85ed38a2ed1d4c870d1bfdc69b4928c0e4b0c0
470,436
def _loc(content, node):
    """
    Find the location of a node within ``content``.

    Args:
        content (str): The file content
        node (ast.Node): Node to find

    Returns:
        (int, int): Start/end indices of string
    """
    start_line, start_col = node.lineno, node.col_offset
    end_line, end_col = node.end_lineno, node.end_col_offset
    line_lengths = [len(line) for line in content.splitlines(True)]
    idx0 = sum(line_lengths[:start_line - 1]) + start_col
    idx1 = sum(line_lengths[:end_line - 1]) + end_col
    return (idx0, idx1)
6f7c798a300ca579ecbe037b076b7ded5d2a0054
68,365
from typing import Any


def get_class_name(module_ref: Any, suffix: str = "Location") -> str:
    """Get class name to be dynamically imported.

    Class names are expected to be in the following formats:
    example -
        module name: test
        suffix: Abc
        expected class names -
            1. TESTAbc
            2. TestAbc

    :param module_ref: Module from which to get class location type implementation
    :param suffix: suffix for class name
    """
    module_name = module_ref.__name__.split(".")[-1]
    class_names_formats = [
        f"{module_name.title()}{suffix}",
        f"{module_name.upper()}{suffix}",
    ]
    for class_names_format in class_names_formats:
        if hasattr(module_ref, class_names_format):
            return class_names_format

    raise ValueError(
        "No expected class name found; please note that the class names "
        "should follow the expected formats."
    )
f740430d903a5516fedacc621a0c0dcd5981d6f7
539,543
def curry(f):
    """
    Convert a two-parameter function into two one-parameter functions.

    Curried functions will allow the partial application of arguments to
    form new functions.

    ((a, b) -> c) -> a -> b -> c
    """
    return lambda a: lambda b: f(a, b)
bb70cd4f24a52780bf8587806431649f4a375594
488,501
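Usage sketch for curry above:

add = curry(lambda a, b: a + b)
add_five = add(5)   # partially applied
print(add_five(3))  # 8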
from typing import List, Tuple

import torch


def encode_supervisions(
    supervisions: dict, subsampling_factor: int
) -> Tuple[torch.Tensor, List[str]]:
    """
    Encodes Lhotse's ``batch["supervisions"]`` dict into a pair of torch
    Tensor, and a list of transcription strings.

    The supervision tensor has shape ``(batch_size, 3)``. Its second
    dimension contains information about sequence index [0], start
    frames [1] and num frames [2].

    The batch items might become re-ordered during this operation -- the
    returned tensor and list of strings are guaranteed to be consistent
    with each other.
    """
    supervision_segments = torch.stack(
        (
            supervisions["sequence_idx"],
            supervisions["start_frame"] // subsampling_factor,
            supervisions["num_frames"] // subsampling_factor,
        ),
        1,
    ).to(torch.int32)

    # Sort segments by duration (num frames), longest first.
    indices = torch.argsort(supervision_segments[:, 2], descending=True)
    supervision_segments = supervision_segments[indices]

    texts = supervisions["text"]
    texts = [texts[idx] for idx in indices]

    return supervision_segments, texts
8cd6a0ef6fa5027af454e804b82dacfe6f44be12
687,357
def twitter_id_from_timestamp(ts: float) -> int:
    """Get twitter id from timestamp.

    Args:
        ts (float): time stamp in seconds

    Returns:
        int: twitter id representing the timestamp
    """
    return (int(ts * 1000) - 1288834974657) << 22
2eb6f394e217f28d5950d9fe6d03d868d4696ffb
623,257
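A short usage note for the function above: 1288834974657 is the Twitter snowflake epoch in milliseconds (2010-11-04), and the 22-bit shift leaves room for the worker and sequence bits, so ids increase monotonically with time:

assert twitter_id_from_timestamp(1577836801) > twitter_id_from_timestamp(1577836800)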
def getxmlattr(xml_root, path, key):
    """
    Function to extract the attribute of an xml key.
    """
    try:
        res = xml_root.find(path).attrib[key]
    except Exception:
        raise Exception('Cannot find attribute %s at %s' % (key, path))
    return res
eda8a95c75b1d469af6db24e94cd0a1870faebca
295,999
import csv


def read_score_file(filename):
    """
    Reads the given score file.

    :param filename: A CSV file with segment scores.
    :return: A dictionary with segment name to score mappings.
    """
    with open(filename, 'r') as fp:
        csv_file = csv.reader(fp)
        _ = next(csv_file)  # skip the header row
        return {segment_name: float(score) for segment_name, score in csv_file}
5e8053d8345640b77a776d329e2b933b982b700b
247,755
import time

import requests


def make_request(method, url, headers=None, data=None, retries=3):
    """
    Make a request, with the ability to retry on specific return codes.

    :param method: HTTP verb method
    :param url:
    :param retries: int, this should be greater than or equal to 1
    :return:
    """
    no_retry_status = [404, 401, 403]
    may_retry_status = [408, 500, 502, 503]

    if not retries:
        return requests.request(method=method, url=url, headers=headers, data=data)

    while retries:
        r = requests.request(method=method, url=url, headers=headers, data=data)
        if r.status_code in no_retry_status:
            return r
        elif r.status_code in may_retry_status:
            retries -= 1
            time.sleep(1)
            if retries == 0:
                return r
            continue
        else:
            return r
25281e1a4cc1b329e86a782aab19f0b80f70b4b1
473,544
import math


def apply_deadzone(value: float, threshold: float) -> float:
    """Apply a deadzone to a value in [-1,1].

    This ensures that the rest of the input space maps to [-1,1].
    """
    assert 0 <= threshold < 1
    if abs(value) < threshold:
        return 0
    return (value - math.copysign(threshold, value)) / (1 - threshold)
49a0e0c642059865cb1de838bddf14e7ec3a8b9b
54,568
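Usage sketch for apply_deadzone above with a 0.2 threshold: inputs inside the zone collapse to 0 and the rest rescales to cover [-1, 1]:

print(apply_deadzone(0.1, 0.2))            # 0 (inside the deadzone)
print(round(apply_deadzone(0.6, 0.2), 3))  # 0.5
print(round(apply_deadzone(-1.0, 0.2), 3)) # -1.0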
import json
import traceback


def _read_json_with_comments(json_path):
    """Reads a file with '//' comments.

    Reads the file, removes the commented lines and returns a json loads of
    the result.
    """
    file_lines = []
    with open(json_path) as json_file:
        file_lines = json_file.readlines()

    lines_without_comments = []
    for line in file_lines:
        if line.strip()[0:2] != '//' and len(line.strip()) > 0:
            lines_without_comments.append(line)
    file_content_without_comments = ''.join(lines_without_comments)

    json_data = {}
    try:
        json_data = json.loads(file_content_without_comments)
    except Exception:
        print('There was an error reading file {}'.format(json_path))
        print(traceback.format_exc())
    return json_data
f275a6e845f2d728f761371300d8f2f4988897e9
437,174
def translate_key(key):
    """
    Function to return the correct configuration key.
    If not found, return the key itself. Returns a string.
    """
    mapping = {
        'user': 'User',
        'identityfile': 'IdentityFile',
        'proxycommand': 'ProxyCommand',
        'ip': 'Hostname',
        'hostname': 'Hostname',
        'port': 'Port',
    }
    return str(mapping.get(key, key))
9c7b9b32d1b341946a9c66120c7b32c9c304c754
35,110
import yaml


def load_config_file(config_file):
    """Load configuration from a specified yaml config file path."""
    with open(config_file) as f:
        return yaml.load(f, Loader=yaml.FullLoader)
75d7ea22bc50d2c351f052152a5b25169f29ae7a
311,326
def parse_args(parser):
    """
    Parse commandline arguments.
    """
    parser.add_argument('-o', '--output_directory', type=str, default='logs', required=True,
                        help='Directory to save checkpoints')
    parser.add_argument('-d', '--dataset-path', type=str, default='filelists',
                        help='Path to dataset')
    parser.add_argument('--log-file', type=str, default='nvlog.json',
                        help='Filename for logging')
    parser.add_argument('--latest-checkpoint-file', type=str, default='checkpoint_latest.pt',
                        help='Store the latest checkpoint in each epoch')
    parser.add_argument('--phrase-path', type=str, default=None,
                        help='Path to phrase sequence file used for sample generation')
    parser.add_argument('--tacotron2-checkpoint', type=str, default=None,
                        help='Path to pre-trained Tacotron2 checkpoint for sample generation')

    # training
    training = parser.add_argument_group('training setup')
    training.add_argument('--epochs', type=int, required=True,
                          help='Number of total epochs to run')
    training.add_argument('--epochs-per-alignment', type=int, default=10,
                          help='Number of epochs per alignment')
    training.add_argument('--epochs-per-checkpoint', type=int, default=50,
                          help='Number of epochs per checkpoint')
    training.add_argument('--seed', type=int, default=1234,
                          help='Seed for PyTorch random number generators')
    training.add_argument('--dynamic-loss-scaling', type=bool, default=True,
                          help='Enable dynamic loss scaling')
    training.add_argument('--amp-run', action='store_true',
                          help='Enable AMP')
    training.add_argument('--cudnn-enabled', action='store_true',
                          help='Enable cudnn')
    training.add_argument('--cudnn-benchmark', action='store_true',
                          help='Run cudnn benchmark')
    training.add_argument('--disable-uniform-initialize-bn-weight', action='store_true',
                          help='disable uniform initialization of batchnorm layer weight')

    optimization = parser.add_argument_group('optimization setup')
    optimization.add_argument('--use-saved-learning-rate', default=False, type=bool)
    optimization.add_argument('--init-lr', '--initial-learning-rate', default=1e-3,
                              type=float, required=True, help='Initial learning rate')
    optimization.add_argument('--final-lr', '--final-learning-rate', default=1e-5,
                              type=float, required=True, help='Final learning rate')
    optimization.add_argument('--weight-decay', default=1e-6, type=float,
                              help='Weight decay')
    optimization.add_argument('--grad-clip-thresh', default=1.0, type=float,
                              help='Clip threshold for gradients')
    optimization.add_argument('-bs', '--batch-size', default=32, type=int, required=True,
                              help='Batch size per GPU')

    # dataset parameters
    dataset = parser.add_argument_group('dataset parameters')
    dataset.add_argument('--load-mel-from-disk', action='store_true',
                         help='Loads mel spectrograms from disk instead of computing them on the fly')
    dataset.add_argument('--training-anchor-dirs', default=['ljs_mel_text_train_filelist.txt'],
                         type=str, nargs='*', help='Path to training filelist')
    dataset.add_argument('--validation-anchor-dirs', default=['ljs_mel_text_val_filelist.txt'],
                         type=str, nargs='*', help='Path to validation filelist')
    dataset.add_argument('--text-cleaners', nargs='*', default=['basic_cleaners'], type=str,
                         help='Type of text cleaners for input text')

    # audio parameters
    audio = parser.add_argument_group('audio parameters')
    audio.add_argument('--max-wav-value', default=32768.0, type=float,
                       help='Maximum audiowave value')
    audio.add_argument('--sampling-rate', default=22050, type=int,
                       help='Sampling rate')
    audio.add_argument('--filter-length', default=2048, type=int,
                       help='Filter length')
    audio.add_argument('--hop-length', default=275, type=int,
                       help='Hop (stride) length')
    audio.add_argument('--win-length', default=1100, type=int,
                       help='Window length')
    audio.add_argument('--mel-fmin', default=125.0, type=float,
                       help='Minimum mel frequency')
    audio.add_argument('--mel-fmax', default=7600.0, type=float,
                       help='Maximum mel frequency')

    distributed = parser.add_argument_group('distributed setup')
    distributed.add_argument('--distributed-run', default=False, type=bool,
                             help='enable distributed run')
    distributed.add_argument('--rank', default=0, type=int,
                             help='Rank of the process, do not set! Done by multiproc module')
    distributed.add_argument('--world-size', default=1, type=int,
                             help='Number of processes, do not set! Done by multiproc module')
    distributed.add_argument('--dist-url', type=str, default='tcp://localhost:23456',
                             help='Url used to set up distributed training')
    distributed.add_argument('--group-name', type=str, default='group_name', required=False,
                             help='Distributed group name')
    distributed.add_argument('--dist-backend', default='nccl', type=str, choices={'nccl'},
                             help='Distributed run backend')

    return parser
76a417cf89bdb0b36bb60a0f833cfacbcf041dc2
366,860
def get_path(path, search_space, include_key=False):
    """Retrieve a value from a nested dict by following the path.

    Throws KeyError if any key along the path does not exist.
    """
    if not isinstance(path, (tuple, list)):
        path = [path]
    current_value = search_space[path[0]]
    if len(path) == 1:
        return (path[0], current_value) if include_key else current_value
    return get_path(path[1:], current_value, include_key)
778495fad59292ea2c75b062a3bdfb369b349a17
160,290
def find_mdf(df):
    """
    Finds the median destructive field for AF demag data.

    Parameters
    ----------
    df : dataframe of measurements

    Returns
    -------
    mdf : median destructive field
    """
    mdf_df = df[df.meas_norm <= 0.5]
    mdf_high = mdf_df.treat_ac_field_mT.values[0]
    mdf_df = df[df.meas_norm >= 0.5]
    mdf_low = mdf_df.treat_ac_field_mT.values[-1]
    mdf = int(0.5 * (mdf_high + mdf_low))
    return mdf
c702435aae510e8c24154ad2ae9bff6da3270db4
167,297
def _enforce_trailing_slash(string: str) -> str:
    """Ensure string ends in forward slash."""
    if string.endswith("/"):
        return string
    return f"{string}/"
94a42813e84e9b3704de7ad3fc92b612ddc1f511
299,117
def parseRange(string):
    """Parses a dash-separated string of ints into a tuple."""
    splitarray = string.split("-")
    if len(splitarray) == 1:
        return (int(splitarray[0]), int(splitarray[0]))
    if len(splitarray) == 2:
        return (int(splitarray[0]), int(splitarray[1]))
    raise ValueError("Cannot parse range " + string)
7df2962f06d2eb9b1e1107b7a2f20be82598547e
181,469
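Usage sketch for parseRange above:

print(parseRange("3-7"))  # (3, 7)
print(parseRange("5"))    # (5, 5)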
def valid_reverse_lookup(net):
    """
    Check if the reverse lookup dictionary has the same count as edges
    in the network.
    """
    return len(net.graph["keys"]) == len(net.edges())
b1335ae0bfa1d762ee14587b4149d7358a3c337a
168,511
def inter_cluster_mean_score(cluster, graph, mean=None):
    """
    The method is based on the calculation of distances for each sample in a
    given cluster to the specific centroid of the cluster. If the centroid is
    not given by specifying mean, it is calculated with the distance function
    that is provided by graph.

    :param cluster: The cluster to determine the mean inter cluster score for
    :param graph: The underlying graph that offers a distance function
    :param mean: A precalculated centroid for given cluster
    :return: Mean distances within the given cluster to its centroid
    """
    distance = 0
    assert cluster
    if mean is None:
        mean = graph.distance.mean(list(cluster))
    for node in cluster:
        distance += graph.distance(mean, node)
    return distance / float(len(list(cluster)))
b669eaba5805c299c797a5bd7ae30ef0b106e2bc
610,228
def validate_dict(dict_like, schema):
    """Return a list of validation error strings.

    Compare ``dict_like`` to ``schema``, returning a list of strings
    describing all the validation errors. In particular, validate that
    ``dict_like`` contains every key specified in ``schema`` as well as that
    each key's value is an instance of the type specified by ``schema``. It
    is *not* considered a validation error if ``dict_like`` has keys which
    are not in ``schema``.

    Parameters
    ----------
    dict_like : Dict
        a dictionary-like object to be validated against ``schema``.
    schema : Dict[String, type]
        a dictionary mapping strings to instances of ``type`` (i.e., classes
        and built-in types like ``str``).

    Returns
    -------
    List[str]
        a list of strings describing all validation errors found when
        comparing ``dict_like`` to ``schema``.

    Example
    -------
    The following example shows a dictionary validated against a schema
    where the dictionary passes validation::

        >>> schema = {'foo': int}
        >>> data = {'foo': 3}
        >>> validate_dict(dict_like=data, schema=schema)
        []
    """
    validation_errors = []
    for key, value_type in schema.items():
        if key not in dict_like:
            validation_errors.append(f'Key ({key}) was not found.')
        elif not isinstance(dict_like[key], value_type):
            validation_errors.append(
                f'Key ({key}) is not of type {value_type}')
    return validation_errors
a900c388b8c2d5f144ae0b15fcfc566ae7d1e6bc
235,592
def feature_dict(entry):
    """Template dictionary for JSON feature from TED entry."""
    corners = [*entry["corners"], entry["corners"][0]]  # create Linear Ring
    feature = {
        "type": "Feature",
        "geometry": {
            "type": "Polygon",
            "coordinates": [corners]
        },
        "properties": {
            "name": entry["name"],
            "ted-id": entry["id"],
            "description": entry["type"]
        }
    }
    return feature
0638da4ec93da344a72e00cc61795301fe10ffd8
568,944
def fitness(ref_matrix, guess_matrix):
    """Returns the value of the fitness function.

    The input is the reference matrix and the guess matrix. The reference
    matrix is assumed to be regularized (to contain no zero elements).
    """
    f = 1
    for i in range(len(ref_matrix)):
        for j in range(len(ref_matrix)):
            f = f * (ref_matrix[i, j] ** guess_matrix[i, j])
    # Conversion from the product to an exponential of a sum can be used;
    # the transpose is needed to compute M_ij log(N_ij) and not M_ij log(N_ji):
    # return np.exp(np.trace(np.transpose(guess_matrix).dot(np.log(ref_matrix))))
    return f
55af7b8ce29ee176fcc12839b943c1c2560a9ab9
153,684
import re


def get_phased(glstring):
    """
    Take a GL String and return a list of phased alleles.
    """
    phased_list = []
    # Split on the GL String delimiters '+', '|' and '^'; the original
    # pattern r'[+|^\]' was missing its closing bracket and would raise
    # re.error.
    for phased in re.split(r'[+|^]', glstring):
        if "~" in phased:
            phased_list.append(phased)
    return phased_list
0df0047ff247ac51bad378b15ff0f8018f23ec09
199,463
def compute_in_degrees(digraph):
    """
    Compute the in-degree dictionary for the given directed graph.

    :param digraph: dictionary representation of a directed graph.
    :return: the dictionary representing the in-degree count for each node
        in the graph.
    """
    in_degrees_representation = dict.fromkeys(digraph.keys(), 0)
    for connected_nodes in digraph.values():  # itervalues() is Python 2 only
        for entry in connected_nodes:
            in_degrees_representation[entry] += 1
    return in_degrees_representation
5e04ecd15a0b6e41ea9a40be771164a016deb24e
556,903
def round_up(n, size):
    """
    Round an integer up to the next multiple of size. Size must be a
    power of 2.
    """
    assert size & (size - 1) == 0, "size is not power of 2"
    return ((n - 1) | (size - 1)) + 1
02f34fd5f2c059a9ee1b657f099b4699d90dfc01
24,878
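Usage sketch for round_up above; the bit trick ((n-1) | (size-1)) + 1 fills the low bits and bumps to the next multiple:

print(round_up(17, 16))  # 32
print(round_up(16, 16))  # 16 (already a multiple)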
import json
from typing import Dict


def get_msd_score_matches(match_scores_path: str) -> Dict:
    """
    Returns the dictionary of scores from the match scores file.

    :param match_scores_path: the match scores path
    :return: the dictionary of scores
    """
    with open(match_scores_path) as f:
        return json.load(f)
2b339285bffe1adaf032319d22eac5d2ac27cedc
100,878
def find_word(string, start=0):
    """Find the first word starting from `start` position.

    Return the word and the position before and after the word.
    """
    while start < len(string) and string[start].isspace():
        start += 1
    end = start
    while end < len(string) and not string[end].isspace():
        end += 1
    return string[start:end], start, end
cd0f6f02d2797c5f7bd04b3307fe34c29e3be748
55,202
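Usage sketch for find_word above:

print(find_word("  hello world"))  # ('hello', 2, 7)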
def _inference_best_shape(n_images):
    """Infers the best matplotlib row/column layout.

    This method searches for the two closest factors of the number
    `n_images`, and returns this pair as the best shape, since this is as
    close to a square grid as possible.
    """
    a, b, i = 1, n_images, 0
    while a < b:
        i += 1
        if n_images % i == 0:
            a = i
            b = n_images // a
    return [b, a]
c09cb3956f1a644d80fc34491eee34c76b74c482
571,238
def str_to_list(str_value) -> list:
    """
    Convert string to list with one element or just return received list.
    """
    return [str_value] if not isinstance(str_value, list) else str_value
3b95f1b614ff9a33bacdce24e73a97d63c38d7ad
540,317
def _getDictWithKey(key, dict_list):
    """Returns the first dictionary in dict_list which contains the given key."""
    for d in dict_list:
        if key in d:
            return d
    return None
52ce0870e5589cb9d439b658d4a6f877657ca205
608,907
def is_labeled(G):
    """Returns True iff every edge in the graph `G` has the attribute ``label``.

    Parameters
    ----------
    G : graph

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(1, 2, label="a")
    >>> sd.is_labeled(G)
    True
    >>> G.add_edge(2, 1)
    >>> sd.is_labeled(G)
    False
    """
    return all("label" in data for _, _, data in G.edges(data=True))
8a4520e42b1919d389e8c1c427cb2a9cc9841904
327,229
import math


def healpix_resolution_calculator(nodes):
    """Calculate the resolution of a healpix graph for a given number of nodes.

    Args:
        nodes (int): number of nodes in healpix sampling

    Returns:
        int: resolution for the matching healpix graph
    """
    resolution = int(math.sqrt(nodes / 12))
    return resolution
b486aab5cce9a6337d6c54aeb685ca5db6450942
170,631
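A worked example for the function above: a HEALPix sampling has 12 * nside^2 pixels, so 3072 nodes correspond to a resolution (nside) of 16:

print(healpix_resolution_calculator(3072))  # 16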
import torch


def arap_R(Si):
    """
    Given the covariance matrix Si, computes the ARAP rotation for point Pi.

    Parameters
    ----------
    Si : Tensor
        the (3,3,) tensor representing the covariance matrix of point Pi

    Returns
    -------
    Tensor
        a (3,3,) tensor representing the ARAP rotation matrix of point Pi
    """
    U, _, V = torch.svd(Si)
    return torch.matmul(V, torch.t(U))
7d266f16d6ce46ce04c8bcb3b074d089800423b5
72,021
def _check_magic(f):
    """Determines file type from leading bytes.

    The file is assumed to be opened in binary mode (the HDF and ZIP
    comparisons below already require it).
    """
    # CDF
    f.seek(0)
    CDF = f.read(3)
    if CDF == b'CDF':  # compare bytes to bytes; the original compared to a str
        VERSION_BYTE = ord(f.read(1))
        if VERSION_BYTE == 1:
            return 'NetCDF Classic'
        elif VERSION_BYTE == 2:
            return 'NetCDF 64-bit'
        return None

    # HDF
    HDF_MAGIC = b"\x89HDF\r\n\x1a\n"
    f.seek(0)
    HDF = f.read(8)
    if HDF == HDF_MAGIC:
        return 'HDF'

    # PKZIP
    ZIP_MAGIC = b"\x50\x4b\x03\x04"
    f.seek(0)
    ZIP = f.read(4)
    if ZIP == ZIP_MAGIC:
        return 'ZIP'

    return None
c70675fdcb2cdc035202f054f1ec4bc52202cfd2
151,845
def get_hosts(leases):
    """
    Get the list of hosts in leases.

    leases is a dict from parse_dhcpd_config.
    Return hosts as a list.
    """
    # Materialize the keys view so the return value is a real list.
    return list(leases.keys())
4e3f11901bb8634a7b4efc42c4d3ebe4945151ac
578,079
def is_lookup_in_users_path(lookup_file_path):
    """
    Determine if the lookup is within the user's path as opposed to being
    within the apps path.
    """
    return "etc/users/" in lookup_file_path
2b4f8efe43301f94586eb4831d29536bf8d7c68c
526,122