content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def check_is_fid_valid(fid, raise_exception=True):
    """Validate that a functional ID (FID) is well formed.

    :param fid: functional ID to validate
    :param raise_exception: if True (default), raise on failure instead
        of returning False
    :type fid: str
    :type raise_exception: bool
    :returns: True when valid, False otherwise (if raise_exception is False)
    :rtype: bool
    :raises TypeError: if FID is not a string
    :raises ValueError: if FID is missing, too short, or contains spaces
    """
    def _fail(exc):
        # Either raise the prepared exception or signal failure to the caller.
        if raise_exception:
            raise exc
        return False

    if fid is None:
        return _fail(ValueError("FID shall be set"))
    if not isinstance(fid, str):
        return _fail(TypeError("Type of fid: '%s' shall be str, not %s" % (fid, type(fid))))
    if len(fid) < 3:
        return _fail(ValueError("fid shall have at least 3 characters: '%s'" % fid))
    if " " in fid:
        return _fail(ValueError("fid shall not contains spaces: '%s'" % fid))
    return True
b629d0ae76dee292615d7786a6ce95d2f2391d87
42,374
def timeleft(i, num, elapsed):
    """Estimate the remaining time, in seconds, of an operation.

    Useful inside loops: `i` is the current iteration, `num` the total
    number of iterations, and `elapsed` the seconds since starting.
    Returns 0 when the rate cannot be computed yet (no progress made or
    no time elapsed).
    """
    try:
        # Iterations completed per second so far.
        rate = i / float(elapsed)
        return (num - i) / rate
    except ZeroDivisionError:
        # elapsed == 0 or i == 0: not enough information yet.
        return 0
f65f07f8f9711da58be0217d6e66082e0e66732e
42,375
def base64_add_padding(data):
    """Pad data for base64 so its length is a multiple of 4.

    Args:
        data: unpadded string or bytes

    Returns:
        bytes: The input as bytes, right-padded with ``b'='`` up to a
        multiple of 4. Already-aligned input is returned unchanged
        (the previous implementation wrongly appended four '=' bytes
        in that case, which is invalid base64 padding).
    """
    if isinstance(data, str):
        data = data.encode('utf-8')
    # -len % 4 is 0 when already aligned, otherwise the missing byte count.
    missing_padding = -len(data) % 4
    if missing_padding:
        data += b'=' * missing_padding
    return data
1093bead6d06f98f8821da80265aa73f892aff62
42,378
def get_minification_delta(source_text, minified_text):
    """Return the number of characters saved by minification."""
    return len(source_text) - len(minified_text)
ca837b03a38c9b9a185bab86d2a895f5a1d56bcc
42,380
import re


def sort_human(l):
    """Sort a list of strings in human (natural/numeric) order, in place.

    Numeric substrings — including signed and decimal numbers, as
    captured by the splitting regex — compare by numeric value instead
    of lexicographically. (The previous `text.isdigit()` test rejected
    tokens like '3.5' or '-2', leaving them as strings and producing
    wrong orderings such as '10.5' < '2.5'.)

    Returns the list for convenience.
    """
    def convert(token):
        # The regex captures tokens like '12', '-3.5', '+7'; str.isdigit()
        # would reject those, so attempt a real float conversion instead.
        try:
            return float(token)
        except ValueError:
            return token

    def alphanum(key):
        return [convert(c) for c in re.split(r'([-+]?[0-9]*\.?[0-9]*)', key)]

    l.sort(key=alphanum)
    return l
ae1ede3d3af89b4646d272daca24c47f2eb1663d
42,382
from typing import Any


def can_be_parsed_as_bool(value: Any) -> bool:
    """Check whether a value can be parsed as a boolean.

    can_be_parsed_as_bool(True) -> True
    can_be_parsed_as_bool("true") -> True
    can_be_parsed_as_bool("false") -> True
    can_be_parsed_as_bool("TRUE") -> True
    can_be_parsed_as_bool(0) -> False

    Args:
        value: The value to be parsed.

    Returns:
        True if the value can be parsed as a boolean, False otherwise.
    """
    if isinstance(value, bool):
        return True
    if not isinstance(value, str):
        return False
    # Only the (case-insensitive, trimmed) words "true"/"false" qualify.
    return value.strip().lower() in ("true", "false")
5427ac07fc84c61da4631b54b4bf14d6f767c345
42,385
def dedent_initial(s, n=4):
    # type: (str, int) -> str
    """Remove `n` leading spaces from the first line of text.

    The prefix is stripped only when it is exactly `n` spaces.
    """
    prefix = ' ' * n
    return s[n:] if s.startswith(prefix) else s
cf09c9aa846b2aeb3278401f921d00912b90bb1f
42,386
def _is_item_allowed(resource, item, resourcesalloweddict, resourcesuseddict): """ <Purpose> Check if the process can acquire a non-fungible, non-renewable resource. <Arguments> resource: A string with the resource name. item: A unique identifier that specifies the resource. It has some meaning to the caller (like a port number for TCP or UDP), but is opaque to the nanny. <Exceptions> None. <Side Effects> None. <Returns> True or False """ if item in resourcesalloweddict[resource]: # this is semi nonsensical, but allows us to indicate which ports are used # through get_resource_information() resourcesuseddict[resource].add(item) return True else: return False
a098f6376576446156ea25c583520fdacbd7d2ed
42,388
def dataset_constructor_kwargs(root_path, **kwargs):
    """Return key-value arguments for a dataset constructor.

    Pass keyword arguments to add or override defaults; `root_path`
    must always be specified.

    Args:
        root_path: The root_path for the Dataset.
        kwargs: Arguments that update (or extend) the defaults.

    Example use:
        # Set "version" to 2, add new parameter "new_par", and remove
        # "firmware_url":
        result = dataset_constructor_kwargs(root_path=root_path,
                                            version=2,
                                            new_par="new_value")
        del result["firmware_url"]
    """
    defaults = {
        "root_path": root_path,
        "shortname": "shortname",
        "architecture": "architecture",
        "implementation": "implementation",
        "algorithm": "algorithm",
        "version": 1,
        "paper_url": "http://paper.url",
        "firmware_url": "http://firmware.url",
        "licence": "CC BY 4.0",
        "description": "description",
        "url": "http://download.url",
        "firmware_sha256": "abc123",
        "examples_per_shard": 1,
        "measurements_info": {
            "trace1": {
                "type": "power",
                "len": 1024,
            }
        },
        "attack_points_info": {
            "key": {
                "len": 16,
                "max_val": 256
            },
        }
    }
    # Overrides win over defaults; new keys are simply added.
    return {**defaults, **kwargs}
250668e917b373dbd09281e3b9c081894f97010b
42,389
def normalize_data(vms, vm_statuses, nics, public_ips):
    """
    Normalize the data from a series of Azure cloud API calls into a
    dict keeping only the portions we care about.

    dict = {
        '<instance_name>': {
            'public_ip': '<public_ip>',
            'public_dns_name': '<public_dns_name>',
            'status': '<Up|Down>',
            'source': 'Azure'
        }
    }
    """
    normalized = {}
    for vm_id, vm_data in vms.items():
        # Follow the VM -> NIC -> public-IP chain to the address records.
        nic_data = nics[vm_data['nic_id']]
        ip_data = public_ips[nic_data['public_ip_id']]
        normalized[vm_data['name']] = {
            'public_ip': ip_data['address'],
            'public_dns_name': ip_data['fqdn'],
            'status': vm_statuses[vm_id],
            'source': "Azure",
        }
    return normalized
58067d8cd44b2e20063e2e642f5e84951e62ea47
42,394
def form_tweets(sentences):
    """Build one or more tweets (max 139 chars each) out of sentences.

    Sentences are joined with newlines; when the next sentence would
    overflow the limit, the current tweet is flushed and the next one
    starts with a continuation marker. A single sentence that cannot
    fit even in a fresh tweet is emitted as-is rather than looping
    forever (the previous implementation hung in that case, and could
    also emit a spurious empty first tweet).
    """
    tweet = ""
    tweets = []
    while sentences:
        overflow = len(tweet) + len(sentences[0]) > 139
        if overflow and tweet not in ("", "(cont'd.):"):
            # Flush the current tweet and begin a continuation.
            tweets.append(tweet)
            tweet = "(cont'd.):"
        else:
            # Either it fits, or the tweet is empty/just the marker —
            # append anyway so an oversize sentence cannot loop forever.
            if tweet:
                tweet += "\n"
            tweet += sentences.pop(0)
    tweets.append(tweet)
    return tweets
21c0aa8e069ce86f7b219e10d1e16c206eaeabe4
42,399
def _attr_files(ctx, name): """Returns the list of files for the current target's attribute. This is a convenience function since the aspect context does not expose the same convenience `file`/`files` fields used in rule contexts. Args: ctx: The Skylark context. name: The name of the attribute. Returns: A list of Files. """ return [f for t in getattr(ctx.rule.attr, name) for f in t.files]
038151d7aa91906f33ce7afb05af62f71376ec37
42,400
def task_color(task_object, show_green=False):
    """
    Return a CSS class reflecting a Task's execution status and outcome.

    By default green is not returned for executed, successful tasks;
    pass show_green=True to get the green class.
    """
    if not task_object.task_executed:
        return 'class=muted'
    if task_object.outcome == task_object.OUTCOME_FAILED:
        return 'class=error'
    if show_green and task_object.outcome == task_object.OUTCOME_SUCCESS:
        return 'class=green'
    return ''
3e3997128a006d3fdef5ec5e540e9c32926175a3
42,401
def get_scale_factor(scale, max_size, img_h, img_w):
    """
    Compute the resize factor for test-time image scaling.

    :param scale: min size during test
    :param max_size: max size during test
    :param img_h: orig height of img
    :param img_w: orig width
    :return: scale factor for resizing
    """
    short_side = min(img_w, img_h)
    long_side = max(img_w, img_h)
    # Degenerate image or disabled scaling: leave the size unchanged.
    if short_side <= 0 or scale <= 0:
        return 1.0
    # Scale the short side up to `scale`, but never let the long side
    # exceed `max_size`.
    return min(scale / short_side, max_size / long_side)
d23a435aca038ecc57bef14268a255a8404bc68c
42,402
from typing import Iterable


def unique_letters(edges: Iterable) -> str:
    """Return the unique letters in a collection of edges.

    Args:
        edges (Iterable): Iterable containing edges, i.e. tuples. The
            last element of each tuple is assumed to contain the letter.

    Returns:
        str: String of unique letters, in order of first appearance.
        (The previous set-based implementation returned them in
        arbitrary, run-to-run non-deterministic order.)
    """
    # dict preserves insertion order and de-duplicates keys.
    return ''.join(dict.fromkeys(edge[-1] for edge in edges))
b144d0600a72c9b7671e7b356d90560b785fdfa9
42,403
def energy_budget_rule(mod, g, h):
    """
    **Constraint Name**: GenHydro_Energy_Budget_Constraint
    **Enforced Over**: GEN_HYDRO_OPR_HRZS

    The sum of hydro energy output within a horizon must match the
    horizon's hydro energy budget. The budget is the user-specified
    average power fraction (average capacity factor) for the horizon
    times the matching period's installed capacity, the hours in each
    timepoint, and any availability derates.

    WARNING: availability derates make the effective average power
    fraction (and energy budget) lower than the user-specified input!

    Example: with an average power fraction of 50%, 1,000 MW installed
    capacity, a 168-hour horizon and full availability, the budget is
    0.5 * 1,000 MW * 168 h = 84,000 MWh. If the unit were unavailable
    for half the timepoints, the budget would be halved (42,000 MWh)
    even though the average power fraction is the same.
    """
    horizon_tmps = mod.TMPS_BY_BLN_TYPE_HRZ[
        mod.balancing_type_project[g], h]

    # Energy actually produced over the horizon.
    delivered_energy = sum(
        mod.GenHydro_Gross_Power_MW[g, tmp] * mod.hrs_in_tmp[tmp]
        for tmp in horizon_tmps
    )

    # Budget: average power fraction x capacity x availability x hours.
    budget_energy = sum(
        mod.gen_hydro_average_power_fraction[g, h]
        * mod.Capacity_MW[g, mod.period[tmp]]
        * mod.Availability_Derate[g, tmp]
        * mod.hrs_in_tmp[tmp]
        for tmp in horizon_tmps
    )

    return delivered_energy == budget_energy
b2e2b6308efaee4d3f1491dd659c643456c2f194
42,409
def read_list(filename):
    """Read a file and return its lines as a list of stripped strings."""
    with open(filename, "r") as fin:
        return [line.strip() for line in fin]
81c2aedbfc5d7a8b2189389ac02c1201f6d35e14
42,416
def find_all(txt, char):
    """Return a tuple of every index at which `char` occurs in `txt`.

    `char` may contain several characters; any of them counts as a match
    (membership test).
    """
    positions = []
    for index, letter in enumerate(txt):
        if letter in char:
            positions.append(index)
    return tuple(positions)
19e2a771cccb9dc2d11150fa7da8e3d1cf76d6ae
42,422
def get_property(line):
    """Split a ``key=value`` line on the first '=' and return (key, value)."""
    # The line must contain at least one '='.
    assert line.find('=') != -1
    key, _, value = line.partition('=')
    return key, value
9fbe4440021db03b85e7a12391736c2309f8a042
42,426
def drop_duplicate_rows(data_frame, column_name):
    """Drop duplicate rows (judged by `column_name`) from a pandas frame.

    Keeps the first occurrence; the input frame is not modified.
    """
    return data_frame.drop_duplicates(subset=column_name, keep="first")
9dcdc06cf4f5ef466c6d808e03a92ef09b451994
42,429
from typing import Any
from typing import Tuple


def extend_attr_to_tuple(
    val: Any,
    num_elem: int,
) -> Tuple[Any, ...]:
    """
    Return `val` unchanged if it is a tuple, otherwise replicate it.

    Args:
        val (Any): Value that we want to process.
        num_elem (int): Number of copies when replication is needed.

    Returns:
        A tuple: `val` itself when already a tuple, else ``(val,) * num_elem``.
    """
    if isinstance(val, tuple):
        return val
    return (val,) * num_elem
d3eee740d8955c49dab9366bb77ea3c923140dd0
42,431
def combine_two(a, b, delimiter='/'):
    """Return an n-nested array of strings ``a + delimiter + b``.

    `a` and `b` (e.g. uuids and object_keys) can each be a singlet, an
    array, an array of arrays, and so on; their shapes must match.

    Example:
        >>> a = ['a', 'b', ['c', 'd']]
        >>> b = ['e', 'f', ['g', 'h']]
        >>> combine_two(a, b)
        ['a/e', 'b/f', ['c/g', 'd/h']]

    :raises Exception: when one side is a list and the other is not, or
        when list lengths differ.
    """
    if isinstance(a, list):
        if not isinstance(b, list):
            raise Exception("can't combine list and non-list")
        if len(a) != len(b):
            raise Exception("Can't combine lists of different lengths")
        # Bug fix: propagate the custom delimiter into the recursion —
        # nested levels previously always used the default '/'.
        return [combine_two(a_, b_, delimiter) for a_, b_ in zip(a, b)]
    return str(a) + delimiter + str(b)
c31e508ec742a58662116d0722bd98d4b1a7b171
42,434
def cpe(*args):
    """Concatenate values as strings using ':', replacing None with '*'.

    :param args: sequence of values
    :return: string in CPE format
    """
    parts = ["*" if value is None else str(value) for value in args]
    return ":".join(parts)
bf15b0d684b77e40d069b4be7b97175511d2e830
42,436
def cli(ctx, entity):
    """Get the list of available fields for an entity

    Output:

        Fields information
    """
    fields = ctx.gi.entity.get_fields(entity)
    return fields
bfbe3814c557f7676d259413ce942afb50b781fa
42,439
def none_for_empty_string(s: str):
    """Map empty strings to None.

    Input CSV reads empty strings as '' instead of None; we want to
    insert None in such cases.
    """
    return s or None
265395b2040fb43623d91413152b7bc907906857
42,440
def float_one(vals):
    """Return the constant floating point value 1.0 (`vals` is ignored)."""
    constant = 1.0
    return constant
31ce58c2629fb36fd67d84ff440adae05a823c2e
42,442
from typing import OrderedDict def _to_ordered_dict(d): """ Recursively converts a dict to OrderedDict. This is needed to preserve `(key, value)` ordering with iterating through `dict`'s between Python 2 and Python 3. Parameters ---------- d : dict Dictionary to order. Returns ------- OrderedDict Recursively order representation of the input dictionary. """ d_ordered = OrderedDict() for key, value in sorted(d.items()): if isinstance(value, dict): d_ordered[key] = _to_ordered_dict(value) elif isinstance(value, list) and (all(isinstance(item, dict) for item in value)): list_of_ordered_dicts = [] for item in value: list_of_ordered_dicts.append(_to_ordered_dict(item)) d_ordered[key] = list_of_ordered_dicts else: d_ordered[key] = value return d_ordered
350556c1fc885e7b495509ca0ed9a0ff47f2eb70
42,447
def get_cell_entry(cell):
    """Read the entry of a given xls spreadsheet cell.

    The cell's string description is parsed: text cells look like
    ``text:u'L'`` and numeric cells like ``number:3.0`` (the fractional
    part is dropped). Anything else yields ''.

    input: cell (xlrd.sheet.cell object), cell to be read from
    output: entry (any), value stored in cell
    """
    description = str(cell)  # cell description
    if 'text' in description:
        # typical text cell: text:u'L' -> take what is between the quotes
        return description.split("'")[1]
    if 'number' in description:
        # typical numerical cell: number:3.0 -> keep the integer part
        return description.split(':')[1].split('.')[0]
    return ''
24471d68484941ec77bf617d19d6ee63ad154aab
42,451
import re


def linearize(multiline: str) -> str:
    """
    Collapse a multiline string into a stripped, one-line string.

    All newlines and runs of consecutive whitespace characters are
    replaced by a single space.

    :param multiline: A multiline string as found in indented source code.
    :return: A stripped, one-line string.
    """
    collapsed = re.sub(r"\s+", " ", multiline)
    return collapsed.strip()
1053b72a0a3948d2a185545259ea52f8bf1cba3a
42,452
from typing import Dict
from typing import Any


def extremise_distribution(distribution: Dict[Any, float],
                           tau: float = 1) -> Dict[Any, float]:
    """Extremise a probability distribution with temperature `tau`.

    Output values are proportional to the input values raised to
    ``1 / tau``, where `tau` lies in (0, 1]. ``tau == 1`` merely
    normalises the distribution; as ``tau -> 0`` the entry with the
    highest input value tends to 1 and all others tend to 0.

    Parameters
    ----------
    distribution
        A dictionary with values equal to positive floats.
    tau
        Reciprocal exponent ('temperature') between 1 and 0.

    Returns
    -------
    dict:
        Same keys as the input, values extremised and normalised.
    """
    assert tau > 0
    assert min(distribution.values()) >= 0

    # Rescale by the maximum first so exponentiation stays numerically tame.
    peak = max(distribution.values())
    exponent = 1 / tau
    powered = {key: (value / peak) ** exponent
               for key, value in distribution.items()}
    norm = sum(powered.values())
    return {key: value / norm for key, value in powered.items()}
1f2ae7e2a1ebda15f2f37e52afe4d004f2994292
42,455
from typing import Optional
import calendar


def parse_month(name: str) -> Optional[int]:
    """Return the month that unambiguously matches `name` as a prefix.

    Matching is case-insensitive against the month names of the
    currently active system locale. Returns None when zero or more
    than one month matches.
    """
    needle = name.lower()
    matches = [
        number
        for number, month in enumerate(calendar.month_name)
        if number > 0 and month.lower().startswith(needle)
    ]
    # Ambiguous (or no) match: report None.
    return matches[0] if len(matches) == 1 else None
49ea4f224c8773daad9ce57626021aea66563fc3
42,459
import math


def distance2D(loc2d1=tuple(), loc2d2=tuple()):
    """
    Euclidean distance between two 2-D points.

    loc2d1 : (x1, y1)
    loc2d2 : (x2, y2)
    return : distance
    """
    dx = loc2d1[0] - loc2d2[0]
    dy = loc2d1[1] - loc2d2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
638631e678ddb16098e1d0ad32241454ce95a9ef
42,460
import time


def calc_time(func, *args, **kwargs):
    """Return the wall-clock seconds spent executing func(*args, **kwargs)."""
    started = time.time()
    func(*args, **kwargs)
    return time.time() - started
104b378b66009a9b49ebf8804c27f33a01b33287
42,463
def _nus_uuid(short: int) -> str: """Get a 128-bit UUID from a ``short`` UUID. Args: short: The 16-bit UUID. Returns: The 128-bit UUID as a string. """ return f"6e40{short:04x}-b5a3-f393-e0a9-e50e24dcca9e"
207163a7339808b65c0f3d1b0fcdfde87057eb57
42,464
def address_in_db(cursor, address):
    """Return True if `address` exists in the farmers table, else False."""
    cursor.execute('SELECT address FROM farmers WHERE address=?',
                   (str(address),))
    return cursor.fetchone() is not None
1e93eaf2874fdf9fca8c4db73b981e41a00c5538
42,468
def convert_units(cube, units):
    """
    Convert the units of a cube to new ones.

    This converts units of a cube.

    Arguments
    ---------
    cube: iris.cube.Cube
        input cube
    units: str
        new units in udunits form

    Returns
    -------
    iris.cube.Cube
        converted cube (the same object that was passed in).
    """
    # NOTE(review): iris' convert_units appears to operate in place —
    # the same cube object is returned for call-chaining; confirm
    # callers do not expect a copy.
    cube.convert_units(units)
    return cube
a0562d75d96b97caaab125b9999856030c41975c
42,469
def all_equal(arg1, arg2):
    """
    Shortcut function to compute element-wise equality between two
    iterables.

    Parameters
    ----------
    arg1 : iterable
        Any iterable sequence
    arg2 : iterable
        Any iterable sequence that has the same length as arg1

    Returns
    -------
    bool
        True if each pair of elements are equal. Otherwise, False
    """
    return all(x == y for x, y in zip(arg1, arg2))
1920e0e28c7ca632438c3adde2c22f5b21f7e54e
42,474
def get_adjacent_face ( surface, edge_data, idx ): """ get_adjacent_face ( surface, edge_data, idx ) Get the number (index) of the face that includes the edge that is the reverse direction of the passed (hashed) edge @param surface - working surface structure (dictionary) @param edge_data - pre generated object edge data @param idx - index of the edge to process (in surface [ 'edgehash' ]) @return face number to add to the surface """ # get the existing stored edge hash from the surface edge_hash = surface [ 'edgehash'][ idx ] # get the edge end point indexes back from the (searchable) hash # create a new hash for the reverse direction edge reverse_edge = [ edge_hash & 0xffffffff, edge_hash >> 32 ] reverse_hash = reverse_edge [ 0 ] << 32 | reverse_edge [ 1 ] if reverse_hash in surface [ 'edgehash' ]: return None # Face already on the surface: do not add again # return the adjacent face index return int ( edge_data [ 'byEdge' ].index ( reverse_hash ) / 3 )
5d417afb1668b1eeb82ebbc9c2cd612e6559889a
42,477
def get_cluster_idx(_cluster):
    """Return the cluster's `cluster_idx` attribute (used for sorting)."""
    return _cluster.cluster_idx
b58c5fd2fe51231cb900428f068868b0ed0218b1
42,479
def bool_mapper(attribute):
    """
    Maps ``yes``, ``1`` and ``on`` to ``True`` and ``no``, ``0``
    and ``off`` to ``False`` (``true``/``false`` likewise).
    """
    truthy = ("yes", "1", "on", "true")
    falsy = ("no", "0", "off", "false")

    def _fn(values):
        token = values["bool"].lower()
        if token in truthy:
            return {attribute: True}
        if token in falsy:
            return {attribute: False}
        # Unrecognised token: map to nothing.
        return {}

    return _fn
549260ae13c65b452b91551865f37626dda5a423
42,481
def get_indeed_url(position, location):
    """
    Build the Indeed URL of a search query for a job position and
    location.

    Parameters :
        Job Position
        Job Location
    Return :
        Appropriate Indeed URL
    """
    return 'https://fr.indeed.com/jobs?q={}&l={}'.format(position, location)
7b31bf4ca2a89ed25d43add9028c23b505419da2
42,487
def estimate_bias_randomized_response_bool(prior, p):
    """Estimate the bias of boolean randomized response.

    `p` is the probability of returning the true answer, and `prior`
    the prior probability that the survey answer is True. For example,
    with prior 0.9 and p 0.5, the randomized responses exhibit an
    average bias of -0.2: they come out False more often than in the
    real data.

    :returns: the bias of the randomized response
    """
    assert 0 <= prior <= 1
    assert 0 <= p <= 1
    # With probability p the truth (expectation `prior`) is reported;
    # otherwise a fair coin is returned (expectation 1/2).
    expected_response = p * prior + (1 - p) / 2
    return expected_response - prior
e5d1690c0c4cb9888d304794925fdd84287adeff
42,503
import math


def euclidean_distance(point1, point2):
    """Return the euclidean distance between two 3-D points."""
    dx = point2.x - point1.x
    dy = point2.y - point1.y
    dz = point2.z - point1.z
    return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
5ab2d751e24313b3ebe969b7c897164ffac425f9
42,507
def select_subscription(subs_code, subscriptions):
    """
    Return the uwnetid.subscription object matching `subs_code`.

    Returns None when either argument is falsy or nothing matches.
    """
    if not subs_code or not subscriptions:
        return None
    for subscription in subscriptions:
        if subscription.subscription_code == subs_code:
            return subscription
    return None
95490620b6841e7bf718023b6484096ff112d27b
42,508
def get_object_to_write(result):
    """
    Returns the constructed object containing the search results data,
    for later analysis.

    Args:
        result: The output from running the query

    Returns:
        object_to_write: the constructed object containing desired search
            result data
    """
    # Collect per-item (title, link) records plus a flat list of URLs.
    # NOTE(review): 'result' appears to be a Google Custom Search-style
    # response wrapper — confirm against the query runner.
    result_items = []
    urls = []
    if "items" in result["response"].keys():
        se_items = result["response"]["items"]
        for item in se_items:
            title = item["title"]
            link = item["link"]
            result_items.append({
                "title": title,
                "link": link
            })
            urls.append(link)
    # Summarise each request the engine reported for this query.
    request_data = []
    requests = result["response"]["queries"]["request"]
    for req in requests:
        request_data.append({
            "request_cx": req["cx"],
            "request_count": req["count"],
            "total_results": req["totalResults"],
            "start_index": req["startIndex"],
            "search_terms": req["searchTerms"]
        })
    # Assemble the final record: query identity, API info, response
    # metadata, and the extracted items/links.
    object_to_write = {
        "segment_id": result["segment_id"],
        "query_string": result["query"],
        "api_info": {
            "api_key": result["api_key"],
            "search_engine_id": result["search_engine_id"]
        },
        "number_of_results_specified": result["number_of_results"],
        "response_info": {
            "search_info_total_results": result["response"]["searchInformation"]["totalResults"],
            "search_time": result["response"]["searchInformation"]["searchTime"],
            "url_template": result["response"]["url"]["template"],
            "requests": request_data
        },
        "results": result_items,
        "links": urls
    }
    return object_to_write
9b3bba46d8eadd8f3df4c21e622aa8b821bf1781
42,509
import torch


def cam_project(points, K):
    """
    Project 3-D camera-space points to 2-D using the intrinsics K.

    :param points: torch.Tensor of shape [b, n, 3]
    :param K: torch.Tensor intrinsics matrix of shape [b, 3, 3]
    :return: torch.Tensor of projected 2-D points, shape [b, n, 2]
    """
    batch = points.shape[0]
    count = points.shape[1]
    # Apply K to every point: [b, n, 3, 3] @ [b, n, 3, 1] -> [b, n, 3, 1].
    K_per_point = K.reshape(batch, 1, 3, 3).repeat(1, count, 1, 1)
    projected = torch.matmul(
        K_per_point,
        points.reshape(batch, count, 3, 1)
    )
    # Perspective divide by the z (third) coordinate; keep x and y.
    points_2d = projected[:, :, :2, 0] / projected[:, :, [2], 0]
    return points_2d
3fc4af68e156f4bc46fbe106d54f7e577a457cf6
42,510
import re


def check_string(text, search=re.compile(r'[^A-Za-z0-9-_]').search):
    """Test that a string doesn't contain unwanted characters.

    :param text: Text that you want to verify is compliant.
    :type text: str
    :param search: Callable returning a match for any *unwanted*
        character; defaults to a compiled regex search allowing
        [A-Za-z0-9-_].
    :return: bool
    """
    found = search(text)
    # No match for a forbidden character means the string is clean.
    return not bool(found)
59348de4e86bc762cc8a7aef2243e1d2b2ce9f85
42,511
def get_bool_param(param):
    """Return `param` as a bool; strings parse True only for 'true'."""
    if isinstance(param, bool):
        return param
    # Trim and case-fold before comparing; anything but 'true' is False.
    return param.strip().lower() == 'true'
ce43ae34e8676d1b9412738adc6647f9af1713c6
42,514
def modify_idx(*args, idx, dim):
    """
    Build an index that slices one dimension while keeping the slices
    of the other dimensions unchanged.

    Parameters
    ----------
    *args : tuple of int or None
        Constructor arguments for the slice object at the target axis.
    idx : tuple of slice
        Tuple of slices in the original region of interest.
    dim : int
        Target axis.

    Returns
    -------
    new_idx : tuple of slice
        `idx` with dimension `dim` substituted by ``slice(*args)``.
        Can be used to index np.ndarray and torch.Tensor.
    """
    replaced = list(idx)
    replaced[dim] = slice(*args)
    return tuple(replaced)
09d0553b6281d1c7e5103b14dfde78003f92a554
42,523
def is_wanted_header(header):
    """Return True unless the (key, value) HTTP header is on the drop list."""
    unwanted = ('x-content-type-warning', 'x-powered-by')
    key, _value = header
    return key.lower() not in unwanted
5b3dddbbfc7279d9d750f88eba2c07b9c87a0e29
42,525
from pathlib import Path def _get_all_migrations_from_folder(migration_folder): """Simply checks folder for files which ends with up.sql :param migration_folder: Path to migration folder :type migration_folder: string :returns: Path object globbed for a string *.up.sql :rtype: pathlib.Path """ return Path(migration_folder).glob("*.up.sql")
983c736cd2cf1ec2587ec67f33018c3368740c70
42,526
def matrix2vec(m, axis='x'):
    """Calculate an axis vector (column) from a rotation matrix.

    Parameters
    ----------
    m : numpy.ndarray
        rotation matrix
    axis : str, optional
        axis x, y, z, by default 'x'

    Returns
    -------
    vec : numpy.ndarray

    Raises
    ------
    ValueError
        axis should be x, y, z
    """
    columns = {'x': 0, 'y': 1, 'z': 2}
    if axis not in columns:
        raise ValueError("Valid axis are 'x', 'y', 'z'")
    return m[:, columns[axis]]
9c6cda577f35158a8756e866a1db9a4d5f851ab4
42,527
def split_text_by_maxlen(text, maxlen=512):
    """Split a long text into chunks of at most `maxlen` characters.

    :param text: str
    :param maxlen: int, maximum chunk length
    :return: list of (chunk, start_index) tuples
    """
    return [(text[start:start + maxlen], start)
            for start in range(0, len(text), maxlen)]
6ed3bb269d7c7628e5743f824aaa5a85057ecc1e
42,528
import shlex


def split_cli_command_and_shell_commands(command_text):
    """Split the command text into a CLI command and piped shell commands.

    e.g.::

        cat ID | grep text

    ``cat ID`` is a CLI command, matched against the CLI grammar tree;
    ``grep text`` is treated as a SHELL command.

    Returns a tuple ``(a, b)`` where ``a`` is the CLI command string and
    ``b`` is a list of shell commands (token lists).
    """
    segments = []
    current = []
    for token in shlex.split(command_text):
        if token == '|':
            segments.append(current)
            current = []
        else:
            current.append(token)
    # A trailing non-empty segment is part of the pipeline too.
    if current:
        segments.append(current)
    if len(segments) == 1:
        return ' '.join(segments[0]), []
    return ' '.join(segments[0]), segments[1:]
d7219805e8064ce681eb24487e5bbc8ff2236af4
42,531
def make_category_query(category):
    """Create a search query for the target audio category."""
    # e.g. mediatype:(audio) subject:"radio"
    return "mediatype:(audio) subject:{}".format(category)
f30b563bde7323483a7be670d2b41c475622ba20
42,532
def _exclusions(table_name, ignore_columns): """Generate a list of columns to exclude from serialisation for a given table name Parameters ---------- table_name : str The name of a data table within the app ignore_columns : list, tuple, dict or str A list or tuple of column names to ignore, a dict mapping table names to such lists or tuples, or a string with a single column name Returns ------- list of column names """ if isinstance(ignore_columns, (list, tuple)): return ignore_columns elif isinstance(ignore_columns, dict): return ignore_columns[table_name] elif isinstance(ignore_columns, str): return [ignore_columns] else: return []
5590783c98e3e24317705751965ee48499611064
42,539
def get_multiple_model_method(model):
    """
    Return the name of the Multiple Model Chain element of the model.

    Parameters
    ----------
    model : A Scikit-learn model instance

    Returns
    -------
    The multiple model method for a mining model (None for unsupported
    model classes, as before).
    """
    methods = {
        'GradientBoostingClassifier': 'modelChain',
        'GradientBoostingRegressor': 'sum',
        'RandomForestClassifier': 'majorityVote',
        'RandomForestRegressor': 'average',
        'IsolationForest': 'average',
    }
    # Dispatch on the class name, matching the original if/elif chain.
    return methods.get(model.__class__.__name__)
a87525ab9eedfb46443319f771b2275756b7188e
42,542
from datetime import datetime


def parse_date_string(date_string):
    """
    Convert the date strings created by the API
    (e.g. '2012-04-06T19:11:33.032') into an equivalent datetime
    instance.
    """
    api_format = "%Y-%m-%dT%H:%M:%S.%f"
    return datetime.strptime(date_string, api_format)
eefa2ba1cec70c2a96c2deb1ece85802360e47f1
42,544
def normalize_title(title):
    """Normalize a title: trim surrounding whitespace and title-case it."""
    stripped = title.strip()
    return stripped.title()
86a06fa50033e565d34b4c8b93bc67ad048fdb72
42,548
def make_filter_params(db447x, n_downsample: int = 1, n_0: int = 4096,
                       n_poles: int = 1, f_cutoff: int = 10000,
                       n_avg: int = 1, factor: int = 1):
    """
    Bundle filter parameters into the dictionary later read by the
    parse_filter function.

    :param db447x: dataframe of ni447x filter data
    :param n_downsample: downsampling factor
    :param n_0:
    :param n_poles:
    :param f_cutoff:
    :param n_avg:
    :param factor:
    :return: dict of filter parameters
    """
    params = dict(
        n_downsample=n_downsample,
        n_0=n_0,
        n_poles=n_poles,
        f_cutoff=f_cutoff,
        n_avg=n_avg,
        factor=factor,
        db447x=db447x,
    )
    return params
ef59ee3c427387947702532f3827e779b3f2cb91
42,549
def last(path):
    """Return the last "part" of a slash-separated path.

    Examples:
      last('abc/def/ghi') => 'ghi'
      last('abc') => 'abc'
      last('') => ''
    """
    # rpartition yields ('', '', path) when there is no '/', so the
    # third element is always the wanted tail.
    return path.rpartition('/')[2]
af0071fa80ed67c2e427daf9f4202782aeb71997
42,559
import pathlib
import zipfile


def source_input_directory(user_filepath):
    """Examine program arguments to determine the location of an archive.

    None falls back to "export"; an existing directory is used as-is; a
    ``.zip`` archive is extracted next to itself and the extraction
    directory is returned.

    :raises RuntimeError: for missing paths, or files that are not
        archives.
    """
    if user_filepath is None:
        return "export"

    path = pathlib.Path(user_filepath)
    if not path.exists():
        raise RuntimeError("Specified path {} does not exist".format(user_filepath))
    if path.is_dir():
        return user_filepath
    if path.is_file():
        if not user_filepath.endswith(".zip"):
            raise RuntimeError("Specified path {} is a file, but not an archive".format(user_filepath))
        extracted_filepath = user_filepath.replace(".zip", "")
        with zipfile.ZipFile(user_filepath, 'r') as zip_file:
            zip_file.extractall(extracted_filepath)
        return extracted_filepath
730319b194cebff9ed15e1ffc90e0991f5531f04
42,563
def fuzzy_sort_coarse(image, pano_threshold):
    """Very fuzzy aspect-ratio sort key.

    Orders portrait (-1), then square (0), then landscape (1), then
    panorama (2, wider than `pano_threshold` times the height).
    """
    width, height = image.width, image.height
    if width > height * pano_threshold:
        return 2
    if width > height:
        return 1
    if width < height:
        return -1
    return 0
87249a3fa900131b80e0a7cd86d2b51691eee166
42,565
def prettify_error(s):
    """Prefix a big blank and make the first 90 characters unbreakable.

    Adds a run of non-breaking spaces and replaces regular spaces with
    non-breaking ones in the first 90 characters. This forces a newline
    in the DSS display and renders the start of the error as one solid
    block, making it prettier.
    """
    padding = '\xa0' * 130
    unbreakable_head = s[:90].replace(' ', '\xa0')
    return padding + ' \n' + unbreakable_head + s[90:]
5424d45c735b860918685b6190c445603cf7d0d9
42,567
def increase(_):
    """Return offset adjust 1 (the argument is ignored)."""
    offset_adjust = 1
    return offset_adjust
cf83fd360f07980b1a21d63eff6f405dd5a0ee1c
42,568
import re


def onlyalphanum(value):
    """
    Filter `value`, keeping only ASCII alphanumeric characters.

    Parameters
    ----------
    value : str
        The string to filter

    Returns
    -------
    str
        The string obtained after filtering
    """
    return re.sub(r'[^A-Za-z0-9]+', '', value)
ade9b61fbda291a31d00ca2d8cb4975b197a3101
42,569
def create_worker(queue, worker_class):
    """
    Create a non-stop worker to gather tweets.

    Arguments:
    ----------
    queue: queue.Queue
        A queue to gather tasks from
    worker_class: class or function returning an object
        The returned object must respond to `work(status, query)`

    Returns:
    --------
    worker: function
        A function to be used to start a working thread; it loops
        forever, blocking on the queue and marking each task done.
    """
    handler = worker_class()

    def worker():
        while True:
            task_args = queue.get(block=True)
            handler.work(*task_args)
            queue.task_done()

    return worker
269569e4c6016169f59d84862630c588c2a93c17
42,574
def _is_yearbook_transaction(transaction_detail): """Returns True iff the paypal `transaction_detail` object contains an `item_name` of Yearbook. """ cart_info = transaction_detail.get('cart_info') if cart_info: item_details = cart_info.get('item_details') if item_details and len(item_details) == 1: item = item_details[0] item_name = item.get('item_name', '').lower() return item_name and item_name.find('yearbook') != -1 transaction_info = transaction_detail.get('transaction_info') if transaction_info: invoice_id = transaction_info.get('invoice_id', '').lower() return invoice_id and invoice_id.find('yearbook-invoice') != -1 return False
0540efb4d5d2fedc97ac5fe14ea29577d0adb358
42,577
def get_string_after_n_space(text, n):
    """Return the part of *text* that follows its n-th space, stripped.

    Example: ('test1 test2 test3', 1) -> 'test2 test3'
             ('test1 test2 test3', 2) -> 'test3'

    :param text: input string
    :param n: number of spaces to skip
    :return: string after the nth space
    """
    parts = text.split(' ', n)
    return parts[-1].strip()
99abae9dc2f24d1b26999d6e401b7fc56ec584ad
42,579
def get_bits(register, index, length=1):
    """Extract selected bit(s) from a register value, masking out the rest.

    Returns a boolean when ``length == 1``, otherwise the masked integer.

    :param register: Register value
    :param index: Start index (from the right)
    :param length: Number of bits (default 1)
    :return: Selected bit(s)
    :rtype: Union[int, bool]
    """
    mask = 2 ** length - 1
    bits = (register >> index) & mask
    return bits == 1 if length == 1 else bits
a407074b60cb7598341ab335137f595cc9e811e4
42,587
def get_last_chunk(buffer, info):
    """A sampler function that extracts only the last chunk of the memory.

    Slices each entry of ``buffer`` along its second axis, taking the
    ``num_steps + 1`` records ending at ``info["num_records"]``.
    """
    end = info["num_records"]
    start = end - info["num_steps"] - 1
    return {key: values[:, start:end] for key, values in buffer.items()}
987f1695191c7c9826e867c8c88aff6aa46f72c0
42,589
def _fix_url(url: str) -> str: """ Add 'https://' to the start of a URL if necessary """ corrected_url = url if url.startswith("http") else "https://" + url return corrected_url
ad26693c9cf2958e31155f6666e5bffad9c13628
42,590
import calendar


def first_day(year, month, bday=True):
    """Return the first day of the month (day-of-month number).

    Defaults to the first *business* day: when the month starts on a
    weekend, the following Monday is returned instead of the 1st.
    """
    weekday, _ = calendar.monthrange(year, month)
    # monthrange weekday: Monday == 0 ... Sunday == 6.
    if not bday or weekday <= 4:
        return 1
    # Month starts on Sat (5) or Sun (6): jump to the following Monday.
    return 8 - weekday
f68ba78fdfed08c2bd25165932a857a51de353c4
42,591
def put_in_bucket(d, l, value): """ If this helper function is called on every gene in our analysis, it will group all genes with the same z-score into the same list. Each of those lists of same-z-score genes is stored in l. Args: d: Dict, in the context of this file, a dict of genes to z-scores l: List of Lists, represents out buckets. Initiate with an empty list, the continually pass in l to build up your buckets value: String, the current gene we would like to put into a bucket Returns: the list now containing value in its proper bucket within l. """ # dummy list to prevent ['string'] -> ['s','t','r','i','n','g'] dummy = [] # if list is empty, init it if len(l) == 0: dummy.append(value) return dummy else: # else search to see if the value fits into an existing bucket for i in range(len(l)): # along the way, make sure this is a list of lists if type(l[i]) != list: dummy.append(l[i]) l[i] = dummy dummy = [] # aka find a bucket with same z-score as value's if d[l[i][0]] == d[value]: l[i].append(value) return l # if our value (gene) doesn't have a bucket to go in, make a new one at the end of the list dummy.append(value) l.append(dummy) return l
475a6edf846bc56f9b5ecc15a6c742f1a6ed377c
42,595
def existed(fileobj):
    """
    Returns a boolean indicating whether a file opened by openReadWrite
    existed in the filesystem before it was opened (i.e. its mode contains
    'r'). Objects without a ``mode`` attribute yield False.
    """
    mode = getattr(fileobj, "mode", '')
    return 'r' in mode
620e7271c9d6abc7851f0ded16413b02e73b82b6
42,596
def choose_attribute(data, attributes, class_attr, fitness, method):
    """
    Cycles through all the attributes and returns the attribute with the
    highest information gain (or lowest entropy). The class attribute
    itself is excluded from consideration; ties are broken by the larger
    attribute value (tuple comparison).
    """
    best = (-1e999999, None)  # -inf sentinel: any real gain beats it
    for attr in attributes:
        if attr == class_attr:
            continue
        candidate = (fitness(data, attr, class_attr, method=method), attr)
        if candidate > best:
            best = candidate
    return best[1]
b99a9fe29accc199314ac9f9852f911b9e28eeb5
42,598
def plugins_by_name(pm):
    """
    Organize plugins by plugin name.

    Returns a dict where the key is the plugin name and the value is a
    list of all plugins that have that name, in their original order.
    """
    grouped = {}
    for plugin in pm.plugins:
        grouped.setdefault(plugin.name, []).append(plugin)
    return grouped
f274356e99a10054814a2762d807bf25d54e1cce
42,599
import random


def random_start_goal(width=10, start_bounds=None, goal_bounds=None):
    """Return a random start position in start_bounds and a distinct random
    goal position in goal_bounds.

    :param width: width of the grid (used for default bounds)
    :param start_bounds: a tuple of tuples ((x0, x1), (y0, y1))
    :param goal_bounds: a tuple of tuples ((x0, x1), (y0, y1))
    :return: random start and goal coordinates
    """
    default = ((0, width), (0, width))
    start_x_bounds, start_y_bounds = start_bounds if start_bounds is not None else default
    goal_x_bounds, goal_y_bounds = goal_bounds if goal_bounds is not None else default
    start = random.randrange(*start_x_bounds), random.randrange(*start_y_bounds)
    goal = random.randrange(*goal_x_bounds), random.randrange(*goal_y_bounds)
    # Re-draw the goal until it differs from the start.
    while goal == start:
        goal = random.randrange(*goal_x_bounds), random.randrange(*goal_y_bounds)
    return start, goal
1e983e5d2356aa7fd02ccca0240c960ac154e751
42,601
def round_(i: int, size: int) -> int:
    """
    Round `i` up to the nearest greater-or-equal-to multiple of `size`.
    """
    remainder = i % size
    return i if remainder == 0 else i + size - remainder
4a7b95d27b86f2021b98d202a5c209738e84e81d
42,602
def find_from_iterable(it) -> set:
    """
    Find all unique conditions in given iterable object.

    :param it: Iterable object to traverse.
    :return: Set of all unique available conditions.
    """
    # set() already de-duplicates on insertion; the original per-element
    # `if i not in cond` membership test was redundant Python-level work.
    return set(it)
aaa8edb2e8fb6268a35ca8d7fc5b406b9e598722
42,603
from typing import Callable


def map_to_user_params(*args_to_params: str) -> Callable[[dict], dict]:
    """Make a function that reads plugin arguments from user_params.

    The returned function can usually be directly assigned to the
    args_from_user_params attribute of plugin classes.

    :param args_to_params: each item can be <arg_name> or <arg_name>:<param_name>
        if <arg_name>, the plugin argument will be arg_name=user_params[arg_name]
        if <arg_name>:<param_name>, the argument will be arg_name=user_params[param_name]

    :return: function that takes a user_params dict and returns a dict of
        keyword arguments (entries whose value is None are omitted)
    """
    def get_args(user_params: dict) -> dict:
        kwargs = {}
        for spec in args_to_params:
            arg_name, _, param_name = spec.partition(":")
            key = param_name if param_name else arg_name
            value = user_params.get(key)
            if value is not None:
                kwargs[arg_name] = value
        return kwargs

    return get_args
0609542e5ba0ce2c61b9734b50aee915155c6764
42,604
import math


def categorize(distance: float) -> int:
    """Distance binning method to be referenced across data analysis files
    and classifiers.

    Args:
        distance (float): The premeasured distance, in meters.

    Returns:
        The floor of the given distance amount.
    """
    binned = math.floor(distance)
    return binned
0014f2096131f03e6c0619e7d8a3ad9526d44fd1
42,606
def _sort_key(name): """Sorting helper for members of a directory.""" return name.lower().lstrip("_")
ecd81276f49f6a10ea7de2920ff45ddd2df469c3
42,607
import torch


def concat_feat_var(src, tgt):
    """Concatenate feature variables; returns ``tgt`` alone when ``src`` is None."""
    if src is None:
        return tgt
    return torch.cat([src, tgt])
144093368371ec70ceefa989b4780fdc5de2efad
42,610
import time


def get_elapsed_time_ms(start_time_in_seconds):
    """
    Returns the elapsed time in millis from the given start time.
    """
    elapsed_seconds = time.time() - start_time_in_seconds
    return int(elapsed_seconds * 1000)
bc71a20fddf62a1cfa4ab51724b826946d171b66
42,622
def get_urls(link, nb):
    """
    Generate the list of all season/team URLs.

    Args:
        link [str]: Base HTML link
        nb [int]: Number of teams per season

    Returns:
        url [list of str]: one URL per (season, team) pair, seasons
        2000-2001 through 2019-2020, teams 1..nb.
    """
    return [
        link + str(season) + "-" + str(season + 1) + "&teamId=" + str(team)
        for season in range(2000, 2020)
        for team in range(1, nb + 1)
    ]
743e27067a76c8e2fc4a354740d39ff68e264504
42,625
from typing import Optional
import re


def email_is_valid(email: Optional[str]) -> bool:
    """
    Validate that a valid email was provided. None is a valid option.

    Parameters
    ----------
    email: Optional[str]
        The email to validate.

    Returns
    -------
    status: bool
        The validation status.
    """
    if email is None:
        return True
    match = re.match(r"^[a-zA-Z0-9]+[\.]?[a-zA-Z0-9]+[@]\w+[.]\w{2,3}$", email)
    return match is not None
58efc79668c4856fc74b7fc470405c5e27256c60
42,626
def sanitize_for_shell(string):
    """
    Return `string` with double quotes escaped for use in a Windows shell
    command.
    """
    escaped = string.replace('"', '\\"')
    return escaped
67135287fc8b1930d43ebd15ef8b113de342fe51
42,628
import struct import base64 def _serialize_linear_biases(linear, nodelist): """Serializes the linear biases. Args: linear: a interable object where linear[v] is the bias associated with v. nodelist (list): an ordered iterable containing the nodes. Returns: str: base 64 encoded string of little endian 8 byte floats, one for each of the biases in linear. Ordered according to nodelist. Examples: >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [1, 2, 3]) 'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA' >>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [3, 2, 1]) 'AAAAAAAAAAAAAAAAAADwPwAAAAAAAPC/' """ linear_bytes = struct.pack('<' + 'd' * len(linear), *[linear[i] for i in nodelist]) return base64.b64encode(linear_bytes).decode('utf-8')
bab0881a6adf7387a89885413b12b3e57e515527
42,632
from typing import IO
from typing import Tuple
from typing import Optional


def solve_task(input_io: IO, slope: Tuple[int, int]) -> int:
    """
    Solves task 1: number of trees hit following the given slope.

    The map stream is read line by line (one line per "down" step) and the
    column position wraps around modulo the map width. The stream is rewound
    to the start before returning, so it can be reused by the caller.

    Parameters
    ----------
    input_io: IO
        Map stream.
    slope: (slope_right: int, slope_down: int)
        Slope to use from top to bottom.

    Returns
    -------
    int
        Number of trees '#' in path from top to bottom.
    """
    def go_down(amount: int) -> Optional[str]:
        """Try to advance amount lines 'down' in the map stream.

        Returns the last line read, or a falsy value (None / '') when the
        stream is exhausted before `amount` lines could be read — which
        terminates the caller's while-loop.
        """
        line = None
        while (amount > 0) and (line := input_io.readline()):
            amount -= 1
        return line

    trees = 0
    slope_right, slope_down = slope
    # First line fixes the map width; the start square is open by contract.
    fline = input_io.readline().strip()
    assert fline[0] == "."
    columns = len(fline)
    current_column = 0
    while line := go_down(slope_down):
        line = line.strip()
        current_column += slope_right
        current_column %= columns  # the pattern repeats to the right
        trees += line[current_column] == "#"  # bool adds as 0/1
    input_io.seek(0)  # rewind so the stream can be reused
    return trees
85fef22a62c40839a90a329959d3823a9d38da52
42,633
def _pos_round(position): """ Returns the rounded `position`. **Don't require Pygame.** **(Not available in SimpleGUI of CodeSkulptor.)** :param position: (int or float, int or float) or [int or float, int or float] :return: (int, int) """ assert isinstance(position, tuple) or isinstance(position, list), \ type(position) assert len(position) == 2, len(position) assert isinstance(position[0], int) or isinstance(position[0], float), \ type(position[0]) assert isinstance(position[1], int) or isinstance(position[1], float), \ type(position[1]) return (int(round(position[0])), int(round(position[1])))
e0e130e17cb39c1c9ca62db03556f9410c417f67
42,634
def get_sampling_error(mu, xbar):
    """
    Sampling error: the difference between the population mean and the
    sample mean.

    Parameters
    ----------
    mu: mean of the parameter
    xbar: mean of the sample

    Returns
    -------
    The sampling error mu-xbar.
    """
    sampling_error = mu - xbar
    return sampling_error
85ea2357547379c5f9535e79c7ed9ce35bfbc73f
42,637
def expected_base_paths(configuration):
    """API base paths, e.g. ['/api/indexd', '/api/leo', '/api/sheepdog',
    '/api/peregrine', '/api/dockstore']."""
    paths = []
    for api in configuration['apis']:
        paths.append(api['paths']['base'])
    return paths
6bce840d395e0bf45de3fe173e9cb7bc5fa75c85
42,640
def is_valid_ohlc(ohlc_row):
    """
    ohlc format: (open,high,low,close,volume)

    A row is valid when high >= low, both open and close fall within
    [low, high], and volume is non-negative.
    """
    _open, _high, _low, _close, _volume = ohlc_row
    if _high < _low or _volume < 0:
        return False
    return _low <= _open <= _high and _low <= _close <= _high
edc46ab2f3b0ab5696bdadfbef6f4bfe0d6d0497
42,643
def _sdiff(s1, s2): """return symmetric diff of strings s1 and s2, as a tuple of (letters unique to s1, letters common to both, letters unique to s2. s1 and s2 must be strings with letters in sorted order """ if s1 == s2: return ('', s1, '') i1 = i2 = 0 u1 = u2 = i = "" while i1 < len(s1) and i2 < len(s2): if s1[i1] == s2[i2]: i += s1[i1] i1 += 1 i2 += 1 elif s1[i1] <= s2[i2]: u1 += s1[i1] i1 += 1 elif s2[i2] <= s1[i1]: u2 += s2[i2] i2 += 1 else: assert False, "Shouldn't be here" u1 += s1[i1:] u2 += s2[i2:] return (u1, i, u2)
b1cdcac3cfb32c7445cdbbeac0b6d2d3b1e494a6
42,644
def income2(households):
    """
    Dummy (0/1 indicator) for income group 2
    """
    mask = households['income_category'] == 'income group 2'
    return mask.astype(int)
cd8cc4fcd0968b2fad261389934bee1cf183f8d0
42,646
def get_face_id(row):
    """
    Extract the face id (column index 7) from an annotation row.
    """
    face_id = row[7]
    return face_id
b4a3ed110ca1f8267b0bba51d7ca8c2f3c87dc10
42,649
import operator


def sort(word_freq):
    """
    Takes a dictionary of words and their frequencies
    and returns a list of (word, frequency) pairs sorted
    by frequency, highest first.
    """
    # dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() is the equivalent (view) replacement.
    return sorted(word_freq.items(), key=operator.itemgetter(1), reverse=True)
1a5ed4daf6e9502f5875d99dd978e5e9ab20d7ca
42,651
def gather_points(points, index):
    """Gather point features at the given centroid indices.

    Args:
        points: (batch_size, channels, num_points)
        index: (batch_size, num_centroids)

    Returns:
        torch.Tensor: (batch_size, channels, num_centroids)
    """
    batch, channels = points.size(0), points.size(1)
    num_centroids = index.size(1)
    # Broadcast the index across the channel dimension so gather can
    # select the same point for every channel.
    expanded = index.unsqueeze(1).expand(batch, channels, num_centroids)
    return points.gather(2, expanded)
56286d650ac8beaa89d81eb00d82cba579b9e9cb
42,653
def pluralize(text, n, suffix="s"):
    """Return TEXT pluralized (with SUFFIX appended) if N != 1."""
    if n == 1:
        return text
    return "%s%s" % (text, suffix)
a0c08408bef982426855dd4b5e0862ac155160e8
42,656