Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def length(x):
    """Return the number of elements in x (e.g. numpy array size)."""
    return x.size
70310b7b09173ece02db4c4aff4c14d53fc22f78
35,210
def error_func(guess, x, data, data_model):
    """
    @param guess : parameter list:: an estimate of the model parameters of the final solution
    @param x : numpy array of float:: x-values
    @param data : numpy array of y-values
    @param data_model : function to be fitted
    @return: numpy array of float:: differences between the data and the model
    """
    return data - data_model(x, *guess)
7ee32c6ec8738a46936205c7235e789ea7d2bd5f
35,211
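A usage sketch for error_func above, assuming SciPy's leastsq optimizer; linear_model and the sample data are hypothetical:

import numpy as np
from scipy.optimize import leastsq

def linear_model(x, a, b):
    # hypothetical model: a straight line
    return a * x + b

x = np.linspace(0.0, 1.0, 20)
data = 2.0 * x + 1.0                  # synthetic observations
guess = [1.0, 0.0]                    # initial parameter estimate
# leastsq repeatedly calls error_func(guess, x, data, linear_model)
# and minimizes the sum of squared residuals it returns
params, _ = leastsq(error_func, guess, args=(x, data, linear_model))
print(params)                         # should approach [2.0, 1.0]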
def parse_file(filename):
    """
    Parses file for error messages in logs

    Args:
        filename

    Returns:
        error_count: count of error messages in file
        error_msgs: list of error messages
    """
    # Initialize return vals
    error_count = 0
    error_msgs = []
    with open(filename, 'r') as file:
        for line in file:
            # Try to find error message and locate index in string
            str_to_find = 'error -'
            str_idx = line.lower().find(str_to_find)
            # If error is found, extract and increment count
            if str_idx != -1:
                error_count += 1
                str_start = str_idx + len(str_to_find) + 1
                error_msg = line[str_start:].strip()
                error_msgs.append(error_msg)
    return error_count, error_msgs
855ad9411646961b3afdec14e7b3547af81fae84
35,214
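A quick check of parse_file, using a hypothetical throwaway log file; note the parser looks for the literal marker 'error -' case-insensitively and skips one character after it:

with open('sample.log', 'w') as f:   # hypothetical sample file
    f.write('INFO all good\n')
    f.write('2021-01-01 ERROR - disk full\n')

count, msgs = parse_file('sample.log')
print(count, msgs)                   # 1 ['disk full']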
import pickle

def pickle_dump(obj, path):
    """ Dump a pickle """
    with open(path, 'wb') as outfile:
        return pickle.dump(obj, outfile)
9fb609bcbee03f2294bd4b76bdbc1b0f0ad93875
35,217
def _strip_right(str, suffix):
    """Returns str without the suffix if it ends with suffix."""
    if str.endswith(suffix):
        return str[0: len(str) - len(suffix)]
    else:
        return str
fdf03a237c353cc1579aec89fcc18b69ca2af8da
35,219
def repeat_tensor_for_each_element_in_batch(torch_tensor, n):
    """
    Repeats a certain torch tensor n times for each element in a batch.

    :param torch_tensor: given torch tensor
    :param n: number of repeats
    :return: new tensor, where every row of torch_tensor is repeated n times
    """
    data_shape = torch_tensor.shape[1:]  # per-element shape, excluding the batch dim
    repeats = [1, n] + [1] * len(data_shape)
    expanded = torch_tensor.unsqueeze(1).repeat(*repeats)
    return expanded.view(-1, *data_shape)
1fef4542e953a49c483c81c6094d5677092e5b66
35,223
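A shape check for repeat_tensor_for_each_element_in_batch above, assuming PyTorch:

import torch

t = torch.tensor([[1, 2], [3, 4]])   # batch of 2 rows
out = repeat_tensor_for_each_element_in_batch(t, 3)
print(out.shape)                     # torch.Size([6, 2])
print(out[:3])                       # three copies of [1, 2], then three of [3, 4]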
def dig_deeper(entry, field, res, depth=10):
    """A helper function for :func:`get_wiktionary_field_strings`.
    It recursively locates the target field.

    Args:
        entry (dict or list): the entity to investigate
        field (str): the field to look up
        res (list): the list of found entities to update
        depth (integer): maximum recursion depth (otherwise this does blow up
            memory for some entries like "cat")

    Returns:
        (list): the updated list of found entities
    """
    if depth > 0:
        if isinstance(entry, dict):
            for key, val in entry.items():
                if field == key:
                    if entry[key]:
                        res.append(entry[key])
                        return res
                elif isinstance(val, list):
                    for i in val:
                        depth -= 1
                        # recurse into each element, not the whole list
                        res = dig_deeper(i, field, res, depth)
        elif isinstance(entry, list):
            for i in entry:
                depth -= 1
                res = dig_deeper(i, field, res, depth)
        return res
    else:
        return res
6332ed989b2815a7a4e5247e7b9f28f3a88b11be
35,225
def ranges_to_indices(range_string):
    """Converts a string of ranges to a list of indices"""
    indices = []
    for span in range_string.split('/'):
        if ':' in span:
            start_idx, stop_idx = [int(idx) for idx in span.split(':')]
            stop_idx += 1  # add 1 since end index is excluded in range()
            indices.extend(list(range(start_idx, stop_idx)))
        else:
            indices.append(int(span))
    return indices
7f3fe841c9f0c2e309013184ffcb6a35c1dab42b
35,226
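Example input for ranges_to_indices: spans are '/'-separated, and ':' ranges include both endpoints:

print(ranges_to_indices('0:2/5/7:9'))   # [0, 1, 2, 5, 7, 8, 9]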
def is_error_start(line):
    """Returns true if line marks a new error."""
    return line.startswith("==") and "ERROR" in line
a76fb0b424098c1f60c1b63a1fc36ef08a9fb8f2
35,228
import torch

def soft_reward(pred, targ):
    """
    BlackBox adversarial soft reward. Highest reward when `pred` for `targ`
    class is low. Use this reward to reinforce action gradients.

    Computed as: 1 - (targ pred).
    Args:
        pred: model log prediction vector, to be normalized below
        targ: true class integer, we want to decrease probability of this class
    """
    pred_prob = torch.exp(pred)   # convert log-probabilities to probabilities
    gather = pred_prob[:, targ]   # gather target-class probabilities
    ones = torch.ones_like(gather)
    r = ones - gather
    r = r.mean()
    return r
d6a420b49d22d87c2d5eb4662a1ac3d1c3175660
35,235
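A toy check of soft_reward, assuming the gather-from-pred_prob reading above and log-probability inputs:

import torch

log_probs = torch.log(torch.tensor([[0.7, 0.3], [0.2, 0.8]]))
print(soft_reward(log_probs, 0))   # 1 - mean(0.7, 0.2) = tensor(0.5500)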
def check_py_file(files):
    """
    Return a list with only the python scripts (remove all other files).
    """
    py_files = [fich for fich in files if fich.endswith('.py')]
    return py_files
942f4f2560eaeab540be59a88d5e8211efc787a0
35,241
def create_demand_evaluator(data):
    """Creates callback to get demands at each location."""
    _demands = data['demands']

    def demand_evaluator(manager, node):
        """Returns the demand of the current node"""
        return _demands[manager.IndexToNode(node)]

    return demand_evaluator
30f18d8ae57eda136c7a0d2114c38a7e5e378f47
35,253
def xywh2xyxy(bbox):
    """ Coordinate conversion xywh -> xyxy """
    bbox_ = bbox.clone()
    if len(bbox_.size()) == 1:
        bbox_ = bbox_.unsqueeze(0)
    xc, yc = bbox_[..., 0], bbox_[..., 1]
    half_w, half_h = bbox_[..., 2] / 2, bbox_[..., 3] / 2
    bbox_[..., 0] = xc - half_w
    bbox_[..., 1] = yc - half_h
    # xc and yc are views into bbox_, so they already hold the shifted values
    # written above; adding 2 * half therefore yields center + half.
    bbox_[..., 2] = xc + 2 * half_w
    bbox_[..., 3] = yc + 2 * half_h
    return bbox_
6111f1a6c1a35390d853ce72fe21e0a8123bdcde
35,259
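A sanity check for xywh2xyxy, assuming center-format (xc, yc, w, h) input:

import torch

box = torch.tensor([10.0, 10.0, 4.0, 2.0])   # center (10, 10), 4 wide, 2 tall
print(xywh2xyxy(box))                        # tensor([[ 8.,  9., 12., 11.]])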
def is_backbone(atom, element, minimal=False):
    """
    Whether `atom` is a protein backbone atom or not.

    Parameters
    ----------
    atom : str
        The atom name.

    element : str
        The element name.

    minimal : bool
        If `True` considers only `C` and `N` elements.
        `False`, considers also `O`.
    """
    e = element.strip()
    a = atom.strip()
    elements = {
        True: ('N', 'C'),
        False: ('N', 'C', 'O'),
    }
    # the element check is needed because of atoms in HETATM entries,
    # for example 'CA' is calcium
    return a in ('N', 'CA', 'C', 'O') and e in elements[minimal]
29def3cf99de683f8dee48f534661b89ae22d2b2
35,260
import math

def rotate(x, y, beta):
    """
    Rotate vector (x, y) by beta radians counterclockwise
    https://matthew-brett.github.io/teaching/rotation_2d.html
    """
    x2 = math.cos(beta) * x - math.sin(beta) * y
    y2 = math.sin(beta) * x + math.cos(beta) * y
    return (x2, y2)
c42a52d2225814601a13d80d7a875bd6d1bf6862
35,263
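A sanity check for rotate: the unit x-vector rotated by pi/2 should land on the y-axis, up to floating-point error:

import math

x2, y2 = rotate(1.0, 0.0, math.pi / 2)
print(round(x2, 12), round(y2, 12))   # 0.0 1.0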
def references(name, tag):
    """Provides suggested references for the specified data set

    Parameters
    ----------
    name : str
        Instrument name
    tag : str
        Instrument tag

    Returns
    -------
    refs : str
        Suggested Instrument reference(s)
    """
    refs = {'tec': {'vtec': "Rideout and Coster (2006) doi:10.1007/s10291-006-0029-5"}}
    return refs[name][tag]
9452192d5cac1097c59ff0521fbeb7e62f4ab7e3
35,265
def _make_alias_docstring(new_name, func_or_class):
    """Make deprecation alias docstring."""
    if func_or_class.__doc__:
        lines = func_or_class.__doc__.split('\n')
        lines[0] += ' (deprecated)'
    else:
        lines = ['DEPRECATED CLASS']
    first_line = lines[0]
    notice_lines = [
        ('Warning: THIS CLASS IS DEPRECATED. It will be removed in a future '
         'version.'),
        'Please use %s instead.' % new_name
    ]
    remaining_lines = []
    remaining_lines_string = '\n'.join(lines[1:]).strip()
    if remaining_lines_string:
        remaining_lines = remaining_lines_string.split('\n')
    lines = ([first_line, ''] + notice_lines +
             (([''] + remaining_lines) if remaining_lines else []))
    return '\n'.join(lines)
7f28908e09d690937a55d2bf1cc392857eb2c0ac
35,267
def adj_r2(r2: float, sample_size: int, n_features: int) -> float:
    """
    >>> round(adj_r2(0.8, 100, 5), 3)
    0.789
    >>> round(adj_r2(0.8, 20, 5), 3)
    0.729
    """
    return 1 - ((sample_size - 1) / (sample_size - n_features - 1)) * (1 - r2)
9496b8263427ee5e879d838422d5c2c152daf33b
35,271
def remove_quotes(string):
    """
    This function is used here to remove quotes from paths used in this script.
    :param string: Path with quotes.
    :return: Path without quotes.
    """
    if string.startswith('"'):
        string = string[1:]
    if string.endswith('"'):
        string = string[:-1]
    return string
acc09bcf6ecfaf5a26332abbb2632be6cb671286
35,272
def split_route(route):
    """ Split a full route into single nest-to-nest subroutes """
    routes = []
    i = -1
    for node in route:
        if node.id == 0:
            # start a new subroute in position i
            if i != -1:
                # close the old subroute
                routes[i].append(route[0])
            # start the new subroute
            routes.append([node])
            i += 1
        else:
            # add node to the i-subroute
            routes[i].append(node)
    # return all the subroutes except for the last one,
    # which is composed only of the nest (node 0)
    return routes[:-1]
98e8c167dadeff4825fdffee571277e7b8ec5bcb
35,275
def split_variant(variant):
    """
    Splits a multi-variant `HGVS` string into a list of single variants. If
    a single variant string is provided, it is returned as a singular `list`.

    Parameters
    ----------
    variant : str
        A valid single or multi-variant `HGVS` string.

    Returns
    -------
    list[str]
        A list of single `HGVS` strings.
    """
    prefix = variant[0]
    if len(variant.split(";")) > 1:
        return ["{}.{}".format(prefix, e.strip()) for e in variant[3:-1].split(";")]
    return [variant]
aeaa13400333b5ee0a02b88f54399ed3ba40cc04
35,277
def stack_follow(deck_size, position, *_):
    """Get new position after stacking deck."""
    return deck_size - position - 1
3d766864097a96c76b646a6a3b982ed879526af3
35,288
import re

def CleanText(text):
    """Cleans provided text by lower casing words, removing punctuation, and
    normalizing spacing so that there is exactly one space between each word.

    Args:
        text: Raw text to be cleaned.

    Returns:
        Cleaned version of text.
    """
    pretty_issue = text.lower().strip()
    quoteless_issue = re.sub(r"'", '', pretty_issue)
    no_punctuation_issue = re.sub(r'[^\w\s]|_+', ' ', quoteless_issue)
    one_space_issue = ' '.join(no_punctuation_issue.split())
    return one_space_issue
573bab3645a774096959b7957e02cba07539a74d
35,296
def sum_even_fibonacci(n):
    """sum of the even-valued terms of the fibonacci sequence not exceeding n"""
    result = 0
    if n >= 2:
        x, y = 1, 1
        for _ in range(n):
            x, y = y, x + y
            if x > n:
                break
            if x % 2 == 0:
                result += x
    return result
0cfc53863115aa462f6d0558cc3a5018507c737f
35,298
def bin_frequencies(df, how='max', bin_size=5, n_bins=None):
    """
    bins spectral data frequencies to the specified bin size or number of bins

    :param df: dataframe of spectral data from single sensor
    :param how: how to aggregate the intensities for each bin. Any numpy aggregate function, default is max
    :param bin_size: size of frequency bins, default is 5. Overridden by n_bins if specified
    :param n_bins: number of bins of equal size to return. Overrides bin_size. Default is None
    :return: dataframe with same number of rows but reduced number of columns
    """
    df = df.T.reset_index()
    df['index'] = df['index'].astype('float')
    if n_bins:
        f_min = df['index'].min()
        f_max = df['index'].max()
        bin_size = (f_max - f_min) // n_bins
    df['freq_bin'] = (df['index'] // bin_size) * bin_size
    df = df.groupby('freq_bin').agg(how).drop('index', axis=1).T
    return df
0929e1539d43a379c4ff1a3e348056dfa2e333eb
35,302
import torch

def squash(inputs, axis=-1):
    """
    The non-linear activation used in Capsule. It drives the length of a large
    vector to near 1 and small vector to 0.
    :param inputs: vectors to be squashed
    :param axis: the axis to squash
    :return: a Tensor with same size as inputs
    """
    norm = torch.norm(inputs, p=2, dim=axis, keepdim=True)
    scale = norm**2 / (1 + norm**2) / (norm + 1e-8)
    return scale * inputs
ff639bec8d0c9acbc1daf6f7f7918c517cc6642f
35,303
def convert_sec_to_time(duration_in_sec: float):
    """converts time in seconds to HH:MM:SS

    Args:
        duration_in_sec (float): duration in seconds

    Returns:
        (str): the time in the format: HH:MM:SS
    """
    hours = int(duration_in_sec / 3600)
    remainder = duration_in_sec % 3600
    minutes = int(remainder / 60)
    seconds = int(duration_in_sec % 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
fcc96b55843555425048e36274656f19b2a879df
35,306
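Examples for convert_sec_to_time; note that hours are not wrapped at 24:

print(convert_sec_to_time(3725))      # 01:02:05
print(convert_sec_to_time(90000.5))   # 25:00:00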
import json
import requests

def start_stream(project, location, token, s_config):
    """
    This function will start the stream in Google Cloud DataStream
    :param project: Google Cloud project id mentioned in variables.py
    :param location: Google Cloud resource location, for example us-central1
    :param token: Google Cloud auth token
    :param s_config: stream config from variables.py
    :return: True or False
    """
    stream_id = s_config["stream_id"]
    name = s_config["stream_name"]
    url = (f"https://datastream.googleapis.com/v1/"
           f"projects/{project}/locations/{location}/streams/{stream_id}?"
           "updateMask=state")
    payload = json.dumps({"state": "RUNNING"})
    headers = {
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("PATCH", url, headers=headers, data=payload)
    if response.status_code == 200:
        print(f"Stream {name} started successfully")
        start_stream_stat = True
    else:
        print(f"Issue while starting stream: {response.text}")
        start_stream_stat = False
    return start_stream_stat
d9b2e74582f0ae2ae66219e5b121801116f70078
35,307
import re

def strip_json(string_value):
    """Strip a string containing a JSON document and remove all redundant white-space symbols.

    :param string_value: String containing a JSON document
    :type string_value: str
    :return: String containing a JSON document without redundant white-space symbols
    :rtype: str
    """
    result = string_value.replace("\n", "")
    result = re.sub(r"{\s+", "{", result)
    result = re.sub(r"\s+}", "}", result)
    result = re.sub(r",\s+", ", ", result)
    return result
c8f6b37d5ca72dcfcfb70dbe9b156727752bdf0e
35,310
import builtins

def _is_valid_padding(kernel_sdims, strides, padding):
    """Returns True if `padding` corresponds to "VALID" padding for a transposed convolution."""
    # This is simply the padding == 'VALID' part of lax._conv_transpose_padding.
    for (begin, end), k, s in zip(padding, kernel_sdims, strides):
        pad_len = k + s - 2 + builtins.max(k - s, 0)
        pad_a = k - 1
        pad_b = pad_len - pad_a
        if begin != pad_a or end != pad_b:
            return False
    return True
8e3351110a8b7b06eff6432e0cbb705d172ccd88
35,313
def normalize_application_tags(app_original, app_updated):
    """ Simple function to normalize application tags when application is created or updated.
        It aims to ensure that required tags are always well defined.

    :param app_original dict: The ghost "app" object before modification.
    :param app_updated dict: The ghost "app" object with the new modifications.
    :return list: A list of dicts. Each dict defines a tag.

    Test with only the default tag Name

    >>> from copy import deepcopy
    >>> from pprint import pprint
    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[]}}
    >>> app_updated = deepcopy(app_original)
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'ec2.prod.webfront.app1'}]

    Test with a custom Tag Name

    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[]}}
    >>> app_updated = deepcopy(app_original)
    >>> app_updated['environment_infos']['instance_tags'] = [{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]

    Test with a custom Tag Name built with variables

    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]}}
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]

    Test with a custom tag

    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[]}}
    >>> app_updated = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'billing', 'tag_value': 'account1'}]}}
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'ec2.prod.webfront.app1'}, {'tag_name': 'billing', 'tag_value': 'account1'}]

    Test with a custom tag updated

    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'billing', 'tag_value': 'account1'}]}}
    >>> app_updated = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'billing', 'tag_value': 'account2'}]}}
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'ec2.prod.webfront.app1'}, {'tag_name': 'billing', 'tag_value': 'account2'}]
    """
    app_tags = []
    reserved_ghost_tags = ['app', 'app_id', 'env', 'role', 'color']
    default_tag_name_value = "ec2.{env}.{role}.{app}".format(env=app_original['env'],
                                                             role=app_original['role'],
                                                             app=app_original['name'])
    custom_tags = (app_updated['environment_infos']['instance_tags']
                   if 'instance_tags' in app_updated['environment_infos'] else [])
    if 'Name' not in [i['tag_name'] for i in custom_tags]:
        app_tags.append({'tag_name': 'Name', 'tag_value': default_tag_name_value})
    for tag in custom_tags:
        if tag['tag_name'] not in reserved_ghost_tags:
            app_tags.append({'tag_name': tag['tag_name'], 'tag_value': tag['tag_value']})
    return app_tags
0a3e69f38cc0d5dfbcbec07dc224ecfba4cc02b6
35,317
def grelha_nr_linhas(g):
    """
    grelha_nr_linhas: grid --> positive integer
    grelha_nr_linhas(g) returns the number of rows of grid g.
    """
    return len(g)
ac1b0b730b5bd139dc238d80283a5dfcc7a2dd57
35,318
def counter2val(counter):
    """Extract current value of an `itertools.count()` w/o incrementing it."""
    return counter.__reduce__()[1][0]
1c3f2929422974a2a0e38763c9fa20251804bf3d
35,320
def replace_recursive(text, to_match, repl=''):
    """
    Works the same as str.replace, but recurses until no matches can be found.
    Thus, ``replace_recursive('hello_wooorld', 'oo', 'o')`` would replace
    ``wooorld`` with ``woorld`` on the first pass, and ``woorld`` with
    ``world`` on the second.

    Note that ``str.replace`` only performs one pass, so
    ``'wooorld'.replace('oo', 'o')`` would return ``woorld``.

    :param text: the text to operate on.
    :param to_match: the text to match.
    :param repl: what to replace any matches with.
    :return: text, guaranteed to not contain ANY instances of ``to_match``.
    """
    while to_match in text:
        text = text.replace(to_match, repl)
    return text
fcf58b9b5f54b0bc1e152b4a4686012ff6cc7506
35,322
def get_sim_time_span(n_interval, step_size):
    """Calculate the time span of the simulation.

    :param n_interval: number of intervals
    :type n_interval: integer
    :param step_size: length of one time step in minutes
    :type step_size: number
    :return: time delta in minutes
    :rtype: number
    """
    return n_interval * step_size
3cb0b573ad504593479a853ddd04465e3abd11ea
35,328
def f2f1(f1, f2, *a, **k):
    """
    Apply the second function after the first.

    Call `f2` on the return value of `f1`.
    Args and kwargs apply to `f1`.

    Example
    -------
    >>> f2f1(str, int, 2)
    2
    """
    return f2(f1(*a, **k))
f19730807cfdd74c1b2b895ff1a034597988907a
35,330
def getBounds(lvls_arr: list, n_lvl: float):
    """
    A helper function to calculate the BN interpolation
    @param lvls_arr: The corruption levels list
    @param n_lvl: The current level to interpolate
    @return: Returns the previous and next corruption levels
    """
    lower = lvls_arr[0]
    upper = lvls_arr[1]
    for i, v in enumerate(lvls_arr[:-1]):
        if n_lvl <= v:
            break
        lower = v
        upper = lvls_arr[i + 1]
    return lower, upper
a478437834a5774e572015f51a15499fde516a25
35,331
def income_tax(wage):
    """
    :param wage: monthly take-home income

    Quick tax formula: monthly taxable income (Ti) x applicable tax rate (Tr) - quick deduction (Qd)

        #        Ti          Tr     Qd
        ---------------------------------
        1         ~1500      3%      0
        2     1500~4500     10%    105
        3     4500~9000     20%    555
        4    9000~35000     25%   1005
        5   35000~55000     30%   2755
        6   55000~80000     35%   5505
        7        80000~     45%  13505
    """
    quick_deductions = (
        (0.00000, 0.03, 0.00000),  # 1
        (1500.00, 0.10, 105.000),  # 2
        (4500.00, 0.20, 555.000),  # 3
        (9000.00, 0.25, 1005.00),  # 4
        (35000.0, 0.30, 2755.00),  # 5
        (55000.0, 0.35, 5505.00),  # 6
        (80000.0, 0.45, 13505.0),  # 7
    )
    threshold = 3500  # tax-exempt threshold
    taxable_income = wage - threshold  # income subject to tax
    if taxable_income <= 0:
        return 0
    level = 6
    for index, i in enumerate(quick_deductions):
        if taxable_income < i[0]:
            level = index - 1
            break
    return taxable_income * quick_deductions[level][1] - quick_deductions[level][2]
ed18d667bcd0f75a840b62a231f76e227a70a4ba
35,335
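A worked example for income_tax under the bracket table above: a wage of 5000 leaves taxable income 5000 - 3500 = 1500, which falls in bracket 2 (10% rate, quick deduction 105), so the tax is 1500 * 0.10 - 105 = 45.

print(income_tax(5000))   # 45.0
print(income_tax(3500))   # 0, at or below the threshold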
def correlate(x, y):
    """Pearson's correlation
    """
    # Assume len(x) == len(y)
    n = len(x)
    sum_x = float(sum(x))
    sum_y = float(sum(y))
    sum_x_sq = sum(xi * xi for xi in x)
    sum_y_sq = sum(yi * yi for yi in y)
    psum = sum(xi * yi for xi, yi in zip(x, y))
    num = psum - (sum_x * sum_y / n)
    den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5)
    if den == 0:
        return 0
    return num / den
12d8aeb4698b43b0a51d7131423e273a8c3d8d8d
35,336
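Sanity checks for correlate: perfectly linear data should give +/-1.0:

print(correlate([1, 2, 3, 4], [2, 4, 6, 8]))   # 1.0
print(correlate([1, 2, 3, 4], [4, 3, 2, 1]))   # -1.0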
import re

def list_from_file(file_name, separator=r'\s+', convert_to=int):
    """Returns a 2-D list which contains the content of a file, with lines
    corresponding to sublists and elements being converted with function
    convert_to. separator is used (as a regexp) as a separator for each
    element."""
    array = []
    with open(file_name) as data_file:
        for line in data_file:
            line = line.strip()
            tokens = re.split(separator, line)
            tokens = [convert_to(token) for token in tokens]
            array.append(tokens)
    return array
37ada18a3c6e4bfede93d65db0cef359b520767a
35,341
import re

def is_method_name(text):
    """
    >>> is_method_name('hello')
    False
    >>> is_method_name('hello()')
    True
    >>> is_method_name('Foo::Bar')
    False
    >>> is_method_name('Foo::Bar#baz')
    True
    >>> is_method_name('Foo::Bar#baz()')
    True
    >>> is_method_name('user/repo#14')
    False
    """
    return bool(re.match(r'''
        (?:\w+(?:[.]|::))*      # Zero or more C++/Ruby namespaces
        \w+
        (?:
            [(][)]              # A standard function
        |
            [#]\w+(?:[(][)])?   # A Ruby Method
        )
    ''', text, re.VERBOSE))
f495caaf05419c369cc81d9debf576152c153f7a
35,343
def create_answer_mapping(annotations, ans2cat):
    """Returns mapping from question_id to answer.

    Only returns those mappings that map to one of the answers in ans2cat.

    Args:
        annotations: VQA annotations file.
        ans2cat: Map from answers to answer categories that we care about.

    Returns:
        answers: Mapping from question ids to answers.
        image_ids: Set of image ids.
    """
    answers = {}
    image_ids = set()
    for q in annotations['annotations']:
        question_id = q['question_id']
        answer = q['multiple_choice_answer']
        if answer in ans2cat:
            answers[question_id] = answer
            image_ids.add(q['image_id'])
    return answers, image_ids
a0ca05f057f635084c8407326b0e843a8fac6fc6
35,346
def clean_join(separator, iterable):
    """
    Filters out iterable to only join non empty items.
    """
    return separator.join(filter(None, iterable))
f52f2a6be0f1ebdd6feb9346ccc89918c833485c
35,347
def bed_get_chromosome_ids(bed_file):
    """
    Read in .bed file, return chromosome IDs (column 1 IDs).
    Return dic with chromosome ID -> count mapping.

    >>> test_file = "test_data/test6.bed"
    >>> bed_get_chromosome_ids(test_file)
    {'chr1': 2, 'chr2': 2, 'chr3': 1}
    """
    ids_dic = {}
    with open(bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            chr_id = cols[0]
            if chr_id in ids_dic:
                ids_dic[chr_id] += 1
            else:
                ids_dic[chr_id] = 1
    assert ids_dic, "No chromosome IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (bed_file)
    return ids_dic
77400731e30e8a0313c77aaaf726f5d35216a676
35,351
def get_intersections(path1, path2):
    """returns a list of the intersection points between the two paths

    Args:
        path1: one path (list of tuples with consecutive integer x, y coords)
        path2: second path (see above)

    Returns:
        a list of all overlapping tuples from the two paths
    """
    intersects = []
    for pt in path1:
        if pt in path2 and pt != (0, 0):
            intersects.append(pt)
    return intersects
91add11e62f898beb3faefa3b73b6bee53f2948d
35,354
from typing import Tuple

def mean_grad(x: Tuple[float, float]) -> Tuple[float, float]:
    """A manually calculated gradient of the mean with respect to the inputs."""
    return 6 * x[0], 3 * x[1] ** 2
459f9b1c0500080acd6097b905a4825c2dd4b80f
35,362
import re

def process_arguments(raw_arguments: list):
    """
    Process the arguments from CLI. The value of sys.argv
    """
    arguments = {}
    for argument in raw_arguments:
        # catch the arguments with associated values
        matches = re.search(r"^([a-z][a-z0-9-]+?)=['\"]?(.+?)['\"]?$", argument)
        if matches:
            arg = matches.group(1).lower()
            value = matches.group(2)
            arguments.update({arg: value})
            continue
        # catch the simple arguments
        matches = re.search(r"^(-[a-z][a-z0-9-]*?)$", argument)
        if matches:
            arg = matches.group(1)
            arguments.update({arg: True})
    return arguments
2a8249af500c25b869ec68f7402a0e9130656a91
35,366
def combineImagePaths(centerImagePath, leftImagePath, rightImagePath,
                      centerMeasurement, leftMeasurement, rightMeasurement):
    """ combines center/left/right images and measurements to one list """
    # combine measurements
    measurements = []
    measurements.extend(centerMeasurement)
    measurements.extend(leftMeasurement)
    measurements.extend(rightMeasurement)
    # combine image paths
    imagePaths = []
    imagePaths.extend(centerImagePath)
    imagePaths.extend(leftImagePath)
    imagePaths.extend(rightImagePath)
    return imagePaths, measurements
3c8e1dbe16bc25d9efcd029c0c28f194f275e5f8
35,367
import shutil

def check_valid_shell_command(cmd):
    """
    Determine if a shell command's executable can be found on the PATH.
    Note that this never runs the command itself.

    Args:
        cmd (string or list): Shell command. String of one command or list with arguments.
    Returns:
        bool
    """
    if isinstance(cmd, list):
        return bool(shutil.which(cmd[0]))
    return bool(shutil.which(cmd))
97d2ac7a24d15217481454fdd0a2c2b7ef11fa70
35,373
def _file_read(filename):
    """Read file and return text"""
    # Open and read file to get text.
    with open(filename, 'r') as f:
        text = f.read()
    return text
a5422d174c964f2fe5fd4a1847d4fd1b95431749
35,381
def split_words(text: str) -> list:
    """ Breaks up a command input such as 'hello foo bar' into individual words"""
    command_text = text.strip()
    commands = command_text.split(' ')
    return commands
1bc27d5ec5f4c805eb22b84d9b916d1dca0f9312
35,385
def response(hey_bob):
    """
    Bob is a lackadaisical teenager. In conversation, his responses are very limited.

    Bob answers 'Sure.' if you ask him a question, such as "How are you?".
    He answers 'Whoa, chill out!' if you YELL AT HIM (in all capitals).
    He answers 'Calm down, I know what I'm doing!' if you yell a question at him.
    He says 'Fine. Be that way!' if you address him without actually saying anything.
    He answers 'Whatever.' to anything else.

    Bob's conversational partner is a purist when it comes to written communication
    and always follows normal rules regarding sentence punctuation in English.

    :param hey_bob:
    :return:
    """
    if hey_bob is None or hey_bob.strip() == '':
        # He says 'Fine. Be that way!' if you address
        # him without actually saying anything.
        return 'Fine. Be that way!'
    if hey_bob.isupper():
        # He answers 'Calm down, I know what I'm doing!'
        # if you yell a question at him.
        if '?' in hey_bob:
            return 'Calm down, I know what I\'m doing!'
        # He answers 'Whoa, chill out!' if you
        # YELL AT HIM (in all capitals).
        return 'Whoa, chill out!'
    if '?' == hey_bob.strip()[-1]:
        # Bob answers 'Sure.' if you ask him a question,
        # such as "How are you?".
        return 'Sure.'
    # He answers 'Whatever.' to anything else.
    return 'Whatever.'
5e1e58f9a2bd44961a6b569cd52085958b0b7422
35,386
def rgb_to_hsv(image):
    """
    Wrapper function to convert a Landsat 4, 5 or 7 image from RGB to HSV.

    :param image: Landsat image to convert to the HSV color space
    :return: Image containing three bands, representing the hue, saturation and value.
    """
    image_hsv = image.select(['B3', 'B2', 'B1']).multiply(0.0001).rgbToHsv()
    return image_hsv.copyProperties(image).set('system:time_start', image.get('system:time_start'))
788551ff5af5237779df41a52f138d75f2a8e3b2
35,387
def GetUnixErrorOutput(filename, error, new_error=False):
    """Get an output line for an error in UNIX format."""
    line = ''
    if error.token:
        line = '%d' % error.token.line_number
    error_code = '%04d' % error.code
    if new_error:
        error_code = 'New Error ' + error_code
    return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
5689b28ea0475f08802c72a48b733e4625e6d9d5
35,392
def left_child_index(i):
    """
    :param i: int
        Index of node in array (that is organized as heap)
    :return: int
        Position in array of left child of node
    """
    return 2 * (i + 1) - 1
323106fbc8aa1a12144bde5c6e43cd6870c6b1da
35,394
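Index arithmetic for left_child_index on a 0-indexed heap array: 2 * (i + 1) - 1 = 2i + 1.

print(left_child_index(0))   # 1, the root's children live at 1 and 2
print(left_child_index(2))   # 5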
def add(bowl_a, bowl_b):
    """Return bowl_a and bowl_b added together"""
    return bowl_a + bowl_b
e12bb4d5f4d21f4ae113f064d62d0db2ea6f8014
35,396
from typing import List, Tuple

def _get_value_name_and_type_from_line(*, line: str) -> Tuple[str, str]:
    """
    Get a parameter or return value and type from a specified line.

    Parameters
    ----------
    line : str
        Target docstring line.

    Returns
    -------
    value_name : str
        Target parameter or return value name.
    type_name : str
        Target parameter or return value type name.
    """
    if ':' not in line:
        return '', ''
    splitted: List[str] = line.split(':', maxsplit=1)
    value_name: str = splitted[0].strip()
    type_name: str = splitted[1].strip()
    return value_name, type_name
d2095fa2bc34a7086f60b40373a351f7c984dc96
35,399
def cut_tag_preserve(tags, tag):
    """
    Cuts a tag from a list of tags without altering the original.
    """
    tag_list = tags[:]
    tag_list.remove(tag)
    return ",".join(tag_list)
691412fc466498413c32edd30eaca2d677d7e35a
35,405
def is_related(field):
    """
    Test if a given field is a related field.

    :param DjangoField field: A reference to the given field.

    :rtype: boolean
    :returns: A boolean value that is true only if the given field is related.
    """
    return 'django.db.models.fields.related' in field.__module__
fff8bbc5f945e7f0ee576e4b501b2c9ca808541d
35,409
import torch

def attention_score(att, mel_lens, r=1):
    """
    Returns a tuple of scores (loc_score, sharp_score), where loc_score measures
    monotonicity and sharp_score measures the sharpness of attention peaks
    """
    with torch.no_grad():
        device = att.device
        mel_lens = mel_lens.to(device)
        b, t_max, c_max = att.size()

        # create mel padding mask
        mel_range = torch.arange(0, t_max, device=device)
        mel_lens = mel_lens // r
        mask = (mel_range[None, :] < mel_lens[:, None]).float()

        # score for how adjacent the attention loc is
        max_loc = torch.argmax(att, dim=2)
        max_loc_diff = torch.abs(max_loc[:, 1:] - max_loc[:, :-1])
        loc_score = (max_loc_diff >= 0) * (max_loc_diff <= r)
        loc_score = torch.sum(loc_score * mask[:, 1:], dim=1)
        loc_score = loc_score / (mel_lens - 1)

        # score for attention sharpness
        sharp_score, inds = att.max(dim=2)
        sharp_score = torch.mean(sharp_score * mask, dim=1)
        return loc_score, sharp_score
ccdce864a91c9816143f414c2cde99b5f67c89c4
35,411
def sort_lists(reference, x):
    """
    Sorts elements of lists `x` by sorting `reference`
    Returns sorted zip of lists
    """
    # sort the zip of lists, keying on the first element of each tuple,
    # i.e. pair[0] = (reference[i], x[i])[0] = reference[i]
    if isinstance(x[0], list):
        s = sorted(zip(reference, *x), key=lambda pair: pair[0])
    else:
        s = sorted(zip(reference, x), key=lambda pair: pair[0])
    return zip(*s)
581e01f21902a029570dc45e5c6401696b80ee15
35,412
def pos_of(values, method):
    """Find the position of a value that is calculated by the given method.
    """
    result = method(values)
    pos = [i for i, j in enumerate(values) if j == result]
    if len(pos) > 1:
        print('Warning: The %s of the list is not distinct.' % (method,))
    return pos[0]
5300511a1dd8f76de51f58021cf011a4aa5ca757
35,416
def crop_frame(img, width, height, x, y):
    """
    Returns a crop of image (frame) based on specified parameters

    Parameters
    ----------
    img : array, required
        Array representing an image (frame)
    width : int, required
        Width of the crop
    height : int, required
        Height of the crop
    x : int, required
        X position of the crop
    y : int, required
        Y position of the crop

    Returns
    -------
    array
        Cropped image (frame) based on input parameters
    """
    img = img[y:y + height, x:x + width]
    return img
1cbc34aed421cf67cef57b8fb53b47015911f701
35,420
import math

def sigmoid(x):
    """
    sigmoid function

    Args:
        x: number

    Returns:
        sigmoid(number)
    """
    return 1 / (1 + math.exp(-x))
3f4fc16c99af2cdf71aea9f108382e6464f50e6f
35,421
def parse_releases(content):
    """
    Parse latest releases of a manga

    Parameters
    ----------
    content : BeautifulSoup
        BeautifulSoup object of the releases page content.

    Returns
    -------
    releases : list of dicts
        List of latest releases of a manga.
        List is ordered latest-to-oldest
        ::

            [
                {
                    'chapter': 'chapter number',
                    'vol': 'volume number' or None,
                    'series': {
                        'name': 'Manga Name',
                        'id': 'Manga ID'
                    },
                    'group': {
                        'name': 'Scanlation Group Name',
                        'id': 'Scanlation Group ID'
                    }
                }
            ]
    """
    releases = content.find_all("div", class_="text", recursive=False)[:-1]
    results = []
    for i in range(0, len(releases), 5):
        release = {}
        release["series"] = {
            "name": releases[i + 1].get_text(),
            "id": releases[i + 1]
            .a["href"]
            .replace("https://www.mangaupdates.com/series.html?id=", ""),
        }
        vol = releases[i + 2].get_text()
        release["vol"] = vol if vol else None
        release["chapter"] = releases[i + 3].get_text()
        release["group"] = {
            "name": releases[i + 4].get_text(),
            "id": releases[i + 4]
            .a["href"]
            .replace("https://www.mangaupdates.com/groups.html?id=", ""),
        }
        results.append(release)
    return results
9f1e99cf0e6e96cb60c6764c68052e94d341d196
35,423
def extract_instance_name(url):
    """Given instance URL returns instance name."""
    return url.rsplit('/', 1)[-1]
333c6f12ae44b0a5de0ed80d9e01c87dffeb18d2
35,430
def salt_run_cli(salt_master):
    """
    Override salt_run_cli fixture to provide an increased default_timeout to the calls
    """
    return salt_master.salt_run_cli(timeout=120)
10d2d04d83ae747e0898a34823d3403a374381a9
35,434
def getDaySuffix(day):
    """Return st, nd, rd, or th for supplied day."""
    if 4 <= day <= 20 or 24 <= day <= 30:
        return 'th'
    return ['st', 'nd', 'rd'][day % 10 - 1]
4023b97164cbb7c73a1a3ddb2a3527fa2c297a1d
35,439
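Ordinal examples for getDaySuffix:

print(getDaySuffix(1), getDaySuffix(2), getDaySuffix(3))     # st nd rd
print(getDaySuffix(11), getDaySuffix(21), getDaySuffix(31))  # th st st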
import bisect

def find_le(array, x):
    """Find rightmost value less than or equal to x,
    i.e. the largest value satisfying value <= x.

    Example::

        >>> find_le([0, 1, 2, 3], 2.0)
        2
    """
    i = bisect.bisect_right(array, x)
    if i:
        return array[i - 1]
    raise ValueError
dfa67df6fadbdead10821c4aceb027b0c5f5d90a
35,440
def flatten(array: list):
    """Flatten nested list to a single list"""
    return [item for sublist in array for item in sublist]
65692535197b946d5d5a39e657ff07c3424e7652
35,441
def _transaction_sort_key(txn):
    """
    If there are payments and invoices created on the same day we want to
    make sure to process the payments first and then the invoices.

    Otherwise the transaction history on an invoice will look like a drunk
    cow licked it, that's bad.

    :param txn:
    :return: sort key
    """
    txn_date = txn.transacted_on.isoformat()[:10]
    type_weight = "0" if txn.transaction_type == txn.PAYMENT else "1"
    txn_id = str(txn.id)  # sort multiple invoices on same day by primary key id
    return txn_date + type_weight + txn_id
c6e74a6c87ec7c46dba0507bf144bfddac954e3c
35,447
def cleanup_column_names(df, rename_dict={}, do_inplace=True):
    """This function renames columns of a pandas dataframe.
    It converts column names to snake case if rename_dict is not passed.

    Args:
        rename_dict (dict): keys represent old column names and values point to newer ones
        do_inplace (bool): flag to update existing dataframe or return a new one

    Returns:
        pandas dataframe if do_inplace is set to False, None otherwise
    """
    if not rename_dict:
        return df.rename(columns={col: col.lower().replace(' ', '_')
                                  for col in df.columns.values.tolist()},
                         inplace=do_inplace)
    return df.rename(columns=rename_dict, inplace=do_inplace)
2ee32d7742c20b9eeeffb9481d940f1e4c036937
35,451
import math

def returnNewDelta(delta_old):
    """returns the side of the new polygonal approximation.

    Arguments:
    delta_old -- the side of the previous approximation
    """
    return math.sqrt(2. * (1. - math.sqrt(1. - 0.25 * delta_old**2)))
7ffececdc6affb3b7f9aa215415077bc14baa9e3
35,452
def max_contig_sum(L):
    """ L, a list of integers, at least one positive
    Returns the maximum sum of a contiguous subsequence in L """
    sizeL = len(L)
    max_so_far, max_ending_here = 0, 0
    for i in range(sizeL):
        max_ending_here += L[i]
        if max_ending_here < 0:
            max_ending_here = 0
        elif max_so_far < max_ending_here:
            max_so_far = max_ending_here
    return max_so_far
356c87a66ff1071e729e3de379550a0a61a2f4f4
35,457
def test_module(unifi_session):
    """
    Test Module for Demisto

    :param unifi_session: Unifi Session from Unifi class
    :return: Return 'ok' if success, False otherwise
    """
    result = False
    if unifi_session.base_url:
        result = "ok"
    return result
e375b8f673499c093cdfb691e154e5f7ba9f1737
35,462
import requests
import json

def post_request(url, json_payload, **kwargs):
    """
    post_request function is used to send a POST request in order to interact
    with remote services through rest apis

    url: url to the remote service
    json_payload: json object containing remote service input parameters
    kwargs: additional parameters to be specified such as 'cp_cl_api_key' to authenticate
    """
    try:
        if 'cp_cl_api_key' in kwargs:
            # prepare url to send a post request
            url = url + "/post"
            # prepare headers
            headers = {'Content-Type': 'application/json',
                       'cp_api_key': kwargs['cp_cl_api_key']}
            # send request
            print(json_payload)
            response = requests.post(url=url, headers=headers, json=json_payload)
        else:
            # no service key has been specified
            print("no cp_cl_api_key has been specified")
            return {}
    except requests.RequestException:
        # if any network error occurs, report it and return an empty result
        print("Network exception occurred with POST request!!!")
        return {}
    status_code = response.status_code
    print("post_request: received response with status code {}".format(status_code))
    json_data = json.loads(response.text)
    return json_data
8c9de68163df2c056f317aafd6affc79e49ce2cc
35,465
def tokenise_table_name(table_name):
    """Given a feature class or feature dataset name, returns the schema (optional) and simple name"""
    dot_count = table_name.count(".")
    if dot_count == 2:
        dot_pos = [pos for pos, char in enumerate(table_name) if char == "."]
        return {
            "database": table_name[:dot_pos[0]],
            "schema": table_name[dot_pos[0] + 1:dot_pos[1]],
            "name": table_name[dot_pos[1] + 1:]
        }
    elif dot_count == 1:
        return {
            "database": None,
            "schema": table_name[:table_name.index(".")],
            "name": table_name[table_name.index(".") + 1:]
        }
    else:
        return {"database": None, "schema": None, "name": table_name}
7664bd8099cea88c8d0313c62ce5d4da38fcf947
35,467
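Example outputs for tokenise_table_name for zero, one, and two dots ('prod', 'gis', and 'parcels' are hypothetical names):

print(tokenise_table_name('parcels'))
# {'database': None, 'schema': None, 'name': 'parcels'}
print(tokenise_table_name('gis.parcels'))
# {'database': None, 'schema': 'gis', 'name': 'parcels'}
print(tokenise_table_name('prod.gis.parcels'))
# {'database': 'prod', 'schema': 'gis', 'name': 'parcels'}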
def ConvertTokenToInteger(string, location, tokens):
    """Pyparsing parse action callback to convert a token into an integer value.

    Args:
        string (str): original string.
        location (int): location in the string where the token was found.
        tokens (list[str]): tokens.

    Returns:
        int: integer value or None.
    """
    try:
        return int(tokens[0], 10)
    except ValueError:
        pass
f0332c672156bb95b0d14d0af94d197464ab70a0
35,472
def midpoint(imin, imax):
    """Returns middle point

    >>> midpoint(0, 0)
    0
    >>> midpoint(0, 1)
    0
    >>> midpoint(0, 2)
    1
    >>> midpoint(1, 1)
    1
    >>> midpoint(1, 2)
    1
    >>> midpoint(1, 5)
    3
    """
    # integer division, so the doctests above yield ints rather than floats
    middle_point = (int(imin) + int(imax)) // 2
    return middle_point
389058bb50f0e1f3d31498edcac2469a97545a3f
35,474
def get_num_image_channels(module_or_spec, signature=None, input_name=None):
    """Returns expected num_channels dimensions of an image input.

    This is for advanced users only who expect to handle modules with
    image inputs that might not have the 3 usual RGB channels.

    Args:
        module_or_spec: a Module or ModuleSpec that accepts image inputs.
        signature: a string with the key of the signature in question.
            If None, the default signature is used.
        input_name: a string with the input name for images. If None, the
            conventional input name `images` for the default signature is used.

    Returns:
        An integer with the number of input channels to the module.

    Raises:
        ValueError: If the channel information is missing or malformed.
    """
    if input_name is None:
        input_name = "images"
    input_info_dict = module_or_spec.get_input_info_dict(signature)
    try:
        shape = input_info_dict[input_name].get_shape()
    except KeyError:
        raise ValueError("Module is missing input '%s' in signature '%s'." %
                         (input_name, signature or "default"))
    try:
        _, _, _, num_channels = shape.as_list()
        if num_channels is None:
            raise ValueError
    except ValueError:
        raise ValueError(
            "Shape of module input is %s, "
            "expected [batch_size, height, width, num_channels] "
            "with known num_channels" % shape)
    return num_channels
ade88e40833749b48461d7f996ab8824258ad7df
35,475
import warnings

def check_case(name):
    """
    Check if the name given as parameter is in upper case and convert it to
    upper case if not, warning about the mixed-case input.
    """
    if name != name.upper():
        warnings.warn("Mixed case names are not supported in database object names.",
                      UserWarning)
    return name.upper()
8e973c6c087e3bb9b3abd076e2d97b3770f3589c
35,477
from typing import List

def listToCSV(lst: List) -> str:
    """
    Changes a list to csv format

    >>> listToCSV([1,2,3])
    '1,2,3'
    >>> listToCSV([1.0,2/4,.34])
    '1.0,0.5,0.34'
    """
    strings = ""
    for a in lst:
        strings += str(a) + ","
    strings = strings[0:len(strings) - 1]
    return strings
89fc272c4b9fc0a3a406f67d7b655b2c72755d07
35,479
def fibonacci(n):
    """
    This function returns the Nth Fibonacci number.

    >>> fibonacci(3)
    1
    >>> fibonacci(10)
    34

    The input value can only be an integer, but integers lesser than or equal
    to 0 are invalid, since the series is not defined in these regions.
    """
    if n <= 0:
        return "Incorrect input."
    elif n == 1:
        return 0
    elif n == 2:
        return 1
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
165a6bf1fc73d24e0b3c25599040c26a56afdcd9
35,480
def _CreatePatchInstanceFilter(messages, filter_all, filter_group_labels,
                               filter_zones, filter_names,
                               filter_name_prefixes):
    """Creates a PatchInstanceFilter message from its components."""
    group_labels = []
    for group_label in filter_group_labels:
        pairs = []
        for key, value in group_label.items():
            pairs.append(
                messages.PatchInstanceFilterGroupLabel.LabelsValue.AdditionalProperty(
                    key=key, value=value))
        group_labels.append(
            messages.PatchInstanceFilterGroupLabel(
                labels=messages.PatchInstanceFilterGroupLabel.LabelsValue(
                    additionalProperties=pairs)))
    return messages.PatchInstanceFilter(
        all=filter_all,
        groupLabels=group_labels,
        zones=filter_zones,
        instances=filter_names,
        instanceNamePrefixes=filter_name_prefixes,
    )
7692812fe66b8db42bd76550281c7751d7648b1c
35,481
def filter_on_cdr3_length(df, max_len):
    """
    Only take sequences that have a CDR3 of at most `max_len` length.
    """
    return df[df['amino_acid'].apply(len) <= max_len]
abe853dd0a5baeb3a178c41de9b15b04912e2033
35,494
def calc_array_coverage(solid_angle, number_of_bars):
    """
    Calculate the coverage for an entire array

    :param solid_angle: The solid angle in sr
    :param number_of_bars: The number of solids in the array.
    :return: The total solid angle covered by the array.
    """
    return number_of_bars * solid_angle
1d071116dea5f6a9e91c37168681b5386df0ac76
35,495
import mmap
import pickle

def load(path):
    """Load serialized object with out-of-band data from path based on zero-copy shared memory.

    Parameters
    ----------
    path : pathlib.Path
        Folder used to save serialized data with serialize().
        Usually a folder /dev/shm
    """
    num_buffers = len(list(path.iterdir())) - 1  # exclude meta.pkl
    buffers = []
    for idx in range(num_buffers):
        f = open(path / f'{idx}.bin', 'rb')
        buffers.append(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ))
    with open(path / 'meta.pkl', 'rb') as f:
        return pickle.load(f, buffers=buffers)
6c0ccc1d4941a6073b9f005774f53e3428dfc276
35,496
import torch

def secondary_sequence_metrics(x, ss_num=7):
    """
    Compute metrics associated with secondary structure.

    It counts how many times a secondary structure appears in the proteins,
    and the median length of a run of one secondary structure, e.g. H, H, ..., H.

    Parameters
    ----------
    x : torch.Tensor
        The node features.
    ss_num : int, optional
        The number of secondary structures. The default is 7.

    Returns
    -------
    scores : torch.Tensor
        The score having the following format:
        [SS_1_count, ..., SS_ss_num_count, median_seq_len]
    """
    # Get number of amino acids
    n = x.shape[0]
    # Initialize scores
    ss_counts, seq_len = [0] * ss_num, []
    current_seq_len, current_ss = 0, -1
    # Compute the scores
    for i in range(n):
        ss = int(x[i, 1])
        ss_counts[ss - 1] += 1
        if current_ss == -1:
            current_seq_len += 1
            current_ss = ss
        elif ss != current_ss:
            seq_len.append(current_seq_len)
            current_seq_len = 1
            current_ss = ss
        else:
            current_seq_len += 1
    seq_len.append(current_seq_len)  # last one to add
    ss_counts = [c / n for c in ss_counts]
    seq_len = float(torch.median(torch.tensor(seq_len)))
    scores = torch.tensor(ss_counts + [seq_len])
    return scores
8ddc8df86438e0dee3aec45c2e573f4b39cca114
35,499
import re

def clean_text(text):
    """
    Remove code blocks, urls, and html tags.
    """
    text = re.sub(r'<code[^>]*>(.+?)</code\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
    text = re.sub(r'<div[^>]*>(.+?)</div\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
    text = re.sub(r'<blockquote[^>]*>(.+?)</blockquote\s*>', '', text, flags=re.DOTALL | re.MULTILINE)
    text = re.sub('<.*?>', '', text)
    text = text.replace('&quot;', '"')
    text = re.sub(r'http\S+', '', text)
    text = re.sub(r'www.\S+', '', text)
    return text
91934ecd7e5d037be1198bc645da8e507b5955ce
35,503
def list_string_to_dict(string):
    """Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``."""
    dictionary = {}
    for idx, c in enumerate(string):
        dictionary.update({c: idx})
    return dictionary
0d9e3516e32bc69ee24d6afb19a7babcdba528f9
35,506
def bytes_leading(raw_bytes, needle=b'\x00'):
    """
    Finds the number of prefixed byte occurrences in the haystack.

    Useful when you want to deal with padding.

    :param raw_bytes: Raw bytes.
    :param needle: The byte to count. Default \\x00.
    :returns: The number of leading needle bytes.
    """
    leading = 0
    # Indexing keeps compatibility between Python 2.x and Python 3.x
    _byte = needle[0]
    for x in raw_bytes:
        if x == _byte:
            leading += 1
        else:
            break
    return leading
f57a4eef0bbf28df31c5a1f49d3e681f056403a9
35,512
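Padding examples for bytes_leading:

print(bytes_leading(b'\x00\x00\x07hi'))            # 2
print(bytes_leading(b'\xff\x00', needle=b'\xff'))  # 1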
def ir(x):
    """
    Rounds a floating point number to the nearest integer and returns an integer.

    :param x: {float} num to round
    :return:
    """
    return int(round(x))
3ef85ede1dd773e2b9f67138a9448e9b47fd9610
35,522
def setup_with_context_manager(testcase, cm):
    """
    Use a contextmanager in a test setUp that persists until teardown.

    So instead of:

        with ctxmgr(a, b, c) as v:
            # do something with v that only persists for the `with` statement

    use:

        def setUp(self):
            self.v = setup_with_context_manager(self, ctxmgr(a, b, c))

        def test_foo(self):
            # do something with self.v
    """
    val = cm.__enter__()
    testcase.addCleanup(cm.__exit__, None, None, None)
    return val
e1996c9650f02c89e8516ca9ae030f1a50576eda
35,530
def point_interval(ref_features, sec_features, disp):
    """
    Computes the range of points over which the similarity measure will be applied

    :param ref_features: reference features
    :type ref_features: Tensor of shape (64, row, col)
    :param sec_features: secondary features
    :type sec_features: Tensor of shape (64, row, col)
    :param disp: current disparity
    :type disp: float
    :return: the range of the reference and secondary image over which the similarity measure will be applied
    :rtype: tuple
    """
    _, _, nx_ref = ref_features.shape
    _, _, nx_sec = sec_features.shape
    # range in the reference image
    left = (max(0 - disp, 0), min(nx_ref - disp, nx_ref))
    # range in the secondary image
    right = (max(0 + disp, 0), min(nx_sec + disp, nx_sec))
    return left, right
22f1477ec4ef86f343969f6316771f3ebb21d085
35,533
def _remove_pageoutline(text: str):
    """ Remove any TracWiki PageOutline directives """
    return text.replace('[[PageOutline]]', '')
72642413d11b5251c4f981a48ce7eb582cc9baf7
35,535
import tempfile

def get_temp_dir(prefix='tmp-cegr-', dir=None):
    """
    Return a temporary directory.
    """
    return tempfile.mkdtemp(prefix=prefix, dir=dir)
3abd323f97e72edb66d6bc00c6d08459a37f962a
35,537
def _cafec_coeff_ufunc(actual, potential):
    """
    Vectorized function for computing a CAFEC coefficient.

    :param actual: average value for a month from water balance accounting
    :param potential: average potential value from water balance accounting
    :return: CAFEC coefficient
    """
    # calculate alpha
    if potential == 0:
        if actual == 0:
            coefficient = 1
        else:
            coefficient = 0
    else:
        coefficient = actual / potential
    return coefficient
9e0b4201dff2acb6170b9719558da494f15ad6e7
35,538
import random
from typing import List

def sample_floats(low: float, high: float, k: int = 1) -> List[float]:
    """Return a k-length list of unique random floats in the range of low <= x <= high."""
    seen = set()
    for _ in range(k):
        x = random.uniform(low, high)
        while x in seen:
            x = random.uniform(low, high)
        seen.add(x)
    return list(seen)
9dcbef61809e1cfc3cc3748338137e4d48a95059
35,539
def escape_invalid_characters(name, invalid_char_list, replace_with='_'):
    """
    Remove invalid characters from a variable and replace them with the given character.

    A few chars are not allowed in asset displayname during import/export.
    Escape those chars with `replace_with` and return the clean name.

    Args:
        name (str): variable to escape chars from.
        invalid_char_list (list): Must be a list, and it should contain the
            chars to be removed from name.
        replace_with (str): Char used to replace invalid_char with.

    Returns:
        name (str): name without `invalid_char_list`.
    """
    for char in invalid_char_list:
        if char in name:
            name = name.replace(char, replace_with)
    return name
47359202f0cee82426d35ec5a85d315d96ece1d7
35,545
def _estimate_step_number(n_points: int, batch_size: int) -> int:
    """Estimates which step this is (or rather how many steps were collected
    previously, based on the ratio of the number of points collected and the
    batch size).

    Note that this method is provisional and may be replaced with a parameter
    in the config.

    Raises:
        ValueError if ``n_points`` or ``batch_size`` is less than 1
    """
    if min(n_points, batch_size) < 1:
        raise ValueError(
            f"Both n_points={n_points} and batch_size={batch_size} must be at least 1."
        )  # pragma: no cover
    return n_points // batch_size
c097140107c458f0517d9f616b20d88ef0268e15
35,547