Columns: content (string, 39 to 14.9k characters), sha1 (string, 40 characters), id (int64, 0 to 710k)
import binascii

def int_to_bytes(i: int):
    """
    Convert an integer to bytes.

    Args:
        i: Integer to convert

    Returns:
        bytes
    """
    width = i.bit_length()
    width += 8 - ((width % 8) or 8)
    fmt = '%%0%dx' % (width // 4)
    return b"\x00" if i == 0 else binascii.unhexlify(fmt % i)
be4bc40913fa0c61f117a1f4ec7b10402e413b6e
45,833
def path_decoder(url): """Grab the last component of a url as the path.""" components = url.split('/') if components[-1]: return components[-1] else: return components[-2]
d35112082facb7c378201cac44557c0628ba4563
45,835
def get_dir(path): """ Returns the directory of a file, or simply the original path if the path is a directory (has no extension) :param Path path: :return: Path """ extension = path.suffix if extension == '': return path else: return path.parent
ce40de2223de46be91ca28fba01c4153679a693c
45,837
def insert_output_start_stop_indicators(src):
    """
    Insert identifier strings so that output can be segregated from input.

    Parameters
    ----------
    src : str
        String containing input and output lines.

    Returns
    -------
    str
        String with output demarcated.
    """
    lines = src.split('\n')
    print_producing = [
        'print(',
        '.setup(',
        '.run_model(',
        '.run_driver(',
        '.check_partials(',
        '.check_totals(',
        '.list_inputs(',
        '.list_outputs(',
        '.list_problem_vars(',
    ]

    newlines = []
    input_block_number = 0
    in_try = False
    in_continuation = False
    head_indent = ''
    for line in lines:
        newlines.append(line)

        # Check if we are concluding a continuation line.
        if in_continuation:
            line = line.rstrip()
            if not (line.endswith(',') or line.endswith('\\') or line.endswith('(')):
                newlines.append('%sprint(">>>>>%d")' % (head_indent, input_block_number))
                input_block_number += 1
                in_continuation = False

        # Don't print if we are in a try block.
        if in_try:
            if 'except' in line:
                in_try = False
            continue

        if 'try:' in line:
            in_try = True
            continue

        # Searching for 'print(' is a little ambiguous.
        if 'set_solver_print(' in line:
            continue

        for item in print_producing:
            if item in line:
                indent = ' ' * (len(line) - len(line.lstrip()))

                # Line continuations are a little tricky.
                line = line.rstrip()
                if line.endswith(',') or line.endswith('\\') or line.endswith('('):
                    in_continuation = True
                    head_indent = indent
                    break

                newlines.append('%sprint(">>>>>%d")' % (indent, input_block_number))
                input_block_number += 1
                break

    return '\n'.join(newlines)
162724502e581901629bae764a235ca0f71e0c61
45,839
def get_images(directory): """ Gets all PNG, JPG, GIF, and BMP format files at path :param Path directory: location to search for image files """ files = [] patterns = ('*.png', '*.jpg', '*.gif', '*.bmp') for pattern in patterns: files.extend(directory.glob(pattern)) return files
e6c599814d5d31de5a64e502d88f40996d49888f
45,841
def prepare_table_rows(rows): """Given a list of lists, make sure they are prepared to be formatted into a table by making sure each row has the same number of columns and stringifying all values.""" rows = [list(map(str, r)) for r in rows] max_cols = max(list(map(len, rows))) for row in rows: pad = max_cols - len(row) for _ in range(pad): row.append('') return rows
8f4730d08400a88234e9ac0137d58d255e1fbddb
45,842
import re

def replace_all(src_string, regex, replacement):
    """
    Replace each occurrence of regular expression match in the first string
    with the replacement string and return the replaced string. A 'regex'
    string with null value is considered as no change. A 'replacement' string
    with null value is considered as an empty string.
    """
    if not regex:
        return src_string
    if not replacement:
        replacement = ""
    return re.sub(regex, replacement, src_string)
410e6c8f333e9776daad08c9bbbd39831640d350
45,844
def parse_kid(line, kid): """ parses a "@ ale01.cha 1;4.28" line into name/month ("ale", 16) """ tmp = line.split('\t') if len(tmp) < 3 or not (".cha" in tmp[1] or ".txt" in tmp[1]): return kid else: date = tmp[2].split(';') return (tmp[1][:3], int(date[0]) * 12 + int(date[1].split('.')[0]))
3fe1af7f741cb27eb36159ed211049e6e14f67c2
45,845
import re

def make_filename(url):
    """
    Extracts the domain from a url and appends '.html'

    :param url: string
    :return: <domain>.html string
    """
    rx = re.compile(r'^https?:\/\/(?:www.)?([^\/]+)\/?')
    m = rx.search(url)
    if m:
        return m[1] + '.html'
    else:
        print(f'Can not get domain from {url}')
        exit(0)
12ce1ec1acb77c2147a361f563c051edf6f73e83
45,848
import ast

def _log_function(item):
    """
    Handler function for log message types.

    :param item: ast object being inspected for certain properties
    :type item: :class:`~ast.AST`
    :return: Returns the descriptor and arg offset of the item.
    :rtype: tuple (str, int)
    """
    level_arg = item.args[0]
    if isinstance(level_arg, ast.Str):
        level = level_arg.s.lower()
    else:
        any_call_or_name_elements = any(
            isinstance(element, (ast.Call, ast.Name))
            for element in ast.walk(level_arg)
        )
        if any_call_or_name_elements:
            level = "(dynamic)"
        else:
            level = ', '.join(
                element.s.lower()
                for element in ast.walk(level_arg)
                if isinstance(element, ast.Str)
            )
    integer_arg_offset = 1
    return level, integer_arg_offset
cd7f798b5e28d1835d270e8d2b64b73faef4f853
45,850
def obtainLigandIndexes(trajectory, ligand):
    """
    Extract the indexes for the ligand in the trajectory

    :param trajectory: mdtraj trajectory
    :param ligand: name of the ligand

    :return: list of the atom indexes of the heavy atoms of the ligand
    """
    residueIndexes = []
    for residue in trajectory.topology.residues:
        if residue.name == ligand:
            for atom in residue.atoms:
                if "H" not in atom.name:
                    residueIndexes.append(atom.index)
    return residueIndexes
aa3f7f14afdff21e0e7b56ae69294e48e82d9a36
45,858
def swir_exp_time(int_delay: int, int_hold: int) -> float:
    """
    Parameters
    ----------
    int_delay : int
        parameter int_delay from the instrument_settings
    int_hold : int
        parameter int_hold from the instrument_settings

    Returns
    -------
    float
        exact (SWIR) pixel exposure time
    """
    return 1.25e-6 * (65540 - int_delay + int_hold)
9022f18bec5aecb35e8992f7cd0cbfe9a3cb84a2
45,864
def get_dot_product(first_vector, second_vector):
    """ (dict, dict) -> number

    The function takes two dictionaries representing vectors as input.
    It returns the dot product between the two vectors.

    >>> v1 = {'a' : 3, 'b': 2}
    >>> v2 = {'a': 2, 'c': 1, 'b': 2}
    >>> get_dot_product(v1, v2)
    10

    >>> v3 = {'a' : 5, 'b': 3, 'c' : 3}
    >>> v4 = {'d': 1}
    >>> get_dot_product(v3, v4)
    0

    >>> v5 = {}
    >>> v6 = {}
    >>> get_dot_product(v5, v6)
    0

    >>> v7 = {'a' : 2, 'b' : -2, 'c' : -4}
    >>> v8 = {'a' : 1, 'b' : 3, 'c' : 2}
    >>> get_dot_product(v7, v8)
    -12
    """
    # initialize dot product variable
    dot_product = 0

    # compute product for values whose key is in both vectors
    for key in first_vector:
        if key in second_vector:
            # add product to the sum
            product = first_vector[key] * second_vector[key]
            dot_product += product

    # return sum of products (dot product)
    return dot_product
52898081b331484c4dd8d964ad29334e8db96a6f
45,866
from typing import Iterable from typing import Any def format_like_tuple( values: Iterable[Any], ) -> str: """ Formats iterable into tuple-like format that is readable for human being. :param values: values to be formatted """ return ", ".join((repr(value) for value in values))
f05e0edafd21a62fd2e7c010a9f2b9720256b3db
45,868
def parse_pct(value): """ Parse percentage """ return float(value)/100
30694babef99a5a40a1f0037cfccced98bc153d1
45,869
def files_data_generation(candidates, namesmeasures): """Select all possible files that are related with the given measures.""" selected = [] for name in namesmeasures: sele_name = [] for i in range(len(candidates)): if name in candidates[i]: sele_name.append(candidates[i]) selected.append(sele_name) return selected
85c42a84c7f2405575bef546210c7fc88022bcce
45,870
def insert_init(shots, predist=100, dxmax=200, xcol='pos_x', initpoints=1):
    """
    Insert initialization frames into scan list, to mitigate hysteresis and beam tilt streaking when
    scanning along x. Works by inserting a single frame each time the x coordinate decreases (beam moves left)
    or increases by more than dxmax (beam moves too quickly). The initialization frame is taken to the left
    of the position after the jump by predist pixels. Its crystal_id and frame columns are set to -1.

    :param shots: initial scan list. Note: if you want to have multiple frames, you should always first run set_frames
    :param predist: distance of the initialization shot from the actual image along x
    :param dxmax: maximum allowed jump size (in pixels) to the right.
    :param xcol: name of x position column
    :param initpoints: number of initialization points added
    :return: scan list with inserted additional points
    """
    def add_init(sh1):
        initline = sh1.iloc[:initpoints, :].copy()
        initline['crystal_id'] = -1
        initline['frame'] = -1
        if predist is not None:
            initline[xcol] = initline[xcol] - predist
        else:
            initline[xcol] = 0
        return initline.append(sh1)

    dx = shots[xcol].diff()
    grps = shots.groupby(by=((dx < 0) | (dx > dxmax)).astype(int).cumsum())
    return grps.apply(add_init).reset_index(drop=True)
029f1431890940b8f4345cfb400a64ca24d3611e
45,871
def UsersInvolvedInAmendments(amendments): """Return a set of all user IDs mentioned in the given Amendments.""" user_id_set = set() for amendment in amendments: user_id_set.update(amendment.added_user_ids) user_id_set.update(amendment.removed_user_ids) return user_id_set
e9543bcbd678758fc9f03bf35b3e0429d1202544
45,880
def create_list_of(class_, objects): """Return a list of model objects of class `class_` from list of object metadata `objects`""" return [class_.from_metadata(obj) for obj in objects]
abb91c36fd2faeb8bbb1cd785d417757f75352bd
45,884
def has_min_length(entities, length): """Check if a term has the min required length.""" return entities.apply(lambda x: len(x) > length - 1)
7406b1d0d2a776b12ac89b624669d739a4831ffa
45,888
import pwd def username_from_uid(uid: int) -> str: """Convert a UID to a username.""" try: username = pwd.getpwuid(uid).pw_name.replace(".", "__") except KeyError: username = f"__unknown_uid_{uid}__" return username
ae400888f3c89f8b26c413138c42b430285fe6f9
45,891
from typing import List
import re
import click

def check_descriptors(ctx, param, value: List[str]) -> List[str]:
    """
    Check format of each MeSH descriptor passed as command line argument.
    Raise exception if any has incorrect format.

    :param ctx: required for click callback feature
    :param param: required for click callback feature
    :param value: tuple of MeSH descriptors as written on command line
    :return: value if all descriptors are correctly formatted
    """
    for des in value:
        if not re.fullmatch(r'D(\d{6}|\d{9})', des):
            raise click.BadParameter(
                'Descriptor %s incorrect, should be D followed by 6 or 9 digits' % des,
                param_hint='MeSH DESCRIPTORS')
    return value
f2c1d25fe793c7f8d3dabdd72a30bb4be27a8dd6
45,897
def short(s): """return the first part of the string s up to a number.""" result = [] for d in list(s): if d.isdigit(): break else: result.append(d) return ''.join(result)
467123994284838ed3d6bc4fde99cc4ef54e3645
45,898
def _AddFirmwareIdTag(image, id_name='RO_FRID'): """Returns firmware ID in '#NAME' format if available.""" if not image.has_section(id_name): return '' id_stripped = image.get_section(id_name).decode('utf-8').strip(chr(0)) if id_stripped: return '#%s' % id_stripped return ''
e070a445b9250208366a7eb43612f8d8650534d1
45,902
def compile_drop_materialized_view(element, compiler, **kw): """ Formats and returns the drop statement for materialized views. """ text = "DROP MATERIALIZED VIEW {if_exists}{name}{cascade}" if_exists = "IF EXISTS " if element.if_exists else "" cascade = " CASCADE" if element.cascade else "" return text.format(if_exists=if_exists, name=element.name, cascade=cascade)
1bedf4115edeaf33e96c7ff57b29e8809d8489c8
45,903
import re def get_header_line(headr, proprty): """ :param headr: the header of the RINEX-file :param proprty: string-like property to search for (e.g. 'delta-utc') :return: the string of the ``headr`` containing ``property`` """ pattern = re.compile(proprty, re.IGNORECASE) for d in headr: if pattern.search(d): return d
7c0a98156a4dda2ea24190c7516e488b3c162b5d
45,904
def get_file_type(file_name):
    """
    returns file type of given file name as string.
    For example, for a pdf file "pdf" will be returned
    """
    for char in file_name:  # checking if given path instead of file
        if char == "/" or char == "\\":
            return "ERROR IN FUNCTION 3: FILE PATH GIVEN INSTEAD OF FILE NAME"
    for i in range(len(file_name)):
        j = len(file_name) - 1 - i
        if file_name[j] == ".":
            return file_name[j + 1:]
    return "ERROR: NO ENDING GIVEN TO FILE IN FUNCTION 3"
7ae544794a07b69710c6784ca389fa82a751ea66
45,907
def get_player_moves(game: tuple, player: int): """ Returns the sequence of moves of the given player """ moves = [] for i in range(player, len(game) - 1, 2): moves.append((game[i], game[i + 1])) return moves
492327399ae39c8a3378730903dab422d2c3d7f5
45,916
def floyd_warshall(graph):
    """
    All Pairs Shortest Path problem.
    The problem is to find shortest distances between every pair of
    vertices in a given edge weighted directed Graph
    """
    n = len(graph[0])
    dist = [[0] * n for i in range(n)]

    # Initialize the solution matrix same as input graph matrix
    for i in range(n):
        for j in range(n):
            dist[i][j] = graph[i][j]

    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]

    return dist
1b9ea3058fa791f89e4b3f60b10df6a7e22b1ea0
45,924
import math

def discretize_state(state):
    """Discretizes continuous state space to discrete state space with 14 physical values

    Args:
        state: current continuous state space

    Returns:
        discretized state space
    """
    # discretize observation space
    # https://github.com/openai/gym/wiki/BipedalWalker-v2
    obs_state_bounds = [
        (0, math.pi),  # hull_angle
        (-2, 2),       # hull_angular_velocity
        (-1, 1),       # vel_x
        (-1, 1),       # vel_y
        (0, math.pi),  # hip_joint_1_angle
        (-2, 2),       # hip_joint_1_speed
        (0, math.pi),  # knee_joint_1_angle
        (-2, 2),       # knee_joint_1_speed
        (0, 1),        # leg_1_ground_contact_flag
        (0, math.pi),  # hip_joint_2_angle
        (-2, 2),       # hip_joint_2_speed
        (0, math.pi),  # knee_joint_2_angle
        (-2, 2),       # knee_joint_2_speed
        (0, 1),        # leg_2_ground_contact_flag
    ]

    # create an empty obs_discrete_state array to store converted discrete state array
    obs_discrete_state = []
    for i in range(len(state)):
        converted_i = int(
            (state[i] - obs_state_bounds[i][0])
            / (obs_state_bounds[i][1] - obs_state_bounds[i][0])
            * 19  # 19 is arbitrary integer
        )
        obs_discrete_state.append(converted_i)

    # convert collected discrete state array into tuple to maintain same shape
    ds = tuple(obs_discrete_state)
    return ds
547dc5176a2d6ad0708279295f1abba76b1cc642
45,928
def distance(x1, y1, x2, y2):
    """
    This calculates the distance between (x1, y1) and (x2, y2)
    """
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
da73bc44cc69e01d41a32a1779cd50ea777f6fd3
45,941
def prep_prony_ANSYS(df_prony, prony, E_0=None):
    """
    Prepare ANSYS Prony series parameters for further processing.

    The ANSYS curve fitting routine for viscoelastic materials only stores
    the Prony series parameters ('tau_i', 'alpha_i') in the material card file.
    To calculate the master curve from the Prony series parameters the
    instantaneous modulus and frequency range are required and added to the
    dataframe of the ANSYS Prony series parameters.

    Parameters
    ----------
    df_prony : pandas.DataFrame
        Contains the ANSYS Prony series parameter.
    prony : dict
        Contains the Python Prony series parameter
    E_0 : float, default = None
        Instantaneous storage modulus; for either tensile (E_0) or shear (G_0)
        loading. If E_0 is not provided the instantaneous storage modulus
        identified during the Python curve fitting process will be used to
        create the master curve with the ANSYS Prony series parameters.

    Returns
    -------
    prony_ANSYS : dict
        Contains the ANSYS Prony series parameter in the same format as the
        Python implementation provides (see 'prony' Parameter above).
    """
    m = prony['modul']
    if E_0 is None:
        E_0 = prony['E_0']  # use same estimate as Python curve fitting
    f_min = prony['f_min']  # use same frequency range as Python curve fitting
    f_max = prony['f_max']

    prony_ANSYS = {'E_0': E_0, 'df_terms': df_prony, 'f_min': f_min,
                   'f_max': f_max, 'label': 'ANSYS', 'modul': m}
    return prony_ANSYS
ab2f86e9d42ea78c7849d9db877892d22e47666e
45,946
def grouper(n, L): """Group a flat list into a list of tuples of size n.""" # src: http://stackoverflow.com/questions/1624883/alternative-way-to-split-a-list-into-groups-of-n return list(zip(*[L[i::n] for i in range(n)]))
c052a4e98c90a36ff2afd811d49bd8ad9da8830b
45,947
def _cargo_home_path(repository_ctx): """Define a path within the repository to use in place of `CARGO_HOME` Args: repository_ctx (repository_ctx): The rules context object Returns: path: The path to a directory to use as `CARGO_HOME` """ return repository_ctx.path(".cargo_home")
1961fc9a13128340d57b942ebb2bbc6b12ed6eea
45,952
def get_subset(options, dataset, class_a, class_b): """ Returns a subset of the dataset that only contains the classes (class_a) and (class_b) """ x_train, y_train = dataset train_idx = (y_train == class_a) ^ (y_train == class_b) subset_train_x = x_train[train_idx] subset_train_y = y_train[train_idx] # relabel to +/-1 subset_train_y[subset_train_y == class_a] = -1 subset_train_y[subset_train_y == class_b] = 1 return subset_train_x, subset_train_y
0081af880de54aded079edb46207ba46f94e0ea4
45,955
import requests
from bs4 import BeautifulSoup

def page_soup(url):
    """
    Make BeautifulSoup of page

    Args:
        url:``str``
            url of page

    Returns:
        BeautifulSoup of page:``soup``
    """
    r = requests.get(url)
    if r.status_code == 200:
        soup = BeautifulSoup(r.content, "lxml")
        return soup
    else:
        raise Exception("Please check website. Error code {}.".format(r.status_code))
3ebbc59055b6034371f06455cb182d7b81eaca64
45,956
def largest_odd_times(L): """ Assumes L is a non-empty list of ints Returns the largest element of L that occurs an odd number of times in L. If no such element exists, returns None """ l = L.copy() l.sort() l.reverse() for x in l: if l.count(x) % 2: return x return None
280d071bebeec911efbcbbae2eb28090e2e7870e
45,960
import json def read_json(filename): """Read json file and return contents as dict.""" with open(filename) as f: return json.load(f)
f8e5bf4cc76de18099a3736e96141ac849aca050
45,967
def matchingByName(theDictionary, firstLetter):
    """Identifies students with a name starting with firstLetter.

    Assumes student names are capitalized.

    :param dict[str, str] theDictionary: key: locker number / value: student name or "open"
    :param str firstLetter: The target letter by which to identify students.
        Currently does not check for only a single letter.
    :return: The students with name starting with firstLetter
    :rtype: list[str]
    """
    studentsByName = []
    firstLetter = firstLetter.upper()
    for key in theDictionary:
        if theDictionary[key][0] == firstLetter:
            studentsByName.append(theDictionary[key])
    return studentsByName
8d492d93b58b7c4e7c2f8030fda9d01bd73d7352
45,973
from typing import List
from typing import Dict
from typing import Any

def build_params_dict(tags: List[str], attribute_type: List[str]) -> Dict[str, Any]:
    """
    Creates a dictionary in the format required by MISP to be used as a query.

    Args:
        tags: List of tags to filter by
        attribute_type: List of types to filter by

    Returns: Dictionary used as a search query for MISP
    """
    params: Dict[str, Any] = {
        'returnFormat': 'json',
        'type': {
            'OR': attribute_type if attribute_type else [],
        },
        'tags': {
            'OR': tags if tags else [],
        },
    }
    return params
0da2569aca037587a904c775330a684cef9d6f22
45,975
def compute_area(boxes): """ Computes the area of all the boxes. Returns: np.array: a vector with areas of each box. """ area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) return area
5cd1ca945d8ffcbf989d1a8d65e952b99366968e
45,976
def matrix_scalar(m, sc):
    """ Matrix multiplication by a scalar value (iterative algorithm).

    The running time of the iterative matrix multiplication algorithm is :math:`O(n^{2})`.

    :param m: input matrix
    :type m: list, tuple
    :param sc: scalar value
    :type sc: int, float
    :return: resultant matrix
    :rtype: list
    """
    mm = [[0.0 for _ in range(len(m[0]))] for _ in range(len(m))]
    for i in range(len(m)):
        for j in range(len(m[0])):
            mm[i][j] = float(m[i][j] * sc)
    return mm
71bd6d1cfaaeb959623e3836ab48ccf7543aaaba
45,978
from pathlib import Path def get_probes_path(params, split): """Return path to directory where probe sentences are stored.""" directory = Path(params.path_to_probes) path_p = f'propositions_{split}.json' return directory / path_p
80f7b6a98d6a714177f0ba8f1ee7105d499201fc
45,979
def parse_line_number(line_str): """ In a line of the format "<line_num>: <text>"or "<line_num> <text>" this grabs line_num. >>> parse_line_number('5: def parse_line_number(line_str):') '5' >>> parse_line_number('43 line = view.line(s)') '43' >>> parse_line_number('136: line_num = parse_line_number(line_str)') '136' """ parts = line_str.split() line_num = parts[0].strip().replace(':', '') return line_num
1c25916ee3baae164e28f7ca37642901787a92fe
45,984
def get_stft_shape(sample_rate, snippet_length, time_steps): """ Gets the shape for the Short-Time Fourier Transform matrix corresponding to a sample_rate, snippet_length, and time_steps :param sample_rate: the sample rate of the time signal in Hz :param snippet_length: the length of the signal in seconds :param time_steps: the number of time steps that the signal should be split into :returns: the shape of the matrix with dim time steps times number of frequencies :rtype: tuple(int, int) """ sample_length = snippet_length * sample_rate n_fft = (time_steps - 1) * 2 win_length = int(n_fft/4) return (time_steps, int(sample_length/win_length + 1))
baf8b0bff872716c2f227b7174f2b46e16652fac
45,986
import cmath def op_phase(x): """Returns the phase of a complex number.""" if isinstance(x, list): return [op_phase(a) for a in x] else: return cmath.phase(x)
e1a3b37c91bd4726ce0f14c9b9eb0fd007a4e0d2
45,990
from typing import get_origin from typing import Union from typing import get_args def is_optional_type(type_annotation) -> bool: """ Determines if a type is Optional[XXX] :param type_annotation: A type annotation, e.g Optional[float] or float :return: Whether the type annotation is an optional """ if get_origin(type_annotation) is Union: args = get_args(type_annotation) return len(args) == 2 and type(None) in args # pylint:disable=unidiomatic-typecheck return False
2ef9fb8a536dbe489c2039a95ae1f8f185515d61
45,994
def leiaint(msg):
    """
    --> Function that validates integer (int) input.

    :param msg: Prompt message shown to the user
    :return: the value entered, if it is a valid integer
    :print: Prints an error message on screen if the value entered is not an integer
    """
    while True:
        n = str(input(msg)).strip()
        if n.replace('-', '').isnumeric():
            return int(n)
        else:
            print(f'\033[0;31mErro! Digite um número inteiro válido.\033[m')
29b96b26055d965e987bf1e39a96b1924751f56c
45,996
def convert_format(ori_list: list) -> dict:
    """
    Let n be the length of the input list.

    Time complexity: O(n). The list is traversed twice:
    1. Build a helper dict so that a parent can be looked up from a child in O(1); initialize child nodes.
    2. Attach each child node to its parent.

    Space complexity: a helper dict and the result dict are built, so O(n).
    """
    tree = {"root": {}}
    revert_dict = dict()

    for n in ori_list:
        # build the reverse (child -> parent) lookup dict
        revert_dict[n["name"]] = n.get("parent")
        # initialize child nodes
        tree[n["name"]] = {}

    for ele in ori_list:
        name = ele["name"]
        parent = revert_dict.get(name) or "root"
        # attach the child node to its parent
        tree[parent].update({name: tree.get(name)})

    return tree["root"]
02deadf4c1e6ceaabfabf93fd09d49e1e4e87ddd
45,997
import inspect def get_fn_parameters(fn): """ return the number of input parameters of the fn , None on error""" param_len = len(inspect.getfullargspec(fn).args) if inspect.ismethod(fn): param_len -= 1 return param_len
cf90737655367422a96929d1f935aea4db620a41
45,998
def parse_accept(headers): #------------------------- """ Parse an Accept header and return a dictionary with mime-type as an item key and 'q' parameter as the value. """ return { k[0].strip(): float(k[1].strip()[2:]) if (len(k) > 1 and k[1].strip().startswith('q=')) else 1 for k in [ a.split(';', 1) for a in headers.get('Accept', '*/*').split(',') ] }
43e2698695f3484d47284cb8e0d1e278b33b322d
46,006
def get_x_y_set(mt, type="test"): """Gets data set from ModelTuning object Parameters ---------- mt : ModelTuning object ModelTuning object used type : str, default="test" specifies which set to return ('train'/'test'/'val') Returns ---------- X_data, y_data : ndarray """ if type == "val": return mt.get_validation_set() if type == "train": return mt.get_train_set() if type == "test": return mt.get_test_set()
6a423e93d85e4f18057550b6d3de3d3fe3c80766
46,008
from itertools import tee def nwise(iterable, n=2): """ Adapted from more_itertools s, 2 -> (s0,s1), (s1,s2), (s2, s3), ..." s, 3 -> (s0,s1,2), (s1,s2,s3), (s2,s3,s4), ..." """ parts = tee(iterable, n) to_zip = [] while(parts): to_zip.append(parts[0]) parts = parts[1:] for p in parts: next(p, None) return zip(*to_zip)
1b73f4a565243b5aeeea4ac85ba5e6f0f83864ee
46,010
def read_mesh_off(path, scale=1.0): """ Reads a *.off mesh file :param path: path to the *.off mesh file :return: tuple of list of vertices and list of faces """ with open(path) as f: assert (f.readline().split()[0] == 'OFF'), 'Not OFF file' nv, nf, ne = [int(x) for x in f.readline().split()] verts = [tuple(scale * float(v) for v in f.readline().split()) for _ in range(nv)] faces = [tuple(map(int, f.readline().split()[1:])) for _ in range(nf)] return verts, faces
e65a78cc457dd251c8e604b24096776b54cae33c
46,012
def mean(li): """ Calculate mean of a list. >>> mean([0.5,2.0,3.5]) 2.0 >>> mean([]) """ if li: return sum(li) / len(li) return None
249d71615f0b671e6ad0e9be127d5eedacee0a64
46,014
import re

def fixup_generated_snippets(content):
    """
    Adjust the expanded code snippets that were generated
    by mdsnippets, to improve rendering by Sphinx
    """
    # Remove lines like: <!-- snippet: verify_exception_message_example -->
    content = re.sub(
        r"<!-- snippet: .* -->\n",
        r"", content)

    # Remove lines like: <a id='snippet-verify_exception_message_example'/></a>
    content = re.sub(
        r"<a id='snippet-.*'/></a>\n",
        r"", content)

    # Remove 'snippet source' links from all code snippets
    content = re.sub(
        r"<sup><a href='([^']+)' title='Snippet source file'>snippet source</a> ",
        r"(See [snippet source](\1))", content)

    # Remove 'anchor' links from all code snippets
    content = re.sub(
        r"\| <a href='#snippet-[^']+' title='Start of snippet'>anchor</a></sup>",
        '', content)

    content = content.replace('<!-- endsnippet -->\n', '')
    return content
81c58f83058c1eb91d6846e026b9e2c7a757189b
46,016
def order_cols_by_nunique(df, cols): """ Reorder columns in cols according to number of unique values in the df. This can be used to have a cleaner grouped (multi-level) x-axis where the level with the least unique values is at the bottom. Note: alternatively you could remove cols that have only 1 unique value :param df: pandas DataFrame :param cols: list of columns to reorder :return: """ unique_count = df[cols].nunique().sort_values() re_ordered_cols = list(unique_count.index.values) return re_ordered_cols
ccc6b4208182add8f43ea7cfdeb5bad048ddf26e
46,018
def southOf(x, y, xy0, xy1): """ Returns 1 for point south/east of the line that passes through xy0-xy1, 0 otherwise. """ x0 = xy0[0]; y0 = xy0[1]; x1 = xy1[0]; y1 = xy1[1] dx = x1 - x0; dy = y1 - y0 Y = (x-x0)*dy - (y-y0)*dx Y[Y>=0] = 1; Y[Y<=0] = 0 return Y
cd7d903b60eec70f580bccc6f8dcd9a05734f161
46,019
def get_action_value(mdp, state_values, state, action, gamma): """ Computes Q(s,a) as in formula above """ return sum( prob * (mdp.get_reward(state, action, state_dash) + gamma * state_values[state_dash]) for state_dash, prob in mdp.get_next_states(state, action).items() )
082fe8b979e6ceaca218db5de5084c9040fbe34c
46,028
def ParseSubversionPropertyValues(props):
    """Parse the given property value which comes from [auto-props] section and
    returns a list whose element is a (svn_prop_key, svn_prop_value) pair.

    See the following doctest for example.

    >>> ParseSubversionPropertyValues('svn:eol-style=LF')
    [('svn:eol-style', 'LF')]
    >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
    [('svn:mime-type', 'image/jpeg')]
    >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
    [('svn:eol-style', 'LF'), ('svn:executable', '*')]
    """
    key_value_pairs = []
    for prop in props.split(";"):
        key_value = prop.split("=")
        assert len(key_value) <= 2
        if len(key_value) == 1:
            # If value is not given, use '*' as a Subversion's convention.
            key_value_pairs.append((key_value[0], "*"))
        else:
            key_value_pairs.append((key_value[0], key_value[1]))
    return key_value_pairs
9874d2a308f66ec4dc4aa14292370977685c6e5e
46,033
import math def distance(coordinate_tuple): """Intake a coordinate tuple, return the distance between points""" x1 = coordinate_tuple[0] y1 = coordinate_tuple[1] x2 = coordinate_tuple[2] y2 = coordinate_tuple[3] dist = math.sqrt((x2-x1)**2 + (y2-y1)**2) return dist
f93e505aa267c0d4011f7e94e8a0996478c75058
46,035
def find_duplicates(l: list) -> set:
    """
    Return the duplicates in a list.

    The function relies on
    https://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-a-list .

    Parameters
    ----------
    l : list
        Name

    Returns
    -------
    set
        Duplicated values

    >>> find_duplicates([1,2,3])
    set()
    >>> find_duplicates([1,2,1])
    {1}
    """
    return set([x for x in l if l.count(x) > 1])
f5c3ce0949996865935182028b7782c13eb0998a
46,038
import html def unescape(text): """Replace HTML entities and character references.""" return html.unescape(text)
075821e15ee6a8bd9a924c5db20f0596de64ad1b
46,042
def _parse_line(line): """Parse poolq Quality file lines.""" if not line == "\n": parsed_line = line.strip("\n").split("\t") if not parsed_line[0].startswith("Read counts"): return parsed_line
56cf059dd75e329303054f683bc6b360641ae6e5
46,052
def hub_balance(m, i, j, co, t): """Calculate commodity balance in an edge {i,j} from/to hubs. """ balance = 0 for h in m.hub: if (h, co) in m.r_in_dict: balance -= m.Epsilon_hub[i,j,h,t] * m.r_in_dict[(h, co)] # m.r_in = 1 by definition if (h, co) in m.r_out_dict: balance += m.Epsilon_hub[i,j,h,t] * m.r_out_dict[(h, co)] return balance
239ba38223661a3dcbb3b9fa8bf793b6e32156c1
46,053
def isCSERelative(uri : str) -> bool: """ Check whether a URI is CSE-Relative. """ return uri is not None and uri[0] != '/'
98cfc2e74e7cb5e1561dec2e40e8076f6eb966e4
46,062
from typing import Dict
from typing import Any

def build_validation(validator) -> Dict[str, Any]:
    """Builds and returns a fake validator response object."""
    if validator == "sql":
        metadata = {"sql": "SELECT user_id FROM users"}
    elif validator == "content":
        metadata = {
            "field_name": "view_a.dimension_a",
            "content_type": "dashboard",
            "space": "Shared",
        }
    elif validator == "assert":
        metadata = {"test_name": "test_should_pass"}
    else:
        metadata = {}

    return {
        "validator": validator,
        "status": "failed",
        "tested": [
            dict(model="ecommerce", explore="orders", passed=True),
            dict(model="ecommerce", explore="sessions", passed=True),
            dict(model="ecommerce", explore="users", passed=False),
        ],
        "errors": [
            dict(
                model="ecommerce",
                explore="users",
                message="An error occurred",
                metadata=metadata,
            )
        ],
    }
a344d97b2f80486a0c826099bc47fe4bdd60989a
46,064
def combine_channel_number(major: int, minor: int) -> str: """Create a combined channel number from its major and minor.""" if minor == 65535: return str(major) return "%d-%d" % (major, minor)
7eee6911546e62b25db05a3c099de75c382e7081
46,065
import torch

def batch_log_matvecmul(A, b):
    """For each 'matrix' and 'vector' pair in the batch, do matrix-vector
    multiplication in the log domain, i.e., logsumexp instead of add,
    add instead of multiply.

    Arguments
    ---------
    A : torch.Tensor (batch, dim1, dim2)
        Tensor
    b : torch.Tensor (batch, dim1)
        Tensor.

    Outputs
    -------
    x : torch.Tensor (batch, dim1)

    Example
    -------
    >>> A = torch.tensor([[[   0., 0.],
    ...                    [ -1e5, 0.]]])
    >>> b = torch.tensor([[0., 0.,]])
    >>> x = batch_log_matvecmul(A, b)
    >>> x
    tensor([[0.6931, 0.0000]])
    >>>
    >>> # non-log domain equivalent without batching functionality
    >>> A_ = torch.tensor([[1., 1.],
    ...                    [0., 1.]])
    >>> b_ = torch.tensor([1., 1.,])
    >>> x_ = torch.matmul(A_, b_)
    >>> x_
    tensor([2., 1.])
    """
    b = b.unsqueeze(1)
    x = torch.logsumexp(A + b, dim=2)
    return x
116d6f5713c2bc7dc6b39fb784ee4f6f6c5ba852
46,066
def _do_overlap(rect_1, rect_2): """ Determines whether the two rectangles have overlap. Args: rect_1: Tuple :code:`(lat_min, lon_min, lat_max, lon_max) describing a rectangular tile. rect_2: Tuple :code:`(lat_min, lon_min, lat_max, lon_max) describing a rectangular tile. Returns: True if the two rectangles overlap. """ lat_min_1, lon_min_1, lat_max_1, lon_max_1 = rect_1 lat_min_2, lon_min_2, lat_max_2, lon_max_2 = rect_2 lat_min = max(lat_min_1, lat_min_2) lon_min = max(lon_min_1, lon_min_2) lat_max = min(lat_max_1, lat_max_2) lon_max = min(lon_max_1, lon_max_2) return (lat_min < lat_max) and (lon_min < lon_max)
74379617ceca1bce4237acdbd9800ba1583e781e
46,068
def symbol(data): """ Get the feature symbol. """ return data["marker_symbol"]
2efca500e061818e62405e58107d6d6883950761
46,071
import hashlib

def uuid_hash(uuid):
    """Return SHA1 hash as hex digest of an uuid.

    Requirement from VAC team : You should apply SHA1 hashing over the data as
    a text. More specifically you should have the text data as a UTF8 encoded
    string then convert the string to byte array and digest it into a SHA1
    hash. The resulting SHA1 hash must be converted to text by displaying each
    byte of the hashcode as a HEX char (first byte displayed leftmost in the
    output). The hash must be lowercase.

    No checks are made to determine if the input uuid is valid or not.
    Dashes in the uuid are ignored while computing the hash.

    :param str uuid: uuid to be hashed
    :returns: SHA1 hash as hex digest of the provided uuid.
    """
    uuid_no_dash = uuid.replace('-', '')
    m = hashlib.sha1()
    m.update(bytes(uuid_no_dash, 'utf-8'))
    return m.hexdigest()
eb26bfa75cf2d694218adc9782181532c50fb13f
46,072
import pytz from datetime import datetime def determine_current_datetime(**args) -> tuple: """ Return args['today_date'] as a timezone aware python datetime object """ tz = pytz.timezone('America/Vancouver') args['today_date'] = datetime.now(tz) return True, args
cd31a44133955da015f9fce2794d89dc4351ca6e
46,073
def get_node_flow(flow_net, node):
    """
    Returns the sum of the flow into minus the sum of the flow out from
    the node.

    In a maximum flow network, this function returns 0 for all nodes except
    for the source (which returns -max_flow) and drain (which returns max_flow).
    """
    flow = 0
    n = len(flow_net)
    for i in range(n):
        flow += flow_net[i][node]
        flow -= flow_net[node][i]
    return flow
c58518dff13658b15c4b2adebe2f4c4efe2a914e
46,075
import random

def normalRandomInt(max_val, spread, skew=0):
    """Returns a random integer from a normal distribution whose parameters may be
    tweaked by setting max_val, spread and skew. The value is clipped to
    the range [0, max_val].

    Args:
        max_val {number} A positive number. All returned values will be less than this.
        spread {float} Should be a value between 0 and 1. The standard deviation of the
            normal distribution will be set to this value times max_val.
        skew {float} Should be value between -1 and 1. The mean of the normal
            distribution will be set to max_val * 0.5 * (1 + skew).

    Returns:
        {int} A random integer in the range [0, max_val].
    """
    mu = max_val * 0.5 * (1.0 + skew)
    sigma = max_val * spread
    x = int(random.normalvariate(mu, sigma))
    # Ensure the value is in the range [0, max_val]
    return max(0, min(x, max_val))
4bbe790dc86a0308700d10a65179288f0d3c2674
46,082
def get_dot(dic, key, default=None):
    """
    Similar to dict.get(), but key is given with dot (eg. foo.bar) and the
    result is evaluated in a generous way. That is, get_dot(dic, 'foo.bar.baz')
    will return dic['foo']['bar']['baz'] if the full path exists, but return
    default if any key along the path does not exist.
    """
    keys = key.split('.')
    res = dic
    for k in keys:
        if not isinstance(res, dict):
            return default
        elif k in res:
            res = res[k]
        else:
            return default
    return res
cc764debf77b6982733226ec5547e26e2394cd89
46,083
import json def load_json(json_path = "folder_tree.json"): """ Loads in memory a structured dictionary saved with `create_json` :param json_path: Path of JSON file with the dictionary :type json_path: str :return: Loaded JSON file :rtype: dict """ with open(json_path, "r") as json_file: files_index = json.load(json_file) # Check number of users loaded return files_index
c8c6f683c1622e340a479904b625b14f37ab5525
46,084
def band_sample(sample): """ Fixture which returns a bands sample file. """ return sample('silicon_bands.hdf5')
fa9ebb5e180ef1e2c67adb7d2ead4a8c21d1f815
46,090
def fmtf(format): """ Returns a function that formats strings. >>> f = fmtf("%.2f") >>> f(0.5) '0.50' """ return lambda x: format % x
9e8d1960aca7bbe69b72cd61c6374f88e0ff5e7b
46,098
def distance_to_greater_value(arr: list, allow_equal: bool) -> list:
    """Creates array where each value indicates distance from arr[i] to a value in arr greater than arr[i].

    In order to avoid counting multiple maximums in a given subarray we count the distance to a strictly
    greater value in one direction and a greater or equal value in the opposite. For each value arr[i]
    we search for a value that is greater (or equal), if found we store its distance from i, otherwise
    we store the length of the array.

    :param arr: Array from which to determine sum of subarray maximums.
    :param allow_equal: Bool. If True, the index of values equal or greater than arr[i] are stored.
    :return:
    """
    next_greater_value = [0] * len(arr)
    stack = []
    for idx, num in enumerate(arr):
        if len(stack) == 0:
            stack.append(idx)
        elif num < arr[stack[-1]] or (allow_equal and num == arr[stack[-1]]):
            stack.append(idx)
        else:
            while len(stack) > 0 and ((allow_equal and num > arr[stack[-1]])
                                      or (not allow_equal and num >= arr[stack[-1]])):
                idx2 = stack.pop()
                next_greater_value[idx2] = idx
            stack.append(idx)

    while len(stack) > 0:
        idx2 = stack.pop()
        next_greater_value[idx2] = len(arr)

    return next_greater_value
bd08e7f34783c8ea221b03184e3533778be354ea
46,101
from typing import List from pathlib import Path def glob_file_from_dirs(dirs: List[str], pattern: str) -> List[str]: """Return a list of all items matching `pattern` in multiple `dirs`.""" return [next(Path(d).glob(pattern)).as_posix() for d in dirs]
c8815e2a39e2722d45fcab4b81d6c81b53566b18
46,102
def _get_default_route(version, subnet):
    """Get a default route for a network

    :param version: IP version as an int, either 4 or 6
    :param subnet: Neutron subnet
    """
    if subnet.get('gateway') and subnet['gateway'].get('address'):
        gateway = subnet['gateway']['address']
    else:
        return []

    if version == 4:
        return [{
            'network': '0.0.0.0',
            'netmask': '0.0.0.0',
            'gateway': gateway
        }]
    elif version == 6:
        return [{
            'network': '::',
            'netmask': '::',
            'gateway': gateway
        }]
f039b70bf24ffcac1f31a6ffb9de570606fb55dd
46,103
def contents(filename): """The contents of FILENAME, or the empty string if the file does not exist or is unreadable.""" try: with open(filename) as inp: return inp.read() except: return ''
043d154ed2ef01536b9c9bc0ad88a6de596a39d1
46,105
def getMenuLabel(menuPar): """ Return menuPar's currently selected menu item's label """ try: return menuPar.menuLabels[menuPar.menuIndex] except IndexError: raise except: raise TypeError("getMenuLabel: invalid menu par " + repr(menuPar))
162c2e6158696c9f306012fa1cba16e5d3af62ea
46,110
def valid_attribute(view, pos): """attribute in valid scope""" if view.match_selector(pos, "source.python comment"): return False # match f-string if view.match_selector(pos, "source.python meta.interpolation.python"): return True # match common string if view.match_selector(pos, "source.python meta.string"): return False return True
cbac18bb63e2a0304f91ef45d394778b4f9fab0d
46,122
import requests
import time

def get_with_retry(uri, max_retry=5):
    """Wrapper for requests.get() to retry

    Args:
        uri (str): URI to request
        max_retry (int): number of retries to make

    Returns:
        the requests response
    """
    r = requests.get(uri)
    k = 0
    while r.status_code != 200:
        if k == max_retry:
            print(f"{uri} failed.")
            break
        time.sleep(1)
        r = requests.get(uri)
        k += 1
    return r
1351e138b02e409434c5058f033a53a742ba0204
46,128
def _check_solve_output(out, err):
    """
    Verify from shell output that the Radia solve completed satisfactorily.
    Will print any output from the shell.

    :param out: (bytes) STDOUT piped from shell
    :param err: (bytes) STDERR piped from shell
    :return: (bool) True if STDERR is empty. False if STDERR is non-empty.
    """
    if err:
        print("Error occurred during Radia solve procedure:")
        print(out.decode())
        print(err.decode())
        return False
    else:
        print("Solve finished")
        print(out.decode())
        return True
56fc1d5ffd5d361a4f4090703393dbb19ce11c23
46,130
def devices_to_string(devices): """ Format device list to string :param devices: list of devices :type devices: int or str or list :return: string of device list :rtype: str """ if isinstance(devices, str): return devices if isinstance(devices, int): devices = [devices] return ', '.join(['cuda:{}'.format(d) for d in devices])
3b9879f7726fc3b42cda4e656689ff584dbc6719
46,132
def dict_encode(dict, encoding='cp1251'): """Encode dict values to encoding (default: cp1251).""" encoded_dict = {} for key in dict: encoded_dict[key] = dict[key].encode(encoding) return encoded_dict
adfc1f9217ff48d20d44df5af4220f58e73419f4
46,134
def convert_pyr_coeffs_to_pyr(pyr_coeffs):
    """this function takes a 'new pyramid' and returns the coefficients as a list

    this is to enable backwards compatibility

    Parameters
    ----------
    pyr_coeffs : `dict`
        The `pyr_coeffs` attribute of a `pyramid`.

    Returns
    -------
    coeffs : `list`
        list of `np.array`, which contains the pyramid coefficients in each band, in order from
        bottom of the pyramid to top (going through the orientations in order)
    highpass : `np.array` or None
        either the residual highpass from the pyramid or, if that doesn't exist, None
    lowpass : `np.array` or None
        either the residual lowpass from the pyramid or, if that doesn't exist, None
    """
    highpass = pyr_coeffs.pop('residual_highpass', None)
    lowpass = pyr_coeffs.pop('residual_lowpass', None)
    coeffs = [i[1] for i in sorted(pyr_coeffs.items(), key=lambda x: x[0])]
    return coeffs, highpass, lowpass
6b7ab3c4a6a85d05b7628cd7d31c46a62e549afc
46,135
import torch

def relative_time_to_absolute(batch, relative_lens, rate):
    """Converts SpeechBrain style relative length to the absolute duration.

    Operates on batch level.

    Arguments
    ---------
    batch : torch.tensor
        Sequences to determine the duration for.
    relative_lens : torch.tensor
        The relative length of each sequence in batch. The longest sequence in
        the batch needs to have relative length 1.0.
    rate : float
        The rate at which sequence elements occur in real-world time. Sample rate,
        if batch is raw wavs (recommended) or 1/frame_shift if batch is features.
        This has to have 1/s as the unit.

    Returns
    -------
    torch.tensor
        Duration of each sequence in seconds.

    Example
    -------
    >>> batch = torch.ones(2, 16000)
    >>> relative_lens = torch.tensor([3./4., 1.0])
    >>> rate = 16000
    >>> print(relative_time_to_absolute(batch, relative_lens, rate))
    tensor([0.7500, 1.0000])
    """
    max_len = batch.shape[1]
    durations = torch.round(relative_lens * max_len) / rate
    return durations
88caf837d49dd46d14106e0df6011f22415cb73b
46,140
def multiply(x, y): """Multiplies two numbers. Equivalent to a * b but curried""" return x * y
21ddc96142998879669d30f6c03abba22dc0ba08
46,143
from typing import List def find_dup(numbers: List) -> int: """ Solution: Iterate through the list and store items in a set. When an item is found that already exists in the set return that item. Complexity: Time: O(n) - Iterate through our list once Space: O(n) - We could potentially store each item found """ numbers_seen = set() for num in numbers: if num in numbers_seen: return num numbers_seen.add(num) raise Exception("No duplicate found")
94620fc5ceb565b417bcf1f0de6f6d7af23968ed
46,144
def conjugate(x: complex) -> complex: """ Returns the conjugate of a complex number """ return x.real - x.imag * 1j
6cacee436e7ca74d586364a4f578b745c08768d2
46,146
import math def radians(degrees): """ Converts degrees to radians, which is used in math functions. """ return math.pi / 180.0 * degrees
9925b4f10b097ddad384c7ed34668a6127296bff
46,147
def to_lower_camel(s: str) -> str: """ Convert a snake-case string into lower camel case. """ parts = s.split("_") return parts[0] + "".join([p.title() for p in parts[1:]])
d937d70a9a1914079a377edd2d8d3e3d0a78bcb5
46,148
import logging def _setup_text_logger(name, stream, level): """Setup a text logger.""" res = logging.getLogger(name) handler = logging.StreamHandler(stream) handler.setFormatter(logging.Formatter(fmt="%(message)s")) res.addHandler(handler) res.setLevel(level) return res
54373e2c7d5e9337a6dd7ed321eefd6fd2d55f96
46,150
import yaml def fetch_mapping(filepath: str) -> dict: """Returns a dictionary from a YML file""" with open(filepath, "r") as stream: map = yaml.safe_load(stream) return map
8a228d6474e0d474eb7a441f068adc711926b433
46,152
def upcase(val: str) -> str: """Make all characters in a string upper case.""" return val.upper()
f96247fa0b46daca4bc35420d8b218c2ee22f25a
46,154
from typing import Union from pathlib import Path import wave def empty_wav(wav_path: Union[Path, str]) -> bool: """Check if a wav contains data""" with wave.open(str(wav_path), 'rb') as wav_f: return wav_f.getnframes() == 0
7ae5fa1a01138314ace682c636001b087aa61818
46,156