Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def encode_pos(i, j):
    """Encodes a pair (i, j) as a scalar position on the board."""
    return 3 * i + j
583d2e8370edc5801760f1c59c0c3aadad61876f
45,512
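A quick usage sketch for the `encode_pos` entry above; the (i, j) values are illustrative and assume a row-major 3x3 board:

# Illustrative checks on a 3x3 board.
assert encode_pos(0, 0) == 0
assert encode_pos(1, 2) == 5  # second row, third column
assert encode_pos(2, 2) == 8  # bottom-right corner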
def pct_to_value(data, d_pct):
    """
    Takes a dictionary of base values and a dictionary of percentages,
    and converts each percentage into a value.
    """
    if not data or not d_pct:
        return data
    out_map = {}
    for _k in data:
        if _k not in d_pct:
            continue
        out_map[_k] = (float(data[_k]) / 100.0) * float(d_pct[_k])
    return out_map
3c5b1f75f484b1f767bc3acf6cce5bac4b136608
45,523
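A usage sketch for `pct_to_value` above; the keys and numbers are made up for illustration:

# Hypothetical inputs: base values and the percentages to take of them.
base = {"cpu": 200, "mem": 1024}
pct = {"cpu": 50, "mem": 25}
print(pct_to_value(base, pct))  # {'cpu': 100.0, 'mem': 256.0}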
def is_image_file(s):
    """Checks to see if the string starts with 'img:'"""
    return s.startswith('img:')
68081021527dbff9d2da7235c9bf454d54a7fd68
45,531
def pureDependency(dependency: str) -> str:
    """
    Get the name of a package.

    Parameters
    ----------
    dependency : str
        package

    Returns
    -------
    str
        the name of the package without the version

    >>> pureDependency('package==1.2.3')
    'package'
    """
    dependency = dependency.split("==")[0]
    dependency = dependency.split(">")[0]
    dependency = dependency.split("<")[0]
    dependency = dependency.split("~=")[0]
    dependency = dependency.split("=")[0]
    return dependency
1891c0291b84e10ff35369c0cc740eaa6ab9d4ff
45,534
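A few illustrative calls to `pureDependency` above, exercising the version specifiers it strips:

assert pureDependency('package==1.2.3') == 'package'
assert pureDependency('requests>=2.0') == 'requests'
assert pureDependency('numpy~=1.21') == 'numpy'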
import torch


def get_random_tensor(size, dtype, use_cuda):
    """Returns a random tensor of given type and size

    Args:
        size (int): Tensor size (number of elements)
        dtype (:obj:`torch.dtype`): One of `torch.float16` and `torch.float32`
        use_cuda (bool): Return CUDA tensor

    Returns:
        torch.Tensor: Random tensor of the requested size, dtype and device
    """
    tensor = torch.rand(size).to(dtype=dtype)
    if use_cuda:
        tensor = tensor.cuda()
    return tensor
da17a8e33b9ccf9a5bf9bcc86104a1577a8f38a9
45,540
import torch


def sub2ind(shape, rows, cols):
    """
    A PyTorch implementation of MATLAB's "sub2ind" function

    Parameters
    ----------
    shape : torch.Size | list | tuple
        shape of the 2D matrix
    rows : torch.Tensor (n,)
        row subscripts
    cols : torch.Tensor (n,)
        column subscripts

    Returns
    -------
    index : torch.Tensor (n,)
        linear indices
    """
    # checks
    assert isinstance(shape, (tuple, list))
    assert isinstance(rows, torch.Tensor) and len(rows.shape) == 1
    assert isinstance(cols, torch.Tensor) and len(cols.shape) == 1
    assert len(rows) == len(cols)
    assert torch.all(rows < shape[0]) and torch.all(cols < shape[1])
    if not len(shape) == 2:
        raise NotImplementedError('only implemented for 2D case.')
    # compute inds
    ind_mat = torch.arange(shape[0] * shape[1]).view(shape)
    index = ind_mat[rows.long(), cols.long()]
    return index
8b89b58824b1c80327082afb74b4486816345d62
45,542
def check_overlap(bbox1, bbox2):
    """
    Checks if 2 boxes are overlapping. Also works for 1D intervals
    given as 2-element sequences.

    Args:
        bbox1: [x1, y1, x2, y2] or [z1, z2]
        bbox2: [x1, y1, x2, y2] or [z1, z2]

    Returns:
        bool
    """
    if len(bbox1) == 2:
        # 1D interval case [z1, z2]: the max coordinate sits at index 1,
        # so indexing [2] as in the 2D branch would raise an IndexError.
        return not (bbox1[0] > bbox2[1] or bbox2[0] > bbox1[1])
    if bbox1[0] > bbox2[2] or bbox2[0] > bbox1[2]:
        return False
    if bbox1[1] > bbox2[3] or bbox2[1] > bbox1[3]:
        return False
    return True
2f39989661d421327b4a82da6e9b2fa4ae550575
45,547
def _parse_force_block(lines):
    """
    Parse the block of total forces from the OUTCAR file

    :param lines: A list of lines that includes the TOTAL-FORCE block
    :returns: A tuple of positions and forces
    """
    forces = []
    positions = []
    istart = len(lines)
    for idx, line in enumerate(lines):
        if 'TOTAL-FORCE (eV/Angst)' in line:
            istart = idx
        elif idx > istart + 1:
            if not line.startswith(' -----'):
                # Still in the block
                values = list(map(float, line.split()))
                positions.append(values[:3])
                forces.append(values[3:])
            else:
                # Reached the end of the block
                break
    return positions, forces
37d9e488097749d4617364e23b296acee1d9bca5
45,548
import ast


def filter_block(node_list):
    """
    Remove no-op code (``pass``), or any code after an unconditional
    jump (``return``, ``break``, ``continue``, ``raise``).
    """
    if len(node_list) == 1:
        return node_list

    new_list = []
    for node in node_list:
        if type(node) == ast.Pass:
            continue
        new_list.append(node)
        if type(node) in (ast.Return, ast.Break, ast.Continue, ast.Raise):
            break
    if len(new_list) == len(node_list):
        return node_list
    else:
        return new_list
b88d3e4966e162d3e23e56e622ff47c63165b7e6
45,551
def getownattr(cls, attrib_name):
    """
    Return the value of `cls.<attrib_name>` if it is defined in the class
    (and not inherited). If the attribute is not present or is inherited,
    an `AttributeError` is raised.

    >>> class A(object):
    ...     a = 1
    >>>
    >>> class B(A):
    ...     pass
    >>>
    >>> getownattr(A, 'a')
    1
    >>> getownattr(A, 'unknown')
    Traceback (most recent call last):
        ...
    AttributeError: type object 'A' has no attribute 'unknown'
    >>> getownattr(B, 'a')
    Traceback (most recent call last):
        ...
    AttributeError: type object 'B' has no directly defined attribute 'a'
    """
    attr = getattr(cls, attrib_name)
    for base_cls in cls.__mro__[1:]:
        a = getattr(base_cls, attrib_name, None)
        if attr is a:
            raise AttributeError("type object %r has no directly defined attribute %r"
                                 % (cls.__name__, attrib_name))
    return attr
b59acbba4f75492fe52562443b7ca679691e7e10
45,552
def _raw_to_int(raw_data):
    """Convert a list of raw hex values given as strings to integers."""
    return [int(x, 16) for x in raw_data]
e8ae4784e142bcfa3ba8d7b013871986a1b5173a
45,553
from pathlib import Path
import json


def load_json(filepath: Path) -> dict:
    """load json file and return dict.

    Args:
        filepath (Path): filepath to json file.

    Returns:
        dict: dict loaded from json file.
    """
    with open(filepath, "r") as f:
        obj = json.load(f)
    return obj
5cc66b27a6335e29a540b98b3f29ed79cbbb7777
45,555
def bdev_rbd_register_cluster(client, name, user=None, config_param=None,
                              config_file=None, key_file=None):
    """Create a Rados Cluster object of the Ceph RBD backend.

    Args:
        name: name of Rados Cluster
        user: Ceph user name (optional)
        config_param: map of config keys to values (optional)
        config_file: file path of Ceph configuration file (optional)
        key_file: file path of Ceph key file (optional)

    Returns:
        Name of registered Rados Cluster object.
    """
    params = {'name': name}

    if user is not None:
        params['user_id'] = user
    if config_param is not None:
        params['config_param'] = config_param
    if config_file is not None:
        params['config_file'] = config_file
    if key_file is not None:
        params['key_file'] = key_file

    return client.call('bdev_rbd_register_cluster', params)
82c43cb070298bd983c9bf74cccfff6ddfeddd31
45,557
import torch


def str_dtype_to_torch_dtype(dtype: str) -> torch.dtype:
    """Converts a string representation of a dtype to the corresponding PyTorch dtype."""
    if dtype == "int32":
        return torch.int32
    elif dtype == "int64":
        return torch.int64
    elif dtype == "float32":
        return torch.float32
    elif dtype == "float64":
        return torch.float64
    else:
        raise ValueError(f"Unsupported dtype: {dtype}")
ddf64bb7fba63ff0395e08a199fa431cd8750972
45,558
def SAMflags(x):
    """
    Explains a SAM flag.

    :param x: flag
    :returns: complete SAM flag explanation
    """
    # (bit, message if set, message if unset), in SAM flag order.
    descriptions = [
        (1, "Read paired", "Read unpaired"),
        (2, "Read mapped in proper pair", "Read not mapped in proper pair"),
        (4, "Read unmapped", "Read mapped"),
        (8, "Mate unmapped", "Mate mapped"),
        (16, "Read reverse strand", "Read direct strand"),
        (32, "Mate reverse strand", "Mate direct strand"),
        (64, "First in pair", "Second in pair"),
        (128, "Second in pair", "First in pair"),
        (256, "Not primary alignment", "Primary alignment"),
        (512, "Read fails platform/vendor quality checks",
              "Read passes platform/vendor quality checks"),
        (1024, "Read is PCR or optical duplicate",
               "Read is not PCR or optical duplicate"),
        (2048, "Supplementary alignment", "Not supplementary alignment"),
    ]

    flags = []
    for bit, set_msg, unset_msg in descriptions:
        if x & bit:
            flags.append("1: " + set_msg)
        else:
            flags.append("0: " + unset_msg)
    return flags
e3d2c1942eac66acd4735cd4590a1905351cbc24
45,559
def bytes_xor(byte_seq1, byte_seq2):
    """
    (bytes, bytes) -> (bytes)

    Do bit-level XOR of two byte arrays.

    :param byte_seq1: byte sequence (bytes).
    :param byte_seq2: byte sequence (bytes).
    :return: XOR of the two byte sequences (bytes).
    """
    assert len(byte_seq1) == len(byte_seq2), "Bytes must be of the same length."
    parts = []
    # Loop variables renamed so they no longer shadow the parameters.
    for b1, b2 in zip(byte_seq1, byte_seq2):
        parts.append(bytes([b1 ^ b2]))
    return b''.join(parts)
539ca3707c6c07fbd64691a4b317d0d6eb8acef4
45,561
def dBm2W(dBm):
    """Converts an arbitrary power `dBm` in dBm to W."""
    # 0 dBm is 1 mW, so P[W] = 10 ** ((P[dBm] - 30) / 10).
    return 10 ** ((dBm - 30) / 10)
278f43aac26f5e38ef9ab4e73acb6496dedcb0f7
45,566
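Two sanity checks for `dBm2W` above, assuming the standard -30 offset as fixed in the listing: 30 dBm is one watt and 0 dBm is one milliwatt:

print(dBm2W(30))  # 1.0
print(dBm2W(0))   # 0.001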
import json


def json_of_response(res):
    """Decode json from response"""
    return json.loads(res.data.decode('utf8'))
8c53a8a283994cf8b16e9d759d6fcdaa35731b04
45,570
def get_frame(epoch, step_size, frame_number):
    """
    Crop an epoch based on a frame number

    Args:
        epoch: mne.epochs.Epochs
            Epoch to crop
        step_size: int
            Number of time frames per step
        frame_number: int
            Current frame number

    Returns:
        mne.epochs.Epochs: Cropped epochs for the given frame
    """
    times = epoch.times
    max_index = len(times) - 1
    tmax = (frame_number + 1) * step_size

    # make sure we don't go outside the possible range
    if tmax > max_index:
        tmax = max_index

    return epoch.copy().crop(
        times[frame_number * step_size],
        times[tmax],
        include_tmax=True
    )
23c3b730eaf4ac369ff91e2a16f92fc18f4209a5
45,571
def full_request_url(base, text, wildcards=None):
    """
    Build a full request URL from the API URL and endpoint.
    Any URL parameters will be replaced with the values supplied
    in the `wildcards` mapping.
    """
    # Default to an empty dict here to avoid a shared mutable default argument.
    if wildcards is None:
        wildcards = {}
    for key in wildcards.keys():
        text = text.replace(key, str(wildcards[key]))
    return str(base) + str(text)
217d921666a0cfa9ddd3fad09469085425398182
45,573
def collides_with_existing_words(word, line, column, direction, grid):
    """
    Returns whether the given word collides with an existing one.
    """
    for k, letter in enumerate(list(word)):
        if direction == "E":
            # Collisions
            if grid[line][column + k] != 0 and grid[line][column + k] != letter:
                return True
        if direction == "S":
            # Collisions
            if grid[line + k][column] != 0 and grid[line + k][column] != letter:
                return True
    return False
0e8863f725e29b81d9123f29be343cc55f339840
45,574
import hmac
import hashlib


def HMAC_MD5(key, data):
    """
    @summary: classic HMAC algorithm with MD5 sum
    @param key: {bytes} key
    @param data: {bytes} data
    """
    # Note: in Python 3, hmac.new requires bytes for both key and data.
    return hmac.new(key, data, hashlib.md5).digest()
601595073554175e21caac49dc160357ac976e8e
45,579
def _verify_classifiers(classifiers, valid_classifiers):
    """Check classifiers against a set of known classifiers"""
    invalid = classifiers - valid_classifiers
    return ["Unrecognised classifier: {!r}".format(c) for c in sorted(invalid)]
64259f25b769361ddafcb83e63845de0d052c88c
45,580
def extract_csv_row(filename: str, row: int) -> str:
    """Extracts a selected line from the csv file.

    Args:
        filename: A path to the file.
        row: The row number to extract.

    Returns:
        The row from the csv file as a string.
    """
    with open(filename, 'r') as file:
        extracted = file.readlines()[row - 1:row][0].strip('\n')
    return extracted
19f72a462e676675c192f3611d3bb46a8aecc887
45,582
import re


def GetMakeFileVars(makefile_path):
    """Extracts variable definitions from the given make file.

    Args:
        makefile_path: Path to the make file.

    Returns:
        A dictionary mapping variable names to their assigned value.
    """
    result = {}
    pattern = re.compile(r'^\s*([^:#=\s]+)\s*:=\s*(.*?[^\\])$',
                         re.MULTILINE | re.DOTALL)
    with open(makefile_path, 'r') as stream:
        content = stream.read()
    for match in pattern.finditer(content):
        result[match.group(1)] = match.group(2)
    return result
7869801b18a4e4aa1cdc3e5d30219ed449e897ac
45,587
import random


def ranf(x1, x2):
    """
    return random float between x1 and x2

    :param float x1:
    :param float x2:
    :return float:
    """
    return random.uniform(x1, x2)
6b17e1e5ff3293a8ad4b5027caf9db954bbfb6da
45,589
import re


def _search(value):
    """
    Maps to `re.search` (match anywhere in `v`).
    """
    return lambda v: re.search(value, v)
9d03811520c32d1aa7ab8fde8b913d8ad59456af
45,590
def read_predictions(pred_file):
    """
    Read a predictions file with format:

        SEQUENCE1 pred1 err1
        SEQUENCE2 pred2 err2
        ...

    return a dictionary mapping sequence to prediction
    """
    out_dict = {}
    with open(pred_file) as lines:
        for l in lines:
            if l.strip() == "" or l[0] == "#":
                continue
            col = l.split()
            key = col[0]
            pred = float(col[1])
            err = float(col[2])
            out_dict[key] = (pred, err)
    return out_dict
5081027deff7c05d4abe23d757a5156d92f355ff
45,594
def backends_mapping(custom_backend, httpbin_original, httpbin_new):
    """
    Creates custom backends with paths "/orig" and "/new", using deployed
    httpbin_original and httpbin_new as the upstream APIs
    """
    return {
        "/orig": custom_backend("backend-orig", httpbin_original),
        "/new": custom_backend("backend-new", httpbin_new)}
16ccc00a449081eb0418ac5a1d193f04caa1407a
45,596
import inspect


def bold_title(text: str) -> str:
    """
    Create bold title in exam header

    :return: latex code
    """
    return inspect.cleandoc(rf"""
        \begin{{center}}
        \textbf {{
        {{\Large
        {text}
        }}
        }}
        \end{{center}}
    """)
73f29514761c55e97a0edd5d54dc01f2c76108c4
45,604
def calculate_damage(attacker, defender):
    """Calculate and return effective power of attacker against defender.

    args:
        attacker: [side, units, hit_points, weaknesses, immunities, damage,
                   damage_type, initiative]
        defender: [side, units, hit_points, weaknesses, immunities, damage,
                   damage_type, initiative]

    Return:
        group_damage: effective power modified by weaknesses/immunities
    """
    group_damage = attacker[1] * attacker[5]
    damage_type = attacker[6]
    if damage_type in defender[3]:
        group_damage *= 2
    elif damage_type in defender[4]:
        group_damage = 0
    return group_damage
093e3a95ce4db864f4c4e9526e2b16378d407b7e
45,605
import re


def Tagcleaner(text):
    """
    Remove "<>" tags from text and return cleaned text
    """
    cleanr = re.compile('<.*?>')
    text_clean = re.sub(cleanr, '', text)
    return text_clean
fa563b8ca252fd3bf65351ac67ed6549f996a833
45,607
def get_category_info_from_anno(anno_file, with_background=True):
    """
    Get class id to category id map and category id
    to category name map from annotation file.

    Args:
        anno_file (str): annotation file path
        with_background (bool, default True):
            whether load background as class 0.
    """
    cats = []
    with open(anno_file) as f:
        for line in f.readlines():
            cats.append(line.strip())

    if cats[0] != 'background' and with_background:
        cats.insert(0, 'background')
    if cats[0] == 'background' and not with_background:
        cats = cats[1:]

    clsid2catid = {i: i for i in range(len(cats))}
    catid2name = {i: name for i, name in enumerate(cats)}

    return clsid2catid, catid2name
1fa8a5c8c3af52b8a2a65e8494e20a27d2c79101
45,611
def get_magmom_string(structure):
    """
    Based on a POSCAR, returns the string required for the MAGMOM
    setting in the INCAR. Initializes transition metals with 6.0
    bohr magneton and all others with 0.5.

    Args:
        structure (Structure): Pymatgen Structure object

    Returns:
        string with INCAR setting for MAGMOM according to mat2d
        database calculations
    """
    magmoms, considered = [], []
    for s in structure.sites:
        if s.specie not in considered:
            amount = int(structure.composition[s.specie])
            if s.specie.is_transition_metal:
                magmoms.append('{}*6.0'.format(amount))
            else:
                magmoms.append('{}*0.5'.format(amount))
            considered.append(s.specie)
    return ' '.join(magmoms)
e9ae02d20c34cfade463374b83fedfee86838155
45,613
def longest_row_number(array):
    """Find the length of the longest row in the array

    :param list array: a list of arrays
    """
    if len(array) > 0:
        # map runs len() against each member of the array
        return max(map(len, array))
    else:
        return 0
515a357bdb5d180fa48fb3accd0f47fe6f22d8d3
45,621
def _check_func_names(selected, feature_funcs_names):
    """Checks if the names of selected feature functions match the
    available feature functions.

    Parameters
    ----------
    selected : list of str
        Names of the selected feature functions.

    feature_funcs_names : dict-keys or list
        Names of available feature functions.

    Returns
    -------
    valid_func_names : list of str
    """
    valid_func_names = list()
    for f in selected:
        if f in feature_funcs_names:
            valid_func_names.append(f)
        else:
            raise ValueError('The given alias (%s) is not valid. The valid '
                             'aliases for feature functions are: %s.'
                             % (f, feature_funcs_names))
    if not valid_func_names:
        raise ValueError('No valid feature function names given.')
    else:
        return valid_func_names
b8f036dad2b777142de3c98cd3928d6d22fa52cb
45,625
def list_to_str(a_list, delimiter="\t", newline=True):
    """
    =================================================================================================
    list_to_str(a_list, delimiter, newline)

    Given a list, the delimiter (default '\t'), and whether to add a trailing newline
    character, take a list and convert the list into a string where each element is
    separated by the chosen delimiter.
    =================================================================================================
    Arguments:

    a_list     -> A list
    delimiter  -> A string to separate the elements of the list in a string
    newline    -> A boolean that determines whether to add a newline character
                  to the output string
    =================================================================================================
    Returns: A string containing the elements of a_list, separated by delimiter,
             with or without a newline character
    =================================================================================================
    Example:

    a = [1,2,3]
    print(list_to_str(a))
    -> '1\t2\t3\n'
    =================================================================================================
    """
    # Make sure the user inputs the proper typed objects.
    # newline argument needs to be a boolean
    assert newline in [True, False], "newline argument must be a boolean"
    # a_list argument needs to be an iterable, specifically a list/tuple
    assert type(a_list) in [list, tuple], "The argument 'a_list' should be a list or a tuple."

    # Initialize the new string with the first element of the list.
    # This avoids having to slice the strings later
    newstr = f"{a_list[0]}"
    # If the list only has one element and the user elects to use
    # a trailing newline character
    if len(a_list) == 1 and newline:
        # Then simply return the newstr variable with an added newline character
        return f"{newstr}\n"
    # If the list has only one element and the user does not elect to
    # use a trailing newline character
    elif len(a_list) == 1 and not newline:
        # Then simply return the newstr variable
        return f"{newstr}"
    # If the list has more than one element, then loop over all elements
    # (excluding the first one since that is already in the string)
    for i in range(1, len(a_list)):
        # and add those new elements to the newstring with the given
        # delimiter separating the elements.
        newstr = f"{newstr}{delimiter}{a_list[i]}"
    # If the user elects to use a trailing newline character
    if newline:
        # Then add the trailing newline character and return the string
        return f"{newstr}\n"
    # If the user does not elect to use a trailing newline character
    else:
        # Then simply return the newstr variable
        return newstr
827275cc7a0207578158be7d5d5aefd18028992d
45,626
def getScalarsType(colouringOptions):
    """
    Return scalars type based on colouring options
    """
    # scalar type
    if colouringOptions.colourBy == "Species" or colouringOptions.colourBy == "Solid colour":
        scalarType = 0
    elif colouringOptions.colourBy == "Height":
        scalarType = 1
    elif colouringOptions.colourBy == "Charge":
        scalarType = 4
    else:
        scalarType = 5
    return scalarType
35c2dfaab09f5a4ddee0321605d05575ccca95f0
45,628
def remove_keys(dct, keys=()):
    """Remove keys from a dict."""
    # An immutable default avoids the shared mutable-default pitfall.
    return {key: val for key, val in dct.items() if key not in keys}
7ccab472f303350c5a2a6b07d62dffc74affdf0b
45,632
def add_percentile(df, fig):
    """Adds percentile lines to an existing graphic based on the dataframe

    Parameters
    ----------
    df (Dataframe): Dataframe with the data to generate the percentiles
    fig (Figure): original figure

    Returns
    -------
    fig: figure with the trendline included
    """
    percentile = df.quantile([0.5, 0.85, 0.95])
    for key in percentile.keys():
        position = percentile[key]
        label = f"{int(key*100)}%"
        fig = fig.add_shape(
            type="line",
            yref="paper",
            x0=position,
            y0=0,
            x1=position,
            y1=0.95,
            line_dash="dash",
        )
        fig = fig.add_annotation(
            x=position, yref="paper", y=1, showarrow=False, text=label
        )
    return fig
dddf08632b22697528cc20befae275a5dab7ba2f
45,634
import itertools


def proportion_with_neighbours(block_list):
    """
    Calculate the proportion of positive windows that have positive neighbours

    :param list block_list: list of positive windows.
    :returns: Proportion of positive windows with positive neighbours.
    """
    no_with_neighbours = 0
    for key, group in itertools.groupby(block_list):
        group = list(group)
        if key and len(group) > 1:
            no_with_neighbours += len(group)
    try:
        return float(no_with_neighbours) / sum(block_list)
    except ZeroDivisionError:
        return 0.
de314f13a234a90dcb4698666943a2a6619e9318
45,640
from re import match
from re import split


def _list_of_countries(value):
    """
    Parses a comma or semicolon delimited list of ISO 3166-1 alpha-2
    codes, discarding those which don't match our expected format. We
    also allow a special pseudo-country code "iso".
    Returns a list of lower-case, stripped country codes (plus "iso").
    """
    countries = []
    candidates = split('[,;]', value)
    for candidate in candidates:
        # should have an ISO 3166-1 alpha-2 code, so should be exactly 2
        # ASCII latin characters (anchored so longer strings don't pass).
        candidate = candidate.strip().lower()
        if candidate == 'iso' or match('[a-z][a-z]$', candidate):
            countries.append(candidate)
    return countries
e5bb98b1f4b60aa2ccf6105eaec03b6e686a9c47
45,643
def getDate(timestamp):
    """
    Extracts the date from a timestamp.

    :param timestamp: The timestamp from which the date should be obtained.
    :type timestamp: string
    :returns: The date info of the timestamp.
    :rtype: string
    """
    return timestamp.split('T')[0]
8b612541dc17d76e0b17d9c774597a740f969b98
45,645
def get_xy(df, y, mask, vars):
    """Returns specified X and y with reset indices."""
    X = df.loc[mask, vars].reset_index(drop=True)
    y = df.loc[mask, y].reset_index(drop=True)
    return X, y
7ca6021453b628f017570e279968397110109f97
45,649
def num(r):
    """
    Convert from Z3 to python float values.
    """
    return float(r.numerator_as_long()) / float(r.denominator_as_long())
dbb41e4d490bb6495fa1e3440472b72614a47075
45,658
def expand_skipgrams_word_list(wlist, qsize, output, sep='~'):
    """Expands a list of words into a list of skipgrams. It uses `sep` to join words

    :param wlist: List of words computed by :py:func:`microtc.textmodel.get_word_list`.
    :type wlist: list
    :param qsize: (qsize, skip) qsize is the q-gram size and skip is the number of words ahead.
    :type qsize: tuple
    :param output: output
    :type output: list
    :param sep: String used to join the words
    :type sep: str

    :returns: output
    :rtype: list

    Example:

    >>> from microtc.textmodel import expand_skipgrams_word_list
    >>> wlist = ["Good", "morning", "Mexico"]
    >>> expand_skipgrams_word_list(wlist, (2, 1), list())
    ['Good~Mexico']
    """
    n = len(wlist)
    qsize, skip = qsize
    for start in range(n - (qsize + (qsize - 1) * skip) + 1):
        if qsize == 2:
            t = wlist[start] + sep + wlist[start + 1 + skip]
        else:
            t = sep.join([wlist[start + i * (1 + skip)] for i in range(qsize)])
        output.append(t)
    return output
8a4402b47d0a644f0a3f99df92e6071c1597ea6d
45,661
def create_system_dict(df):
    """
    Reads a pandas DataFrame and creates a dictionary where the keys are
    the site ids and the values are the list of systems for each site.

    :param df: pandas DataFrame with site and system information, a `site`
        and a `system` column.
    :return: tuple of the site list and a dictionary with the systems
        associated to each site.
    """
    site_list = df['site'].unique().tolist()
    ss_dict = {}
    for site in site_list:
        systems_in_site = df[df['site'] == site]['system'].values.tolist()
        ss_dict[site] = systems_in_site
    return site_list, ss_dict
843336cd4dfdb4dbc7580ae2c815724f0270bf10
45,663
def get_port_mtu(duthost, interface):
    """
    Get MTU of port from interface name

    Args:
        duthost: DUT host object
        interface: Full interface name

    Returns:
        MTU
    """
    if '.' in interface:
        out = duthost.show_and_parse("show subinterface status {}".format(interface))
        return out[0]['mtu']
    out = duthost.show_and_parse("show interface status {}".format(interface))
    return out[0]['mtu']
f0b3cb81a16cb37b2e407d17876cd3e88b19b427
45,664
def binding_energy(proton_seq, affinities):
    """Calculate binding energy from proton affinities

    Parameters
    ----------
    proton_seq : ndarray
        protonation state of residues
    affinities : ndarray
        proton affinities for residues

    Returns
    -------
    binding energy : float
        Binding energy in Joules
    """
    return affinities[proton_seq.nonzero()[0]].sum()
23201ae7bca072f3b1d971e7e15ad4fd485eda79
45,666
def clean(s):
    """
    Remove white spaces from <s>.
    """
    return s.strip(' \t\n\r\f\v')
65e16099090d4af8ab289ddd8e12321305fc70d0
45,669
from typing import Callable


def extract_name(callable: Callable) -> str:
    """
    Given a callable that could be either a class or a function,
    retrieve its name at runtime for subsequent use

    Args:
        callable (Callable): Callable whose name is to be extracted

    Return:
        Name of callable (str)
    """
    try:
        return callable.__name__
    except AttributeError:
        # Instances without a `__name__` fall back to their class's name.
        return type(callable).__name__
7b4a04856abd2601b26cf0623ac961bc86c9790c
45,670
def get_contributors(raw):
    """
    Extract contributors.

    Only contributors tagged as belonging to any of
    the valid roles will be extracted.

    @param raw: json object of a Libris edition
    @type raw: dictionary
    """
    valid_roles = ["author", "editor", "translator", "illustrator"]
    agents = {"author": [], "editor": [], "translator": [], "illustrator": []}
    contribution = raw["mainEntity"]["instanceOf"].get("contribution")
    for agent in contribution:
        raw_role = agent.get("role")
        if not raw_role:
            ag_role = "author"
        else:
            ag_role = agent["role"][0]["@id"].split("/")[-1]
        if ag_role in valid_roles:
            ag_id = agent["agent"].get("@id") or ""
            ag_first = agent["agent"].get("givenName") or ""
            ag_last = agent["agent"].get("familyName") or ""
            ag_full = "{} {}".format(ag_first, ag_last)
            agents[ag_role].append({"name": ag_full, "id": ag_id})
    return agents
18e8b3737643e825b7e94a75603883086f5a37d1
45,672
def add_main_cat(df):
    """Takes a df and adds a column with the main category

    input:
        df (pd.DataFrame): dataframe to add column to, must contain "cat_id"
    returns:
        df (pd.DataFrame): dataframe with extracted main cat
    """
    df["main_cat"] = df["cat_id"].astype(str).str.slice(0, 2)
    return df
213e3c068adfba336c9610c208f46057ecf8b7ee
45,680
from typing import Sequence


def is_palindrome(sequence: Sequence) -> bool:
    """
    Returns whether a sequence is a palindrome
    """
    return sequence[0:len(sequence) // 2] == sequence[-1:(len(sequence) - 1) // 2:-1]
cf60fa4198d29514f255ed5597187d9ece928626
45,681
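Illustrative checks for `is_palindrome` above; it works on any sequence, not just strings:

assert is_palindrome("racecar")
assert is_palindrome([1, 2, 2, 1])
assert not is_palindrome("python")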
def pytest_report_header(config):
    """Print a message in case the plugin is activated."""
    if config.option.dependencies:
        return 'Only tests affected by the changed files are being run.'
f1169bcc8747e4f64d4d607e7eef6199928f186b
45,684
def lower_first_char(string):
    """De-capitalize the first letter of a string.

    Args:
        string (str): The input string.

    Returns:
        str: The de-capitalized string.

    .. note::

       This function is not the exact opposite of the capitalize function of
       the standard library. For example, capitalize('abC') returns Abc
       rather than AbC.
    """
    if not string:  # Added to handle case where string is None or empty
        return string
    else:
        return string[0].lower() + string[1:]
053b23379ed504268c94194ff54e62b7bb112bfe
45,685
def transform_coord(coord, matrix):
    """
    Transforms the given coordinate by the given matrix

    :param coord: The coordinate to transform (tuple (x, y))
    :param matrix: The matrix to transform by (3x3 numpy array)
    :return: The transformed coordinate (tuple (x, y), components casted to ints)
    """
    h0 = matrix[0, 0]
    h1 = matrix[0, 1]
    h2 = matrix[0, 2]
    h3 = matrix[1, 0]
    h4 = matrix[1, 1]
    h5 = matrix[1, 2]
    h6 = matrix[2, 0]
    h7 = matrix[2, 1]
    h8 = matrix[2, 2]

    tx = (h0 * coord[0] + h1 * coord[1] + h2)
    ty = (h3 * coord[0] + h4 * coord[1] + h5)
    tz = (h6 * coord[0] + h7 * coord[1] + h8)

    return int(tx / tz), int(ty / tz)
39b75a25fd47c47a8ad9ed27c5c52a901fdd6874
45,690
def naiveVarDrop(X, searchCols, tol=0.0001, standardize=False, asList=False,
                 print_=False):
    """
    Drop columns based on which columns have variance below the threshold.

    Parameters
    ----------
    X : pandas dataframe
        Feature (or design) matrix.
    searchCols : list or list-like (str)
        The columns to search. If None, use all columns.
    tol : float, optional
        The threshold for variance to decide which columns to keep or drop.
        The default is 0.0001.
    standardize : bool, optional
        Currently unused. The default is False.
    asList : bool, optional
        Return only the list of columns to be dropped. The default is False.
    print_ : bool, optional
        Print the columns to be dropped. The default is False.

    Returns
    -------
    list or dataframe
        Either list of columns to be dropped or dataframe with columns removed.
    """
    cols_to_drop = []
    if searchCols is None:
        searchCols = list(X.columns)
    for i in searchCols:
        var = X.loc[:, i].var(ddof=1)
        if var < tol:
            cols_to_drop.append(i)
    if print_:
        print("Dropped " + str(len(cols_to_drop)) + " low-var Columns")
    if asList:
        return cols_to_drop
    else:
        return X.drop(cols_to_drop, axis=1)
9bee1417a6cfff3b398c612463c6f56a163db1ba
45,692
def selectCommands(commands, indexList, lineNumbers=False):
    """Returns the commands indexed by the list passed."""
    commandList = []
    for index in indexList:
        record = commands[index]['cmd']
        if lineNumbers:
            record = (index, record)
        commandList.append(record)
    return commandList
650632d0e168df8cbf5bc64aa79fd4b8fb746b44
45,695
def should_go_right(event: dict):
    """Returns true if the current text event implies a "right" button press to proceed."""
    # str.startswith accepts a tuple of prefixes, collapsing the if/elif chain.
    return event["text"].startswith(("Review", "Amount", "Address", "Confirm", "Fees"))
c5c47f5e4f02b6e875e869201d6a266a819d3ba6
45,697
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True if the values a and b are close to each other and False
    otherwise. (Clone from Python 3.5)

    Args:
        a: A float.
        b: A float.
        rel_tol: The relative tolerance – it is the maximum allowed difference
            between a and b, relative to the larger absolute value of a or b.
            For example, to set a tolerance of 5%, pass rel_tol=0.05. The
            default tolerance is 1e-09, which assures that the two values are
            the same within about 9 decimal digits. rel_tol must be greater
            than zero.
        abs_tol: The minimum absolute tolerance – useful for comparisons near
            zero. abs_tol must be at least zero.

    Returns:
        True if the values a and b are close to each other and False otherwise.
    """
    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
270be278b3865f5faebdf1bb436daa7bec90fb9c
45,699
def get_user_identity(event):
    """Gets event identity from event."""
    return event['detail'].get('userIdentity', {})
d90b47ad530ce05fd2e667c2e5499519ea00122a
45,700
import yaml


def read_yaml(infile, log=None) -> dict:
    """
    Read YAML file and return a python dictionary

    Args:
        infile: path to the yaml file; can be hdfs path

    Returns:
        python dictionary
    """
    if infile.startswith("hdfs"):
        raise NotImplementedError
    else:
        # Use a context manager so the file handle is closed promptly.
        with open(infile, "r") as f:
            return yaml.safe_load(f)
9c0c2ce90841119d158a72966061db3a7fa9a36a
45,701
def distance_point(p1, p2):
    """
    Returns the Euclidean distance between two points.

    @param p1 point 1
    @param p2 point 2
    @return distance
    """
    d = 0
    for a, b in zip(p1, p2):
        d += (a - b) ** 2
    return d ** 0.5
56cbebd2856e870e807fe6a4bd92b914f806d64e
45,704
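A quick check of `distance_point` above using the classic 3-4-5 triangle:

print(distance_point((0, 0), (3, 4)))        # 5.0
print(distance_point((1, 2, 3), (1, 2, 3)))  # 0.0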
def get_cloudines(input_json):
    """Return the cloudiness using a phrase description."""
    clouds_val = int(input_json['clouds']['all'])
    cloudines = ''
    if 11 <= clouds_val < 25:
        cloudines = 'Few clouds'
    elif 25 <= clouds_val < 50:
        cloudines = 'Scattered clouds'
    elif 50 <= clouds_val < 85:
        cloudines = 'Broken clouds'
    elif 85 <= clouds_val <= 100:
        cloudines = 'Overcast clouds'
    return cloudines
2dc9a828e6b767504e2632d102e65c352695d7a7
45,707
def get_extension(t_path):
    """
    Get extension of the file

    :param t_path: path or name of the file
    :return: string with extension of the file or empty string if we failed to get it
    """
    path_parts = str.split(t_path, '.')
    extension = path_parts[-1:][0]
    extension = extension.lower()
    return extension
70fa800714c1fbd71f3071ed4832a1cd96f1949f
45,708
from typing import Union


def strint(value: Union[int, str, float]) -> str:
    """
    If the passed value is a number type, return the number as a string with
    no decimal point or places.
    Else just return the string.
    """
    if type(value) in (int, float):
        return str(int(value))
    else:
        return str(value)
271424978e0816d9e998221b18c86cc88c9adfb0
45,719
def batch_data(data, batch_size):
    """Given a list, batch that list into chunks of size batch_size

    Args:
        data (List): list to be batched
        batch_size (int): size of each batch

    Returns:
        batches (List[List]): a list of lists, each inner list of size
            batch_size except possibly the last one.
    """
    batches = [data[i: i + batch_size] for i in range(0, len(data), batch_size)]
    return batches
1404f67ff4a2e2515c555905e9b5ec5fdf560dd6
45,720
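A usage sketch for `batch_data` above; note the last batch may be short:

print(batch_data([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]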
def parse_account(config, auth, account):
    """ Breaks a [account:advertiser@profile] string into parts if supplied.

    This function was created to accommodate supplying advertiser and profile
    information as a single token. It needs to be refactored as this approach
    is messy.

    Possible variants include:
    * [account:advertiser@profile]
    * [account:advertiser]
    * [account@profile]

    Args:
      * auth: (string) Either user or service.
      * account: (string) A string representing [account:advertiser@profile]

    Returns:
      * (network_id, advertiser_ids, profile_id) after parsing the account token.
    """
    network_id = account
    advertiser_ids = None
    profile_id = None

    # if exists, get profile from end
    try:
        network_id, profile_id = network_id.split('@', 1)
    except ValueError:
        profile_id = None

    # if exists, get advertiser from end
    try:
        network_id, advertiser_ids = network_id.split(':', 1)
    except ValueError:
        pass

    # if network or advertiser, convert to integer
    if network_id is not None:
        network_id = int(network_id)
    if advertiser_ids is not None:
        advertiser_ids = [
            int(advertiser_id.strip())
            for advertiser_id in advertiser_ids.split(',')
        ]

    # Return all three parsed parts, matching the docstring.
    return network_id, advertiser_ids, profile_id
a1c00ebc9f03358d7864765bac5ad545497444ad
45,728
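Illustrative calls to `parse_account` above, as reconstructed with the three-value return its docstring describes; the tokens are made up, and `config`/`auth` are unused by the parsing itself, so None and 'user' are placeholders:

print(parse_account(None, 'user', '123:45,67@89'))  # (123, [45, 67], '89')
print(parse_account(None, 'user', '123'))           # (123, None, None)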
def get_intel_doc_label_item(intel_doc_label: dict) -> dict:
    """ Gets the relevant fields from a given intel doc label.

    :type intel_doc_label: ``dict``
    :param intel_doc_label: The intel doc label obtained from api call

    :return: a dictionary containing only the relevant fields.
    :rtype: ``dict``
    """
    return {
        'ID': intel_doc_label.get('id'),
        'Name': intel_doc_label.get('name'),
        'Description': intel_doc_label.get('description'),
        'IndicatorCount': intel_doc_label.get('indicatorCount'),
        'SignalCount': intel_doc_label.get('signalCount'),
        'CreatedAt': intel_doc_label.get('createdAt'),
        'UpdatedAt': intel_doc_label.get('updatedAt'),
    }
578467798f08bfd0776aa285790dc95d4686a830
45,731
def Extensible(cls):
    """Returns a subclass of cls that has no __slots__ member.

    This allows you to set arbitrary members in each instance, even if they
    don't exist already in the class. This is useful for making one-off
    Exporter() instances in tests, for example.

    Args:
      cls: a class to inherit from.
    Returns:
      A new class derived from cls.
    Example:
      o = Extensible(object)
      o.Foo = 5
    """
    class Ext(cls):
        pass
    Ext.__name__ = 'Ext_' + cls.__name__
    return Ext
ce579072a24652fa3195cbb809978bc21fef5c29
45,737
async def mock_successful_connection(*args, **kwargs):
    """Return a successful connection."""
    return True
fc3801c8be6a98033c265a97de3bb413d0c249cc
45,739
import socket
import ipaddress


def _verify_hostname(host):
    """Verify a hostname is resolvable."""
    try:
        resolved = socket.getaddrinfo(host, None)[0][4][0]
        ip = ipaddress.ip_address(resolved)
        return ip
    except (socket.gaierror, ValueError):
        return False
d0b74fdcf7fabec0083cd5de295199077ad6e676
45,749
def is_gcs_path(path):
    # type: (str) -> bool
    """Returns True if given path is GCS path, False otherwise."""
    return path.strip().lower().startswith("gs://")
c6b8035d685264a206555abf219c67bb5e04d340
45,750
def read(file):
    """Read a $file to memory"""
    with open(file, 'rb') as f:
        buffer = f.read()
    return buffer
09dcf131cb8a3899a02bb279bc49cd83e06f6df4
45,751
def load_w2v_vocab(fname):
    """Load vocabulary file generated by gensim as a dictionary.

    Note that this does not correspond to the gensim vocabulary object.

    Parameters
    ----------
    fname: string
        Filename where the w2v model is stored.

    Returns
    -------
    vocab: dict{str: int}
        A dictionary containing the words as keys and their counts as values.
    """
    lst = []
    with open(fname, 'r') as f:
        for line in f:
            word, count = line.split()
            lst.append((word, int(count)))
    vocab = dict(lst)
    return vocab
a53c1f88ec6f67d5ad4173316ac4e4975bbd487f
45,752
import re


def slurp_word(s, idx):
    """Returns index boundaries of word adjacent to `idx` in `s`."""
    alnum = r"[A-Za-z0-9_]"
    start, end = idx, idx
    # Scan left; stop at the start of the string so s[-1] never wraps around.
    while start > 0 and re.match(alnum, s[start - 1]):
        start -= 1
    # Scan right; stop at the end of the string to avoid an IndexError.
    while end < len(s) and re.match(alnum, s[end]):
        end += 1
    return start, end
0b04de59cc1a848fac02bf58081ec990c8aa245b
45,763
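A usage sketch for `slurp_word` above, using the bounds-guarded listing; index 5 falls inside the word "bar_baz":

s = "foo bar_baz qux"
start, end = slurp_word(s, 5)
print(s[start:end])  # bar_baz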
def get_user_rating_max(ratings, n=20):
    """Return the keys of users with at most n ratings"""
    # dict.iteritems() is Python 2 only; items() works in both versions.
    return [key for key, value in ratings.items() if len(value) <= n]
9b4ba9c0ee6e11d1d5ec14ac41b1b93f029d6db4
45,764
def strxor(str1, str2):
    """Xors 2 strings character by character."""
    minlen = min(len(str1), len(str2))
    ans = ""
    for (c1, c2) in zip(str1[:minlen], str2[:minlen]):
        ans += chr(ord(c1) ^ ord(c2))
    return ans
05051bd6938726f4ce222ec01c063f53a43536cb
45,774
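A small check of `strxor` above; XOR-ing an ASCII letter with a space (0x20) flips its case:

print(strxor("ABC", "   "))  # 'abc'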
def string_from_source(source):
    """Returns string like "CxiDs2.0:Cspad.0"
    from "Source('DetInfo(CxiDs2.0:Cspad.0)')" or "Source('DsaCsPad')"
    """
    str_in_quots = str(source).split('"')[1]
    str_split = str_in_quots.split('(')
    return str_split[1].rstrip(')') if len(str_split) > 1 else str_in_quots
741ea85cda2e197f6b882852f32f4df5e5338355
45,777
def make_values(repo_pkg, cur_ver, new_ver, branch, check_result):
    """
    Make values for push_create_pr_issue
    """
    values = {
        "repo_pkg": repo_pkg,
        "cur_version": cur_ver,
        "new_version": new_ver,
        "branch": branch,
        "check_result": check_result,
    }
    return values
38d5cbc983c57c04bbcf1efcddcc67cabb02b4fb
45,779
def get_coordinates_from_token(token, mod):
    """
    A function which takes a mod and a token, finds the token in the mod, and
    then returns the coordinate (tuple) at which the token is found. If it is
    not found, it returns None.
    """
    for coordinates, token_in_mod in mod.items():
        # No possibility of duplicate tokens. If found, return the coordinates.
        if token_in_mod == token:
            return coordinates
    return None
429e42a6439972d23bc54ac5108f196aa0ca93b7
45,780
import json


def failed_validation(*messages, **kwargs):
    """Return a validation object that looks like the add-on validator."""
    upload = kwargs.pop('upload', None)
    if upload is None or not upload.validation:
        msgs = []
    else:
        msgs = json.loads(upload.validation)['messages']

    for msg in messages:
        msgs.append({'type': 'error', 'message': msg, 'tier': 1})

    return json.dumps({'errors': sum(1 for m in msgs if m['type'] == 'error'),
                       'success': False,
                       'messages': msgs,
                       'prelim': True})
fc9b54d5ef480ccaf0943f75042b3619a56a0924
45,781
def make_unique(arr):
    """Choose only the unique elements in the array"""
    return list(set(arr))
e1ef04f55f1c132e54d3db4c7118469979303876
45,784
def ensure_list(obj, tuple2list=False):
    """
    Return a list whatever the input object is.

    Examples
    --------
    >>> ensure_list(list("abc"))
    ['a', 'b', 'c']
    >>> ensure_list("abc")
    ['abc']
    >>> ensure_list(tuple("abc"))
    [('a', 'b', 'c')]
    >>> ensure_list(tuple("abc"), tuple2list=True)
    ['a', 'b', 'c']
    >>> ensure_list(None)
    []
    >>> ensure_list(5.0)
    [5.0]
    """
    if obj is None:
        return []
    if isinstance(obj, list):
        return obj
    elif tuple2list and isinstance(obj, tuple):
        return list(obj)
    return [obj]
9f14560525f39951e3296b606c232d3fdb806f07
45,787
def env_chk(val, fw_spec, strict=True, default=None):
    """
    env_chk() is a way to set different values for a property depending on
    the worker machine. For example, you might have slightly different
    executable names or scratch directories on different machines.

    env_chk() works using the principles of the FWorker env in FireWorks.

    This helper method translates string "val" that looks like this:
    ">>ENV_KEY<<"
    to the contents of:
    fw_spec["_fw_env"][ENV_KEY]

    Otherwise, the string "val" is interpreted literally and passed-through as is.

    The fw_spec["_fw_env"] is in turn set by the FWorker. For more details, see:
    https://materialsproject.github.io/fireworks/worker_tutorial.html

    Since the fw_env can be set differently for each FireWorker, one can use
    this method to translate a single "val" into multiple possibilities, thus
    achieving different behavior on different machines.

    Args:
        val: any value, with ">><<" notation reserved for special env lookup values
        fw_spec: (dict) fw_spec where one can find the _fw_env keys
        strict (bool): if True, errors if env format (>><<) specified but cannot
            be found in fw_spec
        default: if val is None or env cannot be found in non-strict mode,
            return default
    """
    if val is None:
        return default

    if isinstance(val, str) and val.startswith(">>") and val.endswith("<<"):
        if strict:
            return fw_spec["_fw_env"][val[2:-2]]
        return fw_spec.get("_fw_env", {}).get(val[2:-2], default)
    return val
b33218f924064beda1dc0c8f922d4577bf4bd307
45,791
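A usage sketch for `env_chk` above; the `db_file` key and path are hypothetical:

fw_spec = {"_fw_env": {"db_file": "/path/to/db.json"}}
print(env_chk(">>db_file<<", fw_spec))    # /path/to/db.json
print(env_chk("literal_value", fw_spec))  # literal_value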
def _clean_subpath(subpath, delim="/"):
    """
    Add / to the subpath if needed to avoid partial s-exon matching.
    """
    first = subpath[0]
    last = subpath[-1]
    cleaned_subpath = ""
    if first != "s":  # start
        cleaned_subpath += delim
    cleaned_subpath += subpath
    if last != "p":  # stop
        cleaned_subpath += delim
    return cleaned_subpath
2195bfea9f798a88e688de62eafa75e1519ab647
45,792
def preconvert_str(value, name, lower_limit, upper_limit):
    """
    Converts the given `value` to an acceptable string by the wrapper.

    Parameters
    ----------
    value : `str`
        The string to convert.
    name : `str`
        The name of the value.
    lower_limit : `int`
        The minimal length of the string.
    upper_limit : `int`
        The maximal length of the string.

    Returns
    -------
    value : `str`

    Raises
    ------
    TypeError
        If `value` was not passed as `str` instance.
    ValueError
        If the `value`'s length is less than the given `lower_limit` or is
        higher than the given `upper_limit`.
    """
    if type(value) is str:
        pass
    elif isinstance(value, str):
        value = str(value)
    else:
        raise TypeError(f'`{name}` can be `str` instance, got {value.__class__.__name__}.')

    length = len(value)
    if (length != 0) and (length < lower_limit or length > upper_limit):
        raise ValueError(f'`{name}` can be between length {lower_limit} and {upper_limit}, '
                         f'got {length!r}; {value!r}.')

    return value
226d9109b9056057f0998634d10f3f5801a2db09
45,793
import logging
import torch


def restore_checkpoint(model, optimizer, checkpoint_file, device):
    """
    Restores model and optimizer from a checkpoint file and returns checkpoint
    information. Has side effect of loading the state_dict for model and
    optimizer (i.e. modifies the instances).

    :param model: [class], torch model instance
    :param optimizer: [class], torch optimizer instance
    :param checkpoint_file: string, full file path
    :param device: [class], torch device instance
    :return: Tuple of the checkpoint values
    """
    assert checkpoint_file

    logging.info("** ** * Restore from checkpoint: %s" % checkpoint_file)
    checkpoint_state = torch.load(checkpoint_file, map_location=device)
    model.load_state_dict(checkpoint_state["model_state_dict"])
    optimizer.load_state_dict(checkpoint_state["optimizer_state_dict"])
    last_epoch = checkpoint_state["epoch"]
    global_step = checkpoint_state["global_step"]
    logging.info("  RESTORED AT epoch:%d, global_step:%d" % (last_epoch, global_step))
    logging.info("** ** * Model restored! ** ** * ")

    # model.train()  # Do this in calling code for now, maybe want model.eval() there instead
    return last_epoch, global_step
8a620531bded9000c6f030d45d65df27a261a67a
45,795
def import_tnmr_pars(path):
    """
    Import parameter fields of tnmr data

    Args:
        path (str) : Path to .tnt file

    Returns:
        params (dict) : dictionary of parameter fields and values
    """
    params = {}
    with open(path, "rb") as f:
        params["version"] = f.read(8).decode("utf-8")
    return params
19a7043fb261d7d909d453f7ee2f7518a26a7ec7
45,801
from typing import Optional


def _extract_sequence_identifier(description: str) -> Optional[str]:
    """Extracts sequence identifier from description. Returns None if no match."""
    split_description = description.split()
    if split_description:
        return split_description[0].partition('/')[0]
    else:
        return None
b6f927daf3726a8a933eb47d066ed2aeee82fc70
45,812
def render_fobi_forms_list(context, queryset, *args, **kwargs):
    """Render the list of fobi forms.

    :syntax:

        {% render_fobi_forms_list [queryset] [show_edit_link] \
                                  [show_delete_link] \
                                  [show_export_link] %}

    :example:

        {% render_fobi_forms_list queryset show_edit_link=True \
                                  show_delete_link=False \
                                  show_export_link=False %}
    """
    request = context.get('request', None)
    show_edit_link = kwargs.get('edit_link', False)
    show_delete_link = kwargs.get('delete_link', False)
    show_export_link = kwargs.get('export_link', False)
    return {
        'show_custom_actions': (
            show_edit_link or show_delete_link or show_export_link
        ),
        'show_edit_link': show_edit_link,
        'show_delete_link': show_delete_link,
        'show_export_link': show_export_link,
    }
0e2df881729ec6bf0cd2a9798a67de2c2973526c
45,815
def cents_to_hz(F_cent, F_ref=55.0):
    """Converts frequency in cents to Hz

    Notebook: C8/C8S2_FundFreqTracking.ipynb

    Args:
        F_cent (float or np.ndarray): Frequency in cents
        F_ref (float): Reference frequency in Hz (Default value = 55.0)

    Returns:
        F (float or np.ndarray): Frequency in Hz
    """
    F = F_ref * 2 ** (F_cent / 1200)
    return F
c73c67bb931d07743ee3b53a662485924b0c3f56
45,816
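Two sanity checks for `cents_to_hz` above: 1200 cents is one octave, and 0 cents returns the reference frequency:

print(cents_to_hz(1200))            # 110.0 (one octave above 55 Hz)
print(cents_to_hz(0, F_ref=440.0))  # 440.0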
def get_continuous_column_by_class(dataset):
    """Separates continuous column by binary class"""
    # Separate continuous column by class
    continuous_column = dataset.columns_that_are('continuous')[0]
    continuous_true = []
    continuous_false = []
    for cont_value, class_value in zip(continuous_column, dataset.cls):
        if class_value:
            continuous_true.append(cont_value)
        else:
            continuous_false.append(cont_value)
    continuous_column_by_class = [continuous_true, continuous_false]
    return continuous_column_by_class
71c52e3703f6e9eea60c6dbdba839ef45365b3fa
45,817
def get_mac_s(output: bytes) -> bytes:
    """Support function to get the 64-bit resynchronisation authentication
    code (MAC-S) from OUT1, the output of 3GPP f1* function.

    :param output: OUT1
    :returns: OUT1[64] .. OUT1[127]
    """
    edge = 8  # bit 64 starts at byte 64 // 8 = 8
    return output[edge:]
c5f1d2d14819e9a9bf660aeb82dd24b8111263b5
45,823
def custom_sort(dictionary, sort_top_labels, sort_bottom_labels):
    """
    Given a dictionary in the form of
    {'<a_label>': {
        'label': '<a_label>'
        'value': '<a_value>'
    }, ... }
    and two lists (for top and bottom) ['<a_label>', '<c_label>', '<b_label>', ...]
    return a list of the dictionary's values ordered

    <all top items found in dictionary, in the given order>
    <others>
    <all bottom items found in dictionary, in the given order>
    """
    ret = []
    for l in sort_top_labels:
        if l in dictionary:
            ret.append(dictionary[l])
    for label, facet in dictionary.items():
        if label not in sort_top_labels + sort_bottom_labels:
            ret.append(facet)
    for l in sort_bottom_labels:
        if l in dictionary:
            ret.append(dictionary[l])
    return ret
41d8b12fcf397e416ba9c2adb5bd8f2cafee36b1
45,824
def reconstruct_path(current, came_from):
    """
    Reconstruct path using last node and dictionary that maps each node on
    path to its predecessor.

    Args:
        current (int): Last node in discovered path
        came_from (dict): Dictionary mapping nodes on path to their predecessors

    Returns:
        (tuple): Path in the form of a list and the same path encoded in an edge list
    """
    # Initialize path and add last found node.
    path = [current]

    # Reconstruct.
    while current in came_from:
        current = came_from[current]
        path.insert(0, current)

    # Construct edgelist.
    edgelist = [(path[idx], path[idx + 1]) for idx in range(len(path) - 1)]

    # Return path and edge list.
    return path, edgelist
43d05b50987d3022f748d40b76942de38f866ac5
45,826
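A usage sketch for `reconstruct_path` above with a hypothetical predecessor map for the path 0 -> 1 -> 2 -> 3:

came_from = {3: 2, 2: 1, 1: 0}
path, edges = reconstruct_path(3, came_from)
print(path)   # [0, 1, 2, 3]
print(edges)  # [(0, 1), (1, 2), (2, 3)]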
def gcd_steps(a, b):
    """Return the number of steps needed to calculate GCD(a, b)."""
    # GCD(a, b) = GCD(b, a mod b).
    steps = 0
    while b != 0:
        steps += 1
        # Calculate the remainder.
        remainder = a % b
        # Calculate GCD(b, remainder).
        a = b
        b = remainder
    # GCD(a, 0) is a.
    return steps
768d52c795c8c8eb20f4adfaf26f10da12962534
45,827
import math


def avp_from_temperature_min(temperature_min):
    """
    Estimate actual vapour pressure (*ea*) from minimum temperature.

    This method is to be used where humidity data are lacking or are of
    questionable quality. The method assumes that the dewpoint temperature
    is approximately equal to the minimum temperature (*temperature_min*),
    i.e. the air is saturated with water vapour at *temperature_min*.

    **Note**: This assumption may not hold in arid/semi-arid areas.
    In these areas it may be better to subtract 2 deg C from the minimum
    temperature (see Annex 6 in FAO paper).

    Based on equation 48 in Allen et al (1998).

    :param temperature_min: Daily minimum temperature [deg C]
    :return: Actual vapour pressure [kPa]
    :rtype: float
    """
    return 0.611 * math.exp((17.27 * temperature_min) / (temperature_min + 237.3))
2eeeaac62d228c6fb05ff583a5e539ab3bafffe4
45,829
def make_mock_video_ids(num_videos):
    """Makes a list of video ids used for unit tests.

    Args:
        num_videos: an integer; the number of mock video ids to make.

    Returns:
        a list of strings that can be used as video ids for unit tests.
    """
    video_ids = []
    for video_num in range(num_videos):
        video_ids.append(f"video{video_num}")
    return video_ids
14ec177575d4a11aa44a0e42e70cfd5791e38ad2
45,832