Dataset schema: content (string, lengths 39–14.9k), sha1 (string, length 40), id (int64, range 0–710k)
import hmac
import hashlib


def _hmac_sha256(key, msg):
    """
    Generates a sha256 digest using the given key and message.

    :param bytes key: starting key for the hash
    :param bytes msg: message to be hashed

    :returns: sha256 digest of msg as bytes, hashed using the given key
    """
    return hmac.new(key, msg, hashlib.sha256).digest()
0e85b6b26364fc06b8eb083e37e64803f33a9834
43,946
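# A quick usage sketch for _hmac_sha256 above; the key and message values are
# hypothetical, and both must be bytes since hmac.new() rejects str input.
key = b"server-side-secret"
msg = b"user_id=42&expires=1700000000"
digest = _hmac_sha256(key, msg)  # 32-byte sha256 HMAC
token = digest.hex()             # hex-encode for transport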
from typing import Dict
from typing import List


def convert_spin_to_list(spins: Dict) -> List:
    """Convert the Spin dictionary from the ssoCard into a list.

    Add the spin index as parameter to the Spin entries.

    Parameters
    ----------
    spins : dict
        The dictionary located at parameters.physical.spin in the ssoCard.

    Returns
    -------
    list
        A list of dictionaries, with one dictionary for each entry in
        parameters.physical.spin after removing the index layer.
    """
    spin_dicts = []

    for spin_id, spin_dict in spins.items():
        spin_dict["id_"] = spin_id
        spin_dicts.append(spin_dict)

    return spin_dicts
0d06523abe118305bbef13d681cb1d811628edda
43,947
def _get_scripts_resource(pe):
    """Return the PYTHONSCRIPT resource entry."""
    res = None
    for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        if entry.name and entry.name.string == b"PYTHONSCRIPT":
            res = entry.directory.entries[0].directory.entries[0]
            break
    return res
3bef22589a2793b09d89c4f4552f12f1583c9274
43,952
from typing import List
from typing import Any
from typing import Tuple


def pad(x: List[List[Any]], pad: int) -> Tuple[List[List[int]], List[List[bool]]]:
    """Pad 2d list x based on max length. Also generate a mask to access
    valid values in the padded list.

    Args:
        x (List[List[Any]]): The 2d list of values to be padded with pad values.
        pad (int): the value that will be used to pad x.

    Returns:
        Tuple[List[List[int]], List[List[bool]]]: padded x along with its mask
    """
    max_length = max(len(sample) for sample in x)
    mask = [
        [True] * len(sample) + [False] * (max_length - len(sample)) for sample in x
    ]
    x = [sample + [pad] * (max_length - len(sample)) for sample in x]
    return x, mask
014ab67b5ff3337f700cf8c39991f91a8ff23dd2
43,959
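# A short sketch of how the padding helper above behaves on a ragged 2d list
# (values are illustrative):
rows = [[1, 2, 3], [4], [5, 6]]
padded, mask = pad(rows, 0)
# padded == [[1, 2, 3], [4, 0, 0], [5, 6, 0]]
# mask   == [[True, True, True], [True, False, False], [True, True, False]]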
def get_last_slackblocks_message_text(messages) -> str:
    """
    Utility method for retrieving the text content of the most recent message.

    Assumes that the message was constructed using a single SlackBlocks
    SectionBlock with at least one attachment.

    :raises KeyError: likely because the last message was not a SlackBlocks
        message
    :raises IndexError: if there are no messages in `messages`
    """
    return messages[-1]["attachments"][0]["blocks"][0]["text"]["text"]
63755b319445ca9b030c4a244da990f39a4b33bf
43,960
from typing import Dict


def config_get_policy(config: Dict[str, str]):
    """Return the effective SELinux policy

    Checks if SELinux is enabled and if so returns the policy; otherwise
    `None` is returned.
    """
    enabled = config.get('SELINUX', 'disabled')
    if enabled not in ['enforcing', 'permissive']:
        return None
    return config.get('SELINUXTYPE', None)
36310d1cb67bd6288c5db4c0f240a24c11f09b81
43,963
def ebitda(gross_profit, sg_a):
    """
    Computes EBITDA (earnings before interest, tax, depreciation and
    amortization).

    Parameters
    ----------
    gross_profit : int or float
        Gross profit for the period
    sg_a : int or float
        Selling, general and administrative cost

    Returns
    -------
    out : int or float
        EBITDA
    """
    return gross_profit - sg_a
4de26c7535d6c76e2945af2f59e5aee8ab47afe9
43,969
def king(r, rc, rt, sigma_0, alpha=2):
    """
    See http://iopscience.iop.org/1538-3881/139/6/2097/fulltext/

    Parameters
    ----------
    r: float
        radius
    rc: float
        core radius
    rt: float
        truncation radius
    sigma_0: float
        central density
    alpha: float, optional
        profile exponent (default 2)
    """
    def z(x):
        return 1 / (1 + (x / rc)**2)**(1. / alpha)

    term1 = (1 - z(rt))**-alpha
    term2 = (z(r) - z(rt))**alpha
    sigma = sigma_0 * term1 * term2
    return sigma
d5df62bfb6e2973d394fe65c77b24160eccfc125
43,971
from pathlib import Path
import glob
import re


def increment_path(path, exist_ok=False):
    """Automatically increment path, i.e. runs/exp --> runs/exp0, runs/exp1 etc.

    Args:
        path (str or pathlib.Path): f"{model_dir}/{args.name}".
        exist_ok (bool): whether to reuse an existing path as-is
            (increment only if False).
    """
    path = Path(path)
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}*")
        # re.escape guards against regex metacharacters in the stem
        matches = [re.search(rf"{re.escape(path.stem)}(\d+)", d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]
        n = max(i) + 1 if i else 2
        return f"{path}{n}"
eaffc47b83ae969242b5e7af1389658a25873f1c
43,972
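# A usage sketch for increment_path; the directory names are hypothetical and
# assume runs/exp and runs/exp2 already exist on disk.
increment_path("runs/exp")                  # -> "runs/exp3" (exp and exp2 taken)
increment_path("runs/new")                  # -> "runs/new"  (does not exist yet)
increment_path("runs/exp", exist_ok=True)   # -> "runs/exp"  (reuse allowed)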
def mk_rule_key(rule):
    """Convert a rule tuple to a hyphen-separated string."""
    return '{}-{}-{}-{}'.format(rule.source_institution,
                                rule.destination_institution,
                                rule.subject_area,
                                rule.group_number)
ca7ffddd023eaf10f6a50532e954c03ced182d29
43,976
import pickle


def get_original_config(dirname: str) -> dict:
    """Get original model config used with hydra

    Args:
        dirname (str): model directory

    Returns:
        dict: config dict
    """
    # Use a context manager so the file handle is closed promptly
    with open(f"{dirname}/tb/config.pkl", "rb") as f:
        config = pickle.load(f)
    return config
0dc54883bd508e8e95905a6ba67e7ec7cdb66d82
43,981
import six


def to_bytestring(data):
    """
    Convert data to a (utf-8 encoded) byte-string if it isn't a byte-string
    already.
    """
    if not isinstance(data, six.binary_type):
        data = six.text_type(data).encode('utf-8')
    return data
a516417979a2cca0ae71a39cb8b504c2395ad2c4
43,982
def point_to_node(gfa_, node_id):
    """Check if the given node_id points to a node in the gfa graph."""
    # Use an identity check against None rather than != None
    return gfa_.nodes(identifier=node_id) is not None
16053e89556ce0e097f1dc62c02d5e313faf89eb
43,993
import copy


def filter_dict(target_dict, keys_to_filter):
    """Filters key(s) from top level of a dict

    Args:
        target_dict (dict): the dictionary to filter
        keys_to_filter (list, tuple, str): set of keys to filter

    Returns:
        A filtered copy of target_dict
    """
    assert isinstance(target_dict, dict)
    d = copy.deepcopy(target_dict)

    # Let keys_to_filter be a single string or a tuple of values
    if isinstance(keys_to_filter, str):
        keys_to_filter = [keys_to_filter]
    elif isinstance(keys_to_filter, tuple):
        keys_to_filter = list(keys_to_filter)
    elif keys_to_filter is None:
        keys_to_filter = []

    # Do the filtering
    for kf in keys_to_filter:
        try:
            d.pop(kf)
        except KeyError:
            pass

    return d
70a6577c30de54c8d826aae8ba7e76b3b07412f2
43,994
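# A quick sketch of filter_dict on a small dict (keys and values are
# illustrative):
cfg = {"host": "localhost", "port": 8080, "debug": True}
filter_dict(cfg, "debug")           # {'host': 'localhost', 'port': 8080}
filter_dict(cfg, ("host", "port"))  # {'debug': True}
filter_dict(cfg, None)              # unchanged deep copy of cfg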
def create_pinhole_camera(height, width):
    """
    Creates a pinhole camera according to height and width, assuming the
    principal point is in the center of the image
    """
    cx = (width - 1) / 2
    cy = (height - 1) / 2
    f = max(cx, cy)
    return f, cx, cy
6dc23f777b97bcce3cc4b32c36f473fe0c507d0f
43,996
from typing import List


def _replace_pw_references(alias_cmd: str, pw_args: List[str]) -> str:
    """Replace all occurrences of pw@ with the path to the pw script
    (argv[0]) plus all pw options"""
    replacement = " ".join(pw_args) + " "
    return alias_cmd.replace("pw@", replacement)
9442bbc61389e3d0e6303668463cbf29aad52a1b
44,000
def single_state_time_spent(state_dict, state):
    """
    Given a ticket's state dictionary, returns how much time it spent in the
    given `state`. Assumes state_dict has the key `state` present.
    """
    # Measurement 2: Average Time Spent in Scrum Team Backlog
    # For the PRs that need to be reviewed by a scrum team, obtain an average
    # of how long a ticket spends in a team backlog.
    #   AverageBacklog = sum(amount of time a ticket spends in "Awaiting Prioritization") /
    #                    count(tickets with a non-zero amount of time spent in "Awaiting Prioritization")
    # This will be a rolling average over all tickets currently open, or
    # closed in the past X days. In the initial rollout of this measurement,
    # we'll track for X=14, 30, and 60 days. After we have a few months'
    # worth of data, we can assess what historical interval(s) gives us the
    # most useful, actionable data.
    return state_dict[state]
94534ffd8958fe9c9d7ae3ec6a317108faad766d
44,001
from typing import Union


def clamp(value: Union[float, int],
          lower: Union[float, int],
          upper: Union[float, int]) -> Union[float, int]:
    """
    Clamp a number

    Same as min(max(value, lower), upper)

    Args:
        value (Union[float, int]): Value to clamp
        lower (Union[float, int]): min value
        upper (Union[float, int]): max value

    Returns:
        Union[float, int]: clamped value
    """
    return lower if value < lower else upper if value > upper else value
48b4e6c81219385b0c9d608459bea02370b63880
44,002
import json


def file_open_json(file):
    """Reads json file from the location input

    :param file: path of the json file
    :type file: String
    :return: contents of json
    :rtype: dictionary(dict)
    :raise Exception: Throws exception if unable to load file
    """
    try:
        with open(file) as f:
            return json.load(f)
    except Exception as e:
        print(e)
        # Chain the original exception so the root cause is preserved
        raise Exception("Could not open the json file") from e
99cfc9413880698260a7802b8917adfe4111b1e3
44,009
def read_block(fi):
    """Read and returns one block of non-empty lines from an input stream
    (stripped lines are returned in a list)"""
    block = []
    for line in fi:
        line = line.strip()
        if not line and len(block):
            break
        block.append(line)
    return block
ac5852e74bac7ffc084300d7a0b2ee8e3f51ccb6
44,011
import sympy


def handle_gcd_lcm(f, args):
    """
    Return the result of gcd() or lcm(), as UnevaluatedExpr

    f: str - name of function ("gcd" or "lcm")
    args: List[Expr] - list of function arguments
    """
    args = tuple(map(sympy.nsimplify, args))
    # gcd() and lcm() don't support evaluate=False
    return sympy.UnevaluatedExpr(getattr(sympy, f)(args))
34ecae7681e1ba7f18ab00969703fec6a14e864a
44,020
def get_author_name_and_id(soup):
    """
    Get author name and id.

    Examples
    --------
    >>> a_tag = '<a class="topic__author-link" href="/author_id">author_name</a>'
    >>> soup = make_soup(a_tag)
    >>> get_author_name_and_id(soup)
    ('author_name', 'author_id')
    """
    author = soup.select('a.topic__author-link')[0]
    return author.text.strip(), author.get('href').strip('/')
d7ecb9c56337d38541dc9cfd63a4976ab8e5acda
44,026
import re


def _find_sgRNA(guide_sequence, target_sequence, strand):
    """
    Find start, stop position of a sgRNA in a target sequence.

    Parameters:
    -----------
    guide_sequence
    target_sequence
    strand

    Returns:
    --------
    start, stop, strand
    """
    guide_span = re.search(guide_sequence, target_sequence).span()
    start, stop = guide_span[0], guide_span[1]
    if strand == "-":
        start, stop = (
            len(target_sequence) - guide_span[0],
            len(target_sequence) - guide_span[1],
        )
    return start, stop, strand
3b478908145a307262b7303bfc21320591b374db
44,031
import struct


def getheader(filename, gtype):
    """Read header data from Gadget data file 'filename' with Gadget file
    type 'gtype'. Returns a dictionary with loaded values and filename."""
    DESC = '=I4sII'                       # struct formatting string
    HEAD = '=I6I6dddii6iiiddddii6ii60xI'  # struct formatting string
    keys = ('Npart', 'Massarr', 'Time', 'Redshift', 'FlagSfr', 'FlagFeedback',
            'Nall', 'FlagCooling', 'NumFiles', 'BoxSize', 'Omega0',
            'OmegaLambda', 'HubbleParam', 'FlagAge', 'FlagMetals', 'NallHW',
            'flag_entr_ics', 'filename')
    f = open(filename, 'rb')

    if gtype == 2:
        f.seek(16)
        # If you want to use the data: desc = struct.unpack(DESC, f.read(16))

    raw = struct.unpack(HEAD, f.read(264))[1:-1]
    values = (raw[:6], raw[6:12]) + raw[12:16] + (raw[16:22],) + \
        raw[22:30] + (raw[30:36], raw[36], filename)
    header = dict(zip(keys, values))

    f.close()
    return header
daca7603362e8804736b4fae0ed95e772a183129
44,032
def gradient_add(grad_1, grad_2, param, verbose=0):
    """
    Sum two gradients

    :param grad_1: (TensorFlow Tensor) The first gradient
    :param grad_2: (TensorFlow Tensor) The second gradient
    :param param: (TensorFlow parameters) The trainable parameters
    :param verbose: (int) verbosity level
    :return: (TensorFlow Tensor) the sum of the gradients
    """
    if verbose > 1:
        print([grad_1, grad_2, param.name])
    if grad_1 is None and grad_2 is None:
        return None
    elif grad_1 is None:
        return grad_2
    elif grad_2 is None:
        return grad_1
    else:
        return grad_1 + grad_2
e9e620c06edaf124830b0aec78604d653565860c
44,033
def _remove_model_weights(model: dict, to_delete=None) -> dict:
    """
    Removes certain weights of a given model. The weights to remove are
    given by the to_delete argument. If there is also a bias term, that is
    deleted as well.

    Args:
        model: Loaded detectron2 model
        to_delete (list): Names of the weights to delete from the model, by
            default: ['roi_heads.box_predictor.cls_score',
                      'roi_heads.box_predictor.bbox_pred']
    """
    assert isinstance(model, dict)
    assert 'model' in model

    # print("Removing model weights with to_delete = None\n It is recommended
    # to specify the to_delete weights directly, or use
    # remove_model_weights_fsdet etc")

    # to_delete default values written here in order for default args to be
    # immutable.
    if to_delete is None:
        # Heads in the bbox predictor:
        to_delete = ['roi_heads.box_predictor.cls_score',
                     'roi_heads.box_predictor.bbox_pred']

    for param_name in to_delete:
        del model['model'][param_name + '.weight']
        if param_name + '.bias' in model['model']:
            del model['model'][param_name + '.bias']

    return model
70f88910db2fae52893869fc7acda8161b5df61e
44,037
def overlaps(df, idx):
    """
    Check if the note at the given index in the given dataframe overlaps any
    other notes in the dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        The DataFrame to check for overlaps.
    idx : int
        The index of the note within df that might overlap.

    Returns
    -------
    overlap : boolean
        True if the note overlaps some other note. False otherwise.
    """
    note = df.loc[idx]

    df = df.loc[
        (df["pitch"] == note.pitch)
        & (df["track"] == note.track)
        & (df.index != idx)
    ]

    overlap = any(
        (note.onset < df["onset"] + df["dur"]) & (note.onset + note.dur > df["onset"])
    )

    return overlap
382c21c7b2232b40ce7c563d677afa8a70f5bfcc
44,038
from typing import List
from typing import Tuple


def remove_pads(y_true: List[str], y_pred: List[str]) -> Tuple[List[str], List[str]]:
    """
    Takes as input two lists of strings corresponding to the predicted and
    actual tags and returns the same lists except that any <pad> tags in
    y_true are removed and the tags corresponding to the same index position
    in y_pred are also removed.
    """
    new_y_true = []
    new_y_pred = []
    for i in range(len(y_true)):
        if y_true[i] != "<pad>":
            new_y_true.append(y_true[i])
            new_y_pred.append(y_pred[i])
    return new_y_true, new_y_pred
fb660e2aa31c04be1ea77a57ac39b681786389f3
44,039
def corr(result, result2, result3):
    """
    Computes the agreement between the three regression methods by taking
    squared differences from averages in each category and then averaging
    those results. Interpret this as a smaller number meaning a better
    score / agreement.

    Parameters
    ----------
    result: pandas dataframe
        linear regression results
    result2: pandas dataframe
        the neural network results
    result3: pandas dataframe
        the random forest regression results

    Returns
    ----------
    correlation: pandas.core.series.Series
        the correlation value for the given player
    """
    avpts = (result['Pts'] + result2['Pts'] + result3['Pts']) / 3
    diffs_pts = ((avpts - result['Pts'])**2 + (avpts - result2['Pts'])**2
                 + (avpts - result3['Pts'])**2)
    avast = (result['Ast'] + result2['Ast'] + result3['Ast']) / 3
    diffs_ast = ((avast - result['Ast'])**2 + (avast - result2['Ast'])**2
                 + (avast - result3['Ast'])**2)
    avreb = (result['Reb'] + result2['Reb'] + result3['Reb']) / 3
    diffs_reb = ((avreb - result['Reb'])**2 + (avreb - result2['Reb'])**2
                 + (avreb - result3['Reb'])**2)
    correlation = (diffs_reb + diffs_ast + diffs_pts) / 3
    return correlation
9b7c00af5e5b03483b1e18565f5b590f7a51d745
44,044
from typing import Union
import builtins
import importlib


def _load_exception_class(import_specifier: str) -> Union[type, None]:
    """Load an exception class to be used for filtering Sentry events.

    This function takes a string representation of an exception class to be
    filtered out of sending to Sentry and returns the class itself (not an
    instance) so that it can be used as the argument to an `isinstance`
    method call.

    :param import_specifier: A string containing the full import path for an
        exception class. ex. 'ValueError' or 'requests.exceptions.HTTPError'
    :type import_specifier: str
    :returns: A reference to the exception type to be used in `isinstance`
        comparisons.
    :rtype: type
    """
    namespaced_class = import_specifier.rsplit(".", 1)
    if len(namespaced_class) == 1:
        return builtins.__dict__.get(namespaced_class[0])  # noqa: WPS609
    exception_module = importlib.import_module(namespaced_class[0])
    return exception_module.__dict__.get(namespaced_class[1])
a1add93a1f637caad974593536e00063eca14f34
44,045
def yaml_time_serializer(dumper, data):
    """
    This function is required to serialize datetime.time as string objects
    when working with YAML as output format.
    """
    return dumper.represent_scalar('tag:yaml.org,2002:str', str(data))
b6c6a5d44a57391ca440704537cb3c449900d0cb
44,049
def year_range(y_start, y_end):
    """Format a year range."""
    year = y_start or ''
    if y_end:
        year += '–' + y_end
    return year
ddd911e61d86e11f8f79de24a99620319ad605bd
44,050
def same_keys(a, b):
    """Determine if the dicts a and b have the same keys in them"""
    for ak in a.keys():
        if ak not in b:
            return False
    for bk in b.keys():
        if bk not in a:
            return False
    return True
841a6d715fdfcefb9a2557e01b7ccb586fd62c06
44,053
from pathlib import Path


def pyyaml_path_constructor(loader, node):
    """Helper method to load Path tag in PyYAML."""
    value = loader.construct_scalar(node)
    return Path(value)
e8740a9abe3e0d8b5ab27095eb8a89cb2b6f1af1
44,057
def filter_state(hdf_file_content, link_id, state='State0'):
    """Get the time series of a state for a link

    Parameters
    ----------
    hdf_file_content : np.array
        data in the h5 file as a numpy array
    link_id : int
        link_id to be filtered
    state : str, ex. State0 (default), State1 ...
        state to be retrieved from h5 file

    Returns
    -------
    time : np.array
        array of timesteps
    state : np.array
        state time series
    """
    index = hdf_file_content['LinkID'] == link_id
    time = hdf_file_content['Time'][index]
    state = hdf_file_content[state][index]
    return time, state
3ca64fcd455edfa0c5915fb1caba985370ce7dcc
44,058
def _get_metric_prefix(power: int, default: str = "") -> str:
    """Return the metric prefix for the power.

    Args:
        power (int): The power whose metric prefix will be returned.
        default (str): The default value to return if an exact match is not
            found.

    Returns:
        str: The metric prefix.
    """
    metric_prefix = {
        24: "Y",
        21: "Z",
        18: "E",
        15: "P",
        12: "T",
        9: "G",
        6: "M",
        3: "k",
        -3: "m",
        -6: "μ",
        -9: "n",
        -12: "p",
        -15: "f",
        -18: "a",
        -21: "z",
        -24: "y",
    }
    return metric_prefix.get(power, default)
b35f5ff3691eafe87274a685d41f9c57161df1fb
44,060
def get_average_att_network(networks, select='accuracy'):
    """Get the average accuracy for a group of networks.

    Args:
        networks (list): List of networks
        select (str): Attribute to average: 'accuracy', 'params' or 'flops'

    Returns:
        float: The average accuracy of a population of networks.
    """
    total = 0
    for network in networks:
        if select == 'accuracy':
            total += network.accuracy
        elif select == 'params':
            total += network.params
        elif select == 'flops':
            total += network.flops

    return total / len(networks)
78d14f731b3b10179abb5d85ff1150c4e784c94d
44,061
def parse_size(size):
    """
    Converts a size specified as '800x600-fit' to a list like [800, 600]
    and a string 'fit'.

    The strings in the error messages are really for the developer so they
    don't need to be translated.
    """
    first_split = size.split('-')
    if len(first_split) != 2:
        raise AttributeError(
            'Size must be specified as 000x000-method such as 800x600-fit.')
    size, method = first_split
    if method not in ('fit', 'thumb'):
        raise AttributeError(
            'The method must either be "fit" or "thumb", not "%s".' % method)
    try:
        size_ints = [int(x) for x in size.split('x')]
    except ValueError:
        raise AttributeError(
            'Size must be specified as 000x000-method such as 800x600-fit.')
    if len(size_ints) != 2:
        raise AttributeError(
            'Size must be specified as 000x000-method such as 800x600-fit.')
    if size_ints[0] <= 0 or size_ints[1] <= 0:
        raise AttributeError(
            'Height and width for size must both be greater than 0.')
    return size_ints, method
7a3ee86a48e320df70dec8f2a8fcb72bbaf377fe
44,062
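# A brief sketch of parse_size inputs and outputs (values illustrative):
parse_size("800x600-fit")    # -> ([800, 600], 'fit')
parse_size("120x120-thumb")  # -> ([120, 120], 'thumb')
parse_size("800x600-crop")   # raises AttributeError (unknown method)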
def split_writable_text(encoder, text, encoding):
    """Splits off as many characters from the beginning of text as are
    writable with "encoding". Returns a 2-tuple (writable, rest).
    """
    if not encoding:
        return None, text
    for idx, char in enumerate(text):
        if encoder.can_encode(encoding, char):
            continue
        return text[:idx], text[idx:]
    return text, None
c959aedca9085947043d676c8cd8420105e7af97
44,063
from collections import OrderedDict


def backwards_state_dict(state_dict):
    """
    Modify the state dict of older models for backwards compatibility

    Parameters
    ----------
    state_dict : dict
        Model state dict with pretrained weights

    Returns
    -------
    state_dict : dict
        Updated model state dict with modified layer names
    """
    # List of layer names to change
    changes = (('model.model', 'model'),
               ('pose_network', 'pose_net'),
               ('disp_network', 'depth_net'))
    # Iterate over all keys and values
    updated_state_dict = OrderedDict()
    for key, val in state_dict.items():
        # Ad hoc changes due to version changes
        key = '{}.{}'.format('model', key)
        if 'disp_network' in key:
            key = key.replace('conv3.0.weight', 'conv3.weight')
            key = key.replace('conv3.0.bias', 'conv3.bias')
        # Change layer names
        for change in changes:
            key = key.replace('{}.'.format(change[0]),
                              '{}.'.format(change[1]))
        updated_state_dict[key] = val
    # Return updated state dict
    return updated_state_dict
ed52ca5897c25eed9a1d36b8aca6a14fb9f6a48e
44,066
def are_attributes_valid(attributes):
    """
    Determine if attributes provided are dict or not.

    Args:
        attributes: User attributes which need to be validated.

    Returns:
        Boolean depending upon whether attributes are in valid format or not.
    """
    return type(attributes) is dict
7f9adebbbe64716333ee0114ffe1f63d2100e6c8
44,067
import shutil


def desk_per_Win(path):
    """
    path: the disk that you want to check, example: path = 'C:'
    return: the percentage of the free space on that disk
    """
    # Get the disk usage statistics about the given path;
    # shutil.disk_usage returns a (total, used, free) tuple
    stat = shutil.disk_usage(path)
    # Print disk usage statistics
    # print("Disk usage statistics:")
    percent = round(stat[2] / stat[0] * 100)
    return percent
ac6a8b76b46fbf6a9ceeb9f48b01543857a35f59
44,070
def create_query_token_func(session, model_class):
    """Create a ``query_token`` function that can be used in resource
    protector.

    :param session: SQLAlchemy session
    :param model_class: TokenCredential class
    """
    def query_token(client_id, oauth_token):
        q = session.query(model_class)
        return q.filter_by(
            client_id=client_id, oauth_token=oauth_token).first()
    return query_token
a25d876ef1dd5f7548741f27e584724f27447506
44,077
def check_parsed_args_compatible(imp, modules, contact, cc, parser):
    """
    Check that the combination of arguments is compatible.

    Args:
        imp(str): CSV import specifier
        modules(str): Modules argument
        contact(str): Contact argument
        cc(str): CC argument
        parser(:class:`argparse.ArgumentParser`): Parser instance

    Raises:
        :class:`argparse.ArgumentParser` error:
            * --import cannot be used with --contact or --cc
            * You cannot set all modules in an area to one contact/cc, enter
              a specific module.
    """
    if imp and (contact or cc):
        parser.error("--import cannot be used with --contact or --cc")
    # Stop user from setting all modules in an area to one contact/cc
    if not modules and (contact or cc):
        parser.error("You cannot set all modules in an area to one contact/cc,"
                     " enter a specific module.")
    # Just in case parser.error doesn't stop the script
    return 1
36778436b4ed03c3de2e45e77f9e70021ac7b8f6
44,078
def mod(p):
    """
    Compute modulus of 3D vector

    p: array
        Cartesian coordinates
    """
    return (p[0]**2 + p[1]**2 + p[2]**2)**0.5
a15755be4e49120fa323ece0e456ae947d826b6d
44,081
def get_weight_of(level: int) -> int:
    """Return the weight of a given `level`. The ratio is 1:3:5 for modules
    of L4:L5:L6 respectively."""
    levels = {4: 1, 5: 3, 6: 5}
    return levels[level] if isinstance(level, int) and level in levels else 0
3eefdffdf828df8f5d2454ea56da075281400cf7
44,084
def formatter(ms):
    """
    formats the ms into seconds and ms

    :param ms: the number of ms
    :return: a string representing the same amount, but now represented in
        seconds and ms.
    """
    sec = int(ms) // 1000
    ms = int(ms) % 1000
    if sec == 0:
        return '{0}ms'.format(ms)
    # Zero-pad the millisecond remainder so e.g. 1005 ms renders as "1.005s"
    # rather than the misleading "1.5s" the unpadded format produced
    return '{0}.{1:03d}s'.format(sec, ms)
3d66257672d4df906581ca6cf79c808fd9e8d3ef
44,086
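# A couple of sample calls for the formatter above (outputs reflect the
# zero-padded fix):
formatter(850)    # -> '850ms'
formatter(1005)   # -> '1.005s'
formatter(12340)  # -> '12.340s'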
def form_binary_patterns(k: int) -> list:
    """
    Return a list of strings containing all binary numbers of length not
    exceeding 'k' (with leading zeroes)
    """
    result = []
    format_string = '{{:0{}b}}'.format(k)
    for n in range(2 ** k):
        result.append(format_string.format(n))
    return result
c13cb7e9cd831e85cc5fde6ef8497ec45289664d
44,087
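# For instance (illustrative calls):
form_binary_patterns(2)  # -> ['00', '01', '10', '11']
form_binary_patterns(3)  # -> ['000', '001', '010', ..., '111']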
def containsZero(n):
    """
    n: an int or a str

    output: True if n contains '0'
    """
    numStr = str(n)
    for letter in numStr:
        # Compare with ==, not "is": identity checks on strings only work
        # by accident of interning
        if letter == '0':
            return True
    return False
e8698f6ba935079ecdb37adbdeb88b61edf9d640
44,090
def produit(a, b):
    """Return the result of multiplying the numbers a and b."""
    return a * b
866d4561edd2b2168ca167ff7116241c0fff310c
44,092
def extract_hairpin_name_and_sequence(file, sampleName):
    """
    Reads one MIRNA cluster file.
    It returns the corresponding cluster name and hairpin sequence in a
    Python dictionary.
    Dictionary keys: cluster names
    Dictionary values: cluster sequences
    """
    with open(file, "r") as filin:
        lines = filin.readlines()
        clusterName = lines[0].split(" ")[0].strip()
        hairpinSequence = lines[2].strip()
        d = {clusterName: hairpinSequence}
    return d
ea6c189a2b38822c08097ce77088f644c7b0c489
44,093
def _max_len(choices):
    """Given a list of char field choices, return the field max length"""
    lengths = [len(choice) for choice, _ in choices]
    return max(lengths)
2042ff1466554abc2cbfdb6fc0faff664759ac55
44,094
def CommandLine(command, args):
    """Convert an executable path and a sequence of arguments into a command
    line that can be passed to CreateProcess"""
    cmd = "\"" + command.replace("\"", "\"\"") + "\""
    for arg in args:
        cmd = cmd + " \"" + arg.replace("\"", "\"\"") + "\""
    return cmd
dfb7de2d1a72a007c9d120a27de5078d407f947d
44,096
import torch


def reflect(v: torch.Tensor, axis: torch.Tensor):
    """reflect vector v w.r.t. axis

    Args:
        `v`: tensor of shape `[..., 3]`.
        `axis`: tensor with the same shape or dim as `v`.

    Returns:
        the reflected vector
    """
    axis = torch.broadcast_to(axis, v.shape)
    h_vec = 2 * axis * torch.sum(axis * v, dim=-1, keepdim=True)
    return h_vec - v
92dface741eb36a9c2b889091c1f3b1275fcdc68
44,097
import random
import string


def get_random_string(length=10):
    """Get a string with random content"""
    return ''.join(random.choice(string.ascii_uppercase) for i in range(length))
5befd55c6f1cfa3bea941acb5f25048e56fd79ed
44,103
def _convert_line_to_tab_from_orifile(line):
    """
    :param line:
    :return:

    >>> _convert_line_to_tab_from_orifile('''IMG_1468832894.185000000.jpg -75.622522 -40.654833 -172.350586 \
657739.197431 6860690.284637 53.534337''')
    ['IMG_1468832894.185000000.jpg', '-75.622522', '-40.654833', '-172.350586', '657739.197431', '6860690.284637', '53.534337']
    """
    return line.split()
2e5343da7673c9897d97ae29003a8f4fa29c78a4
44,104
import torch


def get_ja(arr):
    """
    calculates jaw aperture (euclidean distance between UL / J (LI)):
    pertinent indexes: Jx: 6, ULx: 10, Jy: 7, ULy: 11
    """
    return torch.sqrt((arr[:, 6] - arr[:, 10])**2 + (arr[:, 7] - arr[:, 11])**2)
14b23e9e2eaead1a8a2ff4eb305107a909fbb419
44,105
def is_ranged_value(value, min_value=None, max_value=None,
                    min_inclusive: bool = True,
                    max_inclusive: bool = True) -> bool:
    """
    Parameters
    ----------
    value : float
        float : the value as a float
    min_value / max_value : float / None
        float : the constraint is active
        None : the constraint is inactive
    min_inclusive / max_inclusive : bool; default=True
        flips [min_value, max_value] to:
         - (min_value, max_value)
         - [min_value, max_value)
         - (min_value, max_value]

    Returns
    -------
    is_ranged : bool
        is the value in range
    """
    is_ranged = True
    if min_value is not None:
        if min_inclusive:
            # [ is the goal: values equal to min_value are allowed
            if value < min_value:
                is_ranged = False
        else:
            # ( is the goal: values equal to min_value are excluded
            if value <= min_value:
                is_ranged = False

    if max_value is not None:
        if max_inclusive:
            # ] is the goal: values equal to max_value are allowed
            if value > max_value:
                is_ranged = False
        else:
            # ) is the goal: values equal to max_value are excluded
            if value >= max_value:
                is_ranged = False
    return is_ranged
217400c434b04591ec4155d6a3408ad42f497104
44,107
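# A few illustrative checks of the range helper above:
is_ranged_value(5, min_value=5)                       # True  ([5, inf))
is_ranged_value(5, min_value=5, min_inclusive=False)  # False ((5, inf))
is_ranged_value(3, min_value=1, max_value=3)          # True  ([1, 3])
is_ranged_value(3, max_value=3, max_inclusive=False)  # False (..., 3))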
def get_dynamic_edgelist(data):
    """Make an edge list for all of the sequential visits of one site to
    the next in a day per user. Each edge is directed. There is a dummy
    start node to indicate the transition from being home to the first site
    visited that day."""
    data['total_people'] = 1
    edges = data.groupby(["user_id", "date_time", "date",
                          "cell_id"]).sum()["total_people"].to_frame()
    edges.reset_index(inplace=True)

    # start is the name of the dummy node for edges from home to the first
    # location visited
    edges["from"] = 'dummy_start_node'
    edges["to"] = edges["cell_id"]

    make_link = (edges["user_id"].shift(1) == edges["user_id"]) & \
                (edges["date"].shift(1) == edges["date"])
    # Use .loc to avoid pandas chained-assignment warnings
    edges.loc[make_link, "from"] = edges["cell_id"].shift(1)[make_link]

    dynamic_edgelist = edges[["from", "to", "total_people", "date_time"]]
    dynamic_edgelist = dynamic_edgelist[dynamic_edgelist['from'] !=
                                        dynamic_edgelist['to']]
    return dynamic_edgelist
36502d125ba10cb25ea15873c4925ff17d0afa8a
44,108
def linear_segment(x0, x1, y0, y1, t):
    """Return the linear function interpolating the given points."""
    return y0 + (t - x0) / (x1 - x0) * (y1 - y0)
de8bb06a7b294e0f0eb62f471da553a58e65fc49
44,109
from datetime import datetime


def now() -> str:
    """Return string timestamp for current time, to the second."""
    return datetime.utcnow().isoformat(timespec='seconds')
7a4b10b224398ad532137807b320ad404a7873ff
44,110
from typing import Any
from typing import Union
from typing import List
from typing import Sequence


def _list_convert(x: Any) -> Union[Any, List[Any]]:
    """Converts argument to list if not already a sequence."""
    return [x] if not isinstance(x, Sequence) else x
79a913305a931378e2cb2b8f46a74f1381850ac4
44,112
def title(value):
    """
    Title cases a string, replacing hyphens with spaces
    """
    return value.replace('-', ' ').title()
ce5276225e46adc7fbe6b3dda62c80dd5580cbfd
44,115
from typing import Iterable
from typing import List


def make_error_col_names(qcols: Iterable) -> List:
    """helper func to make error column names of the form
    <col_name>_low ... <col_name>_high

    Args:
        qcols (iterable): an iterable of column names used for matching

    Returns:
        list: list of error col names in non-interleaved order
    """
    error_cols = [f"{dcol}_low" for dcol in qcols]
    error_cols = error_cols + [f"{dcol}_high" for dcol in qcols]
    return error_cols
9bee77236b4d381d69b4359bdf0a319b06ac8285
44,117
import copy


def add_node_to_path(node, path):
    """
    Adds the name of the node to the copy of the list of strings inside the
    safely copied version of the path. Leaves the other two items in path
    unchanged (total distance traveled and total number of buildings).

    Parameters:
        path: list composed of [[list of strings], int, int]
            Represents the current path of nodes being traversed. Contains
            a list of node names, total distance traveled, and total number
            of buildings.
        node: Node
            Representing a building being added to the path

    Returns:
        A safely copied version of path with the node name added to the end
        of the first element.
    """
    # need a deepcopy so that the list inside of the list 'path' is also
    # copied
    new_path = copy.deepcopy(path)
    new_path[0].append(node.get_name())
    return new_path
9c7868a6a2c4df1161a1ed46f7f92bb887171269
44,119
def get_div(value, start):
    """Returns the maximum divisor for `value` starting from `start` value"""
    div = 1
    for d in range(start, 0, -1):
        if (value % d) == 0:
            div = d
            break
    return div
f483a3fcdc31eba37b17ac7f106bcea28ea84511
44,121
def pack_str(var):
    """Convert a string to UTF-8 encoded bytes."""
    return str(var).encode("utf-8")
ba620b2a82f99e9accf198a211c7b71406f391fe
44,122
import operator


def hamming_distance(s1, s2, equality_function=operator.eq):
    """
    Returns the hamming distance between two strings.
    """
    if not len(s1) == len(s2):
        raise ValueError("String lengths are not equal")

    # Number of non-matching characters:
    return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2))
f5fd74d3eb6c33f0a51dc5b61158d4f6e51e6b9e
44,128
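# A short sketch showing the default and a custom equality function; the
# case-insensitive comparison is a hypothetical choice for illustration.
hamming_distance("karolin", "kathrin")  # -> 3
hamming_distance("ABC", "abc",
                 equality_function=lambda a, b: a.lower() == b.lower())  # -> 0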
import six


def iterable(obj, strok=False):
    """
    Checks if the input implements the iterator interface. An exception is
    made for strings, which return False unless `strok` is True

    Args:
        obj (object): a scalar or iterable input
        strok (bool): if True allow strings to be interpreted as iterable

    Returns:
        bool: True if the input is iterable

    Example:
        >>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]
        >>> result = [iterable(obj) for obj in obj_list]
        >>> assert result == [False, True, False, True, True, True]
        >>> result = [iterable(obj, strok=True) for obj in obj_list]
        >>> assert result == [False, True, True, True, True, True]
    """
    try:
        iter(obj)
    except Exception:
        return False
    else:
        return strok or not isinstance(obj, six.string_types)
7a9e4a835eb1eb5034fba4a7083e38994a2535b3
44,130
def annotation2rgb(i, palette, arr):
    """Go from annotation of patch to color.

    Parameters
    ----------
    i: int
        Annotation index.
    palette: palette
        Index to color mapping.
    arr: array
        Image array.

    Returns
    -------
    array
        Resulting image.
    """
    col = palette[i]
    # Use a separate loop variable so the annotation index `i` is not shadowed
    for c in range(3):
        arr[..., c] = int(col[c] * 255)
    return arr
7fa9de356f27fadf72a6fe71a3ad63c70d9f0a48
44,133
def fig_path(path, *specs, suffix='.png'):
    """Get output path for figure."""
    stem = '-'.join((path.stem,) + specs)
    return (path.parent / stem).with_suffix(suffix)
64c7d2205f08b098d8eaeefa494fe5282cab01e6
44,135
from typing import Callable
from typing import Optional
from typing import Tuple


def then_parser(
    first_parser: Callable[[str], Optional[Tuple[str, str]]],
    second_parser: Callable[[str], Optional[Tuple[str, str]]],
    text: str,
) -> Optional[Tuple[str, str]]:
    """
    Uses one parser on the text, then uses the next parser on the remaining
    text from the first parse.
    """
    first_result = first_parser(text)
    if first_result is None:
        return None
    else:
        parsed, rest = first_result
        second_result = second_parser(rest)
        if second_result is None:
            return None
        else:
            parsed_2, rest_2 = second_result
            return parsed + parsed_2, rest_2
7604deaed51af9177661defe5e62a13766a77065
44,139
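# A minimal sketch of combining two single-character parsers with
# then_parser; char_parser is a hypothetical helper defined here only for
# illustration.
def char_parser(c):
    """Build a parser that consumes the single character c."""
    def parse(text):
        if text.startswith(c):
            return c, text[1:]
        return None
    return parse

then_parser(char_parser("a"), char_parser("b"), "abc")  # -> ('ab', 'c')
then_parser(char_parser("a"), char_parser("b"), "axc")  # -> None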
import pickle


def load_investment_results(iteration):
    """Load investment results"""
    with open(f'output/investment_plan/investment-results_{iteration}.pickle', 'rb') as f:
        results = pickle.load(f)
    return results
93700e58b9db2c36ff5da9203ba1eeae9c7e4d84
44,141
def _translate_backupjobrun_summary_view(context, backupjobrun):
    """Maps keys for backupjobruns summary view."""
    d = {}
    d['id'] = backupjobrun['id']
    d['created_at'] = backupjobrun['created_at']
    d['status'] = backupjobrun['status']
    return d
3cf0394aff3191ab2db69f29f4052411f90c572b
44,142
def inbreeding_as_dispersion(inbreeding, unique_haplotypes):
    """Calculate dispersion parameter of a Dirichlet-multinomial
    distribution assuming equal population frequency of each haplotype.

    Parameters
    ----------
    inbreeding : float
        Expected inbreeding coefficient of the sample.
    unique_haplotypes : int
        Number of possible haplotype alleles at this locus.

    Returns
    -------
    dispersion : float
        Dispersion parameter for all haplotypes.
    """
    return (1 / unique_haplotypes) * ((1 - inbreeding) / inbreeding)
3d57f8a31a82ad0effa8c12b7b69f36729a02c64
44,143
def _pitch2m(res):
    """Convert pitch string to meters.

    Something like -600- is assumed to mean "six-hundredths of an inch".

    >>> _pitch2m("-600-")
    4.233333333333333e-05
    >>> _pitch2m("-1200-")
    2.1166666666666665e-05
    """
    res = int(res[1:-1])
    return 0.0254 / res
a7fa2acaf8bbbf647bda3a35acfd5aad370df7f3
44,147
def tmp_bdb_root(mocker, tmp_path):
    """Set a temporary root directory for the BerkeleyDB minter hierarchy.

    By default, a BDB path resolved by the minter will reference a location
    in EZID's minter hierarchy, as configured in the EZID settings.
    Currently, `ezid/db/minters`.

    This fixture causes BDB paths to resolve to an empty tree under /tmp.
    Any minters created by the test are deleted when the test exits.

    Returns a pathlib2.Path referencing the root of the tree. The slash
    operator can be used for creating paths below the root. E.g.,
    `tmp_bdb_root / 'b2345' / 'x1'`.
    """
    for dot_path in ('nog.bdb._get_bdb_root', 'impl.nog.bdb._get_bdb_root'):
        mocker.patch(
            dot_path,
            return_value=(tmp_path / 'minters').resolve(),
        )
    return tmp_path
36803584568992b2c68fb32eb810bb6ea2ede91d
44,149
def parse_csv(columns, line):
    """
    Parse a CSV line that has ',' as a separator.

    Columns is a list of the column names, must match the number of
    comma-separated values in the input line.
    """
    data = {}
    split = line.split(',')
    for idx, name in enumerate(columns):
        data[name] = split[idx]
    return data
ff42251c5be595cc749ccc91d419e2ef105b9b49
44,150
def armstrong(some_int: int) -> bool:
    """
    Accepts an int
    Returns whether or not int is an armstrong number

    :param some_int:
    :return:
    """
    string_rep = str(some_int)
    sum_val = 0
    for digit in string_rep:
        # Each digit is raised to the power of the number of digits; the
        # original hardcoded exponent 3 was only correct for 3-digit inputs
        sum_val += int(digit) ** len(string_rep)
    return some_int == sum_val
dd0c2b3533e77c29330d750826b328c2b33b2460
44,153
def prepare_auth_params_json(bot, manifest):
    """Returns a dict to put into JSON file passed to task_runner.

    This JSON file contains various tokens and configuration parameters
    that allow task_runner to make HTTP calls authenticated by bot's own
    credentials. The file is managed by bot_main.py (main Swarming bot
    process) and consumed by task_runner.py.

    It lives in the task work directory.

    Args:
        bot: instance of bot.Bot.
        manifest: dict with the task manifest, as generated by the backend
            in /poll.
    """
    # This is "<kind>:<id>", e.g. "user:abc@example.com" or
    # "bot:abc.example.com". Service accounts appear as
    # "user:<account-email>" strings.
    bot_ident = manifest.get('bot_authenticated_as', '')
    bot_service_account = 'none'
    if bot_ident.startswith('user:'):
        bot_service_account = bot_ident[len('user:'):]

    def account(acc_id):
        acc = (manifest.get('service_accounts') or {}).get(acc_id) or {}
        return acc.get('service_account') or 'none'

    return {
        'bot_id': bot.id,
        'task_id': manifest['task_id'],
        'swarming_http_headers': bot.remote.get_authentication_headers(),
        'swarming_http_headers_exp': bot.remote.authentication_headers_expiration,
        'bot_service_account': bot_service_account,
        'system_service_account': account('system'),
        'task_service_account': account('task'),
    }
775540265b9739de40ae4497033311e4fbbcb273
44,156
import ast


def get_input(arg, valid_keys):
    """Convert the input to a dict and perform basic validation"""
    json_string = arg.replace("\\n", "\n")
    try:
        input_dict = ast.literal_eval(json_string)
        if not all(k in input_dict for k in valid_keys):
            return None
    except Exception:
        return None
    return input_dict
55f702725186cb74767546c8a3c4b068f0c03f2b
44,157
import requests
import json
import logging


def authenticate_with_ome(ip_address: str, user_name: str, password: str) -> tuple:
    """
    Authenticates a session against an OME server

    Args:
        ip_address: IP address of the OME server
        user_name: Username for OME
        password: Password for the OME user

    Returns:
        Returns a tuple of auth_success (bool), headers (dict). Which are
        true if authentication succeeded and
        {'content-type': 'application/json'} respectively.
    """
    # X-auth session creation
    auth_success = False
    session_url = "https://%s/api/SessionService/Sessions" % ip_address
    user_details = {'UserName': user_name,
                    'Password': password,
                    'SessionType': 'API'}
    headers = {'content-type': 'application/json'}

    session_info = requests.post(session_url, verify=False,
                                 data=json.dumps(user_details),
                                 headers=headers)
    if session_info.status_code == 201:
        headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
        auth_success = True
    else:
        error_msg = "Failed create of session with {0} - Status code = {1} - Error: " \
                    + str(json.loads(session_info.content))
        logging.error(error_msg.format(ip_address, session_info.status_code))
        # Exit with a non-zero status so callers can detect the failure
        # (the original exited with 0 on this error path)
        exit(1)
    return auth_success, headers
a952e445acde71ee0bfde1aedc70a976fc2c49cf
44,164
def searchForInsert(sortedList: list, value: float) -> int:
    """Search for where to insert the value for the list to remain sorted

    Args:
        sortedList (list): a sorted list
        value (float): the value to insert into the sorted list

    Returns:
        int: the index where to insert the value
    """
    for i in range(len(sortedList) - 1, -1, -1):
        if sortedList[i] <= value:
            return i + 1
    # Fall through only after scanning the whole list; the original code
    # returned 0 from the first iteration whenever the last element exceeded
    # value, which was a bug
    return 0
3974c8e7b58feb9d47aadb68f88a931ba3bd8048
44,165
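# A few illustrative calls exercising the fixed fall-through:
searchForInsert([1, 3, 5, 7], 6)  # -> 3 (between 5 and 7)
searchForInsert([1, 3, 5, 7], 9)  # -> 4 (append at end)
searchForInsert([1, 3, 5, 7], 0)  # -> 0 (prepend; the old code also
                                  #    returned 0 here, but for any value < 7)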
def deep_to(batch, device, dtype):
    """
    Static method to call :func:`to` on tensors or tuples. All items in
    tuple will have :func:`deep_to` called

    Args:
        batch (tuple / list / :class:`torch.Tensor`): The mini-batch which
            requires a :func:`to` call
        device (:class:`torch.device`): The desired device of the batch
        dtype (:class:`torch.dtype`): The desired datatype of the batch

    Returns:
        tuple / list / :class:`torch.Tensor`: The moved or casted batch
    """
    is_tuple = isinstance(batch, tuple)

    if isinstance(batch, list) or isinstance(batch, tuple):
        batch = list(batch)
        for i in range(len(batch)):
            batch[i] = deep_to(batch[i], device, dtype)
        batch = tuple(batch) if is_tuple else batch
    elif isinstance(batch, dict):
        for key in batch:
            batch[key] = deep_to(batch[key], device, dtype)
    else:
        if batch.dtype.is_floating_point:
            batch = batch.to(device, dtype)
        else:
            batch = batch.to(device)

    return batch
e754e687c4f998c24fd6bf81259fa2aaf44bb419
44,169
def sum_two_smallest_numbers(numbers):
    """Find two lowest positive integers and add them."""
    # Sort once instead of twice; behavior is unchanged
    smallest = sorted(numbers)
    return smallest[0] + smallest[1]
9e90f521dd56e8e5dce2a26ede7a66256f46f958
44,173
import re


def parse_log(line):
    """Parse logs with regex

    Parses raw logs and extracts request_time and url

    Args:
        line: Decoded line

    Returns:
        request_time_data: float number or None if log can't be parsed
        url_data: String or None if log can't be parsed
    """
    url_format = re.compile(
        r"""((?:(?<=PUT )|(?<=GET )|(?<=POST )|(?<=HEAD ))(.*)(?=\ http))""",
        re.IGNORECASE)
    request_time_format = re.compile(
        r"""(([0-9]*[.])?[0-9]+(?!.*\d))$""", re.IGNORECASE)

    url_data = re.search(url_format, line)
    request_time_data = re.search(request_time_format, line)

    if url_data:
        url_data = url_data.group()
    if request_time_data:
        request_time_data = request_time_data.group()

    return request_time_data, url_data
3c2a889fe3e2fc7e922628da0c447575cbfed6d9
44,176
def deregister_sub(topics_to_subs, sub):
    """Deregister a Subscription from a mapping topics->subscriptions.

    Args:
        topics_to_subs (dict): dictionary topic -> list of subscriptions
        sub (Subscription): subscription to deregister
    """
    key = str(sub.topics)
    if key in topics_to_subs:
        return topics_to_subs[key].remove(sub)
    else:
        return topics_to_subs
099e7e3a0683e0e047ab55fa4ce1dd0b111d19ee
44,182
def server_status_field(log, server_status, field_name):
    """
    Return (0, result) for a field of BarreleServerStatusCache
    """
    # pylint: disable=unused-argument
    return server_status.bssc_field_result(log, field_name)
634d19ad50311902d92390b2f1ac5a2b73ddf389
44,183
import warnings


def normalize_df(
        df,
        method,
        drop_degenerate_cols=True,
        replace_zero_denom=False):
    """For a given dataframe with columns including numerical values, it
    generates a function which can be applied to original data as well as
    any future data to normalize using two possible methods.

    The `"statistical"` method removes the "mean" and divides by "std".
    The `"min_max"` method removes the "minimum" and divides by the
    "maximum - minimum".

    If desired, the function also drops the columns which have only one
    possible value and can cause issues not only during normalization
    (returning a column with all NAs) but also potentially during fitting
    as an example.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Input dataframe which (only) includes numerical values in all
        columns.
    method : `str`
        The method to be used for normalization.
        The "statistical" method removes the "mean" and divides by "std"
        for each column.
        The "min_max" method removes the "min" and divides by the
        "max - min" for each column.
    drop_degenerate_cols : `bool`, default True
        A boolean to determine if columns with only one possible value
        should be dropped in the normalized dataframe.
    replace_zero_denom : `bool`, default False
        A boolean to decide if zero denominator (e.g. standard deviation
        for ``method="statistical"``) for normalization should be replaced
        by 1.0.

    Returns
    -------
    normalize_info : `dict`
        A dictionary with the main item being a normalization function.
        The items are as follows:

        ``"normalize_df_func"`` : callable (pd.DataFrame -> pd.DataFrame)
            A function which normalizes the input dataframe (``df``)
        ``"normalized_df"`` : normalized dataframe version of ``df``
        ``"keep_cols"`` : `list` [`str`]
            The list of kept columns after normalization.
        ``"drop_cols"`` : `list` [`str`]
            The list of dropped columns after normalization.
        ``"subtracted_series"`` : `pandas.Series`
            The series to be subtracted which has one value for each column
            of ``df``.
        ``"denominator_series"`` : `pandas.Series`
            The denominator series for normalization which has one value
            for each column of ``df``.
    """
    if method == "statistical":
        subtracted_series = df.mean()
        denominator_series = df.std()
    elif method == "min_max":
        subtracted_series = df.min()
        denominator_series = (df.max() - df.min())
    else:
        raise NotImplementedError(f"Method {method} is not implemented")

    # Replaces 0.0 in denominator series with 1.0 to avoid dividing by zero
    # when the variable has zero variance
    if replace_zero_denom:
        denominator_series.replace(to_replace=0.0, value=1.0, inplace=True)

    drop_cols = []
    keep_cols = list(df.columns)
    normalized_df = (df - subtracted_series) / denominator_series

    if drop_degenerate_cols:
        for col in df.columns:
            if normalized_df[col].isnull().any():
                drop_cols.append(col)
                warnings.warn(
                    f"{col} was dropped during normalization as it had only "
                    "one possible value (degenerate)")
        keep_cols = [col for col in list(df.columns) if col not in drop_cols]
        if len(keep_cols) == 0:
            raise ValueError(
                "All columns were degenerate (only one possible value per "
                "column).")
        subtracted_series = subtracted_series[keep_cols]
        denominator_series = denominator_series[keep_cols]

    def normalize_df_func(new_df):
        """A function which applies to a potentially new data frame
        (``new_df``) with the same columns as ``df`` (different values or
        row number is allowed) and returns a normalized dataframe with the
        same normalization parameters applied to ``df``.

        This function uses the series ``subtracted_series`` and
        ``denominator_series`` generated in its outer scope for
        normalization, and in this way ensures the same mapping for new
        data.

        Parameters
        ----------
        new_df : `pandas.DataFrame`
            Input dataframe which (only) includes numerical values in all
            columns. The columns of ``new_df`` must be the same as ``df``
            which is passed to the outer function (``normalize_df``) to
            construct this function.

        Returns
        -------
        normalized_df : `pandas.DataFrame`
            Normalized dataframe version of ``new_df``.
        """
        normalized_df = new_df.copy()
        if drop_degenerate_cols:
            normalized_df = normalized_df[keep_cols]
        return (normalized_df - subtracted_series) / denominator_series

    return {
        "normalize_df_func": normalize_df_func,
        "normalized_df": normalized_df,
        "keep_cols": keep_cols,
        "drop_cols": drop_cols,
        "subtracted_series": subtracted_series,
        "denominator_series": denominator_series,
    }
277353068155226894b0cdba9fcf56b24e58a891
44,186
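# A brief usage sketch for normalize_df above (column names and values are
# illustrative):
import pandas as pd

train = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [10.0, 10.0, 40.0]})
info = normalize_df(train, method="min_max")
info["normalized_df"]           # train scaled to [0, 1] per column
new = pd.DataFrame({"x": [1.5], "y": [25.0]})
info["normalize_df_func"](new)  # same min/max mapping applied to new data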
def _check_handle(handle):
    """Checks if provided file handle is valid."""
    return handle is not None and handle.fileno() >= 0
7bce4bc10f4a7ee9c393a4e6a79eb3cdc22f4a12
44,189
def clear_data_for_origin(origin: str, storageTypes: str) -> dict:
    """Clears storage for origin.

    Parameters
    ----------
    origin: str
        Security origin.
    storageTypes: str
        Comma separated list of StorageType to clear.
    """
    return {
        "method": "Storage.clearDataForOrigin",
        "params": {"origin": origin, "storageTypes": storageTypes},
    }
8c8bf90c4ac4f7dc0dc3459fe18999c127d12226
44,191
def get_one_item(CURSOR, BARCODE):
    """
    Returns the specified item from the database, determined by the primary
    key - in this case the barcode number.

    Args:
        CURSOR (object): the database cursor
        BARCODE (str): the barcode number

    Returns:
        ITEM (list): the item in the database
    """
    ITEM = CURSOR.execute("""
        SELECT * FROM pantry
        WHERE barcode_number = ?
        ;""", [BARCODE]).fetchone()
    return ITEM
013c4a86ba7ca8c41965a448370d5d6ef05874e0
44,194
def linear_search(L, v):
    """ (list, object) -> int

    Return the index of the first occurrence of v in L, or return -1 if v
    is not in L.

    >>> linear_search([2, 3, 5, 3], 2)
    0
    >>> linear_search([2, 3, 5, 3], 5)
    2
    >>> linear_search([2, 3, 5, 3], 8)
    -1
    """
    i = 0
    while i != len(L) and v != L[i]:
        i = i + 1
    if i == len(L):
        return -1
    else:
        return i
50a9ac2e720373d050deb29f31724d27a06e97d6
44,196
def matrix_identity(n: int) -> list:
    """
    Generate the n-sized identity matrix.
    """
    if n == 0:
        return [0]
    identity: list = []
    for col in range(n):
        column: list = []
        for row in range(n):
            if col == row:
                column.append(1)
            else:
                column.append(0)
        identity.append(column)
    return identity
6ac3945b90e4c8ae9bb0ca6f2b9c55379ab13cec
44,199
def get_gcd(x, y):
    """Calculate the greatest common divisor of two numbers."""
    if y > x:
        x, y = y, x
    r = x % y
    if r == 0:
        return y
    else:
        result = get_gcd(y, r)
        return result
1e17028c0595df01897504b3332a6032186cd4f5
44,200
import math


def repeat_data(preload, num_simulation_steps):
    """Repeats the transformer preload data until there are as many values
    as there are simulation steps.

    Args:
        preload: (list): Containing the data (floats) to be repeated.
        num_simulation_steps: (int): Number of simulation steps and expected
            length of the transformer preload after it is repeated.

    Returns:
        transformer_preload_repeated: (list): Repeated values.
            len() = num_simulation_steps.
    """
    n = math.floor(num_simulation_steps / len(preload))
    transformer_preload_repeated = preload * n
    values_to_add = num_simulation_steps - len(transformer_preload_repeated)
    transformer_preload_repeated += preload[:values_to_add]
    return transformer_preload_repeated
1e2c172d40d13c10802fd7edadb68fdb29153836
44,209
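# For example (illustrative values): two full repetitions plus a partial one
# to reach the requested length.
repeat_data([0.2, 0.5, 0.8], 8)
# -> [0.2, 0.5, 0.8, 0.2, 0.5, 0.8, 0.2, 0.5]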
def get_node_and_attribute(attribute):
    """
    Split a name between its node and its attribute.

    Args:
        attribute (str): attribute name, node.attribute.

    Returns:
        list: [node_name, attribute]
    """
    split_attribute = attribute.split('.')

    if not split_attribute:
        return None, None

    node = split_attribute[0]
    attr = '.'.join(split_attribute[1:])

    return node, attr
8534366c4d7800681c1088a1446f828087d9ddea
44,210
def txn_data(df, txns):
    """Return dataframe with supplied transactions."""
    return df[df.transaction_id.isin(txns)].copy()
7236c912a6e1120e2b893cf6018ca2b09ad4a558
44,217
def _parse_nyquist_vel(nyquist_vel, radar, check_uniform):
    """Parse the nyquist_vel parameter, extract from the radar if needed."""
    if nyquist_vel is None:
        nyquist_vel = [radar.get_nyquist_vel(i, check_uniform)
                       for i in range(radar.nsweeps)]
    else:
        # Nyquist velocity explicitly provided
        try:
            len(nyquist_vel)
        except TypeError:
            # expand single value (a bare except here would also swallow
            # unrelated errors)
            nyquist_vel = [nyquist_vel for i in range(radar.nsweeps)]
    return nyquist_vel
b5b56ad2c350873831a0574c98b4920d2abba5ac
44,224
def applyBandOffset(C, height, bandName, lines, inter=False):
    """Produce bands from a list of lines.

    Bands are defined relative to lines by means of offsets of the top and
    bottom heights of the lines. Bands may also be interlinear: defined
    between the bottom of one line and the top of the next line.

    Parameters
    ----------
    C: object
        Configuration settings
    height:
        The height of the page or block
    bandName: string
        The name of the bands
    lines: tuple
        The lines relative to which the bands have to be determined.
        Lines are given as a tuple of tuples of top and bottom heights.
    inter: boolean, optional `False`
        Whether the bands are relative to the lines, or relative to the
        interlinear spaces.

    Returns
    -------
    tuple
        For each line the band named bandName specified by top and bottom
        heights.
    """
    offsetBand = C.offsetBand
    (top, bottom) = offsetBand[bandName]

    def offset(x, off):
        x += off
        return 0 if x < 0 else height if x > height else x

    return tuple(
        (offset(up, top), offset(lo, bottom))
        for (up, lo) in (
            zip((x[1] for x in lines), (x[0] for x in lines[1:]))
            if inter
            else lines
        )
    )
90f5db0f04b30be774f2f87310baf90ac5f4962d
44,225