content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def is_multiline(s):
    """Check whether a string spans more than one line.

    Args:
        s (str): the string to inspect.

    Returns:
        bool: True when splitting on line boundaries yields 2+ lines.
    """
    lines = s.splitlines()
    return len(lines) > 1
6c1eca6f1d3d449bff6661b2ab3b9cd8695fbf90
41,147
def get_rows_to_keep(mode, df, grp, samp_grps, qthreshold, min_child_non_leaf, min_child_nsamp, min_peptides, min_pep_nsamp): """ Use checking to find the rows (taxonomic or functional terms) that satisfy all of the filtering conditions for the specified group :param mode: either 'f', 't', or 'ft' :param df: data frame of functional and taxonomic terms. missing values are represented as 0. :param grp: grp to check conditions for :param samp_grps: SampleGroups() object :param qthreshold: minimum number of quantitations per grp :param min_child_non_leaf: minimum number of children for terms that are not leaves :param min_child_nsamp: minimum number of samples with sample children greater than min_child_non_leaf :param min_peptides: minimum number of peptides for each term :param min_pep_nsamp: minimum number of samples where the number of peptides has to be larger than min_peptides :return: boolean Series with rows to keep as True """ # intensity intcols = samp_grps.sample_names[grp] keep_int = (df[intcols] > 0).apply(sum, axis=1) >= qthreshold # peptides peptide_cols = samp_grps.n_peptide_names_dict[grp] peptide_keep_series = (df[peptide_cols] > min_peptides) if min_pep_nsamp == "all": keep_peptide = peptide_keep_series.all(axis=1) else: keep_peptide = peptide_keep_series.apply(sum, axis=1) >= int(min_pep_nsamp) if mode != 'ft': # child non leaf child_cols = samp_grps.samp_children_names_dict[grp] child_keep_series = (df[child_cols] >= min_child_non_leaf) | (df[child_cols] == 0) if min_child_nsamp == "all": keep_child = child_keep_series.all(axis=1) else: keep_child = child_keep_series.apply(sum, axis=1) >= int(min_child_nsamp) all_keep = keep_int & keep_child & keep_peptide else: all_keep = keep_int & keep_peptide return all_keep
353c0b0d2717018f60178f37d777be25bbcf2193
41,149
def send(r, stream=False):
    """Dispatch the request via its own send() and hand back its response."""
    r.send(stream=stream)
    return r.response
7350fe337450e55744ee82541b90d5204868fff0
41,150
from pathlib import Path
from datetime import datetime


def unique_path(parent: Path, stem: str, suffix: str, seps=('_', '-'), n: int = 1, add_date: bool = True) -> Path:
    """Build a Path under `parent` whose file name does not exist yet.

    :param parent: Directory in which a unique file name should be created
    :param stem: File name without extension
    :param suffix: File extension, including `.`
    :param seps: Separators placed before the date and before n, respectively
    :param n: First counter value to try; incremented by 1 until the
        resulting name is unique
    :param add_date: If True, today's date (YYYY-MM-DD) is always appended
        to the stem before any counter
    :return: Path with a file name that does not currently exist in the
        target directory
    """
    date_sep, n_sep = seps
    if add_date:
        today = datetime.now().strftime("%Y-%m-%d")
        stem = f'{stem}{date_sep}{today}'
    candidate = stem + suffix
    while (path := parent.joinpath(candidate)).exists():
        candidate = f'{stem}{n_sep}{n}{suffix}'
        n += 1
    return path
872ec8ad2e24e51edb37a1722f16b85abeb96614
41,151
def remove_indices_from_range(ixs, max_ix):
    """From the indices 0:max_ix+1, remove the individual index values in ixs.

    Returns the remaining ranges of indices and singletons: each element of
    the result is either a two-item [start, stop) half-open range or a
    one-item [value] singleton.

    NOTE(review): the logic assumes `ixs` is sorted ascending -- confirm
    with callers.
    """
    ranges = []
    i0 = 0  # start of the current surviving run
    for ix in ixs:
        i1 = ix - 1  # last surviving index before the removed one
        if i1 < i0:
            # removed index is adjacent to (or before) the run start:
            # nothing survives, restart after it
            i0 = ix + 1
        elif i1 == i0:
            # exactly one index survives -> emit a singleton
            ranges.append([i0])
            i0 = ix + 1
        else:
            # a multi-index run survives -> emit a half-open range
            ranges.append([i0,i1+1])
            i0 = ix + 1
    # trailing run after the last removed index
    if i0 < max_ix:
        ranges.append([i0, max_ix+1])
    elif i0 == max_ix:
        ranges.append([i0])
    return ranges
df71db04b7e521815042237000f036735fbbe0f3
41,152
def maybe_append(df1, df2):
    """Concatenate two DataFrames, tolerating missing inputs.

    If both frames are available, append df2 below df1 and return the
    result. Otherwise, return whichever frame is not None (None if both
    are missing).

    :param df1: first pandas DataFrame, or None
    :param df2: second pandas DataFrame, or None
    :return: combined DataFrame, or the single non-None input
    """
    if df1 is None:
        return df2
    if df2 is None:
        return df1
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat with default arguments is the supported equivalent.
    import pandas as pd
    return pd.concat([df1, df2])
aaabcc0f175fc913f0dbce575888cf08ff625c98
41,153
def max_subarray(nums):
    """Return the largest sum over all contiguous, non-empty subarrays.

    Example: for [-2,1,-3,4,-1,2,1,-5,4] the subarray [4,-1,2,1]
    has the largest sum = 6.

    Args:
        nums: non-empty list[int]

    Returns:
        int: maximum subarray sum
    """
    # Kadane's algorithm. Seed both accumulators with the first element,
    # then scan the REST of the list. Bug fix: the original iterated over
    # the whole list after seeding with nums[0], double-counting the first
    # element (e.g. max_subarray([5]) returned 10).
    max_so_far = max_end_here = nums[0]
    for x in nums[1:]:
        # max_end_here = best sum of a subarray ending at this position
        max_end_here = max(max_end_here + x, x)
        # max_so_far = best sum seen anywhere so far
        max_so_far = max(max_so_far, max_end_here)
    return max_so_far
8660758cc758f85ea4750e491f249b08c0dfdd00
41,154
def pandas_table_to_nested_list(df):
    """Convert a pandas DataFrame into a list of rows, header row first."""
    n_rows, n_cols = df.shape
    header = [df.columns[col] for col in range(n_cols)]
    body = [[df.iat[row, col] for col in range(n_cols)]
            for row in range(n_rows)]
    return [header] + body
fc5aa04de82dcacab5ae6f6c64f22417d3d9318f
41,155
def cescape(string):
    """Escape the symbols that color-code markup treats specially.

    Doubles each occurrence so it is read literally:
    ``@`` becomes ``@@`` and ``}`` becomes ``}}``.

    Parameters:
        string (str): the string to escape

    Returns:
        (str): the string with color codes escaped
    """
    text = str(string)
    return text.replace('@', '@@').replace('}', '}}')
48aef7c95851f9a7ae475d3ba38db55ce09fb5de
41,158
def closest_pair_strip(cluster_list, horiz_center, half_width):
    """
    Helper function to compute the closest pair of clusters in a vertical strip

    Input: cluster_list is a list of clusters produced by fast_closest_pair
    horiz_center is the horizontal position of the strip's vertical center line
    half_width is the half the width of the strip (i.e; the maximum horizontal distance
    that a cluster can lie from the center line)

    Output: tuple of the form (dist, idx1, idx2) where the centers of the clusters
    cluster_list[idx1] and cluster_list[idx2] lie in the strip and have minimum distance dist.
    """
    # Collect (cluster, original index) pairs for clusters inside the strip.
    # NOTE(review): list.index() returns the FIRST matching position, so
    # duplicate clusters in cluster_list would all map to the same index --
    # confirm inputs never contain equal clusters.
    strip = list()
    for cluster in cluster_list:
        if abs(cluster.horiz_center()-horiz_center) < half_width:
            strip.append((cluster, cluster_list.index(cluster)))
    # Sort by vertical position so only nearby neighbours need comparing.
    strip.sort(key = lambda cluster: cluster[0].vert_center())
    length = len(strip)
    dist, idx1, idx2 = float('inf'), -1, -1
    for idx_u in range(length-1):
        # Classic strip argument: only the next 3 clusters (by vertical
        # order) can possibly be closer than the current best.
        for idx_v in range(idx_u+1, min(idx_u+4, length)):
            uv_dist = strip[idx_u][0].distance(strip[idx_v][0])
            if uv_dist < dist:
                dist = uv_dist
                # Report the pair with the smaller original index first.
                if strip[idx_u][1] < strip[idx_v][1]:
                    idx1 = strip[idx_u][1]
                    idx2 = strip[idx_v][1]
                else:
                    idx1 = strip[idx_v][1]
                    idx2 = strip[idx_u][1]
    return (dist, idx1, idx2)
d6966ec785d6ca5053ab8f91661735cbe0083dc5
41,160
def tb_args(exc): """Easily format arguments for `traceback` functions.""" return (type(exc), exc, exc.__traceback__)
d5c65e67556c28de3a97742fb4115cf8bddfb6a4
41,163
def flatten_list(cols_list, recursive=True):
    """Take a list of lists and return a flattened list.

    Args:
        cols_list: an iterable of any quantity of str/tuple/list/set.
        recursive: flatten nested containers all the way down (True) or
            only one level deep (False).

    Example:
        >>> flatten_list(["a", ("b", set(["c"])), [["d"]]])
        ["a", "b", "c", "d"]
    """
    flat = []
    for item in cols_list:
        if not isinstance(item, (set, list, tuple)):
            flat.append(item)
        elif recursive:
            flat.extend(flatten_list(item))
        else:
            flat.extend(item)
    return flat
d8e16a99b2e5f61ce53813ca7424e1b01cb1cddf
41,164
def table_entry_size(name, value):
    """
    Compute the size of a single header-table entry.

    This size is mostly irrelevant to us and defined specifically to
    accommodate memory management for lower level implementations: the
    name and value lengths plus 32 bytes of "maximum" assumed overhead
    per entry. See RFC 7541 Section 4.1.
    """
    overhead = 32
    return overhead + len(name) + len(value)
fb05f2299bd264d3ae8143307d9c428aad18d5d7
41,168
def global_video_success(row, weights=None):
    """Score a video's success as a weighted sum of its basic stats.

    :param row: mapping with commentCount, dislikeCount, favoriteCount,
        likeCount and viewCount entries
    :param weights: optional per-metric weights in the order above;
        defaults to 1 for every metric except dislikeCount, which counts -1
    :return: weighted sum of the metric values
    """
    metric_cols = ["commentCount", "dislikeCount", "favoriteCount",
                   "likeCount", "viewCount"]
    if weights is None:
        weights = [1 for _ in metric_cols]
        weights[1] = -1  # dislikes subtract from the score
    scores = [row[key] for key in metric_cols]
    # Bug fix: the weights were constructed but never applied -- the
    # original returned the plain sum(scores), so dislikes ADDED to the
    # success score.
    return sum(w * s for w, s in zip(weights, scores))
49ca735b7efe54b49f29ba79be6c5aee250d64ba
41,176
def has_converged(mu, oldmu):
    """
    Decide whether a set of centroids has converged.

    Parameters:
        mu - the latest array of centroids
        oldmu - the array of centroids from the previous iteration

    Returns:
        True when both iterations contain exactly the same set of
        centroids (order-insensitive), i.e. the clustering has converged
    """
    current = {tuple(centroid) for centroid in mu}
    previous = {tuple(centroid) for centroid in oldmu}
    return current == previous
35234531a15baaf4f1df2f196e6abcec40fade59
41,177
import copy


def merge_config(new_config, old_config):
    """Overlay the user-defined config onto a deep copy of the defaults."""
    merged = copy.deepcopy(old_config)
    if new_config is not None:
        merged.update(new_config)
    return merged
0d6d3f4b1df504485b991d6edc63d675439ab6d0
41,178
def make_range(chain_range_dic):
    """Reduce each chain's index collection to its (min, max) range."""
    return {
        chain: (min(indices), max(indices))
        for chain, indices in chain_range_dic.items()
    }
718f1acfae09fb0651cd351d232359f8b8ff7dfc
41,179
import math


def calculateHeading(origin, destination):
    """
    Calculate the heading direction between two coordinates.

    It returns the heading in degrees, where 0 deg is North.
    (This is not very accurate but good enough for us.)
    """
    dx = destination[0] - origin[0]
    dy = destination[1] - origin[1]
    # atan2 measures from the +x axis; add 90 so North reads as 0 deg.
    return math.degrees(math.atan2(dy, dx)) + 90
b114c6c4c028e148fe87f828128d2bd2766f0c61
41,185
import math


def smoothedsigmoid(x, b=1):
    """Logistic sigmoid with adjustable steepness.

    `b` controls smoothness: the smaller b is, the more gradual
    (smoother) the transition.
    """
    decay = math.exp(-b * x)
    return 1 / (1 + decay)
014bec11a761fcf19c9e5885a1fa870115b90a00
41,192
def diff_lists(list1, list2, option=None):
    """
    With option 'and', return the items of list1 that also appear in
    list2 (intersection, list1 order preserved). Otherwise return the
    items of list1 that do NOT appear in list2 (difference).
    """
    if option == 'and':
        return [item for item in list1 if item in list2]
    return [item for item in list1 if item not in list2]
2ffe6656d638d1ce185501361288266158ead09f
41,193
from typing import Sequence


def boolListToString(binary : Sequence[bool]) -> str:
    """Convert a boolean list to a string

    Parameters
    ----------
    binary : Sequence[bool]
        Sequence of booleans representing a binary number in big endian form

    Returns
    -------
    str
        String representing a binary number in big endian form
    """
    return "".join("1" if bit else "0" for bit in binary)
9a0eda92124336b66ca74304efabdf1c7f1b082e
41,194
def process_passport(passport):
    """Turn a passport list (lines of space-separated key:value pairs) into a dictionary."""
    joined = ' '.join(line.strip('\n') for line in passport)
    pass_dict = {}
    for field in joined.split(' '):
        key, entry = field.split(':')
        pass_dict[key] = entry
    return pass_dict
30e5d0943f8b34fd5c02dad70af143070348331d
41,202
def remove_none_items(adict):
    """Return a copy of `adict` with every None-valued key dropped."""
    filtered = {}
    for key, value in adict.items():
        if value is not None:
            filtered[key] = value
    return filtered
ddea6a77bc55ce33485f74c83a75e14f01d303a9
41,209
import struct


def unpack_info_packet(packet):
    """Unpack an informational packet laid out as a native long + double."""
    layout = struct.Struct("Ld")
    return layout.unpack(packet)
5976abc2b2fc1d072bf5434e639cbf27f3e69e58
41,211
def release_string(d_release):
    """
    Produces a string describing a release

    Args:
        d_release (dict): dictionary containing the release data

    Returns:
        (string): "<artists>: <title> (<release id>)"

    Raises:
        (KeyError): if the data does not contain the field "basic_information".

    >>> release_string({'id': 1})  # doctest: +NORMALIZE_WHITESPACE
    Traceback (most recent call last):
        ...
    KeyError: "Your release 1 doesn't contain the field 'basic_information'"
    """
    release_id = d_release['id']
    basics = d_release.get('basic_information', None)
    if not basics:
        raise KeyError("Your release %d doesn't contain"
                       " the field 'basic_information'" % release_id)
    artists = basics.get('artists', None)
    # Bug fix: `artists` may be None here and len(None) raised TypeError;
    # a truthiness test treats missing and empty artist lists the same.
    if artists:
        j = artists[0]['join']
        # A comma joiner hugs the preceding name ("A, B"); any other
        # joiner is spaced on both sides ("A & B").
        if j == ',':
            j = '%s ' % j
        else:
            j = ' %s ' % j
        arts = j.join((a['name'] for a in artists))
    else:
        arts = None
    title = basics.get('title', None)
    return u'{arts}: {title} ({release_id})'.format(arts=arts,
                                                    title=title,
                                                    release_id=release_id)
4ca448b4778fd0ef56bbcfc0c3dce1c60d157174
41,213
def atleast_list(thing):
    """Wrap `thing` in a one-element list unless it already is a list.

    Args
    ----
    thing (any type) : thing to assert is a list

    Returns
    -------
    thing (list)
    """
    return thing if isinstance(thing, list) else [thing]
e97b61266b76aa5ffea65541515e44807c57ba1a
41,218
import decimal


def has_number_type(value):
    """
    Is a value a number or a non-number?

    >>> has_number_type(3.5)
    True
    >>> has_number_type(3)
    True
    >>> has_number_type(decimal.Decimal("3.5"))
    True
    >>> has_number_type("3.5")
    False
    >>> has_number_type(True)
    False
    """
    if isinstance(value, bool):
        # bool subclasses int, so it must be rejected explicitly.
        return False
    return isinstance(value, (int, float, decimal.Decimal))
d5db38736244af750ee881ceb83b5433eecd6bb9
41,220
def flat_list(x=None):
    """
    Recursively flatten a list of (lists of) objects into one flat list.

    Works for any nesting depth; non-list elements are kept as-is and
    empty sub-lists simply vanish.

    Example:
        >>> flat_list([1, 'k', [], 3, [4, 5, 6], [[7, 8]], [[[9]]]])
        [1, 'k', 3, 4, 5, 6, 7, 8, 9]

    Args:
        - x (list): List of lists of objects; None yields [].

    Raises:
        - TypeError: If the input is not a list object.
    """
    if x is None:
        return []
    if not isinstance(x, list):
        raise TypeError(" Input should be a list.")
    flattened = []
    for element in x:
        if isinstance(element, list):
            flattened.extend(flat_list(element))
        else:
            flattened.append(element)
    return flattened
5b96d06192ac96530674042459277f9acfd6c707
41,221
import shlex


def CommandToString(command):
    """Returns quoted command that can be run in bash shell.

    Args:
        command: iterable of argv-style tokens.

    Returns:
        A single string with each token shell-quoted.
    """
    # The `pipes` module was deprecated (PEP 594) and removed in Python
    # 3.13; shlex.quote is the documented direct replacement for
    # pipes.quote and produces identical output.
    return ' '.join(map(shlex.quote, command))
e20a81b1336352e51b624e41aca3bea615cd030b
41,226
import re def _parse_parameters(script): """Parse parameters from script header""" params = {'profiles': [], 'templates': [], 'platform': ['multi_platform_all'], 'remediation': ['all']} with open(script, 'r') as script_file: script_content = script_file.read() for parameter in params: found = re.search('^# {0} = ([ ,_\.\-\w]*)$'.format(parameter), script_content, re.MULTILINE) if found is None: continue splitted = found.group(1).split(',') params[parameter] = [value.strip() for value in splitted] return params
30c028dd1bbd8c4737a613c15bf311798ef8e816
41,228
def get_communicator(episode, agents, alternate=False):
    """
    Select the communicating agent for an episode.

    :param episode: The current episode.
    :param agents: The agents in the game.
    :param alternate: Rotate leadership each episode instead of always
        using agent 0.
    :return: (index of the communicating agent, the agent itself)
    """
    communicator = episode % len(agents) if alternate else 0
    return communicator, agents[communicator]
fb0939abe003f4aba04e58870a1266982921dec1
41,232
def _get_language(uid, query): """ Returns ui_locales of a language :param uid: of language :param query: of all languages :return: string """ return query.get(uid).ui_locales
914d0a1e59ea34a5732b8baee13bf5899a10cc3f
41,234
import torch


def ridge_regularize(network, lam):
    """Apply ridge penalty at linear layer and hidden-hidden weights."""
    linear_penalty = torch.sum(network.linear.weight ** 2)
    hidden_penalty = torch.sum(network.rnn.weight_hh_l0 ** 2)
    return lam * (linear_penalty + hidden_penalty)
29e46f10b0ee63f0bda090836b95b1f0b4b1664c
41,236
def build_grid(filename):
    """Scrapes a formatted text file and converts it into a word search grid.

    Args:
        filename: A text file containing rows of alphabetical characters,
            optionally separated by spaces or commas. Each row must contain
            the same number of letters.

    Returns:
        A 2d list, representing the rows and columns of the word search grid.
    """
    grid = []
    # `with` guarantees the file is closed even if reading raises;
    # the original open()/close() pair leaked the handle on error.
    with open(filename, 'r') as input_file:
        for line in input_file.read().splitlines():
            # Ignore separators
            line = line.replace(' ', '').replace(',', '')
            grid.append(list(line))
    return grid
0bed89308de1ba3c6fb1d0372364305deb5d0856
41,237
def question_result_type(response):
    """
    Generate the answer text for question result type.

    :param response: Kendra query response
    :return: Answer text
    """
    try:
        excerpt = response['resultItems'][0]['documentExcerpt']['text']
    except KeyError:
        return "Sorry, I could not find an answer in our FAQs."
    return ("On searching the Enterprise repository, I have found"
            " the following answer in the FAQs--" + '\"' + excerpt + '\"')
15fd06ee46d6377f1fe38243c11acf5e66d739b6
41,243
def sftp_prefix(config):
    """
    Generate SFTP URL prefix

    Credentials are embedded when present; the port suffix is omitted for
    the default SFTP port 22.
    """
    if config['username'] and config['password']:
        login_str = '%s:%s@' % (config['username'], config['password'])
    elif config['username']:
        login_str = '%s@' % config['username']
    else:
        login_str = ''
    if config['port'] and config['port'] != 22:
        port_str = ':%d' % config['port']
    else:
        port_str = ''
    return 'sftp://%s%s%s/' % (login_str, config['host'], port_str)
225ae15212f7024590b1aa91f3ad7a32594cb9c3
41,250
def get_prov(uid):
    """Return the provenance string attached to `uid`."""
    provenance = uid.prov
    return provenance
0f92e61f946f42ddcda0ca8962e6364ef3c2cc32
41,253
def wrap_row(r: list, by: int = 1) -> list:
    """Rotate the list `r` by `by` positions.

    Positive `by` shifts right; negative shifts left.

    Args:
        r (list): list to wrap.
        by (int, optional): number of positions to shift by. Defaults to 1.

    Returns:
        list: wrapped list.
    """
    tail, head = r[-by:], r[:-by]
    return tail + head
bb1302fd7f20e2a8f3356448cc6da3826b3baf4d
41,255
import re


def extract_pin(module, pstr, _regex=re.compile(r"([^/]+)/([^/]+)")):
    """
    Extract the pin from a line of the result of a Yosys select command,
    or None if the command result is irrelevant (e.g. does not
    correspond to the correct module)

    Inputs
    -------
    module: Name of module to extract pins from
    pstr: Line from Yosys select command (`module/pin` format)
    _regex: pre-compiled `module/pin` pattern (hoisted at def time)
    """
    # Bug fix: the pre-compiled `_regex` default existed precisely to
    # avoid recompilation, but the body ignored it and called re.match()
    # with the pattern string on every call.
    m = _regex.match(pstr)
    if m and m.group(1) == module:
        return m.group(2)
    return None
6d48dc9ccdb2dfe1dc89a8c7a56f564fccfe60a3
41,256
def compute_logits(theta, ob):
    """
    theta: A matrix of size |A| * (|S|+1)
    ob: A vector of size |S|
    return: A vector of size |A|
    """
    return ob.dot(theta.T)
1913409b9a2f95b83c199379d602d111f6e49851
41,260
def AddOrdinalSuffix(value):
    """Adds an ordinal suffix to a non-negative integer (e.g. 1 -> '1st').

    Args:
        value: A non-negative integer.

    Returns:
        A string containing the integer with a two-letter ordinal suffix.
    """
    if value < 0 or value != int(value):
        raise ValueError('argument must be a non-negative integer: %s' % value)
    # 11, 12 and 13 are irregular: they always take 'th'.
    if value % 100 in (11, 12, 13):
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(value % 10, 'th')
    return str(value) + suffix
732ac382c83983d2083f22bb23ff8968bb05875d
41,261
def first_player_wins(a, b):
    """
    If tie : Returns 0
    If first player wins : Returns 1
    If second player wins : Returns -1
    """
    if a == b:
        return 0
    # Each key beats its value in rock-paper-scissors.
    beats = {"R": "S", "S": "P", "P": "R"}
    return 1 if beats.get(a) == b else -1
ebb0b92862039ed5a8227573d5e79e1a5ea7f353
41,264
def _get_right_parentheses_index_(struct_str): """get the position of the first right parenthese in string""" # assert s[0] == '(' left_paren_count = 0 for index, single_char in enumerate(struct_str): if single_char == '(': left_paren_count += 1 elif single_char == ')': left_paren_count -= 1 if left_paren_count == 0: return index else: pass return None
43c1d890fb4ba62ae6e1a7c7df603428f9b342cd
41,265
def obs_is_afternoon(obcode):
    """Given an observation code (eg 'ob_1a', 'ob12_b') is this an afternoon obs?"""
    last_char = obcode[-1]
    return last_char == 'b'
8d1f87b7f526f98a831c1da3fd6ebeb429d954ef
41,268
def center_on_atom(obj_in, idx=None, copy=True):
    """Shift all coords in `obj` such that the atom with index `idx` is at
    the center of the cell: [0.5,0.5,0.5] fractional coords.

    Parameters
    ----------
    obj_in : structure-like object exposing `coords`, `coords_frac` and
        `set_all()` -- presumably a pwtools Structure/Trajectory; confirm
    idx : int
        index of the atom to center on (required despite the None default)
    copy : bool
        operate on a copy of `obj_in` (True) or shift it in place (False)

    Returns
    -------
    the shifted object (the copy, or `obj_in` itself when copy=False)
    """
    assert idx is not None, ("provide atom index")
    obj = obj_in.copy() if copy else obj_in
    # Invalidate cartesian coords; set_all() presumably rebuilds them from
    # the shifted fractional coords -- confirm against the object's API.
    obj.coords = None
    # [...,idx,:] works for (natoms,3) and (nstep,natoms,3) -- numpy rocks!
    obj.coords_frac = obj.coords_frac - obj.coords_frac[...,idx,:][...,None,:] + 0.5
    obj.set_all()
    return obj
690f12a7e95a8e24930096e76941c3061a080b17
41,269
def binary_search(num_list, num, not_found="none"):
    """Performs a binary search on a sorted list of numbers, returns index.

    Only works properly if the list is sorted, but does not check whether it
    is or not, this is up to the caller.

    Arguments:
        num_list: a sorted list of numbers.
        num: a number to search for.
        not_found: string. Controls what happens if the number is not in the
            list.
            - "none": None is returned.
            - "upper", "force_upper": upper index is returned
            - "lower", "force_lower": lower index is returned
            - "nearest": index to nearest item is returned
            If num is larger than all numbers in num_list,
                if "upper", "lower", "force_lower", or "nearest":
                    index to the last item of the list is returned.
                if "force_upper":
                    index to the next item past the end of the list is
                    returned.
            If num is smaller than all numbers in num_list,
                if "upper", "force_upper", "lower", or "nearest":
                    0 is returned.
                if "force_lower":
                    -1 is returned.
            Default: None.

    returns:
        None if len(num_list) is 0
        None if num is not in num_list and not_found is "none"
        Integer index to item, or perhaps nearest item (depending on
        "not_found" keyword argument).
    """
    # Validate the mode up front so a typo fails loudly, not silently.
    if not_found not in (
        "none",
        "upper",
        "force_upper",
        "lower",
        "force_lower",
        "nearest",
    ):
        raise ValueError(
            f"{not_found} is not a recognized value for argument "
            "'not_found'"
        )
    lower_i, upper_i = 0, len(num_list)
    # Empty list: nothing to find regardless of mode.
    if upper_i == 0:
        return None
    # num below the smallest element -- resolve per the mode table above.
    if num < num_list[0]:
        if not_found == "none":
            return None
        if not_found == "force_lower":
            return -1
        return 0
    # num above the largest element -- resolve per the mode table above.
    if num > num_list[upper_i - 1]:
        if not_found == "none":
            return None
        if not_found == "force_upper":
            return upper_i
        return upper_i - 1
    # Invariant: num_list[lower_i] <= num <= num_list[upper_i - 1].
    while True:
        mid_i = (lower_i + upper_i) // 2
        n = num_list[mid_i]
        if n == num:
            return mid_i
        # mid_i == lower_i means the bracket has narrowed to two adjacent
        # indices without an exact hit: num lies strictly between them.
        if mid_i == lower_i:
            if not_found == "none":
                return None
            if not_found in ("upper", "force_upper"):
                return upper_i
            if not_found in ("lower", "force_lower"):
                return lower_i
            # "nearest": pick whichever neighbour is closer to num
            # (the comparison's bool adds 0 or 1 to lower_i).
            return lower_i + (num_list[upper_i] - num < num - n)
        if n > num:
            upper_i = mid_i
        else:
            lower_i = mid_i
91b77d6910698e18f0369990dee11dfab3333b6e
41,271
def _int_to_hex(x: int) -> str: """Converts an integer to a hex string representation. """ return hex(x)
2a9bdeb96339747ec33e90393a448519daa59a84
41,272
def bai_from_bam_file(bam_file):
    """
    Simple helper function to change the file extension of a .bam file to .bai.
    """
    if not bam_file.endswith('.bam'):
        raise ValueError('{0} must have a .bam extension.'.format(bam_file))
    stem = bam_file[:-3]
    return stem + 'bai'
812aee46a94a3a1d3eec15a72d820785cf531692
41,282
def get_events_summaries(events, event_name_counter, resource_name_counter,
                         resource_type_counter):
    """
    Summarize a CloudTrail event list into occurrence counters.

    Updates the three counters in place with the event names, resource
    names and resource types found in `events`, then reports the winners.

    Args:
        events (dict): Dictionary containing list of CloudTrail events
            under the 'Events' key.
        event_name_counter / resource_name_counter / resource_type_counter:
            collections.Counter instances accumulated across calls.

    Returns:
        Tuple of three lists of (name, count) pairs: the 10 most common
        events, resource names, and resource types.
    """
    for event in events['Events']:
        event_name_counter.update([event.get('EventName')])
        resources = event.get("Resources")
        if resources is None:
            continue
        resource_name_counter.update(res.get("ResourceName") for res in resources)
        resource_type_counter.update(res.get("ResourceType") for res in resources)
    return (event_name_counter.most_common(10),
            resource_name_counter.most_common(10),
            resource_type_counter.most_common(10))
b8d061f9710a3914b74da9ec56a2037dcf8320d4
41,283
def note_css_class(note_type):
    """
    Map a Django lesson note type to its Bootstrap alert CSS class.

    The note_type values ('info', 'warning', 'danger', 'note') come from a
    wagtail ChoiceBlock and are translated to bootstrap alert classes
    (https://getbootstrap.com/docs/4.3/components/alerts/). Unknown types
    fall back to 'info'.
    """
    if note_type == 'info':
        return 'success'
    if note_type == 'warning':
        return 'warning'
    if note_type == 'danger':
        return 'danger'
    if note_type == 'note':
        return 'primary'
    return 'info'
241ec5698e384d1d5026b955b32bff8e8e188dd3
41,286
def polynomial(coeffs):
    """
    Return a polynomial function with coefficients `coeffs`.

    Coefficients are listed lowest-order first, so that ``coeffs[i]`` is
    the coefficient in front of ``x**i``. An empty coefficient list yields
    the zero polynomial.
    """
    if not coeffs:
        return lambda x: x * 0
    def poly(x):
        # Start from x*0 so array inputs broadcast to the right shape.
        total = (x * 0) + coeffs[0]
        for power, coeff in enumerate(coeffs[1:], start=1):
            total = total + coeff * x ** power
        return total
    return poly
4df8fa27e3dab2d7feca9b19d6e8f87a07acd100
41,289
import math
def get_xy(lat, lng, zoom):
    """
    Generates an X,Y tile coordinate based on the latitude, longitude
    and zoom level

    Returns:
        An X,Y tile coordinate
    """
    tile_size = 256
    # Use a left shift to get the power of 2
    # i.e. a zoom level of 2 will have 2^2 = 4 tiles per axis
    num_tiles = 1 << zoom
    # Find the x_point given the longitude (linear mapping onto x)
    point_x = (tile_size / 2 + lng * tile_size / 360.0) * num_tiles // tile_size
    # Convert the latitude to radians and take the sine
    sin_y = math.sin(lat * (math.pi / 180.0))
    # Calculate the y coordinate (Web-Mercator projection; diverges near
    # the poles where sin_y approaches +/-1)
    point_y = ((tile_size / 2) + 0.5 * math.log((1 + sin_y) / (1 - sin_y)) * -(tile_size / (2 * math.pi))) * num_tiles // tile_size
    return int(point_x), int(point_y)
eca13cf7d5ba4ba8b3799d80d6d71c7e72eb4402
41,293
import ast
def eval_condition(condition, locals):
    """Evaluates the condition, if a given variable used in the condition
    isn't present, it defaults it to None

    NOTE(review): this passes `condition` to eval() -- it must never be
    fed untrusted input. `locals` is mutated in place (missing names are
    inserted), and the parameter name shadows the builtin locals().
    """
    # Walk the parsed AST to collect every bare name the expression uses.
    condition_variables = set()
    st = ast.parse(condition)
    for node in ast.walk(st):
        if type(node) is ast.Name:
            condition_variables.add(node.id)
    # Default any unknown name to None so eval() cannot raise NameError.
    for v in condition_variables:
        if v not in locals:
            locals[v] = None
    # Empty globals dict; names resolve only through `locals`.
    result = eval(condition, {}, locals)
    return result
f3fb7a871c16f22b2cd5f9d5087aec364772f6bb
41,296
def echo(data):
    """Identity handler: hand the client's payload straight back."""
    return data
80655150d1578c12b2f196b664df8935bae569f1
41,297
def uhex(num: int) -> str:
    """Uppercase Hex."""
    return f"0x{num:02X}"
f6025d7aa2a3b1cbf8286a878b5bc6f9dcc87f4c
41,301
import torch


def rae(target, predictions: list, total=True):
    """
    Calculate the RAE (Relative Absolute Error) compared to a naive
    forecast that only assumes the future will produce the average of the
    past observations.

    Parameters
    ----------
    target : torch.Tensor
        The true values of the target variable
    predictions : list
        - predictions[0] = y_hat_test, predicted expected values of the
          target variable (torch.Tensor)
    total : bool, default = True
        Kept for interface parity with other loss functions; only the
        overall loss is supported.

    Returns
    -------
    torch.Tensor
        A scalar with the overall RAE (the lower the better)

    Raises
    ------
    NotImplementedError
        When 'total' is set to False, as rae does not support loss over
        the horizon
    """
    if not total:
        raise NotImplementedError("rae does not support loss over the horizon")
    y_hat_test = predictions[0]
    # Naive benchmark: always predict the mean of the observed series
    # (out-of-sample denominator).
    y_hat_naive = torch.mean(target)
    numerator = torch.mean(torch.abs(target - y_hat_test))
    denominator = torch.mean(torch.abs(target - y_hat_naive))
    return numerator / denominator
6f3650873d00fcd237bb608eed58593a927d8815
41,304
import operator


def count_words(s, n):
    """Return the n most frequently occurring words in s.

    Ties are broken alphabetically. Raises IndexError when n exceeds the
    number of distinct words.
    """
    # Tally occurrences of each space-separated word.
    freq = {}
    for word in s.split(" "):
        freq[word] = freq.get(word, 0) + 1
    # Alphabetical pre-sort, then a stable sort by count descending, so
    # equal counts keep alphabetical order.
    ranked = sorted(sorted(freq.items(), key=operator.itemgetter(0)),
                    key=operator.itemgetter(1), reverse=True)
    # Top n (word, count) tuples.
    return [ranked[rank] for rank in range(n)]
f5d2595ffadc1eebf671d3a11ee2ead45466a732
41,305
def server_address(http_server):
    """IP address of the http server."""
    host = http_server.server_address[0]
    return host
ff5c9e56f7db02924913c1f9484c83d8091c9d67
41,308
def isNestedInstance(obj, cl):
    """
    Test whether `obj` is an instance of `cl` or of any subclass of it.

    Parameters
    ----------
    obj: object instance
        object to test
    cl: Class
        top level class to test

    returns
    -------
    r: bool
        True if obj is indeed an instance or subclass instance of cl
    """
    tree = [cl]
    # Bug fix: the original probed "__subclasses" (missing the trailing
    # double underscore), so the branch below never executed and would
    # also have crashed calling the non-existent attribute if it had.
    if hasattr(cl, "__subclasses__"):
        for k in cl.__subclasses__():
            if hasattr(k, "__subclasses__"):
                tree += k.__subclasses__()
    # issubclass already matches subclasses transitively, so cl alone is
    # sufficient for correctness; the tree mirrors the original intent.
    return issubclass(obj.__class__, tuple(tree))
bca1adb3ba93605b55ed6d204e89210d6b570882
41,314
def _transform_metric(metrics): """ Remove the _NUM at the end of metric is applicable Args: metrics: a list of str Returns: a set of transformed metric """ assert isinstance(metrics, list) metrics = {"_".join(metric.split("_")[:-1]) if "_cut" in metric or "P_" in metric else metric for metric in metrics} return metrics
13747864d70f7aae6aaec5c9139724ff6c8cb7fb
41,315
def ProgressBar(percent, prefix=None, notches=50, numericalpercent=True, unicode=False):
    """Render a textual progress bar for `percent` (0.0 to 1.0).

    The bar has `notches` segments, optionally preceded by a custom
    `prefix` string and followed by the percentage rounded to two places
    when `numericalpercent` is True. By default '#' and '.' draw the
    full/empty portions; with `unicode=True` Unicode full/empty blocks are
    used instead, which are not defined in all fonts.
    """
    filled = int(round(percent * notches))
    full_char = "\u25AE" if unicode else "#"    # full block
    empty_char = "\u25AF" if unicode else "."   # empty block
    pieces = []
    if prefix:
        pieces.append("{} ".format(prefix))
    pieces.append("[")
    pieces.append(full_char * filled)
    pieces.append(empty_char * (notches - filled))
    pieces.append("]")
    if numericalpercent:
        pieces.append(" [{}%]".format(str(round(percent * 100, 2))))
    return "".join(pieces)
1ad220f55d9dd242778f879c75905a0484bdbd73
41,316
import re


def get_inputs(filename):
    """
    Read the input file and split each line into hex-grid move tokens.

    Each line holds directions (e, se, sw, w, nw, ne) to one tile starting
    from the origin; returns one token list per tile.
    """
    directions = 'e|se|sw|w|nw|ne'
    with open(filename, 'r') as input_file:
        lines = input_file.read().splitlines()
    return [re.findall(directions, line) for line in lines]
d4171a45d93db37959d9422a3d12c193249856a1
41,318
def is_boundary(loop):
    """Is a given loop on the boundary of a manifold (only connected to one face)"""
    linked = loop.link_loops
    return len(linked) == 0
4d4df7e552c6a57b42fa3e9c43682368ae5091c1
41,325
def collapse(array):
    """
    Collapse a homogeneous array into a scalar; do nothing if the array
    is not homogenous
    """
    distinct = set(array)
    if len(distinct) == 1:
        # homogenous array
        return array[0]
    return array
1573cbcfe3691b83be4710e3d2ea1ff3791bc098
41,334
from typing import Union

import time


def time_ms(as_float: bool = False) -> Union[int, float]:
    """Return the current Unix time in milliseconds.

    :param as_float: when True, return the raw float; otherwise truncate to int
    :return: current time in milliseconds
    """
    milliseconds = time.time() * 1000
    return milliseconds if as_float else int(milliseconds)
9e9dd47636182935d2a6f52156fc987996c75ec3
41,335
def getcode(line):
    """Extract the Geonames reference code and its description.

    ``line`` is tab-separated; the code is the first field with its first
    two characters stripped, the description is the second field.
    Returns a ``(code, description)`` tuple.
    """
    fields = line.split('\t')
    code = fields[0][2:]
    description = fields[1]
    return (code, description)
fd647765934571c2bf1f4d55e94f572a26bf5250
41,342
def path_add_str(path_):
    """Format ``path_`` as an "added path" line for console printing."""
    formatted = '+ {}'.format(path_)
    return formatted
0f1edde223e432560482edd68f78cb2b42a6bc84
41,347
def get_time_in_min(timestamp):
    """Convert an "HH:MM" timestamp (e.g. "12:00") into total minutes."""
    hours, minutes = (int(part) for part in timestamp.split(":"))
    return hours * 60 + minutes
ef7f8418ad50a2ac0c2814610004aec48236f5a8
41,348
def is_leaf(tree):
    """
    :param tree: a tree node
    :return: True if ``tree`` has neither a left nor a right child
    """
    return tree.left is None and tree.right is None
5db41c7c31ba9edd03d86d8463ef23b5e289e38b
41,349
def removeForwardSlash(path):
    """Strip one trailing forward slash from a filepath, if present.

    :param path: filepath
    :returns: path without a final forward slash
    """
    return path[:-1] if path.endswith('/') else path
bbba3cd1d3c051f805bda075227ce1ba4428df8c
41,351
def getAgnData(hd_agn, agn_FX_soft, redshift_limit):
    """Select bright, low-redshift AGNs from the catalogue.

    @hd_agn :: table file with all relevant info on AGNs
    @agn_FX_soft :: minimum soft X-ray flux to be classified as an AGN
    @redshift_limit :: only AGNs below this redshift are kept
        --> typically, we are interested in the low-z universe for this project

    Returns::
    @pos_z :: positions (ra, dec) and redshifts of the downsampled AGNs
    @scale_merger :: halo scale factor of the last major merger
    @downsample_agn :: boolean selection mask applied to the table
    """
    # Boolean mask combining the flux and redshift selection criteria.
    downsample_agn = (hd_agn['FX_soft'] > agn_FX_soft) & (hd_agn['redshift_R'] < redshift_limit)

    pos_z = [hd_agn['RA'][downsample_agn],
             hd_agn['DEC'][downsample_agn],
             hd_agn['redshift_R'][downsample_agn]]

    # Scale factor of the last major merger for the selected AGNs.
    scale_merger = hd_agn['HALO_scale_of_last_MM'][downsample_agn]
    return pos_z, scale_merger, downsample_agn
b9f048e8ff2055a38f66959b8465279d0fa34609
41,353
def extract_name_and_link(a_object):
    """
    Get the source name and url if it's present.

    Parameters:
    ----------
    - a_object (bs4.element.Tag - `a`) : an a object html tag parsed by
      beautiful soup 4

    Returns:
    ----------
    - source_name (str) : the plain text source name as included by Ad Fontes Media
    - link (str) : the url location where Ad Fontes Media stores the
      reliability and bias data

    Both values are None when the tag is absent or not an Ad Fontes link.
    """
    # Guard clauses replace the original single compound condition.
    if a_object is None:
        return None, None
    link = a_object["href"]
    if link is None or not link.startswith("https://adfontesmedia.com"):
        return None, None
    source_name = a_object.get_text().replace(" Bias and Reliability", "")
    return source_name, link
aebe743e27c2150cd81b582075091386253939e5
41,354
def async_get_pin_from_uid(uid):
    """Get the device's 4-digit PIN — the last four characters of its UID."""
    pin = uid[-4:]
    return pin
7556416888dbeaabd39c368458a8b64927a7a13a
41,360
def insert_ordered(value, array):
    """Insert ``value`` into the sorted list ``array`` in place, keeping it
    sorted, and return the index where it was inserted.

    Equal values are inserted after existing ones (right bisection), which
    matches the original linear scan's behavior, but the search is now
    O(log n) instead of always scanning the whole list.
    """
    import bisect  # local import keeps the block self-contained
    index = bisect.bisect_right(array, value)
    array.insert(index, value)
    return index
9f491aab83fcd3716eb5894d675ec4ba90bbbae9
41,363
def compute_linenumber_stats(file_name):
    """Return the total number of lines in ``file_name``.

    The previous version shadowed the ``input`` builtin with its file-handle
    name and counted lines manually; the count is now a generator ``sum``.
    """
    with open(file_name) as source:
        return sum(1 for _ in source)
39a170010e0987903c080d2ebab1c37d7099af0b
41,365
def find_following_duplicates(array):
    """
    Find the duplicates that immediately follow themselves.

    Parameters
    ----------
    array : list or ndarray
        A sequence possibly containing consecutive duplicates.

    Returns
    ----------
    uniques : list
        True for each element that differs from its immediate predecessor
        (the first element is always True), False for a repeated element.

    Example
    ----------
    >>> find_following_duplicates(["a","a","b","a","a","a","c","c","b","b"])
    [True, False, True, True, False, False, True, False, True, False]
    """
    # The defensive copy in the original was dead work: the input is only
    # read, never mutated.
    uniques = []
    previous = None
    for position, element in enumerate(array):
        # First element is always unique; later ones only when they differ
        # from their immediate predecessor.
        if position == 0 or element != previous:
            uniques.append(True)
        else:
            uniques.append(False)
        previous = element
    return uniques
aafd4bb76c318ed907732549e3650df063c8c5b5
41,366
import ipaddress


def list_all_available_cidr(jnj_root_cidr_list, allocated_cidr_list, subnet_prefix):
    """
    Find all CIDRs of the requested size that can be carved out of the
    provided top-level CIDRs without colliding with existing allocations.

    Args:
        jnj_root_cidr_list: top-level CIDRs allocated to region
        allocated_cidr_list: CIDRs currently in use in region
        subnet_prefix: requested CIDR size (prefix length)

    Returns:
        list of available CIDR strings
    """
    requested_prefix = int(subnet_prefix)
    available = []

    for root_cidr in jnj_root_cidr_list:
        root_network = ipaddress.IPv4Network(root_cidr)

        # A root block smaller than the requested subnet can never hold it.
        if int(root_network.prefixlen) > requested_prefix:
            continue

        # Only allocations that overlap this root block can ever conflict.
        conflicts = [
            ipaddress.IPv4Network(block)
            for block in allocated_cidr_list
            if ipaddress.IPv4Network(block).overlaps(root_network)
        ]

        # Keep each candidate subnet that overlaps none of the conflicts.
        for candidate in root_network.subnets(new_prefix=requested_prefix):
            if not any(candidate.overlaps(taken) for taken in conflicts):
                available.append(candidate.with_prefixlen)

    return available
caf84de05b7c8b6a7246062e2f34ce57329cf6b7
41,374
def is_unibipartite(graph):
    """Internal function that returns whether the given graph is a
    uni-directional bipartite graph.

    Parameters
    ----------
    graph : GraphIndex
        Input graph

    Returns
    -------
    bool
        True if no node id appears both as a source and as a destination.
    """
    src, dst, _ = graph.edges()
    source_ids = set(src.tonumpy())
    destination_ids = set(dst.tonumpy())
    return source_ids.isdisjoint(destination_ids)
d7603408c4a99c8028a944dd699b0728cce57760
41,376
def bss_host_xname(host):
    """Retrieve the xname (the "ID" field) from the BSS host object."""
    xname = host["ID"]
    return xname
08a05819502815b30c71929ee66613ab164210f3
41,377
from collections import OrderedDict


def unique(iterable, key=None):
    """Return the unique elements of ``iterable``, preserving first-seen order.

    Args:
        iterable: elements to de-duplicate.
        key: optional function mapping an element to its identity; when two
            elements share a key, the first one encountered is kept.

    Returns:
        list of unique elements in their original order.
    """
    # Fixed: the original imported OrderedDict from ``typing`` — that alias is
    # meant for annotations only; the real class lives in ``collections``.
    if key is None:
        return list(OrderedDict.fromkeys(iterable))
    seen = OrderedDict()
    for element in iterable:
        # setdefault keeps the first element seen for each key.
        seen.setdefault(key(element), element)
    return list(seen.values())
ec20ceb5de991ad8828920eeac060c8651ee0da5
41,379
def get_longest_string(in_list):
    """
    Get the longest string(s) in a list.

    :param in_list: list of strings
    :return: the single longest string if unique, a list of ties otherwise,
        or None for an empty input list.
    """
    if not in_list:
        return None
    longest = len(max(in_list, key=len))
    ties = [s for s in in_list if len(s) == longest]
    return ties[0] if len(ties) == 1 else ties
ccff7bacf938725dccbee93e52cd6fcbe9064c43
41,382
def numpy(tensor):
    """Convert a torch.tensor to a flattened 1D numpy.ndarray."""
    host_tensor = tensor.cpu().detach()
    return host_tensor.numpy().ravel()
cdea8e80a6129ba846d9f69dc4825bf574e688ac
41,383
def calc_node_coords(tiling_edge_list, first_node_offset=0):
    """
    For a single tiling path (a list of edges for one contig) calculate the
    genomic coordinate of every node in the path.

    In case there are cycles in the tiling path, the existing node's
    coordinate will be overwritten.

    `first_node_offset` refers to the length of the first node. If not
    specified, the contig length does not consider the length of the
    first node.

    Returns a ``(coord_map, contig_len)`` tuple.
    """
    if not tiling_edge_list:
        return {}, 0

    first_edge = tiling_edge_list[0]
    coord_map = {first_edge.v: first_node_offset}
    contig_len = 0

    for edge in tiling_edge_list:
        if edge.v not in coord_map:
            raise Exception(
                'Tiling path is not in sorted order. Node "{v!r}" does not yet have an assigned coordinate.'.format(v=edge.v))
        # Advance by the edge span |b - e| from the source node's coordinate.
        end_coord = coord_map[edge.v] + abs(int(edge.b) - int(edge.e))
        coord_map[edge.w] = end_coord
        contig_len = max(contig_len, end_coord)

    return coord_map, contig_len
f0d2e310bf68328f4edc4ba35619b7b242d9ff10
41,393
def bytes_to_int(byte_string) -> int:
    """
    :param byte_string: a byte string such as b'\\xd4\\x053K\\xd8\\xea'
    :return: integer value of the big-endian byte stream
    """
    return int.from_bytes(byte_string, byteorder="big")
932b6cb3e41fa0c1afdae2aa1ca765e64ce44986
41,394
from pathlib import Path


def path_type(value) -> Path:
    """argparse type for converting a string into a pathlib.Path object.

    Raises ValueError when the path does not exist on disk.
    """
    candidate = Path(value)
    if candidate.exists():
        return candidate
    raise ValueError(f"Given Path not found: {value}")
8b54b1c60cdb312f95c655d72ca542de7afdd826
41,396
import math


def li_times(i, epsilon, delta):
    """
    Compute li, the optimal number of times to loop while sampling the
    equivalence oracle.  This li is the reason pac-basis is sooo much
    faster than the original horn1 algorithm.

    Parameters:
    -----------------------------------
    i : int
        Number of times the equivalence oracle has been called already
    epsilon : float (0, 1)
        Tolerance for error in accuracy for the pac-basis
    delta : float (0, 1)
        Tolerance for confidence in confidence for the pac-basis
    """
    # log(delta)/log(2) is log2(delta); kept as a ratio to preserve the
    # exact floating-point result of the original formula.
    log2_delta = math.log(delta) / math.log(2)
    return (1.0 / epsilon) * (i - log2_delta)
006afaaed14902e58d73dbb642d04fc468d684c2
41,397
import torch


def size_getter(shape):
    """
    Helper function for defining a size object.

    :param shape: The shape (None, an int, or a tuple/list of ints)
    :type shape: int|tuple[int]
    :return: Size object
    :rtype: torch.Size
    """
    if shape is None:
        return torch.Size([])
    if isinstance(shape, (tuple, list)):
        return torch.Size(shape)
    return torch.Size((shape,))
d6ec148770871ef636cb18a499ef30cf82997ac9
41,399
def list_diff(list1, list2):
    """
    Return the elements of the first list that are not contained in the
    second list.  (The original docstring had the two operands reversed.)

    :param list1: list whose elements are kept
    :param list2: list of elements to exclude
    :return: list
    """
    return [item for item in list1 if item not in list2]
aa7869d879d2b53fe584f74ced7983ffc9e98710
41,404
def total_weight(graph, path):
    """Sum the weights of the edges between consecutive nodes in path.

    Args:
        graph (Graph): A graph containing nodes and edges between them
        path (list of str): A list of strings representing nodes in graph

    Returns:
        int: The total weight of all the implied edges in path
    """
    consecutive = zip(path, path[1:])
    return sum(graph.weight(a, b) for a, b in consecutive)
ec7c88f913a23bc5bf03fa4f6724c0f6af8fc437
41,409
def reduce_by_maxcc(result_list, maxcc):
    """
    Filter list image tiles by maximum cloud coverage

    :param result_list: list of dictionaries containing info provided by
        Opensearch REST service
    :type result_list: list(dict)
    :param maxcc: filter images by maximum percentage of cloud coverage
    :type maxcc: float in range [0, 1]
    :return: list of dictionaries containing info provided by Opensearch
        REST service
    :rtype: list(dict)
    """
    # The service reports cloudCover as a percentage, hence the 100x scaling.
    threshold = 100 * float(maxcc)
    return [tile for tile in result_list
            if tile['properties']['cloudCover'] <= threshold]
cdc9ad0bdff825a1f58f7211f1c1fd57f4611755
41,411
import re


def extract_answer_text(options_text: str, answer_tag: str):
    """Extracts correct answer's text from all options.

    Args:
      options_text: all options as text, either a Python-list literal such
        as "['x', 'y']" or an inline "a ) ... b ) ..." listing.
      answer_tag: correct answers tag a, b, c, ...

    Returns:
      parsed option text corresponding to the correct answer.

    Raises:
      ValueError: if the listing cannot be parsed or the tag is out of range.
    """
    if options_text.startswith('[') and options_text.endswith(']'):
        # ast.literal_eval replaces eval(): it only accepts Python literals,
        # so option strings from external data can no longer execute code.
        import ast
        options = ast.literal_eval(options_text)
        options = [re.sub('[abcde] \\)', '', x).strip() for x in options]
    else:
        options = re.split('[abcde] \\)', options_text)
        if options[0]:
            raise ValueError(f'Expects first segment to be empty in {options}.')
        options = [x.strip().rstrip(',').strip() for x in options[1:]]
    correct_id = ord(answer_tag) - ord('a')
    if correct_id >= len(options):
        raise ValueError(f'Ill parsed dictionary {options} from {options_text}.')
    return options[correct_id]
63c2027087d405c99831b2e6ea922d1989f51c20
41,415
def pass1(arg, *args, **kwargs):
    """Return the first positional argument, ignoring any extras."""
    del args, kwargs  # accepted only for interface compatibility
    return arg
d0b666ed5a2e0a4c84166dc790845fed126dc57b
41,418
import re


def string_is_number(target_str):
    """
    Check whether the passed string consists entirely of digits, i.e. can
    be accurately converted to a non-negative integer.

    Fixed: the previous implementation stripped all non-digit characters
    before matching, so any string containing at least one digit
    (e.g. "12a3") incorrectly passed the check.

    Args:
        target_str (str): string to validate if parsable to number.

    Returns:
        bool
    """
    if target_str is None:
        return False
    return bool(re.fullmatch(r'\d+', target_str))
ef477fd6fd7072497ee58f986fc4d73bfa25f2b8
41,420
def crop_image(image, rect):
    """
    Crops an image using the rectangle passed as parameter

    Args:
        image: the image to crop (indexed as [row, column])
        rect: a rectangle in the form of a tuple that defines the area we
            want to crop (top left x, top left y, width, height)

    Returns:
        The cropped image
    """
    x, y, width, height = rect
    # Image arrays are indexed [row (y), column (x)].
    return image[y:y + height, x:x + width]
b062177d187501692e32ef3911f750a183d1cf4c
41,425
def _add_p_tags(raw_body): """Return raw_body surrounded by p tags""" return f"<p>{raw_body}</p>"
27012f6220ed6fb983f5ee9af63e97f2497cf793
41,426
def check_run(system, dx, work_root, cmd, cwd=None, env=None):
    """Runs a command |cmd|.

    Args:
      system (runtime.System): The System instance.
      dx (dockcross.Image or None): The DockCross image to use. If None, the
          command will be run on the local system.
      work_root (str): The work root directory. If |dx| is not None, this
          will be the directory mounted as "/work" in the Docker environment.
      cmd (list): The command to run. Any components that are paths beginning
          with |work_root| will be automatically made relative to |work_root|.
      cwd (str or None): The working directory for the command. If None,
          |work_root| will be used. Otherwise, |cwd| must be a subdirectory
          of |work_root|.
      env (dict or None): Extra environment variables (will be applied to
          current env with dict.update)
    """
    if dx is None:
        if cmd[0] == 'python':
            # Build a fresh list so the caller's |cmd| is not mutated
            # (the original rewrote cmd[0] in place).
            cmd = [system.native_python] + cmd[1:]
        return system.check_run(cmd, cwd=cwd or work_root, env=env)
    return dx.check_run(work_root, cmd, cwd=cwd, env=env)
1507b5b916253644bf0645b7856e2c27227e084f
41,430
import hashlib


def get_sha256(file_name):
    """
    Calculate the sha256 digest of a file, streaming it in 4 KiB chunks.

    :param file_name: path of the file to hash
    :return: hex-encoded sha256 digest string
    """
    digest = hashlib.sha256()
    with open(file_name, 'rb') as handle:
        # iter(callable, sentinel) stops once read() returns empty bytes.
        for chunk in iter(lambda: handle.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest()
057caae3bfa0d2232ed92a3f241375fba0b1b231
41,432
def lineno(el):
    """
    Get the first line number of ast element

    :param ast el: an AST node dict, or a list of nodes (first one is used)
    :rtype: int
    """
    node = el[0] if isinstance(el, list) else el
    line = node['loc']['start']['line']
    assert type(line) == int
    return line
e57bfa22b3e16f39585621edc109f7904ef648d8
41,436
def ether2wei(ether: float):
    """Convert an amount of Ether to wei (1 Ether == 1e18 wei).

    Fixed docstring: the original described the inverse conversion
    (wei to Ether) while the code multiplies by 1e18.

    Note: the result is a float, so values needing more than 53 bits of
    precision lose exactness.
    """
    return ether * 1e18
b826daaa171d24b43b7f901b6498f24f5481ed1c
41,448
def _prepare_chain(structures, pdb_id, pdb_chain, atom_filter, mapping, model=0): """ Prepare PDB chain for distance calculation Parameters ---------- structures : dict Dictionary containing loaded PDB objects pdb_id : str ID of structure to extract chain from pdb_chain: str Chain ID to extract atom_filter : str Filter for this type of atom. Set to None if no filtering should be applied mapping : dict Seqres to Uniprot mapping that will be applied to Chain object model : int, optional (default: 0) Use this model from PDB structure Returns ------- Chain Chain prepared for distance calculation """ # get chain from structure chain = structures[pdb_id].get_chain(pdb_chain, model) # filter atoms if option selected if atom_filter is not None: chain = chain.filter_atoms(atom_filter) # remap chain to Uniprot chain = chain.remap(mapping) return chain
cdc859f9742f31f32879892a30303412001ab612
41,451