Columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
import itertools def flatten(l): """ Function to flatten a list. """ return list(itertools.chain.from_iterable(l))
3db376d039ca5b51ac10ea4ce673bd72b04b4b2b
695,332
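Illustrative only (added, not part of the original row): a quick check of the flatten helper above.

nested = [[1, 2], [3], [], [4, 5]]
assert flatten(nested) == [1, 2, 3, 4, 5]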
def compute_wap(inner, outer): """Computes the wall area percentage (WAP) from the inner and outer airway measurements.""" return (outer - inner) / outer * 100
379e0dcd1729e34e39295988c94640378d128103
695,333
def pkcs5_unpad(data): """Do PKCS5 unpadding to data and return """ data_bytes = bytes(data) return data_bytes[0:-data_bytes[-1]]
7058f51e456c8dbe8b4c9c4cd9e26bc7f27efaf6
695,336
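A hypothetical round-trip for the pkcs5_unpad snippet above, assuming the usual PKCS#5/PKCS#7 convention where the value of the last byte equals the padding length (the inputs below are made up for illustration).

padded = b"secret!" + b"\x01"          # 7 bytes of data plus 1 padding byte
assert pkcs5_unpad(padded) == b"secret!"
assert pkcs5_unpad(b"ABCDE\x03\x03\x03") == b"ABCDE"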
def dash_case(name): """ Convert a camel case string to dash case. Example: >>> dash_case('SomeName') 'some-name' """ letters = [] for c in name: if c.isupper() and letters and letters[-1] != "-": letters.append("-" + c.lower()) else: letters.append(c.lower()) return "".join(letters)
5fbe7aa6f3e0b063a572e57b4b3000bb7835355f
695,340
from datetime import datetime def get_formatted_updated_date(str_date): """ converting 2015-08-21T13:11:39.335Z string date to datetime """ return datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%S.%fZ")
b0d20010a1748d470d23452c959f18aa124d9ddb
695,342
def is_on_filesystem(entry, filesystem): """ Check if a certain element is on a certain type of filesystem :param entry: a dfvfs PathSpec object :param filesystem: dfvfs type indicator string :return: True if the specified filesystem is somewhere in the path-chain of the element """ path = entry while True: if path.type_indicator == filesystem: return True if path.parent: path = path.parent else: return False
d402edf7629c05be4308e04965b9b54e1c9a3272
695,344
def remove_return(seq):
    """ Remove newline, carriage-return and tab characters

    args:
        seq: String
    output:
        seq: String
    """
    return seq.replace("\n", "").replace("\r", "").replace("\t", "")
6179c18d0c1719538abd9dc7f753c627db2e02fa
695,349
import re


def extract_value(content, key, is_int_value=True, delimiter='=', throw_not_found=False,
                  default_value=-1):
    """ Extracts a key from content, value can be an integer or string

    Args:
        content (str): the full given text content
        key (str): the wanted key to be searched in the given content
        is_int_value (bool): determines if the key's value is int or string, which
            affects the search and parsing of the value
        delimiter (str): the separator between the key and its value to split by
        throw_not_found (bool): a flag that determines whether to raise a LookupError
        default_value (any): the value returned when the key is not found in the
            content and no error is raised

    Raises:
        LookupError: throw_not_found is true and key could not be found
        ValueError: is_int_value is true and the value is not a positive integer,
            or an error occurred while parsing the value

    Returns:
        (int|str): the extracted value
    """
    # Search for "<key><delimiter><value>"; the delimiter is escaped in case it is
    # a regex metacharacter.
    if is_int_value:
        match = re.search(fr'{key}{re.escape(delimiter)}\d+', content)
    else:
        match = re.search(fr'{key}{re.escape(delimiter)}\S+', content)
    if not match:
        if throw_not_found:
            raise LookupError(f'"{key}{delimiter}" is not present in the given content')
        else:
            return default_value
    value = match.group(0).split(delimiter)[1]
    try:
        return int(value) if is_int_value else str(value)
    except ValueError:
        raise ValueError('an error occurred while extracting the value.')
29386c9995d042f7c36118ca80cb4f2f335accfc
695,350
def line_has_sep(line): """Line has a `-` before a `=` """ a = line.find('-') # not header b = line.find('=') # header if a == -1: # No `-` return False elif b == -1: # No `=`, but `-` return True else: return a < b
61b9a8fa77dda3197abf1765cf50801f90d82251
695,351
def deselect(elements=None): """ Deselects the given elements. If no elements are passed then all elements are deselected. :param elements: List of elements to deselect. If none are given then all elements are deselected. :type elements: list(Element, Element, ...) :return: None """ return None
36f730f5cf95d976fa0b49d7be62a54789781d05
695,358
def _w_long(x): """Convert a 32-bit integer to little-endian.""" return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
4cd2c9b3e57f8c4dbd100ef00af6cc51480dc683
695,361
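For illustration (added), the little-endian packing done by _w_long above; values outside 32 bits are masked first.

assert _w_long(1) == b"\x01\x00\x00\x00"
assert _w_long(-1) == b"\xff\xff\xff\xff"   # -1 is masked to 0xFFFFFFFF before packing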
def Split_Info(info): """Splits necessary information out from the info vcf column Parameters ---------- info : Series Info column from a vcf as a series Returns ------- dict A dict of necessary fields and their values from the vcf """ fields = ['QD=', 'MQ=', 'MQRankSum=', 'FS=', 'ReadPosRankSum=', 'SOR='] # split out all necessary fields from info columns parts = dict(part.split('=') for part in info.split(';') if any(field in part for field in fields)) return parts
e58d2dad51d34a7644a7d5bf307449194aec9ca3
695,362
def add_dividers(row, divider, padding): """Add dividers and padding to a row of cells and return a string.""" div = ''.join([padding * ' ', divider, padding * ' ']) return div.join(row)
7cbe235ddf8c320cadfcc4b4a3f17a28c2aaac1c
695,365
def is_transpose_identity(perm):
    """
    Tells if the permutation *perm* does nothing (identity).

    :param perm: permutation
    :return: boolean
    """
    return list(perm) == list(range(len(perm)))
09bc1fd0577297b1f9450c7f2f215197ae8ce3ee
695,367
from typing import List import difflib def compare_files(path1: str, path2: str) -> List[str]: """Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files """ diff = difflib.ndiff( open(path1).read().splitlines(), open(path2).read().splitlines() ) return [x for x in diff if x[0] in ["-", "+", "?"]]
2f8df203f3db161313ab2427f17e5db964f27f25
695,370
def _switch(mthread, local, sync, mproc): """ A construct needed so we can parametrize the executor fixture. This isn't straightforward since each executor needs to be initialized in slightly different ways. """ execs = dict(mthread=mthread, local=local, sync=sync, mproc=mproc) return lambda e: execs[e]
1ee45a6faf29d46e4ecc76fd50a5f92735d66107
695,373
def convert_units(arg, unit): """Checks compatibility and converts units using simtk.units package Args: arg (Quantity): quantity to be converted unit (Unit): Unit to be converted to Returns: arg (Quantity): Quantity scaled to the new unit """ conversionFactor = (arg.unit).conversion_factor_to(unit) arg = arg * conversionFactor return arg._value * unit
a5148d66247c41089bd01c11f7debfb955d67119
695,379
def _canonicalize_extension(ext):
    """Returns a transformed ext that has a uniform pattern.

    Specifically, if ``ext`` has a leading . then it is simply returned. If
    ``ext`` doesn't have a leading . then it is prepended. Exceptions to this
    are if ``ext`` is ``None`` or "". If ``ext`` is "" then "" is returned.
    If ``ext`` is None then None is returned.

    :param ext: The extension to canonicalize.
    :returns: The canonicalized extension.
    """
    if ext is None or ext == "" or ext.startswith("."):
        return ext
    return "." + ext
935e85fd9a0f1bcfadc68c2390446ecbc814a0bc
695,381
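A few spot checks (added here, not from the source) of _canonicalize_extension above.

assert _canonicalize_extension("txt") == ".txt"
assert _canonicalize_extension(".txt") == ".txt"
assert _canonicalize_extension("") == ""
assert _canonicalize_extension(None) is None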
def get_dictionary_from_list(list_to_search, key, search_value): """ Find a dictionary in a list of dictionaries based on a certain key's value Parameters ---------- list_to_search: list List of dictionaries to search in key: str The key in the dictionaries to look for the value search_value: str The key's value you are looking to match Returns ------- Dictionary object we are searching for """ for the_dict in list_to_search: if the_dict[key] == search_value: return the_dict
9683ccaa9e0b0310aadc519f0067c921112f820c
695,383
def _string(self) -> str: """Returns string representation of Path.as_posix()""" return self.as_posix()
3897e5bd1f689f706b51653f6fd9f588d6d3bb54
695,389
def gcd(*args): """Calculate the greatest common divisor (GCD) of the arguments.""" L = len(args) if L == 0: return 0 if L == 1: return args[0] if L == 2: a, b = args while b: a, b = b, a % b return a return gcd(gcd(args[0], args[1]), *args[2:])
0d425e9fb35e824bcd946d68dbac31f9d87d020f
695,393
def testTelescopes(k, telescopes):
    """
    k: a telescope, baseline or triplet (str)
        e.g.: 'A0', 'A0G1', 'A0G1K0' etc.
    telescopes: single or list of telescopes (str)
        e.g.: 'G1', ['G1', 'A0'], etc.

    returns True if any of the given telescopes appears in k

    assumes all telescope names have the same length!
    """
    if type(telescopes) == str:
        telescopes = [telescopes]
    test = False
    for t in telescopes:
        test = test or (t in k)
    return test
bd3d4ef02c3fa059869ced2a17b9edb3b993d6b0
695,396
import torch def _squash(input_tensor, dim=2): """ Applies norm nonlinearity (squash) to a capsule layer. Args: input_tensor: Input tensor. Shape is [batch, num_channels, num_atoms] for a fully connected capsule layer or [batch, num_channels, num_atoms, height, width] or [batch, num_channels, num_atoms, height, width, depth] for a convolutional capsule layer. Returns: A tensor with same shape as input for output of this layer. """ epsilon = 1e-12 norm = torch.linalg.norm(input_tensor, dim=dim, keepdim=True) norm_squared = norm * norm return (input_tensor / (norm + epsilon)) * (norm_squared / (1 + norm_squared))
715b5819498d4c3a7c40c623fc9a40d2fcfb3773
695,397
def sort_key(key): """ Quick wrap of key for sorting usage: >>> list_ = [{"a": 1, "b": 3}, {"a": 2, "b": 0}] >>> sorted(list_, key=sort_key("b")) [{"a": 2, "b": 0}, {"a": 1, "b": 3}] """ return lambda i: i[key]
01657ca8b2865f061b530d58b706020f7f9825b1
695,398
def crop_image(data, header, scale): """ Crop the image in the given HDUList around the centre point. If the original size is (W, H), the cropped size will be (scale * W, scale * H). """ if scale < 0 or scale > 1: raise ValueError("scale must be in [0, 1]") # idx, data = get_data_index(hdul) h, w = data.shape half_h = int(h * 0.5 * scale) half_w = int(w * 0.5 * scale) mid_y = int(h / 2) mid_x = int(w / 2) data = data[mid_y - half_h:mid_y + half_h, mid_x - half_w:mid_x + half_w] new_h, new_w = data.shape header['NAXIS1'] = new_w header['NAXIS2'] = new_h return data, header
e2145b3953565ec75437e5fb7df759bc6e15746c
695,400
import itertools def all_combinations(samples_list): """ returns all combinations in the list given """ iterable = itertools.chain.from_iterable(itertools.combinations(samples_list, r) for r in range(len(samples_list) + 1)) combinations = [] for i in iterable: combinations.append(list(i)) return combinations[1:]
0a1be4fb2cca86e8682acf05f51fc11ec5dcbbae
695,406
def time_to_str(time_in_seconds):
    """
    Takes a time in Seconds and converts it to a string displaying it in
    Years, Days, Hours, Minutes, Seconds.
    """
    seconds = time_in_seconds
    minutes = None
    hours = None
    days = None
    years = None
    if seconds > 60:
        minutes = seconds // 60
        seconds %= 60
    if minutes and minutes > 60:
        hours = minutes // 60
        minutes %= 60
    if hours and hours > 24:
        days = hours // 24
        hours %= 24
    if days and days > 365:
        years = days // 365
        days %= 365
    s = ''
    if years:
        s += '{:d} Year(s), '.format(int(years))
    if days:
        s += '{:d} Day(s), '.format(int(days))
    if hours:
        s += '{:d} Hour(s), '.format(int(hours))
    if minutes:
        s += '{:d} Minute(s)'.format(int(minutes))
        s += (', ' if hours else ' ') + 'and '
    s += '{:0.3f} Second(s)'.format(seconds)
    return s
ac79955ae1745180719de7260ad1f3e4e3f7f1e3
695,408
def run(df, dt):
    """General wrapper function to calculate folds by scaffold

    Args:
        df (DataFrame): Dataframe with standardized SMILES
        dt: object whose process_dataframe method performs the fold calculation

    Returns:
        Tuple (DataFrame, DataFrame): a dataframe with successfully calculated fold
        information, and a dataframe with failed molecules
    """
    return dt.process_dataframe(df)
8d6f793f22511ba2ca2923de1c6dbfb1c986ba1d
695,411
def get_matrix_or_template_parameters(cli_args): """ cifti_conn_matrix and cifti_conn_template both have the same required parameters, with only a few exceptions. This function returns a list of all parameters required by both scripts. :param cli_args: Full argparse namespace containing all CLI args, including all necessary parameters for cifti_conn_matrix and cifti_conn_template. :return: A list of all parameters required by matrix and template scripts. """ return([ cli_args.mre_dir, cli_args.wb_command, cli_args.series_file, cli_args.time_series, cli_args.motion, cli_args.fd, cli_args.tr, cli_args.minutes, cli_args.smoothing_kernel, cli_args.left, cli_args.right, cli_args.beta8, cli_args.remove_outliers, cli_args.mask, cli_args.make_conn_conc, cli_args.output, cli_args.dtseries ])
fbda701f988ebf490bfd33fa8d78d8bcc1dd109f
695,412
def _format_training_params(params):
    """Convert a dict of parameters to the CLI format

    {"k": "v"} --> "--k v"

    Args:
        params (dict): Parameters

    Returns:
        str: Command line params
    """
    outputs = []
    for k, v in params.items():
        if isinstance(v, bool):
            # boolean options become bare flags and are emitted only when True
            if v:
                outputs.append(f"--{k}")
        else:
            outputs.append(f"--{k} {v}")
    return " ".join(outputs)
bc0146afe7fb5201a78ee9cf7a0bb47a80bba4db
695,422
def valid_parentheses_brackets(input_string: str) -> bool: """ Determine whether the brackets, braces, and parentheses in a string are valid. Works only on strings containing only brackets, braces, and parentheses. Explanation: https://www.educative.io/edpresso/the-valid-parentheses-problem :param input_string: :return: Boolean >>> valid_parentheses_brackets('()') True >>> valid_parentheses_brackets('()[]{}') True >>> valid_parentheses_brackets('{[()]}') True >>> valid_parentheses_brackets('(})') False Time complexity: O(n) where n is the length of the input string. Space complexity: O(n) where n is the length of the input string. """ open_stack: list = [] map_close_to_open: dict = { ')': '(', '}': '{', ']': '[' } for character in input_string: if character in map_close_to_open: if open_stack and open_stack.pop() == map_close_to_open[character]: pass else: return False else: open_stack.append(character) return False if open_stack else True
fd531bc264fc56df699de67cae60b52cf51519c3
695,424
def do_chars_exist_in_db(conn, mal_id: int) -> bool: """ Args: conn ([type]): database connection mal_id (int): myanimelist id Returns: bool: returns true if there are character records for anime id """ with conn: with conn.cursor() as cursor: query = """select exists(select 1 from anime_characters where id=(%s))""" cursor.execute(query, (mal_id,)) res = cursor.fetchall() return res[0][0]
b106342207feea434c59fe4493dc09c97ef3376c
695,426
def id_to_ec2_id(instance_id, template='i-%08x'): """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])""" return template % instance_id
7f1f65b4a846be1c46bc3d80c207f2b9ba0033af
695,427
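Example outputs (added) for id_to_ec2_id above; the default template zero-pads the hexadecimal id to 8 digits, and an alternative template is shown purely as an illustration.

assert id_to_ec2_id(5) == "i-00000005"
assert id_to_ec2_id(255, template="vol-%08x") == "vol-000000ff"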
def buildnavcell(prefix, dateString, mouseoverImg, mouseoutImg, name): """Build the HTML for a single navcell. prefix -- the string to tack on to the front of the anchor name dateString -- the date, as a string, to be used in the anchor name mouseoverImg -- name of the image to be displayed on mouseover mouseoutImg -- name of the image to be displayed on mouseout name -- name of this navcell """ return """<a href="timeline.html#%s%s" target="timeline" onclick="onimgs('%s', '%s', '%s'); return true;" onmouseover="chimgs('%s', '%s'); return true;" onmouseout="chimgs('%s', '%s'); return true;"><img src="%s" alt="" width="25" height="20" border="0" name="%s" /></a>""" \ % (prefix, dateString, name, mouseoverImg, mouseoutImg, name, mouseoverImg, name, mouseoutImg, mouseoutImg, name)
250ebba918b850a6d87fa5146fa9865c6acc14f9
695,428
def calc_prob_sr(pt, sl, freq, tgt_sr, rf=0.):
    """Calculate required probability wrt target SR

    Parameters
    ----------
    pt: float
        Profit Take
    sl: float
        Stop Loss
    freq: float
        Frequency of trading
    tgt_sr: float
        Target Sharpe Ratio
    rf: float, (default 0)
        Risk Free Rate

    Returns
    -------
    float: Required probability
    """
    diff = pt - sl
    a = (freq + tgt_sr ** 2) * diff ** 2
    b = diff * (2 * freq * (sl - rf) - tgt_sr ** 2 * diff)
    c = freq * (sl - rf) ** 2
    p = (-b + (b ** 2 - 4 * a * c) ** .5) / (2. * a)
    return p
96b017e8ec18ed5c267bfd9eb0176f961481982e
695,430
def encode(integer: int) -> bytes: """Encodes an integer as an uvarint. :param integer: the integer to encode :return: bytes containing the integer encoded as an uvarint """ def to_byte(integer: int) -> int: return integer & 0b1111_1111 buffer: bytearray = bytearray() while integer >= 0b1000_0000: buffer.append(to_byte(integer) | 0b1000_0000) integer >>= 7 buffer.append(to_byte(integer)) return bytes(buffer)
da3b6b320ddcc39ecf494fca564d6d3ae06faea9
695,434
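As a sketch (added), the standard varint illustration applied to the encode function above: values below 128 fit in one byte, and 300 splits into its low 7 bits plus a continuation byte.

assert encode(0) == b"\x00"
assert encode(127) == b"\x7f"
assert encode(300) == b"\xac\x02"   # 300 = 0b10_0101100 -> low 7 bits first, high bit marks continuation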
from typing import Dict
from typing import Any
import json
import base64


def extract_pubsub_payload(event: dict) -> Dict[str, Any]:
    """Extracts payload from the PubSub event body.

    Args:
        event: PubSub event body, e.g. { "message": { "data": <base64 encoded object> } }

    Returns:
        Dict with PubSub event body payload.

    Raises:
        KeyError: Raised when the payload doesn't contain required key(s).
        TypeError: Raised when the payload's message has the wrong datatype.
    """
    if not event.get("message"):
        raise KeyError("Payload doesn't contain the 'message' key")
    pubsub_message = event["message"]
    if not isinstance(pubsub_message, dict):
        raise TypeError("Event payload's message is of wrong data type.")
    if not pubsub_message.get("data"):
        raise KeyError("Payload's 'message' doesn't contain the 'data' key")
    return json.loads(base64.b64decode(pubsub_message["data"]).decode("utf-8"))
05dadc17078fb723a385b7d36a7b9dd005c7e931
695,436
import io def parse_aliases_file(file_path): """ Parses an emoji aliases text file. Returns a list of tuples in the form ('src_name', 'dst_name'). """ with io.open(file_path, encoding='utf-8') as fp: lines = fp.read().splitlines() aliases_list = [] for line in lines: line = line.strip() if not line or line.startswith('#'): continue # strip in-line comments comment_idx = line.find('#') if comment_idx > 0: line = line[:comment_idx].strip() aliases_list.append(tuple(line.split(';'))) return aliases_list
0ab98e0921655d7582c8a6261660da84f9287395
695,439
def make_goal(func): """Decorator that turns a function of the form f(Substitution, ...) into a goal-creating function. For example: @make_goal def same(s, u, v): ... is equivalent to def same(u, v): def goal(s): ... return goal """ def wrap(*args, **kwargs): def goal(s): return func(s, *args, **kwargs) return goal if func.__doc__ is not None: wrap.__doc__ = "produce a " + func.__doc__ return wrap
d5320d46d45fa749b7f3e6cea173fd41acca29b0
695,440
from platform import python_compiler def python_version(name): """ Get compiler version used in Python. Parameters ---------- name : str Compiler name. Returns ------- float: version """ version_str = python_compiler() if name not in version_str.lower(): return 0.0 return float(version_str.split(" ", 2)[1].rsplit(".", 1)[0])
2353ca4d9156560e7109d83673a0c485c2b27437
695,443
def get_table_rows(soup): """ Get all table rows from the first tbody element found in soup parameter """ tbody = soup.find('tbody') return [tr.find_all('td') for tr in tbody.find_all('tr')]
b93e2968ff289c1380ee0f0be571897d9db06437
695,444
def topleft2corner(topleft): """ convert (x, y, w, h) to (x1, y1, x2, y2) Args: center: np.array (4 * N) Return: np.array (4 * N) """ x, y, w, h = topleft[0], topleft[1], topleft[2], topleft[3] x1 = x y1 = y x2 = x + w y2 = y + h return x1, y1, x2, y2
094b12c4c112e906714e717c7fce79321610f69c
695,446
def crop(im, slices=(slice(100, -100), slice(250, -300))): """Crop an image to contain only plate interior. Parameters ---------- im : array The image to be cropped. slices : tuple of slice objects, optional The slices defining the crop. The default values are for stitched images from the Marcelle screen. Returns ------- imc : array The cropped image. Examples -------- >>> im = np.zeros((5, 5), int) >>> im[1:4, 1:4] = 1 >>> crop(im, slices=(slice(1, 4), slice(1, 4))) array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) """ return im[slices]
e3e7c2f737b0e589e6491cba44eb3c3aaee930d0
695,448
def build_speech_response(title, ssml_output, plain_output): """Build a speech JSON representation of the title, output text, and end of session.""" # In this app, the session always ends after a single response. return { 'outputSpeech': { 'type': 'SSML', 'ssml': ssml_output }, 'card': { 'type': 'Simple', 'title': title, 'content': plain_output }, 'shouldEndSession': True }
2d38b9d0d8a261c6011eec3416227e972329de86
695,454
import copy def nondimensionalise_parameters(params): """ Nondimensionalise parameters. Arguments --------- rc : float Characteristic radius (length) qc : float Characteristic flow Ru : float Upstream radius Rd : float Downstream radius L : float Vessel length k1 : float First constant from the relation Eh/r0 k2 : float Second constant from the relation Eh/r0 k3 : float Third constant from the relation Eh/R0 rho : float Density of blood Re : float Reynolds' number nu : float Viscosity of blood p0 : float Diastolic pressure Returns ------- return : tuple Tuple of dimensionless quantities, including Reynold's number """ param = params.param sol = params.solution geo = params.geo nondim = copy.deepcopy(param) rc = param['rc'] rho = param['rho'] qc = param['qc'] nondim['Ru'] = param['Ru']/rc nondim['Rd'] = param['Rd']/rc nondim['R_term'] = param['R_term']/rc nondim['L'] = param['L']/rc nondim['k1'] = param['k1']*rc**4/rho/qc**2 nondim['k2'] = param['k2']*rc nondim['k3'] = param['k3']*rc**4/rho/qc**2 nondim['Re'] = param['qc']/param['nu']/rc nondim['nu'] = param['nu']*rc/qc nondim['p0'] = param['p0']*rc**4/rho/qc**2 nondim['p_term'] = param['p_term']*rc**4/rho/qc**2 nondim['R1'] = param['R1']*rc**4/rho/qc nondim['R2'] = param['R2']*rc**4/rho/qc nondim['CT'] = param['CT']*rho*qc**2/rc**7 return nondim
d12200d10b4ee25bf3bfdec7f461e00e431cd7e0
695,457
def qtr_offset(qtr_string, delta=-1): """ Takes in quarter string (2005Q1) and outputs quarter string offset by ``delta`` quarters. """ old_y, old_q = map(int, qtr_string.split('Q')) old_q -= 1 new_q = (old_q + delta) % 4 + 1 if new_q == 0: new_q = 4 new_y = old_y + (old_q + delta)//4 return '{:.0f}Q{:d}'.format(new_y, new_q)
e14465ad5ab600a809592a4a3a8d2aa624035515
695,458
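Illustration (added, not the author's) of how qtr_offset above wraps across year boundaries with the default delta of -1 and with positive offsets.

assert qtr_offset("2005Q1") == "2004Q4"
assert qtr_offset("2005Q4", delta=1) == "2006Q1"
assert qtr_offset("2005Q2", delta=5) == "2006Q3"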
from zlib import decompress from base64 import b64decode def decompress_string(string: str) -> str: """ Decompress a UTF-8 string compressed by compress_string :param string: base64-encoded string to be decompressed :return: original string """ # b64 string -> b64 byte array -> compressed byte array b64_bytes = b64decode(string.encode('utf-8')) # compressed byte array -> byte array -> original string string_bytes = decompress(b64_bytes) string_decompressed = string_bytes.decode('utf-8') return string_decompressed
e9c8cfd4f226e4bae5d00e32428c8c028b03797c
695,459
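A hedged round-trip for decompress_string above: the matching compress_string is not shown in this row, so the compression side is reconstructed here from zlib and base64 under the same UTF-8 encoding assumptions.

from base64 import b64encode
from zlib import compress

payload = b64encode(compress("hello world".encode("utf-8"))).decode("utf-8")
assert decompress_string(payload) == "hello world"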
def create_dense_state_space_columns(optim_paras): """Create internal column names for the dense state space.""" columns = list(optim_paras["observables"]) if optim_paras["n_types"] >= 2: columns += ["type"] return columns
979a17c32dbe9a31e52b2dfb16d9771bd7a4746b
695,460
def calculate_overall_score( google_gaps_percentage: float, transcript_gaps_percentage: float, google_confidence: float, alignment_score: float, weight_google_gaps: float, weight_transcript_gaps: float, weight_google_confidence: float, weight_alignment_score: float ) -> float: """ Calculates a score to predict if an alignment is "good" or not. :param google_gaps_percentage: Percentage of gaps added to google's STT output :param transcript_gaps_percentage: Percentage of gaps added to the transcript :param google_confidence: Confidence of google's STT :param alignment_score: Final score of the alignment algorithm :param weight_google_gaps: Weight for weighted sum :param weight_transcript_gaps: Weight for weighted sum :param weight_google_confidence: Weight for weighted sum :param weight_alignment_score: Weight for weighted sum :return: Score between 0 and 1 """ return ( (weight_google_gaps * google_gaps_percentage) + (weight_transcript_gaps * transcript_gaps_percentage) + (weight_google_confidence * google_confidence) + (weight_alignment_score * alignment_score) )
ea7d92c694dd477b9cd801eb8c91f969a8d757e1
695,461
def recursive_thue_morse(n):
    """The recursive definition of the Thue-Morse sequence.

    The first few terms of the Thue-Morse sequence are:
    0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . ."""
    if n == 0:
        return 0
    if n % 2 == 0:
        # integer division keeps the recursion on exact integers
        return recursive_thue_morse(n // 2)
    if n % 2 == 1:
        return 1 - recursive_thue_morse((n - 1) // 2)
8fba270d9a62bf3ea4a2b320a19876228255bd0f
695,464
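As a sanity check (added), the first eight terms produced by recursive_thue_morse above match the sequence quoted in its docstring.

assert [recursive_thue_morse(n) for n in range(8)] == [0, 1, 1, 0, 1, 0, 0, 1]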
from typing import Dict def add_information(statistic_dict: Dict, statistic_information: Dict): """ Add information to existing dict. Arguments: statistic_dict {Dict} -- Existing dict. statistic_information {Dict} -- Data to add. """ statistic_dict["dates"].append(statistic_information["datetime"]) total = statistic_information["people_total"] people_with_mask = statistic_information["people_with_mask"] people_without_mask = statistic_information["people_without_mask"] statistic_dict["people_total"].append(total) statistic_dict["people_with_mask"].append(people_with_mask) statistic_dict["people_without_mask"].append(people_without_mask) mask_percentage = ( statistic_information["people_with_mask"] * 100 / total if total != 0 else 0 ) statistic_dict["mask_percentage"].append(mask_percentage) statistic_dict["visible_people"].append( people_with_mask + people_without_mask ) return statistic_dict
3b45591d4fe96a5cd93552b322869495a007fe86
695,465
def flip_edges(adj, edges): """ Flip the edges in the graph (A_ij=1 becomes A_ij=0, and A_ij=0 becomes A_ij=1). Parameters ---------- adj : sp.spmatrix, shape [n, n] Sparse adjacency matrix. edges : np.ndarray, shape [?, 2] Edges to flip. Returns ------- adj_flipped : sp.spmatrix, shape [n, n] Sparse adjacency matrix with flipped edges. """ adj_flipped = adj.copy().tolil() if len(edges) > 0: adj_flipped[edges[:, 0], edges[:, 1]] = 1 - adj[edges[:, 0], edges[:, 1]] return adj_flipped
0e163616ddb645e636424d673500f07aedabf336
695,466
import socket def _check_ip_and_port(ip_address, port, timeout): """Helper function to check if a port is open. Args: ip_address(str): The IP address to be checked. port(int): The port to be checked. timeout(float): The timeout to use. Returns: bool: True if a connection can be made. """ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as socket_: socket_.settimeout(timeout) return not bool(socket_.connect_ex((ip_address, port)))
f97ceaba05c54c4bb70e8731462469e4d89e1bbf
695,467
def _add_thumb(s): """ Modifies a string (filename, URL) containing an image filename, to insert '.thumb' """ parts = s.split(".") parts.insert(-1, "thumb") if parts[-1].lower() not in ['jpeg', 'jpg']: parts[-1] = 'jpg' return ".".join(parts)
9fe7ba9d1e739828471e07091008cb8de47ea312
695,469
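Example behaviour (added) of _add_thumb above, using made-up file names.

assert _add_thumb("photo.png") == "photo.thumb.jpg"    # non-JPEG extensions become .jpg
assert _add_thumb("pic.JPEG") == "pic.thumb.JPEG"      # JPEG variants are kept as-is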
from typing import Any import asyncio import inspect def is_coroutine(obj: Any) -> bool: """Check to see if an object is really an asyncio coroutine. :param Any obj: any object. :return: `True` or `False`. """ return asyncio.iscoroutinefunction(obj) or inspect.isgeneratorfunction(obj)
595582f9bd8fae930532cea4a0aa7e3b05e010be
695,470
def get_id_name(module, id_to_check): """Return the ID name if defined, otherwise return the numbered ID.""" for name in module.symbol_name_to_id: if module.symbol_name_to_id[name] == id_to_check: return name return str(id_to_check)
5647b32d86bee44d302e551ffc0e8c290254ea10
695,476
def rect_radius(ellipsoid): """ Computes the Rectifying Radius of an Ellipsoid with specified Inverse Flattening (See Ref 2 Equation 3) :param ellipsoid: Ellipsoid Object :return: Ellipsoid Rectifying Radius """ nval = (1 / float(ellipsoid.inversef)) / (2 - (1 / float(ellipsoid.inversef))) nval2 = nval ** 2 return (ellipsoid.semimaj / (1 + nval) * ((nval2 * (nval2 * (nval2 * (25 * nval2 + 64) + 256) + 4096) + 16384) / 16384.))
525e6428de9a34f5f1f10bdb9bc1b0943f435e70
695,477
def endgame_score_connectfour_faster(board, is_current_player_maximizer) : """Given an endgame board, returns an endgame score with abs(score) >= 1000, returning larger absolute scores for winning sooner.""" chains_1 = board.get_all_chains(current_player=is_current_player_maximizer) chains_2 = board.get_all_chains(current_player= not(is_current_player_maximizer)) for chain in chains_1: if len(chain) >= 4: return 1100 - board.count_pieces() for chain in chains_2: if len(chain) >= 4: return -1100 + board.count_pieces() return 0
24f22b4d76edcdae5918c4327855aa432f404a68
695,479
from typing import Any def _is_simplekv_key_value_store(obj: Any) -> bool: """ Check whether ``obj`` is the ``simplekv.KeyValueStore``-like class. simplekv uses duck-typing, e.g. for decorators. Therefore, avoid `isinstance(store, KeyValueStore)`, as it would be unreliable. Instead, only roughly verify that `store` looks like a KeyValueStore. """ return hasattr(obj, "iter_prefixes")
1529b4531aabc50163cd00ebbb6b4aef1f99e5bd
695,483
import typing def list_difference(list_a, list_b) -> typing.List: """ Function that returns difference between given two lists. """ # Returning. return [element for element in list_a if element not in list_b]
0746a34c4f24ae2f46d5c176568e733d54ba7dea
695,484
def check_ranges(cpe_item, version): """ According to the specification, CPE uses also ranges with the keywords 'version[Start|End][Including|Excluding]'. This way it specifies ranges of versions which are affected by the CVE, for example versions from 4.0.0 to 4.5.0. :param cpe_item: cpe data :param version: version to be checked :return: True if the version is in the specified range """ if "versionStartIncluding" in cpe_item and \ version < cpe_item['versionStartIncluding']: return False if "versionStartExcluding" in cpe_item and \ version <= cpe_item['versionStartExcluding']: return False if "versionEndIncluding" in cpe_item and \ version > cpe_item["versionEndIncluding"]: return False if "versionEndExcluding" in cpe_item and \ version >= cpe_item["versionEndExcluding"]: return False return True
614148e671b7d6c526badf02f784bd7669b37ec0
695,486
def CStringIo_to_String(string_io_object): """Converts a StringIO.StringIO object to a string. Inverse of String_to_CStringIo""" return string_io_object.getvalue()
21f2b027f1eb43063bc24df25db2c2098d894d46
695,488
def is_valid_response(resp): """ Validates a Discovery response Fail if a failure response was received """ return resp.get("result") != "failure"
e008cc34eb43906bc67f2ad645625777241b76e2
695,489
def rateTSCan(params: dict, states: dict) -> float: """ Temperature Sum [oC d] TSCan is set from negative in the vegetative phase and to 0oC d at the start of the generative phase (i.e. the first fruit set). When TSCan exceeds 0 oC d, the carbohydrate distribution to the fruits increases linearly from zero till its full potential is reached at the temperature sum TSSumEnd . At values higher than TSSumEnd, the carbohydrate distribution to the fruits remains at its potential value. Parameters ---------- params: dictionary tau: float Time constant states: dictionary TCan: float TCan is the simulated or measured canopy temperature. [oC] Returns ------- tsCan_: float Development rate of the plant, expressed as the time derivative of the temperature sum [oC]. """ tsCan_ = (1/params['tau'])*states["TCan"] return tsCan_
7ab4c8242c26a846c719a304ead1cb7748513c0b
695,497
import requests def get_from_api(url, *, verbose=False): """ Performs GET request to URL with the ProPublica API Key header """ vprint = lambda *a, **kwa: print(*a, **kwa) if verbose else None with open("APIKey.txt", "r") as keyFile: apiKey=keyFile.readline() if apiKey[-1] == '\n': apiKey = apiKey[:-1] headers = {'X-API-Key': apiKey} vprint("getting", url, "with headers", headers, "...") r = requests.get(url, headers=headers) vprint("...done") return r
b0ac6365008e145d08376087b97d20a9df0f7d65
695,498
def _get_nc_attr(var, attr, default=""): """ _get_nc_attr This function, looks inside a netCDF 4 Dataset object for the attributes of a variable within the object. If the attribute specified for the variable exists then the attribute is returned. If the attribute is not associated with the variable then an empty string is returned as default :param var: variable name [string] :param attr: attribute name [string] :param default: Default value to be returned [string] :return: variable attribute (default is empty) [string] """ if attr in var.ncattrs(): return getattr(var, attr) else: return default
553c44a41a4bd1abf0dea721eff2bc6c19496a73
695,503
import requests import json def yelp_search(api_key, params): """ Makes an authenticated request to the Yelp API api_key: read text file containing API key parameters: term: keywords to search (tacos, etc.) location: location keywords (Seattle, etc.) Returns JSON """ search_url = "https://api.yelp.com/v3/businesses/search" headers = {"Authorization": "Bearer %s" % api_key} response = requests.get(search_url, params=params, headers=headers) data = json.loads(response.text) return data
cc4e79c72822f805c49f9be021297985cef396c3
695,509
def get_valid_image_ranges(experiments): """Extract valid image ranges from experiments, returning None if no scan""" valid_images_ranges = [] for exp in experiments: if exp.scan: valid_images_ranges.append(exp.scan.get_valid_image_ranges(exp.identifier)) else: valid_images_ranges.append(None) return valid_images_ranges
ddfcb091fabb0c70f7a6a3fce8b43396574a797c
695,510
def isc_250m_to_1km ( i_sc_250m ) : """ return the 1km grid index cross track of a 250m pixel """ return i_sc_250m / 4.
a183dbf59bad703e70ce9c7e09c2bac7a6417fc2
695,511
import re def extract_fasta_id(seqIdString): """ Extract the id in the fasta description string, taking only the characters till the '|'. """ seqId = None regex1 = re.compile(r'^\s*?([^|]+?)(\s|\||$)') seqIdSearch = re.search(regex1, seqIdString) if seqIdSearch: seqId = seqIdSearch.group(1) return seqId
fa3cc6a57fc2ded2f1ee62a80254076eada1df63
695,512
def compile_vprint_function(verbose): """Compile a verbose print function Args: verbose (bool): is verbose or not Returns: [msg, *args]->None: a vprint function """ if verbose: return lambda msg, *args: print(msg.format(*args)) return lambda *_: None
d2fe3b93b09011f63df54eb162e270303c328cb9
695,513
from typing import Callable def custom_splitter(separator: str) -> Callable: """Custom splitter for :py:meth:`flatten_dict.flatten_dict.unflatten()` accepting a separator.""" def _inner_custom_splitter(flat_key) -> tuple[str, ...]: keys = tuple(flat_key.split(separator)) return keys return _inner_custom_splitter
88417ecc172986e4cb8423f443da470e917406d2
695,515
def get_required_capacity_types_from_database(conn, scenario_id): """ Get the required type modules based on the database inputs for the specified scenario_id. Required modules are the unique set of generator capacity types in the scenario's portfolio. :param conn: database connection :param scenario_id: int, user-specified scenario ID :return: List of the required type modules """ c = conn.cursor() project_portfolio_scenario_id = c.execute( """SELECT project_portfolio_scenario_id FROM scenarios WHERE scenario_id = {}""".format(scenario_id) ).fetchone()[0] required_capacity_type_modules = [ p[0] for p in c.execute( """SELECT DISTINCT capacity_type FROM inputs_project_portfolios WHERE project_portfolio_scenario_id = ?""", (project_portfolio_scenario_id, ) ).fetchall() ] return required_capacity_type_modules
bdd4f101465c55b712eb3f54797bbf213ed50b80
695,517
def radius_sonic_point(planet_mass, sound_speed_0): """ Radius of the sonic point, i.e., where the wind speed matches the speed of sound. Parameters ---------- planet_mass (``float``): Planetary mass in unit of Jupiter mass. sound_speed (``float``): Constant speed of sound in unit of km / s. Returns ------- radius_sonic_point (``float``): Radius of the sonic point in unit of Jupiter radius. """ grav = 1772.0378503888546 # Gravitational constant in unit of # jupiterRad * km ** 2 / s ** 2 / jupiterMass return grav * planet_mass / 2 / sound_speed_0 ** 2
15965142ad7bd9ef3bdab1a215c0ed906b1e9102
695,518
def _process_type(type_): """Process the SQLAlchemy Column Type ``type_``. Calls :meth:`sqlalchemy.sql.type_api.TypeEngine.compile` on ``type_`` to produce a string-compiled form of it. "string-compiled" meaning as it would be used for a SQL clause. """ return type_.compile()
ac9cc08faf958ad226da1bf08381b4bedd400e49
695,520
import pathlib import typing def _get_file_format_id(path: pathlib.Path, file_format: typing.Optional[str]) -> str: """Determine the file format for writing based on the arguments.""" formats = { "yaml": path.name.endswith((".yml", ".yaml")), "toml": path.name.endswith(".toml"), "json": path.name.endswith(".json"), } finder = (k for k, v in formats.items() if file_format == k or v) return next(finder, "json")
d2fc516ba1a1fae1c7d91e6bac351ca7bead5f04
695,521
def remove_common_elements(package_list, remove_set):
    """
    Remove the common elements between package_list and remove_set.

    Note that this is *not* an XOR operation: entries of remove_set that do not
    appear in package_list are simply ignored.

    Parameters
    ----------
    package_list : list
        List with string elements representing the packages from the
        requirements file. Assumes that the list has "==" to denote package
        versions.
    remove_set : set
        Set with the names of packages to be removed from requirements.

    Returns
    -------
    list
        List of packages not present in remove_set.
    """
    package_not_in_remove_set = []
    for package in package_list:
        package_name = package.split("==")[0].strip()
        if package_name not in remove_set:
            package_not_in_remove_set.append(package)
    return package_not_in_remove_set
99e6fc3d7273de551d9fc8e4f8dd5b1628b93dda
695,525
import torch def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = kornia.quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis
0f8fa2847bfe0c4e3305dce9f5d7d027c872e1f7
695,530
def ips_to_metric(d, min_depth, max_depth): """ https://github.com/fyu/tiny/blob/4572a056fd92696a3a970c2cffd3ba1dae0b8ea0/src/sweep_planes.cc#L204 Args: d: inverse perspective sampling [0, 1] min_depth: in meter max_depth: in meter Returns: """ return (max_depth * min_depth) / (max_depth - (max_depth - min_depth) * d)
5914277a9548caea02eab78f3370b91f4957f480
695,534
import re def check_text(line): """ Compares a line to see if there are any blacklisted words""" is_okay = True blacklist = ["CHAPTER", "Part", "Section"] rgx_check = re.search('([A-Z])\. +(\w+)', line) if rgx_check is not None: is_okay = False if line == "": is_okay = False else: for word in blacklist: if word in line: is_okay = False if "REPEALED" in line: is_okay = True return is_okay
5bf261a3dc5f4359b97affc1897c327423704fff
695,535
def duplicate_count(text: str) -> int: """Counts amount of duplicates in a text. Examples: >>> assert duplicate_count("abcde") == 0 >>> assert duplicate_count("abcdea") == 1 >>> assert duplicate_count("indivisibility") == 1 """ return len( set(item for item in text.lower() if text.lower().count(item) > 1) )
0a0e7c79e3370050deff190b6b71d2e6ed83768d
695,538
def _get_id_from_extension_link(ext_link): """Get the id from an extension link. Expects the id to come after the "/extension/service/" parts of the link Example ext_link: '/admin/extension/service/12345/' Example return: '12345' :param str ext_link: the extension link to extract the id from :return: the extension id :rtype: str """ link_dirs = ext_link.split('/') num_dirs = len(link_dirs) ind = 0 while ind < num_dirs: # Ensure that the id comes after /extension/service if link_dirs[ind] == 'extension' and ind < num_dirs - 2 and \ link_dirs[ind + 1] == 'service': return link_dirs[ind + 2] ind += 1 return ''
1a4ee4ea3042af22e989bb5b12bda4e0285e8719
695,541
def get_scale(bounding_box, target_size): """ Get a scale that would bring smaller side of bounding box to have target_size :param bounding_box: bounding box :param target_size: target size for smaller bounding box side :return: float """ horizontal_side = bounding_box.bounds[2] - bounding_box.bounds[0] vertical_side = bounding_box.bounds[3] - bounding_box.bounds[1] smaller_side = horizontal_side if horizontal_side < vertical_side else vertical_side return target_size / smaller_side
864de85683ae427d4a4b62d8ca719dc9d35fa26e
695,542
def line_range(lines, ind1, comment_flag='#'): """ Find a range of data lines within a line list. Given an input line list and a starting index, subsequent lines are examined to see where the next comment line is. Comment lines are assumed to start with the # character by default, or one can set this with the comment_flag variable in the call. Lines that are not comments are assumed to be data lines. The index of the next comment line is returned, or the index that gives a range to the end of the line list where there is no such comment line after the index specified. Parameters ---------- lines : A list of input lines (assumed to be from the readlines() function) ind1 : A starting index in the list of lines comment_flag: An optional string variable that marks comment lines Returns ------- n1 : an integer value for the next comment line (assumed to start with '#') in the list of input lines, or the index for the length of the line list if no other comment line is found """ ncomment = len(comment_flag) for n1 in range(ind1+1, len(lines)): if comment_flag in lines[n1][0:ncomment]: return n1 return len(lines)
6e845f3d44c4093e8e403eaf41317cf14b0299c4
695,543
from typing import Optional import re def _extract_job_id(job_name: str) -> Optional[str]: """Extracts job id from job name. Args: job_name: The full job name. Returns: The job id or None if no match found. """ p = re.compile( 'projects/(?P<project_id>.*)/locations/(?P<region>.*)/pipelineJobs/(?P<job_id>.*)' ) result = p.search(job_name) return result.group('job_id') if result else None
a9e9341961ae9f9df5a09b943a3e90c23fdace25
695,544
def _get_QDoubleSpinBox(self): """ Get current value for QDoubleSpinBox """ return self.value()
d63355786fcc72b0f0d1d5064ea26687cf7fa78a
695,548
def addition(a, b): """ Adds two given values Parameters: a (int): First value b (int): Second value Returns: int: sum result """ return a + b
464bb6c1340c050dc661052e5da331d50c0ffecb
695,550
def str2num(val: str): """ Try to convert to number, else keep as string :param val: String value :return: int, float or string """ try: return int(val) except ValueError: try: return float(val) except ValueError: return val
d8323d07de1751843506095fdd41de82b6ffa55a
695,551
import hashlib def hash_file(filename): """ Utility function to hash a file Args: filename (str): name fo file to hash """ with open(filename, "rb") as f: chunk = f.read() return hashlib.md5(chunk).digest()
66a0ed3e23a45fd23117779190ca5a339808be6e
695,553
def smallest_multiple_of_n_geq_m(n: int, m: int) -> int: """ Returns the smallest multiple of n greater than or equal to m. :param n: A strictly positive integer. :param m: A non-negative integer. :return: The smallest multiple of n that is greater or equal to m. """ return m + ((n - (m % n)) % n)
97b9796f5093b378a078dd2083058246c79d9c46
695,557
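A couple of worked values (added) for smallest_multiple_of_n_geq_m above.

assert smallest_multiple_of_n_geq_m(4, 10) == 12   # first multiple of 4 at or above 10
assert smallest_multiple_of_n_geq_m(5, 20) == 20   # m itself when it is already a multiple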
def is_valid_interval(x): """ Returns true iff x is a well-shaped concrete time interval (i.e. has valid beginning and end). """ try: return hasattr(x, "hasBeginning") and len(x.hasBeginning) > 0 and \ len(x.hasBeginning[0].inTimePosition) > 0 and \ len(x.hasBeginning[0].inTimePosition[0].numericPosition) > 0 and hasattr(x, "hasEnd") and \ len(x.hasEnd) > 0 and len(x.hasEnd[0].inTimePosition) > 0 and \ len(x.hasEnd[0].inTimePosition[0].numericPosition) > 0 and \ x.hasBeginning[0].inTimePosition[0].numericPosition < x.hasEnd[0].inTimePosition[0].numericPosition except TypeError: return False
d8e761955e2f4b5e4b199259281fbd4915272fa5
695,560
def cmd(func): """Indicate that ``func`` returns a shell command with this decorator. If this function is an operation function defined by :class:`~.FlowProject`, it will be interpreted to return a shell command, instead of executing the function itself. For example: .. code-block:: python @FlowProject.operation @flow.cmd def hello(job): return "echo {job.id}" .. note:: The final shell command generated for :meth:`~.FlowProject.run` or :meth:`~.FlowProject.submit` still respects directives and will prepend e.g. MPI or OpenMP prefixes to the shell command provided here. """ if getattr(func, "_flow_with_job", False): raise RuntimeError( "@cmd should appear below the @with_job decorator in your script" ) setattr(func, "_flow_cmd", True) return func
594d350767a2dc052a776bb2aa260ec7fbd76649
695,566
def Normalize(tensor, mean, std): """ Given mean: (R, G, B) and std: (R, G, B), will normalize each channel to `channel = (channel - mean) / std` Given mean: GreyLevel and std: GrayLevel, will normalize the channel to `channel = (channel - mean) / std` :param tensor: image tensor to be normalized :param mean: mean of every channel :param std: standard variance of every channel :return: normalized tensor """ for t, m, s in zip(tensor, mean, std): t -= m t /= s return tensor
e6eb671b104fce84420da9fb641020fb6f43e2f8
695,567
def _strong_gens_from_distr(strong_gens_distr): """ Retrieve strong generating set from generators of basic stabilizers. This is just the union of the generators of the first and second basic stabilizers. Parameters ========== ``strong_gens_distr`` - strong generators distributed by membership in basic stabilizers Examples ======== >>> from sympy.combinatorics import Permutation >>> Permutation.print_cyclic = True >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> from sympy.combinatorics.util import (_strong_gens_from_distr, ... _distribute_gens_by_base) >>> S = SymmetricGroup(3) >>> S.schreier_sims() >>> S.strong_gens [(0 1 2), (2)(0 1), (1 2)] >>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens) >>> _strong_gens_from_distr(strong_gens_distr) [(0 1 2), (2)(0 1), (1 2)] See Also ======== _distribute_gens_by_base """ if len(strong_gens_distr) == 1: return strong_gens_distr[0][:] else: result = strong_gens_distr[0] for gen in strong_gens_distr[1]: if gen not in result: result.append(gen) return result
03a81597659dbec7e537d7f0b50825ae8983e318
695,574
def generate_solution(x: int, n: int) -> int: """This is the "naive" way to compute the solution, for testing purposes. In this one, we actually run through each element. """ counter = 0 for i in range(1, n + 1): for j in range(1, n + 1): if i * j == x: counter += 1 return counter
fb074da93890f19b3aebce9babf43a9d29c68897
695,579
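For the naive counter generate_solution above, a small hand-checkable case (added for illustration): with x = 12 and n = 6 the qualifying pairs are (2, 6), (3, 4), (4, 3) and (6, 2).

assert generate_solution(12, 6) == 4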
from functools import reduce def _merge_max_mappings(*mappings): """Merge dictionaries based on largest values in key->value. Parameters ---------- *mappings : Dict[Any, Any] Returns ------- Dict[Any, Any] Examples -------- >>> _merge_max_mappings({"a":1, "b":4}, {"a":2}) {"a":2, "b":4} """ def _merge_max(d1, d2): d1.update((k, v) for k, v in d2.items() if d1.get(k, 0) < v) return d1 return reduce(_merge_max, mappings, {})
d93ca0eedd5c112a293d7f25902f56192924fa12
695,582
import random def generate_grid(height = 10, width = 10, seed = None, sparcity = 75, separator = "#", gap = "."): """Create empty grid as base for crossword :param height: number of rows in puzzle <1, 1000> :param width: number of columns in puzzle <1, 1000> :param seed: number used as seed in RNG :param sparcity: percentage of grid to fill with characters (0, 100) :param separator: character used as BLANK :param gap: character used to indicate space to fill :raise ValueError: if arguments have incorrect value :raise TypeError: if arguments have incorrect type :returns: empty grid """ if not 1 <= height <= 1000: raise ValueError("Incorrect height") if not 1 <= width <= 1000: raise ValueError("Incorrect width") if not 0 < sparcity < 100: raise ValueError("Incorrect sparcity") if not (separator.isascii() and len(separator)==1): raise TypeError("Separator has to be single character") if not (gap.isascii() and len(gap)==1): raise TypeError("Gap has to be single character") if separator==gap: raise ValueError("Separator cannot be equal to gap character") random.seed(seed) grid = [[gap for x in range(width)] for y in range(height)] for stop in random.sample(range(0, height * width), (100 - sparcity)*(height * width)//100): grid[stop//width][stop%width] = separator return grid
a0e9d8171f2b3cca7b6bacc3444a1de447ba5069
695,585
from typing import Dict def get_existing_mapped_pipestep(func_name:str, code_to_func:Dict[str,int]): """ Given an existing mapping of function:id, return the id associated with the function Will raise an error if the function does not already have an entry in the mapping :param func_name: the name of the function to be mapped :param code_to_func: the existing function:id mapping :returns: (the mapping dictionary, id of the function) """ func_to_code = {v:int(k) for k,v in code_to_func.items()} # if we've already mapped this function if func_name in func_to_code: return code_to_func, func_to_code[func_name] else: raise ValueError(f'{func_name} not already in mapping')
aea6c32af42cd2b80b720b383308258093ad3c73
695,591
import hmac import hashlib import base64 def _GetSignature(key, url): """Gets the base64url encoded HMAC-SHA1 signature of the specified URL. Args: key: The key value to use for signing. url: The url to use for signing. Returns: The signature of the specified URL calculated using HMAC-SHA1 signature digest and encoding the result using base64url. """ signature = hmac.new(key, url, hashlib.sha1).digest() return base64.urlsafe_b64encode(signature)
3fd070040aee0bee67ae9517ba65005509ca1bfa
695,592
import collections def filter_dataframes(dfs, xs, ys, table_ys, args_list, valid_keys): """Process necessary information from dataframes in the Bokeh format. In the following explanation, N is assumed to be the number of experiments. For xs_dict and ys_dict: These are dictionary of list of list. To make it simple, we focus on particular `x` in `xs`. Everything is the same for `ys_dict`. `x` is usually a timescale values such as iteration or epoch. Here are some characteristics: 1. xs_dict[x] is list of list 2. len(xs_dict[x]) == N 3. xs_dict[x][i] is list. For example, if log is recorded every epoch and `x` is epoch, xs_dict[x][i] == [1, 2, 3, 4, ...]. For tables: This is a dictionary of list of scalars or strings. The keys correspond to the column keys of the data table. The keys are the combination of all `valid_keys` and `table_ys`. tables[key][i] is `key` value recorded in the i-th experiment. For example, if key=='main/loss', this is the minimum loss value during training time recorded for the i-th experiment. Args: dfs (list of pd.DataFrame) xs (list of strings) ys (list of strings) table_ys (dictionary) args_list (list of dictionaries) valid_keys (list of strings) """ # Descs: descriptions # ys_dict == {string (y): List(Serial Data)} xs_dict = {x: [] for x in xs} ys_dict = {y: [] for y in ys} tables = collections.OrderedDict( [(key, []) for key in ['index'] + valid_keys + list(table_ys.keys())]) for i, args in enumerate(args_list): # get df from a result tmp = dfs for key, val in args.items(): if val is None: tmp = tmp[tmp[key].isnull()] else: tmp = tmp[tmp[key] == val] for x in xs: xs_dict[x].append(tmp[x].values.tolist()) for y in ys: ys_dict[y].append(tmp[y].values.tolist()) for table_y, value_type in table_ys.items(): if value_type == 'min': tables[table_y].append(tmp[table_y].min()) elif value_type == 'max': tables[table_y].append(tmp[table_y].max()) else: raise ValueError for key in valid_keys: if key in args: tables[key].append(args[key]) else: tables[key].append(None) tables['index'] = list(range(len(args_list))) return xs_dict, ys_dict, tables
903ce855378d174370117d9cb729f1052c682ac4
695,596