Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k).
def boolean(flag):
    """Convert a string to a boolean."""
    s = flag.lower()
    if s in ('1', 'yes', 'true'):
        return True
    elif s in ('0', 'no', 'false'):
        return False
    raise ValueError('Unknown flag %r' % s)
9469314a87b048d428691d06722c374898fb848c
32,475
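A quick doctest-style sketch of `boolean` above (illustrative inputs; assumes the function is in scope):
>>> boolean('Yes')
True
>>> boolean('0')
False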
import random


def choose(population, weights=None, k=None):
    """
    Chooses k times from the given population with an optional weighted probability.

    :param population: the population to choose from
    :param weights: the weights attached to each population element
    :param k: the number of times to choose
    :return: an element of the list if k is None, or a k-sized list of choices
    """
    choice = random.choices(population, weights=weights, k=k or 1)
    return choice if k else choice[0]
4a78dde05dba4f9774ae64f0b85bd89e61204b89
32,477
def _get_arg_config_file(args):
    """Return the YAML config file path as a string."""
    if not args.config_file:
        raise ValueError("YAML Config File was not supplied.")
    return args.config_file
34e4570cee420035cbaeab3c852069c4abf6a3ae
32,478
def normalize_whitespace(value):
    """Removes repeated whitespace from string"""
    value = " ".join(value.split())
    return value
e268e12665bb50d96c9418dfc999be20a2c96b37
32,480
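A quick doctest-style sketch of `normalize_whitespace` above (illustrative input):
>>> normalize_whitespace('  foo   bar  baz ')
'foo bar baz'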
def abbr_status(value):
    """Converts RFC Status to a short abbreviation"""
    d = {'Proposed Standard': 'PS',
         'Draft Standard': 'DS',
         'Standard': 'S',
         'Historic': 'H',
         'Informational': 'I',
         'Experimental': 'E',
         'Best Current Practice': 'BCP',
         'Internet Standard': 'IS'}
    return d.get(value, value)
08025ed44e9c8ea725755f9a07b6a5a04834f896
32,490
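A quick doctest-style sketch of `abbr_status` above; unknown statuses pass through unchanged (illustrative inputs):
>>> abbr_status('Proposed Standard')
'PS'
>>> abbr_status('Withdrawn')
'Withdrawn'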
def encode_base64(base64_string: str) -> bytes:
    """Convert base64 string to bytes."""
    return bytes(base64_string, encoding="UTF8")
ba0263624b4ce25bac4f19684cee60db351d9b40
32,491
def between(min, max):
    """Returns a function to test if a value lies between min and max"""
    def op(x):
        if min < x < max:
            return True
        print("ERROR: Value must be between {} and {}".format(min, max))
        return False
    return op
2ccc2b78be3f4a85fb910e392f2c9f291e15d19b
32,494
async def hello_user(name='Regina'):
    """Returns a simple greeting 👋"""
    return {'user_name': f'Hello {name}'}
5fb582fed75d9abee001fb2f0c1425d5a0abddf4
32,496
def _slice_extent_axis(ext_min, ext_max, fctr):
    """Slice an extent into multiple extents along an axis."""
    strd = (ext_max - ext_min) / fctr
    return [(ext_min + i * strd, ext_max - (fctr - i - 1) * strd)
            for i in range(fctr)]
3365dd289cc822bed0e7c6075587ddae15806c50
32,501
def encode_special_characters(user_string):
    """Encode special characters in a user's search string.

    Args:
        user_string(string): raw string to encode

    Returns:
        Encoded string for Elasticsearch
    """
    if user_string is None:
        return ""
    # List of special characters can be found here:
    # https://www.elastic.co/guide/en/elasticsearch/reference/2.4/query-dsl-query-string-query.html#_reserved_characters
    sp_chars = ['+', '-', '=', '|', '<', '>', '!', '(', ')', '{', '}', '[', ']',
                '^', '"', '~', '*', '?', ':', '\\', '/']
    output_list = []
    for char in user_string:
        if char in sp_chars:
            # escape the character with a backslash
            output_list.append('\\' + char)
        else:
            output_list.append(char)
    return "".join(output_list)
00d31e32a823028a08333bd59000f52b64dafcf9
32,502
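A quick doctest-style sketch of `encode_special_characters` above (illustrative input; the doubled backslashes are the repr of single escape characters):
>>> encode_special_characters('foo:bar*')
'foo\\:bar\\*'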
from importlib import import_module
from typing import Callable


def import_function(function_path: str) -> Callable:
    """
    Import a function from a dotted path.

    Example:
        >>> import_function("generate_changelog.pipeline.noop_func")
        <function noop_func at 0x11016d280>

    Args:
        function_path: A dotted path to a function

    Returns:
        The callable function
    """
    bits = function_path.split(".")
    function_name = bits[-1]
    module = import_module(".".join(bits[:-1]))
    return getattr(module, function_name)
3105abee396d73e3580d737b1149dbdd00e0751b
32,507
def should_include_rustc_srcs(repository_ctx):
    """Determine whether or not to include rustc sources in the toolchain.

    Args:
        repository_ctx (repository_ctx): The repository rule's context object

    Returns:
        bool: Whether or not to include rustc source files in a `rustc_toolchain`
    """

    # The environment variable will always take precedence over the attribute.
    include_rustc_srcs_env = repository_ctx.os.environ.get("RULES_RUST_TOOLCHAIN_INCLUDE_RUSTC_SRCS")
    if include_rustc_srcs_env != None:
        return include_rustc_srcs_env.lower() in ["true", "1"]

    return getattr(repository_ctx.attr, "include_rustc_srcs", False)
a6b4c093d44d77880edc7310980fd27791a47c37
32,509
def get_table_4(air_type):
    """Table 4: Thermal resistance of an air layer on the inner side of the envelope.

    Args:
        air_type(str): Type of air layer. One of 'AirTight' (an air layer sealed by
            facing material), 'OnSiteNonConnected' (an air layer not connected to
            other spaces), or 'OnSiteConnected' (an air layer connected to other
            spaces).

    Returns:
        float: Thermal resistance of the air layer on the inner side of the envelope
    """
    R_dict = {'AirTight': 0.09, 'OnSiteNonConnected': 0, 'OnSiteConnected': 0}
    try:
        return R_dict[air_type]
    except KeyError:
        raise ValueError(air_type)
66f28b535f9ef69525cf1e74e0af4bbf155ec458
32,511
def create_mapping(dict_times):
    """
    If times are not integers, transform them into integers.

    :param dict_times: Dict where keys are times.
    :return: A mapping (dictionary) from the current names of time stamps to
        integers (by their index).
    """
    return {key: i for i, key in enumerate(dict_times)}
75893a6419b61d86bc0d4d0693bbc1b25f111a75
32,512
def package(label):
    """Given the label object supplied, returns its string representation.

    Args:
        label: a Label object.

    Returns:
        A string, describing the full path of the package.

    Example:
        >>> package(Label(":target"))
        "@enkit//bazel/utils:target"
    """
    return "{workspace}//{package}:{name}".format(
        workspace = label.workspace_name,
        package = label.package,
        name = label.name,
    )
0f82ce092a806823c6e9248addeea6fabd14100b
32,514
def get_hashtag_counts(key, val, collection):
    """
    Returns a dict of the region from the collection.

    For states, you receive a document containing keys:

        'name', 'fips', 'counts', 'abbrev', 'landarea'

    The value associated to 'counts' is a dict of hashtag and hashtag counts.
    Query by: 'name', 'fips', or 'abbrev'.

    For counties, you receive a document containing keys:

        'name', 'state_fips', 'county_fips', 'geoid', 'landarea'

    Etc.

    Examples:
        get_hashtag_counts('name', 'Washington', collection)
        get_hashtag_counts('fips', 53, collection)
        get_hashtag_counts('abbrev', 'WA', collection)
    """
    c = collection.find({key: val})
    hashtag_counts = {}
    first = True
    for doc in c:
        if first:
            hashtag_counts.update(doc)
            first = False
        else:
            # After the first one, we must explicitly update the inner dict.
            hashtag_counts['counts'].update(doc['counts'])
    return hashtag_counts
ae152f15dd1dd7bb0f15ef9c181adf820c3cb32b
32,515
import torch


def ex_net_svd(model, in_dim):
    """Performs a Singular Value Decomposition on a given model's weights

    Args:
        model (torch.nn.Module): neural network model
        in_dim (int): the input dimension of the model

    Returns:
        U, Σ, V (Tensors): Orthogonal, diagonal, and orthogonal matrices
    """
    W_tot = torch.eye(in_dim)
    for weight in model.parameters():
        W_tot = weight @ W_tot
    U, Σ, V = torch.svd(W_tot)
    return U, Σ, V
5a37aed05f8685683f986da2794569c7ae9a2291
32,517
import socket
import json


def get_json_data(stats_sock):
    """Returns uwsgi stats data as dict from the socket file."""
    data_dict = {}
    data = ""
    try:
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(stats_sock)
            while True:
                d = s.recv(4096)
                if len(d) < 1:
                    break
                data += d.decode("utf8", "ignore")
        data_dict = json.loads(data)
    except Exception as e:
        print(e)
    return data_dict
67b6687fabecfeb84c4c460d133a93da21cc8f5e
32,518
def contains_prefix(root, input_prefix):
    """Check if prefix exists and return True/False w/ num of words with that prefix."""
    cNode = root
    for char in input_prefix:
        found_match = False
        for node in cNode.nodes:
            if node.char == char:
                found_match = True
                cNode = node
                break
        if not found_match:
            return False, 0
    return True, cNode.num_words_at_letter
b658cb31f54c4d1de2534e27bf1b057d72ccf254
32,520
def normalize_run_info(run_info):
    """Normalize all dictionaries describing a run.

    Args:
        run_info (List[Dict]): The list of dictionaries to be normalized.

    Returns:
        List[Dict]: The input run_info but with each dictionary now having all
            the same keys. Note that there will be empty string values when a
            key was not originally found on a dictionary.
    """
    keys = set()
    for run in run_info:
        keys.update(run.keys())

    def get_run_info_with_blanks(run):
        """Ensure all of the required keys are found on a single run dictionary.

        Args:
            run (Dict): The run's original dictionary.

        Returns:
            Dict: The input run dictionary with all of the required keys. Empty
                string values will be added when a required key is not present.
        """
        return dict(zip(keys, map(lambda x: run.get(x, ''), keys)))

    return list(map(get_run_info_with_blanks, run_info))
1f80c1a14bae565dc1e55ba552d281a7249918f1
32,531
def linear_interpolate(x, y, x0, y0, x1, y1):
    """Format a call to linear_interpolate for the given arguments and
    expected result.
    """
    fmt = """
select linear_interpolate({x}, {x0}, {y0}, {x1}, {y1}),
       {y} as answer,
       {y} = linear_interpolate({x}, {x0}, {y0}, {x1}, {y1}) as match
;"""
    return fmt.format(x=x, y=y, x0=x0, y0=y0, x1=x1, y1=y1)
4a4137f1c004a32f933700618fec3948fcadea50
32,534
import six


def replace_with_dict(string, str_func, replacements, count=-1):
    """:yaql:replace

    Returns a string with all occurrences of replacements' keys replaced
    with corresponding replacements' values.
    If count is specified, only the first count occurrences of every key
    are replaced.

    :signature: string.replace(replacements, count => -1)
    :receiverArg string: input string
    :argType string: string
    :arg replacements: dict of replacements in format {old => new ...}
    :argType replacements: mapping
    :arg count: how many first occurrences of every key are replaced. -1 by
        default, which means to do all replacements
    :argType count: integer
    :returnType: string

    .. code::

        yaql> "abc ab abc".replace({abc => xx, ab => yy})
        "xx yy xx"
        yaql> "abc ab abc".replace({ab => yy, abc => xx})
        "yyc yy yyc"
        yaql> "abc ab abc".replace({ab => yy, abc => xx}, 1)
        "yyc ab xx"
    """
    for key, value in six.iteritems(replacements):
        string = string.replace(str_func(key), str_func(value), count)
    return string
d2c22d59dc030a600cf886afb98c015a572eed3d
32,535
import string
import random


def get_random_string(uppercase=True, alphanum=True, length=32):
    """
    Generate random strings.

    :param uppercase: include uppercase characters
    :param alphanum: include numbers
    :param length: result length
    """
    if uppercase:
        symbols = string.ascii_letters
    else:
        symbols = string.ascii_lowercase
    if alphanum:
        symbols += string.digits
    return u''.join([random.choice(symbols) for _ in range(length)])
8a4c26886e9b9ba3acd9b17c84d468cf9c399f9e
32,536
import json


def create_user(user_info, app_client):
    """
    Create a user, providing back the id and the token.

    @param user_info: dictionary with email and password
    @param app_client: a Flask app client to create against
    @returns user_id, token for newly created user
    """
    res = app_client.post(
        '/api/create_user',
        data=json.dumps(user_info),
        content_type='application/json'
    )
    res = json.loads(res.data.decode("utf-8"))
    return res['id'], res['token']
26661ddb485ab8be600f53b63e2dc5ca744c342c
32,537
def email_is_string(email):
    """
    Check if the email is a string.

    :param email: The email to be tested.
    :type email: str
    :return: True if the email is a string, else False.
    :rtype: bool
    """
    return isinstance(email, str)
9262fa4fbfdbaeaa2d605f695b94fbba93de813c
32,539
import base64


def np_to_base64(a):
    """base64 encode the input NumPy array

    Args:
        a (array): numpy array

    Returns:
        str: Encoded string
    """
    return base64.b64encode(a).decode("utf-8")
9f88796f019ce1f1191c4b0492029ec8e737ffe2
32,540
import math


def round_gb_size_up(gb_size, dp=2):
    """Rounds a GB disk size (as a decimal float) up to suit the platform.

    Use this method to ensure that new vdisks, LUs, etc. are big enough, as
    the platform generally rounds inputs to the nearest [whatever]. For
    example, a disk of size 4.321GB may wind up at 4.32GB after rounding,
    possibly leaving insufficient space for the image.

    :param gb_size: A decimal float representing the GB size to be rounded.
    :param dp: The number of decimal places to round (up) to. May be zero
        (round to next highest integer) or negative (e.g. -1 will round to
        the next highest ten).
    :return: A new decimal float which is greater than or equal to the input.
    """
    shift = 10.0 ** dp
    return float(math.ceil(gb_size * shift)) / shift
3e65412f461e8ab2f7bb11ef19102879b9e5782b
32,542
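A quick doctest-style sketch of `round_gb_size_up` above (illustrative inputs):
>>> round_gb_size_up(4.321)
4.33
>>> round_gb_size_up(4.321, dp=0)
5.0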
def dict_to_str(dictionary: dict, level: int = 0) -> str:
    """
    A helper function to log dictionaries in a pretty way.

    Args:
        dictionary (dict): A general python dictionary.
        level (int): A recursion level counter, sets the visual indentation.

    Returns:
        str: A text representation for the dictionary.
    """
    message = ''
    for key, value in dictionary.items():
        if isinstance(value, dict):
            message += ' ' * level * 2 + str(key) + ':\n' + dict_to_str(value, level + 1)
        else:
            message += ' ' * level * 2 + str(key) + ': ' + str(value) + '\n'
    return message
7af1d272d15174a13ad9aaf192e9cce6624e12ba
32,544
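A quick doctest-style sketch of `dict_to_str` above, showing how nesting becomes indentation (illustrative input):
>>> print(dict_to_str({'a': 1, 'b': {'c': 2}}))
a: 1
b:
  c: 2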
def tvbatch(t, v):
    """
    Convenience method for pairing x and y data for training and validation.

    Offsets each block by one timestep between the x and y versions, so that
    at each timestep t, y[t] = x[t + 1].

    Note that here and elsewhere it has been my custom to identify training
    data with the variables x and y, and validation data with xx and yy. This
    seems marginally more parsimonious to me than something like "training_x,"
    etc.

    Returns:
        x, y, xx, yy
    """
    x, y = t[:, :-1], t[:, 1:]
    xx, yy = v[:, :-1], v[:, 1:]
    return x, y, xx, yy
707acb9d516fac70db6fa049f2820ff554a7758b
32,548
import re


def extract_sentence_id(tag):
    """
    Extract the sentence ID of the current sentence.

    Args:
        tag (str): Sentence tag

    Returns:
        str: sentence ID
    """
    if "<s" not in tag:
        return ""
    pattern = re.compile(r'id="[a-z0-9]+?"(?=\s)')
    res = re.findall(pattern, tag)
    if len(res) == 0:
        return None
    return res[0].replace('"', "").replace("id=", "")
99a24d332e21b5861c74b00fdcb334892eda4b7c
32,550
from typing import List


def _make_citation_command_regex(commands: List[str]) -> str:
    """
    A citation command typically has this structure:

        \\command[prenote][postnote]{keys}[punctuation]

    where prenote, postnote, and punctuation are all optional.
    Reference: https://ctan.math.illinois.edu/macros/latex/contrib/biblatex/doc/biblatex.pdf
    """
    command_names = r"(?:" + "|".join([r"\\" + c for c in commands]) + ")"
    return command_names + r"(?:\[[^\]]*\]){0,2}{([^}]*?)}(?:\[[^\]]*\])?"
3545b58a4e8be5601184d2aa01e45aef53b50560
32,551
import warnings


def validate_record(record):
    """Check that `record` contains a key called "time".

    Args:
        record (dict): a dictionary representing a data record, where the
            keys name the "columns".

    Returns:
        True if there is a key called "time" (it actually checks for
        ``"time"`` (a string) and ``b"time"`` (a binary)). False if there
        is no key called "time".
    """
    if not any(k in record for k in ("time", b"time")):
        warnings.warn(
            'records should have "time" column to import records properly.',
            category=RuntimeWarning,
        )
        # return False to match the documented contract
        return False
    return True
5e995c438cf197449596622385b9d3aa47846cba
32,552
def _DecodeCSVShape(op):
    """Shape function for the DecodeCSV op."""
    input_shape = op.inputs[0].get_shape()
    # Optionally check that all other inputs are scalar or empty.
    for default_input in op.inputs[1:]:
        default_input_shape = default_input.get_shape().with_rank(1)
        if default_input_shape[0] > 1:
            raise ValueError(
                "Shape of a default must be a length-0 or length-1 vector.")
    return [input_shape] * len(op.outputs)
22bd949426a595106335a8b68387f4191ce9985b
32,559
def prime(a):
    """Return True if a is prime."""
    if a == 2:
        return True
    if a < 2 or a % 2 == 0:
        return False
    return not any(a % x == 0 for x in range(3, int(a**0.5) + 1, 2))
c086eab2edc2fcb30eec3e2b8e992a266a4fd085
32,566
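A quick doctest-style sketch of `prime` above (illustrative inputs):
>>> [prime(n) for n in (2, 9, 13)]
[True, False, True]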
def generate_project(name='AILE', version='1.0', comment=''):
    """Generate a Slybot project file"""
    return {
        'name': name,
        'version': version,
        'comment': comment
    }
fa73e224754f823177f37fdbe4ba5452ea4388c0
32,568
def linear_forward(current_set, parameter_w, parameter_b):
    """
    linear step for forward propagation

    :param current_set: current A, numpy arrays
    :param parameter_w: current parameter W, numpy arrays
    :param parameter_b: current parameter b, numpy arrays
    :return: current z, and caches for following calculations, numpy arrays
        and dictionaries
    """
    current_z = parameter_w.dot(current_set) + parameter_b
    assert current_z.shape == (parameter_w.shape[0], current_set.shape[1])
    cache = (current_set, parameter_w, parameter_b)
    return current_z, cache
e00004faa5a66fa7a1390e778edf584eaa63df85
32,570
def check_login(session):
    """
    Function to check if the specified session has a logged in user

    :param session: current flask session
    :return: Boolean, true if session has a google_token and user_id
    """
    # Check that session has a google_token
    if session.get('google_token') and session.get('user_id'):
        return True
    return False
cd5651ce622ffd108ea7d0b8c1c4f70b1b4947ab
32,571
def serialize_enum_model(model):
    """Serializes an API model into a JSON-serializable object.

    :param model: BaseModel instance to serialize
    :return: serialized object
    """
    return model.value
cb68a5346d4c804e545bf29db966ff93468e1a46
32,573
def load32(byte):
    """bytearray to int (little endianness)"""
    return sum((byte[i] << (8 * i)) for i in range(4))
9e33b13fcf1d58b27e6915a319c98db4d951ac69
32,576
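A quick doctest-style sketch of `load32` above, showing the little-endian byte order (illustrative input):
>>> hex(load32(bytearray([0x78, 0x56, 0x34, 0x12])))
'0x12345678'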
def build_repr(instance, fields):
    """
    Build the string representation for an instance.

    Args:
        instance: The instance to build the repr for.
        fields: A list of fields to include in the repr.

    Returns:
        A string describing the provided instance including representations
        of all specified fields.
    """
    values = [f"{f}={repr(getattr(instance, f))}" for f in fields]
    return f'{instance.__class__.__name__}({", ".join(values)})'
832b36b9dd93de7e7e22b71693d2b5dbac3749ad
32,584
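A quick doctest-style sketch of `build_repr` above; `Point` is a hypothetical class defined just for the example:
>>> class Point:
...     def __init__(self):
...         self.x, self.y = 1, 2
>>> build_repr(Point(), ['x', 'y'])
'Point(x=1, y=2)'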
import torch


def get_entropy_loss(memory, args, i_agent):
    """Compute entropy loss for exploration

    Args:
        memory (ReplayMemory): Class that includes trajectories
        args (argparse): Python argparse that contains arguments
        i_agent (int): Index of agent to compute entropy loss

    Returns:
        entropy_loss (torch.Tensor): Entropy loss for encouraging exploration
    """
    _, _, entropies, _, _ = memory.sample()

    entropy = torch.stack(entropies[i_agent], dim=1)
    assert entropy.shape == (args.traj_batch_size, args.ep_horizon), \
        "Shape must be: (batch, ep_horizon)"

    entropy_loss = -args.entropy_weight * torch.mean(torch.sum(entropy, dim=1))

    return entropy_loss
0d031100d17b64402340f1c0626a04fc083be8a0
32,591
from typing import List
from typing import Tuple
import struct


def vox_dict(entries: List[Tuple[str, str]]) -> bytes:
    """Produces the binary representation of a dictionary for the .vox format.
    Note that all keys and values are strings.

    Examples
    --------
    >>> vox_dict([('_t', '10 5 2')])

    This dictionary (from the 'nTRN' chunk) defines a translation.
    """
    w = bytearray()
    w.extend(struct.pack('<I', len(entries)))
    for (key, value) in entries:
        key_b = bytes(key, 'UTF-8')
        value_b = bytes(value, 'UTF-8')
        w.extend(struct.pack('<I', len(key_b)))
        w.extend(key_b)
        w.extend(struct.pack('<I', len(value_b)))
        w.extend(value_b)
    return bytes(w)
c8622ad47397fd4b93104ccab578a96ab00ca6dd
32,592
def find_short(strg):
    """Return length of shortest word in sentence."""
    words = strg.split()
    min_size = float('inf')
    for word in words:
        if len(word) < min_size:
            min_size = len(word)
    return min_size
87e99a5754ede74d74e76199c176f956d424fc44
32,593
def add_properties(objectclass, property_list):
    """Generate class properties for a model that provide read-only access
    to elements from the internal ._data data structure.

    :param objectclass: The class to which properties should be added
    :param property_list: A list of property name + data structure key +
        optional docstring tuples. Property names then read from the given
        data structure keys.
    """
    for prop_item in property_list:
        key = prop_item[0]
        internalkey = prop_item[1]

        # bind the current internal key as a default argument so each
        # property reads its own key rather than the loop's final value
        def model_attribute(self, k=internalkey):
            return self._data[k]

        if len(prop_item) > 2:
            model_attribute.__doc__ = prop_item[2]

        setattr(objectclass, key, property(model_attribute))
268b83bd1b794ede60b7ebce27586dc499004730
32,596
from pathlib import Path
import csv


def display_playlist_tracks(playlist):
    """Display playlist tracks from saved csv file"""
    path = Path('playlist_tracks_csv/')
    file_name = '{}'.format(playlist)
    # read from existing csv file
    fpath = (path / file_name).with_suffix('.csv')
    with fpath.open(mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        tracks = []
        line_count = 1
        for row in csv_reader:
            track = "{} - {}: spotify_url: {}".format(row['name'], row['artists'], row['spotify_url'])
            tracks.append(track)
            line_count += 1
        # print every track, including the last one
        for i, track in enumerate(tracks):
            print("{}. {}".format(i, track))
        print(f'Processed {line_count} lines.')
    return tracks
20fd48e1d90ef1c86dd0542ce9552666da49742a
32,597
def get_corepy_output(code, inst):
    """Take an instruction, and return a hex string of its encoding, as encoded by CorePy"""
    hex_list = inst.render()
    return "".join("%02x" % x for x in hex_list)
26816cc8d424bfeb6db9e3a404f22a648c0d0d41
32,605
import requests
import json


def create_secret(api_url, token, scope, secret_name, secret_value):
    """
    Creates a secret in Databricks workspace in the given scope.
    This will overwrite any existing secrets with the same name.
    """
    r = requests.post(api_url + 'preview/secret/secrets/write',
                      headers={"Authorization": "Bearer " + token},
                      json={"scope": scope,
                            "key": secret_name,
                            "string_value": secret_value})
    response_body = r.json()
    if r.status_code != 200:
        raise Exception('Error creating secret: ' + json.dumps(response_body))
    return response_body
4b1cf12d115aa8c3c04d7e59adae11cbf36489cb
32,609
def autorange_xy(img, axx, axy, data, xy_limits, xy_pad):
    """Adjust axx and axy vertical range.

    xy_limits:
        None or "auto"  # matplotlib default range
        (min, max)      # vrange to specified values
        "data"          # vrange to min and max of data -/+ xy_pad
        "match"         # vrange to min and max of default ranges of each
        "clim"          # vrange to img.clim() -/+ xy_pad

    args:
        img        # image whose clim is used for xy_limits="clim"
        axx        # horizontal axis
        axy        # vertical axis
        data       # 2D numpy.ndarray
        xy_limits  # None or "auto" / (min, max) / "data" / "match" / "clim"
        xy_pad     # padding of the xy vertical range
                   # (active for xy_limits="data" or "clim")

    returns:
        axx, axy
    """
    # axx vertical range
    if xy_limits is None or xy_limits == "auto":
        pass
    elif isinstance(xy_limits, tuple):
        # values specified
        axx.set_ylim(*xy_limits)
        axy.set_xlim(*xy_limits)
    elif xy_limits == "data":
        # edge plots range to match padded data range
        rng = data.max() - data.min()
        limits = (data.min() - xy_pad * rng, data.max() + xy_pad * rng)
        axx.set_ylim(*limits)
        axy.set_xlim(*limits)
    elif xy_limits == "match":
        # edge plots range to match each other
        limits = (min(axx.get_ylim()[0], axy.get_xlim()[0]),
                  max(axx.get_ylim()[1], axy.get_xlim()[1]))
        axx.set_ylim(*limits)
        axy.set_xlim(*limits)
    elif xy_limits == "clim":
        # edge plots range to match padded image clim
        clim = img.get_clim()
        rng = clim[1] - clim[0]
        limits = (clim[0] - xy_pad * rng, clim[1] + xy_pad * rng)
        axx.set_ylim(*limits)
        axy.set_xlim(*limits)
    else:
        raise ValueError(f"Invalid value for `xy_limits`={xy_limits}")
    return axx, axy
a375f92f2800a2dbd7b600c26dfc6e287325eb12
32,610
from typing import Iterable
from typing import Iterator


def flatten_iterator(*args: Iterable, depth=None) -> Iterator:
    """
    Iterates and flattens iterables recursively according to the specified depth.
    If depth=None (the default) it flattens recursively until it finds no iterable.

    >>> type(flatten_iterator([1, 2, [3, 4, ['cinco']]]))
    <class 'generator'>
    >>> list(flatten_iterator([1, 2, [3, 4, ['cinco']]]))
    [1, 2, 3, 4, 'cinco']
    >>> list(flatten_iterator([1, 2, [3, 4, ['cinco']]], depth=1))
    [1, 2, [3, 4, ['cinco']]]
    >>> list(flatten_iterator([1, 2, [3, 4, ['cinco']]], depth=2))
    [1, 2, 3, 4, ['cinco']]
    """
    current_depth = -1

    def flatten_iterator_(*args_: Iterable, depth_=None) -> Iterator:
        nonlocal current_depth
        if depth_ is not None:
            current_depth += 1
        for arg_ in args_:
            if isinstance(arg_, Iterable) and not isinstance(arg_, (str, bytes)) and (depth_ is None or current_depth < depth_):
                yield from flatten_iterator_(*arg_, depth_=depth_)
                if depth_ is not None:
                    current_depth -= 1
            else:
                yield arg_

    return flatten_iterator_(*args, depth_=depth)
2e10b23a7c17fb2a19691e10d8b4c290b50f2ca0
32,611
def oddNumbers(l, r):
    """
    List odd numbers within a closed interval.

    :param l: left interval endpoint (inclusive)
    :param r: right interval endpoint (inclusive)
    :return: odd numbers within [l, r].
    """
    l = l if l % 2 == 1 else l + 1
    r = r if r % 2 == 0 else r + 1
    return list(range(l, r, 2))
aa2768b013f42030a0bae2526c169c412963f235
32,612
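A quick doctest-style sketch of `oddNumbers` above; both endpoints are inclusive (illustrative inputs):
>>> oddNumbers(3, 9)
[3, 5, 7, 9]
>>> oddNumbers(2, 8)
[3, 5, 7]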
def times(values):
    """
    Reads the stdout logs and calculates the various cpu times, yielding the
    total time and the idle time.

    Parameters
    ----------
    values : list
        output of the command from the std out logs

    Returns
    -------
    tuple
        total and idle time of the cpu
    """
    user, nice, system, idle, io, irq, soft, steal, _, _ = values
    idle = idle + io
    non_idle = user + nice + system + irq + soft + steal
    total = idle + non_idle
    return total, idle
74b7675a5854c757f3f3f2ddf53474b664e3d74b
32,614
def cluster_profile_query(city):
    """SQL query to get cluster descriptions as 24-hour timeseries within `city`

    Parameters
    ----------
    city : str
        City of interest, either `bordeaux` or `lyon`

    Returns
    -------
    str
        SQL query that gives the timeseries cluster profile in `city`
    """
    if city not in ('bordeaux', 'lyon'):
        raise ValueError("City '{}' not supported.".format(city))
    return ("WITH ranked_centroids AS ("
            "SELECT *, rank() OVER (ORDER BY stop DESC) AS rank "
            "FROM {schema}.{centroid}) "
            "SELECT cluster_id, "
            "h00, h01, h02, h03, h04, h05, h06, h07, h08, h09, h10, h11, "
            "h12, h13, h14, h15, h16, h17, h18, h19, h20, h21, h22, h23, "
            "start, stop "
            "FROM ranked_centroids "
            "WHERE rank=1"
            ";").format(schema=city, centroid='centroid')
fedb6ec448f6b3e273730e9898481fef1c2b7a2a
32,617
def format_duration(seconds: float) -> str:
    """
    Nicely format a given duration in seconds.

    Args:
        seconds: The duration to format, in seconds.

    Returns:
        The duration formatted as a string with unit of measurement appended.
    """
    return f"{seconds:.2f} sec"
dac9a110051680e75bdcb99c473270fa43b1d07a
32,619
import requests


def browse(url):
    """Retrieve the server response contents of the given URL."""
    # A cookie is required to allow books with adult content to be served.
    return requests.get(url, cookies={"adultOff": "no"}).text
06b6d1195141dde662fd5252714e8d5facdc8c1d
32,621
import torch


def apply_across_dim(function, dim=1, shared_keys=None, **tensors):  # -> Dict[str, torch.Tensor]
    """
    Apply a function repeatedly for each tensor slice through the given dimension.
    For example, we have tensor [batch_size, X, input_sequence_length] and dim = 1,
    then we will concatenate the following matrices on dim=1.
    - function([:, 0, :])
    - function([:, 1, :])
    - ...
    - function([:, X-1, :]).

    Args:
        function (function): Function to apply.
        dim (int): Dimension through which we'll apply function. (1 by default)
        shared_keys (set): Set of keys representing tensors to be shared. (None by default)
        tensors (torch.Tensor): Keyword arguments of tensors to compute. Dimension should >= `dim`.

    Returns:
        Dict[str, torch.Tensor]: Dictionary of tensors, whose keys correspond to the output of the function.
    """
    # Separate shared and non-shared tensors
    shared_arguments = {}
    repeat_targets = {}
    for key, tensor in tensors.items():
        if not isinstance(tensor, torch.Tensor) or (shared_keys and key in shared_keys):
            shared_arguments[key] = tensor
        else:
            repeat_targets[key] = tensor

    # Check whether the size of the given dimension is the same across sliced tensors.
    size = {key: tensor.shape[dim] for key, tensor in repeat_targets.items()}
    assert len(set(size.values())) == 1, 'Tensors do not have the same size on dimension %s: We found %s' % (dim, size)

    # Since the sizes are the same, we will represent the size using the first entry.
    size = list(size.values())[0]

    # Dictionary for storing outputs
    output = {}

    for i in range(size):
        # Build kwargs for the function.
        kwargs = {key: tensor.select(dim=dim, index=i).contiguous() for key, tensor in repeat_targets.items()}
        kwargs.update(shared_arguments)

        # Apply function on the slice and restore the dimension for concatenation.
        for key, tensor in function(**kwargs).items():
            # guard against shared_keys being None before membership test
            if shared_keys and key in shared_keys:
                continue
            if key not in output:
                output[key] = []
            output[key].append(tensor.unsqueeze(dim=dim))

    # Check whether the outputs have the same size.
    assert all(len(t) == size for t in output.values())

    # Concatenate all outputs, and return.
    return {key: torch.cat(tensor, dim=dim).contiguous() for key, tensor in output.items()}
efea38442de6c42c0d3d4eead8ddf18546559f31
32,623
def bool_setter(value: bool):
    """Generic setter for bool objects.

    Accepts a bool, None, or the strings "true"/"false".

    Args:
        value: The value to be validated.

    Raises:
        ValueError: If the value cannot be interpreted as a bool
    """
    if isinstance(value, bool) or value is None:
        return value
    if value == "false":
        return False
    if value == "true":
        return True
    raise ValueError("Type should be bool")
eec686da23b4a95c0276b8e2b97975fe24bf7b91
32,624
def string_to_index(needle, columns):
    """Given a string, find which column index it corresponds to.

    :param needle: The string to look for.
    :param columns: The list of columns to search in.
    :returns: The 1-based index containing that string.
    :raises ValueError: Value "`needle`" not found in columns "`columns`"
    """
    for index, value in enumerate(columns):
        if needle == value:
            return index + 1
    raise ValueError("Value \"{0}\" not found in columns \"{1}\"".format(needle, columns))
358a88e1ec487b142ae6380a1cb9579688cd0451
32,629
def nested_sum(t):
    """Computes the total of all numbers in a list of lists.

    t: list of list of numbers

    returns: number
    """
    total = 0
    for nested in t:
        total += sum(nested)
    return total
44d9fa3e0a6011c74f23a002e86bef13b0c52e72
32,634
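A quick doctest-style sketch of `nested_sum` above (illustrative input):
>>> nested_sum([[1, 2], [3], [4, 5, 6]])
21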
def application_state(app):
    """Return the consolidated state for application *app*.

    The *app* parameter must be a dict as returned by
    :meth:`~RavelloClient.get_application`.

    The consolidated state for an application is the set of distinct states
    for its VMs. As special cases, None is returned if there are no VMs, and
    the single state is returned if there is exactly one state.
    """
    states = list(set((vm['state'] for vm in app.get('deployment', {}).get('vms', []))))
    return states if len(states) > 1 else states[0] if len(states) == 1 else None
d7dbd1f17e311138864f7570c5d9432a621b728c
32,638
from typing import IO
from typing import Any
from typing import List
import mmap


def _get_lines_from_fd(fd: IO[Any], nb: int = 10) -> List[str]:
    """
    Get the last log lines from a fileno with mmap

    :param fd: File descriptor on the log file
    :param nb: number of messages to fetch
    :returns: A list of message lines
    """
    with mmap.mmap(fd.fileno(), 0, prot=mmap.PROT_READ) as m:
        # start of messages begin with MI or MR, after a \n
        pos = m.rfind(b"\nM") + 1
        # number of messages found so far
        count = 0
        while pos != 0 and count < nb - 1:
            count += 1
            pos = m.rfind(b"\nM", 0, pos) + 1
        lines = m[pos:].decode(errors='replace').splitlines()
    return lines
a3a97b7ff8fcc8a0e9b564233ccbb11fa0ee7061
32,639
def bookkeep_reactant(mol):
    """Bookkeep bonds in the reactant.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance for reactants.

    Returns
    -------
    pair_to_bond_type : dict
        Mapping 2-tuples of atoms to bond type. 1, 2, 3, 1.5 are separately
        for single, double, triple and aromatic bond.
    """
    pair_to_bond_type = dict()
    for bond in mol.GetBonds():
        atom1, atom2 = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        atom1, atom2 = min(atom1, atom2), max(atom1, atom2)
        type_val = bond.GetBondTypeAsDouble()
        pair_to_bond_type[(atom1, atom2)] = type_val
    return pair_to_bond_type
e8ee50904596152299d140a58ac4496e98c771df
32,642
def get_backbones(nts):
    """Get backbone pairs.

    Args:
        nts (dict): DSSR nucleotide info.

    Returns:
        bb (list): list of tuples (5' base, 3' base)
    """
    bb = []
    for i, three_p in enumerate(nts):
        if i == 0:
            continue
        five_p = nts[i-1]
        if five_p['chain_name'] != three_p['chain_name']:
            continue
        if three_p['nt_type'] != 'RNA' or five_p['nt_type'] != 'RNA':
            continue
        if 'break' not in three_p['summary']:
            bb.append((five_p, three_p))
    return bb
724c38be0c5a29ac75fde21359467209f2f4a566
32,645
import hashlib


def get_file_hash(file_list):
    """
    Gets an MD5 hash value for each file in a list.
    Returns a dictionary of {file: hash} items.
    """
    if not isinstance(file_list, list):
        file_list = [file_list]
    BLOCKSIZE = 65536
    file_dict = {}
    for file in file_list:
        hasher = hashlib.md5()
        with open(file, 'rb') as afile:
            buf = afile.read(BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = afile.read(BLOCKSIZE)
        file_dict[file] = hasher.hexdigest()
    return file_dict
6302bda4c321539072f4c09d7b82807d3c1a2fbe
32,646
def Validate(func, value):
    """Raises a ValueError if the value doesn't cause the given function to
    return true"""
    if func(value):
        return value
    raise ValueError("%r: Invalid value %r" % (func, value))
b079a809fa54635ee933242aad9c7d2673ee74ca
32,651
def find_longest_paper(pubs):
    """
    This function finds the longest paper in a year_dict, in terms of how many
    tokens are in the paper.

    Parameters:
        pubs (list-like, required): The year_dict to be searched

    Returns:
        longest (int): The length of the longest paper in the year dict
    """
    longest = 0
    for paper in pubs:
        if len(paper) > longest:
            longest = len(paper)
    return longest
60f687c8131cef5bf77cb31cfe86a855136dcef7
32,652
def parent_counts(experiment_proto):
    """Return a map from all counts to counts from their input round.

    Args:
        experiment_proto: selection_pb2.Experiment describing the experiment.

    Returns:
        Dict[str, str] mapping SequencingReads names to the read name for
        positive results from the previous round. Reads without a parent
        count are omitted.
    """
    input_counts = {}
    for round_name, round_proto in experiment_proto.rounds.items():
        if round_proto.input:
            parent_round = experiment_proto.rounds[round_proto.input]
            input_counts[round_name] = parent_round.positive_reads.name
        else:
            input_counts[round_name] = None

    dependencies = {}
    for round_name, round_proto in experiment_proto.rounds.items():
        for reads in [round_proto.positive_reads, round_proto.negative_reads]:
            field = reads.name
            if field:
                parent_count = input_counts[round_name]
                if parent_count:
                    dependencies[field] = parent_count
    return dependencies
2fedab0eb54b75f12616e1c6ed232352a911643e
32,653
def linspace(start, end, number_of_points):
    """
    Generate a list of floats from start to end containing number_of_points
    elements. Clone of the NumPy function with the same name.

    :param start: starting point of list.
    :param end: ending point of list.
    :param number_of_points: number of points in returned list.
    """
    if start >= end:
        raise ValueError(
            'The starting value must be less than the ending value.')
    if number_of_points < 2:
        raise ValueError('The space must contain at least two points.')
    interval = (end - start) / (number_of_points - 1)
    return [start + interval * i for i in range(number_of_points)]
9be2c37a67e3f1e00bac8dfa0691434570aa4bc9
32,655
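A quick doctest-style sketch of `linspace` above; both endpoints are included (illustrative inputs):
>>> linspace(0, 1, 5)
[0.0, 0.25, 0.5, 0.75, 1.0]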
def split_endpoint_timestamp(file):
    """Split a file into the endpoint and timestamp part.

    Parameters
    ----------
    file : pathlib.Path
        Can be a dated file or a link.

    Returns
    -------
    str, str
        endpoint name and timestamp
    """
    endpoint, date, time = file.resolve().stem.rsplit("_", 2)
    return endpoint, date + "_" + time
53bbf880e80bf37f66ff95913f15d97fb2505cc4
32,662
def _event_QSpinBox(self):
    """Return value change signal for QSpinBox"""
    return self.valueChanged
b9d4b9788b3770431858606b53992ef7694f82bb
32,663
def system_reduction_factor(delta_ss, delta_frot, delta_fshear, eta_ss, eta_frot, eta_fshear):
    """
    Calculates the system displacement reduction factor based on the
    foundation and superstructure displacement reduction factors.

    :param delta_ss: superstructure displacement
    :param delta_frot: displacement due to foundation rotation
    :param delta_fshear: displacement due to soil-foundation shear deformation
    :param eta_ss: superstructure displacement reduction factor
    :param eta_frot: foundation rotation displacement reduction factor
    :param eta_fshear: soil foundation shear deformation displacement reduction factor
    :return: system displacement reduction factor
    """
    delta_total = delta_ss + delta_frot + delta_fshear
    return (delta_ss * eta_ss + delta_frot * eta_frot + delta_fshear * eta_fshear) / delta_total
217aa81d2a148c22a719eeb71af4fc198931c5fc
32,668
def generate_order_by_clause(params):
    """Generates order_by clause strings from the given list.

    :param list params: A list of column names to sort the result to::

        params = [
            'id', 'name', 'full_path', 'parent_id', 'resource', 'status',
            'project_id', 'task_type', 'entity_type', 'percent_complete'
        ]

    will result in a search string like::

        order by
            tasks.id, tasks.name, tasks.full_path, tasks.parent_id,
            resource_info.info, "Statuses".code, "Tasks".project_id,
            task_types.name, tasks.entity_type
    """
    order_by_string = ''
    order_by_string_buffer = []

    column_dict = {
        'id': 'id',
        'parent_id': "parent_id",
        'name': "name",
        'path': "full_path",
        'full_path': "full_path",
        'entity_type': "entity_type",
        'task_type': "task_types.name",
        'project_id': 'project_id',
        'date_created': 'date_created',
        'date_updated': 'date_updated',
        'has_children': 'has_children',
        'link': 'link',
        'priority': 'priority',
        'depends_to': 'dep_info',
        'resource': "resource_info.resource_id",
        'responsible': 'responsible_id',
        'watcher': 'watcher_id',
        'bid_timing': 'bid_timing',
        'bid_unit': 'bid_unit',
        'schedule_timing': 'schedule_timing',
        'schedule_unit': 'schedule_unit',
        'schedule_model': 'schedule_model',
        'schedule_seconds': 'schedule_seconds',
        'total_logged_seconds': 'total_logged_seconds',
        'percent_complete': 'percent_complete',
        'start': 'start',
        'end': '"end"',
        'status': '"Statuses".code',
    }

    for column_name in params:
        order_by_string_buffer.append(column_dict[column_name])

    if len(order_by_string_buffer):
        # need to indent the first element by hand
        order_by_string = 'order by %s' % ', '.join(order_by_string_buffer)

    return order_by_string
9f9a74d6a16b53cd65542a000fe4215a9d16ced1
32,669
def rect2raster(r, h):
    """Convert iulib rectangles to raster coordinates.

    Raster coordinates are given as (row0, col0, row1, col1). Note that this
    is different from some other parts of Python, which transpose the rows
    and columns."""
    (x0, y0, x1, y1) = (r.x0, r.y0, r.x1, r.y1)
    y1 = h - y1 - 1
    y0 = h - y0 - 1
    return (y1, x0, y0, x1)
7892e789076fa41e07db9f640f44a76498f53196
32,672
def draw_box(image, bbox, color, lw):
    """Draw RGB(A) `color` bounding box on image array."""
    y1, x1, y2, x2 = bbox
    image[y1:y1 + lw, x1:x2] = color
    image[y2:y2 + lw, x1:x2] = color
    image[y1:y2, x1:x1 + lw] = color
    image[y1:y2, x2:x2 + lw] = color
    return image
e2156f60918d2fd9a1641ee33c744fe7288560b0
32,674
def get_entity_list_container_field(name):
    """Returns the container field used in list responses

    GET /active_computers -> {"items": [...]}
    GET /jobs -> {"jobs": [...]}
    """
    if name == "active_computers":
        return "items"
    elif name == "inventory_computers":
        return "items"
    elif name == "rpc_tasks":
        return "tasks"
    elif name == "rpc_jobs":
        return "jobs"
    return name
b9210a61b9b1d4e33689370a118c983cedb71456
32,675
def _parse_findings(findings, region):
    """
    Returns relevant information from AWS Security Hub API response.

    Args:
        findings (list): AWS Security Hub response.
        region (str): AWS region.

    Returns:
        List[dict]: List of compliance information dictionaries.
    """
    new_findings = []
    for finding in findings:
        new_findings.append(
            {
                "Region": region,
                "Title": finding["Title"],
                "Description": finding["Description"],
                "Severity": finding["Severity"],
                "Compliance": finding["Compliance"]["Status"],
                "Recommendation": finding["Remediation"]["Recommendation"]["Text"],
                "Reference": finding["Remediation"]["Recommendation"]["Url"],
            }
        )
    return new_findings
276f189027e105586a884cf74c8abee9bddd93be
32,677
def read_playlists(fname="playlists.txt"):
    """Reads in the Playlists"""
    with open(fname) as f:
        return f.readlines()
d6d36ae0df82b26c4b64bf3d785c8c42874a25cc
32,679
def is_valid_filename(filename):
    """Check if a file has a valid filename (valid chars and max length gotten
    from Stack Overflow and other sources, may be wrong)"""
    # Added some accents; ö and stuff like that is still verboten
    valid_chars = '-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789àèéíóòúüÀÈÉÍÒÓÚÜ'
    if len(filename) > 260:
        return False
    for char in filename:
        if char not in valid_chars:
            return False
    return True
680d7719c08cc0159bb9d50787253d97129b8f42
32,682
def disjoint_union(*graphs):
    """Given a list of graphs, construct their disjoint union."""
    res_vertices = []
    res_edges = []
    for (vertices, edges) in graphs:
        # offset edge endpoints by the number of vertices already added
        offset = len(res_vertices)
        res_edges.extend((a + offset, b + offset) for (a, b) in edges)
        res_vertices.extend(vertices)
    return (res_vertices, res_edges)
dd8a62ca4c3f9603fef56f52e83d643932d03b27
32,683
def bool_str(b: bool) -> str:
    """Converts boolean to string ('0' or '1')"""
    return '1' if b else '0'
9bbcc98a9d488e09d19c8b5689583ee835d900b8
32,684
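A quick doctest-style sketch of `bool_str` above:
>>> bool_str(True), bool_str(False)
('1', '0')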
def obsmode_name(mode):
    """Return full name of the observing mode"""
    if type(mode) is not list:
        mode = [mode]

    full_names = {'fiducial': 'Fiducial',
                  'binospec': 'Binospec',
                  'hectochelle': 'Hectochelle',
                  'desi': 'DESI-like',
                  'gaia': 'Gaia-like',
                  'exgal': 'Extragalactic'}
    keys = full_names.keys()

    names = []
    for m in mode:
        if m in keys:
            name = full_names[m]
        else:
            name = m
        names += [name]
    return names
e608ae1e60286202153b0754fa071239c280eed9
32,686
def no_of_misplaced_tiles(state):
    """
    Returns the number of the misplaced tiles in the given state

    state: a list representing the state to be checked
    """
    h1 = 0
    goal_state = [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
    for y in range(len(goal_state)):
        for x in range(len(goal_state[y])):
            if state[y][x] != goal_state[y][x]:
                h1 += 1
    return h1
1901f757db0b27ba3b3e8235efa1d52abeb3d18b
32,687
import re


def parse_tweet_text(tweet_text):
    """
    Input: tweet_text: a string with the text of a single tweet or a
        concatenation of tweets
    Output: lists of tokens in the text:
        words (many emoticons are recognized as words)
        hashtags
        users mentioned
        urls

    Usage: words, hashes, users, urls = parse_tweet_text(tweet_text)
    """
    content = tweet_text
    # collect and remove URLs
    urls = re.findall(r"\b((?:https?|ftp|file)://[-A-Z0-9+&@#/%?=~_|$!:,.;]*[A-Z0-9+&@#/%=~_|$])", content, re.IGNORECASE)
    content = re.sub(r"\b((?:https?|ftp|file)://[-A-Z0-9+&@#/%?=~_|$!:,.;]*[A-Z0-9+&@#/%=~_|$])", "", content, 0, re.IGNORECASE)
    content = content.lower()
    # collect and remove users mentioned
    users = re.findall(r"@(\w+)", content)
    content = re.sub(r"@(\w+)", "", content, 0)
    # collect and remove hashtags
    hashes = re.findall(r"#(\w+)", content)
    content = re.sub(r"#(\w+)", "", content, 0)
    # strip out extra whitespace in the remaining text
    content = re.sub(r"\s{2,}", " ", content)
    # strip out singleton punctuation
    raw_words = content.split()
    words = []
    for word in raw_words:
        if word in ['.', ':', '!', ',', ';', "-", "-", "?", '\xe2\x80\xa6', "!", "|", '"', '~', '..', '/']:
            continue
        re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
        word = re_pattern.sub(u'\uFFFD', word)
        # remove trailing commas, periods, question marks, colons, exclamation marks
        word = re.sub(r"(.*)[,\.,\?,:,!]$", r"\1", word, 0, re.MULTILINE)
        words.append(word)
    return (words, hashes, users, urls)
facb5f319a542ca34c9cacf6c930b98d001de78e
32,688
def get_x_y(receiver):
    """
    (receiver: Receiver) -> (Column, Column)

    Returns x column and y column tuple. Assumes Receiver has at least two
    columns, takes first two.
    """
    selection = receiver.selection
    return selection[1][0], selection[1][1]
93ad6f0f84dac8b4daed62fdfb9ee2cc52001349
32,689
from typing import Optional


def normalize_dewey(class_mark: str) -> Optional[str]:
    """
    Normalizes Dewey classification to be used in call numbers

    Args:
        class_mark: Dewey classification

    Returns:
        normalized class_mark
    """
    if isinstance(class_mark, str):
        class_mark = (
            class_mark.replace("/", "")
            .replace("j", "")
            .replace("C", "")
            .replace("[B]", "")
            .replace("'", "")
            .strip()
        )
        try:
            # test if it has the correct format
            float(class_mark)
            while class_mark[-1] == "0":
                class_mark = class_mark[:-1]
            return class_mark
        except ValueError:
            return None
    else:
        return None
7cf7056902e6ac410b79deca9644a8cea5fae971
32,695
from functools import reduce


def count(l):
    """Count the number of elements in an iterator. (consumes the iterator)"""
    # start from 0 so the first element isn't misused as the initial value
    return reduce(lambda x, _: x + 1, l, 0)
c30519261dbd6e02cd41d4df07607087cb7a6374
32,698
def check_commandline_inputs(n, limit):
    """See if the n and limit passed from the command line were valid"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise ValueError("n wasn't a number")
    valid_ns = [5, 7, 11, 17, 23, 29, 37, 47, 59, 71, 83, 97, 113, 131,
                149, 167, 191, 223, 257, 293, 331, 373, 419, 467, 521]
    if n not in valid_ns:
        raise ValueError("n must be one of: {0}".format(valid_ns))
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        raise ValueError("Limit wasn't a number")
    return n, limit
38432af09b6550349a942ea677a9e76c1616dbc7
32,699
def insert_cnpj(num):
    """
    Cast a string of digits to the formatted 00.000.000/0001-00 CNPJ standard.
    """
    cnpj = num[:2] + '.' + num[2:5] + '.' + num[5:8] + '/' + num[8:12] + '-' + num[12:]
    return cnpj
973e6a1e0e0235e5390fec075a4ee6443df32841
32,702
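A quick doctest-style sketch of `insert_cnpj` above (the digit string is illustrative):
>>> insert_cnpj('00123456000178')
'00.123.456/0001-78'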
def get_empty_action_space(num_actions):
    """
    Returns an action space with nothing selected.
    """
    return [0] * num_actions
43dd1b10ba6737ca9a9f0926f3109754ba2737c9
32,708
from typing import get_origin
from typing import Sequence


def _is_collection_type(o) -> bool:
    """
    Check whether the provided type/annotation is one which can hold elements.
    Necessary since the minor versions of Python 3 have evolving ways of
    comparing type annotations.

    :param o: An annotation or type reference
    :return: Whether it represents a type which can hold elements
    """
    try:
        # Py3.9+
        cls = get_origin(o) or o
        return issubclass(cls, Sequence)
    except ImportError:
        pass

    # extract the base type if 'o' is an annotation
    cls = o if type(o) == type else o.__orig_bases__[0]
    return issubclass(cls, Sequence)
386400e5a7e6ea5690bc080e31507e9de57fb193
32,709
def doc2vector(model, samples):
    """Infer vectors for samples

    Args:
        model: The instance to use to infer vectors, as
            :class:`gensim.models.Doc2Vec`.
        samples: The samples as :class:`list`.

    Returns:
        The :class:`list` of inferred vectors.
    """
    return [model.infer_vector(sample) for sample in samples]
0d05ea36555e925fe11bdcde0ea182c169f0b374
32,710
def child_structure_dfs(sampler, seen=None):
    """Return the structure of a composed sampler using a depth-first search
    on its children.

    Args:
        sampler (:obj:`.Sampler`):
            :class:`.Structured` or composed sampler with at least one
            structured child.

        seen (set, optional, default=False):
            IDs of already checked child samplers.

    Returns:
        :class:`~collections.namedtuple`: A named tuple of the form
        `Structure(nodelist, edgelist, adjacency)`, where the 3-tuple values
        are the :attr:`.Structured.nodelist`, :attr:`.Structured.edgelist`
        and :attr:`.Structured.adjacency` attributes of the first structured
        sampler found.

    Raises:
        ValueError: If no structured sampler is found.

    Examples:
        >>> sampler = dimod.TrackingComposite(
        ...     dimod.StructureComposite(
        ...         dimod.ExactSolver(), [0, 1], [(0, 1)]))
        >>> print(dimod.child_structure_dfs(sampler).nodelist)
        [0, 1]
    """
    seen = set() if seen is None else seen

    if sampler not in seen:
        try:
            return sampler.structure
        except AttributeError:
            # hasattr just tries to access anyway...
            pass

    seen.add(sampler)

    for child in getattr(sampler, 'children', ()):  # getattr handles samplers
        if child in seen:
            continue

        try:
            return child_structure_dfs(child, seen=seen)
        except ValueError:
            # tree has no child samplers
            pass

    raise ValueError("no structured sampler found")
9cb6f997e12a93230ed18bf1121493f2365adf24
32,712
import itertools


def sorted_classes_from_index_dict(idx_dct):
    """Obtain classes from index dict, sorted by class index.

    :param idx_dct: A dictionary mapping atom keys to class indices.
    :type idx_dct: dict
    :returns: A tuple of tuples of keys for each class, sorted by class index.
    :rtype: tuple[tuple[int]]
    """
    keys = sorted(idx_dct.keys())
    clas = sorted(keys, key=idx_dct.__getitem__)
    cla_dct = tuple(
        tuple(c) for _, c in itertools.groupby(clas, key=idx_dct.__getitem__))
    return cla_dct
3dd9c9a8b62c559fa8a300754fde2168c0d59fd1
32,715
def _get_option(options: dict, opt: str):
    """Dictionary look-up with flonb specific error message"""
    if opt in options:
        return options[opt]
    raise ValueError(f"Missing option '{opt}'.")
ab048a65c3e92547085e9ce31c03e8fbd3b60558
32,716
def get_ip_address(event):
    """
    Retrieves the client IP address from an event

    :param event: event
    :return: client IP address
    """
    if "headers" in event:
        if "Client-Ip" in event["headers"]:
            return event["headers"]["Client-Ip"]
        if "X-Forwarded-For" in event["headers"]:
            return event["headers"]["X-Forwarded-For"].split(",")[0]
    if (
        "requestContext" in event
        and "identity" in event["requestContext"]
        and "sourceIp" in event["requestContext"]["identity"]
    ):
        return event["requestContext"]["identity"]["sourceIp"]
    return ""
2a919d84dce5bc84e0a527d8e8edc734797cb7d7
32,722
def partition(condition, iterable, output_class=tuple):
    """
    Split an iterable into two according to a function evaluating to either
    true or false on each element.

    :param condition: boolean function
    :param iterable: iterable to split
    :param output_class: type of the returned iterables
    :return: two iterables
    """
    true = []
    false = []
    for i in iterable:
        if condition(i):
            true.append(i)
        else:
            false.append(i)
    return output_class(true), output_class(false)
678eea1acf22ee07bbcf41b57a00516a076c7cc3
32,724
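A quick doctest-style sketch of `partition` above (illustrative predicate and input):
>>> partition(lambda x: x % 2 == 0, range(6))
((0, 2, 4), (1, 3, 5))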
def parse_metadata(metadata_field):
    """Cleans the metadata field; in case it's NaN, converts it to str."""
    str_field = str(metadata_field)
    if str_field == "nan":
        return ""
    return str_field
e1a449756bba1b7e78e796664eeedef190b85155
32,725
def mscale(matrix, d):
    """Return *matrix* scaled by scalar *d*"""
    for i in range(len(matrix)):
        for j in range(len(matrix[0])):
            matrix[i][j] *= d
    return matrix
a98be25a0e0977e5e1c55b2efd602b1f3fabc493
32,727
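A quick doctest-style sketch of `mscale` above; note that it scales the matrix in place and returns the same list (illustrative input):
>>> mscale([[1, 2], [3, 4]], 2)
[[2, 4], [6, 8]]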
def get_file_paths(file_prefix, num_files):
    """
    Generates the json file paths of the format <file_prefix>idx.json where
    idx is an integer between 0 and num_files-1

    :param file_prefix: The first part of the file path that all files to be
        averaged have in common
    :param num_files: The number of files to be averaged
    :return: A list of files to be averaged
    """
    files = []
    for i in range(num_files):
        files.append(f"{file_prefix}{i}.json")
    return files
08dd65503d4ccfff1b22c54e8002831b9e1db0b3
32,731
def get_appliance_flow_bandwidth_stats(
    self,
    ne_id: str,
    flow_id: int,
    flow_seq_num: int,
) -> list:
    """Get the so far accumulated bandwidth stats about the flow

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - flow
          - GET
          - /flow/flowBandwidthStats/{neId}/q

    :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
    :type ne_id: str
    :param flow_id: Flow ID
    :type flow_id: int
    :param flow_seq_num: Flow sequence number
    :type flow_seq_num: int
    :return: Returns list of dictionaries for so far accumulated bandwidth
        stats about the flow
    :rtype: list[dict]
    """
    return self._get(
        "/flow/flowBandwidthStats/{}/q?id={}&seq={}".format(
            ne_id, flow_id, flow_seq_num
        )
    )
99672d1ad0b4adebded4905cfc81e95b8adba098
32,734
def barycentric_to_cartesian(bary, vertices):
    """
    Compute the Cartesian coordinates of a point with given barycentric
    coordinates.

    :param bary: The barycentric coordinates.
    :param vertices: The triangle vertices (3 by n matrix with the vertices
        as rows (where n is the dimension of the space)).
    :returns: The Cartesian coordinates vector.
    :rtype: n-dimensional vector
    """
    return vertices[0] * bary[0] + vertices[1] * bary[1] + vertices[2] * bary[2]
3576f93d190ef52669a0ba80483dea88c75696ac
32,740