Dataset schema: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, values 8 to 710k).
Each record below lists a code snippet, its sha1 hash, and its id.
def add_to_visited_places(x_curr: int, y_curr: int, visited_places: dict) -> dict:
    """
    visited_places[(x, y)] contains the number of visits to the cell (x, y).
    This updates the count for the current position (x_curr, y_curr).

    Args:
        x_curr: x coordinate of current position
        y_curr: y coordinate of current position
        visited_places: dict defined above

    Returns:
        visited_places: updated dict
    """
    if (x_curr, y_curr) not in visited_places:
        visited_places[(x_curr, y_curr)] = 1
    else:
        visited_places[(x_curr, y_curr)] += 1
    return visited_places
605d1941ae0f069405ed4b804cf9c25ee3381b9b
658,528
from datetime import datetime


def get_date() -> str:
    """Get the current date in order to properly name the recorded log files

    Returns:
        str: The current date in YYYY_MM_DD format
    """
    return datetime.now().strftime('%Y_%m_%d')
71b3b56cda565f088d29868b9e47f7fd9b7169ba
466,096
from typing import List


def flatten(lst: List) -> list:
    """Reduce a nested list to a single flat list.

    >>> flatten(["A", [2, ["C"]], ["DE", (5, 6)]]) == ["A", 2, "C", "DE", 5, 6]
    True
    """
    result = []
    for item in lst:
        isiterable = False
        try:
            iter(item)
            isiterable = True
        except TypeError:
            pass
        if isiterable and not isinstance(item, str):
            result += flatten(item)
        else:
            result.append(item)
    return result
3837f9578d5917664c8187a1360dab4ba5fd6673
62,901
import torch


def distV(x: torch.Tensor) -> torch.Tensor:
    """Distance vectors of points.

    :param x: location Tensor of shape (n, 3)
    :returns: vector Tensor of shape (n, n, 3)"""
    n = x.shape[0]
    return x.view(1, n, 3) - x.view(n, 1, 3)
c9c46c44a6b8ca49acae27d32736e98cc310e88f
419,596
def defvalkey(js, key, default=None, take_none=True):
    """
    Returns js[key] if set, otherwise default. Note js[key] can be None.

    :param js: dictionary to read from (may be None)
    :param key: key to look up
    :param default: value returned when the key is missing
    :param take_none: if False, a stored None is replaced by default
    :return: the looked-up value or default
    """
    if js is None:
        return default
    if key not in js:
        return default
    if js[key] is None and not take_none:
        return default
    return js[key]
b38ea51a87b426b31d3f51af3572fdd677408a37
586,134
import re


def add_accessions2(genomes: dict) -> dict:
    """
    Some genomes have their assembly accession in the 'sourceName' field.
    Updates the genome dict "assembly_accession" field for each genome found.
    """
    re_acc = re.compile(r"GC[AF]_\d{9}\.\d+")
    for name in genomes:
        hit = re_acc.search(genomes[name]["sourceName"])
        if hit:
            genomes[name]["assembly_accession"] = hit.group()
    return genomes
ab16c1687987b092564c146f2265d326f155bb9f
204,717
def fixture_use_real_aws(request):
    """Indicates whether the 'use actual AWS' option is on or off."""
    return request.config.getoption("--use-real-aws-may-incur-charges")
1561b3a78e35fd28628e559e424387ac192fd73e
179,873
def make_window(x, y, xmin, ymin, windowsize):
    """
    Create a window for writing a child tile to a parent output tif
    """
    if x < xmin or y < ymin:
        raise ValueError("Indices can't be smaller than origin")
    row = (y - ymin) * windowsize
    col = (x - xmin) * windowsize
    return (
        (row, row + windowsize),
        (col, col + windowsize)
    )
42374d0aa2f11a2e1348e27dfdaf2355da7a7d38
407,313
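A quick sanity check for make_window above, using hypothetical tile coordinates:

# Tile (2, 3) in a grid with origin (0, 0) and 256-px tiles.
window = make_window(x=2, y=3, xmin=0, ymin=0, windowsize=256)
assert window == ((768, 1024), (512, 768))  # ((row range), (col range))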
def first(sequence, condition=None):
    """
    Returns the first item in a sequence that satisfies the specified
    condition, or raises StopIteration if no item is found.

    Args:
        sequence: iterable
            Sequence of items to go through.
        condition: callable
            Condition to test.

    Returns:
        any
            First valid item.
    """
    if condition is None:
        return next(d for d in sequence)
    return next(d for d in sequence if condition(d))
879c3aaf4d07e367e275227c963422f021be509f
644,265
import copy


def convert_results_to_list(results, deep_copy=True):
    """
    Given a Whoosh results object, converts it to a list and returns that list.
    Useful, as the Whoosh results object does not permit reassignment of Hit
    objects. Note that if deep_copy is True, each Hit is copied (via copy.copy)
    into the returned list.
    """
    results_list = []
    for hit in results:
        if deep_copy:
            results_list.append(copy.copy(hit))
            continue
        results_list.append(hit)
    return results_list
6754afd63dbc72073ccf743bc7e27d7c645b570b
483,639
import math


def simple_half_life(q, k, dt):
    """
    Return the quantity degraded by biodegradation occurring during a time
    interval (the remaining amount is q minus this value).

    Parameters
    ----------
    q : Amount
    k : Half-life (decay rate) constant [1/s]
    dt : Time interval [s]
    """
    return q * (1 - math.exp(-k * dt))
f4dfa255968c20757fd03ac6c0d50b22e795f5dd
219,996
def reverse_downsample(self, ftimes, other_spatial, other_ftimes, **kwargs):
    """
    Perform downsample_place with the spatial data swapped.

    Calls other_spatial.downsample_place(other_ftimes, self, ftimes, **kwargs)

    See Also
    --------
    downsample_place
    """
    return other_spatial.downsample_place(other_ftimes, self, ftimes, **kwargs)
73351b351402710ed5d0d5e7009aa08bb3277f25
194,831
def nsec_to_usec_round(timestamp) -> int:
    """Convert a datetime to epoch nanoseconds, then round to microseconds.

    Timestamp in zipkin spans is int of microseconds.
    See: https://zipkin.io/pages/instrumenting.html
    """
    nanos = timestamp.timestamp() * 1e9
    return int((nanos + 500) // 10 ** 3)
054fb56d733988a90ac8945e9bf1b733fb1a0572
645,762
def count(text, pattern):
    """How many times the given pattern appears in the text (overlapping
    occurrences are counted)."""
    i = 0
    n = 0
    while i <= len(text) - len(pattern):
        if text[i:i+len(pattern)] == pattern:
            n = n + 1
        i = i + 1
    return n
9879b823aa640371a265ed1594bd726957d52866
536,732
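A small check for count above, showing that overlapping matches are included (unlike str.count):

assert count("aaaa", "aa") == 3   # matches at i = 0, 1, 2
assert "aaaa".count("aa") == 2    # str.count skips overlaps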
import random


def seed42(request):
    """A fixture that sets the random seed to 42, for consistency"""
    random.seed(42)
    return request
7abb0a70a2a147569e291d1511851fb74d66bdb2
598,884
def _is_password_valid(pw):
    """Returns True if the password is strong enough for Firebase Auth."""
    return len(pw) >= 6
fbaff0d6b2866de14ddd7719cd369ea589f02e84
641,465
import string
import secrets


def get_alphanumeric_unique_tag(tag_length: int) -> str:
    """Generates a random alphanumeric string (a-z0-9) of a specified length"""
    if tag_length < 1:
        raise ValueError("Unique tag length should be 1 or greater.")
    use_chars = string.ascii_lowercase + string.digits
    short_id = "".join(secrets.choice(use_chars) for _ in range(tag_length))
    return short_id
098c9888790faf6b771062e13a31f5e8f6eba4e0
37,156
def _parse_spectre_coefficients(hist_line):
    """
    parse the coefficients from the spectre line

    Parameters
    ----------
    hist_line : string
        "HISTORY 23: 7:2008 D1,2,3: 5.50348825884E+03 4.46070136915E-02 0.00000000000E+00"

    Returns
    -------
    coefficients : list
        [5.50348825884e+03, 4.46070136915e-02, 0.0e+00]
    """
    coeffs = hist_line[18:36], hist_line[36:54], hist_line[54:]
    coeffs = [float(c.replace("D", "e")) for c in coeffs]
    return coeffs
c26b6afe387c63b4383c8ecab4e2ad564ee351c1
250,460
def min_point(points, n_points):
    """
    Return the index of the lowest point (smallest y, ties broken by smallest x)

    :param points: tuple of lists, one per coordinate
    :param n_points: number of points
    :return: the index of the selected point in the list
    """
    min_y_indexes = [0]
    min_y = points[1][0]

    # Find the points with minimum y
    for k in range(1, n_points):
        if points[1][k] < min_y:
            min_y = points[1][k]  # keep min_y in sync with the new minimum
            min_y_indexes = [k]
        elif points[1][k] == min_y:
            min_y_indexes.append(k)

    # Among the points having the smallest y, find the point having the smallest x
    min_x = points[0][min_y_indexes[0]]
    min_index = min_y_indexes[0]
    for k in min_y_indexes:
        if points[0][k] < min_x:
            min_x = points[0][k]  # track the running minimum x as well
            min_index = k
    return min_index
c06117007693aba8877952f88b6155df143f663b
646,062
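A quick sanity check for min_point above, with hypothetical coordinates:

xs, ys = [3, 1, 2], [0, 0, 5]        # points (3,0), (1,0), (2,5)
assert min_point((xs, ys), 3) == 1   # y ties at 0; x=1 is the smallest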
def commajoin(*items):
    """
    Small helper function that joins all items by comma
    and maps types of items into strings.
    """
    return ",".join(map(str, items))
051de682a0f5588d61a7c0cbf72b0756235d61d0
127,744
def _strip_path_prefix(path, prefix):
    """Strip a prefix from a path if it exists and any remaining prefix slashes

    Args:
        path: <string>
        prefix: <string>

    Returns:
        <string>
    """
    if path.startswith(prefix):
        path = path[len(prefix):]
        if path.startswith("/"):
            path = path[1:]
    return path
9cd4460ffd318600287300686de748141442e0d1
668,536
import functools


def prod(factor):
    """
    The multiplicative analogue of sum(list).

    Return the product of all the elements in the list.
    """
    return functools.reduce(lambda x, y: x * y, factor, 1)
7da5daec5dcf4cc33f51655a7ed0286d4e0160f2
426,829
def add_computer(api, configuration, api_version, api_exception, hostname):
    """ Adds a computer to Deep Security Manager.

    :param api: The Deep Security API modules.
    :param configuration: Configuration object to pass to the api client.
    :param api_version: The version of the API to use.
    :param api_exception: The Deep Security API exception module.
    :param hostname: The hostname or IP address that resolves to the computer.
    :return: An Integer that contains the ID of the computer.
    """
    # Create the computer object
    computer = api.Computer()
    computer.host_name = hostname

    try:
        # Add the computer to Deep Security Manager
        computers_api = api.ComputersApi(api.ApiClient(configuration))
        new_computer = computers_api.create_computer(computer, api_version)
        return new_computer.id
    except api_exception as e:
        return "Exception: " + str(e)
50e3e9e59336cac9df81c52009e7d691ea30ec66
272,422
def fake_request(*responses):
    """
    Creates a request function that returns the supplied responses, one at
    a time. Making a new request after the last response has been returned
    results in a StopIteration error.
    """
    iterator = (response for response in responses)

    def request(method, url, **kwargs):
        return next(iterator)

    return request
e5a9a190129587f0c76fcabddfaca487fab3bd0c
629,393
def IsInTargetRange(
        target, error_ref, warning_percentage, error_percentage, value):
    """Test if a value falls into warning or error range around a target.

    Args:
        target: The target value.
        error_ref: The error reference as the base for warning/error percentage.
        warning_percentage: The percentage of error_ref to draw warning bounds.
        error_percentage: The percentage of error_ref to draw error bounds.
        value: The value of the variable.

    Returns:
        0 for normal, 1 for warning, and 2 for error.
    """
    warning_lower_bound = target - error_ref * warning_percentage
    warning_upper_bound = target + error_ref * warning_percentage
    error_lower_bound = target - error_ref * error_percentage
    error_upper_bound = target + error_ref * error_percentage

    if warning_lower_bound <= value <= warning_upper_bound:
        return 0
    elif error_lower_bound <= value <= error_upper_bound:
        return 1
    else:
        return 2
edcb405bd409d543005e95907faa1eaa0a1304a9
667,892
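A short usage sketch for IsInTargetRange above, with hypothetical bounds:

# target 100 with error_ref 100: warning band is +/-5, error band is +/-10.
assert IsInTargetRange(100, 100, 0.05, 0.10, 103) == 0   # normal
assert IsInTargetRange(100, 100, 0.05, 0.10, 108) == 1   # warning
assert IsInTargetRange(100, 100, 0.05, 0.10, 115) == 2   # error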
def KeyValuePairMessagesToMap(key_value_pair_messages):
    """Transform a list of KeyValuePair messages into a map.

    Args:
        key_value_pair_messages: a list of KeyValuePair messages.

    Returns:
        a map with a string as key and a string as value
    """
    return {msg.key: msg.value for msg in key_value_pair_messages}
7ab0d9a3dea7da762a559efa00ae50247ee8d2d4
16,092
def forward_and_adapt(x, criterion, model, optimizer):
    """Forward and adapt model on batch of data.

    Measure entropy of the model prediction, take gradients, and update params.
    """
    # forward
    outputs = model(x)
    # adapt
    loss = criterion(outputs)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return outputs
beecee742d2e8f8baa88279c928376d0c8c01656
163,096
def rfc822_date(date):
    """Return date in RFC822 format

    For reference, the format (in CLDR notation) is:
        EEE, dd MMM yyyy HH:mm:ss Z
    With the caveat that the weekday (EEE) and month (MMM) are always in
    English.

    Example:
        Sat, 19 Sep 2015 14:53:07 +0100

    For what it's worth, this doesn't strictly use the RFC822 date format,
    which is obsolete. (The current RFC of this type is 5322.) This should not
    be a problem: 822 calls for a two-digit year, and even the RSS 2.0 spec
    sample files (from 2003) use four digits.
    """
    weekday_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    weekday = weekday_names[date.weekday()]
    month = month_names[date.month - 1]
    template = '{weekday}, {d:%d} {month} {d:%Y %H:%M:%S %z}'
    return template.format(weekday=weekday, month=month, d=date)
0946ab904b17ea889dadc17497b2b94b58f8284c
552,679
def out_of_bounds(subject, start, end):
    """Check if the subject extends outside the given range on either side"""
    return subject.start < start or subject.end > end
329ed59560f277ac87a0acf045feaa9c1c5cd0b6
312,378
import torch


def pack(inputs, is_tensor=False):
    """Pack the inputs into tuple if they were a single tensor"""
    single = torch.is_tensor(inputs)
    outputs = (inputs,) if single else inputs
    return (outputs, single) if is_tensor else outputs
14c1f7c16e0871d1fa7ec265e4db062cdc62b82d
33,570
def convertMessagetoBinaryString(message):
    """convert ascii string or raw bytes (depending on if it is an ascii
    string) to string containing binary representation"""
    # take each character, pad to 8 bits, add to output string and return entire string
    output = ""
    if isinstance(message, str) and message.isascii():
        message = message.encode("ascii")
    for char in message:
        output += bin(char)[2:].zfill(8)
    return output
284ed20de4d3d1b7edff4d793c4807a60bd929c1
454,985
def is_dialog_complete(handler_input):
    """
    Checks whether the dialog is complete according to the dialog model

    :param handler_input: (HandlerInput)
    :return: (boolean)
    """
    if not hasattr(handler_input.get_request(), "dialogState"):
        return False
    return handler_input.get_request().dialogState == "COMPLETE"
c9137b497bf472e88f05f8395166db5b18fe5f51
28,429
def search_string(text: str, pattern: str, starting_point: int = 0) -> int:
    """
    Search a text for a pattern given a starting point

    Arguments:
        text - The text we're searching through
        pattern - The pattern we're searching for
        starting_point - The point in the string we'd like to start at

    Return:
        int indicating if there was a match. We return i for the index that
        matched, or -1 for indicating that there was no match
    """
    text_len = len(text)
    for i in range(starting_point, len(text)):
        if pattern[0].lower() == text[i].lower():
            window = len(pattern) + i
            if window > text_len:
                return -1
            if pattern.lower() == text[i:window].lower():
                return i
    return -1
c8f53270e12b552e2d53722027c7d9923ae5edbd
327,753
def apply_case(s, case):
    """Helper function that applies case to a string."""
    if case.upper() == "UPPER":
        s = s.upper()
    elif case.upper() == "LOWER":
        s = s.lower()
    elif case.upper() == "CAPITALIZE":
        s = s.capitalize()
    elif case.upper() == "TITLE":
        s = " ".join([w.capitalize() for w in s.split(" ")])
    return s
bb696e91f545c91d62e4c220a75634f481db0315
445,229
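A quick sketch of apply_case above (the case argument is matched case-insensitively):

assert apply_case("hello world", "title") == "Hello World"
assert apply_case("Hello", "upper") == "HELLO"
assert apply_case("Hello", "unknown") == "Hello"  # unrecognized case: unchanged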
def _value_sorter(feature_value):
    """Custom sorting function taking into account the structure of values.

    Given a feature value extracts its numeric portion, e.g.
    "18 ObligDoubleNeg" -> "18".

    Args:
        feature_value: (string) WALS feature value.

    Returns:
        An integer corresponding to the first portion of the string.
    """
    toks = feature_value.split(" ", maxsplit=1)
    assert len(toks) == 2, "%s: Invalid feature value" % feature_value
    val = int(toks[0])
    assert val > 0, "%s: Feature value should be positive. Found %d" % (
        feature_value, val)
    return val
d65f4fe352679e8255f4d43188ac4a2e540024ba
463,743
def get_last_conv_layer(keras_model):
    """
    Get last convolution layer name of keras model

    input: Keras model
    output: string of the name of the last convolution layer

    Make sure the last convolution layer has "conv" in its name!
    """
    layer_names = [layer.name for layer in keras_model.layers]
    layer_names.reverse()
    # This loop cycles the reversed list and extracts the name of the last conv layer
    for i in range(len(layer_names)):
        if "conv" in layer_names[i]:
            conv_layer = layer_names[i]
            print("The last convolution layer is:", conv_layer)
            return conv_layer
b53eb42b9b9f83f790b3b194026ae3f8571db070
666,577
def cheating_matrix(seating_chart):
    """
    Calculate and return the probabilities of cheating for each position in a rxc grid

    :param seating_chart: A nested list representing a rxc grid
    :return: A nested list, the same size as seating_chart, with each element
        representing that position's cheating probability
    """
    # Create matrix of probabilities by location
    prob_matrix = [[.025, .3, .025],
                   [.2, 0, .2],
                   [.025, .2, .025]]

    # Create blank nested list for saving calculated probabilities (same size as seating_chart)
    calc_prob = []
    for ch_row in range(len(seating_chart)):
        new_row = []
        for ch_col in range(len(seating_chart[ch_row])):
            new_row.append(seating_chart[ch_row][ch_col])
        calc_prob.append(new_row)

    # calculate probabilities for each spot in seating_chart, store in calc_prob
    for row in range(len(seating_chart)):
        for col in range(len(seating_chart[row])):
            calc_prob[row][col] = 0
            for r_adj in range(-1, 2):
                for c_adj in range(-1, 2):
                    if 0 <= row + r_adj < len(seating_chart):
                        if 0 <= col + c_adj < len(seating_chart[row]):
                            if seating_chart[row][col] == seating_chart[row + r_adj][col + c_adj]:
                                calc_prob[row][col] += prob_matrix[1 + r_adj][1 + c_adj]
    return calc_prob
f9eaa1eb187ceeb0252d208002c739da3f70743c
65,347
def run(*args, initial=None):
    """Creates and runs the pipe calling functions in given sequence.

    Args:
        *args: functions to call in sequence
        initial: argument to the first function (pipe argument)

    Returns:
        result of the last function called
    """
    data = initial
    for fun in args:
        data = fun(data)
    return data
1ffe668a8ca8f17e248c3725e77ee5a28d838243
337,664
def simple_hash(token: str, output_size: int) -> int:
    """
    This runs a simple, deterministic hash function on the input token.
    This needs to be exactly duplicated in Java, and is very sensitive.
    In general, don't change this function.

    :param token: the token to hash
    :param output_size: the output size to mod into
    :return: a number in [0, output_size)
    """
    encoded = token.encode("utf-8")
    hash_sum = 0
    for letter in encoded:
        hash_sum = ((31 * hash_sum) + letter) % output_size
    return hash_sum
bc10c846485f1a261673c9ac3251ccc715fcc275
219,210
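A small determinism check for simple_hash above; the value is easy to verify by hand:

# "abc" -> bytes 97, 98, 99; folding with ((31 * h) + b) % 100 gives 97, 5, 54.
assert simple_hash("abc", 100) == 54
assert simple_hash("abc", 100) == simple_hash("abc", 100)  # deterministic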
def has_fact_sheets(responses, derived):
    """ Return whether or not the user is submitting fact sheets """
    return any([derived['show_fact_sheet_b'],
                derived['show_fact_sheet_c'],
                derived['show_fact_sheet_d'],
                derived['show_fact_sheet_e'],
                derived['show_fact_sheet_f'],
                ])
0f5e869476ce83debb1c88d9c367594e6c9e952e
566,883
def deleted_genes_to_reactions(model, genes):
    """ Convert a set of deleted genes to the respective deleted reactions.

    Arguments:
        model (CBModel): model
        genes (list): genes to delete

    Returns:
        set: deleted (inactive) reactions
    """
    if isinstance(genes, str):
        genes = [genes]

    active_genes = set(model.genes) - set(genes)
    active_reactions = model.evaluate_gprs(active_genes)
    inactive_reactions = set(model.reactions) - set(active_reactions)

    return inactive_reactions
22cd57d35efe6b02aaa0cb3d5e05b9f817f63d10
675,723
def ensure_tuple(tuple_or_mixed, *, cls=tuple):
    """
    If it's not a tuple, let's make a tuple of one item.
    Otherwise, not changed.

    :param tuple_or_mixed: value to wrap
    :return: tuple
    """
    if isinstance(tuple_or_mixed, cls):
        return tuple_or_mixed

    if tuple_or_mixed is None:
        return tuple.__new__(cls, ())

    if isinstance(tuple_or_mixed, tuple):
        return tuple.__new__(cls, tuple_or_mixed)

    return tuple.__new__(cls, (tuple_or_mixed, ))
d7d6ca13a86391f4777da2716d5d9e4fe242698e
663,452
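A few illustrative calls to ensure_tuple above:

assert ensure_tuple(5) == (5,)          # scalar is wrapped
assert ensure_tuple((1, 2)) == (1, 2)   # tuple passes through unchanged
assert ensure_tuple(None) == ()         # None becomes the empty tuple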
def df_div(df0, df1, axis=1):
    """Wrapper function to divide two Pandas data frames in a functional manner.

    Args:
        df0 (:obj:`pd.DataFrame`): First data frame.
        df1 (:obj:`pd.DataFrame`): Second data frame.
        axis (int): Axis; defaults to 1.

    Returns:
        The quotient from applying :meth:`pd.DataFrame.div` from ``df0`` to ``df1``.
    """
    return df0.div(df1, axis=axis)
1b362ea7ae920ef79de68aeec7fb25eeba06ca3b
52,720
def _is_out_of_order(segmentation):
    """
    Check if a given segmentation is out of order.

    Examples
    --------
    >>> _is_out_of_order([[0, 1, 2, 3]])
    False
    >>> _is_out_of_order([[0, 1], [2, 3]])
    False
    >>> _is_out_of_order([[0, 1, 3], [2]])
    True
    """
    last_stroke = -1
    for symbol in segmentation:
        for stroke in symbol:
            if last_stroke > stroke:
                return True
            last_stroke = stroke
    return False
193fdf5d71fc9a515140c94ea550d296d2e2db62
231,857
def convert_newlines(msg):
    """A routine that mimics Python's universal_newlines conversion."""
    return msg.replace('\r\n', '\n').replace('\r', '\n')
38459e52b040fd43de65b5a0b3f5ce85df585917
126,291
import math


def is_square(x):
    """Returns True if x is a perfect square. Otherwise False.

    Examples
    --------
    >>> is_square(1)
    True
    >>> is_square(-1)
    False
    >>> is_square(4)
    True
    """
    try:
        return int(math.sqrt(x)) == math.sqrt(x)
    except ValueError:
        return False
863c8eddd1d9b6256f5a8ddf7c32366c0ddbcfb2
495,949
def pairs(seq):
    """Return a sequence in pairs.

    Arguments:
        seq (sequence): A sequence with an even number of elements. If the
            number is odd, the last element will be ignored.

    Returns:
        A zip object with tuple pairs of elements.

    Example:
        >>> list(pairs([1,2,3,4]))
        [(1, 2), (3, 4)]
    """
    return zip(*[iter(seq)] * 2)
f97c2954cf2e64990fa30ec8a318e3c618c9b889
113,460
import re


def unixify_string(string):
    """
    Sanitizes a string, making it nice for Unix by processing special characters.

    Removes: ( ) ! ? ,
    Replaces with underscores: ' / and spaces

    Parameters
    ----------
    string : str
        the string to sanitize

    Returns
    -------
    str
        the sanitized string
    """
    return re.sub("['/]", '_', re.sub('[()!?,]', '', string)).replace(' ', '_')
2231f911e4653ccae2363881cf1a94a5fb2cf366
692,209
import uuid


def _create_uuid() -> uuid.UUID:
    """Creates a valid UUID 4.

    Returns:
        uuid.UUID: A UUID
    """
    return uuid.uuid4()
a437b999bc20ce550f916a62432bb51c4832a76c
214,858
def null_processor(value):
    """Processor that does nothing, just returns the value"""
    return value
bb4682564a17271b8282a850cabb3af456771633
135,519
def autotype_seq(entry):
    """Return value for autotype sequence

    Args:
        entry - dict
    Return:
        string
    """
    return next((i.get('value') for i in entry['fields']
                 if i.get('name') == 'autotype'), "")
ff2fb811656577ced76ede843def2ef070c8cd43
660,766
import torch


def new_arange(x, *size):
    """
    Return a Tensor of `size` filled with a range function on the device of x.
    If size is empty, using the size of the variable x.
    """
    if len(size) == 0:
        size = x.size()
    return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
cfd079165a15957c8b4b57db8bc5e1bbfac63d13
352,464
import shutil


def zipfile_make_archive(dirname, to=None, **kwargs):
    """
    Thin layer over shutil.make_archive that defaults `to` to the same name
    as the directory.

    :param dirname: (str) The directory name
    :param to: (str, default None) The file path to create the archive in.
    :param kwargs: shutil.make_archive(**kwargs)
    :return: (str) The `to` file path.
    """
    if to is None:
        to = dirname
    return shutil.make_archive(to, "zip", root_dir=dirname, **kwargs)
1d72dc244e625ce531c128eb02bf7f7ad6b1856d
374,813
def pegar_carta_index(lista_jogadores, jogador_da_vez, index_carta):
    """
    -> Fetches a card's dict by its index in the list.

    :param lista_jogadores: list containing the players' decks
    :param jogador_da_vez: the player whose turn it is
    :param index_carta: index of the card the user wants
    :return: the dict of the requested card
    """
    return lista_jogadores[jogador_da_vez][index_carta]
98cd990ccd9900267f165ace2d7c4dccb6ce62d2
152,399
def _unique_combinations(model, ignore_pk=False):
    """
    Returns an iterable of iterables to represent all the unique constraints
    defined on the model.

    For example given the following model definition:

        class ExampleModel(models.Model):
            username = models.CharField(unique=True)
            email = models.EmailField(primary_key=True)
            first_name = models.CharField()
            second_name = models.CharField()

            class Meta:
                unique_together = ('first_name', 'second_name')

    This method would return

        [
            ['username'],  # from field level unique=True
            ['email'],  # implicit unique constraint from primary_key=True
            ['first_name', 'second_name']  # from model meta unique_together
        ]

    Fields with a unique constraint defined in a concrete parent model are
    ignored since they're checked when that model is saved.
    """
    # first grab all the unique together constraints
    unique_constraints = [list(together_constraint)
                          for together_constraint in model._meta.unique_together]

    # then the column level constraints - special casing PK if required
    for field in model._meta.fields:
        if field.primary_key and ignore_pk:
            continue
        if field.unique and field.model == model:
            unique_constraints.append([field.name])

    # the caller should sort each inner iterable really - but we do this here
    # for now - motive being that interpolated keys from these values are consistent
    return [sorted(constraint) for constraint in unique_constraints]
dfb9b9b451af72d0841c8311bb9438b39d546993
547,437
def _remove_trailing_chars(text: str) -> str:
    """
    Removes certain characters from the beginning and end of a string.
    """
    chars = ['.', '@', '/', '&', '-', "'"]
    for char in chars:
        text = text.strip(char)
    return text
b19e1d369fc858a2a269fdfc14a128907ecf0bc3
91,089
import torch


def compute_loss(inputs, outputs, criterion, edge_criterion):
    """Compute loss automatically based on what the model output dict contains.

    'doc_logits'        -> document-label CE loss
    'para_logits'       -> paragraph-label CE loss
    'para_edge_weights' -> additional edge CE loss
    """
    if 'para_logits' in outputs:
        loss = criterion(outputs['para_logits'], inputs['para_label'])
        loss *= (inputs['para_lengths'] != 0).to(torch.float)
        loss = loss.sum()
    else:
        loss = criterion(outputs['doc_logits'], inputs['label']).sum()

    if 'para_edge_weights' in outputs:
        edge_loss_lambda = 0.1
        edge_loss = edge_criterion(outputs['para_edge_weights'],
                                   1 - inputs['para_label'])
        edge_loss *= (inputs['para_lengths'] != 0).to(torch.float)
        loss += edge_loss_lambda * edge_loss.sum()
    return loss
fac6c23b1654b830d57f9197e22372a90b7d9176
683,503
def CanonicalizeAddress(addr):
    """Strip angle brackets from email address iff not an empty address ("<>").

    Args:
        addr: the email address to canonicalize (strip angle brackets from).

    Returns:
        The addr with leading and trailing angle brackets removed unless the
        address is "<>" (in which case the string is returned unchanged).
    """
    if addr == '<>':
        return addr
    return addr.lstrip('<').rstrip('>')
d16fdfd36e30a6b6b591cf7540e790e109e4cd1a
212,761
def merge_info(chr_list, pos_split, name_pos_split_list, no_chrom_list):
    """
    Merges all lists created above to be used for DataFrame.

    Args:
        chr_list (list): the list with the chromosome info of product
        pos_split (list): the list with the split positions
        name_pos_split_list (list): the list with the names and positions split
        no_chrom_list (list): the list without chrom info

    Returns:
        merged_list (list): list with all info to be taken into dataframe
    """
    merged_list = []
    for chrom, pos, name, prims in zip(chr_list, pos_split,
                                       name_pos_split_list, no_chrom_list):
        merged_items = chrom + pos + name + prims
        merged_list.append(merged_items)
    return merged_list
27487612a4d127a22598918bea41a2f426f8b6f3
263,527
from datetime import datetime


def first_of_year(dte):
    """
    Return the first day of the year for the specified date.

    Argument:
        dte - a date
    """
    year = dte.year
    first = datetime(year, 1, 1)
    return first
3b66d923fd6ff13c9bb22abefc6df60fe935d5f1
306,094
def get_mip_at_res(vol, resolution):
    """Find the mip that is at least a given resolution

    Args:
        vol (cloudvolume.CloudVolume): CloudVolume object for desired precomputed volume
        resolution (int): Desired resolution in nanometers

    Returns:
        tuple: mip and resolution at that mip
    """
    tmp_mip = 0
    tmp_res = 0
    for i, scale in enumerate(vol.scales):
        if (scale["resolution"] <= resolution).all():
            tmp_mip = i
            tmp_res = scale["resolution"]
        elif i == 0:
            # even the finest scale exceeds the requested resolution
            tmp_res = scale["resolution"]
            return tmp_mip, tmp_res
    return tmp_mip, tmp_res
1d41583439a39789cc49e54b9e840c4d53936673
454,460
def array_to_grader(array, epsilon=1e-4):
    """Utility function to help preparing Coursera grading conditions descriptions.

    Args:
        array: iterable of numbers, the correct answers
        epsilon: the generated expression will accept the answers with this
            absolute difference with provided values

    Returns:
        String. A Coursera grader expression that checks whether the user
        submission is in (array - epsilon, array + epsilon)"""
    res = []
    for element in array:
        if isinstance(element, int):
            res.append("[{0}, {0}]".format(element))
        else:
            res.append("({0}, {1})".format(element - epsilon, element + epsilon))
    return " ".join(res)
cf608271e821da48a62c67a932ce0413aac19466
62,259
def _neighbor_keys_from_bond_keys(key, bnd_keys):
    """ Determine neighbor keys of an atom from the bond keys
    """
    nkeys = []
    for bnd_key in bnd_keys:
        if key in bnd_key:
            nkey, = bnd_key - {key}
            nkeys.append(nkey)
    return frozenset(nkeys)
2cf82e8d506eec64203077a063e2b37fc4413e4b
118,054
def LOS(arr):
    """Find the longest ordered (strictly increasing) subsequence in an array.

    Arguments:
        arr {int} -- a list of numbers

    Returns:
        tuple -- the length of the longest ordered subsequence and the
        subsequence itself
    """
    n = len(arr)

    # Los[i] holds the length of the longest ordered subsequence ending at i;
    # helper[i] holds the index of the previous element in that subsequence.
    Los = []
    helper = []
    for i in range(0, n):
        helper.append(i)
        Los.append(1)

    # In bottom-up manner, generate the LOS (Longest Ordered Subsequence)
    for i in range(1, n):
        for j in range(0, i):
            if arr[i] > arr[j] and Los[i] < Los[j] + 1:
                Los[i] = Los[j] + 1
                helper[i] = j

    # maximum is the length of the LOS
    maximum = max(Los)

    # Gives the index of the last item in the LOS
    idx = Los.index(maximum)

    ordersequence = []
    for _ in range(0, maximum):
        # Push the elements onto the list ordersequence
        ordersequence.append(arr[idx])
        idx = helper[idx]
    ordersequence.reverse()

    # Returning the length and the ordered subsequence
    return (max(Los), ordersequence)
0709402fc67e0aa05591a2dd7817be2c6e2920ae
413,411
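A worked call for LOS above:

length, seq = LOS([10, 22, 9, 33, 21, 50])
assert (length, seq) == (4, [10, 22, 33, 50])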
def get_url_without_trailing_slash(value):
    """
    Function which strips a trailing slash from the provided url if one is
    present.

    :param value: URL to format.
    :type value: ``str``

    :rtype: ``str``
    """
    result = value[:-1] if value.endswith("/") else value
    return result
b9179b3920a62423623d51496cee523643a2c2e2
508,168
def oposto(a):
    """
    Negates the literal according to the algorithm's conventions.

    :param a: literal
    :return: negation of the literal
    """
    if a[0] == '~':
        aux = a[1:]
    else:
        aux = ''.join(['~', a])
    return aux
d32a6b428ea276d11f85d6df5a17f7bef280327e
101,290
import pytz


def getAwareTime(tt):
    """
    Generates a timezone-aware timestamp from a timezone-unaware timestamp

    PARAMETERS
    ------------
    :param tt: datetime timezone-unaware timestamp

    RETURNS
    ------------
    :return: datetime timezone-aware timestamp
    """
    timezone = pytz.timezone("Europe/Amsterdam")
    return timezone.localize(tt)
1b286c92c7f5d8f0ff48d77296489fbd358c14ce
707,958
def append_line(buf, linearr):
    """Append lines

    Args:
        buf (obj): Nvim buffer
        linearr (Array[string]): Line contents

    Returns:
        suc (bool): True if success
    """
    for line in linearr:
        buf.append(line)
    return True
9ae3c08b119c01276a95c1adf9e8372730f08154
51,971
def get_mac_addr_from_dbus_path(path):
    """Return the mac address from a dev_XX_XX_XX_XX_XX_XX dbus path"""
    return path.split("/")[-1].replace("dev_", '').replace("_", ":")
a8603f2f7b6202ab9bafe5f911606efd8dc54358
21,599
def is_biallelic(variant):
    """Returns True if variant has exactly one alternate allele."""
    return len(variant.alternate_bases) == 1
27caf4f34bc76a477b0fca193c36383ae7fd482e
209,369
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments

    >>> equated_monthly_installments(25000, 0.12, 3)
    830.3577453212793
    >>> equated_monthly_installments(25000, 0.12, 10)
    358.67737100646826
    >>> equated_monthly_installments(0, 0.12, 3)
    Traceback (most recent call last):
        ...
    Exception: Principal borrowed must be > 0
    >>> equated_monthly_installments(25000, -1, 3)
    Traceback (most recent call last):
        ...
    Exception: Rate of interest must be >= 0
    >>> equated_monthly_installments(25000, 0.12, 0)
    Traceback (most recent call last):
        ...
    Exception: Years to repay must be an integer > 0
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
5352513d7a0eb4965c0c5c75bebfefcb1fe50d17
435,386
def tag_function(entry):
    """
    Default tag function

    Given a PDB report entry, generate a list of tags to apply to the entry
    when created in the database. This function can be overridden through the
    PDB_TAG_FUNCTION settings variable

    :param entry: PDB report entry
    :return: list of tags
    """
    return [entry['diffrn_source.pdbx_synchrotron_beamline'].split()[-1], ]
4f1cd217d07043ea83bd2ddcac176d31f42cdc07
335,315
def give_pendant(character: str) -> str:
    """Returns the matching opening bracket for each closing bracket."""
    if character == ')':
        return '('
    if character == ']':
        return '['
    if character == '}':
        return '{'
    if character == '>':
        return '<'
    raise ValueError(f"Unknown character '{character}' encountered.")
c08bd0c836555db71993332b8f2d9eb1efa69eb5
443,212
import math


def calc_node_distance(graph, node_1, node_2):
    """
    Calculates distance between two nodes of graph.
    Requires node attribute 'position' as shapely Point, e.g. Point(10, 15.2)

    Parameters
    -----------
    graph : nx.graph
        Graph object of networkx
    node_1 : int
        Node id of node 1
    node_2 : int
        Node id of node 2

    Returns
    --------
    distance : float
        Distance between nodes
    """
    assert graph.has_node(node_1)
    assert graph.has_node(node_2)

    node_1_x = graph.nodes[node_1]['position'].x
    node_1_y = graph.nodes[node_1]['position'].y
    node_2_x = graph.nodes[node_2]['position'].x
    node_2_y = graph.nodes[node_2]['position'].y

    distance = math.sqrt((node_1_x - node_2_x) ** 2
                         + (node_1_y - node_2_y) ** 2)
    return distance
a0a0def854fe2cef4e1fa59567b05a98874d8637
664,918
def pad_base64(data):
    """
    Pads a base64 encoded string

    Args:
        data (str): The base64 encoded string

    Returns:
        str: The padded base64 encoded string
    """
    data = data.replace("-", "+").replace("_", "/")
    missing_padding = len(data) % 4
    if missing_padding:
        data += "=" * (4 - missing_padding)
    return data
f2b82b7e7379d5d6fbc80cb41820d1288e4eca78
305,481
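A quick round-trip check for pad_base64 above; the input is the unpadded encoding of b"abcd":

import base64

padded = pad_base64("YWJjZA")    # 6 chars -> two '=' of padding appended
assert padded == "YWJjZA=="
assert base64.b64decode(padded) == b"abcd"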
def predictions_for_df(df, inferrer):
    """Returns df with a column that's the activations for each sequence.

    Args:
        df: DataFrame with columns 'sequence' and 'sequence_name'.
        inferrer: inferrer.

    Returns:
        pd.DataFrame with an added 'predictions' column. 'predictions' has
        type np.ndarray, whose shape depends on inferrer.activation_type.
    """
    working_df = df.copy()
    working_df['predictions'] = inferrer.get_activations(
        working_df.sequence.values).tolist()
    return working_df
7a5979162836719ed875ec147808a2c31c254f33
77,134
def encode_time(time):
    """ Converts a time string in HH:MM format into minutes """
    time_list = time.split(":")
    if len(time_list) >= 2:
        return (int(time_list[0]) * 60) + int(time_list[1])
    return 0
938f0eba5dbbf2b8e61eeeddd249eb605bc618fd
77,582
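An example call for encode_time above:

assert encode_time("02:30") == 150  # 2 h * 60 + 30 min
assert encode_time("bad") == 0      # no ':' separator -> falls back to 0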
def parse_lexicon(lex_file):
    """
    Parse the lexicon file and return it in dictionary form.

    Args:
        lex_file (str): filename of lexicon file with structure '<word> <phone1> <phone2>...'
                        eg. peppers p eh p er z

    Returns:
        lex (dict): dictionary mapping words to list of phones
    """
    lex = {}  # create a dictionary for the lexicon entries (this could be a problem with larger lexica)
    with open(lex_file, 'r') as f:
        for line in f:
            line = line.split()  # split at each space
            lex[line[0]] = line[1:]  # first field the word, the rest is the phones
    return lex
6ddf0705efc7f02f15c290515d9bc22eac5f48e1
438,024
def MultiplyTwoNumbers(a, b):
    """
    multiplying two numbers

    input: a, b - two numbers
    output: returns the multiplication
    """
    c = a * b
    return c
ee083b6133de4e2c8da434b937dd801d074ae31b
663,561
from pathlib import Path
import pickle


def load_trained_model(fname: str):
    """
    Loads a saved ModelData object from file.

    Args:
        fname (str): Complete path to the file.

    Returns:
        A ModelData object containing the model, list of titles, list of
        authors, list of genres, list of summaries, training corpus, and
        test corpus.
    """
    fp = Path(fname)
    with fp.open("rb") as f:
        model_data = pickle.load(f)
    return model_data
3bdfa3f090fa5efcd54b17bd47a0cd4ea57e1c4a
8,600
def get_visual(screen, desired_depth=32):
    """get_visual() returns the visual id of the screen @ some depth

    Returns an int (xcb_visualid_t) corresponding to the screen's visualid.
    On failure it returns None.

    For an ARGB visual -> desired_depth=32
    For a RGB visual   -> desired_depth=24

    If you just want the screen's default visual you can do the following:
    >>> conn = xcffib.connect(display=os.getenv('DISPLAY', ':0'))
    >>> screen = conn.get_setup().roots[conn.pref_screen]
    >>> visual = screen.root_visual

    On my computer the default depth is only 24bit (screen.root_depth),
    even when running a compositor.
    """
    for depth in tuple(screen.allowed_depths):
        for v in depth.visuals:
            if depth.depth == desired_depth:
                msg = 'For a screen depth of {} bits the visual_id is {}'
                print(msg.format(desired_depth, v.visual_id))
                return v.visual_id
    return None
66fe76aab64524b1cba8b04f0462e1ddf14bc527
552,445
def _read_multi_smiles_from_file(filepath):
    """Read multiple SMILES strings from file"""
    smiles = []
    with open(filepath) as input_file:
        for line in input_file:
            _smiles, _id = line.split()
            smiles.append(_smiles)
    return smiles
2a76a0e47e2f88af89697b654e698487ebbd8040
337,090
def collatz(num):
    """
    if n is even, n/2
    if n is odd, 3n+1
    """
    newlist = [num]
    while num != 1:
        if num % 2 == 0:
            num //= 2
        elif num % 2 == 1:
            num = 3 * num + 1
        newlist.append(num)
    return newlist
4929b72542e1d64f22abd26317cc05dba6d32805
402,119
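An example trajectory for collatz above:

assert collatz(6) == [6, 3, 10, 5, 16, 8, 4, 2, 1]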
async def valid_file_extension(file_extension: str) -> bool:
    """Return True if valid file-extension."""
    if file_extension.lower() in ["ttl", "html", "png", "pdf"]:
        return True
    return False
196b062b1b89cfedd2e7376b9bd2cc16f3042b12
442,574
def euler(initial, tendency, h=1):
    """Integrate forward in time using Euler's method of numerical integration.

    initial + h*tendency

    Arguments
    ---------
    initial : float
        The initial state.
    tendency : float
        The rate of change in the initial state.

    Keyword arguments
    -----------------
    h = 1 : float
        The timestep duration.
    """
    return initial + h * tendency
aed541f5d4cf9b5dd3d06f3a23fcd6ccc39cffba
546,524
def prevent_cache(response):
    """ Disable response caching for dashboard """
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    return response
0f0bde2c63fd94262b8dab19a9831708d5caa2fb
328,763
import struct


def _read_hkey(buff):
    """Reads from buff and returns hkey (a 128-bit integer)."""
    a, b = struct.unpack('=QQ', buff)
    return (a << 64) | b
675b9b3e14758eaae71500f0f91d1a1555821f5a
516,856
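A round-trip sketch for _read_hkey above:

import struct

packed = struct.pack('=QQ', 1, 2)          # high and low 64-bit halves
assert _read_hkey(packed) == (1 << 64) | 2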
def generateCSV(package_exports):
    """
    Create a CSV file based on the exported data

    It expects to receive a list of objects with the following keys:
    - package
    - name
    - type
    """
    csv_content = ""
    for package_export in package_exports:
        csv_content += "%s,%s,%s\n" % (
            package_export["package"],
            package_export["name"],
            package_export["type"]
        )
    return csv_content
405de2ee4cdf14dae3805844ac5f1a31b2d43b23
302,997
def _image_name_from_key(agent_key: str) -> str:
    """Generate docker image name from an agent key.

    Args:
        agent_key: agent key in the form agent/organization/name.

    Returns:
        image_name: image name in the form agent_organization_name.
    """
    return agent_key.replace('/', '_').lower()
0a7c2d0824a1c809364048def899d523c5853d48
568,071
def atom_in_core(atom, separated_cores):
    """
    Checks if an atom is in any of the lists of atoms.
    """
    for core in separated_cores:
        if atom in core:
            return True
    return False
2989a482d92c588098d960ec862aee8bc2f71a51
458,616
def timestamp(location):
    """Gets timestamp from the cur.txt associated with location."""
    with open(f'{location}_cur.txt', 'r') as current_file:
        current_file.readline()  # skip the first line
        return current_file.readline()
4bcb2aeedc472de573cfd54b9261d9615309ea20
543,363
import torch


def predict_batch(model, x_batch, dynamics, fast_init):
    """
    Compute the softmax prediction probabilities for a given data batch.

    Args:
        model: EnergyBasedModel
        x_batch: Batch of input tensors
        dynamics: Dictionary containing the keyword arguments for the
            relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward initialization is
            used for the prediction

    Returns:
        Softmax classification probabilities for the given data batch
    """
    # Initialize the neural state variables
    model.reset_state()

    # Clamp the input to the test sample, and remove nudging from output
    model.clamp_layer(0, x_batch.view(-1, model.dimensions[0]))
    model.set_C_target(None)

    # Generate the prediction
    if fast_init:
        model.fast_init()
    else:
        model.u_relax(**dynamics)

    return torch.nn.functional.softmax(model.u[-1].detach(), dim=1)
61102cfa3bcb3e7d52e9f3eca8c97db4d726c1a7
4,944
import logging


def main(num: int) -> int:
    """Activity function performing a specific step in the chain

    Parameters
    ----------
    num : int
        number whose value to increase by one

    Returns
    -------
    int
        the input, plus one
    """
    logging.info(f"Activity Triggered: {num}")
    return num + 1
e3f5d45a82e37c1dd3dfa703e557e53fe64339f3
521,604
import importlib


def _import_class(cls: str):
    """Take a string FQP and return the imported class or identifier.

    cls is of the form "package.module.klass".
    """
    mod, name = cls.rsplit(".", maxsplit=1)
    module = importlib.import_module(mod)
    return getattr(module, name)
b2b3cddf49b88f99b35c6720d12d96dfa007441c
45,196
def target_string_to_int(target_value):
    """
    Convert a String-based into an Integer-based target value.
    """
    if target_value == "Iris-setosa\n":
        return 0
    elif target_value == "Iris-versicolor\n":
        return 1
    else:
        return 2
2297bf37fe45f028a89e12c4c05bbe7c8b71e384
287,421
def f1(p, r):
    """
    Computes F1

    Args:
        p (float): precision
        r (float): recall

    Returns:
        float: F1
    """
    try:
        return 2 * (p * r) / (p + r)
    except ZeroDivisionError:
        return 0
59c91941a86fa8e91f6b5ce83d2e966004b4f0b9
207,499
import six


def get_unicode_dicts(iterable):
    """
    Iterates iterable and returns a list of dictionaries with keys and values converted to Unicode

    >>> gen = ({'0': None, 2: 'two', u'3': 0xc0ffee} for i in range(3))
    >>> get_unicode_dicts(gen)
    [{u'2': u'two', u'0': None, u'3': u'12648430'}, {u'2': u'two', u'0': None, u'3': u'12648430'}, {u'2': u'two', u'0': None, u'3': u'12648430'}]
    """
    def none_or_unicode(val):
        return six.text_type(val) if val is not None else val

    rows = []
    for row in iterable:
        rows.append({six.text_type(k): none_or_unicode(v)
                     for k, v in six.iteritems(row)})
    return rows
f5d96224b10aeeab99621d18ae2ec3b326dd8892
589,004
def flat_clfs(params) -> dict:
    """Flatten the classifiers for easier access."""
    flat = {}
    for clfs in params["classifiers"].values():
        flat.update(clfs)
    return flat
bb3fbad8f9866848648f6e8a323553534727b9e3
637,564
def auth_token(token):
    """
    A token stored for a user.
    Format: token:[token]
    """
    return f"token:{token}"
55f123b5f0e9c5c77754a44408d22e8f51838e18
608,790
def print_columns(objects, cols=3, gap=2):
    """ Print a list of items in rows and columns """
    # if the list is empty, do nothing
    if not objects:
        return ""

    # make sure we have a string for each item in the list
    str_list = [str(x) for x in objects]

    # can't have more columns than items
    if cols > len(str_list):
        cols = len(str_list)

    max_length = max([len(x) for x in str_list])

    # get a list of lists which each represent a row in the output
    row_list = [str_list[i:i+cols] for i in range(0, len(str_list), cols)]

    # join together each row in the output with a newline
    # left justify each item in the list according to the longest item
    output = '\n'.join([
        ''.join([x.ljust(max_length + gap) for x in row_item])
        for row_item in row_list
    ])
    return output
c38112cc5410872f25122d2f428ecfed71c40e6a
196,590
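A short usage sketch for print_columns above (each cell is left-justified to the longest item plus the gap, so rows carry trailing spaces):

print(print_columns(["a", "bb", "ccc", "dd", "e"], cols=2))
# a    bb
# ccc  dd
# e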