Dataset schema: content (string, lengths 39 to 9.28k), sha1 (string, length 40), id (int64, 8 to 710k).
import torch


def get_prediction_ids(predictions):
    """ Get the label ids from the raw prediction outputs """
    tensor = torch.tensor(predictions)
    softmax = torch.nn.functional.softmax(tensor, dim=-1)
    argmax = torch.argmax(softmax, dim=-1)
    return argmax.tolist()
51b93b23138c1d32cdba27d218ad01a87ebccb93
628,460
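A quick sanity check for get_prediction_ids (a sketch, assuming torch is installed): softmax is monotonic, so it never changes the argmax; the softmax step above is for readability, not correctness.

import torch

logits = [[0.2, 1.5, -0.3], [2.0, 0.1, 0.4]]
assert get_prediction_ids(logits) == [1, 0]
assert get_prediction_ids(logits) == torch.argmax(torch.tensor(logits), dim=-1).tolist()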
def get_names_from_rows(frows):
    """
    Gets names from workbook rows and returns a sorted list of unique names.

    Returns
    -------
    names : list
        ['last, first', 'last, first'] ... built from columns 2 and 1 of each row.
    """
    names = []
    for row in frows:
        name = row[2] + ', ' + row[1]
        if name not in names:
            names.append(name)
    return sorted(names)
449b4a00b1e4000b17fd6618e296605cde462c77
421,467
def quote_string(arg: str) -> str:
    """Quote a string, preferring double quotes unless the string
    contains one (strings containing both quote characters are not
    escaped)."""
    if '"' in arg:
        quote = "'"
    else:
        quote = '"'
    return quote + arg + quote
ed982c56dfeb57c8c9244d693bbc94289b29f90c
558,451
def _compute_negative_examples(
    label, all_items, pos_items, item_to_labels, label_graph, assert_ambig_neg
):
    """
    Compute the set of negative examples for a given label.
    This set consists of all items that are not labelled with
    a descendant of the label, but are also not labelled with
    an ancestor of the label (can include siblings).
    """
    anc_labels = label_graph.ancestor_nodes(label)
    candidate_items = set(all_items) - set(pos_items)
    if assert_ambig_neg:
        neg_items = list(candidate_items)
    else:
        # Remove items from the negatives that are labelled
        # most specifically as an ancestor of the current label
        final_items = set()
        for item in candidate_items:
            ms_item_labels = label_graph.most_specific_nodes(
                item_to_labels[item]
            )
            ms_item_labels = set(ms_item_labels)
            if len(ms_item_labels & anc_labels) == 0:
                final_items.add(item)
        neg_items = list(final_items)
    return neg_items
1e09ea6b88f03801eeb04103deeb62f642d54747
652,293
def all_unique(s):
    """
    Returns a boolean which is True if each element in iterable was
    used only once.

    >>> all_unique('abcd')
    True
    >>> all_unique('abca')
    False
    """
    seen_it = {}
    for c in s:
        if c in seen_it:
            return False
        else:
            seen_it[c] = True
    return True
85d1888f1c8314ccf96940ef48aa04ccc7053e05
302,800
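For hashable elements, all_unique is equivalent to a set-length comparison; the hypothetical variant below always consumes the whole iterable, whereas the loop version stops at the first repeat.

def all_unique_via_set(s):
    # Trades early exit for brevity
    items = list(s)
    return len(set(items)) == len(items)

assert all_unique_via_set('abcd') and not all_unique_via_set('abca')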
def recursive_dict_filling(dct, keys, value):
    """
    Recursively add value to an arbitrary deep dictionary structure
    based on a list of keys.
    """
    if not keys:
        return value
    else:
        if keys[0] not in dct:
            dct[keys[0]] = dict()
        dct[keys[0]] = recursive_dict_filling(dct[keys[0]], keys[1:], value)
        return dct
2f9dfaaec843022501fef2dd0b39a7b2ecaa05a7
416,944
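Usage sketch for recursive_dict_filling (hypothetical keys and values):

d = recursive_dict_filling({}, ['a', 'b', 'c'], 42)
assert d == {'a': {'b': {'c': 42}}}
# Existing branches are reused, so a second call extends the same tree
recursive_dict_filling(d, ['a', 'b', 'z'], 7)
assert d == {'a': {'b': {'c': 42, 'z': 7}}}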
def draw_forces(ax, pos, vel, **kwargs):
    """ draw forces on atoms

    Args:
        ax (plt.Axes): matplotlib Axes object, must have projection='3d'
        pos (np.array): array of atomic positions
        vel (np.array): array of forces on each atom (or velocities)
        kwargs (dict, optional): keyword arguments passed to ax.quiver
    Returns:
        Line3DCollection: the quiver arrows
    """
    x, y, z = zip(*pos)
    vx, vy, vz = zip(*vel)
    qvs = ax.quiver(x, y, z, vx, vy, vz, **kwargs)
    return qvs
746ee30bd668ef53eb820959505fe254808a681f
184,562
def compare(numA, numB):
    """
    compare(numA, numB):
    Compares two numbers. Returns:
    1, if the first number is greater than the second,
    0, if they are equal,
    -1, if the first number is smaller than the second.

    Parameters
    ----------
    numA: integer or float
    numB: integer or float

    Returns
    -------
    integer
    """
    if numA > numB:
        return 1
    if numA < numB:
        return -1
    if numA == numB:
        return 0
7ad62cb677882d22b32adb517a31a4149685ecef
22,259
def normalize_repo_root_url(url):
    """Normalize a GitHub URL into the root repository URL.

    Parameters
    ----------
    url : `str`
        A GitHub URL

    Returns
    -------
    url : `str`
        Normalized URL of a GitHub repository.

    Examples
    --------
    >>> normalize_repo_root_url('https://github.com/lsst/LDM-151.git')
    'https://github.com/lsst/LDM-151'
    """
    # Strip the .git extension, if present
    if url.endswith('.git'):
        url = url[:-4]
    return url
bbd01746ebdc777ba23cce3ef6a1aa9081d8d46f
570,485
def point_in_polygon(x, y, polygon):
    """ Check if a point is inside a polygon (ray-casting algorithm)

        - x, y - Coordinates of the point
        - polygon - List of the vertices of the polygon
          [(x1, y1), (x2, y2), ..., (xn, yn)]"""
    j = len(polygon) - 1
    res = False
    for i in range(len(polygon)):
        if (polygon[i][1] < y and polygon[j][1] >= y) \
                or (polygon[j][1] < y and polygon[i][1] >= y):
            if polygon[i][0] + (y - polygon[i][1]) / (polygon[j][1] - polygon[i][1]) * (
                    polygon[j][0] - polygon[i][0]) < x:
                res = not res
        j = i
    return res
5e52fead34f6c0cc768dea5006edba153a505655
219,009
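A small check of point_in_polygon against an axis-aligned square (hypothetical coordinates); points exactly on the boundary may land on either side, as is usual for ray casting.

square = [(0, 0), (4, 0), (4, 4), (0, 4)]
assert point_in_polygon(2, 2, square) is True
assert point_in_polygon(5, 2, square) is False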
import string
import secrets


def random_string(length: int = 1024, ascii: bool = True, digits: bool = True) -> str:
    """Generate a random alphanumeric string of size `length`

    Keyword arguments:
    length: int (defaults to 1024)
    ascii: bool, include ASCII letters (note: shadows the builtin `ascii`)
    digits: bool, include digits
    Return: string
    """
    alphabet = []
    if ascii:
        alphabet += string.ascii_letters
    if digits:  # originally `elif`, which skipped digits whenever letters were enabled
        alphabet += string.digits
    if not alphabet:
        raise ValueError("At least one of ascii or digits must be True")
    return ''.join(secrets.choice(alphabet) for _ in range(length))
4d267db4bf81fc6ea12bc9adc72dc05912a2eaca
368,490
def _convert_to_index_name(s):
    """Converts a string to an Elasticsearch index name."""
    # For Elasticsearch index name restrictions, see
    # https://github.com/DataBiosphere/data-explorer-indexers/issues/5#issue-308168951
    # Elasticsearch allows single quote in index names. However, they cause other
    # problems. For example,
    # "curl -XDELETE http://localhost:9200/nurse's_health_study" doesn't work.
    # So also remove single quotes.
    prohibited_chars = [
        ' ', '"', '*', '\\', '<', '|', ',', '>', '/', '?', '\''
    ]
    for char in prohibited_chars:
        s = s.replace(char, '_')
    s = s.lower()
    # Remove leading underscores (lstrip is a no-op when there are none).
    s = s.lstrip('_')
    print('Index name: %s' % s)
    return s
4ce052c19e97ed0d528890f3a444e05b88344753
672,829
import random


def backoff(retries: int, jitter: bool = True) -> int:
    """
    Compute duration (seconds) to wait before retrying using exponential
    backoff with jitter based on the number of retries a message has
    already experienced.

    The minimum returned value is 1s
    The maximum returned value is 604800s (7 days)

    With max_retries=9, you will have roughly 30 days to fix and redeploy
    the task code.

    Parameters
    ----------
    retries : int
        How many retries have already been attempted.
    jitter : bool
        Whether to add random noise to the return value (recommended).

    Notes
    -----
    https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
    """
    x = 6 ** (retries + 1)
    if jitter:
        x = random.randrange(x // 3, x * 2)
    return min(604_800, x)
51b7802f76697f52dc6bea800b349973061ec428
563,908
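Without jitter the backoff schedule is deterministic, growing as 6**(retries + 1) until the 7-day cap:

assert [backoff(r, jitter=False) for r in range(5)] == [6, 36, 216, 1296, 7776]
assert backoff(10, jitter=False) == 604_800  # capped at 7 days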
def isunauthenticated(f):
    """
    Checks to see if the function is marked as not requiring authentication
    with the @unauthenticated decorator. Returns True if decorator is
    set to True, False otherwise.
    """
    return getattr(f, 'unauthenticated', False)
fdf6dded7bd656ee122eb4f2b67b669dd0b9be1f
322,977
import typing
import re


def _mask_command(text: str, rules: str) -> str:
    """Mask part of text using rules.

    :param text: source text
    :type text: str
    :param rules: regex rules to mask.
    :type rules: str
    :return: source with all MATCHED groups replaced by '<*masked*>'
    :rtype: str
    """
    masked: typing.List[str] = []

    # places to exclude
    prev = 0
    for match in re.finditer(rules, text):
        for idx, _ in enumerate(match.groups(), start=1):
            start, end = match.span(idx)
            masked.append(text[prev:start])
            masked.append("<*masked*>")
            prev = end
    masked.append(text[prev:])
    return "".join(masked)
e9c424cc5d699c3f50b2b265b0f261fa321e10a8
141,136
def format_weather_header_for_HELP(itype, iunits, city, lat=None):
    """
    Prepare the header for the precipitation, air temperature and
    global solar radiation input weather datafile for HELP. The format
    of the header is defined in the subroutine READIN of the HELP
    Fortran source code.
    """
    fheader = [['{0:>2}'.format(itype)],    # 3: data was entered by the user.
               ['{0:>2}'.format(iunits)],   # 1 for IP and 2 for SI
               ['{0:<40}'.format(city[:40])],
               ]
    if lat is not None:
        # Append the latitude if the data are solar radiation.
        fheader.append(['{0:>6.2f}'.format(lat)])
    else:
        fheader.append([])
    return fheader
7d848481b7316cef4094c2d24b9978665a1c2e1d
20,399
def calculate_average_error(actual_hist, noisy_hist):
    """
    Calculates error according to the equation stated in part (e).

    Args:
        Actual histogram (list), Noisy histogram (list)
    Returns:
        Error (Err) in the noisy histogram (float)
    """
    error = 0.0
    for i in range(len(actual_hist)):
        error += abs(actual_hist[i] - noisy_hist[i])
    error = error / len(actual_hist)
    return error
3df201e67c3849b62d9ceda7ec6d0b6072b18e2f
181,286
def parse_line_update_success(tokens):
    """Parses line which logs stats for a successful write/update request."""
    latency = float(tokens[2])
    name = tokens[1]
    name_server = int(tokens[4])
    local_name_server = int(tokens[5])
    return latency, name, name_server, local_name_server
7cec3ef424a4b606b9e836e54d20a155d6bb016c
73,271
import requests


def get_url(url):
    """ Get a url, return its contents. """
    headers = {"User-Agent": "dftools"}
    resp = requests.get(url, headers=headers)
    if resp.status_code == requests.codes.ok:
        return resp.content, resp.status_code
    else:
        return None, resp.status_code
6a67f273277605979fb114031760e07253dbd565
104,404
def combine(background_img, figure_img):
    """
    :param background_img: SimpleImage, the background image
    :param figure_img: SimpleImage, the green screen figure image
    :return: SimpleImage, the green screen pixels are replaced with
             pixels of background image
    """
    # (x, y) represent every pixel in the figure image
    for x in range(figure_img.width):
        for y in range(figure_img.height):
            # get pixel at (x, y) in figure image
            pixel_fg = figure_img.get_pixel(x, y)
            # find the maximum value between R-value and B-value at (x, y)
            bigger = max(pixel_fg.red, pixel_fg.blue)
            # check whether pixel at (x, y) is green screen
            if pixel_fg.green > bigger * 2:
                # get pixel at (x, y) in background image
                pixel_bg = background_img.get_pixel(x, y)
                # replace the figure image's RGB values at (x, y) with
                # the background image's RGB values at (x, y)
                pixel_fg.red = pixel_bg.red
                pixel_fg.green = pixel_bg.green
                pixel_fg.blue = pixel_bg.blue
    # return the combined image
    return figure_img
07ae5d988fa3c7e5132ad9a50173a682be6cc840
673,385
def get_provenance_record(caption: str, ancestors: list):
    """Create a provenance record describing the diagnostic data and plots."""
    record = {
        'caption': caption,
        'domains': ['reg'],
        'authors': [
            'kalverla_peter',
            'smeets_stef',
            'brunner_lukas',
            'camphuijsen_jaro',
        ],
        'references': [
            'brunner2019',
            'lorenz2018',
            'knutti2017',
        ],
        'ancestors': ancestors,
    }
    return record
b4eba250f5a581989c80a8de727d7ed3e034bdd6
656,217
from typing import List


def findMedianArrays(nums1: List[int], nums2: List[int]) -> float:
    """Receives two arrays of ints and returns the median of the
    merged arrays after sorting."""
    nums1.extend(nums2)
    nums1.sort()
    if len(nums1) % 2 == 0:
        return (nums1[len(nums1)//2 - 1] + nums1[len(nums1)//2]) / 2
    else:
        return nums1[len(nums1)//2]
760d910d86b5f6689c99e9ab07c123659a22683b
570,229
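findMedianArrays on the classic two-array examples (note it mutates nums1 in place via extend/sort):

assert findMedianArrays([1, 3], [2]) == 2
assert findMedianArrays([1, 2], [3, 4]) == 2.5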
def word_preprocessing(word, ignore_non_alnumspc=True, ignore_space=True,
                       ignore_numeric=True, ignore_case=True):
    """ Function for word preprocessing

    Argument
        word: a string to be processed
    Parameters
        ignore_non_alnumspc: whether to remove all non alpha/numeric/space characters
        ignore_space: whether to remove all spaces
        ignore_numeric: whether to remove all numeric characters
        ignore_case: whether to convert all alpha characters to lower case
    Output
        processed string (type: str)
    """
    if ignore_non_alnumspc:
        word = "".join(filter(lambda x: x.isalnum() or x.isspace(), word))
    if ignore_space:
        word = "".join(filter(lambda x: not x.isspace(), word))
    if ignore_numeric:
        word = "".join(filter(lambda x: not x.isnumeric(), word))
    if ignore_case:
        word = word.lower()
    return word
b5c1737cc5b92337e05e0c1f58950146d94f95e4
668,198
from datetime import datetime


def python_type_to_sql_type(_python_type):
    """
    Convert a Python data type to an SQL type.
    :param _python_type: A Python internal type
    """
    if _python_type == str:
        return 'string'
    elif _python_type == bytes:
        return "blob"
    elif _python_type == float:
        return "float"
    elif _python_type == int:
        return "integer"
    elif _python_type == datetime:
        return "datetime"
    elif _python_type == bool:
        return "boolean"
    else:
        raise Exception("python_type_to_sql_type: _type_code \"" + str(_python_type) + "\" not supported")
d74c0a8e8b1ef2340e1fc1decddcd60aba718570
30,832
def test_tosolr_index_update_errors(basic_exporter_class, record_sets,
                                    new_exporter, setattr_model_instance,
                                    assert_records_are_indexed,
                                    assert_records_are_not_indexed):
    """
    When updating indexes via a ToSolrExporter, if one record causes an
    error during preparation (e.g. via the haystack SearchIndex obj),
    the export process should: 1) skip that record, and 2) log the
    error as a warning on the exporter. Other records in the same batch
    should still be indexed.
    """
    records = record_sets['item_set']
    expclass = basic_exporter_class('ItemsToSolr')
    invalid_loc_code = '_____'
    exporter = new_exporter(expclass, 'full_export', 'waiting')

    def prepare_location_code(obj):
        code = obj.location_id
        if code == invalid_loc_code:
            raise Exception('Code not valid')
        return code

    exporter.indexes['Items'].prepare_location_code = prepare_location_code
    setattr_model_instance(records[0], 'location_id', invalid_loc_code)
    exporter.export_records(records)
    exporter.commit_indexes()

    assert_records_are_not_indexed(exporter.indexes['Items'], [records[0]])
    assert_records_are_indexed(exporter.indexes['Items'], records[1:])
    assert len(exporter.indexes['Items'].last_batch_errors) == 1
dda25db5a2e2c43e4aea9de15c77017dc8f75a13
108,704
import base64


def authorization_headers(user, token):
    """ Prepares authorization header for an API request """
    if None in [user, token]:
        return None
    credentials = '%s:%s' % (user, token)
    auth = base64.b64encode(credentials.encode('utf-8'))
    headers = {'Authorization': 'Basic %s' % auth.decode('utf-8')}
    return headers
623a728197d9458e77fbde90c411db9306987a09
416,653
def Q_ssm(nres, rmsd, n1, n2):
    """
    Compute the SSM 'Q' score for an alignment of two proteins with n1
    and n2 residues, with nres residues aligned and RMSD value of rmsd
    (Angstroms).

    This score is defined in

    Krissinel, E. & Henrick, K. 2004 'Secondary-structure matching (SSM),
    a new tool for fast protein structure alignment in three dimensions'
    Acta Crystallographica D60:2256-2268

    Parameters:
       nres - number of residues in alignment
       rmsd - root mean square deviation of aligned residues (Angstroms)
       n1 - number of residues in protein 1
       n2 - number of residues in protein 2

    Return value:
       Q score for alignment
    """
    R0 = 3.0  # Krissinel & Henrick p. 2262
    return nres**2 / ((1 + (rmsd / R0)**2) * n1 * n2)
ed778309469abd251032561d4a9f104264b0de16
493,864
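A sanity check for Q_ssm: a perfect self-alignment (every residue aligned, zero RMSD) should score exactly 1, and imperfect alignments should score lower.

assert Q_ssm(nres=100, rmsd=0.0, n1=100, n2=100) == 1.0
assert Q_ssm(nres=80, rmsd=2.0, n1=100, n2=100) < 1.0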
def get_bool_input(msg):
    """
    Gets a user input from the console, ensuring that it is either 'yes' or 'no'.

    :param msg: Message to be prompted to the user.
    :return: Valid user input.
    """
    user_input = input(msg + " (yes, no)")
    while user_input != "yes" and user_input != "no":
        user_input = input(msg + " (yes, no)")
    return user_input == "yes"
bc5ff3b109b1d7941e1ab584c8a4a2cfe27aef32
223,027
def get_time_sorted_events_list(df, n_states):
    """
    Given a dataframe returns a list of events, ordered by timestamp.
    """
    end_event = n_states - 1
    beginning_event = n_states - 2
    events_list = list([beginning_event])
    events_list.extend(df.reset_index()
                         .set_index("timestamp")
                         .sort_index()
                         ['event'].values)
    events_list.append(end_event)
    return events_list
5858bf34d896cf838e404a4c6001d7430cd7ed32
577,601
def ground(visited_node_dict, end_node):
    """
    'grounds' a rod: it returns a list of nodes given a dict of nodes
    pointing to their predecessor.
    For example: {2 : 1, 3 : 2, 5 : 3}, 5 -> [1, 2, 3, 5]
    """
    res = []
    curr = end_node
    while curr is not None:
        res.append(curr)
        curr = visited_node_dict.get(curr, None)
    return res[::-1]
4ffd5a229ecf996d6c18dae6f6deae31de4b27d3
498,345
def pivots_col(matrix):
    """
    Checks to see if pivots are required due to negative values in the
    right column, excluding the bottom value.

    Args:
        matrix (numpy array): matrix to be reviewed.
    Returns:
        Flag (bool): True or False indicating whether a negative
        element was found.
    """
    return min(matrix[:-1, -1]) < 0
f44c5583284ae7a816dcc55be9d530ccf1c02a39
359,303
def calc_julian_days_dg(tlc_time):
    """Calculate the Julian days according to the formula provided by
    DigitalGlobe. Found in section 4.1.2 here
    https://dg-cms-uploads-production.s3.amazonaws.com/uploads/document/file/207/Radiometric_Use_of_WorldView-3_v2.pdf

    Parameters
    ----------
    tlc_time: datetime.datetime

    Returns
    -------
    float: days since the beginning of the year -4712

    Meeus, Jean. "Astronomical algorithms, 2nd Ed.." Richmond, VA:
    Willmann-Bell (1998). Pg 61
    """
    a = tlc_time.year // 100
    b = 2 - a + (a // 4)
    # Convert microseconds to seconds before scaling to hours
    # (the original divided raw microseconds by 3600)
    UT = tlc_time.hour + tlc_time.minute / 60 + \
        (tlc_time.second + tlc_time.microsecond / 1e6) / 3600
    jd = int(365.25 * (tlc_time.year + 4716)) + \
        int(30.6001 * (tlc_time.month + 1)) + \
        tlc_time.day + UT / 24 + b - 1524.5
    return jd
c25c304d7aa2f27abf521cf07538200784b73e22
261,875
def mm_on_cpu(x, w):
    """
    (helper function) Perform matrix multiplication on CPU.
    PLEASE DO NOT EDIT THIS FUNCTION CALL.

    Input:
    - x: Tensor of shape (A, B), on CPU
    - w: Tensor of shape (B, C), on CPU

    Returns:
    - y: Tensor of shape (A, C) as described above. It should not be
      on the GPU.
    """
    y = x.mm(w)
    return y
583984e5692ee17908e1136fd025d32f5a28157e
295,945
def isGlideinHeldNTimes(jobInfo, factoryConfig=None, n=20):
    """This function looks at the glidein job's information and returns
    whether the CondorG job is held for more than N (defaults to 20)
    iterations. This is useful to remove unrecoverable glideins
    (CondorG jobs) with the forcex option.

    Args:
        jobInfo (dict): Dictionary containing glidein job's classad information

    Returns:
        bool: True if job is held more than N (defaults to 20)
        iterations, False otherwise.
    """
    if factoryConfig is None:
        factoryConfig = globals()["factoryConfig"]
    # Default to 0 so a missing NumSystemHolds attribute does not raise
    # a TypeError on comparison
    nsysholds = jobInfo.get("NumSystemHolds", 0)
    return nsysholds > n
e608ada46f1571b9e96531bc3cca3692f940846a
694,035
def conn_count() -> str:
    """Return SQL that counts the active connections to the database."""
    return "SELECT SUM(numbackends) FROM pg_stat_database"
912b53a5035b5fa8fc84bd60e61a10b6879baeac
148,945
import requests
import time


def etiget(url: str, crawl_delay=2, headers=None, **kwargs) -> requests.Response:
    """
    Wrap requests.get() to conform to webscraping etiquette.
    """
    if headers is None:  # avoid a mutable default argument
        headers = {'User-agent': 'Custom'}
    time.sleep(crawl_delay)
    return requests.get(url, headers=headers, **kwargs)
75cc5772ffada1714709847fb2c14d0accdb7240
552,613
def stringify(value):
    """
    Return string ``value``, with quotes around if there is whitespace.

    :param value: Value to stringify
    :return: Stringified, possibly quoted, value
    :rtype: :class:`str`
    """
    rv = str(value)
    if len(rv.split()) > 1:
        return '"%s"' % rv
    return rv
277e3b3cdc166e805f8509464d53980b5371abf8
385,045
import math


def radiansToDegrees(radians):
    """Convert radians to degrees."""
    return math.degrees(radians)
842f2ac9c9d48ee5ff52a1cafe679b4ffb374123
429,564
def get_tags(tweet, lower=False):
    """Extract the set of hashtag texts from a tweet"""
    try:
        # Default to {} so .get() works on the result (the original
        # defaulted to [], which has no .get); also return a set in both
        # branches (the original returned a generator when lower=True)
        ent = tweet.get('entities', {})
        if not ent:
            return set()
        tags = (tag['text'] for tag in ent.get('hashtags', []))
        if lower:
            return set(tag.lower() for tag in tags)
        return set(tags)
    except KeyError:
        print("get_tags got KeyError")
        return set()
9399e2acce8073904277c8c3357a10e04a8b67ad
408,373
def _get_channel_to_group(channel_groups):
    """Take a list of ChannelGroup instances, and return a mapping
    channel ==> channel_group index"""
    mapping = {}
    # `iteritems()` was Python 2 (and assumed a dict); enumerate matches
    # the documented list input
    channels_list = [(i, cg.channels) for i, cg in enumerate(channel_groups)]
    for igroup, channels in channels_list:
        for channel in channels:
            mapping[channel] = igroup
    return mapping
c253015422721dfcb4ef2fb701f63ad86f3dccb6
334,656
def get_index_of_last_match(obj, li):
    """Get index of last item matching obj in list (None if absent)"""
    # start, end, step; start inclusive, end not
    for i in range(len(li) - 1, -1, -1):
        if obj == li[i]:
            return i
f4c2b422fcb708e4c52c4c591f92ae1aec99fc8b
423,222
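An equivalent one-liner for get_index_of_last_match, with the caveat that list.index raises ValueError when the item is absent instead of returning None:

def get_index_of_last_match_alt(obj, li):
    return len(li) - 1 - li[::-1].index(obj)

assert get_index_of_last_match_alt(1, [1, 2, 1]) == 2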
import torch


def cross_squared_distance_matrix(x, y):
    """Pairwise squared distance between two (batch) matrices' rows (2nd dim).

    Computes the pairwise distances between rows of x and rows of y

    Args:
        x: [batch_size, n, d] float `Tensor`
        y: [batch_size, m, d] float `Tensor`

    Returns:
        squared_dists: [batch_size, n, m] float `Tensor`, where
        squared_dists[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2
    """
    # Per-row squared norms, kept batched (the original summed over all
    # elements and squeezed the batch dim, which did not match the
    # documented shapes)
    x_norm_squared = torch.sum(x * x, dim=-1)             # [B, n]
    y_norm_squared = torch.sum(y * y, dim=-1)             # [B, m]
    x_y_transpose = torch.matmul(x, y.transpose(-2, -1))  # [B, n, m]
    # squared_dists[b,i,j] = x_bi'x_bi - 2 x_bi'y_bj + y_bj'y_bj
    squared_dists = (x_norm_squared.unsqueeze(-1)
                     - 2 * x_y_transpose
                     + y_norm_squared.unsqueeze(-2))
    return squared_dists.float()
cbe660db0458554619de62d25654991c9faf6b22
664,331
def stars_filter(scorestars):
    """
    Used from the template to produce stars rating the treebank.
    Takes a pair of floats (score, stars).
    """
    score = scorestars[0]
    stars = scorestars[1]
    return ('<span class="hint--top hint--info" data-hint="%f">'
            '<img src="/img/stars%02d.png" '
            'style="max-height:1em; vertical-align:middle" /></span>'
            % (score, stars * 10))
12449f1094db8f24e03a6622bf8b8ca14319cd31
66,823
def dunderkey(*args):
    """Produces a nested key from multiple args separated by double
    underscore

    >>> dunderkey('a', 'b', 'c')
    'a__b__c'

    :param *args : *String
    :rtype : String
    """
    return '__'.join(args)
8a7fe3921000f5f4e8052207ddda1c79553ef113
102,977
import math


def rotate_vector(vector, angle_degree):
    """Rotate a vector clockwise by the given angle in degrees
    (equivalently, counter-clockwise by -angle)."""
    angle_rad = math.pi * angle_degree / 180
    xval = vector[0] * math.cos(angle_rad) + vector[1] * math.sin(angle_rad)
    yval = -vector[0] * math.sin(angle_rad) + vector[1] * math.cos(angle_rad)
    return xval, yval
c43f31d5dcb7900437dda676af6ed0f6d9c40356
259,690
from torch.utils.data._utils.collate import default_collate
from typing import Sequence
from typing import Dict


def multi_supervision_collate_fn(batch: Sequence[Dict]) -> Dict:
    """
    Custom collate_fn for K2SpeechRecognitionDataset.

    It merges the items provided by K2SpeechRecognitionDataset into the following structure:

    .. code-block::

        {
            'features': float tensor of shape (B, T, F)
            'supervisions': [
                {
                    'sequence_idx': Tensor[int] of shape (S,)
                    'text': List[str] of len S
                    'start_frame': Tensor[int] of shape (S,)
                    'num_frames': Tensor[int] of shape (S,)
                }
            ]
        }

    Dimension symbols legend:
    * ``B`` - batch size (number of Cuts),
    * ``S`` - number of supervision segments (greater or equal to B, as each Cut may have multiple supervisions),
    * ``T`` - number of frames of the longest Cut
    * ``F`` - number of features
    """
    dataset_idx_to_batch_idx = {
        example['supervisions'][0]['sequence_idx']: batch_idx
        for batch_idx, example in enumerate(batch)
    }

    def update(d: Dict, **kwargs) -> Dict:
        for key, value in kwargs.items():
            d[key] = value
        return d

    supervisions = default_collate([
        update(sup, sequence_idx=dataset_idx_to_batch_idx[sup['sequence_idx']])
        for example in batch
        for sup in example['supervisions']
    ])
    feats = default_collate([example['features'] for example in batch])
    return {
        'features': feats,
        'supervisions': supervisions
    }
3b85c2ca4dcdbbd3b5d902bb2b061562401422ae
674,230
def at_most_one_none(*args):
    """
    Returns True if at most one of the args is None.
    """
    n_none = sum([a is None for a in args])
    return n_none <= 1
addb2d89ade13904fbe4e479835acec0b0ddb665
395,585
def is_in_range(x, config):
    """
    Checks to see x is in the range of the config.

    :param x: number
    :type x: int/float
    :param config: configuration
    :type config: dict
    :returns: bool
    """
    try:
        return config['start'] <= float(x) <= config['end']
    except (TypeError, ValueError):  # was a bare except
        raise TypeError("Value is not a float")
b6d5dff449465556267e4f80d09ab262ba67ff81
183,672
def who_split(who):
    """
    Returns a tuple with target owner specification split in two parts,
    the userid and the ticket. The ticket will be None if the owner
    specification doesn't contain it.
    """
    if ":" in who:
        # maxsplit=1 guarantees exactly two parts even if the ticket
        # contains a colon (the original maxsplit=2 could yield three)
        return who.split(":", 1)
    return who, None
db4c8ce71c42c396ac07de850a6b5ec809fee431
548,933
def validateApiRequest(api_returned_message) -> None:
    """
    Validates the API request and looks for a success message. If the
    message is not present the program will print an error and exit.
    """
    if api_returned_message != "success":
        print("ERROR: API message was not success")
        exit(1)
    return None
5ecb1209450917d13ffeeff55438914115623ecf
301,499
import random


def CalculateWaitForRetry(retry_attempt, max_wait=60):
    """Calculates amount of time to wait before a retry attempt.

    Wait time grows exponentially with the number of attempts. A
    random amount of jitter is added to spread out retry attempts from
    different clients.

    Args:
        retry_attempt: Retry attempt counter.
        max_wait: Upper bound for wait time.

    Returns:
        Amount of time to wait before retrying request.
    """
    wait_time = 2 ** retry_attempt
    # Integer division: random.randrange() requires integer bounds
    # (the original used /, which yields a float in Python 3)
    max_jitter = 2 ** retry_attempt // 2
    if max_jitter:
        wait_time += random.randrange(-max_jitter, max_jitter)
    return min(wait_time, max_wait)
a1dd746984eeceaf1950de02afecd4e516e19cd0
522,422
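The jitter keeps CalculateWaitForRetry within predictable bounds; a sketch of the invariant (hypothetical loop):

for attempt in range(1, 6):
    w = CalculateWaitForRetry(attempt)
    assert 2 ** attempt // 2 <= w <= 60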
def parse_payload_v1(event):
    """ Get HTTP request method/path/body for v1 payloads. """
    body = event.get('body')
    method = event.get('httpMethod')
    try:
        package, *_ = event['pathParameters']['package'].split('/')
    except KeyError:
        package = None
    return (method, package, body)
2a32dd96bcaf916efdb53ba3dd8430ba4c5eef5d
646,273
import re


def clean_len(s):
    """ Calculate the length of a string without its ANSI color codes """
    s = re.sub(r'\x1b\[[0-9;]*m', '', s)
    return len(s)
5148d6fe580e45e5e3ce11b6bbcda602d3886305
634,268
def sharedLangCost(criterion, frRow, exRow):
    """Returns 1 if the two do not share a language, else 0"""
    fluentQ = criterion['fluentQ']
    learningQ = criterion['learningQ']
    frLangs = [set(frRow[fluentQ].split(',')),
               set(frRow[learningQ].split(','))]
    exLangs = [set(exRow[fluentQ].split(',')),
               set(exRow[learningQ].split(','))]
    # Do they share no language?
    return int(len(frLangs[0].union(frLangs[1])
               .intersection(exLangs[0].union(exLangs[1]))) == 0)
11d6f6db627f93ecc726407eee29a3e46cea6cc0
611,489
def submit_pyspark_job(dataproc, project, region, cluster_name, bucket_name,
                       filename):
    """Submits the Pyspark job to the cluster, assuming `filename` has
    already been uploaded to `bucket_name`"""
    job_details = {
        'projectId': project,
        'job': {
            'placement': {
                'clusterName': cluster_name
            },
            'pysparkJob': {
                'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)
            }
        }
    }
    result = dataproc.projects().regions().jobs().submit(
        projectId=project,
        region=region,
        body=job_details).execute()
    job_id = result['reference']['jobId']
    print('Submitted job ID {}'.format(job_id))
    return job_id
550cad720f507d39814b7b5a3a8c08eeb9cae399
534,088
def read_matrix(filename):
    """
    Reads a matrix and returns a dictionary with the data and list of
    conditions (tuple of dict and list)

    :param filename: the matrix containing genes id, conditions and reads
    :return: data, conditions
    """
    data = {}
    with open(filename, "r") as f:
        header = f.readline().strip().split('\t')
        conditions = header[1:]
        for row in f:
            parts = row.strip().split('\t')
            gene_id = parts[0]
            expression_values = parts[1:]
            gene_data = {}
            for condition, read_counts in zip(conditions, expression_values):
                gene_data[condition] = read_counts
            data[gene_id] = gene_data
    return data, conditions
b9885ea156e347b949ad5584eb96f79144ea91ce
361,454
from typing import List
from typing import Dict
from typing import Any


def _transform_dto_list_to_list_of_dicts(dto_list) -> List[Dict[str, Any]]:
    """
    Given a list of DTO objects, this function returns a list of dicts,
    that can be passed to jsonify function.
    """
    return [vars(dto_obj) for dto_obj in dto_list]
41d4d587aa78cf1e3879c22c2d95e28f9e4b0507
683,823
import time


def create_time_name():
    """Create a timestamped name for file-saving purposes

    Return: a string including the YearMonthDay_HourMinSec
    """
    return time.strftime("%Y%m%d_%H%M%S", time.localtime())
bb9e4be66fdb62bc5f6595f4783124bb92a1272f
239,061
def ircLower(string):
    """
    Lowercases a string according to RFC lowercasing standards.
    """
    return string.lower().replace("[", "{").replace("]", "}").replace("\\", "|")
6d64ea5a8b0ce489eac5d81e5f7438df631ef5f8
578,630
def convert_to_boolean(value):
    """Turn strings to bools if they look like them

    Truthy things should be True
    >>> for truthy in ['true', 'on', 'yes', '1']:
    ...     assert convert_to_boolean(truthy) == True

    Falsey things should be False
    >>> for falsey in ['false', 'off', 'no', '0']:
    ...     assert convert_to_boolean(falsey) == False

    Other things should be unchanged
    >>> for value in ['falsey', 'other', True, 0]:
    ...     assert convert_to_boolean(value) == value
    """
    if isinstance(value, str):
        if value.lower() in ['t', 'true', 'on', 'yes', '1']:
            return True
        elif value.lower() in ['f', 'false', 'off', 'no', '0']:
            return False
    return value
7cbf7a8fd601904c7aa8b685f6a3b3f5eaaa5c51
706,443
import torch


def denormalise(tensor_bchw, scale, mean_c, std_c):
    """Reversed normalisation

    Args:
        tensor_bchw (torch.tensor): 4D tensor of shape BxCxHxW
        scale (float): scale value
        mean_c (np.ndarray): mean array of shape (C,)
        std_c (np.ndarray): standard deviation array of shape (C,)

    Returns:
        Un-normalised torch tensor.
    """
    mean_bchw = (
        torch.from_numpy(mean_c[None, :, None, None]).float().to(tensor_bchw.device)
    )
    std_bchw = (
        torch.from_numpy(std_c[None, :, None, None]).float().to(tensor_bchw.device)
    )
    return (tensor_bchw * std_bchw + mean_bchw) / scale
275dc4ded45daafac09674ebeb4e1264b23740f8
447,970
def rosenbrock(tensor):
    """
    Compute Rosenbrock function.
    # https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
ab036bf21aa5a23739a1c560da409a59af3ea4cb
619,133
def find_nearest_index(data, value):
    """Find the index of the entry in data that is closest to value.

    Example:
    >>> data = [0, 3, 5, -2]
    >>> i = partmc.find_nearest_index(data, 3.4)

    returns i = 1
    """
    min_diff = abs(value - data[0])
    min_i = 0
    for i in range(1, len(data)):
        diff = abs(value - data[i])
        if diff < min_diff:
            min_diff = diff
            min_i = i
    return min_i
41454836eba73757cc3fbe537389479d38d9f8ff
480,592
def _by_idx(x):
    """Sorting key for Operators: queue index aka temporal order.

    Args:
        x (Operator): node in the circuit graph

    Returns:
        int: sorting key for the node
    """
    return x.queue_idx
1d9f8caeb18e10b62302a4c0f5ac11db31ea679c
539,352
def typename(obj):
    """
    Get the typename of object.

    :param obj: Target object.
    :return: Typename of the obj.
    """
    if not isinstance(obj, type):
        obj = obj.__class__
    try:
        return f'{obj.__module__}.{obj.__name__}'
    except AttributeError:
        return str(obj)
029dcdbd7a14f2e74be072582bc1e556fde8c2c2
228,593
def between(full, start, end):
    """Returns the substring of the given string that occurs between
    the start and end strings."""
    try:
        if not start:
            parse = full
        else:
            parse = full.split(start, 1)[1]
        if end:
            result = parse.split(end, 1)[0]
        else:
            result = parse
        return result
    except IndexError:  # was a bare except; raised when `start` is absent
        return full
34697a5625d07724b2f6b22cad6b6b98badcd7c1
616,575
def indent(text, indent=4):
    """Indents text with spaces."""
    return u'\n'.join([u' ' * indent + x for x in text.splitlines()])
81684634882cdc061c3b453eaa8aff69e91accec
105,734
def reducer(words, frequency):
    """
    reduces words to final output frequency list

    :param words: iterable of words to count
    :param frequency: a mapping with a default count (e.g. defaultdict(int)
        or collections.Counter); a plain dict raises KeyError on new words
    :return: dict of unique words and their frequencies
    """
    for word in words:
        frequency[word] += 1
    return frequency
07e631ad152ba8551f7f152bdd5a7be343b10192
288,443
def linear_gradient(x, y, theta):
    """
    Return the partial-derivative vector with respect to the slope and
    y-intercept at a single data point (x, y).

    :param x: observed data
    :param y: observed data
    :param theta: [theta1, theta2] vector (list): [slope, y-intercept]
    """
    slope, intersect = theta
    y_hat = slope * x + intersect  # predicted value
    error = y_hat - y  # error
    # Find the slope and intercept that minimise error**2.
    # At point (x, y): [partial derivative w.r.t. slope,
    #                   partial derivative w.r.t. intercept]
    gradient = [error * x, error]
    return gradient
1b6e1f9bf3f9ac953d4a8329c715535765d456b7
571,918
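A minimal stochastic-gradient-descent loop built on linear_gradient (hypothetical data generated from y = 2x + 1; the learning rate is illustrative):

theta = [0.0, 0.0]
data = [(1, 3), (2, 5), (3, 7)]
for _ in range(1000):
    for x, y in data:
        g = linear_gradient(x, y, theta)
        theta = [theta[0] - 0.01 * g[0], theta[1] - 0.01 * g[1]]
# theta converges toward [2.0, 1.0]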
def boxUnion(b1, b2):
    """ Compute a box big enough to contain both input boxes """
    # easy enough
    return (tuple(map(min, zip(b1[0], b2[0]))),
            tuple(map(max, zip(b1[1], b2[1]))))
d511d91d0a5f42cbaaf59f9b0c392b59c7317f6a
146,932
from typing import get_origin
from typing import Tuple


def is_tuple(typ) -> bool:
    """
    Test if the type is `typing.Tuple`.
    """
    try:
        return issubclass(get_origin(typ), tuple)
    except TypeError:
        return typ in (Tuple, tuple)
c8c75f4b1523971b20bbe8c716ced53199150b95
707,794
import json


def convert_log_dict_to_json(cf_dict):
    """ Convert a parsed log dictionary to JSON """
    return json.dumps(cf_dict, sort_keys=True)
addd88890b3de888b120b9b9fecbd05eee0d614c
312,261
def get_exception_name(exception: Exception) -> str:
    """
    Gets the name of the exception
    (e.g., IndexError gives ``"IndexError"``).
    """
    return exception.__class__.__name__
a32ec7ac8d1be60786cc2d1b4392865470cd6bfa
563,372
def stateful_replace_properties(change):
    """ Set of property names whose changes triggered a replace action """
    property_list = set()
    for detail in change['Details']:
        if detail['Target']['Attribute'] == 'Properties':
            if detail['Target']['RequiresRecreation'] != 'Never':
                property_list.add(detail['Target']['Name'])
    return property_list
012f2cb70ebbbaadb44053e249aa5c4e826b6d4d
338,803
def state2str(State):
    """
    Converts the dictionary representation of a state into the string
    representation of a state. If *State* is already of type string it
    is simply returned.

    **arguments**
        * *State* (dict): dictionary representation of state

    **returns**
        * *State* (str): string representation of state

    **example**::

        >>> state = {"v2":0, "v1":1, "v3":1}
        >>> state2str(state)
        '101'
    """
    if type(State) == str:
        return State
    return ''.join([str(State[x]) for x in sorted(State)])
d8248a131750f0e490737df955bc4e553a2640ee
163,943
import math


def distance(point1, point2):
    """
    Args:
        point1: ex) (1,1), (2,3)
        point2: ex) (1,1), (2,3)
    Returns:
        sqrt((x2-x1)**2 + (y2-y1)**2)
    Example:
        >>> distance((1,1), (2,2))
        1.4142135623730951
    """
    return math.sqrt((point2[0] - point1[0])**2 + (point2[1] - point1[1])**2)
7999bb379a947fd834b6c3db7781d54200f4d83e
224,742
def sum_card(lst):
    """
    Takes in a list of numbers and returns the summed total as a string.
    >>> sum_card([1, 8, 9, 9, 7, 6, 7, 5, 5, 3, 5, 6, 1, 5, 8])
    '85'
    """
    total = 0
    for num in lst:
        total += num
    return str(total)
4a98027c5d8779bf82206ededf3d6846f9faaee2
214,041
def fib2(n):
    """ returns list of Fibonacci series up to nth term """
    result = [0, 1]
    i = 2
    while i < n:
        result.append(result[i-1] + result[i-2])
        i += 1
    return result
dbef3bb7c270330c31860e350f0e15ed6d8d03e1
606,066
import functools
import math


def gcd_multiple(*args) -> int:
    """Return greatest common divisor of integers in args"""
    return functools.reduce(math.gcd, args)
c686b9495cd45ff047f091e31a79bedcd61f8842
708,215
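gcd_multiple in action; reduce folds math.gcd pairwise across the arguments:

assert gcd_multiple(12, 18, 24) == 6
assert gcd_multiple(7, 13) == 1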
def contributes_to(id):
    """ A factory for extension point decorators!

    As an alternative to making contributions via traits, you can use
    this decorator to mark any method on a 'Plugin' as contributing to
    an extension point (note this is *only* used on 'Plugin' instances!).

    e.g. Using a trait you might have something like::

        class MyPlugin(Plugin):
            messages = List(contributes_to='acme.messages')

            def _messages_default(self):
                return ['Hello', 'Hola']

    whereas, using the decorator, it would be::

        class MyPlugin(Plugin):
            @contributes_to('acme.messages')
            def _get_messages(self):
                return ['Hello', 'Hola']

    There is not much in it really, but the decorator version looks a
    little less like 'magic' since it doesn't require the developer to
    know about Traits default initializers. However, if you know that
    you will want to dynamically change your contributions then use the
    trait version because all you have to do is change the value of the
    trait and the framework will react accordingly.
    """
    def decorator(fn):
        """ A decorator for marking methods as extension contributors. """
        fn.__extension_point__ = id
        return fn

    return decorator
a0010a88a02ce99bffbdefb7ea029d5bfb8da74c
527,398
import re


def parse_budget(s):
    """
    Parse a `budget` string and convert it to numeric.

    Parameters
    ----------
    s : str or float
        String `budget` value

    Returns
    -------
    Float
        Numeric `budget` value
    """
    # Check if value is null or already a float
    if isinstance(s, float):
        return s
    # Remove $, spaces, and commas
    s = re.sub(r'[\$\s,]', '', s).lower()
    # Convert to float
    if 'mil' in s:
        return float(s.replace('mil', '')) * 1e6  # million
    else:
        return float(s)
c9bf9a611a9859f12860a497e726f42ad2a3e679
474,120
def add_python_snippet(txt: str, expandable: bool = True):
    """
    Add a python snippet.

    :param txt: A string object.
    :param expandable: If true, puts code in <details></details> syntax.
    :return: Python string markdown syntax.
    """
    md_str = f"\n```python\n{txt}\n```"
    if expandable:
        md_str = f"\n<details>\n<summary>source code</summary>\n{md_str}\n</details>\n"
    return md_str
749820d9da60d1b3e7897d329ebc6479587564aa
625,196
def _test_rast(h):
    """Sun raster file"""
    if h.startswith(b'\x59\xA6\x6A\x95'):
        return 'rast'
510b8088fd1aa29926fa8ec740241acc7643ed62
655,770
def get_version(fname="flake8_black.py"):
    """Parse our source code to get the current version number."""
    with open(fname) as f:
        for line in f:
            if line.startswith("__version__"):
                # Evaluates the quoted literal on the right-hand side;
                # ast.literal_eval would be the safer choice here
                return eval(line.split("=")[-1])
17d6a6bfcd05fedd65f2fd4d3e6ae39bb0d0ef58
312,311
def shquote(arg):
    """Quote an argument for later parsing by shlex.split()"""
    for c in '"', "'", "\\", "#":
        if c in arg:
            return repr(arg)
    if arg.split() != [arg]:
        return repr(arg)
    return arg
543894d6bf98ae27000d0ee4241f908546d9722e
607,366
import hashlib


def get_file_hash(file_path, block_size=1024, hasher=None):
    """
    Generate hash for given file

    :param file_path: Path to file
    :type file_path: str
    :param block_size: Size of block to be read at once (default: 1024)
    :type block_size: int
    :param hasher: Use specific hasher, defaults to md5 (default: None)
    :type hasher: _hashlib.HASH
    :return: Hash of file
    :rtype: str
    """
    if hasher is None:
        hasher = hashlib.md5()
    with open(file_path, 'rb') as f:
        while True:
            buffer = f.read(block_size)
            if not buffer:
                break
            hasher.update(buffer)
    return hasher.hexdigest()
1c4be71f67e86af1e50e670e0a91cad64496899e
252,044
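Usage sketch for get_file_hash ('data.bin' is a hypothetical path); any hashlib constructor can be swapped in through the hasher parameter:

import hashlib

md5_hex = get_file_hash('data.bin')
sha256_hex = get_file_hash('data.bin', hasher=hashlib.sha256())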
def intramolecular_constraints(constraint_charge, constraint_groups):
    """Extracts intramolecular constraints from user constraint input

    Parameters
    ----------
    constraint_charge : list
        list of lists of charges and atom indices list
        e.g. [[0, [1, 2]], [1, [3, 4]]]
        The sum of charges on 1 and 2 will equal 0
        The sum of charges on 3 and 4 will equal 1
    constraint_groups : list
        list of lists of indices of atoms to have equal charge
        e.g. [[1, 2], [3, 4]]
        atoms 1 and 2 will have equal charge
        atoms 3 and 4 will have equal charge

    Returns
    -------
    constrained_charges : list
        list of fixed charges
    constrained_indices : list
        list of lists of indices of atoms in a constraint
        a negative number before an index means the charge of that atom
        will be subtracted.

    Notes
    -----
    Atom indices start with 1 not 0.
    Total charge constraint is added by default for the first molecule.
    """
    constrained_charges = []
    constrained_indices = []
    for i in constraint_charge:
        constrained_charges.append(i[0])
        group = []
        for k in i[1]:
            group.append(k)
        constrained_indices.append(group)

    for i in constraint_groups:
        for j in range(1, len(i)):
            group = []
            constrained_charges.append(0)
            group.append(-i[j-1])
            group.append(i[j])
            constrained_indices.append(group)

    return constrained_charges, constrained_indices
e73ce20f0d44e3bf24a5b11d0afe4a7709adb7fd
369,708
def merge_dictionaries(dicts):
    """
    Merge multiple dictionaries. Later entries win on key conflicts.

    # Arguments
        dicts: List of dictionaries.

    # Returns
        result: Dictionary.
    """
    result = {}
    for d in dicts:  # renamed from `dict`, which shadowed the builtin
        result.update(d)
    return result
eabe7e9b98cdd41baa4cce8d0bcb0e81d482e270
417,031
def intersection(p1, p2, pause=[0, 0]):
    """
    Given two converging forks and their firing times and speeds,
    compute the position and the time of the intersection.
    If the intersection is outside [x1, x2], the initial positions of
    the forks, then return False
    """
    x1, t1, R_fork_speed = p1.pos, p1.firing_time, p1.R_fork_speed
    x2, t2, L_fork_speed = p2.pos, p2.firing_time, p2.L_fork_speed
    t1 += pause[0]
    t2 += pause[1]
    assert(x2 > x1)
    # x = (x1+x2)/2 + (t2-t1)*v/2
    x = 1/(1/L_fork_speed+1/R_fork_speed)*(t2-t1 + x1/L_fork_speed+x2/R_fork_speed)
    if not (x1 < x < x2):
        return False, [None, None]
    t = (x2-x1)/(R_fork_speed+L_fork_speed) + \
        (t1 * R_fork_speed + t2 * L_fork_speed)/(R_fork_speed+L_fork_speed)
    return True, [x, t]
f7bb92e5d020fec76f29bd17406582ef41f997aa
317,020
def parse_number(string):
    """
    Retrieve a number from the string.

    Parameters
    ----------
    string : str
        the string to parse

    Returns
    -------
    number : float
        the number contained in the string
    """
    num_str = string.split(None, 1)[0]
    number = float(num_str)
    return number
67fc6f4f2d6ab6e99578bdc0906bbb7328d0fdb2
75,633
import pytz


def normalized_utc(value):
    """
    normalizes input datetime value as utc datetime with zero offset
    and returns it.

    :param datetime value: value to get its utc equivalent.
    :rtype: datetime
    """
    utc = pytz.utc
    return utc.normalize(value)
f1e375c2ae6206d280591bc1be0f51b1ca6cd21c
564,037
def school_year(date, as_tuple=False):
    """
    Return the school year of 'date'.

    Example:
    * as_tuple = False: "2013 — 2014"
    * as_tuple = True: (2013, 2014)
    """
    if date.month < 8:
        start_year = date.year - 1
    else:
        start_year = date.year
    if as_tuple:
        return (start_year, start_year + 1)
    else:
        return "%d — %d" % (start_year, start_year + 1)
b18fcde3abb76f5f90a48792c1e9be21da6ff276
693,397
def __repr__(self):
    """A custom repr for targetdb models.

    By default it always prints pk, name, and label, if found. Models
    can define their own ``__print_fields__`` as a list of fields to be
    output in the repr.
    """
    fields = ['pk={0!r}'.format(self.pk)]
    for ff in self.__print_fields__:
        if hasattr(self, ff):
            fields.append('{0}={1!r}'.format(ff, getattr(self, ff)))
    return '<{0}: {1}>'.format(self.__class__.__name__, ', '.join(fields))
98c7ab169dd0e8cb7074f65099432fccf4a18c31
623,990
import filecmp
import io
import difflib


def _perform_diff(result, original):
    """
    Compares the files `result` and `original`

    Returns a tuple:
        The first value is True if the files are the same
        The second arg is the text representation of the diff
    """
    are_same = filecmp.cmp(result, original)
    if not are_same:
        with open(result) as f:
            result_txt = f.readlines()
        with open(original) as f:
            original_txt = f.readlines()
        with io.StringIO() as output_stream:
            for line in difflib.unified_diff(result_txt, original_txt):
                output_stream.write(line)
            return (False, output_stream.getvalue())
    else:
        return (True, "")
3f447b8233a1d17b9c296dd4c2a2a6c732b5bcab
170,067
def find_falling_hydrometeors(obs, is_liquid, is_insects):
    """Finds falling hydrometeors.

    Falling hydrometeors are radar signals that are
    a) not insects b) not clutter. Furthermore, falling hydrometeors
    are strong lidar pixels excluding liquid layers (thus these pixels
    are ice or rain).

    Args:
        obs (_ClassData): Container for observations.
        is_liquid (ndarray): 2-D boolean array of liquid droplets.
        is_insects (ndarray): 2-D boolean array of insects.

    Returns:
        ndarray: 2-D boolean array containing falling hydrometeors.
    """
    is_z = ~obs.z.mask
    no_clutter = ~obs.is_clutter
    no_insects = ~is_insects
    falling_from_lidar = ~obs.beta.mask & (obs.beta.data > 1e-6) & ~is_liquid
    is_falling = (is_z & no_clutter & no_insects) | falling_from_lidar
    return is_falling
aaf4daef6a104a392abd8f308a11c3ccf799c0ae
98,231
def sumList(alist):
    """
    To calculate the sum of a list of numbers.
    """
    total = 0  # renamed from `sum`, which shadowed the builtin
    for value in alist:
        total += value
    return total
a1958170ea5027f537332c513c5ad55aba3ad9ad
318,115
def unsigned_right_shift(a, b):
    """Computes a >>> b in Java, or an unsigned right shift.
    Assumes longs, e.g., 64-bit integers."""
    if a >= 0:
        return a >> b
    else:
        return (a + 2 ** 64) >> b
9e3bc0c822c61ec9a4d73698aef1201e11f77564
294,776
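A spot check of unsigned_right_shift against the Java semantics of -1L >>> 1:

assert unsigned_right_shift(-1, 1) == 0x7FFFFFFFFFFFFFFF  # 2**63 - 1
assert unsigned_right_shift(16, 2) == 4  # non-negative values shift normally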
import re


def _is_a_glob(a_string):
    """ Return True or False depending on whether a_string appears to
    be a glob """
    pattern = re.compile(r'[\*\[\]\{\}\?]')
    return bool(pattern.search(a_string))
849bdf0e49ae95e0b425b5072afd2b800aa93848
624,025
import torch


def ordinal_accuracy(logits, levels, device='cpu', tolerance=0, reduction='mean'):
    """Computes the accuracy with a tolerance for ordinal error.

    Parameters
    ----------
    logits : torch.tensor, shape(num_examples, num_classes-1)
        Outputs of the CONDOR layer.
    levels : torch.tensor, shape(num_examples, num_classes-1)
        True labels represented as extended binary vectors
        (via `condor_pytorch.dataset.levels_from_labelbatch`).
    device: 'cpu', 'cuda', or None (default='cpu')
        If GPUs are utilized, then the device should be passed accordingly.
    tolerance : integer
        Allowed error in the ordinal ranks that will count as
        a correct prediction.
    reduction : str or None (default='mean')
        If 'mean' or 'sum', returns the averaged or summed accuracy
        across all data points (rows) in logits. If None, returns a
        vector of shape (num_examples,)

    Returns
    ----------
    acc : torch.tensor
        A torch.tensor containing a single accuracy value (if
        `reduction='mean'` or `'sum'`) or an accuracy value for each
        data record (if `reduction=None`).

    Examples
    ----------
    >>> import torch
    >>> levels = torch.tensor(
    ...     [[1., 1., 0., 0.],
    ...      [1., 0., 0., 0.],
    ...      [1., 1., 1., 1.]])
    >>> logits = torch.tensor(
    ...     [[2.1, 1.8, -2.1, -1.8],
    ...      [1.9, -1., -1.5, -1.3],
    ...      [1.9, 1.8, 1.7, 1.6]])
    >>> ordinal_accuracy(logits, levels)
    tensor(1.)
    """
    if not logits.shape == levels.shape:
        raise ValueError("Please ensure that logits (%s) has the same shape as levels (%s). "
                         % (logits.shape, levels.shape))

    y_true = torch.sum(levels, dim=1, keepdim=True, dtype=logits.dtype).to(device)
    y_est = torch.sum(torch.cumprod(torch.sigmoid(logits), dim=1) > 0.5,
                      dim=1, keepdim=True, dtype=logits.dtype).to(device)

    # 1 when correct (within tolerance) and 0 else
    val = torch.le(torch.abs(y_true - y_est), tolerance).to(torch.float32)

    if reduction == 'mean':
        acc = torch.mean(val)
    elif reduction == 'sum':
        acc = torch.sum(val)
    elif reduction is None:
        acc = val
    else:
        s = ('Invalid value for `reduction`. Should be "mean", '
             '"sum", or None. Got %s' % reduction)
        raise ValueError(s)
    return acc
0c02598013044b4beefc28ddd8908edc64fe5423
655,455
def convert_index_2_bool(index):
    """
    Convert integer style outlier / inlier index to boolean.

    Inputs:
        index: (list) -1 for outliers and 1 for inliers.
    Returns:
        (list) False for outliers and True for inliers.
    """
    return [i == 1 for i in index]
bcc1e6bed141ae9141b601a04210abb518e020ab
227,360