content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
import math

def fpIsInf(x):
    """Report whether ``x`` is a floating point infinity (positive or negative).

    Delegates to :func:`math.isinf`; NaN and finite values yield False.
    """
    return math.isinf(x)
ab9e80f5f971a959c07f59aba203928158aa4351
79,199
def get_rb_blob_attribute(blobdict, attr):
    """Get Attribute `attr` from dict `blobdict`.

    Parameters
    ----------
    blobdict : dict
        Blob Description Dictionary
    attr : string
        Attribute key

    Returns
    -------
    ret : Attribute Value

    Raises
    ------
    KeyError
        If the attribute is absent from the BLOB section.
    """
    try:
        value = blobdict['BLOB']['@' + attr]
    except KeyError:
        # Bug fix: the two message fragments previously joined without a
        # space, producing "...from Blob.There may be...".
        raise KeyError('Attribute @' + attr + ' is missing from Blob. ' +
                       'There may be some problems with your file')
    return value
3f3768057cde529c467774da93cc33bb83571a78
672,916
import requests

def get_instances_from_event_id(access_token, calendar_id, event_id):
    """Get instances of a recurring event from the Google Calendar API.

    Args:
        access_token (string): User's access token to make the request
        calendar_id (string): ID of the calendar that the event exists on
        event_id (string): ID of the recurring event to get the instances of

    Returns:
        Dict: The '/instances' endpoint response as outlined in the Google
        Calendar API reference, or an error dict on JSON decode failure.
    """
    # Bearer-token headers for the request.
    headers = {"Authorization": "Bearer " + access_token,
               "Accept": "application/json"}
    response = requests.get(
        "https://www.googleapis.com/calendar/v3/calendars/" + calendar_id +
        "/events/" + event_id + "/instances",
        headers=headers)
    try:
        return response.json()
    except Exception as e:
        # Bug fix: return str(e) instead of the exception object so the
        # error dict stays JSON-serializable.
        return {"error": True, "message": str(e), "response": response}
1ce01e7f2fa6924a707bfb604d91713d8d105451
521,168
import gzip

def open_gzip(inf, rw="rb"):
    """Open *inf* via gzip when its extension is ``gz``, else with plain open."""
    opener = gzip.open if inf.split(".")[-1] == "gz" else open
    return opener(inf, rw)
77b549e11dedf56b8947ae87f0b9c0bb2d817ffb
419,314
def _fp_almost_equal(x: float, y: float, delta: float = 1e-10) -> bool:
    """Return True when x and y differ by strictly less than *delta*."""
    difference = x - y
    return -delta < difference < delta
55ae02075c480cdfc3eccb31d4b451a051c4449e
467,468
import importlib

def get_callable(string_or_callable):
    """Return the argument if already callable; otherwise treat it as a
    dotted path ('pkg.module.attr'), import the module and return the attr.
    """
    if not callable(string_or_callable):
        module_name, object_name = string_or_callable.rsplit('.', 1)
        target_module = importlib.import_module(module_name)
        return getattr(target_module, object_name)
    return string_or_callable
b8a084cf8f8e3101e82f9d08649d0af1e3cefe1a
542,073
def remove_outlier(x):
    """Drop values of a pandas Series at or beyond its 1st/99th percentiles.

    Parameters
    ----------
    x : pd.Series
    """
    upper = x.quantile(0.99)
    lower = x.quantile(0.01)
    x = x.where(x > lower).dropna()
    return x.where(x < upper).dropna()
8ecbefd0a5b851c08363a602dd03b1197a69fb65
464,959
def safe_readable(handle):
    """Return ``handle.readable()``, mapping OSError/ValueError to False."""
    try:
        return handle.readable()
    except (OSError, ValueError):
        return False
ac0c5918d2c323df741b771c5cb31203eb018f10
542,668
from typing import Any

def is_integer(s: Any) -> bool:
    """Is the parameter convertible to an integer?

    Bug fix: ``int(None)`` (and other non-numeric, non-string types) raises
    TypeError rather than ValueError; both are now treated as "not an int"
    instead of crashing.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
dfc6bcc718f2967b63ec10633e402539b1744f5f
160,558
def inner_product(x, y):
    """Return the inner (dot) product of *x* and *y* via ``x.dot(y)``."""
    result = x.dot(y)
    return result
aa56c71199863b5b8764ce8e96375c8cc61378d4
8,794
def _VersionFileFilter(change):
    """Change filter keeping only master-branch changes that touch the
    kasko VERSION file.

    Args:
        change: a buildbot Change object (needs .branch and .files).
    """
    if change.branch != 'master':
        return False
    return 'syzygy/kasko/VERSION' in change.files
8a45c68954be197eca723d82663bd1538c1a2195
257,841
def re(q):
    """Fetch the real part of the quaternion (kept as a length-1 slice of
    the last axis).

    NOTE: this function name shadows the stdlib ``re`` module here.
    """
    return q[..., 3:]
f5b526a217d49ff479d0de41b59552fc85059a95
518,471
def remove_metachars(name):
    """Replace troublesome metacharacters ('/' and ' ') in an ifDescr with
    underscores."""
    return name.replace("/", "_").replace(" ", "_")
80ccbb7cfc2aeb00a2604da887c14e40277082d1
244,651
import ctypes

def constructor(cfunc):
    """Declare *cfunc* to be a constructor.

    The C interface is fixed to accept no parameters and return a void
    pointer; the function is wrapped as a staticmethod so it can live on a
    class.

    :param cfunc: plain C function imported via ctypes.
    :return: staticmethod with the constructor C interface applied.
    """
    cfunc.restype = ctypes.c_void_p
    cfunc.argtypes = []
    return staticmethod(cfunc)
3b375bffe20fd69cdd354f7cf90c4bb48d8fc121
388,026
def _intersection(la: list, lb: list):
    """Multiset intersection of two relation lists.

    Returns (matched count, unmatched-in-la count); each element of *lb*
    can be consumed at most once.
    """
    remaining = lb[:]
    matched = unmatched = 0
    for relation in la:
        try:
            remaining.remove(relation)
        except ValueError:
            unmatched += 1
        else:
            matched += 1
    return matched, unmatched
6bdba2a6c449e812841d60060f2a3f84c35e4077
591,861
import json

def create_error_response_from_httpexception(http_exception):
    """Serialize an HTTPException into a JSON error string.

    Parameters
    ----------
    http_exception : HTTPException

    Returns
    -------
    string
        JSON with 'status_code', 'status' and 'description' fields.
    """
    response = http_exception.get_response()
    payload = {
        'status_code': response.status_code,
        'status': response.status,
        'description': http_exception.description,
    }
    return json.dumps(payload)
18c9d320de32324f4df44904c8a2735c704901da
337,228
def _to_frac(timestamp, n=32):
    """Return the fractional part of an NTP timestamp scaled to *n* bits."""
    fraction = abs(timestamp - int(timestamp))
    return int(fraction * 2 ** n)
490c96d4bcab7f370280c08bfee0e2801a4befe3
362,748
def make_constant_data_ts(d, t0=0.0, dt=0.1, nsamp=5, val=1.0):
    """Fill a TimeSeries-like object's data vector with a constant value.

    Used for testing arithmetic operators.

    Parameters
    ----------
    d : TimeSeries or _CoreTimeSeries skeleton to build upon
    t0 : data start time (default 0.0)
    dt : sample interval (default 0.1)
    nsamp : length of data vector to generate (default 5)
    val : constant value written into each sample (default 1.0)

    Returns
    -------
    The modified object ``d``.
    """
    d.npts = nsamp
    d.t0 = t0
    d.dt = dt
    d.set_live()
    for sample in range(nsamp):
        d.data[sample] = val
    return d
00f99cae6d215ed7af584420d9c6d70e352dd49d
516,385
def fill_missing(fields, row):
    """Ensure every column in *fields* exists in *row*, inserting None for
    absent ones.

    Args:
        fields (list<str>): column names that should be present.
        row (dict): row object to fill (mutated in place).

    Returns:
        The same row dict with missing columns filled with None.
    """
    for col in fields:
        row.setdefault(col, None)
    return row
f8bec7b6d981ae4b6306dcb109dc2eb86b3b0a45
507,655
def dic2axisheader(dic):
    """Convert an axisheader dictionary to its 12-element list form."""
    field_order = ("nucleus", "spectral_shift", "npoints", "size", "bsize",
                   "spectrometer_freq", "spectral_width", "xmtr_freq",
                   "zero_order", "first_order", "first_pt_scale", "extended")
    return [dic[field] for field in field_order]
8e0c18050f6fc8ef8cb6fec61ad6827968f0194f
488,649
def gen_parallel_data(text_output, text_input):
    """Pair corresponding parallel sentences as [output, input] lists."""
    return [[out, inp] for out, inp in zip(text_output, text_input)]
887467b2b8756e39748f62c77aefba189c13079b
302,783
import re

def _expand_tfrecords_pattern(tfr_pattern):
    """Expand 'prefix@N.suffix' shard notation into
    'prefix-?????-of-000NN.suffix'; other strings pass through unchanged."""
    def _format_shards(m):
        prefix, shard_count, suffix = m.groups()
        return '{}-?????-of-{:0>5}{}'.format(prefix, shard_count, suffix)
    return re.sub(r'^([^@]+)@(\d+)([^@]+)$', _format_shards, tfr_pattern)
d9b42821644540b629c909796ea2e4dc27b820f0
168,925
import collections

def align(crabs, cost):
    """Align the crab positions `crabs` (array) to the same position using
    the least fuel, according to the given `cost` function.

    The `cost` function takes a single parameter, the number of steps to
    move, i.e. `cost(steps)`.

    Returns `(pos, fuel)`.

    Bug fix: the candidate range previously stopped at ``max(crabs) - 1``
    (``range(max(crabs))``), so aligning exactly at the topmost crab
    position was never considered.
    """
    fuel = collections.defaultdict(int)
    for moveto in range(max(crabs) + 1):
        for crab in crabs:
            fuel[moveto] += cost(abs(crab - moveto))
    return min(fuel.items(), key=lambda pair: pair[1])
577bacf52cac36155a372fca139040ba1d96e1a7
72,616
import re
import yaml

def get_yaml(note, key):
    """Find a YAML property in a note and return its parsed value.

    Only the first occurrence of the property is returned. ``key`` may be a
    plain string (matched as a line prefix) or a compiled regex pattern
    (matched with ``.search()``). Returns None when no property line
    matches.

    NOTE(review): when ``key`` is a regex, the final ``[key]`` lookup still
    indexes the parsed mapping with the pattern object itself -- confirm
    callers only pass regex keys whose match equals the literal key.
    """
    body = note.body.split('\n')
    i = 0
    while i<len(body):
        # A line starts the property if it begins with the key string, or
        # the compiled pattern matches anywhere in the line.
        if (type(key)==str and body[i].startswith(key)) or (type(key)==re.Pattern and key.search(body[i])):
            #print(i)
            yaml_content = body[i]
            # Indented continuation lines (space or tab) belong to the same
            # YAML value; gather them before parsing.
            while i+1<len(body) and (body[i+1].startswith(' ') or body[i+1].startswith('\t')):
                yaml_content += '\n'+body[i+1]
                i += 1
            #print(i)
            # Parse just the gathered snippet and extract the value by key.
            return(yaml.safe_load(yaml_content)[key])
        i += 1
    return(None)
6e10c61609c61b4b7cc3a07aa296570c28377a13
107,226
import hashlib

def _getMD5(filename):
    """Return the hex MD5 digest of the file's contents.

    Bug fix: the file was opened in text mode and the resulting str passed
    to md5(), which raises TypeError under Python 3 (hashing requires
    bytes); it is now read in binary mode. Also switched from the private
    ``_hashlib`` module to the public ``hashlib`` API.
    """
    with open(filename, 'rb') as f:
        data = f.read()
    # HEX string representation of the hash
    return hashlib.md5(data).hexdigest()
16f20512159c0fedbb1ea39f5316dfd042fc8ddb
322,183
from importlib import metadata

def version() -> str:
    """Report the pip-installed version of the ``pascal-triangle`` package."""
    distribution_name = "pascal-triangle"
    return metadata.version(distribution_name)
d0aa912bfab670f2d52714e597f8c97ccd247721
593,301
from typing import Union
from typing import Optional

def to_bool(value: Union[str, bool, int]) -> Optional[bool]:
    """Convert a string/int/bool to a boolean.

    True values: "1", "yes", "true" (case-insensitive); false values: "0",
    "no", "false". Any other string returns None. bools pass through; ints
    use Python truthiness. Any other type raises TypeError.

    Bug fix: the TypeError message previously rendered as
    "type '<class ...>to bool" (missing closing quote and space).
    """
    if isinstance(value, bool):
        return value
    elif isinstance(value, int):
        return bool(value)
    elif isinstance(value, str):
        lower: str = value.lower()
        if lower in ("1", "true", "yes"):
            return True
        elif lower in ("0", "false", "no"):
            return False
        else:
            return None
    else:
        raise TypeError("Can't convert value of type '{}' "
                        "to bool".format(type(value)))
5cf1611d91a8c3073205ba50d622eaeb2b30f110
195,899
def address_from_string(address_string):
    """Parse a 'host:port' string into a (host, int port) tuple."""
    parts = address_string.split(":")
    return (parts[0], int(parts[1]))
92d90fbc1f8a0bc7e50bd9786972e7686ce8175f
140,394
import re

def device_name_convention(host):
    """Filter hosts whose name follows the lab naming convention.

    Examples:
        - lab-junos-08.tst.dfjt.local
        - lab-arista-22.prd.dfjt.local
        - lab-nxos-01.lab.dfjt.local

    :param host: The host you want to filter on (uses ``host.name``)
    :return bool: True if it matches, False if it doesn't match

    Bug fix: the dots in the pattern were unescaped (matching any
    character) and the string was not raw; the separators are now matched
    literally.
    """
    pattern = r"\w{3}-\w+-\d{2}\.\w{3}\.dfjt\.local"
    if re.match(pattern, host.name):
        return True
    else:
        return False
5469d6c1569167e404952c9774afe6642fd4f65c
585,739
def match_strings(str_in, match_list):
    """Return True if any candidate string in *match_list* occurs within
    *str_in*, else False."""
    return any(candidate in str_in for candidate in match_list)
a8a977304c0f85858a7c89c75299c5630849fd53
665,568
import csv

def get_production_objectives(input_file):
    """List the target metabolites to produce.

    Reads the dictionary-like .csv *input_file* and collects the BiGG IDs
    of rows whose 'production' column is an affirmative value and whose
    'metabolite_BiGG_ID' is non-empty.

    Argument:
        input_file -- str, path to the .csv input file
    Return:
        list of BiGG IDs of the target metabolites
    """
    affirmative = ('yes', 'Yes', 'Y', 'y', 'YES')
    with open(input_file, newline='', encoding='utf-8-sig') as csvfile:
        reader = csv.DictReader(csvfile, dialect='excel')
        return [row['metabolite_BiGG_ID'] for row in reader
                if row['production'] in affirmative
                and row['metabolite_BiGG_ID'] != '']
1246d57440bda25b2e25a98aec0b9277a45dd674
483,677
def BakExportContext(sql_messages, uri, database):
    """Generate the ExportContext for exporting to BAK.

    Args:
        sql_messages: module, The messages module that should be used.
        uri: The URI of the bucket to export to ('uri' arg output).
        database: The list of databases to export from ('--database' flag).

    Returns:
        ExportContext, for use in InstancesExportRequest.exportContext.
    """
    bak_file_type = sql_messages.ExportContext.FileTypeValueValuesEnum.BAK
    return sql_messages.ExportContext(
        kind='sql#exportContext',
        uri=uri,
        databases=database,
        fileType=bak_file_type)
72791dde366a52798b2d5b01c3fbfd1947fa3fc7
160,995
def get_block_number(row_index: int, column_index: int, k: int) -> int:
    """Return the block number for (row, column) in a k-by-k block Sudoku.

    Blocks are numbered row-major: block 0 covers rows/cols 0..k-1, etc.

    :param row_index: Row index
    :param column_index: Column index
    :param k: Size of the Sudoku puzzle (a grid of k x k blocks)
    :return: Corresponding block number (between 0 and k**2-1)
    """
    return (row_index // k) * k + (column_index // k)
00e2e52181ddc4dad5dc9d2a996f926f4c73e08a
582,118
def generate_header(consumer_id) -> dict:
    """Build the enrollment-record request header dict for *consumer_id*."""
    header_name = "HTTP_X_CONSUMER_CUSTOM_ID"
    return {header_name: consumer_id}
bbb9440f9de90ad75a1e647287cfa24588db3e7e
257,991
from typing import Union

def convert_to_set(convert_this: Union[list, set, str, tuple]) -> set:
    """Normalize a string, tuple, or list into a set (no duplicates,
    unordered); sets pass through unchanged. Anything else raises
    TypeError."""
    if isinstance(convert_this, set):
        return convert_this
    if isinstance(convert_this, str):
        return {convert_this}
    if isinstance(convert_this, (list, tuple)):
        return set(convert_this)
    raise TypeError('The function calling this expects a set.')
702e942a9980d511728a2bfbfd6943247765311a
617,865
def bce_loss(input, target):
    """Numerically stable binary cross-entropy with logits.

    As per https://github.com/pytorch/pytorch/issues/751 and the formula
    derivation in the TensorFlow sigmoid_cross_entropy_with_logits docs.

    Inputs:
    - input: PyTorch Variable of shape (N, ) giving scores.
    - target: PyTorch Variable of shape (N,) of 0/1 targets.

    Returns:
    - PyTorch Variable containing the mean BCE loss over the minibatch.
    """
    neg_abs = -input.abs()
    per_example = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
    return per_example.mean()
d4c9a218049334d5535b01337c83085a88efdf8b
649,707
def _crate_id(crate_info):
    """Return a unique stable identifier ("ID-" + root path) for a crate.

    Returns:
        (string): This crate's unique stable id.
    """
    return "ID-{}".format(crate_info.root.path)
465a0c5a8b57e0c4251784f02af687ad9a1b87e1
483,141
def get_label(data):
    """Grab label information from different data-source types.

    Parameters:
        data: pandas DataFrame, Series, or numpy array.

    Returns:
        label (str): the single column name (DataFrame with exactly one
        column) or the series name; None when neither applies.
    """
    label = None
    if hasattr(data, 'columns') and len(data.columns) == 1:
        label = data.columns[0]
    if hasattr(data, 'name'):
        label = data.name
    return label
a0438eecd4c100bf6ac7ef947e0b331bcf0a7215
266,457
def get_index(string, list):
    """Return the index of the first element of *list* that contains
    *string* as a substring.

    Raises IndexError when no element matches (unchanged behavior).
    """
    matches = [position for position, element in enumerate(list) if string in element]
    return matches[0]
e3e491e06eb66c3433a31d95cb6d851e8eb79dc6
417,232
def is_variable(token):
    """A token is a variable when it is prefixed with '$'."""
    return token.startswith('$')
1d517f61308146c7b467fbc957856944c25860b5
248,837
def get_gene_summary(gene):
    """Summarize a model's gene as ``{gene.id: {...details...}}``."""
    details = {
        "name": gene.name,
        "is_functional": gene.functional,
        "reactions": [{rxn.id: rxn.name} for rxn in gene.reactions],
        "annotation": gene.annotation,
        "notes": gene.notes,
    }
    return {gene.id: details}
dd9cb3f8e9841a558898c67a16a02da1b39479d2
707,206
def compute_standard_walking_time(length_m: float, elevation_gain_m: float,
                                  elevation_loss_m: float) -> float:
    """Estimate the hiking (moving) time in seconds.

    Horizontal time assumes 4 km/h; ascent assumes +300 m/h and descent
    -500 m/h. The slower of the horizontal/vertical components counts in
    full, the faster at half weight.

    :param: length_m: Length of the hike in meters
    :param: elevation_gain_m: Elevation gain of the hike in meters
    :param: elevation_loss_m: Elevation loss of the hike in meters
    :return: Walking time for the hike in seconds
    """
    horizontal_s = 0.9 * length_m                      # 4 km/h
    vertical_s = elevation_gain_m * 12 + elevation_loss_m * 7.2
    slower = max(horizontal_s, vertical_s)
    faster = min(horizontal_s, vertical_s)
    return slower + 0.5 * faster
c6c1c6896f1c9b5188d3b65bf32880f6071b92e1
636,906
def any_metric_has_any_dim_key(fake_services, metrics, dim_keys):
    """Check whether any datapoint whose metric is in *metrics* carries a
    dimension whose key is in *dim_keys*; prints the first hit found."""
    for datapoint in fake_services.datapoints:
        if datapoint.metric not in metrics:
            continue
        for dimension in datapoint.dimensions:
            if dimension.key in dim_keys:
                print('Found metric "%s" with dimension key "%s".' % (datapoint.metric, dimension.key))
                return True
    return False
4abaa97989bd633a34c5fa81d8b1153579e2cb08
401,637
from pathlib import Path

def __get_files(dir, filepattern):
    """Return every file path under *dir* (recursive) matching *filepattern*."""
    candidates = Path(dir).rglob(filepattern)
    return [candidate for candidate in candidates if candidate.is_file()]
63f677f0d35790d1ce0dfc6a1201a0bb3b6d1e0a
332,427
def evaluate_bounding_box(ctrlpts):
    """Evaluate the axis-aligned bounding box of a curve or surface.

    :param ctrlpts: control points
    :type ctrlpts: list, tuple
    :return: bounding box as [min tuple, max tuple]
    :rtype: list

    Bug fix: the running maxima were initialized to 0.0, which produced a
    wrong (zero) maximum whenever every coordinate along a dimension is
    negative; they now start at -inf, mirroring the +inf minima.
    """
    # Estimate dimension from the first control point.
    dimension = len(ctrlpts[0])

    bbmin = [float('inf')] * dimension
    bbmax = [float('-inf')] * dimension
    for cpt in ctrlpts:
        for i, coord in enumerate(cpt):
            if coord < bbmin[i]:
                bbmin[i] = coord
            if coord > bbmax[i]:
                bbmax[i] = coord
    return [tuple(bbmin), tuple(bbmax)]
ce0f79d6626f24efcba281e90b0cebcd041f98a1
498,564
def simple(expr: str) -> int:
    """Evaluate a simple expression with no parens, strictly left to right.

    >>> simple("3 + 4 * 5")
    35
    >>> simple("1 + 2 * 3 + 4 * 5 + 6")
    71
    """
    tokens = expr.split()
    # Repeatedly fold the leftmost "a op b" triple until one token remains.
    while len(tokens) > 1:
        a, op, b = tokens[:3]
        folded = int(a) + int(b) if op == '+' else int(a) * int(b)
        tokens = [str(folded)] + tokens[3:]
    return int(tokens[0])
3ec4e44a8149e514f072858a6d0913d28a976372
450,520
def canonicalize_eol(text, eol):
    """Replace every end-of-line sequence in TEXT with the string EOL."""
    normalized = text.replace('\r\n', '\n').replace('\r', '\n')
    if eol == '\n':
        return normalized
    return normalized.replace('\n', eol)
7ff5a8ed1ede1f854c3bb18aa6a9076ea16667c9
654,439
def constant(t, eta_init, last_eta, c = 100):
    """Constant learning-rate schedule: ignores all state and returns *c*."""
    # t, eta_init and last_eta are accepted only to satisfy the common
    # schedule signature; they do not influence the result.
    return c
9e62affa875f236e5e628c4762fd60c28c7ac346
220,501
import hashlib

def hashing_function(filename):
    """Return the MD5 hex digest of the named file's full contents."""
    with open(filename, "rb") as f:
        digest = hashlib.md5(f.read())
    return digest.hexdigest()
a057853298894bee89a737a5b6aabc95f0e70480
460,355
def _construct_grounding_map(rows):
    """Construct a grounding map from grounding_map csv rows.

    Each row holds an agent text in column 0 followed by namespace/id pairs
    in the remaining columns (blank namespaces are skipped). The result
    maps agent text -> INDRA-style db_refs dict (always including 'TEXT'),
    or -> None when no namespace/id pair was present.
    """
    gmap = {}
    for row in rows:
        agent_text = row[0]
        db_refs = {'TEXT': agent_text}
        for namespace, identifier in zip(row[1::2], row[2::2]):
            if namespace:
                db_refs[namespace] = identifier
        gmap[agent_text] = db_refs if len(db_refs) > 1 else None
    return gmap
54b6dfdba6b2e9dd9ece2bb8c902efc4e0e33aad
657,621
def three_in_a_row(game_state, last_move):
    """Return true if X or O have three in a row in one of the rows that
    include last_move. Else return false.

    Board cells are indexed 0-8 row-major:
        0 1 2
        3 4 5
        6 7 8
    Only the row/column/diagonal lines passing through ``last_move`` are
    checked.

    NOTE(review): cells are compared for equality only -- if three empty
    cells in a line share a placeholder value this also returns True;
    confirm callers never pass such a state.
    """
    # Cell 0: top row, main diagonal, left column.
    if last_move == 0:
        if game_state[0] == game_state[1] == game_state[2]:
            return True
        if game_state[0] == game_state[4] == game_state[8]:
            return True
        if game_state[0] == game_state[3] == game_state[6]:
            return True
    # Cell 1: top row and middle column (no diagonal passes through 1).
    if last_move == 1:
        if game_state[1] == game_state[0] == game_state[2]:
            return True
        if game_state[1] == game_state[4] == game_state[7]:
            return True
    # Cell 2: top row, right column, anti-diagonal.
    if last_move == 2:
        if game_state[2] == game_state[0] == game_state[1]:
            return True
        if game_state[2] == game_state[5] == game_state[8]:
            return True
        if game_state[2] == game_state[4] == game_state[6]:
            return True
    # Cell 3: left column and middle row.
    if last_move == 3:
        if game_state[3] == game_state[0] == game_state[6]:
            return True
        if game_state[3] == game_state[4] == game_state[5]:
            return True
    # Cell 4 (center): both diagonals, middle row, middle column.
    if last_move == 4:
        if game_state[4] == game_state[1] == game_state[7]:
            return True
        if game_state[4] == game_state[3] == game_state[5]:
            return True
        if game_state[4] == game_state[0] == game_state[8]:
            return True
        if game_state[4] == game_state[2] == game_state[6]:
            return True
    # Cell 5: middle row and right column.
    if last_move == 5:
        if game_state[5] == game_state[3] == game_state[4]:
            return True
        if game_state[5] == game_state[2] == game_state[8]:
            return True
    # Cell 6: left column, bottom row, anti-diagonal.
    if last_move == 6:
        if game_state[6] == game_state[0] == game_state[3]:
            return True
        if game_state[6] == game_state[7] == game_state[8]:
            return True
        if game_state[6] == game_state[2] == game_state[4]:
            return True
    # Cell 7: middle column and bottom row.
    if last_move == 7:
        if game_state[7] == game_state[1] == game_state[4]:
            return True
        if game_state[7] == game_state[6] == game_state[8]:
            return True
    # Cell 8: right column, bottom row, main diagonal.
    if last_move == 8:
        if game_state[8] == game_state[2] == game_state[5]:
            return True
        if game_state[8] == game_state[6] == game_state[7]:
            return True
        if game_state[8] == game_state[0] == game_state[4]:
            return True
    return False
0be412c649a27426e32c62a57e378429a639f65f
392,106
def check_game_state(current_word: str, hangman_state: int) -> bool:
    """Check if there are any _ left in the word or if hangman_state >= 9.

    Args:
        current_word (str): The current state of the word the user sees.
        hangman_state (int): The state of the hangman.

    Returns:
        bool: True if the game may continue, False if the game is over.
    """
    if hangman_state >= 9:
        return False
    return '_' in current_word
0630be5fac83739f249baee886b26b14eae1ba90
96,842
import re

def find_all(item, items, regex=False, regex_flags=None):
    r"""Find the indexes and values of every element matching *item*.

    :param item: the value (or pattern) to match/find.
    :param items: an iterable of items to match against.
    :param regex: If True, item is treated as a regex pattern.
    :param regex_flags: Optional flags for the regex search.
    :return: a list of (index, value) tuples.

    >>> find_all('own',['Is', 'that', 'your', 'own', 'brown', 'cow'])
    [(3, 'own')]
    >>> find_all('own',['Is', 'that', 'your', 'own', 'brown', 'cow'], regex=True)
    [(3, 'own'), (4, 'brown')]
    >>> find_all('^own$',['Is', 'that', 'your', 'own', 'brown', 'cow'], regex=True)
    [(3, 'own')]
    >>> find_all('ow',['How', 'now', 'brown', 'cow'])
    []
    >>> find_all('ow$',['How', 'now', 'brown', 'cow'], regex=True)
    [(0, 'How'), (1, 'now'), (3, 'cow')]
    """
    if not regex:
        return [(index, value) for index, value in enumerate(items)
                if value == item]
    flags = regex_flags or 0
    pattern = re.compile(item, flags=flags)
    return [(index, value) for index, value in enumerate(items)
            if pattern.search(value)]
bf9e78ef94261c0ee88162e6a1be85a8cdb1dd54
694,703
import json

def category_lists(DataFrame, categorical_variables, threshold=50, return_type='json'):
    """List unique categories for each of the given categorical variables.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
    categorical_variables : list(str)
        Variables to examine.
    threshold : int, default 50
        Maximum number of categories expected; variables exceeding it map
        to an empty list (and a message is printed).
    return_type : {'json', 'dict'}, default 'json'

    Returns
    -------
    out : JSON str or dict
    """
    out = dict()
    for variable in categorical_variables:
        categories = DataFrame[variable].unique()
        if len(categories) > threshold:
            out[variable] = []
            # Bug fix: the message previously read "Numebr of ategories".
            print('Number of categories > {}'.format(threshold))
        else:
            out[variable] = list(categories)
    if return_type == 'json':
        out = json.dumps(out, indent=4)
    return out
7bb0697933b9dc6470cd73901de09005f924cb15
242,916
def get_managed_object(handle, class_id, params, inMo=None, in_heir=False, dump=False):
    """Fetch the specified managed object (MO) from UCS Manager.

    :param handle: UCS connection handle providing GetManagedObject.
    :param class_id: MO class id to query.
    :param params: input filter parameters.
    :param inMo: optional parent MO to scope the query.
    :param in_heir: fetch the MO hierarchy when True.
    :param dump: dump the request/response XML when True.
    :returns: Managed Object
    :raises: UcsException in case of failure.
    """
    return handle.GetManagedObject(
        inMo, class_id, params, inHierarchical=in_heir, dumpXml=dump)
adcc9393d4cdfeba488248e1e7bb6c3d09a38ec9
278,880
import click

def editor(*args, **kwargs):
    """Wrapper for ``click.edit`` that aborts with a ClickException when the
    editor exits without saving (i.e. click.edit returns None)."""
    result = click.edit(*args, **kwargs)
    if result is not None:
        return result
    msg = "Editor exited without saving, command aborted"
    raise click.ClickException(msg)
090d3f67d2e1cdcda015744471282f8f204762fa
530,824
def normalize(item):
    """Normalize strings in topics and labels: lowercase, join words with
    underscores, and undo the underscore before '!' and '’'. Non-strings
    are returned untouched."""
    if not isinstance(item, str):
        return item
    item = item.lower()
    replacements = {' ': '_', '_!': '!', '_’': '’'}
    for old, new in replacements.items():
        item = item.replace(old, new)
    return item
9012868b8fe3ce80a0b98c51f63ff59f62aa51fe
510,911
import yaml

def open_yaml_file(filename):
    """Open a yaml file to extract configuration details.

    Parameters
    ----------
    filename : STRING
        path to the file to be opened.

    Returns
    -------
    The parsed configuration, or None (after printing the error) when
    opening/parsing fails.
    """
    try:
        with open(filename, 'r') as f:
            return yaml.load(f, Loader=yaml.FullLoader)
    except Exception as e:
        print(str(e))
7474e813edd5e69db2971850422d3701332d7258
600,919
def compile_drop_materialized_view(element, compiler, **kw):
    """Render the DROP statement for a materialized view, honoring the
    element's ``if_exists`` and ``cascade`` flags."""
    if_exists = "IF EXISTS " if element.if_exists else ""
    cascade = " CASCADE" if element.cascade else ""
    return "DROP MATERIALIZED VIEW {if_exists}{name}{cascade}".format(
        if_exists=if_exists, name=element.name, cascade=cascade)
1bedf4115edeaf33e96c7ff57b29e8809d8489c8
45,903
def safety_check_first_line(first_line: str) -> None:
    """Inspect the first line of lineage_notes.txt as a format safety check.

    The loading process was written against a specific human-edited file
    layout; if the header line differs from what that version used, the
    format has probably changed and loading must not proceed.

    Raises:
        RuntimeError -- If first line of file not what was expected.
    """
    EXPECTED_FIRST_LINE = "Lineage\tDescription\n"
    if first_line == EXPECTED_FIRST_LINE:
        return None
    print("First line of imported lineages file has changed!")
    print("Loading script was originally written for previous version.")
    print(f"Expected: '{EXPECTED_FIRST_LINE}'")
    print(f"Actually got first line: '{first_line}'")
    print("Very likely you need to rewrite loading script. Aborting.")
    raise RuntimeError("Format of lineage file has likely changed")
63246beb9e3eea515297ed30f83cd5462517e475
84,985
def trigger_show(connection, show_number, await_response=False):
    """Send the serial command that triggers playback of the given show.

    The command is "X04" followed by the show number as a 2-digit,
    zero-padded uppercase hex value. Returns the device's response line
    when *await_response* is set, otherwise True.
    """
    show_hex = hex(show_number)[2:].upper().rjust(2, '0')
    command = "X04" + show_hex
    connection.write(bytes(command, 'UTF-8'))
    if not await_response:
        return True
    return connection.readline().decode("UTF-8").strip()
eb88e02af34a3a530f692e980f583ad1cbf7096a
195,655
from functools import reduce

def collapse(data):
    """Concatenate the items of a homogeneous list with ``+`` (e.g. lists
    into one list, strings into one string)."""
    return reduce(lambda left, right: left + right, data)
5802b7868b4feb423b124a0f4ecc2471b83a9adf
364,722
import gzip

def read_kmers(kmerfile):
    """Read a gzipped file of kmers (one per line) into a set.

    :param kmerfile: path to the gzip-compressed kmer file
    :return: set of stripped kmer strings
    """
    with gzip.open(kmerfile, 'rt') as handle:
        return {line.strip() for line in handle}
89f698acbddb7f46e229289978f73b5887bdd3af
202,608
import random

def pick_random(parent1, parent2):
    """Crossover method 2: for each index up to len(parent1), take the seed
    from parent1 or parent2 with 50/50 probability; indices beyond the
    chosen parent's length are skipped. Returns the new child seeds."""
    child_seeds = []
    lookup = {1: parent1, 2: parent2}
    for idx in range(len(parent1)):
        # One random.choice call per index, matching the original RNG use.
        chosen_parent = lookup[random.choice([1, 2])]
        if idx < len(chosen_parent):
            child_seeds.append(chosen_parent[idx])
    return child_seeds
8a78a819778e4174925a58baef0fc417900b702d
384,795
def split_instruction(ins):
    """Split an assembly instruction into its separate parts.

    :param ins: The assembly line.
    :return: A list with the parts of the instruction (commas removed).
    """
    return ins.replace(',', ' ').split()
f3799deb9dc41c3c5184effec7bd1b1c07c61ffc
44,415
def get_tag_options(label_matches):
    """Return the unique tags (second element of each key) in first-seen
    order from *label_matches*."""
    tag_options = []
    for key in label_matches:
        tag = key[1]
        if tag not in tag_options:
            tag_options.append(tag)
    return tag_options
9a59bd06f85da460701ccab7453afb559bc21bbd
163,723
def _no_negative_zero(val):
    """Canonicalize -0 to plain 0 so diff-based tests compare cleanly."""
    return 0 if val == 0 else val
345802e297cc1e1c77a5b1db664715bfc42f3da6
5,833
def travel_object(obj, key_functions=[], val_functions=[]):
    """Recursively apply functions to the keys and values of a dictionary.

    Parameters
    ----------
    obj : dict/list
        List or dict to recurse through.
    key_functions : list
        Functions applied (chained, in order) to every nested dict key.
    val_functions : list
        Functions applied (chained, in order) to every non-container value.

    Returns
    -------
    list/dict
        A structure mirroring *obj* with keys/values transformed.

    Bug fixes: (1) successive val_functions now chain -- previously each
    call received the ORIGINAL value, so only the last function's output of
    the untransformed value survived; (2) container values are no longer
    clobbered -- the unconditional val_function loop used to overwrite the
    recursion result for dict/list values whenever val_functions was
    non-empty.
    """
    def operate_on_dict(the_dict):
        # Transform one dict level; recursion handles nested containers.
        new_dict = {}
        for key, val in the_dict.items():
            new_key = key
            for key_func in key_functions:
                new_key = key_func(new_key)
            if isinstance(val, (dict, list)):
                new_val = travel_object(val, key_functions=key_functions,
                                        val_functions=val_functions)
            else:
                new_val = val
                for val_func in val_functions:
                    new_val = val_func(new_val)
            new_dict[new_key] = new_val
        return new_dict

    if isinstance(obj, list):
        # Lists may mix dicts and plain items; only dicts are transformed.
        return [operate_on_dict(item) if isinstance(item, dict) else item
                for item in obj]
    elif isinstance(obj, dict):
        return operate_on_dict(obj)
    else:
        err_msg = 'Invalid type: the passed "obj" argument was not of type "dict" or "list".'
        raise TypeError(err_msg)
e02aba54a4e209204de6c044a86be9758cff0fef
510,696
import codecs

def read_datablock_size(file, location):
    """Return the size of the datablock starting at *location*.

    The 8-byte signature at the block start is skipped; the following
    8 bytes are read as a big-endian unsigned integer.
    """
    file.seek(location + 8)  # advance past the 8-byte signature
    size_bytes = file.read(8)
    return int(codecs.encode(size_bytes, 'hex'), 16)
362330af577fdf451e947a94f98d7f1697e05937
252,597
def elapsed_time_formatter(seconds, cmdp=None, fmt=None):
    """Format time in seconds to a string.

    :param seconds: The elapsed time in seconds.
    :param cmdp: The optional CommandProcessor (or a bare format string)
        supplying the time formatting option; overrides ``fmt`` when given.
    :param fmt: The optional format string containing:

        * 'seconds': Display time in seconds.
        * 'standard': Display time as D:hh:mm:ss.
    :return: The elapsed time string.
    """
    seconds = int(seconds)  # drop fractions
    fmt = 'seconds' if fmt is None else str(fmt)
    if cmdp is not None:
        fmt = cmdp if isinstance(cmdp, str) else cmdp['Units/elapsed_time']
    # Short durations, or the plain format, are rendered as raw seconds.
    if seconds < 60 or fmt not in ('D:hh:mm:ss', 'conventional', 'standard'):
        return f'{seconds} s'
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    fields = [str(days), f'{hours:02d}', f'{minutes:02d}', f'{secs:02d}']
    # Drop leading all-zero fields ("0" days, "00" hours, ...).
    while fields[0] == '0' * len(fields[0]):
        fields.pop(0)
    text = ':'.join(fields)
    # Drop remaining leading zero digits ("01:00" -> "1:00").
    return text.lstrip('0')
ad3682f0698896f6fed13f042d517bbfad50d388
131,658
import json


def get_service_account_email(credential_model):
    """Get the service account email from the stored Google creds JSON.

    Returns None when no credentials are stored, or the ``client_email``
    field (None if absent) otherwise.
    """
    raw = credential_model.credentials
    if not raw:
        return None
    return json.loads(raw).get("client_email")
417d9b8098816024aa3e471e58d880e04a71412d
506,523
import re


def twitter_preprocess(
    text: str,
    no_rt: bool = True,
    no_mention: bool = False,
    no_hashtag: bool = False
) -> str:
    """
    Preprocessing function to remove retweets, mentions and/or hashtags from
    raw tweets.

    Note that removed mentions/hashtags leave their surrounding whitespace
    behind, so double (or leading) spaces can appear in the output.

    Examples
    --------
    >>> twitter_preprocess("RT @the_new_sky: Follow me !")
    'Follow me !'
    >>> twitter_preprocess("@the_new_sky please stop making ads for your #twitter account.", no_mention=True, no_hashtag=True)
    ' please stop making ads for your  account.'
    """
    # Fix: the previous doctest claimed a single space after "your", but the
    # substitutions only delete the token, leaving both neighbouring spaces.
    if no_rt:
        text = re.sub(r"^RT @(?:[^ ]*) ", '', text)
    if no_mention:
        text = re.sub(r"@\w+", '', text)
    if no_hashtag:
        text = re.sub(r"#\w+", '', text)
    return text
1644b7da6b43ebd77a33bcb186f2634b6d3bc8db
14,628
def reloc_exit_patch(patch_data, relocs, start_address):
    """Relocate a piece of 6502 code to a given starting address.

    Each offset in ``relocs`` points at a little-endian 16-bit address inside
    ``patch_data``; ``start_address`` is added to it in place.
    """
    data = bytearray(patch_data)
    for offset in relocs:
        target = data[offset] | (data[offset + 1] << 8)
        target += start_address
        data[offset] = target & 0xFF
        data[offset + 1] = target >> 8
    return data
24ef61eeb5db23c41e4cea99e47826462775551c
688,713
def quadratic_attractive_force(current_cell, goal_cell, K=0.015):
    """Calculates the quadratic attractive force for one grid cell with
    respect to the target.

    current_cell: a list containing x and y values of one map grid cell
    goal_cell: a list containing x and y values of the target grid cell
    K: potential attractive constant

    returns: quadratic attractive force scaled by the potential attractive
             constant
    """
    dx = goal_cell[0] - current_cell[0]
    dy = goal_cell[1] - current_cell[1]
    # sqrt(dx**2 + dy**2) squared is just dx**2 + dy**2 -- skip the needless
    # square root the original computed and then undid.
    squared_distance = dx ** 2 + dy ** 2
    return K * squared_distance
4640982dd2f17f153ba2f5b421ba72bc9196eb81
407,745
def sum_dicts(a, b):
    """Sum the values of two dicts key-by-key, whatever the value types.

    Values present in both dicts are combined with ``+`` (so numbers add and
    lists concatenate); a key missing on one side contributes an empty value
    of the other side's type. Nested dicts are merged recursively. If either
    argument is None, the other (or {}) is returned.
    """
    if a is None or b is None:
        return a if a else b or {}
    combined = {}
    for key in set(a) | set(b):
        # Missing keys default to an empty instance of the other side's type,
        # e.g. [] for lists, 0 for ints.
        left = a.get(key, type(b.get(key))())
        right = b.get(key, type(a.get(key))())
        if isinstance(left, dict) and isinstance(right, dict):
            combined[key] = sum_dicts(left, right)
        else:
            combined[key] = left + right
    return combined
e9748407bb0b4ee0a2035df0ce56c7a4d09f75e4
137,668
def invert_pairs(trading_pairs: list) -> list:
    """
    swap quote w/ base
    :param list(trading_pairs): ["AAA-BBB", "BBB-CCC", "DDD-AAA"]
    :return list(trading_pairs): ["BBB-AAA", "CCC-BBB", "AAA-DDD"]
    """
    flipped = []
    for pair in trading_pairs:
        tokens = pair.split('-')
        flipped.append(f"{tokens[1]}-{tokens[0]}")
    return flipped
af8b4042dac04e0b1b3df54c340fc29fca39659d
500,835
import requests


def retrieve_paper(PMCID):
    """For a given PMCID, retrieve the paper from Europe PMC.

    Decoded plain text content will be returned.
    """
    endpoint = f"https://www.ebi.ac.uk/europepmc/webservices/rest/{PMCID}/fullTextXML"
    reply = requests.get(endpoint)
    return reply.content.decode()
ade6b02dbb0817cef235013b3b65007da4dc4a0b
287,277
import re


def dnsify(name: str) -> str:
    """
    Replace DC/OS folders with dots and replace other invalid characters
    with `_`

    >>> dnsify("folder/sec!ret")
    'folder.sec_ret'
    """
    # Folder separators become dots; empty segments are dropped.
    segments = [part for part in name.split("/") if part]
    dotted = ".".join(segments)
    # Anything outside [-._a-zA-Z0-9] becomes an underscore.
    return re.sub('[^-._a-zA-Z0-9]', '_', dotted)
d21fbb3aaf5dfca9174cb6911c1bf3571b69fbd8
237,196
def dict_get(item, *keys):
    """Nested dict ``get()``: follow *keys* into *item*, returning None on
    any missing key or non-dict intermediate.

    >>> dict_get({1: {2: 'X'}}, 1, 2)
    'X'
    >>> dict_get({1: {2: 'X'}}, 1, 2, 3) is None
    True
    >>> dict_get('no dict', 'no key') is None
    True
    """
    current = item
    for key in keys:
        if not isinstance(current, dict):
            return None
        current = current.get(key)
    return current
10848f641b9bb55144aaa6e4d0690c603bdc2e0e
473,739
def logOut(command):
    """Check if command is Quit (b | q | x | e).

    Accepts the single letters or their full words (back/quit/exit), case-
    insensitively and ignoring surrounding whitespace.
    """
    # The original recomputed command.strip().lower() seven times; normalise
    # once and test membership instead.
    return command.strip().lower() in {'b', 'back', 'q', 'quit', 'x', 'exit', 'e'}
dfd7a9b2d9cf28091ba6ad88a451d943a8eb1e1f
76,761
import click
import functools


def common_package_action_options(f):
    """Add common options for package actions.

    Decorator that stacks the CLI options shared by the package-action
    commands onto the wrapped click command ``f`` and forwards them as
    keyword arguments when the command runs.
    """
    # Each @click.option below registers one shared flag/parameter on the
    # wrapped command; click collects their values and passes them through
    # the wrapper as kwargs.
    @click.option(
        "-s",
        "--skip-errors",
        default=False,
        is_flag=True,
        help="Skip/ignore errors when copying packages.",
    )
    @click.option(
        "-W",
        "--no-wait-for-sync",
        default=False,
        is_flag=True,
        help="Don't wait for package synchronisation to complete before exiting.",
    )
    @click.option(
        "-I",
        "--wait-interval",
        default=5.0,
        type=float,
        show_default=True,
        help="The time in seconds to wait between checking synchronisation.",
    )
    @click.option(
        "--sync-attempts",
        default=3,
        type=int,
        help="Number of times to attempt package synchronisation. If the "
        "package fails the first time, the client will attempt to "
        "automatically resynchronise it.",
    )
    @click.pass_context
    @functools.wraps(f)
    def wrapper(ctx, *args, **kwargs):  # pylint: disable=missing-docstring
        # Delegate to the wrapped command via the click context so click's
        # argument handling still applies.
        return ctx.invoke(f, *args, **kwargs)
    return wrapper
b01362bb6082558a600fc40a493a4209a2814fed
510,367
def build_rating_list(name_to_rating, names_final):
    """(dict of {str: int}, list of str) -> list of list of [int, str]

    Return a list of [rating%, restaurant name], sorted by rating%

    >>> name_to_rating = {'Georgie Porgie': 87,
    ...                   'Queen St. Cafe': 82,
    ...                   'Dumplings R Us': 71,
    ...                   'Mexican Grill': 85,
    ...                   'Deep Fried Everything': 52}
    >>> names_final = ['Queen St. Cafe', 'Dumplings R Us']
    >>> build_rating_list(name_to_rating, names_final)
    [[82, 'Queen St. Cafe'], [71, 'Dumplings R Us']]
    """
    # Fix: the original wrapped each [rating, name] pair in an extra list
    # before appending, producing [[[82, name]], ...] instead of the
    # documented [[82, name], ...] shape.
    result = []
    for name in names_final:
        result.append([name_to_rating[name], name])
    # Sorting [rating, name] pairs in reverse orders by rating descending.
    result.sort(reverse=True)
    return result
3f2474480ca0c1b41f412fe122091e2c67cf1096
405,420
def write_error_response(s3_client, request_context, status_code, error_code, error_message):
    """
    write WriteGetObjectResponse to the S3 client for AWS Lambda errors

    :param s3_client: s3 client
    :param request_context: Requests that was sent to supporting Access Point
    :param status_code: Http status code for the type of error
    :param error_code: Error Code
    :param error_message: Error Message
    :return: WriteGetObjectResponse
    """
    response_fields = {
        'RequestRoute': request_context['outputRoute'],
        'RequestToken': request_context['outputToken'],
        'StatusCode': status_code,
        'ErrorCode': error_code,
        'ErrorMessage': error_message,
    }
    return s3_client.write_get_object_response(**response_fields)
31a3a45b1e25bc99039588d4854b27520fb405f0
241,087
import hashlib


def get_sha256_of_string(the_string):
    """Return a SHA-256 hash object for the given string (UTF-8 encoded).

    Note: callers receive the hash object, not its digest.
    """
    digest = hashlib.sha256()
    digest.update(the_string.encode("utf-8"))
    return digest
1657bd433e62c9342d5474fae472fc9dff649575
17,364
import json


def deserialize(filename='data.json'):
    """Return JSON data from file."""
    with open(filename) as handle:
        return json.load(handle)
e4830b5d476c3956cbee51bd2d5010eb5c43bec3
354,074
def aggregate_count(keyname):
    """Straightforward sum of the given keyname.

    Returns a (keyname, aggregator) pair; the aggregator sums doc[keyname]
    over an iterable of documents.
    """
    def inner(docs):
        total = 0
        for doc in docs:
            total += doc[keyname]
        return total
    return keyname, inner
431896d1c8d8c1f773b684b5235919ce1728a8ce
694,869
def format_cursor(cursor):
    """Format cursor inside double quotations as required by API"""
    return f'"{cursor}"'
8e45ddae1bc3aca6f95887fc69a0b32623804733
318,157
def cost2(x: int) -> int:
    """Compute the cost of carb movement by x steps for part 2.

    The cost is the x-th triangular number: 1 + 2 + ... + x.
    """
    return (x + 1) * x // 2
a9c059d33dfdd2cf2b78478019605a20b3198728
382,824
import json


def load_checkpoint(checkpoint_path):
    """Loads the checkpoint object stored at a given path.

    Args:
        checkpoint_path: Path along which checkpoint is stored.

    Returns:
        Python dictionary representing the data serialized in the checkpoint.
    """
    with open(checkpoint_path, "r") as handle:
        return json.load(handle)
c29ed093ea7eaf1d5ff91aa3a8548f80196c4c3c
525,938
from typing import Sequence
import ctypes


def build_c_double_array(values: Sequence[float]) -> ctypes.Array:
    """Convert a sequence of numbers to a ctypes c_double array.

    :param values: A sequence of floats.
    :return: The values packed into a ctypes-compatible array.
    """
    array_type = ctypes.c_double * len(values)
    return array_type(*values)
10231257e6a7ba5d4cfe20b9ebf60f87fef1ba69
333,821
import ast


def attribute(value: ast.expr, attr: str) -> ast.Attribute:
    """Generate an ast attribute get operation (load context)."""
    node = ast.Attribute(ctx=ast.Load(), value=value, attr=attr)
    return node
96eacb4419f50865e52f58b699166a6909885e5f
185,923
def truncate_data(perc, dataset):
    """Truncate the training dataset.

    Args:
        perc: float between 0 and 1, fraction of training rows kept.
        dataset: data, under the form (x_train, y_train), (x_test, y_test).
            Both training arrays must be 2-D (rows are examples).

    Returns:
        dataset: truncated training dataset, full test dataset.
    """
    (x_train, y_train), (x_test, y_test) = dataset
    keep = int(perc * x_train.shape[0])
    truncated_train = (x_train[:keep, :], y_train[:keep, :])
    return truncated_train, (x_test, y_test)
09805d58ca3bbd9bbab6eb96083a11f41cbb0d3f
82,717
from unittest.mock import patch


def class_mock(q_class_name, request, autospec=True, **kwargs):
    """
    Return a mock patching the class with qualified name *q_class_name*.

    The mock is autospec'ed based on the patched class unless the optional
    argument *autospec* is set to False. Any other keyword arguments are
    passed through to Mock(). The patch is reversed (via the pytest *request*
    finalizer) after the calling test returns.
    """
    patcher = patch(q_class_name, autospec=autospec, **kwargs)
    # Undo the patch when the requesting test finishes.
    request.addfinalizer(patcher.stop)
    return patcher.start()
448ab8aff578bbdb6ab8a9b3130fdf0fac1f113b
647,058
from typing import Optional
import requests
import re


def get_user_image(username: str) -> Optional[str]:
    """Return URL of the profile picture of a given user, or None if no
    profile-picture URL is found on the page."""
    profile_url = f"https://wt.social/u/{username}"
    page = requests.get(profile_url)
    found = re.search(r"(https.*?--profile_pic\.\w+)", page.text)
    if found is None:
        return None
    # URLs are JSON-escaped in the page source; drop the backslashes.
    return found.group(1).replace("\\", "")
14e1d71e5ecc824aefabbd0eb0c7183f7993ee4b
484,516
def find_majority_element(values):
    """Return the element occurring in more than half of *values*, or None
    if no such element exists (including for empty input)."""
    if not values:
        return None
    # Boyer-Moore voting pass: produces the majority element if one exists.
    candidate, votes = values[0], 0
    for value in values:
        if votes == 0:
            candidate = value
        votes += 1 if value == candidate else -1
    # Verification pass: the candidate is only valid if it truly dominates.
    occurrences = sum(1 for value in values if value == candidate)
    return candidate if occurrences > len(values) // 2 else None
8e28a7c2e823e406465b871beb327af367c2a8bc
270,068
def FindPart(part: dict, mime_type: str):
    """
    Recursively parses the parts of an email and returns the first part with
    the requested mime_type.

    :param part: Part of the email to parse (generally called on the payload).
    :param mime_type: MIME Type to look for.
    :return: The part of the email with the matching type, or None.
    """
    if part['mimeType'] == mime_type:
        return part
    if 'multipart' not in part['mimeType']:
        return None
    for sub_part in part['parts']:
        match = FindPart(sub_part, mime_type)
        if match is not None:
            return match
ab557300860b6030acc5b851aa5bdb10ae2850cc
44,905
def title_case(text: str) -> str:
    """Convert the text to title case.

    Args:
        text: The text to convert to title case.

    Returns:
        str: The text converted to title case.
    """
    return str.title(text)
d4e87ecd4fbc003e6370ae2d96cf9a3aef114243
653,541
def G_nu2lame(G, nu):
    """Converts shear modulus and Poisson's ratio to Lame constants.

    Parameters:
        G : shear modulus
        nu : Poisson's ratio

    Returns:
        lame1 : lambda
        lame2 : mu
    """
    lame1 = 2 * G * nu / (1 - 2 * nu)
    lame2 = G
    return lame1, lame2
70e227168adb02d8bfb05eaa0af0257bd4196dc4
356,167
import random


def gen(n=12, flags=None, forbidden=""):
    """Generate a random string of characters for use as a secure password.

    Inputs
        n: Length of the string to generate.
        flags: Dict of booleans selecting character classes (missing keys
            are treated as False):
            digits: Include at least one digit in the string.
            lower: Include at least one lowercase letter in the string.
            upper: Include at least one uppercase letter in the string.
            special: Include at least one common special character in the string.
            more: Include at least one of other special characters in the string.
            ReduceConfusion: Remove "01l|O" characters.
        forbidden: Any characters that should not be in the final string.

    Outputs
        A string of random ASCII characters (empty when no class is selected).

    NOTE(review): if `forbidden`/`ReduceConfusion` removes every character of
    a selected class, the criteria can never be met and the loop never
    terminates -- callers must not exclude an entire selected class.
    """
    # Fix: the old default `flags={}` was a mutable default, and every flag
    # was read with flags['key'], so the default (or any partial dict) raised
    # KeyError. Use None + .get() so missing keys simply mean False.
    if flags is None:
        flags = {}

    # Cryptographically secure random source.
    rng = random.SystemRandom()

    ascii_digits = '0123456789'
    ascii_uppers = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    ascii_lowers = "abcdefghijklmnopqrstuvwxyz"
    ascii_special = "_=+!@#$%&*?-"
    ascii_more = r"""`|^\/~<>'",.(){}[];:"""

    # Pool of candidate characters, built from the selected classes.
    ascii_chars = ""
    if flags.get('digits'):
        ascii_chars += ascii_digits
    if flags.get('upper'):
        ascii_chars += ascii_uppers
    if flags.get('lower'):
        ascii_chars += ascii_lowers
    if flags.get('special'):
        ascii_chars += ascii_special
    if flags.get('more'):
        ascii_chars += ascii_more

    # Remove easily-confused glyphs (no 1's, 0's, l's, |'s or O's).
    if flags.get('ReduceConfusion'):
        for confusable in "lO10|":
            ascii_chars = ascii_chars.replace(confusable, "")

    # Remove any characters specified as forbidden by the user.
    ascii_chars = [c for c in ascii_chars if c not in forbidden]

    def meets_criteria(candidate):
        """Ensure the candidate contains a char from each selected class."""
        chosen = set(candidate)
        if flags.get('digits') and not chosen & set(ascii_digits):
            return False
        if flags.get('upper') and not chosen & set(ascii_uppers):
            return False
        if flags.get('lower') and not chosen & set(ascii_lowers):
            return False
        if flags.get('special') and not chosen & set(ascii_special):
            return False
        if flags.get('more') and not chosen & set(ascii_more):
            return False
        return True

    # Draw candidates until one satisfies every selected class.
    pw = ""
    while not meets_criteria(pw):
        pw = "".join(rng.choice(ascii_chars) for _ in range(n))
    return pw
2fd28c0cc1ad9205a054c31f13ab802890e96d5c
91,806
def Outcomes(d1, d2):
    """Returns a list of all possible outcomes for d1 and d2 being rolled,
    where each element is a total pip count.

    For two normal dice the result is [2, 3, 3, 4, 4, 4, ..., 12] (one entry
    per face pairing, d2's faces iterated in the outer position).
    """
    totals = []
    for face_b in d2.pips:
        for face_a in d1.pips:
            totals.append(face_a + face_b)
    return totals
3c8bd4406b566cea3d3dfe125e1719206571d6d2
367,047