Dataset columns: content (string, 39-9.28k chars) · sha1 (string, 40 chars) · id (int64, 8-710k)
def _edges_from_path(lst: list) -> list[tuple]:
    """Return a list of edges from the given path."""
    edges = []
    for index in range(len(lst) - 1):
        edges.append((lst[index], lst[index + 1]))
    return edges
78155390cbbc859134a614e009090cc2da9debc1
360,988
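A minimal usage sketch for `_edges_from_path` above, with made-up path values:

>>> _edges_from_path(['a', 'b', 'c', 'd'])
[('a', 'b'), ('b', 'c'), ('c', 'd')]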
def does_diff_include_ts_files(diff_files):
    """Returns true if diff includes TypeScript files.

    Args:
        diff_files: list(str). List of files changed.

    Returns:
        bool. Whether the diff contains changes in any TypeScript files.
    """
    for file_path in diff_files:
        if file_path.endswith(b'.ts'):
            return True
    return False
b4bdde2c86687d27ccc0f59fdb4b5e76705e4ec7
606,042
def proportion_of_missing_values(dfData):
    """Compute the proportion of missing values for each feature.

    Parameters
    ----------
    dfData : pandas.DataFrame
        DataFrame containing the data.

    Returns
    -------
    serProportions : pandas.Series
        Values between 0 and 1 for each feature, giving the ratio of
        missing values to all values.
    """
    numberOfRows = len(dfData)
    serNumberOfNans = dfData.isnull().sum()
    serProportions = serNumberOfNans / numberOfRows
    return serProportions
3d824f60ce1e14f8cb7bbb5fc74978c25df5580c
437,568
from typing import List
from typing import Any
from typing import Tuple
import itertools


def common_subsequences(dataset: List[Any],
                        min_match_len=3,
                        max_match_len=10) -> List[Tuple[int, List[Any]]]:
    """Catalog common sequential patterns (i.e., occurring twice or more)
    of length min_match_len to max_match_len that occur within list
    dataset. Return a list of (count, subsequence), most frequent first.
    """
    results = []

    def _count_occurrences(sublist, parent):
        return sum(parent[i:i + len(sublist)] == sublist
                   for i in range(len(parent)))

    if max_match_len > len(dataset) > 1:
        raise Exception("max_match_len cannot be > len(dataset)")
    for i1 in range(len(dataset) - max_match_len):
        for i2 in range(min_match_len, max_match_len):
            head = dataset[i1:i1 + i2]
            tail = dataset[i1 + i2:]
            count = _count_occurrences(head, tail)
            if head not in results:
                results = results + [head for i in range(count + 1)]
    results = [list(j) for i, j in itertools.groupby(sorted(results))]
    results = [(len(seq), seq[0]) for seq in results]
    results = filter(lambda r: r[0] > 1, results)
    results = sorted(results, key=lambda m: m[0], reverse=True)
    return results
84f74bf34d08c94f9a06d938935ef12e74333924
79,961
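A usage sketch for `common_subsequences` above on a made-up repeating list; the counts reflect how often each subsequence occurs:

>>> common_subsequences([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
...                     min_match_len=3, max_match_len=4)
[(3, [1, 2, 3]), (3, [2, 3, 4]), (2, [3, 4, 1]), (2, [4, 1, 2])]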
def mean(num_1st):
    """Calculates the mean of a list of numbers.

    Parameters
    ----------
    num_1st : list
        List of numbers to calculate the average of.

    Returns
    -------
    avgMean
        The average/mean of num_1st.

    Examples
    --------
    >>> mean([1,2,3,4,5])
    3.0
    """
    Sum = sum(num_1st)
    N = len(num_1st)
    avgMean = float(Sum / N)
    return avgMean
a01394e0e7cc1ecc63474f06889618c3221cc682
400,892
def hello(name):
    """Say hello

    Function docstring using Google docstring style.

    Args:
        name (str): Name to say hello to

    Returns:
        str: Hello message

    Raises:
        ValueError: If `name` is equal to `nobody`

    Example:
        This function can be called with `Jane Smith` as argument using

        >>> from {{ cookiecutter.package_name }}.my_module import hello
        >>> hello('Jane Smith')
        'Hello Jane Smith!'
    """
    if name == 'nobody':
        raise ValueError('Can not say hello to nobody')
    return f'Hello {name}!'
b3ec11a61b43ad12e5f5206c966ea18ca199ee85
324,801
def load_doc(filename) -> str:
    """Opens a file and reads it in as a string.

    Parameters:
        filename (str): Path to file

    Returns:
        str: contents of file
    """
    with open(filename, 'r') as f:
        doc = f.read()
    return doc
360f05803663bedfa13cb33e3c5ce377ef6086d0
517,295
def _devicePixelRatioF(obj):
    """Return obj.devicePixelRatioF() with graceful fallback for older Qt.

    This can be replaced by the direct call when we require Qt>=5.6.
    """
    try:
        # Not available on Qt<5.6
        return obj.devicePixelRatioF() or 1
    except AttributeError:
        pass
    try:
        # Not available on Qt4 or some older Qt5.
        # obj.devicePixelRatio() returns 0 in rare cases.
        return obj.devicePixelRatio() or 1
    except AttributeError:
        return 1
155e498b4be19d572c213f01782156e68c2a60c5
589,448
def identity(x, rho=None):
    """Identity operator."""
    return x
fe6b4b802cc1d96b4ce6d79e0f35c4ce51c35bf9
556,437
def cummean(x):
    """Return a same-length array, containing the cumulative mean."""
    return x.expanding().mean()
b5a35c56cb78e0588dd5be64a75384c4cd81ccb5
704,788
def is_oneliner(txt) -> bool:
    """Checks if the given string contains no newlines."""
    assert isinstance(txt, str)
    return len(txt.splitlines()) == 1
cd612c5818b9458e7a048907cdcc9aba32f41973
671,995
from typing import Iterable


def count_op(it: Iterable, oper, value):
    """Return a count of the number of items in `it` where
    ``oper(x, value) == True``. This allows user-defined objects to be
    included and is subtly different to ``[...].count(...)`` which uses
    the __eq__ operator."""
    return [oper(x, value) for x in it].count(True)
c52bea160b2d4460b9f4ad147752bd080ae440f6
120,449
def _get_euclidean_dist(e1, e2):
    """Calculate the euclidean distance between e1 and e2."""
    e1 = e1.flatten()
    e2 = e2.flatten()
    return sum([(el1 - el2) ** 2 for el1, el2 in zip(e1, e2)]) ** 0.5
c29777c9f17a099d97984711f2f5d261c86276da
670,088
def Prefix(string, pad, length):
    """Prefix the supplied string until it is the desired length,
    with the given padding character."""
    assert len(string) <= length
    assert len(pad) == 1
    result = ""
    while len(string) + len(result) < length:
        result += pad
    result += string
    return result
d3d66bab9365909d5ac52e1eb1695861ac75a6e3
106,667
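A quick usage sketch for `Prefix` above, zero-padding a number to width 5:

>>> Prefix("42", "0", 5)
'00042'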
def split_arg(argument, delimiter=","):
    """Split argument with delimiter and return list."""
    if not argument:
        return []
    return [arg.strip() for arg in argument.split(delimiter) if arg]
2f3eb19fa2a14b7d5b19569b5dc90cce814ff259
257,277
def adjacent_enemy(inp, rowI, colI, enemy):
    """Check for an enemy in an adjacent square.

    Note: assumes (rowI, colI) is not on the board edge; otherwise the
    lookups below can raise IndexError or wrap around via negative indices.
    """
    if any(x[0] == enemy for x in [inp[rowI + 1][colI],
                                   inp[rowI - 1][colI],
                                   inp[rowI][colI + 1],
                                   inp[rowI][colI - 1]]):
        return True
    return False
1fb36051402d3fef0c4c8b022676bdeb4f121b3a
653,181
from typing import Tuple


def calculate_asymmetric_level_ranges(num_bits: int,
                                      narrow_range: bool = False) -> Tuple[int, int, int]:
    """Calculates the low and high quant numbers and the number of
    quantization levels for the asymmetric quantization scheme.

    :param num_bits: The bitwidth of the quantization
    :param narrow_range: The flag specifying the quantization range:
        [1; 2^num_bits - 1] if True, and [0; 2^num_bits - 1] otherwise
    :return: A tuple of
        level_low - the low quant number,
        level_high - the high quant number,
        levels - the number of quantization levels
    """
    levels = 2 ** num_bits
    level_high = levels - 1
    level_low = 0
    if narrow_range:
        level_low = level_low + 1
        levels = levels - 1
    return level_low, level_high, levels
431deb1ea73a2e0d7f12d17272ceb21b54784f06
565,545
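A usage sketch for `calculate_asymmetric_level_ranges` above at 8-bit width:

>>> calculate_asymmetric_level_ranges(8)
(0, 255, 256)
>>> calculate_asymmetric_level_ranges(8, narrow_range=True)
(1, 255, 255)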
def _generate_line(club, name_column_width):
    """Generates a single line of output.

    :param Organization club: object holding information about club
    :param int name_column_width: width of name column
    :return: nicely formatted line of output
    :rtype: str
    """
    name_column = str(club.name) + ":"
    while len(name_column) <= (name_column_width + 3):
        name_column += " "
    stam_column = "{}".format(club.stam)
    while len(stam_column) <= 6:
        stam_column += " "
    guid_column = "({})".format(club.guid)
    return "{}{}{}".format(name_column, stam_column, guid_column)
a62417b4dcd339cc824a6c3a8a4f79dd19abb4c8
671,719
def _default_kwargs_split_fn(kwargs):
    """Default `kwargs` `dict` getter."""
    return (kwargs.get('distribution_kwargs', {}),
            kwargs.get('bijector_kwargs', {}))
3d39d73ba6d7bfd673fa7216f3e386e61abca795
641,658
def pad_args(src: list, total_verdicts: int) -> list:
    """Pad extra args with "0"."""
    padding_count = max(total_verdicts - len(src), 0)
    result = []
    result.extend(src)
    result.extend(["0"] * padding_count)
    return result
6c8a5e14d1b6348c9bc1c78ac9193b4ffcd3837e
195,552
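A quick usage sketch for `pad_args` above, with made-up values:

>>> pad_args(["12", "7"], 4)
['12', '7', '0', '0']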
import torch


def central_grady(image):
    """Compute central vertical gradient.

    Assumes input is a 4D tensor with dimensions
    (batch, channels, rows, cols).
    """
    assert len(image.shape) == 4
    padder = torch.nn.ReplicationPad2d((0, 0, 1, 1))
    padded_image = padder(image)
    gy = 0.5 * (padded_image[:, :, 2:, :] - padded_image[:, :, :-2, :])
    return gy
5d484c15a0d347cea4fce9c1e7c5cd487224d308
595,283
def no_anagrams(passphrase):
    """Checks if passphrase doesn't contain words that are anagrams."""
    anagrams = set(''.join(sorted(word)) for word in passphrase)
    return len(passphrase) == len(anagrams)
44b5dc0a7063a34be29982f08db787c30cc019a7
467,731
import re


def collapse_whitespace(strarg, remove_nl=False):
    """Returns a cleaned-up version of the block text.

    It collapses whitespace, removes tabs, and, if specified, keeps only
    the tag delimiters (like in cleaneval) as newlines."""
    strarg = re.sub(r'\t+', ' ', strarg)  # replace tabs with spaces
    if remove_nl:
        # remove newlines, used for cleaneval;
        # they will be replaced by the removed tags
        strarg = re.sub(r'\n', ' ', strarg)
        strarg = re.sub(r'<[a-zA-Z]+>', '\n', strarg)
    strarg = re.sub(r' +', ' ', strarg)  # collapse whitespace
    return strarg
3ba832be1a408cc1076ae96fe215601b6bc5f120
517,275
from typing import List


def parse_int_list(input: str) -> List[int]:
    """Parse a list of ints from the config file.

    Args:
        input (str): Input list

    Returns:
        List[int]: List of ints
    """
    return list(int(value) for value in input.split(","))
86fe2fa6159f6f7d25f970c9661b446888c0b48a
331,806
def data(object_type: str, subscriber: str) -> str:
    """Return the db key for subscriber event data.

    Args:
        object_type (str): Type of object
        subscriber (str): Name of the subscriber

    Returns:
        str, database key for the event data
    """
    return 'events:{}:{}:data'.format(object_type, subscriber)
665f2a765c45fc8b651c9a2060c654d1bd8c362c
420,452
def FetchEntities(query_obj, limit):
    """Fetches number of Entities up to limit using query object.

    Args:
        query_obj: AppEngine Datastore Query Object.
        limit: Fetch limit on number of records you want to fetch.

    Returns:
        Fetched Entities.
    """
    entities = []
    # If limit is more than 1000, then let's fetch more records using a cursor.
    if limit > 1000:
        results = query_obj.fetch(1000)
        entities.extend(results)
        cursor = query_obj.cursor()
        while results and limit > len(entities):
            query_obj.with_cursor(cursor)
            results = query_obj.fetch(1000)
            entities.extend(results)
            cursor = query_obj.cursor()
    else:
        entities = query_obj.fetch(limit)
    return entities
ed86b6818ad07cc844a85dd272d29eea803cb11a
581,662
def lookup(dic, key, *keys):
    """A generic dictionary access helper.

    This helps simplify code that uses heavily nested dictionaries. It
    will return None if any of the keys in *keys do not exist.

    ::

        >>> lookup({'this': {'is': 'nested'}}, 'this', 'is')
        nested
        >>> lookup({}, 'this', 'is')
        None
    """
    if keys:
        return lookup(dic.get(key, {}), keys[0], *keys[1:])
    return dic.get(key)
e4da4ed2bcaf070496a8a5ef3878c7a53099bc84
347,300
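A usage sketch for `lookup` above on a hypothetical nested config:

>>> cfg = {'server': {'http': {'port': 8080}}}
>>> lookup(cfg, 'server', 'http', 'port')
8080
>>> lookup(cfg, 'server', 'smtp', 'port') is None
True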
def producer_config(config):
    """Filter the producer config."""
    for field in ["group.id", "partition.assignment.strategy",
                  "session.timeout.ms", "default.topic.config"]:
        if field in config:
            del config[field]
    return config
3245c62f26288ab58a992494a4d0e87b772fcc81
636,930
def check_bracket_validity(brackets: str):
    """Check if a sequence of brackets is valid.

    Examples
    --------
    >>> check_bracket_validity("()[]{}")
    True
    >>> check_bracket_validity("([{([{}])}])")
    True
    >>> check_bracket_validity("([{([{}])}]")
    False
    >>> check_bracket_validity("([{([{}])}]))")
    False
    >>> check_bracket_validity("([)]")
    False
    """
    o, p, s = "([{", {"(": ")", "[": "]", "{": "}"}, []
    for b in brackets:
        if b in o:
            s.append(b)
        else:
            if len(s) == 0:
                return False
            q = s.pop()
            if p[q] != b:
                return False
    return len(s) == 0
1accd13bc2d282d85c038fa06752aa8619f71af3
459,377
import pytz
from datetime import datetime


def utc_(moment_time, tz):
    """Converts unix time to a timezone-aware datetime.datetime.

    :param moment_time: unix time; may be format: int, float
    :param tz: timezone; format: str("Region/City")
    :return: timezone-aware datetime.datetime
    """
    local_tz = pytz.timezone(tz)
    # Interpret the timestamp as UTC, then convert to the target timezone;
    # localizing the naive UTC value directly to tz would mislabel it.
    dt = pytz.utc.localize(datetime.utcfromtimestamp(moment_time))
    dt = dt.astimezone(local_tz)
    return dt
ffe8850c7c5204e174c97a13e7548d984d1c844a
142,383
import torch


def convert_to_boxes(z_where, z_pres, z_pres_prob):
    """Convert latent variables into bounding boxes.

    All inputs should be tensors.

    :param z_where: (B, N, 4). [sx, sy, tx, ty]. N is arch.G ** 2
    :param z_pres: (B, N). Must be a binary byte tensor
    :param z_pres_prob: (B, N). In range (0, 1)
    :return: [[y_min, y_max, x_min, x_max, conf] * N] * B
    """
    B, N, _ = z_where.size()
    z_pres = z_pres.bool()
    # each (B, N, 1)
    width, height, center_x, center_y = torch.split(z_where, 1, dim=-1)
    center_x = (center_x + 1.0) / 2.0
    center_y = (center_y + 1.0) / 2.0
    x_min = center_x - width / 2
    x_max = center_x + width / 2
    y_min = center_y - height / 2
    y_max = center_y + height / 2
    # (B, N, 4)
    pos = torch.cat([y_min, y_max, x_min, x_max], dim=-1)
    boxes = []
    for b in range(B):
        # (N, 4), (N,) -> (M, 4), where M is the number of z_pres == 1
        box = pos[b][z_pres[b]]
        # (N,) -> (M, 1)
        conf = z_pres_prob[b][z_pres[b]][:, None]
        # (M, 5)
        box = torch.cat([box, conf], dim=1)
        box = box.detach().cpu().numpy()
        boxes.append(box)
    return boxes
0efb5118aab6a945537917e101b75f2e78c52897
304,224
def _eval_expr(expr, ctxt, vars=None):
    """Evaluate the given `Expression` object.

    :param expr: the expression to evaluate
    :param ctxt: the `Context`
    :param vars: additional variables that should be available to the
                 expression
    :return: the result of the evaluation
    """
    if vars:
        ctxt.push(vars)
    retval = expr.evaluate(ctxt)
    if vars:
        ctxt.pop()
    return retval
20c3fc9d2a8a4f61ea72d6df3474e034c6369d7e
641,808
def determinize_tree(determinization, ppddl_tree, index=0):
    """Replaces all probabilistic effects with the given determinization.

    Variable "determinization" is a list of determinizations, as created
    by "get_all_determinizations_effect". This function will visit the
    PPDDL tree in pre-order traversal and each time it encounters a
    probabilistic effect, it will replace it with the effect at
    "determinization[index][1][1]", then increment variable "index" and
    return its new value. Therefore, the user of this function must
    ensure that each effect in the given determinization corresponds to
    the proper probabilistic effect in the PPDDL tree.
    """
    if not isinstance(ppddl_tree, list) or not ppddl_tree:
        return index
    if ppddl_tree[0] == 'probabilistic':
        ppddl_tree[:] = []
        ppddl_tree.extend(determinization[index][1][1])
        return index + 1
    else:
        for element in ppddl_tree:
            index = determinize_tree(determinization, element, index)
        return index
255fbec5403cccfb21ee602d5c4f334a22ea181c
560,194
def len_board(board):
    """Count the marked cells (X and O) on the board and return an integer."""
    counter = 0
    for line in board:
        for cell in line:
            if cell != ".":
                counter += 1
    return counter
fcfaa78b7ea9998411ceded4311eccdc243bf970
403,372
import bz2
import base64


def passx_decode(passx):
    """Decode the obfuscated plain text password; returns the plain text password."""
    return bz2.decompress(base64.b64decode(passx.encode("ascii"))).decode("ascii")
b8b2138c55dd28734661484a231128e6f3ccbbb7
8,094
def get_resource(request):
    """Get the resource being requested."""
    path = request.path
    path = [x for x in path.split('/') if x]
    if path:
        return path[-1]
    else:
        return None
8a17e47e8610f79f0919965e30a0194e4e2e1082
187,395
def import_name_to_name_version(import_name):
    """Convert the import name to a name and version.

    Args:
        import_name (str): Import name with a version (Ex: "custom_0_0_0")

    Returns:
        name (str): Just the package name (Ex: "custom")
        version (str)[None]: Version of the package (Ex: "0.0.0")
    """
    s = str(import_name).split('_')
    for i, n in enumerate(s):
        if len(n) > 0 and n.isdigit():
            return '_'.join(s[:i]), '.'.join(s[i:])
    return import_name, None
48142220295454fc0c4db518367d803ee2c09366
224,482
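A usage sketch for `import_name_to_name_version` above:

>>> import_name_to_name_version('custom_0_0_0')
('custom', '0.0.0')
>>> import_name_to_name_version('plain_package')
('plain_package', None)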
import six


def next_or_none(iterator):
    """Returns the next item from the iterator, or None if there are no more."""
    try:
        return six.advance_iterator(iterator)
    except StopIteration:
        return None
dfa130c873e8fa49cba458916b2be933d0b734f1
573,748
def get_words(xmlelement):
    """Get all words, lower-cased, from the word tags in the BNC xmlelement."""
    return [word_tag.text.strip().lower()
            for word_tag in xmlelement.find_all('w')]
4ae354dd3057260f2a4f15b9203262d8663bb672
404,012
def _getVersionString(value):
    """Encodes string for version information string tables.

    Arguments:
        value - string to encode

    Returns:
        bytes - value encoded as utf-16le
    """
    return value.encode("utf-16le")
36646a686c17f2c69d71a0cdeede56f0a1e514e2
8,511
def format_argument_version(arg_version):
    """Replaces '.' with '-' throughout arg_version to match formatting
    requirements for the log message.

    Args:
        arg_version(str): Version tag to be formatted

    Returns:
        str: Formatted version tag
    """
    return arg_version.replace(".", "-")
690d4cf2ae54ff705d3956295fb2be9ae4985dcb
358,903
def create_payload(text, target_lang):
    """Create payload for DeepL API call."""
    return {
        "jsonrpc": "2.0",
        "method": "LMT_handle_jobs",
        "id": 1,
        "params": {
            "jobs": [{"kind": "default", "raw_en_sentence": s} for s in text],
            "lang": {"user_preferred_langs": ["EN", "PL"],
                     "source_lang_user_selected": "auto",
                     "target_lang": target_lang},
            "priority": 1}}
4b3def68bc60a8074578cea40ab8a1c8adecd409
207,146
import pathlib


def list_ancestors(path):
    """Return logical ancestors of the path."""
    return [str(parent) for parent in pathlib.PurePosixPath(path).parents
            if str(parent) != "."]
7164933474ce45fd8ad7a5d96370e70205dcc1a7
365,524
import torch


def collate_fn(samples):
    """collate_fn for SequentialMNIST.

    Args:
        samples: a list of samples. Each item is an (imgs, nums) pair, where
            - imgs: shape (T, 1, C, H, W)
            - nums: shape (T, 1)
            And len(samples) is the batch size.

    Returns:
        A tuple (imgs, nums), where
            - imgs: shape (T, B, C, H, W)
            - nums: shape (T, B)
    """
    imgs, nums = zip(*samples)
    # (T, B, C, H, W)
    imgs = torch.cat(imgs, dim=1)
    # (T, B)
    nums = torch.cat(nums, dim=1)
    return imgs, nums
29a8b44a261d9db1de0b91c4f5eace64ee21d5fb
674,778
import threading


def count_threads_with_name(name):
    """Returns the number of currently existing threads with the given name."""
    n_count = 0
    for th in threading.enumerate():
        if th.name == name:
            n_count += 1
    return n_count
9de9e04e823c5e6cde94591616dd9032fb91f665
486,491
def affixed(text, prefix=None, suffix=None, normalize=None):
    """
    Args:
        text (str | None): Text to ensure prefixed
        prefix (str | None): Prefix to add (if not already there)
        suffix (str | None): Suffix to add (if not already there)
        normalize (callable | None): Optional function to apply to `text`

    Returns:
        (str | None): `text` guaranteed starting with `prefix` and ending
        with `suffix`
    """
    if text is not None:
        if normalize:
            text = normalize(text)
        if prefix and not text.startswith(prefix):
            text = prefix + text
        if suffix and not text.endswith(suffix):
            text = text + suffix
    return text
2ebd8d734ae3cd641212ae173e7e804e359ffffa
245,057
from typing import Dict
from typing import Any
from typing import List


def get_attribute_details_from_given_lists(source_dict: Dict[str, Any],
                                           key: str,
                                           lists: List[str]) -> str:
    """Retrieve attribute details from given lists of various properties.

    :param source_dict: The source dictionary from which attributes are
        to be fetched
    :param key: The attribute to be fetched from the lists
    :param lists: The lists from which the attribute value is to be fetched
    :return: value: value of the requested attribute
    """
    value = ''
    # Iterate the property lists only when the source dict is non-empty
    # (the original `source_dict and lists` idiom, written out explicitly).
    for single_list in (lists if source_dict else []):
        if key in source_dict.get(single_list, {}):
            value += f'{single_list.capitalize()}: {source_dict.get(single_list, {}).get(key, "")}\n'
    return value
6ddc0d6a0d6f14bfe2f7cc8ef6e17fed98ae6c75
343,499
def words_to_indices(sentence, worddict):
    """Transform the words in a sentence to integer indices.

    Args:
        sentence: A list of words that must be transformed to indices.
        worddict: A dictionary associating words to indices.

    Returns:
        A list of indices.
    """
    # Include the beginning-of-sentence token at the start of the sentence.
    indices = [worddict["_BOS_"]]
    for word in sentence:
        if word in worddict:
            index = worddict[word]
        else:
            # Words absent from 'worddict' are treated as a special
            # out-of-vocabulary word (OOV).
            index = worddict['_OOV_']
        indices.append(index)
    # Add the end-of-sentence token at the end of the sentence.
    indices.append(worddict["_EOS_"])
    return indices
17f7722d7c1f6332bbe1b729d51d330ebd6e6933
377,492
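A usage sketch for `words_to_indices` above with a toy vocabulary (hypothetical indices):

>>> worddict = {'_BOS_': 0, '_EOS_': 1, '_OOV_': 2, 'hello': 3}
>>> words_to_indices(['hello', 'world'], worddict)
[0, 3, 2, 1]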
def flip_boxes(boxes, im_width):
    """Flip boxes horizontally."""
    boxes_flipped = boxes.copy()
    boxes_flipped[:, 0::4] = im_width - boxes[:, 2::4] - 1
    boxes_flipped[:, 2::4] = im_width - boxes[:, 0::4] - 1
    return boxes_flipped
14a384e55e521f4162e0e69c7d946fdf434c149a
406,341
def format_link_header(link_header_data):
    """Return a string ready to be used in a Link: header."""
    links = ['<{0}>; rel="{1}"'.format(data['link'], data['rel'])
             for data in link_header_data]
    return ', '.join(links)
9a68ff381d51e6e10fe257d2d2d6766295ffc050
708,695
def spancmb(class_=None, **kwargs):
    """span combinator.

    Because class is a reserved keyword in python, class_ is the first arg.
    kwargs keys may be any html global attribute.
    """
    # put class first (relies on Python dicts preserving insertion order)
    cdict = {'class': class_}
    cdict.update(kwargs)
    content = ' '.join(f'{key}="{value}"' for key, value in cdict.items()
                       if value is not None)

    def spantag(text):
        return f'<span {content}>{text}</span>'
    return spantag
6a524e035781052ff92b61a35c71faff2bd54b93
657,252
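A usage sketch for `spancmb` above, with made-up attribute values:

>>> em = spancmb('highlight', title='note')
>>> em('hello')
'<span class="highlight" title="note">hello</span>'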
def _data_range(data: str) -> bool:
    """Check whether the string data encodes a range of numbers
    (i.e. contains a '-' separator)."""
    return data.find('-') != -1
16ba9bade3ec711c59e33f0410d6b48eda4963e6
632,212
def get_snr(catalog):
    """Compute the S/N for each object in the catalog.

    It does not impose any cuts and returns NaNs for invalid S/N values.
    """
    snr = catalog['iflux_cmodel'] / catalog['iflux_cmodel_err']
    return snr
47bff66a244799f154bd87cdb24adcaf92751be3
198,503
def get_json_field(json_obj, attribute_name, json_filename=None,
                   suppress_err_msg=False):
    """Retrieves a value from a JSON object (dict).

    Args:
        json_obj (dict): The JSON file to retrieve a value from.
        attribute_name (str): The attribute to look up. If there are nested
            structures expressed within the attribute_name, they should be
            separated by periods. Consequently, attribute names and nested
            names cannot contain periods.
        json_filename (Optional[str]): The name of the JSON file the
            attribute is read from. This will be included in error
            messages. Default is `None`.
        suppress_err_msg (Optional[bool]): Specifies whether an error
            message will be printed to stdout if an error condition is met.
            By default, error messages will be printed.

    Raises:
        KeyError: If the specified `attribute_name` does not exist in the
            JSON.
    """
    while '.' in attribute_name:
        attrib_as_list = attribute_name.split('.')
        attrib_name_single = attrib_as_list.pop(0)
        try:
            json_obj = json_obj[attrib_name_single]
        except KeyError:
            if not suppress_err_msg:
                error_suffix = '.'
                if json_filename is not None:
                    error_suffix = " '%s'." % json_filename
                print("Attribute '%s' not found in preferences file%s"
                      % (attrib_name_single, error_suffix))
            raise
        attribute_name = '.'.join(attrib_as_list)
    # No period included when only one component remains.
    return json_obj[attribute_name]
220078ee53faf2e1cb97548c78b1dd4ba91eb6d7
323,698
def filter_by_startswith(seq, chars):
    """A generator that filters ``seq`` such that every element in ``seq``
    begins with ``chars``."""
    return (elem for elem in seq if elem.startswith(chars))
cafb1b8da41ec5736b6bdd149bb157877184721d
377,123
from typing import List
from typing import Callable
from typing import Tuple


def classify_annotations(
        confidences: List[float],
        condition: Callable[..., bool],
        class_if_true: str,
        class_if_false: str
) -> Tuple[List[str], int, int]:
    """Classifies annotations based on a condition.

    :param confidences: A list of confidences.
    :param condition: A function that takes a confidence and returns a boolean.
    :param class_if_true: The class to assign to the annotations that
        satisfy the condition.
    :param class_if_false: The class to assign to the annotations that do
        not satisfy the condition.
    :return: A tuple containing:
        - A list of classifications.
        - The number of positive classifications.
        - The number of negative classifications.
    """
    classifications = []
    num_positive_classifications = 0
    num_negative_classifications = 0
    for confidence in confidences:
        if condition(confidence):
            classification = class_if_true
            num_positive_classifications += 1
        else:
            classification = class_if_false
            num_negative_classifications += 1
        classifications.append(classification)
    return classifications, num_positive_classifications, num_negative_classifications
c8214cc7c5ab19340ce4353a8f55d8580a2b7738
265,917
import filelock
import pickle


def pickle_read(filename):
    """Read pickle file contents while holding a lock."""
    lock = filelock.FileLock(filename + '.lck', timeout=20)
    with lock:
        # Open via a context manager so the file handle is closed promptly.
        with open(filename, 'rb') as f:
            pickle_data = pickle.load(f)
    return pickle_data
e6bb14a849ff0c8cb524ce7fce4311455d087a11
582,300
def close_enough(val_1, val_2):
    """Quick function to determine if val_1 ~= val_2."""
    return abs(val_1 - val_2) < 0.001
06593406d9c4d47ef1b20a0bcc43a36beea38c4c
282,965
def load_file(file_name: str, modes='r'):
    """Load a file.

    :param file_name: file to load.
    :param modes: File open modes. Defaults to 'r'
    :return: contents of file.
    """
    with open(file_name, modes) as f:
        return f.read()
4800e0531071d1392c432e6801771f4de8414b11
288,847
def weighted_centroid(x, y, density):
    """Computes the weighted centroid of a bin -- eq. 4 of
    Cappellari & Copin (2003).

    INPUTS
        x       : x-coordinate of pixels in bin
        y       : y-coordinate of pixels in bin
        density : pixel weights
    """
    mass = density.sum()
    xbar = (x * density).sum() / mass
    ybar = (y * density).sum() / mass
    return xbar, ybar
b613b881eec2fc326c549854d6d5cf4bf901cdd4
544,581
def moffat(coords, y0, x0, amplitude, alpha, beta=1.5):
    r"""Moffat Function

    Symmetric 2D Moffat function:

    .. math:: A \left(1 + \frac{(x-x_0)^2 + (y-y_0)^2}{\alpha^2}\right)^{-\beta}
    """
    Y, X = coords
    return amplitude * (1 + ((X - x0) ** 2 + (Y - y0) ** 2) / alpha ** 2) ** -beta
7a5138de271a266d711c6d9b354a81ef06479b56
489,482
from datetime import datetime


def generate_birthdays(birthdays: list, year_to_generate: int):
    """Generate birthday events from a list of birthdays.

    :param birthdays: birthday objects exposing an ``in_year(year)`` method
    :param year_to_generate: how many years from this year to add to the event
    :return: list of [birthday, date] pairs
    """
    this_year = datetime.now().year
    event_list = []
    for birthday in birthdays:
        for year in range(this_year, this_year + year_to_generate):
            date = birthday.in_year(year)
            event_list.append([birthday, date])
    return event_list
158c68da9f171904f76d2104dd6a6035a6bf7d39
37,669
def _get_usb_hub_map(device_info_list):
    """Creates a map of usb hub addresses to device_infos by port.

    Args:
        device_info_list (list): list of known usb_connections dicts.

    Returns:
        dict: map of usb hub addresses to device_infos by port
    """
    map_usb_hub_ports = {}
    for device_info in device_info_list:
        hub_address = device_info['usb_hub_address']
        port = device_info['usb_hub_port']
        if hub_address:
            if hub_address not in map_usb_hub_ports:
                map_usb_hub_ports[hub_address] = {}
            if not map_usb_hub_ports[hub_address].get(port) \
                    or device_info['ftdi_interface'] == 2:
                map_usb_hub_ports[hub_address][port] = device_info
    return map_usb_hub_ports
eaadc4713a41fdf38cea4fce35806d1d8772df27
703,943
def is_akamai_domain(domain):
    """Is the provided domain within akamai.net?"""
    return domain.endswith(".akamai.net")
d68af4119614f470738ead3b15db085eaeeda359
107,370
def _GenerateAdditionalProperties(values_dict):
    """Format values_dict into additionalProperties-style dict."""
    return {
        'additionalProperties': [
            {'key': key, 'value': value}
            for key, value in sorted(values_dict.items())
        ]}
fcd3e7c2a4f27ee177136ac75ec4704a367dcbc1
549,781
def _remove_batch(tensor):
    """Return a tensor with size tensor.size()[1:].

    Note: only valid when the leading (batch) dimension is 1.

    :param tensor: 3D or 4D tensor
    :return: 2D or 3D tensor
    """
    return tensor.view(tensor.size()[1:])
0fb8b56e0507e5f51e98827d5d97cb3b70e66549
325,544
def get_vehicle_angle(rect_angle):
    """Returns the angle needed to turn to align perpendicularly to the
    long side of the object."""
    angle = 90 - abs(rect_angle)
    return (angle + 90) % 360
8d3a9bb7a348e6fc952e0edbaa2c9aa188205869
192,165
def calcBayes(priorA, probBifA, probB):
    """priorA: initial estimate of probability of A independent of B
    probBifA: est. of probability of B assuming A is true
    probB: est. of probability of B
    returns probability of A given B"""
    return priorA * probBifA / probB
a02131484b9b66263fb58aeba8521fb20e50b2ad
171,894
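A worked example for `calcBayes` above with made-up estimates: P(A) = 1%, P(B|A) = 90%, P(B) = 10%, so P(A|B) = 0.01 * 0.9 / 0.1:

>>> round(calcBayes(0.01, 0.9, 0.1), 4)
0.09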
def game_status(game_state):
    """Return whether a given game is complete, based on game_state."""
    for score in game_state.scores:
        if score > 100:
            return True
    return False
a9ca63d82ce8ebd16f5c650a5f176b048dba2df0
152,530
def _rectify_countdown_or_bool(count_or_bool):
    """Used by recursive functions to specify which level to turn a bool
    on in.

    Counting down yields True, True, ..., False.
    Counting up yields False, False, False, ..., True.

    Args:
        count_or_bool (bool or int): if positive and an integer, it will
            count down, otherwise it will remain the same.

    Returns:
        int or bool: count_or_bool_

    CommandLine:
        python -m utool.util_str --test-_rectify_countdown_or_bool

    Example:
        >>> from ubelt.util_format import _rectify_countdown_or_bool  # NOQA
        >>> count_or_bool = True
        >>> a1 = (_rectify_countdown_or_bool(2))
        >>> a2 = (_rectify_countdown_or_bool(1))
        >>> a3 = (_rectify_countdown_or_bool(0))
        >>> a4 = (_rectify_countdown_or_bool(-1))
        >>> a5 = (_rectify_countdown_or_bool(-2))
        >>> a6 = (_rectify_countdown_or_bool(True))
        >>> a7 = (_rectify_countdown_or_bool(False))
        >>> a8 = (_rectify_countdown_or_bool(None))
        >>> result = [a1, a2, a3, a4, a5, a6, a7, a8]
        >>> print(result)
        [1, 0, 0, -1, -2, True, False, False]
    """
    if count_or_bool is True or count_or_bool is False:
        count_or_bool_ = count_or_bool
    elif isinstance(count_or_bool, int):
        if count_or_bool == 0:
            return 0
        elif count_or_bool > 0:
            count_or_bool_ = count_or_bool - 1
        else:
            # We don't count up negatives anymore
            count_or_bool_ = count_or_bool
    else:
        count_or_bool_ = False
    return count_or_bool_
ad8edadbe3d0f212629be005eaf22d7ebc6ea3a0
521,430
def merge_arg(cmd_arg, ini_arg):
    """Merge a command line argument and a configure file argument.

    cmd_arg has higher priority than ini_arg. Only non-empty arguments
    will be considered.
    """
    if isinstance(cmd_arg, (list, tuple)):
        cmd = cmd_arg[0]
        return cmd if cmd else ini_arg
    else:
        return cmd_arg if cmd_arg else ini_arg
f896463fad7a00096e9a1e3b730ad96614a6e966
23,199
def get_dict_key_by_value(val, dic):
    """Return the first key of a dictionary that maps to the given value.

    Args:
        val (Any): Value of the key.
        dic (dict): Dictionary to be checked.

    Returns:
        Any, key of the given value.
    """
    for d_key, d_val in dic.items():
        if d_val == val:
            return d_key
    return None
d01522a61d7a0549ed54bfcb620da10857d67ae7
703,212
import six


def string_repr(text, show_quotes=True):
    """Prints the repr of a string.

    Eliminates the leading 'b' in the repr in Python 3. Optionally can
    show or include quotes.
    """
    if six.PY3 and isinstance(text, six.binary_type):
        # Skip leading 'b' at the beginning of repr
        output = repr(text)[1:]
    else:
        output = repr(text)
    if show_quotes:
        return output
    else:
        return output[1:-1]
c1a0bea9965418a0ca97e09de7436b73c6659c56
372,554
def make_safe_for_html(html):
    """Turn the text `html` into a real HTML string."""
    html = html.replace("&", "&amp;")
    html = html.replace(" ", "&nbsp;")
    html = html.replace("<", "&lt;")
    html = html.replace("\n", "<br>")
    return html
18feac5e745bc86a51829bd7b1a7c4d73c15f1c9
623,356
from bs4 import BeautifulSoup
import re
import html


def __getCompanyMetadata(parsed: BeautifulSoup) -> dict:
    """Function to extract company Standard Industrial Classification (SIC)
    code, SIC type (i.e. description), company location, state of
    incorporation, and the end of its fiscal year. Searches the raw HTML
    of the company identification section of the page using regular
    expressions.

    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.

    Returns:
        dict -- Company metadata with keys `sic`, `sic_type`, `location`,
            `incorporation_state`, and `fiscal_year_end`.
    """
    # Company metadata container
    metadata_container = parsed.find('p', class_='identInfo')
    # String representation of HTML (used in RegEx)
    metadata_str = str(metadata_container)
    # Dictionary for company metadata
    company_metadata = dict()

    # RegEx for extracting SIC and SIC type
    sic_re = re.compile(r'SIC.+?:.+?(\d+?)<\/a> -(.+?)<br')
    # Getting SIC and SIC type match
    sic_matches = sic_re.findall(metadata_str)
    # Saving SIC and stripped, HTML-parsed SIC type
    company_metadata['sic'] = sic_matches[0][0]
    company_metadata['sic_type'] = html.unescape(sic_matches[0][1]).strip()

    # RegEx for extracting company location (state)
    location_re = re.compile(r'State location:.+?>(\w+?)<\/a>')
    # Getting company location
    location_matches = location_re.findall(metadata_str)
    # Saving company location
    company_metadata['location'] = location_matches[0].strip()

    # RegEx for extracting state of incorporation
    incorp_state_re = re.compile(r'State of Inc\.:.+?>(\w+?)<\/strong>')
    # Getting state of incorporation
    incorp_match = incorp_state_re.findall(metadata_str)[0]
    # Saving state of incorporation
    company_metadata['incorporation_state'] = incorp_match.strip()

    # RegEx for extracting end of fiscal year
    fiscal_year_re = re.compile(r'Fiscal Year End:.+?(\d{4})')
    # Getting end of fiscal year
    fiscal_year_match = fiscal_year_re.findall(metadata_str)[0]
    # Saving end of fiscal year (in mm-dd format)
    fy_formatted = fiscal_year_match[0:2] + '-' + fiscal_year_match[2:]
    company_metadata['fiscal_year_end'] = fy_formatted

    return company_metadata
a9efbed062f8e6f9f43ba46d6753096df3c43e08
692,199
def api_token_error(context, request):
    """Handle an expected/deliberately thrown API exception."""
    request.response.status_code = context.status_code
    resp = {'error': context.type}
    if context.message:
        resp['error_description'] = context.message
    return resp
d72c71885182f9ecb56efbfddf11f2c474be261b
386,339
def split_procs(procs, valid_frac=.2):
    """Split the procedures into a training and validation set based on
    start time."""
    procs.sort(key=lambda x: x.get_start_time())
    split_ind = int((1 - valid_frac) * len(procs))
    training = procs[:split_ind]
    validation = procs[split_ind:]
    return (training, validation)
e28f5679631bbeeb9efa9b657d57e156f02d7625
111,275
def get_header(img):
    """Gets the header of a NIFTI image.

    :param img: NIFTI image
    :type img: Image
    :return: Image header
    :rtype: nibabel.nifti1.Nifti1Header
    """
    return img.header
d3aa8dcc23d806963f0527328e4f9dac9dba9f19
290,696
import json


def json_to_obj(json_str: str):
    """Transforms the json string into a tree made up of dictionaries and lists.

    :param json_str: The json string to transform
    :return: the dict or list
    """
    return json.loads(json_str)
cebf9eacd8c55d80290ed06036bdc754f01c4dbb
468,655
import importlib


def _on_import_factory(module, raise_errors=True):
    """Factory to create an import hook for the provided module name.

    Note: `raise_errors` is accepted but not consulted in this snippet;
    import/patch errors always propagate.
    """
    def on_import(hook):
        # Import and patch module
        path = 'ddtrace.contrib.%s' % module
        imported_module = importlib.import_module(path)
        imported_module.patch()

    return on_import
06c06429559ab3f64a5b0b3a0b2a6f960452ad23
424,386
def ps_weight_timemean(field, ps):
    """Take the surface-pressure-weighted time mean of atmos fields.

    input:
        field   xr.DataArray or xr.Dataset
        ps      surface pressure field with the same dimensions as field;
                it does not need the vertical coordinates

    return:
        same structure as field but time averaged
    """
    return (field * ps).mean('time') / ps.mean('time')
a33d3eb7cdece49e8e6ff41c6dff46bf40a4deb4
690,239
def substring_search(word, collection):
    """Finds all matches in the `collection` for the specified `word`.

    If `word` is empty, returns all items in `collection`.

    :type word: str
    :param word: The substring to search for.

    :type collection: collection, usually a list
    :param collection: A collection of words to match.

    :rtype: list of strings
    :return: A sorted list of matching words from collection.
    """
    return [item for item in sorted(collection) if item.startswith(word)]
dd200f02a7fe9ed09f01ad267c498d6bf32f6024
57,031
def dict_of_str(json_dict):
    """Given a dictionary, return a new dictionary with all items as strings.

    Args:
        json_dict(dict): Input JSON dictionary.

    Returns:
        A Python dictionary with the contents of the JSON object as strings.
    """
    result = {}
    for key, value in json_dict.items():
        result[key] = '{}'.format(value)
    return result
9fa004a38ff9d8508c914c5eb6d2a468dfd77708
363,483
import pickle


def unpickle_file(picklefile, **kwargs):
    """Load data from `picklefile` with Python's :mod:`pickle` module.

    :param picklefile: either target file path as string or file handle
    :param kwargs: further parameters passed to :func:`pickle.load`
    :return: data stored in `picklefile`
    """
    if isinstance(picklefile, str):
        with open(picklefile, 'rb') as f:
            return pickle.load(f, **kwargs)
    else:
        return pickle.load(picklefile, **kwargs)
793267e872784c25558959e72aeb8014294772a6
389,748
def dsr_pb(D_eq):
    """Pruppacher and Beard drop shape relationship function.

    Arguments:
        D_eq: Drop volume-equivalent diameter (mm)

    Returns:
        r: The vertical-to-horizontal drop axis ratio.

    Note: the Scatterer class expects horizontal to vertical,
    so you should pass 1/dsr_pb.
    """
    return 1.03 - 0.062 * D_eq
cb0980733d3c498f2d7ec5e31a530e75f0585ffc
241,148
def tidy_passphrase(passphrase: str) -> str:
    """Perform any string processing we want to apply to the entered
    passphrase. Currently we strip leading/trailing whitespace.
    """
    return passphrase.strip()
3d43caa99a9ffbab03e85b862993d4b07f81f239
416,016
def is_comment(string):
    """Find out if a line is a comment or a line of code.

    :param string: line to analyze
    :return: True if line is a comment
    """
    chars = list(string)
    if len(chars) <= 0:
        return True
    if chars[0] == "#":
        return True
    else:
        return False
2a6c761130bf608d7363334d95257d8d73332680
655,440
def contains_memory(memoryrange, memory):
    """Returns True if the given `memory` is in `memoryrange`, False if not."""
    for field, value in memoryrange.lower.ListFields():
        if getattr(memory, field.name) < value:
            return False
    for field, value in memoryrange.upper.ListFields():
        if getattr(memory, field.name) > value:
            return False
    return True
91133402ee4e150fde8bfa85e4c4a84363ef9c20
624,608
import pickle


def load_detectron_predictions(detectron_root):
    """Load detectron predictions from the root directory.

    Args:
        detectron_root (Path): Points to a directory which contains a
            subdirectory for each sequence, which in turn contains a
            .pickle file for each frame in the sequence.

    Returns:
        predictions (dict): Map sequence to dict mapping from frame name
            to a dictionary containing keys 'boxes' and 'segmentations'.
    """
    predictions = {}
    for sequence_path in detectron_root.iterdir():
        if not sequence_path.is_dir():
            continue
        sequence = sequence_path.stem
        predictions[sequence] = {}
        for detectron_path in sequence_path.glob('*.pickle'):
            with open(detectron_path, 'rb') as f:
                frame_data = pickle.load(f)
            if frame_data['segmentations'] is None:
                frame_data['segmentations'] = [
                    [] for _ in range(len(frame_data['boxes']))
                ]
            frame_name = detectron_path.stem
            predictions[sequence][frame_name] = {
                'boxes': [],
                'segmentations': []
            }
            for c in range(len(frame_data['boxes'])):
                predictions[sequence][frame_name]['boxes'].extend(
                    frame_data['boxes'][c])
                predictions[sequence][frame_name]['segmentations'].extend(
                    frame_data['segmentations'][c])
    return predictions
bc9c999aeab998bdb8de49343e770b5b7a7b99ed
668,046
from typing import Dict


def invert_dict(d: Dict) -> Dict:
    """Inverts the key:value relation of a dictionary.

    Parameters
    ----------
    d : dict
        dictionary to be inverted

    Returns
    -------
    dict
        inverted dict
    """
    return {v: k for k, v in d.items()}
6ca7b0d0cb647025f2ac1383e6a5247dcd1310ec
139,694
import re


def get_partition_separator(name):
    """Get partition separator based on device name, returns str."""
    separator = ''
    if re.search(r'(loop|mmc|nvme)', name, re.IGNORECASE):
        separator = 'p'
    return separator
9f4a44e4c1b0c0654a4d19440d27421559530395
513,372
from re import sub


def remove_multiple_spaces(string):
    """Strips and removes multiple spaces in a string.

    :param str string: the string to remove the spaces from
    :return: a new string without multiple spaces and stripped
    """
    return sub(" +", " ", string.strip())
6d65985fb6b3adddff5e8e6b06d0ae94e6dd5e3a
511,304
def diff_dict(old, new):
    """Compare two dicts and create a dict (set_) with new or changed
    values/keys and a list (remove_) of missing keys."""
    set_ = {}
    remove_ = []
    for k in new.keys():
        if k in old:
            # key exists in both, but the value changed
            if new[k] != old[k]:
                set_[k] = new[k]
        else:
            # something new appeared in new dict
            set_[k] = new[k]
    for k in old.keys():
        if k not in new:
            # key from old which was not found in new
            remove_.append(k)
    # the remove array is a list of identifiers
    return set_, remove_
dc1358060d475ed8fa6ba76566359b71f2d58caa
348,755
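A usage sketch for `diff_dict` above with made-up dicts:

>>> diff_dict({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4})
({'b': 3, 'c': 4}, [])
>>> diff_dict({'a': 1}, {})
({}, ['a'])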
def renormalize(values, old_min, old_max, new_min, new_max):
    """Transforms values in range [old_min, old_max] to values in range
    [new_min, new_max]."""
    return (new_max - new_min) * (values - old_min) / (old_max - old_min) + new_min
7ac87a8962379b68224ecf4ac8ff939090321d51
504,269
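A worked example for `renormalize` above: mapping 5 from [0, 10] into [0, 100], and 0.5 from [0, 1] into [-1, 1]:

>>> renormalize(5, 0, 10, 0, 100)
50.0
>>> renormalize(0.5, 0, 1, -1, 1)
0.0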
def outsidein(d):
    """Transform a dictionary of lists to a list of dictionaries."""
    ds = []
    keys = list(d.keys())
    for key in keys:
        d[key] = list(d[key])
    # All lists are assumed to have the same length as the first one.
    for i in range(len(list(d.values())[0])):
        ds.append(dict([(k, d[k][i]) for k in keys]))
    return ds
3df73ad47cf1aec8b01a3c539ccddfe61c9a29ec
235,109
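A usage sketch for `outsidein` above with a made-up dict of columns:

>>> outsidein({'x': [1, 2], 'y': [3, 4]})
[{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]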
import torch


def Conv2dGroup(
    in_channels: int,
    out_channels: int,
    kernel_size: int = 3,
    stride: int = 1,
    padding: int = 0,
    bias: bool = True,
    num_groups=1,
    **kwargs,
):
    """A 2D convolution followed by a group norm and ReLU activation."""
    return torch.nn.Sequential(
        torch.nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=bias,
        ),
        torch.nn.GroupNorm(num_groups, out_channels),
        torch.nn.ReLU(inplace=True),
    )
eb723c3673a263c573022681cc5b30018c8647b6
80,836
def startswith(string, prefixes):
    """Checks if string starts with any of the strings in the prefixes tuple.

    Returns the first string in prefixes that matches. If there is no
    match, the function returns None.
    """
    if isinstance(prefixes, tuple):
        for prefix in prefixes:
            if string.startswith(prefix):
                return prefix
        return None
    elif isinstance(prefixes, str):
        prefix = prefixes
        if string.startswith(prefix):
            return prefix
        return None
    else:
        raise Exception('Second argument must be string or a tuple of strings.')
9a507aa2c4da6cb187023e5be28084b7b915feba
608,418
def get_gs_distortion(dict_energies: dict):
    """Calculates the energy difference between the Unperturbed structure
    and the most favourable distortion. Returns the energy drop of the
    ground state relative to Unperturbed (in eV) and the BDM distortion
    that led to the ground state.

    Args:
        dict_energies (dict): Dictionary matching distortion to final
            energy, as produced by organize_data()

    Returns:
        (energy_difference, BDM_ground_state_distortion)
    """
    if len(dict_energies['distortions']) == 1:
        energy_diff = (dict_energies['distortions']['rattled']
                       - dict_energies['Unperturbed'])
        if energy_diff < 0:
            gs_distortion = 'rattled'  # just rattle (no BDM)
        else:
            gs_distortion = "Unperturbed"
    else:
        # lowest energy obtained with RBDM
        lowest_E_RBDM = min(dict_energies['distortions'].values())
        energy_diff = lowest_E_RBDM - dict_energies['Unperturbed']
        if lowest_E_RBDM < dict_energies['Unperturbed']:
            # BDM distortion that led to the ground state
            gs_distortion = list(dict_energies['distortions'].keys())[
                list(dict_energies['distortions'].values()).index(lowest_E_RBDM)]
        else:
            gs_distortion = "Unperturbed"
    return energy_diff, gs_distortion
2f23103ccac8e801cb6c2c4aff1fb4fc08341e78
4,300
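A usage sketch for `get_gs_distortion` above with hypothetical energies (eV) and made-up distortion keys:

>>> dict_energies = {'Unperturbed': -100.0,
...                  'distortions': {'BDM_-0.2': -100.5, 'BDM_0.3': -99.8}}
>>> get_gs_distortion(dict_energies)
(-0.5, 'BDM_-0.2')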
def find_disconnected(model):
    """Return metabolites that are not in any of the reactions.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.
    """
    return [met for met in model.metabolites if len(met.reactions) == 0]
19c606affff99c01c47522d6903b6f8008cee8c6
42,182
import logging


def log_level(level_string):
    """Return a log level for a string."""
    return getattr(logging, level_string.upper())
14acbe0701a184247484db226cd36c56d60f7c1f
477,836