Dataset schema: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
import pathlib


def tmp_path(tmp_path):
    """Always present a pathlib's Path.

    This is to avoid pytest using pathlib2 in Python 3.5, which leads
    to several slight differences in the tests.

    This "middle layer fixture" has the same name as the pytest fixture,
    so when we drop Py 3.5 support we will be able to just remove this,
    and all the tests will automatically use the standard one (no
    further changes needed).
    """
    return pathlib.Path(str(tmp_path))
77d0dc21b52b3737ea20df11201214df7d40cc5f
30,946
def transfer(item, from_collection, to_collection, n=1):
    """Move an item from one dictionary of objects to another.

    Returns True if the item was successfully transferred, False if
    it's not in from_collection.

    For example, to have the player pick up a pointy stick::

        if transfer('pointy stick', tile.contents, player.inventory):
            print("You take the stick.")
        else:
            print("There's no stick here.")
    """
    if from_collection.get(item, 0) < n:
        return False
    from_collection[item] -= n
    if item in to_collection:
        to_collection[item] += n
    else:
        to_collection[item] = n
    return True
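# Illustrative usage (not part of the original snippet): moving arrows
# between two name -> count dicts.
tile_contents = {'pointy stick': 1, 'arrow': 3}
inventory = {'arrow': 2}
assert transfer('arrow', tile_contents, inventory, n=2)   # two arrows move over
assert inventory == {'arrow': 4}
assert tile_contents == {'pointy stick': 1, 'arrow': 1}
assert not transfer('sword', tile_contents, inventory)    # nothing to take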
fafa36344f443ef8e7e5741b5664e626b346512d
30,947
def ad(df, high, low, close, volume, ad):
    """
    The Accumulation/Distribution Line is similar to On Balance Volume (OBV),
    which sums the volume times +1/-1 based on whether the close is higher
    than the previous close. The Accumulation/Distribution indicator, however,
    multiplies the volume by the close location value (CLV). The CLV is based
    on the movement of the issue within a single bar and can be +1, -1 or zero.

    Parameters:
        df (pd.DataFrame): DataFrame which contains the asset information.
        high (string): the column name for the period highest price of the asset.
        low (string): the column name for the period lowest price of the asset.
        close (string): the column name for the closing price of the asset.
        volume (string): the column name for the volume of the asset.
        ad (string): the column name for the ad values.

    Returns:
        df (pd.DataFrame): DataFrame with the ad of the asset calculated.
    """
    money_flow_multiplier = (
        (df[close] - df[low]) - (df[high] - df[close])
    ) / (df[high] - df[low])
    df[ad + "_money_flow_volume"] = money_flow_multiplier * df[volume]

    prev_ad = df.loc[0, ad + "_money_flow_volume"]
    df.loc[0, ad] = prev_ad
    ads = [0.0]
    for row in df.loc[1:, [ad + "_money_flow_volume"]].itertuples(index=False):
        ads.append(prev_ad + row[0])
        prev_ad = ads[-1]

    df = df.fillna(0)
    df[ad] += ads
    df.drop([ad + "_money_flow_volume"], axis=1, inplace=True)
    return df
4d7480cf7c78ee874404efadee7ee742e6655ac8
30,954
def has_decreased(scores, in_last):
    """Return True iff the score descended at some point within the last
    `in_last` entries."""
    if in_last >= len(scores):
        return True
    last = scores[-(in_last + 1)]
    for score in scores[-in_last:]:
        if score < last:
            return True
        last = score
    return False
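# Worked examples (illustrative, assuming scores are ordered oldest to newest):
assert has_decreased([1, 2, 5, 4, 6], in_last=3)       # 5 -> 4 is a decrease
assert not has_decreased([1, 2, 3, 4], in_last=2)      # tail strictly increases
assert has_decreased([1, 2], in_last=5)                # window exceeds history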
472f01b5573ecf965ab5b7c239904ac113cc0b67
30,962
def orderlex(l1, l2, n1, n2):
    """
    Lexicographic order on infinite words. Returns:
    0 if the n1-th shift of l1^infty is smaller than the n2-th shift of l2^infty,
    1 if it is larger, and
    2 if the two words coincide (this can be determined by looking at only
    len(l1) + len(l2) letters, since u^infty == v^infty iff uv == vu).
    """
    i = 0
    while (l1[(i + n1) % len(l1)] == l2[(i + n2) % len(l2)]
           and i <= len(l1) + len(l2)):
        i = i + 1
    if l1[(i + n1) % len(l1)] < l2[(i + n2) % len(l2)]:
        return 0
    elif l1[(i + n1) % len(l1)] > l2[(i + n2) % len(l2)]:
        return 1
    else:
        return 2
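# Worked examples (illustrative): comparing shifts of periodic words.
assert orderlex([0, 1], [0, 1, 1], 0, 0) == 0   # 0101... < 0110...
assert orderlex([0, 1], [1, 0], 0, 1) == 2      # both expand to 0101...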
06871fc13d8af63b24ce494c1e017595a6853403
30,965
import json


def get_swiftlm_uptime_mon_data(uptime_stats_file, logger, openr=open):
    """Retrieve values from a swiftlm uptime mon cache file.

    :params uptime_stats_file: cache file to retrieve items from.
    :params openr: open function to use [for unittests]
    :return: dict of cache items and their values, or None if not found
    """
    try:
        with openr(uptime_stats_file, 'r') as f:
            return json.load(f)
    except IOError:
        logger.exception('Error reading swiftlm uptime mon cache file')
    except ValueError:
        logger.exception('Error parsing swiftlm uptime mon cache file')
2d0a6ff07cafd4314b26fac3f6c6c40eb95ee345
30,968
import base64


def b64decode(data: str, urlsafe: bool = False) -> str:
    """Accepts a string and returns the Base64 decoded representation of this string.

    `urlsafe=True` decodes urlsafe base64
    """
    if urlsafe:
        b64 = base64.urlsafe_b64decode(data.encode("ascii"))
        return b64.decode("ascii")
    b64 = base64.b64decode(data.encode("ascii"))
    return b64.decode("ascii")
3345bc380f75b5e195dd825f6ad5284d5382e7d6
30,971
def func(a: int, b: int) -> int:
    """An example of type annotation in a function."""
    return a + b
31c3ec1ffc27c18d02a2b92f083b2d47675423c5
30,972
def max_depth(root):
    """Figure out what the maximum depth of a given binary tree is."""
    if root is None:
        return 0
    return max(max_depth(root.left), max_depth(root.right)) + 1
89acbb44f5221871acd5f9875a8fd2168ea0661c
30,975
def get_bit_positions(bit_mask):
    """Return an array of positions for each enabled bit in bit_mask."""
    bit_positions = []
    # find bit positions of enabled bits in the 16-bit mask
    for i in range(16):
        if (bit_mask & (1 << i)) != 0:
            bit_positions.append(i)
    return bit_positions
d18de611cbc17f3c6d5cf0797d31976c654c0fea
30,979
def tonick(user_name):
    """Convert a Stack user name into an IRC nick name by stripping whitespace.

    Also roundtrips it through raw_unicode_escape to handle nicks in the
    user list (which use embedded \\uXXXX sequences for non-ASCII
    characters). For some reason nicks in MessagePosted events use the
    actual utf-8 characters, which are unharmed by this.
    """
    return (user_name
            .encode('raw_unicode_escape')
            .decode('raw_unicode_escape')
            .replace(' ', ''))
125ec9b879f8921deca4ae23735de6ba2e844a0c
30,988
def convert_rag_text(dca_rating: str) -> str:
    """Converts a RAG rating name into an acronym."""
    if dca_rating == "Green":
        return "G"
    elif dca_rating == "Amber/Green":
        return "A/G"
    elif dca_rating == "Amber":
        return "A"
    elif dca_rating == "Amber/Red":
        return "A/R"
    elif dca_rating == "Red":
        return "R"
    else:
        return ""
d22c8186b2e03c62f358e1e23e96614819aab9e0
30,995
def process_max_frames_arg(max_frames_arg):
    """Handle maxFrames arg in vidstab.__main__

    Convert negative values to inf

    :param max_frames_arg: maxFrames arg in vidstab.__main__
    :return: max_frames as is or inf

    >>> process_max_frames_arg(-1)
    inf
    >>> process_max_frames_arg(1)
    1
    """
    if max_frames_arg > 0:
        return max_frames_arg
    return float('inf')
8b4756804688516b828fbd082fb2b9be7b6d9f52
30,998
from typing import List


def compute_bootstrapped_returns(rewards: List, values: List, gamma: float = 0.95) -> List:
    """
    Compute bootstrapped rewards-to-go. It's assumed the last state is the
    terminal state, so V(s_T) = 0.

        q(s_t, a_t) = r(s_t, a_t) + V(s_t+1) * (1 - done)

    :param rewards:
    :param values:
    :param gamma:
    :return:
    """
    returns = []
    for step in range(len(rewards) - 1):
        q_t = rewards[step] + gamma * values[step + 1]
        returns.append(q_t)
    returns.append(rewards[-1])  # terminal state -> V(s_T) = 0
    return returns
fde4eafc51fc64cd00245dfb2208c08697044f3e
31,007
def dirac(x, y, tol=2e-2, bounds=None):
    """Creates a Dirac-like indicator function centred at (x, y).

    Parameters
    ----------
    x : int, float
        Horizontal position of the Dirac.
    y : int, float
        Vertical position of the Dirac.
    tol : float, optional
        Small offset around the position to avoid a completely null Dirac
        if no vertex falls on the precise position.
    bounds : list[int or float, int or float, int or float, int or float] or None, optional
        Clamping bounds for the Dirac, if necessary.
    """
    xmin, xmax = x - tol, x + tol
    ymin, ymax = y - tol, y + tol
    if bounds is not None:
        xmin = max(xmin, bounds[0])
        xmax = min(xmax, bounds[1])
        ymin = max(ymin, bounds[2])
        ymax = min(ymax, bounds[3])
    return lambda x, y: 1. if (xmin <= x <= xmax and ymin <= y <= ymax) else 0.
fd5de2102ef3bff24e5eab1d79beae2fbe8414bb
31,008
def chunks_in_region_file(filename):
    """Return the number of chunks generated in a region file."""
    chunks = 0
    with open(filename, 'rb') as f:
        # the header holds 1024 four-byte entries; a non-zero final byte
        # (sector count) means the chunk has been generated
        for i in range(1024):
            entry = f.read(4)
            if entry[-1] != 0:
                chunks += 1
    return chunks
f53578b084ca6006b477937a86285b956293f127
31,009
import re


def get_image_name(url: str) -> str:
    """
    Get the image file name from its URL.

    :param url: image URL
    :return: image file name

    >>> get_image_name('http://fm.shiyunjj.com/2018/1562/5idk.jpg')
    '5idk.jpg'
    """
    return re.split(r'/', url)[-1]
9b621da34680abe259a8d45ccf9b0ee055e7a628
31,011
import requests


async def get_url_async(url, **kwargs):
    """
    Async variant of the above get_url() method.
    """
    # NOTE: requests.get() is a blocking call, so this coroutine does not
    # yield to the event loop while the request is in flight.
    return requests.get(url, **kwargs)
3f3398760705f25bcc05a5d971d5d907e5cc07af
31,019
from typing import Dict


def invert_val_mapping(val_mapping: Dict) -> Dict:
    """Inverts the value mapping dictionary for allowed parameter values"""
    return {v: k for k, v in val_mapping.items()}
92971f233276c316e0a28805a303c9457793c646
31,022
def _iterable_to_varargs_method(func):
    """decorator to convert a method taking an iterable to a *args one"""
    def wrapped(self, *args, **kwargs):
        return func(self, args, **kwargs)
    return wrapped
5085f564cd67259231860e11674749be18429ae3
31,023
def lowersorted(xs):
    """Sort case-insensitively by the first element."""
    return sorted(xs, key=lambda x: x[0].lower())
aad12c0d3cceca8fd4b32b08b652858ee897a125
31,025
from typing import Mapping

import click


def format_verbose_output(filename: str, print_statements: Mapping[int, str]) -> str:
    """Return the formatted output used when the `--verbose` flag is provided.

    Args:
        filename: Name of the file currently being checked.
        print_statements: Mapping of line number where the print statement is
            present to the code representation of that print statement.

    Returns:
        Formatted output or an empty string if there are no print statements.
    """
    if len(print_statements) == 0:
        return ""

    result = [click.style(filename, fg="blue")]
    for start, statement in print_statements.items():
        for lineno, line in enumerate(statement.splitlines(), start=start):
            result.append(
                f"  {click.style(lineno, dim=True)} {click.style(line, bold=True)}"
            )
    return "\n".join(result)
91b635db0e9da6ff377c9e3d41f5d544bc0314a8
31,028
import pickle


def read_model(pickle_file_name):
    """Reads model from Pickle file.

    :param pickle_file_name: Path to input file.
    :return: model_object: Instance of `xgboost.XGBClassifier`.
    """
    with open(pickle_file_name, 'rb') as pickle_file_handle:
        model_object = pickle.load(pickle_file_handle)
    return model_object
b73ade8de86e61eda23837b560e5691e0537f387
31,034
def _build_arguments(keyword_args):
    """
    Builds a dictionary of function arguments appropriate to the index to be computed.

    :param dict keyword_args:
    :return: dictionary of arguments keyed with names expected by the
        corresponding index computation function
    """
    function_arguments = {"data_start_year": keyword_args["data_start_year"]}

    if keyword_args["index"] in ["spi", "spei"]:
        function_arguments["scale"] = keyword_args["scale"]
        function_arguments["distribution"] = keyword_args["distribution"]
        function_arguments["calibration_year_initial"] = \
            keyword_args["calibration_start_year"]
        function_arguments["calibration_year_final"] = \
            keyword_args["calibration_end_year"]
        function_arguments["periodicity"] = keyword_args["periodicity"]
    elif keyword_args["index"] == "pnp":
        function_arguments["scale"] = keyword_args["scale"]
        function_arguments["calibration_start_year"] = \
            keyword_args["calibration_start_year"]
        function_arguments["calibration_end_year"] = \
            keyword_args["calibration_end_year"]
        function_arguments["periodicity"] = keyword_args["periodicity"]
    elif keyword_args["index"] == "palmers":
        function_arguments["calibration_start_year"] = \
            keyword_args["calibration_start_year"]
        function_arguments["calibration_end_year"] = \
            keyword_args["calibration_end_year"]
    elif keyword_args["index"] != "pet":
        raise ValueError(
            "Index {index} not yet supported.".format(index=keyword_args["index"])
        )

    return function_arguments
5bb04062613cd554ebee1879d7a526eca6a12830
31,035
import math


def entropy_term(x):
    """Helper function for entropy_single: calculates one term in the sum."""
    if x == 0:
        return 0.0
    else:
        return -x * math.log2(x)
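# Quick sanity checks (illustrative): -0.5 * log2(0.5) = 0.5, and the
# x == 0 branch avoids evaluating log2(0).
assert entropy_term(0.5) == 0.5
assert entropy_term(0) == 0.0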
0b98662fee53ff7eb4e0e99a0eef006cc20891a9
31,037
from datetime import datetime


def create_output_folder_name(suffix=None):
    """
    Creates the name of the output folder. The name is a combination of the
    current date, time, and an optional suffix.

    :param suffix: str, folder name suffix
    :return: str, name of the output directory
    """
    # Record start execution date and time
    now = datetime.now().strftime('%Y-%m-%d_%H-%M')
    # Prepare path to subdirectory within the result directory
    name = '_'.join([now, suffix]) if suffix else now
    return name
29b21944d32b56532545b68a9f415c0a597de1e3
31,040
import unicodedata


def find_unicodedata_name(data: str) -> list:
    """Look up the Unicode character names in a string.

    ♠ == BLACK SPADE SUIT
    \\N{BLACK SPADE SUIT} == ♠

    :param data: input string
    :return: list of Unicode names, one per character
    """
    ls = []
    for i in data:
        ls.append(unicodedata.name(i))
    return ls
b23ebd7db8f1aed60659dea1160dd6c9dfcd13c0
31,044
def grabOverlappingKmer(seq, sitei, pos=0, k=9):
    """Grab the kmer from seq such that sitei is at position pos within the kmer.
    Return the gapped and non-gapped kmer.

    This is a generalization of grabKmer for pos = 0.

    If seq[sitei] is a gap then the non-gapped kmer is None. If there are
    not enough non-gap AA to return before/after sitei then it returns None.

    Parameters
    ----------
    seq : str
        Sequence from which peptide will be grabbed.
    sitei : int
        Key position of the kmer (zero-based indexing)
    pos : int
        The position of the key sitei in the kmer.
    k : int
        Length of the peptide to return.

    Returns
    -------
    gapped : str
        A k-length peptide that overlaps sitei
    nonGapped : str
        A k-length peptide that overlaps sitei. If seq[sitei] is a gap
        then returns None. If not then all gaps are removed before taking
        the k-length peptide (if there aren't k AAs then return is None)"""
    aaRight = k - pos
    aaLeft = pos
    if seq[sitei] == '-':
        return None, None

    if (sitei + aaRight) <= len(seq) and (sitei - aaLeft) >= 0:
        if pos < k:
            rh = seq[sitei:]
            fullRH = rh[:aaRight]
            if '-' in fullRH:
                ngRH = rh.replace('-', '')
                if len(ngRH) >= aaRight:
                    ngRH = ngRH[:aaRight]
                else:
                    ngRH = None
            else:
                ngRH = fullRH
        else:
            fullRH = ''
            ngRH = ''

        if pos > 0:
            lh = seq[:sitei]
            fullLH = lh[-aaLeft:]
            if '-' in fullLH:
                ngLH = lh.replace('-', '')
                if len(ngLH) >= aaLeft:
                    ngLH = ngLH[-aaLeft:]
                else:
                    ngLH = None
            else:
                ngLH = fullLH
        else:
            fullLH = ''
            ngLH = ''

        full = fullLH + fullRH
        if ngLH is None or ngRH is None:
            ng = None
        else:
            ng = ngLH + ngRH
        return full, ng
    else:
        return None, None
378866da9dc9af0898a3a07a0aa3ac3e6988bb36
31,049
def generate_alias(tbl):
    """Generate a table alias, consisting of all upper-case letters in the
    table name, or, if there are no upper-case letters, the first letter +
    all letters preceded by _

    param tbl - unescaped name of the table to alias
    """
    return "".join(
        [l for l in tbl if l.isupper()]
        or [l for l, prev in zip(tbl, "_" + tbl) if prev == "_" and l != "_"]
    )
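# Illustrative aliases (not part of the original snippet):
assert generate_alias("FooBar") == "FB"    # built from the upper-case letters
assert generate_alias("foo_bar") == "fb"   # no uppers: initials of _-words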
535cc686e7feb561a61ff8780dd9df84635e7c00
31,057
def get_one_element(singleton):
    """When singleton is not empty, return an element from it."""
    for e in singleton:
        return e
49f43742036e5462a74febde93b5b30e4e5f97bc
31,059
import re


def getFilename(resp, url):
    """
    tries to figure out the filename by either looking at the response
    header for content-disposition, or by extracting the last segment
    of the URL
    """
    filename = ''
    if "Content-Disposition" in resp.headers.keys():
        if 'filename' in resp.headers["Content-Disposition"]:
            filename = re.findall("filename=(.+)",
                                  resp.headers["Content-Disposition"])[0]
        else:
            filename = url.split("/")[-1]
    else:
        filename = url.split("/")[-1]
    return filename
77529d910ff4585cd755f8cce872a40683e1bde1
31,065
import socket


def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Returns an unused port that should be suitable for binding.

    This is achieved by creating a temporary socket with the given family
    and type (default is AF_INET, SOCK_STREAM), and binding it to the
    wildcard address with the port set to 0, eliciting an unused ephemeral
    port from the OS. The temporary socket is then closed and deleted, and
    the ephemeral port is returned.
    """
    with socket.socket(family, socktype) as tempsock:
        tempsock.bind(('', 0))
        port = tempsock.getsockname()[1]
    del tempsock
    return port
3cf82aa209832a9ba3c3b7e0c994f8e234ef1d14
31,066
def beautify_message(message):
    """Make the message more readable by removing some SHACL-specific formatting."""
    if message.startswith("Less than 1 values on"):
        index = message.find("->") + 2
        message = "Less than 1 value on " + message[index:]
    return message
54fbef5106267a7d519b821e65ce511847c88244
31,073
def different_utr5_boundary(transcript1, transcript2):
    """Check if two transcripts have different UTR5 exon boundaries"""
    for i in range(len(transcript1.utr5_exons)):
        exon1 = transcript1.utr5_exons[i]
        exon2 = transcript2.utr5_exons[i]
        if exon1[0] != exon2[0] or exon1[1] != exon2[1]:
            return True
    return False
e43cb5cb056869218da5ce2be18c74f3d7b7b91d
31,084
from typing import Dict
from typing import List


def data_splits_from_folds(folds: List[str]) -> List[Dict[str, List[str]]]:
    """
    Create data splits by using the Leave One Out Cross Validation strategy.

    folds is a list of dataset partitions created during pre-processing. For
    example, for 5-fold cross val: ["fold00", "fold01", ..., "fold04"]. This
    function will create k test, validation, and train splits using these
    folds. Each fold is successively treated as the test split, the next fold
    as validation, and the remaining folds as train. Folds will be sorted
    before applying the above strategy.

    With 5-fold, for example, we would have:
        test=fold00, val=fold01, train=fold02..fold04
        test=fold01, val=fold02, train=fold03,fold04,fold00
        ...
        test=fold04, val=fold00, train=fold01..fold03
    """
    sorted_folds = tuple(sorted(folds))
    assert len(sorted_folds) == len(set(sorted_folds)), "Folds are not unique"
    num_folds = len(sorted_folds)
    all_data_splits: List[Dict[str, List[str]]] = []
    for fold_idx in range(num_folds):
        test_fold = sorted_folds[fold_idx]
        valid_fold = sorted_folds[(fold_idx + 1) % num_folds]
        train_folds = [f for f in sorted_folds if f not in (test_fold, valid_fold)]
        all_data_splits.append(
            {
                "train": train_folds,
                "valid": [valid_fold],
                "test": [test_fold],
            }
        )
        assert not set(train_folds).intersection(
            {test_fold, valid_fold}
        ), "Train folds are not distinct from the dev and the test folds"
    return all_data_splits
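# A small usage sketch (illustrative): three folds yield three LOOCV splits.
splits = data_splits_from_folds(["fold02", "fold00", "fold01"])
assert len(splits) == 3
assert splits[0] == {"train": ["fold02"], "valid": ["fold01"], "test": ["fold00"]}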
9abb2246f56dfd96c9d04bfdc1aea3eb4fd0bfa7
31,087
from pathlib import Path
from typing import Dict
from typing import Optional

import toml


def read_pyproject_toml(working_dir: Path) -> Dict[str, Optional[str]]:
    """
    Read the project's `pyproject.toml` file

    Args:
        working_dir: CWD. Usually a root of source code

    Returns:
        Configurations described in the toml file

    Raises:
        toml.TomlDecodeError: Failed to decode
        OSError: Failed to read a file
    """
    pyproject_toml_path: Path = working_dir / "pyproject.toml"
    if not pyproject_toml_path.is_file():
        return dict()
    pyproject_toml = toml.load(pyproject_toml_path)
    config_d = pyproject_toml.get("tool", {}).get("pytestdocgen", {})
    config = dict()
    for k in config_d:
        config[f"--{str(k)}"] = config_d[k]
    return config
829265da48da5c221345816586b8ee91adc53494
31,091
def tag_contents_xpath(tag, content):
    """Construct an XPath matching elements with the given tag whose contents
    contain `content`, compared case-insensitively."""
    content = content.lower()
    return (
        '//{}[contains(translate(*,"ABCDEFGHIJKLMNOPQRSTUVWXYZ",'
        '"abcdefghijklmnopqrstuvwxyz"),"{}")]'.format(tag, content)
    )
c6bcea931afb29282b2435a73c9ef86fa4f24dc8
31,094
import random


def sample_minor_versions(versions, sample):
    """
    Pick a random sample from the given versions, excluding major releases
    (x.0.0).

    :param versions: A list of valid semver strings.
    :param sample: The number of versions to choose from the available versions.
    """
    minors = []
    for version in versions:
        major, minor, patch = version.split('.')
        if minor != '0' or patch != '0':
            minors.append(version)
    random.shuffle(minors)
    return minors[-sample:]
88a7e0900b6380182a5e95760ba2105396f72055
31,095
def intersect(pieceA, pieceB):
    """Check if two 2-len tuples have at least one element in common."""
    return pieceA[0] in pieceB or pieceA[1] in pieceB
2330389e6945167f58a1a75a05f1310d5db9c4dd
31,099
from functools import reduce


def _extract_name_from_tags(tag_list):
    """
    Extracts the value of the 'Name' tag of an EC2 instance.

    Filters all tag dictionaries to get the one where the key is 'Name',
    then extracts the value from it. If there are several 'Name' tags,
    it always uses the first one.

    :param tag_list: List of tags for an EC2 instance
    :return: Name value
    """
    filtered_tag_list = list(filter(lambda tag: tag['Key'] == 'Name', tag_list))
    names = list(map(lambda tag: tag['Value'], filtered_tag_list))
    return reduce(lambda a, b: a, names) if len(names) > 0 else ''
129183767492530fe363795d645f2a59a82ef18d
31,100
import json


def save_json(data, path):
    """Save a JSON file to the specified path."""
    with open(path, "w") as file:
        return json.dump(data, file)
d93a212dc97a5f3a6a059868aedc92f67f146599
31,105
def build_cores(cores, solr_cores):
    """
    Builds a list of cores to search based on given parameters.
    Raises an exception if any core is not available.
    """
    if cores == '':
        return solr_cores
    if cores == 'test':
        return ['test']
    core_list = cores.split(',')
    invalid_core_list = [core for core in core_list if core not in solr_cores]
    if len(invalid_core_list) > 0:
        raise ValueError('Invalid type(s) requested: ' + ','.join(invalid_core_list))
    return core_list
0338b2bb424b6126c4e31bdb54155ef82fc3741c
31,107
def get_timestamp(milliseconds):
    """
    Generates a timestamp for an amount of milliseconds

    Parameters
    ----------
    milliseconds : int
        Time in milliseconds

    Returns
    -------
    str
        Timestamp (in format H:M:S.milli)
    """
    hours = int(milliseconds / (60 * 60 * 1000))
    milliseconds = milliseconds - hours * (60 * 60 * 1000)
    minutes = int(milliseconds / (60 * 1000))
    milliseconds = milliseconds - minutes * (60 * 1000)
    seconds = int(milliseconds / 1000)
    milliseconds = milliseconds - seconds * 1000
    return "%s:%s:%s.%s" % (
        str(hours).zfill(2),
        str(minutes).zfill(2),
        str(seconds).zfill(2),
        str(milliseconds).zfill(3),
    )
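# Worked example (illustrative): 3,725,004 ms = 1 h 2 min 5 s 4 ms.
assert get_timestamp(3725004) == "01:02:05.004"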
2fbe332cf0faad775f878960cf021d46f5aee2fa
31,119
def bracketing(node, labeled=True):
    """Generate bracketing ``(label, indices)`` for a given node."""
    return (node.label if labeled else '', node.bitset)
3906e912b3f3db6b9725d0738647c745659decb0
31,123
def getFilename(page, extension=None):
    """
    Create a filename that is unique for the page.

    @param page: page used to create the new filename
    @type page: Page
    @param extension: file extension
    @type extension: str
    @return: filename of <family>-<lang>-<page>.<ext>
    @rtype: str
    """
    filename = '%s-%s-%s' % (page.site.family.name,
                             page.site.code,
                             page.titleForFilename())
    if extension:
        filename += '.%s' % extension
    return filename
1168cb8899c46b39ce8e471f3dfd687559eb3a12
31,125
def up_to_first_space(s):
    """Return the substring of s up to the first space, or all of s if it
    does not contain a space."""
    i = s.find(' ')
    if i == -1:
        i = len(s)
    return s[:i]
f1bdde1e55821c022dde4f8f9112dfb694ebfde8
31,126
from typing import Dict


def extract_tables_from_base(base) -> Dict[str, type]:
    """Extract tables and sqlalchemy declarative base classes from a base.

    Parameters
    ----------
    base : DeclarativeBase
        base from which to extract the model classes

    Returns
    -------
    result : dict[str, type]
        dictionary from tablename to sqlalchemy model class
    """
    tables = {}
    registry = base._decl_class_registry
    for dbase in registry.data.values():
        try:
            tablename = dbase().__table__.name
        except AttributeError:
            continue  # ignore _ModuleMarker
        else:
            modelclass = dbase().__mapper__.class_
            tables[tablename] = modelclass
    return tables
c5ea8e4e059cf40d8b0128aefae9e420bfc66888
31,133
def get_features_list(df, contains: list = [], contains_not: list = [],
                      sort_results=True, verbose=True):
    """
    Returns a list of continuous or categorical features from a DataFrame.

    contains: must contain all strings in list
    contains_not: must not contain any of the strings in list
    """
    column_list = [col for col in df.columns]
    for s in contains:
        column_list = [col for col in column_list if col.find(s) > -1]
    for s in contains_not:
        column_list = [col for col in column_list if col.find(s) == -1]
    if sort_results:
        column_list = sorted(column_list)
    if verbose:
        print('found columns:', len(column_list))
        diff = len(column_list) - len(list(set(column_list)))
        if diff > 0:
            print('found', diff, 'duplicate column names')
    return column_list
53c016f1f8876904cc64500d0666d67efa159291
31,137
def _circle_slice(lst, start, end):
    """
    Slices a list in a loop. Treats the list as a ring and slices from
    beginning to end, looping back to the beginning of the list if necessary.
    """
    if 0 <= start < end < len(lst):
        return lst[start:end]
    elif start < 0:
        return lst[start + len(lst):] + lst[:end]
    elif end >= len(lst):
        return lst[start:] + lst[0:end - len(lst)]
    elif start > end:
        return lst[start:] + lst[:end]
    elif start == end:
        return lst[start:] + lst[:end]

    print("SLICE FAILURE: ", lst, "FROM:", start, "END:", end)
    return []
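# Illustrative ring slices (not part of the original snippet):
assert _circle_slice([0, 1, 2, 3, 4], 1, 3) == [1, 2]      # ordinary slice
assert _circle_slice([0, 1, 2, 3, 4], 3, 6) == [3, 4, 0]   # wraps past the end
assert _circle_slice([0, 1, 2, 3, 4], 4, 2) == [4, 0, 1]   # start > end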
c2fad61408c08cd9c09bf426a71f5b0060da3021
31,139
def ahref(text, link):
    """Wraps text in an <a href=X> tag."""
    return "<a href='" + link + "'>" + text + "</a>"
4db85e0f119330afe3cdc9c2cb8f1fbc963c7610
31,142
import math


def cartesian_to_polar(x, y, xorigin=0.0, yorigin=0.0):
    """
    Helper function to convert Cartesian coordinates to polar coordinates
    (centred at a defined origin). In the polar coordinates, theta is an
    angle measured clockwise from the Y axis.

    :Parameters:

    x: float
        X coordinate of point
    y: float
        Y coordinate of point
    xorigin: float (optional)
        X coordinate of origin (if not zero)
    yorigin: float (optional)
        Y coordinate of origin (if not zero)

    :Returns:

    (r, theta): tuple of 2 floats
        Polar coordinates of point. NOTE: theta is in radians.
    """
    PI2 = 2.0 * math.pi
    PIBY2 = math.pi / 2.0
    xdiff = float(x) - float(xorigin)
    ydiff = float(y) - float(yorigin)
    distsq = (xdiff * xdiff) + (ydiff * ydiff)
    r = math.sqrt(distsq)
    theta = PIBY2 - math.atan2(ydiff, xdiff)
    # Adjust theta to be in the range 0 - 2*PI
    while theta < 0.0:
        theta += PI2
    while theta > PI2:
        theta -= PI2
    return (r, theta)
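# Worked example (illustrative): (1, 0) lies on the +X axis, a quarter turn
# clockwise from +Y, so theta = pi/2.
r, theta = cartesian_to_polar(1.0, 0.0)
assert abs(r - 1.0) < 1e-12 and abs(theta - math.pi / 2) < 1e-12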
c8a14848976673b2d4bbab1f0d587fed2180b3ed
31,143
def load_glove_vocab(filename):
    """Loads GloVe's vocab from a file.

    Args:
        filename (str): path to the glove vectors.

    Returns:
        set: a set of all words in GloVe.
    """
    print('Building vocab...')
    with open(filename) as f:
        vocab = {line.strip().split()[0] for line in f}
    print('- done. {} tokens'.format(len(vocab)))
    return vocab
ad36dffb75dec1bb44108de8de2b4ecbd9d066dd
31,147
def calc_score(score):
    """
    Convert a ThreatGrid score to a DBot score.
    """
    if not score:
        return 0
    dbot_score = 1
    if score >= 95:
        dbot_score = 3
    elif score >= 75:
        dbot_score = 2
    return dbot_score
153aace2e34a38e1e476e7d592a0280cb55d5798
31,149
def cluster_by_diff(data, max_gap):
    """
    A function that clusters numbers based on their differences.

    Based on a stackoverflow answer: http://stackoverflow.com/a/14783998

    :param data: any list of floats or ints
    :param max_gap: the largest gap between numbers before starting a new cluster
    :return: nested list
    """
    # since the data needs to be sorted to determine useful differences, sort the data
    data.sort()

    # initialize the nested list
    groups = [[data[0]]]

    # iterate through data
    for x in data[1:]:
        # compare the difference of the current value and the last entry
        # in the groups to the max gap
        if abs(x - groups[-1][-1]) <= max_gap:
            # not larger than gap, append to last group
            groups[-1].append(x)
        else:
            # make new group if larger
            groups.append([x])
    return groups
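# Illustrative clustering (note: the input list is sorted in place):
assert cluster_by_diff([1, 2, 10, 11, 12, 25], max_gap=3) == \
    [[1, 2], [10, 11, 12], [25]]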
a3fd87f0220d27accdb96eda3faa8a3544bebd2f
31,155
def sort(iterable):
    """
    Selection Sort works by iterating through the entire array and selecting
    the minimal value of that array, swapping the first element with it. Then
    it repeats the same procedure, but ignoring the first element, which is
    already sorted.

    This version does not work with generators.

    Complexity: O(n²)

    :param iterable:
    :return: Sorted Iterable
    """
    for i in range(len(iterable)):
        minimum = iterable[i]
        min_index = i
        for j, v in enumerate(iterable[i:], i):
            if v < minimum:
                minimum = v
                min_index = j
        iterable[i], iterable[min_index] = iterable[min_index], iterable[i]
    return iterable
c543c35981afb4272cbc0724bcebbe56cc012744
31,157
async def esi_names_to_lists(response: list) -> tuple:
    """Take the esi_names response and make an ids list and a names list
    and return them as a tuple.

    :param response: The esi_names response.
    :return: A tuple with two lists, ids and names.
    """
    categories = ['alliance', 'character', 'constellation', 'corporation',
                  'inventory_type', 'region', 'solar_system', 'station']
    ids = []
    names = []
    for element in response:
        ids.append(element.get('id'))
        names.append(element.get('name'))
    return ids, names
ca767537ee8afb78ddd7c174ed6a3e2e88b52c8e
31,160
def myinc(a):
    """Increase a by one."""
    return a + 1
ebf28605974ad30b2c221944e7431dab5a30d387
31,165
def trim(value):
    """Removes whitespace around the string.

    Example usage: {{ value|trim }}
    """
    return value.strip()
61bfcd402144bdf51648ad043446dafe9405fb09
31,166
def turn_on_off_event(b, name: str):
    """Turn a light on or off.

    Parameters:
        b -> bridge
        name -> name of the light
    """
    b.connect()
    lights = b.get_light_objects('name')

    # toggle the light's boolean on/off state
    if lights[name].on:
        lights[name].on = False
        return 'Light turned off'
    else:
        lights[name].on = True
        return 'Light turned on'
72613b97dd6e663c726baea599c32a782d3b9db5
31,167
def get_vc_version(session):
    """Return the dot-separated vCenter version string. For example, "1.2".

    :param session: vCenter soap session
    :return: vCenter version
    """
    return session.vim.service_content.about.version
09e13254302a3e9ff784c2c2fbe1a2e637d7330f
31,168
def get_messages_str(messages):
    """
    From a messages list, returns a string with the whole conversation
    between client and server.

    Arguments:
        messages {list} -- list of messages from get_messages

    Returns:
        str -- string of conversation
    """
    messages_str = ""
    for i in messages:
        messages_str += "{}: {}\n".format(i[0], i[1])
    return messages_str.strip("\n\t ")
f2cad328c920fff85491e98dc5478392c8c2e453
31,170
import torch


def convert_to_numpy(*inputs):
    """
    Converts input tensors to numpy ndarrays

    Args:
        inputs (iterable of torch.Tensor): torch tensors

    Returns:
        tuple of ndarrays
    """
    def _to_numpy(i):
        assert isinstance(i, torch.Tensor), "Expected input to be torch.Tensor"
        return i.detach().cpu().numpy()

    # materialize as a tuple so the return value matches the documented type
    return tuple(_to_numpy(i) for i in inputs)
32cd0a3a68c47646e180f205f8120ed6df70e863
31,171
def parse(stdin):
    """
    Calculate the gaps between a sorted sequence of integers, starting at 0.
    """
    adapters = [
        int(line)
        for line in stdin.read().strip().split("\n")
    ]
    adapters.sort()

    gaps = []
    prev = 0
    for adapter in adapters:
        gaps.append(adapter - prev)
        prev = adapter
    return gaps
44e5491fada781bb9a4c5ee1bcf4c311c48fd38c
31,173
def htmldec(text):
    """Decode HTML entities in the given text."""
    chunks = text.split('&#')
    for i in range(1, len(chunks)):
        number, rest = chunks[i].split(';', 1)
        chunks[i] = chr(int(number)) + rest
    text = ''.join(chunks)
    text = text.replace('\xa0', ' ')
    text = text.replace('&nbsp;', ' ')
    text = text.replace('&lt;', '<')
    text = text.replace('&gt;', '>')
    text = text.replace('&quot;', '"')
    text = text.replace('&amp;', '&')
    return text
3543fa84477d35db3fa1cf00ea36e8c1ed82ea62
31,176
def flatten_corner(corner_kick, game_id):
    """Flatten the schema of a corner kick."""
    ck_id = corner_kick[0]
    ck_data = corner_kick[1]
    return {'game_id': game_id,
            'ck_id': ck_id,
            'time_of_event(min)': (ck_data['t']['m'] + (ck_data['t']['s'] / 60)),
            # 'assist': ck_data.get('assBy', None),
            'player_id': float(ck_data['plyrId']),
            'ck_coord_x1': ck_data['coord']['1']['x'],
            'ck_coord_y1': ck_data['coord']['1']['y'],
            'ck_coord_z1': ck_data['coord']['1']['z'],
            'ck_coord_x2': ck_data['coord']['2']['x'],
            'ck_coord_y2': ck_data['coord']['2']['y'],
            'ck_coord_z2': ck_data['coord']['2']['z']}
385bcb73348c997d447f87333e5d141f2b96e0d3
31,178
def get_ranks(keywords, script):
    """Return ranks of the queried keywords in a given script.

    Parameters
    ----------
    keywords : str[]
        Array of keywords to search in the script.
    script : dict[]
        JSON object containing ranks of different keywords.

    Returns
    -------
    ranks : int[]
        Array of integers in the same order as their respective keywords
    """
    ranks = []
    # Populate list of ranks
    for keyword in keywords:
        for d in script:
            if d['keyword'] == keyword:
                ranks.append(d['rank'])
                break
        else:
            # If no rank has been specified for a word, set its rank to 0
            ranks.append(0)
    return ranks
558a5faf1e9a237f183ad0c255dd682f7e3329d5
31,183
def only1(l):
    """
    Checks if the list 'l' of booleans has one and only one True value

    :param l: list of booleans
    :return: True if list has one and only one True value, False otherwise
    """
    true_found = False
    for v in l:
        if v:
            if true_found:
                return False
            else:
                true_found = True
    return true_found
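# Illustrative checks (not part of the original snippet):
assert only1([False, True, False])
assert not only1([True, True])
assert not only1([])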
ce8c938ab00f546916fdeb86fb8cb942dd36afce
31,193
def fields_to_batches(d):
    """
    The input is a dict whose items are batched tensors. The output is a list
    of dictionaries - one per entry in the batch - with the slices of the
    tensors for that entry.

    Here's an example.
    Input:  d = {"a": [[1, 2], [3, 4]], "b": [1, 2]}
    Output: res = [{"a": [1, 2], "b": 1}, {"a": [3, 4], "b": 2}].
    """
    # Make sure all input dicts have same length.
    lengths = [len(x) for x in d.values()]
    assert len(set(lengths)) == 1
    length = lengths[0]
    keys = d.keys()
    res = [{k: d[k][i] for k in keys} for i in range(length)]
    return res
2840591ad2def849c5b9ffbc5fb59e17776ab0c2
31,194
def num_spectral_coeffs_up_to_order(b):
    """
    The SO(3) spectrum consists of matrices of size (2l+1, 2l+1) for
    l = 0, ..., b - 1. This function computes the number of elements in a
    spectrum up to (but excluding) order b.

    The number of elements up to and including order L is
        N_L = sum_{l=0}^L (2l+1)^2 = 1/3 (2L + 1) (2L + 3) (L + 1)

    :param b: bandwidth
    :return: the number of spectral coefficients
    """
    L_max = b - 1
    assert L_max >= 0
    return ((2 * L_max + 1) * (2 * L_max + 3) * (L_max + 1)) // 3
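# Worked check (illustrative): for b = 3 the orders are l = 0, 1, 2 with
# 1 + 9 + 25 = 35 coefficients, and (2*2+1) * (2*2+3) * (2+1) // 3 = 35.
assert num_spectral_coeffs_up_to_order(3) == 35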
39a8645519f2fa681537c8d9ee9843fafbaa5bdb
31,195
import logging


def metrics(time_dur, extremes, count, mean_hr, list_of_times, time, voltages):
    """Create a dictionary with the specified metrics

    Once all of the metrics have been determined, it is necessary to compile
    them all together. This is done through the generation of a dictionary.
    In the assignment, it is specified that the dictionary should contain
    the following information:

        duration: time duration of the ECG strip
        voltage_extremes: tuple in the form (min, max) where min and max are
            the minimum and maximum lead voltages found in the data file.
        num_beats: number of detected beats in the strip, as a numeric
            variable type.
        mean_hr_bpm: estimated average heart rate over the length of the strip
        beats: list of times when a beat occurred

    This function reads in each of these metrics and places each one into
    their respective keys as mentioned above. Then, once all of the
    information has been added to the dictionary, it is returned.

    Parameters
    ----------
    time_dur : float
        Contains duration of ECG strip in seconds
    extremes : tuple
        Contains the minimum and maximum voltages from the ECG strip
    count : int
        Value containing the number of peaks present
    mean_hr : float
        Contains the average heart rate (beats per minute)
    list_of_times : list
        Contains floats that match the times at which peaks occurred
    time : list
        Time values of the ECG strip (stored under the "times" key)
    voltages : list
        Voltage values of the ECG strip (stored under the "voltages" key)

    Returns
    -------
    dictionary
        Contains all of the metrics necessary
    """
    logging.info("Dictionary being established")
    metrics_dict = {"duration": time_dur,
                    "voltage_extremes": extremes,
                    "num_beats": count,
                    "mean_hr_bpm": mean_hr,
                    "beats": list_of_times,
                    "times": time,
                    "voltages": voltages}
    logging.info("Dictionary filled")
    return metrics_dict
e9188d3b6119c44463a3403c570f6f1d567c50f7
31,196
import re


def remove_html_tags(text_obj, pandas_apply_mode=True):
    """
    The purpose of this function is to clean up the strings of article
    contents by removing any HTML tags present.

    :param text_obj: This object specifies what will be cleaned. It can either
        be a single string that represents the content of a single article or
        the row of a Pandas DataFrame that represents the content of a single
        article that belongs to a collection of articles. If it is a DataFrame
        row, then that means that this function is being used in the
        `.apply()` method of a Pandas DataFrame.
    :type text_obj: str or row of a Pandas DataFrame.
    :param pandas_apply_mode: This Boolean controls whether or not this
        function is being used to clean a single string or an entire column
        of a DataFrame (which would be the case if this parameter is set to
        True, which is its default value).
    :type pandas_apply_mode: Bool
    :returns: The function itself returns a string that represents the
        cleaned text. Of course, if this function is used with the `.apply()`
        DataFrame method, then you will get a Pandas Series that contains all
        of the cleaned content strings.
    :rtype: str

    **Notes**

    1. https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.apply.html
    """
    # Instantiate object that will look for text within html tags.
    cleanr = re.compile("<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});")

    # Determine how we want to go about the cleaning.
    if pandas_apply_mode:
        # If the user is using this function to clean multiple strings
        # that live in a column of a Pandas DataFrame.
        content_str = text_obj.content
        cleantext = re.sub(cleanr, "", content_str)
    else:
        # If the user is simply trying to use this function to clean out
        # a single string.
        # removes anything between <> and any other unneeded html tags
        cleantext = re.sub(cleanr, "", text_obj)

    return cleantext
f4f9c2d5776e532fcd8e750f7f9dce3bcf2780fc
31,197
def is_string_blank(string: str) -> bool:
    """Checks if a string is empty/blank or not.

    Parameters:
    ----------
    string (str): String to be checked.

    Returns:
    -------
    bool: Bool value whether the string is empty or not.
    """
    if string and string.strip():
        return False
    return True
d04a9bd33fb4f254a75202de2e1a06cafb1f67e9
31,199
def file_to_list(path_to_file, keep_duplicates=True, encoding='utf-8'):
    """Transforms a plain text (txt or csv) file to a Python :obj:`list`.

    Args:
        path_to_file (str): A path to a plain text file.
        keep_duplicates (bool): If set to False, the output list will not
            contain duplicates. Defaults to True.
        encoding (str): File's encoding. Defaults to "utf-8".

    Returns:
        list: The processed plain text file as a Python :obj:`list`.
    """
    with open(path_to_file, "r", encoding=encoding) as handler:
        lines = [line.replace("\n", "") for line in handler.readlines()]
    return lines if keep_duplicates else list(dict.fromkeys(lines))
dd98720f3d24d761f229094ee4dec70468e1fd70
31,201
def get_crash_id(line):
    """
    Takes a raw CSV line and returns a crash_id

    :param str line: The raw CSV line
    :return str: The Crash ID
    """
    try:
        return line.strip().split(",")[0]
    except Exception as e:
        print("Error: " + str(e))
        return ""
68188ff4290173deddd3608ceb0534ce6ca2df18
31,203
def combine(*dicts):
    """Given multiple dicts, merge them into a new dict as a shallow copy."""
    super_dict = {key: val for d in dicts for key, val in d.items()}
    return super_dict
9747f4dd6cb3f930e31d088d417ffe10ceed168f
31,204
def get_area(ptlist):
    """
    Calculate the area of a polygon defined by a list of points.

    The variable ptlist is a list of (x, y) point pairs. Be careful, the
    implementation can give unexpected results with self-intersecting
    polygons. The output will always be non-negative.

    Created: 2015 April 29, msswan
    """
    # trapezoid rule: signed area contribution of the edge pt1 -> pt2
    I = lambda pt1, pt2: (pt2[1] + pt1[1]) * (pt2[0] - pt1[0]) / 2.0
    area = I(ptlist[-1], ptlist[0])
    for idx in range(0, len(ptlist) - 1):
        area += I(ptlist[idx], ptlist[idx + 1])
    return abs(area)
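# Worked example (illustrative): a unit square has area 1 for either
# winding direction of its vertices.
square = [(0, 0), (1, 0), (1, 1), (0, 1)]
assert get_area(square) == 1.0
assert get_area(list(reversed(square))) == 1.0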
f33f08f26206e08fa3fdcf047038f58b3a90df58
31,206
def auto_int(x):
    """Convert a string into an integer, auto-detecting the base
    (base 0 accepts 0x, 0o and 0b prefixes).

    :param x: String to convert
    :return: value in x
    :rtype: int
    """
    return int(x, 0)
ac212129ef19d11c9035ac87a769adf85d0b59d9
31,210
import inspect


def is_coroutine(coro):
    """Returns true if the argument is a coroutine or coroutine function."""
    return inspect.iscoroutine(coro) or inspect.iscoroutinefunction(coro)
c4b659fd469b4d50a93019001b83729de693a09f
31,221
import attr


def struct(*args, **kw):
    """
    Wrapper around ``attr.s``.

    Sets ``slots=True`` and ``auto_attribs=True``. All other arguments are
    forwarded to ``attr.s``.
    """
    return attr.s(*args, slots=True, auto_attribs=True, **kw)
1eee1044ee340b2fe9387773304751d4efca209f
31,222
import dataclasses


def is_missing(value):
    """
    Used to check whether a dataclass field has ever been assigned.

    If a field without a default value has never been assigned, it will have
    a special value ``MISSING``. This function checks if the parameter is
    ``MISSING``.
    """
    # MISSING is not a singleton and there is no official API to check for it
    return isinstance(value, type(dataclasses.MISSING))
5b5ce45382e655d32afcdc2ef80c8868efaa3ed5
31,223
def genAliases(name):
    """
    Generates aliases for metabolite names, e.g.:

        val --> set(['Val-L', 'Val', 'val', 'val-L'])
    """
    name = name.replace('-L', '').replace('-l', '')
    output = []
    output.append(name)
    output.append(name.lower())
    output.append(name.lower() + '-L')
    output.append(name.lower() + '_L')
    output.append(name.capitalize())
    output.append(name.capitalize() + '-L')
    output.append(name.capitalize() + '_L')
    return output
28b88a35588197765e296528fd3b05f34baa1351
31,227
from datetime import datetime


def str2datetime(st):
    """Convert an ISO 8601 string to a datetime object."""
    if '.' in st:
        return datetime.strptime(st, "%Y-%m-%dT%H:%M:%S.%f")
    else:
        return datetime.strptime(st, "%Y-%m-%dT%H:%M:%S")
b0a011f2417ab72d48f37d1e797121a2dc446523
31,229
import re


def ocd_type_id(text, strip_leading_zeros=True):
    """
    Format a string in a way that's suitable for an OCD type ID.

    Args:
        text: String to format.
        strip_leading_zeros: Remove leading zeros from name. Default is True.
            For example, '08' would become '8'.

    Returns:
        Formatted string.

    See https://github.com/opencivicdata/ocd-division-ids for more on the
    Open Civic Data division identifier spec.

    * Valid characters are lowercase UTF-8 letters, numerals (0-9),
      period (.), hyphen (-), underscore (_), and tilde (~).
    * Characters should be converted to UTF-8.
    * Uppercase characters should be converted to lowercase.
    * Spaces should be converted to underscores.
    * All invalid characters should be converted to tildes (~).
    * Leading zeros should be dropped unless doing so changes the meaning
      of the identifier.
    """
    # Convert to lowercase
    text = text.lower()
    # Remove parentheses
    text = text.replace('(', '')
    text = text.replace(')', '')
    # Convert spaces to underscores
    text = re.sub(r'\s', '_', text)
    # Convert all remaining invalid characters to tildes
    text = re.sub(r'[^\w.\-~]', '~', text)
    if strip_leading_zeros:
        # Remove leading zeros
        text = text.lstrip('0')
    return text
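# Illustrative conversions (not part of the original snippet):
assert ocd_type_id("Kent County") == "kent_county"   # lowercased, space -> _
assert ocd_type_id("08") == "8"                      # leading zero dropped
assert ocd_type_id("St. Mary's") == "st._mary~s"     # apostrophe is invalid -> ~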
ac1a7659d3dba44b6cb5fc51ace6a36ec4f7b1e8
31,232
import itertools


def self_cross_product(it, length=2):
    """
    Return the Cartesian product of `it` with itself, i.e. all ordered
    tuples (with repetition) of elements of `it`.

    >>> self_cross_product(('happy', 'feet'))
    (
        ('happy', 'happy'),
        ('happy', 'feet'),  # wombo combo
        ('feet', 'happy'),
        ('feet', 'feet')
    )
    """
    return tuple(itertools.product(it, repeat=length))
9e1d42ff387d0687d56749402ebc1364b1365272
31,233
import torch


def to_value(v):
    """
    Convert, where appropriate, from tensors to numpy arrays.

    Args:
        v: an object. If ``torch.Tensor``, the tensor will be converted
           to a numpy array. Else returns the original ``v``

    Returns:
        ``torch.Tensor`` as numpy arrays. Any other type will be left unchanged
    """
    if isinstance(v, torch.Tensor):
        return v.cpu().data.numpy()
    return v
571e027a59a1060663785e9be6094093bc1a8115
31,237
import torch


def _set_coords(num_shots, num_per_shot, nx, dx, location='top'):
    """Create an array of coordinates at the specified location."""
    ndim = len(nx)
    coords = torch.zeros(num_shots, num_per_shot, ndim)
    coords[..., 0] = torch.arange(num_shots * num_per_shot)\
        .reshape(num_shots, num_per_shot)
    if location == 'top':
        pass
    elif location == 'bottom':
        coords[..., 0] = (nx[0] - 1).float() - coords[..., 0]
    elif location == 'middle':
        coords[..., 0] += int(nx[0] / 2)
    else:
        raise ValueError("unsupported location")
    for dim in range(1, ndim):
        coords[..., dim] = torch.round(nx[dim].float() / 2)
    return coords * dx
c38d05784dc802c502bb8123057fa1370dbb76a2
31,240
def get_var_names(component, intent='input'):
    """
    Get a list of input or output variable names from *component* (a BMI-like
    object). Use the *intent* keyword to specify whether to return input or
    output variable names. *intent* must be one of *input* or *output*.
    """
    assert intent in ['input', 'output']
    func = getattr(component, 'get_' + intent + '_var_names')
    try:
        var_names = func()
    except TypeError:
        var_names = getattr(component, '_' + intent + '_var_names')
    return var_names
3dcb6f1a7f181a42813755f582e1a2ca03de10e0
31,245
def _merge_cipher(clist):
    """Flatten 'clist' [List<List<int>>] and return the corresponding string [bytes]."""
    cipher = [e for sublist in clist for e in sublist]
    return bytes(cipher)
060af5ad69b11592029e4aaf10fdfff8354378a9
31,249
def ABCDFrequencyList_to_HFrequencyList(ABCD_frequency_list):
    """
    Converts ABCD parameters into h-parameters. ABCD-parameters should be in
    the form [[f, A, B, C, D], ...]. Returns data in the form
    [[f, h11, h12, h21, h22], ...]
    """
    h_frequency_list = []
    for row in ABCD_frequency_list[:]:
        [frequency, A, B, C, D] = row
        h11 = B / D
        h12 = (A * D - B * C) / D
        h21 = -1 / D
        h22 = C / D
        h_frequency_list.append([frequency, h11, h12, h21, h22])
    return h_frequency_list
d4f54e6864a34b8b24b1afe599a9338be52f29fd
31,250
def confidence_for_uniform_audit(n, u, b):
    """
    Return the chance of seeing at least one of the b bad precincts in a
    uniformly drawn sample of size u, from a set of size n.
    """
    miss_prob = 1.0
    for i in range(int(u)):
        miss_prob *= float(n - b - i) / (n - i)
    return 1.0 - miss_prob
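# Worked example (illustrative): auditing 2 of 4 precincts when 2 are bad
# misses both with probability (2/4) * (1/3) = 1/6, so confidence is 5/6.
assert abs(confidence_for_uniform_audit(4, 2, 2) - 5.0 / 6.0) < 1e-12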
16fc15a6a2bd32f9202ff545ca77c9a65abb0ffc
31,256
def get_token(request):
    """Get token from cookie or header."""
    token = request.COOKIES.get('jwt-token')
    if token is None:
        token = request.META.get('HTTP_AUTHORIZATION')
    return token
591d6f282969b3994a0d4b86ccf6eda19fe93530
31,258
def tet_f(note, tuning=440):
    """Return the equal-temperament frequency of a note given in scientific
    pitch notation."""
    # parse note as nth semitone from A0
    letter = note[:-1]
    octave = int(note[-1])
    note_names = ["C", "C#", "D", "D#", "E", "F",
                  "F#", "G", "G#", "A", "A#", "B"]
    letter_index = note_names.index(letter) + 1
    note_number = octave * 12 + letter_index - 9

    # apply the 12-TET formula relative to A4 (note number 49)
    return tuning * ((2 ** (1 / 12)) ** (note_number - 49))
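# Sanity checks (illustrative): A4 is the tuning reference; A5 is one octave up.
assert tet_f("A4") == 440
assert abs(tet_f("A5") - 880) < 1e-9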
17dd86af181c9ed0420f6dcb9f0af07e46607c91
31,259
def StripSuffix(s: str, suffix: str):
    """Strips the provided suffix from s, if present at its end."""
    # guard against an empty suffix, for which s[:-0] would wrongly return ''
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
bb4457b754cefa8c96df2e668a0a92e8a6713517
31,260
from typing import Dict
from typing import List


def iterate_parent(ontology_label: str,
                   ontology: Dict[str, List[str]],
                   family_tree: List[str],
                   mappings: Dict[str, Dict[str, str]]):
    """
    Iterate the ontology to find matched mappings.

    Parameters
    ----------
    ontology_label
        label to query mappings.
    ontology
        name to parents mappings.
    family_tree
        list of labels in the family tree.
    mappings
        ontology label to hub term/dimension mappings.

    Returns
    -------
    list of mapped dictionaries of label, dimension, and hierarchy.
    For example, iterating Manual segmentation returns

        [
            {
                "label": "Image Segmentation",
                "dimension": "Operation",
                "hierarchy": [
                    "Image segmentation",
                    "Manual segmentation"
                ]
            },
            {
                "label": "Image annotation",
                "dimension": "Operation",
                "hierarchy": [
                    "Image annotation",
                    "Dense image annotation",
                    "Manual segmentation"
                ]
            }
        ]
    """
    family_tree.insert(0, ontology_label)
    if ontology_label in mappings:
        return [{
            "label": mappings[ontology_label]["label"],
            "dimension": mappings[ontology_label]["dimension"],
            "hierarchy": family_tree
        }]
    if ontology_label not in ontology:
        return []
    all_families = []
    for token in ontology[ontology_label]:
        all_families.extend(iterate_parent(token, ontology,
                                           family_tree.copy(), mappings))
    return all_families
da641d14d978c9a6010707dd7bbdfb95bbdd1ea4
31,262
def extract_red_channel(input_im, bayer_pattern='grbg'):
    """
    Extract and return the red channel from a Bayer image.

    :param input_im: The input Bayer image.
    :param bayer_pattern: The Bayer pattern of the image, one of 'rggb',
        'bggr', 'grbg' or 'gbrg'.
    :return: The extracted channel, of the same type as the image.
    """
    d = {'rggb': (0, 0), 'bggr': (1, 1), 'grbg': (0, 1), 'gbrg': (1, 0)}
    assert bayer_pattern in d, 'Invalid Bayer pattern \'{}\''.format(bayer_pattern)
    red_idx = d[bayer_pattern][0]
    red_idy = d[bayer_pattern][1]
    im = input_im[red_idx::2, red_idy::2, ...]
    return im
f879f26b42333a82d8f5d8653a6b1bf8479d4732
31,263
def count_bits_set_kernighan(n: int) -> int:
    """
    https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan

    >>> count_bits_set_kernighan(0b101010101)
    5
    >>> count_bits_set_kernighan(2 << 63)
    1
    >>> count_bits_set_kernighan((2 << 63) - 1)
    64
    """
    c = 0
    while n:
        n &= n - 1
        c += 1
    return c
d676e182733f12bf90dfbc74a5c96f60cc6f5eb0
31,267
import hashlib


def generate_mutex_name(target_name, prefix=""):
    """
    A mutex name must be a valid filesystem path, so this generates a hash
    that can be used in case the original name would have conflicts.
    """
    if not isinstance(target_name, bytes):
        target_name = target_name.encode("utf-8")

    return prefix + hashlib.sha224(target_name).hexdigest()[:16]
e36f03fc03ff29535bc95adef3c4425efcf7bd3e
31,268
import operator


def get_out_operands(instruction_form):
    """Returns a list indicating which operands are written by this instruction form"""
    return tuple(map(operator.attrgetter("is_output"), instruction_form.operands))
df85c3021268d820f1c7ad0b820d343ae4041a82
31,271
import pickle


def load_nodes(path):
    """load nodes from storage file"""
    nodes = {}
    with open(path, 'rb') as file:
        nodes = pickle.load(file)

    for node in nodes.values():
        # reset old properties
        node.online = False
        node.index = None
        node.clientcount = 0
    return nodes
9f444f6f1a010d2822045ee359fa8bc109576bc9
31,275
from datetime import datetime

import pytz


def format_absolute_datetime(date_time: datetime) -> str:
    """Generate a human-readable absolute time string."""
    now = datetime.now(tz=pytz.utc)
    format_str = ""
    if date_time.date() != now.date():
        format_str += "%Y-%m-%d"
        time_part = date_time.time()
        # Only add the relevant time parts
        if time_part.hour != 0 or time_part.minute != 0 or time_part.second != 0:
            if time_part.second != 0:
                format_str += " %H:%M:%S"
            else:
                format_str += " %H:%M"
    else:
        time_part = date_time.time()
        # Only add the relevant time parts
        if time_part.second != 0:
            format_str = "%H:%M:%S"
        else:
            format_str = "%H:%M"
    return date_time.strftime(format_str)
8d9ead27c415225d211fe45195a6c2df5e3e17d1
31,278