Columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def case_of(value: str) -> int:
    """Returns 1 for all uppercase, 0 for all lowercase, and -1 for mixed case."""
    if all(map(lambda x: x.isupper(), value)):
        return 1
    elif all(map(lambda x: x.islower(), value)):
        return 0
    return -1
e0b56890ff73a9b59385e716be6ea3c41ab03bc6
36,151
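A quick usage sketch for case_of (note that all() over an empty string is vacuously true, so case_of("") returns 1):

assert case_of("HELLO") == 1
assert case_of("hello") == 0
assert case_of("Hello") == -1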
def __filter_event_type__(trace_events, event_type):
    """
    Looks for the events in the trace matching the event type

    :param trace_events: Events found in the trace (filtered by family).
    :param event_type: Event type to filter.
    :return: Filtered trace
    """
    filtered = []
    for line in trace_events:
        if line[0] == event_type:
            filtered.append(line)
    return filtered
4a4b49272014ff2a2f552f3a4a381f859a886a5a
36,152
from typing import Callable


def map(f: Callable, collection):
    """Transform each element of a collection.

    Examples
    --------
    .. doctest::

        >>> a = ['The', 'quick', 'brown', 'fox']
        >>> hl.eval_expr(hl.map(lambda x: hl.len(x), a))
        [3, 5, 5, 3]

    Parameters
    ----------
    f : function ( (arg) -> :class:`.Expression`)
        Function to transform each element of the collection.
    collection : :class:`.ArrayExpression` or :class:`.SetExpression`
        Collection expression.

    Returns
    -------
    :class:`.ArrayExpression` or :class:`.SetExpression`
        Collection where each element has been transformed by `f`.
    """
    return collection._bin_lambda_method(
        "map", f, collection.dtype.element_type,
        lambda t: collection.dtype.__class__(t))
677c8b5185e45126c85448020a6a914c0938f785
36,154
from functools import reduce
from operator import mul


def prod(iterable):
    """Return the product of all numbers in an iterable."""
    return reduce(mul, iterable)
d4ca281afd572aaae7da4bf13696ebc8d05be32f
36,161
def get_project_from_manifest(manifest, pid):
    """
    Returns the project entry from the manifest

    :param manifest: The parsed manifest dict.
    :param pid: The project identifier to look up.
    :return: The matching project entry, or None if not found.
    """
    if 'projects' not in manifest:
        return None
    for project in manifest['projects']:
        if project['identifier'] == pid:
            return project
    return None
5f72faffeb14bc20568c2898ca3b9c65b2edb53f
36,164
def all_properties(obj):
    """
    Return a list of names of non-methods of 'obj'
    """
    noncallables = []
    for name in dir(obj):
        if not hasattr(getattr(obj, name), '__call__'):
            noncallables.append(name)
    return noncallables
58a86250e03e9cb4c9f6567eaf072173ff419e73
36,167
def power_level(serial: int, x: int, y: int) -> int:
    """Compute the power level of the fuel cell at x, y."""
    rack_id = x + 10
    p = rack_id * y + serial
    p *= rack_id
    p = (p // 100) % 10
    return p - 5
316895b97f752867171ff4dd0463ea5395228b97
36,170
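The formula appears to implement the Advent of Code 2018 day 11 fuel-cell rule; assuming so, the puzzle's worked example is a handy sanity check:

# rack_id = 13; 13*5 + 8 = 73; 73*13 = 949; hundreds digit 9; 9 - 5 = 4
assert power_level(8, 3, 5) == 4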
from pathlib import Path


def get_nprocs(modelpath):
    """Return the number of MPI processes specified in input.txt."""
    # The process count lives on line 22 (index 21) of input.txt,
    # before the trailing '#' comment on that line.
    return int(Path(modelpath, 'input.txt').read_text().split('\n')[21].split('#')[0])
82faad4d21e5a9acb7de123338e71b1123ad79cc
36,172
import pathlib


def is_relative_to(path: pathlib.Path, base: pathlib.Path) -> bool:
    """Check whether `path` is contained inside `base`."""
    try:
        path.relative_to(base)
        return True
    except ValueError:
        return False
6a9bed0700d87d9c74ba8980f25f3de522fa5ca8
36,181
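A brief usage sketch; Path.relative_to operates on pure paths, so no filesystem access is needed:

assert is_relative_to(pathlib.Path("/srv/data/img.png"), pathlib.Path("/srv/data"))
assert not is_relative_to(pathlib.Path("/srv/data"), pathlib.Path("/var/log"))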
from functools import reduce
import operator
import collections


def strings_to_wordsets(strings, stop_words=None):
    """Build a dict of wordsets from a list of strings, with optional filter.

    For each distinct word found in the list of strings, the wordset dict will
    map that word to a set of the strings that contain it. A list of words to
    ignore may be passed in stop_words.
    """
    string_words = [set(w.split(' ')) for w in (s.lower() for s in strings)]
    words = reduce(operator.or_, string_words)
    if stop_words:
        words -= set(stop_words)
    wordsets = collections.OrderedDict(
        (w, set(strings[i] for i, s in enumerate(string_words) if w in s))
        for w in sorted(words))
    return wordsets
407f604d6fe78e6aad10e972fcda63ebb42209f1
36,185
from typing import Union
import re


def _cast_valid_types(content: Union[str, int, bool]) -> Union[str, bool, int]:
    """
    Cast an input that explicitly reads "true" or "false" (case-insensitive) as a boolean type and cast
    all strings of only digits as an integer type. This function does nothing and returns the same value
    if the input is not a string.

    :param content: The string of content to parse out compatible types for
    :return: The value cast as the type detected
    """
    if isinstance(content, str):
        # Check if the response matches a boolean's text (must be explicit to
        # prevent coercion of ints like '1' -> True). The original compared
        # against ('true' or 'false'), which evaluates to just 'true', and
        # bool(content) is True for any non-empty string.
        if content.lower() in ('true', 'false'):
            content = content.lower() == 'true'
        # Check if the response is an integer and only an integer (explicitly
        # define match to avoid type coercion)
        elif re.fullmatch(r'\d+', content):
            content = int(content)
    return content
c06e3635c1f5d4ac6d33939db8d552d58803dda1
36,187
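With the boolean comparison fixed as above, a few illustrative calls:

assert _cast_valid_types("True") is True
assert _cast_valid_types("false") is False
assert _cast_valid_types("42") == 42
assert _cast_valid_types("4.2") == "4.2"  # not all digits, left as a string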
def more_like_this(_es, es_index, field, like_list, min_term_freq, max_query_terms):
    """Build and execute a more like this query on the like document

    See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html

    Returns
        result (list): list of documents that match the like document.
    """
    queries = [{
        "stored_fields": field,
        "query": {
            "more_like_this": {
                "fields": field,
                "like": like,
                "min_term_freq": min_term_freq,
                "max_query_terms": max_query_terms
            }
        }
    } for like in like_list]
    results = []
    for query in queries:
        res = _es.search(index=es_index, body=query)
        results.append([hit['fields'][field[0]][0] for hit in res['hits']['hits']])
    return results
a48fcc6e44b9a25bafd7936122c5ecc91cfdb932
36,189
import re


def _getXMLText(fileobj):
    """Convenience function for reading the XML header data in a ShakeMap grid file.

    :param fileobj: File-like object representing an open ShakeMap grid file.
    :returns: All XML header text.
    """
    tline = fileobj.readline()
    datamatch = re.compile('grid_data')
    xmltext = ''
    tlineold = ''
    while not datamatch.search(tline) and tline != tlineold:
        tlineold = tline
        xmltext = xmltext + tline
        tline = fileobj.readline()
    xmltext = xmltext + '</shakemap_grid>'
    return xmltext
a44091de4543142a83a13b010de7f5a471a21df8
36,191
def map_l2dist_gaussianmech_renyiDP(sensitivity, scale, alpha):
    """map an L2 distance `sensitivity` through the gaussian mechanism with parameter `scale` to (alpha, epsilon)-RDP

    Proposition 7 and Corollary 3: https://arxiv.org/pdf/1702.07476.pdf#subsection.6.3

    :param sensitivity: maximum L2 distance perturbation of a query
    :param scale: standard deviation of gaussian noise
    :param alpha: order of renyi divergence > 1
    :returns: epsilon
    """
    return alpha * (sensitivity / scale) ** 2 / 2
98b5454fdc299b345a73ad1473ff56e249a29397
36,193
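A worked instance of the formula epsilon = alpha * (sensitivity / scale)**2 / 2: with sensitivity 1, noise scale 1, and alpha = 2, the mechanism satisfies (2, 1.0)-RDP:

assert map_l2dist_gaussianmech_renyiDP(sensitivity=1.0, scale=1.0, alpha=2) == 1.0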
import json


def to_dict(data):
    """Convert JSON to dict data; if the input is not a JSON object, return None."""
    if isinstance(data, dict):
        return data
    try:
        value = json.loads(data)
        if not isinstance(value, dict):
            raise ValueError("JSON did not decode to a dict")
        return value
    except (ValueError, TypeError):
        return None
9419173d027b998cebe949e7a43424834fd50d7c
36,194
from typing import Mapping
from typing import Any


def _get_param_or_die(input_params: Mapping[str, Any], param: str) -> Any:
    """Returns value of param. Dies with user-formatted message if not defined.

    Args:
        input_params: Mapping from input parameter names to values.
        param: Name of the param to get the value of.

    Returns:
        Value of the given param.

    Raises:
        ValueError: User formatted message on error.
    """
    value = input_params.get(param, None)
    if not value:
        raise ValueError('Missing parameter: %s' % param)
    return value
f06bfb5ae5393cf0e41a33db1c03797e940fcdef
36,197
def fieldmap_minmax(fieldmap, variable):
    """Data limits for a given variable across all zones in a fieldmap"""
    limits = None
    for z in fieldmap.zones:
        if limits is None:
            limits = z.values(variable).minmax()
        else:
            low, high = z.values(variable).minmax()
            limits = (min(limits[0], low), max(limits[1], high))
    return limits
662da76d19fd73c8e93b0cbaf80a04d8f14f3135
36,200
def reverse(sentence):
    """
    Split the original sentence into a list, then append elements of the old
    list to the new list starting from last to first. Then join the list back
    together.
    """
    original = sentence.split()
    reverse = []
    count = len(original) - 1
    while count >= 0:
        reverse.append(original[count])
        count = count - 1
    result = " ".join(reverse)
    return result
258bb97834a177a90dbfcf436d3228bb7f9a7237
36,202
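Example:

assert reverse("the quick brown fox") == "fox brown quick the"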
from typing import Dict

import torch


def create_ner_conditional_masks(id2label: Dict[int, str]) -> torch.Tensor:
    """Create a NER-conditional mask matrix which implies the relations between
    before-tag and after-tag.

    According to the rules of the BIO naming scheme, a tag such as `I-Dog` can
    only appear after `B-Dog` or `I-Dog`. This function creates a relation-based
    conditional matrix to prevent the model from generating invalid tag sequences.

    Args:
        id2label: A dictionary which maps class indices to their label names.

    Returns:
        A conditional mask tensor.
    """
    conditional_masks = torch.zeros(len(id2label), len(id2label))
    for i, before in id2label.items():
        for j, after in id2label.items():
            if after == "O" or after.startswith("B-") or after == f"I-{before[2:]}":
                conditional_masks[i, j] = 1.0
    return conditional_masks
0cda8db465b039349eff1022a8cd2cc071f21295
36,203
def acquire_image_url(soup):
    """
    Take a BeautifulSoup content of a book page.
    Return the url of the image of the book.
    """
    partial_url = soup.img['src'][5:]
    image_url = f"http://books.toscrape.com{partial_url}"
    return image_url
c2c14823f3fa1dbe30838dbab2513653d7c0fa3c
36,204
def encode_varint_1(num):
    """Encode an integer to a varint representation.

    See https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    Arguments:
        num (int): Value to encode

    Returns:
        bytearray: Encoded representation of integer with length from 1 to 10 bytes
    """
    # Zigzag encoding: shift sign to the end of the number
    num = (num << 1) ^ (num >> 63)
    # Max 10 bytes. We assert those are allocated
    buf = bytearray(10)
    for i in range(10):
        # 7 lowest bits from the number and set 8th if we still have pending
        # bits left to encode
        buf[i] = num & 0x7f | (0x80 if num > 0x7f else 0)
        num = num >> 7
        if num == 0:
            break
    else:
        # Max size of an encoded value is 10 bytes for unsigned values
        raise ValueError("Out of double range")
    return buf[:i + 1]
3a5883a352a85c3c472889b8b1e347ba41df0615
36,205
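A few spot checks of the zigzag-then-varint behavior (0 maps to 0x00, -1 zigzags to 1, and 64 zigzags to 128, which needs a continuation byte):

assert bytes(encode_varint_1(0)) == b'\x00'
assert bytes(encode_varint_1(-1)) == b'\x01'
assert bytes(encode_varint_1(64)) == b'\x80\x01'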
def to_degrees(dir, value):
    """
    Convert the GPS coordinates stored in the EXIF to degrees in float format.

    :param value: tuples of DMS
    :param dir: direction E/N/W/S
    """
    d = float(value[0][0]) / float(value[0][1])
    m = float(value[1][0]) / float(value[1][1])
    s = float(value[2][0]) / float(value[2][1])
    w = 1 if dir in ('E', 'N') else -1
    return w * (d + (m / 60.0) + (s / 3600.0))
3cb60e15049bf3c538d8dc46fae92b123e4cafd5
36,210
import math


def distance(x1, y1, x2, y2):
    """Get euclidean distance between two points"""
    return math.sqrt(math.pow(abs(x1-x2), 2) + math.pow(abs(y1-y2), 2))
722600df2cba61443a0663e4076c58f5522ea7b1
36,212
def unknown_pairs_present(model):
    """
    Test if PDB file contains unknown type pairs

    Parameters:
        model (obj): model object

    Returns:
        (bool): True if PDB file contains unknown type pairs
    """
    grm = model.get_restraints_manager()
    sites_cart = model.get_sites_cart()
    # get_xray_structure is a method; the original was missing the call parentheses.
    site_labels = model.get_xray_structure().scatterers().extract_labels()
    pp = grm.pair_proxies(sites_cart=sites_cart, site_labels=site_labels)
    return (pp.nonbonded_proxies.n_unknown_nonbonded_type_pairs != 0)
f826a41f2e62436b68a3ed9ab15d15b5a8335252
36,215
def _quote_tag_if_needed(tag: str) -> str:
    """Quotes tags just like timewarrior would quote them.

    Args:
        tag: The tag that should be quoted.

    Returns:
        The quoted tag.
    """
    if tag[0] == '"' or tag[0] == "'":
        return tag

    special_chars = [" ", '"', "+", "-", "/", "(", ")", "<", "^", "!", "=", "~", "_", "%"]
    if any(char in tag for char in special_chars):
        return f'"{tag}"'
    return tag
bfcb2064dae08c758d9c75eac3abe371854ea62b
36,219
def largest_prime_factor(input_num):
    """
    Function returns the largest prime factor of an input.

    REQ: input_num >= 0 and whole

    :param input_num: {int} is original input number
    :return: {int} the largest prime factor of the input or {NoneType} if input_num < 2
    """
    # if input is less than 2, there are no prime factors less than 2, return None
    if input_num < 2:
        return None
    # set current lowest prime to lowest prime factor (2)
    curr_lpf = 2
    # loop while our current input is greater than our current lpf
    while input_num > curr_lpf:
        # if division results in whole number, divide input by curr_lpf and reset to 2
        if input_num % curr_lpf == 0:
            input_num = input_num // curr_lpf
            curr_lpf = 2
        # else move onto next largest factor
        else:
            curr_lpf += 1
    return curr_lpf
507c36186f29ba96ec842dce0b3c7c76cca89d9e
36,222
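Usage, including the classic Project Euler check (13195 = 5 x 7 x 13 x 29):

assert largest_prime_factor(13195) == 29
assert largest_prime_factor(1) is None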
def get_fastqs_for_read_index(lane_to_fastqs, read_index):
    """Get a list of fastq urls for the given read index

    Args:
        lane_to_fastqs (Dict[Dict[str, str]]): dict of dict mapping each lane to a dict of read index to fastq url
        read_index (str): the read_index to filter by, e.g. "read1", "read2", or "index1"

    Returns:
        fastq_urls (list): list of fastq urls
    """
    lanes = sorted(lane_to_fastqs.keys())
    fastq_urls = []
    for lane in lanes:
        if read_index in lane_to_fastqs[lane]:
            manifest_entry = lane_to_fastqs[lane][read_index]
            fastq_urls.append(manifest_entry.url)
    return fastq_urls
2f81d5f4457bb635b47ddcf85c8d8acca0eccbb2
36,227
from pathlib import Path

import torch


def load_pretrained(vae, path, load_predictor):
    """Load a previously trained model, and optionally ignore weights/bias for predictor"""
    load_path = Path(path)
    state = torch.load(load_path)
    if "epoch" in state.keys():
        print(f"Loading model from epoch {state['epoch']}")
        state_dict = state["state_dict"]
    else:
        state_dict = state
    if not load_predictor:
        state_dict = {k: v for k, v in state_dict.items() if "predictor" not in k}
    mismatch = vae.load_state_dict(state_dict, strict=False)
    print("Missing keys:", mismatch)
    return vae
20ea5b631b56078732da4c3631e614e53d37ac19
36,228
def determine_best_contact(contacts):
    """
    Return the contact with the highest amount of transferred data from the
    given collection.

    :param contacts: list / set / ...
    :return: Contact
    """
    current_best_data = 0
    best_contact = None
    for contact in contacts:
        current_data = contact.get_data()
        if current_data > current_best_data:
            current_best_data = current_data
            best_contact = contact
    return best_contact
5ce0ccb4c6489ca545983adf78cead03311aae03
36,229
def find_key(d: dict, key: str, default=None):
    """
    Search for the first occurrence of the given key deeply in the dict.
    When the key is not found, the default value is returned.
    """
    if key in d:
        return d[key]
    for k, v in d.items():
        if isinstance(v, dict):
            item = find_key(v, key, default)
            if item is not None:
                return item
    return default
8f3741a99da6eb3d7989089e460cb7a5eaa4554a
36,231
def polygon_trans(p):
    """
    :param p: polygon list with dict("lat": v1, "lng": v2) as elements
    :return: polygon list with (v_lat, v_lng) as elements
    """
    new_p = []
    for point in p:
        new_p.append((point["lat"], point["lng"]))
    return new_p
86000bc8f94a79060ad0321808d13fbc7296be6b
36,232
def _buffer_if_necessary(shape):
    """Fix basin shapes which are invalid.

    Following the advice given here:
    https://github.com/Toblerity/Shapely/issues/344
    """
    if not shape.is_valid:
        shape = shape.buffer(0.0)
    assert shape.is_valid
    return shape
f7646b8a909f9790f16e672e8de5b479402a5b5d
36,235
def get_stats(data):
    """
    Returns some statistics about the given data, i.e. the number of unique
    entities, relations and their sum.

    Args:
        data (list): List of relation triples as tuples.

    Returns:
        tuple: #entities, #relations, #entities + #relations.
    """
    entities = set()
    relations = set()
    for triple in data:
        entities.add(triple[0])
        entities.add(triple[2])
        relations.add(triple[1])
    return len(entities), len(relations), len(entities) + len(relations)
6496db5be15b330de84345d862a66a16c0ebc969
36,236
def format_pylint_disables(error_names, tag=True):
    """
    Format a list of error_names into a 'pylint: disable=' line.
    """
    tag_str = "lint-amnesty, " if tag else ""
    if error_names:
        return u" # {tag}pylint: disable={disabled}".format(
            disabled=", ".join(sorted(error_names)),
            tag=tag_str,
        )
    else:
        return ""
de7355f51fc20ba174f5f8db852c9254c016fa75
36,237
def is_jar_library(target):
    """Returns True if the target is an external jar library."""
    return target.has_label('jars')
020d389e3bc71723a21d948f43debaa274b0de39
36,245
def myfuncMean(TheListOfvalues):
    """
    This function computes and returns:
    1.- The mean of a list holding any set of numeric values.
    2.- A message regarding whether the mean is or is not a whole number.

    To call this function do:

        thevalues = [1, 2, 3, 4, 5]
        meanval, message = myfuncMean(thevalues)
    """
    NumberOfValues = 0
    TheAdding = 0
    for newValue in TheListOfvalues:
        NumberOfValues = NumberOfValues + 1
        TheAdding = TheAdding + newValue

    if (TheAdding % NumberOfValues) == 0:
        TheMean = TheAdding // NumberOfValues
        mesg = 'The mean of the values is the whole number = {0}'.format(TheMean)
    else:
        TheMean = TheAdding / NumberOfValues
        mesg = 'The mean of the values IS NOT a whole number = {0}'.format(TheMean)

    return TheMean, mesg
1276f97d42161b5a4b8ddb882660ce97fdaae030
36,247
from pathlib import Path


def get_expect_file(sql_file: Path) -> Path:
    """
    Returns the csv file with the expected results for a sql file.
    """
    # Note: the original tested `suffix.lower() not in (".sql")`, which is a
    # substring test against the plain string ".sql", not tuple membership.
    if (
        str(sql_file) == ""
        or sql_file.stem == ""
        or sql_file.suffix == ""
        or sql_file.suffix.lower() != ".sql"
    ):
        return Path()
    return sql_file.parent / (sql_file.stem + ".csv")
a110787038fe149a7282beb8830f620fe41ac23a
36,250
from itertools import zip_longest


def binaryFilesEqual(fn1, fn2):
    """True if two files are bytewise identical."""
    with open(fn1, "rb") as f1, open(fn2, "rb") as f2:
        # zip() would stop at the end of the shorter file and wrongly report
        # a file and its prefix as equal, so pad the shorter stream instead.
        for chunk1, chunk2 in zip_longest(f1, f2, fillvalue=None):
            if chunk1 != chunk2:
                return False
    return True
e1d71d0a8ed51eb85d4e3deae11f46730bd51d51
36,252
def score_char_overlap(term1: str, term2: str) -> int:
    """Count the number of overlapping character tokens in two strings.

    :param term1: a term string
    :type term1: str
    :param term2: a term string
    :type term2: str
    :return: the number of overlapping ngrams
    :rtype: int
    """
    num_char_matches = 0
    for char in term2:
        if char in term1:
            term1 = term1.replace(char, "", 1)
            num_char_matches += 1
    return num_char_matches
8bda41b2babffcc55b27831bced476b6d9a77eb5
36,253
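For example, "hello" and "world" share one 'o' and one 'l'; removing each matched character one at a time prevents double counting:

assert score_char_overlap("hello", "world") == 2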
def clean_command_type(text: str) -> str:
    """Remove parents from the command type"""
    text = text.replace("Command.", "")
    text = text.replace("CommandType.", "")
    return text
ca6b1a8ee0a3ee87487c5901413ad141c6a97ff2
36,257
import tempfile
import getpass


def get_default_session_filename(username: str) -> str:
    """Returns default session filename for given username."""
    dirname = tempfile.gettempdir() + "/" + ".instaloader-" + getpass.getuser()
    filename = dirname + "/" + "session-" + username
    return filename.lower()
8f8e9415f5151088a55144e9c3e6f0a9608e1dba
36,258
import re


def parse_points(points_str):
    """Parses the points specification for polyline and polygon elements
    (SVG 1.1, 9.7.1).
    """
    # Treat #-# as # -#
    points_str = points_str.replace('-', ' -')
    return [float(s) for s in re.split("[\x20\x09\x0D\x0A]+|[,]", points_str)
            if s != ""]
b2b29ffcf9e240ea06ca75d55f2595416b75963d
36,260
def find_text(node, path):
    """Find a node's text or None"""
    return getattr(node.find(path), 'text', None)
ddc951dcec720ab2ed73c4277d582e8ffa25ae2b
36,270
def format_time(time):
    """
    Format time based on strftime.

    Args:
        time: Expected time as datetime.datetime class

    Returns:
        Formatted time.
    """
    return time.replace(second=0).strftime('%Y-%m-%d %H:%M:%S')
fe6abda2c787c5504ea8518c25fc2ddf0054b8f2
36,271
from typing import Iterable
from pathlib import Path


def list_images(img_dir) -> Iterable[str]:
    """List all image files in img_dir.

    Returns an iterator that lists the files to process. Subclasses may want
    to override this to return specific image types or filter the results.
    By default, will list all images in self.img_dir if the file extension is
    in the extensions list.

    Returns
    -------
    Iterable[str]
        The list of files to be used for generating the masks.
    """
    extensions = (".png", ".jpg", ".jpeg", ".tif", ".tiff")

    paths = Path(img_dir).glob("**/*")
    paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)
    return (str(p) for p in paths)
c7585c4fe737fb95af27a3fad578ebf3347e4f9c
36,282
from pathlib import Path


def assemble_path(*args):
    """Join together all specified inputs into a directory path."""
    if not args:
        raise ValueError("You must specify at least one path parameter to assemble")

    assembled_path = Path(args[0])
    for path in args[1:]:
        assembled_path /= path
    return assembled_path
cfe2102683046fa655535bbf1ae6323960046780
36,286
from collections import OrderedDict


def list_drop_duplicates(li: list, keep: str = 'first') -> list:
    """
    Drop duplicates from a (ordered) list

    :param li: List to drop duplicates from
    :param keep: Keep first or last occurrence of the unique items
    """
    if keep == 'first':
        return list(OrderedDict((x, True) for x in li).keys())
    elif keep == 'last':
        # Dedupe the reversed list (keeping last occurrences), then restore the
        # order; iterating reversed(li) avoids mutating the caller's list in place.
        return list(OrderedDict((x, True) for x in reversed(li)).keys())[::-1]
    else:
        raise ValueError(f'Cannot parse {keep} as argument for keep. This should be either "first" or "last"')
b43b59a7d6ea266843266ee3eb8d5af5eaf7bb33
36,287
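Example with both modes:

li = [1, 2, 1, 3, 2]
assert list_drop_duplicates(li, keep='first') == [1, 2, 3]
assert list_drop_duplicates(li, keep='last') == [1, 3, 2]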
def is_api_disabled(config, api_name):
    """Check if api_name is disabled in the config.

    Args:
        config (dict): GCP API client configuration.
        api_name (str): The name of the GCP api to check.

    Returns:
        bool: True if the API is disabled in the configuration, else False.
    """
    return config.get(api_name, {}).get('disable_polling', False)
cdce24c07cdf1190c1ea46613cbdccfaec649404
36,289
import glob


def get_file_paths(path_to_data: str = 'drive/MyDrive/Belgorodskaya/*.tif',
                   feature_names: list = ['tmax', 'tmin', 'pr']):
    """
    Filters out required features among the terraclim dataset.

    Arguments:
        path_to_data (str): path to directory that contains the terraclim dataset
        feature_names (list): list of required features

    Returns:
        dict: key -- feature name; value -- list of related tif files
    """
    files_to_mosaic = glob.glob(path_to_data)
    files_to_mosaic = list(filter(lambda x: sum(fn in x for fn in feature_names) > 0,
                                  files_to_mosaic))
    file_paths = {fn: list(filter(lambda x: fn in x, files_to_mosaic))
                  for fn in feature_names}
    return file_paths
3e9e5d3a527cb7c7dafa800736cc274ea23e34e0
36,290
def deserialize_measurement(serialized):
    """Deserialize a `openff.evaluator.unit.Measurement` from a dictionary
    of the form `{'value', 'error'}`.

    Parameters
    ----------
    serialized : dict of str and str
        A dictionary representation of a `openff.evaluator.unit.Measurement`
        which must have keys {"value", "error"}

    Returns
    -------
    openff.evaluator.unit.Measurement
        The deserialized measurement.
    """
    if "@type" in serialized:
        serialized.pop("@type")
    return serialized["value"].plus_minus(serialized["error"])
80b28ac9c641fb0399efc5d24eddfc381df68f27
36,293
def score(letter):
    """
    Returns index of letter in alphabet e.g. A -> 1, B -> 2, ...
    """
    string = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return string.index(letter) + 1
becba57c57c36204d2b106836233a91b90960712
36,294
import re
import unicodedata


def slugify(string):
    """
    Slugify unicode string.

    Example:
        >>> slugify("Hélló Wörld")
        "hello-world"
    """
    if not string:
        return ''
    return re.sub(r'[-\s]+', '-',
                  re.sub(r'[^\w\s-]', '',
                         unicodedata.normalize('NFKD', string)).strip().lower())
8924c712d7b9527f3c0df2d2b3522a411a3dfd4a
36,299
def computeSimilarityScores(inputs, fvecfunc, combfunc, clsfunc):
    """Computes similarity scores for all pairs in the given set of inputs.

    Exhaustively goes through all pairs of inputs, and looks up feature
    vectors using the given fvecfunc. Pairs of feature vectors are combined
    using the combfunc. These are fed into clsfunc (as a list) to get a score.
    Returns a dict mapping pairs (input1, input2) -> similarity score.
    """
    ret = []
    for i, i1 in enumerate(inputs):
        f1 = fvecfunc(i1)
        for i2 in inputs[i+1:]:
            f2 = fvecfunc(i2)
            fvec = combfunc(f1, f2)
            ret.append((i1, i2, fvec))
    allin1, allin2, allfvecs = zip(*ret)
    scores = clsfunc(allfvecs)
    ret = dict(((i1, i2), s) for (i1, i2, fvec), s in zip(ret, scores))
    return ret
383be99d20e2bea50b3b551f009daa0e5f54cb0a
36,302
def get_device_id(device):
    """Get the device_id of a device"""
    return device.type[len('_arsdk-'):-len('._udp.local.')]
f53f38a06089d584382d5ab02d3e3dd3b250bb41
36,304
import re


def remove_default_namespace(src):
    """
    Remove default xmlns from the given string.

    :param str src: Source string
    :returns: String with xmlns definitions removed.
    """
    return re.sub(r' xmlns="[^"]+"', '', src)
600a0d2331b32010d29c4ba9cb004cc3bdf80d05
36,310
def get_display_range(worksheet):
    """
    Get the display range of a worksheet.

    args:
        worksheet (seeq.spy.workbooks._worksheet.AnalysisWorksheet): Worksheet

    returns:
        display_range (dict): Display range. {'Start': Timestamp, 'End': Timestamp}
    """
    return worksheet.display_range
160447f5adf495748042f1b2010921eb4da3c573
36,312
def _is_prefix(lhs, rhs):
    """Return True if the first list is a prefix of the second."""
    rhs = list(rhs)
    while rhs:
        if lhs == rhs:
            return True
        rhs.pop()
    return False
6b80093479f4f8f989386f51af98862593fc6867
36,315
def count_number_of_entries(row, feature, ref_counts):
    """Count the number of entries for a given building based on the building
    reference number.

    row : pandas.Series
        EPC dataset row.

    feature : str
        Feature by which to count building entries,
        e.g. "BUILDING_REFERENCE_NUMBER" or "BUILDING_ID".

    ref_counts : pandas.Series
        Value counts for building reference number.

    Returns
    -------
    counts : int
        How many entries there are for the given building."""

    building_ref = row[feature]

    try:
        counts = ref_counts[building_ref]
    except KeyError:
        return building_ref

    return counts
2658281beeb51cea8ca1bd3484a8ecd089763d55
36,319
def comment_parser(reddit_comment_object):
    """Parses a comment and returns selected parameters"""
    post_timestamp = reddit_comment_object.created_utc
    post_id = reddit_comment_object.id
    score = reddit_comment_object.score
    ups = reddit_comment_object.ups
    downs = reddit_comment_object.downs
    post_body = reddit_comment_object.body
    thread_title = reddit_comment_object.link_title
    thread_url = reddit_comment_object.link_url
    subreddit = reddit_comment_object.subreddit.display_name
    return post_timestamp, post_id, score, ups, downs, post_body, thread_title, thread_url, subreddit
2331c0b52201272a39d0b3befeb8a962f59c05a6
36,322
def ids2tokens(vocab, tokids):
    """
    Convert list of numeric token ID arrays `tokids` to a character token array
    with the help of the vocabulary array `vocab`. Returns result as list of
    string token arrays.

    .. seealso:: :func:`~tmtoolkit.preprocess.tokens2ids` which reverses this operation.

    :param vocab: vocabulary array as from :func:`~tmtoolkit.preprocess.tokens2ids`
    :param tokids: list of numeric token ID arrays as from :func:`~tmtoolkit.preprocess.tokens2ids`
    :return: list of string token arrays
    """
    return [vocab[ids] for ids in tokids]
70149c881d362bbe32fa39de6941891e0f8915db
36,323
def get_block_size(num_antennas=1, tchans_per_block=128, num_bits=8, num_pols=2,
                   num_branches=1024, num_chans=64, fftlength=1024, int_factor=4):
    """
    Calculate block size, given a desired number of time bins per RAW data block
    `tchans_per_block`. Takes in backend parameters, including fine channelization
    factors. Can be used to calculate reasonable block sizes for raw voltage recording.

    Parameters
    ----------
    num_antennas : int
        Number of antennas
    tchans_per_block : int
        Final number of time bins in fine resolution product, per data block
    num_bits : int
        Number of bits in requantized data (for saving into file). Can be 8 or 4.
    num_pols : int
        Number of polarizations recorded
    num_branches : int
        Number of branches in polyphase filterbank
    num_chans : int
        Number of coarse channels written to file
    fftlength : int
        FFT length to be used in fine channelization
    int_factor : int, optional
        Integration factor to be used in fine channelization

    Returns
    -------
    block_size : int
        Block size, in bytes
    """
    obsnchan = num_chans * num_antennas
    bytes_per_sample = 2 * num_pols * num_bits // 8
    T = tchans_per_block * fftlength * int_factor
    block_size = T * obsnchan * bytes_per_sample
    return block_size
25d667f84ddaaf25b0cb4bae48a0ab1f363bc63a
36,329
def dimensionsKeepAspect(targetWidth, targetHeight, oldWidth, oldHeight):
    """
    Gives resizing dimensions to keep an image within (targetWidth, targetHeight)
    while preserving the original aspect ratio. Does not upsize images smaller
    than the target dimensions.
    """
    if (oldWidth < targetWidth) and (oldHeight < targetHeight):
        return (int(oldWidth), int(oldHeight))

    oldAspect = oldWidth / float(oldHeight)
    newAspect = targetWidth / float(targetHeight)

    if oldAspect > newAspect:
        newWidth = targetWidth
        newHeight = targetWidth / oldAspect
        return (int(newWidth), int(newHeight))
    elif oldAspect < newAspect:
        newHeight = targetHeight
        newWidth = targetHeight * oldAspect
        return (int(newWidth), int(newHeight))
    else:
        # Aspect ratios are equal, so the target dimensions fit exactly.
        return (int(targetWidth), int(targetHeight))
9763638a9a4334dcc22f2d38f1e6b4a8fda1d1b6
36,330
def calculate_IonS(salt):
    """DOE handbook, Chapter 5, p. 13/22, eq. 7.2.4"""
    return 19.924 * salt / (1000 - 1.005 * salt)
7bb10b47580831b49c7f8f2b9af3fce31272eb43
36,342
def str_dict(input_dict):
    """
    Convert all the values in a dictionary to str

    :param input_dict:
    :type input_dict: dict[any: any]
    :return: dictionary including str values
    :rtype: dict[any: str]
    """
    return {key: "{}".format(value) for key, value in input_dict.items()}
cc893ede066a50426990e4e578151d8ef97bfb55
36,346
from typing import Union
from pathlib import Path
from typing import Any
import pickle


def read_pkl(pkl: Union[str, Path]) -> Any:
    """
    Read pickle file.

    Parameters
    ----------
    pkl : str or pathlib.Path
        The path of the pickle file.

    Returns
    -------
    obj : Any
        Restored object.
    """
    with open(pkl, "rb") as f:
        obj = pickle.load(f)
    return obj
e98815a571e4812b654cf264f53e044f7b31ae8a
36,347
def alignment_percentage(document_a, document_b, model):
    """
    Returns the percentage of alignments of `document_a` and `document_b`
    using the model provided.

    - `document_a` and `document_b` are two lists of Sentences to align.
    - `model` can be a YalignModel or a path to a yalign model.
    """
    if len(document_a) == 0 and len(document_b) == 0:
        return 100.0
    align = model.align(document_a, document_b)
    align = [x for x in align if x[0] is not None and x[1] is not None]
    ratio = len(align) / float(max(len(document_a), len(document_b)))
    return round(ratio * 100, 2)
dbd11fbdf9649e6f2c3a20e8407a799a3faeb428
36,355
import re


def format_card(card_num):
    """
    Formats card numbers to remove any spaces, unnecessary characters, etc

    Input: Card number, integer or string
    Output: Correctly formatted card number, string
    """
    card_num = str(card_num)
    # Regex to remove any nondigit characters
    return re.sub(r"\D", "", card_num)
0c967a499a71ff8934b7578a3f62a31874f22ca2
36,357
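Example:

assert format_card("1234-5678 9012") == "123456789012"
assert format_card(1234567890) == "1234567890"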
def _produce_scores(scores, exp):
    """Produces the scores dictionary.

    Args:
        scores: Is either the score dictionary, or a function that produces
            the score dictionary based on an experiment.
        exp: The experiment object.

    Returns:
        The dictionary of scores.
    """
    if isinstance(scores, dict):
        return scores
    return scores(exp)
bd7d3e4883f294ba7daed987a555c8e5310160e7
36,362
def bessel_spoles(n, Ts=1):
    """
    Return the roots of the reverse Bessel polynomial normalized to the given
    settling time. The settling time is 1 second by default.

    Adapted from Digital Control: A State-Space Approach, Table 6.3.

    Args:
        n: The order of the Bessel polynomial.
        Ts (optional): The settling time to scale to.

    Returns:
        list: The roots of the Bessel polynomial.
    """
    spoles = [0]
    if n == 1:
        spoles = [-4.6200 + 0j]
    elif n == 2:
        spoles = [-4.0530 + 2.3400j, -4.0530 - 2.3400j]
    elif n == 3:
        spoles = [-5.0093 + 0j,
                  -3.9668 + 3.7845j, -3.9668 - 3.7845j]
    elif n == 4:
        spoles = [-4.0156 + 5.0723j, -4.0156 - 5.0723j,
                  -5.5281 + 1.6553j, -5.5281 - 1.6553j]
    elif n == 5:
        spoles = [-6.4480 + 0j,
                  -4.1104 + 6.3142j, -4.1104 - 6.3142j,
                  -5.9268 + 3.0813j, -5.9268 - 3.0813j]
    elif n == 6:
        spoles = [-4.2169 + 7.5300j, -4.2169 - 7.5300j,
                  -6.2613 + 4.4018j, -6.2613 - 4.4018j,
                  -7.1205 + 1.4540j, -7.1205 - 1.4540j]
    elif n == 7:
        # The original listing was missing commas between two conjugate pairs,
        # which silently summed adjacent poles instead of listing seven roots.
        spoles = [-8.0271 + 0j,
                  -4.3361 + 8.7519j, -4.3361 - 8.7519j,
                  -6.5714 + 5.6786j, -6.5714 - 5.6786j,
                  -7.6824 + 2.8081j, -7.6824 - 2.8081j]
    elif n == 8:
        spoles = [-4.4554 + 9.9715j, -4.4554 - 9.9715j,
                  -6.8554 + 6.9278j, -6.8554 - 6.9278j,
                  -8.1682 + 4.1057j, -8.1682 - 4.1057j,
                  -8.7693 + 1.3616j, -8.7693 - 1.3616j]
    elif n == 9:
        spoles = [-9.6585 + 0j,
                  -4.5696 + 11.1838j, -4.5696 - 11.1838j,
                  -7.1145 + 8.1557j, -7.1145 - 8.1557j,
                  -8.5962 + 5.3655j, -8.5962 - 5.3655j,
                  -9.4013 + 2.6655j, -9.4013 - 2.6655j]
    elif n == 10:
        spoles = [-4.6835 + 12.4022j, -4.6835 - 12.4022j,
                  -7.3609 + 9.3777j, -7.3609 - 9.3777j,
                  -8.9898 + 6.6057j, -8.9898 - 6.6057j,
                  -9.9657 + 3.9342j, -9.9657 - 3.9342j,
                  -10.4278 + 1.3071j, -10.4278 - 1.3071j]

    return [spole / Ts for spole in spoles]
4e2191c9c1201997e77da314316ef1ebc208f7d4
36,364
from typing import List
from typing import Tuple


def words_to_ngrams(words: List[str]) -> List[Tuple[str]]:
    """Convert a list of words to uni-grams

    :param words: The list of words to convert
    :return: A list of the same size, containing single-element tuples of uni-grams.
    """
    return [(w,) for w in words]
3d4fa456bd9c8bbe034b9441521bdc62940f8cf9
36,366
def _copy_non_t_vars(data0, data1):
    """Copies non-t-indexed variables from data0 into data1, then returns data1"""
    non_t_vars = [v for v in data0.data_vars if 't' not in data0[v].dims]
    # Manually copy over variables not in `t`. If we don't do this,
    # these vars get polluted with a superfluous `t` dimension
    for v in non_t_vars:
        data1[v] = data0[v]
    return data1
81c0a21f61fd284fd572383acff2ac8744101777
36,371
from typing import Dict
from typing import Any
from typing import List


def filter_dict(mydict: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
    """Filter dictionary by desired keys.

    Args:
        mydict: dictionary with strings as keys.
        keys: a list of key names to keep.

    Returns:
        the same dictionary only at the desired keys.
    """
    return {k: v for k, v in mydict.items() if k in keys}
339101fc16fc69110f3d470668eda455f430c062
36,377
import random


def gen_individual(toolbox, config, out_type='out'):
    """
    Generates a random tree individual using the toolbox and arity and height
    configuration. The ``out_type`` parameter specifies the output type of the
    root of the tree.

    :param toolbox: Toolbox which contains methods to create the individual.
    :param GenensConfig config: Configuration of Genens
    :param out_type: Output type of the root of the tree individual.
    :return: A new random tree individual.
    """
    arity = random.randint(config.min_arity, config.max_arity)  # randint is inclusive for both limits
    height = random.randint(config.min_height, config.max_height)

    return toolbox.individual(max_height=height, max_arity=arity, first_type=out_type)
5fb0d051502cd9764e8bedc31dc2c717200f618f
36,378
def top_n(n: int, tokens: list) -> list:
    """Return a list of the top-n unique elements of the dataset.

    Assumes `tokens` is already ordered by rank, so the first `n` entries
    are the top-n elements.

    Arguments:
        n {int} -- number of top unique elements to return

    Returns:
        list -- array of top-n elements within a dataset
    """
    top_n_elements = tokens[:n]
    return top_n_elements
6d0e03b3d041edb460a874394af171cf589b2b1b
36,380
from typing import List
from typing import Type


def validate_objects(elements: List, element_class: Type) -> bool:
    """
    Check if all the elements are instances of the element_class.

    Args:
        elements: List of objects that should be instances of element_class.
        element_class: class of the objects.

    Returns:
        True if all the elements are instances of element_class.

    Raises:
        ValueError: if one element is not an instance of element_class.
    """
    for element in elements:
        if not isinstance(element, element_class):
            raise ValueError(f"{element} is not a {element_class}")
    return True
cd913ef4005d5360f8c2053ae30a72173e41d886
36,381
def check_day(n):
    """
    Given an integer between 1 and 7 inclusive, return either string 'work!'
    or string 'rest!' depending on whether the day is a workday or not
    """
    if n < 1 or n > 7:
        return None
    if n >= 6:
        return "rest!"
    return "work!"
f7329fb411a737c2fb9799a37a0bb52e28d4db83
36,382
def tolist(x):
    """Convert x to a list"""
    return x if isinstance(x, list) else [x]
eb987b9766d09d7a36f1e671e5cf266296c0604e
36,383
def get_index_of_table_a_1(type):
    """Return the row number in Table A.1.

    Args:
        type(str): Heating/cooling equipment installed in the main living room

    Returns:
        int: Row number in Table A.1
    """
    # Keys are the original Japanese equipment names used by the data source.
    key_table = {
        '電気蓄熱暖房器': 0,
        '温水暖房用パネルラジエーター': 1,
        '温水暖房用床暖房': 2,
        '温水暖房用ファンコンベクター': 3,
        'ルームエアコンディショナー': 4,
        'FF暖房機': 5,
        '電気ヒーター床暖房': 6,
        'ルームエアコンディショナー付温水床暖房機': 7
    }
    return key_table[type]
70f2087e08d05520bb1db7fb04fa2f66b8f5d445
36,385
def neg_network(network):
    """
    Perform the operation ``-1 * network`` where ``network`` is an instance
    of ``Network``.
    """
    return network * -1
da1e59f09ca8c91efb6fdaa22bf03e58a6fb7e43
36,393
def cell_width(cell_name):
    """Set the width of the cells from the pdf report file."""
    if cell_name == "No":
        table_cell_width = 6
    elif cell_name == "Phrase":
        table_cell_width = 73
    elif cell_name == "Question":
        table_cell_width = 57
    else:
        table_cell_width = 25
    return table_cell_width
fac9e9dc0f8ad3cac09ed3054a798e7d832cdcc6
36,396
def get_shape_from_dims(axis_0, axis_1=None):
    """
    Constructs a chain shape tuple from an array-like axis_0 and an optional
    array-like axis_1

    :param axis_0: Iterable of dimensions for the first leg on each site of the chain
    :param axis_1: Optional iterable of dimensions for the second leg on each site
        of the chain (of the same size as axis_0)
    :return: chain shape tuple
    """
    if axis_1 is None:
        return tuple([(dim,) for dim in axis_0])
    else:
        assert len(axis_0) == len(axis_1)
        return tuple([(dim0, dim1) for dim0, dim1 in zip(axis_0, axis_1)])
3508654e6b94ab515bf1975755d95923775c4849
36,399
def add_node(nodes, parent, time):
    """Adds a node with specified parent and creation time. This function
    assumes that a node has been already allocated by "child" functions
    `add_node_classifier` and `add_node_regressor`.

    Parameters
    ----------
    nodes : :obj:`Nodes`
        The collection of nodes.

    parent : :obj:`int`
        The index of the parent of the new node.

    time : :obj:`float`
        The creation time of the new node.

    Returns
    -------
    output : `int`
        Index of the new node.
    """
    node_index = nodes.n_nodes
    nodes.index[node_index] = node_index
    nodes.parent[node_index] = parent
    nodes.time[node_index] = time
    nodes.n_nodes += 1
    return nodes.n_nodes - 1
d204d618114b13ffcbfc038c91884f4a87e743c0
36,405
def scale(y, yerr):
    """
    Standardization of a given dataset y

    Parameters:
        y = data; subtract mean, divide by std
        yerr = errors; divide by std of y

    Returns:
        standardized y and yerr
    """
    m, s = y.mean(), y.std()
    return (y - m) / s, yerr / s
5985fa930470d1535b4befde5878ce325f1dc86b
36,414
def isoforest_label_adjust(pred_func):
    """Adjusts isolation forest predictions to be 1 for outliers, 0 for inliers.

    By default the scikit-learn isolation forest returns -1 for outliers and 1
    for inliers, so this method is used to wrap fit_predict or predict methods
    and return 0 for inliers, 1 for outliers.

    :param pred_func: Scikit-learn prediction function that returns a flat
        :class:`numpy.ndarray` of labels ``-1`` and ``1``.
    :type pred_func: function or method
    :rtype: function
    """
    def adjust_pred_func(*args, **kwargs):
        res = pred_func(*args, **kwargs)
        # Relabel inliers (1 -> 0) before outliers (-1 -> 1); the reverse
        # order would overwrite the freshly assigned 1s with 0s.
        res[res == 1] = 0
        res[res == -1] = 1
        return res

    return adjust_pred_func
11ac61f691404525a357a32e725c30dd2675a85a
36,415
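A usage sketch with a stand-in prediction function (the lambda below is hypothetical, standing in for something like IsolationForest.predict):

import numpy as np

fake_predict = lambda X: np.array([-1, 1, 1, -1])
adjusted = isoforest_label_adjust(fake_predict)
assert list(adjusted(None)) == [1, 0, 0, 1]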
def join_list(words: list, join_s: str):
    """
    Join the strings in the list 'words' into a single string, separated by 'join_s'.

    :param words: strings to be joined
    :param join_s: separator string
    :return: joined string
    """
    return join_s.join(words)
cf7641ec81d7284c0ec6f65085567a572310d193
36,416
def write_gwosc_string(config, ifos, outdir):
    """
    Write command string to execute bajes_read_gwosc.py given a config file
    """
    read_string = 'bajes_read_gwosc.py --outdir {} '.format(outdir)

    try:
        read_string += '--event {} '.format(config['gw-data']['event'])
    except Exception:
        pass

    try:
        read_string += '--version {} '.format(config['gw-data']['version'])
    except Exception:
        pass

    for ifo in ifos:
        read_string += '--ifo {} '.format(ifo)

    read_string += '--seglen {} '.format(config['gw-data']['seglen'])
    read_string += '--srate {} '.format(config['gw-data']['srate'])
    read_string += '--t-gps {} '.format(config['gw-data']['t-gps'])

    return read_string
2c42d4956764b4328a92792cb6b6087fbb2daf57
36,417
import gc
import time


def run_test(func, fobj):
    """Run func with argument fobj and measure execution time.

    @param func: function for test
    @param fobj: data for test
    @return: execution time
    """
    gc.disable()
    try:
        begin = time.time()
        func(fobj)
        end = time.time()
    finally:
        gc.enable()
    return end - begin
c9851736cb5ad3886565fc3a06cb8ef2c2ee5478
36,420
def _CheckNoIn(input_api, output_api):
    """Checks that corpus tests don't contain .in files. Corpus tests should
    be .pdf files, having both can cause race conditions on the bots, which
    run the tests in parallel.
    """
    results = []
    for f in input_api.AffectedFiles(include_deletes=False):
        if f.LocalPath().endswith('.in'):
            results.append(output_api.PresubmitError(
                'Remove %s since corpus tests should not use .in files'
                % f.LocalPath()))
    return results
814b7b52c59c373a3d7ec010f99b85d602b8dd02
36,421
import asyncio
import functools


def schedule_coroutine(delay, coro_func, *args, **kwargs):
    """
    Creates a coroutine out of the provided coroutine function coro_func and
    the provided args and kwargs, then schedules the coroutine to be called
    on the running event loop after delay seconds (delay can be float or int).

    Returns an asyncio.TimerHandle on which cancel() can be called to cancel
    the scheduled running of the coroutine.

    The coro_func parameter should not be a coroutine, but rather a coroutine
    function (a function defined with async). The reason for this is we want
    to defer creation of the actual coroutine until we're ready to schedule it
    with "ensure_future". Otherwise every time we cancel a TimerHandle returned
    by "call_later", python prints "RuntimeWarning: coroutine '<coro-name>' was
    never awaited".
    """
    # See method above for comment on "get_event_loop()" vs "get_running_loop()".
    loop = asyncio.get_event_loop()
    coro_partial = functools.partial(coro_func, *args, **kwargs)
    return loop.call_later(delay, lambda: asyncio.ensure_future(coro_partial()))
4df7fc85d20f15a5c1850f1d2d4b2645b22445cd
36,422
def pop_node_record(records):
    """Pops (removes and returns) a node record from the stack of records.

    Parameters
    ----------
    records : :obj:`Records`
        A records dataclass containing the stack of node records

    Returns
    -------
    output : tuple
        A tuple with eight elements containing the attributes of the node
        removed from the stack. These attributes are as follows:

        - parent : int, Index of the parent node
        - depth : int, Depth of the node in the tree
        - is_left : bool, True if the node is a left child, False otherwise
        - impurity : float, Impurity of the node. Used to avoid to split a
          "pure" node (with impurity=0).
        - start_train : int, Index of the first training sample in the node.
          We have that partition_train[start_train:end_train] contains the
          indexes of the node's training samples
        - end_train : int, End-index of the slice containing the node's
          training samples indexes
        - start_valid : int, Index of the first validation (out-of-the-bag)
          sample in the node. We have that partition_valid[start_valid:end_valid]
          contains the indexes of the node's validation samples
        - end_valid : int, End-index of the slice containing the node's
          validation samples indexes
    """
    records.top -= 1
    stack_top = records.stack[records.top]
    return (
        stack_top["parent"],
        stack_top["depth"],
        stack_top["is_left"],
        stack_top["impurity"],
        stack_top["start_train"],
        stack_top["end_train"],
        stack_top["start_valid"],
        stack_top["end_valid"],
    )
2cc535ab444c7cb86a544ec2f98de176fcc546db
36,423
from typing import Sequence
import re


def _validate_against_blacklist(string: str, blacklist: Sequence[str]):
    """Validates a string against a regex blacklist."""
    return not any(re.search(pattern, string, re.IGNORECASE) for pattern in blacklist)
7c1e3ca9b9b295d3927d8b516f0562a2a04858af
36,428
def cycles_per_trial(nwb):
    """Get the number of microscope cycles/trial.

    That is, the number of times each point is imaged in each trial. Currently
    looks at the first imaging timeseries in the first trial, and assumes
    they're all the same.
    """
    trial1 = nwb['/epochs/trial_0001']
    for ts_name in trial1:
        ts = trial1[ts_name]
        is_image_series = ts['timeseries/pixel_time_offsets'] is not None
        if is_image_series:
            return ts['count'].value
    # Only raise once every timeseries has been checked.
    raise ValueError('No imaging timeseries found')
dbe421716267669041ee654bb357426259ed2703
36,429
def format_name(f_name, l_name):
    """Take a first and last name and format it to return the title case
    version of the name.

    Args:
        f_name ([string])
        l_name ([string])

    Returns:
        f_name + l_name in title case
    """
    if f_name == "" or l_name == "":
        return "You did not enter a valid name!"
    full_name = f"{f_name} {l_name}"
    return full_name.title()
59c7a9b135d0001c7bc1a378bbcbfb68a4dfa36a
36,431
import re


def parse_num(s):
    """Parse data size information into a float number.

    Here are some examples of conversions:
        199.2k means 203981 bytes
        1GB means 1073741824 bytes
        2.1 tb means 2199023255552 bytes
    """
    g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
    if not g:
        raise ValueError("can't parse %r as a number" % s)
    (val, unit) = g.groups()
    num = float(val)
    unit = unit.lower()
    if unit in ['t', 'tb']:
        mult = 1024*1024*1024*1024
    elif unit in ['g', 'gb']:
        mult = 1024*1024*1024
    elif unit in ['m', 'mb']:
        mult = 1024*1024
    elif unit in ['k', 'kb']:
        mult = 1024
    elif unit in ['', 'b']:
        mult = 1
    else:
        raise ValueError("invalid unit %r in number %r" % (unit, s))
    return int(num*mult)
721cc52cdf543bcaf3ff77d8059a7bfe6cb6d892
36,433
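Spot checks with exact power-of-two units (units are case-insensitive; the final value is truncated by int()):

assert parse_num('1GB') == 1024 ** 3
assert parse_num('4k') == 4096
assert parse_num('512') == 512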
def rstrip_line(line):
    """Removes trailing whitespace from a string (preserving any newline)"""
    if line[-1] == '\n':
        return line[:-1].rstrip() + '\n'
    return line.rstrip()
adb3b707ddb450996b1677e68ad1861a76313cc6
36,438
import inspect
import typing


def get_type(type):
    """
    Helper function which converts the given type to a torchScript acceptable format.
    """
    if isinstance(type, str):
        return type
    elif inspect.getmodule(type) == typing:
        # If the type is a type imported from typing
        # like Tuple, List, Dict then replace `typing.`
        # with a null string. This needs to be done since
        # typing.List is not accepted by TorchScript.
        type_to_string = str(type)
        return type_to_string.replace(type.__module__ + '.', '')
    elif type.__module__.startswith('torch'):
        # If the type is a subtype of torch module, then TorchScript expects
        # a fully qualified name for the type which is obtained by combining
        # the module name and type name.
        return type.__module__ + '.' + type.__name__
    else:
        # For all other types use the name for the type.
        return type.__name__
116e169107460cd0807257eef303d4959323319b
36,440
def pow_mod(a: int, b: int, p: int) -> int:
    """
    Computes a^b mod p using repeated squaring.

    :param a: int
    :param b: int
    :param p: int
    :return: int, a^b mod p
    """
    result = 1
    while b > 0:
        if b & 1:
            result = (result * a) % p
        a = (a * a) % p
        b >>= 1
    return result
e84085c1ed5e4c9c321f42a3a6275537736789bc
36,451
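Quick checks against Python's built-in three-argument pow:

assert pow_mod(2, 10, 1000) == pow(2, 10, 1000) == 24
assert pow_mod(3, 0, 7) == 1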
import functools


def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling results.

    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.

    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    if isinstance(func_in, functools.partial):
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
    latex_name_dict = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
    # Add credible interval names
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        # format percent without trailing zeros
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
    latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
    try:
        return latex_name_dict[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' + func.__name__,)
        raise
1eb376cd597e78a4d7e7c1c0144fc904ffa3ea60
36,452
import yaml


def create_blacklist(config_file):
    """
    Generate a list of images which need to be excluded from docker_image_list.

    :param config_file: application_configuration file where images are.
    :return: list of image paths to exclude
    """
    with open(config_file, 'r') as f:
        contents = yaml.load(f, Loader=yaml.SafeLoader)
    blacklist = []
    for name, _ in contents['runtime_images'].items():
        path = contents['runtime_images'][name]['path']
        blacklist.append(path[1:])
    return blacklist
9d0b7ae78b83670f06390abb43d2b19d3f3342e0
36,456
def join_segments_raw(segments):
    """Make a string from the joined `raw` attributes of an iterable of segments."""
    return "".join(s.raw for s in segments)
ba2f3d1157ee505daaec3919c4e70ad6e49e5db2
36,457