content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def crop(img, i, j, h, w):
    """Crop a PIL.Image to the given rectangle.

    Args:
        img (PIL.Image): Image to crop.
        i: Upper pixel coordinate (row).
        j: Left pixel coordinate (column).
        h: Height of the crop.
        w: Width of the crop.

    Returns:
        PIL.Image: The cropped image.
    """
    # PIL's crop box is (left, upper, right, lower).
    box = (j, i, j + w, i + h)
    return img.crop(box)
9cd2f3aa355bad6909fcb0c0eac0ce7c385bd828
272,177
def parse_message(error_msg):
    """Prepare an error message for translation.

    Keeps only the text before the first newline or carriage return,
    checking for ``'\n'`` first, then ``'\r'``.

    :param error_msg: The original error message
    :type error_msg: str
    :returns: The prepared error message
    """
    for terminator in ('\n', '\r'):
        if terminator in error_msg:
            return error_msg.split(terminator)[0]
    return error_msg
5394670b8466881b8de40f6186ab4960f24bf0fe
303,871
def from_uint256(value: bytes) -> int:
    """Decode a big-endian uint256 byte string into a Python int."""
    decoded = int.from_bytes(value, byteorder="big")
    return decoded
06f967e8d7356c3bcee1e9f64bcea010072fac4e
524,160
def is_topk_evaluator(evaluator_keys):
    """Return True if the keys are exactly those of a TopK evaluator
    (top_1 .. top_5, in order)."""
    expected = ['top_1', 'top_2', 'top_3', 'top_4', 'top_5']
    # List comparison covers both the length check and the per-index checks.
    return list(evaluator_keys) == expected
77b0d446118497bb1cb7be6c559f3193722eaf9c
665,439
def eigvector_uncoupled(par):
    """Return the correction-factor flags for the eigenvectors used in the
    linear guess of the unstable periodic orbit.

    Parameters
    ----------
    par : list of float
        Model parameters (unused for the uncoupled system; both flags are
        always enabled).

    Returns
    -------
    correcx : int
        1 to use the x-component of the eigenvector, 0 otherwise.
    correcy : int
        1 to use the y-component of the eigenvector, 0 otherwise.
    """
    return 1, 1
7d05aeee2a930a8ce712c4d134c82884717d3160
613,283
def copy_and_update(target, updater):
    """Return a copy of ``target`` updated with ``updater`` in one step.

    For example::

        >>> a = {'foo': 'bar'}
        >>> b = copy_and_update(a, {1: 2})
        >>> a is b
        False
        >>> b == {'foo': 'bar', 1: 2}
        True
    """
    merged = target.copy()
    merged.update(updater)
    return merged
1e2c78fb0aca6a7505b8053e3cbf06d9fbdd90f0
385,112
def pad_gtin(app_identifier, value):
    """
    Pad the value of any GTIN [ AI (01) or (02) ] with leading zeros to
    14 digits in the element string representation.

    Generalized: ``str.zfill`` pads any shorter GTIN (8, 12, or 13 digits —
    the standard lengths — and anything else below 14) instead of only the
    three hard-coded lengths; 14-digit values pass through unchanged.

    :param app_identifier: Application identifier.
    :param value: The GTIN string.
    :return: GTIN string left-padded with zeros to 14 digits.
    """
    if app_identifier in ("01", "(01)", "02", "(02)"):
        return value.zfill(14)
    return value
5463e9ffd1b974b3d0376f481b7baf5862d431ed
101,280
def split_all_category_roots(cat):
    """Take a category like ``foo/bar/baz`` and return it with all of its
    roots, most specific first: ``['foo/bar/baz', 'foo/bar', 'foo']``.

    Fix: the original rpartition loop emitted a duplicated root for
    categories with a trailing slash (e.g. ``'foo/'`` -> ``['foo/', 'foo',
    'foo']``); this split-based version yields each prefix exactly once.
    """
    if not cat or '/' not in cat:
        return [cat]
    parts = cat.split('/')
    # Join progressively shorter prefixes: n parts, n-1 parts, ..., 1 part.
    return ['/'.join(parts[:i]) for i in range(len(parts), 0, -1)]
87ff4f7bdbcd0b45a8833318987547180f3fc6a8
350,640
def charge(mol):
    """Return the total charge of *mol*, summed over all of its atoms."""
    total = 0
    for _, atom in mol.atoms_iter():
        total += atom.charge
    return total
c849fe6c25c0e778c12fc9d04c38421ac7d32651
62,631
def is_exception(host):
    """Return True when *host* is an "exception" domain.

    Exceptions are domain names such as google.co.uk or hire.mil.gov, where
    the effective top level domain is co.uk or mil.gov rather than .uk or
    .gov. Such domains are one level higher than they would ordinarily be:
    google.co.uk is a 3rd level domain but for practical purposes should be
    treated as a 2nd level domain.

    >>> is_exception('')
    False
    >>> is_exception('google.com')
    False
    >>> is_exception('google.co.uk')
    True
    >>> is_exception('hire.mil.gov')
    True
    >>> is_exception('indiana.edu')
    False
    >>> is_exception('indiana.edu.us')
    True
    >>> is_exception('whitehouse.gov')
    False
    """
    markers = (".com.", ".net.", ".org.", ".edu.", ".mil.", ".gov.", ".co.")
    return any(marker in host for marker in markers)
22e2ca9b8290c1b9d2fab93c503bbd37d578c7de
256,331
def char_at(s, index):
    """
    Return s[index] as an int code point.

    Fix: the original only checked ``index < len(s)``, so a negative index
    silently wrapped around (Python indexing) instead of returning -1 as
    documented. Now any index outside ``[0, len(s))`` yields -1.

    Args:
        s: the string to index into.
        index: the position to read.

    Returns:
        value (int): the int value of s[index], -1 when out of range.
    """
    if 0 <= index < len(s):
        return ord(s[index])
    return -1
36b811d88b19af4f3fdcb613a05253b7b58d11a2
379,123
def _check_valid_ref_file(refname): """ refname should not start with / (ie absoulte path) or .. """ if refname.startswith('/'): return False if refname.startswith('..'): return False return True
4f7de9f471e50df51b1dcfa12f5c41ac28542174
137,396
from typing import List


def hyphen_range(string: str) -> List[int]:
    """
    Expand a string of numbers separated by commas and hyphens into a list
    of integers. For example: 2-3,5-7,20-21,23,100-200

    :param string: the comma/hyphen expression to expand.
    :return: the expanded list of ints, in order of appearance.
    :raises ValueError: when an element has more than one hyphen.
        (ValueError subclasses Exception, so existing broad handlers
        continue to work.)
    """
    numbers: List[int] = []
    for element in string.split(","):
        bounds = element.split("-")
        if len(bounds) == 1:
            numbers.append(int(bounds[0]))
        elif len(bounds) == 2:
            # Ranges are inclusive on both ends.
            numbers.extend(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            raise ValueError(
                "Something went wrong expanding the range {}".format(string)
            )
    return numbers
2a8ebd5a60c180e722db053e4e9d4c69808d8fc8
614,500
def square_crop(im, target_size=None):
    """
    Center-crop *im* to a square of side ``target_size``. When target_size
    is None the image is squared to its smaller dimension.

    Fix: the offsets previously used true division, producing a float crop
    box for odd differences and relying on PIL to truncate; integer
    division yields a well-defined integer pixel box.
    """
    w = im.size[0]
    h = im.size[1]
    target_size = target_size if target_size else min(w, h)
    dx = (w - target_size) // 2
    dy = (h - target_size) // 2
    return im.crop((dx, dy, dx + target_size, dy + target_size))
28fb58b21ca4b15e6d48c9345fb31aa333cd7276
43,038
def run_length_encode(data):
    """
    Encode *data* using run-length encoding (RLE).
    See: https://en.wikipedia.org/wiki/Run-length_encoding

    Fix: an empty input previously raised IndexError on ``data[0]``; it now
    returns [].

    Args:
        data: list, corresponding to the input data.

    Returns:
        list of alternating [count, value, count, value, ...] pairs.
    """
    if not data:
        return []
    compressed = []
    current = data[0]
    run = 1
    for item in data[1:]:
        if item == current:
            run += 1
        else:
            compressed.extend([run, current])
            current = item
            run = 1
    # Flush the final run.
    compressed.extend([run, current])
    return compressed
796e8044c4cd22b63354162d22746e0da1dda51f
268,777
def grouper(some_list, count=2):
    """Split a list into consecutive sublists of length *count*
    (the last chunk may be shorter).

    given: [1, 2, 3, 4]
    returns: [[1, 2], [3, 4]]
    """
    chunks = []
    for start in range(0, len(some_list), count):
        chunks.append(some_list[start:start + count])
    return chunks
9cd9f79a077b0cd64fd51dc0c6b4ea7ede2188fa
672,580
def int_to_string(x):
    """Convert non-negative integer *x* into a big-endian byte string,
    as per X9.62. Zero encodes as a single zero byte."""
    assert x >= 0
    # max(1, ...) ensures x == 0 still produces one byte (b'\x00'),
    # matching the hand-rolled shift loop this replaces.
    length = max(1, (x.bit_length() + 7) // 8)
    return x.to_bytes(length, "big")
630a763b16fd24c363447f936d58119d781f29e8
385,208
from typing import Sequence from typing import Union import re def _parse_problem_sizes(argument: str) -> Sequence[Union[int, Sequence[int]]]: """Parse a problem size argument into a possibly nested integer sequence. Examples: 64,128 -> [64, 128] 32,32,[1,1] -> [32, 32, [1, 1]] """ problem_sizes = [] while argument: # Match size. match = re.match(r"""[,]?\d+""", argument) if match: problem_sizes.append(int(match.group().lstrip(','))) argument = argument[match.end():] continue # Match nested sizes. match = re.match(r"""[,]?\[[0-9,]+\]""", argument) if match: nested = match.group().lstrip(',')[1:-1] problem_sizes.append([int(elem) for elem in nested.split(',')]) argument = argument[match.end():] continue raise ValueError() return problem_sizes
9ac8055a4cfb26d11d19cba2de58d1b797fc2125
63,263
def removeCommentFromLine(line):
    """
    Remove a C++/Java style comment from a line of text — i.e. everything
    from the first '//' to the end of the line.

    NOTE(review): this does not account for '//' appearing inside a string
    literal; such lines would be truncated too.
    """
    marker = line.find('//')
    if marker < 0:
        return line
    return line[:marker]
3931bf7605414f07a8037083362291c7d75cbdbf
492,228
import re


def get_prefixlength(ip):
    """Return the network prefix length (in bits) of a CIDR string such as
    '10.0.0.0/24'. Input that does not match the CIDR pattern is returned
    unchanged (preserving the original fall-through behavior)."""
    match = re.search(r'(\d+)[.](\d+)[.](\d+)[.](\d+)[/](\d+)', ip)
    if not match:
        return ip
    # Group 5 is the digits after the '/'.
    return int(match.group(5))
6ac8e0c827175e13e8adb30010c93be46a35b49f
639,647
def getCurvesListWithDifferentCurveName(originalCurveList, origCurve, newCurve):
    """
    Take a list of curves, a curve name to be replaced, and a curve name to
    replace it with. Return a copy of the list with the original curve name
    removed and the new one appended; when the original is absent the copy
    is returned unchanged (with a diagnostic print).

    Fix: the bare ``except`` previously swallowed every error, not just the
    ValueError that ``list.remove`` raises for a missing element.
    """
    plentifulCurves_wDEPTH = originalCurveList.copy()
    try:
        plentifulCurves_wDEPTH.remove(origCurve)
        plentifulCurves_wDEPTH.append(newCurve)
    except ValueError:
        print(
            "did not find ",
            origCurve,
            " when attempting to replace it with ",
            newCurve,
            " in the getCurvesListWithDifferentCurveName function",
        )
    return plentifulCurves_wDEPTH
7e0f4af1d54e416b6b95e0ee30b82876a80320a1
663,812
def esc_quotes(strng):
    """Return *strng* with every double and single quote backslash-escaped."""
    escaped = strng.replace('"', '\\"')
    escaped = escaped.replace("'", "\\'")
    return escaped
25956257e06901d4f59088dd2c17ddd5ea620407
706,403
def set_days_in_month(month_picked):
    """
    Return the slider configuration for the number of days in the selected
    month.

    Args:
        month_picked (str): month June through Sept

    Returns:
        days (int): number of days in that month
        marks (dict): day intervals for the slider tick marks
    """
    # July and August have 31 days; June and September have 30.
    if month_picked in ('July', 'August'):
        return 31, {1: '1', 10: '10', 20: '20', 31: '31'}
    return 30, {1: '1', 10: '10', 20: '20', 30: '30'}
e845c8fac2488cac796c9e10347c3fbd9be9691f
200,865
import configparser


def read_from_config(key):
    """Return the value of *key* from the DEFAULT section of appium.ini
    (read from the current working directory)."""
    parser = configparser.ConfigParser()
    parser.read("appium.ini")
    return parser["DEFAULT"][key]
65d4357a92fd1fe994b42ed3771136776d9b53c2
160,354
def to_img(x):
    """Denormalize tensor *x* from the [-1, 1] range into image range
    [0, 1], clamping any out-of-range values."""
    shifted = (x + 1) * 0.5
    return shifted.clamp(0, 1)
4ac9ed1d0a82773fafdf064a40d26fbb0c41f7b7
399,839
def _new_shape(op): """Shape helper function for new and _new_grad function below.""" return [op.inputs[0].shape]
21c04d555a610bb5c9731fd3911134ef4176fef2
389,007
def get_confirmation(message: str) -> bool:
    """Ask the user a yes/no question and re-prompt until they answer.

    :param message: message to ask
    :type message: str
    :return: True when the user answered 'y', False for 'n'
    :rtype: bool
    """
    answer = None
    # Keep prompting until we get an unambiguous y/n (case-insensitive).
    while answer not in ("y", "n"):
        answer = input("%s(y/n): " % message).lower()
    return answer == "y"
f13de953c8ada175975a8480efb1037456963f03
196,095
def get_text(answer: list) -> str:
    """Return only the text from the answers.text column, i.e. the first
    element of *answer*.

    Args:
        answer: the answer.
    """
    first = answer[0]
    return first
eddf15f182a869ff7a862e66ddb31b9238a869d3
103,275
def compute_resource_attributes(decos, compute_deco, resource_defaults):
    """
    Compute resource values taking into account defaults, the values specified
    in the compute decorator (like @batch or @kubernetes) directly, and
    resources specified via @resources decorator.

    Returns a dictionary of resource attr -> value (str).
    """
    assert compute_deco is not None
    # Use the value from resource_defaults by default
    result = {k: v for k, v in resource_defaults.items()}
    for deco in decos:
        # If resource decorator is used
        if deco.name == "resources":
            for k, v in deco.attributes.items():
                # ..we use the larger of @resources and @batch attributes
                my_val = compute_deco.attributes.get(k)
                if not (my_val is None and v is None):
                    # `or 0` treats None as zero so max() is well-defined.
                    result[k] = str(max(int(my_val or 0), int(v or 0)))
            # NOTE: returns as soon as the first @resources decorator is
            # processed; the compute-decorator override pass below runs
            # only when no @resources decorator is present.
            return result
    # If there is no resources decorator, values from compute_deco override
    # the defaults.
    for k, v in resource_defaults.items():
        if compute_deco.attributes.get(k):
            result[k] = str(compute_deco.attributes[k])
    return result
bbc02bfb4dab6bc4825b326f166b2a4d63bfc69d
676,142
def merge_dicts(dicta, dictb, path=None):
    """Merge dictb into dicta (recursively for nested dicts), returning dicta.
    On conflicting leaf values, dictb wins.

    Cleanup: removed a commented-out dead branch (a str-coercion fallback)
    left in the conflict case.

    >>> dicta = {'a': 1, 'b': 2}
    >>> dictb = {'b': 3, 'c': 4}
    >>> merge_dicts(dicta, dictb)
    {'a': 1, 'b': 3, 'c': 4}

    >>> dicta = {'a': 1, 'b': {'x': 10, 'z': 30}}
    >>> dictb = {'b': {'x': 10, 'y': 20}, 'c': 4}
    >>> merge_dicts(dicta, dictb)
    {'a': 1, 'b': {'x': 10, 'z': 30, 'y': 20}, 'c': 4}
    """
    if path is None:
        path = []
    for key in dictb:
        if key in dicta:
            if isinstance(dicta[key], dict) and isinstance(dictb[key], dict):
                merge_dicts(dicta[key], dictb[key], path + [str(key)])
            elif dicta[key] == dictb[key]:
                pass  # same leaf value
            else:
                # conflict: override the original value with the new one
                dicta[key] = dictb[key]
        else:
            dicta[key] = dictb[key]
    return dicta
a70baa38ed850672ed88950ae1afba99f8b3d927
138,853
def quote_path_component(text):
    """Wrap a path component in single quotes, escaping any backslashes
    and single quotes it contains."""
    # Backslashes first, so the escapes added for quotes are not re-escaped.
    escaped = text.replace("\\", "\\\\").replace("'", "\\'")
    return "'%s'" % escaped
8339f39850716d2dd9835c79c0b4ee465674ab96
533,864
from typing import List
from typing import Dict


def parse_stringdb_interactions(this_line: str, header_items: List) -> Dict:
    """Split one space-separated Drug Central line and zip it with the
    header names.

    Args:
        this_line: A string containing a line of text.
        header_items: A list of header items.

    Returns:
        item_dict: A dictionary mapping each header item to its value.
    """
    values = this_line.strip().split(" ")
    return dict(zip(header_items, values))
dbbcc9c98c5ce9cc7cd195c3e45c4c39f9d58b29
619,686
import math


def slide_split(num: int, num_val: int, num_test: int) -> list:
    """
    A slide dataset split scheme. The slide reference is the test set, so
    that the test sets among all result splits cover the whole dataset
    (except for the first few samples).

    Args:
        num (int): Total number of samples in the dataset
        num_val (int): Number of samples for validation dataset
        num_test (int): Number of samples for test dataset

    Returns:
        list: The list of dataset split folds. Each fold (an entry in the
        list) is a 3-tuple whose elements are train_index, val_index,
        test_index in order
    """
    assert num_val + num_test < num, "Sum of num_val and num_test should be less than num."
    index_all = list(range(num))
    index_splits = list()
    # Number of test-sized windows that fit after reserving num_val samples.
    num_folds = math.floor((num - num_val) / num_test)
    for fold_idx in range(num_folds):
        # Windows slide from the end of the dataset toward the start:
        # ... left ... val_index ... center ... test_index ... right
        left = num - (fold_idx + 1) * num_test - num_val
        center = num - (fold_idx + 1) * num_test
        right = num - fold_idx * num_test
        val_index = index_all[left:center]
        test_index = index_all[center:right]
        # NOTE(review): set difference does not guarantee a sorted
        # train_index ordering — confirm callers do not rely on order.
        train_index = list(set(index_all) - set(val_index) - set(test_index))
        index_splits.append((train_index, val_index, test_index))
        # NOTE(review): debug print of each fold; consider removing/logging.
        print(index_splits[-1])
    return index_splits
1ce97b0de6cb26b720d5f35c05d5c940cc77e9ee
59,478
def check_duplicate_index(df, verbose=True):
    """
    Check for duplicated labels in the index of a DataFrame.

    Prints the number of duplicate index entries and, when *verbose*, a
    preview of them. Returns the rows whose index label is duplicated
    (all occurrences, via ``keep=False``).

    Cleanup: ``if verbose == True`` replaced with idiomatic truthiness.
    """
    dupes = df[df.index.duplicated()]
    print('{} index duplicates'.format(dupes.shape[0]))
    if verbose:
        print('duplicates are:')
        print(dupes.head(3))
    return df[df.index.duplicated(keep=False)]
cc41b7b30c6699259e03c458f092f4ad0fa1892d
60,534
import json


def filterPOSTToJSON(body, fields):
    """Extract the named POST-body fields into a JSON string.

    Keys ending in '[]' are treated as multi-valued: the suffix is dropped
    and the values are fetched with ``body.getlist``.
    """
    selected = {}
    for field in fields:
        if field.endswith('[]'):
            selected[field[:-2]] = body.getlist(field)
        else:
            selected[field] = body[field]
    return json.dumps(selected)
21bbeac78a431bb8f381711f46c09144584484ca
199,837
def join_dict(keys, values):
    """
    Create a dictionary from lists of keys and values of equal length.
    Returns -1 (with an error message) when the lengths differ.
    """
    if len(keys) != len(values):
        print('Error: Attempting to create a dictionary from '
              'a key and value list of unequal length')
        return -1
    return dict(zip(keys, values))
8b0297b85cdd3bf07544f954ac21d1e0e6328a0f
42,911
def parse_input(txt_path):
    """Parse the input TXT file: each line holds 'wires | digits', both
    space-separated. Returns the parallel lists of wire tokens and
    four-digit tokens."""
    wires_list, digits_list = [], []
    with open(txt_path, encoding="UTF-8") as file:
        for line in file:
            wires, digits = line.strip().split(" | ")
            wires_list.append(wires.split(" "))
            digits_list.append(digits.split(" "))
    return wires_list, digits_list
47f392f08978e743ddcf61e5834b3fb0e33afc8a
345,835
def get_jd_image(prod):
    """Return the full URL of a JD product's lazily-loaded image
    (the protocol-relative 'data-lazy-img' attribute prefixed with http)."""
    img_tag = prod.find("div", class_="p-img").find("img")
    relative_url = img_tag.attrs["data-lazy-img"].strip()
    return "http:" + relative_url
c37154fa58cad1e75bb4f6483a34a08dc7469055
311,333
def get_marketing_cloud_visitor_id(visitor):
    """Return the Marketing Cloud visitor ID ("MCMID") for *visitor*,
    or None when visitor is falsy or the ID is absent."""
    if not visitor:
        return None
    return visitor.get_visitor_values().get("MCMID")
1194c50f099702e797c82b1c0551f302e7501939
476,953
def intersection(arrA, arrB):
    """Return True when the two 2D arrays share at least one row."""
    # Rows are converted to tuples so they are hashable set members.
    # from: https://stackoverflow.com/questions/24477270/
    #       python-intersection-of-2d-numpy-arrays
    rows_a = set(map(tuple, arrA))
    return not rows_a.isdisjoint(map(tuple, arrB))
9c78c675313fdc8078702f96d0ddf40bbb74e9a1
216,792
from typing import Sequence
from typing import Tuple


def count_origins(
    generated_texts: Sequence[Tuple[str, ...]],
    train_texts: Sequence[Tuple[str, ...]],
    dev_texts: Sequence[Tuple[str, ...]],
) -> Tuple[int, int, int]:
    """Count the proportion of generated texts that come from the train set,
    the dev set, or are novel.

    Useful when training a language model to gauge whether the model has
    simply memorized the training set. Most informative in constrained
    domains (e.g. short words) where exact repeats are plausible.

    Args:
    - generated_texts: a Sequence of texts, each a tuple of tokens.
    - train_texts: same format as `generated_texts`.
    - dev_texts: same format as `generated_texts`.

    Returns a triple of integer percentages: (in train, in dev, novel).
    """
    in_train = in_dev = is_novel = 0
    for text in generated_texts:
        if text in train_texts:
            in_train += 1
        elif text in dev_texts:
            in_dev += 1
        else:
            is_novel += 1
    total = len(generated_texts)
    return (
        int(in_train / total * 100),
        int(in_dev / total * 100),
        int(is_novel / total * 100),
    )
d5ab14d82a1ad32d43c321ab34c00ed67ffbce4a
362,290
def CreateMutForOligos(seq, primerlength, prefix, firstcodon):
    """Creates oligos to tile a gene and introduce NNS at each codon.

    *seq* : sequence of the gene. The gene itself should be upper case.
    The flanking regions and start / stop codons should be lower case.
    All upper case codons are randomized. The length of the lower case
    sequences at each end must be >= (primerlength - 3) / 2.0

    *primerlength* : length of primers. Must be an odd number, so that
    equal length flanking on each side.

    *prefix* : string prefix attached to primer names.

    *firstcodon* : number assigned to first codon in primer name.

    Tiles primers across the gene in the forward direction. The primers are
    all of length primerlength with NNS at the middle codon. Note that only
    upper case letters are randomized. Primers are named as follows:

    "%s-for-mut%d" % (prefix, i) -> 5' tiling primers, where i = 2, 3, ...
    In other words, the indices cover codons 2 and up.

    Returns a list of all these primers as *(name, sequence)* 2-tuples.

    Raises ValueError when either lower-case flank is shorter than the
    required flank length.
    """
    n = len(seq)
    assert primerlength % 2 == 1, "primer length not odd"
    # Bases on each side of the central NNS codon.
    flanklength = (primerlength - 3) // 2
    # The mutagenized region is exactly the upper-case portion of seq.
    upperseq = ''.join([nt for nt in seq if nt.istitle()])
    assert upperseq in seq, "upper case nucleotides not substring"
    assert len(upperseq) % 3 == 0, "length of upper case not multiple of 3"
    startupper = seq.index(upperseq)
    if startupper < flanklength:
        raise ValueError("not enough 5' lower case flanking nucleotides")
    if n - len(upperseq) - startupper < flanklength:
        raise ValueError("not enough 3' lower case flanking nucleotides")
    ncodons = len(upperseq) // 3
    primers = []
    for icodon in range(ncodons):
        # i is the index (in seq) of the codon being randomized.
        i = startupper + icodon * 3
        primer = "%sNNS%s" % (seq[i - flanklength : i], seq[i + 3 : i + 3 + flanklength])
        name = "%s-for-mut%d" % (prefix, firstcodon + icodon)
        primers.append((name, primer))
    return primers
8b8d9f067646b9d3b216675916e3cac69fdca2d5
456,619
from typing import Iterable
from typing import AnyStr


def join_separated(iterable: Iterable[AnyStr], sep: AnyStr) -> AnyStr:
    """
    Concatenate the elements of ``iterable``, placing ``sep`` between
    consecutive elements.

    :param iterable: an iterable of binary or text strings
    :param sep: a binary or text string
    :rtype: a binary or text string
    """
    joined = sep.join(iterable)
    return joined
e2cd30bf76b0f17623d00dbd302a7976ee8230db
577,643
def iwm(a, b):
    """Input-Weight Multiply: the product a * b."""
    product = a * b
    return product
284ad052dd4940b783821e3820e6f06de9f74436
179,036
def set_y_axis_min(vsc):
    """
    Determine whether y_axis_min should be zero or a negative value.

    Parameters
    ----------
    vsc: Pandas Series

    Returns
    -------
    zero when the series minimum is positive, otherwise 1.1 * minimum
    (10% headroom below the most negative value)
    """
    minimum = vsc.min()  # computed once instead of twice
    return 0 if minimum > 0 else 1.1 * minimum
dfe3f8721dce5bd399647684f848a13e60501cb9
419,739
def update_line(line):
    """
    Convert a Python-2 style ``print`` statement in *line* into a
    ``print(...)`` call, preserving the amount of leading indentation.

    Fix: the original test ``new_line.find('print') == 0`` also matched
    identifiers that merely start with "print" (e.g. ``printer(x)``) and
    mangled them; the rewrite requires 'print' to stand alone or be
    followed by a space.

    Note: like the original, indentation is re-added as spaces and
    whitespace/comments after the print statement are not handled.
    """
    new_line = line.lstrip()
    stripped_spaces = len(line) - len(new_line)
    if new_line == 'print' or new_line.startswith('print '):
        # Drop 'print ' (6 chars) and wrap the remainder in parentheses.
        new_line = "print(" + new_line[6:] + ")"
    return " " * stripped_spaces + new_line
12d2dc22d3aebee9c25080b8a496ce5edc9c0e9b
598,075
def is_prime(num: int) -> bool:
    """Determine whether *num* is a prime number.

    Fix: the original returned True for 0 and for negative inputs because
    the trial-division loop never executed; anything below 2 is now
    rejected up front.

    :param num: the integer to test
    :return: True when num is prime, otherwise False
    """
    if num < 2:
        return False
    # Trial division by every candidate up to sqrt(num).
    return all(num % i for i in range(2, int(num ** 0.5) + 1))
d00b6d182cc0dd1b99445de1e613f7f959b3222b
362,160
def merge_dicts_into(target, *dicts):
    """Update *target* in place with each mapping in *dicts* (later mappings
    win on key conflicts) and return it."""
    for mapping in dicts:
        target.update(mapping)
    return target
c2ff91a3f43158350150f223ee6987c75250a4ca
667,047
from pathlib import Path
import yaml


def fixture_mock_config_path(tmp_path):
    """Fixture that writes a throwaway YAML config under *tmp_path*.

    Returns:
        pathlib.Path
            The path to the config file.
    """
    config_file_path = Path(tmp_path).joinpath("test_config.yaml")
    settings = {
        "azure": {
            "STORAGE_ACCOUNT_URL": "https://mock-url.net/",
            "STORAGE_CONTAINERS": ["container1", "container2"],
        }
    }
    with open(config_file_path, "w") as file:
        yaml.dump(settings, file)
    return config_file_path
bd75d36f731059e2e160d13ebbe4cbd5656fc869
460,598
def get_face_ids(oEditor, body_name):
    """
    Get the face id list of a given body name.

    Fix: the original returned a lazy ``map`` object despite documenting a
    list; the ids are now materialized so callers can index and len() the
    result as advertised.

    Parameters
    ----------
    oEditor : pywin32 COMObject
        The HFSS editor in which the operation will be performed.
    body_name : str
        Name of the body whose face id list will be returned.

    Returns
    -------
    face_id_list : list of int
        List with face Id numbers of body_name.
    """
    return [int(face_id) for face_id in oEditor.GetFaceIDs(body_name)]
4547e0ad98ea031748c9d55ec1928bb3751947c4
586,203
import json


def load_one_params(params_json_path):
    """Load a params.json file as a dict.

    Drops the "args_data" entry when present, and defaults "exp_name" to
    the name of the file's parent directory.
    """
    with open(params_json_path, 'r') as f:
        data = json.load(f)
    data.pop("args_data", None)
    if "exp_name" not in data:
        data["exp_name"] = params_json_path.split("/")[-2]
    return data
2a1546ce9c763ebc322c856f843bd242f209df36
124,191
def biot(h, r, k):
    """
    Calculate the dimensionless Biot number, Bi = h*r/k.

    Parameters
    ----------
    h : heat transfer coefficient, W/m^2K
    r : radius of particle, m
    k : thermal conductivity, W/mK

    Returns
    -------
    Biot number, -
    """
    return (h * r) / k
4997785657593fb186d7fb251bb39b83b0b312f7
147,440
import requests


def get_bucket(url, token, record_id):
    """
    Get the file-bucket url from a record's JSON data.

    Parameters
    ----------
    url : str
        The base url of the application.
    token : str
        The authentication token for the api.
    record_id : str
        The id of the record we want to upload files to.

    Returns
    -------
    bucket_url : str
        The url of the file bucket to upload files to.
    """
    headers = {"Content-Type": "application/json"}
    record_url = url + f"{record_id}"
    response = requests.get(record_url, params={'access_token': token}, headers=headers)
    return response.json()["links"]["bucket"]
baa64b414bad18684da0eca04a3ee432779667ab
468,840
def data_for_template(number, username, reply_count, msg_time, msg_pretext,
                      msg_text, file_permalink, file_name, is_msg_parent):
    """
    Bundle the substitution values for the jinja template into a dict keyed
    by field name.

    :param number:
    :param username:
    :param reply_count:
    :param msg_time:
    :param msg_pretext:
    :param msg_text:
    :param file_permalink:
    :param file_name:
    :param is_msg_parent:
    :return: data dict for template rendering
    """
    return {
        "number": number,
        "username": username,
        "reply_count": reply_count,
        "msg_time": msg_time,
        "msg_pretext": msg_pretext,
        "msg_text": msg_text,
        "file_permalink": file_permalink,
        "file_name": file_name,
        "is_msg_parent": is_msg_parent,
    }
f718b5c0fdb99761418c9fdb337ee522b963bc00
242,258
def addLetter(letters):  # <-- pass in a string
    """
    Function factory: build and return a function that appends *letters*
    to whatever string it is called with.
    """
    def append_suffix(s):
        return s + letters
    return append_suffix
45cf2adeb150b4ab38d61cad4fb3182a8289498a
342,545
def create_leaf_list(root):
    """
    Question 10.12: Collect the values of a binary tree's leaves,
    left to right, into a list.
    """
    left, right = root.left, root.right
    if left is None and right is None:
        return [root.val]
    leaves = []
    if left is not None:
        leaves += create_leaf_list(left)
    if right is not None:
        leaves += create_leaf_list(right)
    return leaves
6457f80b6b2b4257f5e2def37fa90c77f2670e27
284,081
import json


def get_command_line(command, positional):
    """
    Build the command line invocation for this submission by concatenating
    the command and its single-quoted positional arguments.

    Cleanup: ``if not command is None`` replaced with the idiomatic
    ``if command is not None``.

    @param command - command line from job spec (may be None)
    @param positional - positional arguments
    @return: command string with args ("" when there is nothing to join)
    """
    parts = []
    if command is not None:
        parts.append(command)
    for posit in positional:
        # Non-strings are serialized via JSON first.
        if not isinstance(posit, str):
            posit = "{0}".format(json.dumps(posit))
        # Escape any single quotes for POSIX shells ('...'"'"'...').
        posit = posit.replace("'", "'\"'\"'")
        # Add in encapsulating single quotes
        parts.append("'{0}'".format(posit))
    return " ".join(parts)
dc867a7e094152790f9a77345c75bd2e4eb82854
297,677
def get_reflectivity_name(radar):
    """
    Test several possible names for the ground radar reflectivity field.

    Parameters:
    ===========
    radar: object
        Py-ART radar structure

    Returns:
    key: str
        The first matching reflectivity field name, or None.
    """
    candidates = ['reflectivity', 'corrected_reflectivity', 'total_power',
                  'DBZ', 'DBZH', 'UZ', 'CZ', 'Refl']
    for name in candidates:
        try:
            radar.fields[name]
        except KeyError:
            continue
        return name
    return None
75bdf283334f2eaea853396fe500bc844bbadfb4
630,431
def find_line(string, index):
    """For *index* into *string*, return the line number (0-based), the
    character offset within that line (0-based), and the line itself
    (without its newline)."""
    start = string.rfind('\n', 0, index) + 1
    end = string.find('\n', index)
    if end < 0:
        # Index is on the last line: it runs to the end of the string.
        end = len(string)
    line_number = string.count('\n', 0, index)
    return (line_number, index - start, string[start:end])
b9cd83934db523fa418b8ec217689dcc4f232052
323,195
from typing import List


def lists_to_str_list(array: List[List[str]]) -> List[str]:
    """
    Render each inner list of most-important features as a string of the
    form "['a' 'b']" to match the submission format.
    """
    rendered = []
    for features in array:
        inner = " ".join("'{}'".format(feature) for feature in features)
        rendered.append("[" + inner + "]")
    return rendered
79f5403f2d1f79879d9749d13c492b188b4ff408
326,733
import yaml


def read_config(path):
    """Read a YAML config file from *path* and return the parsed params
    (uses SafeLoader)."""
    with open(path) as file:
        return yaml.load(file, Loader=yaml.SafeLoader)
cb6fbfc7b62e1ea36b76e3f4fcf401249c292756
392,272
import requests
from bs4 import BeautifulSoup
import re


def scrape_links_from_each_page(urls, target_pattern, labeler=(lambda x:x)):
    """
    Loops over a list of urls and finds links that match a target pattern
    on each page.

    Inputs:
    - urls: the list of urls to scrape links from
    - target_pattern: regex that specifies the types of links you want to collect
    - labeler: function that parses a url and returns a label for that page

    Outputs:
    - links: a dictionary with key/value pairs {url_label:[scraped_links]}

    Raises ConnectionError for any page that does not return HTTP 200.
    """
    links = {}
    for url in urls:
        response = requests.get(url)
        label = labeler(url)
        if response.status_code != 200:
            raise ConnectionError(f"Failed to connect to {url}.")
        soup = BeautifulSoup(response.text, "lxml")
        target_regex = re.compile(target_pattern)
        # Collect the href of every anchor whose href matches the pattern.
        target_urls = [x['href'] for x in soup.find_all('a', {'href':target_regex})]
        links[label] = target_urls
    return links
3fddbacfea52bbcce7617d7045ffc45f2609ea3b
511,247
from typing import Tuple def _get_output_pad(plain: bool) -> Tuple[int, int, int, int]: """Return the padding for outputs. Args: plain (bool): Only show plain style. No decorations such as boxes or execution counts. Returns: Tuple[int, int, int, int]: The padding for outputs. """ if plain: return (0, 0, 0, 0) else: return (0, 0, 0, 1)
5fdc2b645238028cfe17f8773c1f1a343da692ec
677,581
def reset_labels(dataset):
    """Restore 'label' from the preserved 'originalLabel' column and return
    the (mutated) dataset."""
    original = dataset['originalLabel']
    dataset['label'] = original
    return dataset
978678ac1ba52d17a6ef596efe3c1e499d08f0ab
410,637
import torch


def value_td_residuals(
    rewards: torch.Tensor,
    values: torch.Tensor,
    next_values: torch.Tensor,
    discount: float,
) -> torch.Tensor:
    """Compute the TD residual of the state value function:
    delta = r + discount * V(s') - V(s).

    All tensors must be one dimensional; this is valid only for a single
    trajectory.

    Parameters
    ----------
    rewards:
        The one step reward.
    values:
        The estimated values at the current step. Note that if the last
        state is terminal, the associated value should be zero.
    next_values:
        The estimated values at the next step.
    discount:
        The discount rate.
    """
    discounted_next = discount * next_values
    return rewards + discounted_next - values
723ed0f0ce651cc7da5ad9c7950972d104198888
698,405
def get_instance(module, name, config):
    """
    Instantiate the attribute named in config[name]['type'] from *module*;
    when config[name]['args'] is present (and non-empty), pass it as
    keyword arguments.
    """
    spec = config[name]
    factory = getattr(module, spec['type'])
    kwargs = spec.get('args')
    # Empty/missing args both fall through to a no-argument call.
    return factory(**kwargs) if kwargs else factory()
ea57e7097665343199956509bb302e3806fb383a
5,639
def format_elapsed_time(elapsed: float, precision: int = 2) -> str:
    """
    Format an elapsed time in seconds as a human readable string,
    choosing seconds, milliseconds, or microseconds as appropriate.

    Parameters
    ----------
    elapsed : `float`
        The elapsed time in seconds.
    precision : `int`
        The number of decimal places to use, defaults to 2.

    Returns
    -------
    `str`
        The formatted elapsed time.
    """
    millis = elapsed * 1e3
    if millis >= 1e3:
        return f'{millis / 1e3:.{precision}f}s'
    if millis >= 1:
        return f'{millis:.{precision}f}ms'
    return f'{millis * 1e3:.{precision}f}μs'
e8cd1ceb24b62936bdf317e0b1584b80ba363da3
60,162
def read_metadata_tf(line, search_text, current_value):
    """
    Read a simple yes/no DynAdjust header item.

    :param line: DynAdjust header line
    :param search_text: header field desired
    :param current_value: stored value, returned unchanged when
        search_text is not found
    :return: True/False parsed from the field, or current_value
    """
    # Header fields are space-padded to a 35-character label column.
    if line[:35] != search_text.ljust(35, ' '):
        return current_value
    return line[35:].strip() == 'Yes'
76c70ac7c8d7d94854e32186b2384d5ce04b5859
162,980
def IsPakFileName(file_name):
    """Returns whether the given file name ends with .pak or .lpak."""
    # endswith accepts a tuple of suffixes.
    return file_name.endswith(('.pak', '.lpak'))
0ed3c4da1e1fdcfd994b317b2eadef95e25719d8
574,009
def get_dtype(i):
    """Return the name of the smallest unsigned integer dtype that can hold
    a maximum intensity of *i* ('uint8' or 'uint16', else 'uint')."""
    for depth in (8, 16):
        if i <= 2 ** depth - 1:
            return "uint%d" % (depth,)
    return "uint"
7adbb6b1c7a704f6d05482f8310c5c51e7a6b5e4
485,695
import json


def load_json(path):
    """
    Load run data from a JSON file.

    Arguments:
        path (str): Path to the JSON file to load.

    Returns:
        data (list): Data loaded from the file.
    """
    with open(path, 'r') as handle:
        return json.load(handle)
6f5c0bb63dd9a746dfeb726a67ddec8d8beddcc7
372,429
import json
import yaml


def prepare(*, spec_str: str, version: str) -> str:
    """
    Prepare a stored spec to be returned to the user.

    De-serializes using JSON, adds version and serializes using YAML.

    Args:
        spec_str: The spec as it is stored.
        version: The version of the spec.

    Returns:
        The spec in a user friendly form.
    """
    spec = json.loads(spec_str)
    # Start from the version, then let any stored info keys extend/override it.
    info = {"version": version, **spec.get("info", {})}
    return yaml.dump({"info": info}) + yaml.dump({"components": spec["components"]})
311424aa376941690b59c6ec6f31ca6d9f411e2c
511,999
def center_crop(img_mat, size=(224, 224)):
    """Center-crop a 3-D image matrix to ``size``.

    The image must be at least as large as the crop in both dimensions
    (no check is performed here).

    params:
        img_mat: (3D-matrix) image matrix of shape (width, height, channels)
        size: (tuple) the size of the crop (width, height)
    returns:
        the center-cropped slice of img_mat
    """
    crop_w, crop_h = size
    width, height, _channels = img_mat.shape
    # Top-left corner of the crop window, centred on the image.
    off_w = width // 2 - crop_w // 2
    off_h = height // 2 - crop_h // 2
    return img_mat[off_w:off_w + crop_w, off_h:off_h + crop_h, :]
5a65f44f3bc6e5fb7b768769580a3a0706e60673
682,871
def is_close(val1, val2, tol=1e-6):
    """Return True when ``val1`` and ``val2`` differ by less than ``tol``."""
    difference = val2 - val1
    # Strict bound on both sides, equivalent to abs(difference) < tol.
    return -tol < difference < tol
43336d15c19239902d9f30976330499b2c34e662
226,868
import socket


def is_ipv6(addr):
    """Checks if a given address is an IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, addr)
    except socket.error:
        # inet_pton rejects anything that is not a valid IPv6 literal.
        return False
    return True
b877ff42e0077f05ea2af205a86b40822daa7fa7
664,296
def make_keys_multicol(columns_to_update_lst):
    """
    returns keys to be updated and new names of columns to be updated

    :param columns_to_update_lst: list of column names to update
    :return joined_str: part of postgres query with keys. E.g. "col1=c.updatecol1 , col2=c.updatecol2"
            update_lst: list of new column names in intermediate tables. E.g. [updatecol1, updatecol2]
    """
    # New intermediate-table column names are the originals with an "update" prefix.
    update_lst = ["update" + col for col in columns_to_update_lst]
    assignments = [col + " = c.update" + col for col in columns_to_update_lst]
    return ",".join(assignments), update_lst
96929862d124f3c05be35c235f6b5daa6523ed3d
406,348
def presence_of_all_elements_located(locator):
    """ An expectation for checking that there is at least one element present
    on a web page.
    locator is used to find the element
    returns the list of WebElements once they are located
    """
    def _locate(driver):
        # Delegate to the driver; an empty list means nothing located yet.
        return driver.find_elements(*locator)

    return _locate
b2bd4729c003d099c19febf57eea4fee4abbfd41
101,783
def _kelvin_to_celsius(temp):
    """Convert temperature in Kelvin to Celsius.

    :param temp: temperature in Kelvin (numeric).
    :return: the same temperature expressed in degrees Celsius.
    """
    # 0 degC corresponds to 273.15 K; the conversion is a fixed offset.
    return temp - 273.15
71caaf8282f66d8e56a48b2cfb603b4d9324b91b
389,680
from typing import Optional
from typing import Callable


def docfill(
    *args,
    template: Optional[str] = None,
    **kwargs
):
    """Format the decorated function's docstring with the given args/kwargs.

    Useful when several functions share common inputs, or when the valid
    range of a parameter is defined by a global constant.

    Behaviour:
        - With ``template``: the template string becomes the docstring and
          is formatted with ``args``/``kwargs``.
        - Without ``template``: the function's own docstring (if any) is
          formatted with ``args``/``kwargs``.
        - A function with no docstring and no template is left untouched.

    Args:
        args: values formatted as positional fields, e.g. ``{0}``, ``{1}``.
        kwargs: values formatted as named fields, e.g. ``{some_var}``.
        template: optional string to use as the docstring template.

    Example:
        >>> @docfill([1, 2, 42], valid_animals={"cat", "dog"})
        ... def func(numbers, animals):
        ...     \"\"\"numbers: options are {0}; animals: {valid_animals}\"\"\"
    """
    def decorator(fn: Callable) -> Callable:
        # Prefer the explicit template; otherwise fall back to the
        # function's own docstring.
        doc = template if template is not None else fn.__doc__
        if doc is not None:
            doc = doc.format(*args, **kwargs)
        fn.__doc__ = doc
        return fn

    return decorator
ae3ab484d18bfb41e232f191163955e2c2e829f4
675,836
import random


def rand_x_digit_num(x):
    """Return a random X-digit number as a zero-padded string of length x."""
    upper = 10 ** x - 1
    # Zero-pad to exactly x characters so e.g. 7 becomes '007' for x=3.
    return '{0:0{x}d}'.format(random.randint(0, upper), x=x)
b46864143ca6186ebeede6c687a85d1b585e70db
4,927
def contains_tlvs(sub_tlvs, tlv_types):
    """Verify if all types of tlv in a list are included in a sub-tlv list.
    """
    for tlv_type in tlv_types:
        if not any(isinstance(sub_tlv, tlv_type) for sub_tlv in sub_tlvs):
            # A required TLV type has no instance in the sub-TLV list.
            return False
    return True
461f026518e7f96d9475c7d09163ff1725b49a32
261,706
def file_type(s):
    """Classify a filename as "fasta" or "fastq" by its extension.

    Arguments
    ---------
    s : str, a filename string

    Returns
    ---------
    out : "fasta" for .fa/.fasta files, "fastq" for .fq/.fastq files.

    Raises
    ---------
    ValueError : for any other extension.

    Examples
    ---------
    >>> file_type("example_file.fa")
    'fasta'
    >>> file_type("example_file.fq")
    'fastq'
    """
    suffix = s.split(".")[-1]
    if suffix in ("fa", "fasta"):
        return "fasta"
    if suffix in ("fq", "fastq"):
        return "fastq"
    raise ValueError("File must be in fasta or fastq format. "
                     "Accepted file extensions: fa, fq, fasta, or fastq.")
3ea41a3b3c1c122925ce9711d786534ad9d8c70c
190,890
def _get_county_variants(county: str) -> list:
    """
    Given a county, returns a list of the possible variants of the county name.

    Args:
        county: A string literal that represents the county name.

    Returns:
        A list of the possible variants of the county name.
    """
    # Strip a trailing police-department qualifier before building variants.
    base = county.replace(' County Police Department', '')
    return [base + suffix for suffix in ('', ' County', ' Parish', ' Borough')]
b61356fc1845907c3b761dbdaed607ba965f531a
631,054
def get_url(line3):
    """Collects URL from the line.

    Args:
        line3 (str): 3.st line of data block

    Returns:
        str: URL, or '-' when the URL field is empty
    """
    # The URL is the second space-separated token on the line.
    candidate = line3.split(' ')[1]
    if candidate == '':
        return '-'
    return candidate
71875ef6924c90c4d6e3759bcd46b0e8b063a2fe
109,786
def generate_unigram_table(ngram):
    """Generates a frequency table for a unigram model"""
    counts = {}
    for token in ngram:
        counts[token] = counts.get(token, 0) + 1
    total = sum(counts.values())
    # Convert raw occurrence counts into probabilities.
    return {token: count / total for token, count in counts.items()}
e8e8d83d50b50bdb7f6ba765deb3705874b6fe42
290,077
def unique_strategies(strategies):
    """
    Extract unique strategies from array of strategies.

    Parameters
    ----------
    strategies : Strategy list
        Strategies to be processed.

    Returns
    -------
    Strategy set :
        Unique strategies.
    """
    # Relies on Strategy implementing __hash__/__eq__ so set() can
    # deduplicate; ordering of the result is therefore unspecified.
    deduplicated = set(strategies)
    return list(deduplicated)
f6f5d945bd52a29d4899b0395222e20b0205f612
182,710
import hashlib


def get_md5(fname):
    """Return the hex MD5 digest of the file at ``fname``.

    Reads in 4 KiB chunks so arbitrarily large files never need to be
    held in memory at once.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
1fa95f64716946ca1697c7d4410d8b10f0d1ab48
306,199
from math import fmod


def split_translation(t):
    """ Split translation into pixel aligned and sub-pixel components.

    Subpixel translation is guaranteed to be in [-0.5, +0.5] range.

    > x + t = x + t_whole + t_subpix

    :param t: (float, float)

    :returns: (t_whole: (float, float), t_subpix: (float, float))
    """
    def _decompose(value):
        frac = fmod(value, 1.0)
        whole = value - frac
        # Fold the fractional part into [-0.5, +0.5], adjusting the
        # whole-pixel part to compensate.
        if frac > 0.5:
            return whole + 1, frac - 1
        if frac < -0.5:
            return whole - 1, frac + 1
        return whole, frac

    parts = [_decompose(value) for value in t]
    return tuple(p[0] for p in parts), tuple(p[1] for p in parts)
becacbcb6b1d53b06ccb7a683a7c8284246f3873
514,801
def breadcrumbs(path, root_discovery):
    """Create the breadcrumb trail to this page of documentation.

    Args:
      path: string, Dot separated name of the resource.
      root_discovery: Deserialized discovery document.

    Returns:
      HTML with links to each of the parent resources of this resource.
    """
    parts = path.split('.')
    crumbs = []
    for i, part in enumerate(parts):
        # Link target is the dotted path up to and including this part.
        href = '.'.join(parts[:i + 1])
        # The root crumb displays the document title when one is present.
        label = root_discovery.get('title', part) if i == 0 else part
        crumbs.append('<a href="%s.html">%s</a>' % (href, label))
    return ' . '.join(crumbs)
ac7227be554836d8a0d59adccde3f87f93a47639
555,265
def format_name(voter):
    """Given a register.Citizen instance, returns the voter's name formatted
    appropriately for addition to the PDFs.
    """
    name_parts = [
        voter.first_name,
        voter.father_name,
        voter.grandfather_name,
        voter.family_name,
    ]
    # ltr here, because bidi flips output
    return ' '.join(name_parts)
f17c3dbae7573841d4e3954d051628852d4c7592
252,286
def create_attempter(f):
    """
    Helper method for methods that call others and use ``Future`` directly.

    Returns a wrapper function that will set the given ``Future``'s exception
    state if the inner function call fails.
    """
    def _run(fn, *args, **kwargs):
        if f.done():
            # A resolved future must not be touched again.
            return
        try:
            fn(*args, **kwargs)
        except Exception as exc:
            f.set_exception(exc)

    return _run
864e66999ff93ba51da6ceba580f39fdfd37afc7
301,699
import torch


def one_hot(class_n, x):
    """
    Make scalar into one-hot vector

    :param class_n: number of classes
    :param x: the scalar class index
    :return: converted one-hot vector
    """
    identity = torch.eye(class_n)
    # Row x of the identity matrix is exactly the one-hot encoding of x.
    return identity[x]
29ae8794f785a2983cefc7a94cc7a3732a7a746a
75,965
def striptag(tag):
    """
    Get the short representation of a fully qualified tag

    :param str tag: a (fully qualified or not) XML tag
    """
    if not tag.startswith('{'):
        # Already a bare local name.
        return tag
    # Drop the '{namespace}' prefix, keeping only the local name.
    return tag.rsplit('}')[1]
f0193e3f792122ba8278e599247439a91139e72b
4,039
def shorten_text(text, max_len=400, show_more_text=""):
    """ Shorten a body of text.

    text - the string to be shortened, returned as-is if short enough
           (None is passed through unchanged)
    max_len - maximum length in characters of the kept text body
    show_more_text - string appended after the ellipsis IF the text is trimmed
    """
    if text is None:  # idiomatic identity check (was `text == None`)
        return None
    cutoff_string = "... "
    # Only trim when doing so actually saves space beyond the ellipsis itself.
    if len(text) > max_len + len(cutoff_string):
        return text[:max_len] + cutoff_string + show_more_text
    return text
959cbc6c12cdcb7b1648b3bc98ea136d4982b993
292,608
from typing import Dict
from typing import List


def restore_program(memory_updates: Dict[int, int], memory: List[int]) -> List[int]:
    """
    Sets input parameters of program

    Args:
        memory_updates: A map of which places to update in memory
        memory: The RAM module to perform the updates in

    Returns:
        The same ``memory`` list, mutated in place.
    """
    for address in memory_updates:
        # Overwrite the cell in place; the same list object is returned.
        memory[address] = memory_updates[address]
    return memory
57f306310b587ecba4bc8f4fc5969ccd17db4249
463,263
from typing import MutableSequence


def is_list(x):
    """Checks if argument is a list.

    Uses the MutableSequence ABC, so list subclasses and other registered
    mutable sequences also pass, while tuples and strings do not.
    """
    return isinstance(x, MutableSequence)
f909160de2112767c01c4ddb76f0ebc2bd885355
175,158
def get_info(res):
    """
    This will extract info about result, provided by libgen.
    input: res , one of results for the search.
           NOTE(review): presumably a BeautifulSoup Tag for one result row
           (it must support find_all('td')) — confirm at the call site.
    output: info, a dictionary
            info['author'] : author of the book
            info['title'] : title of the book
            info['pages'] : number of pages in the book
            info['lang'] : language of the book
            info['size'] : size of the book
            info['type'] : file type of the book i.e. pdf, djvu
            info['links'] : links (mirror links)
            info['code'] : book code used by libgen
            info['download_page'] : currently mirror link with source 93.174.95.29 is selected by default.
    """
    info = {}
    # Cells of the result row; the fixed indices below encode libgen's
    # current table column layout and will break if the site changes it.
    temp = res.find_all('td')
    info['author'] = temp[1].text
    info['title'] = temp[2].text
    # Keep only digit characters from the pages cell (it may contain
    # annotations such as '[2]'), then parse; fall back to 0 on failure.
    val = ''.join(list(filter(str.isdigit,temp[5].text)))
    try:
        val = int(val)
    except:
        val = 0
    info['pages'] = val
    info['lang'] = temp[6].text
    info['size'] = temp[7].text
    info['type'] = temp[8].text
    # Mirror links live in the trailing cells; the last cell is skipped.
    info['links'] = [x.a['href'] for x in temp[9:-1]]
    def get_code(link):
        # The libgen book code is the final path component of a mirror URL.
        return link.split('/')[-1]
    info['code'] = get_code(info['links'][0])
    def get_download_page(links):
        # Prefer the 93.174.95.29 mirror; None when it is absent.
        for i in links:
            if 'http://93.174.95.29/_ads/' in i:
                return i
        return None
    info['download_page'] = get_download_page(info['links'])
    return info
3120d14f6c4ccc239f628a39f7fb419776c17b81
265,142
def M_to_D(M):
    """Parabolic anomaly from mean anomaly.

    Parameters
    ----------
    M : float
        Mean anomaly in radians.

    Returns
    -------
    D : float
        Parabolic anomaly.

    Notes
    -----
    This uses the analytical solution of Barker's equation from [5]_.
    """
    B = 1.5 * M
    # Auxiliary quantities from the closed-form solution of Barker's equation.
    root = (1.0 + B ** 2) ** 0.5
    A = (B + root) ** (2.0 / 3.0)
    D = 2 * A * B / (1 + A + A ** 2)
    return D
6fb840ca7e8c60fff55a9a97e2bfa5c37bdbf259
405,966
import mpmath
import math


def mpfmt(a, num_digits=6):
    """Formats a mpmath float to given number of decimals."""
    # Decimal exponent of |a| determines the scaling needed to bring the
    # mantissa into [1, 10).
    scaling = int(math.floor(mpmath.log(abs(a), 10)))
    mantissa = float(a * mpmath.mpf('10') ** (-scaling))
    return "mpf('%se%d')" % (round(mantissa, num_digits), scaling)
7303610d92ded5f1f1f03beca611c9d67ff0b9ad
505,900
from functools import reduce


def get_nested(d, path):
    """ Get value from a nested dictionary, addressing it via a list of keys
    indicating the path to the value.

    Example::

        >>> d = dict(a=dict(a0=0, a1=1))
        >>> get_nested(d, ['a', 'a1'])
        1

    If the path points to an undefined branch in the hierarchy, every
    missing nested key is created with an empty dictionary as its value,
    and that empty dictionary is returned.

    Example:

        >>> d = dict(a=dict(a0=0, a1=1))
        >>> get_nested(d, ['a', 'a2', 'new_dict'])
        >>> print(d)
        {'a': {'a0': 0, 'a1': 1, 'a2': {'new_dict': {}}}}

    :param d: nested dictionary to address;
    :param path: list of keys forming the path to the required entry;
    :return: entry from addressed path.
    """
    node = d
    for key in path:
        # setdefault creates missing intermediate dicts on the way down.
        node = node.setdefault(key, {})
    return node
6d3aca8f06367ad9d81ef0ee9c431ccc52dea38a
443,659