content: string (length 39 to 9.28k)
sha1: string (length 40)
id: int64 (8 to 710k)
import re


def extract_character_name(actor_str):
    """Returns the character name extracted from the input string."""
    if not actor_str:
        return 'UNKNOWN'
    actor_str = actor_str.replace('(', '').replace(')', '')
    actor_str = actor_str.replace('[', '').replace(']', '')
    actor_str = actor_str.replace('van binnen', '')
    parts = re.split('[.,]', actor_str)
    return parts[0].strip()
8797a8fa4df2e83e46557b76317087a1aa2a72a8
162,909
from typing import List


def list_params(pop: list, gen: int, lamarck: bool, multicore: bool,
                **extra_params: dict) -> List:
    """
    Internal function to list execution parameters.

    For advanced users only.

    Parameters
    ----------
    pop : list
        List of individuals.
    gen : int
        Number of generations.
    lamarck : bool
        If Lamarckian Evolution is used.
    multicore : bool
        If parallelism is used.
    **extra_params : dict
        Extra parameters. For details, please check:
        https://github.com/PonyGE/PonyGE2/wiki/Evolutionary-Parameters.

    Returns
    -------
    param_list : List
        List of parameters.
    """
    param_list = []
    if 'population_size' not in extra_params.keys():
        param_list.append('--population_size={0}'.format(str(pop)))
    if 'generations' not in extra_params.keys():
        param_list.append('--generations={0}'.format(str(gen)))
    if multicore and 'multicore' not in extra_params.keys():
        param_list.append("--multicore")
    if lamarck and 'lamarck' not in extra_params.keys():
        param_list.append("--lamarck")
    for (key, val) in extra_params.items():
        if val == "True":
            param_list.append("--" + key)
        elif val == "False" or val == "":
            continue
        else:
            param_list.append("--{0}={1}".format(key, val))
    return param_list
d08629029f24a85df1adaeeb3db865c2b4a9507f
17,430
def delete_profile(db, user_id, profile_id):
    """Deletes a profile for the given user.

    Args:
        db (object): The db object
        user_id (int): The id of the user.
        profile_id (int): The id of the profile to delete.

    Returns:
        True if the record was deleted, False otherwise
    """
    db.execute('''DELETE FROM profile
                  WHERE user_id=? and id=?''', (user_id, profile_id))
    return not db.rows_affected == 0
728247bd982a7b4f3916b8c358e95ff18c837625
40,760
def calc_nlanes(reg_dict):
    """
    Return the number of MIPI Lanes used during readout

    Parameters
    ----------
    reg_dict : dict
        The dictionary that contains all the register information

    Returns
    -------
    nlanes : int
        The number of lanes, 2 or 4
    """
    if reg_dict["DATA_LANE_CONFIG"][2] == 0:
        return 2
    elif reg_dict["DATA_LANE_CONFIG"][2] == 1:
        return 4
    else:
        raise RuntimeError("Invalid lane configuration!")
8cfd2b6c21db5651f5d4e67f90365c63df8ce835
387,812
def _formatter_self(name, value):
    """Format the "self" variable and value on instance methods."""
    __mname = value.__module__
    if __mname != '__main__':
        return '%s = <%s.%s object at 0x%x>' % (name, __mname,
                                                value.__class__.__name__,
                                                id(value))
    else:
        return '%s = <%s object at 0x%x>' % (name,
                                             value.__class__.__name__,
                                             id(value))
5ab8794f01ddcfccb38b704150ff6a430618bbaa
578,910
import re


def get_words(text):
    """
    Parameters:
        text: a string of text which needs to be processed

    Returns:
        string of all words extracted from the input string
    """
    text = text.lower()
    wordlist = text.split()
    clean_list = []
    for word in wordlist:
        # only get words (no digits)
        if re.match(r"^[a-z]+$", word):
            clean_list.append(word)
    return " ".join(clean_list)
bf2aa60d2f670371dff751eb37a434553a281a64
413,601
def parse_coverage_list(coverage_list):
    """
    Given a coverage_list (structured as a list of coverage statements
    (which are strings) that define the poset), parses them into a
    dictionary.

    Parameters
    ----------
    coverage_list : `list`
        list of coverage statements defining a poset, e.g.:
        ["_<1", "_<2", "_<3", "1<12", "1<13", "2<12", "2<23", "3<13",
         "3<23", "13<123", "12<123", "23<123"]

    Returns
    -------
    coverage_dict : `dict`
        keys are items in the poset, other than the maximal element;
        values are items in the poset that cover their respective key

    Example Usage
    -------------
    >>> coverage_list = [
    ...     "_<1",
    ...     "_<2",
    ...     "_<3",
    ...     "1<12",
    ...     "1<13",
    ...     "2<12",
    ...     "2<23",
    ...     "3<13",
    ...     "3<23",
    ...     "13<123",
    ...     "12<123",
    ...     "23<123"]
    >>> parse_coverage_list(coverage_list)
    {'_': ['1', '2', '3'], '1': ['12', '13'], '2': ['12', '23'], '3': ['13', '23'], '13': ['123'], '12': ['123'], '23': ['123']}
    """
    coverage_dict = {}
    if "<" not in coverage_list[0]:
        coverage_dict[coverage_list[0]] = []
    for i in [j.split("<") for j in coverage_list]:
        for j in range(len(i) - 1):
            if i[j] not in coverage_dict.keys():
                coverage_dict[i[j]] = []
            coverage_dict[i[j]].append(i[j + 1])
    return coverage_dict
9715bd6fed1c3d032ef5ef5524429c35a7370307
633,882
import ctypes
import codecs
import functools


def add_string_properties_from_utf_16_le(c):
    """
    c_ubyte arrays that start with '_' are automatically converted to a
    string attribute with the name of the field without the leading '_'.
    The content of the original byte array is decoded using UTF-16-LE.

    :param c: Class to add the properties to
    :return: Nothing
    """
    for field_name, field_type in c._fields_:
        if field_type._type_ == ctypes.c_ubyte and field_type._length_ > 1 \
                and field_name.startswith('_'):
            field = field_name[1:]

            def getter(self, name=None):
                value = getattr(self, "_%s" % name)
                return codecs.decode(value, 'UTF-16-LE', 'replace').split('%')[0]

            setattr(c, field, property(functools.partial(getter, name=field)))
77618545d8e4c2c6241e493027db9cd896430492
234,465
def neighbour(x, y, image):
    """Return 8-neighbours of image point P1(x,y), in a clockwise order"""
    img = image.copy()
    x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1
    return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1],
            img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1]]
8e645f7634d089a0e65335f6ea3363d4ed66235b
5,348
def _to_original(sequence, result):
    """
    Cast result into the same type

    >>> _to_original([], ())
    []
    >>> _to_original((), [])
    ()
    """
    if isinstance(sequence, tuple):
        return tuple(result)
    if isinstance(sequence, list):
        return list(result)
    return result
7b9d8d1d2b119d61b43dde253d8d3c48bd0e45b8
709,274
def correct_time_dilation(df):
    """Remove time dilation from the event times.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with undilated times.
    """
    # Vectorised assignment on the dataframe itself; mutating the row
    # copies yielded by iterrows(), as the original code did, would
    # leave df unchanged.
    df['T'] = df['T'] / (1. + df['ZCMB'])
    return df
b7aae7a26279d5629a367cf49e898a5543981dd9
627,016
import math


def rotatePoint(x, y, z, ax, ay, az):
    """Returns an (x, y, z) point of the x, y, z point arguments rotated
    around the 0, 0, 0 origin by angles ax, ay, az (in radians).
        Directions of each axis:
         -y
          |
          +-- +x
         /
        +z
    """
    # Rotate around x axis:
    rotatedX = x
    rotatedY = (y * math.cos(ax)) - (z * math.sin(ax))
    rotatedZ = (y * math.sin(ax)) + (z * math.cos(ax))
    x, y, z = rotatedX, rotatedY, rotatedZ

    # Rotate around y axis:
    rotatedX = (z * math.sin(ay)) + (x * math.cos(ay))
    rotatedY = y
    rotatedZ = (z * math.cos(ay)) - (x * math.sin(ay))
    x, y, z = rotatedX, rotatedY, rotatedZ

    # Rotate around z axis:
    rotatedX = (x * math.cos(az)) - (y * math.sin(az))
    rotatedY = (x * math.sin(az)) + (y * math.cos(az))
    rotatedZ = z

    return (rotatedX, rotatedY, rotatedZ)
39a2f6491ac8df4780f9f39be863d1b46433825c
67,099
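A minimal usage sketch for rotatePoint above, assuming only the function as defined in that record: rotating the unit x vector a quarter turn about the z axis should land on the y axis, up to floating-point error.

import math

x, y, z = rotatePoint(1, 0, 0, 0, 0, math.pi / 2)
print(round(x, 6), round(y, 6), round(z, 6))  # 0.0 1.0 0.0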
from typing import List
import math


def calc_new_ahc_rating(past_perfs: List[float], perf: float) -> float:
    """
    Compute a rating according to the AHC rating rules
    (https://www.dropbox.com/s/ne358pdixfafppm/AHC_rating.pdf?dl=0)

    past_perfs: past performances
    perf: this contest's performance
    returns: new rating
    """
    R = 0.8271973364
    S = 724.4744301
    Q = sorted([p - S * math.log(j)
                for p in past_perfs + [perf]
                for j in range(1, 101)], reverse=True)
    numerator = sum([Q[i - 1] * R ** i for i in range(1, 101)])
    denominator = sum([R ** i for i in range(1, 101)])
    r = numerator / denominator
    if r >= 400:
        return r
    else:
        return 400 / math.exp((400 - r) / 400.0)
df8c09596bb61b95c5ea6b3a87023f0b3fbb36b8
370,686
def SegmentContains(main_l, main_r, l, r):
    """Returns true if [l, r) is contained inside [main_l, main_r).

    Args:
        main_l: int. Left border of the first segment.
        main_r: int. Right border (exclusive) of the first segment.
        l: int. Left border of the second segment.
        r: int. Right border (exclusive) of the second segment.
    """
    return main_l <= l and main_r >= r
761b064131b7885327e59818f7838573ee292264
75,037
def _is_bst(root, min_value=float('-inf'), max_value=float('inf')):
    """Check if the binary tree is a BST (binary search tree).

    :param root: Root node of the binary tree.
    :type root: binarytree.Node | None
    :param min_value: Minimum node value seen.
    :type min_value: int | float
    :param max_value: Maximum node value seen.
    :type max_value: int | float
    :return: True if the binary tree is a BST, False otherwise.
    :rtype: bool
    """
    if root is None:
        return True
    return (
        min_value < root.value < max_value and
        _is_bst(root.left, min_value, root.value) and
        _is_bst(root.right, root.value, max_value)
    )
24dbb77c0d7d7e4a0f4fc3fca60a2cbc70441044
312,734
from typing import Iterable
from typing import Optional


def sentence_join(items: Iterable[str], *, joiner: Optional[str] = None,
                  oxford: bool = False) -> str:
    """Join a list of strings like a sentence.

    >>> sentence_join(['red', 'green', 'blue'])
    'red, green and blue'

    Optionally, a different joiner can be provided.
    """
    # Materialise first in case we received something like a generator;
    # an empty generator is truthy, so checking emptiness before the
    # list() conversion (as the original code did) would misfire.
    items = list(items)
    if not items:
        return ""
    if joiner is None:
        joiner = "and"
    ox = ""
    if oxford:
        ox = ","
    if len(items) == 1:
        return items[0]
    return f"{', '.join(items[:-1])}{ox} {joiner} {items[-1]}"
a7c5b3c24a5e9b3823162be6fe32c1765c31d52c
497,502
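A quick usage sketch for sentence_join, assuming the fixed version above (which materialises generators before the emptiness check); the word lists are illustrative:

print(sentence_join(['red', 'green', 'blue']))  # red, green and blue
print(sentence_join(['red', 'green', 'blue'], joiner='or', oxford=True))  # red, green, or blue
print(sentence_join(w for w in []))  # prints an empty string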
def find_attr(name, attrs):
    """Find an attribute in an HTML tag by name.

    name - The attribute name to search for.
    attrs - The list of attributes to search.

    Returns the matching attribute or None.
    """
    for attr, values in attrs:
        if attr == name:
            return values
    return None
8633083870a38d14731eca729ada3a5ef6658408
442,397
def multi_replace_final(inputlist, replacements):
    """Apply the replace method multiple times and join lines.

    "inputlist" is a newline-separated string of lines, each to have the
    replace method applied multiple times. "replacements" is a list of
    tuples, the first item of each tuple the substring to be replaced and
    the second the replacement text. After all replacements are applied,
    the lines are joined with a newline character between each line and
    the resulting string is returned.
    """
    lines = inputlist.split("\n")
    outputlines = []
    for line in lines:
        for replacement in replacements:
            if replacement[0] in line:
                line = line.replace(replacement[0], replacement[1])
        # Collect the rebound line; the original code discarded the
        # replaced text because rebinding `line` does not mutate the list.
        outputlines.append(line)
    outputstring = "\n".join(outputlines)
    return outputstring
bce7c3424771e0546e5a6e927e1fa985add7d065
141,590
import fnmatch


def GetTestsFromDevice(runner):
    """Get a list of tests from a device, excluding disabled tests.

    Args:
        runner: a TestRunner.
    """
    # The executable/apk needs to be copied before we can call GetAllTests.
    runner.test_package.StripAndCopyExecutable()
    all_tests = runner.test_package.GetAllTests()
    # Only includes tests that do not have any match in the disabled list.
    disabled_list = runner.GetDisabledTests()
    return filter(lambda t: not any([fnmatch.fnmatch(t, disabled_pattern)
                                     for disabled_pattern in disabled_list]),
                  all_tests)
89c575bfca9b03983354be84df00100da49ab41e
104,356
def merge(list_a, length_a, list_b, length_b):
    """
    Merges two lists together by always choosing the smaller of the two
    values from the front of the lists. When one list is exhausted, the
    other list simply has all its elements added to the end of the
    merged list.
    """
    index_a, index_b = 0, 0
    merged_list = []
    while index_a < length_a and index_b < length_b:
        if list_a[index_a] < list_b[index_b]:
            # Add the smallest of the two
            merged_list.append(list_a[index_a])
            index_a += 1
        else:
            merged_list.append(list_b[index_b])
            index_b += 1
    if index_a < length_a:
        # The list that hasn't reached its end is not exhausted
        merged_list.extend(list_a[index_a:])
    else:
        merged_list.extend(list_b[index_b:])
    return merged_list
4dbc322bcfa1e5a0c2aa1249c36476c7e773f863
223,738
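A small sketch of the merge step above on two already-sorted lists, the core step of merge sort; the input lists here are illustrative:

a = [1, 4, 9]
b = [2, 3, 10, 12]
print(merge(a, len(a), b, len(b)))  # [1, 2, 3, 4, 9, 10, 12]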
def _preprocess(sentences, preprocess_pipeline, word_tokenize=None):
    """Helper function to preprocess a list of paragraphs.

    Args:
        sentences (list of str): List of paragraphs; a paragraph is
            represented as a single string with multiple sentences.
        preprocess_pipeline (list of functions): Preprocessing functions
            applied in order.
        word_tokenize (function, optional): Function to tokenize sentences
            into words.

    Returns:
        list of list of strings, where each string is a token or word.
    """
    if preprocess_pipeline is not None:
        for function in preprocess_pipeline:
            sentences = function(sentences)
    if word_tokenize is None:
        return sentences
    else:
        return sentences, [word_tokenize(sentence) for sentence in sentences]
9f44962a5ba4a66523beb40feb0a25b63d1dc528
638,770
import turtle


def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):
    """Initializes turtle instance for turtle game."""
    turtle_instance = turtle.Turtle()
    turtle_instance.shape(turtle_shape)
    turtle.bgcolor(bg_color)
    turtle_instance.color(turtle_color)
    turtle_instance.speed(turtle_speed)
    return turtle_instance
8657270f06961445030aee7e41c2aa61dd1c6cd3
217,143
def nest(coefficients, x, base_points):
    """Evaluates a polynomial in nested form using Horner's Method.

    Parameters
    ----------
    coefficients, list
        The coefficients of the polynomial, with coefficients[0] the
        constant term and coefficients[-1] the coefficient of the
        highest-degree term.
    x, float
        The value to evaluate the polynomial at.
    base_points, list
        An array of base points, if needed.
        len(base_points) + 1 == len(coefficients)
        Default: base_points == [0] * (len(coefficients) - 1)

    Returns
    -------
    value, float
        `value` is the function value using the coefficients and x
    """
    value = float(coefficients[-1])
    for i, coef in enumerate(coefficients[-2::-1]):
        value = coef + value * (x - base_points[i])
    return value
90a17386832a34184a142a638150705bcd5b97bb
470,850
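A worked example for nest, assuming all-zero base points (so it reduces to plain Horner evaluation): p(x) = 1 + 2x + 3x^2 at x = 2 gives 1 + 4 + 12 = 17.

coefficients = [1, 2, 3]  # constant term first
print(nest(coefficients, 2.0, [0, 0]))  # 17.0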
import math


def cal_SEM(input_std: float, sample_number) -> float:
    """This function returns the SEM given the std and the sample size"""
    return round(input_std / math.sqrt(sample_number - 1), 4)
b36b1140ed9b03cdeb057041db68d893dcdae1c8
138,991
def read_file(input_file):
    """Reads in a file, returns contents one line at a time."""
    with open(input_file) as f:
        content = f.readlines()
    return content
64a0a8276c1227bc13a7cbb131803693c6de74db
533,033
def unprime(s):
    """Given a variable's name as a `str`, check if the variable is a prime,
    i.e. has "_p" at the end. If so, return the unprimed version, if not
    return itself."""
    if s[-2:] == "_p":
        return s[:-2]
    else:
        return s
87f62801cd51c5863c0231f3553c1ece69f4193b
135,573
def max_digits(x):
    """
    Return the maximum integer that has at most ``x`` digits:

    >>> max_digits(4)
    9999
    >>> max_digits(0)
    0
    """
    return (10 ** x) - 1
3f0ffdfbbb3fdaec8e77889b3bfa14c9b9829b2e
699,184
def parse_database_credentials(db_credentials):
    """
    Parsing database credentials to needed format

    :param db_credentials: Dictionary
    :return: Dictionary
    """
    _db_config = {
        'name': db_credentials['db-name'],
        'host': db_credentials['db-hostname'],
        'password': db_credentials['db-password'],
        'port': db_credentials['db-port'],
        'user': db_credentials['db-username'],
    }
    return _db_config
f84d0b311511e28182979c8747bcb0547d542ab7
415,312
import asyncio


def mock_coro(return_value=None, exception=None):
    """Return a coro that returns a value or raises an exception."""
    fut = asyncio.Future()
    if exception is not None:
        fut.set_exception(exception)
    else:
        fut.set_result(return_value)
    return fut
d06d037bab143e288534e3e7e98da259f7c1cefc
704,448
import re


def FormatTime(time):
    """Returns the time variable back in ##:## format

    Args:
        time: a string representing the iqamah time

    Returns:
        a string containing the iqamah time in ##:## format
    """
    # if the time input is in a format where it contains integers
    # then proceed forward otherwise return None
    if re.match(r'(\d+)', time):
        # strip the time variable from anything added to it
        # example: 10:00 PM becomes 1000 and 5 AM becomes 5
        time = ''.join(re.findall(r'(\d+)', time))
        # if time length is more than 2 characters then add a : after
        # the second character from the right
        # example: 1000 becomes 10:00 and 5 stays 5
        if len(time) > 2:
            return time[:-2] + ':' + time[-2:]
        return time + ':00'
c7d595b7ee8016236e25439d87b1bbd730408d50
202,688
def _createLegendString(value, unit):
    """Creates a label "value unit" """
    legendString = "%.4g" % value
    if (unit):
        legendString += (" " + unit)
    return legendString
21e19cedfbd7893f2151d74a44234bbcc92d0b24
483,672
def get_first_label(x):
    """Returns the label of the first element.

    :param x: Series or DataFrame
    """
    return x.index[0]
9c2c98cca3f163a01b9ce06fa75b516e9b84a49b
650,740
def plus(a, b):
    """add vectors, return a + b"""
    return [a[i] + b[i] for i in range(len(a))]
66dec1e1967c5f43827629f67d994db197bd749c
137,825
import math


def clamping_acos(cos):
    """Calculate arccos with its argument clamped to [-1, 1]."""
    if cos > 1:
        return 0
    if cos < -1:
        # acos(-1) is pi, not pi/2 as the original returned here.
        return math.pi
    return math.acos(cos)
427fb795a3d384f4d7aea55831a07ce896822af2
527,133
import math


def hsv_to_rgb(h, s, v, a=1):
    """Convert hue, saturation, value (0..1) to RGBA."""
    f, i = math.modf(h * 6)
    p = v * (1 - s)
    q = v * (1 - f * s)
    t = v * (1 - (1 - f) * s)
    i %= 6

    if i == 0:
        r, g, b = v, t, p
    elif i == 1:
        r, g, b = q, v, p
    elif i == 2:
        r, g, b = p, v, t
    elif i == 3:
        r, g, b = p, q, v
    elif i == 4:
        r, g, b = t, p, v
    else:
        r, g, b = v, p, q

    return [r, g, b, a]
5588288125b99d312449f0d750399ceaf2a30068
356,784
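A usage sketch for hsv_to_rgb; hue 0 is red and hue 1/3 is green, so these calls should return approximately pure red and pure green with full alpha:

print(hsv_to_rgb(0, 1, 1))      # approximately [1, 0, 0, 1] (red)
print(hsv_to_rgb(1 / 3, 1, 1))  # approximately [0, 1, 0, 1] (green)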
def list_modules(curdir, pattern):
    """List names from files (*.py) in ``curdir`` that match ``pattern``."""
    return sorted(
        m.name.replace('.py', '')
        for m in curdir.glob('*.py')
        if pattern.match(m.name)
    )
0f278ca11626589c287cff9c6b0d644e185b4c0f
473,706
import yaml


def parse_yaml(filename):
    """Parse a YAML file and return the parsed object."""
    with open(filename) as f:
        return yaml.safe_load(f)
8758c021370d9568f2c62f8d0652c738b808503d
395,198
def update_parameters(parameters, gradients, learning_rate=1.0):
    """updates parameters using the gradient descent update rule"""
    # retrieve each parameter from the dictionary parameters
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']

    # retrieve each gradient from the dictionary gradients
    dW1 = gradients['dW1']
    db1 = gradients['db1']
    dW2 = gradients['dW2']
    db2 = gradients['db2']

    # update rule for each parameter
    W1 -= learning_rate * dW1
    b1 -= learning_rate * db1
    W2 -= learning_rate * dW2
    b2 -= learning_rate * db2

    parameters = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
    return parameters
45d6edf89f577542d6833cb669ea422649ae95bc
328,096
def seqWeightThenLenCmp(seq1, seq2):
    """
    Compares two sequences according to the assignment weight, and if it's
    the same or not defined, compares them according to their length.
    """
    weight1 = seq1.getTaxonomyPathWeight()
    weight2 = seq2.getTaxonomyPathWeight()
    if (weight1 is not None) and (weight2 is not None):
        if (weight1 - weight2) < 0:
            return -1
        else:
            return 1
    else:
        return seq1.seqBp - seq2.seqBp
945776d40a2c981bf9ff12f2fa45e23bb383658d
544,152
import six


def inclusion_only_unlimited_args(*args):
    """Expected inclusion_only_unlimited_args __doc__"""
    return {"result": "inclusion_only_unlimited_args - Expected result: %s"
                      % (', '.join(six.text_type(arg) for arg in args))}
b2f3759bb81722bba6a1777a4a74b274908b9ec1
373,447
from typing import Iterable


def join_last_separator(elements: Iterable, separator: str,
                        last_separator: str, final_char='') -> str:
    """
    Join all the elements in a string, using a separator for all of them
    except the last one, where it uses last_separator. You can also add a
    final char.

    >>> join_last_separator(['Uno', 'dos', 'tres', 'cuatro'], ', ', ' y ', '.')
    'Uno, dos, tres y cuatro.'
    """
    elements = list(elements)
    if not len(elements):
        return ''
    if len(elements) == 1:
        return f'{elements[0]}{final_char}'
    return f'{separator.join(elements[:-1])}{last_separator}{elements[-1]}{final_char}'
3f12080cddf66eaf95d92c8b9e33e39bf023bb7d
192,132
def plot_components(self):
    """Wrapper function for Prophet's default plotting functionality."""
    return self.model.plot_components(self.results)
3ef0eead99a9faf40d45275bb6bf9eab79eda80d
411,853
def flatten_config_dict(x, prefix=""):
    """Flattens config dict into single layer dict

    Example:
        flatten_config_dict({
            MODEL: {
                FBNET_V2: {
                    ARCH_DEF: "val0"
                }
            }
        })
        => {"MODEL.FBNET_V2.ARCH_DEF": "val0"}
    """
    if not isinstance(x, dict):
        return {prefix: x}

    d = {}
    for k, v in x.items():
        new_key = f"{prefix}.{k}" if prefix else k
        d.update(flatten_config_dict(v, new_key))
    return d
7929574ba28377ae255ed2e8a0236387f51e8f1f
325,736
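A usage sketch for flatten_config_dict with a hypothetical two-branch config; the keys and values here are illustrative:

cfg = {"MODEL": {"FBNET_V2": {"ARCH_DEF": "val0"}, "WEIGHTS": "val1"}}
print(flatten_config_dict(cfg))
# {'MODEL.FBNET_V2.ARCH_DEF': 'val0', 'MODEL.WEIGHTS': 'val1'}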
import csv


def load_cluster(path, iso3):
    """
    Load cluster number. You need to make sure the R clustering script
    (pytal/vis/clustering/clustering.r) has been run first.

    Parameters
    ----------
    path : string
        Directory path to the capacity lookup table, generated by pysim5g.
    iso3 : string
        The ISO 3-digit country code for the country being modeled.
    """
    with open(path, 'r') as source:
        reader = csv.DictReader(source)
        for row in reader:
            if row['ISO_3digit'] == iso3:
                return row['cluster']
2b1b5c08a708c18353cbdaab978c587cfab32aa8
373,014
def _select_room(caller, menuchoice, **kwargs):
    """Get a room from the selection using the mapping we created earlier."""
    room = caller.ndb._menutree.room_map[menuchoice]
    return "node_join_room", {"room": room}
b2eca7af5273589175e8671a89c61d95ffa611f7
328,052
def getDimmedRGB(color, alpha=255):
    """Returns dimmed RGB values, with low and high pass to ensure LEDs are
    fully off or on"""
    if alpha >= 253:  # effectively fully on
        return color
    elif alpha <= 2:  # effectively fully off
        return 0, 0, 0
    else:
        p = alpha / 255.0
        r, g, b = color
        return int(r * p), int(g * p), int(b * p)
e574e1e96d38f7da5ae8aebc64e2d3a870e31b4a
301,986
def ds_read_mock(data_set, *args, **kwargs):
    """
    Mock of IkatsApi.ts.fid method

    Same parameters and types as the original function
    """
    return {"description": "description of my data set",
            "ts_list": ['00001', '00002', '00003', '00004']}
1da970015a3affe088f54022b4b3da5e5f036dce
687,135
def shift(coord_paths, shift_vector):
    """Take an array of paths and shift them by the given vector."""
    new_paths = []
    for path in coord_paths:
        new_path = []
        for point in path:
            new_path.append((point[0] + shift_vector[0],
                             point[1] + shift_vector[1]))
        new_paths.append(new_path)
    return new_paths
feae7b047d9c0362bc5309b2fd3229cf2bef8160
292,240
import base64


def _auth_header(username, password):
    """return the value part of an Authorization: header for basic auth
    with the specified username and password"""
    return b"Basic " + base64.b64encode(username + b":" + password)
83ce29513993bea0185b209a6cb6b4d9beb9ee3a
580,291
import time


def banner(SCRIPT_NAME, SCRIPT_VERSION, REVISION_DATE, AUTHORS,
           CONTRIBUTORS, DESCRIPTION):
    """
    Banner for python scripts.

    :param SCRIPT_NAME: The name of the script
    :param SCRIPT_VERSION: The version number
    :param REVISION_DATE: The latest revision date
    :param AUTHORS: The main authors
    :param CONTRIBUTORS: Any contributing authors
    :param DESCRIPTION: A brief description
    :return banner_list: A nicely formatted banner
    :rtype: list
    """
    banner_list = []
    banner_list.append("==============================================================================")
    banner_list.append(SCRIPT_NAME + " " + SCRIPT_VERSION + " (" + REVISION_DATE + ")")
    banner_list.append(AUTHORS)
    banner_list.append("==============================================================================")
    banner_list.append(time.ctime())
    banner_list.append("")
    banner_list.append(DESCRIPTION)
    if CONTRIBUTORS != '':
        banner_list.append("With contributions from:")
        banner_list.append(CONTRIBUTORS)
    banner_list.append("")
    return banner_list
1c4b2f74bae29901aacac9f1465d4c5858333fa3
269,763
import torch


def rand_stiefel(n, p):
    """Generate random Stiefel point using qr of random normally
    distributed matrix"""
    X = torch.randn(n, p)
    q, r = torch.qr(X)
    return q
222cac7e6e32c021fa51b45e3fd473b190bf8788
256,427
def merge_dicts(srcdict: dict, mergedict: dict, overwrite=False):
    """Recursively merges `mergedict` into `srcdict` and returns `srcdict`.

    Makes shallow copies of `dict` and `list` values.
    """
    for k, v in mergedict.items():
        srcvalue = srcdict.get(k, None)
        if isinstance(v, dict) and isinstance(srcvalue, dict):
            merge_dicts(srcvalue, v, overwrite)
            continue
        if overwrite or srcvalue is None:
            if isinstance(v, dict):
                v = dict(v)
            elif isinstance(v, list):
                v = list(v)
            srcdict[k] = v
    return srcdict
c0adfaa48be3965c67b730978ce0b043a0cfe32a
271,187
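A usage sketch for merge_dicts: nested dicts are merged key by key, while existing scalar values survive unless overwrite=True. The dicts here are illustrative.

src = {"a": 1, "nested": {"x": 1}}
merge_dicts(src, {"a": 2, "nested": {"y": 2}})
print(src)  # {'a': 1, 'nested': {'x': 1, 'y': 2}}
merge_dicts(src, {"a": 2}, overwrite=True)
print(src)  # {'a': 2, 'nested': {'x': 1, 'y': 2}}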
import logging


def prepare_logger(level):
    """Configure logging for the module."""
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        datefmt='%Y/%m/%d %H:%M:%S',
        fmt='%(asctime)s %(levelname)-5s %(message)s'))
    bot_logger = logging.getLogger(__package__)
    prawtools_logger = logging.getLogger('prawtools')
    for logger in (bot_logger, prawtools_logger):
        logger.setLevel(getattr(logging, level))
        logger.addHandler(handler)
    return bot_logger
ce63a1819530d3d1b0e0e6dd6248df762e421832
601,975
def sanity_check(vars, cons):
    """Check all variables participate in some constraint"""
    v_con = []
    for c in cons:
        for x in cons[c]['scope']:
            if x not in v_con:
                v_con.append(x)
    for v in vars:
        if v not in v_con:
            return False
    return True
fc9885ce27d459b9f95310e1a1bda970e412a741
559,026
def user_discrim(user):
    """
    Return the user's username and disc in the format
    <username>#<discriminator>
    """
    return f"{user.name}#{user.discriminator}"
22866ad0c23a23bfbd7460844a9582916970991c
38,280
from typing import Union
from typing import Any


def gte(query: Union[int, float], field_name: str, object: Any) -> bool:
    """Check if value of object is greater than or equal to value of query"""
    return float(getattr(object, field_name)) >= float(query)
26f8378a008e20b0b17e2f54d99d088b46f1c722
447,157
def list_remote_branches(repo):
    """
    Lists remote branches of given git repository

    Args:
        repo(:class:`~git.repo.base.Repo`): Git repository instance

    Returns:
        List[str]: Branches of current git repository
    """
    branches = []
    for ref in repo.references:
        if ref not in repo.branches + repo.tags:
            remote = str(ref).split('/')[1]
            if remote not in ['HEAD']:
                branches.append(remote)
    return branches
ac161b0c39f10f45f02a43f86e4caa76a9bb3621
323,496
def keys_are_unique(hyperparam_list):
    """Check if the ``base_keys`` in a list of ``HyperparameterOption``
    objects are unique.

    Args:
        hyperparam_list (list)(HyperparameterOption): a list of
            hyperparameter options.

    Returns:
        bool: True if all the ``base_keys`` are unique, otherwise False.
    """
    keys = [item.base_key for item in hyperparam_list]
    keys = set(keys)
    return len(keys) == len(hyperparam_list)
656e453f00bb56f929d0a577329d41976eb22a3a
455,877
def get_node_ips_from_config(boot_config):
    """
    Returns the IPs of the configured nodes

    :param boot_config: the snaps-boot config to parse
    :return: a list of IPs for the given nodes
    """
    out_hosts = list()
    if ('PROVISION' in boot_config
            and 'DHCP' in boot_config['PROVISION']
            and 'subnet' in boot_config['PROVISION']['DHCP']):
        for subnet in boot_config['PROVISION']['DHCP']['subnet']:
            if 'bind_host' in subnet:
                for bind_host in subnet['bind_host']:
                    if 'ip' in bind_host:
                        out_hosts.append(bind_host['ip'])
    return out_hosts
0bc9fc9bc8f5ef577ecf21e0a754f0b3ac1d4e0a
100,863
def shrink_to_hint(s: str):
    """
    Shrink a string to hint.

    :param str s: Source string
    :return: str
    """
    length = len(s)
    if length < 4:
        return '*' * length
    return '{}**{}'.format(s[0], s[length - 1])
f1aed54259127a589c279902a26947e968bcd41d
390,449
def write_source(t, s, shot, imp="Ca"):
    """Write a STRAHL source file.

    This will overwrite any {imp}flx{shot}.dat locally.

    Parameters
    ----------
    t : array of float, (`n`,)
        The timebase (in seconds).
    s : array of float, (`n`,)
        The source function (in particles/s).
    shot : int
        Shot number, only used for saving to a .dat file
    imp : str, optional
        Impurity species atomic symbol

    Returns
    -------
    contents : str
        Content of the source file written to {imp}flx{shot}.dat
    """
    # Note the ':' in the format specs; the original "{.d}" and "{5.5f}"
    # specs would raise at runtime.
    contents = "{:d}\n".format(len(t))
    for tv, sv in zip(t, s):
        contents += "    {:5.5f}    {:5.5e}\n".format(tv, sv)
    with open(f"{imp}flx{shot}.dat", "w") as f:
        f.write(contents)
    return contents
30ecf4474af48ba00d6c50d2fa2643c01b20ea91
293,297
def Resample(df_z, kind, time):
    """Resamples the data frame from 1 second resolution to either 1 Hour
    or 1 Day resolution

    Arguments:
        df_z - full data frame
        kind - Water type, either hotIn or coldIn
        time - resolution, either 1H or 24H

    Returns:
        df_a - resampled data frame
    """
    df_a = df_z[kind].resample(rule=time, base=0).sum() * (1 / 60)
    return df_a
35de617f7dedb6043b39906b068d3c17a40c3463
502,005
def flatten(l):
    """
    Flatten a list of lists

    :param l: List of lists
    :return: flattened list
    """
    try:
        return [item for sublist in l for item in sublist]
    except TypeError:
        return l
a2bddde75d5aa58ade6c628fc339c1eaa36a91f5
139,034
import torch
import math


def uniform_binning_correction(x, n_bits=8):
    """Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).

    Args:
        x: 4-D Tensor of shape (NCHW)
        n_bits: optional.

    Returns:
        x: x ~ U(x, x + 1.0 / 256)
        objective: Equivalent to -q(x)*log(q(x)).
    """
    b, c, h, w = x.size()
    n_bins = 2 ** n_bits
    chw = c * h * w
    x += torch.zeros_like(x).uniform_(0, 1.0 / n_bins)
    objective = -math.log(n_bins) * chw * torch.ones(b, device=x.device)
    return x, objective
22f1cfe6b66ac11788a15edb281b6dee4e213654
684,143
import torch


def unif(n):
    """return a uniform histogram of length n (simplex)

    Parameters
    ----------
    n : int
        number of bins in the histogram

    Returns
    -------
    h : torch.Tensor (n,)
        histogram of length n such that h_i = 1/n for all i
    """
    return torch.ones(n) / n
36f154d1cdd06f79ba5007b480c1e2e970955775
66,479
def find_line_no(content_lines, match_text, error_message):
    """Find line number where given content occurs."""
    for line_no, line in enumerate(content_lines):
        if line.startswith(match_text):
            return line_no
    else:
        raise LookupError(error_message)
75c2271507483c70f6b293348805bea0aceab1b2
174,282
def reset_df_index(data_frame):
    """
    Resets the pandas data frame index, dropping the current index and
    replacing it with a "clean" index, in place. Useful after data frame
    filtering and multi-value column splitting.

    :param data_frame: pandas data frame (pd df)
    :return: resets the index in place in the original df (None)
    """
    return data_frame.reset_index(drop=True, inplace=True)
9cc10b510bbbbe52cbfebdb8cbcc945d930e966a
489,933
def _match_pragma(stmt, key):
    """Internal helper to match stmt to pragma stmt.

    Parameters
    ----------
    stmt : Stmt
        The AttrStmt
    key : str
        The pragma key
    """
    return ((stmt.attr_key == "pragma_" + key) or
            (stmt.attr_key == "pragma_scope" and stmt.value.value == key))
eec6d79ff091ffb4e43086889689d9e2cf29efd0
462,819
def merge(*ds):
    """Merge several ``dict``s together."""
    res = {}
    for d in ds:
        if d:
            res.update(d)
    return res
e83e6893d32b31e6c1c6e2b1fe657cf1b02fb38c
180,138
import json


def get_json_content(json_path):
    """Get json content.

    Args:
        json_path (str): path to json.

    Returns:
        (dict): json content.
    """
    with open(json_path, 'r') as json_bytes:
        content = json.load(json_bytes)
    return content
bd0630a38c89fa9d7d211b087b3c3a753b4d3ecc
510,543
import re


def extract_paths(file):
    """Extract the list of modified paths from the patch file."""
    paths = []
    # A with-statement closes the handle; the original left it open.
    with open(file) as fp:
        for line in fp:
            if line[:4] != '+++ ':
                continue
            match = re.match(r'^([^\t]+)', line[4:])
            if not match:
                continue
            paths.append(match.group(1).strip())
    return paths
d309e9a64cfd76cdc2527e612d3f4472df3aa17b
399,180
def sqlite_quote(val):
    """Return text to quote some sqlite value."""
    return "'%s'" % val
577bebaf087711f93479556f2e2a6eb8f5da4a8c
257,295
import torch


def scale_and_translation_transform_batch_torch(P, T):
    """
    First Normalises batch of input 3D meshes P such that each mesh has mean
    (0, 0, 0) and RMS distance from mean = 1. Then transforms P such that it
    has the same mean and RMSD as T.

    :param P: (batch_size, N, 3) batch of N 3D meshes to transform.
    :param T: (batch_size, N, 3) batch of N reference 3D meshes.
    :return: P transformed
    """
    P_mean = torch.mean(P, dim=1, keepdim=True)
    P_trans = P - P_mean
    P_scale = torch.sqrt(torch.sum(P_trans ** 2, dim=(1, 2), keepdim=True) / P.shape[1])
    P_normalised = P_trans / P_scale

    T_mean = torch.mean(T, dim=1, keepdim=True)
    T_scale = torch.sqrt(torch.sum((T - T_mean) ** 2, dim=(1, 2), keepdim=True) / T.shape[1])

    P_transformed = P_normalised * T_scale + T_mean
    return P_transformed
cd3b1da9111233376d1056217ae2c14ffdd8ea38
585,182
def first_phrase(name):
    """
    returns phrase, given lab member

    Parameters
    ----------
    name : str
        lab member

    Returns
    -------
    utterance : string
        phrase simulating specific lab member
    """
    if name == 'gina':
        return ('I just re-listened to my spotify playlist; ')
    if name == 'lucy':
        return ('I keep seeing psych professors at the gym; ')
    if name == 'andy':
        return ('My new puppy has so much energy; ')
    if name == 'kirsten':
        return ('I just ran myself on the attention task; ')
    if name == 'jeremy':
        return ('The episode of West Wing I\'m on now...')
    if name == 'emily':
        return ('I\'m addicted to drinking hot water; ')
    if name == 'paxton':
        return ('I always sit in front for Jeremy\'s lecture; ')
753d32d69c9b0de24912262876fae2ae112fc0e0
524,929
import pickle


def load_pickle(filename):
    """
    Load saved data from pickle file

    :param filename: pickle file name
    :return: data from pickle file
    """
    with open(filename, "rb") as f:
        data = pickle.load(f)
    return data
aabb0fb57fca48ef53c75129d662bf82f683d978
646,153
def save(df, name):
    """
    Save a dataframe as a csv and pickle file

    :param df: (pd.DataFrame) - dataframe to save
    :param name: (str) - output name
    :return name: (str) - output name
    """
    df.to_csv('{0}.csv'.format(name))
    df.to_pickle('{0}.pkl'.format(name))
    return name
4efda71dc0f5dcd0208558b7f211636cbf52bb5d
660,748
def get_npp_block(value):
    """
    Determine the number of pixels per block value.

    Parameters
    ----------
    value : int

    Returns
    -------
    int
    """
    return 0 if value > 8192 else value
ca189015705dffbfd74ad28e1676210bba7936ac
503,239
def chunks(obj, size, start=0):
    """Convert `obj` container to list of chunks of `size`."""
    return [obj[i: i + size] for i in range(start, len(obj), size)]
c523a346906b85c121bf67a56a806d51f639eeb3
30,282
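A usage sketch for chunks, which works on any sliceable container, including strings; a short final chunk is kept:

print(chunks([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]
print(chunks("abcdef", 3))         # ['abc', 'def']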
from typing import Union
from typing import List
from typing import Tuple
from typing import Dict


def format_locations(locations: Union[str, List, Tuple]) -> List[Dict[str, str]]:
    """
    Format locations from YAML file into the list format expected by boto3.

    Possible inputs are:
        a) A 2 letter country code string, e.g. "US"
        b) A 2-tuple (or 2 item list) of 2 letter country and subdivision
           codes, e.g. ("US", "NY")
        c) A list of two or more instances of a) and/or b)
    """
    if isinstance(locations, str):
        return [{'Country': locations}]
    elif isinstance(locations, (list, tuple)):
        return [{'Country': l[0], 'Subdivision': l[1]}
                if isinstance(l, (tuple, list)) else {'Country': l}
                for l in locations]
    else:
        raise TypeError
c332fe63dd1fe5e4703a746a53a545cd684a1d0d
412,581
def mac_to_oid(mac):
    """Converts a MAC address to an OID string."""
    return '.'.join([str(int(x, 16)) for x in mac.split(':')])
823f68a1708a2d4db6184b3aedc003f9f8fe79fe
631,150
def BFS(start, target, getNeighborsFunction):
    """
    Breadth First Search Algorithm. It starts at the root node (the start
    node) and searches all of its neighbors. If a neighbor is the target
    node, we return the shortest path from start to target. Otherwise we
    repeat this process for each neighbor until we find the target node.

    Args:
        start (Graph Node): The node we start searching from.
        target (Graph Node): The target we're searching for.
        getNeighborsFunction (Function): A function we can use to find a
            node's neighbors (in order for this method to work on multiple
            data structures).

    Returns:
        Graph Node: The shortest path from the start node to the target node.

    Time Complexity: Worst-Case: O(V + E)
    """
    q = [start]
    explored = []
    parents = {}
    parents[start] = None
    while len(q) > 0:
        v = q.pop(0)
        if v == target:
            path = [v]
            while parents[v] != None:
                path.insert(0, parents[v])
                v = parents[v]
            return path
        explored.append(v)
        neighbors = getNeighborsFunction(v)
        for n in neighbors:
            if n not in explored:
                # In DFS we insert at index 0, here we append
                # (BFS uses a queue, DFS uses a stack).
                q.append(n)
                explored.append(n)
                parents[n] = v
    return None
312b1df54a8457072c2e869a9a442058059814dd
405,139
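A usage sketch for BFS on a hypothetical adjacency dict; any structure works as long as the neighbour function returns a node's neighbours:

graph = {
    'A': ['B', 'C'],
    'B': ['D'],
    'C': ['D'],
    'D': ['E'],
    'E': [],
}
print(BFS('A', 'E', lambda node: graph[node]))  # ['A', 'B', 'D', 'E']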
def _create_instance(row):
    """
    Parse individual row of ARFF file in PHP dataset.

    If the row is from a tokens file, the first field must be a string of
    tokens optionally enclosed in single quotes. If the row is from a
    metrics file, it must be a comma-separated list of numbers. The last
    field must have values 'yes' or 'no', which are mapped to the integers
    1 and 0, respectively.

    Args:
        row: String representing an individual row of an ARFF file in the
            PHP dataset.

    Returns:
        List of the form [features, label], where features is a list of
        features (tokens or metrics data) and label is an integer having
        the value 1 if the last field of the row is 'yes' and 0 otherwise.
    """
    assert row.count('\'') in [0, 2]
    # Rows with multiple tokens are wrapped in single quote marks.
    # Otherwise, they are separated by commas.
    if row.count('\'') == 2:
        mod_row = row.split('\'')[1:]
    else:
        mod_row = row.split(',')
    assert (len(mod_row) == 2), "mod row: {}".format(mod_row)
    return [mod_row[0].replace('\'', ''), 1 if 'yes' in mod_row[1] else 0]
4908add0399c34c7545c37694cf6e853b98d4518
421,681
def get_column_name(column):
    """
    gets the pure column name from given column name with ordering info.

    for example:
    +age -> age
    age -> age
    -age -> age

    :param str column: column name to extract pure name from it.

    :rtype: str
    """
    if column.startswith(('-', '+')):
        return column[1:]
    return column
eff552e85b5bebd2fac6cad4ee21f3d2ec4ea994
171,680
def ExtractDependencies(input):
    """
    Create a list of dependencies from input list of lines.

    Each element contains the name of the object and a list of files that
    it depends on. Dependencies that contain "/usr/" are removed as they
    are system headers.
    """
    deps = []
    for line in input:
        headersLine = line.startswith(" ") or line.startswith("\t")
        line = line.strip()
        line = line.rstrip("\\ ")
        fileNames = line.strip().split(" ")
        if not headersLine:
            # it's a source file line; there may be headers too
            sourceLine = fileNames[0].rstrip(":")
            fileNames = fileNames[1:]
            deps.append([sourceLine, []])
        deps[-1][1].extend(header for header in fileNames
                           if "/usr/" not in header)
    return deps
8c0f87af50342221c1c73356e9c3ffdefb690e03
671,262
import ast


def status_from_analysis(line):
    """
    Utility method to obtain the process status boolean flags from the
    relative line in the final analysis output text file.

    :param line: string from log file
    :return: list of boolean status flags
    """
    line = line.strip().split('\t')
    return [ast.literal_eval(line[1]),
            ast.literal_eval(line[3]),
            ast.literal_eval(line[5]),
            ast.literal_eval(line[7]),
            ast.literal_eval(line[9])]
7983bd8d9e74a457a019b912cdaa0a1b31272bd9
671,130
def get_conversion_option(shape_records):
    """Prompts user for conversion options"""
    print("1 - Convert to a single zone")
    print("2 - Convert to one zone per shape (%d zones) (this can take a while)"
          % (len(shape_records)))
    import_option = int(input("Enter your conversion selection: "))
    return import_option
7608c588960eb3678970e0d4467c67ff9f17a331
2,952
def get_matched_primer_pair(le, ri):
    """
    Inputs: two lists of primers

    Get matching primers from a list (get primer pair from same amplicon).

    Returns: matched primer pair (same amplicon), if any
    """
    for l in le:
        for r in ri:
            if l[2] == r[2]:
                return l, r
    return None, None
5d04466741c295d4c9c21720e59fba08fb41709f
138,556
def frequency(string, word):
    """Find the frequency of occurrences of word in string as percentage"""
    word_l = word.lower()
    string_l = string.lower()
    # Words in string
    count = string_l.count(word_l)
    # Return frequency as percentage
    return 100.0 * count / len(string_l)
dd2183dcec04bdf835ab22a8a351d53571f6e5e9
19,566
def getattr_(entity, attribute):
    """Either unpack the attribute from every item in the entity if the
    entity is a list, otherwise just return the attribute from the entity.
    Returns None if the entity is either None or empty."""
    if entity in (None, []):
        return None
    if isinstance(entity, list):
        return [getattr(item, attribute) for item in entity]
    return getattr(entity, attribute)
42dbfb2c0cdb1bd59d627cd6d509e6dc43ba7a2d
670,168
import json def list_of_schematized_configs(test_configs_root): """returns list of configuration files, schematized with attributes so that fixtures and unit tests can specify those attributes to find the filename of a specific configuration file, that can then be used to get that file. Each element in the list is a dict with the following keys: `filename`, `config_type`, `audio_format`, `spect_format`, `annot_format` These keys define the schema for config files. For example, here is the first one: { "filename": "test_eval_audio_cbin_annot_notmat.toml", "config_type": "eval", "audio_format": "cbin", "spect_format": null, "annot_format": "notmat" } The ``specific_config`` factory fixture returns a function that itself return a configuration ``filename``, when provided values for all of the other keys. """ with test_configs_root.joinpath("configs.json").open("r") as fp: return json.load(fp)["configs"]
d5e5e54ee78077a77057bc179d26da703b1a3694
267,067
def is_boundary(loop):
    """Is a given loop on the boundary of a manifold
    (only connected to one face)"""
    return len(loop.link_loops) == 0
4d4df7e552c6a57b42fa3e9c43682368ae5091c1
41,325
def _keep_spikes(samples, bounds):
    """Only keep spikes within the bounds `bounds=(start, end)`."""
    start, end = bounds
    return (start <= samples) & (samples <= end)
b63979156e23cf9d26c0c51b4eebf16100dd7804
306,267
def _compute_newton_step(lambdas, p_norm, w_norm):
    """Compute the Newton step.

    Args:
        lambdas (namedtuple): Named tuple containing the current candidate
            value for the damping factor lambda, its lower bound and upper
            bound.
        p_norm (float): Frobenius (i.e. L2-norm) of the candidate vector.
        w_norm (float): Frobenius (i.e. L2-norm) of vector w, which is the
            solution to the following triangular system: U.T w = p.

    Returns:
        (float): Newton step computed according to formula (4.44) p.87
            from Nocedal and Wright (2006).
    """
    return lambdas.candidate + (p_norm / w_norm) ** 2 * (p_norm - 1)
30319530a61029d3a689856d1ac686ad2954b46d
335,623
def unnormalize(z, bounds):
    """Inverse of normalize"""
    return z * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
9a39768cdaaa1c169ea88b8dfcb5080a38b9e358
644,289
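A usage sketch for unnormalize, assuming NumPy-style 2-D bounds of shape (d, 2) with (min, max) per row; the values are illustrative:

import numpy as np

bounds = np.array([[0.0, 10.0], [-1.0, 1.0]])
print(unnormalize(np.array([0.5, 0.5]), bounds))  # [5. 0.]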
def RGBtoYCC(R, G, B):
    """
    convert RGB to YCC color

    The Kodak* PhotoYCC* was developed for encoding Photo CD* image data.
    This model comprises luminance (Y) and two color difference, or
    chrominance (C1, C2) components. The PhotoYCC is optimized for the
    color photographic material, and provides a color gamut that is
    greater than the one that can currently be displayed.

    Warning: Converting RGB to YCC then back doesn't necessarily give the
    same result

    :param R: red value (0;255)
    :param G: green value (0;255)
    :param B: blue value (0;255)
    :return: YCC tuple (0;1)
    """
    rgb = [i / 255.0 for i in (R, G, B)]
    Y = (0.213 * rgb[0]) + (0.419 * rgb[1]) + (0.081 * rgb[2])
    C1 = (-0.131 * rgb[0]) - (0.256 * rgb[1]) + (0.387 * rgb[2]) + 0.612
    C2 = (0.373 * rgb[0]) - (0.312 * rgb[1]) - (0.061 * rgb[2]) + 0.537
    return Y, C1, C2
a70c694930a06e5bca42a11fb345b53c6b3065ca
157,303
def _do_get_features_list(featurestore_metadata):
    """
    Gets a list of all features in a featurestore

    Args:
        :featurestore_metadata: metadata of the featurestore

    Returns:
        A list of names of the features in this featurestore
    """
    features = []
    for fg in featurestore_metadata.featuregroups.values():
        features.extend(fg.features)
    features = list(map(lambda f: f.name, features))
    return features
d5d1b99aa77067dc22473d12fb8fec6312d75b53
691,030
def _get_tf_batch_norm_parameter_name(bn_counter):
    """Returns the name of the batch_norm layer for the given batch norm
    index.

    Args:
        bn_counter: Integer number denoting the batch norm op index.

    Returns:
        A String denoting the name of the batch norm layer.
    """
    if bn_counter != 0:
        return 'batch_normalization_%d' % bn_counter
    return 'batch_normalization'
2259cb01188354eaaeab082b6d00071326b9d9fa
95,618
def mandel(x, y, max_iters):
    """
    Given the real and imaginary parts of a complex number, determine if it
    is a candidate for membership in the Mandelbrot set given a fixed
    number of iterations.
    """
    c = complex(x, y)
    z = 0.0j
    for i in range(max_iters):
        z = z * z + c
        if (z.real * z.real + z.imag * z.imag) >= 4:
            return i, z
    return max_iters, z
988f34156365551631eb926da2e1d5e08faff4f0
668,473
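A usage sketch for mandel: the origin never escapes and so exhausts max_iters, while a point far outside the set escapes on the first iteration. The iteration budget of 50 is illustrative.

print(mandel(0.0, 0.0, 50)[0])  # 50
print(mandel(2.0, 2.0, 50)[0])  # 0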
def select_last_n_imports(stats_file, n=3):
    """
    Selects which independent full and partial imports should be plotted
    based on n.

    :param stats_file: pandas DataFrame with stats data.
    :param int n: number of independent imports to select.
    :return: List of import ids to be plotted according to selection
        criterion.
    """
    partial = []
    full = []
    if 'datetime' in stats_file and 'import_id' in stats_file \
            and 'Import_flag' in stats_file:
        df = (stats_file[['datetime', 'import_id', 'Import_flag']]
              .sort_values('datetime', ascending=False)
              .drop_duplicates(['import_id'], keep='first', inplace=False))
        full = df[df['Import_flag'] == 'full']
        full = full.iloc[:n, 1].tolist()
        partial = df[df['Import_flag'] == 'partial']
        partial = partial.iloc[:n, 1].tolist()
    return partial + full
39c3f2975ae333543a1e6ba239e4c10e61ba01c8
307,196
from typing import Sequence
from typing import Union
import shlex


def command_strings_to_lists(
    cmds: Sequence[Union[str, Sequence[str]]]
) -> Sequence[Sequence[str]]:
    """
    Convert any command strings in `cmds` to lists.

    Args:
        cmds: Commands - either strings or lists of arguments.

    Returns:
        A sequence of command argument sequences.
    """
    return [shlex.split(cmd) if isinstance(cmd, str) else cmd for cmd in cmds]
53e8977b89b2cfdc144111ac98ea8cf2de8d689a
446,960