Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def overlaps(s1, e1, s2, e2):
    """
    Check if two start/end ranges have overlap

    :param `s1`: range 1 start
    :type `s1`: int
    :param `e1`: range 1 end
    :type `e1`: int
    :param `s2`: range 2 start
    :type `s2`: int
    :param `e2`: range 2 end
    :type `e2`: int
    :return: True if ranges overlap
    :rtype: bool
    """
    s_cand = max(s1, s2)
    e_cand = min(e1, e2)
    return s_cand < e_cand
30656ce4bd685a64faf81efa59e4d1c694c299a0
31,571
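A minimal usage sketch for overlaps() above, with made-up ranges; ranges that merely touch do not count as overlapping because the comparison is strict.

# [0, 10) and [5, 15) overlap; [0, 5) and [5, 10) only touch at 5.
assert overlaps(0, 10, 5, 15)
assert not overlaps(0, 5, 5, 10)
assert not overlaps(0, 5, 7, 10)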
def compareConfigs(name, c1, c2, shortcut=True, rtol=1E-8, atol=1E-8, output=None):
    """
    Helper function for Config.compare; used to compare two Configs for equality.

    If the Configs contain RegistryFields or ConfigChoiceFields, unselected
    Configs will not be compared.

    Floating point comparisons are performed by :func:`numpy.allclose`;
    refer to that for details.

    Parameters
    ----------
    name : Name to use when reporting differences
    c1 : LHS config to compare
    c2 : RHS config to compare
    shortcut : If True, return as soon as an inequality is found.
    rtol : Relative tolerance for floating point comparisons.
    atol : Absolute tolerance for floating point comparisons.
    output : If not None, a callable that takes a string, used (possibly
        repeatedly) to report inequalities.
    """
    assert name is not None
    if c1 is None:
        if c2 is None:
            return True
        else:
            if output is not None:
                output("LHS is None for %s" % name)
            return False
    else:
        if c2 is None:
            if output is not None:
                output("RHS is None for %s" % name)
            return False
    if type(c1) != type(c2):
        if output is not None:
            output("Config types do not match for %s: %s != %s" % (
                name, type(c1), type(c2)))
        return False
    equal = True
    for field in c1._fields.values():
        result = field._compare(c1, c2, shortcut=shortcut,
                                rtol=rtol, atol=atol, output=output)
        if not result and shortcut:
            return False
        equal = equal and result
    return equal
463c94f8912924a933d64c3f6eeeb38e8df5ed80
31,572
def pyAttrName2XmlAttrName(key):
    """
    The @pyAttrName2XmlAttrName@ converts the Python XML attribute name
    @key@ to an appropriate XML attribute identifier.

    If the *key* is 'cssClass' then it is translated into 'class', and
    'cssId' is translated into 'id'. If an HTML5 attribute *data_xxxx*
    is used, then change that to *data-xxxx*.
    """
    if key == 'cssClass':
        key = 'class'
    elif key == 'cssId':
        key = 'id'
    elif key.startswith('data'):
        key = key.replace('_', '-')
    return key
7bafbef748960b1e5e34ee27d14a39778bb8ec43
31,573
def get_images_helper(request, images): """ Helper method for gathering an object's list of images and formatting them along with their corresponding types. Parameters: request : Request object from the serializer instance. images : Queryset of image objects connected to the Object Returns: List of Image objects in JSON format. """ image_list = [] for image in images: image_dict = { "image_url": f"{request.scheme}://{request.get_host()}{image.image.url}", "image_type": image.type.type, } image_list.append(image_dict) return image_list
97b798ca406e63dcc00b9eefdd27edc9fcd9aef9
31,574
def find_authorization_in_db(user_id, users_collection):
    """Queries the db to find authorization of the given user."""
    first_user = users_collection.find_one({'user_id': user_id})
    if first_user is None:  # user not found
        return False
    authorized = first_user.get('is_organizer')
    return bool(authorized)
8834e83abf638e8a98c87fcd3f86b03e943ab5ad
31,576
def celciusToFarenheit(celcius):
    """ Convert a temperature in Celsius to Fahrenheit """
    if celcius is None:
        return None
    else:
        return float(celcius) * 1.8 + 32.0
ccf57c2d376de6c7b61b2ac48e6c22348dd83ee2
31,579
def decimal2dms(decimal_degrees): """ Converts a floating point number of degrees to the equivalent number of degrees, minutes, and seconds, which are returned as a 3-element list. If 'decimal_degrees' is negative, only degrees (1st element of returned list) will be negative, minutes (2nd element) and seconds (3rd element) will always be positive. Example: >>> decimal2dms(121.135) [121, 8, 6.0000000000184173] >>> decimal2dms(-121.135) [-121, 8, 6.0000000000184173] """ degrees = int(decimal_degrees) decimal_minutes = abs(decimal_degrees - degrees) * 60 minutes = int(decimal_minutes) seconds = (decimal_minutes - minutes) * 60 return [degrees, minutes, seconds]
14586909f670e4dd4a8173925eea4dc330ed6adc
31,580
def frame_ranges_to_string(frames): """ Take a list of numbers and make a string representation of the ranges. >>> frame_ranges_to_string([1, 2, 3, 6, 7, 8, 9, 13, 15]) '[1-3, 6-9, 13, 15]' :param list frames: Sorted list of frame numbers. :return: String of broken frame ranges (i.e '[10-14, 16, 20-25]'). """ if not frames: return '[]' if not isinstance(frames, list): frames = list(frames) frames.sort() # Make list of lists for each consecutive range ranges = [[frames.pop(0)]] current_range_index = 0 # index of current range for x in frames: if x - 1 == ranges[current_range_index][-1]: ranges[current_range_index].append(x) else: current_range_index += 1 ranges.append([x]) range_strings = [] for x in ranges: if len(x) > 1: range_strings.append('-'.join([str(x[0]), str(x[-1])])) else: range_strings.append(str(x[0])) complete_string = '[' + ', '.join(range_strings) + ']' return complete_string
9fdd3f05812a34144102f3727cfe49f37a4c0f60
31,581
import zlib


def bytes_to_zlib(bytes_data):
    """ Compress a bytes array """
    return zlib.compress(bytes_data)
d9e9583a35d52ba1f69972bcc2fcaa295348a4ec
31,582
def get_short_uuid(uuid):
    """get the first block of a 4-word UUID to use as a short identifier"""
    full_uuid = str(uuid)
    return full_uuid.split('-', 1)[0]
727847113068d7bd48356047d98b12d00d9c811a
31,583
def is_go_source(view, point=None):
    """Return True if the given view contains Go source code.

    :param sublime.View view: View containing the code to be formatted.
    :returns: bool
    """
    if point is None:
        point = view.sel()[0].begin()
    return view.score_selector(point, 'source.go') > 0
f666309988d754f1c352b4febf92e5e443f17384
31,586
def is_negatively_charged_oxygen(atom_name, resname): """ Determine whether the oxygen atom of interest is either negatively charged (usually a carboxyl group or sulfate/phosphate), or has a lone pair (and no hydrogen atom) that would similarly repel anions. Parameters ----------- atom_name : str resname : str Returns ------- bool """ if ((atom_name in ["OD1","OD2","OE1","OE2"]) and (resname in ["GLU","ASP","GLN","ASN"])): return True elif ((atom_name == "O") and (not resname in ["HOH","WAT"])): return True # sort of - the lone pair acts this way elif ((len(atom_name) == 3) and (atom_name[0:2] in ["O1","O2","O3"]) and (atom_name[2] in ["A","B","G"])): return True elif (resname in ["SO4","PO4"]): return True return False
2e3e4b3aab87da8bf44708ca28ee11b3d8fb5e1e
31,588
import json
import errno


def write_config_file(path, config):
    """Writes the specified configuration to the specified file."""
    contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
    try:
        with open(path, 'w') as f:
            f.write(contents)
        return True
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
        return False
f72720aa3c96830b55726cfc961c66b5d8975802
31,589
def create_reference_lookup_df(df):
    """Return a dataframe indexed by task_id for looking up the reference."""
    df = df[df['tag'] == 'reference']
    df.drop_duplicates(subset=['task_id'], inplace=True)
    df.set_index('task_id', verify_integrity=True, inplace=True)
    return df
3acdef32286af7ed6f596e0b3668fc441b775a11
31,596
def action_traffic_permitted(env, state, action): """ Return True if action is permitted in terms of firewall traffic """ if action.is_scan(): return True network = env.network service = action.service dest = action.target[0] # add 0 since 0 = internet compromised_subnets = set([0]) for m in env.address_space: if state.machine_compromised(m): compromised_subnets.add(m[0]) for src in compromised_subnets: if network.traffic_permitted(src, dest, service): return True return False
dcce59e1b9d48bd0c35a65f7e016a0477d86e2f7
31,597
def find_multilines(config_text): """ Parses condor config file looking for multiline entries Args: config_text: string, contents of a condor configuration file Returns: multi: dictionary. keys are first line of multi line config values are the rest of the multi line config keeping original formatting see parse_configs_for_multis() below for example muli dict """ multi = {} tag = None dict_key = None for line in config_text: parts = line.split() if tag is None: for idx in range(len(parts)): if parts[idx].startswith("@="): tag = parts[idx].replace("=", "").strip() dict_key = parts[idx - 1].strip() multi[dict_key] = "".join(parts[idx:]) + "\n" else: if "#" not in line: multi[dict_key] += line for idx in range(len(parts)): if tag in parts[idx]: tag = None return multi
551785ff3188b3194f5e3795308376c86ac42019
31,599
from typing import List


def get_effective_lindbladian_object_names() -> List[str]:
    """Return the list of valid effective-lindbladian-related object names."""
    names = []
    names.append("hamiltonian_vec")
    names.append("hamiltonian_mat")
    names.append("effective_lindbladian_mat")
    names.append("effective_lindbladian")
    return names
7c5f63e2ab48fd9cb6e0f53fcab895261311e5b5
31,601
import re


def tokenize(library):
    """
    Function that takes a string of text and tokenizes it.
    The text is returned tokenized.
    """
    # Make sure all words are written with lower cases
    library = library.lower()
    # Remove non-alphabetic characters
    tokenizer = re.compile(r"\W+")
    # tokenize
    tokenized_library = tokenizer.split(library)
    return tokenized_library
b638cda4945aee3ed377614968998314121735cf
31,605
def sign_string(value):
    """
    Return a string representing the sign of a numerical value, "+" for
    positive, "-" for negative, and "+/-" for 0.

    :param value: A numerical value.
    :return: The sign of that value as a string.
    """
    return "+" if value > 0 else "-" if value < 0 else "+/-"
f305cd9199e174520ea675c3a46f340904e410e6
31,610
import requests from bs4 import BeautifulSoup def url2bs(url: str): """ HTTP GET response text with the url to a BeautifulSoup object parsed with lxml. :param url: URL. :return: BeautifulSoup object. """ try: response = requests.get(url) result = BeautifulSoup(response.text, 'lxml') except requests.exceptions.RequestException as e: print('Error in HTTP GET from the url:\n\t' + url + '\nERROR MESSAGE:') print(e) result = None return result
374a94bb3c9970a340fccce4b596ae15ad60022c
31,611
def agenda_width_scale(filter_categories, spacer_scale):
    """Compute the width scale for the agenda filter button table

    Button columns are spacer_scale times as wide as the spacer columns
    between categories. There is one fewer spacer column than categories.
    """
    category_count = len(filter_categories)
    column_count = sum([len(cat) for cat in filter_categories])
    # Refuse to return less than 1 to avoid width calculation problems.
    return max(spacer_scale * column_count + category_count - 1, 1)
7c26fdb708ddf622593388af0377fa3957e3a753
31,612
from typing import List


def encode(obs: List[int], spaces: List[int]) -> int:
    """
    Encode an observation from a list of gym.Discrete spaces in one number.

    :param obs: an observation belonging to the state space (a list of
        gym.Discrete spaces)
    :param spaces: the list of gym.Discrete spaces from where the observation
        is observed.
    :return: the encoded observation.
    """
    assert len(obs) == len(spaces)
    sizes = spaces
    result = obs[0]
    shift = sizes[0]
    for o, size in list(zip(obs, sizes))[1:]:
        result += o * shift
        shift *= size
    return result
32e2f6c1b43e8567f113905d8fc2452dd7058750
31,614
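A small worked example for encode() above, using made-up observation and space sizes; each component is weighted by the product of the sizes of the spaces before it.

# obs [1, 0, 2] over spaces of sizes [2, 3, 4]: 1 + 0*2 + 2*(2*3) = 13
assert encode([1, 0, 2], [2, 3, 4]) == 13
assert encode([0, 0, 0], [2, 3, 4]) == 0
assert encode([1, 2, 3], [2, 3, 4]) == 1 + 2 * 2 + 3 * 6  # largest code, 23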
import re


def num_in_str(lst: list) -> list:
    """Create a list of strings that have numbers in it."""
    return [i for i in lst if re.search(r"\d", i)]
a9c94acbf5812e1c5ecfc5bb6b08d325882d2806
31,617
import re def clean_genre(df): """ Clean genre by reducing multiple genres to singular genre. Parameters ---------- df : pd.DataFrame A data frame with a column of movie genres. Returns ------- df : pd.DataFrame A modified data frame with grouped genres. """ # Convert the movie Social to Drama df["GenreFix"] = df["Genre"].apply(lambda x: re.sub("social", "drama", x)) # Take the first genre from a tuple (A,B). E.g., Comedy, Romance => Comedy df["GenreFix"] = df["GenreFix"].apply(lambda x: re.sub("(?<=[a-z]),\s*[a-z]+", "", x)) # Take the first genre from A-B. E.g., Comedy-romance => Comedy df["GenreFix"] = df["GenreFix"].apply(lambda x: re.sub("(?<=[a-z])\s*-\s*[a-z]+", "", x)) # Take the first genre A/B. E.g., Comedy/Romance => Comedy df["GenreFix"] = df["GenreFix"].apply(lambda x: re.sub("(?<=[a-z])\s*/\s*[a-z]+", "", x)) # Categorize as war movies df["GenreFix"] = df["GenreFix"].apply(lambda x: "war" if x.find("ww") != -1 or \ x.find("world war") != -1 or \ x.find("war") != -1 else x) # Categorize as animations df["GenreFix"] = df["GenreFix"].apply(lambda x: "animation" if x.find("anim") != -1 \ else x) # Categorize as crime movies df["GenreFix"] = df["GenreFix"].apply(lambda x: "crime" if x.find("crime") != -1 \ else x) # Remove the word "film" df["GenreFix"] = df["GenreFix"].apply(lambda x: re.sub("film", " ", x)) # Historical drama => drama etc. df["GenreFix"] = df["GenreFix"].apply(lambda x: x.split()[-1] if x != " " else x) # Remove commas and dots alongside their whitespaces df["GenreFix"] = df["GenreFix"].apply(lambda x: re.sub(",|\.", "", x.strip())) return df
5be5331fc23b6e2ad8c1c56e9a21069ca06842a6
31,618
import base64


def base64解码(data):
    """ Decode a base64 string into bytes """
    return base64.b64decode(data)
6b52b555b29595b1cbe4a9510fd8b2c0902511ae
31,619
def trackSpeeds_to_bodyFixed(right_track_speed, left_track_speed, track_width):
    """
    Function maps speeds for individual skid-steering tracks to the
    body-fixed velocity and angular velocity

    Arguments:
        right_track_speed - speed of the right track
        left_track_speed - speed of the left track
        track_width - track width of the vehicle (distance between the two tracks)

    Returns:
        velocity (m/s)
        angular_velocity (rad/s)
    """
    velocity = (right_track_speed + left_track_speed) / 2
    angular_velocity = (right_track_speed - left_track_speed) / track_width
    return velocity, angular_velocity
a2b720f100776ef696d3479a28f35811ff75966d
31,620
def chunk(lst, n_chunks):
    """
    https://stackoverflow.com/questions/2130016/
    splitting-a-list-into-n-parts-of-approximately-equal-length

    Parameters
    ----------
    lst : list
    n_chunks : int

    Returns
    -------
    list
        chunked list
    """
    k, m = divmod(len(lst), n_chunks)
    return [lst[i * k + min(i, m):(i + 1) * k + min(i + 1, m)]
            for i in range(n_chunks)]
418fa1599676821bb897efd139d3c17c5facebb8
31,622
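A quick sketch of chunk() above on a made-up list; the part sizes differ by at most one.

parts = chunk(list(range(10)), 3)
assert parts == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]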
def convert_lon(lon):
    """Convert a single longitude value to a floating point number.

    Input longitude can be string or float and in -135, 135W, 225 or 225E format.
    Output longitude lies in the range 0 <= lon <= 360.
    """
    lon = str(lon)
    if 'W' in lon:
        deg_east = 360 - float(lon[:-1])
    elif 'E' in lon:
        deg_east = float(lon[:-1])
    elif float(lon) < 0.0:
        deg_east = 360 + float(lon)
    else:
        deg_east = float(lon)
    assert 0 <= deg_east <= 360, "Longitude must lie between 0-360E"
    return deg_east
b86343766209d81957d74b8ba96ebc69f462289a
31,623
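A short illustration of convert_lon() above with assumed inputs; all three spellings of the same longitude map to 225 degrees east.

assert convert_lon('135W') == 225.0
assert convert_lon('225E') == 225.0
assert convert_lon(-135) == 225.0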
def crop_from_quad(quad):
    """Return (left, top, right, bottom) from a list of 4 corners."""
    leftmost = min([corner[0] for corner in quad])
    topmost = min([corner[1] for corner in quad])
    rightmost = max([corner[0] for corner in quad])
    bottommost = max([corner[1] for corner in quad])
    return (leftmost, topmost, rightmost, bottommost)
eeb2a2041f52fd342d5f121ecf4df5cf42f07ac2
31,629
import textwrap def reflow(text, width, protect): """Reflow the given `text` with line width `width` Return unchanged `text` if `protect` is True. """ if protect: return text else: dedented_text = textwrap.dedent(text.strip()) wrapped_text = textwrap.fill( dedented_text, width=width, break_on_hyphens=False, break_long_words=False) return wrapped_text
dedfb019188d4963cb0eaec185d46e60b6a9ab38
31,633
def array(fr): """ Return a Numpy array for input frame or Numpy array. Parameters ---------- fr : Frame, or 2D ndarray Input frame or Numpy array Returns ------- data : ndarray Data array """ try: return fr.get_data() except AttributeError: return fr
4268494b205e484863c19e5bad72e69972ccb680
31,636
import getopt def get_command_line_args(argv): """ Gets the command line arguments which are: Returns ------- pm_model (str) - The PowerModels.jl power model e.g. "DCPPowerModel" pm_solver (str) - The solver to use, e.g. "juniper", "gurobi" grid_name (str) - optional if you only want to calculate one grid, e.g. "rts" lc (str) - the loadcases to calculate. Either "ts" or "scaled" kind (str) - the optimizations to run, e.g. "tnep,ots,repl". Can only be a part of these like "tnep,repl" """ pm_model, pm_solver, grid_name = None, None, None kind = "tnep,repl,ots" lc = "scaled" try: opts, args = getopt.getopt(argv, ":m:s:g:k:", ["model=", "solver=", "grid=", "kind="]) except getopt.GetoptError: raise getopt.GetoptError("error reading input") for opt, arg in opts: if opt in ("-m", "--model"): pm_model = arg if opt in ("-s", "--solver"): pm_solver = arg if opt in ("-g", "--grid"): grid_name = arg if opt in ("-k", "--kind"): kind = arg if pm_solver is None: UserWarning("pm_solver is None. You must specify a solver with '--solver='") if pm_model is None: UserWarning("pm_model is None. You must specify a model with '--model='") return pm_model, pm_solver, grid_name, lc, kind
126de7ba0aa6267ef9052b70ca4273ef1af97601
31,642
import collections import csv def read_dataset(dataset_path): """ Returns a tuple of three dicts. The gloss data is a {gloss: [[lang, trans, segments, cog_class],]} dict. The concepticon data is a {gloss: global_id} dict. The ISO codes data is a {lang: iso_code} dict. """ data = collections.defaultdict(list) gloss_ids = {} iso_codes = {} with open(dataset_path, newline='', encoding='utf-8') as f: reader = csv.DictReader(f, delimiter='\t') for row in reader: li = [ row['language'], row['transcription'], row['tokens'], row['cognate_class']] if li not in data[row['gloss']]: data[row['gloss']].append(li) gloss_ids[row['gloss']] = row['global_id'] iso_codes[row['language']] = row['iso_code'] return dict(data), gloss_ids, iso_codes
136e0ad25f798f870d975cf44c69d3b96aacdd43
31,643
def polevl(x, coefs, n):
    """
    Evaluate polynomial of degree n.

    :param x: polynomial variable
    :type x: float
    :param coefs: polynomial coefficients
    :type coefs: list
    :param n: degree
    :type n: int
    :return: result as float
    """
    ans = 0
    power = len(coefs) - 1
    for coef in coefs:
        ans += coef * x**power
        power -= 1
    return ans
f5ffb93d252a780fd02ac791afe8c0a0f61fb9a4
31,647
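A worked evaluation for polevl() above; note that the degree is effectively taken from len(coefs), so n is informational here.

# 3*x**2 + 0*x + 1 at x = 2 evaluates to 13
assert polevl(2.0, [3, 0, 1], 2) == 13.0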
def reset_key_secret(api, configuration, api_version, api_exception, key_id): """ Resets the secret of an API key. :param api: The Deep Security API modules. :param configuration: Configuration object to pass to the api client. :param api_version: The version of the API to use. :param api_exception: The Deep Security API exception module. :param key_id: The ID of the key. :return: An APIKeysApi object that contains the secret of the key. """ try: # Reset the key api_keys_api = api.APIKeysApi(api.ApiClient(configuration)) return api_keys_api.replace_api_secret_key(key_id, api_version) except api_exception as e: return "Exception: " + str(e)
99ca4d56e07a4f5f9fb8fa3b0fb2c16f2cdcac32
31,648
import requests


def url_ok(url):
    """
    Checks that a given URL is reachable.

    :param url: A URL
    :rtype: bool
    """
    return requests.head(url).ok
f4aa22e55a6c05948488fcb16098c4ed76f9c0d6
31,649
from typing import Union from typing import Any from typing import Sequence def supports_iteration(value: Union[Any, Sequence[Any]]) -> bool: """Returns ``True`` if the ``value`` supports iterations.""" try: for _ in value: return True except TypeError: pass return False
b74b0ffc85fdfdabfdd1fb5f352c23632966eb97
31,650
import requests def fetch_idol(idx): """Fetch data for a single idol.""" r = requests.post('https://www.produce101event.com/entry_info.php', data={'idx': idx}) r.raise_for_status() idol_data = r.json() if idol_data['result'] == 'success' and idol_data['name']: return idol_data else: return None
7378ba567336df0240116c4355c2fd8cf56e52d7
31,653
from dateutil import tz def local_time(dt): """ Convert a datetime.datetime object to a timezone-aware datetime.datetime in the users local timezone. :param dt: A datetime.datetime object. :returns: A timezone-aware datetime.datetime object in the users local timezone. """ if dt is None: return None return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
73d66c916fe7c849184181faf781715535da3166
31,654
def from_datastore(entity): """Formats data from datastore Datastore typically returns: [Entity{key: (kind, id), prop: val, ...}] This returns: [ name, description, pageCount, author, review ] """ if not entity: return None if isinstance(entity, list): entity = entity.pop() return [entity['name'],entity['description'],entity['pageCount'],entity['author'],entity['review']]
45b7b4690258b380bf7c7e69c631cf4f520559e2
31,667
import math


def round_sample(input_dataframe, frac=0.1, min_samples=1):
    """Sample X ensuring at least min samples are selected."""
    num_samples = max(min_samples, math.floor(len(input_dataframe) * frac))
    return input_dataframe.sample(num_samples)
1e6279a9e069929bdf9fef931b00d9d99a9b6f0c
31,668
import time def time_sorting(sort_fxn, data): """ Record the run times for each run of a sorting function :param sort_fxn: The sort function of form sort_fxn(vec) to call :param data: A list of vectors to sort :returns: A list of run times to sort each element in data """ times = [time.process_time()] for vec in data: sort_fxn(vec) times.append(time.process_time()) return [t1 - t0 for t0, t1 in zip(times[:-1], times[1:])]
5d7f2e83ec2a74202264447e24fb43dbd03ab83c
31,671
def caffe_compute(transformed_image, caffe_net=None, output_layers=None):
    """
    Run a Caffe network on an input image after preprocessing it to prepare
    it for Caffe.

    :param transformed_image: image (already preprocessed) to be input into Caffe.
    :param caffe.Net caffe_net: A Caffe network with which to process the
        image after preprocessing.
    :param list output_layers: A list of the names of the layers from caffe_net
        whose outputs are to be returned. If this is None, the default outputs
        for the network are returned.
    :return: Returns the requested outputs from the Caffe net.
    """
    if caffe_net is not None:
        # Grab the default output names if none were requested specifically.
        if output_layers is None:
            output_layers = caffe_net.outputs
        input_name = caffe_net.inputs[0]
        all_outputs = caffe_net.forward_all(blobs=output_layers,
                                            **{input_name: transformed_image})
        outputs = all_outputs[output_layers[0]][0].astype(float)
        return outputs
    else:
        return []
09d6581a5db5e184092742ae4296cc3310122507
31,676
def kstairs(n, k): """Give the number of ways to take n steps, given that at each step, you can choose to take 1, 2, ... k-2, k-1 or k steps. >>> kstairs(5, 2) 8 >>> kstairs(5, 5) 16 >>> kstairs(10, 5) 464 """ if n == 0: return 0 if n <= k: return 2**(n-1) return sum([kstairs(n - i, k) for i in range(1, k + 1)])
07484fe1186967ba892d3d4e3284962d93a36b91
31,678
def bin_names_to_coords_filepath(query_bin, ref_bin, results_dir):
    """
    prepare a file path for results

    :param query_bin: bin number 1 name/filepath (reference sequence)
    :param ref_bin: bin number 2 name/filepath. (query sequence)
    :return: string like Acidovora-69x_Ga0081644_to_Acidovorax-79_Ga0081651
    """
    return results_dir + '/' + query_bin + "_to_" + ref_bin
946b788acaf776322cc02aaa3b6740f198b61b4d
31,685
from typing import Tuple
import re


def parse_hp_condition(hp_condition: str) -> Tuple[int, int, str]:
    """
    Parse a string describing HP and status condition.

    :param hp_condition: '50/200' (current HP=50, max HP=200, no status
        condition) or '50/200 psn' (when a status condition is present)
    :return: current HP, max HP, status condition ('', 'psn' (poison),
        'tox' (bad poison), 'par', 'brn', 'slp', 'frz', 'fnt' (fainted))
    """
    if hp_condition == '0 fnt':
        # When fainted, the HP is displayed as 0;
        # for convenience, return 100 as the max HP.
        return 0, 100, 'fnt'
    m = re.match('^(\\d+)/(\\d+)(?: (psn|tox|par|brn|slp|frz|fnt)|)?$', hp_condition)
    assert m is not None, f"HP_CONDITION '{hp_condition}' cannot be parsed."
    # m[3] is None when there is no status condition
    return int(m[1]), int(m[2]), m[3] or ''
cbe9ec75efbae1b144836cc8129fdc3256e0dccf
31,686
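A usage sketch for parse_hp_condition() above with assumed battle-log strings.

assert parse_hp_condition('50/200 psn') == (50, 200, 'psn')
assert parse_hp_condition('150/200') == (150, 200, '')
assert parse_hp_condition('0 fnt') == (0, 100, 'fnt')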
from typing import List
from typing import Dict


def dictify(data, keys: List, val: Dict) -> Dict:
    """Turns a flat :class:`NodeTree` dictionary into a nested dictionary.

    Helper function to generate nested dictionary from list of keys and value.
    Calls itself recursively.

    Arguments:
        data (dict): A dictionary to add value to with keys.
        keys (list): A list of keys to traverse along tree and place value.
        val (dict): A value for innermost layer of nested dict.
    """
    key = keys[0]
    key = int(key) if key.isdecimal() else key.lower()
    if len(keys) == 1:
        data[key] = val
    else:
        if key in data.keys():
            data[key] = dictify(data[key], keys[1:], val)
        else:
            data[key] = dictify({}, keys[1:], val)
    return data
e7c0111f67b7755a6e28d6264d8f7b300e94273c
31,690
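A small sketch of dictify() above on a made-up node path; decimal path segments become integer keys and the rest are lower-cased.

tree = dictify({}, ["DEV0", "CHANNELS", "0", "NAME"], {"value": "sig"})
assert tree == {"dev0": {"channels": {0: {"name": {"value": "sig"}}}}}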
def normalize(x, min_x, max_x):
    """
    Goal of this function is to normalize passed data given the min and max
    of the data to fit in 0 - 1

    :param x: Array Like structure: The data to be normalized
    :param min_x: Float like: The minimum of the data set
    :param max_x: Float like: The maximum of the data set
    :return: Array: The normalized data in an array with values ranging from 0 to 1
    """
    return (x - min_x) / (max_x - min_x)
4fbbaf06017cedd14eca3e7f0639d9002b231be4
31,691
import textwrap
import html


def pretty_text(data):
    """Unescape the html characters from the data & wrap it"""
    if data is not None:
        return textwrap.fill(html.unescape(html.unescape(data)), width=60)
    else:
        return ""
f5baf58394b8578b26ad3cb95aff26867bc5f748
31,692
def count_recursive(contents_per_bag, bag_list):
    """
    Count number of nested bags from the given list

    :param contents_per_bag: per-bag mapping
    :param bag_list: list of bags to inspect
    :return: number of bags
    """
    nested_bag_qty = 0
    for bag in bag_list:
        nested_bag_qty += 1
        nested_bag_qty += count_recursive(contents_per_bag, contents_per_bag[bag])
    return nested_bag_qty
d48e317ee73bf0f18021d27bc9857a3ad898759c
31,697
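A worked example for count_recursive() above using a made-up bag mapping; each bag counts itself plus everything nested inside it.

contents_per_bag = {
    "shiny gold": ["dark red", "dark red"],
    "dark red": ["dark orange"],
    "dark orange": [],
}
assert count_recursive(contents_per_bag, contents_per_bag["shiny gold"]) == 4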
def sort_list(doc_name: str, *vart):
    """Checks if a string contains the identifiers in vart.

    If it does return the initial string, if not return None."""
    for var in vart:
        if var.lower() in doc_name.lower() and "(old" not in doc_name and "~" not in doc_name:
            return doc_name
92caeaa253daa4dc1b9fac5acda4e38963a984bf
31,699
def from_homogeneous(vectors, at_infinity):
    """Converts vectors from homogeneous (from point or direction)."""
    if at_infinity:
        return vectors[:-1, ...]
    else:
        divided = vectors / vectors[-1, ...]
        return divided[:-1, ...]
002c7fe26fa5100f4b21a373b8506e4960081b4c
31,700
def rotl(x, count):
    """Rotates the 64-bit value `x` to the left by `count` bits."""
    ret = 0
    for i in range(64):
        bit = (x >> i) & 1
        ret |= bit << ((i + count) % 64)
    return ret
6bba2bf109f9ddd0f9b4c202c3192e14a0cd625f
31,701
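A few spot checks for rotl() above; the top bit wraps around to bit 0, and a rotation by 8 moves the high byte to the low end.

assert rotl(1, 1) == 2
assert rotl(0x8000000000000000, 1) == 1
assert rotl(0x0123456789ABCDEF, 8) == 0x23456789ABCDEF01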
from typing import Union import re def _header_line(line: str) -> Union[None, tuple]: """ If it is detected as header line, returns its header level and caption. Otherwise returns `None`. """ m = re.match(r"^(#+)(.+)", line) if m: level = len(m.group(1)) caption = m.group(2) return level, caption return None
0704ff88623eb66caef6a0473047f17a152701f4
31,702
from typing import Optional import re def get_nb_query_param(nb_url_search: str, param: str) -> Optional[str]: """ Get a url query parameter from the search string. Parameters ---------- nb_url_search: str The URL search string param: str The parameter name to search for Returns ------- Optional[str] value of the query string parameter or None if not found. """ qs_regex = r"[\\?&]{param}=(?P<val>[^&#]*)".format(param=param) query_string_match = re.search(qs_regex, nb_url_search) if query_string_match: return query_string_match["val"] return None
28b36c630879a16dbca38a9739fe92b1d54aa5ca
31,704
def MinMaxAvg(data): """ Given a list of values, the MIN/MAX/AVG value is returned in a Dictionary :param data: List of data of the same kind :type data: int[] or float[] :returns: a dictionary { 'min':min,'max':max,'avg':average } :rtype: dictionary .. seealso:: Stats """ min_val = data[0] max_val = data[0] acc_val = 0 count = 0 for d in data: if (d < min_val ): min_val= d if (d > max_val ): max_val= d acc_val = acc_val + d count = count+1 return { 'min':min_val,'max':max_val,'avg':acc_val/count }
0373da1ab3dfa36453669d0f6f33b8541e33eb52
31,709
from pathlib import Path def read_html_file(file_path): """Reads the contents of an HTML file.""" html_file = Path(file_path) if html_file.exists() and html_file.is_file(): with html_file.open() as temp_file: contents = temp_file.read() return contents return None
260b56d895a42696ff04badf62d58268b4f24c0a
31,713
def rsa_encrypt(data: int, e: int, n: int) -> int: """ encrypt data with the rsa cryptosystem (rsa_fernet_encrypt is more secure and supports more data) :param data: the plaintext :param e: public key (e) of the other person :param n: public key (n) of the other person :return: the ciphertext """ if data > n: raise OverflowError('') return pow(data, e, n)
58f39297a594a1ed224b0ea09b81e7f988c2fe54
31,716
from typing import List


def is_armstrong(number: int) -> bool:
    """Return True if the given number is an armstrong number."""
    digits = list(map(int, str(number)))
    exponent = len(digits)
    powers: List[int] = [base ** exponent for base in digits]
    return sum(powers) == number
2c84a53e439368a403f75066e4fbdca119d68f2f
31,718
def quote_identifier(identifier, sql_mode=""):
    """Quote the given identifier with backticks, converting backticks (`)
    in the identifier name with the correct escape sequence (``) unless the
    identifier is quoted (") as in sql_mode set to ANSI_QUOTES.

    Args:
        identifier (str): Identifier to quote.

    Returns:
        str: Returns string with the identifier quoted with backticks.
    """
    if sql_mode == "ANSI_QUOTES":
        return '"{0}"'.format(identifier.replace('"', '""'))
    return "`{0}`".format(identifier.replace("`", "``"))
c032e6e597795b74b3a25c8529074684fc4ab619
31,719
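A usage sketch for quote_identifier() above with assumed identifier names; backtick quoting is the default, and double quotes are used under ANSI_QUOTES.

assert quote_identifier("my`table") == "`my``table`"
assert quote_identifier('my"table', sql_mode="ANSI_QUOTES") == '"my""table"'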
from typing import Counter
import re


def build_wordlist(input_file):
    """Build a wordlist Counter from lines of the corpus file"""
    wordlist = Counter()
    for line in input_file:
        words = re.findall(r'\w+', line)
        wordlist.update(words)
    return wordlist
575a6fb872750dc83ac8b6f8b66f4b779962b71d
31,720
def check_max_amplitude(st, min=5, max=2e6): """ Checks that the maximum amplitude of the traces in the stream are ALL within a defined range. Only applied to counts/raw data. Args: st (obspy.core.stream.Stream): Stream of data. min (float): Minimum amplitude for the acceptable range. Default is 5. max (float): Maximum amplitude for the acceptable range. Default is 2e6. Returns: Stream that has been checked for maximum amplitude criteria. """ if not st.passed: return st for tr in st: # Only perform amplitude/clipping check if data has not been converted # to physical units if 'remove_response' not in tr.getProvenanceKeys(): if (abs(tr.max()) < float(min) or abs(tr.max()) > float(max)): tr.fail('Failed max amplitude check.') return st
3043ac4e29904779578c52dfb1c7b79f1cf13ad1
31,721
def _calc_corr(dbal, benchmark_dbal, window):
    """
    Calculate the rolling correlation between two returns.

    Parameters
    ----------
    dbal : pd.Series
        Strategy daily closing balance indexed by date.
    benchmark_dbal : pd.Series
        Benchmark daily closing balance indexed by date.
    window : int
        Size of the moving window. This is the number of observations
        used for calculating the statistic.

    Returns
    -------
    corr : pd.DataFrame
        Window size rolling correlation between `dbal` and `benchmark_dbal`.
    """
    ret = dbal.pct_change()
    benchmark_ret = benchmark_dbal.pct_change()
    corr = ret.rolling(window).corr(benchmark_ret)
    return corr
4ae0ec7184774a6500eecafa91467daee7800ca1
31,724
def iben_tutukov1984(history, al=1): """ CE formalism from `Iben & Tutukov 1984, ApJ, 284, 719 <https://ui.adsabs.harvard.edu/abs/1984ApJ...284..719I/abstract>`_ Required history parameters: - star_1_mass - star_2_mass - he_core_mass - binary_separation :param history: ndarray with model parameters :param al: alpha CE, the efficiency parameter for the CE formalism :return: final separation, final primary mass """ M1 = history['star_1_mass'][-1] M2 = history['star_2_mass'][-1] Mc = history['he_core_mass'][-1] a = history['binary_separation'][-1] af = al * (Mc * M2) / (M1 ** 2) * a return af, Mc
6eef41463eef83668902c74f748258a39e8b919f
31,725
def _count_words(text):
    """
    Count words in a piece of text.
    """
    if isinstance(text, (list, tuple)):
        text = "\n".join(text)
    return len(text.split()) if text else 0
473b91262f27400b8ae7c95a819eb35288cb1dc4
31,727
def parse_monitors(monitors): """ Given a list of dictionaries, returns a list of hosts that can be used in an ansible inventory. These host lines can include host variables as well. For example, monitors in this format:: [ {"host": "mon0.host", "interface": "eth0"}, {"host": "mon1.host", "interface": "enp0s8"}, ] Would return the following:: ["mon0.host monitor_interface=eth0", "mon1.host monitor_interface=enp0s8"] Because the API allows for both ``interface`` or ``address`` this utility will look for both. Ideally, only one should be defined, but it is up to the client to ensure that the one that is needed is passed to the API. """ hosts = [] for mon in monitors: host = [] host.append(mon["host"]) # This is an 'either or' situation. The schema engine does not allow us # to play with situations that might have one key or the other. That # problem gets solved here by trying to use monitor_interface first and # then falling back to its address if defined. try: host.append("monitor_interface=%s" % mon['interface']) except KeyError: try: host.append("monitor_address=%s" % mon['address']) except KeyError: # do not append monitor_* and just use the host pass hosts.append(" ".join(host)) return hosts
5fc05ca5713bb188c022b0f070bf63f97162174f
31,729
def jump_stats(previous_jumps, chute_altitude):
    """Compare altitude when chute opened with previous successful jumps.

    Return the number of previous jumps and the number of times the
    current jump is better.
    """
    n_previous_jumps = len(previous_jumps)
    n_better = sum(1 for pj in previous_jumps if chute_altitude < pj)
    return n_previous_jumps, n_better
f9b92c1355ca45b4bb8be77c0e446bc71cdae7fc
31,733
from operator import xor def detect_secstruct_clash(i, j, secstruct): """ Detect if an EC pair (i, j) is geometrically impossible given a predicted secondary structure Based on direct port of the logic implemented in choose_CNS_constraint_set.m from original pipeline, lines 351-407. Use secstruct_clashes() to annotate an entire table of ECs. Parameters ---------- i : int Index of first position j : int Index of second position secstruct : dict Mapping from position (int) to secondary structure ("H", "E", "C") Returns ------- clashes : bool True if (i, j) clashes with secondary structure """ # extract a secondary structure substring # start and end are inclusive def _get_range(start, end): return "".join( [secstruct[pos] for pos in range(start, end + 1)] ) def _all_equal(string, char): return string == len(string) * char # get bigger and smaller of the two positions b = max(i, j) s = min(i, j) # if pair too distant in primary sequence, do # not consider for clash if b - s >= 15: return False # get secondary structure in range between pairs secstruct_string = _get_range(s, b) # part 1: check for clashes based on alpha helices # first check for helix between them, or both in a helix # (or either one directly next to helix) if _all_equal(_get_range(s + 1, b - 1), "H"): return True # of if just one of them is in a helix elif xor(secstruct[s] == "H", secstruct[b] == "H"): h2 = "H" * (b - s - 1) h3 = "H" * (b - s - 2) if h2 in secstruct_string: if b - s > 6: return True elif h3 in secstruct_string: if b - s > 11: return True # part 2: check for clashes based on beta strands if _all_equal(_get_range(s + 1, b - 1), "E"): return True elif _all_equal(_get_range(s + 2, b - 2), "E"): if b - s > 8: return True if xor(secstruct[s] == "E", secstruct[b] == "E"): e2 = "E" * (b - s - 1) e3 = "E" * (b - s - 2) e4 = "E" * (b - s - 3) if e2 in secstruct_string: return True elif e3 in secstruct_string: return True elif e4 in secstruct_string: if b - s > 8: return True return False
2591bffd2f21266ace9371fee68f3ac729f3cc9d
31,739
def gauss_sum(n):
    """Calculate sum(x for x in range(1, n+1)) by formula."""
    return n * (n + 1) // 2
96d611e8975d163f1cc1cdf207f0930b5951d4a1
31,742
def _is_mobi(file_bytes: bytes) -> bool:
    """
    Decide if a file is a MOBI/AZW3 file.

    From ./se/vendor/kindleunpack/mobi_sectioner.py lines 49-53
    """
    return file_bytes[:78][0x3C:0x3C+8] in (b"BOOKMOBI", b"TEXtREAd")
dcd1fb584b5a8c373fc5b453314ccbc9574b66ea
31,743
def remove_empty(rec): """ Deletes sequences that were marked for deletion by convert_to_IUPAC """ for header, sequence in rec.mapping.items(): if all(char == 'X' for char in sequence): rec.headers.remove(header) rec.sequences.remove(sequence) rec.update() return rec
57616d8fb35f6bf5c74d4271dfe709e87171932a
31,747
def get_y_for_x(x: float, gradient: float, y_intercept: float) -> float:
    """ Linear equation, y = mx + c """
    return (x * gradient) + y_intercept
f2e4226c159e74621ae8301d24ecc5bef85971f4
31,754
import math


def regularize(data):
    """Converts every non-numerical list value to zero which is useful
    for analysis later."""
    for index, val in enumerate(data):
        if math.isinf(val) or math.isnan(val):
            data[index] = 0
    return data
ae38fa7a3a1f5bb6bfeba2ca4fbcfd5145f36ee8
31,763
from typing import List def padding_list(someList: List[str], N: int) -> List[str]: """Padding the list with <s> at the front and </s> behind Args: someList (List[str]): The list to be padded with N (int): The amount of <s>, </s> to be padded Returns: List[str]: Padded list """ for i in range(N): someList = ['<s>'] + someList + ['</s>'] return someList
e8df315d715e5e1e4575b42da5f6ff1691107732
31,767
def check_non_ascii(line):
    """Checks if a line contains non-ASCII chars

    Params:
        line (unicode)

    Returns:
        true if line does not contain non-ASCII chars
    """
    try:
        line.encode('ascii')
        return True
    except UnicodeEncodeError:
        return False
9e9eebf5623000b27e3b2234ada4c2c5eaafad83
31,775
def az_rate(val):
    """Convert an integer value to a floating point angular rate."""
    return val * 90. / 2**16
22dfd3cfcc04ec83dc5b6d845eb333de96cd5f7a
31,776
def getelasticpublicip(ip):
    """Gets the PublicIp address of the Elastic IP Address"""
    return ip["PublicIp"]
84e4f788cb4a9b765f4b5dfc1a0606004162a4ed
31,781
def commands(ctx): """ Returns a string of Rimworld specific commands Parameters: ctx: The context of the message Returns: str: The list of commands """ response = '' response = 'Rimworld Commands: !item, !event, !iteminfo, !eventinfo, !mods' return response
4fcb162740e97437f414f8925eff97d3032e5319
31,782
def add_timeout_arg(parser): """Add the timeout argument to a parser""" def _validator(val): """Validate acceptable inputs for the timeout of the function""" error = 'Value for \'timeout\' must be an integer between 10 and 900' try: timeout = int(val) except ValueError: raise parser.error(error) if not 10 <= timeout <= 900: raise parser.error(error) return timeout parser.add_argument( '-t', '--timeout', required=True, help=( 'The AWS Lambda function timeout value, in seconds. ' 'This should be an integer between 10 and 900.' ), type=_validator )
2b96c763f579650e86e311a2081eab2313f6dc49
31,784
def remove_dul(entitylst):
    """ Remove duplicate entities in one sequence. """
    entitylst = [tuple(entity) for entity in entitylst]
    entitylst = set(entitylst)
    entitylst = [list(entity) for entity in entitylst]
    return entitylst
7763ea273b9eed00d923ac1b8d631b9a24daf01a
31,786
def a_source_filename(plugin_ctx, fsm_ctx):
    """send source filename."""
    src_file = plugin_ctx.src_file
    fsm_ctx.ctrl.sendline(src_file)
    return True
97a2d12fd2807b0dfca8d503478ccc5c514b88b6
31,793
import re def seperate_data(data, labels): """ Given a data file such as | x11 u11 x12 other x13 | | x21 u21 x22 other x23 | | ..................... | And labels for each column such as 'x' 'u' 'x' '_' 'x' Split the data into x' and u' """ # Take the index of all instances of 'x', get the cols with # that index, which should be the states. xt = data[:, [m.start() for m in re.finditer('x', labels)]] # Save as above, but with 'u' and inputs. ut = data[:, [m.start() for m in re.finditer('u', labels)]] return xt, ut
6dc739f55fcf6113cc45d849c256e5186e328cc0
31,798
def hr2deg(deg):
    """Convert degrees into hours."""
    return (deg * (24.0 / 360.0))
e9f268be41221cd2b17d596f859d7a7686c3b4ac
31,799
from typing import Union import time def cast_timestamp_seconds_since_epoch_to_text(seconds: Union[int, float, str], timezone: str = "UTC", time_in_nanoseconds: bool = False, date_time_format: str = '%Y-%m-%d %H:%M:%S') -> str: """ >>> assert cast_timestamp_seconds_since_epoch_to_text(0,"UTC") == '1970-01-01 00:00:00' >>> assert cast_timestamp_seconds_since_epoch_to_text("0","LOCAL") >>> assert cast_timestamp_seconds_since_epoch_to_text(1590674483 ,"UTC") == '2020-05-28 14:01:23' >>> assert cast_timestamp_seconds_since_epoch_to_text(1590674574765797619 ,"UTC", time_in_nanoseconds=True) == '2020-05-28 14:02:55' """ seconds = float(seconds) if time_in_nanoseconds: seconds = seconds / 1E9 seconds = int(round(seconds)) if timezone == "UTC": epochseconds = time.gmtime(seconds) else: epochseconds = time.localtime(seconds) return time.strftime(date_time_format, epochseconds)
cc675ad75bc90e3866eb737b6878c1c21d049ac4
31,805
def to_lower(tokens):
    """Convert all tokens to lower case.

    Args:
        tokens (list): List of tokens generated using a tokenizer.

    Returns:
        list: List of all tokens converted to lowercase.
    """
    return [w.lower() for w in tokens]
acef98d5e52ed03104f75d542b8f8328790a7c5f
31,806
import re


def has_three_consecutive_vowels(word):
    """Returns True if word has at least 3 consecutive vowels"""
    pattern = re.compile(r"[aAeEiIoOuU]{3,}")
    match = pattern.search(word)
    return True if match else False
6159ccca9a132e2d2bbd28b8a867aca496ba8436
31,807
def rotc(ebit, debt, equity):
    """Computes return on total capital.

    Parameters
    ----------
    ebit : int or float
        Earnings before interest and taxes
    debt : int or float
        Short- and long-term debt
    equity : int or float
        Equity

    Returns
    -------
    out : int or float
        Return on total capital
    """
    return ebit / (debt + equity)
4c4149d55439c0b6d91b15559fe0618dd09efac0
31,811
def is_final_option(string):
    """Whether that string means there will be no further options

    >>> is_final_option('--')
    True
    """
    return string == '--'
272b3300571096eb0a4931c7f699b00b7a39842c
31,812
def traverse_dict(obj: dict, convert_to_string: bool = True):
    """
    Traversal implementation which recursively visits each node in a dict.
    We modify this function so that at the lowest hierarchy, we convert
    the element to a string.

    From https://nvie.com/posts/modifying-deeply-nested-structures/
    """
    if isinstance(obj, dict):
        out_dict = {}
        for key, val in obj.items():
            out_dict[key] = traverse_dict(val, convert_to_string)
        return out_dict
    if isinstance(obj, list):
        return [traverse_dict(elem, convert_to_string) for elem in obj]
    return str(obj) if convert_to_string else obj
af323c350fc5c784362baf6aaee9a61be0d7f0ca
31,816
def squash_dims(tensor, dims):
    """
    Squashes dimension, given in dims into one, which equals to product of given.

    Args:
        tensor (Tensor): input tensor
        dims: dimensions over which tensor should be squashed
    """
    assert len(dims) >= 2, "Expected two or more dims to be squashed"

    size = tensor.size()

    squashed_dim = size[dims[0]]
    for i in range(1, len(dims)):
        assert dims[i] == dims[i - 1] + 1, "Squashed dims should be consecutive"
        squashed_dim *= size[dims[i]]

    result_dims = size[:dims[0]] + (squashed_dim,) + size[dims[-1] + 1:]
    return tensor.contiguous().view(*result_dims)
d12ee924fabae3529aa48a90d124bbb41cc3a655
31,817
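A minimal sketch of squash_dims() above, assuming PyTorch is available; the two middle dimensions of a (2, 3, 4, 5) tensor collapse into one of size 12.

import torch

t = torch.zeros(2, 3, 4, 5)
assert squash_dims(t, (1, 2)).shape == (2, 12, 5)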
def transform_post(post): """Transforms post data Arguments: post {dict} -- Post data """ return { 'id': post['id'], 'title': post['title'], 'url': post['url'], 'image': post['feature_image'], 'summary': post['custom_excerpt'] \ if post['custom_excerpt'] else post['excerpt'] }
c765ba32ec5b00c289034e8daa2424f431678307
31,821
def min_operations(number):
    """
    Return number of steps taken to reach a target number

    number: target number (as an integer)
    :returns: number of steps (as an integer)
    """
    # Solution:
    # 1. The number of steps to reach a target number = number of steps
    #    taken to make the target number 0
    # 2. We will be greedy; each time it is possible, we will try to
    #    halve the number
    steps_count = 0
    while number > 0:
        # check if we can halve the number
        if number % 2 == 0:
            number = number / 2
        else:
            # number can't be halved so we can only decrease it by 1
            number -= 1
        steps_count += 1
    return steps_count
d15d91e22aa2d552acb8308882482ffeafa9d5e3
31,824
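A spot check for min_operations() above: 10 -> 5 -> 4 -> 2 -> 1 -> 0 takes five steps.

assert min_operations(10) == 5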
import ipaddress def validate_ipv4_address(ipv4_address): """ This function will validate if the provided string is a valid IPv4 address :param ipv4_address: string with the IPv4 address :return: true/false """ try: ipaddress.ip_address(ipv4_address) return True except: return False
51de6b7d4da2de4413217ee339147538aac4c2f5
31,825
def flatten(some_list):
    """
    Flatten a list of lists.

    Usage: flatten([[list a], [list b], ...])
    Output: [elements of list a, elements of list b]
    """
    new_list = []
    for sub_list in some_list:
        new_list += sub_list
    return new_list
26df175c0e119f5a872449df9fd8ea8d46393542
31,827
from typing import Any from typing import Set def _all_names_on_object(obj: Any) -> Set[str]: """Gets all names of attributes on `obj` and its classes throughout MRO. Args: obj: The object to get names for. Returns: A set of names of attributes of `obj` and its classes. """ nameset = set(obj.__dict__.keys()) for cls in obj.__class__.__mro__: nameset = nameset.union(set(cls.__dict__.keys())) return nameset
ac635b970df640a602656af55eabb94c4d55daae
31,831
import ipaddress import re def _validate_cidr_format(cidr): """Validate CIDR IP range :param str cidr: :return: :rtype: bool """ try: ipaddress.ip_network(cidr, strict=False) except (ValueError, ipaddress.AddressValueError, ipaddress.NetmaskValueError): return False if '/' not in cidr: return False if re.search('\s', cidr): return False return True
5f2a667c93720909ce7b9ff3019a0c403a499222
31,833
def _convert_path_to_ee_sources(path: str) -> str: """Get the remote module path from the 'ee-sources' GCS bucket. Args: path: str Returns: An ee-sources module url. """ if path.startswith("http"): eempath = path else: bpath = path.replace(":", "/") eempath = f"https://storage.googleapis.com/ee-sources/{bpath}" return eempath
f650736711fb8909e0e11df2165a89f06210cb53
31,838
import pytz


def convert_utc_to_localtime(utc_datetime, timezone_str):
    """
    Convert a UTC time to the specified local timezone; if the timezone
    is invalid, the original UTC time is returned unchanged.

    Args:
        utc_datetime(datetime): UTC time
        timezone_str(str): target timezone, given as a tz database name

    Returns:
        timezone_dt(datetime): the converted local time
    """
    # look up the tzinfo timezone
    if timezone_str in pytz.common_timezones:
        tz = pytz.timezone(timezone_str)
        # attach timezone info to the datetime
        dateime_include_tzinfo = pytz.utc.localize(utc_datetime, is_dst=None)
        timezone_dt = dateime_include_tzinfo.astimezone(tz)
        return timezone_dt
    return utc_datetime
3185746161ddfd812f023bdfa74bb58bd2be9113
31,840
import collections


def list_compare(a: list, b: list) -> bool:
    """Check if two lists contain the same elements."""
    return collections.Counter(a) == collections.Counter(b)
05a15f46b5e00f6e17e81f4b67332a196154f4e7
31,848
def copper_heat_capacity_CRC(T): """ Copper specific heat capacity as a function of the temperature from [1]. References ---------- .. [1] William M. Haynes (Ed.). "CRC handbook of chemistry and physics". CRC Press (2014). Parameters ---------- T: :class:`pybamm.Symbol` Dimensional temperature Returns ------- :class:`pybamm.Symbol` Specific heat capacity """ cp = 1.445e-6 * T ** 3 - 1.946e-3 * T ** 2 + 0.9633 * T + 236 return cp
5a1aa532ea0d9eb7cb4974709963495835d608aa
31,853