Columns: content (string, length 39 to 14.9k), sha1 (string, length 40), id (int64, 0 to 710k)
from typing import Any


def is_greater_than(value: Any, *, lower_bound: Any = 0) -> bool:
    """Checks whether the value is greater than the lower_bound.

    :param value: The value to check
    :param lower_bound: The lower bound to compare against
    :return: Whether the value is greater than lower_bound
    """
    return value > lower_bound
5b78579bec610cb83ee01cc0c625b056604e6510
38,684
def is_hdfs_path(path):
    """
    Check if a given path is an HDFS URI.

    Args:
        path (str): input path

    Returns:
        bool: True if input is an HDFS path, False otherwise

    >>> is_hdfs_path("/tdk")
    False
    >>> is_hdfs_path("hdfs://aa:123/bb/cc")
    True
    """
    return path.startswith("hdfs://")
62cc9286f91fbad848541275d79d250bf62b4c99
38,690
def dotProduct(vector1, vector2):
    """Calculate and return the dot product of two vectors.

    Calculate and return the dot product (inner product) of the two
    vectors (Python dict type).

    :param vector1: vector 1
    :type vector1: dict
    :param vector2: vector 2
    :type vector2: dict
    :returns: dot product

    >>> dotProduct({1:1,2:2,3:3,4:4}, {2:2,3:3})
    13
    >>> dotProduct({1:1,2:2,3:3,4:4}, {2:2.0,3:3.0})
    13.0
    """
    # order doesn't affect the result - iterate over the shorter dict
    # and look each key up in the longer one
    a, b = sorted([vector1, vector2], key=len)
    return sum(v * b.get(k, 0) for k, v in a.items())
c1e136f308ce6743bd90b42af87b09ef569eaffc
38,697
import random def _gsa_update_velocity(velocity, acceleration): """Stochastically update velocity given acceleration. In GSA paper, velocity is v_i, acceleration is a_i """ # The GSA algorithm specifies that the new velocity for each dimension # is a sum of a random fraction of its current velocity in that dimension, # and its acceleration in that dimension # For this reason we sum the dimensions individually instead of simply # using vec_a+vec_b new_velocity = [] for vel, acc in zip(velocity, acceleration): new_velocity.append(random.uniform(0.0, 1.0) * vel + acc) return new_velocity
14110c14a54450d0ea8f42d47c90bf56d0b8d3f7
38,698
def extract_cands(mystr,candd): """ extract candidate names from _-separated string, increment dict entries """ for c in mystr.split("_"): if c in candd.keys(): candd[c] += 1 else: candd[c] = 1 return candd
1add9487d8939c3fa24cf2568c4916ad72c4381a
38,702
def try_get(src, getter, expected_type=None): """Getter for Object with type checking. Args: src (object): Object for getter. getter (lambda): Lambda expression for getting item from Object. expected_type (type, optional): Expected type from the getter. Defaults to None. Returns: expected_type: Value of getter for Object. """ if not isinstance(getter, (list, tuple)): getter = [getter] for get in getter: try: v = get(src) except (AttributeError, KeyError, TypeError, IndexError): pass else: if expected_type is None or isinstance(v, expected_type): return v
7d54542d70df933ecaf40c3bd8df58a71fa0f5b1
38,704
import struct
import socket
import binascii


def compact(ip, port, ascii=False):
    """
    Compact IP address and port.

    >>> compact('127.0.0.1', 6667, ascii=True)
    b'7f0000011a0b'
    >>> compact('127.0.0.1', 6667) == bytes.fromhex('7f0000011a0b')
    True
    """
    compacted = struct.pack('!4sH', socket.inet_aton(ip), port)
    return binascii.hexlify(compacted) if ascii else compacted
51aa9d2ece55fce558855763e7c4a965d4d800cb
38,712
import re def remove_tags_and_content(s, tag): """Removes all of the specified tags from the string including their children. Greedily finds an opening and closing of specified tag and removes all content between the two. **Note**: Not intended to remove multiple sibling nodes with content in between. Args: s (:obj:`str`): The HTML to parse. tag (:obj:`str`): The tag to be removed. Returns: :obj:`str`: A string with all of the specified tags and their content removed. """ return re.sub(rf'<\s*{tag}.*?>(.|\r|\n)*<\s*/\s*{tag}\s*>', '', s)
9bfecaa082e9bea406b75bde46edbe32b1f4149a
38,720
def snake_to_camel(text: str) -> str: """ Convert snake_case to CamelCase """ data = [ i.capitalize() for i in text.split("_") ] return "".join(data)
fbfd0a3de9f659559a1f3d2b7ccedc5c8bcc173f
38,722
def calc_f1(precision, recall): """ Compute F1 metric from the score dictionary inputs: precision float with the precision value recall float with the recall value output: f1 float with the F1 value """ f1 = (2 * precision * recall) / (precision + recall) return f1
7dd261deb9d5325c05986b9498b80d01066b69b1
38,723
import time


def program_timer(func):
    """Print the runtime of the decorated function."""
    def wrapper_timer(*args, **kwargs):
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        end_time = time.perf_counter()
        run_time = end_time - start_time
        # convert duration to hours, minutes, seconds
        minutes, seconds = divmod(run_time, 60)
        hours, minutes = divmod(minutes, 60)
        if hours > 0:
            print("{}{} HOURS {} MINUTES {:6.3f} SECONDS".format(
                "TOTAL RUN TIME: ", int(hours), int(minutes), seconds))
        elif minutes > 0:
            print("{}{} MINUTES {:6.3f} SECONDS".format(
                "TOTAL RUN TIME: ", int(minutes), seconds))
        else:
            print("{}{:6.3f} SECONDS".format("TOTAL RUN TIME: ", seconds))
        return value
    return wrapper_timer
2832963859fea68c2e7120f3777be952e54c33f8
38,725
import binascii def mac_str_to_bytes(mac): """Converts string representation of a MAC address to bytes""" if isinstance(mac, bytes): return mac if not isinstance(mac, str): raise TypeError('MAC address given must be a string') mac = mac.replace(':', '').replace('-', '').replace('.', '') return binascii.unhexlify(mac)
ab6952595dcf193908b529f29edbcba540aa4310
38,726
import json def read_config(filename: str) -> dict: """ Reads a JSON config file into a dict """ with open(filename) as file: config = json.load(file) return config
9c4ed49e0a70e568effaec1315a6aeefdad987f2
38,727
def div(a: int, b: int) -> float: """ Division, mind the zero! >>> div(10, 2) 5.0 :param a: An integer :param b: Another integer :return: the result of dividing a and b :raises: ZeroDivisionError: if parameter b is 0 """ return a / b
9c1af2374b7b8f5c8b9d6bcfcc5e25c3099e2634
38,728
def make_list(iterable): """ Makes a list from given ``iterable``. But won't create new one if ``iterable`` is a :py:func:`list` or :py:func:`tuple` itself. :param Iterable iterable: Some iterable entity we need to convert into :py:func:`list`. """ if isinstance(iterable, (list, tuple)): return iterable return list(iterable)
37116d2716e59ce57b45aadbc05b7c2bc5dddcd2
38,729
def _GenerateUserPayload(users): """Generate the user payload data for all users. I could just pass through all the user's properties here, but that would expose the private key we have in the datastore along with various other user data, so I'm explicitly limiting what we show to an email and key for modifying values. Args: users: A list of users with associated properties from the datastore. Returns: user_token_payloads: A dictionary with user key id as key and email as a value. """ user_token_payloads = {} for user in users: user_token_payloads[user.key.urlsafe()] = user.email return user_token_payloads
b193991ca67879f0eab683882b50f337bbb84942
38,732
def add_traffic_column(df):
    """Add a TRAFFIC column that is the sum of the Entries and Exits for a station

    Args:
        df (pandas.DataFrame): The original pandas dataframe

    Returns:
        df (pandas.DataFrame): The pandas dataframe with the TRAFFIC column added and the
            TIMEFRAME_ENTRIES and TIMEFRAME_EXITS columns removed
    """
    df = df[(df['TIMEFRAME_ENTRIES'] >= 0) & (df['TIMEFRAME_ENTRIES'] <= 5000)]
    df = df[(df['TIMEFRAME_EXITS'] >= 0) & (df['TIMEFRAME_EXITS'] <= 5000)]
    df['TRAFFIC'] = df['TIMEFRAME_ENTRIES'] + df['TIMEFRAME_EXITS']
    df = df.drop(columns=['TIMEFRAME_ENTRIES', 'TIMEFRAME_EXITS'])
    return df
b916144b26e9985554009ab7e3cd446d09ff6b52
38,733
import time def get_runtime_s(job): """Returns job runtime in milliseconds.""" scrapystats = job.metadata.get("scrapystats") finished_time = job.metadata.get("finished_time") start_time = scrapystats.get("start_time") if finished_time: return finished_time - start_time return int(round(time.time() * 1000)) - start_time
4cd353b078de50c6e4f54ce27bcf620c7212b9bb
38,734
import torch


def get_output_dim(model, pooling_type="gem"):
    """Dynamically compute the output dimension of a model."""
    output_dim = model(torch.ones([2, 3, 224, 224])).shape[1]
    if pooling_type == "netvlad":
        output_dim *= 64  # NetVLAD layer has 64x bigger output dimensions
    return output_dim
d08f54fef8923ac5132af9aab80a080b9bd6c0ee
38,738
def value_for_key(membersuite_object_data, key): """Return the value for `key` of membersuite_object_data. """ key_value_dicts = { d['Key']: d['Value'] for d in membersuite_object_data["Fields"]["KeyValueOfstringanyType"]} return key_value_dicts[key]
ef35afa306c7ba9e90060a4f143ed0ae87a831bb
38,746
def intersection(A, B):
    """Intersection of two lists."""
    return [x for x in A if x in B]
87b02ffedb78e033714c11186e097b13c4425fb0
38,747
import re def clean_sentence(sentence): """Remove extra white space from `sentence`.""" return re.sub(r'\s+', ' ', sentence)
56e69adcb34f9b982eb12c396dc11727a150a7a8
38,750
from typing import Any def as_text(value: Any) -> str: """ Convert the given value to a string. :py:obj:`None` is converted to ``''``. :param value: The value to convert to a string. :rtype: .. versionchanged:: 0.8.0 Moved from :mod:`domdf_python_tools.utils`. """ if value is None: return '' return str(value)
5f246d8b291fcaa312fb8340bf3d0a60040df297
38,752
import re


def remove_tags(html_str: str, tags):
    """Removes a list of tags and their content from the html."""
    if isinstance(tags, str):
        tags = [tags]
    if isinstance(tags, list):
        for tag in tags:
            if tag == 'js' or tag == 'javascript':
                scripts = re.compile(r'<(script).*?</\1>', re.DOTALL)
                html_str = scripts.sub('', html_str)
            if tag == 'css':
                css = re.compile(r'<(style).*?</\1>', re.DOTALL)
                html_str = css.sub('', html_str)
            if 'comment' in tag or tag == '#' or tag == '//':
                comments = re.compile(r'<!--(.|\s)*?-->')
                html_str = comments.sub('', html_str)
    # tag = re.compile(r'<(style).*?</\1>', re.DOTALL)
    # html = tag.sub('', html)
    return html_str
cf7cbd068e73eb90dcbc89e90e140c326fe0e273
38,755
def make_select( name, selected, data, jscallback=None, cssclass=None, multiple=False, showvalue=True, ) -> str: """Generate a HTML select. The trick here is what `data` looks like. The basic form is a dict. You can get `optgroup`s by having the dictionary keys be additional lists or dicts. Args: name (str): The select[name] to assign. selected (mixed): The option value that should be set to selected. data (dict): The structure to build our select from. jscallback (str): javascript to place in the `onChange` attribute. cssclass (str): CSS class to assign to the select element. showvalue (bool): Should option label be prepended by [key]. Returns: html_string """ if not isinstance(selected, (list, tuple)): selected = [selected] s = '<select name="%s"%s%s%s>\n' % ( name, ( "" if jscallback is None else f' onChange="{jscallback}(this.value)"' ), "" if cssclass is None else f' class="{cssclass}"', "" if not multiple else " MULTIPLE", ) for key, val in data.items(): if isinstance(val, (tuple, list)): val = dict(list(zip(val, val))) if not isinstance(val, dict): # simple s += '<option value="%s"%s>%s%s</option>\n' % ( key, ' selected="selected"' if key in selected else "", f"[{key}] " if showvalue else "", val, ) continue s += f'<optgroup label="{key}">\n' for key2, val2 in val.items(): s += '<option value="%s"%s>%s%s</option>\n' % ( key2, ' selected="selected"' if key2 in selected else "", f"[{key2}] " if showvalue else "", val2, ) s += "</optgroup>\n" s += "</select>\n" return s
7d47b562d3c3bd58f7f946032207c1a32dc3f04b
38,756
def create_file_name(path, start): """Create the name of rst file. Example: resources.libraries.python.honeycomb.rst tests.perf.rst :param path: Path to a module to be documented. :param start: The first directory in path which is used in the file name. :type path: str :type start: str :returns: File name. :rtype: str """ dir_list = path.split('/') start_index = dir_list.index(start) return ".".join(dir_list[start_index:-1]) + ".rst"
398a5e7749cc3f7f47a068dd5859b28fb0ffe98d
38,760
import re


def get_compound_id(microstate_id):
    """
    Extract the compound ID from a microstate ID (which includes a wart suffix like '_1', '_2')

    Parameters
    ----------
    microstate_id : str
        The microstate ID, which includes a wart suffix (e.g. 'MAT-POS-8a69d52e-7_1')

    Returns
    -------
    compound_id : str
        The compound ID with the wart removed (e.g. 'MAT-POS-8a69d52e-7')
    """
    match = re.match(r'^(?P<compound_id>\S+)_(?P<microstate_suffix>\d+)$', microstate_id)
    if match is None:
        # No warts; compound and microstate IDs are identical
        compound_id = microstate_id
    else:
        # Remove the wart
        compound_id = match.group('compound_id')
    return compound_id
6260389236ec579e57f9deaa4d7b41e34b45bcc2
38,761
import inspect def get_methods_defined_in_class(cls): """ Get all functions defined in a given class. This includes all non-inherited methods, static methods and class methods. Args: cls (Type): Class for lookup Returns: List[Tuple[str, Union[FunctionType, MethodType]]] """ methods = inspect.getmembers(cls, inspect.isfunction) class_methods = inspect.getmembers(cls, inspect.ismethod) functions = methods + class_methods # Only keep non-inherited functions cls_symbols = cls.__dict__ functions = [f for f in functions if f[0] in cls_symbols] return functions
82974e60ed907a998f736bec0ab7cd28c1f93fdd
38,762
def initial_graph_properties(rlist, qlist): """Initial processing of sequence names for network construction. Args: rlist (list) List of reference sequence labels qlist (list) List of query sequence labels Returns: vertex_labels (list) Ordered list of sequences in network self_comparison (bool) Whether the network is being constructed from all-v-all distances or reference-v-query information """ if rlist == qlist: self_comparison = True vertex_labels = rlist else: self_comparison = False vertex_labels = rlist + qlist return vertex_labels, self_comparison
e65a81a421c02bb2a4faa679660641f7550c5dc7
38,763
import tempfile def write_temp_return_filename(data): """ Write out data to a temporary file and return that file's name. This file will need to be deleted. :param data: str: data to be written to a file :return: str: temp filename we just created """ file = tempfile.NamedTemporaryFile(delete=False) file.write(data.encode('utf-8')) file.close() return file.name
ef34cd2328cf450c0cbf603ca07295e858da7861
38,767
def same_base_index(a, b): """Check if the base parts of two index names are the same.""" return a.split("_")[:-1] == b.split("_")[:-1]
6186230a9cb982be4cd113c2e8098e8ab472159b
38,776
import re def quote_type_string(type_string: str) -> str: """Quotes a type representation for use in messages.""" no_quote_regex = r'^<(tuple|union): \d+ items>$' if (type_string in ['Module', 'overloaded function', '<nothing>', '<deleted>'] or re.match(no_quote_regex, type_string) is not None or type_string.endswith('?')): # Messages are easier to read if these aren't quoted. We use a # regex to match strings with variable contents. return type_string return '"{}"'.format(type_string)
63f50cf7a986354cc3cddd947ff9830012659d81
38,777
import uuid def make_uuid(df, name='uuid'): """ Creates a list of uuids with the same length as the dataframe """ uuids = [uuid.uuid4().hex for _ in range(len(df))] return uuids
904dab2ea1dab3b53974277a1f37fa30f26e3e61
38,781
def get_collection_sizes(net, bus_size=1.0, ext_grid_size=1.0, trafo_size=1.0, load_size=1.0, sgen_size=1.0, switch_size=2.0, switch_distance=1.0): """ Calculates the size for most collection types according to the distance between min and max geocoord so that the collections fit the plot nicely .. note: This is implemented because if you would choose a fixed values (e.g. bus_size = 0.2),\ the size could be to small for large networks and vice versa :param net: pandapower network for which to create plot :type net: pandapowerNet :param bus_size: relative bus size :type bus_size: float, default 1. :param ext_grid_size: relative external grid size :type ext_grid_size: float, default 1. :param trafo_size: relative trafo size :type trafo_size: float, default 1. :param load_size: relative load size :type load_size: float, default 1. :param sgen_size: relative static generator size :type sgen_size: float, default 1. :param switch_size: relative switch size :type switch_size: float, default 2. :param switch_distance: relative distance between switches :type switch_distance: float, default 1. :return: sizes (dict) - dictionary containing all scaled sizes """ mean_distance_between_buses = sum((net['bus_geodata'].max() - net[ 'bus_geodata'].min()).dropna() / 200) sizes = { "bus": bus_size * mean_distance_between_buses, "ext_grid": ext_grid_size * mean_distance_between_buses * 1.5, "switch": switch_size * mean_distance_between_buses * 1, "switch_distance": switch_distance * mean_distance_between_buses * 2, "load": load_size * mean_distance_between_buses, "sgen": sgen_size * mean_distance_between_buses, "trafo": trafo_size * mean_distance_between_buses } return sizes
06abcef3e8fe7833820057952133c6fa570b36d5
38,783
def constant_density_2D(R, constant): """ return a constant value at every input r Parameters ---------- R: [Mpc] distance from the center constant: multiplicative constant Returns ------- constant """ return constant
8a0f51ba8b296e6b81270790daa5e69d71f808d7
38,785
import shelve def read_population_assignation_cp(namefile): """Read pre-computed population assignation.""" db = shelve.open(namefile) cps = db['cps'] population_value = db['population_value'] methodvalues = db['methodvalues'] db.close() return cps, population_value, methodvalues
9281d78d3c21e8d340c38b1060d616f09b0e312b
38,787
def to_node(lines, node): """ Returns all lines connected to a given node""" return list(lines[((lines.node_i == node) | (lines.node_e == node))].index)
65132478e0ffab8e4b8687d88a02b7efc35e065e
38,796
def multi_find(s: str, f: str): """Finds every occurrence of substring F in string S. Returns a list of indexes where each occurence starts.""" res = [] scanned = 0 while len(s) > 0: found = s.find(f) if found == -1: break res.append(found + scanned) s = s[found + 1:] scanned += found + 1 return res
b306ddab98c8b7da056879c4c1e1437954d508a0
38,798
def sub_symbols(pattern, code, symbol): """Substitutes symbols in CLDR number pattern.""" return pattern.replace('¤¤', code).replace('¤', symbol)
4d87e263ba53e99368fb82c8c93b3998834852ad
38,800
def get_K_loss_pp(has_pipe): """配管の線熱損失係数 Args: has_pipe(bool): 配管の断熱の有無 Returns: float: 配管の線熱損失係数 (W/mK) """ if has_pipe: return 0.15 else: return 0.21
6d5baf5303e442f2dce94dba7cfb78b68a45e50a
38,801
def count_tuples(map):
    """
    @brief counts non-zero bytes in a bitmap
    @param map BitArray containing the bitmap
    @return Count of non-zero bytes in the bitmap
    """
    count = 0
    if map is not None:
        for byte in map.tobytes():
            if byte != 0:
                count += 1
    return count
63725c8dfb129aaf7fa25e0362563572958cf0fb
38,807
def get_sentence(root_node): """ Given a complete sentence like (ROOT (S ...)) or (ROOT (SQ ...)), returns the nested (S ...) or (SQ ...). """ return root_node[0]
db8256fc7cfe56e469cd983c8f2097162671d519
38,810
def divide(dividend, divisor): """ :param dividend: a numerical value :param divisor: a numerical :return: a numerical value if division is possible. Otherwise, None """ quotient = None if divisor is not None : if divisor != 0: quotient = dividend / divisor return quotient
e5e6678f7de33524444b3bcd40fa45c21f42c77d
38,811
import struct import binascii def make_chunk(type, data): """Create a raw chunk by composing chunk's ``type`` and ``data``. It calculates chunk length and CRC for you. :arg str type: PNG chunk type. :arg bytes data: PNG chunk data, **excluding chunk length, type, and CRC**. :rtype: bytes """ out = struct.pack("!I", len(data)) data = type.encode("latin-1") + data out += data + struct.pack("!I", binascii.crc32(data)) return out
f4ef2410c28437561b316d768d5cdbe7cbbadc15
38,813
def first_index_k_ones_right(qstr, k, P):
    """
    For a binary string qstr, return the first index of qstr with
    k (mod P) ones to the right.

    Return: index in [0, len(qstr)]
    """
    num_ones_right = 0
    for j in range(len(qstr), -1, -1):
        if (num_ones_right - k) % P == 0:
            return j
        if j == 0:
            raise Exception("No valid position found")
        if qstr[j - 1] == '1':
            num_ones_right += 1
ed258c584957c5b9a778eec555c4efe80505be85
38,815
def contour_coords(contour, source='scikit'): """Extract x, y tuple of contour positions from contour data. Scikit has reversed x, y coordinates OpenCV has an unusual numpy array shape (npts, 1, 2) Parameters ---------- contour: contour data source: 'scikit' or 'opencv' Output ------ x, y (tuple of numpy arrays) that can be used directly on an imshow() graph """ if source == 'scikit': return contour[:, 1], contour[:, 0] elif source == 'opencv': contour = contour.squeeze() # eliminate middle dimension in array return contour[:, 0], contour[:, 1] else: raise ValueError(f'{source} not a valid source for contour data.')
971f269fab6c476aed1be0a047f843ff0372fe08
38,817
def find(inp, success_fn): """ Finds an element for which the success_fn responds true """ def rec_find(structure): if isinstance(structure, list) or isinstance(structure, tuple): items = list(structure) elif isinstance(structure, dict): items = list(structure.items()) else: # Base case, if not dict or iterable success = success_fn(structure) return structure, success # If dict or iterable, iterate and find a tensor for item in items: result, success = rec_find(item) if success: return result, success return None, False return rec_find(inp)[0]
cf5918af04ebf66d4eab6923e84c7ff2ebe0c7f6
38,818
def get_text(node): """Get the contents of text nodes in a parent node""" return node.text
f0ac82cd958f762e9e0e5425a73a49bc5b9eaa68
38,819
import re def get_argument_value(input_string, argument): """This function gets the value of the argument from the input_string. Inputs: - input_string: Input_string containing all arguments [string] - argument: The argument of interest to be gathered from the input_string [string] Outputs: - argument_value: the value of the argument of interest [string] """ # Gather the argument value argument_value = list(filter(None, re.split(' ', list(filter(None, re.split(argument, input_string)))[-1])))[0] return argument_value
7b2662e16459e2500084e53303516a821cf255dc
38,820
def read_column_data_from_txt(fname): """ Read data from a simple text file. Format should be just numbers. First column is the dependent variable. others are independent. Whitespace delimited. Returns ------- x_values : list List of x columns y_values : list list of y values """ datafile = open(fname) datarows = [] for line in datafile: datarows.append([float(li) for li in line.split()]) datacols = list(zip(*datarows)) x_values = datacols[1:] y_values = datacols[0] return x_values, y_values
d115c85d6675150082af61de833a5614b80bf34b
38,822
def _map_operation(operation): """Default function to map the operation to target""" return operation
8a42b9306ef570485e27ad62b58134d58a0604b9
38,828
def validate_item_against_schema(schema, item): """Returns whether or not the given item has the same format as the given schema. Args: schema: A dictionary mapping field name to expected value type. item: A dictionary mapping field name to value. Returns: A boolean representing whether or not the item matches the schema. Raises: TypeError: If either argument is not a dictionary. """ if not isinstance(schema, dict): raise TypeError("Schema is not a dict object.") if not isinstance(item, dict): raise TypeError("Item is not a dict object.") if len(schema) != len(item): return False for key, value_type in schema.items(): if key not in item: return False if not isinstance(item[key], value_type): return False return True
319e8b7c703c65fea20cb586d62bfb5140f346fb
38,831
import re def _extract_current_step(current_status_string): """ Attempts to extract the current step numeric identifier from the given status string. Returns the step number or None if none. """ # Older format: `Step 12 :` # Newer format: `Step 4/13 :` step_increment = re.search(r"Step ([0-9]+)/([0-9]+) :", current_status_string) if step_increment: return int(step_increment.group(1)) step_increment = re.search(r"Step ([0-9]+) :", current_status_string) if step_increment: return int(step_increment.group(1))
2d1ee544c1d719ddbef1c175233d8304296ea33c
38,834
def make_class_name(pv): """ Make a class name based on a given PV. """ return '{}'.format(pv.title())
90c3820eb68e0afb8ec90c49b94f94129646dc3d
38,836
def binary_search_base(nums: list, target: int) -> int:
    """
    Time complexity O(log n)

    The basic binary search. nums is a sorted list.
    If there are multiple targets in nums, return the index of one of them,
    else return -1.
    """
    if not nums:
        return -1
    left, right = 0, len(nums) - 1
    while left <= right:
        mid = left + (right - left) // 2
        if nums[mid] == target:
            return mid
        elif nums[mid] < target:
            left = mid + 1
        else:
            right = mid - 1
    return -1
1af211a911ef0e206fcf0b6e25a8fe0b9cedebf8
38,843
def build_array(text):
    """Returns a set of the numbers contained in a text file (one per line)."""
    with open(text, 'r') as f:
        nums = {int(line.strip()) for line in f}
    return nums
6ef1580ca0b77b3e505274feeef692009e400edf
38,845
def _pd_df_cols_match_metadata_cols(df, table_metadata): """ Is the set of columns in the metadata equal to the set of columns in the dataframe? This check is irrespective of column order, does not check for duplicates. """ pd_columns = set(df.columns) md_columns = set([c["name"] for c in table_metadata["columns"]]) return pd_columns == md_columns
5c793646dddad977d2ec61908932ac0fac096564
38,847
def process_config_str(config_str): """ Takes potentially multi-line RawConfigParser-returned strings, strips them, and splits them by line. :param config_str: String parsed in by RawConfigParser :return: List of strings broken up and trimmed. """ if config_str is not None: return [i.strip() for i in config_str.split("\n") if len(i.strip()) > 0]
192b29f7f9fe5b2f8401f80ef462491c750c6588
38,848
def daysinmonth(year, month):
    """
    Return days in month based on the month and year

    Parameters
    ----------
    year : int
    month : int

    Returns
    -------
    integer of the days in the month
    """
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        daysinmonth_dict = {1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30,
                            7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    else:
        daysinmonth_dict = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                            7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    return daysinmonth_dict[month]
663e4449bb7273a5a08bc7b7953540f80ac3ad2c
38,850
def get_pokemon_names(cur): """Returns a list of pokemon names in the database (as strs) sorted in alphabetical order Args: cur: an open sqlite3 cursor created from a connection to the pokemon db """ new_list = [] query = ('SELECT name FROM pokemon') cur.execute(query) data = cur.fetchall() for element in data: new_list.append(element[0]) return sorted(new_list)
f55fcaaf86072a6f5ec87b40c7d75ac121e41868
38,854
def truncate_text_by_num_tokens(text, max_tokens, tok_separator=" "): """ Truncate a text to left a maximum number of tokens. :param text: :param max_tokens: :param tok_separator: :return: The truncated text. """ _toks = text.split(tok_separator) return tok_separator.join(_toks[:min(max_tokens, len(_toks))])
62cc97f101c37b6452cb5649706bdf6f5f47a68d
38,855
def transform(order_data_dict: dict): """ #### Transform task A simple Transform task which takes in the collection of order data and computes the total order value. """ total_order_value = 0 for value in order_data_dict.values(): total_order_value += value return {"total_order_value": total_order_value}
2f6b7b96a2ca5285cb668aad2333395ee3f9c474
38,862
def div(dividend, divisor): """ Takes two ints and returns a tuple (quotient, remainder) """ quotient = 0 while dividend - divisor >= 0: dividend -= divisor quotient += 1 return (quotient, dividend)
78ba5951df412e0599d834ebc2a71adf0be9370f
38,873
def make_rev_adict(adict): """ An adict maps text answers to neuron indices. A reverse adict maps neuron indices to text answers. """ rev_adict = {} for k,v in adict.items(): rev_adict[v] = k return rev_adict
827a288a6937d482d7d54a4509cd85ace5ee074e
38,874
def pluralize(number, singular, plural=None): """Helper function for getting the appropriate singular or plural variant of a word or phrase. pluralize(0, 'awoo') #=> "0 awoos" pluralize(1, 'awoo') #=> "1 awoo" pluralize(2, 'awoo') #=> "2 awoos" pluralize(1, 'box', 'boxen') #=> "1 box" pluralize(2, 'box', 'boxen') #=> "2 boxen" """ if plural is None: plural = singular + 's' if number == 1: ret = singular else: ret = plural return '{} {}'.format(number, ret)
da7bbe4864d38717811cc468bbda4cc076c23cf1
38,877
from typing import Iterable def field_lookup(obj, field_path): """ Lookup django model field in similar way of django query lookup. Args: obj (instance): Django Model instance field_path (str): '__' separated field path Example: >>> from django.db import model >>> from django.contrib.auth.models import User >>> class Article(models.Model): >>> title = models.CharField('title', max_length=200) >>> author = models.ForeignKey(User, null=True, >>> related_name='permission_test_articles_author') >>> editors = models.ManyToManyField(User, >>> related_name='permission_test_articles_editors') >>> user = User.objects.create_user('test_user', 'password') >>> article = Article.objects.create(title='test_article', ... author=user) >>> article.editors.add(user) >>> assert 'test_article' == field_lookup(article, 'title') >>> assert 'test_user' == field_lookup(article, 'user__username') >>> assert ['test_user'] == list(field_lookup(article, ... 'editors__username')) """ if hasattr(obj, 'iterator'): return (field_lookup(x, field_path) for x in obj.iterator()) elif isinstance(obj, Iterable): return (field_lookup(x, field_path) for x in iter(obj)) # split the path field_path = field_path.split('__', 1) if len(field_path) == 1: return getattr(obj, field_path[0], None) return field_lookup(field_lookup(obj, field_path[0]), field_path[1])
38bdf5efa75fc9f8273d1a40f719a700b86aa026
38,880
import time def time_spent_from(start_time: float) -> float: """Calculate time spent from start_time to now Example: >>> start_time = time.time() >>> ... >>> time_spent = time_spent_from(start_time) :param start_time: time in seconds since the epoch :return: time spent from start_time to now """ return time.time() - start_time
e96c9cc6c0ae33f6f577e49ee5c6fb95dae682e6
38,881
def get_combat_skills(lower: bool=False): """ Returns a list of the skills that contribute to combat level in no particular order. Args: lower: If the skills should be lowercase or titlecase. """ skills = [ "Attack", "Strength", "Defence", "Hitpoints", "Ranged", "Magic", "Prayer" ] return [s.lower() for s in skills] if lower else skills
e7da123590719f50a80bbd503901931b1a5f1dc7
38,883
def safe_characters(filename): """ Converts special characters e.g. from YouTube Video titles to 'safe' filenames (initially Windows 10) """ characters = {"|": "_", '"': "'", ":": " -", "@": "", "#": "", "?": ""} for old, new in characters.items(): filename = filename.replace(old, new) return filename
4a90e3d2eb7f5375a7c23aaff0f9a34f06208e2c
38,884
def parse_did(did): """Parses a Rucio DID and returns a tuple of (number:int, dtype:str, hash: str)""" scope, name = did.split(':') number = int(scope.split('_')[1]) dtype, hsh = name.split('-') return number, dtype, hsh
fc8ffd99743682895b4a7dc10cbbe911a0c6ca61
38,885
def compose_subject(raw_subject: str, tag: str = "", category: str = "") -> str: """Compose a subject containing a tag and a category. If any of tag or category is missing, don't print the corresponding part (without whitespace issues). :param raw_subject: The original subject :param tag: :param category: :returns: The subject. Form: "[{tag}] {category}: {raw_subject}" """ subject = "" if tag: subject += f"[{tag}] " if category: subject += f"{category}: " subject += raw_subject return subject
69fc7bd6bc5a4c6568f1b9a2642c7417d18f97b9
38,886
from datetime import datetime import click def validate_optional_timestamp(ctx, param, value): """Ensure that a valid value for a timestamp is used.""" if value: try: return datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ").replace( hour=0, minute=0, second=0 ) except ValueError: raise click.BadParameter( "{} must be a valid utc timestamp formatted as `%Y-%m-%dT%H:%M:%SZ` " "e.g. `2020-12-31T00:00:00Z`".format(param.name), param=param, ) return value
ace2b46b6b36d078b450f264164b73f88ebc2537
38,887
def _star_wrapper(arg): """ Internal helper function used to allow multiple arguments for functions called by `mp_starmap`. """ (func, args) = arg return func(*args)
e7f8dbd1cb50c35648b7cd9939d61024d7f0ff90
38,888
import operator def dfSubset(data, where): """ Return a subset of the data given a series of conditions .. versionadded:: 0.1.9 Parameters ---------- data: :py:class:`pandas.DataFrame`: DataFrame to view where: str or list or tuple Conditions to apply. Notes ----- If the argument is a string, it will be converted to a tuple for iteration. Items in iterable can be either a string or three-valued iterable of the following form:: string: 'column operand target' iterable: ('column', 'operand', 'target') If the first-level item is a string, it will be split at spaces. Operands are string-representations of operators from the operator module, e.g.:: 'eq', 'ge', 'le', 'ne', 'gt', 'lt', 'contains' Returns ------- view: :py:class:`pandas.DataFrame`: View into the data frame after successive slices See Also -------- :py:mod:`operator` """ view = data if isinstance(where, str): where = where, for item in where: if isinstance(item, str): cond = item.split() else: cond = item assert len(cond) == 3, ('Conditions should have three arguments, ' 'not like {}'.format(item)) evalFunc = getattr(operator, cond[1]) view = view[evalFunc(view[cond[0]], cond[2])] return view
01ae55694f89a314ef00796017cb76d393ace90f
38,895
def _match_suffix(suffix, ffilter):
    """Return file type (textual description) for a given suffix.

    Parameters
    ----------
    suffix : str
        File extension to check (must include the leading dot).
    ffilter : dict
        Known file types. The keys contain descriptions (names), whereas the
        values contain the corresponding file extension(s).

    Returns
    -------
    ftype : str | None
        File type (None if unknown file type).
    """
    for ftype, ext in ffilter.items():
        if suffix in ext:
            return ftype
7fd59ba819ac0f151d5b8156b69282f97f8601a2
38,896
def unwrap(value): """ Unwrap a quoted string """ return value[1:-1]
4852eab462a8961deb1d29b0eb4359ab12c00378
38,901
def is_board_full(board: list) -> bool: """return true if board is full else false""" return all([i != " " for i in board])
920d1693f34162fd00636f48c4ef7cfcd17c9d8c
38,906
import re def parse_swift_recon(recon_out): """Parse swift-recon output into list of lists grouped by the content of the delimited blocks. Args: recon_out (str): CLI output from the `swift-recon` command. Returns: list: List of lists grouped by the content of the delimited blocks Example output from `swift-recon --md5` to be parsed: ============================================================================ --> Starting reconnaissance on 3 hosts (object) ============================================================================ [2018-07-19 15:36:40] Checking ring md5sums 3/3 hosts matched, 0 error[s] while checking hosts. ============================================================================ [2018-07-19 15:36:40] Checking swift.conf md5sum 3/3 hosts matched, 0 error[s] while checking hosts. ============================================================================ """ lines = recon_out.splitlines() delimiter_regex = re.compile(r'^={79}') collection = [] delimiter_positions = [ind for ind, x in enumerate(lines) if delimiter_regex.match(x)] for ind, delimiter_position in enumerate(delimiter_positions): if ind != len(delimiter_positions) - 1: # Are in the last position? start = delimiter_position + 1 end = delimiter_positions[ind + 1] collection.append(lines[start:end]) return collection
6fd92e6dffab521edd3dfe414ca1902d4f1859fd
38,908
def get_emissions_probability(label_matches, given_tag, given_word, tag_counts): """Calculates the emissions probability of associating a given tag and word by the following formula: emissions_probability = (count of word associated with tag) / (count of tag) """ lookup_tuple = (given_word, given_tag) word_tag_frequency = label_matches.get(lookup_tuple, 0) tag_frequency = tag_counts[given_tag] if tag_frequency == 0: emissions_probability = 0 else: emissions_probability = float(word_tag_frequency)/float(tag_frequency) return emissions_probability
cdf9ba0a1645c9bc38d7bcde11096326e001ffb7
38,912
def validate_password(password): """ ensure that any input password is at least length 6, has at least one upper case, at least one lower case, and at least one special character :param password: the password of the user :return: returns True if the password is valid, False otherwise """ special_chars = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" has_uppercase = any(char.isupper() for char in password) has_lower = any(char.islower() for char in password) has_special = any(special_char in special_chars for special_char in password) if len(password) >= 6 and has_lower and has_uppercase and has_special: return True return False
e8ba6946e617ac224b9733a2f7ff6dc7256bdb86
38,913
from typing import ByteString from unittest.mock import Mock def create_mock_process( returncode: int = 0, stdout: ByteString = b'success', stderr: ByteString = b'', ): """ Create a mock Popen process for later inspection. Defaults to a successfully ran process. :param returncode: what the process will return :param stdout: what the process will write to STDOUT :param stderr: what the process will write to STDERR :return: the mock """ mock = Mock() mock.poll = returncode mock.wait = lambda: None mock.kill = lambda: None mock.__enter__ = Mock(return_value=mock) mock.__exit__ = Mock() mock.returncode = returncode mock.communicate = Mock(return_value=(stdout, stderr)) return mock
b310ce45eca976d399ea91dbaa40c4d1a51c5cd3
38,916
def readOrder(filename):
    """Read an elimination order from a file

    Elimination orders are stored as unknown length vectors, format
    "[nvar] [v0] [v1] ... [vn]"

    Note: the same file format may also be useful for MPE configurations, etc.
    """
    with open(filename, 'r') as fp:
        lines = fp.readlines()
        text = lines[-1].strip('\n').split(' ')
        nvar = int(text[0])
        vals = [int(text[i]) for i in range(1, nvar + 1)]
        if len(vals) != nvar:
            raise ValueError("Problem with file?")
        return vals
49af53bcabb134a2717c339f8f51bbdfbfba328c
38,921
def render_generic_exception(e): """ Return a formatted string for a generic exception. Parameters: e: the exception to render """ return 'Details: {0}'.format(e)
7e8f39145ba768d196c9a5ef094de706b7c57ad2
38,926
def PyObject_SetItem(space, w_obj, w_key, w_value): """Map the object key to the value v. Returns -1 on failure. This is the equivalent of the Python statement o[key] = v.""" space.setitem(w_obj, w_key, w_value) return 0
a77f23bc803090027fdf8249404f10200fe0992a
38,931
def Claret_LD_law(mu, c1, c2, c3, c4): """ Claret 4-parameter limb-darkening law. """ I = (1 - c1*(1 - mu**0.5) - c2*(1 - mu) - c3*(1 - mu**1.5) - c4*(1 - mu**2)) * mu return I
535136e560dba26da012b9d13d7edcc3b507ec54
38,933
def _ExtractKeyValuePairsFromLabelsMessage(labels): """Extracts labels as a list of (k, v) pairs from the labels API message.""" labels = [] if labels is None else labels return [(label.key, label.value) for label in labels]
3e26f60594bb1050cf5c701836754a31ce3defdd
38,935
def get_run_name_nr(_run_name, _run_nr): """ :param _run_name: [str], e.g. 'runA' :param _run_nr: [int], e.g. 1 :return: _run_name_nr: [str], e.g. 'runA-1' """ return f"{_run_name}-{_run_nr}"
e0428fd478509301f9623b83316bf058f69561f1
38,936
def merge_json(data1, data2): """merge lists in two json data together Args: data1 (json or None): first json data data2 (json): 2nd json data Returns: TYPE: merged data """ if not data1: return data2 else: for i in data2['list']: data1['list'][i] = data2['list'][i] return data1
613b931a6d83ef65be9fada1a99d34984892bdd0
38,937
def get_next_arguments(action, type="input"): """ Get a tuple of required/nonrequired inputs or outputs for each method Parameters ---------- action : Qiime2.action type : {"input", "param", "output"} Delineates if getting the action input, param, or output types Returns ------- List of tuples containing name and required semantic types List of tuples containing name and optional semantic types """ req = [] non_req = [] if type == "input": for k, v in action.signature.inputs.items(): if not v.has_default(): req.append([k, v.qiime_type]) else: non_req.append(["."+k, v.qiime_type]) elif type == "param": for k, v in action.signature.parameters.items(): if not v.has_default(): req.append([k, v.qiime_type]) else: non_req.append(["."+k, v.qiime_type]) else: for k, v in action.signature.outputs.items(): if not v.has_default(): req.append([k, v.qiime_type]) else: non_req.append(["."+k, v.qiime_type]) return req, non_req
db74dc93dfb3f52f3bdfeff785ae4a29ad31d2f2
38,940
import random def miller_rabin(n, k): """Run the Miller-Rabin test on n with at most k iterations Arguments: n (int): number whose primality is to be tested k (int): maximum number of iterations to run Returns: bool: If n is prime, then True is returned. Otherwise, False is returned, except with probability less than 4**-k. See <https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test> """ assert n > 3 # find r and d such that n-1 = 2^r × d d = n-1 r = 0 while d % 2 == 0: d //= 2 r += 1 assert n-1 == d * 2**r assert d % 2 == 1 for _ in range(k): # each iteration divides risk of false prime by 4 a = random.randint(2, n-2) # choose a random witness x = pow(a, d, n) if x == 1 or x == n-1: continue # go to next witness for _ in range(1, r): x = x*x % n if x == n-1: break # go to next witness else: return False return True
2b1ca6fd03c40bef2650102e690fbc270c01972c
38,944
def joiner(text): """ Simple function to join a list together into one string """ string = (' ').join(text) return string
6d50be365de6899801497b66a7fa79f9c3dc764e
38,947
def imatch(a: str, b: str) -> bool: """ return True if the given strings are identical (without regard to case) """ return a.lower() == b.lower()
449f3337e04173d8e62b755ecbc54686809431b0
38,948
def get_books_by_years(start_year, end_year, books_list): """ Get a dictionary of books and their published based on a range of year Parameters: start_year: The lower bound of the search range. end_year: The upper bound of the search range. books_list: The list of books to search in Returns: books_with_years: A dictionary of books with their publised year. """ print("You search for books published from " + str(start_year) + " to " + str(end_year)) books_with_years = {} for book in books_list: if book["published_year"] >= int(start_year) and book["published_year"] <= int(end_year): books_with_years[book["title"]] = book["published_year"] return books_with_years
72a5fbf0c82eb8353176da0263aab58289e4c782
38,953
def _clean_ylabel(feature, max_length=20): """ Replaces underscores with spaces and splits `feature` based on line length Parameters ---------- feature : str String to be cleaned max_length : str Maximum length (in characters) of each line. If `feature` is longer than this length it will be split with a newline character. Default: 20 Returns ------- feature : str Cleaned input `feature` """ feature = feature.replace('_', ' ') if len(feature) > max_length: ylabel = feature.split(' ') idx = len(ylabel) // 2 feature = '\n'.join([' '.join(ylabel[:idx]), ' '.join(ylabel[idx:])]) return feature
31c2e57cc57c48f5940d2b57f0fce7aef0600e32
38,960
from typing import Any import re def regex_method(term: str, key: str, value: Any) -> str: """ Map file search method 'regex' will return 'value' if regex pattern 'term' matches 'key' >>> regex_method(r"Hello (?:World|Werld|Squidward)", "Hello World", "squaids") 'squaids' >>> regex_method("xxx", "Hello World", "squaids") '' """ return value if re.search(term, key) else ""
3de6752d6f205c56c4086b13a17f60346069e92b
38,964
def get_settings(args): """Determine the settings from the commandline arguments.""" settings = { "analysis_directory": args.input, "report_directory": args.output, "tokens": args.tokens, "language": args.language, } return settings
55ac9010e289f5e7fb870d74b531578ddac4c36a
38,966
def GenerateSingleQueryParameter(name, param_type, param_value): """Generates a single valued named parameter. Args: name: name of the parameter. param_type: Type of the parameter. E.g. STRING, INT64, TIMESTAMP, etc. param_value: Value of this parameter. """ return { 'name': name, 'parameterType': { 'type': param_type }, 'parameterValue': { 'value': param_value } }
d7bd31ec80a73f627fcd3de08b77bf1a397aa69f
38,968
def decode_utf8(text: bytes) -> str:
    """Decode `text` as a UTF-8 string

    Arguments:
        text {bytes} -- UTF-8-encoded bytes

    Returns:
        str -- decoded text
    """
    return text.decode('utf-8')
d9624f6a65d81b45192ff140bae245fb334ef2eb
38,969
def DivideIfPossibleOrZero(numerator, denominator): """Returns the quotient, or zero if the denominator is zero.""" if not denominator: return 0.0 else: return numerator / denominator
a276130c4ec0319ae3c53757e620243610a9c564
38,973
import re def _parse_proplist(data): """ Parse properties list. """ out = {} for line in data.split("\n"): line = re.split(r"\s+", line, 1) if len(line) == 2: out[line[0]] = line[1] return out
971b26aefab8d67cc7ee6ad66306e9f1e8de82d2
38,974