content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def WithChanges(resource, changes):
  """Sequentially apply a list of ConfigChangers to a resource.

  Whether the input resource is mutated in place is undefined; callers
  should rely on the returned value.

  Args:
    resource: KubernetesObject, probably a Service.
    changes: List of ConfigChangers.

  Returns:
    The adjusted resource.
  """
  adjusted = resource
  for changer in changes:
    adjusted = changer.Adjust(adjusted)
  return adjusted
4e0db8a71b24146e0ef5d18c65e71ac12e500fef
670,472
import socket


def is_port_open(port):
    """
    Checks whether a TCP connection to the given port on localhost succeeds.

    :param port: The port as a number
    :return: True if open, false otherwise
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 on success instead of raising.
        return sock.connect_ex(('127.0.0.1', port)) == 0
    finally:
        sock.close()
51ab6b8a5259f2628a2b9d163acda2c266130566
615,318
from typing import Any
import requests


def from_url(url: str) -> Any:
    """Fetch a URL with an HTTP GET and return its parsed JSON payload."""
    response = requests.get(url=url)
    return response.json()
ea72f01ea304c96da2ab03361ab194eadf1fcdbe
434,956
def validation_failed(schema, doc):
    """Return True if the parsed doc fails against the schema.

    Failures of type DTD_UNKNOWN_ID ("IDREF attribute linkend references an
    unknown ID") are ignored: individual files are validated in isolation,
    so cross-file references may legitimately be missing.
    """
    if schema.validate(doc):
        return False
    return any(entry.type_name != "DTD_UNKNOWN_ID" for entry in schema.error_log)
dcfa36da3370c1e20b9ce1e11588868eb9bcdb55
529,031
def filter_stream(streams, excludes):
    """
    Uses a list of keywords to remove sensors or streams from the list
    returned by OOI Net.

    :param streams: list of sensor or streams returned from OOI Net
    :param excludes: list of keywords to use in pruning the list
    :return: a cleaned, pruned list
    """
    return [item for item in streams
            if not any(keyword in item for keyword in excludes)]
d7c7278714cb80541a2aa29ab1c111256ef6b618
39,987
def extract_demonym(demonyms, gender):
    """
    Search through the list of demonyms and find the right one by gender.

    :param demonyms: iterable of claim-like objects (presumably pywikibot
        claims — each has getTarget() and a qualifiers mapping; confirm)
    :param gender: may be male (u'Q499327') or female (u'Q1775415')
    :return: demonym in Serbian language, or an empty string if no match
    """
    description = u''
    for demonym in demonyms:
        local_demonym = demonym.getTarget()
        # Only consider demonyms written in Serbian.
        if local_demonym.language == u'sr':
            demonym_qualifiers = demonym.qualifiers
            # P518 ("applies to part") carries the gender qualifier.
            if 'P518' in demonym_qualifiers:
                demonym_gender = demonym_qualifiers['P518']
                if len(demonym_gender) > 1:
                    # NOTE(review): more than one gender qualifier aborts the
                    # whole process via exit() — intentional hard stop?
                    exit()
                demonym_gender = demonym_gender[0].target.id
                if demonym_gender == gender:
                    # First gender match wins; stop scanning.
                    description += local_demonym.text
                    break
    return description
1e35a61db65778f98cdbd814d0e9f223a232d675
76,732
def best_scale_factor(x, y):
    """Maximum distance from zero for one variable relative to another

    >>> best_scale_factor([.1, .2, .3], [.2, .4, .6])  # doctest: +ELLIPSIS
    2.0
    >>> best_scale_factor([-.1, -.2, -.3], [-1, -2, -3])  # doctest: +ELLIPSIS
    10.0

    For speed and simplicity, the scale factor is intended for data near or
    crossing zero. It is not the maximum relative ranges of the data.
    """
    if not (any(x) and any(y)):
        return 1.
    y_extent = max(abs(min(y)), abs(max(y)))
    x_extent = max(abs(min(x)), abs(max(x)))
    return float(y_extent) / x_extent
70d526054627d720affa6c8dad91dcc95f3a5b90
199,257
def _pretty_class(s): """ convert the internal class name representation into what users expect to see. Currently that just means swapping '/' for '.' """ # well that's easy. return str(s).replace("/", ".")
4fc3292bce1ce2cf72688c6b580eed174c4ad5c4
587,385
def packed_letter_to_number(letter):
    """Unpack a letter to the corresponding number according to MPC.

    See: https://www.minorplanetcenter.net/iau/info/DesDoc.html

    Args:
        letter (str): Single character to decode.

    Returns:
        str: The corresponding number as a string of digits
        (zero-padded to two characters when the input is itself a digit).

    Raises:
        ValueError: If ``letter`` is neither a digit nor a character in
            the MPC packed ranges.
    """
    try:
        int(letter)
    except ValueError:
        ord_letter = ord(letter)
        if 97 <= ord_letter <= 122:
            # 'a'..'z' map to 36..61.
            return str(ord_letter - 61)
        elif 65 <= ord_letter <= 96:
            # 'A'..'Z' map to 10..35 (range also admits a few symbols).
            return str(ord_letter - 55)
        else:
            raise ValueError(f'Letter "{letter}" is invalid')
    else:
        # Already a digit: keep it, zero-padded to width 2.
        return letter.rjust(2, '0')
ab83f539363316c09c4b38be93a78f34605774ee
285,200
import platform


def is_linux() -> bool:
    """
    :return: True if running on Linux. False otherwise.
    """
    system_name = platform.system().lower()
    return system_name == 'linux'
2126de6751fdaf27eab606eef54230690270e4bf
660,443
def normalize(df, baseline=0.1):
    """
    Normalize all spectra to a maximum of 1, optionally baseline-correcting.

    :param df: The Pandas DataFrame (or Series) to normalize.
    :param baseline: Baseline correction threshold, or False for no
        correction. Samples whose absolute normalized value is below this
        threshold are averaged and subtracted. [Default: 0.1]
    :returns: The normalized DataFrame.
    """
    df = df.copy()
    df /= df.max()
    if baseline is not False:
        correction = df[df.abs() < baseline].mean()
        # Bug fix: if no sample falls below the threshold the mean is NaN,
        # which previously turned the whole result into NaN. Treat an empty
        # baseline window as "no shift".
        if hasattr(correction, 'fillna'):
            correction = correction.fillna(0)
        elif correction != correction:  # scalar NaN check
            correction = 0
        df -= correction
    return df
65a7a0238736c2c5a5e21b4bfc0205aca9f9f778
641,145
def choose_features(features, hrs=True, numlocs=True, location=True, latlon=True):
    """
    Returns a list of feature names chosen from the dictionary "features".

    Relies on the insertion order of ``features`` matching the flag order
    (hrs, numlocs, location, latlon).

    Parameters:
        features: a dictionary with keys equal to the arguments of the
            function, values list of df variables
        hrs: If True, choose feature group that contains hours spent in
            Tuscany, hours spent outside Tuscany
        numlocs: If True, choose feature group that contains number of
            locations and number of unique locations visited in Tuscany
            and Italy
        location: If True, choose feature group that contains time spent at
            the locations with respective features, including landscape,
            cities visited, and total number of attractions visited.
        latlon: If True, choose feature group that contains latitude and
            longitude of average location, most visited location, start and
            end location, and standard deviation of all lat/lon
    """
    final_features = []
    flags = [hrs, numlocs, location, latlon]
    # Idiom fix: test truthiness directly instead of comparing "== True".
    for flag, key in zip(flags, features.keys()):
        if flag:
            final_features.extend(features[key])
    return final_features
5403fcd5728121a187113abf2a7f6ab2a59ece75
372,503
def dtype(x):
    """Returns the dtype of a tensor as a string."""
    name = x.dtype.name
    return name
5be6755b88e6f9c1985ab5c57afb47921be84c9a
309,979
def get_mode_from_operator_identifier(identifier):
    """Get the Fmm mode from the operator identifier."""
    modes = {
        "laplace": "laplace",
        "helmholtz": "helmholtz",
        "modified": "modified_helmholtz",
        "maxwell": "helmholtz",
    }
    descriptor = identifier.split("_")[0]
    if descriptor not in modes:
        raise ValueError("Unknown identifier string.")
    return modes[descriptor]
8fa7d55ccde72d87ab548cb52b562a3697277d5e
494,104
def remove_keys(road):
    """
    :param road: A road segment as a dict
    :return: The same road segment without "felt" and "kvalitet".
    """
    # Both pops are tolerant of the key being absent.
    road.pop("felt", None)
    geometry = road["geometri"]
    geometry.pop("kvalitet", None)
    return road
633f5934f2e41bbf9a0a83b55ada2400ec9b3ddf
607,759
import math


def dwpf(tmpf, relh):
    """
    Compute the dewpoint in Fahrenheit from temperature (F) and relative
    humidity (%). Returns None if either input is None.
    """
    if tmpf is None or relh is None:
        return None
    # Convert to Kelvin, apply the dewpoint approximation, convert back.
    kelvin = 273.15 + (5.00 / 9.00 * (tmpf - 32.00))
    dew_kelvin = kelvin / (1 + 0.000425 * kelvin * -(math.log10(relh / 100.0)))
    return int(float((dew_kelvin - 273.15) * 9.00 / 5.00 + 32))
49031b6fb76f0311ec2554aae01671b2b935eaf7
63,198
from typing import Union
import pathlib


def name_modules(
    folder: Union[str, pathlib.Path],
    recursive: bool = False) -> list[str]:
    """Returns list of python module names in 'folder'.

    Args:
        folder (Union[str, pathlib.Path]): path of folder to examine.
        recursive (bool): whether to include subfolders. Defaults to False.

    Returns
        list[str]: a list of python module names in 'folder'.
    """
    found = get_paths(folder=folder, suffix='.py', recursive=recursive)  # type: ignore
    return [str(path.stem) for path in found]
b5f23927deb52c557aac571e977cf2fdb35408f8
501,913
def intersection(x1, x2):
    """Histogram Intersection

    Parameters
    ----------
    x1: numpy.ndarray
        Vector one
    x2: numpy.ndarray
        Vector two

    Returns
    -------
    distance: float
        Histogram intersection between `x1` and `x2`
    """
    assert(len(x1) == len(x2))
    overlap = sum(min(a, b) for a, b in zip(x1, x2))
    return float(overlap) / min(sum(x1), sum(x2))
055530363d62b0993eee8a9c72c7f759982a8376
39,389
def _slugify(value: str) -> str: """ Converts the value to a slugified version reducing the str down to just alpha-numeric values and removing white-space :param value: value as a str :return: slugified version of the value """ return ''.join(s for s in value if s.isalnum()).lower()
dd0e28381f1033c4fea9bb4b368a715f2ba132fe
686,664
def merge_dictionaries(base_dict: dict, in_dict: dict) -> dict:
    """
    Return a complete dictionary based on the keys of ``base_dict``, with
    values overridden by ``in_dict`` where the key exists in both.

    The purpose is that ``base_dict`` is a complete dictionary of values
    such that an incomplete second dictionary can be used to update
    specific key-value pairs. Keys present only in ``in_dict`` are ignored.
    Note: ``base_dict`` is mutated in place and also returned.

    :param base_dict: Complete dictionary of key-value pairs.
    :param in_dict: Subset of key-value pairs whose values take precedence.
    :return: The merged dictionary.
    """
    # Idiom fix: "k in in_dict" instead of "k in in_dict.keys()".
    for key in base_dict:
        if key in in_dict:
            base_dict[key] = in_dict[key]
    return base_dict
34abb21b38f64aff27c62b1029cf19e99f1ac99f
345,916
from typing import Dict


def create_authorization_headers(authorization: str) -> Dict[str, str]:
    """Creates headers containing authorization"""
    headers = {"Authorization": authorization}
    return headers
6df2ac11c778944c36d9d6145f7c466425526e0d
204,839
def _FindAdobeIllustrator(f): """Does a file contain "Adobe_Illustrator"? Args: f: an open File Returns: bool: True if reading forward in f, we find "Adobe_Illustrator" """ while True: s = f.readline() if not s or s.startswith(b"%%EndProlog"): break if s.find(b"Adobe_Illustrator") >= 0: return True return False
899edf5500c350485089d7a63e6ac867e5d1ad96
466,477
def read64str(f):
    """Read 8 bytes (64 bits) from a file and return them decoded as
    a latin-1 string."""
    raw = f.read(8)
    return raw.decode("latin1")
532dd21f33d5935b3aa9fc329747f3edd468a6aa
618,860
def decode_resouce_type(id: str) -> int:
    """Decode a number between 0 and 0xFFFF from a 4-character hex string."""
    value = int(id, 16)
    return value
64a1d319a95dd497721635c6cbb4c806ac172245
576,722
def count_binary_ones(number):
    """
    Returns the number of 1s in the binary representation of number (>=0).
    For e.g 5 returns 2 (101 has 2 1's), 21 returns 3 ("10101").
    """
    # format(n, "b") renders the number without the "0b" prefix.
    return format(number, "b").count("1")
c4d153d08e0d1d114fbd95a6ca0bfe33af5cbd9c
335,446
def check_ssid(ssid: str) -> str:
    """Validate an SSID, returning it unchanged if at most 32 characters."""
    if len(ssid) <= 32:
        return ssid
    raise ValueError("%s length is greater than 32" % ssid)
aea354e4299ed2524f1caa3fcd72b843d87d52b9
375,233
def on_condition_that(condition, fuzzer):
    """
    A composite fuzzer that applies the supplied fuzzer if the specified
    condition holds.

    :param condition: Can either be a boolean value or a 0-ary function that
        returns a boolean value.
    :param fuzzer: the fuzz operator to apply if the condition holds.
    :returns: a fuzz operator that applies the underlying fuzz operator if
        the specified condition is satisfied.
    """
    def _on_condition_that(steps, context):
        # Evaluate lazily when a callable is supplied.
        holds = condition() if callable(condition) else condition
        return fuzzer(steps, context) if holds else steps
    return _on_condition_that
b5b8d6ba5e13619b06f8a92ed882f1cb78d7a4a6
211,595
def mix(x, y, a):
    """
    Re-implementation of the OpenGL mix function:
    https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/mix.xhtml
    The return value is computed as x*(1-a) + y*a.
    """
    blended = x * (1.0 - a) + y * a
    return blended
d95f7026b47273e91891dfdc5f39905e99533e50
87,998
def slice_vis(step, uvw, v=None):
    """Slice visibilities into a number of chunks.

    :param step: Maximum chunk size
    :param uvw: uvw coordinates
    :param v: Visibility values (optional)
    :returns: List of visibility chunks (or chunk pairs when ``v`` is given)
    """
    chunk_starts = range(0, len(uvw), step)
    if v is None:
        return [uvw[start:start + step] for start in chunk_starts]
    return [(uvw[start:start + step], v[start:start + step])
            for start in chunk_starts]
6c12607e4966575f6f3d3ae3a0004e6d9b6d2648
485,249
def format_address(msisdn):
    """
    Format a normalized MSISDN as a URI that ParlayX will accept.
    """
    if msisdn.startswith('+'):
        return 'tel:' + msisdn[1:]
    raise ValueError('Only international format addresses are supported')
f5a5cc9f8bcf77f1185003cfd523d7d6f1212bd8
5,320
import random


def some_simple_data(length=1000000):
    """Generate a randomly shuffled list of the integers 0..length-1."""
    shuffled = list(range(length))
    random.shuffle(shuffled)
    return shuffled
c12a22a2e226e80176c428816d9162d0d2cead1b
581,958
def safe_hasattr(obj, attr, _marker=object()):
    """Does 'obj' have an attribute 'attr'?

    Use this rather than built-in hasattr, as the built-in swallows
    exceptions in some versions of Python and behaves unpredictably with
    respect to properties.
    """
    # The sentinel default is created once at definition time, so it can
    # never collide with a real attribute value supplied by callers.
    found = getattr(obj, attr, _marker)
    return found is not _marker
2ed0ed97c072b5476e1ae679863d2bd50e22f845
223,467
def base_convert_to_ten(base: int, digits: list[int]) -> int:
    """
    Convert little-endian digits in the given base to a base-10 integer.

    Examples:
        >>> digits = [0, 1, 1]
        >>> base_convert_to_ten(2, digits)
        6
    """
    assert abs(base) >= 2
    total = 0
    for power, digit in enumerate(digits):
        total += digit * base ** power
    return total
007f887a2dc5e8cc19dc9c9b9d1d248456d5eda3
79,771
def as_list(tup_list):
    """
    Turns a tuple-list into a list of the first element of each tuple.

    @param tup_list is the tuple-list you are converting
    @returns the created list
    """
    return [pair[0] for pair in tup_list]
770f5f4e302e1796f6945507c7f314cf157cc77a
101,236
def print_sr(search_results, readable_list):
    """
    Prints results of movie lookup (movie/tv show title and release year)
    and appends "Title, Year" strings to ``readable_list`` (en-dashes in
    year ranges are normalized to hyphens).

    Param: search_results (list) like sr and readable_list (str) like org_list
    Example: print_sr(sr, org_list)
    Returns: None
    """
    for entry in search_results:
        readable_list.append(entry["Title"] + ", " + entry["Year"].replace("–", "-"))
        # Echo each result's title and release year to stdout.
        print(entry["Title"] + " (" + entry["Year"] + ")")
    return None
aaa92cff09d9909e2c13b55f6eb6525b6c6034bd
225,137
def convert_cases(case):
    """
    Converts grammatical case labels (or Wikidata QIDs) as found on
    Wikidata to more succinct versions; unknown cases map to "".
    """
    case = case.split(" case")[0]
    abbreviations = {
        "accusative": "Acc", "Q146078": "Acc",
        "dative": "Dat", "Q145599": "Dat",
        "genitive": "Gen", "Q146233": "Gen",
    }
    return abbreviations.get(case, "")
a479d4624f5b1402db8367679b45a013c523412a
420,945
def has_converged(mu, oldmu):
    """
    A boolean indicating whether or not a set of centroids has converged.

    Parameters:
        mu - the latest array of centroids
        oldmu - the array of centroids from the previous iteration
    Returns:
        True when the old and new centroid sets are identical, i.e. the
        clustering has converged.
    """
    new_centroids = {tuple(c) for c in mu}
    old_centroids = {tuple(c) for c in oldmu}
    return new_centroids == old_centroids
35234531a15baaf4f1df2f196e6abcec40fade59
41,177
import random


def random_bits(bit_ranges):
    """Generate a random binary mask with ones in the specified bit ranges.

    Each element of bit_ranges is a (low, high) tuple of bit positions,
    both limits included. A random number of positions within each range
    is selected and the corresponding bits are set. The mask is returned
    as a decimal integer.
    """
    selected_positions = []
    for low, high in bit_ranges:
        # Pick 0..span positions at random from this range.
        count = random.randint(0, high - low + 1)
        selected_positions += random.sample(range(low, high + 1), count)
    mask = 0
    for position in selected_positions:
        mask |= 1 << position
    return mask
6347f98cbf141c8059f920a1e03e007004fb1090
414,602
def day_of_month(date_time_col):
    """Returns the day of month from a datetime column."""
    days = date_time_col.dt.day
    return days
c69e12d5a7bbababd9816df0ca13e09fd12eca48
125,405
def p1_for(input_list):
    """Compute the sum of the numbers in the list with a for loop."""
    total = 0
    for value in input_list:
        total += value
    return total
7691bc14e10eab3a7fbda46091dcb62773704f1a
675,882
from typing import Counter


def check_outliers(df, list):
    """
    Flag rows whose values fall outside 1.5*IQR of the 25th/75th quantiles.

    Parameters
    ----------
    df : dataframe
        dataframe to be inspected
    list : list
        list of columns less id

    Returns
    -------
    outlier_index_unique : list
        index of rows with outlier values (deduplicated, first-seen order)
    """
    flagged = []
    for column in list:
        q1 = df[column].quantile(0.25)
        q3 = df[column].quantile(0.75)
        step = (q3 - q1) * 1.5
        mask = (df[column] < q1 - step) | (df[column] > q3 + step)
        flagged.extend(df[mask].index)
    # Counter preserves insertion order, so this deduplicates while keeping
    # the order in which indices were first flagged.
    return [index for index in Counter(flagged)]
d12f08f14254c135f95c74185564b2d637ecd5e5
101,990
def add_a_b(a, b):
    """Return the sum of ``a`` and ``b``."""
    result = a + b
    return result
05bb65169489a4f401628750e0b79bda0bed0f03
370,163
import json


def is_nonexistent_file_error(e):
    """
    Differentiates between an abnormal connection issue or the file not
    existing.

    Args:
        e: the HTTP exception to analyze

    Returns:
        bool whether the error is a nonexistent file http error
    """
    no_file_msg = 'The File does not exist with the provided parameters.'
    if e.response is None:
        return False
    detail = json.loads(e.response.content).get('detail')
    return detail == no_file_msg
6c52bd8b79abebec73252df4369d5f3e5cf77fe5
342,367
def interp(val, array_value, array_ref):
    """
    Linearly interpolate a value from array_value using val's position in
    array_ref. The array_ref must be in increasing order! Values outside
    the reference range clamp to the first/last entry.
    """
    if val <= array_ref[0]:
        return array_value[0]
    last = len(array_ref) - 1
    if val > array_ref[last]:
        return array_value[last]
    # Find the first reference point at or above val.
    idx = 1
    while val > array_ref[idx]:
        idx += 1
    span = array_ref[idx] - array_ref[idx - 1]
    weighted = ((array_ref[idx] - val) * array_value[idx - 1]
                + array_value[idx] * (val - array_ref[idx - 1]))
    return weighted / span
a47e49eea2bc8233103c4425103f2a4bab42c6ce
125,293
def get_vertex_indices( structure, centre_species, vertex_species, cutoff=4.5, n_vertices=6 ):
    """
    Find the atom indices for atoms defining the vertices of coordination
    polyhedra, from a pymatgen Structure object.

    Given the elemental species of a set of central atoms, A, and of the
    polyhedral vertices, B, this function finds: for each A, the N closest
    neighbours B (within some cutoff). The number of neighbours found per
    central atom can be a single value for all A, or can be provided as a
    list of values for each A.

    Args:
        structure (`pymatgen.Structure`): A pymatgen Structure object, used
            to find the coordination polyhedra vertices.
        centre_species (str): Species string identifying the atoms at the
            centres of each coordination environment, e.g. "Na".
        vertex_species (str or list(str)): Species string identifying the
            atoms at the vertices of each coordination environment, e.g.
            "S", or a list of strings, e.g. ``["S", "I"]``.
        cutoff (float): Distance cutoff for neighbour search.
        n_vertices (int or list(int)): Number(s) of nearest neighbours to
            return for each set of vertices. If a list is passed, this
            should be the same length as the number of atoms of centre
            species A.

    Returns:
        list(list(int)): Nested list of integers, giving the atom indices
        for each coordination environment.
    """
    # All sites whose species string matches the requested centre species.
    central_sites = [ s for s in structure if s.species_string == centre_species ]
    # Normalise scalar arguments to per-site / list form.
    if isinstance(n_vertices, int):
        n_vertices = [n_vertices] * len(central_sites)
    if isinstance(vertex_species, str):
        vertex_species = [ vertex_species ]
    vertex_indices = []
    for site, n_vert in zip(central_sites, n_vertices):
        # get_neighbors(..., include_index=True) yields (site, distance,
        # index) tuples — hence the s[0]/x[1]/n[2] positional accesses.
        neighbours = [ s for s in structure.get_neighbors(site, r=cutoff, include_index=True) if s[0].species_string in vertex_species ]
        # Sort by distance and keep the n_vert closest neighbours.
        neighbours.sort(key=lambda x: x[1])
        atom_indices = [ n[2] for n in neighbours[:n_vert] ]
        vertex_indices.append( atom_indices )
    return vertex_indices
b4edeaaf1329b9b2f1f130cd4ffa91ddcc021fe2
298,147
import requests


def get_lihkg_response(resp):
    """
    Obtain the payload of a LIHKG API response object.

    Return:
    -------
    A dictionary (empty on non-200 status or unsuccessful API calls).
    """
    if not isinstance(resp, requests.models.Response):
        raise TypeError('resp must be a \'requests.models.Response\' object.')
    payload = dict()
    if resp.status_code == 200:
        payload = resp.json()
        # The API signals success with "success": 1 and nests the data
        # under "response".
        if payload.get('success', 0) == 1:
            payload = payload.get('response', dict())
    return payload
46f3677fd42b5eaf7779cdb3daa70f3f616ebb1b
39,715
def add_prefix_un(word):
    """Take the given word and add the 'un' prefix.

    :param word: str - containing the root word.
    :return: str - of root word prepended with 'un'.
    """
    prefixed = 'un' + word
    return prefixed
5ce5b6899d1ebd83572ba2daaf1467ed3b239055
583,353
def getChromSizes(infile):
    """
    Read a chrom sizes file and return a dict keyed by names, valued by
    ints. Blank lines are skipped.
    """
    sizes = {}
    with open(infile, 'r') as handle:
        for raw in handle:
            fields = raw.strip().split()
            if not fields:
                continue
            sizes[fields[0]] = int(fields[1])
    return sizes
a96042f7b3140a558bfae2c54538fac2d0628ff2
353,694
def parse_residue_spec(resspec):
    """
    Light version of:
    vermouth.processors.annotate_mud_mod.parse_residue_spec

    Parse a residue specification:
    <mol_name>#<mol_idx>-<resname>#<resid>
    Returns a dictionary with keys 'molname', 'mol_idx', 'resname', and
    'resid' for the fields that are specified. 'resid' is parsed as a
    float; 'mol_idx' as an int.

    Parameters
    ----------
    resspec: str

    Returns
    -------
    dict

    Raises
    ------
    IOError
        If mol_idx is not an integer or resid is not a number.
    """
    molname = None
    resname = None
    mol_idx = None
    resid = None
    mol_name_idx, *res = resspec.split('-', 1)
    molname, *mol_idx = mol_name_idx.split('#', 1)
    if res:
        resname, *resid = res[0].split('#', 1)

    error_msg = ('Your selection {} is invalid. Was not able to assign {}'
                 'with value {}')
    out = {}
    if mol_idx:
        try:
            mol_idx = int(mol_idx[0])
        except ValueError:
            raise IOError(error_msg.format(resspec, 'mol_idx', mol_idx))
        out['mol_idx'] = mol_idx
    if resname:
        out['resname'] = resname
    if molname:
        out['molname'] = molname
    if resid:
        try:
            out['resid'] = float(resid[0])
        except ValueError:
            # Bug fix: this branch previously reported 'mol_idx' instead of
            # the field that actually failed to parse.
            raise IOError(error_msg.format(resspec, 'resid', resid))
    return out
eab66570595e28cfea2ed92834c3eaffd14ba474
228,649
def conceptnet_create_search_query(input_dict):
    """
    Constructs a Conceptnet search query string from an input dict with
    keys 'limit', 'offset', 'text', and 'minweight'. Parameters that are
    None or empty strings are omitted (but e.g. 0 is kept).

    Returns a dict with a single key "query".
    """
    fields = [
        ('limit', input_dict['limit']),
        ('offset', input_dict['offset']),
        ('text', input_dict['text']),
        ('minWeight', input_dict['minweight']),
    ]
    query = "search?"
    for name, value in fields:
        # Idiom fix: compare against None with "is not"; keep the explicit
        # empty-string check so falsy values like 0 are still included.
        if value is not None and value != '':
            query = query + name + '=' + str(value) + '&'
    # Strip the trailing '&' (or the '?' when no parameters were added).
    return {"query": query[:-1]}
d725657c30734607174acc2a632a66dd28593489
457,567
def capitalise(input_string: str) -> str:
    """Convert the first character of the string to uppercase.

    Unlike ``str.capitalize``, the rest of the string is left untouched.
    An empty string is returned unchanged (previously raised IndexError).
    """
    if not input_string:
        return input_string
    return input_string[0].upper() + input_string[1:]
eb5dd6a2208ed626bac496b456c7b1f45770a1d5
614,177
import hashlib


def md5(file_path):
    """Get md5 hash of a file.

    Args:
        file_path (str): File path

    Returns:
        md5_hash (str): md5 hash of data in file_path
    """
    digest = hashlib.md5()
    with open(file_path, 'rb') as fhandle:
        # Stream in 4 KiB chunks so large files are not read into memory.
        while True:
            chunk = fhandle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
c63d974cddff78a825ba9cde8cac0fac8567b64e
597,874
import math


def bin_of_week(dt, bin_size_mins=30):
    """
    Compute bin of week based on bin size for a pandas Timestamp object
    or a Python datetime object. Based on .weekday() convention of
    0=Monday.

    Parameters
    ----------
    dt : pandas Timestamp object or a Python datetime object.
    bin_size_mins : Size of bin in minutes; default 30 minutes.

    Returns
    -------
    0 to (n-1) where n is number of bins per week.

    Examples
    --------
    dt = datetime(2020, 2, 4, 1, 45)
    bin = bin_of_week(dt, 30)
    # bin = 51
    """
    # Minutes elapsed since the start of the week (Monday 00:00).
    minutes_into_week = dt.weekday() * 1440 + dt.hour * 60 + dt.minute
    return math.trunc(minutes_into_week / bin_size_mins)
f49cd29e43e32bd3d331cdcca593ed00c6737a90
64,302
def pvap_water(TC):
    """Return vapor pressure of water (Pa) for temperature in C."""
    # Antoine equation coefficients for water.
    a, b, c = 8.07131, 1730.63, 233.426
    pressure_mmHg = 10**(a - (b/(c+TC)))
    # Convert mmHg to Pa.
    return pressure_mmHg * 133.3
7881cf6aba9e4f8abd927eaf20535c44f749eddb
334,652
def _parse_query_arguments(query_arguments): """ This function converts a string in the format 'key1:value1-key2:value2' to a dict in the format {'key1': 'value1', 'key2': 'value2',...}. Args: query_arguments: A string cuntianing all the arguments to query by e.g. 'tags:cat,plastic-tag_mode:any'. Returns: A dict of all the args e.g. {'tags': 'cat,plastic', 'tag_mode': 'any', 'text':''}. """ init_query_arguments = {'tags': 'all', 'tag_mode': 'any', 'text': ''} if query_arguments is not None: different_arg = query_arguments.split('-') for arg in different_arg: key = arg.split(':')[0] value = arg.split(':')[1] if key in init_query_arguments: if key == 'tag_mode' and value != 'any' and value != 'all': raise ValueError('Invalid tag_mode, tag_mode can be "any" or "all"') init_query_arguments[key] = value else: raise ValueError('Invalid query_arguments key can be tags, tag_mode or text.') return init_query_arguments
81bdff575a45f46c11cbcf6e8ef7871a6ff59590
367,635
def IsBuildingModule(env, module_name):
    """Returns true if the module will be built with current build process.

    Args:
      env: The environment.
      module_name: module name.

    Returns:
      Whether the given module will be built.
    """
    build_list = env['BUILD_SCONSCRIPTS']
    return module_name in build_list
ed71032319c708d6640952fee0b1914a420932c7
391,624
import math
import random


def generate_entities(num_entities=100):
    """Generate num_entities unique random entity names for a synthetic
    knowledge graph.

    Names have the form "/entity_<chars>" where <chars> is a random sample
    of hex-like characters long enough to make num_entities distinct names
    possible.

    Args:
        num_entities (int): number of unique entities to generate.

    Returns:
        list[str]: the generated entity names, in generation order.
    """
    entity_list = []
    seen = set()  # O(1) membership test (was an O(n) list scan per draw)
    hex_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
    # Name length chosen so the 18-character alphabet can cover the space.
    name_len = int(math.log(num_entities, 18) + 1)
    while len(entity_list) < num_entities:
        entity = "/entity_{}".format(''.join(random.sample(hex_chars, name_len)))
        if entity not in seen:
            seen.add(entity)
            entity_list.append(entity)
    return entity_list
6ccdf228dbfe6b04b002bdc648ed73d1c759f426
89,727
def _json_container_dates(containers): """ Moves container "status date" to a JSON friendly value. Unit tested: test__json_container_dates :param containers: The containers found from docker ps. :type containers: list :returns: The containers found from docker ps, with JSON friendly dates. :rtype: list """ clean_containers = [] for container in containers: tmp_container = container tmp_container["status_date"] = str(tmp_container["status_date"]) tmp_container["created_date"] = str(tmp_container["created_date"]) clean_containers.append(tmp_container) return clean_containers
25c05068ae56e8f58c98d17308e81b8b1cde3b36
579,141
def create_distance_callback(data):
    """Creates callback to return distance between points."""
    distance_matrix = data["distances"]

    def distance_callback(from_node, to_node):
        """Returns the manhattan distance between the two nodes"""
        return distance_matrix[from_node][to_node]

    return distance_callback
20c8a1e7239ad05152c3ad66c975ad47e2a5f2f0
400,430
def _process_disk_details(sub, disk): """Process disk record and yield them. Arguments: sub (Subscription): Azure subscription object. disk (dict): Raw disk record. Yields: dict: An Azure record of type ``disk``. """ disk_type = 'unattached' if 'managed_by' in disk: if disk.get('managed_by'): disk_type = 'attached' record = { 'raw': disk, 'ext': { 'cloud_type': 'azure', 'record_type': 'disk', 'disk_type': disk_type, 'subscription_id': sub.get('subscription_id'), 'subscription_name': sub.get('display_name'), 'subscription_state': sub.get('state'), }, 'com': { 'cloud_type': 'azure', 'record_type': 'disk', 'reference': disk.get('id') } } return record
2a1849257bda7bab85c538f7989caa2d87906f2b
359,066
def replicate_config(cfg, num_times=1):
    """Replicate a config dictionary some number of times.

    Args:
        cfg (dict): Base config dictionary
        num_times (int): Number of repeats

    Returns:
        (list): List of duplicated config dictionaries, with an added
        'replicate' field.
    """
    replicas = []
    for index in range(num_times):
        duplicate = cfg.copy()
        duplicate["replicate"] = index
        replicas.append(duplicate)
    return replicas
8b4d43002c22d20c6d2238c2309086a21d560c5c
20,425
def square(number):
    """Returns the number of grains on the given square of a chess board
    (grains double on each of the 64 squares, starting from 1)."""
    if number not in range(1, 65):
        raise ValueError(f"{number} is not a square in a chess board.")
    return 2 ** (number - 1)
3efa293ec67b0b50731d295943e479330b7d1fc8
355,075
import struct


def ext_cap_hdr(buf, offset):
    """Read a PCIe extended capability header from a buffer.

    Returns a (cap_id, cap_next) tuple; the low 4 bits of the second
    16-bit word are discarded before returning cap_next.
    """
    # struct pcie_ext_cap_hdr: two little-endian unsigned shorts.
    cap_id, next_raw = struct.unpack_from('HH', buf, offset)
    return cap_id, next_raw >> 4
199c2d268000e7163931273e636d21e2c8b30f5b
259,425
from typing import Iterable
from typing import Tuple
from typing import Any
from typing import List


def unzip(tuples: Iterable[Tuple[Any, ...]]) -> List[Iterable[Any]]:
    """The inverse of the `zip` built-in function."""
    return list(map(list, zip(*tuples)))
10dd4755c501f64f6b98dea8abd7677d6fa23535
21,964
import yaml
import collections


def ParseMappingAsOrderedDict(enable=True, loader=yaml.Loader, dumper=yaml.Dumper):
    """Treat OrderedDict as the default mapping instance.

    While we load a yaml file to an object, modify the object, and dump to a
    yaml file, we hope to keep the order of the mapping instance. Therefore,
    we should parse the mapping to the Python OrderedDict object, and dump
    the OrderedDict instance to yaml just like a dict object.

    NOTE(review): this registers representers/constructors on the given
    Loader/Dumper classes — a process-global side effect that affects all
    subsequent yaml calls using those classes.

    Args:
        enable: if enable is True, load and dump yaml as OrderedDict;
            if False, restore the default dict-based behavior.
        loader: the yaml Loader class to register the constructor on.
        dumper: the yaml Dumper class to register the representer on.
    """
    def DictRepresenter(dumper, data):
        # Emit an OrderedDict with plain dict (mapping) syntax.
        return dumper.represent_dict(data.items())

    def OrderedDictRepresenter(dumper, data):
        # Default object-style representation for OrderedDict.
        return dumper.represent_object(data)

    def OrderedDictConstructor(loader, node):
        # Build mappings as OrderedDict to preserve key order.
        return collections.OrderedDict(loader.construct_pairs(node))

    def DictConstructor(loader, node):
        return dict(loader.construct_pairs(node))

    if enable:
        # Represent OrderedDict object like a dict.
        # Construct the yaml mapping string to OrderedDict.
        yaml.add_representer(collections.OrderedDict, DictRepresenter,
                             Dumper=dumper)
        yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                             OrderedDictConstructor, Loader=loader)
    else:
        # Set back to normal.
        yaml.add_representer(collections.OrderedDict, OrderedDictRepresenter,
                             Dumper=dumper)
        yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                             DictConstructor, Loader=loader)
6145d9737e9a11b2cc65c24e271917dd0f9a46a0
618,797
def is_fplus_include_line(code_line):
    """
    Tests whether or not a C++ code line is an include statement that
    concerns an fplus header (i.e. this will *exclude* lines like
    "#include <vector>").
    """
    # startswith accepts a tuple of prefixes.
    return code_line.startswith(("#include <fplus/", "#include \""))
dd325b7a8f4120aad03e0ce4a70041f34604d7cc
442,460
import math


def get_path_length(path):
    """Compute the total length of a path as the sum of Euclidean
    distances between consecutive points."""
    total = 0
    for start, end in zip(path, path[1:]):
        total += math.dist(start, end)
    return total
e7a2b2e7e766dd5c423e908b8ae48d305bfe4ef3
227,857
def _aisc_table(metric, version): """ Returns the name of the AISC table matching the criteria. Parameters ---------- metric : bool If True, searches for the name in the metric shape database. Otherwise, searches for the name in the imperial shape database. version : {'15.0'} The version of the shape database to query. If None, the latest version will be used. """ if version is None: # Use the latest version version = '15.0' # Return the name of the version table if version == '15.0': if metric: return 'aisc_metric_15_0' else: return 'aisc_imperial_15_0' else: raise ValueError('Version {!r} not found.'.format(version))
0914154232b9452f90053ef189ddea01ba75023d
168,707
import re import logging def _find_includes_recursive(fname, files=None): """Recursive helper function for find_includes() Recursively traverse all files included by ``fname``, results are accumulated in ``files`` and then returned. """ if files is None: files = set() if fname in files: return files files.add(fname) l = [] try: with open(fname, 'r') as f: for line in f: m = re.match(r'#\s*include\s*[<"](.*?)[>"]', line) if not m: continue inc = m.group(1).replace('/', '\\') if not inc.startswith('boost'): continue l.append(inc) for inc in l: _find_includes_recursive(inc, files) except FileNotFoundError as e: logging.warn("File not found: '%s'", fname) return files
f5e77d87320a1e1c8c62696a69275500b40e822c
389,044
def str_to_bytes(s):
    """Convert a size string into a byte count.

    The string is a number followed by a unit suffix: G (gigabytes),
    M (megabytes), K (kilobytes) or B (bytes).  Integers pass through
    unchanged.  Raises ValueError for malformed sizes or unknown units.
    """
    if type(s) is int:
        return s
    if len(s) < 2:
        raise ValueError('invalid size')
    multipliers = {'B': 1, 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}
    suffix, digits = s[-1], s[:-1]
    try:
        scale = multipliers[suffix]
    except KeyError:
        raise ValueError('invalid units')
    try:
        return scale * int(digits)
    except ValueError:
        raise ValueError('invalid size')
c36d5491698cfc3d2cc32854c209b20f84247b7f
68,141
from datetime import datetime


def squash_dates(obj):
    """Recursively replace datetime objects with ISO8601 strings.

    Dicts and lists/tuples are rebuilt (the input is not clobbered);
    tuples come back as lists, other values are returned unchanged.
    """
    if isinstance(obj, datetime):
        return obj.isoformat()
    if isinstance(obj, dict):
        return {key: squash_dates(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [squash_dates(item) for item in obj]
    return obj
4799784d15897e260aada2b4c5f18615b438bda5
40,653
def proglen(s):
    """Return the program length in characters, scaled down by 100.

    Program length is measured in characters; dividing by 100 keeps the
    value in a range comparable to compressibility, DTW and Levenshtein
    metrics (a somewhat arbitrary normalization).

    :param s: A string of a program phenotype.
    :return: The length of the program divided by 100.
    """
    SCALE = 100.0
    return len(s) / SCALE
7b945e9d0c8b79b5afb9cc1611c82b49bd541feb
372,654
def rgb_to_grayscale(vid, channel=0):
    """Convert an RGB video tensor to grayscale.

    Applies the ITU-R 601-2 luma transform
    ``L = R * 0.2989 + G * 0.5870 + B * 0.1140`` along ``channel``.

    Args:
        vid (Tensor): video tensor, e.g. [C, N, H, W] with C == 3.
        channel: index of the color-channel dimension.

    Returns:
        Tensor: grayscale tensor with the color dimension removed,
        cast back to ``vid``'s dtype.
    """
    assert vid.size(channel) == 3
    red = vid.select(channel, 0)
    green = vid.select(channel, 1)
    blue = vid.select(channel, 2)
    luma = 0.2989 * red + 0.5870 * green + 0.1140 * blue
    return luma.to(vid.dtype)
0f328825ea3c3e72c629bb5e964250a17b3e0661
225,410
import re


def get_error_num(function):
    """Return the first number in *function*'s name as a string.

    Leading zeroes are stripped; when the name has no digits (or only
    zeroes), "0" is returned.
    """
    match = re.search(r"\d+", function.__name__)
    if match is None:
        return "0"
    digits = match.group(0).lstrip("0")
    return digits or "0"
67f0fb54e2b7bc260b1f26e2f15f1ea101f2168b
621,502
def remove_quotes(val):
    """Strip one layer of matching surrounding quotes from a string.

    Returns ``val`` unchanged when it is empty, a single character, or not
    wrapped in a matching pair of single or double quotes.
    """
    # Fix: the original indexed val[0] unconditionally, raising IndexError
    # on an empty string, and turned a lone quote character ("'") into "".
    if len(val) >= 2 and val[0] in ('"', "'") and val[0] == val[-1]:
        val = val[1:-1]
    return val
11a8c26e5b261e75a08ae9b11d9c4e39f07da4c3
33,293
def intify(*nums):
    """
    Safely convert values to integers, leaving unconvertible values as-is.

    **Parameter**\n
    nums: list/tuple/1D array
        Numeric array to convert to integer.

    **Return**\n
    intnums : list
        Converted list; values that cannot be cast (e.g. None, 'abc')
        are kept unchanged.
    """
    intnums = list(nums)  # copy so the input tuple is never mutated
    for i, num in enumerate(nums):
        try:
            intnums[i] = int(num)
        except (TypeError, ValueError):
            # Fix: the original only caught TypeError (for None); bad
            # strings like 'abc' raise ValueError and crashed the "safe"
            # converter. Both are now left untouched.
            pass
    return intnums
ff0fb63e2c8a3db178af61da383b625a56d60b95
472,370
def join(sep):
    """join(sep)(iterable)
    Join strings in iterable with sep.

    str -> [str] -> str

    >>> comma_separate = join(', ')
    >>> comma_separate(['a', 'b', 'c', 'd'])
    'a, b, c, d'
    """
    return lambda iterable: sep.join(iterable)
7cd633ea5eb43572df8260badf6fde86a078a0c9
325,070
def _julian_century(jd): """Caluclate the Julian Century from Julian Day or Julian Ephemeris Day""" return (jd - 2451545.0) / 36525.0
477f07e9106c09a2a54a5f279248a221f3729b4e
128,713
def test_lzh (archive, compression, cmd, verbosity, interactive):
    """Test a LZH archive.

    Builds the argument list that asks the external archiver `cmd` to
    test `archive` (the 't' operation), adding the 'v' verbose flag when
    verbosity > 1.

    @param archive: path of the LZH archive to test
    @param compression: unused here; kept for the common handler signature
    @param cmd: the archiver executable to run
    @param verbosity: verbosity level; values > 1 add the verbose flag
    @param interactive: unused here; kept for the common handler signature
    @return: argument list suitable for process execution
    """
    opts = 't'
    if verbosity > 1:
        opts += 'v'
    return [cmd, opts, archive]
802ad486be339535a97557c1dbe8bbafe8752415
552,890
def find_heads(own_snake, data):
    """Return the head coordinates of every enemy snake.

    :param own_snake: our snake object (its ``id`` excludes it from results)
    :param data: game-state dict with ``data['board']['snakes']``
    :return: list of (x, y) tuples, one per enemy head
    """
    heads = []
    for snake in data['board']['snakes']:
        if snake['id'] == own_snake.id:
            continue  # skip ourselves
        head = snake['body'][0]
        heads.append((head['x'], head['y']))
    return heads
baf0aeb80b438612b86310477f761089119ad691
451,255
def collect(app, *args, **kwargs):
    """Calls `tf.fabric.Fabric.collect` from an app object.

    All positional and keyword arguments are forwarded unchanged to
    ``app.api.TF.collect``; its result is returned.
    """
    fabric = app.api.TF
    return fabric.collect(*args, **kwargs)
1f54eb523c521c46b76a207b220190eacd340b4a
561,235
def format_as(format_string):
    """format_as(form)(obj) = form % obj

    Useful to format many objects with the same format string.

    >>> list(map(format_as("0x%x"), [0, 1, 10, 11, 15]))
    ['0x0', '0x1', '0xa', '0xb', '0xf']
    """
    def _apply(obj):
        return format_string % obj
    return _apply
ad2e02106b32b2157c6c026f8031120f3d16fe1e
642,652
import torch def batch_index_select(input, dim, index): """batch version of ``torch.index_select``. Returns a new tensor which indexes the input tensor along dimension ``dim`` using the corresponding entries in ``index`` which is a ``LongTensor``. The returned tensor has the same number of dimensions as the original tensor (input). The ``dim``th dimension has the same size as the length of index; other dimensions have the same size as in the original tensor. Parameters ---------- input : torch.Tensor (B, ..)the input tensor. dim : int the dimension in which we index. Must be ``>0`` since we use the ``0``th index as the batch. May be negative. index : torch.LongTensor (B, N) the 1-D tensor containing the indices to index per batch Returns ------- torch.Tensor (B, ...) tensor that matches the input dimensions, except the ``dim``th dimension now has length ``N``. NOTE: does NOT use the same storage as ``input`` Tensor """ if dim < 0: dim = input.ndim + dim # normalize negative dim to its positive index assert dim > 0, "Cannot index along batch dimension." assert ( input.shape[0] == index.shape[0] ), "input and index must have same batch dimension." # Insert singleton dims into index for every non-`dim` axis so (B, N) can # broadcast against input's layout. for ii in range(1, len(input.shape)): if ii != dim: index = index.unsqueeze(ii) # Expand index to input's shape everywhere except batch and `dim` # (-1 keeps the size already present on those axes). expanse = list(input.shape) expanse[0] = -1 expanse[dim] = -1 index = index.expand(expanse) # gather selects, per batch element, the requested entries along `dim`. return torch.gather(input, dim, index)
3708aab64f952085a7c43717c38b134a91973f9a
75,371
def i_input(prompt):
    """Prompt repeatedly until the user enters a valid integer, then return it.

    :param prompt: text shown to the user on each attempt
    :return: the entered value as an int

    Fix: the original validated with str.isnumeric(), which accepts
    characters such as '²' that int() cannot parse, so such input crashed
    with ValueError instead of re-prompting. Parsing with int() directly
    avoids that (and additionally accepts signed values like '-5').
    """
    while True:
        raw = input(prompt)
        try:
            return int(raw)
        except ValueError:
            continue  # not an integer; ask again
4975840611a6d9559a387bde914d858b05493a8a
150,102
import collections


def load_dem_rsc(filename):
    """Loads and parses the .dem.rsc file

    Args:
        filename (str) path to either the .dem or .dem.rsc file.
            Function will add .rsc to path if passed .dem file

    Returns:
        dict: dem.rsc file parsed out, keys are all caps

    example file:
    WIDTH         10801
    FILE_LENGTH   7201
    X_FIRST       -157.0
    Y_FIRST       21.0
    X_STEP        0.000277777777
    Y_STEP        -0.000277777777
    X_UNIT        degrees
    Y_UNIT        degrees
    Z_OFFSET      0
    Z_SCALE       1
    PROJECTION    LL
    """
    # (field name, caster) pairs; caster turns the string token into the
    # right Python type.
    field_parsers = (
        ('WIDTH', int), ('FILE_LENGTH', int), ('X_STEP', float),
        ('Y_STEP', float), ('X_FIRST', float), ('Y_FIRST', float),
        ('X_UNIT', str), ('Y_UNIT', str), ('Z_OFFSET', int),
        ('Z_SCALE', int), ('PROJECTION', str),
    )
    if filename.endswith('.rsc'):
        rsc_filename = filename
    else:
        rsc_filename = '{}.rsc'.format(filename)
    # OrderedDict keeps keys in file order (matches upsample_dem_rsc output).
    output_data = collections.OrderedDict()
    with open(rsc_filename, 'r') as f:
        for line in f.readlines():
            for field, caster in field_parsers:
                if line.startswith(field):
                    output_data[field] = caster(line.split()[1])
    return output_data
94ac2021e49286e7122d98465c17a330139d6e36
511,035
import math


def quadrant(x):
    """Return which quadrant (1-4) of the unit circle the angle x falls in.

    The angle (in radians) is first normalized into [0, 2*pi); quadrant
    boundaries are half-open: [0, pi/2) -> 1, [pi/2, pi) -> 2,
    [pi, 3*pi/2) -> 3, [3*pi/2, 2*pi) -> 4.
    """
    # Fix: removed the unused local `quad = x % (math.pi / 2)` (dead code).
    norm = x % (math.pi * 2)
    if norm < math.pi / 2:
        return 1
    if norm < math.pi:
        return 2
    if norm < math.pi * (3 / 2):
        return 3
    return 4
124c18305eb804e76104478c827804dd89974255
563,790
def prime_factors(number):
    """Finds prime factors of an integer (by trial-division).

    :param number: The integer to factor
    :type number: int
    :rtype: list of ints

    **Examples**
    >>> prime_factors(314)
    [2, 157]
    >>> prime_factors(31)
    [31]
    """
    factors = []
    candidate = 2
    while candidate * candidate <= number:
        # Divide out the candidate completely before moving on, so only
        # primes ever divide `number`.
        while number % candidate == 0:
            number //= candidate
            factors.append(candidate)
        candidate += 1
    if number > 1:
        # Whatever remains is itself prime.
        factors.append(number)
    return factors
de1726b6a54eddb8927a2eb700b0f466ef077364
111,279
import socket


def is_port_used(ip, port):
    """
    check whether the port is used by other program

    A successful TCP connect means something is listening there.

    :param ip:
    :param port:
    :return: True(in use) False(idle)
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.connect((ip, port))
    except OSError:
        return False
    else:
        return True
    finally:
        probe.close()  # always release the socket
56fbcf03ab0bbfb23fdea56df8eeda19adaac539
690,726
import math
import random


def random_coords(min_lat=-75, max_lat=75, min_lon=-180, max_lon=180):
    """Produce a uniformly distributed random lat/lon pair within bounds.

    Uses the area-preserving transform from
    https://www.jasondavies.com/maps/random-points/ so points are uniform
    over the sphere's surface, not over the lat/lon rectangle.
    """
    lats_ok = -90 < min_lat <= max_lat < 90
    lons_ok = -180 <= min_lon <= max_lon <= 180
    assert lats_ok and lons_ok, "Bad coordinate bounds arguments"

    def _x(lat_deg):
        # Map a latitude to its cumulative-area coordinate.
        return 0.5 * (1 - math.sin(math.pi * lat_deg / 180))

    u = random.uniform(_x(min_lat), _x(max_lat))
    lat = (180 / math.pi) * math.acos(2 * u - 1) - 90
    lon = random.uniform(min_lon, max_lon)
    return (lat, lon)
cb6a64319cd94d94936f76b669d02cfa53fd409d
442,790
import pickle


def load_pickle(filename):
    """
    load an object from a given pickle file

    NOTE(review): pickle.load executes arbitrary code when unpickling;
    only use this on trusted files.

    :param filename: source file
    :return: loaded object
    """
    with open(filename, 'rb') as source:
        obj = pickle.load(source)
    return obj
575c1601ca2ee1370e6a4911369e6948cd345c82
340,177
def get_categories(x):
    """Return the categories of x

    Parameters
    ----------
    x : category_like
        Input Values (a categorical pandas Series or a plain Categorical)

    Returns
    -------
    out : Index
        Categories of x

    Raises
    ------
    TypeError
        When x has no categories.
    """
    if hasattr(x, 'cat'):
        # pandas Series with categorical dtype
        return x.cat.categories
    if hasattr(x, 'categories'):
        # plain Categorical (or CategoricalIndex)
        return x.categories
    raise TypeError("x is the wrong type, it has no categories")
19495de6478955f27e1bdc3ef91e9bd0453a180f
245,926
from textwrap import dedent


def split_docstring(docstring):
    """
    Split a docstring into a summary and a description.

    Arguments:
        docstring (str): the docstring

    Returns:
        summary (str or None): a one-line summary.
        description (str or None): the multi-line description (dedented).
    """
    if not docstring:
        return None, None
    lines = docstring.strip().split("\n")
    summary = lines[0].strip()
    description = dedent("\n".join(lines[1:])) if len(lines) > 1 else None
    return summary, description
0d8d74a0629864757aa294ab967570ba508fdd32
641,342
def _compare_logical_disks(ld1, ld2): """Compares the two logical disks provided based on size.""" return ld1['size_gb'] - ld2['size_gb']
29a51ee7241239a2a02739db203e1c481c462568
121,255
import pathlib def _get_csv_file_path(data_dir: str, three_sec_song: bool) -> pathlib.Path: """Gets CSV file path from data directory. Parameters ---------- data_dir : str directory where datafiles are stored three_sec_song : bool if should use the three second song csv (as opposed to thirty second songs) Returns ------- pathlib.Path path to csv file """ if three_sec_song: return pathlib.Path(data_dir).joinpath(pathlib.Path("Data/features_3_sec.csv")) else: return pathlib.Path(data_dir).joinpath(pathlib.Path("Data/features_30_sec.csv"))
8ce28b1fe367f329a1629dc04cb930715baec765
585,349
def str_to_bool(s):
    """Transform a string to boolean.

    :param s: the value to transform as boolean; must be exactly
        'true' or 'false' (lowercase)
    :type s: str
    :return: the given s as bool
    :rtype: boolean
    :raises ValueError: if s is neither 'true' nor 'false'
    """
    if s == 'true':
        return True
    if s == 'false':
        return False
    # Fix: raise ValueError (the idiomatic type for a bad cast) instead of
    # bare Exception; callers catching Exception still catch this.
    raise ValueError('Unable to cast as bool %s' % s)
9acfe7df11d82c1f5642251046e2e1651212d8a3
457,875
def fileinfo_remove(log_table, remove_table):
    """ Mark files in the log table that also appear in the remove table.

    Params:
        log_table {fileinfo} - dictionary to remove from
        remove_table {fileinfo} - dictionary containing keys to remove
    Returns:
        removed (int) - number of entries marked
    Notes:
        does not actually remove entries; it sets the 0x2 flag bit so a
        later cleanup pass (see fileinfo_clean) can delete them
    """
    removed = 0
    for key in remove_table:
        if key not in log_table:
            continue
        log_table[key].flags |= 0x2  # marked for removal
        removed += 1
    return removed
e7de73edf1ea97e980a3ff4328d6d02b771da4a6
118,772
def lfnGroup(job):
    """
    _lfnGroup_

    Determine the lfnGroup from the job counter and the agent number
    provided in the job baggage; both default to 0 when absent.  The
    result is the agent number followed by the counter (in thousands)
    zero-padded to four digits, giving a 5-digit string.
    """
    agent_part = str(job.get("agentNumber", 0))
    counter_part = str(job.get("counter", 0) // 1000).zfill(4)
    return agent_part + counter_part
55e7f934128ef1212fc7ff428ffb0d6ef5a10267
183,868
from datetime import datetime
import click
import json


def write_json_file(file_name: str, results: list) -> str:
    """Write data to a timestamped json file.

    A ``_M-D-YYYY_H-M.json`` suffix is appended to ``file_name``; the
    resulting path is announced via click and returned.
    """
    now = datetime.now()
    timestamp = f"_{now.month}-{now.day}-{now.year}_{now.hour}-{now.minute}.json"
    file_path = file_name + timestamp
    click.secho(f"[*] Writing results to {file_path}", fg="green")
    with open(file_path, "w") as out_file:
        json.dump(results, out_file, indent=4)
    return file_path
4cb127c0b9bb916ab5f32c859fd03cc0570eb561
93,519
def value_in_list(answers, key, values):
    """Determine whether the user's answer for ``key`` is in ``values``.

    Args:
        answers: Dict of user's answers to the questions.
        key: question key.
        values: List of values to check from.

    Returns:
        True when the answer exists and is one of ``values``; False when
        the key is missing or the answer is not listed.
    """
    try:
        answer = answers[key]
    except KeyError:
        return False
    return answer in values
8e1dd440063645e39419864ee7cc7563608c64c3
589,907
import torch


def get_activations(imgs, model, dim=2048):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- imgs  : Tensor of images (moved to the GPU before the forward pass,
               so a CUDA device is required)
    -- model : Instance of inception model
    -- dim   : Dimensionality of features returned by Inception.
               NOTE(review): unused - the output width is whatever the
               model produces; kept for interface compatibility.

    Returns:
    -- A pytorch tensor of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    n_imgs = len(imgs)
    imgs = imgs.cuda()
    pred = model(imgs)[0]
    # Fix: removed the dead `pred_arr = torch.empty((n_imgs, dim))`
    # pre-allocation - it was unconditionally overwritten below.
    return pred.data.view(n_imgs, -1)
8fdf135b00ebc91855b75863f2c67a6fd6661b2f
164,515