content: string, lengths 39–14.9k
sha1: string, lengths 40–40
id: int64, values 0–710k
import torch


def collate_fn(data):
    """Construct a batch by padding the sequences to the size of the longest.

    Args:
        data (tuple): tensors

    Returns:
        tuple: padded tensors
    """
    # Construct a batch by padding the sequences to the size of the longest
    size = [len(_x) for _x in list(zip(*data))[0]]
    pad_data = [torch.zeros(len(size), max(size)) for _ in zip(*data)]
    pad_mask = torch.ones(len(size), max(size))
    for i, _data in enumerate(data):
        end = size[i]
        pad_mask[i, :end] = 0
        for j, d in enumerate(_data):
            pad_data[j][i, :end] = d
    return [d.type(torch.int64) for d in pad_data] + [pad_mask.type(torch.bool)]
    # not yet supported by yapf and black formatter (allowed in Python 3.8)
    # return *[d.type(torch.int64) for d in pad_data], pad_mask.type(torch.bool)
0dffeac33a95a7001a1898299a329b12b1b29ffe
43,295
def SmoothBrokenPowerLaw(x, norm=1., gamma_low=1., gamma_high=1., break_freq=1.):
    """
    Smooth broken power law function, implemented using
    astropy.modeling.models custom model.

    Parameters
    ----------
    x: numpy.ndarray
        non-zero frequencies
    norm: float
        normalization frequency
    gamma_low: float
        power law index for f --> zero
    gamma_high: float
        power law index for f --> infinity
    break_freq: float
        break frequency

    Returns
    -------
    model: astropy.modeling.Model
        generalized smooth broken power law psd model
    """
    return norm * x**(-gamma_low) / (1. + (x / break_freq)**2)**(-(gamma_low - gamma_high) / 2)
e5d40a987d4427a5f6dac1a7727e908aa78ec95e
43,297
def format_exts(exts: list):
    """Returns a formatted list of extensions."""
    return ", ".join(["." + ext for ext in exts])
e1ab9f8cf039291a9344111308ef72715ae19188
43,298
import configparser


def configurations(path):
    """Parses and reads the configuration file found at path.

    Returns a configparser object."""
    # create config parsing object
    config = configparser.ConfigParser()
    # read config
    config.read(path)
    return config
de3177feee980f1ffa3be9b1b330ed780304108f
43,301
def adjust(val, length=6):
    """Left-align the numerical value for printing purposes."""
    return str(val).ljust(length)
64ac888b308821215bf68cd4d846898d403fdb24
43,304
def toLower(str):
    """This method just turns the first char of a word to lowercase."""
    return str[:1].lower() + str[1:] if str else ''
f44d129af901b9ee9781cd4c78b9ec9361e29a84
43,306
import string
import random


def gen_id(length):
    """Create a well-usable random id."""
    source = string.ascii_letters + string.digits
    return ''.join([random.choice(source) for _ in range(0, length)])
6c08f786476ee90d9f9d71fe499be99c93a523d2
43,310
def prepare_url(valip, valch, valc, valii):
    """Prepare the URL."""
    ## Parameter - IP
    url = "http://" + valip
    ## Parameter - URL - action
    url += "/api/v100/dali_devices.ssi?action=get_device"
    ## Parameter - Channel
    url += "&ch=" + valch
    ## Parameter - Lamp index, group index or channel
    if valc == 1:
        # Lamp index
        url += "&di=" + valii
    elif valc == 2:
        # Group index
        url += "&gi=" + valii
    else:
        # Channel
        url += "&gi=-1"
    return url
4516e45206a7f681e71856cca5b1e1332c775e68
43,314
def read_input(ifn):
    """
    Read an Abaqus INP file, read its sections.
    Return the section headings and the lines.
    """
    with open(ifn) as inf:
        lines = [ln.strip() for ln in inf.readlines()]
    # Remove comments
    lines = [ln for ln in lines if not ln.startswith("**")]
    # Find section headers
    headings = [(ln[1:], n) for n, ln in enumerate(lines) if ln.startswith("*")]
    # Filter the headings so that every heading has a start-of-data and
    # end-of-data index.
    headings.append(("end", -1))
    ln = [h[1] for h in headings]
    headings = [
        (name, start + 1, end) for (name, start), end in zip(headings[:-1], ln[1:])
    ]
    return headings, lines
7223dc6d808cb96101cd18e21f09ef013478fa6e
43,317
def convert_to_float(number_string):
    """Convert comma-delimited real numbers in string format to a float.

    >>> convert_to_float("-79,1")
    -79.1
    """
    return float(number_string.replace(',', '.'))
8b30293677af0860f32eb28b544c262a3b1279f4
43,325
def _darknet_required_attr(attr, key):
    """Check that the attribute exists and return it; raise an error if it does not."""
    assert isinstance(attr, dict)
    if key not in attr:
        raise AttributeError("Required attribute {} not found.".format(key))
    return attr[key]
938555c55069fafc28a1d43a06b1d316abaea87f
43,326
def grouper(x):
    """
    Given a line number in a fastq file, return a tuple mapping
    each line to a read number (modulo 4) and line number.
    """
    return int(x[1] / 4), x[0]
e61093ec88fc0ab26d7b1e5662d75a1506bb6df8
43,330
def times2(num):
    """This function returns twice the value of the entered number."""
    return num * 2
a59997b5e94b287c849f1aa3250c2e5b8df7b094
43,334
def qubit_constraint(ind1, ind2, instructions):
    """Determine if two instructions have overlap in the used qubits."""
    if instructions[ind1].used_qubits & instructions[ind2].used_qubits:
        return False
    else:
        return True
c8fce40c70977c86a9c86bb1b17d92037571fb36
43,335
def data_split(data, frac_validate=.2, frac_test=.2):
    """Split data into train, validate, train2 and test sets.

    Args:
        data (array or series): data of interest
        frac_validate (float): a fraction of data for the validation set (default = 0.2)
        frac_test (float): a fraction of data for the test set (default = 0.2)

    Returns:
        a tuple of 4 dataframes: train, validate, train2, and test.
    """
    n_size = data.shape[0]
    n_test = int(n_size * frac_test)
    n_validate = int(n_size * frac_validate)
    n_train = n_size - n_test - n_validate
    train = data.iloc[:n_train].dropna()
    validate = data.iloc[n_train:-n_test]
    train2 = data.iloc[:-n_test]
    test = data.iloc[-n_test:]
    return train, validate, train2, test
6f391aee009bbc95a6216b6b42d53fcbf71cb5ad
43,337
def latticeWrapIdx(index, lattice_shape):
    """
    Returns periodic lattice index for a given iterable index.

    :param index: List of cells
    :param lattice_shape: Shape of the lattice array
    :return: Modified indexes
    """
    if not hasattr(index, '__iter__'):
        return index  # handle integer slices
    if len(index) != len(lattice_shape):
        return index  # must reference a scalar
    if any(type(i) == slice for i in index):
        return index  # slices not supported
    if len(index) == len(lattice_shape):  # periodic indexing of scalars
        mod_index = tuple(((i % s + s) % s for i, s in zip(index, lattice_shape)))
        return mod_index
    raise ValueError('Unexpected index: {}'.format(index))
80d4caf99770548f7d0edf3f6aaac2e7a904ecec
43,341
def range_union(ranges):
    """
    Returns total size of ranges, expect range as (chr, left, right)

    >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)]
    >>> range_union(ranges)
    41
    >>> ranges = [("1", 30, 45), ("2", 40, 50)]
    >>> range_union(ranges)
    27
    >>> ranges = [("1", 30, 45), ("1", 45, 50)]
    >>> range_union(ranges)
    21
    >>> range_union([])
    0
    """
    if not ranges:
        return 0

    ranges.sort()

    total_len = 0
    cur_chr, cur_left, cur_right = ranges[0]  # left-most range
    for r in ranges:
        # open new range if left > cur_right or chr != cur_chr
        if r[1] > cur_right or r[0] != cur_chr:
            total_len += cur_right - cur_left + 1
            cur_chr, cur_left, cur_right = r
        else:
            # update cur_right
            cur_right = max(r[2], cur_right)

    # the last one
    total_len += cur_right - cur_left + 1

    return total_len
3fa8aa251c207d2576d3739116c0d33378716bee
43,343
import base64


def readDataOrPath(dataStr):
    """Reads in either base64 data or a path."""
    if dataStr.startswith('base64:'):
        dataPath = None
        dataContents = base64.b64decode(dataStr[7:])
    else:
        dataPath = dataStr
        with open(dataStr, 'rb') as stream:
            dataContents = stream.read()
    return dataPath, dataContents
0b18eaa4affdb409455e7575a6938963df1f1db1
43,344
def truncate_chars(val: str, num: int, end: str = "...") -> str:
    """Truncates a string if it is longer than the specified number of
    characters. Truncated strings will end with `end`, an ellipsis by default."""
    val_length = len(val)
    end_length = len(end)
    # strings of exactly `num` characters already fit and need no truncation
    if val_length <= num:
        return val
    return f"{val[:num - end_length]}{end}"
fc5ba4950e4a7562b1a0d97850048c35d3f3518c
43,345
def full_to_one(full):
    """Convert full amino acid name to one-letter amino acid code."""
    assert full.lower() in [
        "phenylalanine", "leucine", "serine", "tyrosine", "stop",
        "cysteine", "tryptophan", "proline", "histidine", "glutamine",
        "arginine", "isoleucine", "methionine", "threonine", "asparagine",
        "lysine", "valine", "alanine", "aspartic acid", "glutamic acid",
        "glycine", "unnatural aa",
    ], ("Error, %s is not a valid amino acid" % full)
    AA = {
        "phenylalanine": "F",
        "leucine": "L",
        "serine": "S",
        "tyrosine": "Y",
        "stop": "*",
        "cysteine": "C",
        "tryptophan": "W",
        "proline": "P",
        "histidine": "H",
        "glutamine": "Q",
        "arginine": "R",
        "isoleucine": "I",
        "methionine": "M",
        "threonine": "T",
        "asparagine": "N",
        "lysine": "K",
        "valine": "V",
        "alanine": "A",
        "aspartic acid": "D",
        "glutamic acid": "E",
        "glycine": "G",
        "unnatural aa": "U",
    }
    return AA[full.lower()]
182438900416a7a0be9bb799648c822597670e32
43,349
def print_failure(filename, failure, colored=False, verbose=False):
    """
    Pretty prints a failure.

    :param filename: str, path to file tested
    :param failure: Failure
    :param colored: bool, prints with ansi colors according to failure severity
    :param verbose: bool, include failure location and test details
    """
    def color_string(message, severity):
        RED = '31m'
        YELLOW = '33m'
        BLUE = '34m'
        WHITE = '37m'
        RESET = '0m'
        color_map = {
            'ERROR': RED,
            'WARNING': YELLOW,
            'INFO': BLUE,
        }
        return f'\033[{color_map.get(severity, WHITE)}{message}\033[{RESET}'

    message = f'[{failure.role}] {filename}:{failure.line}: {failure.element}: {failure.message}'
    if verbose:
        message += f'\n  location: {failure.location}\n  test: {failure.test}'
    if colored:
        message = color_string(message, failure.role)
    # print(message)
    return message
f402ea84316a6e2b48cb0632d8a5f8421650a128
43,351
import yaml


def load_yaml(filename):
    """Load a yaml file.

    Args:
        filename (str): Filename.

    Returns:
        dict: Dictionary.
    """
    try:
        with open(filename, "r") as f:
            config = yaml.load(f, yaml.SafeLoader)
        return config
    except FileNotFoundError:  # for file not found
        raise
    except Exception as e:  # for other exceptions
        raise IOError("load {0} error!".format(filename))
702ff5ca3321c7ec2ffc3d09363b6c4e22092837
43,353
def make_args(pcls):
    """Returns an args tuple for the parameter class pcls."""
    return tuple(range(1, len(pcls.__rndargs__) + 1))
5aed1d6b40ac40bef6d94a915c803875066e0833
43,360
def rds_product_engine_match(product, engine):
    """Check whether an RDS reservation 'product' matches a running instance 'engine'."""
    return (product, engine) in (
        ('postgresql', 'postgres'),
        ('mysql', 'mysql'),  # note: not sure if this is correct
    )
4373ba6a51e5a5d80aeb4a410f60064becf2ede1
43,369
import yaml


def load_config(config_path):
    """Load configuration file in YAML format.

    Args:
        config_path (string): path to configuration file

    Returns:
        config (dict): configuration in dictionary format
    """
    with open(config_path) as file:
        config = yaml.safe_load(file)
    return config
3d018b5df5e88d697e4ba55712422dd11ed287bd
43,371
import re


def to_np(url):
    """Return a comment url converted to the np subdomain."""
    # raw string avoids the invalid-escape warning for \. in newer Pythons
    return re.sub(r"https?://.*reddit\.com", "https://np.reddit.com", url)
72a8efaf2f5c77184ae9d80bf268aa06aa414e7e
43,375
import base64


def download_link(object_to_download, filename, download_link_text):
    """
    Generates a link to download the given object_to_download.

    Args:
        object_to_download: str.
        filename: str. filename and extension of file.
        download_link_text: str. Text to display for download link.

    Examples:
        download_link(our_string, 'my_file.txt', 'Click here to download your text!')
    """
    # some strings <-> bytes conversions necessary here
    b64 = base64.b64encode(object_to_download.encode()).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{filename}">{download_link_text}</a>'
a98d42f90370f73bd2282e042ab9c8a7ec2d7766
43,380
def MyGrep(hash_list, index_name, iname):
    """
    hash_list: (list<subdict>)
        subdict: (dict) header -> value
    index_name: (str) key in subdict that maps to index names
    iname: (str) the index name we want.

    Returns: list<dict> minimized such that only dicts with the wanted index name are in it.
    """
    new_list = []
    for h in hash_list:
        if h[index_name] == iname:
            new_list.append(h)
    return new_list
34f346f80bfcac423ec89ae06c616dfc082fa2b0
43,381
def intersection(list1, list2):
    """
    Compute and return the elements common to list1 and list2.

    `list1` and `list2` can be sets, lists, or pandas.Series.
    """
    return list(set(list1).intersection(set(list2)))
dab9e5c597316070e452505980381a0251ef3385
43,383
def timeseries_wrapper(timeseries, starttimeind=0):
    """Decorator to convert a list or numpy array into a function which accepts a timeind."""
    def out(timeind):
        return timeseries[timeind - starttimeind]
    return out
f09036ea7b6f7dbecca5468e60b3c908bf159417
43,387
import re


def camel_to_snake_case(name):
    """Converts camelCase to snake_case.

    Args:
        name (str): Camel case name

    Returns:
        str: Name in snake case
    """
    cunder = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", cunder).lower()
7997a25ed8ac751d935d05078cf9b5f8d3251b9b
43,389
def laplace(f, g_inv, g_det, X):
    """
    Calculates Laplace(f), using the inverse metric g_inv,
    the determinant of the metric g_det, all in variables X.
    """
    r = 0
    for i in range(len(X)):
        for j in range(len(X)):
            r += g_inv[i, j] * f.diff(X[i]).diff(X[j])
    for sigma in range(len(X)):
        for alpha in range(len(X)):
            r += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \
                f.diff(X[alpha]) / (2 * g_det)
    return r
47bebab77205c47eabe2e7450263274a10a44c2f
43,390
import re


def _check_if_item_allowed(item_name, allow_patterns, disallow_patterns):
    """
    Check if an item with ``item_name`` is allowed based on ``allow_patterns``
    and ``disallow_patterns``.

    Parameters
    ----------
    item_name: str
        Name of the item.
    allow_patterns: list(str)
        Selected item should match at least one of the re patterns. If the value
        is ``[None]`` then all items are selected. If ``[]``, then no items are
        selected.
    disallow_patterns: list(str)
        Items are deselected based on re patterns in the list: if an item matches
        at least one of the patterns, it is deselected. If the value is ``[None]``
        or ``[]`` then no items are deselected.

    Returns
    -------
    boolean
        Indicates if the item is permitted based on ``allow_patterns`` and
        ``disallow_patterns``.
    """
    item_is_allowed = False
    if allow_patterns:
        if allow_patterns[0] is None:
            item_is_allowed = True
        else:
            for pattern in allow_patterns:
                if re.search(pattern, item_name):
                    item_is_allowed = True
                    break
    if item_is_allowed:
        if disallow_patterns and (disallow_patterns[0] is not None):
            for pattern in disallow_patterns:
                if re.search(pattern, item_name):
                    item_is_allowed = False
                    break
    return item_is_allowed
6a14239913140130eb8ce0d12a797039a7625172
43,394
def py_opp(x):
    """
    Function for python unary operator ``-``.

    @param x    floats
    @return     `-x`
    """
    return -x
4d7f6260da54eaa9ea3d6e71a10cdb61d4bba8c1
43,396
import logging


def query_stream_log_handler(logger):
    """Query stream handler from logger."""
    if len(logger.handlers):
        ch = logger.handlers[0]
    else:
        ch = logging.StreamHandler()
        logger.addHandler(ch)
    return ch
67fc28298bdfa10c25c0be36eaed0922def3a1a3
43,397
import re


def validate_cron_string(cron_string, error_on_invalid=False):
    """Validate that a string is a Unix cron string."""
    # Note: This is also a Temporal function, but I'm trying to avoid making
    # Temporal a dependency of BTU.
    crontab_time_format_regex = re.compile(
        r"{0}\s+{1}\s+{2}\s+{3}\s+{4}".format(
            r"(?P<minute>\*(\/[0-5]?\d)?|[0-5]?\d)",
            r"(?P<hour>\*|[01]?\d|2[0-3])",
            r"(?P<day>\*|0?[1-9]|[12]\d|3[01])",
            r"(?P<month>\*|0?[1-9]|1[012])",
            r"(?P<day_of_week>\*|[0-6](\-[0-6])?)",
        )  # end of str.format()
    )  # end of re.compile()
    if crontab_time_format_regex.match(cron_string) is None:
        if error_on_invalid:
            raise Exception(f"String '{cron_string}' is not a valid Unix cron string.")
        return False
    return True
4cc4594bfc2fd2743f20fa9662a4083497d5e1fc
43,399
def aws_region_name(request):
    """Returns AWS Region Name."""
    return request.config.getoption("--aws-region")
0400379c336ef156d32448b562b7c011b7eb2894
43,400
def flatten(data):
    """Flatten out all list items in data."""
    flatten_list = []
    if type(data) == list:
        for item in data:
            flatten_list.extend(flatten(item))
        return flatten_list
    else:
        return [data]
6a5bea0f99e1c33ef178e0002f71e27d2efc36a0
43,403
def generate_omim_list(omimfile):
    """
    Generate dictionary of genotype id-name key-value pairs.

    :omimfile: path to mimTitles.txt
    """
    omimlist = {}
    with open(omimfile) as file:
        for line in file:
            # Clean up lines and pick first occurrence of titles
            lineparts = line.strip().split('\t')
            genotype_id = 'OMIM:' + lineparts[1]
            genotype_name = lineparts[2].split(';')[0]
            omimlist[genotype_id] = genotype_name
    return omimlist
aa3bff68005fbd771f2c942c6e15d11c6afd6913
43,406
def data_cleaning(post: dict):
    """
    Functionality to clean up data and pass back only desired fields.

    :param post: dict content and metadata of a reddit post
    :return: tuple of fields for insertion into database
    """
    unwanted_authors = ['[deleted]', '[removed]', 'automoderator']
    # skip posts from undesirable authors or posts to personal subreddits
    if (post['author'] in unwanted_authors) or (post['subreddit_name_prefixed'].startswith('u/')):
        return None
    # replace empty string with placeholder for posts with no body content
    if post['selftext'] == '':
        post['selftext'] = "[NO TEXT]"
    # only a subset of the fields are being saved to the db
    # adjust the table schema and add fields here if desired
    author = post['author'].lower().strip()
    if post['author_flair_text']:
        author_flair_text = post['author_flair_text'].lower().strip()
    else:
        author_flair_text = "none"
    if post['link_flair_text']:
        post_flair_text = post['link_flair_text'].lower().strip()
    else:
        post_flair_text = "none"
    created_utc = post['created_utc']
    reddit_id = f"t3_{post['id']}"
    num_comments = post['num_comments']
    nsfw = post['over_18']
    score = post['score']
    text = post['selftext']
    subreddit = post['subreddit'].lower().strip()
    title = post['title']
    total_awards_received = post['total_awards_received']
    return (author, author_flair_text, post_flair_text, created_utc, reddit_id,
            num_comments, nsfw, score, text, subreddit, title, total_awards_received)
bfe61ac3689a21731e8aaa194df02c545ed8f840
43,409
def coords_to_simbad(ra, dec, search_radius):
    """
    Get SIMBAD search url for objects within search_radius of ra, dec coordinates.

    Args:
        ra (float): right ascension in degrees
        dec (float): declination in degrees
        search_radius (float): search radius around ra, dec in arcminutes
            (the query's Radius.unit is arcmin)

    Returns:
        (str): SIMBAD database search url for objects at ra, dec
    """
    return ('http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={0}+%09{1}'
            '&CooFrame=FK5&CooEpoch=2000&CooEqui=2000&CooDefinedFrames=none'
            '&Radius={2}&Radius.unit=arcmin'
            '&submit=submit+query&CoordList=').format(ra, dec, search_radius)
ced846c6962368c937fb2005b59ea5e0b6dc9afd
43,410
from typing import Optional


def binstr(num: int, width: Optional[int] = None) -> str:
    """
    Returns the binary representation of an integer.

    Parameters
    ----------
    num : int
        The number to convert.
    width : int, optional
        Minimum number of digits used. By default no zero-padding is applied.

    Returns
    -------
    binstr : str
    """
    fill = width or 0
    return f"{num:0{fill}b}"
9f842ad69b7a1cde2ccfb8c31ed806b4a959c766
43,414
def get_radosgw_username(r_id):
    """Generate a username based on a relation id."""
    gw_user = 'juju-' + r_id.replace(":", "-")
    return gw_user
347ff532464ef568daf753e820fef842084fe753
43,422
def start_vm(client, resource_group_name, vm_name):
    """Start a VMware virtual machine."""
    return client.start(resource_group_name, vm_name)
b9ff6fdd9e69f51783585a527fe182e4ec8ea152
43,426
def circuit_to_bench(c):
    """
    Generates a str of Bench code from a `CircuitGraph`.

    Parameters
    ----------
    c: Circuit
        the circuit to turn into Bench.

    Returns
    -------
    str
        Bench code.
    """
    inputs = []
    outputs = []
    insts = []

    if c.blackboxes:
        raise ValueError(f"Bench format does not support blackboxes: {c.name}")

    # gates
    const_inp = c.inputs().pop()
    for n in c.nodes():
        if c.type(n) in ["xor", "xnor", "buf", "not", "nor", "or", "and", "nand"]:
            fanin = ", ".join(c.fanin(n))
            insts.append(f"{n} = {c.type(n).upper()}({fanin})")
        elif c.type(n) in ["0"]:
            insts.append(f"{n} = XOR({const_inp}, {const_inp})")
        elif c.type(n) in ["1"]:
            insts.append(f"{n} = XNOR({const_inp}, {const_inp})")
        elif c.type(n) in ["input"]:
            inputs.append(n)
        elif c.type(n) in ["output"]:
            fanin = c.fanin(n).pop()
            insts.append(f"{n} = BUF({fanin})")
            outputs.append(n)
        else:
            raise ValueError(f"unknown gate type: {c.type(n)}")

    bench = f"# {c.name}\n"
    bench += "".join(f"INPUT({inp})\n" for inp in inputs)
    bench += "\n"
    # fixed stray semicolon inside the parentheses: Bench outputs are OUTPUT(name)
    bench += "".join(f"OUTPUT({out})\n" for out in outputs)
    bench += "\n"
    bench += "\n".join(insts)

    return bench
e97caa3b40f40ff0637dec825e1921d1a7c19e86
43,427
def prefix_filt(d, prefix):
    """Return all items in dictionary d whose key has the given prefix."""
    match_list = []
    for k in d.keys():
        if isinstance(k, str):
            if k.startswith(prefix):
                match_list.append(k)
    return dict([(k, d[k]) for k in match_list])
5da37ca9ee7db78959e2cd8cd67cc945b941c43e
43,428
def from_wsgi_header(header):
    """Convert a WSGI compliant HTTP header into the original header.

    See https://www.python.org/dev/peps/pep-3333/#environ-variables for
    information from the spec.
    """
    HTTP_PREFIX = "HTTP_"
    # PEP 333 gives two headers which aren't prepended with HTTP_.
    UNPREFIXED_HEADERS = {"CONTENT_TYPE", "CONTENT_LENGTH"}

    if header.startswith(HTTP_PREFIX):
        header = header[len(HTTP_PREFIX):]
    elif header not in UNPREFIXED_HEADERS:
        return None
    return header.replace("_", "-").title()
7b3b82aaf9c2fc5d6da9428b1f301b996761ca15
43,433
def unique(S, start, stop):
    """Return True if there are no duplicate elements among S[start]..S[stop] (inclusive)."""
    print("start = {}, stop = {}".format(start, stop))
    if stop - start <= 1:  # at most 1 item
        return True
    elif not unique(S, start, stop - 1):  # first part has duplicate
        print("checking uniqueness in (S, {}, {})".format(start, stop - 1))
        return False
    elif not unique(S, start + 1, stop):  # second part has duplicate
        print("checking uniqueness in (S, {}, {})".format(start + 1, stop))
        return False
    else:
        print("Check uniqueness of 1st and last element for start = {} and stop = {}"
              .format(start, stop))
        return S[start] != S[stop]  # do first and last differ
daf738f83cb7ccc1b33978a25e022638bbfc63fe
43,434
def split(string, keys):
    """
    string :: the string to split
    keys :: the delimiter characters,
            e.g. keys = [' ', ',', '"', '.', '(', ')']
    return :: the list of substrings after splitting
    """
    out_strings = []
    cnt = 0
    for i in range(len(string)):
        if string[i] in keys:
            if cnt != i:
                out_strings.append(string[cnt:i])
            cnt = i + 1
    # keep the trailing segment after the last delimiter
    if cnt != len(string):
        out_strings.append(string[cnt:])
    return out_strings
d5342606ded40512b185653a9593b0166cd3edff
43,436
def sort_gtf(giter, chrom_order=None):
    """Sort GTF

    Sort GTF records by chromosome and start position

    Args:
        giter (iterable): List of GTFLine or GTFCluster objects
        chrom_order (list): Chromosome sort order. Default is alphabetical.

    Returns:
        list: Sorted GTF records.
    """
    ret = sorted(giter, key=lambda x: x.start)
    if chrom_order is None:
        ret.sort(key=lambda x: x.chrom)
    else:
        c_d = {k: i for i, k in enumerate(chrom_order)}
        ret.sort(key=lambda x: c_d[x.chrom] if x.chrom in c_d else x.chrom)
    return ret
60f8edbeedd5f60d55d8d43affa0f8e02bfcfe3f
43,437
def cap_str(line):
    """
    Uppercase a string.

    :param str line:
    :return str:
    """
    return line.upper()
119a7637c57f3e8ee6481897cc46cd0f9c177b0f
43,440
def calculate_freq(idx):
    """Estimate the sampling frequency of a pandas datetime index."""
    cfreq = (idx.max() - idx.min()) / (len(idx) - 1)
    # total_seconds() (rather than .seconds) keeps the days component
    cfreq = cfreq.total_seconds() / 60
    print("Calculated frequency is " + "{:5.3f}".format(cfreq) + " minutes")
    print("Rounding to " + str(round(cfreq)) + 'min')
    return str(round(cfreq)) + "min"
e78018a3d7f580e89fd7121d3c07d38acb117552
43,443
from io import StringIO


def ase_to_xyz(atoms, comment="", file=True):
    """Convert ASE to xyz.

    This function is useful to save xyz to DataFrame.
    """
    xyz = StringIO()
    symbols = atoms.get_chemical_symbols()
    natoms = len(symbols)
    xyz.write("%d\n%s\n" % (natoms, comment))
    for s, (x, y, z) in zip(symbols, atoms.positions):
        xyz.write("%-2s %22.15f %22.15f %22.15f\n" % (s, x, y, z))
    if file:
        return xyz
    else:
        return xyz.getvalue()
dcacdb5550c1cea2706190912556d84acae0094d
43,446
import re


def read_gtf(file_name, query_gene_name):
    """Given a GTF file and a gene name to query for, return a dictionary
    where the keys are transcript IDs and the values are arrays of start and
    end offsets of the exons present in that transcript, e.g.,
    { 'tr1' : [[10,12],[17,27]] }"""

    def read_gtf_keyvalues(keyvaluestr):
        parts = keyvaluestr.split(";")
        for keyvalue in parts:
            m = re.match(r'\s*(\S+)\s*"(\S+)"', keyvalue)
            if m:
                yield (m.group(1), m.group(2))

    matching_transcripts = {}
    with open(file_name) as f:
        for line in f:
            parts = re.split(r"\s", line, maxsplit=8)
            if parts[2] in ["exon", "CDS", "UTR"]:
                gene_name, transcript_id = "", ""
                for k, v in read_gtf_keyvalues(parts[8]):
                    if k == "gene_name":
                        gene_name = v
                    elif k == "transcript_id":
                        transcript_id = v
                if gene_name == query_gene_name:
                    if transcript_id not in matching_transcripts:
                        matching_transcripts[transcript_id] = {
                            "exons": [],
                            "CDSs": [],
                            "UTRs": [],
                        }
                    start_and_end_offset = (int(parts[3]), int(parts[4]))
                    if parts[2] == "exon":
                        matching_transcripts[transcript_id]["exons"].append(
                            start_and_end_offset
                        )
                    elif parts[2] == "CDS":
                        matching_transcripts[transcript_id]["CDSs"].append(
                            start_and_end_offset
                        )
                    elif parts[2] == "UTR":
                        matching_transcripts[transcript_id]["UTRs"].append(
                            start_and_end_offset
                        )
    return matching_transcripts
f66448b52a5cd73b25de606c3a22a6e58ee4cadd
43,448
def _clean_name(net, name):
    """Clear prefix and some suffixes for name."""
    # prefix = net._prefix
    # name = name.replace(prefix, "")
    if name.endswith("_fwd_output"):
        name = name[:-len("_fwd_output")]
    elif name.endswith("_fwd"):
        name = name[:-len("_fwd")]
    elif name.endswith("_output"):
        name = name[:-len("_output")]
    return name
5be69e1b073f7da970dcf784cb1c37382231e418
43,449
def zfp_expert_opts(minbits, maxbits, maxprec, minexp):
    """Create compression options for ZFP in "expert" mode.

    See the ZFP docs for the meaning of the parameters.
    """
    zfp_mode_expert = 4
    return zfp_mode_expert, 0, minbits, maxbits, maxprec, minexp
11bdfddd1ff2a75e62f417deab8867d2e71b69b7
43,453
def cygwin_to_win_path(path):
    """
    Converts a Cygwin path to a Windows path.

    Only paths starting with "/cygdrive/" can be converted.

    :param path: Cygwin path to convert. Must be an absolute path.
    :type path: str

    :returns: Windows path.
    :rtype: str

    :raises ValueError: Cannot convert the path.
    """
    if not path.startswith("/cygdrive/"):
        raise ValueError(
            "Only paths starting with \"/cygdrive/\" can be converted.")
    drive = path[10].upper()
    path = path[11:]
    i = 0
    r = []
    while i < len(path):
        c = path[i]
        if c == "\\":
            r.append(path[i + 1:i + 2])
            i += 2
            continue
        if c == "/":
            c = "\\"
        r.append(c)
        i += 1
    path = "".join(r)
    return "%s:%s" % (drive, path)
1d90424208b3c5b42627b1d9ac321d9e40341b1a
43,461
import math


def extract_source_at_pos(MAP, x_pos, y_pos, PSF_npix_x, PSF_npix_y):
    """
    Extract a sub-MAP around the position (x_pos, y_pos).
    The shape of the sub-MAP is similar to the PSF_MAP.

    Parameters
    ----------
    MAP : numpy masked array
        The original MAP from which the source has to be extracted
    x_pos : float
        The x (first dimension) position of the source
    y_pos : float
        The y (second dimension) position of the source
    PSF_npix_x : integer
        The x (first dimension) size of the Point Spread Map
    PSF_npix_y : integer
        The y (second dimension) size of the Point Spread Map

    Returns
    -------
    SRC_MAP : numpy masked array
        The SouRCe MAP. A map of size (npix_x, npix_y) with a source created
        according to the PSF_MAP at the position (x_pos, y_pos)
    """
    cpix_x = int(math.floor(PSF_npix_x / 2))
    cpix_y = int(math.floor(PSF_npix_y / 2))
    # corner pixels
    x_inf = int(round(x_pos)) - cpix_x
    x_sup = x_inf + PSF_npix_x
    y_inf = int(round(y_pos)) - cpix_y
    y_sup = y_inf + PSF_npix_y
    # extract map
    SRC_MAP = MAP[x_inf:x_sup, y_inf:y_sup]
    return SRC_MAP
7a6549f5537336d467d403b4437128836927d141
43,465
def applyLatticeLimit(lattice, bounds):
    """Remove lattice points outside the data bounds. For 2D and 3D data.

    Parameters
    ----------
    lattice : ndarray; (N, 2) or (N, 3)
        From lattice2D
    bounds : tuple
        Minimum and maximum for axes 0 and 1 (min0, max0, min1, max1)
        or axes 0, 1 and 2 (min0, max0, min1, max1, min2, max2)

    Returns
    -------
    : ndarray; (M, 2) or (M, 3)
        Same as lattice input except only containing points within the bounds
        specified. M <= N
    """
    if len(bounds) == 4:
        goodUVs = ((lattice[:, 0] > bounds[0]) & (lattice[:, 0] < bounds[1])) & (
            (lattice[:, 1] > bounds[2]) & (lattice[:, 1] < bounds[3])
        )
    elif len(bounds) == 6:
        goodUVs = (
            ((lattice[:, 0] > bounds[0]) & (lattice[:, 0] < bounds[1]))
            & ((lattice[:, 1] > bounds[2]) & (lattice[:, 1] < bounds[3]))
            & ((lattice[:, 2] > bounds[4]) & (lattice[:, 2] < bounds[5]))
        )
    else:
        print("Bounds needs to be either a 4 or 6 value tuple.")
        return None
    return lattice[goodUVs, :]
98a280830ecf9f838eafc8cc78fccf8f13fc3f4e
43,468
def _get_popinfo(popinfo_file):
    """
    Helper function for make_data_dict_vcf. Takes an open file that contains
    information on the population designations of each sample within a VCF
    file, and returns a dictionary containing {"SAMPLE_NAME" : "POP_NAME"}
    pairs.

    The file should be formatted as a table, with columns delimited by
    whitespace, and rows delimited by new lines. Lines beginning with '#'
    are considered comments and will be ignored. Each sample must appear on
    its own line. If no header information is provided, the first column will
    be assumed to be the SAMPLE_NAME column, while the second column will be
    assumed to be the POP_NAME column. If a header is present, it must be the
    first non-comment line of the file. The column positions of the words
    "SAMPLE" and "POP" (ignoring case) in this header will be used to
    determine proper positions of the SAMPLE_NAME and POP_NAME columns in
    the table.

    popinfo_file : An open text file of the format described above.
    """
    popinfo_dict = {}
    sample_col = 0
    pop_col = 1
    header = False

    # check for header info
    for line in popinfo_file:
        if line.startswith('#'):
            continue
        cols = [col.lower() for col in line.split()]
        if 'sample' in cols:
            header = True
            sample_col = cols.index('sample')
        if 'pop' in cols:
            header = True
            pop_col = cols.index('pop')
        break

    # read in population information for each sample
    popinfo_file.seek(0)
    for line in popinfo_file:
        if line.startswith('#'):
            continue
        cols = line.split()
        sample = cols[sample_col]
        pop = cols[pop_col]
        # avoid adding header to dict
        if (sample.lower() == 'sample' or pop.lower() == 'pop') and header:
            header = False
            continue
        popinfo_dict[sample] = pop

    return popinfo_dict
01b1ce7d45bbaace42ab3cc13e6ab9c29353e8bf
43,469
def diff(it1, it2):
    """Find the differences between two iterables."""
    s2 = set(it2)
    diff = [x for x in it1 if x not in s2]
    return diff
163a445679bcaee0ce8aa7db320456ecee49d794
43,472
def is_file_of_extension(entry, *args):
    """
    Checks if the given directory entry is a file and ends in one of the
    given extensions. Extensions are passed in the args parameter and must
    be prefixed by a dot.
    """
    if not entry.is_file():
        return False
    if not args:
        return True
    for arg in args:
        if entry.name.endswith(arg):
            return True
    return False
e40390b88b0485323e96b328810abd3dbedf0155
43,475
def backward(my_string):
    """Return the reverse of the string 'my_string'.

    Examples:
        >>> backward("python")
        'nohtyp'
        >>> backward("ipython")
        'nohtypi'
    """
    return my_string[::-1]
5414027b87240863764f84163757b2090fbdae72
43,478
import click


def command_option(func):
    """Add a command option."""
    return click.option('-c', '--command', metavar='COMMAND',
                        help='Command to run remotely after operation is complete.')(func)
7e050a33e454499bf1519ef9af7a5400aa546bc8
43,485
def get_seq_lengths_from_seqs_dic(seqs_dic):
    """
    Given a dictionary of sequences, return dictionary of sequence lengths.
    Mapping is sequence ID -> sequence length.
    """
    seq_len_dic = {}
    assert seqs_dic, "sequence dictionary seems to be empty"
    for seq_id in seqs_dic:
        seq_l = len(seqs_dic[seq_id])
        seq_len_dic[seq_id] = seq_l
    return seq_len_dic
13baeef01c1fc4825781b72939a3e6d284aa4762
43,487
import ast


def parse(path):
    """Parse a file at path and return an AST tree structure.

    Args:
        path (str): filepath of the file to parse

    Returns:
        ast.Module: ast tree of the parsed file
    """
    with open(path) as source_code:
        return ast.parse(source_code.read(), path)
67fffbf3c694203a84cb15a5e75c0410f9f45e9e
43,489
def get_format(path):
    """
    Get the file's format by path.

    :param path: (str) The path of the file.
    :return: (str) The format of the file.
    """
    return path.split('.')[-1]
b5cf501a8b73a2114c82eb84915249dbf7e88a97
43,491
def l_to_q(liter):
    """Convert liters to quarts US."""
    return liter * 1.056688
8dafbe2ebf86b7df94d2fcf5f55fff2aa10b17f9
43,492
def check_position_detection(bounds):
    """Check whether the specified range of 5 intervals has the right
    proportions to correspond to a slice through a position detection pattern.

    An ideal slice through a position detection pattern consists of
    5 intervals colored B,W,B,W,B with lengths proportional to 1,1,3,1,1.

    Returns:
        (center_coord, pixels_per_module) if this could be a position
        detection pattern, otherwise (0, 0).
    """
    # Expected relative positions of black/white boundaries
    # within the position detection pattern.
    expect_bound_pos = [-3.5, -2.5, -1.5, 1.5, 2.5, 3.5]
    if (len(bounds) != 6) or (bounds[4] >= bounds[5]):
        return (0, 0)
    pattern_width = float(bounds[5] - bounds[0])
    middle_width = float(bounds[3] - bounds[2])
    if (pattern_width < 7) or (middle_width < 3):
        return (0, 0)
    center = float(sum(bounds)) / 6.0
    pitch = (pattern_width + middle_width) / 10.0
    good = True
    for k in range(6):
        rel_bound_pos = (bounds[k] - center) / pitch
        if abs(rel_bound_pos - expect_bound_pos[k]) >= 0.5:
            good = False
            break
    if not good:
        return (0, 0)
    return (center, pitch)
c425527405e0452d2069168908a81d713b7c9f33
43,493
def select_login_form(forms):
    """
    Select form having highest probability for login class.

    :param dict forms: Nested dict containing label probabilities for each form.
    :returns: (login form, login meta)
    :rtype: tuple
    """
    login_form = None
    login_meta = None
    login_prob = 0
    for form, meta in forms:
        for type_, prob in meta["form"].items():
            if type_ == "login" and prob > login_prob:
                login_form = form
                login_meta = meta
                login_prob = prob
    return login_form, login_meta
8e2b95b6d575f8044d248ad38b77782cccdeedf8
43,499
from typing import Dict
from typing import Any
from typing import Optional


def find_nested_field_path(
    field_name: str, mapping_definition: Dict[str, Any]
) -> Optional[str]:
    """
    Given a field name, find the nested path if any related to field name
    definition in provided mapping definition.

    Parameters
    ----------
    field_name:
        The field name
    mapping_definition:
        A mapping definition where field name is defined

    Returns
    -------
    The found nested path if any, None otherwise
    """

    def build_flatten_properties_map(
        properties: Dict[str, Any], prefix: str = ""
    ) -> Dict[str, Any]:
        results = {}
        for prop_name, prop_value in properties.items():
            if prefix:
                prop_name = f"{prefix}.{prop_name}"
            if "type" in prop_value:
                results[prop_name] = prop_value["type"]
            if "properties" in prop_value:
                results.update(
                    build_flatten_properties_map(
                        prop_value["properties"], prefix=prop_name
                    )
                )
        return results

    properties_map = build_flatten_properties_map(mapping_definition)
    for prop in properties_map:
        if properties_map[prop] == "nested" and field_name.startswith(prop):
            return prop
    return None
6b60b2cfd0bbb255f100e8ed8ccf2c4cca2ef804
43,501
import re


def findDirectives(code):
    """Find testing directives."""
    # raw string avoids invalid-escape warnings for \s in newer Pythons
    finder = re.compile(r"^'\s*VB2PY-Test\s*:\s*(\w+)\s*=\s*(.*)$", re.MULTILINE)
    return finder.findall(code)
8e98ba7453ba119a4ae7cabaeb67e74814bef008
43,503
def get_fileno(file):
    """Get the os-level fileno of a file-like object.

    This function decodes several common file wrapper structures in an attempt
    to determine the underlying OS-level fileno for an object.
    """
    while not hasattr(file, "fileno"):
        if hasattr(file, "file"):
            file = file.file
        elif hasattr(file, "_file"):
            file = file._file
        elif hasattr(file, "_fileobj"):
            file = file._fileobj
        else:
            raise AttributeError
    return file.fileno()
ede197cee9e8b97bc62b14c46d5a91241a002f6f
43,505
def convert_percentage_string_to_float(percentage_string):
    """Converts a string of the form 'xx.xx%' to its equivalent decimal value.

    :param percentage_string: A string in percentage form to be converted.
    :returns: A floating-point number rounded to 4 decimal places
        (2 decimals in percentage form).
    """
    return round(float(percentage_string.replace('%', '')) / 100, 4)
16fe6aa1475a922c15a13168400cc4a655ad544a
43,508
import re


def get_platform(source='<PLT_1>'):
    """A function to extract the platform from a source string.

    Args:
        source (str, optional): source string that usually contains the
            platform that was used to post the tweet. Defaults to '<PLT_1>'.

    Returns:
        str: the platform if found, otherwise the stamp PLT_1.
            This stamp is used for any further updates.
    """
    platform = 'PLT_1'
    try:
        platform = re.sub('[<>]', '\t', source).split('\t')[2]
        platform = platform.replace('Twitter for', '').replace('Twitter', '')
    except Exception:
        platform = 'PLT_1'
    return platform.strip()
c12c7fd02b53b24a70a6f5343f9bc031c5d0f513
43,510
def read_raw_message(raw_data: bytes) -> tuple:
    """Splits the message header from the data bytes of the message.

    :param raw_data: Full UDP packet
    :type raw_data: bytes
    :raises ValueError: When the header line is not present.
    :return: (Header, Data bytes)
    :rtype: Tuple
    """
    for i in range(len(raw_data)):
        if raw_data[i] == ord("\n"):
            return raw_data[0:i].decode("utf-8"), raw_data[i + 1:]
    raise ValueError("Unable to find the end of line")
e69dfb570b524a2b3f2c83ae7abf59aafb616f2d
43,511
def convert_snake_case(target):
    """
    Convert a string to snake case.

    :param target: the string to convert
    :type target: str
    :return: the converted string
    :rtype: str
    """
    # convert the string to snake case
    return target.lower().replace(' ', '_')
a5116f16ff08545f2ce6654c6e3e5b8534168085
43,514
def is_feature_extractor_model(model_config):
    """
    If the model is a feature extractor model:
        - evaluation model is on
        - trunk is frozen
        - number of features specified for features extraction > 0
    """
    return (
        model_config.FEATURE_EVAL_SETTINGS.EVAL_MODE_ON
        and (
            model_config.FEATURE_EVAL_SETTINGS.FREEZE_TRUNK_ONLY
            or model_config.FEATURE_EVAL_SETTINGS.FREEZE_TRUNK_AND_HEAD
        )
        and len(model_config.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP) > 0
    )
53c9b398efb09099bbe8257f875720a2ec41a495
43,515
def create_friedman_line(point0, point1):
    """
    Determines the second point needed to form the Friedman line.

    :param point0: First point on glenoid line, anatomically defined as a
        point on the anterior margin of glenoid
    :param point1: Second point on glenoid line, anatomically defined as a
        point on the posterior margin of glenoid
    :raises: ValueError if the z values of point0 and point1 are not equal
    :returns: The midpoint of the glenoid line, which is the second point
        of the Friedman line
    """
    if point0[2] != point1[2]:
        raise ValueError("For Friedman method points must have equal z values")
    midpoint_x = (point0[0] + point1[0]) / 2
    midpoint_y = (point0[1] + point1[1]) / 2
    midpoint = [midpoint_x, midpoint_y, point0[2]]
    return midpoint
64c9f74985651d23cfabd6a68089a18d89f6ecfe
43,523
def int_to_signed_byte(x):
    """Converts the signed integer to a 2s-complement byte."""
    if x > 0:
        return x & 0xff
    elif x < 0:
        return ((-x) ^ 0xff) + 1
    else:
        return 0
9b7cf347c48558ffcfe31ceba9884e9a703ccce2
43,524
import click


def d_reduce_options(f):
    """Create common options for dimensionality reduction."""
    f = click.option('--axes', nargs=2, type=click.Tuple([int, int]),
                     help='Plot the projection along which projection axes.',
                     default=[0, 1])(f)
    f = click.option('--dimension', '-d',
                     help='Number of the dimensions to keep in the output XYZ file.',
                     default=10)(f)
    f = click.option('--scale/--no-scale',
                     help='Standard scaling of the coordinates.',
                     default=True)(f)
    return f
97577d5d52b777ea33d4d47a80cbacc0f394ad00
43,525
def normalize(D, value=1):
    """normalize.

    Normalize the coefficients to a maximum magnitude.

    Parameters
    ----------
    D : dict or subclass of dict.
    value : float (optional, defaults to 1).
        Every coefficient value will be normalized such that the
        coefficient with the maximum magnitude will be +/- 1.

    Return
    ------
    res : same as type(D).
        ``D`` but with coefficients that are normalized to be within +/- value.

    Examples
    --------
    >>> from qubovert.utils import DictArithmetic, normalize
    >>> d = {(0, 1): 1, (1, 2, 'x'): 4}
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): 1}

    >>> from qubovert.utils import DictArithmetic, normalize
    >>> d = {(0, 1): 1, (1, 2, 'x'): -4}
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): -1}

    >>> from qubovert import PUBO
    >>> d = PUBO({(0, 1): 1, (1, 2, 'x'): 4})
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): 1}

    >>> from qubovert.utils import PUBO
    >>> d = PUBO({(0, 1): 1, (1, 2, 'x'): -4})
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): -1}
    """
    res = type(D)()
    mult = value / max(abs(v) for v in D.values())
    for k, v in D.items():
        res[k] = mult * v
    return res
d8dcce6a35254790e82f948608a3f848f1a80286
43,527
def comment_lines_ocaml(text, start='(* ', end=' *)'):
    """
    Build an OCaml comment line from input text.

    Parameters
    ----------
    text : str
        Text to comment out.
    start : str
        Character to open the multi-line comment.
    end : str
        Character to close the multi-line comment.
    """
    return start + ('\n').join(text.split('\n')) + end
13cd5de355bfbb8b9861016c1d95541d6aa0dbe2
43,530
def calculate_delta_time_series(times, valid_dt):
    """
    calculate_delta_time_series

    This function calculates the differences between all the elements of a
    timeseries and compares the differences with a valid time difference.

    True is returned if all the differences are valid; i.e. equal to the
    valid time difference argument.
    False is returned if any of the differences fail to match the valid
    time difference argument.

    :param times: List of times
    :param valid_dt: Valid time difference, usually scalar; a list of valid
        times is supported
    :return: tuple of (boolean [True for success], message string)
    """
    return_msg = ""
    for t in range(len(times) - 1):
        t_diff = times[t + 1] - times[t]
        if t_diff not in valid_dt:
            return_msg = "Time difference {} is irregular or not in allowed values {}".format(t_diff, valid_dt)
            return False, return_msg
    return True, return_msg
c1f42e185b6b2bb9c71942222998e46758db7ff1
43,532
from typing import Dict
from typing import Callable


def dict_subset(adict: Dict, predicate: Callable):
    """Return a dict that is a subset of ``adict`` using a filter function.

    The signature of the filter is: ``predicate(key, val) -> bool``
    """
    return {k: v for k, v in adict.items() if predicate(k, v)}
2ee1b2b2a20665222440cc43fdbb99a5151386cb
43,534
from typing import List


def asciiRowToStrings(filename: str) -> List[str]:
    """Extract and return a list of strings from the first row of the file.

    Args:
        filename

    Returns:
        list of strings
    """
    with open(filename, "r") as f:
        line = f.readline()
    if "," in line:
        line = line.replace(",", " ")
    strings = line.split()
    return strings
8e9c5d181e542fd2c80186394ea98c50ad1fbd44
43,536
def calculate_boundbox(list_coordinates):
    """
    Coordinates are inverted: x: columns, y: rows.

    list_coordinates: array of the form [[x1, y1], [x2, y2], ...]

    Returns the top-left point (x, y) and the width (w) and height (h)
    of the rectangle.
    """
    x = int(min(list_coordinates[:, 0]))
    y = int(min(list_coordinates[:, 1]))
    w = int(max(list_coordinates[:, 0]) - x)
    h = int(max(list_coordinates[:, 1]) - y)
    return x, y, w, h
bbabdd81e88dd4304293712aff389bfd568f4772
43,538
def _get_time_signature(e):
    """
    Get the time signature and return it.

    Returns
    -------
    tuple (number, number) OR None
        The tuple is (beats, beat_type). `beats` is the numerator,
        `beat_type` is the denominator of the time signature fraction.
    """
    if e.find('time/beats') is not None and e.find('time/beat-type') is not None:
        beats = int(e.find('time/beats').text)
        beat_type = int(e.find('time/beat-type').text)
        return (beats, beat_type)
    else:
        return None
bf1ba2b885ed55c7793fa3a355eadbad1d287987
43,553
def is_observed_custom_module(module):
    """Check if a module is marked as observed custom module or not."""
    return hasattr(module, '_is_observed_custom_module') and \
        module._is_observed_custom_module
b5455ba014d397849bbae637e9367dda1d53c94a
43,555
def _temp_pad(F, x, padding=1, zeros=True):
    """
    Pads a 3D input along temporal axis by repeating edges or zeros.

    Args:
        x: dim 5 b,t,c,w,h
        padding: the number of dims to add on each side
        zeros: pad with zeros?

    Returns:
        padded x
    """
    first = x.slice_axis(axis=1, begin=0, end=1)  # symbol compatible indexing
    last = x.slice_axis(axis=1, begin=-1, end=None)

    if zeros:
        first = first * 0
        last = last * 0

    if padding > 1:
        first = first.repeat(repeats=padding, axis=1)
        last = last.repeat(repeats=padding, axis=1)

    x = F.concat(first, x, dim=1)
    x = F.concat(x, last, dim=1)
    return x
192c11c9aa694755f309237294ad55397fb24a34
43,557
def infinite(smaj, smin, bpa):
    """
    If the beam is not correctly fitted by AWimager, one or more
    parameters will be recorded as infinite.

    :param smaj: Semi-major axis (arbitrary units)
    :param smin: Semi-minor axis
    :param bpa: Position angle
    """
    return smaj == float('inf') or smin == float('inf') or bpa == float('inf')
87a67ed9b880287def798fdfddd5c42c483e3b7d
43,559
def clip_paths(paths, bounds):
    """Return the paths that overlap the bounds."""
    return [path for path in paths if path.bounds().overlap(bounds)]
c19d099673e1270fe10a9daf1b60e1da240e9660
43,562
def zerocase(case):
    """Check if the binary string is all zeroes."""
    if int(case, 2) == 0:
        return True
    else:
        return False
4227ff69e8ccd4250f519de4d09d498dfc13289e
43,566
def parts_to_uri(base_uri, uri_parts):
    """
    Converts uri parts to a valid uri.
    Example: /members, ['profile', 'view'] => /members/profile/view
    """
    uri = "/".join(map(lambda x: str(x).rstrip('/'), [base_uri] + uri_parts))
    return uri
7fd45dba18152aed9dd18d6b2757c4f449d10930
43,569
def create_ids(data):
    """Generate IDs for the species in the dataset, by combining:

    - original species name
    - trait
    - reference
    - latitude
    - longitude
    """
    ids = {}
    for row in data:
        species = row[0]
        reference = row[2]
        trait = row[3]
        lat = row[7]
        lon = row[8]
        ids[species + reference + trait + str(lat) + str(lon)] = [species, reference, trait, lat, lon]
    return ids
3488e65358278c1cb97162bf9f8a4a8b06e54223
43,571
from functools import reduce


def mac_aton(s):
    """Convert a Mac address to an integer."""
    try:
        mac = list(map(lambda x: int(x, 16), s.split(':')))
        mac = reduce(lambda a, b: a + b, [mac[i] << (5 - i) * 8 for i in range(6)])
    except (ValueError, IndexError):
        raise ValueError('illegal Mac: {0}'.format(s))
    return mac
dd29c6e99998bfd0676ae9e7200f02500c2eed0d
43,575
from typing import Optional
from pathlib import Path


def get_resource(filename: str, path: Optional[str] = None) -> str:
    """A utility method to get the absolute path to a resource in the test suite.

    Args:
        filename: the name of the file to get.
        path: an optional path relative to the root of the test suite.

    Returns:
        The absolute path of the file.
    """
    root = Path(__file__).parent
    full_path = root if path is None else root / Path(path)
    return str(full_path / filename)
d2341484fb46dc4da2d00b9b62092d0f5b52339e
43,579
def check_input(prompt, assertion=None, default=None):
    """Get input from cmdline, ensuring that it passes the given assertion.

    assertion: a function that if given a value will return None if the check
    should pass, otherwise returning a helpful error message as a string."""
    if default is not None:
        prompt += " [default=%s]: " % str(default)
    while True:
        value = input(prompt).strip()
        if value == "" and default is not None:
            value = default
        if assertion is not None:
            check = assertion(value)
            if check is not None:
                error_msg = '\tInvalid input'
                if not isinstance(check, bool):
                    error_msg += ': ' + str(check)
                print(error_msg)
                continue
        return value
9cb72146a931974c186a8473a9f0b75e2f2515d6
43,583
import math


def project_gdf(gdf, to_crs=None, to_latlong=False):
    """
    Project a GeoDataFrame from its current CRS to another.

    If to_crs is None, project to the UTM CRS for the UTM zone in which the
    GeoDataFrame's centroid lies. Otherwise project to the CRS defined by
    to_crs. The simple UTM zone calculation in this function works well for
    most latitudes, but may not work for some extreme northern locations like
    Svalbard or far northern Norway.

    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        the GeoDataFrame to be projected
    to_crs : dict or string or pyproj.CRS
        if None, project to UTM zone in which gdf's centroid lies, otherwise
        project to this CRS
    to_latlong : bool
        if True, project to settings.default_crs and ignore to_crs

    Returns
    -------
    gdf_proj : geopandas.GeoDataFrame
        the projected GeoDataFrame
    """
    if gdf.crs is None or len(gdf) < 1:
        raise ValueError("GeoDataFrame must have a valid CRS and cannot be empty")

    # if to_latlong is True, project the gdf to latlong
    if to_latlong:
        gdf_proj = gdf.to_crs({"init": "epsg:4326"})
        # utils.log(f"Projected GeoDataFrame to {settings.default_crs}")

    # else if to_crs was passed-in, project gdf to this CRS
    elif to_crs is not None:
        gdf_proj = gdf.to_crs(to_crs)
        # utils.log(f"Projected GeoDataFrame to {to_crs}")

    # otherwise, automatically project the gdf to UTM
    else:
        # if CRS.from_user_input(gdf.crs).is_projected:
        #     raise ValueError("Geometry must be unprojected to calculate UTM zone")

        # calculate longitude of centroid of union of all geometries in gdf
        avg_lng = gdf["geometry"].unary_union.centroid.x

        # calculate UTM zone from avg longitude to define CRS to project to
        utm_zone = int(math.floor((avg_lng + 180) / 6.0) + 1)
        utm_crs = (
            f"+proj=utm +zone={utm_zone} +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
        )

        # project the GeoDataFrame to the UTM CRS
        gdf_proj = gdf.to_crs(utm_crs)
        # utils.log(f"Projected GeoDataFrame to {gdf_proj.crs}")

    return gdf_proj
d25ee19f834bb7befbee05a344c4581f872eb8dd
43,588
def list_experiment_names(exp_file_path):
    """Retrieve experiment names from the given file."""
    def helper():
        with open(exp_file_path, "r") as ef:
            for line in ef:
                exp_name = line.rstrip("\n")
                yield exp_name
    return [x for x in helper()]
143d958629c85926018f225d1b61427f7eceb30f
43,591