content: string (lengths 39–14.9k)
sha1: string (length 40)
id: int64 (0–710k)
from datetime import datetime


def sTimeUnitString(ismilli=False, abbr=True):
    """OpendTect-like time stamp

    Parameters:
        * ismilli (bool, optional): Include millisecond (default is False)
        * abbr (bool, optional): Abbreviated (default is True)

    Returns:
        * str: Time stamp string formatted like done by OpendTect

    Examples:
        >>> sTimeUnitString()
        'Mon 20 Apr 2020, 13:59:54'
        >>> sTimeUnitString( True )
        'Mon 20 Apr 2020, 13:59:54.001245'
        >>> sTimeUnitString( True, True )
        'Mon 20 Apr 2020, 13:59:54.001245'
        >>> sTimeUnitString( True, False )
        'Monday 20 April 2020, 13:59:54.001245'
    """
    if abbr:
        fmt = "%a %d %b"
    else:
        fmt = "%A %d %B"
    fmt += " %Y, %X"
    if ismilli:
        fmt += ".%f"
    return datetime.now().strftime(fmt)
2ba388ee895501490caecf37243f08bc8f4d2f24
43,596
def longest_line_length(file_name):
    """
    This function takes a string file_name and returns
    the length of the longest line in the file.
    """
    with open(file_name) as file:
        lines = file.readlines()
    length = 0
    for line in lines:
        if len(line) > length:
            length = len(line)
    if length == 0:
        return None
    else:
        return length
ead6e352b5886191318df7308efff7099e874a68
43,604
from typing import Tuple, List, Set


def _get_repeating_chars_count(box_id: str) -> Tuple[int, int]:
    """Returns a tuple (twice_repeated, thrice_repeated)

    twice_repeated, thrice_repeated:
        1 - at least one character is repeated exactly twice (or thrice) in the id
        0 - no character is repeated exactly twice (or thrice) in the id

    Arguments:
        box_id {String} -- Box id containing lowercase alphabets only

    Returns:
        [Tuple(int, int)]
    """
    counting_bucket: List[int] = [0] * 26
    char_code_start: int = ord("a")
    for letter in box_id:
        counting_bucket[(ord(letter) - char_code_start)] += 1
    unique_char_counts: Set[int] = set(counting_bucket)
    return int(2 in unique_char_counts), int(3 in unique_char_counts)
af4ac5f5b972e69d591691cf3176cf48661a30c2
43,605
from typing import List
from pathlib import Path


def open_article(filename: str) -> List[str]:
    """Loads plain text article into memory.

    Args:
        filename (str): article filename

    Returns:
        List[str]: raw article text, one string per line
    """
    input_path = Path("local", filename)
    with open(input_path, "r") as f:
        return f.readlines()
b064286cb097dd16fb9900278bdfd16e31d14b3b
43,615
def has_data_been_validated(flags):
    """Return True (or a boolean series) if flags has been validated"""
    return flags > 1
987f0c7dcd7d23d67075863642b2a7cbe314d6ac
43,616
import requests


def get_job_id(
    domain,
    org_key,
    headers,
    hostname="*",
    process="*",
    window="10h",
    start="0",
    end="0",
):
    """
    Function takes in the domain, org_key, headers, hostname, and timeframe
    to generate the initial query and retrieve the job id of that query.

    returns job_id
    """
    url = f"{domain}/api/investigate/v2/orgs/{org_key}/processes/search_jobs"
    if start != "0":
        time_range = {"end": end, "start": start}
    else:
        time_range = {"window": "-" + window}
    if hostname == "*":
        query_payload = {
            "query": "process_name:" + process,
            "fields": [
                "device_name",
                "process_start_time",
                "process_cmdline",
                "process_name",
                "process_pid",
                "parent_pid",
            ],
            "sort": [{"field": "device_timestamp", "order": "asc"}],
            "start": 0,
            "rows": 10000,
            "time_range": time_range,
        }
    else:
        query_payload = {
            "criteria": {"device_name": [hostname]},
            "query": "process_name:" + process,
            "fields": [
                "device_name",
                "process_start_time",
                "process_cmdline",
                "process_name",
                "process_pid",
                "parent_pid",
            ],
            "sort": [{"field": "device_timestamp", "order": "asc"}],
            "start": 0,
            "rows": 10000,
            "time_range": time_range,
        }
    print("")
    response = requests.post(url, headers=headers, json=query_payload).json()
    job_id = response.get("job_id")
    print("Query sent to Carbon Black Cloud")
    return job_id
e004ec3b9be5cfb45b8b7cce5c6ed38e68f77928
43,618
def _match_specie_group(file_metadata):
    """
    Classifies the virus taxonomy group from the NCBI-based structure into the
    broadly used Baltimore Classification.
    Based on https://en.wikipedia.org/wiki/Virus#Classification

    Baltimore Classification:
    I: dsDNA viruses (e.g. Adenoviruses, Herpesviruses, Poxviruses)
    II: ssDNA viruses (+ strand or "sense") DNA (e.g. Parvoviruses)
    III: dsRNA viruses (e.g. Reoviruses)
    IV: (+)ssRNA viruses (+ strand or sense) RNA (e.g. Picornaviruses, Togaviruses)
    V: (−)ssRNA viruses (− strand or antisense) RNA (e.g. Orthomyxoviruses, Rhabdoviruses)
    VI: ssRNA-RT viruses (+ strand or sense) RNA with DNA intermediate in life-cycle (e.g. Retroviruses)
    VII: dsDNA-RT viruses DNA with RNA intermediate in life-cycle (e.g. Hepadnaviruses)
    """
    # NCBI based taxonomy
    # https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Undef&id=10239&lvl=3&lin=f&keep=1&srchmode=1&unlock
    groups_patterns = [
        '; dsDNA viruses, no RNA stage; ',
        '; ssDNA viruses; ',
        '; dsRNA viruses; ',
        '; ssRNA positive-strand viruses, no DNA stage; ',
        '; ssRNA negative-strand viruses; ',
        # '; ',  # no clear match with VI from taxonomy
        # '; '   # no clear match with VII
    ]
    groups = [
        'dsDNA',
        'ssDNA',
        'dsRNA',
        '(+)ssRNA',
        '(-)ssRNA',
        'ssRNA-RT',
        'dsDNA-RT'
    ]
    for pattern in groups_patterns:
        if pattern in file_metadata:
            return groups[groups_patterns.index(pattern)]
    return 'unclassified'
36273c7678321acbea64dbd710fb2fbd40729a0a
43,619
def user_input_coords(string):
    """Converts user coordinate input from string to list"""
    while True:
        # makes sure the input has only two integer coordinates
        inp = input(string)
        if len([i for i in inp.split() if i.isdigit()]) == 2 and len(inp.split()) == 2:
            return list(map(int, inp.split()))
c4f4e617777c7076e14fcba3fd84251b8f46edf9
43,622
def softmax_bias(p, slope, bias):
    """
    Symmetric softmax with bias. Only works for binary. Works elementwise.
    Cannot use too small or large bias (roughly < 1e-3 or > 1 - 1e-3)

    :param p: between 0 and 1.
    :param slope: arbitrary real value. 1 gives identity mapping, 0 always 0.5.
    :param bias: between 1e-3 and 1 - 1e-3. Giving p=bias returns 0.5.
    :return: transformed probability.
    :type p: torch.FloatTensor
    :type slope: torch.FloatTensor
    :type bias: torch.FloatTensor
    :rtype: torch.FloatTensor
    """
    k = (1. - bias) ** slope
    k = k / (bias ** slope + k)
    q = k * p ** slope
    q = q / (q + (1. - k) * (1. - p) ** slope)
    return q

    # k = -torch.log(tensor(2.)) / torch.log(tensor(bias))
    # q = (p ** k ** slope)
    # return q / (q + (1. - p ** k) ** slope)
fe13cacabe721710c4108bc1322b90d2e971ca21
43,623
def index_to_angle(i):
    """
    Takes an index into a LIDAR scan array and returns the associated
    angle, in degrees.
    """
    return -135.0 + (i / 1081.0) * 0.25
63f99389ef532a662d5ea3a3a173a1ba8dd9df09
43,626
import binascii


def hex_to_bytes(value: str) -> list:
    """
    Parameters
    ----------
    value : str
        hex encoded string

    Returns
    -------
    list
        bytes array of hex value
    """
    return list(binascii.unhexlify(value))
d2ad089185accf72451a89bcd30cd0ddf90f5a49
43,633
import struct


def readShort(f):
    """Read unsigned 2 byte value from a file f."""
    (retVal,) = struct.unpack("H", f.read(2))
    return retVal
ce730430018b9589670e411c4186139fc2f0345d
43,648
# OrderedDict is imported from collections so it can be instantiated at runtime.
from collections import OrderedDict
from typing import Union, List, Dict


def sort_callbacks_by_order(
    callbacks: Union[List, Dict, OrderedDict]
) -> OrderedDict:
    """Creates a sequence of callbacks and sorts them.

    Args:
        callbacks: either list of callbacks or ordered dict

    Returns:
        sequence of callbacks sorted by ``callback order``

    Raises:
        TypeError: if `callbacks` is not one of `None`, `dict`, `OrderedDict`, `list`
    """
    if callbacks is None:
        output = OrderedDict()
    elif isinstance(callbacks, (dict, OrderedDict)):
        output = [(k, v) for k, v in callbacks.items()]
        output = sorted(output, key=lambda x: x[1].order)
        output = OrderedDict(output)
    elif isinstance(callbacks, list):
        output = sorted(callbacks, key=lambda x: x.order)
        output = OrderedDict([(i, value) for i, value in enumerate(output)])
    else:
        raise TypeError(
            f"Callbacks must be either Dict/OrderedDict or list, "
            f"got {type(callbacks)}"
        )
    return output
bba43ab6292e1132f8447e79403d9d730831e3de
43,649
def sign(value):
    """
    Returns an integer that indicates the sign of a number.

    Parameters:
        value (int): The number to get the sign of

    Returns:
        -1 if the number is negative, 0 if the number is 0,
        or 1 if the number is positive.
    """
    if value < 0:
        return -1
    elif value == 0:
        return 0
    else:
        return 1
71997a571fcdbadf45fa1dd3b8d2b6c54eafdd61
43,652
import socket
import struct


def build_ip_header(src_ip, dst_ip):
    """Builds a valid IP header and returns it

    Parameters:
    - src_ip: A string with a valid IP address which will be used as SRC IP
    - dst_ip: A string with a valid IP address where the packets will be sent to

     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |Version|  IHL  |Type of Service|          Total Length         |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |         Identification        |Flags|      Fragment Offset    |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |  Time to Live |    Protocol   |        Header Checksum        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Source Address                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                      Destination Address                      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                    Options                    |    Padding    |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    """
    ihl = 5
    version = 4
    ip_ihl_ver = (version << 4) | ihl
    ip_tos = 0
    ip_tot_len = 0          # kernel will fill the correct total length
    ip_id = 0xbeef          # Id of this packet
    ip_frag_off = 0
    ip_ttl = 255
    ip_proto = socket.IPPROTO_UDP
    ip_check = 0            # kernel will fill the correct checksum
    ip_saddr = socket.inet_aton(src_ip)   # Spoof the src IP if you want to
    ip_daddr = socket.inet_aton(dst_ip)

    # the ! in the pack format string means network order
    # see http://docs.python.org/2/library/struct.html#format-characters
    ip_header = struct.pack('!BBHHHBBH4s4s',
                            ip_ihl_ver, ip_tos, ip_tot_len, ip_id, ip_frag_off,
                            ip_ttl, ip_proto, ip_check, ip_saddr, ip_daddr)

    return ip_header
8f949e70ef55e18c7d2adaa812a8ed45fc86f358
43,653
def repr_object(o):
    """
    Represent an object for testing purposes.

    Parameters
    ----------
    o
        Object to represent.

    Returns
    -------
    result : str
        The representation.
    """
    if isinstance(o, (str, bytes, int, float, type)) or o is None:
        return repr(o)
    return "!" + str(type(o))
7ae24f46d626b7673af6389574b55583ec70971e
43,655
def ifb(bites):
    """
    ifb is a wrapper for int.from_bytes
    """
    return int.from_bytes(bites, byteorder="big")
ae02449bd48ebe69e6fcbc2714da90dc15a24d40
43,659
def build_container_sas_uri(storage_acc_url: str, container: str, sas: str) -> str:
    """
    Create a container SAS URL in the format of: {account-url}/{container}?{SAS}

    Note that this method is not responsible for the generation of the SAS token.

    :param storage_acc_url: Base URL to the storage account
    :param container: Name of the container in the storage account
    :param sas: Generated SAS token
    """
    return "{}/{}?{}".format(storage_acc_url, container, sas)
8ab30acf0b792324c5e170c8b3eadd657494b5c6
43,662
def _to_signed32(n):
    """Converts an integer to signed 32-bit format."""
    n = n & 0xffffffff
    return (n ^ 0x80000000) - 0x80000000
b5562063cc0467222f3d972eca9305dddbe4e05e
43,664
from typing import Tuple

import gc
import torch


def find_tensor_by_shape(target_shape: Tuple, only_param: bool = True) -> bool:
    """Find a tensor from the heap

    Args:
        target_shape (tuple): Tensor shape to locate.
        only_param (bool): Only match Parameter type (e.g. for weights).

    Returns:
        (bool): Return True if found.
    """
    for obj in gc.get_objects():
        try:
            # Only need to check parameter type objects if asked.
            if only_param and "torch.nn.parameter.Parameter" not in str(type(obj)):
                continue
            if torch.is_tensor(obj) or (hasattr(obj, "data") and torch.is_tensor(obj.data)):
                if obj.shape == target_shape:
                    return True
        except Exception:
            pass
    return False
606e0223ea4014d8b1f7d5c6bba16ec64a1fc1ea
43,668
def get_photon_energy(wavelengths):
    """
    computes the energy of the photon of a given wavelength
    :param wavelengths: [m]
    :return: J = W*s
    """
    plank_constant = 6.62606957 * 10**-34  # J*s
    speed_of_light = 299792458  # m*s^-1
    nu = speed_of_light / wavelengths  # s^-1
    E = plank_constant * nu  # J = W*s
    return E
4c6985c90465cbcd79f204219762f13bc4a71203
43,669
def ALL_ELEMENTS_TRUE(*expressions):
    """
    Evaluates an array as a set and returns true if no element in the array is false.
    Otherwise, returns false. An empty array returns true.
    https://docs.mongodb.com/manual/reference/operator/aggregation/allElementsTrue/ for more details

    :param expressions: The arrays (expressions)
    :return: Aggregation operator
    """
    return {'$allElementsTrue': list(expressions)}
33c343c42bc8bcdf9dfdddf643c481fd85f4a784
43,670
def cmpTup2(tupA, tupB):
    """A comparator function that compares two tuples on the basis of the
    value of their second element."""
    if tupA[1] < tupB[1]:
        return -1
    elif tupA[1] == tupB[1]:
        return 0
    else:
        return 1
b41dcdd85711027116b503d2a68b2a97c16013b2
43,677
def sample_trpo_params(trial):
    """
    Sampler for TRPO hyperparams.

    :param trial: (optuna.trial)
    :return: (dict)
    """
    gamma = trial.suggest_categorical('gamma', [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    timesteps_per_batch = trial.suggest_categorical('timesteps_per_batch', [16, 32, 64, 128, 256, 512, 1024, 2048])
    max_kl = trial.suggest_loguniform('max_kl', 0.00000001, 1)
    ent_coef = trial.suggest_loguniform('ent_coef', 0.00000001, 0.1)
    lam = trial.suggest_categorical('lamdba', [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    cg_damping = trial.suggest_loguniform('cg_damping', 1e-5, 1)
    cg_iters = trial.suggest_categorical('cg_iters', [1, 10, 20, 30, 50])
    vf_stepsize = trial.suggest_loguniform('vf_stepsize', 1e-5, 1)
    vf_iters = trial.suggest_categorical('vf_iters', [1, 3, 5, 10, 20])

    return {
        'gamma': gamma,
        'timesteps_per_batch': timesteps_per_batch,
        'max_kl': max_kl,
        'entcoeff': ent_coef,
        'lam': lam,
        'cg_damping': cg_damping,
        'cg_iters': cg_iters,
        'vf_stepsize': vf_stepsize,
        'vf_iters': vf_iters
    }
d3768c949b1bd9dd36131dc418d640b1ca545683
43,679
def check_convergence(new_measure, old_measure, direction, threshold):
    """Check if the performance meets the given threshold

    Args:
        new_measure (float): New performance
        old_measure (float): Old performance
        direction (str): String to indicate how to compare two measures
        threshold (float): The given threshold

    Returns:
        True if the new measure satisfies threshold, False otherwise
    """
    sign = 1.0 if direction == 'higher' else -1.0
    if sign * (new_measure - old_measure) / old_measure < threshold:
        return True
    else:
        return False
fca1c9deb85c27f36c9e50b9ee9839121778d074
43,680
def get_total_project_memberships(project):
    """Return the total number of memberships of a project (members and
    unaccepted invitations).

    :param project: A project object.

    :return: a number.
    """
    return project.memberships.count()
bd7fc14f45279ec6d13d35e2d41a12a3bbd3ab3e
43,682
import re


def _parse_human_filesize(m):
    """Parses human readable file sizes, such as "1240", "200k", "30m",
    and returns them as int. Raises ValueError if the value cannot be parsed."""
    try:
        return int(m)
    except ValueError as e:
        match = re.match("^(\\d+)([kmgtp])$", m)
        if match:
            digits = match[1]
            suffix = match[2]
            multiplier = 1
            for letter in ["k", "m", "g", "t", "p"]:
                multiplier *= 1024
                if suffix == letter:
                    return multiplier * int(digits)
        raise e
315166aff676dac777269201820714fc8f8e52f7
43,683
import torch


def log_prob(p, c=1e-5):
    """
    Truncated log_prob for numerical stability.
    """
    return torch.log((1.0 - c) * p + c / 2)
b13db9f6377386fff31c69dbc55a84b07d1305fb
43,684
def get_study(assc, size=5):
    """
    Return most annotated genes from association dict
    """
    most_annotated = sorted(assc.keys(), key=lambda i: len(assc[i]), reverse=True)
    study = most_annotated[:size]
    study = frozenset(study)
    print(f"### Using the {size} most annotated genes as study: {','.join(study)} ")
    return study
0e37c6220c0d5e5db0904a673ffe6b4d5a42fd5c
43,693
import string


def to_valid_filename(s):
    """Converts arbitrary string (for us domain name)
    into a valid file name for caching
    """
    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
    return ''.join(c for c in s if c in valid_chars)
cd327e2f4b6c507b958c65b66dd412c0720a281b
43,699
def get_symbol_idx(symbol_vector, name):
    """
    Find the index of a casadi symbol by its name in a vertcat (vertical
    concatenation) of symbols. If the symbol is a vector instead of a scalar,
    this method returns the index range of the symbol.
    """
    v_len = symbol_vector.size()[0]
    slice_start = 0
    for i in range(v_len):
        info = symbol_vector[slice_start:i + 1].info()
        if "slice" not in info:
            if symbol_vector[slice_start:i + 1].name() == name:
                return (slice_start, i + 1) if i != slice_start else i
            else:
                slice_start = i + 1
    return None
8f4d7c88b8922c3c12efed05540a80a6dc4123ec
43,705
def most_digits(arr):
    """
    Returns the maximum length of any of the numbers in the list
    """
    length = 0
    for num in arr:
        num_length = len(f"{num}")
        if num_length > length:
            length = num_length
    return length
61383769f3b788c6498b9597589a32364401597c
43,711
def _makeMsgRandomFunc(choices):
    """Function factory given `choices`"""
    async def func(cli, dest, user=None):
        cli.msgrandom(cli, choices, dest, user)
    return func
9a2eca62bcaad93dc4b6be9f4a14d5d53f88161d
43,714
import glob


def get_files(directory, extension):
    """
    Take a directory and an extension and return the files
    that match the extension
    """
    return glob.glob('%s/*.%s' % (directory, extension))
34ce85197f49d39c9a18c15826bbfd0f70fdf0c0
43,716
def check_uniqueness(digits: list) -> bool:
    """
    Checks if elements in the lists are different.
    >>> check_uniqueness([1, 2, 3])
    True
    >>> check_uniqueness([2, 4, 4])
    False
    """
    unique = set(digits)
    if len(unique) == len(digits):
        return True
    return False
1a03da7a01113f39b91650db757c8f6378f5ad9a
43,721
def mapped_actors(data):
    """
    Creates a mapping of actors to whom they have acted with.
    Returns a dictionary of form {actor_id_1: {actor_id_2}}
    """
    d = {}
    # Map actor_1 to actor_2 and actor_2 to actor_1
    for i in data:
        if i[0] not in d:
            d[i[0]] = set()
        d[i[0]].add(i[1])
        if i[1] not in d:
            d[i[1]] = set()
        d[i[1]].add(i[0])
    return d
8ded3f4a307c5ca156191f507c7449a24b4c75b6
43,723
def make_roi_header(**param):
    """
    Format header data to be written when saving ROI data.

    Args:
        param (dict): integration parameters (the integration method is
            inferred from the parameter names).

    Returns:
        hdr_list (string): header data.
    """
    hdr_list = ['== Integration ROI ==']
    method = [i for i in param.keys() if "pos" in i][0].split('_pos')[0]
    hdr_list.append('Integration method: {}'.format(method))
    for k, v in param.items():
        hdr_list.append('{}: {}'.format(k, v))

    header = "\n".join(['# ' + i for i in hdr_list])
    return header
ba3ad2218cab4144e790b539fc7208bf065ce53c
43,727
def remove_es_keys(hit):
    """
    Removes ES keys from a hit object in-place

    Args:
        hit (dict): Elasticsearch hit object

    Returns:
        dict: modified Elasticsearch hit object
    """
    del hit['_id']
    if '_type' in hit:
        del hit['_type']
    return hit
ad41161cfa999f7fd313f7f4be7d1b7dcb770796
43,734
import ast


def is_w_mode_open_call(node):
    """Return True if node represents `rasterio.open(path, "w", ...)`"""
    return (
        isinstance(node.func, ast.Attribute)
        and node.func.attr == "open"
        and isinstance(node.func.value, ast.Name)
        and node.func.value.id == "rasterio"
        and len(node.args) > 1
        and isinstance(node.args[1], ast.Str)
        and node.args[1].s == "w"
    )
7be9af4c6c306e33c35b7ac38138ce4106396893
43,738
def make_edge(v1, v2):
    """
    We store edges as tuple where the vertex indices are sorted (so
    the edge going from v1 to v2 and v2 to v1 is the same)
    """
    return tuple(sorted((v1, v2)))
131260b83de9ed61f1175df72ca46c2e8f12ebfd
43,745
def text_to_bytes(text, encoding='UTF-8', size=None):
    """
    Encode some text or string to a byte array

    :param text: text to encode to bytes
    :param encoding: optional encoding of the passed string. default to utf-8.
    :param size: optional, if given the text will be padded with 0x00 to the right size
    :return: a bytes object
    """
    res = str(text).encode(encoding)
    if size:
        res = res.rjust(size, b'\x00')
    return res
b8b4b1f2cf0333633aa164c7a6894431feb11cca
43,748
def _uint32(x):
    """Transform x's type to uint32."""
    return x & 0xFFFFFFFF
8b7547738f69b7aa39b40e85852ebbf03cc6fbfa
43,749
def form_gains(epsilon):
    """
    Turn reflection coefficients into gains.

    Reflection gains are formed via g = 1 + eps
    where eps is the reflection coefficient
    eps = A exp(2j * pi * tau * freqs + 1j * phs)

    Args:
        epsilon : dictionary, ant-pol keys and ndarray values

    Returns:
        gains : dictionary, ant-pol keys and ndarray values
    """
    return dict([(k, 1 + epsilon[k]) for k in epsilon.keys()])
443b7058494a195e79b1d6cc5596f3d49d84cc1b
43,750
def get_audio_config(features_list):
    """
    Converts a list of features into a dictionary understandable by
    `data_extractor.AudioExtractor` class
    """
    audio_config = {'mfcc': False, 'chroma': False, 'mel': False, 'contrast': False, 'tonnetz': False}
    for feature in features_list:
        if feature not in audio_config:
            raise TypeError(f"Feature passed: {feature} is not recognized.")
        audio_config[feature] = True
    return audio_config
737b81a3e37417deb93183ac3cb670f28238db39
43,751
def compatibility(i, j, i_n, j_n):
    """
    Defines the compatibility function
    """
    distance = ((i - i_n)**2.0 + (j - j_n)**2.0)**0.5
    return 1 if distance > 0 else 0
    # return distance > 0
8fe28d8b0451d245d039178fbf23f8f452fa9747
43,752
def grep_PAW_order(fpot):
    """ helper-function to extract order of PAW potential """
    with open(fpot, 'r') as pfile:
        lines = pfile.readlines()
    order = []
    for l in range(len(lines)):
        if len(lines[l].split()) > 0 and lines[l].split()[0] == 'PAW_PBE':
            order.append(lines[l].split()[1])
    return order
c0ed123f408fd3ae9871d9124ddde35cd284cd62
43,756
import pickle


def load_model(file_path):
    """
    load data from .pkl file from disk
    assuming that data is a list containing
    [x_train, x_validation, y_train, y_validation, actual_labels, predicted_labels]

    :param file_path: file to be loaded with its full path
    :return: model data
    """
    with open(file_path, "br") as load:
        data = pickle.load(load)
    return data
8966fdc7d0d728f5df66076732b77e77f7e519c4
43,760
def PNT2Tidal_Pv12(XA):
    """TaylorT2 1PN Quadrupolar Tidal Coefficient, v^12 Phasing Term.

    XA = mass fraction of object
    """
    XATo2nd = XA*XA
    XATo3rd = XATo2nd*XA
    return (15895)/(56) - (4595*XA)/(56) - (5715*XATo2nd)/(28) + (325*XATo3rd)/(14)
2fd86b73db292798a8a552a50a261b45e4d8cbe2
43,768
def longest_repeated_substring(word):
    """
    Finds the longest repeated sub-sequence of characters in a string

    :param word: string - the word to search in
    :return: string - the longest repeated substring. If there are two or more
        substrings tied for the longest - the one with the first instance in
        word will be returned.
    """
    # Since we are looking for the longest - start searching at the maximum possible length
    length = len(word) // 2
    while length > 0:
        index = 0
        # Go through the possible starting indexes and check whether the string appears later in word.
        while index + length < len(word):
            if word[index: index + length] in word[index + length:]:
                return word[index: index + length]
            index = index + 1
        length = length - 1
    # If we didn't find any repeated substring - even of length 1, return an empty string
    return ""
4f14392cb1f157364c6309e055502acfb4e0b4f5
43,770
def is_volume_attached(volume):
    """
    This function checks if a volume is attached and returns
    the answer to that question as a boolean
    """
    if not volume.get('Attachments'):
        return False
    attached_states = {'attached', 'attaching'}
    for attachment in volume['Attachments']:
        if attachment.get('State') in attached_states:
            return True
    # The volume is not attached since the above return
    # did not exit the function
    return False
4d6715ee93025b895e4df29457f9d556882c713a
43,771
def _get_normalized_tde_config(tde_config):
    """Normalize the TDE configuration of a SQL database.

    Arguments:
        tde_config (dict): Raw TDE configuration of a SQL database

    Returns:
        dict: Normalized TDE configuration
    """
    tde_info = {}
    tde_enabled = False
    tde_status = tde_config.get('status')
    if tde_status == 'Enabled':
        tde_enabled = True
    tde_info['tde_enabled'] = tde_enabled
    return tde_info
cdd425ed32b7a16ccf7443f41351a3303614811d
43,772
def is_all_o(values):
    """check if all the given values are O"""
    for val in values:
        if val != "O":
            return False
    return True
8b92c8a4894a5dd0acb01f903808b7dd286638b4
43,774
def get_markdown_title_id(section_title):
    """Returns the HTML equivalent id from a section title

    Arguments:
        section_title -- Section title
    """
    return section_title.replace(" ", "_").lower()
54c27cefc5db7685d0f173bd756026764dea7e4a
43,776
def ntimes(string, char):
    """
    Return number of times character 'char' occurs in string
    """
    return string.count(char)
c24d0388a159d12f61e0d21302fff35d32357f78
43,777
def create_episode(conn, episode):
    """
    Create a new episode into the episodes table

    :param conn:
    :param episode:
    :return: episode id
    """
    sql = '''INSERT INTO episode(date, id_show, id_corpus, partition, path)
             VALUES(?,?,?,?,?)'''
    cur = conn.cursor()
    cur.execute(sql, episode)
    return cur.lastrowid
8b83bfc85d29c938316ffaa1e1f1b7534784c773
43,779
def get_dict_key(dic, n=0):
    """
    Return the first (or nth) key name from a dict
    """
    return list(dic.keys())[n]
3e2e7bc4eb2264247883cd567cecb67a978096c5
43,787
import re


def escape_path(key):
    """
    Convert a key to a filename by escaping invalid characters.
    """
    return re.sub(r"[ \\/]+", "_", key)
4c63d436f669c8ecac2c6fba6e58c3d5be0b840f
43,790
import torch


def _independent(distribution):
    """
    Make predictive distribution for test set independent.

    Parameters
    ----------
    distribution : `torch.distribution.Distribution`
        Input distribution.

    Returns
    -------
    distribution : `torch.distribution.Distribution`
        Output distribution.
    """
    return torch.distributions.normal.Normal(
        loc=distribution.mean.flatten(),
        scale=distribution.variance.pow(0.5).flatten(),
    )
6443904e46f6598dd8765708c37252e0cc6997b0
43,794
def caseInsensitiveStringMatch(string1, string2):
    """assumes string1 and string2 are strings
    returns a boolean, True if string1 and string2 match
    irrespective of case, else False
    """
    return string1.lower() == string2.lower()
3dfcb39f1fa9ae9d05a46f1f740af7a0e7f4bc84
43,795
def dmask(d, ks):
    """Copy dictionary ``d`` and remove key list ``ks``."""
    d = d.copy()
    for k in ks:
        if k in d:
            del d[k]
    return d
d9747a830f7c3e1e13b7e224fdf7caf72d0ea909
43,797
import re


def get_link_destinations(chunk):
    """Find any target of a link in HTML code

    Use regex to find tags with the id or name attribute,
    which makes them a possible target of a link

    :param str chunk: text string
    :return: destinations, destination_tags
    :rtype: Tuple[list[str], list[str]]
    """
    destinations, destination_tags = [], []
    # html links. label{} has already been converted
    pattern_tag = r'[\w _\-:]'
    pattern_backslash = '[\\\]'
    pattern = r'<' + pattern_tag + \
              '+ (id|name)=' + pattern_backslash + '["\']' + \
              '(' + pattern_tag + '+)' + pattern_backslash + '["\'][^>]*>'
    for m in re.finditer(pattern, chunk):
        match = m.group()
        tag = m.group(2)
        destinations.append(match)
        destination_tags.append(tag)
    return destinations, destination_tags
c3e2e48355aa2da147162b5cd5c97c800aea0b7d
43,800
def estimated_sp(vests):
    """Convert VESTS to SP units for display."""
    return vests * 0.0005034
771dddf5b7c3373c8ca82c48a8889c4b3e1f39ae
43,809
def _get_top_values_categorical(series, num_x):
    """Get the most frequent values in a pandas Series. Will exclude null values.

    Args:
        series (pd.Series): data to use to find the most frequent values
        num_x (int): the number of top values to retrieve

    Returns:
        top_list (list(dict)): a list of dictionaries with keys `value` and `count`.
            Output is sorted in descending order based on the value counts.
    """
    frequencies = series.value_counts(dropna=True)
    df = frequencies.head(num_x).reset_index()
    df.columns = ["value", "count"]
    df = df.sort_values(["count", "value"], ascending=[False, True])
    value_counts = list(df.to_dict(orient="index").values())
    return value_counts
d3ea35ba6ee60536a56bbfc07cfae3633f26b138
43,810
import math


def cosine_rule(v_original: float, v_target: float, angle_dif: int) -> float:
    """Apply the cosine rule to compute the Delta-V needed to transfer from
    one velocity to another with a difference in angle.

    Args:
        v_original: the original velocity.
        v_target: the target velocity.
        angle_dif: the angle at which the 2 velocities differ in degrees.

    Returns:
        the length of the velocity vector connecting the 2 ends of
        v_original and v_target."""
    return math.sqrt(((v_original ** 2) + (v_target ** 2))
                     - (2 * v_original * v_target * math.cos(math.radians(angle_dif))))
3d0274e4ae98ff076c75341f709c785cb6430007
43,811
import re


def as_fuse_id(shader_name, shader_id):
    """
    Derive an identifier from shader_name.

    Remove whitespace, leading digits, special characters, etc to make
    something that can be used as an identifier out of `shader_name`.
    Such an identifier is in particular what's needed as the first
    parameter to `FuRegisterClass()`.
    """
    name = shader_name

    # Example: "Fork Who cares? nmbr73 321" -> "Who cares"
    name = re.sub(r'^Fork (.+) ([^ ]+) \d+$', r'\1', name)

    # Replace all invalid characters with a ' '
    name = re.sub(r'[^A-Za-z0-9 ]+', ' ', name)

    # Put 'D' in front if first character is a digit
    name = re.sub(r'^(\d.*)$', r'D\1', name)

    # Transform leading characters to upper case
    name = name.title()

    # Eliminate all spaces
    name = ''.join(x for x in name if not x.isspace())

    return name
78be0775207147b7de65138596338459c0f6b537
43,814
def next_frame_prediction(generator, input_tensor):
    """Just one forward pass through the generator"""
    output_tensor = generator.inference(input_tensor, None, None)
    return output_tensor
580af372c9b73306b1158327cde874c3d34649d4
43,818
def mirror_path(fn):
    """Simply replicate the source path in the build directory"""
    return fn
c4447ca82cd7d14fb9771da6e6340b46a622dbdc
43,824
def census_count(
    df, group2=None, weight="person_weight", normalize=False, total=None, drop=True
):
    """
    Counts the total number of the given data in each PUMA /
    Calculates percentage of the given data in each PUMA

    Parameters
    ----------
    df : DataFrame/GeoDataFrame
    group2 : str, optional
        a string keyword that specifies if there is a second column to
        group by in the groupby function
    weight : str, default = 'person_weight'
        change to 'house_weight' for household census data
    normalize : bool, optional
        a boolean keyword that specifies whether to divide by the total to
        calculate the percentage. If normalize = True, need keyword total.
    total : series, optional
        a series to divide the count by to return a percentage.
    drop : bool, optional
        a boolean keyword that specifies whether to drop the index when resetting

    Returns
    --------
    census_count : series or dataframe
    """
    # Two columns to groupby
    # Returns pivot dataframe with the second group as columns
    if group2 is not None:
        group = df.groupby(["geo_id", group2])
        census_count = group[weight].sum().reset_index()
        census_count = census_count.pivot(
            index="geo_id", columns=group2, values=weight
        ).reset_index(drop=drop)
    # Groupby PUMA
    # Returns series
    else:
        group = df.groupby(["geo_id"])
        census_count = group[weight].sum().reset_index(drop=drop)
    # Divide series or dataframe by total to return percentage
    if normalize:
        census_count = census_count.div(total, axis=0) * 100
    return census_count
8c10307118e54c32486ceb407ec0432a7ff7bb5f
43,834
def clip_overflow(textblock, width, side='right'):
    """Clips overflowing text of TextBlock2D with respect to width.

    Parameters
    ----------
    textblock : TextBlock2D
        The textblock object whose text needs to be clipped.
    width : int
        Required width of the clipped text.
    side : str, optional
        Clips the overflowing text according to side.
        It takes values "left" or "right".

    Returns
    -------
    clipped text : str
        Clipped version of the text.
    """
    side = side.lower()
    if side not in ['left', 'right']:
        raise ValueError("side can only take values 'left' or 'right'")

    original_str = textblock.message
    start_ptr = 0
    end_ptr = len(original_str)

    prev_bg = textblock.have_bg
    textblock.have_bg = False

    if textblock.size[0] <= width:
        textblock.have_bg = prev_bg
        return original_str

    if side == 'left':
        original_str = original_str[::-1]

    while start_ptr < end_ptr:
        mid_ptr = (start_ptr + end_ptr) // 2
        textblock.message = original_str[:mid_ptr] + "..."
        if textblock.size[0] < width:
            start_ptr = mid_ptr
        elif textblock.size[0] > width:
            end_ptr = mid_ptr

        if mid_ptr == (start_ptr + end_ptr) // 2 or \
                textblock.size[0] == width:
            textblock.have_bg = prev_bg
            if side == 'left':
                textblock.message = textblock.message[::-1]
            return textblock.message
9c17b49515d5028d172526612bfb2f956eecb248
43,838
def perc_bounds(percent_filter):
    """
    Convert +/- percentage to decimals to be used to determine bounds.

    Parameters
    ----------
    percent_filter : float or tuple, default None
        Percentage or tuple of percentages used to filter around reporting
        irradiance in the irr_rc_balanced function. Required argument when
        irr_bal is True.

    Returns
    -------
    tuple
        Decimal versions of the percent irradiance filter. 0.8 and 1.2 would be
        returned when passing 20 to the input.
    """
    if isinstance(percent_filter, tuple):
        perc_low = percent_filter[0] / 100
        perc_high = percent_filter[1] / 100
    else:
        perc_low = percent_filter / 100
        perc_high = percent_filter / 100
    low = 1 - (perc_low)
    high = 1 + (perc_high)
    return (low, high)
006999800a18ebf0ad96e4a6a8e022d1a7a79306
43,839
def isSpecialTrueType(glyph):
    """
    Fontforge treats three control characters as the special
    TrueType characters recommended by that standard
    """
    e = glyph.encoding
    return e == 0 or e == 1 or e == 0xD
2f79894b077989660d07153ea360145e8b167b3b
43,840
def linear(x0: float, x1: float, p: float):
    """
    Interpolates linearly between two values such that when p=0 the
    interpolated value is x0 and at p=1 it's x1
    """
    return (1 - p) * x0 + p * x1
ec41990ab6dc277ac6b88bfc130bcd5a4b11ce0c
43,842
def extract_array_column(a, col):
    """Extracts a column from a tabular structure

    @type a: list/tuple
    @param a: an array of equal-sized arrays, representing a table as a
        list of rows.
    @type col: usually int or str
    @param col: the column key of the column to extract
    @rtype: list
    @return: the values of column col from each row in a
    """
    column = []
    for r in a:
        column.append(r[col])
    return column
9dbdeee05076339f54c5ab3e7985ac466d8ceddb
43,843
def expand_aabb(left, right, top, bottom, delta_pixel):
    """
    Increases size of axis aligned bounding box (aabb).
    """
    left = left - delta_pixel
    right = right + delta_pixel
    top = top - delta_pixel
    bottom = bottom + delta_pixel
    return left, right, top, bottom
0073df23538892a5ae0262b82f16eabbd1f41da2
43,848
def uncompleted_task_on(line: str) -> bool:
    """Return whether there's an uncompleted task on the line."""
    return bool(line.strip() and not line.startswith("x "))
c68923a9134de99c9bfd80c52c5a8ad1e861f6e1
43,855
def retry(times, func, *args, **kwargs):
    """Try to execute multiple times function mitigating exceptions.

    :param times: Amount of attempts to execute function
    :param func: Function that should be executed
    :param args: *args that are passed to func
    :param kwargs: **kwargs that are passed to func

    :raises: Raise any exception that can raise func
    :returns: Result of func(*args, **kwargs)
    """
    for i in range(times):
        try:
            return func(*args, **kwargs)
        except Exception:
            if i == times - 1:
                raise
3b25dab272c9919986775222cd29d8c7a9c78606
43,860
import hashlib


def create(username, password):
    """
    Create a hashed string of a password with username as the salt
    """
    # Encode to bytes so hashlib accepts str input on Python 3.
    return hashlib.sha512((username + password).encode("utf-8")).hexdigest()
5749142912c78113f6e25ec515854286eacc2de2
43,865
def sort_tuple_list(tuple_list: list):
    """sort_tuple

    Sort a list of tuples by their first item.
    Ref: https://www.geeksforgeeks.org/python-program-to-sort-a-list-of-tuples-by-second-item/

    Args:
        tuple_list (list): a list of tuples
    """
    tuple_list.sort(key=lambda x: x[0])
    return tuple_list
d9e5d83b4e1cdae43ce0449693732b27636efa69
43,871
def indent(text: str, amount: int) -> str:
    """indent according to amount, but skip first line"""
    return "\n".join(
        [
            amount * " " + line if id > 0 else line
            for id, line in enumerate(text.split("\n"))
        ]
    )
a78fedaada94bd9dd5129e7f85f2ba2fe506eb42
43,876
def _set_default_if_empty_str(tst_str, default=None):
    """
    Return None if str is an empty string or return str. Used to test for
    general options that reset with value of "" and reset to either None
    or the default value.

    Parameters:
        tst_str (:term:`string`): the string to test for value of ""

        default (:term:`string`): Optional default string value to return

    Returns:
        None or the value of the default parameter
    """
    return default if tst_str == "" else tst_str
bac210253bc6ffd685fc3677ccdd062d4483035d
43,878
def is_wgs_accession_format(contig_accession):
    """
    Check if a Genbank contig is part of WGS (Whole Genome Shotgun) sequence

    :param contig_accession: Genbank contig accession (ex: CM003032.1)
    :return: True if the provided contig is in the WGS format
    """
    wgs_prefix = contig_accession[:4]
    wgs_numeric_suffix = contig_accession[4:].replace(".", "")
    return str.isalpha(wgs_prefix) and str.isnumeric(wgs_numeric_suffix)
1e4ece9c428264ed5e74e8f83ad9b0521bc57988
43,879
def build_empty_response(search_path, operation_name, service_model):
    """
    Creates an appropriate empty response for the type that is expected,
    based on the service model's shape type. For example, a value that
    is normally a list would then return an empty list. A structure would
    return an empty dict, and a number would return None.

    :type search_path: string
    :param search_path: JMESPath expression to search in the response

    :type operation_name: string
    :param operation_name: Name of the underlying service operation.

    :type service_model: :ref:`ibm_botocore.model.ServiceModel`
    :param service_model: The Botocore service model

    :rtype: dict, list, or None
    :return: An appropriate empty value
    """
    response = None
    operation_model = service_model.operation_model(operation_name)
    shape = operation_model.output_shape
    if search_path:
        # Walk the search path and find the final shape. For example, given
        # a path of ``foo.bar[0].baz``, we first find the shape for ``foo``,
        # then the shape for ``bar`` (ignoring the indexing), and finally
        # the shape for ``baz``.
        for item in search_path.split('.'):
            item = item.strip('[0123456789]$')
            if shape.type_name == 'structure':
                shape = shape.members[item]
            elif shape.type_name == 'list':
                shape = shape.member
            else:
                raise NotImplementedError(
                    'Search path hits shape type {0} from {1}'.format(
                        shape.type_name, item))
    # Anything not handled here is set to None
    if shape.type_name == 'structure':
        response = {}
    elif shape.type_name == 'list':
        response = []
    elif shape.type_name == 'map':
        response = {}
    return response
09a3a143cbfb10a961807b26fcb7fd691a80e663
43,881
from hashlib import sha224


def create_path_for_genes_db(tissues):
    """Create almost-certainly unique path for a database which will contain
    information about genes having variant-egene pairs in given tissues."""
    tissues_serialized = ','.join(tissues).encode('utf-8')
    tissues_hash_code = sha224(tissues_serialized).hexdigest()

    return 'genes_{hash_code}.db'.format(
        hash_code=tissues_hash_code
    )
39b93ffeb1fa8a0fd20b0d9c1f6554e93ed4b15f
43,884
def merge_config(config, *argv):
    """
    merge multiple configs
    """
    cf = {}
    cf.update(config)
    for d in argv:
        cf.update(d)
    return cf
3efa14ec86ba234303c4eb11a349202546067c19
43,886
import six


def _to_text(obj, encoding):
    """
    In Python3:
        Decode the bytes type object to str type with specific encoding

    In Python2:
        Decode the str type object to unicode type with specific encoding,
        or we just return the unicode string of object

    Args:
        obj(unicode|str|bytes) : The object to be decoded.
        encoding(str) : The encoding format

    Returns:
        decoded result of obj
    """
    if obj is None:
        return obj

    if isinstance(obj, six.binary_type):
        return obj.decode(encoding)
    elif isinstance(obj, six.text_type):
        return obj
    elif isinstance(obj, (bool, float)):
        return obj
    else:
        return six.u(obj)
d1ea66b7d27b6ebc90e5252038254235e3b599ce
43,888
def make_wide(df, cols):
    """
    Pivot an ACS table.
    This function takes rows and pivots them to be columns.

    df: DataFrame
    cols: list.
        One or more values from the new_var column may be given as a list.
        This function takes those values (rows), reshapes, and returns
        them as columns in a new df.
    """
    return (
        df[df.new_var.isin(cols)]
        .assign(num=df.num.astype("Int64"))
        .pivot(index="GEOID", columns="new_var", values="num")
        .reset_index()
        .rename_axis(None, axis=1)
    )
6f2559bf6b6f513d7510e2dc09b05248cce77232
43,890
def get_file_size(file_path):
    """
    Get the size of a file in bytes.
    """
    input_file = open(file_path, "rb")
    input_file.seek(0, 2)
    file_size = input_file.tell()
    input_file.close()
    return file_size
e073daa27ca3a52268d33f968a06d9c4f43b0d09
43,894
def sentences(s):
    """Split the string s into a list of sentences."""
    assert isinstance(s, str)
    pos = 0
    sentence_list = []
    l = len(s)
    while pos < l:
        try:
            p = s.index('.', pos)
        except ValueError:
            p = l + 1
        try:
            q = s.index('?', pos)
        except ValueError:
            q = l + 1
        try:
            e = s.index('!', pos)
        except ValueError:
            e = l + 1
        end = min(p, q, e)
        sentence_list.append(s[pos:end].strip())
        pos = end + 1
    if sentence_list:
        return sentence_list
    # If no sentences were found, return a one-item list containing the
    # entire input string.
    return [s]
5095f8d56f08a60a62aa7ba430751f4c35865297
43,896
def find_intersection(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    find_intersection will find the intersection point of two line segments
    when four coordinates are given for both line segments. It will return
    the intersection point as a tuple.

    :param x1: x-coordinate of vertex 1 in line 1
    :param y1: y-coordinate of vertex 1 in line 1
    :param x2: x-coordinate of vertex 2 in line 1
    :param y2: y-coordinate of vertex 2 in line 1
    :param x3: x-coordinate of vertex 1 in line 2
    :param y3: y-coordinate of vertex 1 in line 2
    :param x4: x-coordinate of vertex 2 in line 2
    :param y4: y-coordinate of vertex 2 in line 2
    :return: intersection point of two line segments as tuple
    """
    # Values that exist in both px and py
    cross_1 = (x1 * y2 - y1 * x2)
    cross_2 = (x3 * y4 - y3 * x4)
    denominator = ((x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4))

    # Find the x-coordinate of the center
    px = (cross_1 * (x3 - x4) - (x1 - x2) * cross_2) / denominator

    # Find the y-coordinate of the center
    py = (cross_1 * (y3 - y4) - (y1 - y2) * cross_2) / denominator

    # Return the center as tuple
    return px, py
a61d9999b0c4217f0b3156e131c0acb83f654826
43,905
def _get_assignmentgroup_name(assigmentgroup):
    """
    Returns a string containing the group members of the
    assignmentgroup separated by '-'.
    """
    cands = assigmentgroup.get_candidates()
    cands = cands.replace(", ", "-")
    return cands
469ff5e795e7e338d0cacce9fc275602a3c0d9a8
43,906
def mipmap_levels(base_width, base_height):
    """Return max number of mipmap levels for the size

    Args:
        base_width (int): Width source
        base_height (int): Height source

    Returns:
        int: Number of mipmap levels
    """
    width = base_width
    height = base_height
    levels = 1
    while width > 1 or height > 1:
        width = width // 2 or 1
        height = height // 2 or 1
        levels += 1
    return levels
193c076aeeab36cc80f9e0862f67b4666dc9c902
43,909
def _format_2(raw):
    """
    Format data with protocol 2.

    :param raw: returned by _load_raw
    :return: formatted data
    """
    return raw[1:], raw[0]
6c750a37aecfd36922b765d1c5a1c1e30ea8ca68
43,911
from typing import Sequence


def _group_dimensions(expr: str) -> Sequence[str]:
    """Splits an expression into its separate grouped dimensions.

    An unqualified dimension index is a group by itself. Parentheses are
    interpreted as demarcating a sequence of dimension indices to be grouped
    into a single dimension. '1' is an alias for '()', denoting a dimension of
    size 1 with no indices. Nested parentheses are not permitted.

    Examples:
      'ijk' is grouped as ['i', 'j', 'k']
      '(mn)hwc' is grouped as ['mn', 'h', 'w', 'c']
      'n111' is grouped as ['n', '', '', '']
      'n...' is grouped as ['n', '...'], where '...' stands for multiple groups.

    Args:
      expr: Shape expression to group.

    Returns:
      List of simple expressions, each consisting solely of dimension indices,
      specifying the indices that constitute each grouped dimension.
    """
    groups = []
    i = 0
    while i < len(expr):
        if expr[i].isalpha():
            # Top-level dimension index is a group by itself.
            groups.append(expr[i])
            i += 1
        elif expr[i] == '1':
            # Dimension of size 1 with no indices; equivalent to '()'.
            i += 1
            groups.append('')
        elif expr[i] == '(':
            # Sequence of indices to be grouped as a single dimension.
            i += 1
            group_begin = i
            while i < len(expr) and expr[i].isalpha():
                i += 1
            group_end = i
            if not (i < len(expr) and expr[i] == ')'):
                raise ValueError('Unclosed parenthesis')
            i += 1
            groups.append(expr[group_begin:group_end])
        elif expr[i:].startswith('...'):
            # Wildcard sequence of dimensions.
            i += len('...')
            if '...' in groups:
                raise ValueError('Wildcard "..." may only occur once')
            groups.append('...')
        else:
            raise ValueError(f'Illegal character: {ord(expr[i])}')
    return groups
578a9f990a66c050806260e6dfcf915e916c99d2
43,913
def _is_generic(cls) -> bool:
    """Return True if cls is a generic type. For example, List or List[int]."""
    if cls.__module__ != "typing":
        if not any(c.__module__ == "typing" for c in cls.mro()):
            return False

    params = getattr(cls, "__parameters__", ())
    if params:
        return True

    return bool(getattr(cls, "__args__", ()))
cfc03585f0f1d1abc18e1f72edc54fb480eeff4e
43,919
def get_video_type(link):
    """
    Takes a url and decides if it's Vimeo or YouTube.
    Returns None for unknown types.
    """
    if 'vimeo.com/' in link:
        return 'vimeo'
    elif 'youtube.com/' in link or 'youtu.be' in link:
        return 'youtube'
    return None
a6f514c9eeae211490d61b5aa1cc635177b28809
43,923
def calc_average_precision(precisions, recalls):
    """Calculate average precision defined in VOC contest."""
    total_precision = 0.
    for i in range(11):
        index = next(conf[0] for conf in enumerate(recalls) if conf[1] >= i / 10)
        total_precision += max(precisions[index:])
    return total_precision / 11
dcdd48bb0b6845c05c6c964f01060b53117d184b
43,930
def filter_duplicates(l):
    """
    >>> filter_duplicates([{'a': 1}, {'b': 1}, {'a': 1}])
    [{'a': 1}, {'b': 1}]
    """
    def make_hashable(o):
        try:
            hash(o)
            return o
        except TypeError:
            return helper[type(o)](o)

    helper = {
        set: lambda o: tuple([make_hashable(e) for e in o]),
        tuple: lambda o: tuple([make_hashable(e) for e in o]),
        list: lambda o: tuple([make_hashable(e) for e in o]),
        dict: lambda o: frozenset(
            [(make_hashable(k), make_hashable(v)) for k, v in o.items()]),
    }
    l = list(l)
    return list({
        hashable: entry
        for hashable, entry in zip(make_hashable(l), l)
    }.values())
2652134e8ee1e3f357daa37aa659a456ede8b06a
43,933
import math


def generate_sine(offset: float = 0, sine_range: float = 1, second: int = 0, second_range: int = 60):
    """Return the point along an optionally-transformed sine wave for a given second"""
    return sine_range * math.sin((second / second_range) * 2 * math.pi) + offset
3b2516e8ee85941abc83bbd291aaa6bc144429b3
43,936
import inspect
import importlib


def list_class_names(clz, package):
    """
    Find sub-classes in a specific module and return their names

    Args:
        clz: the target superclass
        package: the module

    Returns:
        list of potential classes
    """
    def isclz(obj):
        if inspect.isclass(obj):
            return issubclass(obj, clz) and not obj == clz
        return False

    module = importlib.import_module(package)
    return [name for name, _ in inspect.getmembers(module, isclz)]
1f996b46de94fb656719cea1035769fe1ebb357e
43,944
def despine_ax(ax=None):
    """
    Remove spines and ticks from a matplotlib axis

    Parameters
    ----------
    ax : matplotlib.axes.Axes object
        axes from which to remove spines and ticks. if None, do nothing

    Returns
    -------
    ax : matplotlib.axes.Axes object
        despined ax object
    """
    # Nothing real passed in.
    if ax is None:
        return None

    # remove spines
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)

    ax.set_xticks([])
    ax.set_yticks([])

    return ax
ab806f4d225099316db9f6e34fc5fc568d3f05aa
43,945