content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def count_lines(abspath):
    """Count how many lines are in a pure text file.

    Args:
        abspath: Path to the file; opened in binary mode so decoding
            errors cannot interrupt the count.

    Returns:
        Number of lines (0 for an empty file).
    """
    count = 0
    with open(abspath, "rb") as f:
        for _ in f:
            # Removed a stray `pass` that followed the increment.
            count += 1
    return count
c892cefca6ec16b8d2b96b4b9bae3be6e22366d7
640,910
import logging


def ensure_instance_group_manager_matches(manager_cfg, instance_group_manager):
    """Ensures the InstanceGroupManager matches the config.

    Args:
        manager_cfg: proto.config_pb2.InstanceGroupManagerConfig.InstanceGroupManagers.
        instance_group_manager: models.InstanceGroupManager.

    Returns:
        Whether or not the given InstanceGroupManager was modified.
    """
    changed = False
    for attr, label in (("minimum_size", "minimum"), ("maximum_size", "maximum")):
        desired = getattr(manager_cfg, attr)
        current = getattr(instance_group_manager, attr)
        if current != desired:
            logging.info(
                'Updating %s size (%s -> %s): %s',
                label,
                current,
                desired,
                instance_group_manager.key,
            )
            setattr(instance_group_manager, attr, desired)
            changed = True
    return changed
2266ba7c98fcd415b11c94511584f79cc9f222b0
516,681
def _gen_image_name(instance, _): """ Returns the upload path (relative to settings.MEDIA_ROOT) for the specified Team's image. """ # Must "return a Unix-style path (with forward slashes)" return 'team-images' + '/' + str(instance.user.id) + '.png'
d6a0c1cab445d22e5e4504b4175c33b661e3188b
345,797
def attrs(**kwargs):
    """
    Decorator factory that sets the given keyword arguments as
    attributes on the decorated function.
    """
    def _decorator(func):
        for attr_name, attr_value in kwargs.items():
            setattr(func, attr_name, attr_value)
        return func
    return _decorator
1356f817a7dbe1ade5f560842d315b18c1876db2
253,229
def to_list(val):
    """Wrap *val* in a list unless it already is exactly a ``list``."""
    # Deliberately an exact type check: list subclasses are wrapped too,
    # matching the original `type(val) is list` semantics.
    if type(val) is list:
        return val
    return [val]
c087b7cee3ed633da959d3c77ada38f7a1cc13e8
628,208
def localisationEnabled(knob):
    """localisationEnabled(knob) -> bool

    Checks if localisation is enabled on a given Read_File_Knob.

    @param knob: The Read_File_Knob to check.
    @return: true if enabled, false otherwise
    """
    # Stub implementation: always reports localisation as enabled.
    return True
6474cf92629291d63eedf9ec6fbc828d98593c6a
439,039
def count_lines(filename):
    """Count the number of lines in the given file."""
    with open(filename) as f:
        return sum(1 for _ in f)
3e2d707b488a29512963223682ad418cce1aa70e
432,359
from typing import List
import random


def create_random_list(length: int = 50, lowest_num: int = 0, highest_num: int = 5) -> List[int]:
    """Return a list of `length` random ints in [lowest_num, highest_num].

    Used to exercise the return_index function in tests.
    """
    return [random.randint(lowest_num, highest_num) for _ in range(length)]
d57276acc2fcceacd377e1e394e9ea89f28a6f56
452,138
def _include_file_data(login, record): """Ensure that 'files' field is present in the input record""" if 'files' not in record: if 'files' in record.get('links', {}): url = record['links']['files'] elif 'id' in record: url = login.base_url + 'api/deposit/depositions/{0}/files'.format(record['id']) else: return record # We have failed, but we're not too upset r = login.session.get(url) if r.status_code == 200: # Otherwise, we just couldn't add this record; oh well. r_json = r.json() record['files'] = r_json return record
578c75ed34460c408fe52f28ea5fdf38de57ef6b
664,457
def flatten_dictionary(dictionary):
    """
    Input: a request's JSON dictionary output with nested dictionary
    Output: a flattened dictionary (format: key1.key2 = value2)
    """
    flat = {}
    for key, value in dictionary.items():
        if not isinstance(value, dict):
            flat[key] = value
            continue
        # One level of nesting is flattened with dotted keys.
        for inner_key, inner_value in value.items():
            flat[key + '.' + inner_key] = inner_value
    return flat
1ca7c9021360bc6c39fb1f3ba07794ac74831272
47,177
def seq_names(fasta_file):
    """Get sequence names from a FASTA file.

    Args:
        fasta_file: path to the FASTA file.

    Returns:
        List of sequence names (text after '>' up to end of line), with
        empty entries dropped.
    """
    # A context manager guarantees the handle is closed even if read() fails;
    # the original closed it manually and leaked on error.
    with open(fasta_file) as handle:
        fasta = handle.read()
    names = [chunk.split("\n")[0] for chunk in fasta.split(">")]
    return [name for name in names if name != ""]
3fdc152552534353f75355004f37a3a7f923d488
656,176
def decodeMsg(aStr):
    """Decode a message received from the hub, restoring the newlines
    that were transmitted as vertical tabs.
    """
    return "\n".join(aStr.split("\v"))
f84cb538bc86df7f24ffeb375d3ddc42b7a9b80f
667,305
def unpack_checkpoint(chkpt):
    """Unpacks a checkpoint.

    Args:
        chkpt: Checkpoint previously saved by calling save_checkpoint.

    Returns:
        Tuple of (epoch, encoder, decoder, encoder_optimizer,
        decoder_optimizer, metrics) as stored in the checkpoint dict.
    """
    keys = ('epoch', 'encoder', 'decoder',
            'encoder_optimizer', 'decoder_optimizer', 'metrics')
    return tuple(chkpt[key] for key in keys)
2c4475e450d051a7628bfc9af00686573c5717af
458,779
def is_warmer_than_avg(cur_temp, avg_temp, threshold):
    """
    Purpose: Determine if the current temp is warmer than the average temp
    while being outside the avg_temp/threshold window
    cur_temp: int
    avg_temp: int
    threshold: pos int
    """
    if threshold < 0:
        raise ValueError("threshold must be a positive int")
    # cur_temp >= avg_temp is implied by cur_temp >= avg_temp + threshold
    # (threshold is non-negative), so a single comparison suffices.
    return cur_temp >= avg_temp + threshold
34270502b192f6d7bd392413d95721831ad6c9d3
459,182
def terraform_current_workspace(terraform):
    """Return the name of the current terraform workspace."""
    _, stdout, _ = terraform.cmd('workspace show')
    return stdout.rstrip()
079ab06b2cfb5e030519567fda3916b8cad18d91
62,555
def is_key_value_exist(list_of_dict, key, value):
    """
    Check if at least one dict in the list maps `key` to `value`.

    Like the original, raises KeyError if a dict lacks `key` before a
    match is found.
    """
    return any(entry[key] == value for entry in list_of_dict)
b4ea785fdfe3f9b1284f0afd1e58af67817cedd0
618,426
def padto(msg, length):
    """Pads 'msg' with zero bytes until its length is divisible by 'length'.
    If the length of msg is already a multiple of 'length', does nothing."""
    remainder = len(msg) % length
    if remainder:
        msg = msg + '\x00' * (length - remainder)
    assert len(msg) % length == 0
    return msg
1ee48e4a49f76fde6d9dc11de165c2972f10ef22
511,050
import psutil


def get_ppn(includethreads=False):
    """Return the number of processors on this node.

    Args:
        includethreads: count logical CPUs (hyper-threads) when True,
            physical cores only when False.

    For alternative solutions:
    https://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-using-python
    """
    # Pass by keyword so the mapping onto psutil's `logical` flag is
    # explicit instead of an easily-broken positional argument.
    return psutil.cpu_count(logical=includethreads)
3a23ad50be99da9aa3615e6724f844bbe3f1c62a
453,943
def vector_to_dictionary(vector, layers):
    """
    Convert the parameter vector of a model into a dictionary used by the model

    Arguments:
    vector -- one-dimensional vector in orders: "W1", "W2", "WL", "b1", "b2", "bL"
    layers -- list of (n_uints, activations) pairs that define network
              structure, including input layer X

    Returns:
    ret -- parameter dictionary, {"W1": ..., "WL": ..., "b1": ..., "bL": ...,
           "r1": ..., "rL": ...}
    """
    params = {}
    offset = 0

    def take(count, shape):
        # Slice `count` entries off the vector and reshape them.
        nonlocal offset
        block = vector[offset:offset + count].copy().reshape(shape)
        offset += count
        return block

    n_layers = len(layers)
    # Ws first, then bs, then rs — matching the vector's layout.
    for l in range(1, n_layers):
        params["W" + str(l)] = take(layers[l][0] * layers[l - 1][0],
                                    (layers[l][0], layers[l - 1][0]))
    for prefix in ("b", "r"):
        for l in range(1, n_layers):
            params[prefix + str(l)] = take(layers[l][0], (layers[l][0], 1))
    return params
33f628463546892ae8127cad25bda1e7ae19a78a
10,637
def _ComponentFromDirmd(json_data, subpath): """Returns the component for a subpath based on dirmd output. Returns an empty string if no component can be extracted Args: json_data: json object output from dirmd. subpath: The subpath for the directory being queried, e.g. src/storage'. """ # If no component exists for the directory, or if METADATA migration is # incomplete there will be no component information. return json_data.get('dirs', {}).get(subpath, {}).get('monorail', {}).get('component', '')
178a86b2969c0bb7f5c141cf8a73c95c71dbf7f5
511,507
def normalise_suffix(suffix):
    """Bring suffix to lowercase without a leading dot."""
    # removeprefix strips at most one leading dot, matching the original
    # startswith('.') / slice pair.
    return suffix.removeprefix('.').lower()
c4e92087571f1789a804cc665144c71de4161316
617,986
from typing import List
from typing import Tuple


def are_filters_match_response_content(all_filter_arguments: List[Tuple[list, str]], api_response: dict) -> bool:
    """
    Verify whether any filter arguments of a command match the api response content.

    Args:
        all_filter_arguments (list[tuple]): pairs of filter arguments inputs & a response key.
        api_response (dict): api response.

    Returns:
        bool: True if in any of the filter arguments there was a match, False otherwise.
    """
    return any(
        arg == api_response.get(resp_key)
        for command_args, resp_key in all_filter_arguments
        for arg in command_args
    )
12fbd55bc8c1dcf2fd4712585f1788a808c5f9dc
154,109
def corr(self, parameter1, parameter2, method='pearson', min_periods=1):
    """
    Compute pairwise correlation of the data columns parameter1 and
    parameter2, excluding NA/null values.

    Parameters
    ----------
    parameter1: str
        Key name of the column 1 to correlate.
    parameter2: str
        Key name of the column 2 to correlate.
    method: {'pearson', 'kendall', 'spearman'}
        pearson : standard correlation coefficient
        kendall : Kendall Tau correlation coefficient
        spearman : Spearman rank correlation
    min_periods : int, optional
        Minimum number of observations required per pair of columns to have
        a valid result. Currently only available for pearson and spearman
        correlation.

    Returns
    -------
    correlation_number: float
    """
    series_a = self.data[parameter1]
    series_b = self.data[parameter2]
    return series_a.corr(series_b, method=method, min_periods=min_periods)
5f544f27300b3143465a55a68b2f012711835b81
607,137
import re


def new_exposures(out):
    """Scan rsync output for exposures to be transferred.

    Parameters
    ----------
    out : :class:`str`
        Output from :command:`rsync`.

    Returns
    -------
    :class:`set`
        The unique exposure numbers detected in `out`.
    """
    exposure_re = re.compile(r'([0-9]{8})/?')
    exposures = set()
    for line in out.split('\n'):
        match = exposure_re.match(line)
        if match is not None:
            exposures.add(match.group(1))
    return exposures
8fc8817fe0ad79c177473ec676853569d733ec65
12,976
from typing import Optional
import imghdr


def find_file_with_imghdr(data: bytes) -> Optional[str]:
    """Find images with the help of imghdr magic numbers

    :param data: Bytes to be examined.
    :return: The detected file type string, or None if nothing matched even
        after skipping a potential leading padding byte.
    """
    # NOTE(review): imghdr was removed from the stdlib in Python 3.13;
    # this code targets older interpreters.
    detected = imghdr.what("", data)
    if detected:
        return detected
    # Retry with one potential padding byte stripped.
    return imghdr.what("", data[1:])
487fdac76a6ed11c44476d7fdbe9a7d51bc3baee
392,033
def convert_cost_units(items):
    """Replace each item's cost_unit choice with its display value.

    For example a cost_unit tuple (type="per-packet", value="Per Packet")
    becomes the `str` "Per Packet". Mutates the items in place and returns
    the same sequence.
    """
    for entry in items:
        entry.cost_unit = entry.cost_unit.value
    return items
8fd736ab68cd6008f33a944efcc4a8b5238af5e7
641,548
def pv(rate, nper, pmt, fv):
    """Calculate the present value of an asset.

    Parameters
    ----------
    rate : float
        Interest rate per period.
    nper : int
        Number of payment periods.
    pmt : float
        Constant payment made each period.
    fv : float
        Future value, i.e., balance after the last payment is made.

    Returns
    -------
    float
    """
    if rate == 0:
        # No discounting: present value is the (negated) simple sum.
        return -(fv + pmt * nper)
    growth = (1 + rate) ** nper
    annuity = pmt * (growth - 1) / rate
    return -(fv + annuity) / growth
9e8193013c8079f8f2368b907a6d6dd816daf463
284,175
def qpu_backend(provider):
    """Get the QPU backend from a provider.

    Args:
        provider (IonQProvider): Injected provider from :meth:`provider`.

    Returns:
        IonQQPUBackend: An instance of an IonQQPUBackend.
    """
    backend_name = "ionq_qpu"
    return provider.get_backend(backend_name)
ce155b6795c1b4e49e889989f2d87611c7e2da40
307,740
def normalize_attributes(xml):
    """Sort each element's XML attributes to make diffs deterministic."""
    for element in xml.iterfind(".//*"):
        if not element.attrib:
            continue
        ordered = sorted(element.attrib.items())
        element.attrib.clear()
        element.attrib.update(ordered)
    return xml
bc14ce3cb24ffc998f4e9019b2002e79a3a96896
431,994
def parse_line(line):
    """
    Parse a data-file line, separating the customer from the amount.

    :param line: a line from the data file
    :return: a tuple with customer (int, field 0) and amount (float, field 2)
    """
    fields = line.split(",")
    return int(fields[0]), float(fields[2])
4eb00b0537195ee40e3570183171b07f3c7f8921
202,727
def next_power_of_2(number):
    """Given a number returns the following power of 2 of that number."""
    if number == 0:
        return 1
    return 2 ** (number - 1).bit_length()
e612f50d96b10e222bb244bdf0051d5c17c4e3c0
237,703
from typing import List
from typing import Optional


def build_uri(scheme: str, loc: str, paths: Optional[List[str]] = None, **kwargs) -> str:
    """
    Used to build URIs

    :param scheme: scheme e.g. http in http://google.com/
    :param loc: netloc e.g. google.com in http://google.com/
    :param paths: a list of paths e.g. ["example", "path"] in
        http://google.com/example/path
    :param kwargs: parameters e.g. {NETLOC}?k1=v&k2=v
    :return: the assembled URI
    """
    # Default changed from a shared mutable [] to None; an empty list and
    # None behave identically in the truthiness check below.
    path = "/" + "/".join(paths) if paths else ""
    params = "?" + "&".join(f"{key}={value}" for key, value in kwargs.items()) if kwargs else ""
    return f"{scheme}://{loc}{path}{params}"
e2d03f675c5a91631edbc185c7bebca54284b882
657,105
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """
    Given a list of possible decoded messages, narrow down the possibilities
    by checking for the presence of a specified common word. Only decoded
    messages containing common_word will be returned.

    >>> filter_common_word(['asfla adf', 'I am here', ' !?! #a'], 'am')
    ['I am here']
    >>> filter_common_word(['athla amf', 'I am here', ' !?! #a'], 'am')
    ['athla amf', 'I am here']
    """
    # Lowercase both sides: the candidates were lowered for the comparison,
    # but an upper/mixed-case common_word could previously never match.
    target = common_word.lower()
    return [possible for possible in possibles if target in possible.lower()]
035c0b4f5ead61479161a1c3540ba31effc0397b
349,428
def func_star(a_b, func):
    """Apply `func` to the unpacked elements of `a_b`.

    Converts a `f([1, 2])`-style call into `f(1, 2)`.
    """
    args = a_b
    return func(*args)
836b1fe67d280fa304d34af4f9e83e846f8d9085
118,339
import hashlib


def get_sha1_hash(password):
    """For a given password string, utf-8 encode it and return its SHA1
    hex digest."""
    digest = hashlib.sha1(password.encode('utf8'))
    return digest.hexdigest()
a7155cd1da31930fbc2f7a24875b75f3b444354c
659,059
def get_prefixed_tag(prefix: str, target_key) -> list:
    """Return all the tags on a key that start with the given prefix."""
    return [tag for tag in target_key.getTags() if tag.startswith(prefix)]
9637ca71e5d0374b1b5b712a70e27ba35cb34b78
479,716
import math


def calculate(dimension):
    """Return the number of lattice routes from the top-left to the
    bottom-right of an n-dimension 2d grid.

    Uses the closed form C(2n, n). math.comb keeps the arithmetic in exact
    integers; the previous float division could lose precision for large
    dimensions.
    """
    return math.comb(2 * dimension, dimension)
63efc151bb6e67cca5b4797cc85372f080518b54
538,511
def realign_shifted_streams(tokens, durations, F0s, shifts):
    """
    Realign per-frame streams that were emitted with per-stream delays.

    Durations are shifted by 1, F0 by 2

    >>> tokens = ["<s>", "t1", "t2", "t3", "</s>", "x", "x"]
    >>> durations = ["<0>", "<0>", "d1", "d2", "d3", "<0>", "x"]
    >>> F0s = ["<0>", "<0>", "<0>", "f1", "f2", "f3", "<0>"]
    >>> shifts = [1,2]
    >>> realign_shifted_streams(tokens, durations, F0s, shifts)
    (['<s>', 't1', 't2', 't3', '</s>'], ['<0>', 'd1', 'd2', 'd3', '<0>'], ['<0>', 'f1', 'f2', 'f3', '<0>'])
    """
    max_shift = max(shifts)
    if max_shift > 0:
        # shifts = [duration_shift, F0_shift]
        shift_durations, shift_F0s = shifts
        # Tokens lose the trailing padding introduced by the largest shift.
        tokens = tokens[:-max_shift]
        # Each stream drops its own leading lag, then trims the remainder of
        # the shared tail (max_shift minus its own shift).
        durations = durations[shift_durations:]
        if shift_durations < max_shift:
            durations = durations[: -(max_shift - shift_durations)]
        if F0s is not None:
            F0s = F0s[shift_F0s:]
            if shift_F0s < max_shift:
                F0s = F0s[: -(max_shift - shift_F0s)]
    # All surviving streams must be frame-aligned with the tokens.
    assert len(tokens) == len(durations), f"{len(tokens)} =! {len(durations)}"
    if F0s is not None:
        assert len(tokens) == len(F0s), f"{len(tokens)} =! {len(F0s)}"
    return tokens, durations, F0s
e29675e36fd68a16c46f351874ef4002ffde81d4
626,843
from typing import List
import click


def list_options(options: List) -> int:
    """
    This is a utility for click (cli) that prints a list of items as a
    numbered list. It prompts users to select an option from the list.

    For example, `["item1", "item2", "item3"]` would print...

    ```
        (01) item1
        (02) item2
        (03) item3
    ```

    Parameters
    ----------
    - `options`: a list of strings to choose from

    Returns
    --------
    - `selected_index`: The integer value of the choice selected. This will
      follow python indexing so the index of the options list. (e.g. if
      item1 was selected, 0 would be returned)
    """
    for index, item in enumerate(options):
        number = str(index + 1).zfill(2)
        click.echo(f"\t({number}) {item}")

    # Subtract 1 because the menu is 1-based but python indexing is 0-based.
    selected_index = click.prompt("\n\nPlease choose a number:", type=int) - 1
    if selected_index >= len(options) or selected_index < 0:
        raise click.ClickException(
            "Number does not match any the options provided. Exiting."
        )
    # Fixed typo in the confirmation message ("selectd" -> "selected").
    click.echo(f"You have selected `{options[selected_index]}`.")
    return selected_index
62253091f6c05b688c4d8cb62d9ef81021c3bcb0
77,547
def load_contrib_worker(worker_name):
    """Look up a local worker by name in this module's globals."""
    workers = globals()
    return workers[worker_name]
fa26b6965b804fbb20d7aca6469f38ed28104af1
500,156
def cmd_run(module, cmd, check_rc=True):
    """Log and run ovs-vsctl command."""
    argv = cmd.split(" ")
    return module.run_command(argv, check_rc=check_rc)
9affc21a99ed793a92d9e1b9f7b3cd6b7c12bd75
417,170
def _compute_time(index, align_type, timings): """Compute start and end time of utterance. Adapted from https://github.com/lumaku/ctc-segmentation Args: index: frame index value align_type: one of ["begin", "end"] Return: start/end time of utterance in seconds """ middle = (timings[index] + timings[index - 1]) / 2 if align_type == "begin": return max(timings[index + 1] - 0.5, middle) elif align_type == "end": return min(timings[index - 1] + 0.5, middle)
d2efc4768d4779e8452afdc2015fffbd30d14ffe
655,282
from datetime import datetime
import pytz


def semester_year_to_date(semester, year, ending=False):
    """
    Convert semester and year to a rough date

    Args:
        semester (str): Semester ("Fall", "Spring", etc)
        year (int): Year
        ending (boolean): True for end of semester, False for beginning

    Returns:
        datetime: The rough date of the course, or None for unknown input
    """
    if semester is None or year is None:
        return None
    # (start, end) month-day bounds per semester.
    bounds = {
        "fall": ("09-01", "12-31"),
        "summer": ("06-01", "08-30"),
        "spring": ("01-01", "05-31"),
        "january iap": ("01-01", "01-31"),
    }
    try:
        start, end = bounds[semester.lower()]
    except KeyError:
        return None
    month_day = end if ending else start
    return datetime.strptime("{}-{}".format(year, month_day), "%Y-%m-%d").replace(
        tzinfo=pytz.UTC
    )
b7522d2ef60041654f04dbb49f78b2bc2de628a0
565,987
def construct_bestuurseenheid_exists_query(bestuurseeheid_uri):
    """
    Construct a query for asking if a bestuurseenheid exists in our database.

    :param bestuurseeheid_uri: string
    :returns: string containing SPARQL query
    """
    # {{ }} escape the literal braces of the ASK block for str.format.
    return """
    PREFIX besluit: <http://data.vlaanderen.be/ns/besluit#>
    ASK {{
        <{0}> a besluit:Bestuurseenheid .
    }}
    """.format(bestuurseeheid_uri)
0cdd139e4e9f1b8242e11aef7d272dea2ab36f9e
438,312
import math


def lcm(a, b):
    """Compute the lowest common multiple of a and b"""
    # gcd(a, b) always divides a, so the division is exact; dividing first
    # keeps intermediate values small for large inputs.
    return (a // math.gcd(a, b)) * b
99405f1fb340d5488de652e96e21eedd9e9fb205
458,874
def contains_secret(line: str, secret: str) -> bool:
    """Returns True if `line` contains an obfuscated version of `secret`"""
    # Obfuscated secrets keep the first and last six characters, quoted.
    head = f'"{secret[:6]}'
    tail = f'{secret[-6:]}"'
    return head in line and tail in line
1d52df114f70cbbd8a8477a3f49d1fe0b98d8602
174,639
def bool_to_indices(bool_tensor):
    """Convert an array of boolean flags to the list of set integer indices."""
    return [index for index, flag in enumerate(bool_tensor) if flag]
d75e0fc879a253d076a3d1351c8770ef82a1271d
171,164
import hashlib


def get_account_id(public_key: bytes) -> bytes:
    """
    Returns the account ID for a given public key. See
    https://xrpl.org/cryptographic-keys.html#account-id-and-address to learn
    about the relationship between keys and account IDs.

    Args:
        public_key: Unencoded public key.

    Returns:
        The account ID for the given public key (RIPEMD-160 of SHA-256).
    """
    sha_digest = hashlib.sha256(public_key).digest()
    # NOTE(review): ripemd160 availability depends on the OpenSSL build.
    ripemd = hashlib.new("ripemd160")
    ripemd.update(sha_digest)
    return ripemd.digest()
4345ed2a4ee751579a61238f1435803c3ef51cb9
164,115
from typing import List
from typing import Tuple


def neighbors(a: int, r: int, c: int) -> List[Tuple[int, int, int]]:
    """
    Return the list of all six neighbors of the passed coordinate
    """
    flip = 1 - a
    return [
        (flip, r - flip, c - flip),  # NW
        (flip, r - flip, c + a),     # NE
        (a, r, c + 1),               # E
        (flip, r + a, c + a),        # SE
        (flip, r + a, c - flip),     # SW
        (a, r, c - 1),               # W
    ]
1cac9230658cce56fc4cd564109ac5ed5e91e7e2
429,546
import math


def idf(posting, document_count):
    """A function to calculate the inverse document frequency for a posting.
    This is shared between the builder and the index.
    """
    # The "_index" entry is bookkeeping, not a field.
    documents_with_term = sum(
        len(posting[field_name].keys())
        for field_name in posting
        if field_name != "_index"
    )
    x = (document_count - documents_with_term + 0.5) / (documents_with_term + 0.5)
    return math.log(1 + abs(x))
4fa123410684220f24865a8b68e7cd06d55af151
539,828
def decode(data):
    """
    Decode bytes to a utf-8 string; pass strings through unchanged;
    stringify anything else.

    :param data: Byte or str
    :return: Decoded string
    """
    if isinstance(data, bytes):
        return data.decode()
    if isinstance(data, str):
        return data
    return str(data)
8e57b936902bccf2f0b91c800351ccaafdf33d79
456,926
def spacydoc2tokens(doc):
    """
    Transform spaCy doc to a list of token texts.

    :param doc: spaCy doc
    :return: list of token texts
    """
    tokens = []
    for token in doc:
        tokens.append(token.text)
    return tokens
23ca1cdf9395cac883719dedcf34748701484f3c
44,625
def _process_occurs_attrs(node): """Process the min/max occurrence indicators""" max_occurs = node.get("maxOccurs", "1") min_occurs = int(node.get("minOccurs", "1")) if max_occurs == "unbounded": max_occurs = "unbounded" else: max_occurs = int(max_occurs) return min_occurs, max_occurs
61456f36271e0398df41ecc63259cd09128994ec
275,979
def _path_to_string(path): """Convert a list of path elements into a single path string.""" return '.'.join(path)
f692275e52c40107d95af87e820da6f4a3b8f098
194,586
import torch


def ones(shape, dtype=None):
    """Wrapper of `torch.ones`.

    Parameters
    ----------
    shape : tuple of ints
        Shape of output tensor.
    dtype : data-type, optional
        Data type of output tensor, by default None
    """
    result = torch.ones(shape, dtype=dtype)
    return result
a234936baa16c8efdc63e903d8455895ab7f2f0c
704,440
def test_url(url_to_test):
    """Basic testing to check if a url is valid.

    Arguments:
        url_to_test {str} -- The url to check

    Returns:
        int -- Error number (0 = ok, 101 = empty, 102 = contains a space,
        103 = not a string)
    """
    if not isinstance(url_to_test, str):
        return 103
    cleaned = str(url_to_test).strip()
    if not cleaned:
        return 101
    return 102 if ' ' in cleaned else 0
60f65c534883da9c40b113f7f9458bcae657536e
476,092
def test(expected, actual, epsilon=0):
    """
    Takes expected and actual values. If they differ by more than epsilon,
    print a fail string; otherwise print a pass string.

    >>> test(2, 2)
    PASS: Got 2
    >>> test(2, 2.001, epsilon=.01)
    PASS: Got 2.001
    >>> test(2, 3)
    FAIL: Expected 2, got 3
    """
    passed = abs(expected - actual) <= epsilon
    if passed:
        print("PASS: Got {0}".format(actual))
    else:
        print("FAIL: Expected {0}, got {1}".format(expected, actual))
    return passed
9382172122fc3be4a07f61cf33ce737a51b40e21
532,165
import inspect


def has_arg(fn, name, accept_all=False):
    """Check if a callable accepts a given keyword argument.

    See https://github.com/tensorflow/tensorflow/pull/37004

    Arguments:
        fn: Callable to inspect.
        name: Check if `fn` can be called with `name` as a keyword argument.
        accept_all: What to return if there is no parameter called `name` but
            the function accepts a `**kwargs` argument.

    Returns:
        bool, whether `fn` accepts a `name` keyword argument.
    """
    spec = inspect.getfullargspec(fn)
    if accept_all and spec.varkw is not None:
        return True
    return name in spec.args or name in spec.kwonlyargs
1bb52cad13b72ecbd6f7778171d9e9382c7fcc1f
356,884
def get_admin_ids(bot, chat_id):
    """
    Returns a list of admin IDs for a given chat.

    Private chats and groups with the all_members_are_administrators flag
    are handled as an empty admin list.
    """
    chat = bot.getChat(chat_id)
    if chat.type == "private" or chat.all_members_are_administrators:
        return []
    admins = bot.get_chat_administrators(chat_id)
    return [admin.user.id for admin in admins]
6bd89e1d6b7333d97cbc60fd2617a86d1b69fb2f
9,910
def array_to_tuple(x):
    """Convert an array into a flat tuple (row-major order)."""
    flat = x.reshape(1, -1)[0]
    return tuple(flat)
e496a009d297c7d7b59b5fddb81efb89cf7c470c
282,192
def read_path(path):
    """
    Fetch the contents of a filesystem `path` as bytes.

    The file handle is closed deterministically; the original relied on
    garbage collection to close it.
    """
    with open(path, 'rb') as handle:
        return handle.read()
3ea784bf47cd87deb245608ccce8aca03f76e4ac
595,756
def get_grams(sentence, n):
    """
    Return phrases, i.e. windowed sub strings, for window sizes 1..n.

    Keyword arguments:
    sentence -- utterance as a token sequence
    n -- max_ngram (int)
    """
    all_grams = []
    # Grams are grouped by window size, smallest first, matching the
    # original ordering.
    for size in range(1, n + 1):
        for start in range(len(sentence) - size + 1):
            all_grams.append(" ".join(sentence[start:start + size]))
    return all_grams
e23d88476231cf7912adadbee2f4159be81f262b
640,243
def corrected_pas(partitionA, partitionB, taxlen=None, excluded=None):
    """
    Computed corrected partition agreement score.

    The corrected partition agreement score corrects for singleton character
    states and for character states that recur in all the taxonomic units in
    the data. These extreme cases are successively ignored when computing
    the partition agreement score.

    @param partitionA, partitionB: set partitions to be compared
    @param taxlen: if set to None, the number of taxa will be computed from
        partitionA
    @param excluded: how to return excluded characters (defaults to None)
    """
    links, matches = [], []
    # Prune by getting the number of taxa described by each partition.
    if not taxlen:
        all_taxa = set()
        for prt in partitionA:
            for taxon in prt:
                all_taxa.add(taxon)
        taxlenA = len(all_taxa)
        all_taxa = set()
        for prt in partitionB:
            for taxon in prt:
                all_taxa.add(taxon)
        taxlenB = len(all_taxa)
    else:
        taxlenA, taxlenB = taxlen, taxlen
    for i, prtB in enumerate(partitionB):
        for j, prtA in enumerate(partitionA):
            # Ignore singletons and sets covering all taxa (the "extreme"
            # cases the correction is about).
            if taxlenA > len(prtA) > 1 and taxlenB > len(prtB) > 1:
                if prtA.intersection(prtB):
                    links += [1]
                    if prtB.issubset(prtA):
                        matches += [1]
    if matches:
        return sum(matches)/sum(links)
    elif links:
        return 0
    # Nothing comparable survived the pruning: report as excluded.
    return excluded
9fb8b6a30498808eb1ba528f5b897a4aced3b54e
91,784
def get_color_indexes(msg_list):
    """
    Return the indexes of the lines preceding {correct} (green) and
    {student} (red).
    """
    indexes = {}
    for position, line in enumerate(msg_list):
        if "{correct}" in line:
            indexes["green"] = position - 1
        elif "{student}" in line:
            indexes["red"] = position - 1
    return indexes
af0d5c22ae7b0d19afdc26f7ca8de8ec2a0c8a61
285,390
def get(a, b):
    """Return `a` unless it is None, in which case return `b`."""
    if a is None:
        return b
    return a
91e511f0ad2cbf62c4463d36a34525a1b71b281f
561,325
def get_num_msgs_for_file(file_name, summary):
    """Return the number of static-analysis messages for a source file.

    Parameters
    ----------
    file_name: Source file name.
    summary: A dictionary containing per-file dictionaries of static code
        analysis messages (message -> occurrence count), or None.

    Returns
    -------
    The total number of messages for the source file, 0 when summary is None.
    """
    if summary is None:
        return 0
    # `.iteritems()` was Python 2 only and raises AttributeError on Python 3;
    # only the occurrence counts are needed, so sum the values directly.
    return sum(summary[file_name].values())
68ba82c299a599ca1a1d358a1dd34aca8aea1cd7
424,270
def get_pred(succ):
    """Given a successor edge map, produce an inverted predecessor edge map.

    Every key of `succ` appears in the output, even with no predecessors.
    """
    pred = {node: [] for node in succ}
    for source, successors in succ.items():
        for target in successors:
            pred[target].append(source)
    return pred
658d96292d814915daea273bb7f943ca8c160f3a
607,974
def get_raster(raster_list, y):
    """
    Pull the raster whose filename mentions year `y`, rather than assuming
    a 1:1 relationship between items in raster_list and years.

    Assumes each filename contains a unique year; if several match, the
    last one wins (as before).

    :param raster_list: candidate raster file names
    :param y: year to look for
    :return: matching raster name, or None
    """
    year = str(y)
    matches = [name for name in raster_list if year in name]
    return matches[-1] if matches else None
0cfba9dc557cb69fea20f6a4562cacbd73a9d706
285,093
def readable_keyword(s):
    """Return keyword with only the first letter in title case."""
    # Empty strings and decorated names ('*...', '[...') pass through.
    if not s or s.startswith("*") or s.startswith("["):
        return s
    if "." in s:
        library, name = s.rsplit(".", 1)
        return "{}.{}{}".format(library, name[0].title(), name[1:].lower())
    return s[0].title() + s[1:].lower()
cda8151937feae49b69a945ae2f8feb8becb3d77
684,890
def parse_screen_properties_to_resolution_and_position(resolutions):
    """
    Parse a list of resolutions of the format ####x####+####+#### into
    width, height, x_position, y_position.

    :param resolutions: A list of resolutions in the form ####x####+####+####
    :return: A list of dictionaries of the form
        {'width': #, 'height': #, 'x_pos': #, 'y_pos': #}
    """
    # Removed a compiled regex that was never used; the format is simple
    # enough for plain splits.
    parsed_resolutions = []
    for res in resolutions:
        width, rest = res.split('x')
        height, x_pos, y_pos = rest.split('+')
        parsed_resolutions.append({
            "width": int(width),
            "height": int(height),
            "x_pos": int(x_pos),
            "y_pos": int(y_pos),
        })
    return parsed_resolutions
a331da8d696c0fd636ecc47c47e3a2eb1ed96398
515,330
def bps_to_human(bps):
    """Convert a bits-per-second value to a human readable string."""
    if bps >= 1000000:
        return "%f Mbps" % (float(bps) / 1000000)
    if bps >= 100000:
        return "%f Kbps" % (float(bps) / 1000)
    return "%u bps" % bps
1c2365cbb357b36b77156550ef21646ed0e9db10
179,798
def label_smoothing(labels, epsilon=0.1):
    """
    Implements label smoothing. This prevents the model from becoming
    over-confident about its predictions and thus, less prone to overfitting.
    Label smoothing regularizes the model and makes it more adaptable.

    :param labels: 3D tensor with the last dimension as the number of labels
    :param epsilon: smoothing rate
    :return: smoothed labels
    """
    num_labels = labels.get_shape().as_list()[-1]
    scaled = (1 - epsilon) * labels
    return scaled + (epsilon / num_labels)
aba83369639075d7fd031bea93d3ee9fff001fde
507,833
def poly(x, coefficients):
    """
    Evaluate a polynomial, useful for transformation laws.

    parameters:
        x: Variable for the polynomial
        coefficients: list of coefficients, lowest order first

    returns:
        float result of the polynomial
    """
    return sum(coef * x ** power for power, coef in enumerate(coefficients))
bc106afce8a91eefbdc1a9cdd1a86d2a1858d8e0
187,402
import copy


def MergeCallingServiceAccountWithOwnerPermissionsIntoBindings(env, properties):
    """ A helper function that merges the acting service account of the
    project creator as an owner of the project being created """
    # The Deployment Manager service account acting for this project.
    service_account = ('serviceAccount:{0}@cloudservices.gserviceaccount.com'.format(env['project_number']))
    set_creator_sa_as_owner = {
        'role': 'roles/owner',
        'members': [
            service_account,
        ]
    }
    # No user-supplied policy: return a minimal policy with just this binding.
    if 'iam-policy' not in properties:
        return {
            'bindings': [
                set_creator_sa_as_owner,
            ]
        }
    # Deep-copy so the caller's properties dict is never mutated.
    iam_policy = copy.deepcopy(properties['iam-policy'])
    bindings = []
    if 'bindings' in iam_policy:
        bindings = iam_policy['bindings']
    else:
        iam_policy['bindings'] = bindings
    merged = False
    # Append the service account to an existing roles/owner binding if one
    # exists; otherwise add a fresh binding below.
    for binding in bindings:
        if binding['role'] == 'roles/owner':
            merged = True
            if service_account not in binding['members']:
                binding['members'].append(service_account)
            break
    if not merged:
        bindings.append(set_creator_sa_as_owner)
    return iam_policy
20ec2fd5b86c1be70d7d9bd9709b5ddfe0075bfa
611,522
import typing


def load_phoneme_map(
    phoneme_map_file: typing.TextIO,
) -> typing.Dict[str, typing.List[str]]:
    """Load a phoneme-to-phoneme mapping from a text file.

    File format: ``FROM_PHONEME<space>TO_PHONEME[<space>TO_PHONEME...]``,
    with ``#``-prefixed comment lines.

    Args:
        phoneme_map_file: open text file (or any iterable of lines)

    Returns:
        dict mapping from_phoneme -> [to_phoneme, to_phoneme, ...];
        a whitespace-only target maps to a single-space phoneme.
    """
    mapping: typing.Dict[str, typing.List[str]] = {}

    for raw_line in phoneme_map_file:
        entry = raw_line.strip("\r\n")
        if (not entry) or entry.startswith("#") or (" " not in entry):
            # Skip blank lines, comments and lines without a mapping.
            continue

        source, _, targets = entry.partition(" ")
        if targets.strip():
            # One or more non-whitespace target phonemes.
            mapping[source] = targets.split()
        else:
            # Target is pure whitespace: map to a single space phoneme.
            mapping[source] = [" "]

    return mapping
8cb226b361a30941c5e983e24daf40de502a444f
439,346
def getConsistentValue(thelist, error, emptyval=None):
    """Extract the single value shared by every element of a list.

    Raises:
        IOError: if the list contains differing values (message is *error*).
        ValueError: if the list is empty and no *emptyval* was supplied.

    Returns:
        the common value, or *emptyval* for an empty list.
    """
    if not thelist:
        if emptyval is None:
            raise ValueError("Empty list supplied but no empty value given!")
        return emptyval

    first = thelist[0]
    # All entries must equal the first one.
    if thelist.count(first) != len(thelist):
        raise IOError(error)
    return first
144d8b0dde1d40160f5cc3097d68a4049b6ee213
409,424
def prefix(start_text: str):
    """Build a prefix checker for messages.

    The returned function inspects ``msg.content``: if it starts with
    *start_text* it returns the length of the prefix (the index where the
    real message begins, e.g. ``'py!help'[3:]`` for prefix ``'py!'``),
    otherwise it returns None.

    :param start_text: the text that should act as a prefix
    :type start_text: str
    :return: the checker function described above
    :rtype: Callable
    """
    prefix_length = len(start_text)

    def _check(message):
        return prefix_length if message.content.startswith(start_text) else None

    return _check
05809872d9ef5f0902ebba0a452380fd6292a049
445,561
def axis2cat(axis):
    """Convert a reduction axis into the remaining ("cat") dimension.

    Axis is the dimension to sum (the pythonic way); cat is the dimension
    that remains at the end (the KeOps way).

    :param axis: 0 or 1
    :return: cat: 1 or 0
    :raises ValueError: if axis is neither 0 nor 1
    """
    if axis not in (0, 1):
        raise ValueError("Axis should be 0 or 1.")
    return 1 - axis
8df4c0d9c12ade54d64c1ccb901bca3ac1fd2705
213,594
def lenient_issubclass(cls, class_or_tuple) -> bool:
    """issubclass() that tolerates a non-type first argument.

    Returns False instead of raising when *cls* is not a type.
    """
    if not isinstance(cls, type):
        return False
    return issubclass(cls, class_or_tuple)
ae1f29105700bf4b8252ccb748c37351a9f0562d
547,811
def wrapto180(angles):
    """Wrap angles (degrees) into the range -180..180.

    :param angles: numpy vector or scalar
    :return: wrapped angles, same shape as input
    """
    # First fold into [0, 360), then shift so the result lands in [-180, 180).
    folded = angles % 360
    return (folded + 180) % 360 - 180
2e7efc1e4e3b131519fc0d564c279ff6cdd92503
427,726
import re


def get_urns_from_text(document):
    """Return every 13-digit URN found in a text file.

    :param document: path to the text file to scan
    :return: list of 13-digit strings in order of appearance
    """
    with open(document) as handle:
        contents = handle.read()
    return re.findall("[0-9]{13}", contents)
c350ba4f7de53f1aaac1edf5cdcfc70aa77a3205
393,843
def num_higher(objects, key1, key2):
    """Count the rankers where the value under key1 is strictly higher
    than the value under key2.

    NOTE: the original docstring claimed the opposite direction ("key2 is
    higher than key1"), but the code compares ``objects[key1][i] >
    objects[key2][i]``; this docstring is corrected to match the code's
    actual (preserved) behavior.

    Pairs where either value is None are skipped.

    :param objects: mapping from key to a list of per-ranker values
    :param key1: key whose values are tested for being higher
    :param key2: key compared against
    :return: number of rankers where key1's value exceeds key2's
    """
    count = 0
    for ranker in range(len(objects[key1])):
        first = objects[key1][ranker]
        second = objects[key2][ranker]
        # `is not None` instead of `!= None`: identity check is the idiom
        # and avoids surprises from custom __eq__ implementations.
        if first is not None and second is not None and first > second:
            count += 1
    return count
2dc13e6287281e692b67c85acdd14f0a18b9ba4c
200,505
def _truncate_with_offset(resource, value_list, offset): """Truncate a list of dictionaries with a given offset. """ if not offset: return resource offset = offset.lower() for i, j in enumerate(value_list): # if offset matches one of the values in value_list, # the truncated list should start with the one after current offset if j == offset: return resource[i + 1:] # if offset does not exist in value_list, find the nearest # location and truncate from that location. if j > offset: return resource[i:] return []
bc4b9fcff258f2fafae000b57c952abc74b418ce
311,374
import re


def search_bible(values, search_for):
    """Search verse texts for a pattern, case-insensitively.

    :param values: mapping from verse key (e.g. book/chapter/verse) to text
    :param search_for: regex pattern to look for
    :return: list of keys whose text matches the pattern
    """
    pattern = re.compile(search_for, re.IGNORECASE)
    return [key for key, text in values.items() if pattern.search(text)]
cfb94e43e64f390ae823923c4e0dc79cba5b095a
393,657
def _is_num(data): """Verify if data is either int or float. Could be replaced by: from numbers import Number as number isinstance(data, number) but that requires Python v2.6+. """ return isinstance(data, int) or isinstance(data, float)
940ab3eb907a73bb5da2c807c1b30aff1ba8e4d9
609,073
from typing import ContextManager import contextlib def _null_context_manager_with_arguments(*args, **kwargs) -> ContextManager: """ A null context manager that does nothing, and accepts arguments. Python 3.6 does not have a null context manager (python 3.7 has), hence the 'suppress' context manager with no arguments is used to represent it instead. Additionally, the context manager accepts arguments and ignores them. """ return contextlib.suppress()
c5827bc26db8dfe2f3796e4d7421e00ed3bb0029
373,317
def sort_list_of_dicts_by(flats_list, key):
    """Sort a list of dicts on a field common to all of them.

    :param flats_list: list of dicts to sort.
    :param key: the dict key to sort on.
    :return: a new sorted list (the input is left untouched).

    :Example:

    >>> sort_list_of_dicts_by([{1: 2}, {1: 1}], 1)
    [{1: 1}, {1: 2}]
    """
    def field_of(entry):
        return entry[key]

    return sorted(flats_list, key=field_of)
a51229b1939ab8ee0c9b3edb835a0cb45be2dd96
472,598
from typing import Iterable
from typing import Any
from typing import List


def uniques_from_list(ls: Iterable[Any]) -> List[Any]:
    """Return the unique values of *ls*, preserving first-seen order.

    Args:
        ls (list): iterable of hashable items to deduplicate

    Returns:
        list: unique values in their original order
    """
    # dict preserves insertion order, so fromkeys dedups while keeping order.
    return list(dict.fromkeys(ls))
ee53a766e3f82942e421cad7104eb01614579eb8
293,920
def assign_county(point, counties):
    """Assign a single point to its county.

    Returns the 'NAME' of the first county (in row order) whose geometry
    the point intersects, or None when no county matches.
    """
    for _, county in counties.iterrows():
        if point.intersects(county['geometry']):
            return county['NAME']
    return None
b884ce7ca4727967e412633b44edca5140e19edc
650,299
def PickHistoryName(args):
    """Pick the results history name used to look up a history ID.

    Precedence: an explicit ``--results-history-name`` wins; otherwise an
    app-package name with ``' (gcloud)'`` appended is used; failing both,
    None is returned and the Testing service chooses the history itself.

    Args:
      args: an argparse namespace with the command's combined arguments.

    Returns:
      A history name string derived from user-supplied data, or None if
      the required information is missing.
    """
    history_name = args.results_history_name
    if history_name:
        return history_name

    package = args.app_package
    if package:
        return package + ' (gcloud)'

    return None
2cc6487d804b2bdb586c699a50421f60869a1e18
616,495
def convert_tag(tag):
    """Convert an nltk.pos_tag tag to the tag used by wordnet.synsets.

    Only the tag's first letter matters; unknown tags yield None.
    """
    # dict.get returns None for unmapped first letters, replacing the
    # original try/except KeyError.
    return {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}.get(tag[0])
4dd3afd300425d9c987d3e380e83a7034b7099eb
370,568
def counts_to_tfidf(count_matrix, idfs):
    """Convert raw counts to Tf-Idf scores.

    :param count_matrix: term-count matrix supporting ``log1p()``
    :param idfs: diagonal idf matrix supporting ``dot()``
    :return: the product ``idfs . log1p(count_matrix)``
    """
    log_tfs = count_matrix.log1p()
    return idfs.dot(log_tfs)
5be57dfdadad41552576cb2c8821f2227347a437
580,573
def compare_version(version1, version2):
    """Compare dotted version strings.

    Missing trailing components count as 0, so "2.0" == "2".

    Return 1 if version1 is after version2; -1 if version1 is before
    version2; 0 if the two versions are the same.
    """
    left = version1.split(".")
    right = version2.split(".")

    for idx in range(max(len(left), len(right))):
        # int() is applied lazily, component by component, exactly as before.
        a = int(left[idx]) if idx < len(left) else 0
        b = int(right[idx]) if idx < len(right) else 0
        if a != b:
            return 1 if a > b else -1
    return 0
0b6adc3861a8925d8ce69481927feac7e8d7bab0
619,152
import collections


def odict_delete(odict, key):
    """Return a copy of an OrderedDict with one key:value pair removed.

    Parameters
    ----------
    odict : collections.OrderedDict
        Ordered dictionary to copy (the original is left unchanged).
    key : string
        Key to delete; a missing key simply yields a full copy.

    Returns
    -------
    odict_new : collections.OrderedDict
        A copy of the dictionary without the given key.
    """
    return collections.OrderedDict(
        (k, v) for k, v in odict.items() if k != key
    )
6a7498a9440659c332e418ebd84c57c117c309ef
126,214
def _convert_list_of_objs_to_list_of_dicts(list_of_objects): """ Recursively convert a list of objects to a list of dicts. This works recursively and is needed because the NLP Textanalyzer sometimes gives back a list of non-json-serializable objects, which need to be converted to dicts. The list structure has to be kept. :param list list_of_objects: List of objects to be converted to list of dicts. :return list: list of objects or a single one """ if isinstance(list_of_objects, list): list_of_subobjects = [] for sub in list_of_objects: list_of_subobjects.append( _convert_list_of_objs_to_list_of_dicts(sub)) return list_of_subobjects else: return list_of_objects.__dict__
1a56b8ea1bd4cc985335ed013b0b6c7a90f60a4d
626,176
def find_tag(vobject_item):
    """Find the component name inside ``vobject_item``.

    For a VCALENDAR, returns the name of the first component that is not a
    VTIMEZONE; otherwise (or if no such component exists) returns None.
    """
    if vobject_item.name != "VCALENDAR":
        return None
    return next(
        (c.name for c in vobject_item.components() if c.name != "VTIMEZONE"),
        None,
    )
e3bf0013cb4bbaa3cc9974ef866b79a9b7358fee
298,266
def avg_sentiment(tweet):
    """Return the mean of a tweet's afinn and vader (compound) scores.

    :param tweet: dict with 'afinn_sentiment' and a 'vader_sentiment'
        sub-dict containing 'compound'
    """
    afinn_score = tweet['afinn_sentiment']
    vader_score = tweet['vader_sentiment']['compound']
    return (afinn_score + vader_score) / 2.0
a97931be3422a458816cfb48c9a4851968e502e0
536,110
from typing import Optional
import pathlib


def resolve_cache_root(
    cache_root: Optional[pathlib.Path], *directories: str
) -> pathlib.Path:
    """Resolve (and create) the cache root directory.

    :param cache_root: the cache root; if None, ``~/.kgm`` is used
    :param directories: subdirectories inside the cache root, created
        if necessary
    :return: an absolute path to an existing directory
    """
    # Fall back to the default location, then normalize to an absolute path.
    root = pathlib.Path('~', '.kgm') if cache_root is None else cache_root
    root = root.expanduser().absolute()

    # Descend into the requested subdirectories and make sure they exist.
    root = root.joinpath(*directories)
    root.mkdir(parents=True, exist_ok=True)
    return root
720a18672b74c9aeabae68dc75cbd2ff41cbc449
366,648
def evaluate_fN(model, NHI):
    """Evaluate an f(N,X) model at a set of NHI values.

    Parameters
    ----------
    model : callable
        The f(N,X) model to evaluate.
    NHI : array
        log NHI values.

    Returns
    -------
    log_fN : array
        f(NHI,X) values (no z dependence is applied).
    """
    # Calling the model directly is equivalent to model.__call__(NHI).
    return model(NHI)
e952a29fdf5864b26dc534140b2ccfb0b59fe24b
707,564
def get_url(pif, dataset, version=1, site="https://citrination.com"):
    """Construct the URL of a PIF on a site.

    :param pif: PIF object to construct the URL for (its ``uid`` is used)
    :param dataset: dataset the PIF belongs to
    :param version: version of the PIF (default: 1)
    :param site: site hosting the dataset (default: https://citrination.com)
    :return: the URL as a string
    """
    template = "{site}/datasets/{dataset}/version/{version}/pif/{uid}"
    return template.format(site=site, dataset=dataset, version=version, uid=pif.uid)
77cf91aeda34271fc68aa80fb8a1af879e48574d
471,145