Dataset columns:
    content : string (length 39 to 14.9k)
    sha1    : string (length 40)
    id      : int64 (0 to 710k)
def get_node_val(node, val_type):
    """Return the value as a string of a child node of the specified type,
    or raise ValueError if none exists"""
    for child in node.children:
        if child.expr_name == val_type:
            return child.text.strip('"')
    raise ValueError("No value of specified type.")
d501dd7ba20620e844a5d4d2d33112e64a9dfef0
694,804
def _make_pretty_arguments(arguments):
    """
    Makes the arguments description pretty and returns a formatted string if
    `arguments` starts with the argument prefix. Otherwise, returns None.

    Expected input:

        Arguments:
          * arg0 - ...
              ...
          * arg0 - ...
              ...

    Expected output:
    **Arguments:**

    * arg0 - ...
        ...
    * arg0 - ...
        ...

    """
    if arguments.startswith("\n    Arguments:"):
        arguments = "\n".join(map(lambda u: u[6:], arguments.strip().split("\n")[1:]))
        return "**Arguments:**\n\n%s\n\n" % arguments
b6e7571c3d0e432649edf8295d3aa640140c551f
694,806
import torch


def nested_stack(params, roll: bool = False):
    """Form a tensor from a nested list of tensors.

    This function is a generalization of torch.stack. For proper usage, it's
    important that params is a nested list with shape consistent with an
    array. The innermost elements of that nested list should be PyTorch
    tensors, all of which have identical size.

    For an example, suppose that a, b, c, and d are all tensors of size (5,).
    Then, nested_stack([[a, b], [c, d]]) returns a tensor of size (2, 2, 5).

    If roll is set to True, then the dimensions of the tensors (like a, b, c
    and d in the example above) will be permuted to the start of the output.
    This is useful if those dimensions were supposed to be batch dimensions.
    In the example, the output with roll=True would have size (5, 2, 2). If
    instead a, b, c, and d all had size (6, 9, 8), then the output size would
    be (6, 9, 8, 2, 2) if roll=True and (2, 2, 6, 9, 8) if roll=False.
    """
    def recursive_stack(params_):
        if isinstance(params_[0], torch.Tensor):
            return torch.stack(params_)
        num_rows = len(params_)
        return torch.stack(
            [nested_stack(params_[i]) for i in range(num_rows)]
        )

    stacked = recursive_stack(params).squeeze(0)
    if roll:
        inner = params[0]
        while not isinstance(inner, torch.Tensor):
            inner = inner[0]
        inner_dim = inner.dim()
        perm = (list(range(stacked.dim() - inner_dim, stacked.dim()))
                + list(range(stacked.dim() - inner_dim)))
        return stacked.permute(perm)
    else:
        return stacked
9b8b56eb15f55cbc5bf0b726e7aaf7fd3d476ada
694,807
def count_features_type(features):
    """ Counts three different types of features (float, integer, binary).

    :param features: pandas.DataFrame
        A dataset in a panda's data frame
    :returns a tuple (binary, integer, float)
    """
    counter = {k.name: v for k, v in features.columns.to_series().groupby(features.dtypes)}
    binary = 0
    if ('int64' in counter):
        binary = len(set(features.loc[:, (features <= 1).all(axis=0)].columns.values)
                     & set(features.loc[:, (features >= 0).all(axis=0)].columns.values)
                     & set(counter['int64']))
    return (binary,
            len(counter['int64']) - binary if 'int64' in counter else 0,
            len(counter['float64']) if 'float64' in counter else 0)
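A quick sanity check on a toy frame (the sample values are illustrative): 'flag' holds only 0/1 ints, 'n' is a general int column, and 'x' is float, so the expected result is (1, 1, 1).

import pandas as pd

# Toy frame: one binary int column, one general int column, one float column.
df = pd.DataFrame({
    "flag": [0, 1, 1],     # int64, all values in [0, 1] -> counted as binary
    "n": [2, 5, 7],        # int64, not binary
    "x": [0.5, 1.5, 2.5],  # float64
})
print(count_features_type(df))  # (1, 1, 1)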
7c759ac9289e7f2cdb542d67ad30bf36e06749c8
694,808
import re


def default_cleaner_fn(fld):
    """ Return a copy of the given field cleaned up by removing any unwanted characters. """
    if (isinstance(fld, str)):
        return re.sub("[\"\'\\\\]", "", fld)  # remove quotes and backslashes
    else:
        return fld
7f9bacfe981f1b3591cde5cc7ae04b654689b0c4
694,810
def get_pagination_parameters(request, paginator, paginated):
    """
    Prepare and return the template parameters needed for pagination.

    Thanks to https://gist.github.com/sbaechler/5636351

    Args:
        ``request`` (django.http.HttpRequest): The request object.
        ``paginator`` (django.core.paginator.Paginator): An instance of the
        Paginator with the paginated data.
        ``paginated`` (django.core.paginator.Page): The paginated data.

    Returns:
        ``dict``. A dictionary with all values needed by the template to
        create the pagination.
    """
    LEADING_PAGE_RANGE_DISPLAYED = TRAILING_PAGE_RANGE_DISPLAYED = 10
    LEADING_PAGE_RANGE = TRAILING_PAGE_RANGE = 8
    NUM_PAGES_OUTSIDE_RANGE = 2
    ADJACENT_PAGES = 4

    pages = paginator.num_pages
    page = paginated.number
    in_leading_range = in_trailing_range = False
    pages_outside_leading_range = pages_outside_trailing_range = range(0)

    if pages <= LEADING_PAGE_RANGE_DISPLAYED + NUM_PAGES_OUTSIDE_RANGE + 1:
        in_leading_range = in_trailing_range = True
        page_range = [n for n in range(1, pages + 1)]
    elif page <= LEADING_PAGE_RANGE:
        in_leading_range = True
        page_range = [n for n in range(1, LEADING_PAGE_RANGE_DISPLAYED + 1)]
        pages_outside_leading_range = [
            n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
    elif page > pages - TRAILING_PAGE_RANGE:
        in_trailing_range = True
        page_range = [n for n in range(
            pages - TRAILING_PAGE_RANGE_DISPLAYED + 1, pages + 1)
            if n > 0 and n <= pages]
        pages_outside_trailing_range = [
            n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
    else:
        page_range = [n for n in range(
            page - ADJACENT_PAGES, page + ADJACENT_PAGES + 1)
            if n > 0 and n <= pages]
        pages_outside_leading_range = [
            n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
        pages_outside_trailing_range = [
            n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]

    # Now try to retain GET params, except for 'page'
    params = request.GET.copy()
    if 'page' in params:
        del params['page']
    get_params = params.urlencode()

    prev = paginated.previous_page_number() if paginated.has_previous() else ""

    return {
        'pages': pages,
        'page': page,
        'previous': prev,
        'next': paginated.next_page_number() if paginated.has_next() else "",
        'has_previous': paginated.has_previous(),
        'has_next': paginated.has_next(),
        'page_range': page_range,
        'in_leading_range': in_leading_range,
        'in_trailing_range': in_trailing_range,
        'pages_outside_leading_range': pages_outside_leading_range,
        'pages_outside_trailing_range': pages_outside_trailing_range,
        'get_params': get_params,
        'count': paginator.count,
    }
0dd54bffdf31a3cf78bfdc510ac20a901e3adc5e
694,812
from typing import Tuple


def extend_shape(original_shape: Tuple, new_size: int, axis: int = 0) -> Tuple:
    """Extend a dimension of a shape tuple"""
    shape = list(original_shape)
    shape[axis] = new_size
    return tuple(shape)
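A quick check of the intended behavior:

# Growing axis 1 of a (3, 4) shape to 10 columns:
print(extend_shape((3, 4), 10, axis=1))  # (3, 10)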
c48e2559900e88ec6c808735ac017c65bc82741a
694,813
def _resource_for_help(pkg_info, help_file):
    """
    Get the resource name that references the help file in the given help
    package. The help file should be relative to the document root of the
    package.
    """
    return "Packages/%s/%s" % (pkg_info.doc_root, help_file)
39214fe39db71935a85763e1731c0da97fe9e74b
694,817
def str2bool(v):
    """
    Convert a string to a boolean

    :return boolean: Returns True if string is a true-type string.
    """
    return v.lower() in ('true', 't', '1', 'yes', 'y')
32dc16194fa6096e53e1a0b21f0287c31a7cd824
694,819
def adjustbufsize(bufsize: int, bits: int) -> int:
    """Adjust buffer size to account for bit depth

    Args:
        bufsize: initial estimate of buffer size
        bits   : bit depth of bitmap (1, 4, 8, 24) bits

    Returns:
        An adjusted int value of the buffer size
    """
    if bits == 1:
        bufsize >>= 3
    elif bits == 24:
        bufsize *= 3
    elif bits == 4:
        bufsize >>= 1
    return bufsize
9130c0fa174d9daf6dffabba67c77e9d85b29c13
694,821
def and_(a, b):
    """Same as a & b."""
    return a & b
a07f69143fd9eaa3b27bb07ee72c04efa31c5b7f
694,823
import re


def generate_top_kmer_md_table(t_kmer_dic, g_kmer_dic, top=5, val_type="c"):
    """
    Given k-mer count dictionaries for genomic and transcript context set,
    generate a markdown table with top 5 k-mers (sorted by descending
    dictionary value).

    val_type:
        Specify type of stored dictionary value.
        c : count (count of k-mer)
        r : ratio (k-mer count / total k-mer count)
        p : percentage ( (k-mer count / total k-mer count) * 100)

    """
    assert t_kmer_dic, "given dictionary t_kmer_dic empty"
    assert g_kmer_dic, "given dictionary g_kmer_dic empty"
    assert re.search("^[c|p|r]$", val_type), "invalid val_type given"
    # Get size of k.
    k = 0
    for kmer in t_kmer_dic:
        k = len(kmer)
        break
    # Expected kmer number.
    exp_kmer_nr = pow(4, k)
    t_kmer_nr = 0
    g_kmer_nr = 0
    for kmer in t_kmer_dic:
        kc = t_kmer_dic[kmer]
        if kc:
            t_kmer_nr += 1
    for kmer in g_kmer_dic:
        kc = g_kmer_dic[kmer]
        if kc:
            g_kmer_nr += 1
    t_kmer_perc = "%.2f " % ((t_kmer_nr / exp_kmer_nr) * 100) + " %"
    g_kmer_perc = "%.2f " % ((g_kmer_nr / exp_kmer_nr) * 100) + " %"
    # Adjust decimal places based on k-mer size.
    dc_p = 2
    dc_r = 4
    if k > 3:
        for i in range(k - 3):
            dc_p += 1
            dc_r += 1
    dc_p_str = "%." + str(dc_p) + "f"
    dc_r_str = "%." + str(dc_r) + "f"
    add_ch = ""
    if val_type == "p":
        add_ch = " %"
        # Format percentage to two decimal places.
        for kmer in t_kmer_dic:
            new_v = dc_p_str % t_kmer_dic[kmer]
            t_kmer_dic[kmer] = new_v
        for kmer in g_kmer_dic:
            new_v = dc_p_str % g_kmer_dic[kmer]
            g_kmer_dic[kmer] = new_v
    elif val_type == "r":
        # Format ratio to four decimal places.
        for kmer in t_kmer_dic:
            new_v = dc_r_str % t_kmer_dic[kmer]
            t_kmer_dic[kmer] = new_v
        for kmer in g_kmer_dic:
            new_v = dc_r_str % g_kmer_dic[kmer]
            g_kmer_dic[kmer] = new_v
    # Get top j k-mers.
    i = 0
    t_topk_list = []
    for kmer, v in sorted(t_kmer_dic.items(), key=lambda item: item[1], reverse=True):
        i += 1
        if i > top:
            break
        t_topk_list.append(kmer)
    i = 0
    g_topk_list = []
    for kmer, v in sorted(g_kmer_dic.items(), key=lambda item: item[1], reverse=True):
        i += 1
        if i > top:
            break
        g_topk_list.append(kmer)
    # Generate markdown table.
    mdtable = "| Rank | &nbsp; &nbsp; Transcript context &nbsp; &nbsp; | &nbsp; &nbsp; Genomic context &nbsp; &nbsp;|\n"
    mdtable += "| :-: | :-: | :-: |\n"
    for i in range(top):
        t_kmer = t_topk_list[i]
        g_kmer = g_topk_list[i]
        pos = i + 1
        mdtable += "| %i | %s (%s%s) | %s (%s%s) |\n" % (pos, t_kmer, str(t_kmer_dic[t_kmer]), add_ch, g_kmer, str(g_kmer_dic[g_kmer]), add_ch)
    mdtable += "| ... | &nbsp; | &nbsp; |\n"
    mdtable += "| # distinct k-mers | %i (%s) | %i (%s) |\n" % (t_kmer_nr, t_kmer_perc, g_kmer_nr, g_kmer_perc)
    # Return markdown table.
    return mdtable
f0be470a6b2e10a5f786cd7d01c6b09b7f1e395e
694,824
def schedule(year=None, month=None, day=None, week=None, day_of_week=None, hour=None,
             minute=None, second=None, start_date=None, end_date=None, timezone=None):
    """Schedule a function to be executed according to a crontab-like schedule

    The decorated function will be executed according to the schedule provided. Slack Machine
    uses APScheduler under the hood for scheduling. For more information on the interpretation
    of the provided parameters, see
    :class:`CronTrigger<apscheduler:apscheduler.triggers.cron.CronTrigger>`

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of the month (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
        (defaults to scheduler timezone)
    """
    kwargs = locals()

    def schedule_decorator(f):
        f.metadata = getattr(f, "metadata", {})
        f.metadata['plugin_actions'] = f.metadata.get('plugin_actions', {})
        f.metadata['plugin_actions']['schedule'] = kwargs
        return f

    return schedule_decorator
051ed1fbdf113834bcff626b3679f1dc6ea5580c
694,826
def get_default_ext(delim):
    """Retrieves the default extension for a delimiter"""
    if delim == ',':
        return "csv"
    if delim == '\t':
        return "tsv"
    return "txt"
37e27bebcb6c1fbfe3d794d3d2ac34d9909ed40c
694,830
def to_camel(s):
    """Convert an underscored title into camel case.

    'PARENT_ORGANISATION_ID' => 'parentOrganisationId'"""
    bits = [(x.lower() if i == 0 else x.title()) for (i, x) in enumerate(s.split("_"))]
    return "".join(bits)
34400be6a346d886b2fca562b737b7811b871af1
694,831
import csv
import json
from typing import Counter


def convert_newsqa(file_path):
    """
    Converts NewsQA dataset to jtr format.

    Args:
        file_path: path to the NewsQA CSV file (data/NewsQA/)

    Returns: dictionary in jtr format
    """
    # meta info
    if '/' in file_path:
        filename = file_path[file_path.rfind('/') + 1:]  # Maybe support a system-specific delimiter
    else:
        filename = file_path
    # data
    question_sets = []
    with open(file_path) as data_file:
        reader = csv.reader(data_file)
        next(reader)  # skip the header row
        for row in reader:
            [story_id, question, answer_char_ranges, is_answer_absent,
             is_question_bad, validated_answers, story_text] = row
            if validated_answers:
                answers = json.loads(validated_answers)
                spans = [k for k, v in answers.items() if ":" in k]
            else:
                answers = Counter()
                for rs in answer_char_ranges.split("|"):
                    for r in set(rs.split(",")):
                        if ":" in r:
                            answers[r] += 1
                spans = [k for k, v in answers.items() if ":" in k and v >= 2]
            if spans:
                qa_set = {
                    "support": [story_text],
                    "questions": [{
                        'question': {
                            'text': question,
                            'id': story_id + "_" + question.replace(" ", "_")
                        },
                        'answers': [{"span": [int(span.split(":")[0]), int(span.split(":")[1])],
                                     "text": story_text[int(span.split(":")[0]):int(span.split(":")[1])]}
                                    for span in spans]
                    }]
                }
                question_sets.append(qa_set)
    corpus_dict = {
        'meta': {'source': filename},
        'instances': question_sets
    }
    return corpus_dict
0e48883e179f2d440ac8c72c8a5ff9344f595f1f
694,835
def _zones_to_regions(zones):
    """
    Return list of regions from the input list of zones

    :param zones: List of zones. This is the output from `get_zones_in_project()`.
    :return: List of regions available to the project
    """
    regions = set()
    for z in zones:
        # Chop off the last 2 chars to turn the zone to a region
        r = z['name'][:-2]
        regions.add(r)
    return list(regions)
83c59cc6c2a9fc6e36a64044dc8ccc73ec039801
694,836
import re


def reformat_ISBNs(text: str, match_func) -> str:
    """Reformat ISBNs.

    :param text: text containing ISBNs
    :param match_func: function to reformat matched ISBNs
    :type match_func: callable
    :return: reformatted text
    """
    isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[\dXx])')
    text = isbnR.sub(match_func, text)
    return text
a440d65e4bd747c28fb186e8b76389e8bb59526c
694,843
def get_memory_usage(pandas_df):
    """
    Returns the number of bytes used by a pandas dataframe
    """
    return pandas_df.memory_usage(deep=True).sum()
7ed31e0f20269224ea0517a71045992106df9030
694,848
def plot_evoked_topomaps(epochs, events, average_method, times):
    """
    Plot evoked topomaps.

    One figure is generated for each event.

    Parameters
    ----------
    epochs : mne.epochs.Epochs
        Epochs extracted from a Raw instance.
    events : list[str]
        Events to include.
    average_method : "mean" | "median"
        How to average epochs.
    times : list[float] | "auto" | "peaks" | "interactive"
        The time point(s) to plot.

    Returns
    -------
    list[matplotlib.figure.Figure]
        A list of the figure(s) generated.
    """
    figs = []
    for event in events:
        evoked = epochs[event].average(method=average_method)
        figs.append(evoked.plot_topomap(times, title=f'Event: {event}'))
        if times == 'interactive':
            figs[-1].set_size_inches(6, 4)
    return figs
83e040930cc8971aeb045b4accaa315fcb53cc4f
694,851
def CheckDoNotSubmitInDescription(input_api, output_api):
    """Checks that the user didn't add 'DO NOT ''SUBMIT' to the CL description."""
    keyword = 'DO NOT ''SUBMIT'
    if keyword in input_api.change.DescriptionText():
        return [output_api.PresubmitError(
            keyword + ' is present in the changelist description.')]
    else:
        return []
c2ac7c263db43897933120fac475a5a2fc6774bc
694,854
import math
import itertools


def permutate(k, l_exp):
    """
    Generate the permutations for all exponents of y

    :param k: number of meaningful directions
    :param l_exp: expansion order
    :return perms: array of permutations
    """
    Nt = int(math.factorial(l_exp + k) / (math.factorial(l_exp) * math.factorial(k)))
    lst = [ll for ll in range(l_exp + 1)] * k
    perms_all = set(itertools.permutations(lst, k))
    perms = []
    for per in perms_all:
        if sum(per) <= l_exp:
            perms.append(per)
    return perms
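For instance, with k=2 directions and expansion order 2, the function returns the six exponent tuples whose entries sum to at most 2; this count matches the binomial coefficient Nt computed inside:

perms = permutate(2, 2)
print(sorted(perms))
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (2, 0)] -- 6 tuples, C(4, 2) = 6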
c0de0d2964299e2956ba4e04fe3ad5df9cafbc2d
694,856
import math


def ebob(a, b):
    """ Returns the greatest common divisor of a and b. """
    return math.gcd(a, b)
43039a85b7a1e5065f7365c17c308a1c9d9df37d
694,858
def convert_comma_separated_integer_to_float(comma_separated_number_string):
    """Converts a string of the form 'x,xxx,xxx' to its equivalent float value.

    :param comma_separated_number_string: A string in comma-separated float
        form to be converted.
    :returns: A float representing the comma-separated number.
    """
    return float(comma_separated_number_string.replace(',', ''))
daddeaa78a3efb8ffd2d5eac122757f041da5f97
694,860
from datetime import datetime
import json
import requests


def send_metrics(
    uuid,
    data,
    solution_id,
    url="https://metrics.awssolutionsbuilder.com/generic",
):
    """sends metric to aws-solutions

    Args:
        uuid (string): unique id to make metrics anonymous
        data (dict): usage metrics from the solution
        solution_id (string): solution id
        url (str, optional): aws-solutions endpoint. \
            Defaults to "https://metrics.awssolutionsbuilder.com/generic".

    Returns:
        int: request code
    """
    time_stamp = {
        "TimeStamp": str(datetime.utcnow().isoformat())
        .replace("T", " ")
        .replace("Z", "")
    }  # Date and time instant in a java.sql.Timestamp compatible format,
    params = {"Solution": solution_id, "UUID": uuid, "Data": data}
    metrics = dict(time_stamp, **params)
    json_data = json.dumps(metrics)
    headers = {"content-type": "application/json"}
    req = requests.post(url, data=json_data, headers=headers)
    code = req.status_code
    return code
ab29d0c8830cb6ead501e84fa09ec9c0c28c9dcd
694,861
def _encode_Bool_Flag(bBoolFlag: bool) -> int:
    """
    Description
    -----------
    Method encoding a bool value into an integer flag.

    Parameters
    ----------
    `bBoolFlag` : bool
        Boolean flag which should be converted into an integer value

    Return
    ------
    `iEncodedFlag` : int
        Boolean flag encoded as integer (1 : True | 0 : False)
    """
    if (bBoolFlag == True):
        return 1
    elif (bBoolFlag == False):
        return 0
1df72f50b6b3e8206a09634386cd14f308f931f5
694,865
def dataToTuple(stringData):
    """ Formats string to tuple ("id: data" -> (id, data)) """
    splitLine = stringData.split(": ")
    result = (splitLine[0], splitLine[1].strip("\n"))
    return result
dce23300746935541822cd9e6924fca84bc702d4
694,867
def login(client, secret_id, rresp):
    """logging in as 'secret_id' with 'g-recaptcha-response'

    client (class Flask.testing.FlaskClient): the client
    secret_id (str): the secret_id to login
    rresp (str): the recaptcha response code. can be empty in testing
    """
    return client.post('/auth', json={
        'id': secret_id,
        'g-recaptcha-response': rresp
    }, follow_redirects=True).get_json()
fdcf5a8646638fb14ec11a1a3e35c4f8aa71b96c
694,868
def aggregate_count(keyname):
    """
    Straightforward sum of the given keyname.
    """
    def inner(docs):
        return sum(doc[keyname] for doc in docs)
    return keyname, inner
431896d1c8d8c1f773b684b5235919ce1728a8ce
694,869
def new_version_entry(version):
    """
    Returns a new entry for the version index JSON schema.
    """
    return {
        "allOf": [
            {"properties": {"version": {"oneOf": [{"const": version}]}}},
            {
                "$ref": "https://raw.githubusercontent.com/deepset-ai/haystack/master/json-schemas/"
                f"haystack-pipeline-{version}.schema.json"
            },
        ]
    }
7198e859b1634ebfbf181a4ff0a9f488f29afd32
694,875
def fourier(framearray):
    """ Fourier transforms all waves from array. (Real values only)

    :param framearray: array of waves (frames)
    :return: array of FFT waves (spectrums)
    """
    fourier_frame = []
    for frame in framearray:
        index = frame.make_spectrum()
        fourier_frame.append(index)
    return fourier_frame
0f97ec9bd3f9ceda8d449a35623e9a2beea0ccda
694,877
def get_media_stream(streams):
    """Returns the metadata for the media stream in an MXF file, discarding
    data streams."""
    found = None
    for stream in streams:
        # skip 'data' streams that provide info about related mxf files
        if stream['codec_type'] == 'data':
            continue
        if found:
            raise UserWarning('Expected only one media stream per MXF.')
        found = stream
    return found
563ef35c2be3e9787340d7527e7fd0f27b6db036
694,879
def validate_iyr(issue_year: str) -> bool:
    """iyr (Issue Year) - four digits; at least 2010 and at most 2020."""
    return len(issue_year) == 4 and int(issue_year) >= 2010 and int(issue_year) <= 2020
b30614523c1633ecf40a4f68556dcc820e1aefef
694,882
def drawAsInfinite(requestContext, seriesList):
    """
    Takes one metric or a wildcard seriesList.

    If the value is zero, draw the line at 0. If the value is above zero,
    draw the line at infinity. If the value is null or less than zero, do not
    draw the line.

    Useful for displaying on/off metrics, such as exit codes. (0 = success,
    anything else = failure.)

    Example:

    .. code-block:: none

        drawAsInfinite(Testing.script.exitCode)

    """
    for series in seriesList:
        series.options['drawAsInfinite'] = True
        series.name = 'drawAsInfinite(%s)' % series.name
    return seriesList
8cb8273d57abca57600071101f6bd9b4a595ea38
694,883
def parentChainContainsCycle(term):
    """
    Check if the chain of parents from an item contains a cycle by navigating
    along the sequence of parents

    :param term: The ontology term to check
    :type term: pronto.Term
    :return: Whether the parent chain contains a cycle
    :rtype: bool
    """
    seen = set()
    toProcessed = [term]
    while len(toProcessed) > 0:
        processing = toProcessed.pop(0)
        if processing in seen:
            return True
        seen.add(processing)
        toProcessed += list(processing.parents)
    return False
162267808cfbafee1c1c4d07467d1b50eddcb58f
694,885
def _make_dict(my_tup, lang):
    """Make tuple in a dictionary so can be jsonified into an object."""
    labels = ['comment_text', 'course_code', 'learner_classification',
              'offering_city', 'offering_fiscal_year', 'offering_quarter',
              'overall_satisfaction', 'stars', 'magnitude', 'nanos']
    results = {key: val for key, val in zip(labels, my_tup)}
    return results
b7f010b4486889fe938a7a69513b0d5ca6e6cb86
694,886
import typing
import re


def split_sentences(text: str) -> typing.List[str]:
    """Split multiple sentences in one string into a list. Each item being a sentence.

    Args:
        text (str): Incoming string with multiple sentences.

    Returns:
        typing.List[str]: list of sentences.
    """
    SEP_REGEX = r"[^.!?]+[.!?]"
    sentences = []
    text_index = 0
    # try finding all and ensure its a valid match
    match: re.Match
    for match in re.finditer(SEP_REGEX, text):
        if match:
            sub_text = match.string[match.start():match.end()]
            if sub_text != "":
                sentences.append(sub_text.strip())
            text_index = match.end() + 1
    if text_index < len(text):
        remaining_text = text[text_index:]
        sentences.append(f"{remaining_text.strip()}.")
    return sentences
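A small illustrative run; a trailing fragment without terminal punctuation gets a period appended:

print(split_sentences("Hello there! How are you? Fine"))
# ['Hello there!', 'How are you?', 'Fine.']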
b748311fb4b9c0b5249782ceb140a9f64b17fa15
694,893
def get_apim_nv_secret(client, resource_group_name, service_name, named_value_id):
    """Gets the secret of the NamedValue."""
    return client.named_value.list_value(resource_group_name, service_name, named_value_id)
186030bca5285a82e133c8fc3acbf617d7247eb5
694,894
def extract_condition_disease(condition):
    """Extracts the disease encoded in the Condition resource.

    Example resource:
    {
      ...
      "code": {
        "coding": [
          {
            "code": "Yellow Fever",
            "system": "http://hl7.org/fhir/v3/ConditionCode"
          }
        ]
      }
      ...
    }

    Args:
        condition (Object):

    Returns:
        str: the disease in the condition.
    """
    return condition['code']['coding'][0]['code']
a3adcf89c58c5a95790e8c9ea9089ccd6e8818af
694,900
def get_train_na_percentages(train):
    """
    Return a Series with the percentage of NA values per column in the train
    dataframe. Must be called just after impute_train_missing_data().

    Keyword arguments:
    train -- the train dataframe
    """
    na_cols_pctg_train = train[train.columns[train.isna().sum() > 0]].isna().sum() / train.shape[0]
    return na_cols_pctg_train
4de1d8f509a6a94a7ef73de3f17fb4d218791a7b
694,903
import json


def parse_node_file(filename):
    """
    Parses a node file and creates the following variables:

        graph = {child:{None}, child:{Parent1, Parent2}, ...}
        prior = {node: [prior values]}
        lambdas = {parent:{child1:lambda1, child2:lambda2, leak_node:lambda0}}

    Those can then be used with the samplers, e.g., adaptive, annealed, etc.
    """
    with open(filename) as inputfile:
        data = json.load(inputfile)
    graph = {}
    prior = {}
    lambdas = {}
    for key in data:
        d = data[key]
        if len(d["parents"]) == 0:
            # root node
            graph[key] = {None}
            prior[key] = d["cpt"]
        else:
            graph[key] = {p for p in d["parents"]}
            t = graph[key]
            c = d["cpt"]
            lambdas[key] = {node: c[i] for i, node in enumerate(t)}
            lambdas[key]["leak_node"] = c[len(t)]
    return graph, prior, lambdas
736786825bf9a3f44bc641b4ccdc1d2b46fe9816
694,911
import socket


def isip(id):
    """ is the string an ipv4 address? """
    try:
        socket.inet_aton(id)
        return True
    except OSError:
        return False
258b0f09f5471ea276ee136ebfc7833c3e54fd04
694,912
def parameters_property(parameters):
    """Given the list of parameters this function constructs a property that
    simply returns the given list. It doesn't provide a setter so that the
    list of parameters cannot be overridden."""
    def getter(self):
        return parameters
    return property(fget=getter)
65aa43e925d07ace01705234bb4130251c50adda
694,913
async def get_tz_offset(client, user_id):
    """
    Retrieve the (seconds as an integer) time zone offset from UTC for a user.
    Outputs an integer in the range [-43200, 43200]. (-12h, +12h.)

    client: the client from the context object.
    user_id: the human-opaque Slack user identifier.
    """
    # https://api.slack.com/methods/users.info
    res = await client.users_info(user=user_id)
    return res['user']['tz_offset']
02bcfd0cb21cc35eb331951e77bca41d8a58722c
694,915
def feature_normalize(X, mean=None, sigma=None):
    """
    Returns a normalized version of X where the mean value of each feature
    is 0 and the standard deviation is 1. This is often a good preprocessing
    step to do when working with learning algorithms.
    The normalization is processed separately for each feature.

    :param X: numpy array of shape (m,n)
        Training data
    :return: X_norm, mu, sigma
        X_norm matrix(m,n) - Normalised Feature matrix
        mu matrix(1,n)     - mean of individual features
        sigma matrix(1,n)  - standard deviation of individual features
    """
    if mean is None:
        mean = X.mean(0)
    if sigma is None:
        sigma = X.std(0)
    X_norm = (X - mean) / sigma
    return X_norm, mean, sigma
81342428799f5ac8d1815b54400e8eb4d7cc302b
694,919
def periodic_ordering(amin, amax, bmin, bmax):
    """Figures out the order of the permutation that maps the minima and
    maxima to their order, in canonical form (amin<amax, bmin<bmax if
    possible).

    Parameters
    ----------
    amin : float
        minimum of first range
    amax : float
        maximum of first range
    bmin : float
        minimum of second range
    bmax : float
        maximum of second range

    Returns
    -------
    list of int 0-3
        Order index of amin, amax, bmin, bmax in that order; i.e. the return
        value [0, 2, 1, 3] means amin < bmin < amax < bmax; amin in order
        spot 0, amax in 2, bmin in 1, bmax in 3.
    """
    dict2 = {'a': amin, 'A': amax, 'b': bmin, 'B': bmax}
    order = ['a']
    # put the labels in the increasing order, starting at amin
    for label in ('A', 'b', 'B'):
        i = 0
        val = dict2[label]
        while i < len(order):
            if val < dict2[order[i]]:
                order.insert(i, label)
                break
            i += 1
        if label not in order:
            order.append(label)
    # Canonical order is 'a' always before 'A', and if possible, 'b' before
    # 'B', and 'a' before 'B' (in that order of priority). This defines a
    # unique member within the set of cyclic permutations; find that cyclic
    # permutation.
    idx0 = order.index('a')
    out = []
    for i in range(4):
        out.append(order[(idx0 + i) % 4])
    if out[3] == 'b':
        out = [out[3]] + out[slice(0, 3)]
    # at this point we have a canonically ordered list of the letters a, A,
    # b, and B.
    final = [out.index(a) for a in ['a', 'A', 'b', 'B']]
    return final
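A worked example for two overlapping ranges, matching the docstring's reading of the output:

# a = [0.1, 0.5], b = [0.3, 0.7]: amin < bmin < amax < bmax
print(periodic_ordering(0.1, 0.5, 0.3, 0.7))  # [0, 2, 1, 3]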
ec81f998ea9abe2f23825f0de1890b6b2d62ba0e
694,920
def strip_comments(string, markers):
    """
    Complete the solution so that it strips all text that follows any of a
    set of comment markers passed in. Any whitespace at the end of the line
    should also be stripped out.

    :param string: a string input.
    :param markers: list of characters.
    :return: a new string with whitespace and comment markers removed.
    """
    parts = string.split("\n")
    for v in markers:
        parts = [x.split(v)[0].rstrip() for x in parts]
    return "\n".join(parts)
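An illustrative call with two markers (the sample text is made up):

text = "apples, pears # and bananas\ngrapes\nbananas !apples"
print(strip_comments(text, ["#", "!"]))
# apples, pears
# grapes
# bananas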
0ab980996897a7c42254a318ed325a9108033f97
694,921
def encode_schedule(schedule):
    """Encodes a schedule tuple into a string.

    Args:
        schedule: A tuple containing (interpolation, steps, pmfs), where
            interpolation is a string specifying the interpolation strategy,
            steps is an int array_like of shape [N] specifying the global
            steps, and pmfs is an array_like of shape [N, M] where pmf[i] is
            the sampling distribution at global step steps[i]. N is the
            number of schedule requirements to interpolate and M is the size
            of the probability space.

    Returns:
        The string encoding of the schedule tuple.
    """
    interpolation, steps, pmfs = schedule
    return interpolation + ' ' + ' '.join(
        '@' + str(s) + ' ' + ' '.join(map(str, p))
        for s, p in zip(steps, pmfs))
d660bc6826dcef2bbc5fbfca0eafaf2d72ab061f
694,922
def ansi(num: int):
    """Return function that escapes text with ANSI color n."""
    return lambda txt: f'\033[{num}m{txt}\033[0m'
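Typical use, assuming a terminal that honors ANSI escapes (31 is the standard foreground code for red):

red = ansi(31)
print(red("error"))  # prints "error" wrapped in "\033[31m" ... "\033[0m"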
88297df114c3670a24e5ffa248d5756de09183cb
694,923
from typing import Union


def compound_interest(
    capital: float,
    application_time: float,
    fess: float,
    *,
    ret_dict=False,
    ret_text=False,
) -> Union[tuple, dict, str]:
    """
    Function to apply compound interest.

    >>> from snakypy import helpers
    >>> helpers.calcs.compound_interest(2455, 12, 1)
    {'amount': 2766.36, 'fess': 311.36}
    >>> helpers.calcs.compound_interest(2455, 12, 1, ret_text=True)
    'The amount was: $ 2766.36. The fees were: $ 311.36.'

    Args:
        capital (float): Capital value
        application_time (float): Time of application
        fess (float): Value of fees
        ret_dict (bool): If it is True returns in the dictionary form
        ret_text (bool): If it is True returns in the text form

    Returns:
        Returns a dictionary or a string or both.
    """
    amount = capital * ((1 + fess / 100) ** application_time)
    fess_value = amount - capital
    format_text = f"The amount was: $ {amount:.2f}. The fees were: $ {fess_value:.2f}."
    format_dict = {"amount": float(f"{amount:.2f}"), "fess": float(f"{fess_value:.2f}")}
    if ret_dict and ret_text:
        return format_dict, format_text
    elif ret_text:
        return format_text
    elif ret_dict:
        return format_dict
    return format_dict
a70b75c867fc1795e9f2491751b6a93e2bea92ec
694,924
import torch


def _edge_error(y, y_target, mask):
    """
    Helper method to compute edge errors.

    Args:
        y: Edge predictions (batch_size, num_nodes, num_nodes)
        y_target: Edge targets (batch_size, num_nodes, num_nodes)
        mask: Edges which are not counted in error computation
            (batch_size, num_nodes, num_nodes)

    Returns:
        err: Mean error over batch
        err_idx: One-hot array of shape (batch_size) - 1s correspond to
            indices which are not perfectly predicted
    """
    # Compute equalities between pred and target
    acc = (y == y_target).long()
    # Multiply by mask => set equality to 0 on disconnected edges
    acc = (acc * mask)
    # Get accuracy of each y in the batch
    # (sum of 1s in acc_edges divided by sum of 1s in edges mask)
    acc = (acc.sum(dim=1).sum(dim=1).to(dtype=torch.float)
           / mask.sum(dim=1).sum(dim=1).to(dtype=torch.float))
    # Compute indices which are not perfect
    err_idx = (acc < 1.0)
    # Take mean over batch
    acc = acc.sum().to(dtype=torch.float).item() / acc.numel()
    # Compute error
    err = 1.0 - acc
    return err, err_idx
900ce7bcf61a0f40cc9a36dd930a72c7d6f9cc51
694,926
import re


def get_url_jk_part(urlpart):
    """given a url substring containing the jk id from the job detail page -
    strip out the jk id and return it

    string urlpart: a url substring containing the jk id
    returns (str): the jk id param only e.g. jk=886c10571b6df72a which can be
    appended to a working job detail url
    """
    jkrx = re.compile(r'jk=(\w+)', re.I)
    m = jkrx.search(urlpart)
    if not m:
        print('could not find jk part of url')
        return None
    # print('matched string is ', m.groups()[0])
    jkpart = m.groups()[0]
    return jkpart
97f1e5366f7ded9da84793fc39dbff965e3381d5
694,927
def unique_nonzero(ngb):
    """Return unique non-zero values from vector."""
    uni = list()
    for n in ngb:
        if n > 0 and n not in uni:
            uni.append(n)
    return uni
88ab425a2e8fa85a733fcd0887772219beae0403
694,931
def is_current_connection(context, connection):
    """Returns True if the named connection is the current default connection"""
    if context.pywbem_server_exists():
        current_connection = context.pywbem_server
    else:
        current_connection = None
    if current_connection and current_connection.name == connection.name:
        return True
    return False
44cf36d4d389e9416ed469d76b63e46241c3eece
694,937
def to_dero(value):
    """Convert number in smallest unit to number in dero"""
    return value / 10**12
b4a48ca750ec9f40c826d5dec3eb6209b54ccf24
694,945
def get_absolute_import_name(dir_path: str, import_name: str) -> str:
    """Joins a relative import path with an import name."""
    return f'{dir_path}:{import_name}'
5cebf043df64b2f0160f5d7230c2a1eca94ce7a8
694,946
def install_start_end_marker(name: str, length: float) -> str:
    """Method to add start and end marker to the lattice.

    Parameters
    ----------
    name : str
        lattice name
    length : float
        length of the lattice

    Returns
    -------
    str
        MADX install string.
    """
    # define start and end marker
    text = "{:12}: {:12};\n".format("MSTART", "MARKER")
    text += "{:12}: {:12};\n\n".format("MEND", "MARKER")
    # start sequence edit
    text += "USE, SEQUENCE={};\n".format(name)
    text += "SEQEDIT, SEQUENCE = {}; \nFLATTEN;\n".format(name)
    # install start and end marker
    line = "INSTALL, ELEMENT = {:16}, AT = {:12.6f};\n".format("MSTART", 0.00000)
    text += line
    line = "INSTALL, ELEMENT = {:16}, AT = {:12.6f};\n".format("MEND", length)
    text += line
    # end sequence edit
    text += "FLATTEN;\nENDEDIT;"
    return text
58dd4a789d4a12efdc454741ac8c9cafc40e22fe
694,947
import torch


def remove_edge_cells(mask: torch.Tensor) -> torch.Tensor:
    """
    Removes cells touching the border

    :param mask: (B, X, Y, Z)
    :return: mask (B, X, Y, Z)
    """
    left = torch.unique(mask[:, 0, :, :])
    right = torch.unique(mask[:, -1, :, :])
    top = torch.unique(mask[:, :, 0, :])
    bottom = torch.unique(mask[:, :, -1, :])
    cells = torch.unique(torch.cat((left, right, top, bottom)))
    for c in cells:
        if c == 0:
            continue
        mask[mask == c] = 0
    return mask
c5370d9f8519ba4e0b76c3069f72601f3c0e90f4
694,952
def strategy(history, memory):
    """
    Orannis's punitive detective: Cooperate but when the other player defects,
    cooperate one more turn to see if they defect again. If they do, defect
    for 10 turns. Cooperate twice more and if they defect the second time,
    defect forever.

    memory is a tuple of (state, counter) where state is one of:
        "initial_cooperation"
        "first_punishment"
        "second_cooperation"
        "final_punishment"
    """
    num_rounds = history.shape[1]
    if memory is None or memory[0] == "initial_cooperation":
        # If they defected twice in a row, transition to first punishment
        if num_rounds >= 2 and history[1, -1] == 0 and history[1, -2] == 0:
            return 0, ("first_punishment", 9)
        # Otherwise keep cooperating
        return 1, ("initial_cooperation", 0)
    elif memory[0] == "first_punishment":
        # Punish until the counter runs out
        if memory[1] > 0:
            return 0, ("first_punishment", memory[1] - 1)
        # Once done, transition to second cooperation
        else:
            return 1, ("second_cooperation", 0)
    elif memory[0] == "second_cooperation":
        # If they defected twice in a row, transition to final punishment
        if num_rounds >= 2 and history[1, -1] == 0 and history[1, -2] == 0:
            return 0, ("final_punishment", 0)
        # Otherwise keep cooperating
        return 1, ("second_cooperation", 0)
    elif memory[0] == "final_punishment":
        return 0, ("final_punishment", 0)
6d3b4a1b7029a8eb43eac935531603cff7c865dc
694,954
def main(*, left, right):
    """entrypoint function for this component

    Usage example:
    >>> main(left = pd.Series(
    ...     {
    ...         "2019-08-01T15:20:12": 1.2,
    ...         "2019-08-01T15:44:12": None,
    ...         "2019-08-03T16:20:15": 0.3,
    ...         "2019-08-05T12:00:34": 0.5,
    ...     }
    ... ),
    ...     right = pd.Series(
    ...     {
    ...         "2019-08-01T15:20:12": 1.0,
    ...         "2019-08-01T15:44:12": 27,
    ...         "2019-08-03T16:20:15": 3.6,
    ...         "2020-08-05T12:00:34": 17,
    ...         "2021-08-05T12:00:34": None,
    ...     }
    ... ),
    ... )["result"]
    2019-08-01T15:20:12     True
    2019-08-01T15:44:12    False
    2019-08-03T16:20:15    False
    2019-08-05T12:00:34    False
    2020-08-05T12:00:34    False
    2021-08-05T12:00:34    False
    dtype: bool
    """
    # ***** DO NOT EDIT LINES ABOVE *****
    # write your function code here.
    try:
        return {"result": left > right}
    except ValueError:
        return {"result": left.gt(right)}
4656d789a5a705fee7d6e552a64542e22ab1e73e
694,957
def _extract_jinja_frames(exc_tb) -> str:
    """
    Extract all the frames in the traceback that look like jinja frames

    Returns:
        A multiline string with a formatted traceback of all the Jinja
        synthetic frames or an empty string if none were found.
    """
    lines = []
    while exc_tb:
        code = exc_tb.tb_frame.f_code
        if code.co_name in (
            "template",
            "top-level template code",
        ) or code.co_name.startswith("block "):
            lines.append(f"  at {code.co_filename}:{exc_tb.tb_lineno}")
        exc_tb = exc_tb.tb_next
    return "\n".join(lines)
170ede7e5fed4292c9e375555b6152ea4c8927bd
694,960
import mmap


def get_lines(file_path):
    """
    Return an integer representing the number of lines in the given file

    :param file_path: Path to the given file
    :return: The number of lines in a file
    """
    with open(file_path, 'r+') as file:
        line_count = 0
        buffer = mmap.mmap(file.fileno(), 0)
        readline = buffer.readline
        while readline():
            line_count += 1
        return line_count
9f135ded40890a62fd99a24ee72943d12b21f6e8
694,961
def load_data(filename):
    """Open a text file of numbers & turn contents into a list of integers."""
    with open(filename) as f:
        lines = f.read().strip().split('\n')
        return [int(i) for i in lines]
2baf679166eb1ee36f2b36c3e18f4f1d6a5272d9
694,964
import math


def _compute_page(offset: int, items_per_page: int) -> int:
    """Compute the current page number based on offset.

    Args:
        offset (int): The offset to use to compute the page.
        items_per_page (int): Number of items per page.

    Returns:
        int: The page number.
    """
    return int(math.ceil((int(offset) + 1) / int(items_per_page)))
94a0a0c18b8090cf0a1a8ac3eacdc2bcff6643b6
694,967
def dim_returns(k, inverse_scale_factor):
    """
    A simple utility calculation method. Given k items in possession, return
    the benefit of a (k + 1)th item given some inverse scale factor. The
    formula used is utility = 100% if no items are in possession, or
    utility = 1 / (inverse_scale_factor * (k + 1)) otherwise.
    """
    if k == 0:
        return 1
    return 1 / (inverse_scale_factor * (k + 1))
05d8b9cfe690737fc03b2d2fa9410bafae06bd2b
694,968
def _filter_annotations(annotations, image, small_object_area_threshold,
                        foreground_class_of_interest_id):
    """Filters COCO annotations to visual wakewords annotations.

    Args:
        annotations: dicts with keys: {
            u'objects': [{u'id', u'image_id', u'category_id', u'segmentation',
                          u'area', u'bbox' : [x,y,width,height], u'iscrowd'}]
        }
        Notice that bounding box coordinates in the official COCO dataset are
        given as [x, y, width, height] tuples using absolute coordinates where
        x, y represent the top-left (0-indexed) corner.
        image: dict with keys: [u'license', u'file_name', u'coco_url',
            u'height', u'width', u'date_captured', u'flickr_url', u'id']
        small_object_area_threshold: threshold on fraction of image area below
            which small objects are filtered
        foreground_class_of_interest_id: category of COCO dataset which visual
            wakewords filters

    Returns:
        annotations_filtered: dict with keys: {
            u'objects': [{"area", "bbox" : [x,y,width,height]}],
            u'label',
        }
    """
    objects = []
    image_area = image['height'] * image['width']
    for annotation in annotations['objects']:
        normalized_object_area = annotation['area'] / image_area
        category_id = int(annotation['category_id'])
        # Filter valid bounding boxes
        if category_id == foreground_class_of_interest_id and \
                normalized_object_area > small_object_area_threshold:
            objects.append({
                u'area': annotation['area'],
                u'bbox': annotation['bbox'],
            })
    label = 1 if objects else 0
    return {
        'objects': objects,
        'label': label,
    }
3b0fb8d97b805808bc422cc68a6957102e494e69
694,970
import math


def _rotate(point, angle, origin=(0, 0), unit='degree'):
    """Rotate a point counterclockwise by a given angle around a given origin.

    Angle can be both in radian or degree. Helper function for rotating a
    layout.

    Parameters
    ----------
    point : tuple
        position in (x,y) form
    angle : float
        angle to rotate the point
    origin : tuple in (x,y) form
        point will rotate with respect to the origin.
    unit : 'degree'/'radian'
        to indicate if the angle is in degrees or radians.
        if given in degrees angle is converted to radians.

    Returns
    -------
    tuple
        rotated point as (x,y) tuple.
    """
    ox, oy = origin
    px, py = point
    if unit == 'degree':
        angle = math.radians(angle)
    if unit == 'radian':
        angle = angle
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    return qx, qy
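Rotating (1, 0) by 90 degrees about the origin should land on (0, 1), up to floating-point error:

qx, qy = _rotate((1, 0), 90)
print(round(qx, 9), round(qy, 9))  # 0.0 1.0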
ec01d3fc7038b2846b1f33626fa1a33b2b23968f
694,971
def exists(env):
    """ Check if `cscope` command is present """
    return env['CSCOPE'] if 'CSCOPE' in env else None
f8feae0af16efbcbef1cce57a622d72f4e92a7b0
694,972
def _splitNameByAbbreviations(parts):
    """
    If a name contains abbreviations, then split so that first name is
    abbreviated parts and last name is parts that are not abbreviated.

    :param parts: Individual words in the name
    :raises: ValueError if parts cannot be partitioned into given and family name
    :returns: Two values: Given name, Family name
    """
    if len(parts[0]) == 1 or parts[0].endswith('.'):
        for i in range(1, len(parts)):
            if len(parts[i]) > 1 and not parts[i].endswith('.'):
                return ' '.join(parts[:i]), ' '.join(parts[i:])
    raise ValueError('Could not split name on abbreviations')
c3ca4655fc15801ad57f88629e8f5ad5e3c7f530
694,973
import torch


def log1p_exp(x):
    """
    Computationally stable function for computing log(1+exp(x)).
    """
    x_ = x * x.ge(0).to(torch.float32)
    res = x_ + torch.log1p(torch.exp(-torch.abs(x)))
    return res
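The stabilization pulls the positive part of x out front so the exponential only ever sees non-positive arguments; a naive softplus overflows where this version stays finite:

import torch

x = torch.tensor([1000.0, -1000.0, 0.0])
print(torch.log1p(torch.exp(x)))  # tensor([inf, 0.0000, 0.6931]) -- naive version overflows
print(log1p_exp(x))               # tensor([1000.0000, 0.0000, 0.6931])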
e3a8cadd89a53af44254e1f28f61998125238dfc
694,974
def ape(accounts):
    """Get account."""
    return accounts[0]
62edac0af9e3a45cdac6d179b3835698dcced587
694,975
def binary_search_iter(sorted_nums, target):
    """Binary search in sorted list by iteration.

    Time complexity: O(logn).
    Space complexity: O(1).
    """
    # Edge case.
    if len(sorted_nums) == 0:
        return False
    # Compare middle number and iteratively search left or right part.
    left, right = 0, len(sorted_nums) - 1
    while left < right:
        mid = left + (right - left) // 2
        if sorted_nums[mid] == target:
            return True
        elif sorted_nums[mid] < target:
            left = mid + 1
        else:
            right = mid - 1
    # Final check when left = right.
    if sorted_nums[left] == target:
        return True
    else:
        return False
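Example searches against a small sorted list:

nums = [1, 3, 5, 7, 11]
print(binary_search_iter(nums, 7))  # True
print(binary_search_iter(nums, 4))  # False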
c0a5a02a6ef128dce44bcc6833bfbb281b5fb7c9
694,977
def get_uplink_downlink_count(duthost, tbinfo):
    """
    Retrieves uplink and downlink count from DEVICE_NEIGHBOR_METADATA based on topology

    Args:
        duthost: DUT host object
        tbinfo: information about the running testbed

    Returns:
        uplink count, downlink count
    """
    config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
    device_neighbor_metadata = config_facts['DEVICE_NEIGHBOR_METADATA']
    topo = tbinfo['topo']['name']
    if "t1" in topo:
        spine_router_count = 0
        tor_router_count = 0
        for neighbor in device_neighbor_metadata.keys():
            neighbor_data = device_neighbor_metadata[neighbor]
            if neighbor_data['type'] == "SpineRouter":
                spine_router_count += 1
            elif neighbor_data['type'] == "ToRRouter":
                tor_router_count += 1
        return spine_router_count, tor_router_count
    elif "t0" in topo:
        leaf_router_count = 0
        server_count = 0
        for neighbor in device_neighbor_metadata.keys():
            neighbor_data = device_neighbor_metadata[neighbor]
            if neighbor_data['type'] == "LeafRouter":
                leaf_router_count += 1
            elif neighbor_data['type'] == "Server":
                server_count += 1
        return leaf_router_count, server_count
73349db69fe403356b2a27b79f3cb0467235973a
694,981
import socket


def is_valid_ipv6(ipv6_str):
    """Check if the input ipv6_str is a valid ipv6 address

    Returns:
        True/False
    """
    try:
        socket.inet_pton(socket.AF_INET6, ipv6_str)
        return True
    except OSError:
        return False
17116c17caa4909a792040ebea74e5dd7b1741ca
694,986
def convert_seconds_to_str(sec: float):
    """Returns a str representing a number of seconds"""
    msg = ""
    sec = round(sec)
    years = sec // 31536000
    if years != 0:
        msg += str(int(years)) + "y "
        sec -= years * 31536000
    days = sec // 86400
    if days != 0:
        msg += str(int(days)) + "d "
        sec -= days * 86400
    hours = sec // 3600
    if hours != 0:
        msg += str(int(hours)) + "h "
        sec -= hours * 3600
    minutes = sec // 60
    sec -= minutes * 60
    if minutes != 0:
        msg += str(int(minutes)) + "m "
    if sec != 0:
        msg += str(int(sec)) + "s "
    return msg[:-1]
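For example, 90061 seconds is one day, one hour, one minute, and one second:

print(convert_seconds_to_str(90061))  # "1d 1h 1m 1s"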
6e575b4b5d9641436330600cda8d8b71be2fe9e0
694,988
def remove_contained(a):
    """
    remove contained intervals

    :param a: list of tuples (start, end, header)
    :return: intervals with contained intervals removed
    """
    o = []
    a = sorted(a, key=lambda x: (x[0], -x[1]))
    max_end = -1
    for i in a:
        if i[1] > max_end:
            max_end = i[1]
            o.append(i)
    return o
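Because the list is sorted by start ascending and end descending, any interval whose end does not exceed the running maximum is contained in an earlier one:

intervals = [(0, 10, "x"), (2, 5, "y"), (8, 12, "z")]
print(remove_contained(intervals))
# [(0, 10, 'x'), (8, 12, 'z')] -- (2, 5, 'y') lies inside (0, 10, 'x')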
36c3473e6fbc5de2c788662e21f67ff89add5610
694,989
def unflatten_dict(d_flat):
    """Unflattens single-level-tuple-keyed dict into dict"""
    result = type(d_flat)()
    for k_tuple, v in d_flat.items():
        d_curr = result
        for i, k in enumerate(k_tuple):
            if i == len(k_tuple) - 1:
                d_curr[k] = v
            elif k not in d_curr:
                d_curr[k] = type(d_flat)()
            d_curr = d_curr[k]
    return result
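An illustrative round trip from tuple keys to a nested dict:

flat = {("a", "b"): 1, ("a", "c"): 2, ("d",): 3}
print(unflatten_dict(flat))
# {'a': {'b': 1, 'c': 2}, 'd': 3}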
9beee326a40f323ea123028de39da594cd237385
694,990
def _is_string_same_case(input: str):
    """Returns flag indicating whether input string is a single case."""
    return input == input.lower() or input == input.upper()
93e8a41859cf8e6e6d871c787c18519c51ab5a4d
694,991
def rank_freq(hist):
    """Returns a list of (rank, freq) tuples.

    hist: map from word to frequency

    returns: list of (rank, freq) tuples
    """
    # sort the list of frequencies in decreasing order
    freqs = list(hist.values())
    freqs.sort(reverse=True)
    # enumerate the ranks and frequencies
    rf = [(r + 1, f) for r, f in enumerate(freqs)]
    return rf
fc7731190ce44464526d9eda40cda235e07d0985
694,993
def get_subs(relativize_fn, links):
    """ Return a list of substitution pairs, where the first item is the
    original string (link) and the second item is the string to replace it
    (relativized link). Duplicate subs are filtered out."""
    subs = ((l, relativize_fn(l)) for l in links)
    subs = filter(lambda p: p[0] != p[1], subs)  # filter out no-op substitutions
    return list(subs)
f22b7fd13cf95ac2860c42d0f72b3fed4977d6cb
694,996
from typing import Any, Dict, List


def list_dict_swap(v: List[Dict[Any, Any]]) -> Dict[Any, List[Any]]:
    """Convert list of dicts to a dict of lists.

    >>> list_dict_swap([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) == {'a': [1, 3], 'b': [2, 4]}
    True
    """
    return {k: [dic[k] for dic in v] for k in v[0]}
7953a19dfaa326297e666b0f378540c90b079989
695,004
def _parse_metal_choice(s, max_value):
    """
    Parse user options provided in ``detect_metal_options``.

    The syntax is <position>[:<new name>], using spaces to choose several
    ones. <new name> can only be 3-letters max and should not collide with
    existing Amber types. This is not checked, so be careful!

    If you choose several ones, they are considered part of the same metal
    center! Do not use it for unrelated ions; instead run the script several
    times and use the step 1n.

    For example:

    - 0           # would select the first one (default), without renaming
    - 0:ZN1       # select first one with a new name (ZN1)
    - 0:ZN1 1:ZN2 # select first and second with new names

    Parameters
    ==========
    s : str

    Return
    ======
    list of (index, name)
        name can be None
    """
    if not s:
        return [(0, None)]
    result = []
    for selection in s.split():
        name = None
        fields = selection.split(':')
        if len(fields) == 1:
            name = None
        elif len(fields) == 2 and 0 < len(fields[1]) <= 3:
            name = fields[1]
        else:
            raise ValueError(' !!! Wrong syntax!')
        index = int(fields[0])
        if index < 0 or index >= max_value:
            raise ValueError(' !!! Index must be within 0 and ' + str(max_value))
        result.append((index, name))
    return result
0ad06942f782e5362dc8c9f530194320cbbb3a28
695,005
def invert_graph(graph):
    """
    Inverts a directed graph.

    :param graph: the graph, represented as a dictionary of sets
    :type graph: dict(set)
    :return: the inverted graph
    :rtype: dict(set)
    """
    inverted = {}
    for key in graph:
        for value in graph[key]:
            inverted.setdefault(value, set()).add(key)
    return inverted
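For example, inverting the edges a->b, a->c, b->c:

graph = {"a": {"b", "c"}, "b": {"c"}}
print(invert_graph(graph))
# {'b': {'a'}, 'c': {'a', 'b'}}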
943337d278107d520dde6a724c89016cd1452bee
695,006
def qobjects(cls_target, oid, foreign_name):
    """
    Get queryset objects based on `cls_target` given the object/id `oid` and
    the `foreign_name` is equal.
    """
    if isinstance(oid, int):
        fetcher = 'id'
    else:
        fetcher = 'in'
    kwargs = {'{0}__{1}'.format(foreign_name, fetcher): oid}
    return cls_target.objects.filter(**kwargs)
dc5c46e0aee6b65e41add750a40c9d10afd88235
695,019
def is_classic_netcdf(file_buffer):
    """
    Returns True if the contents of the byte array matches the magic number
    in netCDF files

    :param str file_buffer: Byte-array of the first 4 bytes of a file
    """
    # CDF.
    if file_buffer == b"\x43\x44\x46\x01":
        return True
    return False
dbb55b582526ad90132f4ef6d9485907e621ceed
695,020
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods.

    For example, trim_version("1.2.3", 2) will return "1.2".
    """
    if type(version) is not str:
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")
    parts = version.split(".")
    trimmed = ".".join(parts[:num_parts])
    return trimmed
5f23485c914a695bfb652fbc8cccb738147a8e7b
695,025
def is_vowel_sign_offset(c_offset):
    """ Is the offset a vowel sign (maatraa) """
    return (c_offset >= 0x3e and c_offset <= 0x4c)
eaee5757d80e8775c802bc0136d34733dc955610
695,038
def custom404handler(err):
    """Custom handler for 404 errors."""
    return dict(err=err)
b1771a98d95903214e96bd6dfa2055c62c783af1
695,039
import math


def degrees(rad_angle):
    """Converts any angle in radians to degrees.

    If the input is None, then it returns None.
    For numerical input, the output is mapped to [-180,180]
    """
    if rad_angle is None:
        return None
    angle = rad_angle * 180 / math.pi
    while angle > 180:
        angle = angle - 360
    while angle < -180:
        angle = angle + 360
    return angle
e889ff09cf0bccc36beabc3451a9b64730f8e9e6
695,041
import re


def remove_log_text(text):
    """ Remove log information from the given string. """
    # The pattern basically searches for anything following ".cpp:[0-9]{1-4}] "
    pattern = (r"(?:(?<=\.cpp:[0-9]] )|(?<=\.cpp:[0-9][0-9]] )"
               r"|(?<=\.cpp:[0-9][0-9][0-9]] )|(?<=\.cpp:[0-9][0-9][0-9][0-9]] )).+")
    match = re.search(pattern, text)
    return text if match is None else match.group(0)
8ec14757aae16d1de7c2651faea0f927bb324593
695,042
import io


def make_pdf_from_image_array(image_list):
    """Make a pdf given an array of Image files

    :param image_list: List of images
    :type image_list: list
    :return: pdf_data
    :type pdf_data: PDF as bytes
    """
    with io.BytesIO() as output:
        image_list[0].save(
            output,
            "PDF",
            resolution=100.0,
            save_all=True,
            append_images=image_list[1:],
        )
        pdf_data = output.getvalue()
    return pdf_data
333c3e3475e345eeee7bb3ec17291db9abf990b2
695,043
def get_primary_label(topic, primary_labels):
    """Function that returns the primary (preferred) label for a topic, if
    this topic belongs to a cluster.

    Args:
        topic (string): Topic to analyse.
        primary_labels (dictionary): It contains the primary labels of all
            the topics belonging to clusters.

    Returns:
        topic (string): primary label of the analysed topic.
    """
    try:
        topic = primary_labels[topic]
    except KeyError:
        pass
    return topic
58bda0113e6199e609d6160cf6c689d6f9c9f95e
695,046
def find_or_create_location(location, dbh):
    """Find or create the location"""
    cur = dbh.cursor()
    cur.execute('select location_id from location where name = %s', (location, ))
    res = cur.fetchone()
    location_id = 0
    if res is None:
        print(f'Loading location "{location}"')
        cur.execute('insert into location (name) values (%s)', (location, ))
        location_id = cur.lastrowid
        dbh.commit()
    else:
        location_id = res[0]
    return location_id
363c41a94cdacb5a4c241483917c8523159f574f
695,047
def indent(string):
    """indent string

    >>> indent('abc\\ndef\\n')
    '\\tabc\\n\\tdef\\n'
    """
    return '\n'.join(
        '\t' + line if line else ''
        for line in string.split('\n')
    )
e1e88eda9ae0e9ad11a9eb68c8b51302bbbc5ab6
695,049
def scrabble(wort):
    """
    Computes the Scrabble score of a German word.

    :param wort: The word whose score is computed (must be lowercase)
    :return: the corresponding Scrabble score
    """
    werte = {"a": 1, "b": 3, "c": 4, "d": 1, "e": 1, "f": 4, "g": 2, "h": 2,
             "i": 1, "j": 6, "k": 4, "l": 2, "m": 3, "n": 1, "o": 2, "p": 4,
             "q": 10, "r": 1, "s": 1, "t": 1, "u": 1, "v": 6, "w": 3, "x": 8,
             "y": 10, "z": 3, "ä": 6, "ö": 8, "ü": 6}
    summe = 0
    for buchstabe in wort:
        summe += werte[buchstabe]
    return summe
1865dea59c3ff8f391975d4ee59e5c501c8d3781
695,050
def _days_before_year(year):
    """year -> number of days before January 1st of year."""
    y = year - 1
    return y * 365 + y // 4 - y // 100 + y // 400
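The y // 4 - y // 100 + y // 400 term implements the Gregorian leap-year rule (every 4th year, except centuries, except every 400th year); for example, the gap between 2000 and 2001 includes the leap day of the year 2000:

print(_days_before_year(1))  # 0
print(_days_before_year(2001) - _days_before_year(2000))  # 366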
5070c747c0e28235e51dadd107de77cedfca2886
695,051
import math


def point_distance(p1, p2, squared=False):
    """
    Calculate the Euclidean distance of two points (squared if ``squared``
    is True).

    Parameters
    ----------
    p1 : associative array
        first point
    p2 : associative array
        second point

    Returns
    -------
    float

    Examples
    --------
    >>> point_distance({'x': 0, 'y': 0}, {'x': 3, 'y': 4})
    5.0
    >>> '%.2f' % point_distance({'x': 0, 'y': 0}, {'x': 1, 'y': 22})
    '22.02'
    """
    dx = p1["x"] - p2["x"]
    dy = p1["y"] - p2["y"]
    if squared:
        return (dx * dx + dy * dy)
    else:
        return math.sqrt(dx * dx + dy * dy)
28e13856380783eab469a10b01ebd2769422ff1b
695,052
def path_starts_with(path, prefix):
    """Test whether the path starts with another path.

    >>> path_starts_with([1], [1])
    True
    >>> path_starts_with([1, 2], [1])
    True
    >>> path_starts_with([2], [1])
    False
    >>> path_starts_with([1,2,3], [1,2,3])
    True
    >>> path_starts_with([1,2,3], [1,2])
    True
    >>> path_starts_with(
    ...     ('\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01',
    ...      '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x03',
    ...      '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02',
    ...      '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'),
    ...     ('\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01',
    ...      '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x03',
    ...      '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02'))
    True
    """
    return path[:len(prefix)] == prefix
dd2fae5ff605ab5b0c9ff49aaa846fb287e58270
695,057
def to_string(quantity_or_item: str) -> str:
    """ Returns the same string. """
    return quantity_or_item
e1828ed0ddd6ea3f2db1140a2da2a94965df84b7
695,058
import requests


def tmdb_data_for_id(tmdb_id: int, tmdb_api_token: str) -> dict:
    """
    Get additional information for a movie for which you already have the ID

    Args:
        tmdb_id (int): the ID for a movie on The Movie Database
        tmdb_api_token (str): your tmdb v3 api token

    Returns:
        dict
    """
    url = f"https://api.themoviedb.org/3/movie/{tmdb_id}?"
    params = {
        'language': 'en-US',
        'api_key': tmdb_api_token,
    }
    return requests.get(url, params).json()
dbac37d7969e85138de18a24f1caf963cc7d44da
695,069