content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import uuid
def generate_dcos_engine_template(
    linux_ssh_public_key: str,
    num_masters: int,
    master_vm_size: str,
    num_windows_private_agents: int,
    windows_private_vm_size: str,
    num_windows_public_agents: int,
    windows_public_vm_size: str,
    num_linux_private_agents: int,
    linux_private_vm_size: str,
    num_linux_public_agents: int,
    linux_public_vm_size: str,
    windows_admin_user: str,
    windows_admin_password: str,
    linux_admin_user: str,
    dcos_engine_orchestrator_release: str,
):
    """Build the cluster-definition dict handed to dcos-engine.

    A short random suffix is appended to every DNS prefix so repeated
    deployments do not collide.
    """
    # First 8 hex chars of a UUID4 plus a fixed 'dcos' marker.
    unique_id = str(uuid.uuid4())[:8] + 'dcos'
    agent_pools = [
        {
            "name": "wpub",
            "count": num_windows_public_agents,
            "vmSize": windows_public_vm_size,
            "osType": "Windows",
            "dnsPrefix": "wpub" + unique_id,
            "ports": [80, 443, 8080, 3389]
        },
        {
            "name": "wpri",
            "count": num_windows_private_agents,
            "vmSize": windows_private_vm_size,
            "osType": "Windows",
        },
        {
            "name": "linpub",
            "count": num_linux_public_agents,
            "vmSize": linux_public_vm_size,
            "osType": "Linux",
            "dnsPrefix": "linpub" + unique_id,
            "ports": [80, 443, 22]
        },
        {
            "name": "linpri",
            "count": num_linux_private_agents,
            "vmSize": linux_private_vm_size,
            "osType": "Linux"
        }
    ]
    properties = {
        "orchestratorProfile": {
            "orchestratorType": "DCOS",
            "orchestratorRelease": dcos_engine_orchestrator_release
        },
        "masterProfile": {
            "count": num_masters,
            "dnsPrefix": "master" + unique_id,
            "vmSize": master_vm_size
        },
        "agentPoolProfiles": agent_pools,
        "windowsProfile": {
            "adminUsername": windows_admin_user,
            "adminPassword": windows_admin_password
        },
        "linuxProfile": {
            "adminUsername": linux_admin_user,
            "ssh": {
                "publicKeys": [
                    {
                        "keyData": linux_ssh_public_key
                    }
                ]
            }
        }
    }
    return {"apiVersion": "vlabs", "properties": properties}
|
221703cb1c1c31eb7f4f951c92fe50a76932e60e
| 40,521 |
def ll_intersection(A, B, P, Q):
    """Intersection of the two (infinite) lines through A-B and P-Q.

    Returns (0.0, 0.0) when the lines are parallel or degenerate, so the
    caller cannot distinguish that case from a genuine origin intersection.
    """
    dx1, dy1 = A[0] - B[0], A[1] - B[1]
    dx2, dy2 = P[0] - Q[0], P[1] - Q[1]
    det = dx1 * dy2 - dy1 * dx2
    if det == 0:
        return 0.0, 0.0
    cross_ab = A[0] * B[1] - B[0] * A[1]
    cross_pq = P[0] * Q[1] - Q[0] * P[1]
    return (cross_ab * dx2 - dx1 * cross_pq) / det, (cross_ab * dy2 - dy1 * cross_pq) / det
|
96ffca17213b5fbc4ebd9fac338d4de30dd423a7
| 40,524 |
import json
def output_generic_result_lazily(out_file, name, retrieve_result):
    """Return a callable that, when invoked, writes a result to ``out_file``.

    Nothing is written at construction time.  Calling the returned function
    evaluates ``retrieve_result`` and appends one JSON document (JSON Lines
    style: one document per line) of the form ``{name: result}``.

    :param out_file: file handle to write data to.
    :param name: key under which the result is stored in the document.
    :param retrieve_result: zero-argument callable producing the data;
        evaluated only when the returned function is called.
    """
    def write_result():
        payload = {name: retrieve_result()}
        json.dump(payload, fp=out_file)
        # Terminate the document so the file is valid JSON Lines.
        out_file.write('\n')
    return write_result
|
a208da86f71df0a3a9a9e47be4ecb13b7b265ec4
| 40,528 |
def unindent(text, skip1=False):
    """Remove leading spaces that are present in all lines of ``text``.
    Parameters
    ----------
    text : str
        The text from which leading spaces should be removed.
    skip1 : bool
        Ignore the first line when determining number of spaces to unindent,
        and remove all leading whitespaces from it.
    """
    # count leading whitespaces
    # NOTE: skip1 (a bool) is deliberately used as an int index below:
    # True slices off the first line, False keeps all lines.
    lines = text.splitlines()
    ws_lead = []
    for line in lines[skip1:]:
        len_stripped = len(line.lstrip(' '))
        if len_stripped:  # blank / space-only lines don't constrain the indent
            ws_lead.append(len(line) - len_stripped)
    # bool-as-int again: need >0 measured lines normally, >1 when skip1 —
    # presumably to avoid unindenting based on a single continuation line.
    if len(ws_lead) > skip1:
        rm = min(ws_lead)
        if rm:
            if skip1:
                # Re-indent the stripped first line by rm so the uniform
                # cut below leaves it flush left.
                lines[0] = ' ' * rm + lines[0].lstrip()
            text = '\n'.join(line[rm:] for line in lines)
    return text
|
c787f5176b7b38ab5e6caec5175c4de3bbf1bbf5
| 40,529 |
def rc(seq):
    """Return the reverse complement of a DNA sequence over A/C/G/T/N.

    Raises KeyError for any other character (including lowercase).
    """
    comp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    return ''.join(comp[base] for base in reversed(seq))
|
15317f9178e8aae8255014756c58b928c305308c
| 40,534 |
def get_snapshot_id(snapshot):
    """Return the backend snapshot id.

    A managed snapshot stores the real backend id in ``provider_location``;
    fall back to the Cinder ``id`` when that field is empty/None.
    """
    provider_location = snapshot['provider_location']
    if provider_location:
        return provider_location
    return snapshot['id']
|
806132d56e3ba617ffba299a5f766a6c017d7caa
| 40,536 |
def get_pk_query_name(model):
    """Return '<db_table>.<pk column>' for a Django model."""
    meta = model._meta
    return '%s.%s' % (meta.db_table, meta.pk.column)
|
34549b6ae93e12c9a613377419935d5c26e173e4
| 40,537 |
def bool_filter(val):
    """
    Map true/false-like strings to a Python bool; pass anything else through.
    """
    if val in ('True', 'TRUE', 'true', 'yes', 'Yes'):
        return True
    if val in ('False', 'FALSE', 'false', 'no', 'No'):
        return False
    return val
|
0b71f4b337dedf14b638b7bc2641e365c69d081e
| 40,543 |
import inspect
def steal_signature_from(original_func, *, steal_docstring=True):
    """
    Build a decorator that makes the decorated function advertise the same
    ``inspect.signature`` as ``original_func`` — handy for ``*args, **kwargs``
    wrappers that forward to a known function.

    Example usage::
        >>> def foo(a, b, c, d):
        ...     ...
        >>> @steal_signature_from(foo)
        ... def bar(*args, **kwargs):
        ...     return foo(*args, **kwargs)
        >>> inspect.signature(bar)
        (a, b, c, d)

    Parameters:
        original_func:
            The function whose signature (and optionally docstring) is copied.
        steal_docstring:
            Defaults to True.  When set, ``original_func``'s docstring is
            appended to the decorated function's own docstring.
    """
    def decorator(target):
        """Attach original_func's signature (and docstring) to target."""
        target.__signature__ = inspect.signature(original_func)
        if steal_docstring:
            combined = getattr(target, "__doc__") or ""
            combined += "\n\n"
            combined += inspect.getdoc(original_func) or ""
            target.__doc__ = combined.lstrip()
        return target
    return decorator
|
b3d29181042649d12998e2b81d614c11720e6c9b
| 40,546 |
def rename_header(df):
    """Rename variant-table columns and drop the ``Type`` column:
    Chr -> chr
    Pos -> pos
    Chr_Allele -> ref
    Alternative_Allele -> alt
    Note: ``Type`` is dropped *in place*, so the caller's DataFrame is
    mutated; the rename itself produces a new frame that is returned.
    Raises KeyError if the ``Type`` column is absent.
    Args:
        df (pandas.DataFrame): input frame containing a ``Type`` column.
    Returns:
        df (pandas.DataFrame): renamed frame without ``Type``.
    """
    df.drop("Type", axis=1, inplace=True)
    df = df.rename(
        columns={
            "Chr": "chr",
            "Pos": "pos",
            "Chr_Allele": "ref",
            "Alternative_Allele": "alt",
        }
    )
    return df
|
14104fc5a3e42b99afe18ed21bebf7dcdfece4a2
| 40,547 |
from typing import Dict
from typing import Any
from typing import Optional
def __get(data: Dict[str, Any], key: str, src: Optional[str] = None) -> Any:
    """
    Look up ``key`` in ``data``; on a miss, raise a ValueError naming the
    missing key and the configuration section it was expected in.

    :param data: The dictionary from which to get the value.
    :param key: The key name.
    :param src: The configuration section associated with the dictionary.
        ``None`` is reported as 'Configuration'; an empty string suppresses
        the section prefix entirely.
    """
    try:
        return data[key]
    except KeyError:
        section = 'Configuration' if src is None else src
        if section:
            raise ValueError('{}: "{}" is missing'.format(section, key))
        raise ValueError('"{}" is missing'.format(key))
|
263f8e13e28b304cdf50546e0df8c7ed5ae8589e
| 40,551 |
import re
import yaml
def load_recipe_yaml_str_no_classes(recipe_yaml_str: str) -> str:
    """
    Load a SparseML recipe YAML string with every ``!ClassName`` object tag
    rewritten to a plain ``OBJECT.ClassName:`` mapping key, so the result is
    a parameter dictionary instead of constructed objects.

    :param recipe_yaml_str: YAML string of a SparseML recipe
    :return: the parsed YAML with objects represented as parameter dicts
    """
    class_tag = re.compile(r"!(?P<class_name>(?!.*\.)[a-zA-Z_][a-zA-Z^._0-9]+)")
    sanitized = class_tag.sub(r"OBJECT.\g<class_name>:", recipe_yaml_str)
    return yaml.safe_load(sanitized)
|
7b2bff3f55df84fe65da8a67397bb68fb9660ea9
| 40,552 |
def sublist(lst, stopper):
    """Collect strings from ``lst`` until one starts with ``stopper``.

    The stopper element itself is excluded from the result.
    """
    collected = []
    for entry in lst:
        if entry.startswith(stopper):
            return collected
        collected.append(entry)
    return collected
|
c3bd48afd91930b1f9fabdf45fa250390407a744
| 40,553 |
def user_is_resource_reviewer(user):
    """
    Return True when the user belongs to the 'Resource Reviewer' group.
    """
    reviewer_groups = user.groups.filter(name='Resource Reviewer')
    return reviewer_groups.exists()
|
d36e34cd0d02b9df2cf1ed9a265229cc5045a26a
| 40,558 |
def collatz_sequence(initial_word, deletion_number = 2, production_rules = {'a': 'bc', 'b': 'a', 'c': 'aaa'}):
""" Computes a collatz sequence, wherein the words are determined (until length < deletion numbers) by:
1) deleting the first m (deletion number) symbols
2) appending production word P(x) calculated using the first symbol x.
2) appending production word P(x) calculated using the first symbol x.
Args:
initial_word (string): The starting word.
deletion_number (int): The positive integer used to determine deletion and halting.
production_rules (dict): Production rules associating symbols with production words.
"""
word = initial_word
sequence = [initial_word]
while len(word) >= deletion_number:
word = word[deletion_number:] + production_rules[word[0]]
sequence.append(word)
return sequence
|
8979fcfa918fd2accfe26d9d3661dc1fef080e14
| 40,561 |
def cols_to_drop(df, columns):
    """Drop selected columns in place and return the DataFrame.

    Improvement: a single ``drop`` call instead of one full-frame in-place
    drop per column (each in-place drop rewrites the whole frame).
    Note: a missing column now raises KeyError before anything is removed,
    whereas the old loop had already dropped the earlier columns.

    Args:
        df: Pandas DataFrame (mutated in place).
        columns: Iterable of column labels to drop.
    Returns:
        The same DataFrame object, without the dropped columns.
    """
    df.drop(columns=list(columns), inplace=True)
    return df
|
b6a80fd6346473ceaa5f9983853fce25d6408d89
| 40,572 |
def dec_datestamp(datestamp):
    """Decode a 5-character datestamp (as built by makestamp) into the tuple
    (daynumber, timestamp).

    daynumber is None (encoded as 0) or an integer in 1..16777215, packed
    big-endian into the first three characters' ordinals.
    timestamp is None (first time char 255) or an (HOUR, MINUTES) tuple.
    The function 'counttodate' in dateutils turns a daynumber back into a date.
    """
    day_hi, day_mid, day_lo, time0, time1 = (ord(ch) for ch in datestamp[:5])
    daynumber = (day_hi << 16) + (day_mid << 8) + day_lo
    if daynumber == 0:
        daynumber = None
    timestamp = None if time0 == 255 else (time0, time1)
    return daynumber, timestamp
|
b24ca8c75a84a1d33bb445ffd150fccfbdd474f2
| 40,582 |
def get_autocomplete_location_query(qs, q):
    """Prefer name-prefix matches; fall back to substring matches.

    Returns the ``istartswith`` queryset when it has results, otherwise an
    ``icontains`` queryset over the original ``qs``.
    """
    prefix_matches = qs.filter(name__istartswith=q)
    if prefix_matches.exists():
        return prefix_matches
    return qs.filter(name__icontains=q)
|
5f72aee824779af42a51cfee2fbaee907b30e535
| 40,585 |
import random
import math
def rational_sol(*args):
    """
    Returns rational solutions for the linear diophantine equation ax + by = c.

    Improvements over the original: the duplicated random-value expression is
    factored into one helper, and the wasted random draw that happened before
    the zero-coefficient checks was removed.

    Parameters
    ----------
    *args : tuple
        Expects three arguments and optionally a fourth.
        The first three denote a, b and c respectively in ax + by = c.

    Optional Parameters
    -------------------
    fourth argument in *args : int
        The number of solutions wanted.

    Return
    ------
    if fourth argument not given : 1-d array
        one solution in the form [x, y]
    fourth argument given : 2-d array
        a list of n solutions [[x, y], ...]
        (an empty list is returned when a == b == 0, in both forms)

    Raises
    ------
    NotImplementedError
        When the number of arguments is not 3 or 4.
    """
    def _noise(low=1, high=7):
        # Random-looking rational: integer plus a power of 0.2, 2 decimals.
        return round(random.randint(low, high) + math.pow(0.2, random.randint(2, 4)), 2)

    if len(args) == 3:
        a, b, c = args
        if a == 0 and b == 0:
            return []
        if a == 0:
            return [_noise(), c / b]
        if b == 0:
            return [c / a, _noise()]
        # General case: pick x, solve for y (x intentionally left unrounded,
        # matching the original single-solution behaviour).
        x = random.randint(1, 7) + math.pow(0.2, random.randint(2, 4))
        return [x, (c - a * x) / b]
    if len(args) == 4:
        a, b, c, n = args
        if a == 0 and b == 0:
            return []
        if a == 0:
            # Wider integer range kept from the original multi-solution path.
            return [[_noise(1, 1000), c / b] for _ in range(n)]
        if b == 0:
            return [[c / a, _noise()] for _ in range(n)]
        solutions = []
        x = _noise()
        for _ in range(n):
            solutions.append([x, (c - a * x) / b])
            x = round(x + 0.1, 2)
        return solutions
    raise NotImplementedError(
        "Invalid Number Of Arguments"
    )
|
3c34cabbe3eb7cff7c5c75d53471454a32d0a243
| 40,587 |
import random
def rand_sign() -> int:
    """Return -1 or 1, each with probability one half.

    Returns:
        int: randomly generated -1 or 1.
    """
    return -1 if random.random() >= 0.5 else 1
|
4260d7e1c55055ea1ed6e286d609e7ca309a06da
| 40,588 |
def wrap(headr, data):
    """
    Wrap ``data`` in an HTML element named ``headr``.

    Input:
        headr -- text of html field (used verbatim as tag name).
        data -- text to be wrapped.
    Returns the corresponding '<headr>data</headr>' string.
    """
    return "<" + str(headr) + ">" + str(data) + "</" + str(headr) + ">"
|
d956bef0e223d930d5f8b66b76312046a0024d66
| 40,589 |
def get_unique_values_in_column(df, col_name):
    """Collect the distinct values of one column.

    Args:
        df (spark.DataFrame): Dataframe.
        col_name (str): Column name.
    Returns:
        list: the distinct rows for that column (``collect`` materializes
        them on the driver).

    **Examples**

    .. code-block:: python

        df = spark.createDataFrame([("a", 1), ("a", 2), ("c", 3)], ["letters", "numbers"])
        get_unique_values_in_column(df, "letters")
        # [Row(letters='c'), Row(letters='a')]
    """
    distinct_rows = df.select(col_name).distinct()
    return distinct_rows.collect()
|
8dd370afe88cd9a1a5acb3e3cfec1bd4c6fe164c
| 40,590 |
import requests
def get_citeas_apa_citation(resource):
    """
    Fetch the first CiteAs citation for ``resource`` (APA format per the
    CiteAs API default) and return it as a ``{resource: citation}`` dict.

    Performs a network request to api.citeas.org.
    """
    response = requests.get("https://api.citeas.org/product/" + resource)
    first_citation = response.json()["citations"][0]["citation"]
    return {resource: first_citation}
|
f01516b54e80304b3b603470f97cb8fa8189f574
| 40,591 |
def _get_num_components(num_components, num_samples, num_dimensions):
    """Resolve the number of components (clusters).

    Defaults to min(num_samples, num_dimensions) when not specified.
    """
    if num_components is not None:
        return num_components
    return min(num_samples, num_dimensions)
|
b3ea90e64245dae0853af005bdb922cbed517b61
| 40,593 |
def _trace_dense(op):  # pragma: no cover
    """Trace of a dense operator: sum of its diagonal entries.

    Starts from 0.0 so an empty operator yields a float, as before.
    """
    return sum((op[i, i] for i in range(op.shape[0])), 0.0)
|
e309d74c5e39834eb3c4d7382172ba0fd71b7130
| 40,594 |
def isStructure(s):
    """
    Check that the structure constraint contains only "(", ")", ".", the
    legal fuzzy-constraint brackets []{}<>, and uppercase letters A-Z.

    Improvement: single membership test against a precomputed set with an
    early exit, instead of scanning the whole string after a failure.

    Returns 1 when every character is legal, else 0 (int flags kept for
    backward compatibility with callers comparing against 0/1).
    """
    allowed = set(".()[]{}<>") | set("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    for ch in s:
        if ch not in allowed:
            return 0
    return 1
|
4f7397349e626072653180f4205c5bccdea86557
| 40,600 |
def append_host(host, pool):
    """Encode the pool name into the host string as 'host#pool'.

    Returns ``host`` unchanged when either part is empty/None.
    """
    if not host or not pool:
        return host
    return host + "#" + pool
|
513aaf79a626f8fc1af6ba6c13898be13221b843
| 40,602 |
def _critical_nemenyi_value(p_value, num_models):
    """Critical values for the Nemenyi post-hoc test.
    Table obtained from: https://gist.github.com/garydoranjr/5016455
    :param p_value: significance level; must be exactly 0.01, 0.05 or 0.10
        (compared with ``==``, so pass the literal values).
    :param num_models: number of models being compared; must be in [2, 50].
    :return: the tabulated critical value for (p_value, num_models).
    :raises ValueError: if num_models is out of range or p_value is not one
        of the three supported levels.
    """
    # Rows are indexed by num_models - 2; columns by significance level.
    values = [# p 0.01 0.05 0.10 Models
        [2.576, 1.960, 1.645], # 2
        [2.913, 2.344, 2.052], # 3
        [3.113, 2.569, 2.291], # 4
        [3.255, 2.728, 2.460], # 5
        [3.364, 2.850, 2.589], # 6
        [3.452, 2.948, 2.693], # 7
        [3.526, 3.031, 2.780], # 8
        [3.590, 3.102, 2.855], # 9
        [3.646, 3.164, 2.920], # 10
        [3.696, 3.219, 2.978], # 11
        [3.741, 3.268, 3.030], # 12
        [3.781, 3.313, 3.077], # 13
        [3.818, 3.354, 3.120], # 14
        [3.853, 3.391, 3.159], # 15
        [3.884, 3.426, 3.196], # 16
        [3.914, 3.458, 3.230], # 17
        [3.941, 3.489, 3.261], # 18
        [3.967, 3.517, 3.291], # 19
        [3.992, 3.544, 3.319], # 20
        [4.015, 3.569, 3.346], # 21
        [4.037, 3.593, 3.371], # 22
        [4.057, 3.616, 3.394], # 23
        [4.077, 3.637, 3.417], # 24
        [4.096, 3.658, 3.439], # 25
        [4.114, 3.678, 3.459], # 26
        [4.132, 3.696, 3.479], # 27
        [4.148, 3.714, 3.498], # 28
        [4.164, 3.732, 3.516], # 29
        [4.179, 3.749, 3.533], # 30
        [4.194, 3.765, 3.550], # 31
        [4.208, 3.780, 3.567], # 32
        [4.222, 3.795, 3.582], # 33
        [4.236, 3.810, 3.597], # 34
        [4.249, 3.824, 3.612], # 35
        [4.261, 3.837, 3.626], # 36
        [4.273, 3.850, 3.640], # 37
        [4.285, 3.863, 3.653], # 38
        [4.296, 3.876, 3.666], # 39
        [4.307, 3.888, 3.679], # 40
        [4.318, 3.899, 3.691], # 41
        [4.329, 3.911, 3.703], # 42
        [4.339, 3.922, 3.714], # 43
        [4.349, 3.933, 3.726], # 44
        [4.359, 3.943, 3.737], # 45
        [4.368, 3.954, 3.747], # 46
        [4.378, 3.964, 3.758], # 47
        [4.387, 3.973, 3.768], # 48
        [4.395, 3.983, 3.778], # 49
        [4.404, 3.992, 3.788], # 50
        ]
    if num_models < 2 or num_models > 50:
        raise ValueError("num_models must be in [2, 50].")
    if p_value == 0.01:
        return values[num_models - 2][0]
    elif p_value == 0.05:
        return values[num_models - 2][1]
    elif p_value == 0.10:
        return values[num_models - 2][2]
    else:
        raise ValueError("p_value must be in {0.01, 0.05, 0.10}")
|
54b0e64d2d543d6e122a0148c96cc78b1bef1b54
| 40,607 |
def choose_token_getter(getters):
    """Ask the user to choose a token getter amongst those available.

    :param getters: list of (label, getter) pairs; when there is only one,
        it is returned immediately without prompting.
    :return: the chosen getter callable.

    Improvement: the prompt help text is built once instead of being
    recomputed on every failed attempt (it never changes between loops).
    """
    if len(getters) == 1:
        _label, getter = getters[0]
        return getter
    print("cogite needs a personal access token. There are several ways to get one:")
    for idx, (label, _) in enumerate(getters, start=1):
        print(f"{idx}. {label}")
    choices = range(1, len(getters) + 1)
    # Loop-invariant: e.g. "1, 2 or 3" for three getters.
    choices_help = ', '.join(str(i) for i in range(1, len(getters)))
    choices_help += f' or {len(getters)}'
    while True:
        choice = input(
            f"Please choose one of the methods above by typing {choices_help}: "
        )
        try:
            choice = int(choice)
        except ValueError:
            pass
        else:
            if choice in choices:
                _label, getter = getters[choice - 1]
                return getter
        print("Wrong choice. Try again.")
|
69eba75c4c0bb4e2e9e0464a4d3a4b0a41708b1c
| 40,609 |
from typing import Dict
def normalize(dict_: Dict) -> Dict:
"""
Normalize the values of a dict.
Parameters
----------
dict_ : Dict
Returns
-------
argmax : Dict
Example
-------
>>> sorted(normalize({'a': 10, 'b': 70, 'c': 20}).items())
[('a', 0.1), ('b', 0.7), ('c', 0.2)]
"""
sum_ = sum(value for key, value in dict_.items())
dict_ = {key: value / float(sum_) for key, value in dict_.items()}
return dict_
|
8fab8bd8c169345f698d074a8d6f426f880e733e
| 40,610 |
def _pretty_frame_relation_type(freltyp):
    """
    Helper for pretty-printing a frame relation type.

    :param freltyp: The frame relation type to be printed (AttrDict with
        ID, superFrameName, name and subFrameName attributes).
    :return: A nicely formatted string representation.
    :rtype: str
    """
    template = ("<frame relation type ({0.ID}): {0.superFrameName} -- "
                "{0.name} -> {0.subFrameName}>")
    return template.format(freltyp)
|
86b59f1313f8785287441d379cc92fe86844ae38
| 40,613 |
def moyenne(donnees):
    """
    Return the arithmetic mean of an iterable of numbers.

    Works on any iterable (including generators) by accumulating sum and
    count in a single pass.  Improvements: removed the leftover debug
    ``print()`` call and the commented-out debug prints.

    Raises ZeroDivisionError when the iterable is empty.
    """
    somme = 0
    n = 0
    for valeur in donnees:
        n += 1
        somme += valeur
    return somme / n
|
5848afa7ea9273d2ca6456b223086fe4109248f5
| 40,618 |
def create_unique_name(prefix, names, separator="_"):
    """Return a name starting with ``prefix`` that is not in ``names``.

    Tries ``prefix`` itself first, then ``prefix<separator>1``,
    ``prefix<separator>2``, ... until a free name is found.
    """
    candidate = prefix
    suffix = 1
    while candidate in names:
        candidate = prefix + separator + str(suffix)
        suffix += 1
    return candidate
|
82e8e30135cd94db3a470827bf999a72269f1efb
| 40,619 |
def _pose_equal(pose1, pose2):
    """ True when pose1 and pose2 have the same position (x, y) and the same
    orientation (z, w); False otherwise.
    NOTE(review): the original docstring claimed the opposite ("True if ...
    different"); the code clearly returns True on equality.
    Only the quaternion z and w components are compared — presumably the
    robot moves in a plane so x/y orientation are always zero; confirm.
    :param pose1: ROS-style message exposing .pose.pose.position/.orientation
    :param pose2: same layout as pose1
    :return: bool
    """
    p1_pos = pose1.pose.pose.position
    p1_orient = pose1.pose.pose.orientation
    p2_pos = pose2.pose.pose.position
    p2_orient = pose2.pose.pose.orientation
    if p1_pos.x != p2_pos.x or p1_pos.y != p2_pos.y or p1_orient.z != p2_orient.z or p1_orient.w != p2_orient.w:
        return False
    return True
|
c69897968bc6654fe246c02c5458f2276d445de6
| 40,623 |
def get_parameter_value(fhir_operation, parameter_name):
    """
    Find the value bound to a named parameter of a FHIR operation.

    When several parameters share the name, the last match wins (matching
    the original scan-through behaviour).

    :param fhir_operation: the fhir operation definition
    :param parameter_name: the name of the parameter to get the value of
    :return: the bound value, or '' when no parameter matches
    """
    values = [
        param.binding.valueSetReference.identifier.value
        for param in fhir_operation.parameter
        if param.name == parameter_name
    ]
    return values[-1] if values else ''
|
0bab2226309123da14045ed99202bd9491b579e0
| 40,624 |
import math
def is_prime(value: int) -> bool:
    """Trial-division primality test.

    Args:
        value (int): candidate number.
    Returns:
        bool: True when ``value`` is prime.
    """
    if value < 2:
        return False
    if value == 2:
        return True
    if value % 2 == 0:
        return False
    # Only odd divisors up to the square root need checking.
    return all(value % divisor for divisor in range(3, int(math.sqrt(value)) + 1, 2))
|
88906c1c6f7101ef1ad421ba18c246bbd5eea6ca
| 40,625 |
from typing import Any
from typing import List
def _dict_rec_get(d: dict[Any, Any], path: List[Any], default: Any) -> Any:  # type: ignore # reason: dict
    """
    Walk ``path`` into the nested dict ``d``; return ``default`` on any miss.
    >>> d = {'a': 'a', 'b': {'c': 'bc', 'd': {'e': 'bde'}}}
    >>> _dict_rec_get(d, ['a'], None)
    'a'
    >>> _dict_rec_get(d, ['c'], None) is None
    True
    >>> _dict_rec_get(d, ['c'], 0)
    0
    >>> _dict_rec_get(d, ['b', 'c'], None)
    'bc'
    >>> _dict_rec_get(d, ['b', 'd'], None)
    {'e': 'bde'}
    >>> _dict_rec_get(d, ['b', 'd', 'e'], None)
    'bde'
    >>> _dict_rec_get(d, ['b', 'nopath'], None) is None
    True
    """
    assert isinstance(path, list)
    node = d
    for step in path:
        if isinstance(node, dict) and (step in node):  # type: ignore
            node = node[step]
        else:
            return default
    return node
|
3c124b12bbe1d933239d73f9a6d4ace2156bc3ed
| 40,629 |
def id(obj):  # pylint: disable=redefined-builtin,invalid-name
    """Return the ``__id`` entry of the given dict."""
    return obj["__id"]
|
e3a5ef6af8218dfd7efb5948899db6a76826c73a
| 40,630 |
def bin2dec(x):
    """
    Parse a base-2 string as a decimal integer.
    For instance: '11' -> 3
    """
    return int(x, base=2)
|
c64d4599ffb2c633abed18c3a1bc298f0da7ff2c
| 40,632 |
def stripped_string_concat(str1, str2):
    """Join two strings with a space and strip the result's edges.

    :param str1: First string
    :type str1: str
    :param str2: Second string
    :type str2: str
    :return: the concatenation without leading/trailing whitespace
    :rtype: str
    """
    combined = str1 + ' ' + str2
    return combined.strip()
|
17e45018e03f68ac5b635c149cc413e41a2cb343
| 40,633 |
from typing import Optional
def _is_version_at_least(version: str, major: int, minor: Optional[int] = None) -> bool:
    """
    Check that a given version meets the minimum requirements.

    Bug fix: a strictly greater major version now passes regardless of the
    minor requirement (previously "3.1" failed a (2, 5) requirement because
    the minor was compared even though the major already exceeded it).

    :param version:
        Version string in the form "<major>.<minor>[.<more>]"
    :param major:
        Major version requirement.
    :param minor:
        Minor version requirement, if any; only consulted when the majors
        are equal (and, as before, a requirement of 0 is a no-op).
    :return:
        Whether the given version is sufficient.
    """
    parts = version.split(".", maxsplit=3)
    actual_major = int(parts[0])
    if actual_major != major:
        return actual_major > major
    if minor and int(parts[1]) < minor:
        return False
    return True
|
568370d751ec814a8d6cd94e0486085dce5f62c5
| 40,636 |
import functools
def logger(fn):
    """Decorator that prints a log line for each call to a class method."""
    @functools.wraps(fn)
    def wrapper(class_obj, *args, **kwargs):
        """Log the call, then delegate to the wrapped method."""
        print(f'{class_obj.__class__.__name__}.{fn.__name__} was run with "{args}" args and {kwargs} kwargs')
        return fn(class_obj, *args, **kwargs)
    return wrapper
|
b99d4c5bd4db1bed8eaf312b06de51bed53e814f
| 40,641 |
import glob
def get_test_files(test_dir):
    """
    Glob for test .c scripts.

    :param test_dir: glob pattern (may contain '**') to match .c files
    :return: list of matching paths
    """
    matched = glob.glob(test_dir, recursive=True)
    return matched
|
0bdfd7ae2967a10105a46de1ee9ea4ca69574a1c
| 40,642 |
def average_change(profit_loss):
    """
    Average per-step change of a sequence:
    (last_element - first_element) / (number of elements - 1), rounded to
    two decimals.

    Improvement: removed the pointless full-list copy — the sequence is
    only read, never mutated.

    Raises ZeroDivisionError for a single-element sequence and IndexError
    for an empty one.
    """
    delta = (profit_loss[-1] - profit_loss[0]) / (len(profit_loss) - 1)
    return round(delta, 2)
|
58b158a9adc1482224038c8b8093e79fa645ac10
| 40,646 |
from datetime import datetime
def squash_dates(obj):
    """Recursively replace datetime objects with ISO8601 strings.

    Dicts and list/tuple containers are rebuilt (the input is never
    mutated); tuples come back as lists, matching the original behaviour.
    Any other value is returned unchanged.
    """
    if isinstance(obj, datetime):
        return obj.isoformat()
    if isinstance(obj, dict):
        return {key: squash_dates(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [squash_dates(item) for item in obj]
    return obj
|
4799784d15897e260aada2b4c5f18615b438bda5
| 40,653 |
def get_capillary_diameter(line1, line2):
    """
    Capillary diameter in pixel units: the straight-line distance between
    one point on each edge of the capillary.

    line1 -- (x, z) point on the left edge of the capillary
    line2 -- (x, z) point on the right edge of the capillary

    Assumption: image rotation is very small, so this straight-line
    distance is very close to the true diameter.
    """
    x1, z1 = line1
    x2, z2 = line2
    return ((x2 - x1) ** 2 + (z2 - z1) ** 2) ** 0.5
|
5b11b6025c17b373d2f014dbb0519397561b0f30
| 40,654 |
def gcd(a: int, b: int) -> int:
    """
    Greatest Common Divisor of two integers (always non-negative).

    Improvements: the sorted/divisibility special case was removed (it is
    just the first step of Euclid's algorithm), and recursion was replaced
    by iteration so very large inputs cannot hit the recursion limit.
    gcd(0, 0) == 0, as before.
    """
    a, b = abs(a), abs(b)
    while b:
        a, b = b, a % b
    return a
|
81f05eb5411d8e94debdb53a87b739378f1dbeae
| 40,655 |
def find_packages_in_file(path):
    """
    Read a text file containing one package per line and return the list
    of lines (without newline characters).
    """
    with open(path, "r") as handle:
        contents = handle.read()
    return contents.splitlines()
|
365e58266b2eb5d0ae35bce972dc4f1e49a10b6f
| 40,656 |
import string
import re
def remove_duplicate_punctuation(text: str) -> str:
    """
    Collapse each run of punctuation characters to a single character
    (the last one of the run), which may have been a feature of gazette
    design.

    Fix: ``string.punctuation`` is now passed through ``re.escape`` — it
    contains ']', '\\', '^' and '-', which only formed a valid character
    class (with the intended meaning) by coincidence.
    """
    pattern = re.compile(f"([{re.escape(string.punctuation)}])" + "{1,}")
    # The group matches one punctuation char at a time, so \1 is the last
    # char of the run.
    return pattern.sub(r"\1", text)
|
b9afb176e75b2e873e2ec68751830002221bdbfc
| 40,662 |
def get_image_name(url, char_limit=60):
    """Derive a file name from an image url.

    Strips any query string, keeps a recognised image extension (defaulting
    to '.jpg' when none is found) and truncates the base name to
    ``char_limit`` characters before re-attaching the extension.

    Args:
        url (str): Image url.
        char_limit (int): Maximum number of characters for the base name.
    Returns:
        str: Image name.
    Examples:
        >>> url = "https://miguelgfierro.com/static/blog/img/hoaphumanoid.png"
        >>> get_image_name(url)
        'hoaphumanoid.png'
        >>> url = "https://miguelgfierro.com/static/blog/img/hoaphumanoid.png?itok=o-EKrRkB"
        >>> get_image_name(url)
        'hoaphumanoid.png'
        >>> url = "https://miguelgfierro.com/static/blog/img/hoaphumanoid"
        >>> get_image_name(url)
        'hoaphumanoid.jpg'
        >>> url = "https://miguelgfierro.com/012345678901234567890123456789.jpg"
        >>> get_image_name(url, 20)
        '01234567890123456789.jpg'
    """
    name = str(url[url.rfind("/") + 1:])
    query_pos = name.find("?")
    if query_pos != -1:
        name = name[:query_pos]
    known_extensions = (".jpg", ".jpeg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico")
    if any(extension in name for extension in known_extensions):
        dot = name.rfind(".")
        name, ext = name[:dot], name[dot:]
    else:
        ext = ".jpg"
    return name[:char_limit] + ext
|
c2a7da7e2332e31b0580751288699ad408e9b49a
| 40,663 |
import hashlib
def es2_activity_hash(activity, flow):
    """Generate a unique ID for an ecoinvent3 dataset.

    Despite using a million UUIDs, there is actually no unique ID in an
    ecospold2 dataset: datasets are uniquely identified by the combination
    of activity and flow UUIDs, so we hash their concatenation.
    """
    digest = hashlib.md5((activity + flow).encode('utf-8'))
    return str(digest.hexdigest())
|
b30ce38708a7eadcba06e3615779469cfaba5fda
| 40,666 |
import string
def _get_placeholders(template):
    """Collect every named placeholder appearing in a format template.

    Parameters
    ----------
    template : str
        The template string to inspect.

    Returns
    -------
    placeholders : list of str
        Names of the placeholders found, in order of appearance.

    Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
    """
    found = []
    for _literal, field_name, _spec, _conv in string.Formatter().parse(template):
        # Skip plain text (None) and auto-numbered fields ('').
        if field_name:
            found.append(field_name)
    return found
|
f378486328afebf86f643cf8beaf1f883ffccd9c
| 40,668 |
from typing import List
from typing import Any
def get_internal_arg_copier(total_size: int, memory_dest: int) -> List[Any]:
    """
    Copy arguments.

    For internal functions, MSTORE arguments and callback pointer from the
    stack, one 32-byte word at a time.

    :param total_size: total size to copy
    :param memory_dest: base memory position to copy to
    :return: LLL list that copies total_size of memory
    """
    instructions: List[Any] = ["seq"]
    instructions.extend(
        ["mstore", memory_dest + offset, "pass"]
        for offset in range(0, total_size, 32)
    )
    return instructions
|
6b1c0777a136655bb7a28e82f615f4784e18bd74
| 40,673 |
def assign_pml_elems(sorted_elems, pml_elems, pml_partID='2'):
    """assign PML elements in the sorted element matrix
    Sets the part ID ('pid') of the outermost element layers on all six
    faces of the 3-D element block to ``pml_partID``, marking them as
    perfectly-matched-layer (PML) regions.
    Args:
        sorted_elems: sorted element matrix — indexed as
            sorted_elems['pid'][x, y, z]; presumably a numpy structured
            array (confirm against caller)
        pml_elems: list of tuples of # PML elems on each axis edge
            ([[xmin, max], [ymin, ymax], ...)
        pml_partID: default = 2
    Returns:
        sorted_elems (to be written to new file; mutated in place)
    """
    # For each axis the first slice marks the k "min"-face layers, and the
    # negative-stride slice [-1:-k-1:-1] marks the last k "max"-face layers
    # (it also works when k == 0, selecting nothing).
    sorted_elems['pid'][0:pml_elems[0][0], :, :] = pml_partID
    sorted_elems['pid'][-1:-pml_elems[0][1] - 1:-1, :, :] = pml_partID
    sorted_elems['pid'][:, 0:pml_elems[1][0], :] = pml_partID
    sorted_elems['pid'][:, -1:-pml_elems[1][1] - 1:-1, :] = pml_partID
    sorted_elems['pid'][:, :, 0:pml_elems[2][0]] = pml_partID
    sorted_elems['pid'][:, :, -1:-pml_elems[2][1] - 1:-1] = pml_partID
    return sorted_elems
|
4dd0a4daeb3e66dc16f151422500a4f3a075fb82
| 40,676 |
from typing import Callable
import inspect
import functools
def wrap_in_coroutine(func: Callable) -> Callable:
    """Wrap a plain callable into a coroutine function.

    A function that is already a coroutine function is returned unchanged.

    Args:
        func: A callable object (function or coroutine function)
    Returns:
        A coroutine function which executes `func`.
    """
    if inspect.iscoroutinefunction(func):
        return func

    @functools.wraps(func)
    async def _coroutine(*args, **kwargs):
        return func(*args, **kwargs)

    return _coroutine
|
3134241771749d63ce5213180a34d7c26f8f0c76
| 40,677 |
def _clean_boto3_metadata(boto3_metadata: dict) -> dict:
    """Strip unwanted keys from a boto3 metadata dictionary.

    Currently only 'ResponseMetadata' is removed; the dict is modified in
    place (a new empty dict is substituted when None/falsy is passed).

    Arguments:
        boto3_metadata (dict): The raw dictionary of metadata typically
            found in resource.meta.data
    """
    metadata = boto3_metadata or {}
    metadata.pop("ResponseMetadata", None)
    return metadata
|
13d3bbfa5a43642ac147eebcc53c1337d12afb6f
| 40,683 |
def GetOriginFromDataUnit(data_unit):
    """ Return a shortened origin string from the data unit.
    E.g. 'fb' for Facebook, 'tw' for Twitter; any other origin is returned
    lower-cased, and a missing origin yields ''.
    Returns: shortened origin (string)
    """
    abbreviations = {'facebook': 'fb', 'twitter': 'tw'}
    origin = data_unit.get('origin', '').lower()
    return abbreviations.get(origin, origin)
|
859e65b4376629cc3c5a4ab10e331187f069aad4
| 40,703 |
def applescript_escape(string):
    """Escape backslashes and double quotes for embedding in AppleScript.

    Backslashes are doubled first so the quote-escapes are not re-escaped.
    """
    backslashes_escaped = string.replace('\\', '\\\\')
    return backslashes_escaped.replace('"', '\\"')
|
0c545042a8d4145ca064afe458fb9a14d16dee7a
| 40,707 |
def _to_list(obj):
    """Wrap obj in a single-element list unless it is already a list."""
    return obj if isinstance(obj, list) else [obj]
|
3b6888428f8f55a627e52bb13c9a5ea44528669f
| 40,709 |
import json
def read_gallery_config(gallery_path):
"""
Read the gallery config from the gallery.json file
:param gallery_path: path to the JSON file
:return: dict containing the gallery config
"""
try:
with open(gallery_path, "r") as gallery_in:
return json.load(gallery_in)
except OSError:
return []
|
105641dcfb22f70c5f93ad54ec6bbad85c988e87
| 40,710 |
import re
def _parse_parameters(parameters):
    """ Parses parameters string and returns a dict of overrides.
    This function assumes that parameters string is in the form of '"key1="value1" key2="value2"'.
    Use of single quotes is optional but is helpful for strings that contain spaces.
    Args:
        parameters (str): A string in the form of '"key="value" key="value"'.
    Returns:
        dict: A dict containing key/value pairs parsed from the parameters string.
    Raises:
        ValueError: if the parameters string is malformed.
    """
    # Validate the overall shape up front: word keys, double-quoted values
    # that contain no '=' characters, separated by single spaces.
    if not re.match(r'^(\w+)="([^=]+)"(\s{1}(\w+)="([^=]+)")*$', parameters):
        raise ValueError
    # first we add tokens that separate key/value pairs.
    # Spaces are replaced with the "$$" sentinel only while *outside* double
    # quotes (replace_space toggles on each '"'), so quoted values keep
    # their spaces.
    # NOTE(review): a literal "$$" inside a quoted value would be split
    # incorrectly below — confirm values can never contain "$$".
    replace_space = True
    tokenized = ""
    for c in parameters:
        if c == '\"':
            replace_space = not replace_space
        elif c == ' ' and replace_space:
            tokenized += "$$"
        else:
            tokenized += c
    # now get the tokens
    tokens = tokenized.split('$$')
    result = {}
    for token in tokens:
        # separate key/values (the regex above guarantees exactly one '=')
        key_value = token.split("=")
        result[key_value[0]] = key_value[1]
    return result
|
cf412e7927cd78e9c154d7a3af09220f68d1311b
| 40,714 |
def __get_owning_account_from_arn(arn):
    """
    Extract the owning AWS account id (the fifth ':'-separated field) from
    an ARN; returns None for a None input.
    """
    if arn is None:
        return None
    return arn.split(':')[4]
|
0fa4a4b7de49cb42ebdd25a7e8d67074c85d3974
| 40,715 |
def execute_cypher_query(driver, query, params=None):
    """Run a Cypher `query` through `driver` inside a session and return the result.

    Args:
        driver (neo4j.Driver): instance of database driver
        query (str): Cypher query to execute
        params (dict): Neo4j parameters that are substituted into `query`
    Returns:
        (neo4j.BoltStatementResult): the result object
    """
    # NOTE(review): the result is returned after the session closes; lazily
    # consumed driver results may need to be consumed eagerly by the caller —
    # confirm against the installed neo4j driver version.
    with driver.session() as session:
        return session.run(query, parameters=params)
|
8db48ceec4c3ee8b30910934a4f2517443837bde
| 40,718 |
import re
def exclude_filter(excl_filter, paths):
    """
    Matches a set of paths against an exclude filter, keeping non-matches.
    param: excl_filter: The regex filter to match.
    param: paths: The set of paths to match against the filter.
    returns: A set of paths which do not match the filter.
    """
    return {path for path in paths if re.search(excl_filter, path) is None}
|
4332ab8c75e71592ace91a614f73ce260a3895a0
| 40,721 |
def q6(vector, n):
    """
    Reverse the input vector in chunks of size n.

    Args:
        vector (list): the array to be reversed
        n (int): chunk size
    Returns:
        list: new list with the chunks in reversed order; elements within a
        chunk keep their original relative order
    """
    reordered = []
    remaining = vector
    while remaining:
        # Take the trailing chunk, then shrink the working slice.
        reordered.extend(remaining[-n:])
        remaining = remaining[:-n]
    return reordered
|
f4f3c6bce5d886eb023575ab76898af766c25eff
| 40,722 |
def _is_empty(text: str) -> bool:
"""
Determine if a cell is empty.
Keyword arguments:
text -- the text to check
Returns: True if the cell is empty, False otherwise
"""
return text == ""
|
dec07be33afb22407107eb65fe45e7d06b3d48b9
| 40,723 |
import re
def remove_trailing_slashes(filepath: str) -> str:
    """
    Strip a single leading and/or trailing slash from a path string.
    Examples:
        remove_trailing_slashes("/my/path/") == "my/path"
        remove_trailing_slashes("my/path/") == "my/path"
        remove_trailing_slashes("/path/to/myfile.pdf") == "path/to/myfile.pdf"
    """
    # Anchored alternation removes at most one slash at each end.
    return re.sub(r"^/|/$", "", filepath)
|
c697d0f954d99dbf15be886ca37f06743346c564
| 40,726 |
def snake_to_camel(name):
    """Return the camelCase form of a snake_case input string."""
    head, *tail = name.split('_')
    return head + "".join(map(str.title, tail))
|
d9d42f4cba3a16af61da8eab1f6ba3cca58204b3
| 40,727 |
def det_dist(coord, coord_2):
    """
    Determine the euclidean distance between two 2-D points.
    """
    delta_x = coord_2[0] - coord[0]
    delta_y = coord_2[1] - coord[1]
    return (delta_x**2 + delta_y**2) ** 0.5
|
c5acb2e84475babf28a5ff1642847dcb337fe7e4
| 40,728 |
def izipcols(df, cols, index=False):
    """Return an iterator over rows of a Pandas DataFrame as plain tuples.

    (Much faster than row-wise iteration that instantiates Series objects.)

    Args:
        df: DataFrame
        cols: list of column names
        index: if True, include the index as the first element (default False)
    """
    columns = [df[name] for name in cols]
    if index:
        columns.insert(0, df.index)
    return zip(*columns)
|
885d244ee05df2324a4246bfd5bd77ef1a43142e
| 40,730 |
def rm_brs(line):
    """Collapse all whitespace runs (line breaks, tabs, etc.) into single spaces."""  # noqa: DAR101,DAR201
    tokens = line.split()
    return ' '.join(tokens)
|
39f97bb6aa23fb54cbfaa90aa3d28537a139f3a0
| 40,731 |
def scale_unit_interval(mat, eps=1e-8):
    """Rescale the values in `mat` linearly so they lie between 0 and 1."""
    scaled = mat.copy()
    # Shift the minimum to 0, then scale by the eps-padded range.
    scaled -= scaled.min()
    scaled *= 1.0 / (scaled.max() + eps)
    return scaled
|
7523e0c707cc5fa8575dd9ac8155af623b19f58a
| 40,736 |
def calc_hilo(min_val, max_val, df, cols_to_test):
    """ Return lowest and highest values from min_val and max_val if present, or calculate from df. """
    # The y-axis range must be shared by all axes, so both ends are computed
    # over every tested column. Explicit bounds always win; the DataFrame is
    # only consulted when a bound is None and there are rows to inspect.
    has_rows = len(df.index) > 0
    if max_val is None and has_rows:
        highest_possible_score = max(max(df[col]) for col in cols_to_test)
    else:
        highest_possible_score = max_val
    if min_val is None and has_rows:
        lowest_possible_score = min(min(df[col]) for col in cols_to_test)
    else:
        lowest_possible_score = min_val
    return lowest_possible_score, highest_possible_score
|
49f0bc0ed1080ed0c59828fcdf1263554f32dc5e
| 40,737 |
def captured_article_ok(save_option, saved, post_option, posted):
    """
    Given four boolean variables, return whether or not the article
    should be considered captured.

    save_option: Was the code required to save the article?
    saved: Did the code save the article?
    post_option: Was the code required to post the article?
    posted: Did the code post the article?
    """
    # Captured means every requested action actually happened:
    # (save_option implies saved) and (post_option implies posted).
    # This collapses the original four-branch truth table and drops the
    # non-idiomatic `== True` / `== False` comparisons.
    return (saved or not save_option) and (posted or not post_option)
|
e5de6ce72fa239e509125e6fe213e2e9e6bacc04
| 40,738 |
def str_to_dict(s, join_symbol="\n", split_symbol=":"):
    """
    Convert a parameter string into a dict.

    For example "a=b&c=d" with join_symbol "&" and split_symbol "=".

    :param s: source string
    :param join_symbol: separator between key/value pairs
    :param split_symbol: separator between a key and its value
    :return: dict of parsed key/value pairs (blank entries are skipped)
    """
    data = dict()
    for entry in s.split(join_symbol):
        entry = entry.strip()
        if not entry:
            continue
        # Split only on the first separator so values may contain it too.
        key, value = entry.split(split_symbol, 1)
        data[key.strip()] = value.strip()
    return data
|
16bc3c31a60c591f3b2cfce282119aebfbb66f83
| 40,741 |
from contextlib import contextmanager
from typing import Any


@contextmanager
def monkeypatch(obj: Any, attr: str, new: Any) -> Any:
    """Temporarily replace a method with a new function (context manager).

    The previously set method is passed as the first argument to the new
    function. The original attribute is restored on exit, even on error.

    Fix: the original was a plain generator function; without
    ``@contextlib.contextmanager`` it could not be used in a ``with`` block,
    which is the only sensible way to consume this yield/finally shape.
    """
    old = getattr(obj, attr)

    def patched(*args: Any, **kwargs: Any) -> Any:
        return new(old, *args, **kwargs)

    try:
        setattr(obj, attr, patched)
        yield
    finally:
        setattr(obj, attr, old)
0d948b9d4600218d3d94f9088e9c82c500566e98
| 40,745 |
def stringToBool(s):
    """
    Convert a string (True/true/1) to bool
    s -- string/int value
    return -- True/False
    """
    # Membership tests each candidate with == from left to right, matching
    # the original chain of equality comparisons.
    return s in ("True", "true", "1", 1)
|
309c0d7628c78dcced26e9442e504e7cdec1450c
| 40,747 |
import random
def make_random_ints_no_dups(num, lower_bound, upper_bound):
    """
    Generate a list containing num random ints between
    lower_bound and upper_bound. upper_bound is an open bound.
    The result list cannot contain duplicates.

    Raises ValueError when num exceeds the number of distinct values in
    [lower_bound, upper_bound) -- the original rejection loop would spin
    forever in that case.
    """
    rng = random.Random()
    # random.sample draws without replacement in one pass, replacing the
    # original O(n^2) candidate/membership rejection loop.
    return rng.sample(range(lower_bound, upper_bound), num)
|
e8854c1054b99828551a155b1f9f62e1fbd4c0cc
| 40,754 |
def taxon_file(taxon_id, page_no=1):
    """Build the taxon HTML file name; pages after the first get a suffix."""
    if page_no > 1:
        return f'taxon_id_{taxon_id}_{page_no}.html'
    return f'taxon_id_{taxon_id}.html'
|
a5a7ee2f8fe4387499bc3fdec909c8986b7fcbec
| 40,755 |
def delete_profile(db, user_id, profile_id):
    """Delete one of a user's profiles.

    Args:
        db (object): The db object
        user_id (int): The id of the user.
        profile_id (int): The id of the profile to delete.
    Returns:
        True if the record was deleted, False otherwise
    """
    db.execute('''DELETE FROM profile
               WHERE user_id=? and id=?''',
               (user_id, profile_id))
    # A non-zero affected-row count means the profile existed and is gone.
    return db.rows_affected != 0
|
728247bd982a7b4f3916b8c358e95ff18c837625
| 40,760 |
import copy
def generate_keyed_value_combinations(args):
    """
    Expand a mapping of key -> list-of-values into every key/value combination.

    From this:
        args = {"attr1": ["a", "b", "c"], "attr2": ["1", "2"], "attr3": ["A"]}
    To this:
        [
            {u'attr1': u'a', u'attr2': u'1', u'attr3': u'A'},
            {u'attr1': u'b', u'attr2': u'1', u'attr3': u'A'},
            ...
        ]
    """
    if not args:
        return []
    # One list of (key, value) pairs per key, in dict insertion order.
    keyed_lists = [[(key, value) for value in args[key]] for key in args]
    # Seed with single-entry dicts from the first key's values.
    combos = [{key: value} for key, value in keyed_lists[0]]
    # Fuse each remaining list into the accumulated combinations, keeping the
    # original ordering (existing combo outer, new option inner).
    for options in keyed_lists[1:]:
        expanded = []
        for combo in combos:
            for key, value in options:
                # deepcopy so combinations never share nested value objects.
                clone = copy.deepcopy(combo)
                clone[key] = value
                expanded.append(clone)
        combos = expanded
    return combos
|
5a07d45e93ce5ca308fb87fd76c43050a2c154ae
| 40,761 |
def validate_mask(mask):
    """Check if the netmask is valid.

    Returns the stripped mask string when it is an integer in [0, 32],
    otherwise False.
    """
    if not mask:
        return False
    candidate = mask.strip()
    if candidate.isdigit() and 0 <= int(candidate) <= 32:
        return candidate
    return False
|
5420e65f0c19022fbf13d5847a94d1d52a6e9c4f
| 40,765 |
def parse_request(event):
    """
    Parse an API Gateway event and return the caller identifiers.

    Expects the event to carry a "queryStringParameters" mapping with
    "user_id" and "org_id" keys.

    :param event: api gateway event
    :return: a dict containing the user id and org id (None when absent)
    """
    # API Gateway sends queryStringParameters as None (not a missing key)
    # when the request has no query string, so the `or {}` fallback is
    # required -- .get's default alone would still hand back None.
    query_params = event.get("queryStringParameters") or {}
    return {
        "user_id": query_params.get('user_id', None),
        "org_id": query_params.get('org_id', None),
    }
|
733b32a3869834792384a568d6e6a5ed608cbd2e
| 40,767 |
def _translate_message(message):
"""Translate the Message model to a dict."""
return {
'id': message['id'],
'project_id': message['project_id'],
'request_id': message['request_id'],
'resource_type': message['resource_type'],
'resource_uuid': message.get('resource_uuid'),
'event_id': message['event_id'],
'message_level': message['message_level'],
'created_at': message['created_at'],
'expires_at': message.get('expires_at'),
}
|
d8ce8fc82441352e9a2a3ce0334e4afbad3679e8
| 40,779 |
def set_to_list(setstring, delimiter="|", affix="|"):
    """Convert an affixed set string (e.g. "|a|b|") into a list of items."""
    if setstring == affix:
        return []
    trimmed = setstring.strip(affix)
    return trimmed.split(delimiter)
|
d7ede5607107a3e63ba9a13cb7011e49bde12933
| 40,791 |
from pathlib import Path
def check_arg_output_dir(output_dir: str) -> bool:
    """Ensure the output directory exists, creating it (and parents) if missing.

    :param output_dir: the output directory
    :return: True once the directory exists
    """
    target = Path(output_dir)
    if not target.is_dir():
        # Create any missing parents too; exist_ok guards against races.
        target.mkdir(parents=True, exist_ok=True)
    return True
|
d8afb739af85399a2fc24fd0be110f2e2415af77
| 40,797 |
def kWh2therms(x):
    """Convert energy in kilowatt-hours to therms (1 therm = 29.3 kWh)."""
    kwh_per_therm = 29.3
    return x / kwh_per_therm
|
5362fce32edfaeb9ba515a12fdc917be447280ea
| 40,798 |
import functools
def required_ploidy(n, return_val):
    """
    Decorator factory for GenotypeArray methods: the wrapped method only runs
    when the variant's ploidy equals ``n``; otherwise ``return_val`` is
    returned unchanged.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            instance = args[0]
            if instance.variant.ploidy == n:
                return func(*args, **kwargs)
            return return_val
        return wrapper
    return decorator
|
271fbb8646a48b936e991b7cc132666bca3d164f
| 40,799 |
def _short_mac(mac: str) -> str:
"""Get the short mac address from the full mac."""
return mac.replace(":", "").upper()[-6:]
|
f044dec93f0a635c3fc3245355137905c9a1e053
| 40,800 |
def size_of_shape(x):
    """
    Return the size of the first dimension of a grid's shape.

    Parameters:
        x (np.array): Grid to get size from
    Returns:
        int: Size of the leading dimension of the grid passed in
    """
    first_axis_size = x.shape[0]
    return first_axis_size
|
a51429a58770fe321c6062d3335aa7eb01724a56
| 40,806 |
from pathlib import Path
def get_datasets_in_path(path: Path):
    """
    Return the names of all datasets with a stored ``<name>.json`` file in
    *path*. Used to check which datasets have features extracted.
    """
    # Strip the trailing ".json" (5 characters) from each matched file name.
    return {json_file.name[:-5] for json_file in path.glob('*.json')}
|
782f1fea3daaf1a17fa343f91ee2f2d6d34cea57
| 40,810 |
def replace_characters(main_string, chars, new_string):
    """
    Parameters:
        main_string (str): The string for which you want to make the replacement
        chars (str): The characters that you want to replace, applied one at a
            time in order
        new_string (str): The new string that will replace each character
    Return:
        The original string, but with the new characters now incorporated.
    """
    # str.replace is already a no-op when the character is absent, so the old
    # membership check was redundant -- and the bare `except: continue` could
    # never fire for str inputs while silently hiding real errors (e.g. a
    # non-string main_string). Both are removed.
    for char in chars:
        main_string = main_string.replace(char, new_string)
    return main_string
|
cf1101840ab78913b62d53d331f4263d4946ec29
| 40,811 |
def get_index_action(index_name, document_type, document):
    """Generate a bulk index action for a given document.

    :param index_name: Elasticsearch index to use
    :type index_name: str
    :param document_type: Elasticsearch document type to use
    :type document_type: str
    :param document: Document to be indexed
    :type document: dict
    :return: Action to be passed in bulk request
    :rtype: dict
    """
    action = dict(_index=index_name, _type=document_type, _source=document)
    if '_id' in document:
        # Reuse the database row's _id so Elasticsearch ids stay stable.
        action['_id'] = document['_id']
    return action
|
405690e65f1d1d3208ca37cd88efcbcf904add40
| 40,812 |
import re
def validate_account_to_dashed(account):
    """
    Validates that the provided string is in valid AdWords account format and
    converts it to dashed format.
    :param str account: AdWords Account
    :rtype: str
    :return: Dashed format
    :raises ValueError: if the account matches neither accepted format
    """
    account = str(account).strip()
    # fullmatch (rather than the unanchored match) rejects trailing garbage
    # such as "123-456-78901", which the original silently accepted.
    if re.fullmatch(r"[0-9]{3}-[0-9]{3}-[0-9]{4}", account):
        return account
    if re.fullmatch(r"[0-9]{10}", account):
        return '-'.join([account[0:3], account[3:6], account[6:]])
    raise ValueError("Invalid account format provided: {}".format(account))
|
30eae40d2b205aeebc99cfc38864893d2fe6e7b8
| 40,815 |
def colourfulness_components(C_RG, C_YB, B_rw):
    """
    Return the *colourfulness* components :math:`M_{RG}` and :math:`M_{YB}`.

    Parameters
    ----------
    C_RG : numeric
        *Chroma* component :math:`C_{RG}`.
    C_YB : numeric
        *Chroma* component :math:`C_{YB}`.
    B_rw : numeric
        Ideal white *brightness* correlate :math:`B_{rw}`.

    Returns
    -------
    numeric
        *Colourfulness* components :math:`M_{RG}` and :math:`M_{YB}`.

    Examples
    --------
    >>> C_RG = -0.0028852716381965863
    >>> C_YB = -0.013039632941332499
    >>> B_rw = 125.24353925846037
    >>> colourfulness_components(C_RG, C_YB, B_rw)  # doctest: +ELLIPSIS
    (-0.0036136..., -0.0163312...)
    """
    # Each chroma component is scaled by the ideal-white brightness over 100.
    return C_RG * B_rw / 100, C_YB * B_rw / 100
|
52b92618442ab87eba516ca1f2d41349a5f1120e
| 40,817 |
def build_filename(
    name: str, suffix: str = "", prefix: str = "", max_length: int = 128
) -> str:
    """
    Join prefix, (possibly truncated) name and suffix with dashes, skipping
    empty parts.

    >>> build_filename("name")
    'name'
    >>> build_filename("name", "suffix", "prefix")
    'prefix-name-suffix'
    >>> build_filename("loooooooong_nameeeeee", "suffix", max_length=20)
    'loooooooo-suffix'
    """
    # Length budget left for the name after affixes and separator slack.
    budget = max_length - len(suffix) - len(prefix) - 5
    parts = [prefix, name[:budget], suffix]
    return "-".join(part for part in parts if part)
|
2081b9b8f6723d0f0e1c80c919454a6b0b98f64a
| 40,820 |
def create_cfg_ti(run_dir, receptor_f, ligand_f, ambig_f, target_f):
    """
    Create HADDOCK3 configuration file for the first scenario.
    Parameters
    ----------
    run_dir : path or str
        Path to the run directory; where run results will be saved.
    receptor_f : Path or str
        Absolute path pointing to the receptor PDB file.
    ligand_f : Path or str
        Absolute path pointing to the ligand PDB file.
    ambig_f : Path or str
        Absolute path pointing to the `ambig.tbl` file.
    target_f : Path or str
        Absolute path pointing to the reference (target) PDB file used by
        the `caprieval` stages.
    Return
    ------
    str
        The HADDOCK3 configuration file for benchmarking.
    """
    # Paths are interpolated with !r so they come out quoted in the config.
    cfg_str = \
f"""
run_dir = {str(run_dir)!r}
ncores = 48
molecules = [
    {str(receptor_f)!r},
    {str(ligand_f)!r}
    ]
[topoaa]
[rigidbody]
ambig_fname = {str(ambig_f)!r}
sampling = 1000
noecv = false
[caprieval]
reference = {str(target_f)!r}
[seletop]
select = 200
[flexref]
ambig_fname = {str(ambig_f)!r}
noecv = true
[caprieval]
reference = {str(target_f)!r}
[mdref]
ambig_fname = {str(ambig_f)!r}
noecv = true
[caprieval]
reference = {str(target_f)!r}
"""
    return cfg_str
|
9cf3dcc43e5e1c29de51c069d05a9eec7bd513e3
| 40,824 |
def get_type(attributes):
    """ Compute mention type.
    Args:
        attributes (dict(str, object)): Attributes of the mention, must contain
            values for "pos", "ner" and "head_index".
    Returns:
        str: The mention type, one of NAM (proper name), NOM (common noun),
            PRO (pronoun), DEM (demonstrative pronoun) and VRB (verb).
    """
    pos = attributes["pos"][attributes["head_index"]]
    head_ner = attributes["ner"][attributes["head_index"]]
    # Proper names: either a proper-noun POS tag or any named-entity label.
    # (The original had two separate branches both returning "NAM".)
    if pos.startswith("NNP") or head_ner != "NONE":
        return "NAM"
    if pos.startswith("PRP"):
        return "PRO"
    if pos.startswith("DT"):
        return "DEM"
    if pos.startswith("VB"):
        return "VRB"
    # Common nouns and everything else default to NOM (the original's
    # explicit "NN" branch was identical to its else branch).
    return "NOM"
|
a8f8fd82f6b68b9bcb2332b0087fae47ba3ff50e
| 40,825 |
from typing import Mapping
from typing import Any
def with_extra_spec_options(
    original: Mapping[str, Any],
    extra_options: Mapping[str, Any],
    context: str,
) -> Mapping[str, Any]:
    """
    Merge *extra_options* into *original*, refusing to overwrite existing keys.

    Parameters
    ----------
    original
        Original map of key-values.
    extra_options
        Options to set.
    context
        Context in which these options are being set; used to produce a
        useful error message.

    Raises
    ------
    ValueError
        If we attempt to override keys that are already present in the
        original mapping.
    """
    if not extra_options:
        return original
    overlapping = set(extra_options) & set(original)
    if overlapping:
        raise ValueError(
            f"In {context}, you are trying to override the value of {sorted(overlapping)}. The Argo runtime uses these attributes to guarantee the behavior of the supplied DAG is correct. Therefore, we cannot let you override them."
        )
    return {**original, **extra_options}
|
722130e5d92e6b62bdf726d3ecdfe0ea0b452b83
| 40,832 |
def xy_to_bit(x: int, y: int) -> int:
    """Map (x, y) board coordinates onto a bit index of an 8x8 bitboard."""
    return x + 8 * y
|
84ea71147e6ae3a64a3402c6fe90525736c1682e
| 40,833 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.