def translate_line_test(string):
"""
Translates raw log line into sequence of integer representations for word tokens with sos and eos tokens.
:param string: Raw log line from auth_h.txt
:return: (list) Sequence of integer representations for word tokens with sos and eos tokens.
"""
data = string.split(",")
time = int(data[0]) # could be used to make categorical variables for day of week and time of day.
src_user, src_domain, dst_user, dst_domain, src_pc, dst_pc = split_line(string)
src_user = lookup(src_user, word_token_inds, None)
src_domain = lookup(src_domain, word_token_inds, domain_counts)
if dst_user.startswith('U'):
dst_user = lookup(dst_user, word_token_inds, None)
else:
dst_user = lookup(dst_user, word_token_inds, pc_counts)
dst_domain = lookup(dst_domain, word_token_inds, domain_counts)
src_pc = lookup(src_pc, word_token_inds, pc_counts)
dst_pc = lookup(dst_pc, word_token_inds, pc_counts)
if data[5].startswith("MICROSOFT_AUTH"): # Deals with file corruption for this value.
data[5] = "MICROSOFT_AUTH"
auth_type = lookup(data[5], word_token_inds, None)
logon_type = lookup(data[6], word_token_inds, None)
auth_orient = lookup(data[7], word_token_inds, None)
success = lookup(data[8].strip(), word_token_inds, None)
return "%s %s %s %s %s %s %s %s %s %s %s %s\n" % (str(sos), src_user, src_domain, dst_user,
dst_domain, src_pc, dst_pc, auth_type,
logon_type, auth_orient, success, str(eos))
| 5,600 |
def preprocess_signal(signal, sample_rate):
"""
Preprocess a signal for input into a model
Inputs:
signal: Numpy 1D array containing waveform to process
sample_rate: Sampling rate of the input signal
Returns:
spectrogram: STFT of the signal after resampling to 10kHz and adding
preemphasis.
X_in: Scaled STFT input feature for the model
"""
# Compute the spectrogram of the signal
spectrogram = make_stft_features(signal, sample_rate)
# Get the magnitude spectrogram
mag_spec = np.abs(spectrogram)
    # Scale the magnitude spectrogram with a square-root squashing and
    # min-max normalization
X_in = np.sqrt(mag_spec)
m = X_in.min()
M = X_in.max()
X_in = (X_in - m)/(M - m)
return spectrogram, X_in
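# A minimal sketch of the scaling step above on a toy magnitude array,
# assuming only numpy; make_stft_features itself is not reproduced here.
import numpy as np

toy_mag = np.array([[0.0, 1.0], [4.0, 9.0]])
squashed = np.sqrt(toy_mag)                       # square-root squashing
scaled = (squashed - squashed.min()) / (squashed.max() - squashed.min())
print(scaled)                                     # all values now lie in [0, 1]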
| 5,601 |
def unsubscribe_confirmations(message, futures):
"""Intercepts unsubscribe messages and check for
Future objets with a matching subscribe details.
Parameters
----------
message : str
The unaltered response message returned by bitfinex.
futures : dict
A dict of intercept_id's and future objects.
dict{intercept_id, future_object}
"""
future_id = f"unsubscribe_{message['chanId']}"
futures[future_id].set_result(message)
del futures[future_id]
| 5,602 |
def get_frameheight():
"""return fixed height for extra panel
"""
return 120
| 5,603 |
def default_heart_beat_interval() -> int:
"""
:return: in seconds
"""
return 60
| 5,604 |
def email_valid(email):
"""test for valid email address
>>> email_valid('test@testco.com')
True
>>> email_valid('test@@testco.com')
False
>>> email_valid('test@testco')
False
"""
if email == '':
return True
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
        r'\\[\001-\011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
r'+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
return bool(email_re.match(email))
| 5,605 |
def main():
"""Make a jazz noise here"""
# -------------------------------------------------------------------------------------------------------------------------------
args = get_args()
pic = args.picture
outFile = args.output
# -------------------------------------------------------------------------------------------------------------------------------
imagine = Image.open(pic)
colors = imagine.getpixel((random.randint(0, 150), random.randint(0, 150)))
print("RGB Pixel Colors:", colors)
# -----------------------------------------------------------------------------------------------------------------------------------
    if colors[0] + colors[1] + colors[2] >= 550 or (
            colors[2] > colors[1] and colors[2] > colors[0]
            and colors[0] + colors[1] >= colors[2]):
print("Season Prediction: Winter")
sp = "Season Prediction: Winter"
elif colors[0] > colors[1] and colors[0] > colors[2]:
print("Season Prediction: Fall")
sp = "Season Prediction: Fall"
elif colors[1] > colors[0] and colors[1] > colors[2] and (
colors[2] + colors[0] <= colors[1]):
print("Season Prediction: Spring-Summer")
sp = "Season Prediction: Spring-Summer"
elif colors[2] > colors[1] and colors[2] > colors[0]:
print("Season Prediction: Summer-Spring")
sp = "Season Prediction: Summer-Spring"
if outFile != sys.stdout:
outFile.write("RGB Pixel Colors: " + str(colors) + '\n' + sp + '\n')
else:
pass
| 5,606 |
def test_daily_mean_integers():
"""Test that mean function works for an array of positive integers."""
from inflammation.models import daily_mean
test_array = np.array([[1, 2],
[3, 4],
[5, 6]])
# Need to use Numpy testing functions to compare arrays
npt.assert_array_equal(np.array([3, 4]), daily_mean(test_array))
| 5,607 |
def mysql(filename=None, **conf):
"""
    MySQL connection method (meant to be used as a context manager).
examples:
:type(env) == dict
with mysql(**env) as cur:
cur.execute('select * from message.sms_log
where mobile=175001234567 group by send_time DESC limit 1;')
result = cur.fetchall()
return result
    :return: cursor
"""
if filename:
conf = conf_load(filename).read()
else:
if not conf:
conf = conf_load('../__conf.yaml').read()['MYSQL']
conn = pymysql.connect(**conf)
cur = conn.cursor(cursor=pymysql.cursors.DictCursor)
try:
yield cur
except Exception as e:
logger.error(e)
conn.rollback()
finally:
conn.commit()
cur.close()
conn.close()
| 5,608 |
def get_node_data(workspace: str, graph: str, table: str, node: str) -> Any:
"""Return the attributes associated with a node."""
return Workspace(workspace).graph(graph).node_attributes(table, node)
| 5,609 |
def siso_optional(fn, h_opt, scope=None, name=None):
"""Substitution module that determines to include or not the search
space returned by `fn`.
The hyperparameter takes boolean values (or equivalent integer zero and one
values). If the hyperparameter takes the value ``False``, the input is simply
put in the output. If the hyperparameter takes the value ``True``, the search
space is instantiated by calling `fn`, and the substitution module is
replaced by it.
Args:
fn (() -> (dict[str,deep_architect.core.Input], dict[str,deep_architect.core.Output])):
Function returning a graph fragment corresponding to a sub-search space.
h_opt (deep_architect.core.Hyperparameter): Hyperparameter for whether to
include the sub-search space or not.
scope (deep_architect.core.Scope, optional): Scope in which the module will be
registered. If none is given, uses the default scope.
    name (str, optional): Name used to derive a unique name for the
module. If none is given, uses the class name to derive the name.
Returns:
(dict[str,deep_architect.core.Input], dict[str,deep_architect.core.Output]):
Tuple with dictionaries with the inputs and outputs of the
substitution module.
"""
def substitution_fn(dh):
return fn() if dh["opt"] else identity()
return substitution_module(_get_name(name, "SISOOptional"), substitution_fn,
{'opt': h_opt}, ['in'], ['out'], scope)
| 5,610 |
def tree_to_stream(entries, write):
"""Write the give list of entries into a stream using its write method
:param entries: **sorted** list of tuples with (binsha, mode, name)
:param write: write method which takes a data string"""
ord_zero = ord('0')
bit_mask = 7 # 3 bits set
for binsha, mode, name in entries:
mode_str = b''
for i in xrange(6):
mode_str = bchr(((mode >> (i * 3)) & bit_mask) + ord_zero) + mode_str
# END for each 8 octal value
        # git slices away the first octal if it's zero
if byte_ord(mode_str[0]) == ord_zero:
mode_str = mode_str[1:]
# END save a byte
# here it comes: if the name is actually unicode, the replacement below
# will not work as the binsha is not part of the ascii unicode encoding -
        # hence we must convert to a utf8 string for it to work properly.
# According to my tests, this is exactly what git does, that is it just
# takes the input literally, which appears to be utf8 on linux.
if isinstance(name, text_type):
name = name.encode(defenc)
write(b''.join((mode_str, b' ', name, b'\0', binsha)))
# END for each item
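# A small standalone sketch of the mode-encoding loop above: it packs the six
# octal digits of a tree entry mode and drops a leading zero, so a directory
# mode 0o40000 becomes b"40000" while a regular blob stays b"100644".
def encode_mode(mode):
    ord_zero = ord('0')
    mode_str = b''
    for i in range(6):
        mode_str = bytes([((mode >> (i * 3)) & 7) + ord_zero]) + mode_str
    return mode_str[1:] if mode_str[0] == ord_zero else mode_str

assert encode_mode(0o100644) == b'100644'
assert encode_mode(0o40000) == b'40000'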
| 5,611 |
def saconv3x3_block(in_channels,
out_channels,
stride=1,
pad=1,
**kwargs):
"""
3x3 version of the Split-Attention convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Stride of the convolution.
pad : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
"""
return SAConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=3,
stride=stride,
pad=pad,
**kwargs)
| 5,612 |
def add_hook(**_kwargs):
"""Creates and adds the import hook in sys.meta_path"""
hook = import_hook.create_hook(
transform_source=transform_source,
hook_name=__name__,
extensions=[".pyfr"],
)
return hook
| 5,613 |
def mul(n1, n2):
"""
multiply two numbers
"""
return n1 * n2
| 5,614 |
def pytorch_normalze(img):
"""
https://github.com/pytorch/vision/issues/223
    returns approximately -1 to 1 normalized RGB
"""
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img = normalize(torch.from_numpy(img))
return img.numpy()
| 5,615 |
def get_nic_capacity(driver_info, ilo_fw):
"""Gets the FRU data to see if it is NIC data
Gets the FRU data in loop from 0-255 FRU Ids
and check if the returned data is NIC data. Couldn't
    find any easy way to detect if it is NIC data. We shouldn't be
hardcoding the FRU Id.
:param driver_info: Contains the access credentials to access
the BMC.
:param ilo_fw: a tuple containing major and minor versions of firmware
:returns: the max capacity supported by the NIC adapter.
"""
i = 0x0
value = None
ilo_fw_rev = get_ilo_version(ilo_fw) or DEFAULT_FW_REV
# Note(vmud213): iLO firmware versions >= 2.3 support reading the FRU
# information in a single call instead of iterating over each FRU id.
if ilo_fw_rev < MIN_SUGGESTED_FW_REV:
for i in range(0xff):
# Note(vmud213): We can discard FRU ID's between 0x6e and 0xee
# as they don't contain any NIC related information
if (i < 0x6e) or (i > 0xee):
cmd = "fru print %s" % hex(i)
out = _exec_ipmitool(driver_info, cmd)
if out and 'port' in out and 'Adapter' in out:
value = _parse_ipmi_nic_capacity(out)
if value is not None:
break
else:
continue
else:
cmd = "fru print"
out = _exec_ipmitool(driver_info, cmd)
if out:
for line in out.split('\n'):
if line and 'port' in line and 'Adapter' in line:
value = _parse_ipmi_nic_capacity(line)
if value is not None:
break
return value
| 5,616 |
def test_jones_num_funcs():
""" Test utility functions to convert between jones polarization strings and numbers """
jnums = [-8, -7, -6, -5, -4, -3, -2, -1]
jstr = ['Jyx', 'Jxy', 'Jyy', 'Jxx', 'Jlr', 'Jrl', 'Jll', 'Jrr']
nt.assert_equal(jnums, uvutils.jstr2num(jstr))
nt.assert_equal(jstr, uvutils.jnum2str(jnums))
# Check shorthands
jstr = ['yx', 'xy', 'yy', 'y', 'xx', 'x', 'lr', 'rl', 'll', 'l', 'rr', 'r']
jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1]
nt.assert_equal(jnums, uvutils.jstr2num(jstr))
# Check individuals
nt.assert_equal(-6, uvutils.jstr2num('jyy'))
nt.assert_equal('Jxy', uvutils.jnum2str(-7))
# Check errors
nt.assert_raises(KeyError, uvutils.jstr2num, 'foo')
nt.assert_raises(ValueError, uvutils.jstr2num, 1)
nt.assert_raises(ValueError, uvutils.jnum2str, 7.3)
| 5,617 |
def app(arguments: Dict) -> None:
"""top-level of the polynomial solving app.
"""
root = os.path.dirname('/')
paths = {
'tasks': root,
'current_link': os.path.join(root, 'current'),
'solutions': os.path.join(root, 'current/json/solutions.jsonl'),
'pixels': os.path.join(root, 'current/json/pixels.jsonl'),
'images': os.path.join(root, 'current/images/'),
'final_image': os.path.join(root, 'current/final_image.png')
}
create_required_folder(paths)
# -- TODO wire in optional solving and drawing.
if True:
solve_all_polynomials(arguments, paths)
if True:
generate_polynomial_image(arguments, paths)
assemble_images(list_images(paths['images']), paths['final_image'])
| 5,618 |
def ctypes_header_file_create(dataset, output_dir, custom_includes=None):
"""Creates the C types header file in the specified output directory"""
output_path = os.path.join(output_dir, _header_name_get(dataset))
contents = ctypes_header_render(dataset, custom_includes)
templates.file_write_all_data(output_path, contents)
| 5,619 |
def fold_given_batch_norms(model, layer_pairs: List[PairType]):
"""
Fold a given set of batch_norm layers into conv layers
:param model: Model
:param layer_pairs: Pairs of conv and batch_norm layers to use for folding
:return: None
"""
# Assuming that the entire model in on one device
device = next(model.parameters()).device
model.to('cpu')
list_of_bn_layers = []
for pair in layer_pairs:
if isinstance(pair[0], (torch.nn.BatchNorm2d, torch.nn.BatchNorm1d)):
is_batch_norm_second = False
bn = pair[0]
conv_linear = pair[1]
else:
is_batch_norm_second = True
bn = pair[1]
conv_linear = pair[0]
assert isinstance(conv_linear, (torch.nn.Linear, torch.nn.Conv2d))
list_of_bn_layers.append(bn)
bn_params = libpymo.BNParams()
bn_params.gamma = bn.weight.detach().numpy().reshape(-1)
bn_params.beta = bn.bias.detach().numpy().reshape(-1)
bn_params.runningMean = bn.running_mean.detach().numpy().reshape(-1)
sigma = torch.sqrt(bn.running_var + bn.eps)
bn_params.runningVar = sigma.detach().numpy().reshape(-1)
weight_tensor = libpymo.TensorParams()
weight_tensor.data = conv_linear.weight.detach().numpy().reshape(-1)
weight_shape = np.array(conv_linear.weight.shape)
if len(conv_linear.weight.shape) == 2:
weight_shape = np.append(weight_shape, [1, 1])
weight_tensor.shape = weight_shape
bias_tensor = libpymo.TensorParams()
if conv_linear.bias is not None:
bias_tensor.data = conv_linear.bias.detach().numpy().reshape(-1)
bias_tensor.shape = np.array(conv_linear.bias.shape)
is_bias_valid = True
else:
is_bias_valid = False
bias = libpymo.fold(bn_params, weight_tensor, bias_tensor, is_bias_valid, is_batch_norm_second)
conv_linear.bias = torch.nn.Parameter(torch.Tensor(bias))
conv_linear.weight.data = torch.from_numpy(np.reshape(weight_tensor.data,
np.array(conv_linear.weight.shape)))
conv_linear.weight.data = conv_linear.weight.data.type(torch.FloatTensor)
_delete_bn_from_model(model, list_of_bn_layers)
model.to(device)
| 5,620 |
def isValidInifileKeyName(key):
""" Check that this key name is valid to be used in inifiles, and to be used as a python property name on a q or i object """
return re.match("^[\w_]+$", key)
| 5,621 |
def installed_pkgs():
"""
Return the list of installed packages on the machine
Returns:
list: List of installed packages
CLI Example:
.. code-block:: bash
salt '*' macpackage.installed_pkgs
"""
cmd = "pkgutil --pkgs"
return __salt__["cmd.run"](cmd).split("\n")
| 5,622 |
def extract_feature(audio, sr=44100):
"""
    Extract the features listed below from an audio signal:
    sig:
    rmse:
    silence:
    harmonic:
    pitch:
    audio: audio file path or audio array (np.ndarray)
    return feature_list: np array of shape [n_samples, n_features]
"""
feature_list = []
y = []
if isinstance(audio, str):
y, _ = librosa.load(audio, sr)
elif isinstance(audio, np.ndarray):
y = audio
# 1. sig
sig_mean = np.mean(abs(y))
feature_list.append(sig_mean) # sig_mean
feature_list.append(np.std(y)) # sig_std
# 2. rmse
rmse = librosa.feature.rms(y + 0.0001)[0]
feature_list.append(np.mean(rmse)) # rmse_mean
feature_list.append(np.std(rmse)) # rmse_std
# 3. silence
silence = 0
for e in rmse:
if e <= 0.4 * np.mean(rmse):
silence += 1
silence /= float(len(rmse))
feature_list.append(silence) # silence
# 4. harmonic
y_harmonic = librosa.effects.hpss(y)[0]
feature_list.append(np.mean(y_harmonic) * 1000) # harmonic (scaled by 1000)
# 5. pitch (instead of auto_correlation)
cl = 0.45 * sig_mean
center_clipped = []
for s in y:
if s >= cl:
center_clipped.append(s - cl)
elif s <= -cl:
center_clipped.append(s + cl)
elif np.abs(s) < cl:
center_clipped.append(0)
# auto_corrs = librosa.core.autocorrelate(np.array(center_clipped))
pitch, _, _ = librosa.pyin(y, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'))
pitch = [0 if math.isnan(p) else p for p in pitch]
feature_list.append(np.mean(pitch))
feature_list.append(np.std(pitch))
return np.array(feature_list).reshape(1, -1)
| 5,623 |
def run_with_config(sync, config):
"""
Execute the cartography.sync.Sync.run method with parameters built from the given configuration object.
This function will create a Neo4j driver object from the given Neo4j configuration options (URI, auth, etc.) and
will choose a sensible update tag if one is not specified in the given configuration.
:type sync: cartography.sync.Sync
:param sync: A sync task to run.
:type config: cartography.config.Config
:param config: The configuration to use to run the sync task.
"""
neo4j_auth = None
if config.neo4j_user or config.neo4j_password:
neo4j_auth = (config.neo4j_user, config.neo4j_password)
try:
neo4j_driver = GraphDatabase.driver(
config.neo4j_uri,
auth=neo4j_auth,
)
except neobolt.exceptions.ServiceUnavailable as e:
logger.debug("Error occurred during Neo4j connect.", exc_info=True)
logger.error(
(
"Unable to connect to Neo4j using the provided URI '%s', an error occurred: '%s'. Make sure the Neo4j "
"server is running and accessible from your network."
),
config.neo4j_uri,
e,
)
return
except neobolt.exceptions.AuthError as e:
logger.debug("Error occurred during Neo4j auth.", exc_info=True)
if not neo4j_auth:
logger.error(
(
"Unable to auth to Neo4j, an error occurred: '%s'. cartography attempted to connect to Neo4j "
"without any auth. Check your Neo4j server settings to see if auth is required and, if it is, "
"provide cartography with a valid username and password."
),
e,
)
else:
logger.error(
(
"Unable to auth to Neo4j, an error occurred: '%s'. cartography attempted to connect to Neo4j with "
"a username and password. Check your Neo4j server settings to see if the username and password "
"provided to cartography are valid credentials."
),
e,
)
return
default_update_tag = int(time.time())
if not config.update_tag:
config.update_tag = default_update_tag
return sync.run(neo4j_driver, config)
| 5,624 |
def BuildPartialUpdate(clear, remove_keys, set_entries, field_mask_prefix,
entry_cls, env_builder):
"""Builds the field mask and patch environment for an environment update.
Follows the environments update semantic which applies operations
in an effective order of clear -> remove -> set.
Leading and trailing whitespace is stripped from elements in remove_keys
and the keys of set_entries.
Args:
clear: bool, If true, the patch removes existing keys.
remove_keys: iterable(string), Iterable of keys to remove.
set_entries: {string: string}, Dict containing entries to set.
field_mask_prefix: string, The prefix defining the path to the base of the
proto map to be patched.
entry_cls: AdditionalProperty, The AdditionalProperty class for the type
of entry being updated.
env_builder: [AdditionalProperty] -> Environment, A function which produces
a patch Environment with the given list of entry_cls properties.
Returns:
(string, Environment), a 2-tuple of the field mask defined by the arguments
and a patch environment produced by env_builder.
"""
remove_keys = set(k.strip() for k in remove_keys or [])
# set_entries is sorted by key to make it easier for tests to set the
# expected patch object.
set_entries = OrderedDict(
(k.strip(), v) for k, v in sorted(six.iteritems(set_entries or {})))
if clear:
entries = [
entry_cls(key=key, value=value)
for key, value in six.iteritems(set_entries)
]
return field_mask_prefix, env_builder(entries)
field_mask_entries = []
seen_keys = set()
for key in remove_keys:
field_mask_entries.append('{}.{}'.format(field_mask_prefix, key))
seen_keys.add(key)
entries = []
for key, value in six.iteritems(set_entries):
entries.append(entry_cls(key=key, value=value))
if key not in seen_keys:
field_mask_entries.append('{}.{}'.format(field_mask_prefix, key))
# Sorting field mask entries makes it easier for tests to set the expected
# field mask since dictionary iteration order is undefined.
field_mask_entries.sort()
return ','.join(field_mask_entries), env_builder(entries)
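# A hedged usage sketch with hypothetical stand-ins for entry_cls and
# env_builder, assuming the module-level imports used above (six, OrderedDict)
# are in scope. It shows the remove -> set ordering: a removed key and a set
# key each contribute one entry to the field mask, sorted alphabetically.
from collections import namedtuple

Entry = namedtuple('Entry', ['key', 'value'])
build_env = lambda entries: {'labels': {e.key: e.value for e in entries}}

mask, env = BuildPartialUpdate(
    clear=False, remove_keys=['old '], set_entries={'tier': 'prod'},
    field_mask_prefix='labels', entry_cls=Entry, env_builder=build_env)
print(mask)  # 'labels.old,labels.tier'
print(env)   # {'labels': {'tier': 'prod'}}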
| 5,625 |
def readJSON(
json_path: FilePath, file_text: str = "", conf_file_name: str = ""
) -> object:
"""Reads the JSON from the given file and saves it to a class object with
the JSON elements as attributes.
If an error occurs, the program is exited with an error message!
The JSON must have an element `file_version` that has a value of at least
`CFG_VERSION`, if not, the program is exited with an error message.
Args:
json_path (FilePath): The path to the JSON file to read.
file_text (str, optional): The name of the JSON configuration file for
            logging purposes. Defaults to "", which will be logged as
'a', like in "Writing _a_ JSON configuration file ...".
conf_file_name (str, optional): The string that has to be the value of
`file_name` in the JSON file, if not, the program exits.
Defaults to "".
Returns:
object: A class instance with the JSON elements as attributes.
"""
_logger.warning(
'Parsing {text} config file "{path}"'.format(text=file_text, path=json_path)
)
try:
with io.open(json_path, mode="r", encoding="utf-8") as file:
ret_val = json.load(file, object_hook=lambda dict: SimpleNamespace(**dict))
except Exception as exp:
_logger.critical(
'error "{error}" parsing file "{path}"'.format(error=exp, path=json_path)
)
sys.exit(EXT_ERR_LD_FILE)
try:
checkConfigName(json_path, conf_file_name, ret_val)
checkConfigVersion(json_path, ret_val)
except Exception as excp:
_logger.critical(
'error "{error}" parsing file "{path}", JSON file not valid'.format(
error=excp, path=json_path
)
)
sys.exit(EXT_ERR_NOT_VLD)
try:
setOrigFile(json_path, ret_val)
except Exception as excp:
_logger.critical(
'error "{error}" generating JSON file "{file}" checksum'.format(
error=excp, file=json_path
)
)
return ret_val
| 5,626 |
def add_test(name, func, submenu=None, runall=True, runsub=None):
"""Add a new test to the test menu.
Set submenu to a string to put the test in a submenu with that name. Set
runall=False to exclude the test from the top-level "Run all tests"; runall
defaults to True. Set runsub=False to exclude the test from "Run all
tests" in its submenu; runsub defaults to the same as runall."""
if runsub is None:
runsub = runall
if submenu not in tests:
tests[submenu] = []
if submenu is not None:
i = len(submenus)
submenus.append(submenu)
tests[submenu].append(_Test(name, func, runall, runsub))
| 5,627 |
def create_schema_usb():
"""Create schema usb."""
return vol.Schema(CONFIG_SCHEMA_USB)
| 5,628 |
def _build_field_queries(filters):
"""
Builds field queries.
Same as _build_field_query but expects a dict of field/values and returns a list of queries.
"""
return [
_build_field_query(field, value)
for field, value in filters.items()
]
| 5,629 |
def bycode(ent, group):
"""
Get the data with the given group code from an entity.
Arguments:
ent: An iterable of (group, data) tuples.
group: Group code that you want to retrieve.
Returns:
The data for the given group code. Can be a list of items if the group
code occurs multiple times.
"""
data = [v for k, v in ent if k == group]
if len(data) == 1:
return data[0]
return data
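# A quick usage sketch with a hand-built entity of DXF-style (group, data)
# tuples: a unique group code returns a scalar, a repeated one returns a list.
example_ent = [(0, 'LINE'), (8, 'WALLS'), (10, 1.0), (10, 2.0)]
assert bycode(example_ent, 8) == 'WALLS'
assert bycode(example_ent, 10) == [1.0, 2.0]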
| 5,630 |
def change_output_dir(model_name, old_dat, new_dat):
"""
Change the name of an output directory.
Parameters
----------
model_name : string
String of model name.
old_dat : string
String with current output date of model run (to be replaced).
new_dat : string
String with future output date of model run.
"""
outdir = (os.environ['HOME'] + "/shematOutputDir/" + model_name +
"_output")
# Rename root-date-directory
if os.path.exists(outdir + "/" + old_dat):
os.rename(outdir + "/" + old_dat, outdir + "/" + new_dat)
else:
raise RuntimeError("Directory does not exist: \n" + outdir + "/" +
old_dat)
# Rename dates in subdirectories
subdirs_old = os.listdir(outdir + "/" + new_dat)
subdirs_new = [subdir.replace(old_dat, new_dat) for subdir in subdirs_old]
for i in range(len(subdirs_old)):
os.rename(outdir + "/" + new_dat + "/" + subdirs_old[i],
outdir + "/" + new_dat + "/" + subdirs_new[i])
| 5,631 |
def cmd_web(argv, args):
"""
Usage:
localstack web <subcommand> [options]
Commands:
web start Start the Web dashboard
Options:
--port=<> Network port for running the Web server (default: 8080)
"""
print_version()
if len(argv) <= 1 or argv[1] != 'start':
argv = ['web', 'start'] + argv[1:]
args['<args>'] = ['start'] + args['<args>']
args.update(docopt(cmd_web.__doc__.strip(), argv=argv))
if args['<subcommand>'] == 'start':
import localstack.dashboard.api
port = args['--port'] or config.PORT_WEB_UI
localstack.dashboard.api.serve(port)
| 5,632 |
def subtract_bg(inputs, out_names=None, x_order=None, y_order=None,
reprocess=None):
"""
Model the instrumental background or scattered light level as a function
of position in the input files (based on the counts within specified
nominally-unilluminated regions) and subtract the result from each input
to remove the estimated contamination.
Parameters
----------
inputs : DataFileList or DataFile
Input, bias-subtracted images, in the raw data format, each of which
must have an entry named 'bg_reg' in its `cals` dictionary, specifying
the unilluminated detector regions to use for background estimation;
see ``background_regions``.
out_names : `str`-like or list of `str`-like, optional
Names of output images containing the background-subtracted spectra. If
None (default), the names of the DataFile instances returned will be
constructed from those of the corresponding input files, prefixed with
'b' as in the Gemini IRAF package.
x_order, y_order : int or list of int, optional
Order of the Legendre surface fit along rows and columns, respectively,
for each CCD (or all CCDs if a single integer). With the default of
None, orders of [5,9,5] or [5,5,9,5,5,5] are used for x and [5,7,5] or
[5,5,7,5,5,5] for columns, as appropriate. The index of the higher
number may need adjusting by the user to match the CCD where the IFU
slits overlap (if applicable). This logic will probably be made a bit
more intelligent in a future version.
See "help gfscatsub" in IRAF for more detailed information.
Returns
-------
outimage : DataFileList
The background-subtracted images produced by gfscatsub.
Package 'config' options
------------------------
reprocess : bool or None
Re-generate and overwrite any existing output files on disk or skip
processing and re-use existing results, where available? The default
of None instead raises an exception where outputs already exist
(requiring the user to delete them explicitly). The processing is
always performed for outputs that aren't already available.
"""
# Here we have to expand out the output filenames instead of letting
# run_task do it because it currently doesn't recognize text files as main
# inputs. This should be replaced by run-task-like decorator functionality
# in the longer run.
# Convert inputs to a DataFileList if needed:
inputs = to_datafilelist(inputs)
# Use default prefix if the output filenames are unspecified:
prefix = 'b'
if not out_names:
out_names = [FileName(indf, prefix=prefix) for indf in inputs]
elif len(out_names) != len(inputs):
raise ValueError('inputs & out_names have unmatched lengths')
# Get lists of bg regions to use from the input file "cals" dictionaries:
try:
bg_reg_list = [df.cals['bg_reg'] for df in inputs]
except KeyError:
raise KeyError('one or more inputs is missing associated list of '\
'background regions')
# Avoid raising obscure errors if the wrong thing gets attached as bg_reg.
# To do: consider writing a more generic type-checking function.
if not all(bg_reg and hasattr(bg_reg, '__iter__') and \
all(reg and hasattr(reg, '__iter__') and \
all(isinstance(n, (int, str)) for n in reg) \
for reg in bg_reg \
) for bg_reg in bg_reg_list
):
raise ValueError('cals[\'bg_reg\'] should be a list of limit lists')
# Loop over the inputs explicitly, since run_task currently can't recognize
# lists of text files as inputs:
mode = 'update' if not reprocess else 'overwrite'
outputs = DataFileList(mode=mode)
for indf, bg_reg, outname in zip(inputs, bg_reg_list, out_names):
# Save the background regions for each instance as a temporary file
# for IRAF:
gapfn = new_filename(base=indf.filename.base+'_gaps', ext='')
with open(gapfn, 'w') as gapfile:
for reg in bg_reg:
gapfile.write('{0}\n'.format(' '.join(str(n) for n in reg)))
# Generate default orders appropriate for the number of detectors in
# each DataFile, if unspecified:
len_df = len(indf)
if x_order is None:
xorder = [5] * len_df
xorder[(len_df-1)//2] = 9
else:
if isinstance(x_order, (numbers.Integral, str)):
xorder = (x_order,)
else:
xorder = x_order
if y_order is None:
yorder = [5] * len_df
yorder[(len_df-1)//2] = 7
else:
if isinstance(y_order, (numbers.Integral, str)):
yorder = (y_order,)
else:
yorder = y_order
# Convert list of orders to comma-separated IRAF syntax:
xorder = ','.join(str(n) for n in xorder)
yorder = ','.join(str(n) for n in yorder)
result = run_task(
'gemini.gmos.gfscatsub',
inputs={'image' : indf}, outputs={'outimage' : outname},
prefix=None, suffix=None, comb_in=False, MEF_ext=False,
path_param=None, reprocess=reprocess, mask=gapfn,
xorder=xorder, yorder=yorder, cross=True
)
# Finished with the temporary file:
os.remove(gapfn)
# Accumulate the output DataFileList, copying the dictionary of cals
# from each input to the output until persistence is implemented, since
# the same ones are usually needed at the next step:
outdf = result['outimage'][0]
outdf.cals.update(indf.cals)
outputs.append(outdf)
return outputs
| 5,633 |
def update_permissions() -> None:
"""
更新权限角色数据
"""
for role_name, permissions in mapping.items():
role = get_or_create(Role, role_name)
permissions = get_or_create_from_lst(Permission, *permissions)
role.add_permissions(permissions)
| 5,634 |
def get_midi_programs(midi: MidiFile) -> List[Tuple[int, bool]]:
""" Returns the list of programs of the tracks of a MIDI, deeping the
same order. It returns it as a list of tuples (program, is_drum).
:param midi: the MIDI object to extract tracks programs
:return: the list of track programs, as a list of tuples (program, is_drum)
"""
return [(int(track.program), track.is_drum) for track in midi.instruments]
| 5,635 |
def is_depth_wise_conv(module):
"""Determine Conv2d."""
if hasattr(module, "groups"):
return module.groups != 1 and module.in_channels == module.out_channels
elif hasattr(module, "group"):
return module.group != 1 and module.in_channels == module.out_channels
| 5,636 |
def list_all_routed():
"""
List all the notifications that have been routed to any repository, limited by the parameters supplied
in the URL.
See the API documentation for more details.
:return: a list of notifications appropriate to the parameters
"""
return _list_request()
| 5,637 |
def sliding_window(img1, img2, patch_size=(100,302), istep=50):#, jstep=1, scale=1.0):
"""
    get patches and their upper left corner coordinates
    The size of the sliding window is currently fixed.
    patch_size: sliding window's size
istep: Row stride
"""
Ni, Nj = (int(s) for s in patch_size)
for i in range(0, img1.shape[0] - Ni+1, istep):
#for j in range(0, img1.shape[1] - Nj, jstep):
#patch = (img1[i:i + Ni, j:j + Nj], img2[i:i + Ni, j:j + Nj])
patch = (img1[i:i + Ni, 39:341], img2[i:i + Ni, 39:341])
yield (i, 39), patch
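# A small usage sketch on synthetic images, assuming numpy is available; with
# the fixed column slice 39:341 the inputs must be at least 341 pixels wide.
import numpy as np

img_a = np.zeros((300, 400))
img_b = np.ones((300, 400))
for (row, col), (patch_a, patch_b) in sliding_window(img_a, img_b):
    print(row, col, patch_a.shape)  # 0 39 (100, 302), 50 39 (100, 302), ...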
| 5,638 |
def read(file_name):
"""Read in the supplied file name from the root directory.
Args:
file_name (str): the name of the file
Returns: the content of the file
"""
this_dir = os.path.dirname(__file__)
file_path = os.path.join(this_dir, file_name)
with open(file_path) as f:
return f.read()
| 5,639 |
def run_command(message, command_name, params, command=None):
"""登録したコマンドに対して各種操作を行う
:param message: slackbot.dispatcher.Message
:param str command: 登録済のコマンド名
:param str params: 操作内容 + 語録
"""
# コマンドが登録済みの場合、登録済みコマンドの方で
# 応答をハンドルしている為ここではリターンする
if command_name in command_patterns(message):
return
data = params.split(maxsplit=1)
subcommand = data[0]
try:
if subcommand == 'pop':
# 最後に登録された語録を削除
pop_term(message, command)
elif subcommand == 'list':
# 語録の一覧を返す
get_term(message, command)
elif subcommand == 'search':
# 語録を検索
search_term(message, command, data[1])
elif subcommand in ('del', 'delete', 'rm', 'remove'):
# 語録を削除
del_term(message, command, data[1])
elif subcommand == 'add':
# 語録を追加
add_term(message, command, data[1])
else:
# サブコマンドが存在しない場合も追加
add_term(message, command, params)
except IndexError:
# ヘルプを返す
botsend(message, HELP)
| 5,640 |
def prepare_stdin(
method: str, basis: str, keywords: Dict[str, Any], charge: int, mult: int, geoopt: Optional[str] = ""
) -> str:
"""Prepares a str that can be sent to define to produce the desired
input for Turbomole."""
# Load data from keywords
unrestricted = keywords.get("unrestricted", False)
grid = keywords.get("grid", "m3")
methods_flat = list(it.chain(*[m for m in METHODS.values()]))
if method not in methods_flat:
raise InputError(f"Method {method} not in supported methods " f"{methods_flat}!")
# This variable may contain substitutions that will be made to
# the control file after it was created from a define call, e.g.
# setting XC functionals that aren't hardcoded in define etc.
subs = None
def occ_num_mo_data(charge: int, mult: int, unrestricted: Optional[bool] = False) -> str:
"""Handles the 'Occupation Number & Molecular Orbital' section
of define. Sets appropriate charge and multiplicity in the
        system and decides between restricted and unrestricted calculation.
RHF and UHF are supported. ROHF could be implemented later on
by using the 's' command to list the available MOs and then
close the appropriate number of MOs to doubly occupied MOs
by 'c' by comparing the number of total MOs and the desired
multiplicity."""
# Do unrestricted calculation if explicitly requested or mandatory
unrestricted = unrestricted or (mult != 1)
unpaired = mult - 1
charge = int(charge)
occ_num_mo_data_stdin = f"""eht
y
{charge}
y
"""
if unrestricted:
# Somehow Turbomole/define asks us if we want to write
# natural orbitals... we don't want to.
occ_num_mo_data_stdin = f"""eht
y
{charge}
n
u {unpaired}
*
n
"""
return occ_num_mo_data_stdin
def set_method(method, grid):
if method == "hf":
method_stdin = ""
elif method in METHODS["ricc2"]:
# Setting geoopt in $ricc2 will make the ricc2 module to produce
# a gradient.
# Drop the 'ri'-prefix of the method string.
geoopt_stdin = f"geoopt {method[2:]} ({geoopt})" if geoopt else ""
method_stdin = f"""cc
freeze
*
cbas
*
ricc2
{method}
list models
{geoopt_stdin}
list geoopt
*
*
"""
elif method in METHODS["dft_hardcoded"]:
method_stdin = f"""dft
on
func
{method}
grid
{grid}
"""
# TODO: Handle xcfuncs that aren't defined in define, e.g.
# new functionals introduced in 7.4 from libxc. ...
# Maybe the best idea would be to not set the functional here
# but just turn on DFT and add it to the control file later on.
elif method in METHODS["dft_libxc"]:
raise InputError("libxc functionals are not supported right now.")
return method_stdin
# Resolution of identity
def set_ri(keywords):
# TODO: senex/RIJCOSX?
ri_kws = {ri_kw: keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]}
ri_stdins = {"rijk": "rijk\non\n\n", "ri": "ri\non\n\n", "marij": "marij\n\n"}
ri_stdin = "\n".join([ri_stdins[ri_kw] for ri_kw, use in ri_kws.items() if use])
return ri_stdin
# ri_stdin = ""
# # Use either RIJK or RIJ if requested.
# if ri_kws["rijk"]:
# ri_stdin = """rijk
# on
# """
# elif ri_kws["rij"]:
# ri_stdin = """rij
# on
# """
# # MARIJ can be used additionally.
# if ri_kws["marij"]:
# ri_stdin += """marij
# """
# return ri_stdin
# Dispersion correction
def set_dsp(keywords):
        # TODO: set_ri and set_dsp are basically the same function. Maybe
# we could abstract this somehow?
dsp_kws = {dsp_kw: keywords.get(dsp_kw, False) for dsp_kw in KEYWORDS["dsp"]}
dsp_stdins = {"d3": "dsp\non\n\n", "d3bj": "dsp\nbj\n\n"}
dsp_stdin = "\n".join([dsp_stdins[dsp_kw] for dsp_kw, use in dsp_kws.items() if use])
return dsp_stdin
kwargs = {
"init_guess": occ_num_mo_data(charge, mult, unrestricted),
"set_method": set_method(method, grid),
"ri": set_ri(keywords),
"dsp": set_dsp(keywords),
"title": "QCEngine Turbomole",
"scf_conv": 8,
"scf_iters": 150,
"basis": basis,
}
stdin = """
{title}
a coord
*
no
b
all {basis}
*
{init_guess}
{set_method}
{ri}
{dsp}
scf
conv
{scf_conv}
iter
{scf_iters}
*
""".format(
**kwargs
)
return stdin, subs
| 5,641 |
def Lstart(gridname='BLANK', tag='BLANK', ex_name='BLANK'):
"""
This adds more run-specific entries to Ldir.
"""
# put top level information from input into a dict
Ldir['gridname'] = gridname
Ldir['tag'] = tag
Ldir['ex_name'] = ex_name
# and add a few more things
Ldir['gtag'] = gridname + '_' + tag
Ldir['gtagex'] = gridname + '_' + tag + '_' + ex_name
Ldir['grid'] = Ldir['data'] / 'grids' / gridname
Ldir['forecast_days'] = 3
Ldir['ds_fmt'] = ds_fmt
Ldir['roms_time_units'] = roms_time_units
Ldir['modtime0'] = modtime0
return Ldir.copy()
    # the use of copy() means different calls to Lstart (e.g. when importing
    # plotting_functions) do not overwrite each other
| 5,642 |
def summation(n, term):
"""Return the sum of numbers 1 through n (including n) wíth term applied to each number.
Implement using recursion!
>>> summation(5, lambda x: x * x * x) # 1^3 + 2^3 + 3^3 + 4^3 + 5^3
225
>>> summation(9, lambda x: x + 1) # 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10
54
>>> summation(5, lambda x: 2**x) # 2^1 + 2^2 + 2^3 + 2^4 + 2^5
62
>>> # Do not use while/for loops!
>>> from construct_check import check
>>> # ban iteration
>>> check(HW_SOURCE_FILE, 'summation',
... ['While', 'For'])
True
"""
assert n >= 1
"*** YOUR CODE HERE ***"
| 5,643 |
def rfe_w2(x, y, p, classifier):
"""RFE algorithm, where the ranking criteria is w^2,
described in [Guyon02]_. `classifier` must be an linear classifier
with learn() and w() methods.
.. [Guyon02] I Guyon, J Weston, S Barnhill and V Vapnik. Gene Selection for Cancer Classification using Support Vector Machines. Machine Learning, 2002.
:Parameters:
x: 2d array_like object (N,P)
training data
y : 1d array_like object integer (N)
class labels (only two classes)
p : float [0.0, 1.0]
percentage of features (upper rounded) to remove
at each iteration (p=0 one variable)
classifier : object with learn() and w() methods
object
:Returns:
ranking : 1d numpy array int
feature ranking. ranking[i] contains the feature index ranked
in i-th position.
"""
if (p < 0.0) or (p > 1.0):
raise ValueError("parameter p must be in [0.0, 1.0]")
if not (hasattr(classifier, 'learn') and hasattr(classifier, 'w')):
raise ValueError("parameter classifier must have learn() and w() methods")
xarr = np.asarray(x, dtype=np.float)
yarr = np.asarray(y, dtype=np.int)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if xarr.shape[0] != yarr.shape[0]:
raise ValueError("x, y shape mismatch")
labels = np.unique(yarr)
if labels.shape[0] != 2:
raise ValueError("number of classes must be = 2")
idxglobal = np.arange(xarr.shape[1], dtype=np.int)
ranking = []
while True:
nelim = np.max((int(np.ceil(idxglobal.shape[0] * p)), 1))
xi = xarr[:, idxglobal]
classifier.learn(xi, yarr)
w = classifier.w()
idxsorted = np.argsort(w**2)
# indexes to remove
idxelim = idxglobal[idxsorted[:nelim]][::-1]
ranking.insert(0, idxelim)
# update idxglobal
idxglobal = idxglobal[idxsorted[nelim:]]
idxglobal.sort()
if len(idxglobal) <= 1:
ranking.insert(0, idxglobal)
break
return np.concatenate(ranking)
| 5,644 |
def compress_timeline(timeline: List, salt: bytes) -> List:
"""
Compress the verbose Twitter feed into a small one. Just keep the useful elements.
The images are downloaded per-request.
Args:
timeline (List): The Twitter timeline.
salt (bytes): The salt to apply on the filename.
Returns:
List: The timeline with less information and links to the (locally) stored images.
"""
compressed_timeline = []
for tweet in timeline:
profile_image_url = tweet["user"]["profile_image_url_https"]
compressed_tweet = {
"created_at": tweet["created_at"],
"text": tweet["text"],
"id_str": tweet["id_str"],
"user": {
"name": tweet["user"]["name"],
"screen_name": tweet["user"]["screen_name"],
"profile_image_origin": encode_media_origin(profile_image_url),
"profile_image_filename": create_media_filename(
profile_image_url, salt
),
},
}
if tweet["retweeted"]:
original_source = tweet["retweeted_status"]["user"]
profile_image_url = original_source["profile_image_url_https"]
compressed_tweet["retweeted_status"] = {
"user": {
"name": original_source["name"],
"screen_name": original_source["screen_name"],
"profile_image_origin": encode_media_origin(profile_image_url),
"profile_image_filename": create_media_filename(
profile_image_url, salt
),
}
}
compressed_timeline.append(compressed_tweet)
return compressed_timeline
| 5,645 |
def validate_non_empty_img_list(item_list):
"""[summary]
Args:
item_list ([type]): [description]
Raises:
EmptyImageDatasetError: [description]
"""
if item_list:
pass
else:
raise EmptyImageDatasetError(item_list)
| 5,646 |
def createFinalCompactedData(compacted_data,elevations):
"""
This function creates a dataframe that combines the RGB data and the elevations data
into a dataframe that can be used for analysis
Parameters
----------
compacted_data : list of compacted data returned from condensePixels.
elevations : list of elevations from getUSGSElevations.
Returns
-------
final_compacted_data : dataframe of merged data.
"""
lats = []
lons = []
reds = []
greens = []
blues = []
els = []
for i in range(len(compacted_data)):
for j in range(len(compacted_data[0])):
reds.append(compacted_data[i][j][0])
greens.append(compacted_data[i][j][1])
blues.append(compacted_data[i][j][2])
lats.append(compacted_data[i][j][3])
lons.append(compacted_data[i][j][4])
els.append(elevations[i][j])
final_compacted_data = pd.DataFrame({'Lat':lats,'Lon':lons,'Elevation':els,'Red':reds,'Green':greens,'Blue':blues})
return final_compacted_data
| 5,647 |
def get_sorted_nodes_edges(bpmn_graph):
"""
Assure an ordering as-constant-as-possible
Parameters
--------------
bpmn_graph
BPMN graph
Returns
--------------
nodes
List of nodes of the BPMN graph
edges
List of edges of the BPMN graph
"""
graph = bpmn_graph.get_graph()
graph_nodes = list(graph.nodes(data=False))
graph_edges = list(graph.edges(data=False))
bfs = bfs_bpmn(graph_nodes, graph_edges)
graph_nodes = sort_nodes_given_bfs(graph_nodes, bfs)
graph_edges = sort_edges_given_bfs(graph_edges, bfs)
return graph_nodes, graph_edges
| 5,648 |
def treat_the_ducks(animals):
"""
treat the ducks finds all the ducks in the list of given animals and:
1. gives them proper exercise - walking and quacking
2. feeds them with appropriate duck food
"""
for animal in animals:
print()
print(animal_type_name(animal), ':')
try:
animal.walk() # walks like a duck?
animal.quack() # talks like a duck?
except:
continue # this is not a duck, go to next animal
# its a duck!, so give it some duck food
duck = animal
feed_animal(duck, 'duck food')
| 5,649 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Samsung TV platform."""
known_devices = hass.data.get(KNOWN_DEVICES_KEY)
if known_devices is None:
known_devices = set()
hass.data[KNOWN_DEVICES_KEY] = known_devices
uuid = None
# Is this a manual configuration?
if config.get(CONF_HOST) is not None:
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
mac = config.get(CONF_MAC)
broadcast = config.get(CONF_BROADCAST_ADDRESS)
timeout = config.get(CONF_TIMEOUT)
update_method = config.get(CONF_UPDATE_METHOD)
update_custom_ping_url = config.get(CONF_UPDATE_CUSTOM_PING_URL)
source_list = config.get(CONF_SOURCE_LIST)
app_list = config.get(CONF_APP_LIST)
channel_list = config.get(CONF_CHANNEL_LIST)
api_key = config.get(CONF_API_KEY)
device_id = config.get(CONF_DEVICE_ID)
show_channel_number = config.get(CONF_SHOW_CHANNEL_NR)
scan_app_http = config.get(CONF_SCAN_APP_HTTP)
is_frame_tv = config.get(CONF_IS_FRAME_TV)
show_logos = config.get(CONF_SHOW_LOGOS)
elif discovery_info is not None:
tv_name = discovery_info.get("name")
model = discovery_info.get("model_name")
host = discovery_info.get("host")
name = f"{tv_name} ({model})"
port = DEFAULT_PORT
timeout = DEFAULT_TIMEOUT
update_method = DEFAULT_UPDATE_METHOD
update_custom_ping_url = None
source_list = DEFAULT_SOURCE_LIST
app_list = None
mac = None
udn = discovery_info.get("udn")
if udn and udn.startswith("uuid:"):
uuid = udn[len("uuid:") :]
else:
_LOGGER.warning("Cannot determine device")
return
# Only add a device once, so discovered devices do not override manual
# config.
ip_addr = socket.gethostbyname(host)
if ip_addr not in known_devices:
known_devices.add(ip_addr)
add_entities([SamsungTVDevice(host, port, name, timeout, mac, uuid, update_method, update_custom_ping_url, source_list, app_list, channel_list, api_key, device_id, show_channel_number, broadcast, scan_app_http, is_frame_tv, show_logos)])
_LOGGER.info("Samsung TV %s:%d added as '%s'", host, port, name)
else:
_LOGGER.info("Ignoring duplicate Samsung TV %s:%d", host, port)
| 5,650 |
def test_work_order_specialcharacter_data_single_index_indata(setup_config):
"""Testing work order request with all
special characters in index of indata """
# input file name
request = 'work_order_tests/input' \
'/work_order_specialcharacter_data_single_index_indata.json'
work_order_response, generic_params = (work_order_request_params
(setup_config, request))
err_cd, work_order_get_result_response = (work_order_get_result_params
(work_order_response[:6],
generic_params))
assert (verify_work_order_signature(work_order_get_result_response,
generic_params[0])
is TestStep.SUCCESS.value)
assert (decrypt_work_order_response(work_order_get_result_response,
work_order_response[3],
work_order_response[4])[0]
is TestStep.SUCCESS.value)
# WorkOrderGetResult API Response validation with key parameters
assert (validate_response_code(work_order_get_result_response) is
TestStep.SUCCESS.value)
| 5,651 |
def output_nums(
queue_double_nums: queue.Queue[int],
p_double_nums_done: threading.Event) -> None:
""" output nums """
one_last_time = False
while True:
try:
num = queue_double_nums.get(timeout=0.1)
print("output nums: " + str(num))
except queue.Empty:
if p_double_nums_done.is_set():
if one_last_time:
break
else:
one_last_time = True
continue
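# A minimal producer/consumer sketch driving output_nums in a worker thread;
# the event signals that no more numbers will arrive so the loop can exit.
import queue
import threading

nums_queue = queue.Queue()
done_event = threading.Event()
worker = threading.Thread(target=output_nums, args=(nums_queue, done_event))
worker.start()
for n in (1, 2, 3):
    nums_queue.put(n)
done_event.set()
worker.join()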
| 5,652 |
def list_commits(
access_key: str,
url: str,
owner: str,
dataset: str,
*,
revision: Optional[str] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
) -> Dict[str, Any]:
"""Execute the OpenAPI `GET /v2/datasets/{owner}/{dataset}/commits`.
Arguments:
access_key: User's access key.
url: The URL of the graviti website.
owner: The owner of the dataset.
dataset: Name of the dataset, unique for a user.
revision: The information to locate the specific commit, which can be the commit id,
the branch name, or the tag name.
offset: The offset of the page. The default value of this param in OpenAPIv2 is 0.
limit: The limit of the page. The default value of this param in OpenAPIv2 is 24.
Returns:
The response of OpenAPI.
Examples:
>>> list_commits(
... "ACCESSKEY-********",
... "https://api.graviti.com",
... "czhual",
... "MNIST",
... )
{
"commits": [
{
"commit_id": "85c57a7f03804ccc906632248dc8c359",
"parent_commitId": "784ba0d3bf0a41f6a7bfd771d8c00fcb",
"title": "upload data",
"description": "",
"committer": "Gravitier",
"committed_at": "2021-03-03T18:58:10Z"
}
],
"offset": 0,
"record_size": 1,
"total_count": 1
}
"""
url = f"{url}/v2/datasets/{owner}/{dataset}/commits"
params: Dict[str, Any] = {}
if offset is not None:
params["offset"] = offset
if limit is not None:
params["limit"] = limit
if revision is not None:
params["revision"] = revision
return open_api_do("GET", access_key, url, params=params).json()
| 5,653 |
def dijkstra(graph, start, end=None):
"""
Find shortest paths from the start vertex to all
vertices nearer than or equal to the end.
The input graph G is assumed to have the following
representation: A vertex can be any object that can
be used as an index into a dictionary. G is a
dictionary, indexed by vertices. For any vertex v,
G[v] is itself a dictionary, indexed by the neighbors
of v. For any edge v->w, G[v][w] is the length of
the edge.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Original by
David Eppstein, UC Irvine, 4 April 2002
http://code.activestate.com/recipes/119466-dijkstras-algorithm-for-shortest-paths/
>>> G = DirectedGraph({'s':{'u':10, 'x':5}, 'u':{'v':1, 'x':2}, 'v':{'y':4}, 'x':{'u':3, 'v':9, 'y':2}, \
'y':{'s':7, 'v':6}})
>>> distances, predecessors = dijkstra(G, 's', 'v')
>>> sorted(distances.items())
[('s', 14), ('u', 8), ('v', 9), ('x', 5), ('y', 7)]
>>> sorted(predecessors.items())
[('s', 'y'), ('u', 'x'), ('v', 'u'), ('x', 's'), ('y', 'x')]
"""
import heapq
distances = {} # dictionary of final distances
predecessors = {} # dictionary of predecessors (previous node)
queue = [] # queue
heapq.heappush(queue, (0, start))
while len(queue) > 0:
distance, node = heapq.heappop(queue)
if node in distances and distance > distances[node]:
continue
if node == end:
break
# Loop through neighbours
edges = graph.edges(node, distance=distance)
for neighbour, length in edges.items():
total = distance + length
if neighbour in distances:
if total >= distances[neighbour]:
continue
distances[neighbour] = total
predecessors[neighbour] = node
heapq.heappush(queue, (total, neighbour))
return distances, predecessors
| 5,654 |
def get_owner_from_path(path):
"""Get the username of the owner of the given file"""
if "pwd" in sys.modules:
# On unix
return pwd.getpwuid(os.stat(path).st_uid).pw_name
# On Windows
f = win32security.GetFileSecurity(path, win32security.OWNER_SECURITY_INFORMATION)
username, _, _ = win32security.LookupAccountSid(
None, f.GetSecurityDescriptorOwner()
)
return username
| 5,655 |
def tournament_selection(pop, size):
""" tournament selection
    individuals eliminate one another until the desired breeding size is reached
"""
participants = [ind for ind in pop.population]
breeding = []
# could implement different rounds here
# but I think that's almost the same as calling tournament different times with smaller sizes
for i in range(size):
a, b = rng.choice(participants, 2)
if a > b:
breeding.append(a)
participants.remove(a)
else:
breeding.append(b)
participants.remove(b)
return breeding
| 5,656 |
def save_state_temp(state_temp_df, output_data):
"""
Save United States' state temperature dimension table dataframe to parquet files in S3.
Arguments:
state_temp_df - State temperature dataframe
output_data - Location of parquet files output data
Returns:
None
"""
    # write state temperature data to parquet files partitioned by state in S3
state_temp_df.write.partitionBy("partition_state").mode('overwrite').parquet(output_data + "state_temperature/states.parquet")
| 5,657 |
def bond_number(r_max, sigma, rho_l, g):
""" calculates the Bond number for the largest droplet according to
Cha, H.; Vahabi, H.; Wu, A.; Chavan, S.; Kim, M.-K.; Sett, S.; Bosch, S. A.; Wang, W.; Kota, A. K.; Miljkovic, N.
Dropwise Condensation on Solid Hydrophilic Surfaces. Science Advances 2020, 6 (2), eaax0746.
https://doi.org/10.1126/sciadv.aax0746"""
l_y = math.sqrt(sigma / (rho_l*g))
bond = r_max**2 / l_y**2
return bond
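# A worked example with rough room-temperature water properties (assumed
# values, not taken from the cited paper): sigma ~ 0.072 N/m, rho_l ~ 998
# kg/m^3, so a 1 mm droplet gives a Bond number well below 1.
r_max = 1.0e-3  # largest droplet radius in metres
bo = bond_number(r_max, sigma=0.072, rho_l=998.0, g=9.81)
print(round(bo, 3))  # ~0.136, i.e. surface tension dominates gravity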
| 5,658 |
def test_parent_table_type(input_field, table_type):
"""Test ``parent_table_type`` property."""
assert input_field.parent_table_type == table_type
| 5,659 |
def validate_singularity(descript_dict, sub_params, params, name):
"""If Singularity is enabled in a group, there should be at least one user wrapper for that group
@param descript_dict: dictionaries with user files
@param sub_params: attributes in the group section of the XML file
@param params: attributes in the general section of the XML file
@param name: group name
@return:
"""
glidein_singularity_use = ""
if "GLIDEIN_Singularity_Use" in sub_params.attrs:
glidein_singularity_use = sub_params.attrs["GLIDEIN_Singularity_Use"]["value"]
elif "GLIDEIN_Singularity_Use" in params.attrs:
glidein_singularity_use = params.attrs["GLIDEIN_Singularity_Use"]["value"]
if glidein_singularity_use in ["OPTIONAL", "PREFERRED", "REQUIRED", "REQUIRED_GWMS"]:
# Using Singularity, check that there is a wrapper
if not has_file_wrapper(descript_dict): # Checks within the group files
if not has_file_wrapper_params(
params.files
): # Check global files using the params (main file dict is not accessible)
raise RuntimeError(
"Error: group %s allows Singularity (%s) but has no wrapper file in the files list"
% (name, glidein_singularity_use)
)
| 5,660 |
def spatial_conv(inputs,
conv_type,
kernel,
filters,
stride,
is_training,
activation_fn='relu',
data_format='channels_last'):
"""Performs 1x1 conv followed by 2d or depthwise conv.
Args:
inputs: `Tensor` of size `[batch*time, height, width, channels]`. Only
supports 'channels_last' as the data format.
conv_type: 'string' of "std", "depth", "maxpool", or "avgpool" this selects
the spatial conv/pooling method.
kernel: `int` kernel size to be used for `conv2d` or max_pool2d` operations.
Should be a positive integer.
filters: `int` number of filters in the convolution.
stride: 'int' temporal stride
is_training: 'bool' specifying whether in training mode or not.
activation_fn: 'string' the activation function to use (relu or swish)
data_format: `str`. Only supports 'channels_last' as the data format.
Returns:
A `Tensor` of the same data_format
"""
if kernel == 1:
return inputs
use_relu = (activation_fn == 'relu')
if conv_type == 'std' or conv_type == 'depth':
inputs = conv2d(inputs, 1, filters, 1, is_training, use_relu=use_relu)
if not use_relu:
inputs = hard_swish(inputs)
if conv_type == 'std' or conv_type == '1std':
inputs = conv2d(inputs, int(kernel), filters, int(stride), is_training,
use_relu=use_relu)
if not use_relu:
inputs = hard_swish(inputs)
elif conv_type == 'depth':
depth_multiplier = 1
depthwise_kernel_shape = (int(kernel), int(kernel), inputs.shape[-1],
depth_multiplier)
depthwise_kernel = contrib_framework.model_variable(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
dtype=tf.float32,
initializer=contrib_layers.variance_scaling_initializer(
factor=2.0, mode='FAN_IN', uniform=False),
trainable=True)
inputs = tf.nn.depthwise_conv2d(
inputs,
tf.cast(depthwise_kernel, inputs.dtype),
strides=[1, int(stride), int(stride), 1],
padding='SAME',
rate=[1, 1],
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
inputs = bn.batch_norm_relu(
inputs,
is_training,
relu=use_relu,
data_format=data_format)
if not use_relu:
inputs = hard_swish(inputs)
elif conv_type == 'maxpool':
inputs = tf.layers.max_pooling2d(
inputs,
int(kernel),
int(stride),
padding='same',
data_format=data_format)
elif conv_type == 'avgpool':
inputs = tf.layers.average_pooling2d(
inputs,
int(kernel),
int(stride),
padding='same',
data_format=data_format)
return inputs
| 5,661 |
async def timeron(websocket, battleID):
"""Start the timer on a Metronome Battle.
"""
return await websocket.send(f'{battleID}|/timer on')
| 5,662 |
def to_unit_vector(this_vector):
""" Convert a numpy vector to a unit vector
Arguments:
this_vector: a (3,) numpy array
Returns:
new_vector: a (3,) array with the same direction but unit length
"""
norm = numpy.linalg.norm(this_vector)
assert norm > 0.0, "vector norm must be greater than 0"
if norm:
return this_vector/numpy.linalg.norm(this_vector)
else:
return this_vector
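# A quick usage sketch, assuming numpy is imported as `numpy` the way the
# function above expects: a 3-4-5 vector normalizes to unit length.
import numpy

v = numpy.array([3.0, 4.0, 0.0])
print(to_unit_vector(v))  # [0.6 0.8 0. ]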
| 5,663 |
def simulate_SA_faults(model, path, N, criterion):
""" Load the model"""
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['model_state_dict'])
print('Best acc ', checkpoint['test_accuracy']) # assumes model saves checkpoint accuracy
print("Evaluating Stuck-At Faults:---------------------------")
test_runs, best_acc = N, 0 # define test runs
fi = StuckAtFaults() # define fault injection class
start_percent = 5
end_percent = 20
for percent in range(start_percent, end_percent, 5):
print('Percentage of SA both faults: ', percent, '\n')
test_accs = []
for test_run in range(test_runs):
model = fi.FI_SA_Both(percent, model, first=True)
bn_stat_calibrate(model)
test_acc = test(model, criterion)
test_accs.append(test_acc)
if best_acc < test_acc:
print('---------------------- best acc updated--------------------')
best_acc = test_acc
print("Epochs {} Test accuracy: {:.4f} Best accuracy: {:.4f} -----------".format(test_run, test_acc,
best_acc))
model.load_state_dict(checkpoint['model_state_dict'])
mean_acc = np.round(np.mean(test_accs), 3)
std_acc = np.round(np.std(test_accs), 3)
print('percent ', percent, ' mean acc ', mean_acc, ' deviation ', std_acc)
| 5,664 |
def decode(msg):
""" Convert data per pubsub protocol / data format
Args:
msg: The msg from Google Cloud
Returns:
data: The msg data as a string
"""
if 'data' in msg:
data = base64.b64decode(msg['data']).decode('utf-8')
return data
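# Usage sketch (hypothetical payload, not part of the original source): Pub/Sub
# delivers the message body base64-encoded under the 'data' key.
example_msg = {'data': base64.b64encode(b'sensor-reading:42').decode('utf-8')}
assert decode(example_msg) == 'sensor-reading:42'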
| 5,665 |
def pivot_longer_by_humidity_and_temperature(df: pd.DataFrame) -> pd.DataFrame:
"""
Reshapes the dataframe by collapsing all of the temperature and humidity
columns into an temperature, humidity, and location column
Parameters
----------
df : pd.DataFrame
The cleaned and renamed dataframe from add_date_features().
Returns
-------
pd.DataFrame
A much longer dataframe with an exposed location column
to perform operations on.
"""
# Need to melt both variables individually, which creates
# a ton of meaningless rows in the second melt.
temporary_df = df.melt(
id_vars=[colname for colname in df.columns if "temp" not in colname],
var_name="temperature_location",
value_name="temperature",
ignore_index=False,
)
temporary_df = temporary_df.melt(
id_vars=[
colname for colname in temporary_df.columns if "humidity" not in colname
],
var_name="humidity_location",
value_name="humidity",
ignore_index=False,
)
temporary_df["temperature_location"] = temporary_df[
"temperature_location"
].str.replace("temperature_", "")
temporary_df["humidity_location"] = temporary_df["humidity_location"].str.replace(
"humidity_", ""
)
# We know all measurements come from slices of time that contain a measurement of both humidity
# and temperature from one location, so if we combine the location columns we can drop
# the extra rows created during the second melt.
df = temporary_df[
temporary_df["temperature_location"] == temporary_df["humidity_location"]
]
df = df.drop(columns=["humidity_location"]).rename(
columns={"temperature_location": "measurement_location"}
)
return df
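# Usage sketch (hypothetical column names, not part of the original source): the
# function expects paired 'temperature_<location>' / 'humidity_<location>' columns.
example = pd.DataFrame({
    "date": ["2020-01-01", "2020-01-02"],
    "temperature_kitchen": [21.0, 22.5],
    "humidity_kitchen": [40.0, 42.0],
})
long_df = pivot_longer_by_humidity_and_temperature(example)
# long_df now exposes 'measurement_location', 'temperature' and 'humidity' columns.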
| 5,666 |
def add_counter_text(img, box_shape, people_in):
"""
Add person counter text on the image
Args:
img (np.array): Image
box_shape (tuple): (width, height) of the counter box
        people_in (int): Number of people currently inside the space
Returns:
(np.array): Updated image
"""
box_width, box_height = box_shape
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
# set in/capacity numbers
text_in = "{}".format(people_in)
text_cap = "{}".format(CAPACITY)
# import constants for re-use
TEXT_COUNTER_UP = TEXT_CONF["TEXT_COUNTER_UP"]
TEXT_COUNTER_DOWN = TEXT_CONF["TEXT_COUNTER_DOWN"]
# get shapes for parts of text
w_up, h_up = draw.textsize(TEXT_COUNTER_UP, stroke_width=1, font=FONT_SMALL)
w_down, h_down = draw.textsize(TEXT_COUNTER_DOWN, stroke_width=1, font=FONT_SMALL)
w_in, h_in = draw.textsize(text_in, stroke_width=1, font=FONT_SMALL)
w_cap, h_cap = draw.textsize(text_cap, stroke_width=1, font=FONT_SMALL)
w_slash, h_slash = draw.textsize(" / ", stroke_width=1, font=FONT_SMALL)
# calculate coordinates for each part of the text
textX_up = int((box_width - w_up) / 2)
textY_up = int(0.05 * box_height)
textX_down = int((box_width - w_down) / 2)
textY_down = int(0.1 * box_height + h_up)
textX_in = int((box_width - w_slash) / 2 - w_in)
textY_stat = int(0.2 * box_height + h_down + h_up)
textX_slash = int((box_width - w_slash) / 2)
textX_cap = int((box_width + w_slash) / 2)
# add text on image
draw.text(
(textX_up, textY_up),
TEXT_COUNTER_UP,
font=FONT_SMALL,
fill=WHITE,
stroke_width=1,
)
draw.text(
(textX_down, textY_down),
TEXT_COUNTER_DOWN,
font=FONT_SMALL,
fill=WHITE,
stroke_width=1,
)
draw.text(
(textX_in, textY_stat),
text_in,
font=FONT_SMALL,
fill=(0, 255, 0),
stroke_width=1,
)
draw.text(
(textX_slash, textY_stat), " / ", font=FONT_SMALL, fill=WHITE, stroke_width=1
)
draw.text(
(textX_cap, textY_stat), text_cap, font=FONT_SMALL, fill=WHITE, stroke_width=1
)
img = np.array(img_pil, dtype="uint8")
return img
| 5,667 |
def create_override(override):
"""Takes override arguments as dictionary and applies them to copy of current context"""
override_context = bpy.context.copy()
for key, value in override.items():
override_context[key] = value
return override_context
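# Usage sketch (only meaningful inside a running Blender session; the VIEW_3D
# lookup below is illustrative, not taken from the original source):
# area = next(a for a in bpy.context.screen.areas if a.type == 'VIEW_3D')
# override = create_override({'area': area, 'region': area.regions[-1]})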
| 5,668 |
def get_result_file(request):
"""Return the content of the transformed code.
"""
resdir = get_resultdir(request)
workdir = os.path.basename(request.session['workdir']) # sanitized
name = os.path.basename(request.matchdict.get('name', 'result-%s.txt' % workdir))
ext = os.path.splitext(name)[1]
path = os.path.join(resdir, name)
request.response.headers['Content-type'] = mimetypes.types_map.get(ext, 'text/plain;charset=utf-8')
if ext == '.txt': # Open text file as an attachment
request.response.headers['Content-disposition'] = str('attachment; filename=%s' % name)
    return open(path).read()
| 5,669 |
def load_or_make_json(file, *, default=None):
"""Loads a JSON file, or makes it if it does not exist."""
if default is None:
default = {}
return __load_or_make(file, default, json.load, json.dump)
| 5,670 |
def plot_predictions(image, df, color=None, thickness=1):
"""Plot a set of boxes on an image
By default this function does not show, but only plots an axis
Label column must be numeric!
Image must be BGR color order!
Args:
        image: a numpy array in *BGR* color order! Channel order must be channels last [h, w, 3]
df: a pandas dataframe with xmin, xmax, ymin, ymax and label column
color: color of the bounding box as a tuple of BGR color, e.g. orange annotations is (0, 165, 255)
thickness: thickness of the rectangle border line in px
Returns:
image: a numpy array with drawn annotations
"""
if image.shape[0] == 3:
raise ValueError("Input images must be channels last format [h, w, 3] not channels first [3, h, w], use np.rollaxis(image, 0, 3) to invert")
if image.dtype == "float32":
image = image.astype("uint8")
image = image.copy()
if not color:
if not ptypes.is_numeric_dtype(df.label):
warnings.warn("No color was provided and the label column is not numeric. Using a single default color.")
            color = (0, 165, 255)
    for index, row in df.iterrows():
        # Use the fixed color if one was supplied, otherwise pick a color per label.
        box_color = color if color else label_to_color(row["label"])
        cv2.rectangle(image, (int(row["xmin"]), int(row["ymin"])), (int(row["xmax"]), int(row["ymax"])), color=box_color, thickness=thickness, lineType=cv2.LINE_AA)
return image
| 5,671 |
def apply_changes(new_orders):
"""
Updates API items based on the parameter.
:param new_orders: dictionary mapping item IDs to ordering numbers.
"""
__api.items.reorder(items=[{'id': id, 'child_order': order} for id, order in new_orders.items()])
| 5,672 |
def stationary_points(f, symbol, domain=S.Reals):
"""
Returns the stationary points of a function (where derivative of the
function is 0) in the given domain.
Parameters
==========
f : Expr
The concerned function.
symbol : Symbol
The variable for which the stationary points are to be determined.
domain : Interval
The domain over which the stationary points have to be checked.
If unspecified, S.Reals will be the default domain.
Examples
========
>>> from sympy import Symbol, S, sin, log, pi, pprint, stationary_points
>>> from sympy.sets import Interval
>>> x = Symbol('x')
>>> stationary_points(1/x, x, S.Reals)
EmptySet()
>>> pprint(stationary_points(sin(x), x), use_unicode=False)
              pi                              3*pi
    {2*n*pi + -- | n in Integers} U {2*n*pi + ---- | n in Integers}
              2                                 2
>>> stationary_points(sin(x),x, Interval(0, 4*pi))
{pi/2, 3*pi/2, 5*pi/2, 7*pi/2}
"""
from sympy import solveset, diff
if isinstance(domain, EmptySet):
return S.EmptySet
domain = continuous_domain(f, symbol, domain)
set = solveset(diff(f, symbol), symbol, domain)
return set
| 5,673 |
def _create_table(data_list, headers):
""" Create a table for given data list and headers.
Args:
data_list(list): list of dicts, which keys have to cover headers
headers(list): list of headers for the table
Returns:
new_table(tabulate): created table, ready to print
"""
list_table = list()
for row in data_list:
row_data = list()
for header in headers:
if header.lower() in row:
row_data.append(row[header.lower()])
else:
row_data.append(None)
list_table.append(row_data)
new_table = tabulate(list_table, headers=headers)
return new_table
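# Usage sketch (hypothetical data, not part of the original source): row keys must
# match the lower-cased headers.
rows = [{"name": "api", "status": "up"}, {"name": "db", "status": "down"}]
print(_create_table(rows, headers=["Name", "Status"]))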
| 5,674 |
def test_get_dns_name_and_fall_back_ip_address_cannot_be_resolved(mocker, capsys):
"""
When the fallback to mount target ip address is enabled, the mount target ip address is retrieved but cannot be connected
"""
config = _get_mock_config()
mount_efs.BOTOCORE_PRESENT = True
check_fallback_enabled_mock = mocker.patch(
"mount_efs.check_if_fall_back_to_mount_target_ip_address_is_enabled",
return_value=True,
)
get_fallback_mount_target_ip_mock = mocker.patch(
"mount_efs.get_fallback_mount_target_ip_address_helper",
return_value=FALLBACK_IP_ADDRESS,
)
check_ip_resolve_mock = mocker.patch(
"mount_efs.mount_target_ip_address_can_be_resolved",
side_effect=[mount_efs.FallbackException("timeout")],
)
with pytest.raises(SystemExit) as ex:
mount_efs.get_fallback_mount_target_ip_address(
config, FS_ID, DEFAULT_NFS_OPTIONS, DNS_NAME
)
assert 0 != ex.value.code
out, err = capsys.readouterr()
assert "Failed to resolve" in err
assert "cannot be found" in err
utils.assert_called(check_fallback_enabled_mock)
utils.assert_called(get_fallback_mount_target_ip_mock)
utils.assert_called(check_ip_resolve_mock)
| 5,675 |
def set_text(view, text, scroll=False):
"""Replaces the entire content of view with the text specified.
`scroll` parameter specifies whether the view should be scrolled to the end.
"""
with Edit(view) as edit:
edit.erase(Region(0, view.size()))
edit.insert(0, text)
if scroll:
view.show(view.size())
else:
view.sel().clear()
view.sel().add(Region(0, 0))
| 5,676 |
def vagrant_upload(args):
"""Replaces an input file in the VM.
"""
target = Path(args.target[0])
files = args.file
unpacked_info = read_dict(target)
input_files = unpacked_info.setdefault('input_files', {})
use_chroot = unpacked_info['use_chroot']
try:
SSHUploader(target, input_files, files, use_chroot)
finally:
write_dict(target, unpacked_info)
| 5,677 |
def _validate_attribute_id(this_attributes, this_id, xml_ids, enforce_consistency, name):
""" Validate attribute id.
"""
# the given id is None and we don't have setup attributes
# -> increase current max id for the attribute by 1
if this_id is None and this_attributes is None:
this_id = max(xml_ids) + 1
# the given id is None and we do have setup attributes
# set id to the id present in the setup
elif this_id is None and this_attributes is not None:
this_id = this_attributes[name]
# the given id is not None and we do have setup attributes
# -> check that the ids match (unless we are in over-write mode)
elif this_id is not None and this_attributes is not None:
if (this_id != this_attributes[name]) and enforce_consistency:
raise ValueError("Expect id %i for attribute %s, got %i" % (this_attributes[name],
name,
this_id))
return this_id
| 5,678 |
def Arrows2D(startPoints, endPoints=None,
shaftLength=0.8,
shaftWidth=0.09,
headLength=None,
headWidth=0.2,
fill=True,
c=None,
cmap=None,
alpha=1):
"""
Build 2D arrows between two lists of points `startPoints` and `endPoints`.
    `startPoints` can also be passed in the form ``[[point1, point2], ...]``.
Color can be specified as a colormap which maps the size of the arrows.
:param float shaftLength: fractional shaft length
:param float shaftWidth: fractional shaft width
:param float headLength: fractional head length
:param float headWidth: fractional head width
:param bool fill: if False only generate the outline
:param c: color
:param float alpha: set transparency
:Example:
.. code-block:: python
from vedo import Grid, Arrows2D
g1 = Grid(sx=1, sy=1)
g2 = Grid(sx=1.2, sy=1.2).rotateZ(4)
arrs2d = Arrows2D(g1, g2, c='jet')
arrs2d.show(axes=1, bg='white')
|quiver|
"""
if isinstance(startPoints, Points): startPoints = startPoints.points()
if isinstance(endPoints, Points): endPoints = endPoints.points()
startPoints = np.array(startPoints)
if endPoints is None:
strt = startPoints[:,0]
endPoints = startPoints[:,1]
startPoints = strt
else:
endPoints = np.array(endPoints)
if headLength is None:
headLength = 1 - shaftLength
arr = Arrow2D((0,0,0), (1,0,0),
shaftLength, shaftWidth,
headLength, headWidth, fill)
orients = endPoints - startPoints
if orients.shape[1] == 2: # make it 3d
orients = np.c_[np.array(orients), np.zeros(len(orients))]
pts = Points(startPoints)
arrg = Glyph(pts,
arr.polydata(False),
orientationArray=orients,
scaleByVectorSize=True,
c=c, alpha=alpha).flat().lighting('off')
if c is not None:
arrg.color(c)
arrg.name = "Arrows2D"
return arrg
| 5,679 |
def filter_characters(results: list) -> str:
"""Filters unwanted and duplicate characters.
Args:
results: List of top 1 results from inference.
Returns:
Final output string to present to user.
"""
text = ""
for i in range(len(results)):
if results[i] == "$":
continue
elif i + 1 < len(results) and results[i] == results[i + 1]:
continue
else:
text += results[i]
return text
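# Usage sketch (hypothetical model output, not part of the original source): '$'
# acts as the blank token and consecutive duplicates are collapsed, CTC-style.
assert filter_characters(["h", "h", "$", "e", "l", "l", "$", "l", "o"]) == "hello"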
| 5,680 |
def add_label_noise(y: torch.Tensor, p_flip: float):
"""Flips binary labels with some probability `p_flip`."""
n_select = int(p_flip * y.shape[0])
flip_ix = choice([i for i in range(y.shape[0])], size=n_select)
y[flip_ix] = 1 - y[flip_ix]
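# Usage sketch (assumes `choice` is numpy.random.choice, as the call signature suggests):
labels = torch.zeros(8)
add_label_noise(labels, p_flip=0.25)  # flips roughly 25% of the entries in place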
| 5,681 |
def seq_alignment_files(file1, file2, outputfile=""):
"""This command takes 2 fasta files as input, each file contains a single sequence. It reads the 2 sequences from
files and get all their alignments along with the score. The -o is an optional parameter if we need the output to
be written on a file instead of the screen. """
try:
seq1 = SeqIO.read(file1, 'fasta')
seq2 = SeqIO.read(file2, 'fasta')
except OSError as Error:
print(Error)
return 'Please Enter a valid File name'
alignments = pairwise2.align.globalxx(seq1, seq2) # global alignment
if outputfile == '':
for alignment in alignments:
print(alignment)
print(format_alignment(*alignment))
else:
output_alignment(alignments, outputfile)
        print('Alignment written to file ', outputfile)
| 5,682 |
def test_present_set_target():
"""
test alias.present set target
"""
name = "saltdude"
target = "dude@saltstack.com"
ret = {
"comment": "Set email alias {} -> {}".format(name, target),
"changes": {"alias": name},
"name": name,
"result": True,
}
has_target = MagicMock(return_value=False)
set_target = MagicMock(return_value=True)
with patch.dict(alias.__salt__, {"aliases.has_target": has_target}):
with patch.dict(alias.__opts__, {"test": False}):
with patch.dict(alias.__salt__, {"aliases.set_target": set_target}):
assert alias.present(name, target) == ret
| 5,683 |
def main(Block: type[_Block], n: int, difficulty: int) -> list[tuple[float, int]]:
"""Test can hash a block"""
times_and_tries = []
for i in range(n):
block = Block(rand_block_hash(), [t], difficulty=difficulty)
# print(f"starting {i}... ", end="", flush=True)
with time_it() as timer:
block.hash()
# print(f"took {timer.interval:.3g} seconds and {block.nonce+1} tries")
times_and_tries.append((timer.interval, block.nonce + 1))
return times_and_tries
| 5,684 |
def test_model_id(model_id):
"""
Check model has same model_id as referred
"""
model = get_model(model_id)
assert model
real_model_id = get_model_id(model)
assert real_model_id == model_id
| 5,685 |
def main():
"""Main processing."""
usage = '%prog [options]'
parser = OptionParser(usage=usage, version=VERSION)
parser.add_option(
'--man',
action='store_true', dest='man', default=False,
help='Display manual page-like help and exit.',
)
parser.add_option(
'-m', '--minimum-age', type='int',
dest='minimum_age', default=MINIMUM_AGE_TO_LOG_IN_SECONDS,
help='Minimum age of tentative log to process.',
)
parser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print messages to stdout.',
)
parser.add_option(
'--vv',
action='store_true', dest='vv', default=False,
help='More verbose.',
)
(options, unused_args) = parser.parse_args()
if options.man:
man_page()
quit(0)
set_cli_logging(LOG, options.verbose, options.vv)
LOG.debug('Starting')
logs = db(db.tentative_activity_log).select(
db.tentative_activity_log.book_id,
groupby=db.tentative_activity_log.book_id,
)
for log in logs:
LOG.debug('Checking book id: %s', log.book_id)
filters = {'book_id': log.book_id}
tentative_log_set = TentativeLogSet.load(filters=filters)
youngest_log = tentative_log_set.youngest()
age = youngest_log.age()
if age.total_seconds() < options.minimum_age:
LOG.debug(
'Tentative log records too young, book_id: %s', log.book_id)
continue
LOG.debug('Logging book id: %s', log.book_id)
log_set_classes = [
PageAddedTentativeLogSet,
CompletedTentativeLogSet,
]
for log_set_class in log_set_classes:
log_set = log_set_class.load(filters=filters)
activity_log_data = log_set.as_activity_log()
if activity_log_data:
activity_log = ActivityLog.from_add(activity_log_data)
LOG.debug(
'Created activity_log action: %s',
activity_log.action
)
for tentative_activity_log in tentative_log_set.tentative_records:
tentative_activity_log.delete()
LOG.debug('Done')
| 5,686 |
def is_castable_to_float(value: Union[SupportsFloat, str, bytes, bytearray]) -> bool:
"""
    checks whether the object can be converted to float
    Arguments  : value : the value to be checked
    Returns    : True|False
    Exceptions : none
>>> is_castable_to_float(1)
True
>>> is_castable_to_float('1')
True
>>> is_castable_to_float('1.0')
True
>>> is_castable_to_float('1,0')
False
>>> is_castable_to_float('True')
False
>>> is_castable_to_float(True)
True
>>> is_castable_to_float('')
False
>>> is_castable_to_float(None) # noqa
False
"""
try:
float(value)
return True
except (ValueError, TypeError):
return False
| 5,687 |
def list_unique(hasDupes):
"""Return the sorted unique values from a list"""
# order preserving
from operator import itemgetter
d = dict((x, i) for i, x in enumerate(hasDupes))
return [k for k, _ in sorted(d.items(), key=itemgetter(1))]
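# Usage sketch (not part of the original source): duplicates removed, order kept.
assert list_unique(["b", "a", "b", "a", "c"]) == ["b", "a", "c"]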
| 5,688 |
def filter_by_networks(object_list, networks):
"""Returns a copy of object_list with all objects that are not in the
network removed.
Parameters
----------
object_list: list
List of datamodel objects.
networks: string or list
Network or list of networks to check for.
Returns
-------
filtered
List of filtered datamodel objects.
"""
filtered = [obj for obj in object_list if check_network(networks, obj)]
return filtered
| 5,689 |
def get_all_data(year=year, expiry=1, fielding=False, chadwick=False):
"""Grab all data and write core files."""
"""Options for fielding data and bio data for rookies/master."""
name_url_pairs = url_maker(year=year, fielding=fielding)
# if debugging warn about the webviews
if module_log.isEnabledFor(logging.DEBUG):
print "ALERT: Spynner windows should open."
print "ALERT: This throws more AttributeError(s)."
print "ALERT: No need to worry. They're uncaught but it all works."
# loop over tuples and get_dats
for pair in name_url_pairs:
get_data(pair[1], pair[0], year)
# either do chadwick or not
if chadwick is True:
get_biographical()
# Check if data is there, new and in range of len
past_due, exists = check_files(year, expiry, fielding=fielding) # , chadwick=chadwick)
if past_due is False and exists is True:
module_log.info("Files now up to date.")
return past_due, exists
| 5,690 |
def score_per_year_by_country(country):
"""Returns the Global Terrorism Index (GTI) per year of the given country."""
cur = get_db().execute('''SELECT iyear, (
1*COUNT(*)
+ 3*SUM(nkill)
+ 0.5*SUM(nwound)
+ 2*SUM(case propextent when 1.0 then 1 else 0 end)
+ 2*SUM(case propextent when 2.0 then 1 else 0 end)
+ 2*SUM(case propextent when 3.0 then 1 else 0 end)
+ 2*SUM(case propextent when 4.0 then 1 else 0 end)) FROM Attacks WHERE iso_code="{}" GROUP BY iyear''' .format(country))
score = cur.fetchall()
cur.close()
return jsonify(score)
| 5,691 |
def line_2d_to_3d(line, zs=0, zdir='z'):
"""Convert a 2D line to 3D."""
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
| 5,692 |
def build(options, is_training):
"""Builds a model based on the options.
Args:
    options: A model_pb2.Model instance.
    is_training: Whether the model is being built for training.
Returns:
A model instance.
Raises:
ValueError: If the model proto is invalid or cannot find a registered entry.
"""
if not isinstance(options, model_pb2.Model):
raise ValueError('The options has to be an instance of model_pb2.Model.')
for extension, model_proto in options.ListFields():
if extension in MODELS:
return MODELS[extension](model_proto, is_training)
raise ValueError('Invalid model config!')
| 5,693 |
def string_to_rdkit(frmt: str, string: str, **kwargs) -> RDKitMol:
"""
Convert string representation of molecule to RDKitMol.
Args:
frmt: Format of string.
string: String representation of molecule.
**kwargs: Other keyword arguments for conversion function.
Returns:
RDKitMol corresponding to string representation.
"""
try:
converter = RDKIT_STRING_TO_MOL_CONVERTERS[frmt.lower()]
except KeyError:
raise ValueError(f'{frmt} is not a recognized RDKit format')
else:
remove_hs = kwargs.pop('removeHs', False) # Don't remove hydrogens by default
rdkit_mol = converter(string, removeHs=remove_hs, **kwargs)
return RDKitMol(rdkit_mol)
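# Usage sketch (assumes 'smiles' is one of the keys registered in
# RDKIT_STRING_TO_MOL_CONVERTERS):
# benzene = string_to_rdkit('smiles', 'c1ccccc1')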
| 5,694 |
def test_compare_ccm_block_toeplitz_pi_grads(lorenz_dataset):
"""Test whether calculating the grad of PI from cross cov mats with and without
the block-Toeplitz algorithm gives the same value."""
_, d, _, ccms, _ = lorenz_dataset
assert_allclose(calc_pi_from_cross_cov_mats(ccms),
calc_pi_from_cross_cov_mats_block_toeplitz(ccms))
tccms = torch.tensor(ccms)
rng = np.random.RandomState(202001291)
proj = rng.randn(d, 3)
tproj = torch.tensor(proj, requires_grad=True)
PI = calc_pi_from_cross_cov_mats(tccms, tproj)
PI.backward()
grad = tproj.grad
tproj = torch.tensor(proj, requires_grad=True)
PI_BT = calc_pi_from_cross_cov_mats_block_toeplitz(tccms, tproj)
PI_BT.backward()
grad_BT = tproj.grad
assert torch.allclose(grad, grad_BT)
| 5,695 |
def scatterplot():
"""
    Graphs a scatterplot of the computed sequence values at every r in [2.9, 4) with a step of 0.001.
    For each r the first 151 iterates are discarded so only the long-run values are plotted.
"""
xs = []
ys = []
r = 2.9
for r in np.arange(2.9, 4, 0.001):
temp_mystery = MysterySequence(r, 0.5, 300)
temp_ys = temp_mystery()
del temp_ys[0 : 151]
xs = xs + [r for i in range(1,151)]
ys = ys + temp_ys
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(xs, ys, marker='.', color='r')
ax.set(ylim = (0,1), xlim = (2.9,4), xlabel = 'Iteration R', ylabel = 'Asymptotic Value', title = "Sequence Parameters: x0={0.5}, r={[2.9,4]}, N={300}")
| 5,696 |
def eh_bcm_debug_show(dut, af='both', table_type='all', ifname_type=None):
"""
Error handling debug API
Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com)
:param dut:
:param af:
:param table_type:
:return:
"""
st.banner("Error handling DEBUG Calls - START")
if af == 'ipv4' or af == 'both':
if table_type == 'route' or table_type == 'all':
asicapi.bcmcmd_l3_defip_show(dut)
if table_type == 'nbr' or table_type == 'all':
asicapi.bcmcmd_l3_l3table_show(dut)
if af == 'ipv6' or af == 'both':
if table_type == 'route' or table_type == 'all':
asicapi.bcmcmd_l3_ip6route_show(dut)
if table_type == 'nbr' or table_type == 'all':
asicapi.bcmcmd_l3_ip6host_show(dut)
if table_type == 'all':
verify_show_error_db(dut, ifname_type=ifname_type)
st.banner("Error handling DEBUG Calls - END")
| 5,697 |
def clean_datetime_remove_ms(atime):
"""
    Reset the milliseconds of a datetime object to zero (sub-second precision is dropped)
:param atime:
:return:
"""
return datetime(atime.year, atime.month, atime.day, atime.hour, atime.minute, atime.second)
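# Usage sketch (not part of the original source): sub-second precision is dropped.
stamp = datetime(2021, 5, 1, 12, 30, 45, 123456)
assert clean_datetime_remove_ms(stamp) == datetime(2021, 5, 1, 12, 30, 45)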
| 5,698 |
def perDay(modified):
"""Auxiliary in provenance filtering: chunk the trails into daily bits."""
chunks = {}
for m in modified:
chunks.setdefault(dt.date(m[1]), []).append(m)
return [chunks[date] for date in sorted(chunks)]
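# Usage sketch (assumes `dt` is datetime.datetime, as the dt.date(...) call implies):
trail = [('a', dt(2021, 3, 1, 9)), ('b', dt(2021, 3, 1, 17)), ('c', dt(2021, 3, 2, 8))]
chunks = perDay(trail)  # -> [[('a', ...), ('b', ...)], [('c', ...)]]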
| 5,699 |