content stringlengths 22–815k | id int64 0–4.91M |
---|---|
def test_signal_extract_clip_longer_sowhat(audio_file, tmpdir):
"""Expects a soundfile greater than 3.0 seconds in duration"""
start_time = 1.0
exp_duration = float(claudio.sox.soxi(audio_file, 'D')) - start_time
assert exp_duration > 0.0
output_file = os.path.join(
str(tmpdir), "test_signal_extract_clip_longer_sowhat_output.wav")
assert minst.signal.extract_clip(audio_file, output_file, start_time,
start_time + 9.0, None)
obs_duration = float(claudio.sox.soxi(output_file, 'D'))
assert np.abs(obs_duration - exp_duration) < TOLERANCE
| 4,800 |
def conv_block(data, name, channels,
kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),
epsilon=1e-5):
"""Helper function to construct conv-bn-relu"""
# convolution + bn + relu
conv = sym.conv2d(data=data, channels=channels,
kernel_size=kernel_size, strides=strides,
padding=padding, use_bias=False,
layout="NCHW", name=name + "_conv")
bn = sym.batch_norm(data=conv, epsilon=epsilon, name=name + "_bn")
act = sym.relu(data=bn, name=name + "_relu")
return act
| 4,801 |
def var2fa(stream, gzipped=False):
"""convert variant calling's .var file to fasta"""
for line in stream:
if gzipped: line = line.decode()
if line[0]!='V': continue
line = line.strip().split('\t')
_1, chrom, start, end, _2, _3, ref, alt, queryname, q_start, q_end, strand = line
if abs(len(ref)-len(alt))<50: continue # not long enough
if len(ref)>len(alt):
newname = 'DEL_'+'_'.join([queryname, chrom+strand, start+'-'+end, q_start+'-'+q_end])
seq = ref.upper()
else:
newname = 'INS_'+'_'.join([queryname, chrom+strand, start+'-'+end, q_start+'-'+q_end])
seq = alt.upper()
sys.stdout.write('>{0}\n{1}\n'.format(newname, seq))
return 0
| 4,802 |
def utzappos_tensor_dset(img_size, observed, binarized, drop_infreq,
cache_fn, *dset_args, transform=None, **dset_kwargs):
"""
Convert folder dataset to tensor dataset.
"""
cache_fn = UTZapposIDImageFolder.get_cache_name(cache_fn, img_size, observed, binarized, drop_infreq)
try:
with open(cache_fn, 'rb') as f:
dset_samples, dset_labels, dset_label_info = pickle.load(f)
except FileNotFoundError:
img_transform = torchvision.transforms.Compose([torchvision.transforms.Resize((img_size, img_size)),
torchvision.transforms.ToTensor()])
dset = UTZapposIDImageFolder(*dset_args, img_size=img_size, transform=img_transform,
observed=observed, binarized=binarized, drop_infreq=drop_infreq,
**dset_kwargs)
dset_examples = [dset[ind] for ind in range(len(dset))]
dset_samples, dset_labels = map(torch.stack, zip(*dset_examples))
# find_duplicates_in_dsets((dset_samples, dset_labels), (dset_samples, dset_labels),
# tuple_format=True, itself=True)
dset_label_info = dset._label_info
with open(cache_fn, 'wb') as handle:
pickle.dump((dset_samples, dset_labels, dset_label_info), handle, protocol=4)
return CustomTensorDataset(dset_samples, dset_labels, transform=transform), dset_label_info, cache_fn
| 4,803 |
def test_rdn_scenario1(rambank, chip, register, char):
"""Test instruction RDn"""
from random import randint
chip_test = Processor()
chip_base = Processor()
value = randint(0, 15)
address = encode_command_register(chip, register, 0,
'DATA_RAM_STATUS_CHAR')
chip_test.CURRENT_RAM_BANK = rambank
chip_test.COMMAND_REGISTER = address
chip_test.STATUS_CHARACTERS[rambank][chip][register][char] = value
# Perform the instruction under test:
if char == 0:
Processor.rd0(chip_test)
if char == 1:
Processor.rd1(chip_test)
if char == 2:
Processor.rd2(chip_test)
if char == 3:
Processor.rd3(chip_test)
# Simulate conditions at end of instruction in base chip
chip_base.COMMAND_REGISTER = address
chip_base.CURRENT_RAM_BANK = rambank
chip_base.increment_pc(1)
chip_base.set_accumulator(value)
chip_base.STATUS_CHARACTERS[rambank][chip][register][char] = value
# Make assertions that the base chip is now at the same state as
# the test chip which has been operated on by the instruction under test.
assert chip_test.read_program_counter() == chip_base.read_program_counter()
assert chip_test.read_accumulator() == chip_base.read_accumulator()
# Pickling each chip and comparing will show equality or not.
assert pickle.dumps(chip_test) == pickle.dumps(chip_base)
| 4,804 |
def compare_versions(aStr, bStr):
"""
Assumes Debian version format:
[epoch:]upstream_version[-debian_revision]
Returns:
-1 : a < b
0 : a == b
1 : a > b
"""
# Compare using the version class
return cmp(Version(aStr), Version(bStr))
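# Illustrative usage (added sketch, not part of the original module): assuming
# `Version` implements Debian comparison rules (e.g. debian.debian_support.Version)
# and `cmp` is the Python 2 built-in or an equivalent three-way comparison helper:
#
#   compare_versions("1.0-1", "1.0-2")    # -> -1
#   compare_versions("1.0-1", "1.0-1")    # ->  0
#   compare_versions("1:0.9-1", "1.2-1")  # ->  1  (a non-zero epoch sorts first)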
| 4,805 |
def repdirstruc(src, dst, credentials):
"""Replicate directory structure onto GCS bucket.
SRC is path to a local directory. Directories within will be replicated.
DST is gs://[bucket] and optional path to replicate SRC into.
If --credentials or -c is not explicitly given, checks the
GOOGLE_APPLICATION_CREDENTIALS environment variable for path to a GCS
credentials file, or default service accounts for authentication. See
https://googleapis.dev/python/google-api-core/latest/auth.html for more
details.
"""
client = authenticated_client(credentials)
replicate_directory_structure_on_gcs(src, dst, client)
| 4,806 |
def alpha_034(code, end_date=None, fq="pre"):
"""
    Formula:
    MEAN(CLOSE,12)/CLOSE
    Inputs:
    code: the stock pool
    end_date: the query date
    Outputs:
    the value of the factor
"""
end_date = to_date_str(end_date)
func_name = sys._getframe().f_code.co_name
return JQDataClient.instance().get_alpha_191(**locals())
| 4,807 |
def create_mean_median_plot(data, filename):
"""Create a plot of the mean and median of the features
Args:
data: pandas.DataFrame
Description of the features. Result of running the
pandas.DataFrame.describe method on the features
filename: str
Path to which to save the figure. The file format is inferred
from the extension.
Returns:
None
"""
cols = ['mean', '50%']
to_plot = data.transpose()[cols]
to_plot.columns = ['mean', 'median']
create_feature_value_plot(to_plot, filename)
| 4,808 |
def unscale_parameter(value: numbers.Number,
petab_scale: str) -> numbers.Number:
"""Bring parameter from scale to linear scale.
:param value:
Value to scale
:param petab_scale:
Target scale of ``value``
:return:
``value`` on linear scale
"""
if petab_scale == LIN:
return value
if petab_scale == LOG10:
return np.power(10, value)
if petab_scale == LOG:
return np.exp(value)
raise ValueError(f"Unknown parameter scale {petab_scale}. "
f"Must be from {(LIN, LOG, LOG10)}")
| 4,809 |
def save_answers(article_title, system_answers):
""" Create a file at output folder with article's name with name system_answers.txt in this form:
ArticleTitle Question Answer AnswersComment
ie. Zebra question name question's answer answer's comment
    :param article_title: The article's title.
:type article_title: str
:param system_answers: System's question, answer_text & answer_comment.
:type system_answers: list(tuple(question, tuple(answer_text,answer_comment) ) )
"""
file = open(data_reading.OUTPUT_DIR + SELECTED_DATASET + '/' + article_title + '/system_answers.txt', 'w', encoding='utf-8')
# Write data to file.
file.write('ArticleTitle Question Answer Answer\'sComment \n')
for system_answer in system_answers:
# Unwrap answer text.
question, answer = system_answer
answer_text, answer_comment = answer
line = article_title + ' ' + question + ' ' + answer_text + ' ' + answer_comment
file.write(line)
file.write('\n')
file.close()
| 4,810 |
def perturb(sentence, bertmodel, num):
"""Generate a list of similar sentences by BERT
Arguments:
sentence: Sentence which needs to be perturbed
bertModel: MLM model being used (BERT here)
num: Number of perturbations required for a word in a sentence
"""
# Tokenize the sentence
    tokens = tokenizer.tokenize(sentence)
pos_inf = nltk.tag.pos_tag(tokens)
# the elements in the lists are tuples <index of token, pos tag of token>
bert_masked_indexL = list()
# collect the token index for substitution
for idx, (word, tag) in enumerate(pos_inf):
if (tag.startswith("JJ") or tag.startswith("JJR") or tag.startswith("JJS")
or tag.startswith("PRP") or tag.startswith("PRP$") or tag.startswith("RB")
or tag.startswith("RBR") or tag.startswith("RBS") or tag.startswith("VB") or
tag.startswith("VBD") or tag.startswith("VBG") or tag.startswith("VBN") or
tag.startswith("VBP") or tag.startswith("VBZ") or tag.startswith("NN") or
tag.startswith("NNS") or tag.startswith("NNP") or tag.startswith("NNPS")):
tagFlag = tag[:2]
if (idx!=0 and idx!=len(tokens)-1):
bert_masked_indexL.append((idx, tagFlag))
bert_new_sentences = list()
    # generate similar sentences using Bert
if bert_masked_indexL:
        bert_new_sentences = perturbBert(sentence, bertmodel, num, bert_masked_indexL)
return bert_new_sentences
| 4,811 |
async def test_disallowed_duplicated_auth_provider_config(hass):
"""Test loading insecure example auth provider is disallowed."""
core_config = {
"latitude": 60,
"longitude": 50,
"elevation": 25,
"name": "Huis",
CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_IMPERIAL,
"time_zone": "GMT",
CONF_AUTH_PROVIDERS: [{"type": "homeassistant"}, {"type": "homeassistant"}],
}
with pytest.raises(Invalid):
await config_util.async_process_ha_core_config(hass, core_config)
| 4,812 |
def test_base_data_object():
"""Test the base data object."""
obj = MockDataObject({
"attr1": "value1",
"attr2": "value2",
})
assert obj.attr1 == "value1"
assert obj.data == {"attr1": "value1"}
with pytest.raises(AttributeError):
obj.attr2
with pytest.raises(AttributeError):
obj.attr3
| 4,813 |
def discovery_dispatch(task: TaskRequest) -> TaskResponse:
"""Runs appropriate discovery function based on protocol
Args:
task (TaskRequest): namedtuple
Returns:
TaskResponse[str, dict[str, str|int|bool|list]]
"""
task = TaskRequest(*task)
proto = constant.Proto(task.proto)
logging.info(
"Dispatching: host=%s, hostname=%s, proto=%s",
task.host,
task.hostname,
proto,
)
discoverer = get_discovery(proto)
device = discoverer(
host=task.host,
hostname=task.hostname,
sysinfo=task.sysinfo,
extra=task.extra,
**task.kwargs,
)
logging.info("Dispatch received response from %s", task.host)
return TaskResponse(task.host, device.dump())
| 4,814 |
def get_free_times(busy_times, begin_date, end_date):
"""
Gets a list of free times calculated from a list of busy times.
:param busy_times: is the list of busy times in ascending order.
:param begin_date: is the start of the selected time interval.
:param end_date: is the end of the selected time interval.
:return: a list of free times.
"""
free_times = []
busy_times_original = busy_times
begin_date = arrow.get(begin_date).replace(hour=9)
end_date = arrow.get(end_date).replace(hour=17)
# print('free times')
if len(busy_times) == 0:
free_times.append((begin_date.isoformat(), end_date.isoformat()))
else:
begin_date_end = begin_date.replace(hour=17)
begin_day = begin_date.format('YYYYMMDD')
begin_time = '09:00'
end_time = '17:00'
end_date_start = arrow.get(end_date).replace(hour=9)
end_day = end_date.format('YYYYMMDD')
stored_event = busy_times[0]
busy_times = busy_times[1:]
if len(busy_times) == 0:
stored_event_start = arrow.get(stored_event['start']['dateTime'])
stored_event_end = arrow.get(stored_event['end']['dateTime'])
if (stored_event_start == begin_date and
stored_event_end < begin_date_end):
free_times.append((stored_event_end.isoformat(),
end_date.isoformat()))
elif (stored_event_end == end_date and
stored_event_start > end_date_start):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
elif (stored_event_start > begin_date and
stored_event_end < end_date):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
free_times.append((stored_event_end.isoformat(),
end_date.isoformat()))
for event in busy_times:
event_start = arrow.get(event['start']['dateTime'])
event_end = arrow.get(event['end']['dateTime'])
event_start_time = event_start.format('HH:mm')
event_end_time = event_end.format('HH:mm')
event_end_day = event_end.format('YYYYMMDD')
stored_event_start = arrow.get(stored_event['start']['dateTime'])
stored_event_start_time = stored_event_start.format('HH:mm')
stored_event_start_day = arrow.get(
stored_event['start']['dateTime']).format('YYYYMMDD')
stored_event_end = stored_event['end']['dateTime']
stored_event_end_time = arrow.get(stored_event_end).format('HH:mm')
event_start = event_start.isoformat()
# starting free time on begin day after start of day
if (stored_event_start_day == begin_day and
stored_event_start_time > begin_time):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
# print('0 {} - {}'.format(begin_date.isoformat(),
# stored_event_start.isoformat()))
# middle free times
if (stored_event_end < event_start and
(stored_event_end, event_start) not in free_times):
if event_start_time == '09:00':
event_start = arrow.get(
event['start']['dateTime']).replace(
days=-1, hour=17).isoformat()
if stored_event_end_time == '17:00':
stored_event_end = arrow.get(
stored_event_end).replace(days=+1,
hour=START_TIME).isoformat()
free_times.append((stored_event_end, event_start))
# print('1 {} - {}'.format(stored_event_end,
# event_start))
# ending free time
if (event_end_day == end_day and
event_end_time != end_time):
free_times.append((event_end.isoformat(), end_date.isoformat()))
# print('2 {} - {}'.format(event_end.isoformat(),
# end_date.isoformat()))
# ending free time for final events that end before end_date
if (busy_times.index(event) == len(busy_times) - 1 and
event_end < end_date):
if event_end_time == '17:00':
event_end = event_end.replace(days=+1, hour=START_TIME)
free_times.append((event_end.isoformat(), end_date.isoformat()))
# print('3 {} - {}'.format(event_end.isoformat(),
# end_date.isoformat()))
# starting free time not on begin day
if (arrow.get(free_times[0][0]) != begin_date and
stored_event_start != begin_date and
begin_date != arrow.get(
busy_times_original[0]['start']['dateTime'])):
free_times.insert(0, (begin_date.isoformat(),
stored_event_start.isoformat()))
# print('4 {} - {}'.format(begin_date.isoformat(),
# stored_event_start.isoformat()))
stored_event = event
# print()
# print('free times')
# for time in free_times:
# print(time)
return free_times
| 4,815 |
def upstream_has_data(valid):
"""Does data exist upstream to even attempt a download"""
utcnow = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
# NCEP should have at least 24 hours of data
return (utcnow - datetime.timedelta(hours=24)) < valid
| 4,816 |
def encode_array(x, base=2, **kwds):
"""Encode array of integer-symbols.
Parameters
----------
x : (N, k) array_like
Array of integer symbols.
base : int
Encoding base.
**kwds :
Keyword arguments passed to :py:func:`numpy.ravel`.
Returns
-------
int
Integer code of an array.
"""
seq = np.ravel(x, **kwds)
return encode_sequence(seq, base=base)
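# Hedged sketch (added for illustration): `encode_sequence` is not shown in this
# snippet; a minimal compatible implementation could fold the flattened symbols
# into a single integer by treating them as digits in the given base.
def _encode_sequence_sketch(seq, base=2):
    code = 0
    for symbol in seq:
        code = code * base + int(symbol)
    return code

# _encode_sequence_sketch([1, 0, 1], base=2) == 5,
# so encode_array([[1, 0], [1, 0]], base=2) would give 10 under this sketch.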
| 4,817 |
def str_for_model(model: Model, formatting: str = "plain", include_params: bool = True) -> str:
"""Make a human-readable string representation of Model, listing all random variables
and their distributions, optionally including parameter values."""
all_rv = itertools.chain(model.unobserved_RVs, model.observed_RVs, model.potentials)
rv_reprs = [rv.str_repr(formatting=formatting, include_params=include_params) for rv in all_rv]
rv_reprs = [rv_repr for rv_repr in rv_reprs if "TransformedDistribution()" not in rv_repr]
if not rv_reprs:
return ""
if "latex" in formatting:
rv_reprs = [
rv_repr.replace(r"\sim", r"&\sim &").strip("$")
for rv_repr in rv_reprs
if rv_repr is not None
]
return r"""$$
\begin{{array}}{{rcl}}
{}
\end{{array}}
$$""".format(
"\\\\".join(rv_reprs)
)
else:
# align vars on their ~
names = [s[: s.index("~") - 1] for s in rv_reprs]
distrs = [s[s.index("~") + 2 :] for s in rv_reprs]
maxlen = str(max(len(x) for x in names))
rv_reprs = [
("{name:>" + maxlen + "} ~ {distr}").format(name=n, distr=d)
for n, d in zip(names, distrs)
]
return "\n".join(rv_reprs)
| 4,818 |
def get_device(
raw_data: dict, control_data: dict, request: Callable
) -> Optional[
Union[
HomeSeerDimmableDevice,
HomeSeerFanDevice,
HomeSeerLockableDevice,
HomeSeerStatusDevice,
HomeSeerSwitchableDevice,
HomeSeerCoverDevice,
HomeSeerSetPointDevice
]
]:
"""
Parses control_data to return an appropriate device object
based on the control pairs detected for the device.
On/Off = HomeSeerSwitchableDevice
On/Off/Dim = HomeSeerDimmableDevice
On/Off/Fan = HomeSeerFanDevice
Lock/Unlock = HomeSeerLockableDevice
other = HomeSeerStatusDevice
"""
item = next((x for x in control_data if x["ref"] == raw_data["ref"]), None)
supported_features = get_supported_features(item)
return build_device(raw_data, item, request, supported_features)
| 4,819 |
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get()
| 4,820 |
def process(msd_id: str, counter: AtomicCounter) -> Optional[dict]:
"""
Processes the given MSD id and increments the counter. The
method will find and return the artist.
:param msd_id: the MSD id to process
:param counter: the counter to increment
:return: the dictionary containing the MSD id and the artist, raises an
exception if the file cannot be processed
"""
try:
with tables.open_file(msd_id_to_h5(msd_id, args.path_dataset_dir)) as h5:
artist = h5.root.metadata.songs.cols.artist_name[0].decode("utf-8")
return {"msd_id": msd_id, "artist": artist}
except Exception as e:
print(f"Exception during processing of {msd_id}: {e}")
finally:
counter.increment()
| 4,821 |
def choose_run(D, var2align, run):
"""Get input for the alignment.
Do it by indicating a run to align to.
Args:
D (pd.DataFrame): DataFrame containing columns 'id', 'run', and ...
var2align (str): Name of the column to align.
run (whatever): The run to align to.
Returns:
tuple of pd.DataFrames: The data ready for alignment and the remainder.
"""
X = D[['id', 'run', var2align]] # subselect the data for alignment
X.columns = ['id', 'run', 'x']
ref = X.loc[X.run == run] # the reference peptides
other = X.loc[X.run != run] # all other peptides
# we can align peptides in other runs only to those found in chosen run.
alignable_idx = other.id.isin(set(other.id) & set(ref.id))
X = other.loc[alignable_idx,]
unalignable = other.loc[~alignable_idx,]
ref = ref[['id','x']].set_index('id')
ref.columns = ['y']
X = pd.concat([X.set_index('id'), ref], axis=1, join='inner')
return X, unalignable
| 4,822 |
def test_div():
"""division of complexes, 64x"""
for s in numbers:
for t in numbers:
s / t
| 4,823 |
def keyring(homedir, monkeypatch, scope='module'):
"""Default keyring, using the test profile"""
monkeypatch.setattr(os.path, "expanduser", lambda d: homedir)
kr = S3Keyring(profile_name='test')
kr.configure(ask=False)
return kr
| 4,824 |
def stack(tensor_list, axis=0):
"""
This function is the same as torch.stack but handles both
numpy.ndarray and torch.Tensor
:param tensor_list:
:param axis:
:return:
"""
if isinstance(tensor_list[0], th.Tensor):
return th.stack(tensor_list, axis)
else:
return np.stack(tensor_list, axis)
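# Illustrative usage (added sketch): `th` is assumed to be the torch module
# imported at the top of the original file.
import numpy as np
import torch as th

assert isinstance(stack([th.zeros(2), th.ones(2)]), th.Tensor)   # -> (2, 2) tensor
assert isinstance(stack([np.zeros(2), np.ones(2)]), np.ndarray)  # -> (2, 2) array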
| 4,825 |
def generate_svg(input_file, input_hocr, options):
"""
Generates page SVG with embedded raster image and text overlay.
"""
output_file_path = get_result_file_path(
input_file_path=str(input_file),
output_dir=options.sidecar_dir,
output_ext=options.sidecar_format
)
base64_img, size = image_to_base64(input_file)
words = get_words(input_hocr)
output_format = options.sidecar_format # svg | html
template_name = f"page.{output_format}.j2" # svg | html
rendered_string = render_to_string(
template_name,
base64_img=base64_img.decode('utf-8'),
width=size[0],
height=size[1],
words=words
)
with open(output_file_path, 'wt') as f:
f.write(rendered_string)
| 4,826 |
def alias(alias):
"""Select a single alias."""
return {'alias': alias}
| 4,827 |
def model_creator(config):
"""Constructor function for the model(s) to be optimized.
You will also need to provide a custom training
function to specify the optimization procedure for multiple models.
Args:
config (dict): Configuration dictionary passed into ``PyTorchTrainer``.
Returns:
One or more torch.nn.Module objects.
"""
return nn.Linear(1, 1)
| 4,828 |
def ensure_r_vector(x):
"""Ensures that the input is rendered as a vector in R.
It is way more complicated to define an array in R than in Python because an array
    in R cannot end with a comma.
Examples
--------
>>> ensure_r_vector("string")
"c('string')"
>>> ensure_r_vector(1)
'c(1)'
>>> ensure_r_vector(list("abcd"))
"c('a', 'b', 'c', 'd')"
>>> ensure_r_vector((1, 2))
'c(1, 2)'
"""
if isinstance(x, str):
out = f"c('{x}')"
elif isinstance(x, numbers.Number):
out = f"c({x})"
elif isinstance(x, (tuple, list)):
mapped = map(lambda l: str(l) if isinstance(l, numbers.Number) else f"'{l}'", x)
concatenated = ", ".join(mapped)
out = f"c({concatenated})"
else:
raise NotImplementedError(
f"'ensure_r_vector' is not defined for dtype {type(x)}"
)
return out
| 4,829 |
def get_games():
"""Actually retrieves the games and adds them to the datastore"""
stats = utils.retrieve_stats()
total_games = 0
games = Games()
all_games = games.get_all('us')
current_stack = []
for game in all_games:
if game.type == 'game':
name = game.name.encode('ascii', 'ignore')
store_url = create_store_url(game.appid)
search_name = utils.remove_specials(name.lower().replace('amp;', ''))
            new_game = Games_DB(id=game.appid, appid=int(game.appid), game_name=name, image_url=game.header_image,
                                store_url=store_url, price=game.price, search_name=search_name)
if int(game.appid) not in mp_games:
new_game.multiplayer_only = False
else:
new_game.multiplayer_only = True
current_stack.append(new_game)
if len(current_stack) > 250:
ndb.put_multi(current_stack)
current_stack = []
total_games += 1
stats.total_steam = total_games
stats.games_last_updated = datetime.now()
stats.put()
| 4,830 |
def open_and_prepare_avatar(image_bytes: Optional[bytes]) -> Optional[Image.Image]:
"""Opens the image as bytes if they exist, otherwise opens the 404 error image. then circular crops and resizes it"""
if image_bytes is not None:
try:
with Image.open(BytesIO(image_bytes)) as im:
prepared_image = crop_circular_border_w_transparent_bg(im)
prepared_image = resize_image(prepared_image)
except UnidentifiedImageError as e:
log.error("Error loading Avatar", exc_info=e)
return None
else:
with Image.open("resources/404 Avatar Not Found.png") as im:
prepared_image = crop_circular_border_w_transparent_bg(im)
prepared_image = resize_image(prepared_image)
return prepared_image
| 4,831 |
def _load_env(
file=".env",
):
"""Load environment variables from dotenv files
Uses https://github.com/theskumar/python-dotenv
Parameters
----------
file
Path to the dotenv file
"""
if not XSH.env:
return
from dotenv import dotenv_values
vals = dotenv_values(file)
for name, val in vals.items():
print(f"Setting {name}")
XSH.env[name] = val
| 4,832 |
def test_find_file_present():
"""Test if existing datafile is found.
Using the bird-size dataset which is included for regression testing.
We copy the raw_data directory to retriever_root_dir
which is the current working directory.
This enables the data to be in the DATA_SEARCH_PATHS.
"""
test_engine.script.name = 'bird-size'
assert test_engine.find_file('5599229') == os.path.normpath(
'raw_data/bird-size/5599229')
| 4,833 |
async def handler(client, _path):
"""
Immediate function called when WebSocket payload received
:param client: client that sent payload
:type client: websockets.WebSocketCommonProtocol
:type _path: str
"""
try:
async for message in client:
try:
payload = json.loads(message)
except json.JSONDecodeError:
await ech.send_error(client)
return
if (extracted := await ech.safe_extract(client, payload, {"api": str, "operation": str})) is not None:
if extracted["api"] == eclib.apis.login and extracted["operation"] == "login":
if (passcode := await ech.safe_extract(client, payload, {"accessCode": str})) is not None:
success = False
for user in ecusers.User.userlist:
if passcode == user.passcode:
success = True
print("[LOGIN] " + user.name)
ecsocket.unregister(client)
ecsocket.register(client, user)
await echandle(client, user, eclib.apis.main, "get", None)
break
if not success:
await ecsocket.send_by_client({"api": eclib.apis.login, "failure": True}, client)
else:
if (user := find_user_from_client(client)) is not None:
await echandle(client, user, extracted["api"], extracted["operation"], payload)
else:
await ech.send_error(client)
return
except websockets.ConnectionClosed:
pass
finally:
ecsocket.unregister(client)
| 4,834 |
def is_GammaH(x):
"""
Return True if x is a congruence subgroup of type GammaH.
EXAMPLES::
sage: from sage.modular.arithgroup.all import is_GammaH
sage: is_GammaH(GammaH(13, [2]))
True
sage: is_GammaH(Gamma0(6))
True
sage: is_GammaH(Gamma1(6))
True
sage: is_GammaH(sage.modular.arithgroup.congroup_generic.CongruenceSubgroup(5))
False
"""
return isinstance(x, GammaH_class)
| 4,835 |
def plot_images_overlap(imgs, title, order_axes, meta_title=None, savefig=False, show_plot=True,
cmaps=None, alphas=None, **kwargs):
""" Plot one or more images with overlap. """
cmaps = cmaps or ['gray'] + ['Reds']*len(imgs)
alphas = alphas or [1.0]*len(imgs)
defaults = {'figsize': (15, 15)}
plt.figure(**{**defaults, **kwargs})
for i, (img, cmap, alpha) in enumerate(zip(imgs, cmaps, alphas)):
img = _to_img(img, order_axes=order_axes, convert=(i > 0))
plt.imshow(img, alpha=alpha, cmap=cmap)
plt.title('{}\n{}'.format(meta_title, title), fontdict={'fontsize': 15})
if savefig:
plt.savefig(savefig, bbox_inches='tight', pad_inches=0)
plt.show() if show_plot else plt.close()
| 4,836 |
def _run_with_interpreter_if_needed(fuzzer_path, args, max_time):
"""Execute the fuzzer script with an interpreter, or invoke it directly."""
interpreter = shell.get_interpreter(fuzzer_path)
if interpreter:
executable = interpreter
args.insert(0, fuzzer_path)
else:
executable = fuzzer_path
runner = new_process.UnicodeProcessRunner(executable)
return runner.run_and_wait(timeout=max_time, additional_args=args)
| 4,837 |
def remove_vol(im_in, index_vol_user, todo):
"""
Remove specific volumes from 4D data.
:param im_in: [str] input image.
:param index_vol: [int] list of indices corresponding to volumes to remove
:param todo: {keep, remove} what to do
:return: 4d volume
"""
# get data
data = im_in.data
nt = data.shape[3]
# define index list of volumes to keep/remove
if todo == 'remove':
index_vol = [i for i in range(0, nt) if i not in index_vol_user]
elif todo == 'keep':
index_vol = index_vol_user
else:
printv('ERROR: wrong assignment of variable "todo"', 1, 'error')
# define new 4d matrix with selected volumes
data_out = data[:, :, :, index_vol]
# save matrix inside new Image object
im_out = im_in.copy()
im_out.data = data_out
return im_out
| 4,838 |
def cost_logistic(p, x, y):
"""
Sum of absolute deviations of obs and logistic function L/(1+exp(-k(x-x0)))
Parameters
----------
p : iterable of floats
parameters (`len(p)=3`)
`p[0]` L - Maximum of logistic function
`p[1]` k - Steepness of logistic function
`p[2]` x0 - Inflection point of logistic function
x : float or array_like of floats
independent variable
y : float or array_like of floats
dependent variable, observations
Returns
-------
float
sum of absolute deviations
"""
return np.sum(np.abs(y-logistic_p(x,p)))
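# Hedged sketch (added for illustration): `logistic_p` is not shown here; from the
# docstring it is assumed to evaluate L / (1 + exp(-k * (x - x0))) with p = [L, k, x0].
def _logistic_p_sketch(x, p):
    x = np.asarray(x, dtype=float)
    return p[0] / (1.0 + np.exp(-p[1] * (x - p[2])))

# With the true parameters the cost would be zero:
#   x = np.linspace(-5, 5, 11)
#   y = _logistic_p_sketch(x, [1.0, 2.0, 0.0])
#   np.sum(np.abs(y - _logistic_p_sketch(x, [1.0, 2.0, 0.0])))  # -> 0.0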
| 4,839 |
def estimate_dt(time_array):
"""Automatically estimate timestep in a time_array
Args:
time_array ([list]): List or dataframe with time entries
Returns:
dt ([datetime.timedelta]): Timestep in dt.timedelta format
"""
if len(time_array) < 2:
# Assume arbitrary value
return datetime.timedelta(seconds=0)
dt = np.median(np.diff(time_array))
if not isinstance(dt, datetime.timedelta):
dt = datetime.timedelta(seconds=dt.astype(float)/1e9)
# Check if data is all ascending
if dt <= datetime.timedelta(0):
raise UserWarning('Please only insert time ascending data.')
return dt
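# Illustrative usage (added sketch): with a numpy datetime64 time axis the median
# spacing is converted to a datetime.timedelta.
import numpy as np

times = np.array(["2021-01-01T00:00", "2021-01-01T00:10", "2021-01-01T00:20"],
                 dtype="datetime64[ns]")
estimate_dt(times)  # -> datetime.timedelta(seconds=600)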
| 4,840 |
def gen_uuid() -> str:
"""
    Get a UUID.
:return: uuid
"""
return uu.uuid4().hex
| 4,841 |
def make_signature(arg_names, member=False):
"""Make Signature object from argument name iterable or str."""
kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
if isinstance(arg_names, str):
        arg_names = [name.strip() for name in arg_names.split(',')]
if member and arg_names and arg_names[0] != 'self':
arg_names = ['self'] + arg_names
return inspect.Signature([inspect.Parameter(n, kind) for n in arg_names])
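# Illustrative usage (added sketch):
import inspect

assert list(make_signature("x, y").parameters) == ["x", "y"]
assert list(make_signature(["a", "b"], member=True).parameters) == ["self", "a", "b"]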
| 4,842 |
def copy_images(source_list: list[str], source_path: str, destination_path: str):
""" Copy the images in the source_list from the source_path to the destination_path
:param source_list: The list with images
:param source_path: The current location of the images
:param destination_path: The location where the images should be copied
"""
for image in source_list:
shutil.copyfile(f'{source_path}{image}', f'./train/{destination_path}/{image}')
| 4,843 |
def set_trace_platform(*args):
"""
set_trace_platform(platform)
Set platform name of current trace.
@param platform (C++: const char *)
"""
return _ida_dbg.set_trace_platform(*args)
| 4,844 |
def leapfrog2(init, tspan, a, beta, omega, h):
"""
Integrate the damped oscillator with damping factor a using single step
Leapfrog for separable Hamiltonians.
"""
f = forcing(beta, omega)
return sym.leapfrog(init, tspan, h, lambda x, p, t: -x-a*p+f(t))
| 4,845 |
def delete(state='', datefilter='', key=''):
"""Delete raw state files from S3
Supports deleting:
1) A single file using 'key' argument
2) All files in cache using 'state' argument, or a
subset of cached files when 'datefilter' provided.
"""
if key:
state = key.lstrip('/').split('/')[2].lower()
#import ipdb;ipdb.set_trace()
archiver = BaseArchiver(state)
key = archiver.delete_file(key)
print("Deleted from S3: %s" % key.key)
elif state:
archiver = BaseArchiver(state)
for key in archiver.keys(datefilter):
key = archiver.delete_file(key)
print("Deleted from S3: %s" % key.key)
else:
print("Failed to supply proper arguments. No action executed.")
| 4,846 |
def get_paths(config, action, dir_name):
"""
Returns 'from' and 'to' paths.
@param config: wrapsync configuration
@param action: 'push'/'pull'
@param dir_name: name of the directory to append to paths from the config
@return: dictionary containing 'from' and 'to' paths
"""
path_from = ''
path_to = ''
if action == 'push':
if dir_name == 'all':
path_from = build_local_path(config, False)
path_to = build_remote_path(config, True)
else:
path_from = f"{build_local_path(config, False)}/{dir_name}"
path_to = build_remote_path(config, False)
else:
if dir_name == 'all':
path_from = build_remote_path(config, False)
path_to = build_local_path(config, True)
else:
path_from = f"{build_remote_path(config, False)}/{dir_name}"
path_to = build_local_path(config, False)
return {
'from': path_from,
'to': path_to
}
| 4,847 |
def to_dense(arr):
"""
Convert a sparse array to a dense numpy array. If the
array is already a numpy array, just return it. If the
array passed in is a list, then we recursively apply this
method to its elements.
Parameters
-----------
arr : :obj:`numpy.ndarray`, :obj:`scipy.sparse.spmatrix`, or list
Any matrix (or list of matrices) that must be converted
to a dense numpy array.
Raises
--------
TypeError
If the array provided is not a list, `numpy` array,
or `scipy.sparse` matrix.
Returns
--------
dense_args: tuple
"""
if isinstance(arr, np.ndarray):
return arr
if isinstance(arr, list):
return [to_dense(el) for el in arr]
# assume it must be a `scipy.sparse.spmatrix`
if isinstance(arr, sp.spmatrix):
return arr.toarray()
error_msg = (
"Can only convert numpy matrices, scipy matrices, or "
"lists of those elements to dense arrays"
)
raise TypeError(error_msg)
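# Illustrative usage (added sketch): sparse matrices and nested lists are converted
# recursively; numpy arrays are returned unchanged.
import numpy as np
import scipy.sparse as sp

dense = to_dense(sp.csr_matrix(np.eye(3)))               # -> 3x3 numpy array
both = to_dense([np.ones(2), sp.eye(2, format="csr")])   # list handled element-wise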
| 4,848 |
def output_results(results, way):
"""Helper method with most of the logic"""
tails = way(results)
heads = len(results) - tails
result = ", ".join([["Heads", "Tails"][flip] for flip in results])
return result + f"\n{heads} Heads; {tails} Tails"
| 4,849 |
def guess_init(model, focal_length, j2d, init_pose):
"""Initialize the camera translation via triangle similarity, by using the torso
    joints.
:param model: SMPL model
:param focal_length: camera focal length (kept fixed)
:param j2d: 14x2 array of CNN joints
:param init_pose: 72D vector of pose parameters used for initialization (kept fixed)
:returns: 3D vector corresponding to the estimated camera translation
"""
cids = np.arange(0, 12)
# map from LSP to SMPL joints
j2d_here = j2d[cids]
smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
opt_pose = ch.array(init_pose)
_, A_global = global_rigid_transformation(opt_pose, model.J, model.kintree_table, xp=ch)
Jtr = ch.vstack([g[:3, 3] for g in A_global])
Jtr = Jtr[smpl_ids].r
# 9 is L shoulder, 3 is L hip
# 8 is R shoulder, 2 is R hip
diff3d = np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
mean_height3d = np.mean(np.sqrt(np.sum(diff3d**2, axis=1)))
diff2d = np.array([j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
mean_height2d = np.mean(np.sqrt(np.sum(diff2d**2, axis=1)))
est_d = focal_length * (mean_height3d / mean_height2d)
init_t = np.array([0., 0., est_d])
return init_t
| 4,850 |
def clean_containers(args):
"""
Delete non-running containers.
Images cannot be deleted if in use. Deleting dead containers allows
more images to be cleaned.
"""
for container in determine_containers_to_remove(args):
print "Removing container ID: {}, Name: {}, Image: {}".format(
container["Id"],
(container.get("Names") or ["N/A"])[0],
container["Image"],
)
if args.dry_run:
continue
try:
args.client.remove_container(container["Id"])
except APIError as error:
print "Unable to remove container: {}: {}".format(
container["Id"],
error,
)
| 4,851 |
def write_package_to_disk(
package: GreatExpectationsContribPackageManifest, path: str
) -> None:
"""Serialize a GreatExpectationsContribPackageManifest instance into a JSON file.
Args:
package: The GreatExpectationsContribPackageManifest you wish to serialize.
path: The relative path to the target package JSON file.
"""
json_dict = package.to_json_dict()
data = json.dumps(json_dict, indent=4)
with open(path, "w") as f:
f.write(data)
logger.info(f"Succesfully wrote state to {path}.")
| 4,852 |
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
| 4,853 |
def TCnCom_Dump(*args):
"""
Dump(TCnComV const & CnComV, TStr Desc=TStr())
Parameters:
CnComV: TCnComV const &
Desc: TStr const &
TCnCom_Dump(TCnComV const & CnComV)
Parameters:
CnComV: TCnComV const &
"""
return _snap.TCnCom_Dump(*args)
| 4,854 |
def roughness_convert(reference, invert) :
"""
    Open an image and modify it to accommodate the way Mitsuba Renderer interprets roughness.
returns the reference for the image.
"""
    # Conversion for Mitsuba: no value above .5, and if this is a glossiness map, do an inversion.
    # I'm doing a linear inversion, it's closest to the way it seems to behave in 3ds Max.
    # However, it's not perfect; a custom conversion may be needed.
if not pilimported :
print("Pillow doesn't seem to be installed, roughness maps may cause some problems.")
return reference
reference = config.filepath+"export\\textures\\"+reference
input = Image.open(reference)# Convert to luminance
# ri means roughnes inverted, r just roughness.
# Different names in case the same texture is used for roughness, revert roughness, and something else.
filename = ".".join(reference.split("\\")[-1].split(".")[:-1])+("_ri."if invert else "_r.")+reference.split(".")[-1]
    # Dither it? There is a loss of precision with halving and reconverting to 8-bit channels.
    # With a simple random function it doesn't work; the lambda seems to work by blocks in the image.
    # Random with pixel coordinates as seed should work.
if invert : # Linear inversion : -> convert to linear, invert , reconvert to perceptual
        # Using 254 instead of 255 is intended: it ensures that the smoothest surfaces give a pitch-black roughness instead of the greyish tone obtained otherwise.
output = input.point(lambda px : int(.5 * 255. * (max(0, 1 - (float(px)/254.)**(2.2)))**(1./2.2)))
else :
output = input.point(lambda px : int(.5 * 255. * (float(px)/255.)))
savedir = config.filepath+"export\\textures"
if not os.path.exists(savedir) :
os.makedirs(savedir)
output.save(savedir+"\\"+filename)
return "textures\\"+filename
| 4,855 |
def get_reparametrize_functions(
params, constraints, scaling_factor=None, scaling_offset=None
):
"""Construct functions to map between internal and external parameters.
All required information is partialed into the functions.
Args:
params (pandas.DataFrame): See :ref:`params`.
constraints (list): List of constraint dictionaries.
scaling_factor (np.ndarray or None): If None, no scaling factor is used.
scaling_offset (np.ndarray or None): If None, no scaling offset is used
Returns:
func: Function that maps an external parameter vector to an internal one
func: Function that maps an internal parameter vector to an external one
"""
params = add_default_bounds_to_params(params)
check_params_are_valid(params)
processed_constraints, processed_params = process_constraints(
constraints=constraints,
params=params,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
# get partialed reparametrize from internal
pre_replacements = processed_params["_pre_replacements"].to_numpy()
post_replacements = processed_params["_post_replacements"].to_numpy()
fixed_values = processed_params["_internal_fixed_value"].to_numpy()
# get partialed reparametrize to internal
internal_free = processed_params["_internal_free"].to_numpy()
partialed_to_internal = functools.partial(
reparametrize_to_internal,
internal_free=internal_free,
processed_constraints=processed_constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
partialed_from_internal = functools.partial(
reparametrize_from_internal,
fixed_values=fixed_values,
pre_replacements=pre_replacements,
processed_constraints=processed_constraints,
post_replacements=post_replacements,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
return partialed_to_internal, partialed_from_internal
| 4,856 |
def gaussNewton(P, model, target, targetLandmarks, sourceLandmarkInds, NN, jacobi = True, calcId = True):
"""
Energy function to be minimized for fitting.
"""
# Shape eigenvector coefficients
idCoef = P[: model.idEval.size]
expCoef = P[model.idEval.size: model.idEval.size + model.expEval.size]
# Rotation Euler angles, translation vector, scaling factor
angles = P[model.idEval.size + model.expEval.size:][:3]
R = rotMat2angle(angles)
t = P[model.idEval.size + model.expEval.size:][3: 6]
s = P[model.idEval.size + model.expEval.size:][6]
# Transpose if necessary
if targetLandmarks.shape[0] != 3:
targetLandmarks = targetLandmarks.T
# The eigenmodel, before rigid transformation and scaling
model = model.idMean + np.tensordot(model.idEvec, idCoef, axes = 1) + np.tensordot(model.expEvec, expCoef, axes = 1)
# After rigid transformation and scaling
source = s*np.dot(R, model) + t[:, np.newaxis]
# Find the nearest neighbors of the target to the source vertices
# start = clock()
distance, ind = NN.kneighbors(source.T)
targetNN = target[ind.squeeze(axis = 1), :].T
# print('NN: %f' % (clock() - start))
    # Calculate residuals
rVert = targetNN - source
rLand = targetLandmarks - source[:, sourceLandmarkInds]
rAlpha = idCoef ** 2 / model.idEval
rDelta = expCoef ** 2 / model.expEval
# Calculate costs
Ever = np.linalg.norm(rVert, axis = 0).sum() / model.numVertices
Elan = np.linalg.norm(rLand, axis = 0).sum() / sourceLandmarkInds.size
Ereg = np.sum(rAlpha) + np.sum(rDelta)
if jacobi:
# start = clock()
drV_dalpha = -s*np.tensordot(R, model.idEvec, axes = 1)
drV_ddelta = -s*np.tensordot(R, model.expEvec, axes = 1)
drV_dpsi = -s*np.dot(dR_dpsi(angles), model)
drV_dtheta = -s*np.dot(dR_dtheta(angles), model)
drV_dphi = -s*np.dot(dR_dphi(angles), model)
drV_dt = -np.tile(np.eye(3), [source.shape[1], 1])
drV_ds = -np.dot(R, model)
drR_dalpha = np.diag(2*idCoef / model.idEval)
drR_ddelta = np.diag(2*expCoef / model.expEval)
# Calculate Jacobian
if calcId:
r = np.r_[rVert.flatten('F'), rLand.flatten('F'), rAlpha, rDelta]
J = np.r_[np.c_[drV_dalpha.reshape((source.size, idCoef.size), order = 'F'), drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')], np.c_[drV_dalpha[:, sourceLandmarkInds, :].reshape((targetLandmarks.size, idCoef.size), order = 'F'), drV_ddelta[:, sourceLandmarkInds, :].reshape((targetLandmarks.size, expCoef.size), order = 'F'), drV_dpsi[:, sourceLandmarkInds].flatten('F'), drV_dtheta[:, sourceLandmarkInds].flatten('F'), drV_dphi[:, sourceLandmarkInds].flatten('F'), drV_dt[:sourceLandmarkInds.size * 3, :], drV_ds[:, sourceLandmarkInds].flatten('F')], np.c_[drR_dalpha, np.zeros((idCoef.size, expCoef.size + 7))], np.c_[np.zeros((expCoef.size, idCoef.size)), drR_ddelta, np.zeros((expCoef.size, 7))]]
# Parameter update (Gauss-Newton)
dP = -np.linalg.inv(np.dot(J.T, J)).dot(J.T).dot(r)
else:
r = np.r_[rVert.flatten('F'), rLand.flatten('F'), rDelta]
J = np.r_[np.c_[drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')], np.c_[drV_ddelta[:, sourceLandmarkInds, :].reshape((np.prod(targetLandmarks.shape), expCoef.size), order = 'F'), drV_dpsi[:, sourceLandmarkInds].flatten('F'), drV_dtheta[:, sourceLandmarkInds].flatten('F'), drV_dphi[:, sourceLandmarkInds].flatten('F'), drV_dt[:sourceLandmarkInds.size * 3, :], drV_ds[:, sourceLandmarkInds].flatten('F')], np.c_[drR_ddelta, np.zeros((expCoef.size, 7))]]
# Parameter update (Gauss-Newton)
dP = np.r_[np.zeros(model.idEval.size), -np.linalg.inv(np.dot(J.T, J)).dot(J.T).dot(r)]
# print('GN: %f' % (clock() - start))
return Ever + Elan + Ereg, dP
return Ever + Elan + Ereg
| 4,857 |
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1-D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument.
"""
return T.arange(start, stop=stop, step=step, dtype=dtype)
| 4,858 |
def test_multiple_observation_sequences_1d_some_flat_with_single():
"""Multiple 1D (flat and non-flat) observation sequences with allow_single=True"""
X = [np.arange(2).reshape(-1, 1), np.arange(3)]
assert_all_equal(val.is_observation_sequences(X, allow_single=True), [
np.array([
[0],
[1]
]),
np.array([
[0],
[1],
[2]
])
])
| 4,859 |
def create_attachable_access_entity_profile(infra, entity_profile, **args):
"""Create an attached entity profile. This provides a template to deploy hypervisor policies on a large set of leaf ports. This also provides the association of a Virtual Machine Management (VMM) domain and the physical network infrastructure. """
args = args['optional_args'] if 'optional_args' in args.keys() else args
infra_attentityp = AttEntityP(infra, entity_profile)
if 'enable_infrastructure_vlan' in args.keys():
if args['enable_infrastructure_vlan'] in [True, 'True', 'true', 't', 'T']:
infra_provacc = ProvAcc(infra_attentityp)
elif args['enable_infrastructure_vlan'] in [False, 'False', 'false', 'f', 'F']:
infra_provacc = ProvAcc(infra_attentityp)
infra_provacc.delete()
if 'domain_profiles' in args.keys() and is_valid(args['domain_profiles']):
for domain in args['domain_profiles']:
if domain['type'] == 'physical':
path = 'uni/phys-'
elif domain['type'] == 'vcenter':
path = 'uni/vmmp-VMware/dom-'
elif domain['type'] == 'layer2':
path = 'uni/l2dom-'
elif domain['type'] == 'layer3':
path = 'uni/l3dom-'
else:
print 'Invalid domain type.'
path = ''
infra_rsdomp = RsDomP(infra_attentityp, path+domain['name'])
if is_valid_key(args, 'interface_policy_group'):
infra_funcp = FuncP(infra)
infra_accportgrp = AccPortGrp(infra_funcp, args['interface_policy_group'])
infra_rsattentp = RsAttEntP(infra_accportgrp)
return infra_attentityp
| 4,860 |
def test_destroy():
"""Test the destroy action"""
with patch(
"salt.cloud.clouds.hetzner._connect_client", return_value=MagicMock()
) as connect:
with patch("salt.cloud.clouds.hetzner.wait_until", return_value=True) as wait:
with patch("salt.cloud.clouds.hetzner.show_instance") as show_instance:
with pytest.raises(SaltCloudSystemExit):
hetzner.destroy("myvm", "function")
wait.return_value = False
show_instance.return_value = {"state": "running"}
connect.return_value.servers.get_by_name.return_value = None
hetzner.destroy("myvm", "action")
server = (
connect.return_value.servers.get_by_name.return_value
) = MagicMock()
# Stop the server before shutdown but failed
hetzner.destroy("myvm", "action")
server.delete.assert_not_called()
wait.assert_called_once_with("myvm", "off")
wait.return_value = True
hetzner.destroy("myvm", "action")
server.delete.assert_called_once()
# Don't stop if the server isn't running
show_instance.return_value = {"state": "off"}
wait.reset_mock()
hetzner.destroy("myvm", "action")
wait.assert_not_called()
| 4,861 |
def test_merged_conecpts(processed_ids, dynamodb, is_test_env):
"""Create merged concepts and load to db."""
if is_test_env:
dynamodb.merge.create_merged_concepts(processed_ids)
| 4,862 |
def update_scorboard(player1, player2, screen):
"""
updates the scoreboard to the new values
"""
# make the bar white to remove old points
WIDTH_OF_SCOREBOARD = 300
screen.fill(pygame.Color("white"), (0, 0, WIDTH_OF_SCOREBOARD, BORDER))
# we render the new points
textsurface = myfont.render(
f"Points: {player2.points} - {player1.points}", 1, (0, 0, 0))
screen.blit(textsurface, (0, 0))
| 4,863 |
def warning(msg: str):
"""Utility function for print an warning message and also log it
Args:
msg (str): Warning message
"""
print(f'[Warning] {msg}')
logging.warning(msg)
| 4,864 |
def tensor_projection_reader(
embedding_file_path: str,
label_file_path: str
) -> Tuple[np.ndarray, List[List[str]]]:
"""
Reads the embedding and labels stored at the given paths and returns an np.ndarray and list of labels
:param str embedding_file_path: Path to the embedding file
:param str label_file_path: Path to the labels file
:return: An embedding and list of labels
:rtype: (numpy.ndarray, List[List[str]])
"""
embedding = np.loadtxt(embedding_file_path, delimiter='\t')
labels: List[List[str]] = []
with open(label_file_path) as f:
csv_reader = csv.reader(f, delimiter='\t')
for label_row in csv_reader:
labels.append(label_row)
return embedding, labels
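# Illustrative usage (added sketch): write a tiny tab-separated embedding and
# label file, then load them back.
import csv
import os
import tempfile

import numpy as np

tmp = tempfile.mkdtemp()
emb_path = os.path.join(tmp, "embedding.tsv")
lab_path = os.path.join(tmp, "labels.tsv")
np.savetxt(emb_path, np.random.rand(3, 2), delimiter="\t")
with open(lab_path, "w", newline="") as f:
    csv.writer(f, delimiter="\t").writerows([["a", "1"], ["b", "2"], ["c", "3"]])
embedding, labels = tensor_projection_reader(emb_path, lab_path)
# embedding.shape == (3, 2); labels == [["a", "1"], ["b", "2"], ["c", "3"]]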
| 4,865 |
def xml_to_dict(xmlobj, saveroot=True):
"""Parse the xml into a dictionary of attributes.
Args:
xmlobj: An ElementTree element or an xml string.
saveroot: Keep the xml element names (ugly format)
Returns:
An ElementDict object or ElementList for multiple objects
"""
if isinstance(xmlobj, basestring):
# Allow for blank (usually HEAD) result on success
if xmlobj.isspace():
return {}
try:
element = ET.fromstring(xmlobj)
except Exception, err:
raise Error('Unable to parse xml data: %s' % err)
else:
element = xmlobj
element_type = element.get('type', '').lower()
if element_type == 'array':
element_list_type = element.tag.replace('-', '_')
return_list = element_containers.ElementList(element_list_type)
for child in element.getchildren():
child_element = xml_to_dict(child, saveroot)
if saveroot and isinstance(child_element, dict):
return_list.append(child_element.values()[0])
else:
return_list.append(child_element)
if saveroot:
return element_containers.ElementDict(element_list_type,
{element_list_type:
return_list})
else:
return return_list
elif element.get('nil') == 'true':
return None
elif element_type in ('integer', 'datetime', 'date',
'decimal', 'double', 'float') and not element.text:
return None
elif element_type == 'integer':
return int(element.text)
elif element_type == 'datetime':
if date_parse:
return date_parse(element.text)
else:
try:
timestamp = calendar.timegm(
time.strptime(element.text, '%Y-%m-%dT%H:%M:%S+0000'))
return datetime.datetime.utcfromtimestamp(timestamp)
except ValueError, err:
raise Error('Unable to parse timestamp. Install dateutil'
' (http://labix.org/python-dateutil) or'
' pyxml (http://pyxml.sf.net/topics/)'
' for ISO8601 support.')
elif element_type == 'date':
time_tuple = time.strptime(element.text, '%Y-%m-%d')
return datetime.date(*time_tuple[:3])
elif element_type == 'decimal':
return decimal.Decimal(element.text)
elif element_type in ('float', 'double'):
return float(element.text)
elif element_type == 'boolean':
if not element.text:
return False
return element.text.strip() in ('true', '1')
elif element_type == 'yaml':
if not yaml:
raise ImportError('PyYaml is not installed: http://pyyaml.org/')
return yaml.safe_load(element.text)
elif element_type == 'base64binary':
return base64.decodestring(element.text)
elif element_type == 'file':
content_type = element.get('content_type',
'application/octet-stream')
filename = element.get('name', 'untitled')
return FileObject(element.text, filename, content_type)
elif element_type in ('symbol', 'string'):
if not element.text:
return ''
return element.text
elif element.getchildren():
# This is an element with children. The children might be simple
# values, or nested hashes.
if element_type:
attributes = element_containers.ElementDict(
underscore(element.get('type', '')), element.items())
else:
attributes = element_containers.ElementDict(singularize(
element.tag.replace('-', '_')), element.items())
for child in element.getchildren():
attribute = xml_to_dict(child, saveroot)
child_tag = child.tag.replace('-', '_')
if saveroot:
# If this is a nested hash, it will come back as
# {child_tag: {key: value}}, we only want the inner hash
if isinstance(attribute, dict):
if len(attribute) == 1 and child_tag in attribute:
attribute = attribute[child_tag]
# Handle multiple elements with the same tag name
if child_tag in attributes:
if isinstance(attributes[child_tag], list):
attributes[child_tag].append(attribute)
else:
attributes[child_tag] = [attributes[child_tag],
attribute]
else:
attributes[child_tag] = attribute
if saveroot:
return {element.tag.replace('-', '_'): attributes}
else:
return attributes
elif element.items():
return element_containers.ElementDict(element.tag.replace('-', '_'),
element.items())
else:
return element.text
| 4,866 |
def test_update_nonexistent_cve():
""" the cve record cannot be updated because it doesn't exist """
with open('./src/test/cve_tests/cve_record_fixtures/CVE-2009-0009_rejected.json') as json_file:
data = json.load(json_file)
res = requests.put(
f'{env.AWG_BASE_URL}{CVE_URL}/{reject_cve_id}',
headers=utils.BASE_HEADERS,
json=data
)
assert res.status_code == 403
response_contains_json(res, 'error', 'CVE_RECORD_DNE')
| 4,867 |
def test_success(database):
""" Tests success for when AssistanceType field is required and must be one of the allowed values:
'02', '03', '04', '05', '06', '07', '08', '09', '10', '11'
"""
det_award_1 = DetachedAwardFinancialAssistanceFactory(assistance_type='02', correction_delete_indicatr='')
det_award_2 = DetachedAwardFinancialAssistanceFactory(assistance_type='03', correction_delete_indicatr=None)
det_award_3 = DetachedAwardFinancialAssistanceFactory(assistance_type='04', correction_delete_indicatr='C')
det_award_4 = DetachedAwardFinancialAssistanceFactory(assistance_type='05', correction_delete_indicatr='c')
det_award_5 = DetachedAwardFinancialAssistanceFactory(assistance_type='06', correction_delete_indicatr='')
det_award_6 = DetachedAwardFinancialAssistanceFactory(assistance_type='07', correction_delete_indicatr='')
det_award_7 = DetachedAwardFinancialAssistanceFactory(assistance_type='08', correction_delete_indicatr='')
det_award_8 = DetachedAwardFinancialAssistanceFactory(assistance_type='09', correction_delete_indicatr='')
det_award_9 = DetachedAwardFinancialAssistanceFactory(assistance_type='10', correction_delete_indicatr='')
det_award_10 = DetachedAwardFinancialAssistanceFactory(assistance_type='11', correction_delete_indicatr='')
# Ignore correction delete indicator of D
det_award_11 = DetachedAwardFinancialAssistanceFactory(assistance_type='Thing', correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5,
det_award_6, det_award_7, det_award_8, det_award_9,
det_award_10, det_award_11])
assert errors == 0
| 4,868 |
def conve_interaction(
h: torch.FloatTensor,
r: torch.FloatTensor,
t: torch.FloatTensor,
t_bias: torch.FloatTensor,
input_channels: int,
embedding_height: int,
embedding_width: int,
hr2d: nn.Module,
hr1d: nn.Module,
) -> torch.FloatTensor:
"""Evaluate the ConvE interaction function.
:param h: shape: (batch_size, num_heads, 1, 1, dim)
The head representations.
:param r: shape: (batch_size, 1, num_relations, 1, dim)
The relation representations.
:param t: shape: (batch_size, 1, 1, num_tails, dim)
The tail representations.
:param t_bias: shape: (batch_size, 1, 1, num_tails, 1)
The tail entity bias.
:param input_channels:
The number of input channels.
:param embedding_height:
The height of the reshaped embedding.
:param embedding_width:
The width of the reshaped embedding.
:param hr2d:
The first module, transforming the 2D stacked head-relation "image".
:param hr1d:
The second module, transforming the 1D flattened output of the 2D module.
:return: shape: (batch_size, num_heads, num_relations, num_tails)
The scores.
"""
# repeat if necessary, and concat head and relation, batch_size', num_input_channels, 2*height, width
# with batch_size' = batch_size * num_heads * num_relations
x = broadcast_cat(
[
h.view(*h.shape[:-1], input_channels, embedding_height, embedding_width),
r.view(*r.shape[:-1], input_channels, embedding_height, embedding_width),
],
dim=-2,
).view(-1, input_channels, 2 * embedding_height, embedding_width)
# batch_size', num_input_channels, 2*height, width
x = hr2d(x)
# batch_size', num_output_channels * (2 * height - kernel_height + 1) * (width - kernel_width + 1)
x = x.view(-1, numpy.prod(x.shape[-3:]))
x = hr1d(x)
# reshape: (batch_size', embedding_dim) -> (b, h, r, 1, d)
x = x.view(-1, h.shape[1], r.shape[2], 1, h.shape[-1])
# For efficient calculation, each of the convolved [h, r] rows has only to be multiplied with one t row
# output_shape: (batch_size, num_heads, num_relations, num_tails)
t = t.transpose(-1, -2)
x = (x @ t).squeeze(dim=-2)
# add bias term
return x + t_bias.squeeze(dim=-1)
| 4,869 |
def index():
"""Show Homepage"""
return render_template("index.html")
| 4,870 |
def wav_to_spectrogram(audio_path, save_path, spectrogram_dimensions=(64, 64), noverlap=16, cmap='gray_r'):
""" Creates a spectrogram of a wav file.
:param audio_path: path of wav file
:param save_path: path of spectrogram to save
:param spectrogram_dimensions: number of pixels the spectrogram should be. Defaults (64,64)
:param noverlap: See http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html
:param cmap: the color scheme to use for the spectrogram. Defaults to 'gray_r'
:return:
"""
sample_rate, samples = wav.read(audio_path)
fig = plt.figure()
fig.set_size_inches((spectrogram_dimensions[0]/fig.get_dpi(), spectrogram_dimensions[1]/fig.get_dpi()))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.specgram(samples, cmap=cmap, Fs=2, noverlap=noverlap)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
fig.savefig(save_path, bbox_inches="tight", pad_inches=0)
| 4,871 |
def add_path(G, data, one_way):
"""
Add a path to the graph.
Parameters
----------
G : networkx multidigraph
data : dict
the attributes of the path
one_way : bool
if this path is one-way or if it is bi-directional
Returns
-------
None
"""
# extract the ordered list of nodes from this path element, then delete it
# so we don't add it as an attribute to the edge later
path_nodes = data['nodes']
del data['nodes']
# set the oneway attribute to the passed-in value, to make it consistent
# True/False values, but only do this if you aren't forcing all edges to
# oneway with the all_oneway setting. With the all_oneway setting, you
# likely still want to preserve the original OSM oneway attribute.
if not settings.all_oneway:
data['oneway'] = one_way
# zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)
# and so on
path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))
G.add_edges_from(path_edges, **data)
# if the path is NOT one-way
if not one_way:
# reverse the direction of each edge and add this path going the
# opposite direction
path_edges_opposite_direction = [(v, u) for u, v in path_edges]
G.add_edges_from(path_edges_opposite_direction, **data)
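# Illustrative usage (added sketch, assuming the osmnx-style `settings` module used
# by the original file is importable and settings.all_oneway is its default False):
import networkx as nx

G = nx.MultiDiGraph()
add_path(G, {"nodes": [1, 2, 3], "highway": "residential"}, one_way=False)
# G now contains edges (1, 2), (2, 3) and the reversed (2, 1), (3, 2),
# each carrying highway="residential" and oneway=False.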
| 4,872 |
def structure_standardization(smi: str) -> str:
"""
Standardization function to clean up smiles with RDKit. First, the input smiles is converted into a mol object.
Not-readable SMILES are written to the log file. The molecule size is checked by the number of atoms (non-hydrogen).
If the molecule has more than 100 non-hydrogen atoms, the compound is discarded and written in the log file.
Molecules with number of non-hydrogen atoms <= 100 are standardized with the MolVS toolkit
(https://molvs.readthedocs.io/en/latest/index.html) relying on RDKit. Molecules which failed the standardization
process are saved in the log file. The remaining standardized structures are converted back into their canonical
SMILES format.
:param smi: Input SMILES from the given structure data file T4
:return: smi_clean: Cleaned and standardized canonical SMILES of the given input SMILES.
Args:
smi (str): Non-standardized smiles string
Returns:
str: standardized smiles string
"""
# tautomer.TAUTOMER_TRANSFORMS = update_tautomer_rules()
# importlib.reload(MolVS_standardizer)
# param = ReadConfig()
standardization_param = ConfigDict.get_parameters()["standardization"]
max_num_atoms = standardization_param["max_num_atoms"]
max_num_tautomers = standardization_param["max_num_tautomers"]
include_stereoinfo = standardization_param["include_stereoinfo"]
    ## Load new tautomer enumerator/canonicalizer
tautomerizer = rdMolStandardize.TautomerEnumerator()
tautomerizer.SetMaxTautomers(max_num_tautomers)
tautomerizer.SetRemoveSp3Stereo(
False
) # Keep stereo information of keto/enol tautomerization
def isotope_parent(mol: Chem.Mol) -> Chem.Mol:
"""
Isotope parent from MOLVS
Return the isotope parent of a given molecule.
The isotope parent has all atoms replaced with the most abundant isotope for that element.
Args:
mol (Chem.Mol): input rdkit mol object
Returns:
Chem.Mol: isotope parent rdkit mol object
"""
mol = copy.deepcopy(mol)
# Replace isotopes with common weight
for atom in mol.GetAtoms():
atom.SetIsotope(0)
return mol
def my_standardizer(mol: Chem.Mol) -> Chem.Mol:
"""
MolVS implementation of standardization
Args:
mol (Chem.Mol): non-standardized rdkit mol object
Returns:
            Chem.Mol: standardized rdkit mol object
"""
mol = copy.deepcopy(mol)
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
disconnector = rdMolStandardize.MetalDisconnector()
mol = disconnector.Disconnect(mol)
normalizer = rdMolStandardize.Normalizer()
mol = normalizer.normalize(mol)
reionizer = rdMolStandardize.Reionizer()
mol = reionizer.reionize(mol)
Chem.AssignStereochemistry(mol, force=True, cleanIt=True)
# TODO: Check this removes symmetric stereocenters
return mol
mol = MolFromSmiles(smi) # Read SMILES and convert it to RDKit mol object.
if (
mol is not None
): # Check, if the input SMILES has been converted into a mol object.
if (
mol.GetNumAtoms() <= max_num_atoms
): # check size of the molecule based on the non-hydrogen atom count.
try:
mol = rdMolStandardize.ChargeParent(
mol
) # standardize molecules using MolVS and RDKit
mol = isotope_parent(mol)
if include_stereoinfo is False:
Chem.RemoveStereochemistry(mol)
mol = tautomerizer.Canonicalize(mol)
mol_clean = my_standardizer(mol)
smi_clean = MolToSmiles(
mol_clean
) # convert mol object back to SMILES
else:
mol = tautomerizer.Canonicalize(mol)
mol_clean = my_standardizer(mol)
smi_clean = MolToSmiles(mol_clean)
except (ValueError, AttributeError) as e:
smi_clean = np.nan
logging.error(
"Standardization error, " + smi + ", Error Type: " + str(e)
) # write failed molecules during standardization to log file
else:
smi_clean = np.nan
logging.error("Molecule too large, " + smi)
else:
smi_clean = np.nan
logging.error("Reading Error, " + smi)
return smi_clean
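
# Hedged standalone sketch of the same standardization steps on an example SMILES
# (aspirin), using only RDKit. It bypasses the project's ConfigDict size/tautomer
# settings and logging, so treat it as an illustration rather than a drop-in
# replacement for the function above.
from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize

smi_in = "CC(=O)Oc1ccccc1C(=O)O"   # aspirin, illustration only
mol_demo = Chem.MolFromSmiles(smi_in)
mol_demo = rdMolStandardize.ChargeParent(mol_demo)                  # neutralize, keep parent fragment
mol_demo = rdMolStandardize.TautomerEnumerator().Canonicalize(mol_demo)
print(Chem.MolToSmiles(mol_demo))                                   # canonical standardized SMILES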
| 4,873 |
def main() -> None:
"""
Entry point of this test project.
"""
ap.Stage(
background_color='#333',
stage_width=1000, stage_height=500)
sprite: ap.Sprite = ap.Sprite()
sprite.graphics.line_style(
color='#0af', thickness=10, dot_setting=ap.LineDotSetting(dot_size=3))
line_1: ap.Line = sprite.graphics.draw_dashed_line(
x_start=50, y_start=50, x_end=350, y_end=50,
dash_size=10, space_size=5)
options: _SpOptions = {'sprite': sprite}
line_1.click(on_line_click, options=options)
ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
| 4,874 |
def upload(fname, space, parent_id):
"""Upload documentation for CMake module ``fname`` as child of page with
id ``parent_id``."""
pagename = path.splitext(path.basename(fname))[0]
rst = '\n'.join(extract(fname))
if not rst:
return
log.debug('=' * 79)
log.info('Uploading %s', pagename)
log.debug('=' * 79)
log.debug('rST')
log.debug('-' * 79)
log.debug(rst)
log.debug('-' * 79)
log.debug('Confluence')
log.debug('-' * 79)
cwiki = publish_string(rst, writer=confluence.Writer())
log.debug(cwiki)
page = {'type': 'page',
'title': pagename,
'space': {'key': space},
'body': {'storage': {'value': cwiki, 'representation': 'wiki'}},
'ancestors': [{'type': 'page', 'id': parent_id}]}
create_or_update(page, space)
| 4,875 |
def with_input_dtype(policy, dtype):
"""Copies "infer" `policy`, adding `dtype` to it.
  Policy must be "infer" or "infer_with_float32_vars" (i.e., has no compute dtype).
Returns a new policy with compute dtype `dtype`. The returned policy's
variable dtype is also `dtype` if `policy` is "infer", and is `float32` if
`policy` is "infer_with_float32_vars".
Args:
    policy: An "infer" or "infer_with_float32_vars" policy.
dtype: The dtype of an input to a layer.
Returns:
A new policy copied from `policy`, but with compute dtype and maybe
variable_dtype set to `dtype`.
"""
assert not policy.compute_dtype
dtype = dtypes.as_dtype(dtype).name
if policy.variable_dtype is None:
return Policy(dtype)
else:
# Policies without a compute dtype are either "infer" or
# "infer_with_float32_vars", so the variable_dtype must be float32 here.
assert policy.variable_dtype == 'float32'
try:
Policy._warn_about_float32_vars = False # pylint: disable=protected-access
return Policy(dtype + '_with_float32_vars')
finally:
Policy._warn_about_float32_vars = True # pylint: disable=protected-access
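
# Hedged illustration of the mapping implemented above (names follow the docstring
# and the code; not executed against a real Policy class):
#   with_input_dtype(Policy('infer'), tf.float16)
#       -> Policy('float16')                      (compute and variable dtype float16)
#   with_input_dtype(Policy('infer_with_float32_vars'), tf.float16)
#       -> Policy('float16_with_float32_vars')    (compute float16, variables float32)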
| 4,876 |
def download_users_start(api: API, start_point: str, n: float = math.inf) -> None:
"""
This function downloads n Twitter users by using a friends-chain.
Since there isn't an API or a database with all Twitter users, we can't obtain a strict list
of all Twitter users, nor can we obtain a list of strictly random or most popular Twitter
users. Therefore, we use the method of follows chaining: we start from a specific individual,
obtain their followers, and pick 6 random individuals from the friends list. Then, we repeat
the process for the selected friends: we pick 6 random friends of the 6 random friends
that we picked.
In reality, this method will be biased toward individuals that are worthy of following since
"friends" are the list of users that someone followed.
Data Directory
--------
It will download all user data to ./data/twitter/user/users/<screen_name>.json
It will save meta info to ./data/twitter/user/meta/
Twitter API Reference
--------
It will be using the API endpoint api.twitter.com/friends/list (Documentation:
https://developer.twitter.com/en/docs/twitter-api/v1/accounts-and-users/follow-search-get-users/api-reference/get-friends-list)
This will limit the rate of requests to 15 requests in a 15-minute window, which is one request
per minute. But it is actually the fastest method of downloading a wide range of users on
Twitter because it can download a maximum of 200 users at a time while the API for downloading
    a single user is limited to only 900 queries per 15 minutes, which is only 60 users per minute.
There is another API endpoint that might do the job, which is api.twitter.com/friends/ids (Doc:
https://developer.twitter.com/en/docs/twitter-api/v1/accounts-and-users/follow-search-get-users/api-reference/get-friends-ids)
However, even though this endpoint has a much higher request rate limit, it only returns user
ids and not full user info.
Parameters
--------
:param api: Tweepy's API object
:param start_point: Starting user's screen name.
:param n: How many users do you want to download? (Default: math.inf)
:return: None
"""
# Set of all the downloaded users' screen names
downloaded = set()
# The set of starting users that are queried.
done_set = set()
# The set of starting users currently looping through
current_set = {start_point}
# The next set of starting users
next_set = set()
# Start download
download_users_execute(api, n, downloaded,
done_set, current_set, next_set)
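
# Hedged usage sketch: builds a Tweepy API object and starts the friends-chain
# download from one seed account. The credential placeholders and the seed screen
# name "nasa" are illustrative only; wait_on_rate_limit lets Tweepy sleep through
# the 15-requests-per-15-minutes window mentioned above.
import tweepy

auth = tweepy.OAuthHandler("<consumer_key>", "<consumer_secret>")
auth.set_access_token("<access_token>", "<access_token_secret>")
api = tweepy.API(auth, wait_on_rate_limit=True)

download_users_start(api, "nasa", n=10000)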
| 4,877 |
def decode_object_based(effects):
"""
Reads and decodes info about object-based layer effects.
"""
fp = io.BytesIO(effects)
version, descriptor_version = read_fmt("II", fp)
try:
descriptor = decode_descriptor(None, fp)
except UnknownOSType as e:
warnings.warn("Ignoring object-based layer effects tagged block (%s)" % e)
return effects
return ObjectBasedEffects(version, descriptor_version, descriptor)
| 4,878 |
def example_add(x: int, y: int):
"""
...
"""
return x + y
| 4,879 |
def inverse(text: str, reset_style: Optional[bool] = True) -> str:
"""Returns text inverse-colored.
Args:
reset_style: Boolean that determines whether a reset character should
be appended to the end of the string.
"""
return set_mode("inverse", False) + text + (reset() if reset_style else "")
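
# Hedged usage sketch (assumes this module's set_mode()/reset() helpers are
# available alongside inverse): prints one inverse-video word followed by
# normally styled text on an ANSI-capable terminal.
print(inverse("WARNING") + " disk space is low")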
| 4,880 |
def make():
""" hook function for entrypoints
:return:
"""
return LocalFileSystem
| 4,881 |
def nkp(directory):
"""
    Args:
        directory (str): Directory containing the VASP input files (POSCAR and KPOINTS).

    Returns:
        None: The number of irreducible k-points is printed to stdout.
"""
# TODO Seems to fail. Could be related to the fact that the symmetry
# tolerances are different.
input_dir = os.path.abspath(directory)
structure = Structure.from_file(os.path.join(input_dir, "POSCAR"))
# incar = Incar.from_file(os.path.join(directory, "INCAR"))
# if incar.get("MAGMOM", None) is not None:
# structure.add_site_property(("magmom"), incar.get("MAGMOM", None))
kpoints = Kpoints.from_file(os.path.join(input_dir, "KPOINTS"))
print("Number of irreducible kpoints = " +
str(find_irr_kpoints(structure, kpoints)))
| 4,882 |
def configure():
"""read configuration from command line options and config file values"""
opts = parse_options()
    defaults = dict(v.split('=', 1) for v in opts.S or [])  # split on the first '=' only, so values may contain '='
with open(opts.config_file) as config:
targets = read_config(config, defaults, opts.ignore_colon)
if opts.T:
return {opts.T: targets[opts.T]}
else:
return targets
| 4,883 |
def access_contact(the_config, the_browser, page_event_new) -> None:
"""I should access to the contact page."""
p_page = ContactPage(_driver=the_browser, _config=the_config['urls'], _contact=page_event_new.contact)
assert p_page.visit() is True
assert verify_contact(p_page) is True
| 4,884 |
from scipy.io import loadmat


def proper_loadmat(file_path):
    """Loads a .mat file using scipy.io.loadmat and cleans out the '__'-prefixed metadata entries."""
    data = loadmat(file_path)
clean_data = {}
for key, value in data.items():
if not key.startswith("__"):
clean_data[key] = value.squeeze().tolist()
return clean_data
| 4,885 |
def _get_time_total(responses: List[DsResponse]) -> List[str]:
"""Get formated total time metrics."""
metric_settings = {
"name": "time_total",
"type": "untyped",
"help": "Returns the total time in seconds (time taken to request, render and download).",
"func": lambda response: __float2str(response.time_total),
}
return _get_metrics(responses, metric_settings)
| 4,886 |
async def list_sessions(
cache: Redis = Depends(depends_redis),
) -> ListSessionsResponse:
"""Get all session keys"""
keylist = []
for key in await cache.keys(pattern=f"{IDPREFIX}*"):
if not isinstance(key, bytes):
raise TypeError(
"Found a key that is not stored as bytes (stored as type "
f"{type(key)!r})."
)
keylist.append(key.decode(encoding="utf-8"))
return ListSessionsResponse(keys=keylist)
| 4,887 |
def install(comicrack):
"""
Installs this module. This must be called before any other method in this
module is called. You must take steps to GUARANTEE that this module's
uninstall() method is called once you have called this method.
Takes a single parameter, which is the ComicRack object that we are
running as part of.
"""
global __i18n
if __i18n is None:
__i18n = __I18n(comicrack)
# the MessageBoxManager is a helpful little DLL that I downloaded from here:
# http://www.codeproject.com/KB/miscctrl/Localizing_MessageBox.aspx
#
# it allows me to define localized strings for the different button types in
# a MessageBox. it MUST be uninstalled afterwards, to change things back!
MessageBoxManager.Register()
    MessageBoxManager.OK = get("MessageBoxOk")
    MessageBoxManager.Cancel = get("MessageBoxCancel")
    MessageBoxManager.Retry = get("MessageBoxRetry")
    MessageBoxManager.Ignore = get("MessageBoxIgnore")
    MessageBoxManager.Abort = get("MessageBoxAbort")
    MessageBoxManager.Yes = get("MessageBoxYes")
    MessageBoxManager.No = get("MessageBoxNo")
| 4,888 |
async def update_listener(hass, entry):
"""Update listener."""
entry.data = entry.options
await hass.config_entries.async_forward_entry_unload(entry, PLATFORM)
hass.async_add_job(hass.config_entries.async_forward_entry_setup(entry, PLATFORM))
| 4,889 |
def square(x, out=None, where=True, **kwargs):
"""
Return the element-wise square of the input.
Args:
x (numpoly.ndpoly):
Input data.
out (Optional[numpy.ndarray]):
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where (Optional[numpy.ndarray]):
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value. Note
that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
out (numpoly.ndpoly):
Element-wise `x*x`, of the same shape and dtype as `x`.
This is a scalar if `x` is a scalar.
Examples:
>>> numpoly.square([-1j, 1])
polynomial([(-1-0j), (1+0j)])
>>> numpoly.square(numpoly.sum(numpoly.symbols("x y")))
polynomial(y**2+2*x*y+x**2)
"""
return multiply(x, x, out=out, where=where, **kwargs)
| 4,890 |
def evaluate_tuple(columns,mapper,condition):
"""
"""
if isinstance(condition, tuple):
return condition[0](columns,mapper,condition[1],condition[2])
else:
return condition(columns,mapper)
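
# Hedged worked example (toy condition functions, illustration only): the tuple
# form carries two extra arguments, the bare-callable form takes none.
def _col_equals(columns, mapper, col, value):
    return columns.get(col) == value

def _has_mapper(columns, mapper):
    return mapper is not None

print(evaluate_tuple({"status": "open"}, None, (_col_equals, "status", "open")))  # True
print(evaluate_tuple({"status": "open"}, None, _has_mapper))                      # False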
| 4,891 |
def imread(path, is_grayscale=True):
"""
Read image using its path.
Default value is gray-scale, and image is read by YCbCr format as the paper said.
"""
if is_grayscale:
return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)
else:
return scipy.misc.imread(path, mode='YCbCr').astype(np.float)
| 4,892 |
def configparser(subparsers):
"""The config subparser setup."""
parser = subparsers.add_parser('config', help="Configuring the presets.")
parser.set_defaults(func=configuring)
parser.add_argument('-dp', '--default-preset', nargs='?', const=True,
help="Setup a default pattern. Providing no argument shows the current default.")
parser.add_argument('-pl', '--preset-list', action='store_true', help="Shows the current presets.")
parser.add_argument('-pa', '--preset-add', nargs=2, metavar=('NAME', 'PATTERN'),
help="Adds a pattern in the preset. Multi-worded names are to be quoted.")
parser.add_argument('-pd', '--preset-delete', nargs='+', metavar='NAME',
help="Deletes one/multiple presets by it's name. Multi-worded names are to be quoted." \
" Remove the default preset by 'default'.")
| 4,893 |
def get_priority_text(priority):
"""
Returns operation priority name by numeric value.
:param int priority: Priority numeric value.
:return: Operation priority name.
:rtype: str | None
"""
if priority == NSOperationQueuePriorityVeryLow:
return "VeryLow"
elif priority == NSOperationQueuePriorityLow:
return "Low"
elif priority == NSOperationQueuePriorityNormal:
return "Normal"
elif priority == NSOperationQueuePriorityHigh:
return "High"
elif priority == NSOperationQueuePriorityVeryHigh:
return "VeryHigh"
return "{}".format(priority)
| 4,894 |
def get_experiment_tag_for_image(image_specs, tag_by_experiment=True):
"""Returns the registry with the experiment tag for given image."""
tag = posixpath.join(experiment_utils.get_base_docker_tag(),
image_specs['tag'])
if tag_by_experiment:
tag += ':' + experiment_utils.get_experiment_name()
return tag
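
# Hedged illustration of the tag composition (the base tag, image spec, and experiment
# name below are made up; real values come from experiment_utils):
import posixpath

base_tag = "gcr.io/my-project/my-benchmark"   # stand-in for experiment_utils.get_base_docker_tag()
tag_demo = posixpath.join(base_tag, "builders/benchmark/libfuzzer") + ":" + "exp-2024-01-01"
print(tag_demo)  # gcr.io/my-project/my-benchmark/builders/benchmark/libfuzzer:exp-2024-01-01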
| 4,895 |
def create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
| 4,896 |
def call_pager():
"""
Convenient wrapper to call Pager class
"""
return _Pager()
| 4,897 |
def sign_in(request, party_id, party_guest_id):
"""
Sign guest into party.
"""
if request.method != "POST":
return HttpResponse("Endpoint supports POST method only.", status=405)
try:
party = Party.objects.get(pk=party_id)
party_guest = PartyGuest.objects.get(pk=party_guest_id)
except Party.DoesNotExist:
return HttpResponse("Requested Party ID does not exist.", status=404)
except PartyGuest.DoesNotExist:
return HttpResponse("Requested Party Guest does not exist.", status=404)
if not party.is_list_closed():
return HttpResponse("Can't sign in guests before the party starts.", status=403)
if not party_guest.signed_in:
party.sign_in(party_guest)
party.save()
party_guest.save()
return JsonResponse(party_guest.to_json())
return HttpResponse(
"Guest already signed in. Refresh to see updated list.", status=409
)
| 4,898 |
def mcoolqc_status(connection, **kwargs):
"""Searches for annotated bam files that do not have a qc object
Keyword arguments:
lab_title -- limit search with a lab i.e. Bing+Ren, UCSD
start_date -- limit search to files generated since a date formatted YYYY-MM-DD
run_time -- assume runs beyond run_time are dead (default=24 hours)
"""
start = datetime.utcnow()
check = CheckResult(connection, 'mcoolqc_status')
my_auth = connection.ff_keys
check.action = "mcoolqc_start"
check.brief_output = []
check.full_output = {}
check.status = 'PASS'
# check indexing queue
check, skip = wfr_utils.check_indexing(check, connection)
if skip:
return check
# Build the query (find mcool files)
default_stati = 'released&status=uploaded&status=released+to+project'
stati = 'status=' + (kwargs.get('status') or default_stati)
query = 'search/?file_format.file_format=mcool&{}'.format(stati)
query += '&type=FileProcessed'
query += '&quality_metric.display_title=No+value'
# add date
s_date = kwargs.get('start_date')
if s_date:
query += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query += '&lab.display_title=' + lab
# The search
print(query)
res = ff_utils.search_metadata(query, key=my_auth)
if not res:
check.action_message = 'No action required at this moment'
check.summary = 'All Good!'
return check
check.summary = '{} files need a mcoolqc'. format(len(res))
check.status = 'WARN'
check = wfr_utils.check_runs_without_output(res, check, 'mcoolQC', my_auth, start)
return check
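
# Hedged illustration of the search query this check builds, as standalone string
# construction mirroring the code above (the start date and lab title are made up):
stati = 'status=' + 'released&status=uploaded&status=released+to+project'
query = 'search/?file_format.file_format=mcool&{}'.format(stati)
query += '&type=FileProcessed'
query += '&quality_metric.display_title=No+value'
query += '&date_created.from=' + '2024-01-01'
query += '&lab.display_title=' + 'Bing+Ren,+UCSD'
print(query)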
| 4,899 |