Record schema (one record per source file):
hexsha: string (40) | size: int64 (1 to 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (3 to 239) | max_stars_repo_name: string (5 to 130) | max_stars_repo_head_hexsha: string (40 to 78) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime / _max_datetime: string (24, nullable)
max_issues_repo_*: the same repo fields for issues, plus max_issues_count: int64 (1 to 67k, nullable) and the issue event min/max datetimes
max_forks_repo_*: the same repo fields for forks, plus max_forks_count: int64 (1 to 105k, nullable) and the fork event min/max datetimes
content: string (1 to 1.03M) | avg_line_length: float64 (1 to 958k) | max_line_length: int64 (1 to 1.03M) | alphanum_fraction: float64 (0 to 1)
hexsha: 4a1a5f97d2b356e735d40f301af546f938382c01 | size: 9,684 | ext: py | lang: Python
path: egg/zoo/systematicity/calculate_TRE.py | repo: XeniaOhmer/SystematicRepresentations | head_hexsha: 825208d1be659dc820e61f577cdb53afc47302f4 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from egg.zoo.systematicity.metrics.tre import *
from typing import Iterable, Type
import torch
import os
from abc import ABC
import argparse
import copy
import pickle
def get_protocol(interaction, vocab_size):
sender_in = interaction.sender_input.cpu()
n_atts = int(sum(sender_in[0]))
n_vals = int(len(sender_in[0]) // n_atts)
messages = interaction.message[:, :-1].cpu() - 1
k_hot_messages = []
for m in messages:
k_hot_messages.append(torch.nn.functional.one_hot(
m, num_classes=vocab_size).reshape(-1))
k_hot_messages = torch.stack(k_hot_messages, dim=0)
derivations = []
for att in range(n_atts):
derivations.append(torch.argmax(sender_in[:, att * n_vals:(att + 1) * n_vals], dim=1))
derivations = torch.stack(derivations, dim=1)
protocol = {}
for i, derivation in enumerate(derivations):
protocol[tuple([torch.unsqueeze(elem, dim=0) for elem in derivation])] = k_hot_messages[i]
return protocol
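# Note (descriptive, based on the code above): get_protocol returns a dict mapping each
# derivation (a tuple of one-element tensors, one per attribute, holding that attribute's
# value index) to the corresponding message flattened into a k-hot vector of length
# (number of message tokens) * vocab_size.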
def get_name(atts, vals, vs, ml, seed):
name = ('atts' + str(atts) + '_vals' + str(vals) + '_vs' + str(vs) + '_len' + str(ml) + '/seed' +
str(seed) + '/')
return name
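# Example (illustrative arguments): get_name(2, 50, 10, 3, 0)
# returns 'atts2_vals50_vs10_len3/seed0/'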
class TreeReconstructionError(ABC):
def __init__(
self,
num_concepts: int,
message_length: int,
vocab_size: int,
composition_fn: Type[CompositionFunction],
weight_decay=1e-1,
lr=1e-3,
early_stopping=True
):
self.num_concepts = num_concepts
self.message_length = message_length
self.composition_fn = composition_fn
self.weight_decay = weight_decay
self.vocab_size = vocab_size
self.learning_rate = lr
if early_stopping:
self.patience = 50
else:
self.patience = 1000
def measure(self, interaction) -> (float, float):
protocol = get_protocol(interaction, self.vocab_size)
objective = Objective(
num_concepts=self.num_concepts,
vocab_size=self.vocab_size,
message_length=self.message_length,
composition_fn=self.composition_fn(representation_size=self.message_length * self.vocab_size),
loss_fn=MultipleCrossEntropyLoss(representation_size=self.message_length * self.vocab_size,
message_length=self.message_length)
)
error_train, error_val, objective_final, objective_es, epoch_es = self._train_model(
messages=list(protocol.values()),
derivations=list(protocol.keys()),
objective=objective,
optimizer=torch.optim.Adam(objective.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay),
n_epochs=1000
)
return error_train, error_val, objective_final, objective_es, epoch_es
def evaluate(self, interaction, trained_objective) -> (float, float):
protocol = get_protocol(interaction, self.vocab_size)
messages = protocol.values()
derivations = protocol.keys()
with torch.no_grad():
errors = [trained_objective(message, derivation) for message, derivation in zip(messages, derivations)]
return torch.mean(torch.tensor(errors)).item()
def _train_model(
self,
messages: Iterable[torch.Tensor],
derivations: Iterable[torch.Tensor],
objective: torch.nn.Module,
optimizer: torch.optim.Optimizer,
n_epochs: int,
quiet: bool = False
) -> (float, float):
collect_error_train = []
collect_error_val = []
n_samples = len(messages)
n_train = int(round(n_samples * 0.9))
messages_train = messages[:n_train]
messages_val = messages[n_train:]
derivations_train = derivations[:n_train]
derivations_val = derivations[n_train:]
patience_count = 0
min_val_error = 1e10
early_stopping_flag = False
for t in range(n_epochs):
if patience_count == self.patience:
early_stopping_flag = True
optimizer.zero_grad()
errors = [objective(message, derivation) for message, derivation in zip(messages_train, derivations_train)]
loss = sum(errors)
loss.backward()
optimizer.step()
mean_train_loss = torch.mean(torch.tensor(errors)).item()
collect_error_train.append(mean_train_loss)
with torch.no_grad():
errors_val = [objective(message, derivation) for message, derivation
in zip(messages_val, derivations_val)]
mean_val_loss = torch.mean(torch.tensor(errors_val)).item()
collect_error_val.append(mean_val_loss)
if (mean_val_loss < min_val_error) and (early_stopping_flag is False):
min_val_error = mean_val_loss
patience_count = 0
min_val_objective = copy.deepcopy(objective)
min_val_epoch = t
elif early_stopping_flag is False:
patience_count += 1
if (t == n_epochs - 1) and (early_stopping_flag is False):
min_val_epoch = t-1
min_val_objective = copy.deepcopy(objective)
if not quiet and t % 50 == 0:
print(f'Training loss at epoch {t} is {mean_train_loss:.4f}',
f'Validation loss at epoch {t} is {mean_val_loss:.4f}')
return collect_error_train, collect_error_val, objective, min_val_objective, min_val_epoch
def main(n_atts, n_vals, prefix, composition_fn):
modes = ['test', 'generalization_hold_out', 'uniform_holdout']
    if composition_fn == 'linear':
        composition_function = LinearComposition
    elif composition_fn == 'mlp':
        composition_function = MLPComposition
    else:
        # the original try/except UnboundLocalError could never trigger here; fail fast instead
        raise ValueError('Invalid composition function provided')
for message_length in [3, 4, 6, 8]:
for vocab_size in [10, 50, 100]:
for seed_orig in range(3):
print(composition_fn, "values", n_vals, "vs", vocab_size, "ml", message_length, seed_orig)
path = (prefix + 'egg/zoo/systematicity/results/' +
get_name(n_atts, n_vals, vocab_size, message_length, seed_orig))
try:
interaction_paths = {}
for mode in modes:
interaction_paths[mode] = path + 'interactions/' + mode + '/'
interactions = {}
for mode in modes:
for filename in os.listdir(interaction_paths[mode]):
interactions[mode] = torch.load(
interaction_paths[mode] + filename + '/interaction_gpu0')
except FileNotFoundError:
continue
NUM_SEEDS = 1
tre_errors = {}
for seed in range(NUM_SEEDS):
tre_errors['seed' + str(seed)] = {}
TRE = TreeReconstructionError(n_atts * n_vals, message_length, vocab_size,
composition_function)
error_train, error_val, objective, ES_objective, ES_epoch = TRE.measure(interactions['test'])
print('mean error train', error_train[-1], 'mean_error val', error_val[-1])
tre_errors['seed' + str(seed)]['training_mean'] = error_train
tre_errors['seed' + str(seed)]['validation_mean'] = error_val
tre_errors['seed' + str(seed)]['early_stopping_epoch'] = ES_epoch
error_gen_holdout = TRE.evaluate(interactions['generalization_hold_out'], objective)
tre_errors['seed' + str(seed)]['generalization_holdout_mean'] = error_gen_holdout
error_gen_holdout = TRE.evaluate(interactions['generalization_hold_out'], ES_objective)
tre_errors['seed' + str(seed)]['generalization_holdout_mean_es'] = error_gen_holdout
error_uniform_holdout = TRE.evaluate(interactions['uniform_holdout'], objective)
tre_errors['seed' + str(seed)]['uniform_holdout_mean'] = error_uniform_holdout
print('generalization error', error_uniform_holdout)
error_uniform_holdout = TRE.evaluate(interactions['uniform_holdout'], ES_objective)
tre_errors['seed' + str(seed)]['uniform_holdout_mean_es'] = error_uniform_holdout
print('generalization error es', error_uniform_holdout)
if not os.path.exists(path + 'tre/'):
os.makedirs(path + 'tre/')
pickle.dump(tre_errors, open(path + 'tre/tre_' + composition_fn + '.pkl', 'wb'))
torch.save(objective, open(path + 'tre/tre_objective_' + composition_fn + '.pt', 'wb'))
                # save the early-stopping objective, not the final one, under the "_es_" name
                torch.save(ES_objective, open(path + 'tre/tre_objective_es_' + composition_fn + '.pt', 'wb'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--n_attributes", type=int, default=2)
parser.add_argument("--n_values", type=int, default=50)
parser.add_argument("--composition_fn", type=str, default='mlp')
parser.add_argument("--prefix", type=str, default='C:/Users/Xenia/PycharmProjects/SystematicRepresentations/')
args = parser.parse_args()
main(args.n_attributes, args.n_values, args.prefix, args.composition_fn)
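# Usage sketch (paths are illustrative; the default --prefix above is the original author's
# local checkout):
#   python calculate_TRE.py --n_attributes 2 --n_values 50 --composition_fn mlp \
#       --prefix /path/to/SystematicRepresentations/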
avg_line_length: 43.426009 | max_line_length: 119 | alphanum_fraction: 0.604709

hexsha: 4a1a60d958da7b3e4e95656f042d140f590977ac | size: 3,476 | ext: py | lang: Python
path: salt/states/keystone_group.py | repo: ipmb/salt | head_hexsha: 699912ef9cde28040378aa53d6c7a12d8af756b1 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
'''
Management of OpenStack Keystone Groups
=======================================
.. versionadded:: Oxygen
:depends: shade
:configuration: see :py:mod:`salt.modules.keystoneng` for setup instructions
Example States
.. code-block:: yaml
create group:
keystone_group.present:
- name: group1
delete group:
keystone_group.absent:
- name: group1
create group with optional params:
keystone_group.present:
- name: group1
- domain: domain1
- description: 'my group'
'''
from __future__ import absolute_import
__virtualname__ = 'keystone_group'
def __virtual__():
if 'keystoneng.group_get' in __salt__:
return __virtualname__
return (False, 'The keystoneng execution module failed to load: shade python module is not available')
def _common(kwargs):
'''
Returns: None if group wasn't found, otherwise a group object
'''
search_kwargs = {'name': kwargs['name']}
if 'domain' in kwargs:
domain = __salt__['keystoneng.get_entity'](
'domain', name=kwargs.pop('domain'))
domain_id = domain.id if hasattr(domain, 'id') else domain
search_kwargs['filters'] = {'domain_id': domain_id}
kwargs['domain'] = domain
return __salt__['keystoneng.group_get'](**search_kwargs)
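# Illustrative example: _common({'name': 'group1', 'domain': 'domain1'}) resolves the domain
# via keystoneng.get_entity, then calls
# keystoneng.group_get(name='group1', filters={'domain_id': <resolved id>});
# kwargs['domain'] is replaced with the resolved domain object for later create/update calls.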
def present(name, auth=None, **kwargs):
'''
    Ensure a group exists and is up-to-date
name
Name of the group
domain
The name or id of the domain
description
An arbitrary description of the group
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
__salt__['keystoneng.setup_cloud'](auth)
kwargs['name'] = name
group = _common(kwargs)
if group is None:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = kwargs
ret['pchanges'] = ret['changes']
ret['comment'] = 'Group will be created.'
return ret
group = __salt__['keystoneng.group_create'](**kwargs)
ret['changes'] = group
ret['comment'] = 'Created group'
return ret
changes = __salt__['keystoneng.compare_changes'](group, **kwargs)
if changes:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = changes
ret['pchanges'] = ret['changes']
ret['comment'] = 'Group will be updated.'
return ret
__salt__['keystoneng.group_update'](**kwargs)
ret['changes'].update(changes)
ret['comment'] = 'Updated group'
return ret
def absent(name, auth=None, **kwargs):
'''
Ensure group does not exist
name
Name of the group
domain
The name or id of the domain
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
__salt__['keystoneng.setup_cloud'](auth)
kwargs['name'] = name
group = _common(kwargs)
if group:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = {'id': group.id}
ret['pchanges'] = ret['changes']
ret['comment'] = 'Group will be deleted.'
return ret
__salt__['keystoneng.group_delete'](name=group)
ret['changes']['id'] = group.id
ret['comment'] = 'Deleted group'
return ret
avg_line_length: 24.652482 | max_line_length: 106 | alphanum_fraction: 0.569908

hexsha: 4a1a61298fd488865f439c918e78abc7d1a64a35 | size: 1,980 | ext: py | lang: Python
path: users/migrations/0010_auto_20200418_2304.py | repo: andywar65/rp_repo | head_hexsha: 726c1426d738b962cabeabd8995aa35767df0c41 | licenses: ["BSD-2-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Generated by Django 3.0.5 on 2020-04-18 21:04
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0009_profile_is_trusted'),
]
operations = [
migrations.AddField(
model_name='profile',
name='mc_expiry',
field=models.DateField(blank=True, null=True, verbose_name='Scadenza CM/CMA'),
),
migrations.AddField(
model_name='profile',
name='mc_state',
field=models.CharField(blank=True, choices=[('0-NF', 'Manca il file'), ('1-VF', 'Verifica file'), ('2-RE', 'Regolare'), ('6-IS', 'In scadenza'), ('3-SV', 'Scaduto, da verificare'), ('4-SI', 'Scaduto, inviare notifica'), ('5-NI', 'Scaduto, notifica inviata')], max_length=4, null=True, verbose_name='Stato del CM/CMA'),
),
migrations.AddField(
model_name='profile',
name='membership',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Tessera'),
),
migrations.AddField(
model_name='profile',
name='settled',
field=models.CharField(blank=True, choices=[('VI', 'Verifica importo totale'), ('YES', 'A posto'), ('NO', 'No!')], max_length=4, null=True, verbose_name='In regola?'),
),
migrations.AddField(
model_name='profile',
name='total_amount',
field=models.FloatField(default=0.0, verbose_name='Importo totale'),
),
migrations.AlterField(
model_name='profile',
name='is_trusted',
field=models.BooleanField(default=False, verbose_name='Di fiducia'),
),
migrations.AlterField(
model_name='usermessage',
name='attachment',
field=models.FileField(blank=True, null=True, upload_to=users.models.user_directory_path, verbose_name='Allegato'),
),
]
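# To apply this migration in a standard Django project (command assumes manage.py at the
# project root; "users" is the app label above):
#   python manage.py migrate users 0010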
avg_line_length: 39.6 | max_line_length: 330 | alphanum_fraction: 0.589899

hexsha: 4a1a6239b22d83eef3de71706341aa33456278c0 | size: 4,119 | ext: py | lang: Python
path: train.py | repo: iShohei220/svm | head_hexsha: 738e350f93228865ed423a9bbf661f9c182bd71f | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from SVM import SVM
np.random.seed(0)
def linear_data(N):
X_1_pos = np.random.rand(N)
X_2_pos = X_1_pos + np.random.normal(0, 0.3, N) + 0.5
X_pos = np.array([[x_1, x_2, 1] for x_1, x_2 in zip(X_1_pos, X_2_pos)])
X_1_neg = np.random.rand(N)
X_2_neg = X_1_neg + np.random.normal(0, 0.3, N) - 0.5
X_neg = np.array([[x_1, x_2, -1] for x_1, x_2 in zip(X_1_neg, X_2_neg)])
X = np.vstack([X_pos, X_neg])
np.random.shuffle(X)
y = X[:, 2]
X = X[:, :2]
return X, y
def sin_data(N):
X_1_pos = np.random.rand(N) * 4 * np.pi
X_2_pos = np.sin(X_1_pos) + np.random.normal(0, 0.4, N)
X_pos = np.array([[x_1, x_2, 1] for x_1, x_2 in zip(X_1_pos, X_2_pos)])
X_1_neg = np.random.rand(N) * 4 * np.pi
X_2_neg = np.sin(X_1_neg) + np.random.normal(0, 0.4, N) - 1.5
X_neg = np.array([[x_1, x_2, -1] for x_1, x_2 in zip(X_1_neg, X_2_neg)])
X = np.vstack([X_pos, X_neg])
np.random.shuffle(X)
y = X[:, 2]
X = X[:, :2]
return X, y
def train_test_split(X, y):
X_train, X_test = np.split(X, [int(len(X) * 0.8)])
y_train, y_test = np.split(y, [int(len(y) * 0.8)])
return X_train, X_test, y_train, y_test
def show_data(X, y):
X_pos = X[y==1]
X_neg = X[y==-1]
plt.plot(X_pos[:, 0], X_pos[:, 1], 'o', c='b')
plt.plot(X_neg[:, 0], X_neg[:, 1], 'o', c='r')
def show_boader(model, X):
X_border = np.linspace(min(X[:, 0]), max(X[:, 0]))
y_border = -(model.w[0] * X_border + model.b) / model.w[1]
plt.plot(X_border, y_border, c='y')
plt.show()
# Accuracy (fraction of correct predictions)
def score(y, y_pred):
true_idx = np.where(y_pred == 1)
TP = np.sum(y_pred[true_idx] == y[true_idx])
false_idx = np.where(y_pred == -1)
TN = np.sum(y_pred[false_idx] == y[false_idx])
return float(TP + TN) / len(y)
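# Worked example (illustrative values): y = [1, -1, 1, -1], y_pred = [1, 1, 1, -1]
# -> TP = 2 (indices 0 and 2), TN = 1 (index 3), score = (2 + 1) / 4 = 0.75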
# Training
def train(model, X_train, X_test, y_train, y_test):
    # Fit the model
    model.fit(X_train, y_train)
    # Predictions
    y_pred = model.predict(X_test)
    # Accuracy
    acc = model.score(y_test, y_pred)
    print('Accuracy: %.3f' % acc)
    print('Training time: %.3f' % model.elapsed_time)
return acc
def main():
    print('Linear data')
X, y = linear_data(500)
X_train, X_test, y_train, y_test = train_test_split(X, y)
    # Initialization
model = SVM(kernel='rbf')
    # Training
acc = train(model, X_train, X_test, y_train, y_test)
show_data(X_test, y_test)
show_boader(model, X_train)
    print('Experiment 2: non-linear data')
Ns = [50, 100, 500, 1000]
    print('Experiment 2-1: experiments with different kernels')
models = [SVM(kernel='rbf'), SVM(kernel='sigmoid'), SVM(kernel='linear')]
kernels = ['RBF', 'Sigmoid', 'Linear']
df_score = pd.DataFrame(index=Ns, columns=kernels)
df_time = pd.DataFrame(index=Ns, columns=kernels)
for N in Ns:
        print('Number of samples: %d' % N)
X, y = sin_data(N)
X_train, X_test, y_train, y_test = train_test_split(X, y)
show_data(X, y)
plt.show()
for model, kernel in zip(models, kernels):
print(kernel)
acc = train(model, X_train, X_test, y_train, y_test)
df_score.loc[N, kernel] = acc
df_time.loc[N, kernel] = model.elapsed_time
print(df_score)
print(df_time)
    df_score.to_csv('accuracy_per_kernel.csv')
    df_time.to_csv('training_time_per_kernel.csv')
    print('Experiment 2-2: experiments with different parameters')
X, y = sin_data(500)
X_train, X_test, y_train, y_test = train_test_split(X, y)
show_data(X, y)
plt.show()
Cs = [2**a for a in range(-2, 3)]
gammas = [2**a for a in range(-4, 2)]
df_score = pd.DataFrame(index=Cs, columns=gammas)
df_time = pd.DataFrame(index=Cs, columns=gammas)
for C in Cs:
for gamma in gammas:
print('C: %.2f, gamma: %.4f' % (C, gamma))
model = SVM(C=C, gamma=gamma)
            # Training
acc = train(model, X_train, X_test, y_train, y_test)
df_score.loc[C, gamma] = acc
df_time.loc[C, gamma] = model.elapsed_time
print(df_score)
print(df_time)
    df_score.to_csv('accuracy_per_parameter.csv')
    df_time.to_csv('training_time_per_parameter.csv')
if __name__ == '__main__':
main()
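# Note: the SVM class is imported from SVM.py (not included here). Based on its use above it
# is assumed to expose fit(X, y), predict(X), score(y, y_pred), the attributes w, b and
# elapsed_time, and constructor kwargs kernel, C and gamma, e.g. (illustrative values):
#   model = SVM(kernel='rbf', C=1.0, gamma=0.5)
#   model.fit(X_train, y_train)
#   acc = model.score(y_test, model.predict(X_test))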
avg_line_length: 32.179688 | max_line_length: 77 | alphanum_fraction: 0.584365

hexsha: 4a1a62b9e57ece83c844824e3f11933923a575ba | size: 3,172 | ext: py | lang: Python
path: cpo/lib/fyre/data/openshift_version_data.py | repo: IBM/data-gate-cli | head_hexsha: fc0cb1a560a0156c71eb63a550e198d0cd36e1df | licenses: ["Apache-2.0"]
max_stars_count: 9 (2020-08-21T08:46:34.000Z to 2021-09-02T15:47:41.000Z) | max_issues_count: 10 (2020-11-26T15:31:43.000Z to 2021-11-08T15:00:01.000Z) | max_forks_count: 1 (2022-03-10T07:14:49.000Z to 2022-03-10T07:14:49.000Z)
content:
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import click
import semver
from tabulate import tabulate
class OpenShiftVersionData:
def __init__(
self,
openshift_versions_p: List[semver.VersionInfo],
openshift_versions_x: List[semver.VersionInfo],
openshift_versions_z: List[semver.VersionInfo],
):
self._openshift_versions_p = openshift_versions_p
self._openshift_versions_x = openshift_versions_x
self._openshift_versions_z = openshift_versions_z
def format(self):
openshift_versions_union: List[semver.VersionInfo] = []
openshift_versions_union += self._openshift_versions_p
openshift_versions_union += self._openshift_versions_x
openshift_versions_union += self._openshift_versions_z
openshift_versions_union = list(dict.fromkeys(openshift_versions_union))
openshift_versions_union.sort()
openshift_versions_list: List[List[str]] = []
openshift_versions_list.append(
self._add_openshift_versions_list_element("p", openshift_versions_union, self._openshift_versions_p)
)
openshift_versions_list.append(
self._add_openshift_versions_list_element("x", openshift_versions_union, self._openshift_versions_x)
)
openshift_versions_list.append(
self._add_openshift_versions_list_element("z", openshift_versions_union, self._openshift_versions_z)
)
click.echo(
tabulate(
openshift_versions_list,
headers=["Platform"]
+ list(
map(
lambda openshift_version: str(openshift_version),
openshift_versions_union,
)
),
)
)
def get_openshift_versions_p(self) -> List[semver.VersionInfo]:
return self._openshift_versions_p
def get_openshift_versions_x(self) -> List[semver.VersionInfo]:
return self._openshift_versions_x
def get_openshift_versions_z(self) -> List[semver.VersionInfo]:
return self._openshift_versions_z
def _add_openshift_versions_list_element(
self,
platform: str,
openshift_versions_union: List[semver.VersionInfo],
openshift_versions: List[semver.VersionInfo],
):
openshift_version_list: List[str] = [platform]
for openshift_version in openshift_versions_union:
openshift_version_list.append("✓" if openshift_version in openshift_versions else "-")
return openshift_version_list
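# Usage sketch (illustrative version numbers):
#   import semver
#   data = OpenShiftVersionData(
#       openshift_versions_p=[semver.VersionInfo.parse("4.8.0")],
#       openshift_versions_x=[semver.VersionInfo.parse("4.8.0"), semver.VersionInfo.parse("4.9.0")],
#       openshift_versions_z=[],
#   )
#   data.format()  # prints one row per platform (p, x, z) with a check mark per supported version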
avg_line_length: 36.045455 | max_line_length: 112 | alphanum_fraction: 0.692308

hexsha: 4a1a63f80d29ab94150b21b7883017f177955c02 | size: 668 | ext: py | lang: Python
path: test/read_img.py | repo: ethan4335/pytorch-YOLOv4 | head_hexsha: 44f67130d83fc2949efb50afe67337735836169b | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'pytorch-YOLOv4'
__author__ = 'deagle'
__date__ = '11/15/2020 23:27'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
"""
import datetime
import cv2
img_path = 'D:/work_source/CV_Project/datasets/footbridge_20201111/train2/pic/IMG_1824_4260.jpg'
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
print(img)
def main():
print()
if __name__ == '__main__':
start_time = datetime.datetime.now()
main()
end_time = datetime.datetime.now()
time_cost = end_time - start_time
print(str(time_cost).split('.')[0])
avg_line_length: 22.266667 | max_line_length: 96 | alphanum_fraction: 0.697605

hexsha: 4a1a6409b635c40302aa1ee9c39aadbb8b4f2c1c | size: 901 | ext: py | lang: Python
path: 1/sjb.py | repo: xuegao-tzx/Python-exampe | head_hexsha: f757c2e3292e883826185a55f6a61f7127cc03bb | licenses: ["Apache-2.0"]
max_stars_count: 2 (2021-06-20T09:23:36.000Z to 2021-12-13T08:34:26.000Z) | max_issues_count: null | max_forks_count: null
content:
import random
import requests
import json
import datetime
from urllib import parse
# POST request that sends a data payload
head = {
    'Content-Type': 'the content type of the file being sent and the page encoding',
    'Accept-Encoding': 'gzip',  # sent by the browser to declare which encodings it supports
    'User-Agent': 'eg:Mozilla/5.0 (Linux; Harmony 2; ***-**** Build/HUAWEI***-****; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/83.0.4103.106 Mobile Safari/537.36'  # special header string that lets the server identify the client; not strictly required
}
def main():
    print("Start sending")
    try:
        sj = requests.post(
            url='your target URL',
            headers=head,
            data='the payload content you want to send'
        )
        # Build a dict from the response string to check the result
        res_map = json.loads(sj.text)
    except:
        print("Request error!")
        return False
    if res_map['code'] != '200':
        print(f"Send failed, error message: [{res_map['msg']}]")
        return False
    else:
        print("Sent successfully!")
        return True
# Written by TZX.
# 2021.5.20
avg_line_length: 25.742857 | max_line_length: 209 | alphanum_fraction: 0.570477

hexsha: 4a1a647c6129765555c2b3afe7458681fa4d7b0f | size: 33,390 | ext: py | lang: Python
path: tests/test_sns/test_publishing_boto3.py | repo: orenmazor/moto | head_hexsha: 4778377e8ecaf729d26602a2c5202b72c1438503 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: 4 (2017-09-30T07:52:52.000Z to 2021-12-13T06:56:55.000Z) | max_forks_count: 2 (2021-11-24T08:05:43.000Z to 2021-11-25T16:18:48.000Z)
content:
from __future__ import unicode_literals
import base64
import json
import boto3
import re
from freezegun import freeze_time
import sure # noqa
from botocore.exceptions import ClientError
import pytest
from moto import mock_sns, mock_sqs, settings
from moto.core import ACCOUNT_ID
from moto.core.models import responses_mock
from moto.sns import sns_backend
MESSAGE_FROM_SQS_TEMPLATE = (
'{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:'
+ ACCOUNT_ID
+ ':some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:'
+ ACCOUNT_ID
+ ':some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}'
)
@mock_sqs
@mock_sns
def test_publish_to_sqs():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_topic(Name="some-topic")
response = conn.list_topics()
topic_arn = response["Topics"][0]["TopicArn"]
sqs_conn = boto3.resource("sqs", region_name="us-east-1")
sqs_conn.create_queue(QueueName="test-queue")
conn.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID),
)
message = "my message"
with freeze_time("2015-01-01 12:00:00"):
published_message = conn.publish(TopicArn=topic_arn, Message=message)
published_message_id = published_message["MessageId"]
queue = sqs_conn.get_queue_by_name(QueueName="test-queue")
with freeze_time("2015-01-01 12:00:01"):
messages = queue.receive_messages(MaxNumberOfMessages=1)
expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, "us-east-1")
acquired_message = re.sub(
r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
"2015-01-01T12:00:00.000Z",
messages[0].body,
)
acquired_message.should.equal(expected)
@mock_sqs
@mock_sns
def test_publish_to_sqs_raw():
sns = boto3.resource("sns", region_name="us-east-1")
topic = sns.create_topic(Name="some-topic")
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName="test-queue")
subscription = topic.subscribe(
Protocol="sqs", Endpoint=queue.attributes["QueueArn"]
)
subscription.set_attributes(
AttributeName="RawMessageDelivery", AttributeValue="true"
)
message = "my message"
with freeze_time("2015-01-01 12:00:00"):
topic.publish(Message=message)
with freeze_time("2015-01-01 12:00:01"):
messages = queue.receive_messages(MaxNumberOfMessages=1)
messages[0].body.should.equal(message)
@mock_sqs
@mock_sns
def test_publish_to_sqs_bad():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_topic(Name="some-topic")
response = conn.list_topics()
topic_arn = response["Topics"][0]["TopicArn"]
sqs_conn = boto3.resource("sqs", region_name="us-east-1")
sqs_conn.create_queue(QueueName="test-queue")
conn.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID),
)
message = "my message"
try:
# Test missing Value
conn.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"store": {"DataType": "String"}},
)
except ClientError as err:
err.response["Error"]["Code"].should.equal("InvalidParameterValue")
try:
# Test empty DataType (if the DataType field is missing entirely
# botocore throws an exception during validation)
conn.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={
"store": {"DataType": "", "StringValue": "example_corp"}
},
)
except ClientError as err:
err.response["Error"]["Code"].should.equal("InvalidParameterValue")
try:
# Test empty Value
conn.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"store": {"DataType": "String", "StringValue": ""}},
)
except ClientError as err:
err.response["Error"]["Code"].should.equal("InvalidParameterValue")
try:
# Test Number DataType, with a non numeric value
conn.publish(
TopicArn=topic_arn,
Message=message,
MessageAttributes={"price": {"DataType": "Number", "StringValue": "error"}},
)
except ClientError as err:
err.response["Error"]["Code"].should.equal("InvalidParameterValue")
err.response["Error"]["Message"].should.equal(
"An error occurred (ParameterValueInvalid) when calling the Publish operation: Could not cast message attribute 'price' value to number."
)
@mock_sqs
@mock_sns
def test_publish_to_sqs_msg_attr_byte_value():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_topic(Name="some-topic")
response = conn.list_topics()
topic_arn = response["Topics"][0]["TopicArn"]
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName="test-queue")
conn.subscribe(
TopicArn=topic_arn, Protocol="sqs", Endpoint=queue.attributes["QueueArn"],
)
queue_raw = sqs.create_queue(QueueName="test-queue-raw")
conn.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_raw.attributes["QueueArn"],
Attributes={"RawMessageDelivery": "true"},
)
conn.publish(
TopicArn=topic_arn,
Message="my message",
MessageAttributes={
"store": {"DataType": "Binary", "BinaryValue": b"\x02\x03\x04"}
},
)
message = json.loads(queue.receive_messages()[0].body)
message["Message"].should.equal("my message")
message["MessageAttributes"].should.equal(
{
"store": {
"Type": "Binary",
"Value": base64.b64encode(b"\x02\x03\x04").decode(),
}
}
)
message = queue_raw.receive_messages()[0]
message.body.should.equal("my message")
@mock_sqs
@mock_sns
def test_publish_to_sqs_msg_attr_number_type():
sns = boto3.resource("sns", region_name="us-east-1")
topic = sns.create_topic(Name="test-topic")
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName="test-queue")
topic.subscribe(Protocol="sqs", Endpoint=queue.attributes["QueueArn"])
queue_raw = sqs.create_queue(QueueName="test-queue-raw")
topic.subscribe(
Protocol="sqs",
Endpoint=queue_raw.attributes["QueueArn"],
Attributes={"RawMessageDelivery": "true"},
)
topic.publish(
Message="test message",
MessageAttributes={"retries": {"DataType": "Number", "StringValue": "0"}},
)
message = json.loads(queue.receive_messages()[0].body)
message["Message"].should.equal("test message")
message["MessageAttributes"].should.equal(
{"retries": {"Type": "Number", "Value": 0}}
)
message = queue_raw.receive_messages()[0]
message.body.should.equal("test message")
@mock_sns
def test_publish_sms():
client = boto3.client("sns", region_name="us-east-1")
result = client.publish(PhoneNumber="+15551234567", Message="my message")
result.should.contain("MessageId")
if not settings.TEST_SERVER_MODE:
sns_backend.sms_messages.should.have.key(result["MessageId"]).being.equal(
("+15551234567", "my message")
)
@mock_sns
def test_publish_bad_sms():
client = boto3.client("sns", region_name="us-east-1")
# Test invalid number
with pytest.raises(ClientError) as cm:
client.publish(PhoneNumber="NAA+15551234567", Message="my message")
cm.value.response["Error"]["Code"].should.equal("InvalidParameter")
cm.value.response["Error"]["Message"].should.contain("not meet the E164")
# Test to long ASCII message
with pytest.raises(ClientError) as cm:
client.publish(PhoneNumber="+15551234567", Message="a" * 1601)
cm.value.response["Error"]["Code"].should.equal("InvalidParameter")
cm.value.response["Error"]["Message"].should.contain("must be less than 1600")
@mock_sqs
@mock_sns
def test_publish_to_sqs_dump_json():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_topic(Name="some-topic")
response = conn.list_topics()
topic_arn = response["Topics"][0]["TopicArn"]
sqs_conn = boto3.resource("sqs", region_name="us-east-1")
sqs_conn.create_queue(QueueName="test-queue")
conn.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID),
)
message = json.dumps(
{
"Records": [
{
"eventVersion": "2.0",
"eventSource": "aws:s3",
"s3": {"s3SchemaVersion": "1.0"},
}
]
},
sort_keys=True,
)
with freeze_time("2015-01-01 12:00:00"):
published_message = conn.publish(TopicArn=topic_arn, Message=message)
published_message_id = published_message["MessageId"]
queue = sqs_conn.get_queue_by_name(QueueName="test-queue")
with freeze_time("2015-01-01 12:00:01"):
messages = queue.receive_messages(MaxNumberOfMessages=1)
escaped = message.replace('"', '\\"')
expected = MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, "us-east-1")
acquired_message = re.sub(
r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
"2015-01-01T12:00:00.000Z",
messages[0].body,
)
acquired_message.should.equal(expected)
@mock_sqs
@mock_sns
def test_publish_to_sqs_in_different_region():
conn = boto3.client("sns", region_name="us-west-1")
conn.create_topic(Name="some-topic")
response = conn.list_topics()
topic_arn = response["Topics"][0]["TopicArn"]
sqs_conn = boto3.resource("sqs", region_name="us-west-2")
sqs_conn.create_queue(QueueName="test-queue")
conn.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint="arn:aws:sqs:us-west-2:{}:test-queue".format(ACCOUNT_ID),
)
message = "my message"
with freeze_time("2015-01-01 12:00:00"):
published_message = conn.publish(TopicArn=topic_arn, Message=message)
published_message_id = published_message["MessageId"]
queue = sqs_conn.get_queue_by_name(QueueName="test-queue")
with freeze_time("2015-01-01 12:00:01"):
messages = queue.receive_messages(MaxNumberOfMessages=1)
expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, "us-west-1")
acquired_message = re.sub(
r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
"2015-01-01T12:00:00.000Z",
messages[0].body,
)
acquired_message.should.equal(expected)
@freeze_time("2013-01-01")
@mock_sns
def test_publish_to_http():
def callback(request):
request.headers["Content-Type"].should.equal("text/plain; charset=UTF-8")
json.loads.when.called_with(request.body.decode()).should_not.throw(Exception)
return 200, {}, ""
responses_mock.add_callback(
method="POST", url="http://example.com/foobar", callback=callback
)
conn = boto3.client("sns", region_name="us-east-1")
conn.create_topic(Name="some-topic")
response = conn.list_topics()
topic_arn = response["Topics"][0]["TopicArn"]
conn.subscribe(
TopicArn=topic_arn, Protocol="http", Endpoint="http://example.com/foobar"
)
response = conn.publish(
TopicArn=topic_arn, Message="my message", Subject="my subject"
)
@mock_sqs
@mock_sns
def test_publish_subject():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_topic(Name="some-topic")
response = conn.list_topics()
topic_arn = response["Topics"][0]["TopicArn"]
sqs_conn = boto3.resource("sqs", region_name="us-east-1")
sqs_conn.create_queue(QueueName="test-queue")
conn.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID),
)
message = "my message"
subject1 = "test subject"
subject2 = "test subject" * 20
with freeze_time("2015-01-01 12:00:00"):
conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1)
    # Just that it doesn't error is a pass
try:
with freeze_time("2015-01-01 12:00:00"):
conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2)
except ClientError as err:
err.response["Error"]["Code"].should.equal("InvalidParameter")
else:
raise RuntimeError("Should have raised an InvalidParameter exception")
@mock_sns
def test_publish_message_too_long():
sns = boto3.resource("sns", region_name="us-east-1")
topic = sns.create_topic(Name="some-topic")
with pytest.raises(ClientError):
topic.publish(Message="".join(["." for i in range(0, 262145)]))
# message short enough - does not raise an error
topic.publish(Message="".join(["." for i in range(0, 262144)]))
def _setup_filter_policy_test(filter_policy):
sns = boto3.resource("sns", region_name="us-east-1")
topic = sns.create_topic(Name="some-topic")
sqs = boto3.resource("sqs", region_name="us-east-1")
queue = sqs.create_queue(QueueName="test-queue")
subscription = topic.subscribe(
Protocol="sqs", Endpoint=queue.attributes["QueueArn"]
)
subscription.set_attributes(
AttributeName="FilterPolicy", AttributeValue=json.dumps(filter_policy)
)
return topic, subscription, queue
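# For example, _setup_filter_policy_test({"store": ["example_corp"]}) attaches the
# FilterPolicy {"store": ["example_corp"]} to the SQS subscription; a publish with
# MessageAttributes={"store": {"DataType": "String", "StringValue": "example_corp"}}
# is delivered, while any other value for "store" is filtered out (see the tests below).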
@mock_sqs
@mock_sns
def test_filtering_exact_string():
topic, subscription, queue = _setup_filter_policy_test({"store": ["example_corp"]})
topic.publish(
Message="match",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[{"store": {"Type": "String", "Value": "example_corp"}}]
)
@mock_sqs
@mock_sns
def test_filtering_exact_string_multiple_message_attributes():
topic, subscription, queue = _setup_filter_policy_test({"store": ["example_corp"]})
topic.publish(
Message="match",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"},
"event": {"DataType": "String", "StringValue": "order_cancelled"},
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[
{
"store": {"Type": "String", "Value": "example_corp"},
"event": {"Type": "String", "Value": "order_cancelled"},
}
]
)
@mock_sqs
@mock_sns
def test_filtering_exact_string_OR_matching():
topic, subscription, queue = _setup_filter_policy_test(
{"store": ["example_corp", "different_corp"]}
)
topic.publish(
Message="match example_corp",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"}
},
)
topic.publish(
Message="match different_corp",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "different_corp"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match example_corp", "match different_corp"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[
{"store": {"Type": "String", "Value": "example_corp"}},
{"store": {"Type": "String", "Value": "different_corp"}},
]
)
@mock_sqs
@mock_sns
def test_filtering_exact_string_AND_matching_positive():
topic, subscription, queue = _setup_filter_policy_test(
{"store": ["example_corp"], "event": ["order_cancelled"]}
)
topic.publish(
Message="match example_corp order_cancelled",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"},
"event": {"DataType": "String", "StringValue": "order_cancelled"},
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match example_corp order_cancelled"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[
{
"store": {"Type": "String", "Value": "example_corp"},
"event": {"Type": "String", "Value": "order_cancelled"},
}
]
)
@mock_sqs
@mock_sns
def test_filtering_exact_string_AND_matching_no_match():
topic, subscription, queue = _setup_filter_policy_test(
{"store": ["example_corp"], "event": ["order_cancelled"]}
)
topic.publish(
Message="match example_corp order_accepted",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"},
"event": {"DataType": "String", "StringValue": "order_accepted"},
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_exact_string_no_match():
topic, subscription, queue = _setup_filter_policy_test({"store": ["example_corp"]})
topic.publish(
Message="no match",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "different_corp"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_exact_string_no_attributes_no_match():
topic, subscription, queue = _setup_filter_policy_test({"store": ["example_corp"]})
topic.publish(Message="no match")
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_exact_number_int():
topic, subscription, queue = _setup_filter_policy_test({"price": [100]})
topic.publish(
Message="match",
MessageAttributes={"price": {"DataType": "Number", "StringValue": "100"}},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([{"price": {"Type": "Number", "Value": 100}}])
@mock_sqs
@mock_sns
def test_filtering_exact_number_float():
topic, subscription, queue = _setup_filter_policy_test({"price": [100.1]})
topic.publish(
Message="match",
MessageAttributes={"price": {"DataType": "Number", "StringValue": "100.1"}},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([{"price": {"Type": "Number", "Value": 100.1}}])
@mock_sqs
@mock_sns
def test_filtering_exact_number_float_accuracy():
topic, subscription, queue = _setup_filter_policy_test({"price": [100.123456789]})
topic.publish(
Message="match",
MessageAttributes={
"price": {"DataType": "Number", "StringValue": "100.1234561"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[{"price": {"Type": "Number", "Value": 100.1234561}}]
)
@mock_sqs
@mock_sns
def test_filtering_exact_number_no_match():
topic, subscription, queue = _setup_filter_policy_test({"price": [100]})
topic.publish(
Message="no match",
MessageAttributes={"price": {"DataType": "Number", "StringValue": "101"}},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_exact_number_with_string_no_match():
topic, subscription, queue = _setup_filter_policy_test({"price": [100]})
topic.publish(
Message="no match",
MessageAttributes={"price": {"DataType": "String", "StringValue": "100"}},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_string_array_match():
topic, subscription, queue = _setup_filter_policy_test(
{"customer_interests": ["basketball", "baseball"]}
)
topic.publish(
Message="match",
MessageAttributes={
"customer_interests": {
"DataType": "String.Array",
"StringValue": json.dumps(["basketball", "rugby"]),
}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[
{
"customer_interests": {
"Type": "String.Array",
"Value": json.dumps(["basketball", "rugby"]),
}
}
]
)
@mock_sqs
@mock_sns
def test_filtering_string_array_no_match():
topic, subscription, queue = _setup_filter_policy_test(
{"customer_interests": ["baseball"]}
)
topic.publish(
Message="no_match",
MessageAttributes={
"customer_interests": {
"DataType": "String.Array",
"StringValue": json.dumps(["basketball", "rugby"]),
}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_string_array_with_number_match():
topic, subscription, queue = _setup_filter_policy_test({"price": [100, 500]})
topic.publish(
Message="match",
MessageAttributes={
"price": {"DataType": "String.Array", "StringValue": json.dumps([100, 50])}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[{"price": {"Type": "String.Array", "Value": json.dumps([100, 50])}}]
)
@mock_sqs
@mock_sns
def test_filtering_string_array_with_number_float_accuracy_match():
topic, subscription, queue = _setup_filter_policy_test(
{"price": [100.123456789, 500]}
)
topic.publish(
Message="match",
MessageAttributes={
"price": {
"DataType": "String.Array",
"StringValue": json.dumps([100.1234561, 50]),
}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[{"price": {"Type": "String.Array", "Value": json.dumps([100.1234561, 50])}}]
)
@mock_sqs
@mock_sns
# this is the correct behavior from SNS
def test_filtering_string_array_with_number_no_array_match():
topic, subscription, queue = _setup_filter_policy_test({"price": [100, 500]})
topic.publish(
Message="match",
MessageAttributes={"price": {"DataType": "String.Array", "StringValue": "100"}},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[{"price": {"Type": "String.Array", "Value": "100"}}]
)
@mock_sqs
@mock_sns
def test_filtering_string_array_with_number_no_match():
topic, subscription, queue = _setup_filter_policy_test({"price": [500]})
topic.publish(
Message="no_match",
MessageAttributes={
"price": {"DataType": "String.Array", "StringValue": json.dumps([100, 50])}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
# this is the correct behavior from SNS
def test_filtering_string_array_with_string_no_array_no_match():
topic, subscription, queue = _setup_filter_policy_test({"price": [100]})
topic.publish(
Message="no_match",
MessageAttributes={
"price": {"DataType": "String.Array", "StringValue": "one hundred"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_attribute_key_exists_match():
topic, subscription, queue = _setup_filter_policy_test(
{"store": [{"exists": True}]}
)
topic.publish(
Message="match",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[{"store": {"Type": "String", "Value": "example_corp"}}]
)
@mock_sqs
@mock_sns
def test_filtering_attribute_key_exists_no_match():
topic, subscription, queue = _setup_filter_policy_test(
{"store": [{"exists": True}]}
)
topic.publish(
Message="no match",
MessageAttributes={
"event": {"DataType": "String", "StringValue": "order_cancelled"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_attribute_key_not_exists_match():
topic, subscription, queue = _setup_filter_policy_test(
{"store": [{"exists": False}]}
)
topic.publish(
Message="match",
MessageAttributes={
"event": {"DataType": "String", "StringValue": "order_cancelled"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[{"event": {"Type": "String", "Value": "order_cancelled"}}]
)
@mock_sqs
@mock_sns
def test_filtering_attribute_key_not_exists_no_match():
topic, subscription, queue = _setup_filter_policy_test(
{"store": [{"exists": False}]}
)
topic.publish(
Message="no match",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"}
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
@mock_sqs
@mock_sns
def test_filtering_all_AND_matching_match():
topic, subscription, queue = _setup_filter_policy_test(
{
"store": [{"exists": True}],
"event": ["order_cancelled"],
"customer_interests": ["basketball", "baseball"],
"price": [100],
}
)
topic.publish(
Message="match",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"},
"event": {"DataType": "String", "StringValue": "order_cancelled"},
"customer_interests": {
"DataType": "String.Array",
"StringValue": json.dumps(["basketball", "rugby"]),
},
"price": {"DataType": "Number", "StringValue": "100"},
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal(["match"])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal(
[
{
"store": {"Type": "String", "Value": "example_corp"},
"event": {"Type": "String", "Value": "order_cancelled"},
"customer_interests": {
"Type": "String.Array",
"Value": json.dumps(["basketball", "rugby"]),
},
"price": {"Type": "Number", "Value": 100},
}
]
)
@mock_sqs
@mock_sns
def test_filtering_all_AND_matching_no_match():
topic, subscription, queue = _setup_filter_policy_test(
{
"store": [{"exists": True}],
"event": ["order_cancelled"],
"customer_interests": ["basketball", "baseball"],
"price": [100],
"encrypted": [False],
}
)
topic.publish(
Message="no match",
MessageAttributes={
"store": {"DataType": "String", "StringValue": "example_corp"},
"event": {"DataType": "String", "StringValue": "order_cancelled"},
"customer_interests": {
"DataType": "String.Array",
"StringValue": json.dumps(["basketball", "rugby"]),
},
"price": {"DataType": "Number", "StringValue": "100"},
},
)
messages = queue.receive_messages(MaxNumberOfMessages=5)
message_bodies = [json.loads(m.body)["Message"] for m in messages]
message_bodies.should.equal([])
message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages]
message_attributes.should.equal([])
avg_line_length: 33.490471 | max_line_length: 496 | alphanum_fraction: 0.645343

hexsha: 4a1a65a3405fabd6e5dc6ce3a56a8139491f5eb4 | size: 11,986 | ext: py | lang: Python
path: custom_components/alexa_media/alexa_entity.py | repo: JoshuaGarrison27/Home-Assistant-Configuration | head_hexsha: 18d1b7df7d15400008bc93c40d7f0fb5babc90fa | licenses: ["MIT"]
max_stars_count: 27 (2018-10-13T10:00:53.000Z to 2022-02-07T23:33:12.000Z) | max_issues_count: 3 (2018-10-03T03:13:21.000Z to 2019-11-11T22:16:26.000Z) | max_forks_count: 5 (2019-06-01T10:27:37.000Z to 2020-09-18T14:14:56.000Z)
content:
"""
Alexa Devices Sensors.
SPDX-License-Identifier: Apache-2.0
For more details about this platform, please refer to the documentation at
https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers-needed/58639
"""
from datetime import datetime
import json
import logging
import re
from typing import Any, Dict, List, Optional, Text, Tuple, TypedDict, Union
from alexapy import AlexaAPI, AlexaLogin, hide_serial
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
def has_capability(
appliance: Dict[Text, Any], interface_name: Text, property_name: Text
) -> bool:
"""Determine if an appliance from the Alexa network details offers a particular interface with enough support that is worth adding to Home Assistant.
Args:
appliance(Dict[Text, Any]): An appliance from a call to AlexaAPI.get_network_details
interface_name(Text): One of the interfaces documented by the Alexa Smart Home Skills API
property_name(Text): The property that matches the interface name.
"""
for cap in appliance["capabilities"]:
props = cap.get("properties")
if (
cap["interfaceName"] == interface_name
and props
and (props["retrievable"] or props["proactivelyReported"])
):
for prop in props["supported"]:
if prop["name"] == property_name:
return True
return False
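# Illustrative: has_capability(appliance, "Alexa.PowerController", "powerState") is True
# when appliance["capabilities"] contains an entry such as
# {"interfaceName": "Alexa.PowerController",
#  "properties": {"retrievable": True, "proactivelyReported": False,
#                 "supported": [{"name": "powerState"}]}}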
def is_hue_v1(appliance: Dict[Text, Any]) -> bool:
"""Determine if an appliance is managed via the Philips Hue v1 Hub.
This check catches old Philips Hue bulbs and hubs, but critically, it also catches things pretending to be older
Philips Hue bulbs and hubs. This includes things exposed by HA to Alexa using the emulated_hue integration.
"""
return appliance.get("manufacturerName") == "Royal Philips Electronics"
def is_local(appliance: Dict[Text, Any]) -> bool:
"""Test whether locally connected.
This is mainly present to prevent loops with the official Alexa integration.
There is probably a better way to prevent that, but this works.
"""
if appliance.get("connectedVia"):
# connectedVia is a flag that determines which Echo devices holds the connection. Its blank for
# skill derived devices and includes an Echo name for zigbee and local devices.
return True
# This catches the Echo/AVS devices. connectedVia isn't reliable in this case.
# Only the first appears to get that set.
if "ALEXA_VOICE_ENABLED" in appliance.get("applianceTypes", []):
namespace = appliance.get("driverIdentity", {}).get("namespace", "")
return namespace and namespace != "SKILL"
# Zigbee devices are guaranteed to be local and have a particular pattern of id
zigbee_pattern = re.compile(
"AAA_SonarCloudService_([0-9A-F][0-9A-F]:){7}[0-9A-F][0-9A-F]", flags=re.I
)
return zigbee_pattern.fullmatch(appliance.get("applianceId", "")) is not None
def is_alexa_guard(appliance: Dict[Text, Any]) -> bool:
"""Is the given appliance the guard alarm system of an echo."""
return appliance["modelName"] == "REDROCK_GUARD_PANEL" and has_capability(
appliance, "Alexa.SecurityPanelController", "armState"
)
def is_temperature_sensor(appliance: Dict[Text, Any]) -> bool:
"""Is the given appliance the temperature sensor of an Echo."""
return is_local(appliance) and has_capability(
appliance, "Alexa.TemperatureSensor", "temperature"
)
def is_light(appliance: Dict[Text, Any]) -> bool:
"""Is the given appliance a light controlled locally by an Echo."""
return (
is_local(appliance)
and "LIGHT" in appliance["applianceTypes"]
and has_capability(appliance, "Alexa.PowerController", "powerState")
)
def get_friendliest_name(appliance: Dict[Text, Any]) -> Text:
"""Find the best friendly name. Alexa seems to store manual renames in aliases. Prefer that one."""
aliases = appliance.get("aliases", [])
for alias in aliases:
friendly = alias.get("friendlyName")
if friendly:
return friendly
return appliance["friendlyName"]
def get_device_serial(appliance: Dict[Text, Any]) -> Optional[Text]:
"""Find the device serial id if it is present."""
alexa_device_id_list = appliance.get("alexaDeviceIdentifierList", [])
for alexa_device_id in alexa_device_id_list:
if isinstance(alexa_device_id, dict):
return alexa_device_id.get("dmsDeviceSerialNumber")
return None
class AlexaEntity(TypedDict):
"""Class for Alexaentity."""
id: Text
appliance_id: Text
name: Text
is_hue_v1: bool
class AlexaLightEntity(AlexaEntity):
"""Class for AlexaLightEntity."""
brightness: bool
color: bool
color_temperature: bool
class AlexaTemperatureEntity(AlexaEntity):
"""Class for AlexaTemperatureEntity."""
device_serial: Text
class AlexaEntities(TypedDict):
"""Class for holding entities."""
light: List[AlexaLightEntity]
guard: List[AlexaEntity]
temperature: List[AlexaTemperatureEntity]
def parse_alexa_entities(network_details: Optional[Dict[Text, Any]]) -> AlexaEntities:
"""Turn the network details into a list of useful entities with the important details extracted."""
lights = []
guards = []
temperature_sensors = []
location_details = network_details["locationDetails"]["locationDetails"]
for location in location_details.values():
amazon_bridge_details = location["amazonBridgeDetails"]["amazonBridgeDetails"]
for bridge in amazon_bridge_details.values():
appliance_details = bridge["applianceDetails"]["applianceDetails"]
for appliance in appliance_details.values():
processed_appliance = {
"id": appliance["entityId"],
"appliance_id": appliance["applianceId"],
"name": get_friendliest_name(appliance),
"is_hue_v1": is_hue_v1(appliance),
}
if is_alexa_guard(appliance):
guards.append(processed_appliance)
elif is_temperature_sensor(appliance):
serial = get_device_serial(appliance)
processed_appliance["device_serial"] = (
serial if serial else appliance["entityId"]
)
temperature_sensors.append(processed_appliance)
elif is_light(appliance):
processed_appliance["brightness"] = has_capability(
appliance, "Alexa.BrightnessController", "brightness"
)
processed_appliance["color"] = has_capability(
appliance, "Alexa.ColorController", "color"
)
processed_appliance["color_temperature"] = has_capability(
appliance,
"Alexa.ColorTemperatureController",
"colorTemperatureInKelvin",
)
lights.append(processed_appliance)
return {"light": lights, "guard": guards, "temperature": temperature_sensors}
class AlexaCapabilityState(TypedDict):
"""Class for AlexaCapabilityState."""
name: Text
namespace: Text
value: Union[int, Text, TypedDict]
AlexaEntityData = Dict[Text, List[AlexaCapabilityState]]
async def get_entity_data(
login_obj: AlexaLogin, entity_ids: List[Text]
) -> AlexaEntityData:
"""Get and process the entity data into a more usable format."""
raw = await AlexaAPI.get_entity_state(login_obj, entity_ids=entity_ids)
entities = {}
device_states = raw.get("deviceStates")
if device_states:
for device_state in device_states:
entity_id = device_state["entity"]["entityId"]
entities[entity_id] = []
for cap_state in device_state["capabilityStates"]:
entities[entity_id].append(json.loads(cap_state))
return entities
def parse_temperature_from_coordinator(
coordinator: DataUpdateCoordinator, entity_id: Text
) -> Optional[Text]:
"""Get the temperature of an entity from the coordinator data."""
value = parse_value_from_coordinator(
coordinator, entity_id, "Alexa.TemperatureSensor", "temperature"
)
return value.get("value") if value and "value" in value else None
def parse_brightness_from_coordinator(
coordinator: DataUpdateCoordinator, entity_id: Text, since: datetime
) -> Optional[int]:
"""Get the brightness in the range 0-100."""
return parse_value_from_coordinator(
coordinator, entity_id, "Alexa.BrightnessController", "brightness", since
)
def parse_color_temp_from_coordinator(
coordinator: DataUpdateCoordinator, entity_id: Text, since: datetime
) -> Optional[int]:
"""Get the color temperature in kelvin"""
return parse_value_from_coordinator(
coordinator,
entity_id,
"Alexa.ColorTemperatureController",
"colorTemperatureInKelvin",
since,
)
def parse_color_from_coordinator(
coordinator: DataUpdateCoordinator, entity_id: Text, since: datetime
) -> Optional[Tuple[float, float, float]]:
"""Get the color as a tuple of (hue, saturation, brightness)"""
value = parse_value_from_coordinator(
coordinator, entity_id, "Alexa.ColorController", "color", since
)
if value is not None:
hue = value.get("hue", 0)
saturation = value.get("saturation", 0)
brightness = parse_brightness_from_coordinator(coordinator, entity_id, since)
if brightness is not None:
return hue, saturation, brightness / 100
return None
def parse_power_from_coordinator(
coordinator: DataUpdateCoordinator, entity_id: Text, since: datetime
) -> Optional[Text]:
"""Get the power state of the entity."""
return parse_value_from_coordinator(
coordinator, entity_id, "Alexa.PowerController", "powerState", since
)
def parse_guard_state_from_coordinator(
coordinator: DataUpdateCoordinator, entity_id: Text
) -> Optional[Text]:
"""Get the guard state from the coordinator data."""
return parse_value_from_coordinator(
coordinator, entity_id, "Alexa.SecurityPanelController", "armState"
)
def parse_value_from_coordinator(
coordinator: DataUpdateCoordinator,
entity_id: Text,
namespace: Text,
name: Text,
since: Optional[datetime] = None,
) -> Any:
"""Parse out values from coordinator for Alexa Entities."""
if coordinator.data and entity_id in coordinator.data:
for cap_state in coordinator.data[entity_id]:
if (
cap_state.get("namespace") == namespace
and cap_state.get("name") == name
):
if is_cap_state_still_acceptable(cap_state, since):
return cap_state.get("value")
else:
_LOGGER.debug(
"Coordinator data for %s is too old to be returned.",
hide_serial(entity_id),
)
return None
else:
_LOGGER.debug("Coordinator has no data for %s", hide_serial(entity_id))
return None
def is_cap_state_still_acceptable(
cap_state: Dict[Text, Any], since: Optional[datetime]
) -> bool:
"""Determine if a particular capability state is still usable given its age."""
if since is not None:
formatted_time_of_sample = cap_state.get("timeOfSample")
if formatted_time_of_sample:
try:
time_of_sample = datetime.strptime(
formatted_time_of_sample, "%Y-%m-%dT%H:%M:%S.%fZ"
)
return time_of_sample >= since
except ValueError:
pass
return True
| 36.431611 | 153 | 0.661689 |
4a1a67116e1697ed19728189b38f94e83e10adca
| 304 |
py
|
Python
|
ledger/__init__.py
|
Funk66/ledger
|
b06f39281b81cebb75a6c5f92fa3b8e47b65800c
|
[
"MIT"
] | null | null | null |
ledger/__init__.py
|
Funk66/ledger
|
b06f39281b81cebb75a6c5f92fa3b8e47b65800c
|
[
"MIT"
] | 3 |
2021-11-16T06:38:48.000Z
|
2021-11-16T06:43:18.000Z
|
ledger/__init__.py
|
Funk66/ledger
|
b06f39281b81cebb75a6c5f92fa3b8e47b65800c
|
[
"MIT"
] | null | null | null |
from logging import getLogger
from pathlib import Path
__author__ = "Guillermo Guirao Aguilar"
__email__ = "contact@guillermoguiraoaguilar.com"
__license__ = "MIT"
__version__ = "0.2.1"
log = getLogger('ledger')
home = Path(Path.home() / '.config' / 'ledger')
home.mkdir(parents=True, exist_ok=True)
| 21.714286 | 48 | 0.743421 |
4a1a6737321fc88873cec88540448bfcf7787054
| 790 |
py
|
Python
|
Tests/Validation/Material/M400_50A.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
Tests/Validation/Material/M400_50A.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
Tests/Validation/Material/M400_50A.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
from pyleecan.Classes.Material import Material
from pyleecan.Classes.MatLamination import MatLamination
from pyleecan.Classes.ImportMatrixXls import ImportMatrixXls
from os.path import dirname, abspath, join
file_path = abspath(join(dirname(__file__), "M400-50A.xlsx"))
M400_50A = Material(name="M400-50A")
M400_50A.mag = MatLamination()
M400_50A.mag.mur_lin = 2500.0
M400_50A.mag.Wlam = 0.0005
M400_50A.mag.BH_curve = ImportMatrixXls(file_path=file_path, sheet="BH")
M400_50A.struct.rho = 7650.0
M400_50A.struct.Ex = 215000000000.0
M400_50A.struct.Ey = 215000000000.0
M400_50A.struct.Ez = 80000000000.0
M400_50A.struct.Gxy = 0.0
M400_50A.struct.Gxz = 2000000000.0
M400_50A.struct.Gyz = 2000000000.0
M400_50A.struct.nu_xy = 0.3
M400_50A.struct.nu_xz = 0.03
M400_50A.struct.nu_yz = 0.03
| 31.6 | 72 | 0.794937 |
4a1a6aa72e0d5c104455a233e7bfe231e6485944
| 3,677 |
py
|
Python
|
src/gameobjects/players/player.py
|
Isaac-Muscat/Pygame-Smash-Bros-Platformer
|
1e527efabc9252de8e8cdf5dfc1fb3623f8b00e8
|
[
"MIT"
] | null | null | null |
src/gameobjects/players/player.py
|
Isaac-Muscat/Pygame-Smash-Bros-Platformer
|
1e527efabc9252de8e8cdf5dfc1fb3623f8b00e8
|
[
"MIT"
] | null | null | null |
src/gameobjects/players/player.py
|
Isaac-Muscat/Pygame-Smash-Bros-Platformer
|
1e527efabc9252de8e8cdf5dfc1fb3623f8b00e8
|
[
"MIT"
] | 1 |
2021-05-21T16:10:29.000Z
|
2021-05-21T16:10:29.000Z
|
# Imports from other modules
from physics.rigidbody import Rigidbody
from physics.collider2 import BoxCollider2
import settings as s
from physics.vector2 import Vector2
import physics.vector2 as vec
class Player(Rigidbody):
'''
This class handles the generic makeup of a character/player.
This should be used as an abstract class and it should be extended.
'''
def __init__(self, x, y, key_bindings, **settings):
'''
Constructor.
        :param x: the starting x position.
:param y: the starting y position.
:param key_bindings: the key bindings of the player.
:param settings: a dictionary/**kwargs that stores mutable stats and abilities of different players.
This should be tuned to balance the character stats.
'''
super().__init__(int(x), int(y), settings.get('mass', 10))
self.key_bindings = key_bindings
self.max_fallspeed = settings.get('max_fallspeed', 0.7)
self.max_runspeed = settings.get('max_runspeed', 0.8)
self.gravity_coef = settings.get('gravity_coef', 0.22)
self.friction_coef = settings.get('friction_coef', 0.05)
self.drag_coef = settings.get('drag_coef', 0.1)
self.jump_force = settings.get('jump_force', Vector2(0, -10))
self.run_force = settings.get('run_force', Vector2(0.3, 0))
self.jumps = settings.get('jumps', 4)
self.jumps_left = self.jumps
# Basically acts as a stun duration
self.frames_in_tumble = 0
self.direction_facing = settings.get('direction_facing', 1) # 1 for player facing right and -1 for player facing left
self.grounded_on = None # Holds obstacle if player is on a ground and None if in the air
self.lives = 3
self.damage_percentage = 0
self.size = (settings.get('width',25), settings.get('height', 50))
# The hitbox for the player.
self.collider = BoxCollider2(self.position.x, self.position.y, self.position.x + self.size[0],
self.position.y + self.size[1])
# The hitbox for the attack if there is one.
self.attack_collider = None
# The previous hitbox for the player used for detecting collisions using interpolation.
self.prev_collider = self.collider.clone()
def draw(self, screen):
print("You did not override this in the child class.")
def update(self, time):
super().update(time)
        # Update previous collider position for interpolation
self.prev_collider.set_position(self.collider.p1.x, self.collider.p1.y)
# Update collider position based on physics
self.collider.set_position(int(self.position.x), int(self.position.y))
# Update attack collider position and duration
if self.attack_collider is not None:
if self.attack_collider.total_lag <= 0:
self.attack_collider = None
else:
self.attack_collider.set_position_from_player(self)
self.attack_collider.total_lag -= vec.clamp(time * s.FPS / 1000, 0, 100000)
# Update tumble/stun duration
if self.frames_in_tumble > 0:
self.frames_in_tumble -= time * s.FPS / 1000
self.frames_in_tumble = vec.clamp(self.frames_in_tumble, 0, 100000)
# Reset the player if out of bounds
if self.position.y > 1400 or self.position.y < -300 or -300>self.position.x or self.position.x>2400:
self.position.x = (1000)
self.position.y = (400)
self.lives += -1
self.damage_percentage = 0
self.frames_in_tumble = 0
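# Hedged usage sketch added for illustration (the subclass name, key bindings and
# stat overrides below are made-up; only the constructor signature comes from the
# code above):
#
#   class ExampleFighter(Player):
#       def draw(self, screen):
#           pass  # a real character would blit its sprite here
#
#   fighter = ExampleFighter(1000, 400, key_bindings={}, mass=12, jumps=3,
#                            max_runspeed=1.0)
#
# Any stat left out of the keyword arguments falls back to the defaults read via
# settings.get() in Player.__init__.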
| 40.855556 | 126 | 0.644275 |
4a1a6ac065146619777ac795268f62a704014ede
| 11,623 |
py
|
Python
|
cwr/grammar/factory/adapter.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 37 |
2015-04-21T15:33:53.000Z
|
2022-02-07T00:02:29.000Z
|
cwr/grammar/factory/adapter.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 86 |
2015-02-01T22:26:02.000Z
|
2021-07-09T08:49:36.000Z
|
cwr/grammar/factory/adapter.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 27 |
2015-01-26T16:01:09.000Z
|
2021-11-08T23:53:55.000Z
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from cwr.grammar.field import basic, special, table, filename
"""
CWR fields grammar adapters.
These classes allow the factories to create rules in a homogeneous way,
by setting a basic interface which will wrap around field rules, giving
a basic common method through which rules can be created.
This interface is the FieldAdapter, having only the get_field method, which
will receive a series of parameters, all of them optional, and generate a
field rule from them. The concrete rule will depend on the implementation.
Additionally, it offers the wrap_as_optional method, which allows setting a
field as optional. It is meant to be used with a field created by the adapter,
so it can be overridden for specific fields.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class FieldAdapter(object, metaclass=ABCMeta):
"""
Interface for adapting field rules creation to the parser factory
requirements.
    This is meant to always receive the same, or similar, groups of values,
and then generate a specific field rule
from them.
"""
def __init__(self):
pass
@abstractmethod
def get_field(self, name=None, columns=None, values=None):
"""
Generates the rules for the field, applying the received parameters.
:param name: the name of the field
:param columns: number of columns
:param values: allowed values for the field
:return: the rule for the field
"""
raise NotImplementedError("The get_field method is not implemented")
def is_numeric(self):
return False
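# Hedged illustration added for clarity (the field name and width below are
# made-up values): a factory holding one of the concrete adapters defined next
# builds a rule through this common interface roughly like
#   rule = AlphanumAdapter().get_field(name='submitter_name', columns=60)
# which delegates to basic.alphanum(60, 'submitter_name', extended=False).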
class AlphanumAdapter(FieldAdapter):
"""
Creates the grammar for an Alphanumeric (A) field, accepting only the
specified number of characters.
By default Alphanumeric fields accept only ASCII characters, excluding
lowercases. If the extended flag is set to True, then non-ASCII characters
are allowed, but the no ASCII lowercase constraint is kept.
This can be a compulsory field, in which case the empty string is
disallowed.
The text will be stripped of heading and trailing whitespaces.
"""
def __init__(self):
super(AlphanumAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
field = basic.alphanum(columns, name, extended=False)
return field
class ExtendedAlphanumAdapter(FieldAdapter):
"""
Creates the grammar for an Alphanumeric (A) field, accepting only the
specified number of characters.
By default Alphanumeric fields accept only ASCII characters, excluding
lowercases. If the extended flag is set to True, then non-ASCII characters
are allowed, but the no ASCII lowercase constraint is kept.
This can be a compulsory field, in which case the empty string is
disallowed.
The text will be stripped of heading and trailing whitespaces.
"""
def __init__(self):
super(ExtendedAlphanumAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.alphanum(columns, name, extended=True)
class EndAlphanumAdapter(FieldAdapter):
"""
Creates the grammar for an Alphanumeric (A) field, accepting only the
specified number of characters.
By default Alphanumeric fields accept only ASCII characters, excluding
lowercases. If the extended flag is set to True, then non-ASCII characters
are allowed, but the no ASCII lowercase constraint is kept.
This can be a compulsory field, in which case the empty string is
disallowed.
The text will be stripped of heading and trailing whitespaces.
"""
def __init__(self):
super(EndAlphanumAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
reg = basic.alphanum(columns, name, extended=True, isLast=True)
return reg
class NumericAdapter(FieldAdapter):
"""
Creates the grammar for a Numeric (N) field, accepting only the specified
number of characters.
This version only allows integers.
"""
def __init__(self):
super(NumericAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.numeric(columns, name)
class BooleanAdapter(FieldAdapter):
"""
Creates the grammar for a Boolean (B) field, accepting only 'Y' or 'N'
"""
def __init__(self):
super(BooleanAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.boolean(name)
class FlagAdapter(FieldAdapter):
"""
Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'.
"""
def __init__(self):
super(FlagAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.flag(name)
class DateAdapter(FieldAdapter):
"""
Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
"""
def __init__(self):
super(DateAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.date(name)
def is_numeric(self):
return True
class TimeAdapter(FieldAdapter):
"""
Creates the grammar for a Time (D) field, accepting only numbers in a
certain pattern.
"""
def __init__(self):
super(TimeAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.time(name)
class DateTimeAdapter(FieldAdapter):
"""
Creates the grammar for a date and time field, which is a combination of
    the Date (D) and Time or Duration field (T).
"""
def __init__(self):
super(DateTimeAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.date_time(name)
class BlankAdapter(FieldAdapter):
"""
Creates the grammar for a blank field.
These are for constant empty strings which should be ignored, as they are
used just as fillers.
"""
def __init__(self):
super(BlankAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.blank(columns, name)
class LookupAdapter(FieldAdapter):
"""
Creates the grammar for a Lookup (L) field, accepting only values from a
list.
"""
def __init__(self):
super(LookupAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return basic.lookup(values, name)
class ISWCAdapter(FieldAdapter):
"""
ISWC field.
"""
def __init__(self):
super(ISWCAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.iswc(name)
class IPIBaseNumberAdapter(FieldAdapter):
"""
IPI Base Number field.
"""
def __init__(self):
super(IPIBaseNumberAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.ipi_base_number(name)
class IPINameNumberAdapter(FieldAdapter):
"""
IPI Name Number field.
"""
def __init__(self):
super(IPINameNumberAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
        return special.ipi_name_number(name)
class PercentageAdapter(FieldAdapter):
"""
Creates the grammar for a Numeric (N) field storing a percentage and
accepting only the specified number of
characters.
"""
def __init__(self):
super(PercentageAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
if values is not None and len(values) > 0:
maximum = int(values[0])
else:
maximum = 100
return special.percentage(columns=columns, maximum=maximum, name=name)
class EAN13Adapter(FieldAdapter):
"""
Creates the grammar for an EAN 13 code.
"""
def __init__(self):
super(EAN13Adapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.ean_13(name=name)
class ISRCAdapter(FieldAdapter):
"""
Creates the grammar for an ISRC code.
"""
def __init__(self):
super(ISRCAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.isrc(name=name)
class VISANAdapter(FieldAdapter):
"""
Creates the grammar for a V-ISAN code.
"""
def __init__(self):
super(VISANAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.visan(name=name)
class AudioVisualKeydapter(FieldAdapter):
"""
Creates the grammar for an Audio Visual Key code.
"""
def __init__(self):
super(AudioVisualKeydapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
field = special.audio_visual_key(name=name)
return field
class CharSetAdapter(FieldAdapter):
"""
Character set code field.
"""
def __init__(self):
super(CharSetAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return table.char_code(columns=columns, name=name)
class VariableAlphanumAdapter(FieldAdapter):
"""
Creates the grammar for an alphanumeric code where the size ranges between
two values.
"""
def __init__(self):
super(VariableAlphanumAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
if values is not None and len(values) > 0:
min_size = int(values[0])
else:
min_size = columns
return filename.alphanum_variable(min_size=min_size, max_size=columns,
name=name)
class NumericFloatAdapter(FieldAdapter):
"""
Creates the grammar for a Numeric (N) field, accepting only the specified
number of characters.
"""
def __init__(self):
super(NumericFloatAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
if values is not None and len(values) > 0:
nums_int = int(values[0])
else:
nums_int = columns
return basic.numeric_float(columns=columns, nums_int=nums_int,
name=name)
class YearAdapter(FieldAdapter):
"""
Creates the grammar for a year field, accepting only the specified number
of integers.
"""
def __init__(self):
super(YearAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return filename.year(columns=columns, name=name)
class FilenameVersionAdapter(FieldAdapter):
"""
Creates the grammar for a filename version field, accepting only specific
delimiters.
"""
def __init__(self):
super(FilenameVersionAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return filename.filename_version(values=values, name=name)
class LookupIntAdapter(FieldAdapter):
"""
Creates the grammar for an integer lookup field, accepting only specific
values, and transforming them to an integer.
"""
def __init__(self):
super(LookupIntAdapter, self).__init__()
def get_field(self, name=None, columns=None, values=None):
return special.lookup_int(values=values, name=name)
| 27.220141 | 78 | 0.677364 |
4a1a6ba3b936d0ce79c49141fda740d3a10ea993
| 8,362 |
py
|
Python
|
processor/processor.py
|
WangTaoAs/PFD_Net
|
14a598108e4b16772273057ac7a8fa8674dfc35f
|
[
"MIT"
] | 39 |
2021-12-06T02:02:13.000Z
|
2022-03-30T13:00:45.000Z
|
processor/processor.py
|
WangTaoAs/PFD-Net-Pose-guided-feature-distangling
|
14a598108e4b16772273057ac7a8fa8674dfc35f
|
[
"MIT"
] | 5 |
2021-12-13T03:12:07.000Z
|
2022-03-24T13:22:56.000Z
|
processor/processor.py
|
WangTaoAs/PFD-Net-Pose-guided-feature-distangling
|
14a598108e4b16772273057ac7a8fa8674dfc35f
|
[
"MIT"
] | 5 |
2021-12-20T07:47:04.000Z
|
2022-03-09T07:44:46.000Z
|
import logging
import os
import time
import torch
import torch.nn as nn
from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from torch.cuda import amp
import torch.distributed as dist
from loss.pose_push_loss import Push_Loss_batch, Push_Loss
def do_train(cfg,
model,
center_criterion,
train_loader,
val_loader,
optimizer,
optimizer_center,
scheduler,
loss_fn,
num_query, local_rank):
log_period = cfg.SOLVER.LOG_PERIOD
checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
eval_period = cfg.SOLVER.EVAL_PERIOD
device = "cuda"
epochs = cfg.SOLVER.MAX_EPOCHS
logger = logging.getLogger("PFDreid.train")
logger.info('start training')
_LOCAL_PROCESS_GROUP = None
if device:
# model.to(local_rank)
model.to(local_rank)
if torch.cuda.device_count() > 1 and cfg.MODEL.DIST_TRAIN:
print('Using {} GPUs for training'.format(torch.cuda.device_count()))
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], find_unused_parameters=True)
loss_meter = AverageMeter()
acc_meter = AverageMeter()
acc_decoder = AverageMeter()
evaluator = R1_mAP_eval(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM, reranking=cfg.TEST.RE_RANKING)
scaler = amp.GradScaler()
# push_loss = Push_Loss_batch()
push_single_loss = Push_Loss()
# train
for epoch in range(1, epochs + 1):
start_time = time.time()
loss_meter.reset()
acc_meter.reset()
acc_decoder.reset()
evaluator.reset()
scheduler.step(epoch)
model.train()
for n_iter, (img, vid, target_cam, target_view) in enumerate(train_loader):
optimizer.zero_grad()
optimizer_center.zero_grad()
img = img.to(device)
target = vid.to(device)
target_cam = target_cam.to(device)
target_view = target_view.to(device)
# ------------- change ---------------
with amp.autocast(enabled=True):
encoder_score, encoder_feat, out_score, out, non_skt_parts = model(img, target, cam_label=target_cam, view_label=target_view )
loss_encoder = loss_fn(encoder_score, encoder_feat, target, target_cam)
loss_decoder = loss_fn(out_score, out, target, target_cam)
loss_push = push_single_loss(out[0], non_skt_parts)
loss = 0.5*loss_encoder + 0.5*loss_decoder + loss_push
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
if 'center' in cfg.MODEL.METRIC_LOSS_TYPE:
for param in center_criterion.parameters():
param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
scaler.step(optimizer_center)
scaler.update()
# score = torch.cat([encoder_score, out_score], dim=0)
score = encoder_score
decoder_score = out_score
if isinstance(score, list):
acc = (score[0].max(1)[1] == target).float().mean()
else:
acc = (score.max(1)[1] == target).float().mean()
if isinstance(decoder_score, list):
decoder_acc = (decoder_score[0].max(1)[1] == target).float().mean()
else:
decoder_acc = (decoder_score.max(1)[1] == target).float().mean()
loss_meter.update(loss.item(), img.shape[0])
acc_meter.update(acc, 1)
acc_decoder.update(decoder_acc,1)
torch.cuda.synchronize()
if (n_iter + 1) % log_period == 0:
logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Decoder Acc: {:.3f} , Base Lr: {:.2e}"
.format(epoch, (n_iter + 1), len(train_loader),
loss_meter.avg, acc_meter.avg, acc_decoder.avg , scheduler._get_lr(epoch)[0]))
end_time = time.time()
time_per_batch = (end_time - start_time) / (n_iter + 1)
if cfg.MODEL.DIST_TRAIN:
pass
else:
logger.info("Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]"
.format(epoch, time_per_batch, train_loader.batch_size / time_per_batch))
if epoch % checkpoint_period == 0:
if cfg.MODEL.DIST_TRAIN:
if dist.get_rank() == 0:
torch.save(model.state_dict(),
os.path.join(cfg.OUTPUT_DIR, cfg.MODEL.NAME + '_{}.pth'.format(epoch)))
else:
torch.save(model.state_dict(),
os.path.join(cfg.OUTPUT_DIR, cfg.MODEL.NAME + '_{}.pth'.format(epoch)))
if epoch % eval_period == 0:
if cfg.MODEL.DIST_TRAIN:
if dist.get_rank() == 0:
model.eval()
for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
camids = camids.to(device)
target_view = target_view.to(device)
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
else:
model.eval()
for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
camids = camids.to(device)
target_view = target_view.to(device)
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
def do_inference(cfg,
model,
val_loader,
num_query):
device = "cuda"
logger = logging.getLogger("PFDreid.test")
logger.info("Enter inferencing")
evaluator = R1_mAP_eval(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM, reranking=cfg.TEST.RE_RANKING)
evaluator.reset()
if device:
if torch.cuda.device_count() > 1:
print('Using {} GPUs for inference'.format(torch.cuda.device_count()))
model = nn.DataParallel(model)
model.to(device)
model.eval()
img_path_list = []
cum = 0
for n_iter, (img, pid, camid, camids, target_view, imgpath) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
camids = camids.to(device)
target_view = target_view.to(device)
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, pid, camid))
img_path_list.extend(imgpath)
cum = cum + 1
# break
print('iter num', cum)
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results ")
logger.info("mAP: {:.1%}".format(mAP))
if cfg.TEST.DATASET_TEST == 'partial' or cfg.TEST.DATASET_TEST == 'partial_idils':
for r in [1, 3]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
return cmc[0], cmc[2]
else:
for r in [1, 5, 10]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
return cmc[0], cmc[4]
| 38.182648 | 142 | 0.544009 |
4a1a6cce5a786933a2616476be952f412ff5fac4
| 2,602 |
py
|
Python
|
src/edubot/snapext/joystick/mappings.py
|
wendlers/edubot-snap
|
09c471ef8738a3fc2aae6772a1e02ef8e15d5737
|
[
"MIT"
] | null | null | null |
src/edubot/snapext/joystick/mappings.py
|
wendlers/edubot-snap
|
09c471ef8738a3fc2aae6772a1e02ef8e15d5737
|
[
"MIT"
] | null | null | null |
src/edubot/snapext/joystick/mappings.py
|
wendlers/edubot-snap
|
09c471ef8738a3fc2aae6772a1e02ef8e15d5737
|
[
"MIT"
] | null | null | null |
##
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
from edubot.snapext.joystick.constants import *
# map JS functions to axis and buttons
JS_MAPPINGS = {
"Generic": {
AXIS: {
X_AXIS_1: 0,
Y_AXIS_1: 1,
},
BUTTONS: {
BUTTON_1: 0,
BUTTON_2: 1,
BUTTON_3: 2,
BUTTON_4: 3,
},
},
"Sony PLAYSTATION(R)3 Controller": {
AXIS: {
X_AXIS_1: 0,
Y_AXIS_1: 1,
X_AXIS_2: 2,
Y_AXIS_2: 3
},
BUTTONS: {
BUTTON_1: 10,
BUTTON_2: 11,
BUTTON_3: 8,
BUTTON_4: 9,
L_UP: 4,
L_DOWN: 6,
L_LEFT: 7,
L_RIGHT: 5,
R_UP: 12,
R_DOWN: 14,
R_LEFT: 15,
R_RIGHT: 13,
L_1: 10,
L_2: 8,
R_1: 11,
R_2: 9,
SELECT: 0,
START: 3
},
},
"Microsoft X-Box 360 pad": {
AXIS: {
X_AXIS_1: 0,
Y_AXIS_1: 1,
X_AXIS_2: 3,
Y_AXIS_2: 4,
X_AXIS_3: 2,
Y_AXIS_3: 5
},
BUTTONS: {
BUTTON_1: 4,
BUTTON_2: 5,
BUTTON_3: 3,
BUTTON_4: 0,
R_UP: 3,
R_DOWN: 0,
R_LEFT: 2,
R_RIGHT: 1,
L_1: 4,
R_1: 5,
SELECT: 6,
START: 7
},
},
}
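# Hedged demo added for illustration: the constant names come from the wildcard
# import of edubot.snapext.joystick.constants above, and the expected values are
# read straight from the JS_MAPPINGS table.
if __name__ == "__main__":
    # axis index used for horizontal movement on a generic pad
    print(JS_MAPPINGS["Generic"][AXIS][X_AXIS_1])                   # 0
    # pygame button index mapped to START on an Xbox 360 pad
    print(JS_MAPPINGS["Microsoft X-Box 360 pad"][BUTTONS][START])   # 7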
| 27.680851 | 79 | 0.534589 |
4a1a6d403e6805470528914f154178cdf5c71f42
| 2,981 |
py
|
Python
|
setup.py
|
tobias-urdin/cinder-auto-snapshot
|
0b136c42be0aca2017b416f56f84e35712a2762b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
tobias-urdin/cinder-auto-snapshot
|
0b136c42be0aca2017b416f56f84e35712a2762b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
tobias-urdin/cinder-auto-snapshot
|
0b136c42be0aca2017b416f56f84e35712a2762b
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
# cinder-auto-snapshot
# Copyright (C) 2015 Tobias Urdin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from distutils.core import setup
from distutils.core import Command
from unittest import TextTestRunner, TestLoader
from subprocess import call
class TestCommand(Command):
description = "run test"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
status = self._run_tests()
sys.exit(status)
def _run_tests(self):
print "hello world"
class Pep8Command(Command):
description = "run pep8"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
status = self._run_tests()
sys.exit(status)
def _run_tests(self):
try:
import pep8
pep8
except ImportError:
print('Missing "pep8" library. You can install it using pip:'
'pip install pep8')
sys.exit(1)
cwd = os.getcwd()
retcode = call(('pep8 %s/cinder_auto_snapshot/ %s/test/' %
(cwd, cwd)).split(' '))
sys.exit(retcode)
class CoverageCommand(Command):
description = "run coverage"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import coverage
except ImportError:
print('Missing "coverage" library. You can install it using pip:'
'pip install coverage')
sys.exit(1)
cover = coverage.coverage(config_file='.coveragerc')
cover.start()
tc = TestCommand(self.distribution)
tc._run_tests()
cover.stop()
cover.save()
cover.html_report()
setup(name='cinder-auto-snapshot',
version='1.0',
description='Create and delete cinder snapshots automatically.',
author='Tobias Urdin',
author_email='tobias.urdin@gmail.com',
license='Apache License 2.0',
packages=['cinder_auto_snapshot'],
package_dir={
'cinder_auto_snapshot': 'cinder_auto_snapshot',
},
url='https://github.com/tobias-urdin/cinder-auto-snapshot',
cmdclass={
'test': TestCommand,
'pep8': Pep8Command,
'coverage': CoverageCommand
},
)
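# Hedged usage note added for illustration (the commands follow from the cmdclass
# mapping above; TestCommand is currently only a placeholder):
#   python setup.py test      # runs TestCommand
#   python setup.py pep8      # checks cinder_auto_snapshot/ and test/ with pep8
#   python setup.py coverage  # runs the test command under coverage (.coveragerc)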
| 24.841667 | 77 | 0.621939 |
4a1a6ec889f51461d2f21c1f4a6757912b8bda49
| 66 |
py
|
Python
|
dict_hash/__version__.py
|
KairosAerospace/dict_hash
|
8c6343639cc66d46f7ec6621f27792d4ce56ad22
|
[
"MIT"
] | 11 |
2020-09-01T20:17:17.000Z
|
2022-01-27T08:45:21.000Z
|
dict_hash/__version__.py
|
KairosAerospace/dict_hash
|
8c6343639cc66d46f7ec6621f27792d4ce56ad22
|
[
"MIT"
] | 4 |
2020-05-28T08:46:02.000Z
|
2021-10-03T15:57:54.000Z
|
dict_hash/__version__.py
|
KairosAerospace/dict_hash
|
8c6343639cc66d46f7ec6621f27792d4ce56ad22
|
[
"MIT"
] | 2 |
2021-10-02T12:41:49.000Z
|
2022-01-19T03:21:14.000Z
|
"""Current version of package dict_hash"""
__version__ = "1.1.20"
| 22 | 42 | 0.712121 |
4a1a6ed4bed1b4a60130338a30dc8b7a302232d3
| 1,629 |
py
|
Python
|
profiles_api/models.py
|
abdulbasidh/profiles-rest-api
|
b97fad3d2588f92abeb0c78897838d6f93c86074
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
abdulbasidh/profiles-rest-api
|
b97fad3d2588f92abeb0c78897838d6f93c86074
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
abdulbasidh/profiles-rest-api
|
b97fad3d2588f92abeb0c78897838d6f93c86074
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save new superuser with given details"""
user = self.create_user(email,name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""DB model fro user in the sysytem"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrive full name of user"""
return self.name
def get_short_name(self):
"""Retrive short name of user"""
return self.name
def __str__(self):
"""Return string representation of user"""
return self.email
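# Hedged usage sketch added for illustration (emails, names and passwords are
# made-up values; assumes AUTH_USER_MODEL points at this UserProfile model):
#   user = UserProfile.objects.create_user(
#       email='jane@example.com', name='Jane', password='s3cret')
#   admin = UserProfile.objects.create_superuser(
#       email='admin@example.com', name='Admin', password='s3cret')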
| 28.578947 | 63 | 0.674647 |
4a1a6f1f8799e7096e391137961093d55cb289cb
| 372 |
py
|
Python
|
mushroom/__init__.py
|
ML2R-center/mushroom
|
07fe4f43fd7bb7ebdec71db8f75e0a9b87573e00
|
[
"MIT"
] | 2 |
2019-02-28T04:56:29.000Z
|
2020-09-01T07:52:35.000Z
|
mushroom/__init__.py
|
ML2R-center/mushroom
|
07fe4f43fd7bb7ebdec71db8f75e0a9b87573e00
|
[
"MIT"
] | 1 |
2020-02-15T03:15:04.000Z
|
2022-03-24T05:21:08.000Z
|
mushroom/__init__.py
|
ML2R-center/mushroom
|
07fe4f43fd7bb7ebdec71db8f75e0a9b87573e00
|
[
"MIT"
] | 2 |
2019-12-05T03:15:46.000Z
|
2021-01-12T11:23:35.000Z
|
from mushroom.result_analysis import get_accuracy_precision_recall_from_series
from mushroom.result_analysis import analysis_with_different_length_stems
from mushroom.result_analysis import get_accuracy_precision_recall_from_series_with_stem_length
from mushroom.core import mushroom_triple_classification_with_different_pars
from mushroom.util import download_nball_files
| 74.4 | 95 | 0.935484 |
4a1a704c81f45b2d6ff90141933b441cf59e8e06
| 10,448 |
py
|
Python
|
foo/comm.py
|
ThomasZh/legend-league-portal
|
df06ac05ea506c3e257517716b6d692b69c8bf6b
|
[
"Apache-2.0"
] | null | null | null |
foo/comm.py
|
ThomasZh/legend-league-portal
|
df06ac05ea506c3e257517716b6d692b69c8bf6b
|
[
"Apache-2.0"
] | null | null | null |
foo/comm.py
|
ThomasZh/legend-league-portal
|
df06ac05ea506c3e257517716b6d692b69c8bf6b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# _*_ coding: utf-8_*_
#
# Copyright 2016 planc2c.com
# thomas@time2box.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.web
import logging
import time
import sys
import os
import traceback  # needed by BaseHandler.write_error below
import uuid
import smtplib
import random
import hashlib
from hashlib import md5
import string
import json as JSON  # alias it so it will not clash with local variables named json inside methods
from bson import json_util
from tornado.escape import json_encode, json_decode
from tornado.httpclient import *
from tornado.httputil import url_concat
from global_const import *
class singleton(object):
_singleton = None;
def __new__(cls):
if cls._singleton is None:
cls._singleton = object.__new__(cls);
return cls._singleton;
# get the current path of this script file
def cur_file_dir():
    # get the script path
path = sys.path[0]
    # determine whether this is a plain script or a py2exe-compiled file: for a script, return the script's directory; for a py2exe build, return the path of the compiled file
if os.path.isdir(path):
return path
elif os.path.isfile(path):
return os.path.dirname(path)
# time format conversion
def timestamp_date(value):
#_format = '%Y-%m-%d %H:%M:%S'
_format = '%Y/%m/%d/%H'
# value is timestamp(int), eg: 1332888820
_value = time.localtime(value)
## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=0)
_dt = time.strftime(_format, _value)
return _dt
def timestamp_friendly_date(value):
#_format = '%Y-%m-%d %H:%M:%S'
y_format = '%Y'
m_format = '%m'
d_format = '%d'
w_format = '%w'
# value is timestamp(int), eg: 1332888820
_value = time.localtime(value)
_current = time.localtime()
## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=0)
current_y_dt = time.strftime(y_format, _current)
y_dt = time.strftime(y_format, _value)
m_dt = time.strftime(m_format, _value)
d_dt = time.strftime(d_format, _value)
w_dt = time.strftime(w_format, _value)
if w_dt == '0':
if current_y_dt == y_dt:
_dt = str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期日'
else:
_dt = str(int(y_dt)) + '年' + str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期日'
elif w_dt == '1':
if current_y_dt == y_dt:
_dt = str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期一'
else:
_dt = str(int(y_dt)) + '年' + str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期一'
elif w_dt == '2':
if current_y_dt == y_dt:
_dt = str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期二'
else:
_dt = str(int(y_dt)) + '年' + str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期二'
elif w_dt == '3':
if current_y_dt == y_dt:
_dt = str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期三'
else:
_dt = str(int(y_dt)) + '年' + str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期三'
elif w_dt == '4':
if current_y_dt == y_dt:
_dt = str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期四'
else:
_dt = str(int(y_dt)) + '年' + str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期四'
elif w_dt == '5':
if current_y_dt == y_dt:
_dt = str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期五'
else:
_dt = str(int(y_dt)) + '年' + str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期五'
elif w_dt == '6':
if current_y_dt == y_dt:
_dt = str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期六'
else:
_dt = str(int(y_dt)) + '年' + str(int(m_dt)) + '月' + str(int(d_dt)) + ' 星期六'
return _dt
def timestamp_datetime(value):
#_format = '%Y-%m-%d %H:%M:%S'
_format = '%m/%d/%Y %H:%M'
# value is timestamp(int), eg: 1332888820
_value = time.localtime(value)
## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=0)
_dt = time.strftime(_format, _value)
return _dt
def datetime_timestamp(dt):
# dt is string
time.strptime(dt, '%m/%d/%Y %H:%M')
## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=-1)
# "2012-03-28 06:53:40" to timestamp(int)
_timestamp = time.mktime(time.strptime(dt, '%m/%d/%Y %H:%M'))
return int(_timestamp)
def generate_md5(fp):
m = md5()
m.update(fp)
return m.hexdigest()
# build the sendcloud signature for sending SMS
def generate_sms_sign(SMS_KEY, param):
param_keys = list(param.keys())
param_keys.sort()
param_str = ""
for key in param_keys:
param_str += key + '=' + str(param[key]) + '&'
param_str = param_str[:-1]
sign_str = SMS_KEY + '&' + param_str + '&' + SMS_KEY
#sign = generate_md5(sign_str)
sign = hashlib.md5(sign_str).hexdigest()
return sign
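# Hedged example added for illustration (the key and parameters are made-up
# values): with SMS_KEY = 'secret' and param = {'msgType': 0, 'phone': '135xxx'},
# the string that gets hashed is
#   'secret&msgType=0&phone=135xxx&secret'
# and the returned signature is its md5 hex digest; that is exactly the string
# layout built by the loop above.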
# generate a 4-digit numeric verification code
def generate_verify_code():
chars=['0','1','2','3','4','5','6','7','8','9']
x = random.choice(chars),random.choice(chars),random.choice(chars),random.choice(chars)
verifyCode = "".join(x)
return verifyCode
# verification code function
def randon_x(i):
code = []
for i in range(i):
if i == random.randint(1,3):
code.append(str(random.randint(1,9)))
else:
tmp = random.randint(65,90)
code.append(chr(tmp))
return ''.join(code)
def generate_uuid_str():
return str(uuid.uuid1()).replace('-', '')
def generate_nonce_str():
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
def hash_pwd(md5pwd, salt):
md5salt = hashlib.md5(salt).hexdigest()
    encrypted_pwd = hashlib.md5(md5pwd + md5salt).hexdigest()
    return encrypted_pwd
class PageNotFoundHandler(tornado.web.RequestHandler):
def get(self):
self.render('comm/page-404.html')
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("pragma","no-cache")
self.set_header("Cache-Control","no-store")
self.set_header("Cache-Control","no-cache")
self.set_header("expires","0")
def get_league_info(self):
        # league (league info)
url = API_DOMAIN+"/api/leagues/"+LEAGUE_ID
http_client = HTTPClient()
response = http_client.fetch(url, method="GET")
logging.info("got response %r", response.body)
data = json_decode(response.body)
league_info = data['rs']
return league_info
def get_code(self):
url = API_DOMAIN+"/api/auth/codes"
http_client = HTTPClient()
data = {"appid":"7x24hs:blog",
"app_secret":"2518e11b3bc89ebec594350d5739f29e"}
_json = json_encode(data)
response = http_client.fetch(url, method="POST", body=_json)
session_code = json_decode(response.body)
logging.info("got session_code %r", session_code)
code = session_code['code']
return code
def write_error(self, status_code, **kwargs):
host = self.request.headers['Host']
logging.info("got host %r", host)
try:
reason = ""
for line in traceback.format_exception(*kwargs["exc_info"]):
if "HTTP 404: Not Found" in line:
self.render('comm/page-404.html')
self.finish()
reason += line
logging.info("got status_code %r reason %r", status_code, reason)
params = {"app":"club-ops", "sys":host, "level":status_code, "message": reason}
url = url_concat("http://kit.7x24hs.com/api/sys-error", params)
http_client = HTTPClient()
_json = json_encode(params)
response = http_client.fetch(url, method="POST", body=_json)
logging.info("got response.body %r", response.body)
except:
logging.warn("write log to http://kit.7x24hs.com/api/sys-error error")
self.render("comm/page-500.html",
status_code=status_code)
class AuthorizationHandler(BaseHandler):
def get_current_user(self):
self.set_secure_cookie("login_next", self.request.uri)
access_token = self.get_secure_cookie("access_token")
logging.info("got access_token %r from cookie", access_token)
if not access_token:
return None
else:
expires_at = self.get_secure_cookie("expires_at")
logging.info("got expires_at %r from cookie", expires_at)
if not expires_at:
return None
else:
_timestamp = int(time.time())
if int(expires_at) > _timestamp:
return access_token
else:
# Logic: refresh_token
refresh_token = self.get_secure_cookie("refresh_token")
if not refresh_token:
return None
else:
try:
url = API_DOMAIN+"/api/auth/tokens"
http_client = HTTPClient()
headers={"Authorization":"Bearer "+refresh_token}
data = {"action":"refresh"}
_json = json_encode(data)
logging.info("request %r body %r", url, _json)
response = http_client.fetch(url, method="POST", headers=headers, body=_json)
logging.info("got response %r", response.body)
session_ticket = json_decode(response.body)
self.set_secure_cookie("access_token", session_ticket['access_token'])
self.set_secure_cookie("expires_at", str(session_ticket['expires_at']))
self.set_secure_cookie("refresh_token", session_ticket['refresh_token'])
return session_ticket['access_token']
except:
return None
return None
| 34.710963 | 129 | 0.584992 |
4a1a7058a8ffe5b60680836ac2a1f0dc268ff553
| 525 |
py
|
Python
|
Build_Web_With_Flask/Building web applications with Flask_Code/chapter10/chapter10/fabfile.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
Build_Web_With_Flask/Building web applications with Flask_Code/chapter10/chapter10/fabfile.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
Build_Web_With_Flask/Building web applications with Flask_Code/chapter10/chapter10/fabfile.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
# coding:utf-8
from fabric.api import *
from fabric.contrib.files import exists
env.linewise = True
# forward_agent allows you to git pull from your repository
# if you have your ssh key setup
env.forward_agent = True
env.hosts = ['your.host.ip.address']
def create_project():
if not exists('~/project'):
run('git clone git://path/to/repo.git')
def update_code():
with cd('~/project'):
run('git pull')
def reload():
"Reloads project instance"
run('touch --no-dereference /tmp/reload')
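# Hedged usage note added for illustration (assumes Fabric 1.x, which provides the
# fabric.api imports used above and the fab command line tool):
#   fab create_project update_code reload
# clones the repository if ~/project is missing, pulls the latest code and touches
# /tmp/reload on every host listed in env.hosts.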
| 20.192308 | 59 | 0.678095 |
4a1a705df8527f8cf57558cb597ced9fd49cbe27
| 7,524 |
py
|
Python
|
pettingzoo/classic/connect_four/connect_four.py
|
raphaelavalos/PettingZoo
|
f34b57a9f4f20947ae56c6708f66c4510413d148
|
[
"Apache-2.0"
] | null | null | null |
pettingzoo/classic/connect_four/connect_four.py
|
raphaelavalos/PettingZoo
|
f34b57a9f4f20947ae56c6708f66c4510413d148
|
[
"Apache-2.0"
] | null | null | null |
pettingzoo/classic/connect_four/connect_four.py
|
raphaelavalos/PettingZoo
|
f34b57a9f4f20947ae56c6708f66c4510413d148
|
[
"Apache-2.0"
] | null | null | null |
from pettingzoo import AECEnv
from gym import spaces
import numpy as np
import os
import pygame
from pettingzoo.utils import wrappers
from pettingzoo.utils.agent_selector import agent_selector
def get_image(path):
import pygame
from os import path as os_path
cwd = os_path.dirname(__file__)
image = pygame.image.load(cwd + '/' + path)
return image
def env():
env = raw_env()
env = wrappers.TerminateIllegalWrapper(env, illegal_reward=-1)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
class raw_env(AECEnv):
metadata = {'render.modes': ['human', "rgb_array"], "name": "connect_four_v3"}
def __init__(self):
super().__init__()
# 6 rows x 7 columns
# blank space = 0
# agent 0 -- 1
# agent 1 -- 2
# flat representation in row major order
self.screen = None
self.board = [0] * (6 * 7)
self.agents = ['player_0', 'player_1']
self.possible_agents = self.agents[:]
self.action_spaces = {i: spaces.Discrete(7) for i in self.agents}
self.observation_spaces = {i: spaces.Dict({
'observation': spaces.Box(low=0, high=1, shape=(6, 7, 2), dtype=np.int8),
'action_mask': spaces.Box(low=0, high=1, shape=(7,), dtype=np.int8)
}) for i in self.agents}
# Key
# ----
# blank space = 0
# agent 0 = 1
# agent 1 = 2
    # An observation is a list of lists, where each list represents a row
#
# array([[0, 1, 1, 2, 0, 1, 0],
# [1, 0, 1, 2, 2, 2, 1],
# [0, 1, 0, 0, 1, 2, 1],
# [1, 0, 2, 0, 1, 1, 0],
# [2, 0, 0, 0, 1, 1, 0],
# [1, 1, 2, 1, 0, 1, 0]], dtype=int8)
def observe(self, agent):
board_vals = np.array(self.board).reshape(6, 7)
cur_player = self.possible_agents.index(agent)
opp_player = (cur_player + 1) % 2
cur_p_board = np.equal(board_vals, cur_player + 1)
opp_p_board = np.equal(board_vals, opp_player + 1)
observation = np.stack([cur_p_board, opp_p_board], axis=2).astype(np.int8)
legal_moves = self._legal_moves() if agent == self.agent_selection else []
action_mask = np.zeros(7, int)
for i in legal_moves:
action_mask[i] = 1
return {'observation': observation, 'action_mask': action_mask}
def _legal_moves(self):
return [i for i in range(7) if self.board[i] == 0]
# action in this case is a value from 0 to 6 indicating position to move on the flat representation of the connect4 board
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
# assert valid move
assert (self.board[0:7][action] == 0), "played illegal move."
piece = self.agents.index(self.agent_selection) + 1
for i in list(filter(lambda x: x % 7 == action, list(range(41, -1, -1)))):
if self.board[i] == 0:
self.board[i] = piece
break
next_agent = self._agent_selector.next()
winner = self.check_for_winner()
# check if there is a winner
if winner:
self.rewards[self.agent_selection] += 1
self.rewards[next_agent] -= 1
self.dones = {i: True for i in self.agents}
# check if there is a tie
elif all(x in [1, 2] for x in self.board):
# once either play wins or there is a draw, game over, both players are done
self.dones = {i: True for i in self.agents}
else:
# no winner yet
self.agent_selection = next_agent
self._accumulate_rewards()
def reset(self):
# reset environment
self.board = [0] * (6 * 7)
self.agents = self.possible_agents[:]
self.rewards = {i: 0 for i in self.agents}
self._cumulative_rewards = {name: 0 for name in self.agents}
self.dones = {i: False for i in self.agents}
self.infos = {i: {} for i in self.agents}
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.reset()
def render(self, mode='human'):
screen_width = 1287
screen_height = 1118
if self.screen is None:
if mode == "human":
pygame.init()
self.screen = pygame.display.set_mode((screen_width, screen_height))
else:
self.screen = pygame.Surface((screen_width, screen_height))
if mode == "human":
pygame.event.get()
# Load and scale all of the necessary images
tile_size = (screen_width * (91 / 99)) / 7
red_chip = get_image(os.path.join('img', 'C4RedPiece.png'))
red_chip = pygame.transform.scale(red_chip, (int(tile_size * (9 / 13)), int(tile_size * (9 / 13))))
black_chip = get_image(os.path.join('img', 'C4BlackPiece.png'))
black_chip = pygame.transform.scale(black_chip, (int(tile_size * (9 / 13)), int(tile_size * (9 / 13))))
board_img = get_image(os.path.join('img', 'Connect4Board.png'))
board_img = pygame.transform.scale(board_img, ((int(screen_width)), int(screen_height)))
self.screen.blit(board_img, (0, 0))
# Blit the necessary chips and their positions
for i in range(0, 42):
if self.board[i] == 1:
self.screen.blit(red_chip, ((i % 7) * (tile_size) + (tile_size * (6 / 13)), int((i / 7)) * (tile_size) + (tile_size * (6 / 13))))
elif self.board[i] == 2:
self.screen.blit(black_chip, ((i % 7) * (tile_size) + (tile_size * (6 / 13)), int((i / 7)) * (tile_size) + (tile_size * (6 / 13))))
if mode == "human":
pygame.display.update()
observation = np.array(pygame.surfarray.pixels3d(self.screen))
return np.transpose(observation, axes=(1, 0, 2)) if mode == "rgb_array" else None
def close(self):
if self.screen is not None:
import pygame
pygame.quit()
self.screen = None
def check_for_winner(self):
board = np.array(self.board).reshape(6, 7)
piece = self.agents.index(self.agent_selection) + 1
# Check horizontal locations for win
column_count = 7
row_count = 6
for c in range(column_count - 3):
for r in range(row_count):
if board[r][c] == piece and board[r][c + 1] == piece and board[r][c + 2] == piece and board[r][c + 3] == piece:
return True
# Check vertical locations for win
for c in range(column_count):
for r in range(row_count - 3):
if board[r][c] == piece and board[r + 1][c] == piece and board[r + 2][c] == piece and board[r + 3][c] == piece:
return True
# Check positively sloped diagonals
for c in range(column_count - 3):
for r in range(row_count - 3):
if board[r][c] == piece and board[r + 1][c + 1] == piece and board[r + 2][c + 2] == piece and board[r + 3][c + 3] == piece:
return True
# Check negatively sloped diagonals
for c in range(column_count - 3):
for r in range(3, row_count):
if board[r][c] == piece and board[r - 1][c + 1] == piece and board[r - 2][c + 2] == piece and board[r - 3][c + 3] == piece:
return True
return False
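# Hedged usage sketch added for illustration (mirrors the AEC interaction loop that
# the PettingZoo wrappers above expect; the pick-first-legal-column policy is a
# made-up example):
#
#   game = env()
#   game.reset()
#   for agent in game.agent_iter():
#       obs, reward, done, info = game.last()
#       if done:
#           action = None
#       else:
#           mask = obs["action_mask"]
#           action = int(np.flatnonzero(mask)[0])  # first legal column
#       game.step(action)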
| 36.347826 | 147 | 0.569112 |
4a1a706333e97d749328f9bcaba45ede8eb9ce96
| 4,796 |
py
|
Python
|
software/multifluids_icferst/tests/turbine_flux_penalty_2plus1/mesh/scripts/triangle_add_edgeowner.py
|
msc-acse/acse-9-independent-research-project-Wade003
|
cfcba990d52ccf535171cf54c0a91b184db6f276
|
[
"MIT"
] | 2 |
2020-05-11T02:39:46.000Z
|
2020-05-11T03:08:38.000Z
|
software/multifluids_icferst/tests/turbine_flux_dg_2d/mesh/scripts/triangle_add_edgeowner.py
|
msc-acse/acse-9-independent-research-project-Wade003
|
cfcba990d52ccf535171cf54c0a91b184db6f276
|
[
"MIT"
] | null | null | null |
software/multifluids_icferst/tests/turbine_flux_dg_2d/mesh/scripts/triangle_add_edgeowner.py
|
msc-acse/acse-9-independent-research-project-Wade003
|
cfcba990d52ccf535171cf54c0a91b184db6f276
|
[
"MIT"
] | 2 |
2020-05-21T22:50:19.000Z
|
2020-10-28T17:16:31.000Z
|
#!/usr/bin/env python
import sys
import triangle
import copy
import numpy
from sets import Set
#input surface_id, filename
# 5.5.2010: this script adds a new attribute to the .edge file which holds the "owner" element number of this edge
# Here is an example geo file for this script:
# Point(1) = {0, 0, 0, 2};
# Point(2) = {1, 0, 0, 2};
# Point(3) = {1, 1, 0, 2};
# Point(4) = {0, 1, 0, 2};
# Point(5) = {0.5, 0, 0, 2};
# Point(6) = {0.5, 1, 0, 2};
# Point(7) = {0.500001, 0, 0, 2};
# Point(8) = {0.500001, 1, 0, 2};
# Point(9) = {0.4, -0.1, 0, 2};
# Point(10) = {0.4, 1.1, 0, 2};
#
#
# Line(1) = {4, 1};
# Line(2) = {1, 9};
# Line(3) = {9, 5};
# Line(4) = {5, 6};
# Line(9) = {6, 10};
# Line(10) = {10, 4};
#
# Line(5) = {8, 7};
# Line(6) = {7, 2};
# Line(7) = {2, 3};
# Line(8) = {3, 8};
#
# Physical Line(20) = {1};
# Physical Line(21) = {2};
# Physical Line(22) = {3};
# Physical Line(23) = {4};
# Physical Line(28) = {9};
# Physical Line(29) = {10};
#
# Physical Line(24) = {5};
# Physical Line(25) = {6};
# Physical Line(26) = {7};
# Physical Line(27) = {8};
#
# Line Loop(10) = {4, 9, 10, 1, 2, 3};
# Line Loop(11) = {8, 5, 6, 7};
#
# Plane Surface(11) = {10};
# Plane Surface(12) = {11};
# Physical Surface(12) = {11, 12};
########################################################################################################
def nodedupl_recursion(elein, edgein, nodeid, boundary_id):
global copy_eles, copy_edges, copy_nodes, debug, copy_surface_ids, copy_surface_id, copy_surfaceowner_ids, copy_region_ids
next_edgein=triangle.get_partner_edge(edgein, nodeid, boundary_id)
if next_edgein==None:
print "Reached one end of the surface boundary."
return
if debug>1:
print "Lets loop around nodeid", nodeid, " starting with ele", elein+1, " with boundary edge ", edgein+1, " until we reach the next surface edge with id ", next_edgein+1
next_elein_list=triangle.get_eles_on_ele_side(elein, nodeid, edgein, boundary_id)
if debug>1:
print "Duplicate edge ", next_edgein +1
copy_edges.append(triangle.edges[next_edgein])
copy_surface_ids.append(new_surface_id)
copy_surfaceowner_ids.append(next_elein_list[len(next_elein_list)-1]+1) # update copy_surfaceowner_ids for the new edge
# update copy_surfaceowner_ids for the old edge
if triangle.ele_with_edgeids(next_edgein)[0]==next_elein_list[len(next_elein_list)-1]:
copy_surfaceowner_ids[next_edgein]=triangle.ele_with_edgeids(next_edgein)[1]+1
else:
copy_surfaceowner_ids[next_edgein]=triangle.ele_with_edgeids(next_edgein)[0]+1
if (triangle.edges[next_edgein][0]==nodeid):
next_nodeid=triangle.edges[next_edgein][1]
else:
next_nodeid=triangle.edges[next_edgein][0]
nodedupl_recursion(next_elein_list[len(next_elein_list)-1], next_edgein, next_nodeid, boundary_id)
########################################################################################################
if not len(sys.argv)==2:
print "Usage: triangle_add_edgeowner.py file"
print ""
print "Outputs fixed .edge, .ele and .node files with a new edge attribute holding the element owner of each edge."
print ""
print "The output files will have the suffix _edgow"
exit()
filename=sys.argv[1]
debug=2
triangle.read_nodefile(filename+'.node')
if triangle.dim!=2:
print "Only 2 dim meshes supported so far"
exit()
triangle.read_edgefile(filename+'.edge')
triangle.read_elefile(filename+'.ele')
copy_eles=copy.deepcopy(triangle.eles)
copy_region_ids=copy.deepcopy(triangle.region_ids)
copy_edges=copy.deepcopy(triangle.edges)
copy_surface_ids=copy.deepcopy(triangle.surface_ids)
copy_surfaceowner_ids=[-1 for i in range(0,len(triangle.surface_ids))] # Will store the element id for each surface edge
copy_nodes=copy.deepcopy(triangle.nodes)
# Now assign the surfaceowner_id to the external boundaries
for e in range(0,len(copy_surfaceowner_ids)):
if copy_surfaceowner_ids[e]>=0:
print "Internal Error. Ask simon.funke@gmail.com!"
exit()
if len(triangle.ele_with_edgeids(e))!=1:
print "Error Found internal boundary!"
exit()
copy_surfaceowner_ids[e]=triangle.ele_with_edgeids(e)[0]+1
if debug>0:
print "save node file as ", filename, "_edgow.node"
triangle.save_nodefile(copy_nodes, 2, filename+"_edgow.node")
if debug>0:
print "save ele file as ", filename, "_edgow.ele"
triangle.save_elefile(copy_eles, copy_region_ids, filename+"_edgow.ele")
if debug>0:
print "save edge file as ", filename, "_edgow.edge"
triangle.save_edgefile2(copy_edges, copy_surface_ids, copy_surfaceowner_ids, filename+"_edgow.edge")
| 35.007299 | 177 | 0.634487 |
4a1a707cfdfd24389298bd4acb4d9bf4b007347e
| 4,860 |
py
|
Python
|
selfdrive/car/vw/carcontroller.py
|
micksmi/openpilot
|
15e128d08cf6fcbb12bb9c665b711f16f75a4e5a
|
[
"MIT"
] | null | null | null |
selfdrive/car/vw/carcontroller.py
|
micksmi/openpilot
|
15e128d08cf6fcbb12bb9c665b711f16f75a4e5a
|
[
"MIT"
] | null | null | null |
selfdrive/car/vw/carcontroller.py
|
micksmi/openpilot
|
15e128d08cf6fcbb12bb9c665b711f16f75a4e5a
|
[
"MIT"
] | null | null | null |
from cereal import car
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.vw.carstate import CarState, get_gateway_can_parser, get_extended_can_parser
from selfdrive.car.vw import vwcan
from selfdrive.car.vw.values import CAR, DBC
from selfdrive.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
AUDIBLE_WARNINGS = [AudibleAlert.chimeWarning1, AudibleAlert.chimeWarning2, AudibleAlert.chimeWarningRepeat]
class CarControllerParams():
def __init__(self, car_fingerprint):
self.HCA_STEP_ACTIVE = 2 # HCA_01 message frequency 50Hz when applying torque (100 / 2)
self.HCA_STEP_INACTIVE = 10 # HCA_01 message frequency 10Hz when not applying torque (100 / 10)
self.LDW_STEP = 10 # LDW_02 message frequency 10Hz (100 / 10)
self.STEER_MAX = 300 # Max heading control assist torque 3.00nm
self.STEER_DELTA_INC = 16 # Max HCA reached in 0.375s (STEER_MAX / (50Hz * 0.375))
self.STEER_DELTA_DEC = 16 # Min HCA reached in 0.375s (STEER_MAX / (50Hz * 0.375))
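# (Added sanity check, not from upstream) the delta limits follow from the comments
# above: STEER_MAX / (50 Hz * 0.375 s) = 300 / 18.75 = 16, so torque can ramp between
# 0 and STEER_MAX in roughly 0.375 s at the 50 Hz HCA_01 rate.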
class CarController(object):
def __init__(self, canbus, car_fingerprint):
self.start_time = sec_since_boot()
self.counter = 0
self.apply_steer_prev = 0
self.car_fingerprint = car_fingerprint
# Setup detection helper. Routes commands to
# an appropriate CAN bus number.
self.canbus = canbus
self.params = CarControllerParams(car_fingerprint)
print(DBC)
self.packer_gw = CANPacker(DBC[car_fingerprint]['pt'])
def update(self, sendcan, enabled, CS, frame, actuators, visual_alert, audible_alert, leftLaneVisible, rightLaneVisible):
""" Controls thread """
P = self.params
# Send CAN commands.
can_sends = []
canbus = self.canbus
#
# Prepare HCA_01 steering torque message
#
if (frame % P.HCA_STEP_ACTIVE) == 0:
if enabled and not CS.standstill:
# TODO: Verify our lkas_enabled DBC bit is correct, VCDS thinks it may not be
lkas_enabled = 1
plan_requested_torque = int(round(actuators.steer * P.STEER_MAX))
# If the driver is actively providing steering input, prevent the planned torque request
# from exceeding one-third of maximum. We adjust the plan prior to smoothing so we get
# smooth ramp-down of HCA torque if we were above this before the driver intervened.
if(CS.steer_override):
plan_requested_torque = clip(plan_requested_torque, -P.STEER_MAX / 3, P.STEER_MAX / 3)
# Apply increase and decrease rate limits for HCA torque in accordance with safety model.
if self.apply_steer_prev >= 0:
# Previously steering LEFT or STRAIGHT, normal calculations
hca_steer_min = max(self.apply_steer_prev - P.STEER_DELTA_DEC, 0 - P.STEER_DELTA_INC)
hca_steer_max = min(self.apply_steer_prev + P.STEER_DELTA_INC, P.STEER_MAX)
else:
# Previously steering RIGHT, inverted calculations
hca_steer_min = max(self.apply_steer_prev - P.STEER_DELTA_INC, -P.STEER_MAX)
hca_steer_max = min(self.apply_steer_prev + P.STEER_DELTA_DEC, 0 + P.STEER_DELTA_INC)
apply_steer = clip(plan_requested_torque, hca_steer_min, hca_steer_max)
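# Worked example (added comment): with apply_steer_prev = 100 and the defaults above,
# hca_steer_min/max come out to 84 and 116, so a planned torque of 300 is applied
# gradually over several 20 ms cycles rather than in one step.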
self.apply_steer_prev = apply_steer
# FIXME: Ugly hack to reset EPS hardcoded 180 second limit for HCA intervention.
# Deal with this by disengaging HCA anytime we have a zero-crossing. Need to refactor
# the up/down rate code above to enforce a zero-crossing on all changes of direction
# just for additional safety margin.
if apply_steer == 0:
lkas_enabled = 0
else:
# Disable heading control assist
lkas_enabled = 0
apply_steer = 0
self.apply_steer_prev = 0
idx = (frame // P.HCA_STEP_ACTIVE) % 16  # integer division so the message counter stays an int
can_sends.append(vwcan.create_steering_control(self.packer_gw, canbus.gateway, CS.CP.carFingerprint, apply_steer, idx, lkas_enabled))
#
# Prepare LDW_02 HUD message with lane lines and confidence levels
#
if (frame % P.LDW_STEP) == 0:
if enabled and not CS.standstill:
lkas_enabled = 1
else:
lkas_enabled = 0
if visual_alert == VisualAlert.steerRequired:
if audible_alert in AUDIBLE_WARNINGS:
hud_alert = 7
else:
hud_alert = 8
else:
hud_alert = 0
can_sends.append(vwcan.create_hud_control(self.packer_gw, canbus.gateway, CS.CP.carFingerprint, lkas_enabled, hud_alert, leftLaneVisible, rightLaneVisible))
sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan').to_bytes())
| 42.631579 | 162 | 0.703086 |
4a1a70a5bfd96eb835ca829ff5a1442f9bbaa2fb
| 939 |
py
|
Python
|
directvscraper/items.py
|
santteegt/directv-scraper
|
f5b87a8409fad6cff5148c9045b8ce4f4fb549a6
|
[
"MIT"
] | null | null | null |
directvscraper/items.py
|
santteegt/directv-scraper
|
f5b87a8409fad6cff5148c9045b8ce4f4fb549a6
|
[
"MIT"
] | null | null | null |
directvscraper/items.py
|
santteegt/directv-scraper
|
f5b87a8409fad6cff5148c9045b8ce4f4fb549a6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
import logging
logger = logging.getLogger('serializerLogger')
def unicode_serialization(stream):
logger.info(stream)
new_stream = stream.encode('utf-8')
logger.info(new_stream)
return new_stream
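# Example (added): unicode_serialization(u'Caf\xe9') returns the UTF-8 encoded bytes
# for 'Café', which is what the exporter writes for each serialized field.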
class Program(scrapy.Item):
show_id = scrapy.Field(serializer=str)
channel_number = scrapy.Field(serializer=int)
channel_name = scrapy.Field(serializer=str)
title = scrapy.Field(serializer=unicode_serialization)
start_time = scrapy.Field(serializer=str)
time_length = scrapy.Field(serializer=str)
day = scrapy.Field(serializer=unicode_serialization)
query_date = scrapy.Field(serializer=str)
class TvShow(scrapy.Item):
id = scrapy.Field(serializer=str)
description = scrapy.Field(serializer=unicode_serialization)
| 26.083333 | 64 | 0.743344 |
4a1a70bff0812194a3af47f5021bd00b1254ecc3
| 47,396 |
py
|
Python
|
cogs/pug.py
|
rksouthee/pugbot
|
9802c2f10574b350ca78adfb613048d5830ac858
|
[
"MIT"
] | null | null | null |
cogs/pug.py
|
rksouthee/pugbot
|
9802c2f10574b350ca78adfb613048d5830ac858
|
[
"MIT"
] | null | null | null |
cogs/pug.py
|
rksouthee/pugbot
|
9802c2f10574b350ca78adfb613048d5830ac858
|
[
"MIT"
] | null | null | null |
import asyncio
import collections
import collections.abc
import contextlib
import functools
import heapq
import itertools
import random
import re
import shelve
import os
from discord.ext import commands
import discord
import pendulum
PICKMODES = [
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]]
MAXPLAYERS = len(PICKMODES[0]) + 2
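# (Added note, inferred from TeamMod.team/pick below) each PICKMODES row is a captain
# pick order where 0 means the red captain picks next and 1 the blue captain;
# MAXPLAYERS adds 2 to the pick-order length to account for the two captains themselves.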
MAXTAGLENGTH = 10
PLASEP = '\N{SMALL ORANGE DIAMOND}'
MODSEP = '\N{SMALL BLUE DIAMOND}'
OKMSG = '\N{OK HAND SIGN}'
DISCORD_MD_CHARS = '*~_`'
DISCORD_MD_ESCAPE_RE = re.compile('[{}]'.format(DISCORD_MD_CHARS))
DISCORD_MD_ESCAPE_DICT = {c: '\\' + c for c in DISCORD_MD_CHARS}
def discord_md_escape(value):
return DISCORD_MD_ESCAPE_RE.sub(lambda match: DISCORD_MD_ESCAPE_DICT[match.group(0)], value)
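# Example (added): discord_md_escape('a_b*c') returns the backslash-escaped string
# a\_b\*c, so member names render literally in Discord instead of as markdown.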
def display_name(member):
return discord_md_escape(member.display_name)
class Mod(collections.abc.MutableSet):
"""Maintains the state for players in a PUG"""
def __init__(self, name, desc, maxplayers):
self.name = name
self.desc = desc
self.maxplayers = maxplayers
self.players = []
self.maps = set()
def __contains__(self, member):
return member in self.players
def __iter__(self):
return iter(self.players)
def __len__(self):
return len(self.players)
def __getstate__(self):
state = self.__dict__.copy()
del state['players']
return state
def __setstate__(self, state):
self.__dict__ = state
self.players = []
@property
def brief(self):
return '**{}** [{}/{}]'.format(self.name, len(self), self.maxplayers)
@property
def full(self):
return len(self) == self.maxplayers
@property
def needed(self):
return self.maxplayers - len(self)
@property
def teamgame(self):
return False
def add(self, member):
if member not in self and not self.full:
self.players.append(member)
return True
def discard(self, member):
if member in self:
self.players.remove(member)
return True
def fullreset(self):
self.players = []
class Team(list):
def __init__(self):
super().__init__()
@property
def captain(self):
return self[0]
class TeamMod(Mod):
def __init__(self, name, desc, maxplayers, pickmode):
super().__init__(name, desc, maxplayers)
self.teams = (Team(), Team())
self.pickmode = pickmode
self.task = None
self.here = [True, True]
def __getstate__(self):
state = super().__getstate__()
del state['teams']
del state['task']
del state['here']
return state
def __setstate__(self, state):
super().__setstate__(state)
self.teams = (Team(), Team())
self.task = None
self.here = [True, True]
@property
def teamgame(self):
return True
@property
def hascaptains(self):
return self.red and self.blue
@property
def team(self):
return PICKMODES[self.pickmode][len(self.red) + len(self.blue) - 2]
@property
def captain(self):
return self.teams[self.team].captain if self.hascaptains else None
@property
def teamsready(self):
return len(self.red) + len(self.blue) == self.maxplayers
@property
def red(self):
return self.teams[0]
@property
def blue(self):
return self.teams[1]
def __contains__(self, member):
return member in (self.players + self.red + self.blue)
def discard(self, member):
if member in self:
if self.red:
self.reset()
if self.task:
self.task.cancel()
self.players.remove(member)
return True
def reset(self):
if self.red:
self.players += self.red + self.blue
self.players = list(filter(None, self.players))
self.red.clear()
self.blue.clear()
self.here = [True, True]
if self.task:
self.task.cancel()
return True
return False
def fullreset(self):
self.players = []
self.red.clear()
self.blue.clear()
self.here = [True, True]
if self.task:
self.task.cancel()
def setcaptain(self, player):
if player in self.players and self.full:
index = self.players.index(player)
if not self.red:
self.red.append(player)
elif not self.blue:
self.blue.append(player)
else:
return False
self.players[index] = None
return True
return False
def pick(self, captain, index):
if captain == self.captain:
self.here[self.team] = True
if all(self.here) and self.task:
self.task.cancel()
if index < 0 or index >= len(self) or not self.players[index]:
return False
player = self.players[index]
self.teams[self.team].append(player)
self.players[index] = None
# check to see if next team has any choice and move them
index = len(self.red) + len(self.blue) - 2
remaining = PICKMODES[self.pickmode][index:self.maxplayers - 2]
if len(set(remaining)) == 1:
self.teams[remaining[0]].extend(p for p in self.players if p)
return True
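# Worked example (added): with pickmode 1 ([0, 1, 1, 0, ...]) and an 8-player mod,
# after the fifth manual pick the remaining slice collapses to a single team id, so
# the extend() above hands the last unpicked player to that team automatically.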
class PUGChannel(collections.abc.MutableMapping):
def __init__(self):
self.active = True
self.server_name = ''
self.randcaptaintimer = 20
self.idlecaptaintimer = 60
self.mods = collections.OrderedDict()
def __setitem__(self, key: str, mod: Mod):
self.mods[key.lower()] = mod
def __getitem__(self, key: str):
return self.mods[key.lower()]
def __delitem__(self, key: str):
del self.mods[key.lower()]
def __iter__(self):
return iter(self.mods)
def __len__(self):
return len(self.mods)
@property
def team_mods(self):
return (mod for mod in self.values() if mod.teamgame)
class ModStats:
def __init__(self):
self.total = 0
self.timestamp = pendulum.now().timestamp
self.last_timestamp = self.timestamp
@property
def last(self):
return HistoryItem(self.last_timestamp)
@property
def daily(self):
days = (pendulum.now() - pendulum.from_timestamp(self.timestamp)).days
return self.total / (days + 1)
def update(self, timestamp):
self.total += 1
self.last_timestamp = timestamp
return self
class TeamStats(ModStats):
def __init__(self):
super().__init__()
self.picks = 0
self.captain = 0
@property
def average_pick(self):
total = self.total - self.captain
return 0 if total == 0 else self.picks / total
def update(self, timestamp, pick):
self.picks += pick
self.captain += 0 if pick else 1
return super().update(timestamp)
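# Worked example (added): a member picked 2nd and 4th and once made captain ends up
# with total = 3, captain = 1, picks = 6, so average_pick = 6 / (3 - 1) = 3.0.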
class HistoryItem:
def __init__(self, timestamp, players=None, modid=None):
self.timestamp = timestamp
self.players = '\n' + players if players else ''
self.modid = modid
def __str__(self):
name = '**{}** '.format(self.modid) if self.modid else ''
when = (pendulum.now() - pendulum.from_timestamp(self.timestamp)).in_words()
return '{}[{} ago]{}'.format(name, when, self.players)
def __lt__(self, other):
return self.timestamp < other.timestamp
class PUGStats:
def __init__(self):
self.total = 0
self.timestamp = pendulum.now().timestamp
self.history = collections.deque(maxlen=3)
@property
def daily(self):
days = (pendulum.now() - pendulum.from_timestamp(self.timestamp)).days
return self.total / (days + 1)
@property
def last_timestamp(self):
return self.last.timestamp
@property
def last(self):
return HistoryItem(*self.history[-1])
@property
def lastt(self):
return HistoryItem(*self.history[min(0, len(self.history) - 1)])
@property
def lasttt(self):
return HistoryItem(*self.history[0])
def update(self, timestamp, players):
self.total += 1
self.history.append((timestamp, players))
return self
class Stats(collections.abc.MutableMapping):
def __init__(self):
self.data = dict()
def __getitem__(self, mod):
return self.data[mod.name]
def __setitem__(self, mod, value):
self.data[mod.name] = value
def __delitem__(self, mod):
del self.data[mod.name]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def items(self):
return self.data.items()
def values(self):
return self.data.values()
@property
def timestamp(self):
return min(self.values(), key=lambda x: x.timestamp).timestamp
@property
def total(self):
return sum(mod.total for mod in self.values())
@property
def daily(self):
days = (pendulum.now() - pendulum.from_timestamp(self.timestamp)).days
return self.total / (days + 1)
@property
def last(self):
modid, stats = max(self.items(), key=lambda x: x[1].last)
last = stats.last
last.modid = modid
return last
class ChannelStats(Stats):
"""Stores the PUG stats for the channel"""
@property
def history(self):
for mod, stats in self.items():
for timestamp, players in stats.history:
yield HistoryItem(timestamp, players, modid=mod)
@property
def lastt(self):
history = sorted(self.history, reverse=True)
return history[min(1, len(history) - 1)]
@property
def lasttt(self):
history = sorted(self.history, reverse=True)
return history[min(2, len(history) - 1)]
class MemberStats(Stats):
"""Stores the member's stats for a channel"""
@property
def team_stats(self):
return (mod for mod in self.values() if isinstance(mod, TeamStats))
@property
def captain(self):
return sum(mod.captain for mod in self.team_stats)
@property
def average_pick(self):
total, picks = 0, 0
for mod in self.team_stats:
total += mod.total - mod.captain
picks += mod.picks
return 0 if total == 0 else picks / total
class ChannelStatsView:
def __init__(self, db, channel):
self.db = db
self.channel = channel
def __iter__(self):
for member_id, stats in self.db.items():
member = self.channel.server.get_member(member_id)
if member and not member.bot and self.channel.id in stats:
yield member, stats[self.channel.id]
class ModStatsView(ChannelStatsView):
def __init__(self, db, channel, mod):
super().__init__(db, channel)
self.mod = mod
def __iter__(self):
return ((member, stats[self.mod]) for member, stats in super().__iter__() if self.mod in stats)
class StatsDB(collections.abc.MutableMapping):
def __init__(self, db, channel, mod):
self.db = db
self.channel = channel
self.mod = mod
def __getitem__(self, member):
stats = self.db[member.id]
if self.channel.id in stats:
if self.mod is None:
return stats[self.channel.id]
elif self.mod in stats[self.channel.id]:
return stats[self.channel.id][self.mod]
raise KeyError
def __setitem__(self, member, value):
stats = self.db.get(member.id, dict())
cls = ChannelStats if member.bot else MemberStats
channel_stats = stats.setdefault(self.channel.id, cls())
channel_stats[self.mod] = value
self.db[member.id] = stats
def __delitem__(self, member):
del self.db[member.id]
def __len__(self):
return len(self.db)
def __iter__(self):
if self.mod is None:
return iter(ChannelStatsView(self.db, self.channel))
return iter(ModStatsView(self.db, self.channel, self.mod))
@contextlib.contextmanager
def stats_open(channel, mod, flag='c', writeback=False):
with shelve.open('data/stats', flag=flag, writeback=writeback) as db:
yield StatsDB(db, channel, mod)
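# Usage sketch (added): `with stats_open(channel, mod) as db: db[member] = ...` opens
# the data/stats shelve file and exposes it as a StatsDB keyed by discord members.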
def clamp(n, low, high):
return max(low, min(n, high))
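# e.g. (added) clamp(5, 10, 999) == 10 and clamp(1200, 10, 999) == 999; used below to
# bound the captain timers.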
def ispugchannel(ctx):
pugchannel = ctx.bot.get_cog('PUG').channels.get(ctx.message.channel)
return pugchannel is not None and pugchannel.active
class ModConverter(commands.Converter):
def convert(self):
mod = self.ctx.cog.channels[self.ctx.message.channel].get(self.argument)
if mod is None:
raise commands.errors.BadArgument('PUG "{}" not found'.format(self.argument))
return mod
class TeamModConverter(ModConverter):
def convert(self):
mod = super().convert()
if not mod.teamgame:
raise commands.errors.BadArgument('"{}" is not a team PUG'.format(mod.name))
return mod
class PUG:
"""PUG related commands"""
def __init__(self, bot):
self.bot = bot
self.last_teams = dict()
self.tags = collections.defaultdict(lambda: collections.defaultdict(str))
self.nocaptains = collections.defaultdict(set)
self.channels = dict()
async def on_ready(self):
"""Load PUGChannels"""
with shelve.open('data/pug') as db:
for (channel_id, pugchannel) in list(db.items()):
channel = self.bot.get_channel(channel_id)
if channel is not None:
self.channels[channel] = pugchannel
else:
del db[channel_id]
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_server=True)
async def pugbot(self, ctx, enable: bool):
"""Enables/Disables PUG commands in the channel"""
pugchannel = self.channels.get(ctx.message.channel)
if pugchannel is None:
if not enable:
return
self.channels[ctx.message.channel] = PUGChannel()
else:
if pugchannel.active == enable:
return
pugchannel.active = enable
status = ' enabled' if enable else ' disabled'
await self.bot.say('PUG commands have been' + status)
with shelve.open('data/pug', 'w') as db:
db[ctx.message.channel.id] = self.channels[ctx.message.channel]
@commands.command(no_pm=True, aliases=['pickorders'])
@commands.check(ispugchannel)
async def pickmodes(self):
"""Displays the available pickmodes"""
await self.bot.say('```{}```'.format(
'\n'.join('{}) {}'.format(i, ', '.join(map(str, pm)))
for i, pm in enumerate(PICKMODES))))
@commands.command(pass_context=True, no_pm=True, aliases=['setpickorder'])
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def setpickmode(self, ctx, mod: TeamModConverter, pickmode: int):
"""Set pickmode for mod"""
if 0 <= pickmode < len(PICKMODES):
mod.pickmode = pickmode
await self.bot.say(OKMSG)
with shelve.open('data/pug', 'w') as db:
db[ctx.message.channel.id] = self.channels[ctx.message.channel]
@commands.command(no_pm=True, aliases=['pickorder'])
@commands.check(ispugchannel)
async def pickmode(self, mod: TeamModConverter):
"""Displays the pickmode for mod"""
pickmode = PICKMODES[mod.pickmode][:mod.maxplayers - 2]
await self.bot.say('```[{}]```'.format(', '.join(map(str, pickmode))))
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def setlimit(self, ctx, mod: ModConverter, limit: int):
"""Sets number of players required to fill mod"""
if limit > 1 and not mod.full and (not mod.teamgame or limit <= MAXPLAYERS):
mod.maxplayers = limit
await self.bot.say(OKMSG)
with shelve.open('data/pug') as db:
db[ctx.message.channel.id] = self.channels[ctx.message.channel]
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def setrandcaptaintimer(self, ctx, duration: int):
"""Set the amount of time before selecting random captains
duration - number of seconds to wait (clamped to 10-999)
"""
pugchannel = self.channels[ctx.message.channel]
pugchannel.randcaptaintimer = clamp(duration, 10, 999)
await self.bot.say(OKMSG)
with shelve.open('data/pug', 'w') as db:
db[ctx.message.channel.id] = pugchannel
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def setidlecaptaintimer(self, ctx, duration: int):
"""Set the amount time before kicking idle captains
duration - number of seconds to wait (clamped to 10-999)
"""
pugchannel = self.channels[ctx.message.channel]
pugchannel.idlecaptaintimer = clamp(duration, 10, 999)
await self.bot.say(OKMSG)
with shelve.open('data/pug', 'w') as db:
db[ctx.message.channel.id] = pugchannel
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def setserver(self, ctx, *, server: str):
"""Set the channel's PUG server"""
pugchannel = self.channels[ctx.message.channel]
pugchannel.server_name = server
await self.bot.say(OKMSG)
with shelve.open('data/pug', 'w') as db:
db[ctx.message.channel.id] = pugchannel
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def server(self, ctx):
"""Displays channel's PUG server"""
pugchannel = self.channels[ctx.message.channel]
if pugchannel.server_name:
await self.bot.say(pugchannel.server_name)
else:
await self.bot.say('No server set, use `.setserver` to set the server')
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def addmod(self, ctx, mod: str, name: str, n: int, teams: bool=True, pickmode: int=1):
"""Adds new mod to the channel"""
pugchannel = self.channels[ctx.message.channel]
if n < 2 or mod in pugchannel:
return
if n == 2:
teams = False
if teams:
if n < 4 or n > MAXPLAYERS or n % 2 == 1 or pickmode < 0 or pickmode >= len(PICKMODES):
return
pickmode = 0 if n == 4 else pickmode
pugchannel[mod] = TeamMod(mod, name, n, pickmode)
else:
pugchannel[mod] = Mod(mod, name, n)
await self.bot.say(OKMSG)
with shelve.open('data/pug') as db:
db[ctx.message.channel.id] = pugchannel
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def delmod(self, ctx, mod: ModConverter):
"""Deletes mod from the channel"""
pugchannel = self.channels[ctx.message.channel]
del pugchannel[mod.name]
await self.bot.say(OKMSG)
with shelve.open('data/pug', 'w') as db:
db[ctx.message.channel.id] = pugchannel
async def on_command_error(self, error, ctx):
"""If a PUG command is used in a channel that doesn't have active
PUGs send a message display the active channels on the server
"""
cmds = {'join', 'list', 'last', 'liast', 'lastt', 'liastt', 'lasttt', 'liasttt'}
if isinstance(error, commands.errors.CheckFailure) and ctx.command.name in cmds:
server = ctx.message.server
active_channels = (channel for channel in self.channels if channel.server == server and self.channels[channel].active)
channel_mentions = [channel.mention for channel in active_channels]
if channel_mentions:
await self.bot.send_message(ctx.message.channel, '**Active Channels:** {}'.format(' '.join(channel_mentions)))
def get_tag(self, member):
return self.tags[member.server][member]
def format_players(self, ps, number=False, mention=False, tags=True):
def name(p):
return p.mention if mention else display_name(p)
xs = ((i, name(p), self.get_tag(p)) for i, p in enumerate(ps, 1) if p)
fmt = '**{0})** {1}' if number else '{1}'
fmt += '{2}' if tags else ''
return PLASEP.join(fmt.format(*x) for x in xs)
def format_mod(self, mod):
fmt = '**__{0.desc} [{1}/{0.maxplayers}]:__**\n{2}'
return fmt.format(mod, len(mod), self.format_players(mod, number=mod.full))
def format_teams(self, mod, mention=False, tags=False):
teams = '**Red Team:** {}\n**Blue Team:** {}'
red = self.format_players(mod.red, mention=mention, tags=tags)
blue = self.format_players(mod.blue, mention=mention, tags=tags)
return teams.format(red, blue)
def format_last(self, channel, mod, attr='last'):
with stats_open(channel, mod, flag='r') as db:
pugstats = db.get(self.bot.user, None)
if pugstats is not None:
history_item = getattr(pugstats, attr)
return '**{}:** {}'.format(attr.title(), history_item)
return 'No PUGs recorded'
def format_list(self, channel, mod):
if mod is None:
pugchannel = self.channels[channel]
return MODSEP.join(mod.brief for mod in pugchannel.values())
else:
return self.format_mod(mod)
def format_liast(self, channel, mod, attr='last'):
ls = self.format_list(channel, mod)
la = self.format_last(channel, mod, attr)
return '{}\n{}'.format(ls, la)
@commands.command(name='list', pass_context=True, no_pm=True, aliases=['ls'])
@commands.check(ispugchannel)
async def _list(self, ctx, mod: ModConverter=None):
"""Displays mods/players in the channel"""
await self.bot.say(self.format_list(ctx.message.channel, mod))
@commands.command(pass_context=True, no_pm=True, aliases=['la'])
@commands.check(ispugchannel)
async def last(self, ctx, mod: ModConverter=None):
"""Displays players from last PUG"""
await self.bot.say(self.format_last(ctx.message.channel, mod))
@commands.command(pass_context=True, no_pm=True, aliases=['lia'])
@commands.check(ispugchannel)
async def liast(self, ctx, mod: ModConverter=None):
"""Display mods/players and last PUG"""
await self.bot.say(self.format_liast(ctx.message.channel, mod))
@commands.command(pass_context=True, no_pm=True, hidden=True)
@commands.check(ispugchannel)
async def lastt(self, ctx, mod: ModConverter=None):
await self.bot.say(self.format_last(ctx.message.channel, mod, 'lastt'))
@commands.command(pass_context=True, no_pm=True, hidden=True)
@commands.check(ispugchannel)
async def liastt(self, ctx, mod: ModConverter=None):
await self.bot.say(self.format_liast(ctx.message.channel, mod, 'lastt'))
@commands.command(pass_context=True, no_pm=True, hidden=True)
@commands.check(ispugchannel)
async def lasttt(self, ctx, mod: ModConverter=None):
await self.bot.say(self.format_last(ctx.message.channel, mod, 'lasttt'))
@commands.command(pass_context=True, no_pm=True, hidden=True)
@commands.check(ispugchannel)
async def liasttt(self, ctx, mod: ModConverter=None):
await self.bot.say(self.format_liast(ctx.message.channel, mod, 'lasttt'))
async def addplayers_impl(self, channel, mod, members):
if not any(list(mod.add(m) for m in members)):
return
if not mod.full:
return await self.bot.say(self.format_mod(mod))
msg = ['**{}** has been filled'.format(mod.name)]
msg.append(self.format_players(mod, mention=True, tags=False))
mods = (other for other in self.channels[channel].values() if other is not mod)
for other in mods:
wasfull = other.full
if any(list(other.discard(p) for p in mod)) and wasfull:
msg.append('**{}** was reset'.format(other.name))
await self.bot.say('\n'.join(msg))
if mod.teamgame:
mod.task = self.bot.loop.create_task(self.randcaptains(channel, mod))
else:
timestamp = pendulum.now().timestamp
with stats_open(channel, mod) as db:
for member in mod:
db[member] = db.get(member, ModStats()).update(timestamp)
self.remove_tags(member)
players = self.format_players(mod, mention=False, tags=False)
db[self.bot.user] = db.get(self.bot.user, PUGStats()).update(timestamp, players)
mod.fullreset()
@commands.command(pass_context=True, no_pm=True, aliases=['addplayer'])
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def addplayers(self, ctx, mod: ModConverter, *members: discord.Member):
"""Adds players to mod"""
await self.addplayers_impl(ctx.message.channel, mod, (m for m in members if not m.bot))
@commands.command(pass_context=True, no_pm=True, aliases=['j'])
@commands.check(ispugchannel)
async def join(self, ctx, mod: ModConverter):
"""Joins mod"""
await self.addplayers_impl(ctx.message.channel, mod, [ctx.message.author])
async def randcaptains(self, channel, mod):
"""Waits for n seconds before selecting random captains"""
content = '`Random captains in {:3d} seconds`'
seconds = self.channels[channel].randcaptaintimer
message = await self.bot.send_message(channel, content.format(seconds))
mod.here = [False, False]
for i in range(seconds - 1, -1, -1):
try:
await asyncio.sleep(1)
if i % 5 == 0 or i < 10:
message = await self.bot.edit_message(message, content.format(i))
except asyncio.CancelledError:
return await self.bot.edit_message(message, '`Random captains cancelled`')
if not mod.full or mod.hascaptains:
return
candidates = [p for p in mod if p and p not in self.nocaptains[channel.server]]
if len(candidates) < 2:
candidates = list(mod)
random.shuffle(candidates)
msg, redset = [], False
if not mod.red:
redset = mod.setcaptain(candidates.pop(0))
msg.append(mod.red.captain.mention + ' is captain for the **Red Team**')
blue_captain = candidates.pop(0)
mod.setcaptain(blue_captain)
mod.here = [not redset, False]
await self.bot.edit_message(message, '`Random captains selected`')
msg.append(blue_captain.mention + ' is captain for the **Blue Team**')
msg.append('Type .here to prevent being kicked')
msg.append('{} to pick'.format(mod.captain.mention))
msg.append(self.format_players(mod, number=True))
await self.bot.send_message(channel, '\n'.join(msg))
mod.task = self.bot.loop.create_task(self.kick_idle(channel, mod))
async def kick_idle(self, channel, mod):
"""Removes captains if they did not pick or type .here"""
try:
await asyncio.sleep(self.channels[channel].idlecaptaintimer)
except asyncio.CancelledError:
return
if mod.hascaptains and not all(mod.here):
msg = ['**{}** was reset'.format(mod.name)]
kick = []
for i in range(2):
if not mod.here[i]:
captain = mod.teams[i].captain
kick.append(captain)
msg.append('{} was removed for being idle'.format(captain.mention))
# Send the message before we kick the players, otherwise the task will be cancelled
await self.bot.send_message(channel, '\n'.join(msg))
[mod.discard(p) for p in kick]
@commands.command(pass_context=True, no_pm=True, aliases=['pro'])
@commands.cooldown(2, 5.0, type=commands.BucketType.channel)
@commands.check(ispugchannel)
async def promote(self, ctx, mod: ModConverter):
"""Notify other members in the channel"""
await self.bot.say('@here Only **{0.needed}** more needed for **{0.name}**'.format(mod))
async def remove_player(self, channel, mod, player, reason):
wasfull = mod.full
name = player.mention if reason == 'was removed' else display_name(player)
if mod.discard(player):
if wasfull:
await self.bot.say('**{}** was reset because **{}** {}'.format(mod.name, name, reason))
else:
await self.bot.say('**{}** was removed from **{}** because they {}'.format(name, mod.name, reason))
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def delplayer(self, ctx, mod: ModConverter, member: discord.Member):
"""Removes player from mod"""
await self.remove_player(ctx.message.channel, mod, member, 'was removed')
@commands.command(pass_context=True, no_pm=True, aliases=['l'])
@commands.check(ispugchannel)
async def leave(self, ctx, mod: ModConverter):
"""Leave mod"""
await self.remove_player(ctx.message.channel, mod, ctx.message.author, 'left')
@commands.command(pass_context=True, no_pm=True, aliases=['lva'])
@commands.check(ispugchannel)
async def leaveall(self, ctx):
"""Leaves all mods you have joined, including other channels"""
for channel in self.channels:
await self.remove_from_channel(ctx.message.author, channel, 'left')
async def on_member_update(self, before, after):
"""Remove member from all mods if they go offline"""
if after.status is discord.Status.offline:
await self.remove_from_server(before, 'quit')
def removed_from(self, member, channel):
pugchannel = self.channels[channel]
mods = (mod for mod in pugchannel.values() if member in mod)
for mod in mods:
yield mod
mod.discard(member)
async def remove_from_channel(self, member, channel, reason):
reset, removed = None, []
for mod in self.removed_from(member, channel):
if mod.full:
reset = mod.name
else:
removed.append(mod.name)
msg, name = [], display_name(member)
if reset:
fmt = '**{}** was reset because **{}** {}'
msg.append(fmt.format(reset, name, reason))
if removed:
fmt = '**{}** was removed from **{}** because they {}'
if len(removed) > 1:
mods = ', '.join(removed[:-1]) + ' & ' + removed[-1]
else:
mods = removed[0]
msg.append(fmt.format(name, mods, reason))
if msg:
await self.bot.send_message(channel, '\n'.join(msg))
async def remove_from_server(self, member, reason):
"""Removes the member from the server"""
self.remove_tags(member)
for channel in self.channels:
if channel.server == member.server:
await self.remove_from_channel(member, channel, reason)
async def on_member_remove(self, member):
"""Remove member from all mods in the server"""
await self.remove_from_server(member, 'left the server')
async def on_member_ban(self, member):
"""Remove member from all mods in the server"""
with shelve.open('data/bans') as db:
bans = db.get(member.server.id, collections.Counter())
bans[member.id] += 1
db[member.server.id] = bans
await self.remove_from_server(member, 'was banned')
async def on_channel_delete(self, channel):
"""Remove PUGChannel if the associated channel was deleted"""
if channel in self.channels:
del self.channels[channel]
with shelve.open('data/pug', 'w') as db:
del db[channel.id]
async def on_server_remove(self, server):
"""Remove server tags when server is removed from the bot"""
self.tags.pop(server)
self.nocaptains.pop(server)
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def reset(self, ctx, mod: TeamModConverter=None):
"""Resets teams"""
if mod is None:
pugchannel = self.channels[ctx.message.channel]
mods = [mod for mod in pugchannel.team_mods if mod.red]
if len(mods) == 1:
mod = mods[0]
if mod is not None and mod.reset():
await self.bot.say('**{}** was reset'.format(mod.name))
mod.task = self.bot.loop.create_task(self.randcaptains(ctx.message.channel, mod))
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def fullreset(self, ctx, mod: ModConverter):
"""Resets players in the mod"""
mod.fullreset()
await self.bot.say('**{}** was reset'.format(mod.name))
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def here(self, ctx):
"""Prevent being kicked when set as random captain"""
channel = ctx.message.channel
pugchannel = self.channels[channel]
captain = ctx.message.author
for mod in pugchannel.team_mods:
if mod.red:
if mod.red.captain == captain:
mod.here[0] = True
return
elif mod.blue and mod.blue.captain == captain:
if not mod.here[1]:
mod.here[1] = True
if all(mod.here) and mod.task:
mod.task.cancel()
return
async def setcaptain_impl(self, channel, member, mention=False):
pugchannel = self.channels[channel]
mod = next((mod for mod in pugchannel.team_mods if mod.setcaptain(member)), None)
name = member.mention if mention else '**{}**'.format(display_name(member))
if mod is not None:
if mod.hascaptains:
if mod.task is not None:
mod.task.cancel()
msg = [name + ' is captain for the **Blue Team**']
msg.append('{} to pick'.format(mod.captain.mention))
msg.append(self.format_players(mod, number=True))
await self.bot.say('\n'.join(msg))
else:
await self.bot.say('**{}** is captain for the **Red Team**'.format(name))
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def setcaptain(self, ctx, member: discord.Member):
"""Set player as captain"""
await self.setcaptain_impl(ctx.message.channel, member, mention=True)
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def captain(self, ctx):
"""Become captain for mod"""
await self.setcaptain_impl(ctx.message.channel, ctx.message.author)
@commands.command(pass_context=True, no_pm=True, aliases=['p'])
@commands.check(ispugchannel)
async def pick(self, ctx, *players: int):
"""Pick player by number"""
channel = ctx.message.channel
pugchannel = self.channels[channel]
captain = ctx.message.author
mod = next((mod for mod in pugchannel.team_mods if mod.captain == captain), None)
if mod is None:
return
picks = list(itertools.takewhile(functools.partial(mod.pick, captain), (x - 1 for x in players)))
if picks:
teams = self.format_teams(mod)
if mod.teamsready:
self.last_teams[channel] = '**{}**\n{}'.format(mod.desc, teams)
msg = 'Teams have been selected:\n{}'.format(self.format_teams(mod, mention=True))
await self.bot.say(msg)
timestamp = pendulum.now().timestamp
with stats_open(channel, mod) as db:
members = mod.red + mod.blue
xs = PICKMODES[mod.pickmode][:mod.maxplayers - 2]
picks = [0] + [i + 1 for i, x in enumerate(xs) if x == 0]
picks += [0] + [i + 1 for i, x in enumerate(xs) if x == 1]
for i in range(mod.maxplayers):
member = members[i]
db[member] = db.get(member, TeamStats()).update(timestamp, picks[i])
self.remove_tags(member)
db[self.bot.user] = db.get(self.bot.user, PUGStats()).update(timestamp, teams)
mod.fullreset()
else:
msg = '\n'.join([
self.format_players(mod, number=True, tags=True),
self.format_teams(mod, tags=True),
'{} to pick'.format(mod.captain.mention)])
await self.bot.say(msg)
@pick.error
async def pick_error(self, error, ctx):
if isinstance(error, commands.errors.BadArgument) and ctx.invoked_with == 'p':
ctx.view = commands.view.StringView(ctx.message.content[len(ctx.prefix) + 1:])
await self.promote.invoke(ctx)
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def teams(self, ctx):
"""Displays current teams, or teams from last PUG"""
pugchannel = self.channels[ctx.message.channel]
mods = [(mod.desc, self.format_teams(mod, tags=True)) for mod in pugchannel.team_mods if mod.red]
if mods:
await self.bot.say('\n'.join('**__{}:__**\n{}'.format(*mod) for mod in mods))
elif ctx.message.channel in self.last_teams:
await self.bot.say(self.last_teams[ctx.message.channel])
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def turn(self, ctx):
"""Displays captain whose turn it is to pick and current teams"""
pugchannel = self.channels[ctx.message.channel]
mods = [(display_name(mod.captain), mod.desc) for mod in pugchannel.team_mods if mod.hascaptains]
if mods:
await self.bot.say('\n'.join('**{}** to pick for **{}**'.format(*mod) for mod in mods))
async def display_stats(self, member, channel, mod):
with stats_open(channel, mod, flag='r') as db:
stats = db.get(member)
if stats is None:
return await self.bot.say('No stats available')
out = []
out.append('**Total:** [{}]'.format(stats.total))
out.append('**Daily:** [{:.2f}]'.format(stats.daily))
if hasattr(stats, 'captain') and not member.bot:
out.append('**Captain:** [{}]'.format(stats.captain))
mp = '/' + str(mod.maxplayers - 2) if mod is not None else ''
out.append('**Avg. Pick:** [{:.2f}{}]'.format(stats.average_pick, mp))
if not member.bot:
try:
db = shelve.open('data/bans', 'r')
except:
out.append('**Bans:** [0]')
else:
bans = db.get(member.server.id, collections.Counter())
db.close()
out.append('**Bans:** [{}]'.format(bans[member.id]))
out.append('**Last:** {}'.format(stats.last))
await self.bot.say(MODSEP.join(out))
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def stats(self, ctx, member: discord.Member, mod: ModConverter=None):
"""Display PUG stats for player"""
await self.display_stats(member, ctx.message.channel, mod)
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def mystats(self, ctx, mod: ModConverter=None):
"""Display your PUG stats"""
await self.display_stats(ctx.message.author, ctx.message.channel, mod)
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def pugstats(self, ctx, mod: ModConverter=None):
"""Display channel PUG stats"""
await self.display_stats(self.bot.user, ctx.message.channel, mod)
@commands.command(pass_context=True, no_pm=True, aliases=['nocapt'])
@commands.check(ispugchannel)
async def nocaptain(self, ctx):
"""Prevent being made captain for next PUG, resets after next PUG"""
self.nocaptains[ctx.message.server].add(ctx.message.author)
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def nomic(self, ctx):
"""Sets tag to 'nomic'"""
self.tags[ctx.message.server][ctx.message.author] = ' [nomic]'
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def tag(self, ctx, *, tag: str):
"""Sets custom tag for all mods"""
if tag == 'nocapt' or tag == 'nocaptain':
self.nocaptains[ctx.message.server].add(ctx.message.author)
else:
self.tags[ctx.message.server][ctx.message.author] = ' [{}]'.format(discord_md_escape(tag[:MAXTAGLENGTH]))
def remove_tags(self, member):
self.nocaptains[member.server].discard(member)
self.tags[member.server].pop(member, None)
@commands.command(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def deltag(self, ctx):
"""Deletes tags"""
self.remove_tags(ctx.message.author)
@commands.command(pass_context=True, no_pm=True, hidden=True)
@commands.has_permissions(manage_server=True)
async def cleartags(self, ctx):
"""Clear current tags for the server"""
self.nocaptains.pop(ctx.message.server)
self.tags.pop(ctx.message.server)
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_server=True)
async def numtags(self, ctx):
"""Displays the number tags in use on the server"""
server = ctx.message.server
await self.bot.whisper('tags: {}\nnocaptains: {}'.format(len(self.tags[server]), len(self.nocaptains[server])))
@commands.group(pass_context=True, no_pm=True)
@commands.check(ispugchannel)
async def top(self, ctx, n: int):
"""Displays a top n list for the channel"""
if ctx.invoked_subcommand is not None:
self.count = n
@top.command(pass_context=True, no_pm=True)
async def picks(self, ctx, mod: TeamModConverter=None):
"""Displays top average picks"""
with stats_open(ctx.message.channel, mod, flag='r') as db:
ps = ((display_name(p[0]), p[1].average_pick) for p in db if p[1].average_pick)
topn = heapq.nsmallest(self.count, ps, key=lambda p: p[1])
if topn:
entries = ('**{})** {} [{:.2f}]'.format(i, *p) for i, p in enumerate(topn, 1))
await self.bot.say(PLASEP.join(entries))
@top.command(pass_context=True, no_pm=True)
async def puggers(self, ctx, mod: ModConverter=None):
"""Displays top puggers"""
with stats_open(ctx.message.channel, mod, flag='r') as db:
ps = ((display_name(p[0]), p[1].total) for p in db)
topn = heapq.nlargest(self.count, ps, key=lambda p: p[1])
if topn:
entries = ('**{})** {} [{}]'.format(i, *p) for i, p in enumerate(topn, 1))
await self.bot.say(PLASEP.join(entries))
@top.command(pass_context=True, no_pm=True)
async def lamers(self, ctx, mod: ModConverter=None):
"""Displays top lamers"""
with stats_open(ctx.message.channel, mod, flag='r') as db:
ps = ((display_name(p[0]), p[1].total) for p in db)
topn = heapq.nsmallest(self.count, ps, key=lambda p: p[1])
if topn:
entries = ('**{})** {} [{}]'.format(i, *p) for i, p in enumerate(topn, 1))
await self.bot.say(PLASEP.join(entries))
@top.command(pass_context=True, no_pm=True)
async def captains(self, ctx, mod: TeamModConverter=None):
"""Display top captains"""
with stats_open(ctx.message.channel, mod, flag='r') as db:
ps = ((display_name(p[0]), p[1].captain) for p in db if p[1].captain)
topn = heapq.nlargest(self.count, ps, key=lambda p: p[1])
if topn:
entries = ('**{})** {} [{}]'.format(i, *p) for i, p in enumerate(topn, 1))
await self.bot.say(PLASEP.join(entries))
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def addmaps(self, ctx, mod: ModConverter, *maps: str):
"""Adds maps to mod"""
if maps:
mod.maps.update(maps)
await self.bot.say(OKMSG)
with shelve.open('data/pug') as db:
db[ctx.message.channel.id] = self.channels[ctx.message.channel]
@commands.command(pass_context=True, no_pm=True)
@commands.has_permissions(manage_channels=True)
@commands.check(ispugchannel)
async def delmaps(self, ctx, mod: ModConverter, *maps: str):
"""Removes maps from mod"""
if maps:
mod.maps -= set(maps)
await self.bot.say(OKMSG)
with shelve.open('data/pug') as db:
db[ctx.message.channel.id] = self.channels[ctx.message.channel]
@commands.command(pass_context=True, no_pm=True, aliases=['maps'])
@commands.check(ispugchannel)
async def maplist(self, ctx, mod: ModConverter=None):
"""Displays maps for mod"""
if mod is not None and mod.maps:
await self.bot.say('**__{}__:**\n{}'.format(mod.desc, MODSEP.join(sorted(mod.maps))))
elif mod is None:
pugchannel = self.channels[ctx.message.channel]
mods = [mod for mod in pugchannel.values() if mod.maps]
if mods:
await self.bot.say('\n'.join('**__{}__:** {}'.format(mod.desc, MODSEP.join(sorted(mod.maps))) for mod in mods))
def setup(bot):
if not os.path.exists('data'):
os.makedirs('data')
shelve.open('data/stats', 'c').close()
bot.add_cog(PUG(bot))
| 37.645751 | 130 | 0.605473 |
4a1a729c1854a3240bbc870f7d4eb2fb9b5858cc
| 14,922 |
py
|
Python
|
O365/event.py
|
BDeliers/python-o365
|
24bf6d8aa82b00b02faf9a5d1d790481d19991b7
|
[
"Apache-2.0"
] | null | null | null |
O365/event.py
|
BDeliers/python-o365
|
24bf6d8aa82b00b02faf9a5d1d790481d19991b7
|
[
"Apache-2.0"
] | null | null | null |
O365/event.py
|
BDeliers/python-o365
|
24bf6d8aa82b00b02faf9a5d1d790481d19991b7
|
[
"Apache-2.0"
] | null | null | null |
from O365.contact import Contact
from O365.group import Group
from O365.connection import Connection
import logging
import json
import requests
import time
log = logging.getLogger(__name__)
class Event( object ):
'''
Class for managing the creation and manipulation of events in a calendar.
Methods:
create -- Creates the event in a calendar.
update -- Sends local changes up to the cloud.
delete -- Deletes event from the cloud.
toJson -- returns the json representation.
fullcalendarioJson -- gets a specific json representation used for fullcalendario.
getSubject -- gets the subject of the event.
getBody -- gets the body of the event.
getStart -- gets the starting time of the event. (struct_time)
getEnd -- gets the ending time of the event. (struct_time)
getAttendees -- gets the attendees of the event.
getReminder -- returns True if reminder is enabled, False if not.
getCategories -- returns a list of the event's categories.
addAttendee -- adds an attendee to the event. update needs to be called for notification.
setSubject -- sets the subject line of the event.
setBody -- sets the body of the event.
setStart -- sets the starting time of the event. (struct_time)
setEnd -- sets the starting time of the event. (struct_time)
setAttendees -- sets the attendee list.
setStartTimeZone -- sets the timezone for the start of the event item.
setEndTimeZone -- sets the timezone for the end of the event item.
setReminder -- sets the reminder.
setCategories -- sets a list of the event's categories.
Variables:
time_string -- Formatted time string for translation to and from json.
create_url -- url for creating a new event.
update_url -- url for updating an existing event.
delete_url -- url for deleting an event.
'''
#Formatted time string for translation to and from json.
time_string = '%Y-%m-%dT%H:%M:%S'
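#Example (added): time.strftime(time_string, time.gmtime(0)) gives '1970-01-01T00:00:00',
#matching the dateTime strings produced and parsed below.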
#takes a calendar ID
create_url = 'https://outlook.office365.com/api/v1.0/me/calendars/{0}/events'
#takes current event ID
update_url = 'https://outlook.office365.com/api/v1.0/me/events/{0}'
#takes current event ID
delete_url = 'https://outlook.office365.com/api/v1.0/me/events/{0}'
def __init__(self,json=None,auth=None,cal=None,verify=True):
'''
Creates a new event wrapper.
Keyword Argument:
json (default = None) -- json representation of an existing event. mostly just used by
this library internally for events that are downloaded by the calendar class.
auth (default = None) -- a (email,password) tuple which will be used for authentication
to office365.
cal (default = None) -- an instance of the calendar for this event to associate with.
'''
self.auth = auth
self.calendar = cal
self.attendees = []
if json:
self.json = json
self.isNew = False
else:
self.json = {}
self.verify = verify
self.startTimeZone = time.strftime("%Z", time.gmtime())
self.endTimeZone = time.strftime("%Z", time.gmtime())
def create(self,calendar=None):
'''
This method creates an event on the calendar passed.
IMPORTANT: It returns the event as created in the calendar. If you wish
to make any changes to this event after you make it, use the returned value
and not this particular event any further.
calendar -- a calendar class onto which you want this event to be created. If this is left
empty then the event's default calendar, specified at instancing, will be used. If no
default is specified, then the event cannot be created.
'''
connection = Connection()
# Change URL if we use Oauth
if connection.is_valid() and connection.oauth != None:
self.create_url = self.create_url.replace("outlook.office365.com/api", "graph.microsoft.com")
elif not self.auth:
log.debug('failed authentication check when creating event.')
return False
if calendar:
calId = calendar.calendarId
self.calendar = calendar
log.debug('sent to passed calendar.')
elif self.calendar:
calId = self.calendar.calendarId
log.debug('sent to default calendar.')
else:
log.debug('no valid calendar to upload to.')
return False
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
log.debug('creating json for request.')
data = json.dumps(self.json)
response = None
try:
log.debug('sending post request now')
response = connection.post_data(self.create_url.format(calId),data,headers=headers,auth=self.auth,verify=self.verify)
log.debug('sent post request.')
if response.status_code > 399:
log.error("Invalid response code [{}], response text: \n{}".format(response.status_code, response.text))
return False
except Exception as e:
if response:
log.debug('response to event creation: %s',str(response))
else:
log.error('No response, something is very wrong with create: %s',str(e))
return False
log.debug('response to event creation: %s',str(response))
return Event(response.json(),self.auth,calendar)
def update(self):
'''Updates an event that already exists in a calendar.'''
connection = Connection()
# Change URL if we use Oauth
if connection.is_valid() and connection.oauth != None:
self.update_url = self.update_url.replace("outlook.office365.com/api", "graph.microsoft.com")
elif not self.auth:
return False
if self.calendar:
calId = self.calendar.calendarId
else:
return False
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
data = json.dumps(self.json)
response = None
log.debug('update payload: %s', data)
try:
response = connection.patch_data(self.update_url.format(self.json['id']),data,headers=headers,auth=self.auth,verify=self.verify)
log.debug('sending patch request now')
except Exception as e:
if response:
log.debug('response to event creation: %s',str(response))
else:
log.error('No response, something is very wrong with update: %s',str(e))
return False
log.debug('response to event creation: %s',str(response))
return Event(response.json(), self.auth)
def delete(self):
'''
Deletes an event from the calendar it is in.
But leaves you this handle. You could then change the calendar and transfer the event to
that new calendar. You know, if that's your thing.
'''
connection = Connection()
# Change URL if we use Oauth
if connection.is_valid() and connection.oauth != None:
self.delete_url = self.delete_url.replace("outlook.office365.com/api", "graph.microsoft.com")
elif not self.auth:
return False
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
response = None
try:
log.debug('sending delete request')
response = connection.delete_data(self.delete_url.format(self.json['id']),headers=headers,auth=self.auth,verify=self.verify)
except Exception as e:
if response:
log.debug('response to deletion: %s',str(response))
else:
log.error('No response, something is very wrong with delete: %s',str(e))
return False
return response
def toJson(self):
'''
Creates a JSON representation of the calendar event.
oh. uh. I mean it simply returns the json representation that has always been in self.json.
'''
return self.json
def fullcalendarioJson(self):
'''
returns a form of the event suitable for the vehicle booking system here.
oh the joys of having a library to yourself!
'''
ret = {}
ret['title'] = self.json['subject']
ret['driver'] = self.json['organizer']['emailAddress']['name']
ret['driverEmail'] = self.json['organizer']['emailAddress']['address']
ret['start'] = self.json['start']
ret['end'] = self.json['end']
ret['isAllDay'] = self.json['isAllDay']
return ret
def getSubject(self):
'''Gets event subject line.'''
return self.json['subject']
def getBody(self):
'''Gets event body content.'''
return self.json['body']['content']
def getStart(self):
'''Gets event start struct_time'''
if 'Z' in self.json['start']:
return time.strptime(self.json['start'], self.time_string+'Z')
else:
return time.strptime(self.json['start']["dateTime"].split('.')[0], self.time_string)
def getEnd(self):
'''Gets event end struct_time'''
if 'Z' in self.json['end']:
return time.strptime(self.json['end'], self.time_string+'Z')
else:
return time.strptime(self.json['end']["dateTime"].split('.')[0], self.time_string)
def getAttendees(self):
'''Gets list of event attendees.'''
return self.json['attendees']
def getReminder(self):
'''Gets the reminder's state.'''
return self.json['isReminderOn']
def getCategories(self):
'''Gets the list of categories for this event'''
return self.json['categories']
def setSubject(self,val):
'''sets event subject line.'''
self.json['subject'] = val
def setBody(self,val,contentType='Text'):
'''
sets event body content:
Examples for ContentType could be 'Text' or 'HTML'
'''
cont = False
while not cont:
try:
self.json['body']['content'] = val
self.json['body']['contentType'] = contentType
cont = True
except:
self.json['body'] = {}
def setStart(self,val):
'''
sets event start time.
Argument:
        val - this argument can be passed in three different ways. You can pass it in as an int
            or float, in which case the assumption is that it's seconds since the Unix Epoch. You can
            pass it in as a struct_time. Or you can pass in a string. The string must be formatted
            in the json style, which is %Y-%m-%dT%H:%M:%S. If you stray from that in your string
            you will break the library.
'''
        if isinstance(val,time.struct_time):
            self.json['start'] = {"dateTime":time.strftime(self.time_string,val), "timeZone": self.startTimeZone}
        elif isinstance(val,(int,float)):
            self.json['start'] = {"dateTime":time.strftime(self.time_string,time.gmtime(val)), "timeZone": self.startTimeZone}
        else:
            #this last one assumes you know how to format the time string. if it breaks, check
            #your time string!
            self.json['start'] = val
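    # Illustrative sketch (editor's note, not part of the library): the three
    # accepted forms for setStart, mirroring the docstring above; `event` is a
    # hypothetical Event instance.
    #     event.setStart(time.gmtime())            # struct_time
    #     event.setStart(1700000000)               # int/float seconds since the Unix Epoch
    #     event.setStart('2017-08-10T14:30:00')    # pre-formatted %Y-%m-%dT%H:%M:%S string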
def setEnd(self,val):
'''
sets event end time.
Argument:
        val - this argument can be passed in three different ways. You can pass it in as an int
            or float, in which case the assumption is that it's seconds since the Unix Epoch. You can
            pass it in as a struct_time. Or you can pass in a string. The string must be formatted
            in the json style, which is %Y-%m-%dT%H:%M:%S. If you stray from that in your string
            you will break the library.
'''
        if isinstance(val,time.struct_time):
            self.json['end'] = {"dateTime":time.strftime(self.time_string,val), "timeZone": self.endTimeZone}
        elif isinstance(val,(int,float)):
            self.json['end'] = {"dateTime":time.strftime(self.time_string,time.gmtime(val)), "timeZone": self.endTimeZone}
        else:
            #this last one assumes you know how to format the time string. if it breaks, check
            #your time string!
            self.json['end'] = val
def setAttendees(self,val):
'''
set the attendee list.
        val: the one argument this method takes can be very flexible. You can send:
            a dictionary: this must be a dictionary formatted as such:
                {"EmailAddress":{"Address":"recipient@example.com"}}
                with other options such as "Name" alongside the address, but at minimum it must have this.
            a list: this must be a list of dictionaries formatted the way specified above,
                or it can be a list of objects of type Contact. The method will sort
                out the dictionaries from the contacts.
            a string: use this if you just want to pass a single email address.
a contact: type Contact from this library.
For each of these argument types the appropriate action will be taken to fit them to the
needs of the library.
'''
self.json['attendees'] = []
if isinstance(val,list):
self.json['attendees'] = val
elif isinstance(val,dict):
self.json['attendees'] = [val]
elif isinstance(val,str):
if '@' in val:
self.addAttendee(val)
elif isinstance(val,Contact):
self.addAttendee(val)
elif isinstance(val,Group):
self.addAttendee(val)
else:
return False
return True
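    # Illustrative sketch (editor's note): the argument shapes accepted above,
    # with hypothetical values.
    #     event.setAttendees('alice@example.com')       # a single plain address
    #     event.setAttendees([contact_a, contact_b])    # a list of Contact objects
    #     event.setAttendees(team_group)                # a Group from this library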
def setStartTimeZone(self,val):
'''sets event start timezone'''
self.startTimeZone = val
self.json['start']["startTimeZone"] = val
def setEndTimeZone(self,val):
'''sets event end timezone'''
self.endTimeZone = val
self.json['end']["endTimeZone"] = val
def addAttendee(self,address,name=None):
'''
Adds a recipient to the attendee list.
Arguments:
address -- the email address of the person you are sending to. <<< Important that.
Address can also be of type Contact or type Group.
name -- the name of the person you are sending to. mostly just a decorator. If you
send an email address for the address arg, this will give you the ability
                to set the name properly, otherwise it uses the email address up to the
at sign for the name. But if you send a type Contact or type Group, this
argument is completely ignored.
'''
if isinstance(address,Contact):
self.json['attendees'].append(address.getFirstEmailAddress())
elif isinstance(address,Group):
            for con in address.contacts:
                self.json['attendees'].append(con.getFirstEmailAddress())
else:
if name is None:
name = address[:address.index('@')]
self.json['attendees'].append({'emailAddress':{'address':address,'name':name}})
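    # Illustrative sketch (editor's note): adding one more attendee with an
    # explicit display name (hypothetical values).
    #     event.addAttendee('bob@example.com', name='Bob')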
def setLocation(self,loc):
'''
Sets the event's location.
Arguments:
            loc -- two options, you can send a dictionary in the format described here:
https://msdn.microsoft.com/en-us/office/office365/api/complex-types-for-mail-contacts-calendar#LocationBeta
this will allow you to set address, coordinates, displayname, location email
address, location uri, or any combination of the above. If you don't need that much
detail you can simply send a string and it will be set as the locations display
name. If you send something not a string or a dict, it will try to cast whatever
you send into a string and set that as the display name.
'''
        if 'location' not in self.json:
            self.json['location'] = {"address":None}
if isinstance(loc,dict):
self.json['location'] = loc
else:
self.json['location'] = {'displayName':str(loc)}
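    # Illustrative sketch (editor's note): either form works; the values here are
    # hypothetical, and anything else is cast to str and used as the display name.
    #     event.setLocation('Conference Room 12')
    #     event.setLocation({'displayName': 'HQ', 'address': {'city': 'Stockholm'}})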
def getLocation(self):
'''
Get the current location, if one is set.
'''
if 'location' in self.json:
return self.json['location']
return None
def setReminder(self,val):
'''
Sets the event's reminder.
Argument:
val -- a boolean
'''
        if isinstance(val, bool):
self.json['isReminderOn'] = val
def setCategories(self,cats):
'''
Sets the event's categories.
Argument:
cats -- a list of categories
'''
if isinstance(cats, (list, tuple)):
self.json['categories'] = cats
#To the King!
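# --- Editor's addition: an illustrative, minimal round trip that only uses the
# methods defined above. It assumes you already hold a valid Event instance
# (`event`) and an authenticated Calendar (`calendar`) from this library; the
# values are placeholders and this helper is never called automatically.
def _example_event_round_trip(event, calendar):
    event.setSubject('Team sync')
    event.setBody('Agenda to follow', contentType='Text')
    event.setStart(time.gmtime())
    event.setEnd(time.time() + 3600)
    event.setAttendees('alice@example.com')
    created = event.create(calendar)  # per create(): keep using the returned copy
    if not created:
        return False
    created.setSubject('Team sync (rescheduled)')
    created.update()
    created.delete()
    return True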
| 32.940397 | 131 | 0.704798 |
4a1a73927d9a85a5378529c7edd1c4a37f95ee68
| 695 |
py
|
Python
|
backend/tests/factories.py
|
DSBUGAY2/zcash-grant-system
|
729b9edda13bd1eeb3f445d889264230c6470d7e
|
[
"MIT"
] | 8 |
2019-06-03T16:29:49.000Z
|
2021-05-11T20:38:36.000Z
|
backend/tests/factories.py
|
DSBUGAY2/zcash-grant-system
|
729b9edda13bd1eeb3f445d889264230c6470d7e
|
[
"MIT"
] | 342 |
2019-01-15T19:13:58.000Z
|
2020-03-24T16:38:13.000Z
|
backend/tests/factories.py
|
DSBUGAY2/zcash-grant-system
|
729b9edda13bd1eeb3f445d889264230c6470d7e
|
[
"MIT"
] | 5 |
2019-02-15T09:06:47.000Z
|
2022-01-24T21:38:41.000Z
|
# -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from grant.app import db
class BaseFactory(SQLAlchemyModelFactory):
"""Base factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
"""User factory."""
username = Sequence(lambda n: 'user{0}'.format(n))
email = Sequence(lambda n: 'user{0}@example.com'.format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
"""Factory configuration."""
| 23.965517 | 66 | 0.67482 |
4a1a73eb6f2a2f6181f925554b845a9a4c5caff6
| 5,744 |
py
|
Python
|
google/ads/googleads/v9/resources/types/campaign_simulation.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/resources/types/campaign_simulation.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/resources/types/campaign_simulation.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.common.types import simulation
from google.ads.googleads.v9.enums.types import simulation_modification_method
from google.ads.googleads.v9.enums.types import simulation_type
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"CampaignSimulation",},
)
class CampaignSimulation(proto.Message):
r"""A campaign simulation. Supported combinations of advertising channel
    type, simulation type and simulation modification method are detailed
below respectively.
SEARCH - CPC_BID - UNIFORM SEARCH - CPC_BID - SCALING SEARCH -
TARGET_CPA - UNIFORM SEARCH - TARGET_CPA - SCALING SEARCH -
TARGET_ROAS - UNIFORM SEARCH - TARGET_IMPRESSION_SHARE - UNIFORM
SEARCH - BUDGET - UNIFORM SHOPPING - BUDGET - UNIFORM SHOPPING -
TARGET_ROAS - UNIFORM MULTIPLE - TARGET_CPA - UNIFORM
OWNED_AND_OPERATED - TARGET_CPA - DEFAULT DISPLAY - TARGET_CPA -
UNIFORM
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
resource_name (str):
Output only. The resource name of the campaign simulation.
Campaign simulation resource names have the form:
``customers/{customer_id}/campaignSimulations/{campaign_id}~{type}~{modification_method}~{start_date}~{end_date}``
campaign_id (int):
Output only. Campaign id of the simulation.
type_ (google.ads.googleads.v9.enums.types.SimulationTypeEnum.SimulationType):
Output only. The field that the simulation
modifies.
modification_method (google.ads.googleads.v9.enums.types.SimulationModificationMethodEnum.SimulationModificationMethod):
Output only. How the simulation modifies the
field.
start_date (str):
Output only. First day on which the
simulation is based, in YYYY-MM-DD format.
end_date (str):
Output only. Last day on which the simulation
is based, in YYYY-MM-DD format
cpc_bid_point_list (google.ads.googleads.v9.common.types.CpcBidSimulationPointList):
Output only. Simulation points if the simulation type is
CPC_BID.
This field is a member of `oneof`_ ``point_list``.
target_cpa_point_list (google.ads.googleads.v9.common.types.TargetCpaSimulationPointList):
Output only. Simulation points if the simulation type is
TARGET_CPA.
This field is a member of `oneof`_ ``point_list``.
target_roas_point_list (google.ads.googleads.v9.common.types.TargetRoasSimulationPointList):
Output only. Simulation points if the simulation type is
TARGET_ROAS.
This field is a member of `oneof`_ ``point_list``.
target_impression_share_point_list (google.ads.googleads.v9.common.types.TargetImpressionShareSimulationPointList):
Output only. Simulation points if the simulation type is
TARGET_IMPRESSION_SHARE.
This field is a member of `oneof`_ ``point_list``.
budget_point_list (google.ads.googleads.v9.common.types.BudgetSimulationPointList):
Output only. Simulation points if the
simulation type is BUDGET.
This field is a member of `oneof`_ ``point_list``.
"""
resource_name = proto.Field(proto.STRING, number=1,)
campaign_id = proto.Field(proto.INT64, number=2,)
type_ = proto.Field(
proto.ENUM,
number=3,
enum=simulation_type.SimulationTypeEnum.SimulationType,
)
modification_method = proto.Field(
proto.ENUM,
number=4,
enum=simulation_modification_method.SimulationModificationMethodEnum.SimulationModificationMethod,
)
start_date = proto.Field(proto.STRING, number=5,)
end_date = proto.Field(proto.STRING, number=6,)
cpc_bid_point_list = proto.Field(
proto.MESSAGE,
number=7,
oneof="point_list",
message=simulation.CpcBidSimulationPointList,
)
target_cpa_point_list = proto.Field(
proto.MESSAGE,
number=8,
oneof="point_list",
message=simulation.TargetCpaSimulationPointList,
)
target_roas_point_list = proto.Field(
proto.MESSAGE,
number=9,
oneof="point_list",
message=simulation.TargetRoasSimulationPointList,
)
target_impression_share_point_list = proto.Field(
proto.MESSAGE,
number=10,
oneof="point_list",
message=simulation.TargetImpressionShareSimulationPointList,
)
budget_point_list = proto.Field(
proto.MESSAGE,
number=11,
oneof="point_list",
message=simulation.BudgetSimulationPointList,
)
__all__ = tuple(sorted(__protobuf__.manifest))
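# Editor's note, an illustrative sketch (values are made up): scalar fields can be
# passed as keyword arguments, e.g.
#     sim = CampaignSimulation(campaign_id=123, start_date="2020-01-01", end_date="2020-01-31")
# while the *_point_list fields belong to the `point_list` oneof described in the
# docstring above, so populating one of them clears any other member already set.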
| 39.888889 | 128 | 0.696205 |
4a1a747df51a0428c835594168f000e0b51798ca
| 9,271 |
py
|
Python
|
pypureclient/flashblade/FB_2_0/models/file_system_performance.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14 |
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flashblade/FB_2_0/models/file_system_performance.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28 |
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flashblade/FB_2_0/models/file_system_performance.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15 |
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashBlade REST API Client
A lightweight client for FlashBlade REST API 2.0, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_0 import models
class FileSystemPerformance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'bytes_per_op': 'float',
'bytes_per_read': 'float',
'bytes_per_write': 'float',
'others_per_sec': 'float',
'read_bytes_per_sec': 'float',
'reads_per_sec': 'float',
'time': 'int',
'usec_per_other_op': 'float',
'usec_per_read_op': 'float',
'usec_per_write_op': 'float',
'write_bytes_per_sec': 'float',
'writes_per_sec': 'float'
}
attribute_map = {
'name': 'name',
'id': 'id',
'bytes_per_op': 'bytes_per_op',
'bytes_per_read': 'bytes_per_read',
'bytes_per_write': 'bytes_per_write',
'others_per_sec': 'others_per_sec',
'read_bytes_per_sec': 'read_bytes_per_sec',
'reads_per_sec': 'reads_per_sec',
'time': 'time',
'usec_per_other_op': 'usec_per_other_op',
'usec_per_read_op': 'usec_per_read_op',
'usec_per_write_op': 'usec_per_write_op',
'write_bytes_per_sec': 'write_bytes_per_sec',
'writes_per_sec': 'writes_per_sec'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
id=None, # type: str
bytes_per_op=None, # type: float
bytes_per_read=None, # type: float
bytes_per_write=None, # type: float
others_per_sec=None, # type: float
read_bytes_per_sec=None, # type: float
reads_per_sec=None, # type: float
time=None, # type: int
usec_per_other_op=None, # type: float
usec_per_read_op=None, # type: float
usec_per_write_op=None, # type: float
write_bytes_per_sec=None, # type: float
writes_per_sec=None, # type: float
):
"""
Keyword args:
name (str): Name of the object (e.g., a file system or snapshot).
id (str): A non-modifiable, globally unique ID chosen by the system.
bytes_per_op (float): Average operation size (read bytes+write bytes/read ops+write ops).
bytes_per_read (float): Average read size in bytes per read operation.
bytes_per_write (float): Average write size in bytes per write operation.
others_per_sec (float): Other operations processed per second.
read_bytes_per_sec (float): Bytes read per second.
reads_per_sec (float): Read requests processed per second.
time (int): Sample time in milliseconds since UNIX epoch.
usec_per_other_op (float): Average time, measured in microseconds, it takes the array to process other operations.
usec_per_read_op (float): Average time, measured in microseconds, it takes the array to process a read request.
usec_per_write_op (float): Average time, measured in microseconds, it takes the array to process a write request.
write_bytes_per_sec (float): Bytes written per second.
writes_per_sec (float): Write requests processed per second.
"""
if name is not None:
self.name = name
if id is not None:
self.id = id
if bytes_per_op is not None:
self.bytes_per_op = bytes_per_op
if bytes_per_read is not None:
self.bytes_per_read = bytes_per_read
if bytes_per_write is not None:
self.bytes_per_write = bytes_per_write
if others_per_sec is not None:
self.others_per_sec = others_per_sec
if read_bytes_per_sec is not None:
self.read_bytes_per_sec = read_bytes_per_sec
if reads_per_sec is not None:
self.reads_per_sec = reads_per_sec
if time is not None:
self.time = time
if usec_per_other_op is not None:
self.usec_per_other_op = usec_per_other_op
if usec_per_read_op is not None:
self.usec_per_read_op = usec_per_read_op
if usec_per_write_op is not None:
self.usec_per_write_op = usec_per_write_op
if write_bytes_per_sec is not None:
self.write_bytes_per_sec = write_bytes_per_sec
if writes_per_sec is not None:
self.writes_per_sec = writes_per_sec
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `FileSystemPerformance`".format(key))
if key == "bytes_per_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0.0`")
if key == "bytes_per_read" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0.0`")
if key == "bytes_per_write" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0.0`")
if key == "others_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `others_per_sec`, must be a value greater than or equal to `0.0`")
if key == "read_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0.0`")
if key == "reads_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0.0`")
if key == "usec_per_other_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_other_op`, must be a value greater than or equal to `0.0`")
if key == "usec_per_read_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0.0`")
if key == "usec_per_write_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0.0`")
if key == "write_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0.0`")
if key == "writes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FileSystemPerformance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileSystemPerformance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
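# Editor's addition: an illustrative sketch that only uses keyword arguments
# defined above; the numbers are made up. Validation in __setattr__ rejects
# unknown keys and negative values, so this constructs cleanly.
def _example_file_system_performance():
    sample = FileSystemPerformance(
        name='fs01',
        reads_per_sec=1200.0,
        writes_per_sec=300.0,
        usec_per_read_op=450.0,
    )
    return sample.to_dict()  # plain dict of only the attributes that were set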
| 41.573991 | 126 | 0.596807 |
4a1a74b966d0956a3bebe66b65a241795398078f
| 273 |
py
|
Python
|
probability_combinatorics/distribute_stuff.py
|
codecakes/random_games
|
1e670021ec97a196726e937e658878dc63ba9d34
|
[
"MIT"
] | null | null | null |
probability_combinatorics/distribute_stuff.py
|
codecakes/random_games
|
1e670021ec97a196726e937e658878dc63ba9d34
|
[
"MIT"
] | null | null | null |
probability_combinatorics/distribute_stuff.py
|
codecakes/random_games
|
1e670021ec97a196726e937e658878dc63ba9d34
|
[
"MIT"
] | null | null | null |
def distribute_something(count, people):
    """
    Given count = X things, deal them out one at a time (round-robin)
    among people = N and return the resulting list of shares.
    """
    l = [0] * people
    while count > 0:
        for i in range(len(l)):
            l[i] += 1
            count -= 1
            if count == 0:
                return l
    return l
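# Editor's addition: quick illustrative checks of the round-robin behaviour.
if __name__ == "__main__":
    assert distribute_something(7, 3) == [3, 2, 2]
    assert distribute_something(3, 3) == [1, 1, 1]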
| 22.75 | 40 | 0.490842 |
4a1a753458e4de149ecb045549c7dfb81ef4d62c
| 3,763 |
py
|
Python
|
tests/test_euklid_vector.py
|
airgproducts/euklid_rd
|
3222cfeca8a9216d1a6bfc5c41606fb0801192d0
|
[
"MIT"
] | null | null | null |
tests/test_euklid_vector.py
|
airgproducts/euklid_rd
|
3222cfeca8a9216d1a6bfc5c41606fb0801192d0
|
[
"MIT"
] | 15 |
2022-01-07T16:42:39.000Z
|
2022-03-01T18:13:22.000Z
|
tests/test_euklid_vector.py
|
airgproducts/euklid_rs
|
3222cfeca8a9216d1a6bfc5c41606fb0801192d0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
"""Unittest for euklid_rs.vector against euklid.vector"""
import math
import unittest
import euklid
import euklid_rs
class TestVectorFunctions(unittest.TestCase):
"""Test euklid_rs.vector against euklid.vector"""
def setUp(self) -> None:
# Vector2D
self.c_p2d_1 = euklid.vector.Vector2D([2, 3])
self.c_p2d_2 = euklid.vector.Vector2D([-4, -3])
self.r_p2d_1 = euklid_rs.vector.Vector2D([2, 3])
self.r_p2d_2 = euklid_rs.vector.Vector2D([-4, -3])
# Vector3D
self.c_p3d_1 = euklid.vector.Vector3D([2, 3, 4])
self.c_p3d_2 = euklid.vector.Vector3D([-4, -3, -2])
self.r_p3d_1 = euklid_rs.vector.Vector3D([2, 3, 4])
self.r_p3d_2 = euklid_rs.vector.Vector3D([-4, -3, -2])
def test_angle(self):
"""test_angle comparision"""
assert self.r_p2d_1.angle() == self.c_p2d_1.angle()
def test_cross(self):
"""test_cross comparision"""
assert self.r_p2d_1.cross(self.r_p2d_2) == self.c_p2d_1.cross(self.c_p2d_2)
assert str(self.r_p3d_1.cross(self.r_p3d_2)) == str(
self.c_p3d_1.cross(self.c_p3d_2)
)
def test_copy(self):
"""test_copy comparision"""
assert str(self.r_p2d_1.copy()) == str(self.c_p2d_1.copy())
assert str(self.r_p3d_1.copy()) == str(self.c_p3d_1.copy())
def test_dot(self):
"""test_dot comparision"""
assert self.r_p2d_1.dot(self.r_p2d_2) == self.c_p2d_1.dot(self.c_p2d_2)
assert self.r_p3d_1.dot(self.r_p3d_2) == self.c_p3d_1.dot(self.c_p3d_2)
def test_length(self):
"""test_length comparision"""
assert self.r_p2d_1.length() == self.c_p2d_1.length()
assert self.r_p3d_1.length() == self.c_p3d_1.length()
def test_normalized(self):
"""test_normalized comparision"""
assert str(self.r_p2d_1.normalized()) == str(self.c_p2d_1.normalized())
assert str(self.r_p3d_1.normalized()) == str(self.c_p3d_1.normalized())
def test__repr__(self):
"""test__repr__ comparision"""
assert str(self.r_p2d_1) == str(self.c_p2d_1)
assert str(self.r_p3d_1) == str(self.c_p3d_1)
class TestVectorTransformFunctions(unittest.TestCase):
"""Test euklid_rs.vector.Transformation against euklid.vector.Transformation"""
def setUp(self) -> None:
self.c_p3d_1 = euklid.vector.Vector3D([3, 4, 5])
self.c_p3d_2 = euklid.vector.Vector3D([-1, -2, -3])
self.r_p3d_1 = euklid_rs.vector.Vector3D([3, 4, 5])
self.r_p3d_2 = euklid_rs.vector.Vector3D([-1, -2, -3])
def test_translation(self):
"""test_translation comparision"""
        expected = euklid.vector.Transformation.translation(self.c_p3d_1).apply(
self.c_p3d_2
)
result = euklid_rs.vector.Transformation.translation(self.r_p3d_1).apply(
self.r_p3d_2
)
        assert str(result) == str(expected)
def test_rotation(self):
"""test_rotation comparision"""
c_axis = euklid.vector.Vector3D([1, 1, 0])
c_rotation = euklid.vector.Transformation.rotation(math.pi, c_axis).apply(
self.c_p3d_1
)
r_axis = euklid_rs.vector.Vector3D([1, 1, 0])
r_rotation = euklid_rs.vector.Transformation.rotation(math.pi, r_axis).apply(
self.r_p3d_1
)
assert str(c_rotation) == str(r_rotation)
def test_scale(self):
"""test_scale comparision"""
        expected = euklid.vector.Transformation.scale(0.5).apply(self.c_p3d_1).length()
        result = euklid_rs.vector.Transformation.scale(0.5).apply(self.r_p3d_1).length()
        assert result == expected
if __name__ == "__main__":
unittest.main(exit=False)
| 35.838095 | 88 | 0.639383 |
4a1a757ec031f7126b0bba916e35fc7bcca487f4
| 12,770 |
py
|
Python
|
fairseq/tasks/translation_multi_simple_epoch.py
|
cece95/fairseq
|
92f27771b4cec979d4e7c1dc47b63d40d8220823
|
[
"MIT"
] | null | null | null |
fairseq/tasks/translation_multi_simple_epoch.py
|
cece95/fairseq
|
92f27771b4cec979d4e7c1dc47b63d40d8220823
|
[
"MIT"
] | null | null | null |
fairseq/tasks/translation_multi_simple_epoch.py
|
cece95/fairseq
|
92f27771b4cec979d4e7c1dc47b63d40d8220823
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import datetime
import time
import torch
from fairseq.data import (
data_utils,
FairseqDataset,
iterators,
LanguagePairDataset,
ListDataset,
)
from fairseq.tasks import FairseqTask, register_task
from fairseq.data.multilingual.sampling_method import SamplingMethod
from fairseq.data.multilingual.multilingual_data_manager import MultilingualDatasetManager
###
def get_time_gap(s, e):
return (datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)).__str__()
###
logger = logging.getLogger(__name__)
@register_task('translation_multi_simple_epoch')
class TranslationMultiSimpleEpochTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
langs (List[str]): a list of languages that are being supported
dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries
training (bool): whether the task should be configured for training or not
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='inference source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='inference target language')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
SamplingMethod.add_arguments(parser)
MultilingualDatasetManager.add_args(parser)
# fmt: on
def __init__(self, args, langs, dicts, training):
super().__init__(args)
self.langs = langs
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
# eval_lang_pairs for multilingual translation is usually all of the
# lang_pairs. However for other multitask settings or when we want to
# optimize for certain languages we want to use a different subset. Thus
# the eval_lang_pairs class variable is provided for classes that extend
# this class.
self.eval_lang_pairs = self.lang_pairs
# model_lang_pairs will be used to build encoder-decoder model pairs in
# models.build_model(). This allows multitask type of sub-class can
# build models other than the input lang_pairs
self.model_lang_pairs = self.lang_pairs
self.sampling_method = SamplingMethod.build_sampler(args, self)
self.data_manager = MultilingualDatasetManager.setup_data_manager(
args, self.lang_pairs, langs, dicts, self.sampling_method)
@classmethod
def setup_task(cls, args, **kwargs):
langs, dicts, training = MultilingualDatasetManager.prepare(
cls.load_dictionary, args, **kwargs
)
return cls(args, langs, dicts, training)
def has_sharded_data(self, split):
return self.data_manager.has_sharded_data(split)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split in self.datasets:
dataset = self.datasets[split]
if self.has_sharded_data(split) and dataset.load_next_shard:
shard_epoch = dataset.shard_epoch
else:
# no need to load next shard so skip loading
# also this avoid always loading from beginning of the data
return
else:
shard_epoch = None
logger.info(f'loading data for {split} epoch={epoch}/{shard_epoch}')
self.datasets[split] = self.data_manager.load_sampled_multi_epoch_dataset(
split,
self.training,
epoch=epoch, combine=combine, shard_epoch=shard_epoch, **kwargs
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
src_data = ListDataset(src_tokens, src_lengths)
dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
src_langtok_spec, tgt_langtok_spec = self.args.langtoks['main']
if self.args.lang_tok_replacing_bos_eos:
dataset = self.data_manager.alter_dataset_langtok(
dataset,
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
dataset.src = self.data_manager.src_dataset_tranform_func(
self.args.source_lang,
self.args.target_lang,
dataset=dataset.src,
spec=src_langtok_spec,
)
return dataset
def build_model(self, args):
return super().build_model(args)
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
_, tgt_langtok_spec = self.args.langtoks['main']
if not self.args.lang_tok_replacing_bos_eos:
if prefix_tokens is None and tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.size(0)
prefix_tokens = torch.LongTensor(
[[tgt_lang_tok]]
).expand(bsz, 1).to(src_tokens)
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
)
else:
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
bos_token=self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
if tgt_langtok_spec else self.target_dictionary.eos(),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.source_lang]
@property
def target_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.target_lang]
def create_batch_sampler_func(
self, max_positions, ignore_invalid_inputs,
max_tokens, max_sentences
):
def construct_batch_sampler(
dataset, epoch
):
splits = [s for s, _ in self.datasets.items() if self.datasets[s] == dataset]
split = splits[0] if len(splits) > 0 else None
if epoch is not None:
dataset.set_epoch(epoch)
start_time = time.time()
# get indices ordered by example size
indices = dataset.ordered_indices()
logger.debug(f'[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}')
# filter examples that are too large
if max_positions is not None:
my_time = time.time()
indices = data_utils.filter_by_size(
indices, dataset, max_positions, raise_exception=(not ignore_invalid_inputs),
)
logger.debug(f'[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}')
# create mini-batches with given size constraints
my_time = time.time()
batch_sampler = data_utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
)
logger.debug(f'[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}')
logger.debug(f'[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}')
return batch_sampler
return construct_batch_sampler
# we need to override get_batch_iterator because we want to reset the epoch iterator each time
def get_batch_iterator(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1,
seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
                (default: 1).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# initialize the dataset with the correct starting epoch
assert isinstance(dataset, FairseqDataset)
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
if (
self.args.sampling_method == 'RoundRobin'
):
batch_iter = super().get_batch_iterator(
dataset, max_tokens=max_tokens, max_sentences=max_sentences, max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs, required_batch_size_multiple=required_batch_size_multiple,
seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch,
)
self.dataset_to_epoch_iter[dataset] = batch_iter
return batch_iter
construct_batch_sampler = self.create_batch_sampler_func(
max_positions, ignore_invalid_inputs,
max_tokens, max_sentences)
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=construct_batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
)
return epoch_iter
| 42.006579 | 119 | 0.626703 |
4a1a7722274bfd7b333b96412ddc86e49d966dd0
| 208 |
py
|
Python
|
spockpy/config/__init__.py
|
gavindsouza/spockpy
|
8664550d46be088a5ef8439220353ba4c33893f8
|
[
"Apache-2.0"
] | 58 |
2017-03-25T05:52:23.000Z
|
2021-09-11T08:16:14.000Z
|
spockpy/config/__init__.py
|
gavindsouza/spockpy
|
8664550d46be088a5ef8439220353ba4c33893f8
|
[
"Apache-2.0"
] | 4 |
2017-08-16T13:51:57.000Z
|
2018-06-12T08:09:17.000Z
|
spockpy/config/__init__.py
|
gavindsouza/spockpy
|
8664550d46be088a5ef8439220353ba4c33893f8
|
[
"Apache-2.0"
] | 18 |
2017-03-26T18:26:21.000Z
|
2021-03-25T23:08:17.000Z
|
# module - spockpy.config
# imports - compatibility imports
from __future__ import absolute_import
# imports - module imports
from spockpy.config.base import Config
from spockpy.config.app import AppConfig
| 26 | 41 | 0.8125 |
4a1a774be5fc53ca407b375c0783fe70fdf2e193
| 586 |
py
|
Python
|
hackerrank/30 Days of Code/Day 0 - Hello, World/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4 |
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerrank/30 Days of Code/Day 0 - Hello, World/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerrank/30 Days of Code/Day 0 - Hello, World/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', return_value='Welcome to 30 Days of Code!')
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'Hello, World.' + '\n' +
'Welcome to 30 Days of Code!' + '\n'
)
if __name__ == '__main__':
unittest.main()
| 27.904762 | 72 | 0.595563 |
4a1a78c94f8f49f22753e98aceaf4ca83f82ceae
| 1,806 |
py
|
Python
|
Python/code.py
|
snehalovhal/greyatom-python-for-data-science
|
54da11b6ab64ecc6556249554ebe3f03f3d73aad
|
[
"MIT"
] | null | null | null |
Python/code.py
|
snehalovhal/greyatom-python-for-data-science
|
54da11b6ab64ecc6556249554ebe3f03f3d73aad
|
[
"MIT"
] | null | null | null |
Python/code.py
|
snehalovhal/greyatom-python-for-data-science
|
54da11b6ab64ecc6556249554ebe3f03f3d73aad
|
[
"MIT"
] | null | null | null |
# --------------
# Code starts here
# Create the lists
class_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']
# Concatenate both the strings
new_class = class_1 + class_2
print(new_class)
# Append the list
new_class.append('Peter Warden')
# Print updated list
print(new_class)
# Remove the element from the list
new_class.remove('Carla Gentry')
# Print the list
print(new_class)
# Create the Dictionary
courses = {'Math':65, 'English':70, 'History':80, 'French':70, 'Science':60}
# Look up the dict and store each subject's marks in its own variable
math = courses['Math']
english = courses['English']
history = courses['History']
french = courses['French']
science = courses['Science']
# Store the sum of all the subjects in one variable `total`
total = math + english + history + french + science
# Print the total
print(total)
# Insert percentage formula
percentage = total * 100/500
# Print the percentage
print(percentage)
# Create the Dictionary
mathematics = {'Geoffrey Hinton':78, 'Andrew Ng':95, 'Sebastian Raschka':65, 'Yoshua Bengio':50, 'Hilary Mason':70, 'Corinna Cortes':66, 'Peter Warden':75}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Given string
topper = 'andrew ng'
topper.split(' ')
print(topper)
# Create variable first_name
first_name = (topper.split()[0])
print(first_name)
# Create variable Last_name and store last two element in the list
last_name = (topper.split()[1])
print(last_name)
# Concatenate the string
full_name = last_name + ' ' + first_name
# print the full_name
print(full_name)
# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| 24.405405 | 156 | 0.700443 |
4a1a78f6884ca6f5e27b39090583d0be9d780925
| 3,313 |
py
|
Python
|
src/niweb/apps/noclook/tests/test_views.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 2 |
2018-12-21T09:35:27.000Z
|
2019-07-31T18:51:58.000Z
|
src/niweb/apps/noclook/tests/test_views.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 6 |
2019-07-25T07:10:23.000Z
|
2021-02-08T09:58:57.000Z
|
src/niweb/apps/noclook/tests/test_views.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 5 |
2019-02-06T12:00:26.000Z
|
2021-11-19T14:48:06.000Z
|
from .neo4j_base import NeoTestCase
from apps.noclook.helpers import set_user, set_noclook_auto_manage
from apps.noclook import forms
from django.urls import reverse
class ViewTest(NeoTestCase):
"""
    Exercises the view files by running at least one of the views in them.
"""
def test_router_list_view(self):
router1 = self.create_node('awesome-router.test.dev', 'router')
router2 = self.create_node('fine.test.dev', 'router')
router3 = self.create_node('different-router.test.dev', 'router')
resp = self.client.get('/router/')
self.assertContains(resp, router1.node_name)
self.assertContains(resp, router2.node_name)
self.assertContains(resp, router3.node_name)
table_rows = resp.context['table'].rows
self.assertEqual(table_rows[0].cols[0].get('handle_id'), router1.handle_id)
self.assertEqual(table_rows[2].cols[0].get('handle_id'), router2.handle_id)
self.assertEqual(table_rows[1].cols[0].get('handle_id'), router3.handle_id)
def test_host_detail_view(self):
host = self.create_node('sweet-host.nordu.net', 'host')
resp = self.client.get(reverse('detail_host', args=[host.handle_id]))
self.assertContains(resp, host.node_name)
self.assertEqual(resp.context['node_handle'].handle_id, host.handle_id)
def test_router_edit_view(self):
router = self.create_node('awesome-router.test.dev', 'router')
resp = self.client.get(reverse('generic_edit', args=['router', router.handle_id]))
self.assertContains(resp, router.node_name)
self.assertEqual(resp.context['node_handle'].handle_id, router.handle_id)
self.assertIsInstance(resp.context['form'], forms.EditRouterForm)
def test_debug_view(self):
something = self.create_node('fancy.test.dev', 'magic-device')
resp = self.client.get(reverse('debug', args=[something.handle_id]))
self.assertContains(resp, something.node_name)
self.assertEqual(resp.context['node_handle'].handle_id, something.handle_id)
def test_create_view(self):
resp = self.client.get(reverse('create_node', args=['host']))
self.assertIsInstance(resp.context['form'], forms.NewHostForm)
def test_other_view(self):
router = self.create_node('nice.test.dev', 'router')
resp = self.client.get(reverse('visualize', args=['router', router.handle_id]))
self.assertEqual(resp.context['slug'], 'router')
self.assertEqual(resp.context['node_handle'].handle_id, router.handle_id)
def test_redirect_view(self):
router = self.create_node('nice.test.dev', 'router')
resp = self.client.get(reverse('node_redirect', args=[router.handle_id]))
self.assertRedirects(resp, router.url())
def test_report_view(self):
host_user = self.create_node('AwesomeCo', 'host-user', 'Relation')
host = self.create_node('sweet-host.nordu.net', 'host', 'Logical')
host_node = host.get_node()
set_noclook_auto_manage(host_node, True)
set_user(self.user, host.get_node(), host_user.handle_id)
url = reverse('host_users_report')
resp = self.client.get(url)
self.assertContains(resp, host.node_name)
    # import nodes? it is tested separately
| 42.474359 | 90 | 0.686991 |
4a1a79033fe86d457ccf1a04fe9fdfc1cbaf33db
| 21,406 |
py
|
Python
|
tests/packagedcode/test_maven.py
|
quepop/scancode-toolkit
|
cea1d29064812e89a5d59cc3a3a39a5adb0b3e15
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1 |
2021-06-25T20:11:53.000Z
|
2021-06-25T20:11:53.000Z
|
tests/packagedcode/test_maven.py
|
quepop/scancode-toolkit
|
cea1d29064812e89a5d59cc3a3a39a5adb0b3e15
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
tests/packagedcode/test_maven.py
|
quepop/scancode-toolkit
|
cea1d29064812e89a5d59cc3a3a39a5adb0b3e15
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import io
import json
import os.path
import pytest
from commoncode import fileutils
from commoncode import text
from commoncode import testcase
from packagedcode import maven
from commoncode.resource import Codebase
class TestIsPom(testcase.FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_is_pom_non_pom(self):
test_file = self.get_test_loc('maven_misc/non-maven.pom')
assert not maven.is_pom(test_file)
def test_is_pom_maven2(self):
test_dir = self.get_test_loc('maven2')
for test_file in fileutils.resource_iter(test_dir, with_dirs=False):
if test_file.endswith('.json'):
continue
loc = os.path.join(test_dir, test_file)
assert maven.is_pom(loc), loc + ' should be a POM'
def test_is_pom_not_misc2(self):
test_file = self.get_test_loc('maven_misc/properties-section-single.xml')
assert not maven.is_pom(test_file)
def test_is_pom_m2(self):
test_dir = self.get_test_loc('m2')
for test_file in fileutils.resource_iter(test_dir, with_dirs=False):
if test_file.endswith('.json'):
continue
loc = os.path.join(test_dir, test_file)
assert maven.is_pom(loc), 'file://' + loc + ' should be a POM'
def test_is_pom_not_misc(self):
test_file = self.get_test_loc('maven_misc/properties-section.xml')
assert not maven.is_pom(test_file)
def compare_results(results, test_pom_loc, expected_json_loc, regen=False):
if regen:
with open(expected_json_loc, 'w') as ex:
json.dump(results, ex, indent=2)
with io.open(expected_json_loc, encoding='utf-8') as ex:
expected = json.load(ex)
results_dump = json.dumps(results, indent=2)
expected_dump = json.dumps(expected, indent=2)
try:
assert results_dump == expected_dump
except AssertionError:
test_pom_loc = 'file://' + test_pom_loc
expected_json_loc = 'file://' + expected_json_loc
expected = [test_pom_loc, expected_json_loc, expected_dump]
assert results_dump == '\n'.join(expected)
def parse_pom(location=None, text=None, check_is_pom=False):
"""
Return a POM mapping from the Maven POM file at location.
"""
pom = maven.get_maven_pom(location, text, check_is_pom)
if not pom:
return {}
return pom.to_dict()
class BaseMavenCase(testcase.FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def check_parse_pom(self, test_pom, regen=False):
"""
Test the parsing of POM at test_pom against an expected JSON
from the same name with a .json extension.
"""
test_pom_loc = self.get_test_loc(test_pom)
expected_json_loc = test_pom_loc + '.json'
results = parse_pom(location=test_pom_loc)
compare_results(results, test_pom_loc, expected_json_loc, regen)
def check_parse_to_package(self, test_pom, regen=False):
"""
Test the creation of a Package from a POM at test_pom against an
        expected JSON file of the same name but with a .package.json extension.
"""
test_pom_loc = self.get_test_loc(test_pom)
expected_json_loc = test_pom_loc + '.package.json'
package = maven.parse(location=test_pom_loc)
if not package:
results = {}
else:
package.license_expression = package.compute_normalized_license()
results = package.to_dict()
compare_results(results, test_pom_loc, expected_json_loc, regen)
class TestMavenMisc(BaseMavenCase):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_parse_pom_non_pom(self):
test_pom_loc = self.get_test_loc('maven_misc/non-maven.pom')
results = parse_pom(location=test_pom_loc, check_is_pom=True)
assert results == {}
self.check_parse_pom(test_pom_loc, regen=False)
def test_MavenPom_simple_creation(self):
test_loc = self.get_test_loc('maven_misc/mini-pom.xml')
pom = maven.MavenPom(test_loc)
assert pom.artifact_id == 'activemq-camel'
# note: there has been no parent resolving yet
assert pom.group_id == None
def test_pom_dependencies(self):
test_loc = self.get_test_loc('maven2/activemq-camel-pom.xml')
pom = maven.MavenPom(test_loc)
expected = [
('compile', [
(('commons-logging', 'commons-logging-api', 'latest.release'), True),
(('org.apache.camel', 'camel-jms', 'latest.release'), True),
(('${project.groupId}', 'activemq-core', 'latest.release'), True),
(('${project.groupId}', 'activemq-pool', 'latest.release'), True),
(('org.apache.geronimo.specs', 'geronimo-annotation_1.0_spec', 'latest.release'), False)
]),
('test', [
(('${project.groupId}', 'activemq-core', 'latest.release'), True),
(('org.apache.camel', 'camel-core', 'latest.release'), True),
(('org.apache.camel', 'camel-spring', 'latest.release'), True),
(('org.springframework', 'spring-test', 'latest.release'), True),
(('junit', 'junit', 'latest.release'), True),
(('org.hamcrest', 'hamcrest-all', 'latest.release'), True),
]),
]
expected = [(s, sorted(v)) for s, v in expected]
results = [(s, sorted(v)) for s, v in pom.dependencies.items()]
assert results == expected
def test_pom_issue_management_properties_are_resolved(self):
test_loc = self.get_test_loc('maven2/xml-format-maven-plugin-3.0.6.pom')
pom = maven.MavenPom(test_loc)
pom.resolve()
expected = dict([
(u'system', 'GitHub Issues'),
(u'url', 'https://github.com/acegi/xml-format-maven-plugin/issues')]
)
result = pom.issue_management
assert result == expected
def test_pom_dependencies_are_resolved(self):
test_loc = self.get_test_loc('maven2/activemq-camel-pom.xml')
pom = maven.MavenPom(test_loc)
pom.resolve()
expected = [
(u'compile', [
((u'commons-logging', u'commons-logging-api', u'latest.release'), True),
((u'org.apache.camel', u'camel-jms', u'latest.release'), True),
((u'org.apache.activemq', u'activemq-core', u'latest.release'), True),
((u'org.apache.activemq', u'activemq-pool', u'latest.release'), True),
((u'org.apache.geronimo.specs', u'geronimo-annotation_1.0_spec', u'latest.release'), False)
]),
(u'test', [
((u'org.apache.activemq', u'activemq-core', u'latest.release'), True),
((u'org.apache.camel', u'camel-core', u'latest.release'), True),
((u'org.apache.camel', u'camel-spring', u'latest.release'), True),
((u'org.springframework', u'spring-test', u'latest.release'), True),
((u'junit', u'junit', u'latest.release'), True),
((u'org.hamcrest', u'hamcrest-all', u'latest.release'), True),
]),
]
expected = [(s, sorted(v)) for s, v in expected]
results = [(s, sorted(v)) for s, v in pom.dependencies.items()]
assert results == expected
def test_parse_to_package_base(self):
test_file = self.get_test_loc('maven_misc/spring-beans-4.2.2.RELEASE.pom.xml')
self.check_parse_pom(test_file, regen=False)
def test_parse_to_package_and_validate(self):
test_file = self.get_test_loc('maven_misc/spring-beans-4.2.2.RELEASE.pom.xml')
package = maven.parse(test_file)
assert isinstance(package, maven.MavenPomPackage)
def test_parse_to_package_then_back(self):
test_file = self.get_test_loc('maven_misc/spring-beans-4.2.2.RELEASE.pom.xml')
package = maven.parse(test_file)
package2 = maven.MavenPomPackage.create(**package.to_dict())
assert package2.to_dict().items() == package.to_dict().items()
def test_package_root_is_properly_returned_for_metainf_poms(self):
from packagedcode.plugin_package import PackageScanner
test_dir = self.get_test_loc('maven_misc/package_root')
codebase = Codebase(test_dir, resource_attributes=PackageScanner.resource_attributes)
manifest_resource = [r for r in codebase.walk() if r.name == 'pom.xml'][0]
packages = list(maven.MavenPomPackage.recognize(manifest_resource.location))
assert packages
manifest_resource.packages.append(packages[0].to_dict())
manifest_resource.save(codebase)
proot = maven.MavenPomPackage.get_package_root(manifest_resource, codebase)
assert proot.name == 'activiti-image-generator-7-201802-EA-sources.jar-extract'
def test_package_dependency_not_missing(self):
test_file = self.get_test_loc('maven2/log4j-pom.xml')
self.check_parse_to_package(test_file, regen=False)
class TestPomProperties(testcase.FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_resolve_properties(self):
properties = {'groupId': 'org.apache'}
value = '${groupId}.mycomponent'
expected = 'org.apache.mycomponent'
test = maven.MavenPom._replace_props(value, properties)
assert test == expected
def test_resolve_properties_with_expression(self):
properties = {'groupId': 'org.apache'}
value = '${groupId.substring(4)}.mycomponent'
expected = 'apache.mycomponent'
test = maven.MavenPom._replace_props(value, properties)
assert test == expected
def test_resolve_properties_with_substring_expression(self):
properties = {'groupId': 'org.apache'}
value = '${groupId.substring(0,3)}.mycomponent'
expected = 'org.mycomponent'
test = maven.MavenPom._replace_props(value, properties)
assert test == expected
def test_get_properties(self):
test_loc = self.get_test_loc('maven2_props/multiple/pom.xml')
pom = maven.MavenPom(test_loc)
test = pom.properties
expected = {
'groupId': 'org.apache.geronimo.bundles',
'project.groupId': 'org.apache.geronimo.bundles',
'pom.groupId': 'org.apache.geronimo.bundles',
'artifactId': 'axis',
'project.artifactId': 'axis',
'pom.artifactId': 'axis',
'version': '1.4_1-SNAPSHOT',
'project.version': '1.4_1-SNAPSHOT',
'pom.version': '1.4_1-SNAPSHOT',
'parent.groupId': 'org.apache.geronimo.framework',
'project.parent.groupId': 'org.apache.geronimo.framework',
'pom.parent.groupId': 'org.apache.geronimo.framework',
'parent.artifactId': 'framework',
'project.parent.artifactId': 'framework',
'pom.parent.artifactId': 'framework',
'parent.version': '3.0-SNAPSHOT',
'project.parent.version': '3.0-SNAPSHOT',
'pom.parent.version': '3.0-SNAPSHOT',
'pkgArtifactId': 'axis',
'pkgGroupId': 'org.apache.axis',
'pkgVersion': '1.4',
}
assert test == expected
def test_get_properties_single(self):
test_loc = self.get_test_loc('maven2_props/single/pom.xml')
pom = maven.MavenPom(test_loc)
test = pom.properties
expected = {
'artifactId': None,
'groupId': None,
'pkgGroupId': 'org.apache.axis',
'pom.artifactId': None,
'pom.groupId': None,
'pom.version': None,
'project.artifactId': None,
'project.groupId': None,
'project.version': None,
'version': None
}
assert test == expected
def test_get_properties_advanced(self):
test_loc = self.get_test_loc('maven2_props/xml-format-maven-plugin-3.0.6.pom')
pom = maven.MavenPom(test_loc)
test = pom.properties
expected = {
'artifactId': 'xml-format-maven-plugin',
'github.org': 'acegi',
'github.repo': 'xml-format-maven-plugin',
'groupId': 'au.com.acegi',
'license.excludes': '**/test*.xml,**/invalid.xml',
'license.licenseName': 'apache_v2',
'maven.compiler.source': '1.7',
'maven.compiler.target': '1.7',
'maven.enforcer.java': '1.7',
'parent.artifactId': u'acegi-standard-project',
'parent.groupId': u'au.com.acegi',
'parent.version': '0.1.4',
'pom.artifactId': 'xml-format-maven-plugin',
'pom.groupId': 'au.com.acegi',
'pom.parent.artifactId': u'acegi-standard-project',
'pom.parent.groupId': u'au.com.acegi',
'pom.parent.version': '0.1.4',
'pom.version': '3.0.6',
'project.artifactId': 'xml-format-maven-plugin',
'project.groupId': 'au.com.acegi',
'project.parent.artifactId': u'acegi-standard-project',
'project.parent.groupId': u'au.com.acegi',
'project.parent.version': '0.1.4',
'project.version': '3.0.6',
'version': '3.0.6'
}
assert test == expected
def test_parse_can_run_without_pom_check(self):
test_loc = self.get_test_loc('maven_misc/ant-1.6.5.maven')
pom = maven.parse(test_loc, check_is_pom=False)
assert pom
pom = maven.parse(test_loc, check_is_pom=True)
assert not pom
def test_parse_will_load_extra_pom_properties_if_file_present(self):
# there is a file at maven2_props/props_file/activiti-image-generator/pom.properties
test_loc = self.get_test_loc('maven2_props/props_file/activiti-image-generator/pom.xml')
pom = maven.parse(test_loc, check_is_pom=False)
assert pom.namespace == 'org.activiti'
class TestMavenComputeNormalizedLicense(testcase.FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_compute_normalized_license_two_names_only(self):
declared_license = [
{'name': 'apache-2.0'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = 'apache-2.0 AND mit'
assert result == expected
def test_compute_normalized_license_tree_nodes(self):
declared_license = [
{'name': 'apache-2.0'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = 'apache-2.0 AND mit'
assert result == expected
def test_compute_normalized_license_with_unknown_url(self):
declared_license = [
{'name': 'apache-2.0', 'url': 'unknown'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = 'apache-2.0 AND mit'
assert result == expected
def test_compute_normalized_license_with_unknown_url_known_comments(self):
declared_license = [
{'name': 'apache-2.0', 'url': 'unknown', 'comments': 'apache-2.0'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = 'apache-2.0 AND mit'
assert result == expected
def test_compute_normalized_license_with_unknown_url_unknown_comments(self):
declared_license = [
{'name': 'apache-2.0', 'url': 'unknown', 'comments': 'unknown'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = 'apache-2.0 AND mit'
assert result == expected
def test_compute_normalized_license_unknown_name(self):
declared_license = [
{'name': 'unknown', 'url': 'apache-2.0'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = '(unknown AND apache-2.0) AND mit'
assert result == expected
def test_compute_normalized_license_same_name_and_url(self):
declared_license = [
{'name': 'apache-2.0', 'url': 'apache-2.0'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = 'apache-2.0 AND mit'
assert result == expected
def test_compute_normalized_license_same_name_url_comments(self):
declared_license = [
{'name': 'apache-2.0', 'url': 'apache-2.0', 'comments': 'apache-2.0'},
{'name': 'mit'}
]
result = maven.compute_normalized_license(declared_license)
expected = 'apache-2.0 AND mit'
assert result == expected
def test_compute_normalized_license_with_url_invalid(self):
declared_license = [
{'name': 'MIT', 'url': 'LICENSE.txt'},
]
result = maven.compute_normalized_license(declared_license)
expected = 'mit'
assert result == expected
def test_compute_normalized_license_with_duplicated_license(self):
declared_license = [
{'name': 'LGPL'},
{'name': 'GNU Lesser General Public License', 'url': 'http://www.gnu.org/licenses/lgpl.html'},
]
result = maven.compute_normalized_license(declared_license)
expected = 'lgpl-2.0-plus'
assert result == expected
def relative_walk(dir_path):
"""
    Walk dir_path and yield POM file paths relative to dir_path, skipping .json files.
"""
for base_dir, _dirs, files in os.walk(dir_path):
for file_name in files:
if file_name.endswith('.json'):
continue
file_path = os.path.join(base_dir, file_name)
file_path = file_path.replace(dir_path, '', 1)
file_path = file_path.strip(os.path.sep)
yield file_path
def create_test_function(test_pom_loc, test_name, check_pom=True, regen=False):
"""
    Return a test function closed over the test arguments.
    If check_pom is True, test POM parsing; otherwise, test Package creation.
"""
# closure on the test params
if check_pom:
def test_pom(self):
self.check_parse_pom(test_pom_loc, regen)
else:
def test_pom(self):
self.check_parse_to_package(test_pom_loc, regen)
# set a proper function name to display in reports and use in discovery
    # function names must be native (unicode) strings, so decode any bytes value
if isinstance(test_name, bytes):
test_name = test_name.decode('utf-8')
test_pom.__name__ = test_name
return test_pom
def build_tests(test_dir, clazz, prefix='test_maven2_parse_', check_pom=True, regen=False):
"""
    Dynamically build a test method for each POM in `test_dir` and
    attach it to the `clazz` class.
    If check_pom is True, test POM parsing; otherwise, test Package creation.
"""
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
test_dir = os.path.join(test_data_dir, test_dir)
# loop through all items and attach a test method to our test class
for test_file in relative_walk(test_dir):
test_name = prefix + text.python_safe_name(test_file)
test_pom_loc = os.path.join(test_dir, test_file)
test_method = create_test_function(test_pom_loc, test_name, check_pom=check_pom, regen=regen)
# attach that method to the class
setattr(clazz, test_name, test_method)
class TestMavenDataDrivenPomMisc(BaseMavenCase):
pytestmark = pytest.mark.scanslow
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
build_tests(test_dir='maven_misc/parse', clazz=TestMavenDataDrivenPomMisc,
prefix='test_maven2_parse_misc_', check_pom=True, regen=False)
build_tests(test_dir='maven_misc/parse', clazz=TestMavenDataDrivenPomMisc,
prefix='test_maven2_package_misc_', check_pom=False, regen=False)
class TestMavenDataDrivenPomBasic(BaseMavenCase):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
build_tests(test_dir='maven2', clazz=TestMavenDataDrivenPomBasic,
prefix='test_maven2_basic_parse_', check_pom=True, regen=False)
build_tests(test_dir='maven2', clazz=TestMavenDataDrivenPomBasic,
prefix='test_maven2_basic_package_', check_pom=False, regen=False)
class TestMavenDataDrivenPomComprehensive(BaseMavenCase):
pytestmark = pytest.mark.scanslow
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
# note: we use short dir names to deal with Windows long paths limitations
build_tests(test_dir='m2', clazz=TestMavenDataDrivenPomComprehensive,
prefix='test_maven2_parse', check_pom=True, regen=False)
build_tests(test_dir='m2', clazz=TestMavenDataDrivenPomComprehensive,
prefix='test_maven2_package', check_pom=False, regen=False)
| 40.388679 | 107 | 0.638045 |
4a1a7996e893afce90861c72fe40ca794de7eaff
| 255 |
py
|
Python
|
employee_management/employee_management/doctype/product_bin_gg/product_bin_gg.py
|
Vivekananthan112599/Frappe-Vivek
|
6a2b70c736e17e9748c6a30e5722341acfb3b5c5
|
[
"MIT"
] | null | null | null |
employee_management/employee_management/doctype/product_bin_gg/product_bin_gg.py
|
Vivekananthan112599/Frappe-Vivek
|
6a2b70c736e17e9748c6a30e5722341acfb3b5c5
|
[
"MIT"
] | null | null | null |
employee_management/employee_management/doctype/product_bin_gg/product_bin_gg.py
|
Vivekananthan112599/Frappe-Vivek
|
6a2b70c736e17e9748c6a30e5722341acfb3b5c5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Gopi and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ProductBinGG(Document):
pass
| 23.181818 | 49 | 0.772549 |
4a1a79b62106e9a5f7ece770f6285cbc29d164b3
| 4,377 |
py
|
Python
|
triopg/_triopg.py
|
touilleMan/triopg
|
1178ce8cf0bd5eb133b3f709af7157f7591a8284
|
[
"Apache-2.0",
"MIT"
] | 2 |
2021-11-08T02:44:55.000Z
|
2021-11-08T09:41:05.000Z
|
triopg/_triopg.py
|
touilleMan/triopg
|
1178ce8cf0bd5eb133b3f709af7157f7591a8284
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
triopg/_triopg.py
|
touilleMan/triopg
|
1178ce8cf0bd5eb133b3f709af7157f7591a8284
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from functools import wraps, partial
import trio
import asyncpg
import trio_asyncio
def _shielded(f):
@wraps(f)
async def wrapper(*args, **kwargs):
with trio.open_cancel_scope(shield=True):
return await f(*args, **kwargs)
return wrapper
def connect(*args, **kwargs):
return TrioConnectionProxy(*args, **kwargs)
def create_pool(*args, **kwargs):
return TrioPoolProxy(*args, **kwargs)
class TrioTransactionProxy:
def __init__(self, asyncpg_transaction):
self._asyncpg_transaction = asyncpg_transaction
@trio_asyncio.aio_as_trio
async def __aenter__(self, *args):
return await self._asyncpg_transaction.__aenter__(*args)
@_shielded
@trio_asyncio.aio_as_trio
async def __aexit__(self, *args):
return await self._asyncpg_transaction.__aexit__(*args)
class TrioConnectionProxy:
def __init__(self, *args, **kwargs):
self._asyncpg_create_connection = partial(
asyncpg.connect, *args, **kwargs
)
self._asyncpg_conn = None
def transaction(self, *args, **kwargs):
asyncpg_transaction = self._asyncpg_conn.transaction(*args, **kwargs)
return TrioTransactionProxy(asyncpg_transaction)
def __getattr__(self, attr):
target = getattr(self._asyncpg_conn, attr)
if callable(target):
@wraps(target)
@trio_asyncio.aio_as_trio
async def wrapper(*args, **kwargs):
return await target(*args, **kwargs)
# Only generate the function wrapper once per connection instance
setattr(self, attr, wrapper)
return wrapper
return target
@_shielded
@trio_asyncio.aio_as_trio
async def close(self):
return await self._asyncpg_conn.close()
async def __aenter__(self):
if not self._asyncpg_conn:
self._asyncpg_conn = await trio_asyncio.aio_as_trio(
self._asyncpg_create_connection
)()
return self
async def __aexit__(self, *exc):
return await self.close()
class TrioPoolAcquireContextProxy:
def __init__(self, asyncpg_acquire_context):
self._asyncpg_acquire_context = asyncpg_acquire_context
@trio_asyncio.aio_as_trio
async def __aenter__(self, *args):
proxy = await self._asyncpg_acquire_context.__aenter__(*args)
conn_proxy = TrioConnectionProxy()
conn_proxy._asyncpg_conn = proxy._con
return conn_proxy
@_shielded
@trio_asyncio.aio_as_trio
async def __aexit__(self, *args):
return await self._asyncpg_acquire_context.__aexit__(*args)
class TrioPoolProxy:
def __init__(self, *args, **kwargs):
self._asyncpg_create_pool = partial(
asyncpg.create_pool, *args, **kwargs
)
self._asyncpg_pool = None
def acquire(self):
return TrioPoolAcquireContextProxy(self._asyncpg_pool.acquire())
async def execute(self, statement: str, *args, timeout: float = None):
async with self.acquire() as conn:
return await conn.execute(statement, *args, timeout=timeout)
async def executemany(
self, statement: str, args, *, timeout: float = None
):
async with self.acquire() as conn:
return await conn.executemany(statement, args, timeout=timeout)
async def fetch(self, query, *args, timeout: float = None):
async with self.acquire() as conn:
return await conn.fetch(query, *args, timeout=timeout)
async def fetchval(self, query, *args, timeout: float = None):
async with self.acquire() as conn:
return await conn.fetchval(query, *args, timeout=timeout)
async def fetchrow(self, query, *args, timeout: float = None):
async with self.acquire() as conn:
return await conn.fetchrow(query, *args, timeout=timeout)
@_shielded
@trio_asyncio.aio_as_trio
async def close(self):
return await self._asyncpg_pool.close()
def terminate(self):
return self._asyncpg_pool.terminate()
async def __aenter__(self):
if not self._asyncpg_pool:
self._asyncpg_pool = await trio_asyncio.aio_as_trio(
self._asyncpg_create_pool
)()
return self
async def __aexit__(self, *exc):
return await self.close()
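# Hedged usage sketch (not part of the original module): how the pool and connection
# proxies are typically combined from trio code; the DSN and the table/query strings
# are illustrative assumptions.
async def _example_usage():
    async with create_pool('postgresql://localhost/example') as pool:
        await pool.execute('CREATE TABLE IF NOT EXISTS demo (id integer)')
        async with pool.acquire() as conn:
            async with conn.transaction():
                await conn.execute('INSERT INTO demo (id) VALUES (1)')
        return await pool.fetchval('SELECT count(*) FROM demo')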
| 29.574324 | 77 | 0.657756 |
4a1a7a5802cdd1b5519551a8694336f1c7730411
| 5,082 |
py
|
Python
|
007/src/object_detector/yolov3.py
|
AzharMithani/99-ML-Learning-Projects
|
88777e6da153f3de97fe8ec09ee86c7d8ebbf27b
|
[
"MIT"
] | null | null | null |
007/src/object_detector/yolov3.py
|
AzharMithani/99-ML-Learning-Projects
|
88777e6da153f3de97fe8ec09ee86c7d8ebbf27b
|
[
"MIT"
] | null | null | null |
007/src/object_detector/yolov3.py
|
AzharMithani/99-ML-Learning-Projects
|
88777e6da153f3de97fe8ec09ee86c7d8ebbf27b
|
[
"MIT"
] | null | null | null |
# Azhar Mithani
import os
import time
import itertools
import cv2
import numpy as np
# PeopleDetector: a class wrapping a YOLOv3 network with helpers for detecting people in a frame
class PeopleDetector:
def __init__(self, yolocfg='yolo_weights/yolov3.cfg',
yoloweights='yolo_weights/yolov3.weights',
labelpath='yolo_weights/coco.names',
confidence=0.5,
nmsthreshold=0.4):
self._yolocfg = yolocfg
self._yoloweights = yoloweights
self._confidence = confidence
self._nmsthreshold = nmsthreshold
self._labels = open(labelpath).read().strip().split("\n")
self._colors = np.random.randint(
0, 255, size=(len(self._labels), 3), dtype="uint8")
self._net = None
self._layer_names = None
self._boxes = []
self._confidences = []
self._classIDs = []
self._centers = []
self._layerouts = []
self._MIN_DIST = 150
self._mindistances = {}
# Loading the yolov3 network backend
def load_network(self):
self._net = cv2.dnn.readNetFromDarknet(
self._yolocfg, self._yoloweights)
self._net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
self._net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
self._layer_names = [self._net.getLayerNames()[i[0] - 1]
for i in self._net.getUnconnectedOutLayers()]
print("yolov3 loaded successfully\n")
    # Function running the forward pass and logging how long the prediction took
def predict(self, image):
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
[0, 0, 0], 1, crop=False)
self._net.setInput(blob)
start = time.time()
self._layerouts = self._net.forward(self._layer_names)
end = time.time()
print("yolo took {:.6f} seconds".format(end - start))
return(self._layerouts)
    # Function processing raw network outputs: keeps person detections, applies NMS and draws boxes
def process_preds(self, image, outs):
(frameHeight, frameWidth) = image.shape[:2]
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
if classId != 0: # filter person class
continue
confidence = scores[classId]
if confidence > self._confidence:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
self._classIDs.append(classId)
self._confidences.append(float(confidence))
self._boxes.append([left, top, width, height])
self._centers.append((center_x, center_y))
indices = cv2.dnn.NMSBoxes(
self._boxes, self._confidences, self._confidence, self._nmsthreshold)
for i in indices:
i = i[0]
box = self._boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
self.draw_pred(image, self._classIDs[i], self._confidences[i], left,
top, left + width, top + height)
return self._centers
    # Function resetting the per-frame detection state
def clear_preds(self):
self._boxes = []
self._confidences = []
self._classIDs = []
self._centers = []
self._layerouts = []
self._mindistances = {}
    # Function drawing a predicted bounding box and label on the frame
def draw_pred(self, frame, classId, conf, left, top, right, bottom):
cv2.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)
label = '%.2f' % conf
label = '%s:%s' % (self._labels[classId], label)
labelSize, baseLine = cv2.getTextSize(
label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, labelSize[1])
cv2.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(
1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, label, (left, top),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)
self.find_min_distance(self._centers)
for k in self._mindistances:
cv2.line(frame, k[0], k[1], (0, 0, 255), 7)
    # Function recording pairs of detections whose centers are closer than self._MIN_DIST
    def find_min_distance(self, centers):
        '''
        Record pairs of detection centers whose euclidean distance is below self._MIN_DIST.
        '''
comp = list(itertools.combinations(centers, 2))
for pts in comp:
ecdist = np.linalg.norm(np.asarray(pts[0])-np.asarray(pts[1]))
if ecdist < self._MIN_DIST:
self._mindistances.update({pts: ecdist})
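# Hedged usage sketch (not part of the original file): running a single image through
# the detector with the default weight/config paths; the image path is an illustrative
# assumption.
def _example_detect(image_path='people.jpg'):
    detector = PeopleDetector()
    detector.load_network()
    frame = cv2.imread(image_path)
    outs = detector.predict(frame)
    centers = detector.process_preds(frame, outs)
    detector.clear_preds()
    return frame, centers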
| 39.395349 | 96 | 0.569264 |
4a1a7a734a30f82148461097c717170372a6a88d
| 7,272 |
py
|
Python
|
onnxruntime/python/tools/bert/compare_bert_results.py
|
lizy14/onnxruntime
|
8f00147c14c64715ffd4b1512df5356ddeb75462
|
[
"MIT"
] | 1 |
2020-07-12T16:33:35.000Z
|
2020-07-12T16:33:35.000Z
|
onnxruntime/python/tools/bert/compare_bert_results.py
|
Montaer/onnxruntime
|
6dc25a60f8b058a556964801d99d5508641dcf69
|
[
"MIT"
] | null | null | null |
onnxruntime/python/tools/bert/compare_bert_results.py
|
Montaer/onnxruntime
|
6dc25a60f8b058a556964801d99d5508641dcf69
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
# A tool to compare the inference results of an original model and its optimized version.
import sys
import argparse
import numpy as np
import os
import random
from pathlib import Path
import statistics
import onnx
import onnx.utils
import psutil
import csv
import timeit
from datetime import datetime
from onnx import ModelProto, TensorProto, numpy_helper
from OnnxModel import OnnxModel
from bert_test_data import get_bert_inputs, generate_test_data, output_test_data
from bert_perf_test import create_session, onnxruntime_inference, setup_openmp_environ
def run_model(model_path, all_inputs, use_gpu, use_openmp, disable_optimization):
    # onnxruntime must be imported only after the OpenMP environment variables are set,
    # so the import is done here to delay it.
import onnxruntime
graph_optimization_level = None
if disable_optimization:
graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
intra_op_num_threads = 1 if use_openmp else psutil.cpu_count(logical=False)
session = create_session(model_path, use_gpu, intra_op_num_threads, graph_optimization_level)
output_names = [output.name for output in session.get_outputs()]
results, latency_list = onnxruntime_inference(session, all_inputs, output_names)
return results, latency_list, output_names
def compare(baseline_results, treatment_results, verbose, rtol=1e-3, atol=1e-4):
# Validate the output of baseline and treatment, to make sure the results are similar.
diff_count = 0
max_rel_diff = 0
max_abs_diff = 0
for test_case_id, results in enumerate(baseline_results):
case_passed = True
for i in range(len(results)):
treatment_output = treatment_results[test_case_id][i]
rel_diff = np.amax(np.abs((treatment_output - results[i]) / results[i]))
abs_diff = np.amax(np.abs(treatment_output - results[i]))
max_rel_diff = max(max_rel_diff, rel_diff)
max_abs_diff = max(max_abs_diff, abs_diff)
if not np.allclose(results[i].tolist(), treatment_output.tolist(), rtol=rtol, atol=atol):
if case_passed:
case_passed = False
diff_count += 1
if verbose:
print("case {} output {}".format(test_case_id, i))
print("baseline={}\ntreatment={}".format(results[i].tolist(), treatment_output))
print("rel_diff={} abs_diff={}".format(rel_diff, abs_diff))
if diff_count == 0:
print("100% passed for {} random inputs given thresholds (rtol={}, atol={}).".format(len(baseline_results), rtol, atol))
else:
print("{} out of {} results not passed for thresholds (rtol={}, atol={}).".format(diff_count, len(baseline_results), rtol, atol))
print("maximum absolute difference={}".format(max_abs_diff))
print("maximum relative difference={}".format(max_rel_diff))
def run_test(baseline_model, optimized_model, output_dir, batch_size, sequence_length, use_gpu, test_cases, seed, use_openmp, verbose, rtol, atol):
    # Try to deduce the input names from the optimized model.
input_ids, segment_ids, input_mask = get_bert_inputs(optimized_model)
# Use random mask length for accuracy test. It might introduce slight inflation in latency reported in this script.
all_inputs = generate_test_data(batch_size, sequence_length, test_cases, seed, verbose, input_ids, segment_ids, input_mask, random_mask_length=True)
# OpenMP environment variables must be set before the very first "import onnxruntime"
if use_openmp:
setup_openmp_environ(omp_num_threads=psutil.cpu_count(logical=False), omp_wait_policy='ACTIVE')
else:
setup_openmp_environ(omp_num_threads=1, omp_wait_policy='ACTIVE')
baseline_results, baseline_latency, output_names = run_model(baseline_model, all_inputs, use_gpu, use_openmp, disable_optimization=True)
if verbose:
print("baseline average latency (all optimizations disabled): {} ms".format(statistics.mean(baseline_latency) * 1000))
if output_dir is not None:
for i, inputs in enumerate(all_inputs):
output_test_data(output_dir, i, inputs)
treatment_results, treatment_latency, treatment_output_names = run_model(optimized_model, all_inputs, use_gpu, use_openmp, disable_optimization=False)
if verbose:
print("treatment average latency: {} ms".format(statistics.mean(treatment_latency) * 1000))
# Validate the output of baseline and treatment, to make sure the results are similar.
compare(baseline_results, treatment_results, verbose, rtol, atol)
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--baseline_model', required=True, type=str,
help="baseline onnx model path.")
parser.add_argument('--optimized_model', required=True, type=str, default=None,
help="path of the optimized model. It shall have same inputs as the baseline model.")
parser.add_argument('--output_dir', required=False, type=str, default=None,
help="output test data path. If not specified, test data will not be saved.")
parser.add_argument('--batch_size', required=True, type=int,
help="batch size of input")
parser.add_argument('--sequence_length', required=True, type=int,
help="maximum sequence length of input")
parser.add_argument('--rtol', required=False, type=float, default=1e-3,
help="relative tolerance")
parser.add_argument('--atol', required=False, type=float, default=1e-4,
help="absolute tolerance")
parser.add_argument('--samples', required=False, type=int, default=100,
help="number of test cases to be generated")
parser.add_argument('--seed', required=False, type=int, default=3,
help="random seed")
parser.add_argument('--use_gpu', required=False, action='store_true', help="use GPU")
parser.set_defaults(use_gpu=False)
parser.add_argument('--openmp', required=False, action='store_true', help="use openmp")
parser.set_defaults(openmp=False)
parser.add_argument('--verbose', required=False, action='store_true', help="print verbose information")
parser.set_defaults(verbose=False)
args = parser.parse_args()
return args
def main():
args = parse_arguments()
if args.output_dir is not None:
        # create the output directory if it does not exist
path = Path(args.output_dir)
path.mkdir(parents=True, exist_ok=True)
run_test(
args.baseline_model,
args.optimized_model,
args.output_dir,
args.batch_size,
args.sequence_length,
args.use_gpu,
args.samples,
args.seed,
args.openmp,
args.verbose,
args.rtol,
args.atol)
if __name__ == "__main__":
main()
| 43.029586 | 154 | 0.679318 |
4a1a7b55989de2db00140e9f2e2173d3c0269bf1
| 2,212 |
py
|
Python
|
webhook_trigger_service/basic/run.py
|
GShepherdTC/tcex-app-templates
|
fae927965563f98eed0bd7716afa3bf4d4fda3bf
|
[
"Apache-2.0"
] | 1 |
2022-02-23T16:04:16.000Z
|
2022-02-23T16:04:16.000Z
|
webhook_trigger_service/basic/run.py
|
GShepherdTC/tcex-app-templates
|
fae927965563f98eed0bd7716afa3bf4d4fda3bf
|
[
"Apache-2.0"
] | null | null | null |
webhook_trigger_service/basic/run.py
|
GShepherdTC/tcex-app-templates
|
fae927965563f98eed0bd7716afa3bf4d4fda3bf
|
[
"Apache-2.0"
] | 3 |
2022-02-16T18:13:58.000Z
|
2022-03-31T18:46:20.000Z
|
"""Playbook App"""
# standard library
import traceback
# first-party
from app_lib import AppLib
# pylint: disable=no-member
def run(**kwargs) -> None:
"""Update path and run the App."""
# update the path to ensure the App has access to required modules
app_lib = AppLib()
app_lib.update_path()
# import modules after path has been updated
# third-party
from tcex import TcEx # pylint: disable=import-outside-toplevel
# first-party
from app import App # pylint: disable=import-outside-toplevel
from app_inputs import AppInputs # pylint: disable=import-outside-toplevel
tcex = TcEx()
try:
# load App class
app = App(tcex)
# set app property in testing framework
if callable(kwargs.get('set_app')):
kwargs.get('set_app')(app)
# configure custom trigger message handler
tcex.service.create_config_callback = app.create_config_callback
tcex.service.delete_config_callback = app.delete_config_callback
tcex.service.shutdown_callback = app.shutdown_callback
tcex.service.webhook_event_callback = app.webhook_event_callback
# set the createConfig model
tcex.service.trigger_input_model = AppInputs
# perform prep/setup operations
app.setup(**{})
# listen on channel/topic
tcex.service.listen()
# start heartbeat threads
tcex.service.heartbeat()
# inform TC that micro-service is Ready
tcex.service.ready = True
# loop until exit
if hasattr(app, 'loop_forever'):
app.loop_forever() # pylint: disable=no-member
else:
tcex.log.info('Looping until shutdown')
while tcex.service.loop_forever(sleep=1):
pass
# perform cleanup/teardown operations
app.teardown(**{})
# explicitly call the exit method
tcex.playbook.exit(msg=app.exit_message)
except Exception as e:
main_err = f'Generic Error. See logs for more details ({e}).'
tcex.log.error(traceback.format_exc())
tcex.playbook.exit(1, main_err)
if __name__ == '__main__':
# Run the App
run()
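# Hedged illustration (not part of the template): a testing framework can capture the
# App instance through the optional 'set_app' callback, for example:
#   captured = {}
#   run(set_app=lambda app: captured.update(app=app))
# 'captured' is an illustrative name.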
| 27.65 | 79 | 0.64557 |
4a1a7bdbc5158595bec15ee75f44f44f07597f14
| 952 |
py
|
Python
|
baselibs/python/example_lsres.py
|
openhpi2/openhpi_apr25
|
720d4043124ac44d17715db4ffb735c623c08e38
|
[
"BSD-3-Clause"
] | 5 |
2018-12-18T01:32:53.000Z
|
2021-11-15T10:41:48.000Z
|
baselibs/python/example_lsres.py
|
openhpi2/openhpi_apr25
|
720d4043124ac44d17715db4ffb735c623c08e38
|
[
"BSD-3-Clause"
] | 34 |
2018-05-11T21:31:33.000Z
|
2021-01-12T07:13:46.000Z
|
baselibs/python/example_lsres.py
|
openhpi2/openhpi_apr25
|
720d4043124ac44d17715db4ffb735c623c08e38
|
[
"BSD-3-Clause"
] | 8 |
2018-08-27T22:48:44.000Z
|
2022-03-15T03:49:55.000Z
|
# -*- python -*-
#
# Copyright (C) 2012, Pigeon Point Systems
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This
# file and program are licensed under a BSD style license. See
# the Copying file included with the OpenHPI distribution for
# full licensing terms.
#
# Author(s):
# Anton Pak <anton.pak@pigeonpoint.com>
#
from openhpi_baselib import *
( rv, sid ) = saHpiSessionOpen( SAHPI_UNSPECIFIED_DOMAIN_ID, None )
if rv != SA_OK:
print "ERROR: saHpiSessionOpen: %s " % HpiUtil.fromSaErrorT( rv )
exit()
for rpte in HpiIterators.Rpt( sid ):
tag = HpiUtil.fromSaHpiTextBufferT( rpte.ResourceTag )
print "Resource Id: %d, Tag: %s" % ( rpte.ResourceId, tag )
rv = saHpiSessionClose( sid )
if rv != SA_OK:
print "ERROR: saHpiSessionClose: %s " % HpiUtil.fromSaErrorT( rv )
| 28.848485 | 70 | 0.705882 |
4a1a7d0ea611db492931467cc0f853c3adbc334e
| 631 |
py
|
Python
|
encoding.py
|
Hydrazer/vyxal-2.4.1
|
fc377aeba95928cfbf7c2aa5cf98961948c09d9a
|
[
"MIT"
] | 1 |
2021-05-26T02:00:14.000Z
|
2021-05-26T02:00:14.000Z
|
encoding.py
|
Command-Master/Vyxal
|
2a1fd535c786dcbce2796360931c994438777cca
|
[
"MIT"
] | null | null | null |
encoding.py
|
Command-Master/Vyxal
|
2a1fd535c786dcbce2796360931c994438777cca
|
[
"MIT"
] | null | null | null |
from commands import codepage
import string
def vyxal_to_utf8(code):
# Taken from the old 05AB1E interpreter
processed_code = ""
for char in code:
processed_code += codepage[char]
return processed_code
def utf8_to_vyxal(code):
# Taken from the old 05AB1E interpreter
processed_code = ""
for char in code:
processed_code += chr(codepage.index(char))
return processed_code
compression = codepage
for char in string.printable:
compression = compression.replace(char, "")
codepage_number_compress = codepage.replace("»", "")
codepage_string_compress = codepage.replace("«", "")
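# Hedged illustration (not part of the original module): each byte value indexes one
# character of the Vyxal codepage, and utf8_to_vyxal recovers the byte position via
# codepage.index; byte value 0 is an illustrative choice.
def _example_single_byte(byte_value=0):
    utf8_char = codepage[byte_value]
    return utf8_char, ord(utf8_to_vyxal(utf8_char))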
| 25.24 | 52 | 0.713154 |
4a1a7e003a3969eec1c8ad2c8d85b621112b8886
| 3,001 |
py
|
Python
|
plugins/yahoo_weather.1h.py
|
longpdo/bitbar-plugins-custom
|
58cff1571ae4a939f7edac9c42fcd1156e3c8661
|
[
"MIT"
] | 4 |
2020-07-08T23:47:51.000Z
|
2021-04-15T12:03:08.000Z
|
plugins/yahoo_weather.1h.py
|
longpdo/bitbar-plugins-custom
|
58cff1571ae4a939f7edac9c42fcd1156e3c8661
|
[
"MIT"
] | null | null | null |
plugins/yahoo_weather.1h.py
|
longpdo/bitbar-plugins-custom
|
58cff1571ae4a939f7edac9c42fcd1156e3c8661
|
[
"MIT"
] | 3 |
2020-07-08T23:48:29.000Z
|
2021-03-17T07:37:02.000Z
|
#!/usr/bin/env LC_ALL=en_US.UTF-8 /usr/local/bin/python3
#
# <bitbar.title>Yahoo Weather</bitbar.title>
# <bitbar.version>v3.0</bitbar.version>
# <bitbar.author>mgjo5899</bitbar.author>
# <bitbar.author.github>mgjo5899</bitbar.author.github>
# <bitbar.desc>It tells you the current weather condition of the location where your computer is located. It determines that location from the machine's public IP. You can also set the city and region manually by modifying this file.</bitbar.desc>
# <bitbar.image>https://i.imgur.com/YNypf0P.jpg</bitbar.image>
# <bitbar.dependencies>python</bitbar.dependencies>
#
# by mgjo5899
# tweaked by longpdo (https://github.com/longpdo)
import json
import uuid
import time
import hmac
import hashlib
from base64 import b64encode
from datetime import datetime
from urllib.request import urlopen, Request
from urllib.parse import urlencode, quote
# General Placeholders
url = 'https://weather-ydn-yql.media.yahoo.com/forecastrss'
# Credentials
app_id = 'f776QQ32'
consumer_key = 'dj0yJmk9RlJhbUVpUEpsSUxEJmQ9WVdrOVpqYzNObEZSTXpJbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmc3Y9MCZ4PTk0'
consumer_secret = '75c592717d22c5cce623d2c2a1d5a5b36786d865'
# Query and authentication related
query = {
'location': 'Nuremberg,BY',
'format': 'json',
'u': 'c'
}
def get_auth_header():
oauth = {
'oauth_consumer_key': consumer_key,
'oauth_nonce': uuid.uuid4().hex,
'oauth_signature_method': 'HMAC-SHA1',
'oauth_timestamp': str(int(time.time())),
'oauth_version': '1.0'
}
merged_dict = {**query, **oauth}
sorted_dict = [k + '=' + quote(merged_dict[k], safe='')
for k in sorted(merged_dict.keys())]
signature_base = 'GET&' + \
quote(url, safe='') + '&' + quote('&'.join(sorted_dict))
composite_key = consumer_secret + '&'
oauth_signature = b64encode(hmac.new(composite_key.encode(
), msg=signature_base.encode(), digestmod=hashlib.sha1).digest()).decode()
oauth['oauth_signature'] = oauth_signature
auth_header = 'OAuth ' + \
', '.join(['{}="{}"'.format(k, v) for k, v in oauth.items()])
return auth_header
def get_weather(auth_header):
request_url = url + '?' + urlencode(query)
request = Request(request_url)
request.add_header('Authorization', auth_header)
request.add_header('X-Yahoo-App-Id', app_id)
r = urlopen(request).read()
j = json.loads(r)
return j
if __name__ == '__main__':
auth_header = get_auth_header()
weather_data = get_weather(auth_header)
    current_temperature = weather_data['current_observation']['condition']['temperature']
    forecasts = weather_data['forecasts']
    print(str(current_temperature) + '°C')
# Dropdown info
print('---')
for day in forecasts:
date = datetime.fromtimestamp(int(day['date']))
print(date.strftime('%A %d. %B'))
print(day['text'] + ': ' + str(day['low']) + '-' + str(day['high']) + '°C')
print('---')
| 33.344444 | 261 | 0.681106 |
4a1a7e4c7a135a427fa569ff73aa4b12532b9faf
| 37,568 |
py
|
Python
|
chia/wallet/wallet_node.py
|
Kaieida/bluepool
|
88feb12da64673815ff20c503e497b28fa9f9b82
|
[
"Apache-2.0"
] | 15 |
2021-06-05T00:53:48.000Z
|
2021-06-22T10:33:40.000Z
|
chia/wallet/wallet_node.py
|
Kaieida/bluepool
|
88feb12da64673815ff20c503e497b28fa9f9b82
|
[
"Apache-2.0"
] | 24 |
2021-06-06T16:50:33.000Z
|
2021-08-31T19:14:09.000Z
|
chia/wallet/wallet_node.py
|
Kaieida/bluepool
|
88feb12da64673815ff20c503e497b28fa9f9b82
|
[
"Apache-2.0"
] | 13 |
2021-06-06T13:21:27.000Z
|
2021-12-31T01:34:59.000Z
|
import asyncio
import json
import logging
import socket
import time
import traceback
from pathlib import Path
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
from blspy import PrivateKey
from chia.consensus.block_record import BlockRecord
from chia.consensus.constants import ConsensusConstants
from chia.consensus.multiprocess_validation import PreValidationResult
from chia.protocols import wallet_protocol
from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
RejectAdditionsRequest,
RejectRemovalsRequest,
RequestAdditions,
RequestHeaderBlocks,
RespondAdditions,
RespondBlockHeader,
RespondHeaderBlocks,
RespondRemovals,
)
from chia.server.node_discovery import WalletPeers
from chia.server.outbound_message import Message, NodeType, make_msg
from chia.server.server import ChiaServer
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.header_block import HeaderBlock
from chia.types.peer_info import PeerInfo
from chia.util.byte_types import hexstr_to_bytes
from chia.util.errors import Err, ValidationError
from chia.util.ints import uint32, uint128
from chia.util.keychain import Keychain
from chia.util.merkle_set import (
MerkleSet,
confirm_included_already_hashed,
confirm_not_included_already_hashed,
)
from chia.util.path import mkdir, path_from_root
from chia.wallet.block_record import HeaderBlockRecord
from chia.wallet.derivation_record import DerivationRecord
from chia.wallet.settings.settings_objects import BackupInitialized
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.backup_utils import open_backup_file
from chia.wallet.util.wallet_types import WalletType
from chia.wallet.wallet_action import WalletAction
from chia.wallet.wallet_blockchain import ReceiveBlockResult
from chia.wallet.wallet_state_manager import WalletStateManager
class WalletNode:
key_config: Dict
config: Dict
constants: ConsensusConstants
server: Optional[ChiaServer]
log: logging.Logger
wallet_peers: WalletPeers
# Maintains the state of the wallet (blockchain and transactions), handles DB connections
wallet_state_manager: Optional[WalletStateManager]
# How far away from LCA we must be to perform a full sync. Before then, do a short sync,
# which is consecutive requests for the previous block
short_sync_threshold: int
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
syncing: bool
full_node_peer: Optional[PeerInfo]
peer_task: Optional[asyncio.Task]
logged_in: bool
def __init__(
self,
config: Dict,
keychain: Keychain,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
):
self.config = config
self.constants = consensus_constants
self.root_path = root_path
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
# Normal operation data
self.cached_blocks: Dict = {}
self.future_block_hashes: Dict = {}
self.keychain = keychain
# Sync data
self._shut_down = False
self.proof_hashes: List = []
self.header_hashes: List = []
self.header_hashes_error = False
self.short_sync_threshold = 15 # Change the test when changing this
self.potential_blocks_received: Dict = {}
self.potential_header_hashes: Dict = {}
self.state_changed_callback = None
self.wallet_state_manager = None
self.backup_initialized = False # Delay first launch sync after user imports backup info or decides to skip
self.server = None
self.wsm_close_task = None
self.sync_task: Optional[asyncio.Task] = None
self.new_peak_lock: Optional[asyncio.Lock] = None
self.logged_in_fingerprint: Optional[int] = None
self.peer_task = None
self.logged_in = False
def get_key_for_fingerprint(self, fingerprint: Optional[int]):
private_keys = self.keychain.get_all_private_keys()
if len(private_keys) == 0:
self.log.warning("No keys present. Create keys with the UI, or with the 'chia keys' program.")
return None
private_key: Optional[PrivateKey] = None
if fingerprint is not None:
for sk, _ in private_keys:
if sk.get_g1().get_fingerprint() == fingerprint:
private_key = sk
break
else:
private_key = private_keys[0][0]
return private_key
async def _start(
self,
fingerprint: Optional[int] = None,
new_wallet: bool = False,
backup_file: Optional[Path] = None,
skip_backup_import: bool = False,
) -> bool:
private_key = self.get_key_for_fingerprint(fingerprint)
if private_key is None:
self.logged_in = False
return False
db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
db_path_replaced: str = (
self.config["database_path"]
.replace("CHALLENGE", self.config["selected_network"])
.replace("KEY", db_path_key_suffix)
)
path = path_from_root(self.root_path, db_path_replaced)
mkdir(path.parent)
assert self.server is not None
self.wallet_state_manager = await WalletStateManager.create(
private_key, self.config, path, self.constants, self.server
)
self.wsm_close_task = None
assert self.wallet_state_manager is not None
backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
if backup_settings.user_initialized is False:
if new_wallet is True:
await self.wallet_state_manager.user_settings.user_created_new_wallet()
self.wallet_state_manager.new_wallet = True
elif skip_backup_import is True:
await self.wallet_state_manager.user_settings.user_skipped_backup_import()
elif backup_file is not None:
await self.wallet_state_manager.import_backup_info(backup_file)
else:
self.backup_initialized = False
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
self.logged_in = False
return False
self.backup_initialized = True
if backup_file is not None:
json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
if "start_height" in json_dict["data"]:
start_height = json_dict["data"]["start_height"]
self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
else:
self.config["starting_height"] = 0
else:
self.config["starting_height"] = 0
if self.state_changed_callback is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
self._shut_down = False
self.peer_task = asyncio.create_task(self._periodically_check_full_node())
self.sync_event = asyncio.Event()
self.sync_task = asyncio.create_task(self.sync_job())
self.logged_in_fingerprint = fingerprint
self.logged_in = True
return True
def _close(self):
self.log.info("self._close")
self.logged_in_fingerprint = None
self._shut_down = True
async def _await_closed(self):
self.log.info("self._await_closed")
await self.server.close_all_connections()
asyncio.create_task(self.wallet_peers.ensure_is_closed())
if self.wallet_state_manager is not None:
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
if self.sync_task is not None:
self.sync_task.cancel()
self.sync_task = None
if self.peer_task is not None:
self.peer_task.cancel()
self.peer_task = None
self.logged_in = False
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
def _pending_tx_handler(self):
if self.wallet_state_manager is None or self.backup_initialized is False:
return
asyncio.create_task(self._resend_queue())
async def _action_messages(self) -> List[Message]:
if self.wallet_state_manager is None or self.backup_initialized is False:
return []
actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions()
result: List[Message] = []
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
msg = make_msg(
ProtocolMessageTypes.request_puzzle_solution,
wallet_protocol.RequestPuzzleSolution(coin_name, height),
)
result.append(msg)
return result
async def _resend_queue(self):
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
or self.backup_initialized is None
):
return
for msg, sent_peers in await self._messages_to_resend():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
or self.backup_initialized is None
):
return
full_nodes = self.server.get_full_node_connections()
for peer in full_nodes:
if peer.peer_node_id in sent_peers:
continue
await peer.send_message(msg)
for msg in await self._action_messages():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
or self.backup_initialized is None
):
return
await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down:
return []
messages: List[Tuple[Message, Set[bytes32]]] = []
records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent()
for record in records:
if record.spend_bundle is None:
continue
msg = make_msg(
ProtocolMessageTypes.send_transaction,
wallet_protocol.SendTransaction(record.spend_bundle),
)
already_sent = set()
for peer, status, _ in record.sent_to:
already_sent.add(hexstr_to_bytes(peer))
messages.append((msg, already_sent))
return messages
def set_server(self, server: ChiaServer):
self.server = server
self.wallet_peers = WalletPeers(
self.server,
self.root_path,
self.config["target_peer_count"],
self.config["wallet_peers_path"],
self.config["introducer_peer"],
self.config["peer_connect_interval"],
self.log,
)
asyncio.create_task(self.wallet_peers.start())
async def on_connect(self, peer: WSChiaConnection):
if self.wallet_state_manager is None or self.backup_initialized is False:
return
messages_peer_ids = await self._messages_to_resend()
for msg, peer_ids in messages_peer_ids:
if peer.peer_node_id in peer_ids:
continue
await peer.send_message(msg)
if not self.has_full_node() and self.wallet_peers is not None:
asyncio.create_task(self.wallet_peers.on_connect(peer))
async def _periodically_check_full_node(self) -> None:
tries = 0
while not self._shut_down and tries < 5:
if self.has_full_node():
await self.wallet_peers.ensure_is_closed()
break
tries += 1
await asyncio.sleep(self.config["peer_connect_interval"])
def has_full_node(self) -> bool:
if self.server is None:
return False
if "full_node_peer" in self.config:
full_node_peer = PeerInfo(
self.config["full_node_peer"]["host"],
self.config["full_node_peer"]["port"],
)
peers = [c.get_peer_info() for c in self.server.get_full_node_connections()]
full_node_resolved = PeerInfo(socket.gethostbyname(full_node_peer.host), full_node_peer.port)
if full_node_peer in peers or full_node_resolved in peers:
self.log.info(f"Will not attempt to connect to other nodes, already connected to {full_node_peer}")
for connection in self.server.get_full_node_connections():
if (
connection.get_peer_info() != full_node_peer
and connection.get_peer_info() != full_node_resolved
):
self.log.info(f"Closing unnecessary connection to {connection.get_peer_info()}.")
asyncio.create_task(connection.close())
return True
return False
async def complete_blocks(self, header_blocks: List[HeaderBlock], peer: WSChiaConnection):
if self.wallet_state_manager is None:
return
header_block_records: List[HeaderBlockRecord] = []
async with self.wallet_state_manager.blockchain.lock:
for block in header_blocks:
if block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
block, block.transactions_filter, None
)
# Get Additions
added_coins = await self.get_additions(peer, block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
hbr = HeaderBlockRecord(block, added_coins, removed_coins)
else:
hbr = HeaderBlockRecord(block, [], [])
header_block_records.append(hbr)
(
result,
error,
fork_h,
) = await self.wallet_state_manager.blockchain.receive_block(hbr)
if result == ReceiveBlockResult.NEW_PEAK:
if not self.wallet_state_manager.sync_mode:
self.wallet_state_manager.blockchain.clean_block_records()
self.wallet_state_manager.state_changed("new_block")
self.wallet_state_manager.state_changed("sync_changed")
elif result == ReceiveBlockResult.INVALID_BLOCK:
self.log.info(f"Invalid block from peer: {peer.get_peer_info()} {error}")
await peer.close()
return
else:
self.log.debug(f"Result: {result}")
async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection):
if self.wallet_state_manager is None:
return
curr_peak = self.wallet_state_manager.blockchain.get_peak()
if curr_peak is not None and curr_peak.weight >= peak.weight:
return
if self.new_peak_lock is None:
self.new_peak_lock = asyncio.Lock()
async with self.new_peak_lock:
request = wallet_protocol.RequestBlockHeader(peak.height)
response: Optional[RespondBlockHeader] = await peer.request_block_header(request)
if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None:
return
header_block = response.header_block
if (curr_peak is None and header_block.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or (
curr_peak is not None and curr_peak.height > header_block.height - 200
):
top = header_block
blocks = [top]
# Fetch blocks backwards until we hit the one that we have,
# then complete them with additions / removals going forward
while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0:
request_prev = wallet_protocol.RequestBlockHeader(top.height - 1)
response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev)
if response_prev is None:
return
if not isinstance(response_prev, RespondBlockHeader):
return
prev_head = response_prev.header_block
blocks.append(prev_head)
top = prev_head
blocks.reverse()
await self.complete_blocks(blocks, peer)
elif header_block.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
# Request weight proof
# Sync if PoW validates
if self.wallet_state_manager.sync_mode:
return
weight_request = RequestProofOfWeight(header_block.height, header_block.header_hash)
weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight(
weight_request, timeout=180
)
if weight_proof_response is None:
return
weight_proof = weight_proof_response.wp
if self.wallet_state_manager is None:
return
if self.server is not None and self.server.is_trusted_peer(peer, self.config["trusted_peers"]):
(
valid,
fork_point,
) = self.wallet_state_manager.weight_proof_handler.get_fork_point_no_validations(weight_proof)
else:
(
valid,
fork_point,
) = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof(weight_proof)
if not valid:
self.log.error(
f"invalid weight proof, num of epochs {len(weight_proof.sub_epochs)}"
f" recent blocks num ,{len(weight_proof.recent_chain_data)}"
)
self.log.debug(f"{weight_proof}")
return None
self.log.info(f"Validated, fork point is {fork_point}")
self.wallet_state_manager.sync_store.add_potential_fork_point(
header_block.header_hash, uint32(fork_point)
)
self.wallet_state_manager.sync_store.add_potential_peak(header_block)
self.start_sync()
def start_sync(self) -> None:
self.log.info("self.sync_event.set()")
self.sync_event.set()
async def check_new_peak(self) -> None:
if self.wallet_state_manager is None:
return
current_peak: Optional[BlockRecord] = self.wallet_state_manager.blockchain.get_peak()
if current_peak is None:
return
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
for _, block in potential_peaks:
if current_peak.weight < block.weight:
await asyncio.sleep(5)
self.start_sync()
return
async def sync_job(self) -> None:
while True:
self.log.info("Loop start in sync job")
if self._shut_down is True:
break
asyncio.create_task(self.check_new_peak())
await self.sync_event.wait()
self.sync_event.clear()
if self._shut_down is True:
break
try:
assert self.wallet_state_manager is not None
self.wallet_state_manager.set_sync_mode(True)
await self._sync()
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Loop exception in sync {e}. {tb}")
finally:
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_sync_mode(False)
self.log.info("Loop end in sync job")
async def _sync(self) -> None:
"""
Wallet has fallen far behind (or is starting up for the first time), and must be synced
up to the LCA of the blockchain.
"""
if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
return
highest_weight: uint128 = uint128(0)
peak_height: uint32 = uint32(0)
peak: Optional[HeaderBlock] = None
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
self.log.info(f"Have collected {len(potential_peaks)} potential peaks")
for header_hash, potential_peak_block in potential_peaks:
if potential_peak_block.weight > highest_weight:
highest_weight = potential_peak_block.weight
peak_height = potential_peak_block.height
peak = potential_peak_block
if peak_height is None or peak_height == 0:
return
if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
self.log.info("Not performing sync, already caught up.")
return
peers: List[WSChiaConnection] = self.server.get_full_node_connections()
if len(peers) == 0:
self.log.info("No peers to sync to")
return
async with self.wallet_state_manager.blockchain.lock:
fork_height = None
if peak is not None:
fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
if fork_height is None:
fork_height = uint32(0)
await self.wallet_state_manager.blockchain.warmup(fork_height)
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
advanced_peak = False
for i in range(max(0, fork_height - 1), peak_height, batch_size):
start_height = i
end_height = min(peak_height, start_height + batch_size)
peers = self.server.get_full_node_connections()
added = False
for peer in peers:
try:
added, advanced_peak = await self.fetch_blocks_and_validate(
peer,
uint32(start_height),
uint32(end_height),
None if advanced_peak else fork_height,
)
if added:
break
except Exception as e:
await peer.close()
exc = traceback.format_exc()
self.log.error(f"Error while trying to fetch from peer:{e} {exc}")
if not added:
raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}")
peak = self.wallet_state_manager.blockchain.get_peak()
assert peak is not None
self.wallet_state_manager.blockchain.clean_block_record(
min(
end_height - self.constants.BLOCKS_CACHE_SIZE,
peak.height - self.constants.BLOCKS_CACHE_SIZE,
)
)
async def fetch_blocks_and_validate(
self,
peer: WSChiaConnection,
height_start: uint32,
height_end: uint32,
fork_point_with_peak: Optional[uint32],
) -> Tuple[bool, bool]:
"""
Returns whether the blocks validated, and whether the peak was advanced
"""
if self.wallet_state_manager is None:
return False, False
self.log.info(f"Requesting blocks {height_start}-{height_end}")
request = RequestHeaderBlocks(uint32(height_start), uint32(height_end))
res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request)
if res is None or not isinstance(res, RespondHeaderBlocks):
raise ValueError("Peer returned no response")
header_blocks: List[HeaderBlock] = res.header_blocks
advanced_peak = False
if header_blocks is None:
raise ValueError(f"No response from peer {peer}")
if (
self.full_node_peer is not None
and peer.peer_host == self.full_node_peer.host
or peer.peer_host == "127.0.0.1"
):
trusted = True
pre_validation_results: Optional[List[PreValidationResult]] = None
else:
trusted = False
pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing(
header_blocks
)
if pre_validation_results is None:
return False, advanced_peak
assert len(header_blocks) == len(pre_validation_results)
for i in range(len(header_blocks)):
header_block = header_blocks[i]
if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None:
raise ValidationError(Err(pre_validation_results[i].error))
fork_point_with_old_peak = None if advanced_peak else fork_point_with_peak
if header_block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
header_block,
header_block.transactions_filter,
fork_point_with_old_peak,
)
# Get Additions
added_coins = await self.get_additions(peer, header_block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, header_block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins)
else:
header_block_record = HeaderBlockRecord(header_block, [], [])
start_t = time.time()
if trusted:
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record, None, trusted, fork_point_with_old_peak
)
else:
assert pre_validation_results is not None
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record,
pre_validation_results[i],
trusted,
fork_point_with_old_peak,
)
self.log.debug(
f"Time taken to validate {header_block.height} with fork "
f"{fork_point_with_old_peak}: {time.time() - start_t}"
)
if result == ReceiveBlockResult.NEW_PEAK:
advanced_peak = True
self.wallet_state_manager.state_changed("new_block")
elif result == ReceiveBlockResult.INVALID_BLOCK:
raise ValueError("Value error peer sent us invalid block")
if advanced_peak:
await self.wallet_state_manager.create_more_puzzle_hashes()
return True, advanced_peak
def validate_additions(
self,
coins: List[Tuple[bytes32, List[Coin]]],
proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]],
root,
):
if proofs is None:
# Verify root
additions_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle_hash, coins_l in coins:
additions_merkle_set.add_already_hashed(puzzle_hash)
additions_merkle_set.add_already_hashed(hash_coin_list(coins_l))
additions_root = additions_merkle_set.get_root()
if root != additions_root:
return False
else:
for i in range(len(coins)):
assert coins[i][0] == proofs[i][0]
coin_list_1: List[Coin] = coins[i][1]
puzzle_hash_proof: bytes32 = proofs[i][1]
coin_list_proof: Optional[bytes32] = proofs[i][2]
if len(coin_list_1) == 0:
# Verify exclusion proof for puzzle hash
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if not_included is False:
return False
else:
try:
# Verify inclusion proof for coin list
included = confirm_included_already_hashed(
root,
hash_coin_list(coin_list_1),
coin_list_proof,
)
if included is False:
return False
except AssertionError:
return False
try:
# Verify inclusion proof for puzzle hash
included = confirm_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if included is False:
return False
except AssertionError:
return False
return True
def validate_removals(self, coins, proofs, root):
if proofs is None:
# If there are no proofs, it means all removals were returned in the response.
# we must find the ones relevant to our wallets.
# Verify removals root
removals_merkle_set = MerkleSet()
for name_coin in coins:
# TODO review all verification
name, coin = name_coin
if coin is not None:
removals_merkle_set.add_already_hashed(coin.name())
removals_root = removals_merkle_set.get_root()
if root != removals_root:
return False
else:
# This means the full node has responded only with the relevant removals
# for our wallet. Each merkle proof must be verified.
if len(coins) != len(proofs):
return False
for i in range(len(coins)):
# Coins are in the same order as proofs
if coins[i][0] != proofs[i][0]:
return False
coin = coins[i][1]
if coin is None:
# Verifies merkle proof of exclusion
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
proofs[i][1],
)
if not_included is False:
return False
else:
# Verifies merkle proof of inclusion of coin name
if coins[i][0] != coin.name():
return False
included = confirm_included_already_hashed(
root,
coin.name(),
proofs[i][1],
)
if included is False:
return False
return True
async def get_additions(self, peer: WSChiaConnection, block_i, additions) -> Optional[List[Coin]]:
if len(additions) > 0:
additions_request = RequestAdditions(block_i.height, block_i.header_hash, additions)
additions_res: Optional[Union[RespondAdditions, RejectAdditionsRequest]] = await peer.request_additions(
additions_request
)
if additions_res is None:
await peer.close()
return None
elif isinstance(additions_res, RespondAdditions):
validated = self.validate_additions(
additions_res.coins,
additions_res.proofs,
block_i.foliage_transaction_block.additions_root,
)
if not validated:
await peer.close()
return None
added_coins = []
for ph_coins in additions_res.coins:
ph, coins = ph_coins
added_coins.extend(coins)
return added_coins
            elif isinstance(additions_res, RejectAdditionsRequest):
await peer.close()
return None
return None
else:
added_coins = []
return added_coins
async def get_removals(self, peer: WSChiaConnection, block_i, additions, removals) -> Optional[List[Coin]]:
assert self.wallet_state_manager is not None
request_all_removals = False
# Check if we need all removals
for coin in additions:
puzzle_store = self.wallet_state_manager.puzzle_store
record_info: Optional[DerivationRecord] = await puzzle_store.get_derivation_record_for_puzzle_hash(
coin.puzzle_hash.hex()
)
if record_info is not None and record_info.wallet_type == WalletType.COLOURED_COIN:
# TODO why ?
request_all_removals = True
break
if record_info is not None and record_info.wallet_type == WalletType.DISTRIBUTED_ID:
request_all_removals = True
break
if len(removals) > 0 or request_all_removals:
if request_all_removals:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, None)
else:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, removals)
removals_res: Optional[Union[RespondRemovals, RejectRemovalsRequest]] = await peer.request_removals(
removals_request
)
if removals_res is None:
return None
elif isinstance(removals_res, RespondRemovals):
validated = self.validate_removals(
removals_res.coins,
removals_res.proofs,
block_i.foliage_transaction_block.removals_root,
)
if validated is False:
await peer.close()
return None
removed_coins = []
for _, coins_l in removals_res.coins:
if coins_l is not None:
removed_coins.append(coins_l)
return removed_coins
elif isinstance(removals_res, RejectRemovalsRequest):
return None
else:
return None
else:
return []
| 42.885845 | 119 | 0.594575 |
4a1a7f1f20cc46c762d68b98e026daece631e120
| 2,613 |
py
|
Python
|
duckietown_rl/gym_duckietown/envs/multimap_env.py
|
rizavelioglu/AIDO-CITEC
|
97f4d8564dc6eb743063a7902a8932a429349c04
|
[
"MIT"
] | 18 |
2020-08-31T11:30:41.000Z
|
2022-02-15T07:35:12.000Z
|
duckietown_rl/gym_duckietown/envs/multimap_env.py
|
rizavelioglu/AIDO-CITEC
|
97f4d8564dc6eb743063a7902a8932a429349c04
|
[
"MIT"
] | 5 |
2020-09-27T02:15:56.000Z
|
2022-01-23T17:56:24.000Z
|
duckietown_rl/gym_duckietown/envs/multimap_env.py
|
rizavelioglu/AIDO-CITEC
|
97f4d8564dc6eb743063a7902a8932a429349c04
|
[
"MIT"
] | 21 |
2020-04-28T16:38:01.000Z
|
2021-11-16T14:21:08.000Z
|
# coding=utf-8
import os
import gym
from .duckietown_env import DuckietownEnv
from ..utils import get_subdir_path
class MultiMapEnv(gym.Env):
"""
Environment which samples from multiple environments, for
    multi-task learning
"""
def __init__(self, **kwargs):
self.env_list = []
maps_dir = get_subdir_path('maps')
self.window = None
# Try loading each of the available map files
for map_file in os.listdir(maps_dir):
map_name = map_file.split('.')[0]
# Do not load the regression test maps
if map_name.startswith('regress'):
continue
env = DuckietownEnv(map_name=map_name, **kwargs)
self.action_space = env.action_space
self.observation_space = env.observation_space
self.reward_range = env.reward_range
self.env_list.append(env)
assert len(self.env_list) > 0
self.cur_env_idx = 0
self.cur_reward_sum = 0
self.cur_num_steps = 0
def seed(self, seed):
for env in self.env_list:
env.seed(seed)
# Seed the random number generator
self.np_random, _ = gym.utils.seeding.np_random(seed)
return [seed]
def reset(self):
#self.cur_env_idx = self.np_random.randint(0, len(self.env_list))
self.cur_env_idx = (self.cur_env_idx + 1) % len(self.env_list)
env = self.env_list[self.cur_env_idx]
return env.reset()
def step(self, action):
env = self.env_list[self.cur_env_idx]
obs, reward, done, info = env.step(action)
# Keep track of the total reward for this episode
self.cur_reward_sum += reward
self.cur_num_steps += 1
# If the episode is done, sample a new environment
if done:
self.cur_reward_sum = 0
self.cur_num_steps = 0
return obs, reward, done, info
def render(self, mode='human', close=False):
env = self.env_list[self.cur_env_idx]
# Make all environments use the same rendering window
if self.window is None:
ret = env.render(mode, close)
self.window = env.window
else:
env.window = self.window
ret = env.render(mode, close)
return ret
def close(self):
for env in self.env_list:
env.close()
self.cur_env_idx = 0
self.env_names = None
self.env_list = None
@property
def step_count(self):
env = self.env_list[self.cur_env_idx]
return env.step_count
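# Minimal usage sketch (hypothetical; any extra kwargs are simply forwarded to
# each DuckietownEnv instance when the maps are loaded):
#
#     env = MultiMapEnv()
#     env.seed(0)
#     obs = env.reset()                      # advances to the next loaded map
#     for _ in range(100):
#         obs, reward, done, info = env.step(env.action_space.sample())
#         if done:
#             obs = env.reset()              # switches to another map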
| 26.13 | 73 | 0.600459 |
4a1a800e7b1ec1c3c27bddad5ff399926f8f1d70
| 4,168 |
py
|
Python
|
wordnet2neo4j.py
|
sergey-zarealye-com/wordnet2neo4j
|
2e97dda005549d60f284f851a2e6432f9a71422f
|
[
"Apache-2.0"
] | null | null | null |
wordnet2neo4j.py
|
sergey-zarealye-com/wordnet2neo4j
|
2e97dda005549d60f284f851a2e6432f9a71422f
|
[
"Apache-2.0"
] | null | null | null |
wordnet2neo4j.py
|
sergey-zarealye-com/wordnet2neo4j
|
2e97dda005549d60f284f851a2e6432f9a71422f
|
[
"Apache-2.0"
] | 3 |
2016-07-01T19:05:39.000Z
|
2020-04-01T17:28:23.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 4 15:03:08 2015
@author: sergey, comcon1
Example usage:
NOUNS
-i dict/data.noun --neo4j bolt://127.0.0.1:7687 --nodelabel Enwordnet --reltype Pointer --limit 1000
VERBS
-i dict/data.verb --neo4j bolt://127.0.0.1:7687 --nodelabel Enwordnet --reltype Pointer --limit 1000
"""
import argparse
import re, sys
from neo4jstuff import StuffNeo4j
def main(argv):
parser = argparse.ArgumentParser(description=
"Parses WordNet database. Stores the results in neo4j dtabase/")
parser.add_argument(
"--neo4j", required=True,
help="URI string for connection to neo4j database, e.g. 'bolt://127.0.0.1:7687'."
)
parser.add_argument(
"--password", required=False,
help="Password for neo4j user for connection to DB."
)
parser.add_argument(
"-i", "--input", required=True,
help="Wordnet data file e.g. dict/data.noun ."
)
parser.add_argument(
"--nodelabel", required=True,
help="Wordnet node label."
)
parser.add_argument(
"--reltype", required=True,
help="Wordnet relation type."
)
parser.add_argument(
"--limit", default=sys.maxsize, type=int,
help="Maximum number of lines to process in input file, for debugging."
)
parser.add_argument(
"--encoding",
help="Wordnet data file encoding e.g. cp1251."
)
args = parser.parse_args()
# Initialize params
the = StuffNeo4j(args.nodelabel, args.reltype)
# Connect to DB
if args.password is None:
the.connect(args.neo4j)
else:
the.connect(args.neo4j, pwd=args.password)
entry_pattern = re.compile(r'(\d{8,8}) \d\d (\w) \d\d (\w+) ')
dictionary = []
cnt = 0
pos = None
with open(args.input) as wordnet:
for raw_line in wordnet:
if args.encoding is not None:
line = raw_line.decode(args.encoding)
else:
line = raw_line
entry = entry_pattern.findall(line)
if len(entry):
name = entry[0][2]
pos = entry[0][1]
synset_id = pos + entry[0][0]
word_node = the.create_node(args.nodelabel,
name=name,
synset_id=synset_id)
dictionary.append(word_node)
cnt += 1
if cnt % 100 == 0:
the.insert_bulk(dictionary)
print( "%d/%d words inserted" % (len(dictionary), cnt) )
dictionary = []
if cnt > args.limit:
break
the.insert_bulk(dictionary)
the.create_indexes()
#TODO we only add relations to existing nodes!
pointer_pattern = re.compile(r'([@!;~i#msp%=+-cru<\^>*]{1,2} \d{8,8} \w)')
cnt = 0
relations = []
with open(args.input) as wordnet:
for line in wordnet:
entry = entry_pattern.findall(line)
if len(entry):
name = entry[0][2]
synset_id = entry[0][1] + entry[0][0]
pointers = pointer_pattern.findall(line)
if len(pointers):
for pointer in pointers:
ptype, target, target_pos = pointer.split()
try:
rel = the.create_wordnet_rel(synset_id,
target_pos+target,
ptype)
relations.append(rel)
except:
pass
cnt += 1
if cnt % 100 == 0:
the.insert_bulk(relations)
print( "%d/%d relations inserted" % \
(len(relations), cnt) )
relations = []
if cnt > args.limit:
break
the.insert_bulk(relations)
if __name__ == '__main__':
main(sys.argv)
| 32.818898 | 100 | 0.5 |
4a1a802518ab8315af45466eec97e5dfda31e5d7
| 1,147 |
py
|
Python
|
tests/test_provider_gavinbunney_kubectl.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507 |
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_gavinbunney_kubectl.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135 |
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_gavinbunney_kubectl.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81 |
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_gavinbunney_kubectl.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:27 UTC)
def test_provider_import():
import terrascript.provider.gavinbunney.kubectl
def test_resource_import():
from terrascript.resource.gavinbunney.kubectl import kubectl_manifest
from terrascript.resource.gavinbunney.kubectl import kubectl_server_version
def test_datasource_import():
from terrascript.data.gavinbunney.kubectl import kubectl_file_documents
from terrascript.data.gavinbunney.kubectl import kubectl_filename_list
from terrascript.data.gavinbunney.kubectl import kubectl_path_documents
from terrascript.data.gavinbunney.kubectl import kubectl_server_version
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.gavinbunney.kubectl
#
# t = terrascript.provider.gavinbunney.kubectl.kubectl()
# s = str(t)
#
# assert 'https://github.com/gavinbunney/terraform-provider-kubectl' in s
# assert '1.11.3' in s
| 31 | 80 | 0.787271 |
4a1a8028bee7a7c47ec7a99a8e7eb8965401d944
| 11,230 |
py
|
Python
|
caer/io/resize.py
|
brccabral/caer
|
2ddb84095202aa98224b04612eff9e97c8680309
|
[
"MIT"
] | null | null | null |
caer/io/resize.py
|
brccabral/caer
|
2ddb84095202aa98224b04612eff9e97c8680309
|
[
"MIT"
] | null | null | null |
caer/io/resize.py
|
brccabral/caer
|
2ddb84095202aa98224b04612eff9e97c8680309
|
[
"MIT"
] | null | null | null |
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++, Cuda
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
import math
import cv2 as cv
from ..coreten import Tensor, to_tensor
from .._internal import _check_target_size
from ..globals import (
INTER_AREA, INTER_CUBIC, INTER_NEAREST, INTER_LINEAR
)
__all__ = [
'resize'
]
def resize(tens, target_size=None, resize_factor=None, preserve_aspect_ratio=False, interpolation='bilinear'):
r"""
Resizes an image to a target_size without aspect ratio distortion.
Your output images will be of size ``target_size``, and will not be distorted. Instead, the parts of the image that do not fit within the target size get cropped out.
The resizing process is:
1. Resize the image as minimally as possible.
2. Take the largest centered crop of the image with dimensions = ``target_size``.
Alternatively, you may use:
```python
size = (200,200)
tens = caer.resize(tens, target_size=size, preserve_aspect_ratio=True)
```
Note:
``caer.imread()`` comes with an in-built functionality to resize your images, eliminating the need for you to call ``caer.resize()``. This is purely optional and may appeal to certain users.
You may also use ``caer.smart_resize()`` for on-the-fly image resizing that `preserves the aspect ratio`.
Args:
tens (Tensor): Input Image. Must be in the format ``(height, width, channels)``.
target_size (tuple): Target size. Must be a tuple of ``(width, height)`` integer.
resize_factor (float, tuple): Resizing Factor to employ.
Shrinks the image if ``resize_factor < 1``
Enlarges the image if ``resize_factor > 1``
preserve_aspect_ratio (bool): Prevent aspect ratio distortion (employs center crop).
interpolation (str): Interpolation to use for resizing. Defaults to `'bilinear'`.
Supports `'bilinear'`, `'bicubic'`, `'area'`, `'nearest'`.
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.sunrise()
>> tens.shape
(427, 640, 3)
>> resized = caer.resize(tens, target_size=(200,200)) # Hard-resize. May distort aspect ratio
>> resized.shape
(200, 200, 3)
>> resized_wf = caer.resize(tens, resize_factor=.5) # Resizes the image to half its dimensions
>> resized_wf.shape
(213, 320, 3)
>> resized = caer.resize(tens, target_size=(200,200), preserve_aspect_ratio=True) # Preserves aspect ratio
>> resized.shape
(200, 200, 3)
"""
# Opencv uses the (h,w) format
height, width = tens.shape[:2]
interpolation = str(interpolation)
cspace = None
if isinstance(tens, Tensor):
# We'll need to preserve this before returning
cspace = tens.cspace
if resize_factor is None:
if target_size is None:
if preserve_aspect_ratio:
raise ValueError('Specify a target size')
else:
raise ValueError('Specify either a resize factor or target dimensions')
if target_size is not None:
if len(target_size) == 2:
new_shape = target_size
else:
raise ValueError('Tuple shape must be = 2 (width, height)')
if resize_factor is not None:
target_size = None
preserve_aspect_ratio = False
if not isinstance(resize_factor, (int, float)):
raise ValueError('resize_factor must be an integer or float')
if resize_factor > 1:
interpolation = 'bicubic'
new_shape = (int(resize_factor * width), int(resize_factor * height))
interpolation_methods = {
'nearest': INTER_NEAREST, '0': INTER_NEAREST, 0: INTER_NEAREST, # 0
'bilinear': INTER_LINEAR, '1': INTER_LINEAR, 1: INTER_LINEAR, # 1
'bicubic': INTER_CUBIC, '2': INTER_CUBIC, 2: INTER_CUBIC, # 2
'area': INTER_AREA, '3': INTER_AREA, 3: INTER_AREA # 3
}
if interpolation not in interpolation_methods:
raise ValueError('Specify a valid interpolation type - area/nearest/bicubic/bilinear')
if preserve_aspect_ratio:
im = _resize_with_ratio(tens, target_size=target_size, preserve_aspect_ratio=preserve_aspect_ratio, interpolation=interpolation_methods[interpolation])
else:
width, height = new_shape[:2]
im = _cv2_resize(tens, (width, height), interpolation=interpolation_methods[interpolation])
# For this function, the <cspace> attribute is not required.
# So, we disable the mandatory check that the <cspace> attribute needs to be passed for
# foreign Tensors/ndarrays
return to_tensor(im, cspace=cspace, override_checks=True)
def smart_resize(tens, target_size, interpolation='bilinear'):
r"""
Resizes an image to a target_size without aspect ratio distortion.
Your output images will be of size `target_size`, and will not be distorted. Instead, the parts of the image that do not fit within the target size get cropped out.
The resizing process is:
1. Resize the image as minimally as possible.
2. Take the largest centered crop of the image with dimensions = `target_size`.
Alternatively, you may use:
```python
size = (200,200)
tens = caer.resize(tens, target_size=size, preserve_aspect_ratio=True)
```
Args:
tens (Tensor): Input Image. Must be in the format `(height, width, channels)`.
target_size (tuple): Target size. Must be a tuple of `(width, height)` integer.
interpolation (str): Interpolation to use for resizing. Defaults to `'bilinear'`.
Supports `'bilinear'`, `'bicubic'`, `'area'`, `'nearest'`.
Returns:
Tensor of shape `(height, width, channels)`
Examples::
>> tens = caer.data.sunrise()
>> tens.shape
(427, 640, 3)
>> resized = caer.smart_resize(tens, target_size=(200,200))
>> resized.shape
(200, 200, 3)
"""
# if not isinstance(tens, Tensor):
# raise ValueError('To use `caer.smart_resize()`, `tens` needs to be a caer.Tensor')
im = _resize_with_ratio(tens, target_size=target_size, preserve_aspect_ratio=True, interpolation=interpolation)
# For this function, the <cspace> attribute is not required.
# So, we disable the mandatory check that the <cspace> attribute needs to be passed for
# foreign Tensors/ndarrays
return to_tensor(im, override_checks=True)
def _cv2_resize(image, target_size, interpolation=None):
"""
ONLY TO BE USED INTERNALLY. NOT AVAILABLE FOR EXTERNAL USAGE.
Resizes the image ignoring the aspect ratio of the original image
"""
_ = _check_target_size(target_size)
width, height = target_size[:2]
if interpolation is None:
interpolation = INTER_AREA
dimensions = (width, height)
return cv.resize(image, dimensions, interpolation=interpolation)
def _resize_with_ratio(tens, target_size, preserve_aspect_ratio=False, interpolation='bilinear'):
"""
Resizes an image using advanced algorithms
:param target_size: Tuple of size 2 in the format (width,height)
:param preserve_aspect_ratio: Boolean to keep/ignore aspect ratio when resizing
"""
_ = _check_target_size(target_size)
interpolation = str(interpolation)
if not isinstance(preserve_aspect_ratio, bool):
raise ValueError('preserve_aspect_ratio must be a boolean')
interpolation_methods = {
'nearest': INTER_NEAREST, '0': INTER_NEAREST, # 0
'bilinear': INTER_LINEAR, '1': INTER_LINEAR,# 1
'bicubic': INTER_CUBIC, '2': INTER_CUBIC,# 2
'area': INTER_AREA, '3': INTER_AREA,# 3
}
if interpolation not in interpolation_methods:
raise ValueError('Specify a valid interpolation type - area/nearest/bicubic/bilinear')
oh, ow = tens.shape[:2]
target_w, target_h = target_size
if target_h > oh or target_w > ow:
raise ValueError('To compute resizing keeping the aspect ratio, the target size dimensions must be <= actual image dimensions')
# Computing minimal resize
# min_width, w_factor = _compute_minimal_resize(ow, target_w)
# min_height, h_factor = _compute_minimal_resize(oh, target_h)
minimal_resize_factor = _compute_minimal_resize((ow, oh), (target_w, target_h))
# Resizing minimally
tens = _cv2_resize(tens, (ow//minimal_resize_factor, oh//minimal_resize_factor))
# Computing centre crop (to avoid extra crop, we resize minimally first)
tens = _compute_centre_crop(tens, (target_w, target_h))
if tens.shape[:2] != target_size[:2]:
tens = _cv2_resize(tens, (target_w, target_h), interpolation=interpolation_methods[interpolation])
return tens
def _compute_minimal_resize(org_size, target_dim):
# for i in range(10):
# i += 1
# d = dim*i
# if org_dim >= d and dim < dim*(i+1):
# if (org_dim - dim*(i+1)) > dim:
# continue
# else:
# return d, i
# import math
# mi = math.floor(org_dim/dim)
# d = dim * mi
# return d, mi
if not isinstance(org_size, tuple) or not isinstance(target_dim, tuple):
raise ValueError('org_size and target_dim must be a tuple')
if len(org_size) != 2 or len(target_dim) != 2:
raise ValueError('Size of tuple must be = 2')
    ow, oh = org_size[:2]  # org_size is passed as (width, height)
targ_w, targ_h = target_dim[:2]
h_factor = math.floor(oh/targ_h)
w_factor = math.floor(ow/targ_w)
if h_factor <= w_factor:
return h_factor
else:
return w_factor
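# Worked example (hypothetical sizes): for a 640x427 (width x height) image and
# a (200, 200) target, w_factor = 640 // 200 = 3 and h_factor = 427 // 200 = 2,
# so the returned factor is 2; _resize_with_ratio then shrinks the image to
# 320x213 before taking the 200x200 centre crop.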
def _compute_centre_crop(tens, target_size):
_ = _check_target_size(target_size)
# Getting org height and target
oh, ow = tens.shape[:2]
target_w, target_h = target_size
# The following line is actually the right way of accessing height and width of an opencv-specific image (height, width). However for some reason, while the code runs, this is flipped (it now becomes (width,height)). Testing needs to be done to catch this little bug
# oh, ow = tens.shape[:2]
if target_h > oh or target_w > ow:
raise ValueError('To compute centre crop, target size dimensions must be <= tens dimensions')
diff_h = (oh - target_h) // 2
diff_w = (ow - target_w ) // 2
# tens[y:y+h, x:x+h]
return tens[diff_h:diff_h + target_h, diff_w:diff_w + target_w]
| 37.811448 | 270 | 0.634907 |
4a1a81945766d3bfa7a4bdf301ebfca34eb81973
| 12,840 |
py
|
Python
|
gibson/gibson/envs/minitaur_env.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 120 |
2019-04-22T04:45:28.000Z
|
2022-03-23T01:53:17.000Z
|
gibson/envs/minitaur_env.py
|
bradleyemi/GibsonEnv
|
30c2b54c29e8561fc5b5127cb4bdb08ef6869299
|
[
"MIT"
] | 14 |
2019-06-12T08:21:21.000Z
|
2021-08-25T15:36:58.000Z
|
gibson/envs/minitaur_env.py
|
bradleyemi/GibsonEnv
|
30c2b54c29e8561fc5b5127cb4bdb08ef6869299
|
[
"MIT"
] | 19 |
2019-06-19T07:00:36.000Z
|
2022-03-24T07:18:30.000Z
|
'''
Customized Minitaur Environment for Cambria
Author: Zhiyang He, Stanford University
Original: Pybullet
Note that in the original pybullet environment, major differences exist in simulation
accuracy.
Original:
Solver iterations: 300 (Major difference)
Time step: 1/100.0
Action repeat: 1
Original Accurate:
Solver iterations: 60
Time step: 1/500.0
Action repeat: 5
Current:
Solver iterations: 5
Time step: 1/88.0
Action repeat: 4
'''
from gibson.envs.env_modalities import CameraRobotEnv, BaseRobotEnv
from gibson.envs.env_bases import *
from gibson.core.physics.drivers.minitaur import Minitaur
import os, inspect
import math
import time
import gym
from gym import spaces
from gym.utils import seeding
import gibson
import numpy as np
import pybullet
MINITAUR_TIMESTEP = 1.0/(4 * 22)
MINITAUR_FRAMESKIP = 4
ACTION_EPS = 0.01
tracking_camera = {
'yaw': 40,
'z_offset': 0.3,
'distance': 1,
'pitch': -0
}
class MinitaurNavigateEnv(CameraRobotEnv):
"""The gym environment for the minitaur.
It simulates the locomotion of a minitaur, a quadruped robot. The state space
    includes the angles, velocities and torques for all the motors, and the action
space is the desired motor angle for each motor. The reward function is based
on how far the minitaur walks in 1000 steps and penalizes the energy
expenditure.
"""
distance_weight = 1.0
energy_weight = 0.005
shake_weight = 0.0
drift_weight = 0.0
distance_limit = float("inf")
observation_noise_stdev = 0.0
action_bound = 1
env_randomizer = None
hard_reset = False
leg_model_enabled = True
num_bullet_solver_iterations = 300
pd_control_enabled = True
accurate_motor_model_enabled = True
NUM_SUBSTEPS = 5 # PD control needs smaller time step for stability.
def __init__(self, config, gpu_count=0):
"""Initialize the minitaur gym environment.
Args:
distance_weight: The weight of the distance term in the reward.
energy_weight: The weight of the energy term in the reward.
shake_weight: The weight of the vertical shakiness term in the reward.
drift_weight: The weight of the sideways drift term in the reward.
distance_limit: The maximum distance to terminate the episode.
observation_noise_stdev: The standard deviation of observation noise.
leg_model_enabled: Whether to use a leg motor to reparameterize the action
space.
hard_reset: Whether to wipe the simulation and load everything when reset
is called. If set to false, reset just place the minitaur back to start
position and set its pose to initial configuration.
env_randomizer: An EnvRandomizer to randomize the physical properties
during reset().
"""
self.config = self.parse_config(config)
assert(self.config["envname"] == self.__class__.__name__ or self.config["envname"] == "TestEnv")
CameraRobotEnv.__init__(self, self.config, gpu_count,
scene_type="building",
tracking_camera=tracking_camera)
self.robot_introduce(Minitaur(self.config, env=self,
pd_control_enabled=self.pd_control_enabled,
accurate_motor_model_enabled=self.accurate_motor_model_enabled))
self.scene_introduce()
self.gui = self.config["mode"] == "gui"
self.total_reward = 0
self.total_frame = 0
self.action_repeat = 1
## Important: PD controller needs more accuracy
'''if self.pd_control_enabled or self.accurate_motor_model_enabled:
self.time_step = self.config["speed"]["timestep"]
self.time_step /= self.NUM_SUBSTEPS
self.num_bullet_solver_iterations /= self.NUM_SUBSTEPS
self.action_repeat *= self.NUM_SUBSTEPS
pybullet.setPhysicsEngineParameter(physicsClientId=self.physicsClientId,
numSolverIterations=int(self.num_bullet_solver_iterations))
pybullet.setTimeStep(self.time_step, physicsClientId=self.physicsClientId)
'''
pybullet.setPhysicsEngineParameter(physicsClientId=self.physicsClientId,
numSolverIterations=int(self.num_bullet_solver_iterations))
self._observation = []
self._last_base_position = [0, 0, 0]
self._action_bound = self.action_bound
self._env_randomizer = self.env_randomizer
if self._env_randomizer is not None:
self._env_randomizer.randomize_env(self)
self._objectives = []
self.viewer = None
self.Amax = [0] * 8
def set_env_randomizer(self, env_randomizer):
self._env_randomizer = env_randomizer
def configure(self, args):
self._args = args
#def _reset(self):
#if self._env_randomizer is not None:
# self._env_randomizer.randomize_env(self)
#self._last_base_position = [0, 0, 0]
#self._objectives = []
#if not self._torque_control_enabled:
# for _ in range(1 / self.timestep):
# if self._pd_control_enabled or self._accurate_motor_model_enabled:
# self.robot.ApplyAction([math.pi / 2] * 8)
# pybullet.stepSimulation()
#return self._noisy_observation()
def _transform_action_to_motor_command(self, action):
if self.leg_model_enabled:
#for i, action_component in enumerate(action):
# if not (-self._action_bound - ACTION_EPS <= action_component <= self._action_bound + ACTION_EPS):
# raise ValueError("{}th action {} out of bounds.".format(i, action_component))
action = self.robot.ConvertFromLegModel(action)
return action
def _step(self, action):
"""Step forward the simulation, given the action.
Args:
action: A list of desired motor angles for eight motors.
Returns:
observations: The angles, velocities and torques of all motors.
reward: The reward for the current state-action pair.
done: Whether the episode has ended.
info: A dictionary that stores diagnostic information.
Raises:
ValueError: The action dimension is not the same as the number of motors.
ValueError: The magnitude of actions is out of bounds.
"""
#print("Env apply raw action", action)
action = self._transform_action_to_motor_command(action)
#print("Env apply action", action)
#for _ in range(self._action_repeat):
# self.robot.ApplyAction(action)
# pybullet.stepSimulation()
for i in range(len(self.Amax)):
if action[i] > self.Amax[i]:
self.Amax[i] = action[i]
#print("Action max", self.Amax)
for _ in range(self.action_repeat):
state = CameraRobotEnv._step(self, action)
return state
def calc_rewards_and_done(self, action, state):
## TODO (hzyjerry): make use of action, state
done = self._termination(state)
        rewards = self._rewards(action)
#return reward, False
return rewards, done
def get_minitaur_motor_angles(self):
"""Get the minitaur's motor angles.
Returns:
A numpy array of motor angles.
"""
return self.robot.GetMotorAngles()
def get_minitaur_motor_velocities(self):
"""Get the minitaur's motor velocities.
Returns:
A numpy array of motor velocities.
"""
return self.robot.GetMotorVelocities()
def get_minitaur_motor_torques(self):
"""Get the minitaur's motor torques.
Returns:
A numpy array of motor torques.
"""
return self.robot.GetMotorTorques()
def get_minitaur_base_orientation(self):
"""Get the minitaur's base orientation, represented by a quaternion.
Returns:
A numpy array of minitaur's orientation.
"""
return self.robot.GetBaseOrientation()
def is_fallen(self):
"""Decide whether the minitaur has fallen.
If the up directions between the base and the world is larger (the dot
product is smaller than 0.85) or the base is very low on the ground
(the height is smaller than 0.13 meter), the minitaur is considered fallen.
Returns:
Boolean value that indicates whether the minitaur has fallen.
"""
orientation = self.robot.GetBaseOrientation()
rot_mat = pybullet.getMatrixFromQuaternion(orientation)
local_up = rot_mat[6:]
pos = self.robot.GetBasePosition()
#return (np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85 or
# pos[2] < 0.13)
return False
def _termination(self, state=None, debugmode=False):
position = self.robot.GetBasePosition()
distance = math.sqrt(position[0]**2 + position[1]**2)
#return self.is_fallen() or distance > self.distance_limit
return False
def _rewards(self, action=None, debugmode=False):
a = action
current_base_position = self.robot.GetBasePosition()
forward_reward = current_base_position[0] - self._last_base_position[0]
drift_reward = -abs(current_base_position[1] - self._last_base_position[1])
shake_reward = -abs(current_base_position[2] - self._last_base_position[2])
self._last_base_position = current_base_position
energy_reward = np.abs(
np.dot(self.robot.GetMotorTorques(),
self.robot.GetMotorVelocities())) * self.timestep
reward = (
self.distance_weight * forward_reward -
self.energy_weight * energy_reward + self.drift_weight * drift_reward
+ self.shake_weight * shake_reward)
self._objectives.append(
[forward_reward, energy_reward, drift_reward, shake_reward])
return [reward, ]
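    # Worked example with hypothetical numbers: with the class defaults
    # (distance_weight=1.0, energy_weight=0.005, drift_weight=shake_weight=0.0),
    # a step that moves the base 0.05 m forward while spending 2.0 units of
    # energy scores 1.0 * 0.05 - 0.005 * 2.0 = 0.04.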
def get_objectives(self):
return self._objectives
def _get_observation(self):
self._observation = self.robot.GetObservation()
return self._observation
def _noisy_observation(self):
self._get_observation()
observation = np.array(self._observation)
if self.observation_noise_stdev > 0:
observation += (np.random.normal(
scale=self.observation_noise_stdev, size=observation.shape) *
self.robot.GetObservationUpperBound())
return observation
#==================== Environemnt Randomizer ====================
## (hzyjerry) TODO: still under construction, not ready to use
def randomize_env(self, env):
self._randomize_minitaur(env.minitaur)
def _randomize_minitaur(self, minitaur):
"""Randomize various physical properties of minitaur.
It randomizes the mass/inertia of the base, mass/inertia of the legs,
friction coefficient of the feet, the battery voltage and the motor damping
at each reset() of the environment.
Args:
minitaur: the Minitaur instance in minitaur_gym_env environment.
"""
base_mass = minitaur.GetBaseMassFromURDF()
randomized_base_mass = random.uniform(
base_mass * (1.0 + self._minitaur_base_mass_err_range[0]),
base_mass * (1.0 + self._minitaur_base_mass_err_range[1]))
minitaur.SetBaseMass(randomized_base_mass)
leg_masses = minitaur.GetLegMassesFromURDF()
leg_masses_lower_bound = np.array(leg_masses) * (
1.0 + self._minitaur_leg_mass_err_range[0])
leg_masses_upper_bound = np.array(leg_masses) * (
1.0 + self._minitaur_leg_mass_err_range[1])
randomized_leg_masses = [
np.random.uniform(leg_masses_lower_bound[i], leg_masses_upper_bound[i])
for i in range(len(leg_masses))
]
minitaur.SetLegMasses(randomized_leg_masses)
randomized_battery_voltage = random.uniform(BATTERY_VOLTAGE_RANGE[0],
BATTERY_VOLTAGE_RANGE[1])
minitaur.SetBatteryVoltage(randomized_battery_voltage)
randomized_motor_damping = random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0],
MOTOR_VISCOUS_DAMPING_RANGE[1])
minitaur.SetMotorViscousDamping(randomized_motor_damping)
randomized_foot_friction = random.uniform(MINITAUR_LEG_FRICTION[0],
MINITAUR_LEG_FRICTION[1])
minitaur.SetFootFriction(randomized_foot_friction)
| 37.988166 | 114 | 0.648442 |
4a1a81a27d9347befdb44374a33456f35cc5bcd1
| 2,189 |
py
|
Python
|
deeprob/spn/utils/statistics.py
|
deeprob-org/deeprob-kit
|
c46050eb8047dcfa0cc2420887624184c042e32e
|
[
"MIT"
] | 38 |
2021-09-27T11:39:23.000Z
|
2022-02-09T15:33:44.000Z
|
deeprob/spn/utils/statistics.py
|
deeprob-org/deeprob-kit
|
c46050eb8047dcfa0cc2420887624184c042e32e
|
[
"MIT"
] | 14 |
2021-09-27T15:04:46.000Z
|
2021-12-08T21:08:01.000Z
|
deeprob/spn/utils/statistics.py
|
deeprob-org/deeprob-kit
|
c46050eb8047dcfa0cc2420887624184c042e32e
|
[
"MIT"
] | 3 |
2021-09-30T08:05:06.000Z
|
2022-01-02T04:44:19.000Z
|
# MIT License: Copyright (c) 2021 Lorenzo Loconte, Gennaro Gala
from deeprob.spn.structure.leaf import Leaf
from deeprob.spn.structure.node import Node, Sum, Product, bfs
from deeprob.spn.utils.filter import collect_nodes, filter_nodes_by_type
def compute_statistics(root: Node) -> dict:
"""
Compute some statistics of a SPN given its root.
The computed statistics are the following:
- n_nodes, the number of nodes
- n_sum, the number of sum nodes
- n_prod, the number of product nodes
- n_leaves, the number of leaves
- n_edges, the number of edges
- n_params, the number of parameters
- depth, the depth of the network
:param root: The root of the SPN.
:return: A dictionary containing the statistics.
"""
stats = {
'n_nodes': len(collect_nodes(root)),
'n_sum': len(filter_nodes_by_type(root, Sum)),
'n_prod': len(filter_nodes_by_type(root, Product)),
'n_leaves': len(filter_nodes_by_type(root, Leaf)),
'n_edges': compute_edges_count(root),
'n_params': compute_parameters_count(root),
'depth': compute_depth(root)
}
return stats
def compute_edges_count(root: Node) -> int:
"""
Get the number of edges of a SPN given its root.
:param root: The root of the SPN.
:return: The number of edges.
"""
return sum(len(n.children) for n in filter_nodes_by_type(root, (Sum, Product)))
def compute_parameters_count(root: Node) -> int:
"""
Get the number of parameters of a SPN given its root.
:param root: The root of the SPN.
:return: The number of parameters.
"""
n_weights = sum(len(n.weights) for n in filter_nodes_by_type(root, Sum))
n_leaf_params = sum(n.params_count() for n in filter_nodes_by_type(root, Leaf))
return n_weights + n_leaf_params
def compute_depth(root: Node) -> int:
"""
Get the depth of the SPN given its root.
:param root: The root of the SPN.
:return: The depth of the network.
"""
depths = dict()
for node in bfs(root):
d = depths.setdefault(node, 0)
for c in node.children:
depths[c] = d + 1
return max(depths.values())
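# Illustrative example (hypothetical SPN): for a root Sum node with two Product
# children, each over two distinct leaves, compute_statistics(root) reports
# n_nodes=7, n_sum=1, n_prod=2, n_leaves=4, n_edges=2+2+2=6 and depth=2.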
| 30.830986 | 83 | 0.66423 |
4a1a81ea5908d97445ea0eab3151ee37164ae532
| 2,048 |
py
|
Python
|
sandbox/src1/TCSE3-3rd-examples/src/py/intro/loop4simviz2.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 5 |
2016-05-28T14:12:28.000Z
|
2021-04-22T10:23:12.000Z
|
sandbox/src1/TCSE3-3rd-examples/src/py/intro/loop4simviz2.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | null | null | null |
sandbox/src1/TCSE3-3rd-examples/src/py/intro/loop4simviz2.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 2 |
2015-07-13T10:04:10.000Z
|
2021-04-22T10:23:23.000Z
|
#!/usr/bin/env python
"""
As loop4simviz1.py, but here we call simviz2.py, make movies,
and also allow any simviz2.py option to be varied in a loop.
"""
import sys, os, commands
usage = 'Usage: %s parameter min max increment '\
'[ simviz2.py options ]' % sys.argv[0]
try:
option_name = sys.argv[1]
min = float(sys.argv[2])
max = float(sys.argv[3])
incr = float(sys.argv[4])
except:
print usage; sys.exit(1)
simviz2_options = ' '.join(sys.argv[5:])
html = open('tmp_%s_runs.html' % option_name, 'w')
html.write('<HTML><BODY BGCOLOR="white">\n')
psfiles = [] # plot files in PostScript format
pngfiles = [] # plot files in PNG format
value = min
while value <= max:
case = 'tmp_%s_%g' % (option_name, value)
cmd = 'python simviz2.py %s -%s %g -case %s' % \
(simviz2_options, option_name, value, case)
print 'running', cmd
failure, output = commands.getstatusoutput(cmd)
psfile = os.path.join(case,case+'.ps')
pngfile = os.path.join(case,case+'.png')
html.write('<H1>%s=%g</H1> <IMG SRC="%s">\n' \
% (option_name, value, pngfile))
psfiles.append(psfile)
pngfiles.append(pngfile)
value += incr
cmd = 'convert -delay 50 -loop 1000 %s tmp_%s.gif' \
% (' '.join(pngfiles), option_name)
print 'converting PNG files to animated GIF:\n', cmd
failure, output = commands.getstatusoutput(cmd)
html.write('<H1>Movie</H1> <IMG SRC="tmp_%s.gif">\n' % \
option_name)
cmd = 'ps2mpeg.py %s' % ' '.join(psfiles)
print 'converting PostScript files to an MPEG movie:\n', cmd
failure, output = commands.getstatusoutput(cmd)
os.rename('movie.mpeg', 'tmp_%s.mpeg' % option_name)
html.write('<H1><A HREF="tmp_%s.mpeg">MPEG Movie</A></H1>\n' \
% option_name)
html.write('</BODY></HTML>\n')
html.close()
cmd = 'epsmerge -o tmp_%s_runs.ps -x 2 -y 3 -par %s' \
% (option_name, ' '.join(psfiles))
print cmd
failure, output = commands.getstatusoutput(cmd)
failure, output = commands.getstatusoutput(\
'ps2pdf tmp_%s_runs.ps' % option_name)
| 35.310345 | 62 | 0.643066 |
4a1a8531b4f4c89cf1764ca0c5bd8ed8443a1df5
| 4,171 |
py
|
Python
|
modules/kmeans_vector_quantizer.py
|
lahiruts/Online-Speech-Recognition
|
6f1b231d6cdd164505a612b008d60120547f0f87
|
[
"Apache-2.0"
] | 201 |
2020-06-15T15:48:12.000Z
|
2021-02-02T04:25:31.000Z
|
modules/kmeans_vector_quantizer.py
|
lahiruts/Online-Speech-Recognition
|
6f1b231d6cdd164505a612b008d60120547f0f87
|
[
"Apache-2.0"
] | 14 |
2021-02-03T00:33:08.000Z
|
2021-11-14T13:19:25.000Z
|
modules/kmeans_vector_quantizer.py
|
lahiruts/Online-Speech-Recognition
|
6f1b231d6cdd164505a612b008d60120547f0f87
|
[
"Apache-2.0"
] | 25 |
2020-06-22T15:46:25.000Z
|
2021-01-21T15:31:07.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from modules.group_norm import Fp32GroupNorm
class KmeansVectorQuantizer(nn.Module):
def __init__(
self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25
):
"""Vector quantization using straight pass-through estimator (i.e. kmeans)
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
gamma: commitment loss coefficient
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.vq_dim = vq_dim
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
self.var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.embedding = nn.Parameter(
0.01 * torch.randn(num_vars, num_groups, self.var_dim)
)
self.projection = nn.Sequential(
nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False),
Fp32GroupNorm(groups, dim),
)
self.gamma = gamma
self.mse_mean = nn.MSELoss(reduction="mean")
def _pass_grad(self, x, y):
"""Manually set gradient for backward pass.
for y = f(x), ensure that during the backward pass,
dL/dy = dL/dx regardless of f(x).
Returns:
y, with the gradient forced to be dL/dy = dL/dx.
"""
return y.detach() + (x - x.detach())
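    # Minimal sketch of the straight-through behaviour (hypothetical tensors,
    # not part of the module):
    #
    #     x = torch.ones(1, requires_grad=True)
    #     y = torch.zeros(1)                 # pretend y = f(x) with no usable grad
    #     z = y.detach() + (x - x.detach())  # value of y, gradient path of x
    #     z.backward()
    #     assert x.grad.item() == 1.0        # dL/dx == dL/dz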
@property
def expand_embedding(self):
if self.combine_groups:
return self.embedding.expand(self.num_vars, self.groups, self.var_dim)
return self.embedding
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars}
if self.time_first:
x = x.transpose(1, 2)
bsz, fsz, tsz = x.shape
ze = self.projection(x)
ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2)
d = (
(ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1))
.view(self.num_vars, bsz, tsz, self.groups, -1)
.norm(dim=-1, p=2)
)
idx = d.argmin(dim=0)
zq = (
torch.stack(
[
self.expand_embedding[idx[..., group], group]
for group in range(self.groups)
],
dim=-2,
)
.view(bsz, tsz, self.groups * self.var_dim)
.permute(0, 2, 1)
)
assert ze.shape == zq.shape, (ze.shape, zq.shape)
x = self._pass_grad(ze, zq)
hard_x = (
idx.new_zeros(bsz * tsz * self.groups, self.num_vars)
.scatter_(-1, idx.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
if produce_targets:
result["targets"] = idx
if self.time_first:
x = x.transpose(1, 2) # BCT -> BTC
result["x"] = x
ze = ze.float()
zq = zq.float()
latent_loss = self.mse_mean(zq, ze.detach())
commitment_loss = self.mse_mean(ze, zq.detach())
result["kmeans_loss"] = latent_loss + self.gamma * commitment_loss
return result
| 33.103175 | 83 | 0.572525 |
4a1a858bad6a74755ef95024f907938d5bb202b2
| 1,122 |
py
|
Python
|
testTordu.py
|
icarito/guy
|
9477b548b91ae81bfc327dac7ba1ec80804f4f8d
|
[
"Apache-2.0"
] | null | null | null |
testTordu.py
|
icarito/guy
|
9477b548b91ae81bfc327dac7ba1ec80804f4f8d
|
[
"Apache-2.0"
] | null | null | null |
testTordu.py
|
icarito/guy
|
9477b548b91ae81bfc327dac7ba1ec80804f4f8d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from guy import Guy
from datetime import datetime
class Simplest(Guy):
size=(200,200)
__doc__="""
<h1>Hello</h1>
<button style="float:right;font-size:2em" onclick="self.exit()">X</button>
"""
class Tordu(Guy):
size=(200,400)
__doc__="""
<style>body {margin:0px; padding:5px; border: 1px solid black}</style>
<script>
async function testInstance() {
var x=await self.testInstance()
}
async function testJsReturn() {
var x=await self.testJsReturn()
}
</script>
<button onclick="testInstance()">Run another instance</button>
<button onclick="testJsReturn()">testJsReturn</button>
<button onclick="self.testOpen()">testOpen</button>
<button style="float:right;font-size:2em" onclick="guy.exit()">X</button>
<hr/>
"""
def testInstance(self):
t=Simplest()
t.run()
async def testJsReturn(self):
return dict( script="guy.exit()" ) #it's evil!
def testOpen(self):
return Simplest()
if __name__ == "__main__":
x=Tordu()
x.run()
| 23.375 | 78 | 0.612299 |
4a1a859b30ba8ea78451d9ec57adb117cf92c1e4
| 7,352 |
py
|
Python
|
flink-python/pyflink/ml/tests/test_pipeline_it_case.py
|
mnmhouse/flink
|
8b05cbee4425c5ee33d73bed1473e075d7e17387
|
[
"Apache-2.0"
] | 41 |
2018-11-14T04:05:42.000Z
|
2022-02-09T10:39:23.000Z
|
flink-python/pyflink/ml/tests/test_pipeline_it_case.py
|
mnmhouse/flink
|
8b05cbee4425c5ee33d73bed1473e075d7e17387
|
[
"Apache-2.0"
] | 15 |
2021-06-13T18:06:12.000Z
|
2022-02-09T22:40:04.000Z
|
flink-python/pyflink/ml/tests/test_pipeline_it_case.py
|
fantasticKe/flink
|
c42ad0fcbcd5f2666952ee3fc4763490915091f6
|
[
"Apache-2.0"
] | 16 |
2019-01-04T09:19:03.000Z
|
2022-01-10T14:34:31.000Z
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink import keyword
from pyflink.java_gateway import get_gateway
from pyflink.ml.api import JavaTransformer, Transformer, Estimator, Model, \
MLEnvironmentFactory, Pipeline
from pyflink.ml.api.param import WithParams, ParamInfo, TypeConverters
from pyflink.ml.lib.param.colname import HasSelectedCols, \
HasPredictionCol, HasOutputCol
from pyflink.table.types import DataTypes
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import MLTestCase
class HasVectorCol(WithParams):
"""
Trait for parameter vectorColName.
"""
vector_col = ParamInfo(
"vectorCol",
"Name of a vector column",
is_optional=False,
type_converter=TypeConverters.to_string)
def set_vector_col(self, v: str) -> 'HasVectorCol':
return super().set(self.vector_col, v)
def get_vector_col(self) -> str:
return super().get(self.vector_col)
class WrapperTransformer(JavaTransformer, HasSelectedCols):
"""
A Transformer wrappers Java Transformer.
"""
@keyword
def __init__(self, *, selected_cols=None):
_j_obj = get_gateway().jvm.org.apache.flink.ml.pipeline.\
UserDefinedPipelineStages.SelectColumnTransformer()
super().__init__(_j_obj)
kwargs = self._input_kwargs
self._set(**kwargs)
class PythonAddTransformer(Transformer, HasSelectedCols, HasOutputCol):
"""
A Transformer which is implemented with Python. Output a column
contains the sum of all columns.
"""
@keyword
def __init__(self, *, selected_cols=None, output_col=None):
super().__init__()
kwargs = self._input_kwargs
self._set(**kwargs)
def transform(self, table_env, table):
input_columns = self.get_selected_cols()
expr = "+".join(input_columns)
expr = expr + " as " + self.get_output_col()
return table.add_columns(expr)
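    # Example (values taken from the test below): with selected_cols=["a", "b"]
    # and output_col="features", expr becomes "a+b as features", so the added
    # column holds the per-row sum of a and b.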
class PythonEstimator(Estimator, HasVectorCol, HasPredictionCol):
def __init__(self):
super().__init__()
def fit(self, table_env, table):
return PythonModel(
table_env,
table.select("max(features) as max_sum"),
self.get_prediction_col())
class PythonModel(Model):
def __init__(self, table_env, model_data_table, output_col_name):
self._model_data_table = model_data_table
self._output_col_name = output_col_name
self.max_sum = 0
self.load_model(table_env)
def load_model(self, table_env):
"""
Train the model to get the max_sum value which is used to predict data.
"""
table_sink = source_sink_utils.TestRetractSink(["max_sum"], [DataTypes.BIGINT()])
table_env.register_table_sink("Model_Results", table_sink)
self._model_data_table.execute_insert("Model_Results").wait()
actual = source_sink_utils.results()
self.max_sum = actual.apply(0)
def transform(self, table_env, table):
"""
        Use max_sum to predict the input. Return true if the input value is bigger than max_sum.
"""
return table\
.add_columns("features > {} as {}".format(self.max_sum, self._output_col_name))\
.select("{}".format(self._output_col_name))
class PythonPipelineTest(MLTestCase):
def test_java_transformer(self):
t_env = MLEnvironmentFactory().get_default().get_stream_table_environment()
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
t_env.register_table_sink("TransformerResults", table_sink)
source_table = t_env.from_elements([(1, 2, 3, 4), (4, 3, 2, 1)], ['a', 'b', 'c', 'd'])
transformer = WrapperTransformer(selected_cols=["a", "b"])
transformer.transform(t_env, source_table).execute_insert("TransformerResults").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,2", "4,3"])
def test_pipeline(self):
t_env = MLEnvironmentFactory().get_default().get_stream_table_environment()
train_table = t_env.from_elements(
[(1, 2), (1, 4), (1, 0), (10, 2), (10, 4), (10, 0)], ['a', 'b'])
serving_table = t_env.from_elements([(0, 0), (12, 3)], ['a', 'b'])
table_sink = source_sink_utils.TestAppendSink(
['predict_result'],
[DataTypes.BOOLEAN()])
t_env.register_table_sink("PredictResults", table_sink)
# transformer, output features column which is the sum of a and b.
transformer = PythonAddTransformer(selected_cols=["a", "b"], output_col="features")
# estimator
estimator = PythonEstimator()\
.set_vector_col("features")\
.set_prediction_col("predict_result")
# pipeline
pipeline = Pipeline().append_stage(transformer).append_stage(estimator)
pipeline.fit(t_env, train_table).transform(t_env, serving_table) \
.execute_insert('PredictResults').wait()
actual = source_sink_utils.results()
# the first input is false since 0 + 0 is smaller than the max_sum 14.
# the second input is true since 12 + 3 is bigger than the max_sum 14.
self.assert_equals(actual, ["false", "true"])
def test_pipeline_from_and_to_java_json(self):
# json generated from Java api
java_json = '[{"stageClassName":"org.apache.flink.ml.pipeline.' \
'UserDefinedPipelineStages$SelectColumnTransformer",' \
'"stageJson":"{\\"selectedCols\\":\\"[\\\\\\"a\\\\\\",' \
'\\\\\\"b\\\\\\"]\\"}"}]'
# load json
p = Pipeline()
p.load_json(java_json)
python_json = p.to_json()
t_env = MLEnvironmentFactory().get_default().get_stream_table_environment()
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
t_env.register_table_sink("TestJsonResults", table_sink)
source_table = t_env.from_elements([(1, 2, 3, 4), (4, 3, 2, 1)], ['a', 'b', 'c', 'd'])
transformer = p.get_stages()[0]
transformer.transform(t_env, source_table).execute_insert("TestJsonResults").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,2", "4,3"])
self.assertEqual(python_json, java_json)
| 39.106383 | 94 | 0.646763 |
4a1a86089297721a5a0bdd05581ba2df4e0ba508
| 7,223 |
py
|
Python
|
mir_driver/nodes/laithlin_move.py
|
K-F-P/mir_robot
|
1c2a4f2efbe20f2bc6eabf8ea7d0528ac50363c6
|
[
"BSD-3-Clause"
] | null | null | null |
mir_driver/nodes/laithlin_move.py
|
K-F-P/mir_robot
|
1c2a4f2efbe20f2bc6eabf8ea7d0528ac50363c6
|
[
"BSD-3-Clause"
] | null | null | null |
mir_driver/nodes/laithlin_move.py
|
K-F-P/mir_robot
|
1c2a4f2efbe20f2bc6eabf8ea7d0528ac50363c6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import rospy
import sys
import argparse
import numpy
import time
import geometry_msgs.msg as gm
from move_base_msgs.msg import MoveBaseActionFeedback, MoveBaseActionGoal, MoveBaseActionResult, MoveBaseFeedback, MoveBaseResult
from std_msgs.msg import String
from geometry_msgs.msg import Pose, PoseStamped, PoseArray, Quaternion
from tf.transformations import quaternion_from_euler
from actionlib_msgs.msg import GoalStatusArray
from scipy.spatial import distance
# coordinates of the points (x, y, z rotation)
mir1_position = [[17.5, 17.6, 6.6, 6.9],
[8.0, 5.0, 5.1, 10.7],
[-0.71, -1.0, 0.71, -0.15]]
# [0.71, 0.00, 0.70, 0.99]]
mir2_position = [[5.0, 11.0, 5.0, 11.0],
[1.2, 9.0, 5.0, 11.0],
[0.0, 1.0, 5.0, 11.0]]
# [1.06, 0.0, 5.0, 11.0]]
# current coordinates taken from the feedback
pos_m1 = [0.0, 0.0]
pos_m2 = [0.0, 0.0]
# my code
m1_m = PoseStamped()
m2_m = PoseStamped()
stat_go1 = 0
stat_go2 = 0
# flags for the graphs
mir_status = [-1, -1]
# start flag: when it equals zero, the run starts
# f1 = 0
# my code
f_mir1 = 0
f_mir2 = 0
done1 = False
done2 = False
started = False
send1 = False
send2 = False
# this is our own publisher
pub = rospy.Publisher('/mir_state', String, queue_size=10)
# my code
mir1_pub = rospy.Publisher("mir/move_base_simple/goal", PoseStamped, queue_size=5)
mir2_pub = rospy.Publisher("mir2/move_base_simple/goal", PoseStamped, queue_size=5)
def mir1_feed(data):
# global pos_m1_x, pos_m1_y
global pos_m1, mir_status
location = data.feedback.base_position.pose
status = data.status.status
print(1, status, location.position.x, location.position.y)
# pos_m1_x = float(location.position.x)
# pos_m1_y = float(location.position.y)
pos_m1 = [float(location.position.x), float(location.position.y)]
mir_status[0] = status
# rospy.sleep(0.5)
    # my code
global done1
if(not done1):
done1 = True
def mir2_feed(data):
# global pos_m2_x, pos_m2_y
global pos_m2, mir_status
location = data.feedback.base_position.pose
status = data.status.status
print(2, status, location.position.x, location.position.y)
# pos_m2_x = float(location.position.x)
# pos_m2_y = float(location.position.y)
pos_m2 = [float(location.position.x), float(location.position.y)]
mir_status[1] = status
# rospy.sleep(0.5)
    # my code
global done2
if (not done2):
done2 = True
# def mir1_status(data):
# print(data.status_list.goal_id.status)
#
#
# def mir2_status(data):
# print(data.status_list.goal_id.status)
#
#
def mir1_move(p_x, p_y, o_z):
    # the line below used to be uncommented (active)
#mir1_pub = rospy.Publisher("mir/move_base_simple/goal", PoseStamped, queue_size=5)
p = PoseStamped()
p.header.seq = 1
p.header.stamp = rospy.Time.now()
p.header.frame_id = "map"
p.pose.position.x = p_x
p.pose.position.y = p_y
p.pose.position.z = 0.0
p.pose.orientation.x = 0.0
p.pose.orientation.y = 0.0
p.pose.orientation.z = o_z
p.pose.orientation.w = 1.0
# rospy.sleep(1)
    # the lines below used to be uncommented (active)
    # mir1_pub.publish(p)
    #
    # print("done mir1")
    # my code
return p
def mir2_move(p_x, p_y, o_z):
    # the line below used to be uncommented (active)
#mir2_pub = rospy.Publisher("mir2/move_base_simple/goal", PoseStamped, queue_size=5)
p = PoseStamped()
p.header.seq = 1
p.header.stamp = rospy.Time.now()
p.header.frame_id = "map"
p.pose.position.x = p_x
p.pose.position.y = p_y
p.pose.position.z = 0.0
p.pose.orientation.x = 0.0
p.pose.orientation.y = 0.0
p.pose.orientation.z = o_z
p.pose.orientation.w = 1.0
# rospy.sleep(1)
    # the lines below used to be uncommented (active)
    #mir2_pub.publish(p)
    #print("done mir 2")
    # my code
return p
def timer_callback(event):
global mir_status, mir2_pub, m1_m, m2_m, send1, send2, stat_go2, stat_go1
    # this used to be uncommented (active)
    # while not rospy.is_shutdown():
    #     pub.publish(mir_status)
    # my code
if (done1 is True) and (done2 is True):
        pub.publish(str(mir_status))  # String publisher expects a str payload
if (started):
mir2_pub.publish(m2_m)
print("m2 done")
mir1_pub.publish(m1_m)
print("m1 done")
if stat_go1 == 2:
send1 = True
if stat_go2 == 2:
send2 = True
def start():
global f_mir1, f_mir2, middle1, middle2
# mir1_move(mir1_position[0][0], mir1_position[1][0], mir1_position[2][0])
# mir2_move(mir2_position[0][0], mir2_position[1][0], mir2_position[2][0])
    # my code
    global m1_m, m2_m, started
    m1_m = mir1_move(mir1_position[0][0], mir1_position[1][0], mir1_position[2][0])
    m2_m = mir2_move(mir2_position[0][0], mir2_position[1][0], mir2_position[2][0])
    if(not started):
        started = True
    # my code
    f_mir1 = 1
    f_mir2 = 1
# my code
def mir1_reach(m_r):
global m1_m, f_mir1, send1, stat_go1
stat = m_r.status_list[0]
#print(stat.status)
stat_go1 = stat.status
if stat_go1 == 3:
if (f_mir1 == 1) and (send1 is True):
m1_m = mir1_move(mir1_position[0][1], mir1_position[1][1], mir1_position[2][1])
print("mir 1 krok 2")
f_mir1 = 2
send1 = False
elif (f_mir1 == 2) and (send1 is True):
m1_m = mir1_move(mir1_position[0][2], mir1_position[1][2], mir1_position[2][2])
print("mir 1 krok 3")
f_mir1 = 0
send1 = False
def mir2_reach(m_r):
global m2_m, f_mir2, send2, stat_go2
stat = m_r.status_list[0]
#print(stat.status)
stat_go2 = stat.status
if stat_go2 == 3:
if (f_mir2 == 1) and (send2 is True):
m2_m = mir2_move(mir2_position[0][1], mir2_position[1][1], mir2_position[2][1])
print("mir 2 krok 2")
f_mir2 = 2
send2 = False
elif (f_mir2 == 2) and (send2 is True):
m2_m = mir2_move(mir2_position[0][2], mir2_position[1][2], mir2_position[2][2])
print("mir 2 krok 3")
f_mir2 = 0
send2 = False
# my code
# TODO: function computing the distance, plus making the robots stop when it gets too small
def mir_distance():
global pos_m1, pos_m2
dist = distance.euclidean(pos_m1, pos_m2)
return dist
def make_it_happen():
global f_mir1, f_mir2
rospy.init_node('kfp_mir_move')
if (f_mir1 == 0) and (f_mir2 == 0):
start()
    # TODO: extract the status number from /move_base/status; echoing that topic shows the digit that needs to be pulled out
    # my code
rospy.Subscriber("mir/move_base/status", GoalStatusArray, mir1_reach)
rospy.Subscriber("mir2/move_base/status", GoalStatusArray, mir2_reach)
rospy.Subscriber("mir/move_base/feedback", MoveBaseActionFeedback, mir1_feed)
rospy.Subscriber("mir2/move_base/feedback", MoveBaseActionFeedback, mir2_feed)
timer = rospy.Timer(rospy.Duration(2.0), timer_callback)
rospy.spin()
timer.shutdown()
if __name__ == '__main__':
try:
make_it_happen()
except rospy.ROSInterruptException:
pass
| 23.759868 | 129 | 0.637685 |
4a1a862653498fe786d568cbe7d3bd9bdeb70bf0
| 3,536 |
py
|
Python
|
pydeconz/api.py
|
klada/deconz
|
485e915822404d292156ff2a83488954e1ed8286
|
[
"MIT"
] | null | null | null |
pydeconz/api.py
|
klada/deconz
|
485e915822404d292156ff2a83488954e1ed8286
|
[
"MIT"
] | null | null | null |
pydeconz/api.py
|
klada/deconz
|
485e915822404d292156ff2a83488954e1ed8286
|
[
"MIT"
] | null | null | null |
"""API base classes."""
import logging
from asyncio import get_running_loop
from .errors import BridgeBusy
LOGGER = logging.getLogger(__name__)
class APIItems:
"""Base class for a map of API Items."""
def __init__(self, raw, request, path, item_cls) -> None:
self._request = request
self._path = path
self._item_cls = item_cls
self._items = {}
self.process_raw(raw)
def update(self) -> None:
raw = self._request("get", self._path)
self.process_raw(raw)
def process_raw(self, raw: dict, **kwargs) -> None:
for id, raw_item in raw.items():
obj = self._items.get(id)
if obj is not None:
obj.update(raw_item, **kwargs)
else:
self._items[id] = self._item_cls(id, raw_item, self._request)
def items(self):
return self._items.items()
def keys(self):
return self._items.keys()
def values(self):
return self._items.values()
def __getitem__(self, obj_id: str):
return self._items[obj_id]
def __iter__(self):
return iter(self._items)
class APIItem:
def __init__(self, raw, request):
self._raw = raw
self._request = request
self._loop = get_running_loop()
self._callbacks = []
self._cancel_retry = None
self._changed_keys = set()
@property
def raw(self):
"""Read only raw data."""
return self._raw
@property
def changed_keys(self):
"""Read only changed keys data."""
return self._changed_keys
def register_callback(self, callback):
"""Register callback for signalling.
Callback will be called at the end of updating device information in self.async_update.
"""
self._callbacks.append(callback)
def remove_callback(self, callback):
"""Remove callback previously registered."""
if callback in self._callbacks:
self._callbacks.remove(callback)
def update(self, raw, **kwargs):
"""Update input attr in self.
Store a set of keys with changed values.
Kwargs will be passed on to callbacks.
"""
changed_keys = set()
for k, v in raw.items():
changed_keys.add(k)
if isinstance(self.raw.get(k), dict) and isinstance(v, dict):
changed_keys.update(set(v.keys()))
self._raw[k].update(v)
else:
self._raw[k] = v
self._changed_keys = changed_keys
for async_signal_update in self._callbacks:
async_signal_update(**kwargs)
async def async_set(self, field, data, tries=0):
"""Set state of device."""
self.cancel_retry()
try:
await self._request("put", field, json=data)
except BridgeBusy:
LOGGER.debug("BridgeBusy, schedule retry %s %s", field, str(data))
def retry_set():
"""Retry set state."""
self._cancel_retry = None
self._loop.create_task(self.async_set(field, data, tries + 1))
if tries < 3:
retry_delay = 2 ** (tries + 1)
self._cancel_retry = self._loop.call_later(retry_delay, retry_set)
def cancel_retry(self):
"""Cancel retry.
Called at the start of async_set.
"""
if self._cancel_retry is not None:
self._cancel_retry.cancel()
self._cancel_retry = None
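# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the subclass contract visible above: process_raw()
# builds items via item_cls(id, raw_item, request), so the item subclass
# takes three arguments; APIItem itself grabs the running asyncio event
# loop when constructed. The "lights" path and the Light/Lights names are
# assumptions for illustration only.
class Light(APIItem):
    """Hypothetical single light resource."""

    def __init__(self, resource_id, raw, request):
        super().__init__(raw, request)
        self.resource_id = resource_id

    @property
    def name(self):
        # deCONZ-style payloads usually carry a "name" field.
        return self.raw.get("name")


class Lights(APIItems):
    """Hypothetical map of lights keyed by resource id."""

    def __init__(self, raw, request) -> None:
        super().__init__(raw, request, "lights", Light)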
| 26.38806 | 95 | 0.580034 |
4a1a86ff341aa444b877c8289a2416a952f0e606
| 12,278 |
py
|
Python
|
hw/dendogram/cluster.py
|
colonel8377/hkust_machine_learning
|
80d880a8bd6a0139d5d5409000f836900855b0ba
|
[
"MIT"
] | 3 |
2021-09-14T11:45:08.000Z
|
2022-03-24T14:15:45.000Z
|
hw/dendogram/cluster.py
|
colonel8377/hkust_machine_learning
|
80d880a8bd6a0139d5d5409000f836900855b0ba
|
[
"MIT"
] | 1 |
2021-11-02T09:05:03.000Z
|
2021-11-02T09:05:03.000Z
|
hw/dendogram/cluster.py
|
colonel8377/hkust_machine_learning
|
80d880a8bd6a0139d5d5409000f836900855b0ba
|
[
"MIT"
] | 2 |
2021-09-04T12:04:47.000Z
|
2021-09-29T02:22:27.000Z
|
from __future__ import division
from __future__ import absolute_import
import numpy
import copy
import argparse
from operator import itemgetter
from collections import defaultdict
from itertools import combinations, product
import numpy as np
from api import AbstractClusterer
from dendrogram import Dendrogram
from linkage import linkage_fn
from distance import *
from sklearn.metrics.pairwise import pairwise_distances
class CooccurrenceMatrix(numpy.ndarray):
""" Represents a co-occurrence matrix. """
def __new__(cls, data, dtype=None):
if not isinstance(data, CooccurrenceMatrix):
data, rownames, colnames = CooccurrenceMatrix.convert(data)
else:
rownames, colnames = data.rownames, data.colnames
obj = numpy.asarray(data).view(cls)
obj.rownames = rownames
obj.colnames = colnames
return obj
    def __array_finalize__(self, obj):
if obj is None: return
self.rownames = getattr(obj, 'rownames', None)
self.colnames = getattr(obj, 'colnames', None)
def row(self, row):
return self[self.rownames.get(row)]
def col(self, col):
return self[:, self.colnames.get(col)]
def cell(self, row, col):
return self[self.rownames.get(row), self.colnames.get(col)]
@classmethod
def convert(cls, data):
matrix = numpy.zeros(
(len(set(k for k, v in data)), len(set(v for k, v in data))))
colnames, rownames = {}, {}
for k, v in sorted(data):
if k not in rownames:
rownames[k] = len(rownames)
if v not in colnames:
colnames[v] = len(colnames)
matrix[rownames[k], colnames[v]] += 1
# rownames = [k for k,v in sorted(rownames.items(), key=itemgetter(1))]
# colnames = [k for k,v in sorted(colnames.items(), key=itemgetter(1))]
return matrix, rownames, colnames
def tfidf(self):
"""
Returns a matrix in which for all entries in the co-occurence matrix
the 'term frequency-inverse document frequency' is calculated.
"""
matrix = numpy.zeros(self.shape)
# the number of words in a document
words_per_doc = numpy.asarray(self.sum(axis=1), dtype=float)
# the number of documents in which a word is attested.
word_frequencies = numpy.asarray(numpy.sum(self > 0, axis=0),
dtype=float)
# calculate the term frequencies
for i in range(self.shape[0]):
tf = self[i] / words_per_doc[i] # array of tf's
matrix[i] = tf * (numpy.log(self.shape[0] / word_frequencies))
return matrix
class DistanceMatrix(numpy.ndarray):
"""
Simple wrapper around numpy.ndarray, to provide some custom
Distance Matrix functionality like plotting the distance matrix
with matplotlib.
"""
def __new__(cls, data, dist_metric=euclidean_distance, lower=True):
if (not isinstance(data, (numpy.ndarray, DistanceMatrix))
or len(data) != len(data[0])
or not max(numpy.diag(data)) == 0):
data = DistanceMatrix.convert_to_distmatrix(data,
dist_metric,
lower=lower)
obj = numpy.asarray(data).view(cls)
obj.distance_metric = dist_metric
return obj
    def __array_finalize__(self, obj):
if obj is None: return
self.distance_metric = getattr(obj, 'distance_metric', None)
def row(self, row):
return self[self.rownames.get(row)]
def col(self, col):
return self[:, self.colnames.get(col)]
def cell(self, row, col):
return self[self.rownames.get(row), self.colnames.get(col)]
def rows(self):
return [k for k, v in sorted(self.rownames.items(), key=itemgetter(1))]
@classmethod
def convert_to_distmatrix(cls, data, distance, lower=True):
matrix = numpy.zeros((len(data), len(data)))
for i, j in combinations(range(len(data)), 2):
matrix[i][j] = distance(data[i], data[j])
if lower == True:
matrix[j][i] = matrix[i][j]
# add a nan-diagonal, useful for further computations.
numpy.fill_diagonal(matrix, numpy.nan)
return matrix
def diag_is_zero(self):
"""Check if the diagonal contains only distances of 0."""
return max(numpy.diag(self)) == 0
def remove(self, idx):
"""
Delete a row and column with index IDX.
WARNING this function is NOT destructive!
"""
        indices = list(range(len(self)))
        indices.remove(idx)
return self.take(indices, axis=0).take(indices, axis=1)
def draw(self, save=False, format="pdf"):
"""Make a nice colorful plot of the distance matrix."""
try:
import pylab
except ImportError:
raise ImportError("Install pylab.")
fig = pylab.figure()
axmatrix = fig.add_axes([0.1, 0.1, 0.8, 0.8])
im = axmatrix.matshow(self,
aspect='auto',
origin='upper',
cmap=pylab.cm.YlGnBu)
axcolor = fig.add_axes([0.91, 0.1, 0.02, 0.8])
pylab.colorbar(im, cax=axcolor)
fig.show()
if save:
fig.savefig('distance-matrix.%s' % (format, ))
def summary(self):
"""Return a small summary of the matrix."""
print('DistanceMatrix (n=%s)' % len(self))
print('Distance metric = %s' % self.distance_metric.__name__)
print(self)
class Clusterer(AbstractClusterer):
"""
The Hierarchical Agglomerative Clusterer starts with each of the N vectors
as singleton clusters. It then iteratively merges pairs of clusters which
have the smallest distance according to function LINKAGE. This continues
until there is only one cluster.
"""
def __init__(self, data, linkage='ward', num_clusters=1):
self._num_clusters = num_clusters
vector_ids = [[i] for i in range(len(data))]
self._dendrogram = Dendrogram(vector_ids)
numpy.fill_diagonal(data, numpy.inf)
self._dist_matrix = data
self.linkage = linkage_fn(linkage)
def smallest_distance(self, clusters):
"""
Return the smallest distance in the distance matrix.
The smallest distance depends on the possible connections in
the distance matrix.
@param clusters: an object of the class L{DistanceMatrix} holding the
clusters at a specific state in the clustering procedure.
@type clusters: L{DistanceMatrix}
@return: a tuple containing the smallest distance and the indexes of
the clusters yielding the smallest distance.
"""
i, j = numpy.unravel_index(numpy.argmin(clusters), clusters.shape)
return clusters[i, j], i, j
def cluster(self, verbose=0, sum_ess=False):
clusters = copy.copy(self._dist_matrix)
# clusters = self._dist_matrix
summed_ess = 0.0
while len(clusters) > max(self._num_clusters, 1):
if verbose >= 1:
print('k=%s' % len(clusters))
if verbose == 2:
print(clusters)
best, i, j = self.smallest_distance(clusters)
print(str(best))
# In Ward (1963) ess is summed at each iteration
# in R's hclust and Python's hcluster and some text books it is not.
# Here it is optional...
if sum_ess:
summed_ess += best
else:
summed_ess = best
clusters = self.update_distmatrix(i, j, clusters)
self._dendrogram.merge(i, j)
self._dendrogram[i].distance = summed_ess
indices = numpy.arange(clusters.shape[0])
indices = indices[indices != j]
clusters = clusters.take(indices, axis=0).take(indices, axis=1)
print(clusters)
def update_distmatrix(self, i, j, clusters):
"""
Update the distance matrix using the specified linkage method so that
it represents the correct distances to the newly formed cluster.
"""
return self.linkage(clusters, i, j, self._dendrogram)
@property
def dendrogram(self):
"""Return the dendrogram object."""
return self._dendrogram
def num_clusters(self):
return self._num_clusters
def __repr__(self):
return """<Hierarchical Agglomerative Clusterer(linkage method: %r,
n=%d clusters>""" % (self.linkage.__name__,
self._num_clusters)
class VNClusterer(Clusterer):
"""
Variability Neighbor Clustering Class. A subclass of the regular Clusterer
where the order of clustering can be predetermined. In the normal clustering
procedure, all clusters can be clustered with all other clusters. In this
class, the clusters that are allowed to be clustered follow a specific order.
"""
def __init__(self, data, linkage='ward', num_clusters=1):
Clusterer.__init__(self, data, linkage, num_clusters=num_clusters)
def iterate_clusters(self, clusters):
for i in range(1, len(clusters)):
yield i - 1, i
def smallest_distance(self, clusters):
best = None
for i, j in self.iterate_clusters(clusters):
if best is None or clusters[i][j] <= best[0]:
best = (clusters[i][j], i, j)
print(best)
return best
def cluster(self, verbose=False):
# we must sum the error sum of squares in order not to obtain
# singleton clustering.
Clusterer.cluster(self, verbose=verbose, sum_ess=True)
class EuclideanNeighborClusterer(VNClusterer):
def iterate_clusters(self, x, y):
n_features, n_samples = x, y
offset = (0, -1, 1)
indices = ((i, j) for i in range(n_features) for j in range(n_samples))
for i, j in indices:
all_neigh = ((i + x, j + y) for x in offset for y in offset)
valid = ((i * n_features + j) for i, j in all_neigh
if (0 <= i < n_features) and (0 <= j < n_samples))
            target = next(valid)
for neighbor in list(valid):
yield target, neighbor
def demo():
"""
Demo to show some basic functionality.
"""
# declare dummy input vector with two dimensions:
# vectors = numpy.array([[0, 0], [5, 6], [1, 1], [3, 2], [4, 0], [2, 2], [8, 9], [8, 11]])
# compute the distance matrix on the basis of the vectors via sklearn:
# dist_matrix = pairwise_distances(vectors, metric='cityblock')
# dist_matrix = np.array([[0.0, 11.0, 5.0, 12.0, 7.0, 13.0, 9.0, 11.0],
# [11.0, 0.0, 13.0, 2.0, 17.0, 4.0, 15.0, 20.0],
# [5.0, 13.0, 0.0, 14.0, 1.0, 15.0, 12.0, 12.0],
# [12.0, 2.0, 14.0, 0.0, 18.0, 5.0, 16.0, 21.0],
# [7.0, 17.0, 1.0, 18.0, 0.0, 20.0, 15.0, 17.0],
# [13.0, 4.0, 15.0, 5.0, 20.0, 0.0, 19.0, 22.0],
# [9.0, 15.0, 12.0, 16.0, 15.0, 19.0, 0.0, 30.0],
# [11.0, 20.0, 12.0, 21.0, 17.0, 22.0, 30.0, 0.0]])
dist_matrix = np.array([[0.0, 1.0, 4.0, 5.10], [1.0, 0.0, 3.0, 4.12],
[4.0, 3.0, 0.0, 1.41], [5.10, 4.12, 1.41, 0.0]])
print(dist_matrix)
# plot the distance matrix:
# dist_matrix.draw() this doesn't work anymore
# initialize a temporal VNC clusterer, here with the Ward linkage method:
clusterer = Clusterer(dist_matrix, linkage='median')
# could also be a plain Clusterer()
# start the clustering procedure:
clusterer.cluster(verbose=0)
labels = ['n' + str(i + 1) for i in range(dist_matrix.shape[0])]
# plot the result as a dendrogram
clusterer.dendrogram.draw(save=True,
labels=labels,
title="VNC Analysis (Group Average's Linkage)")
if __name__ == '__main__':
demo()
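# --- Illustrative sketch (not part of the original module) ---
# demo() above only exercises the clusterers; this shows the
# CooccurrenceMatrix / tf-idf path on made-up (document, word) pairs.
def cooccurrence_demo():
    pairs = [('doc1', 'cat'), ('doc1', 'dog'), ('doc1', 'cat'),
             ('doc2', 'cat'), ('doc2', 'fish')]
    counts = CooccurrenceMatrix(pairs)
    print(counts)          # raw co-occurrence counts per (document, word)
    print(counts.tfidf())  # tf-idf weighted variant of the same matrix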
| 37.895062 | 94 | 0.586252 |
4a1a878c105a5271a80e688070f9672278848e49
| 3,816 |
py
|
Python
|
simulation/src/simulation_evaluation/src/state_machine/states/overtaking.py
|
LeonardII/KitCarFork
|
b2802c5b08cc8250446ce3731cb622af064db4ca
|
[
"MIT"
] | 13 |
2020-06-30T17:18:28.000Z
|
2021-07-20T16:55:35.000Z
|
simulation/src/simulation_evaluation/src/state_machine/states/overtaking.py
|
LeonardII/KitCarFork
|
b2802c5b08cc8250446ce3731cb622af064db4ca
|
[
"MIT"
] | 1 |
2020-11-10T20:15:42.000Z
|
2020-12-25T18:27:56.000Z
|
simulation/src/simulation_evaluation/src/state_machine/states/overtaking.py
|
LeonardII/KitCarFork
|
b2802c5b08cc8250446ce3731cb622af064db4ca
|
[
"MIT"
] | 3 |
2020-07-20T09:09:08.000Z
|
2021-07-20T17:00:37.000Z
|
"""States used in the OvertakingStateMachine."""
from simulation_evaluation.msg import Speaker as SpeakerMsg
from simulation_evaluation.msg import State as StateMsg
from ..state_machines.state_machine import StateMachine
from .state import State
class OvertakingState(State):
def next(self, state_machine, input_msg: int):
"""Return updated state."""
if input_msg == SpeakerMsg.NO_OVERTAKING_ZONE:
return state_machine.off
return super().next(state_machine, input_msg)
class Off(OvertakingState):
"""This state is the default state.
    Once the state machine receives this state, the next state will change according to
its next method.
"""
def __init__(self):
"""Init state.
Initializing does not need any arguments however description and value have to
initialized to super.
"""
super().__init__(
description="Car is not inside an overtaking zone.",
value=StateMsg.OVERTAKING_BEFORE_START,
)
def next(self, state_machine: StateMachine, input_msg: int):
"""Next state.
Arguments:
state_machine: On which state machine the states gets executed
input_msg: Integer of message
Returns:
Class object of next state. If no state change was detected here,
check for failure state before returning this state.
"""
if input_msg == SpeakerMsg.OVERTAKING_ZONE:
return state_machine.right
return super().next(state_machine, input_msg)
class Right(OvertakingState):
"""This state occurs when the car drives into the overtaking zone and is on the right
line.
    Once the state machine receives this state, the next state will change according to
its next method.
"""
def __init__(self):
"""Init state.
Initializing does not need any arguments however description and value have to
initialized to super.
"""
super().__init__(
description="Car is inside an overtaking zone, on the right line.",
value=StateMsg.OVERTAKING_RIGHT,
)
def next(self, state_machine, input_msg: int):
"""Next state.
Arguments:
state_machine: On which state machine the states gets executed
input_msg: Integer of message
Returns:
Class object of next state. If no state change was detected here,
check for failure state before returning this state.
"""
if input_msg == SpeakerMsg.LEFT_LANE:
return state_machine.left
return super().next(state_machine, input_msg)
class Left(OvertakingState):
"""This state occurs when the car is in the overtaking zone and in the left line.
    Once the state machine receives this state, the next state will change according to
its next method.
"""
def __init__(self):
"""Init state.
Initializing does not need any arguments however description and value have to
initialized to super.
"""
super().__init__(
description="Car is inside an overtaking zone, on the left line.",
value=StateMsg.OVERTAKING_LEFT,
)
def next(self, state_machine, input_msg: int):
"""Next state.
Arguments:
state_machine (StateMachine): On which state machine the states gets executed
input_msg: Integer of message
Returns:
Class object of next state. If no state change was detected here,
check for failure state before returning this state.
"""
if input_msg == SpeakerMsg.RIGHT_LANE:
return state_machine.right
return super().next(state_machine, input_msg)
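# --- Illustrative sketch (not part of the original module) ---
# How these states hand over to each other, assuming a stub state machine
# that merely exposes the state instances as attributes (the real
# StateMachine provides much more than this).
def _transition_sketch():
    class StubMachine:
        def __init__(self):
            self.off = Off()
            self.right = Right()
            self.left = Left()

    machine = StubMachine()
    state = machine.off
    state = state.next(machine, SpeakerMsg.OVERTAKING_ZONE)     # -> machine.right
    state = state.next(machine, SpeakerMsg.LEFT_LANE)           # -> machine.left
    state = state.next(machine, SpeakerMsg.NO_OVERTAKING_ZONE)  # -> machine.off
    return state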
| 31.02439 | 91 | 0.650419 |
4a1a888d255abe1228513fb6d10c8eee86b438f5
| 28,225 |
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_route_filter_rules_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2 |
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_route_filter_rules_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4 |
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_route_filter_rules_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2 |
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> "_models.RouteFilterRule":
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_04_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_04_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the update route filter rule
operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_04_01.models.PatchRouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_04_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
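# --- Illustrative sketch (not part of the generated module) ---
# These operations are normally reached through the versioned aio management
# client rather than instantiated directly. The resource names, subscription
# id and rule values below are placeholders, and azure-identity is assumed
# to be installed for the credential.
async def _create_rule_sketch():
    # Imports kept local so the sketch does not disturb this module's
    # own import graph.
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2018_04_01.aio import NetworkManagementClient
    from azure.mgmt.network.v2018_04_01.models import RouteFilterRule

    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            poller = await client.route_filter_rules.begin_create_or_update(
                resource_group_name="my-rg",
                route_filter_name="my-route-filter",
                rule_name="allow-community",
                route_filter_rule_parameters=RouteFilterRule(
                    access="Allow",
                    route_filter_rule_type="Community",
                    communities=["12076:5030"],
                ),
            )
            return await poller.result()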
| 50.22242 | 221 | 0.673056 |
4a1a8897b1cd7eebc62ab1f33dd53b52c384ae1b
| 6,591 |
py
|
Python
|
link-prediction/GIC/GIC/execute_link.py
|
Super-Dainiu/DATA130007.01-Community-Detection-Link-Prediction-and-Node-Classification-on-Ego-Facebook-and-Cites
|
1b5077342756ba6dc587a2af49abd2451319e5df
|
[
"MIT"
] | 3 |
2021-07-04T04:32:33.000Z
|
2022-01-14T08:36:02.000Z
|
link-prediction/GIC/GIC/execute_link.py
|
super-dainiu/DATA130007.01-Community-Detection-Link-Prediction-and-Node-Classification-on-Ego-Facebook-and-Cites
|
1b5077342756ba6dc587a2af49abd2451319e5df
|
[
"MIT"
] | null | null | null |
link-prediction/GIC/GIC/execute_link.py
|
super-dainiu/DATA130007.01-Community-Detection-Link-Prediction-and-Node-Classification-on-Ego-Facebook-and-Cites
|
1b5077342756ba6dc587a2af49abd2451319e5df
|
[
"MIT"
] | 1 |
2022-01-16T11:35:45.000Z
|
2022-01-16T11:35:45.000Z
|
"Implementation based on https://github.com/PetarV-/DGI"
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from models import GIC, LogReg
from utils import process
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
import statistics
import argparse
def get_roc_score(edges_pos, edges_neg, embeddings, adj_sparse):
"from https://github.com/tkipf/gae"
score_matrix = np.dot(embeddings, embeddings.T)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Store positive edge predictions, actual values
preds_pos = []
pos = []
for edge in edges_pos:
preds_pos.append(sigmoid(score_matrix[edge[0], edge[1]])) # predicted score
pos.append(adj_sparse[edge[0], edge[1]]) # actual value (1 for positive)
# Store negative edge predictions, actual values
preds_neg = []
neg = []
for edge in edges_neg:
preds_neg.append(sigmoid(score_matrix[edge[0], edge[1]])) # predicted score
neg.append(adj_sparse[edge[0], edge[1]]) # actual value (0 for negative)
# Calculate scores
preds_all = np.hstack([preds_pos, preds_neg])
labels_all = np.hstack([np.ones(len(preds_pos)), np.zeros(len(preds_neg))])
#print(preds_all, labels_all )
roc_score = roc_auc_score(labels_all, preds_all)
ap_score = average_precision_score(labels_all, preds_all)
return roc_score, ap_score
torch.manual_seed(1234)
parser = argparse.ArgumentParser(description='Options')
parser.add_argument('--d', dest='dataset', type=str, default='cora',help='')
parser.add_argument('--b', dest='beta', type=int, default=100,help='')
parser.add_argument('--c', dest='num_clusters', type=float, default=128,help='')
parser.add_argument('--a', dest='alpha', type=float, default=0.5,help='')
parser.add_argument('--test_rate', dest='test_rate', type=float, default=0.1,help='')
args = parser.parse_args()
#print(args.accumulate(args.integers))
cuda0 = torch.cuda.is_available()#False
beta = args.beta
alpha = args.alpha
num_clusters = int(args.num_clusters)
dataset = args.dataset
# training params
batch_size = 1
nb_epochs = 2000
patience = 50
lr = 0.001
l2_coef = 0.0
drop_prob = 0.0
hid_units = 16
sparse = True
nonlinearity = 'prelu' # special name to separate parameters
torch.cuda.empty_cache()
roc0=[]
ap0=[]
roc1=[]
ap1=[]
roc100 = []
ap100 = []
for m in range(1):
adj, features, labels, idx_train, idx_val, idx_test = process.load_data(dataset)
adj_sparse = adj
#print('Edges init',adj.getnnz())
adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
test_edges, test_edges_false = process.mask_test_edges(adj, test_frac=args.test_rate, val_frac=0.05)
adj = adj_train
#print('Edges new',adj.getnnz())
ylabels = labels
features, _ = process.preprocess_features(features)
nb_nodes = features.shape[0]
ft_size = features.shape[1]
nb_classes = labels.shape[1]
adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
if sparse:
sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
else:
adj = (adj + sp.eye(adj.shape[0])).todense()
features = torch.FloatTensor(features[np.newaxis])
if not sparse:
adj = torch.FloatTensor(adj[np.newaxis])
labels = torch.FloatTensor(labels[np.newaxis])
#idx_train = torch.LongTensor(idx_train)
#idx_val = torch.LongTensor(idx_val)
#idx_test = torch.LongTensor(idx_test)
if cuda0:
#print('Using CUDA')
features = features.cuda()
if sparse:
sp_adj = sp_adj.cuda()
else:
adj = adj.cuda()
labels = labels.cuda()
#idx_train = idx_train.cuda()
#idx_val = idx_val.cuda()
#idx_test = idx_test.cuda()
b_xent = nn.BCEWithLogitsLoss()
b_bce = nn.BCELoss()
#xent = nn.CrossEntropyLoss()
all_accs = []
for beta in [args.beta]:
print()
for K in [int(args.num_clusters)]:
#K = int(Kr * nb_nodes)
for alpha in [args.alpha]:
#print(m, alpha)
model = GIC(nb_nodes,ft_size, hid_units, nonlinearity, num_clusters, beta)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)
cnt_wait = 0
best = 1e9
best_t = 0
val_best = 0
if cuda0:
#print('Using CUDA')
model.cuda()
for epoch in range(nb_epochs):
model.train()
optimiser.zero_grad()
idx = np.random.permutation(nb_nodes)
shuf_fts = features[:, idx, :]
lbl_1 = torch.ones(batch_size, nb_nodes)
lbl_2 = torch.zeros(batch_size, nb_nodes)
lbl = torch.cat((lbl_1, lbl_2), 1)
if cuda0:
shuf_fts = shuf_fts.cuda()
lbl = lbl.cuda()
logits, logits2 = model(features, shuf_fts, sp_adj if sparse else adj, sparse, None, None, None, beta)
loss = alpha* b_xent(logits, lbl) + (1-alpha)*b_xent(logits2, lbl)
if loss < best:
best = loss
best_t = epoch
cnt_wait = 0
torch.save(model.state_dict(), dataset+'-link.pkl')
else:
cnt_wait += 1
if cnt_wait == patience:
#print('Early stopping!')
break
loss.backward()
optimiser.step()
model.load_state_dict(torch.load(dataset+'-link.pkl'))
embeds, _,_, S= model.embed(features, sp_adj if sparse else adj, sparse, None, beta)
embs = embeds[0, :]
embs = embs / embs.norm(dim=1)[:, None]
sc_roc, sc_ap = get_roc_score(test_edges, test_edges_false, embs.cpu().detach().numpy(), adj_sparse)
#print(beta, K, alpha, sc_roc, sc_ap,flush=True)
print('Dataset',args.dataset)
print('alpha, beta, K:',alpha,beta,K)
print('AUC', sc_roc, 'AP', sc_ap)
| 29.959091 | 124 | 0.572144 |
4a1a8d0153e92b0a5c2ef7b848d1c01db85feb86
| 74 |
py
|
Python
|
learn2learn/optim/update_rules/__init__.py
|
Brikwerk/learn2learn
|
7997c13c26ec627d13ce77ba98427260df78ada8
|
[
"MIT"
] | 1,774 |
2019-09-05T20:41:16.000Z
|
2022-03-30T09:49:02.000Z
|
learn2learn/optim/update_rules/__init__.py
|
Kostis-S-Z/learn2learn
|
c0b7c088f15986880b136ec27059644ac513db60
|
[
"MIT"
] | 196 |
2019-09-05T08:11:31.000Z
|
2022-03-31T12:08:25.000Z
|
learn2learn/optim/update_rules/__init__.py
|
Kostis-S-Z/learn2learn
|
c0b7c088f15986880b136ec27059644ac513db60
|
[
"MIT"
] | 266 |
2019-09-13T10:17:54.000Z
|
2022-03-28T07:17:21.000Z
|
#!/usr/bin/env python3
from .differentiable_sgd import DifferentiableSGD
| 18.5 | 49 | 0.824324 |
4a1a8e797922624e7b5fb7b664574c388ac12d78
| 1,172 |
py
|
Python
|
core/voice.py
|
xe1gyq/NuupXe
|
94608ac72bb1cf3e648c27d8402831dfb165b8af
|
[
"Apache-2.0"
] | null | null | null |
core/voice.py
|
xe1gyq/NuupXe
|
94608ac72bb1cf3e648c27d8402831dfb165b8af
|
[
"Apache-2.0"
] | null | null | null |
core/voice.py
|
xe1gyq/NuupXe
|
94608ac72bb1cf3e648c27d8402831dfb165b8af
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import commands
import subprocess
import time
from core.irlp import Irlp
class Voice(object):
def __init__(self):
self.filename = "voice.wav"
self.proc = None
self.irlp = Irlp()
def filenameset(self, name):
self.filename = name
def recordstart(self):
args = ['arecord','-t', 'wav', '-f', 'S16_LE', '-r', '48000', self.filename]
proc = subprocess.Popen(args)
print "PID:", proc.pid
return proc
def recordstop(self, proc):
proc.kill()
def record(self):
time.sleep(1)
if self.irlp.exists():
            while self.irlp.cosenabled() == 256:
pass
            while self.irlp.cosenabled() == 0:
pass
proc = self.recordstart()
if self.irlp.exists():
            while self.irlp.cosenabled() == 256:
pass
else:
time.sleep(5)
self.recordstop(proc)
def play(self):
status, output = commands.getstatusoutput("aplay " + self.filename)
def erase(self):
status, output = commands.getstatusoutput("rm " + self.filename)
# End of File
| 23.44 | 84 | 0.558874 |
4a1a8ed453fb5f8b2da73333682c640adac71a20
| 3,189 |
py
|
Python
|
kuryr_kubernetes/handlers/k8s_base.py
|
al1216/kuryr-kubernetes
|
e21d2f3d8bc12384fb2e352e024e2637c523b1e3
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/handlers/k8s_base.py
|
al1216/kuryr-kubernetes
|
e21d2f3d8bc12384fb2e352e024e2637c523b1e3
|
[
"Apache-2.0"
] | 1 |
2021-04-16T11:12:00.000Z
|
2021-04-16T11:12:00.000Z
|
kuryr_kubernetes/handlers/k8s_base.py
|
al1216/kuryr-kubernetes
|
e21d2f3d8bc12384fb2e352e024e2637c523b1e3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_kubernetes.handlers import dispatch
from kuryr_kubernetes.handlers import health
def object_kind(event):
try:
return event['object']['kind']
except KeyError:
return None
def object_uid(event):
try:
return event['object']['metadata']['uid']
except KeyError:
return None
class ResourceEventHandler(dispatch.EventConsumer, health.HealthHandler):
"""Base class for K8s event handlers.
Implementing classes should override both `OBJECT_KIND` and
'OBJECT_WATCH_PATH' attributes.
The `OBJECT_KIND` should be set to a valid Kubernetes object type
name (e.g. 'Pod' or 'Namespace'; see [1] for more details).
The `OBJECT_WATCH_PATH` should point to object's watched path,
(e.g. for the 'Pod' case the OBJECT_WATCH_PATH should be '/api/v1/pods').
Implementing classes are expected to override any or all of the
`on_added`, `on_present`, `on_modified`, `on_deleted` methods that would
be called depending on the type of the event (with K8s object as a single
argument).
[1] https://github.com/kubernetes/kubernetes/blob/release-1.4/docs/devel\
/api-conventions.md#types-kinds
"""
OBJECT_KIND = None
OBJECT_WATCH_PATH = None
def __init__(self):
super(ResourceEventHandler, self).__init__()
def get_watch_path(self):
return self.OBJECT_WATCH_PATH
@property
def consumes(self):
return {object_kind: self.OBJECT_KIND}
def _check_finalize(self, obj):
deletion_timestamp = None
try:
deletion_timestamp = obj['metadata']['deletionTimestamp']
except (KeyError, TypeError):
pass
return deletion_timestamp
def __call__(self, event, *args, **kwargs):
event_type = event.get('type')
obj = event.get('object')
if 'MODIFIED' == event_type:
if self._check_finalize(obj):
self.on_finalize(obj)
return
self.on_modified(obj)
self.on_present(obj)
elif 'ADDED' == event_type:
if self._check_finalize(obj):
self.on_finalize(obj)
return
self.on_added(obj)
self.on_present(obj)
elif 'DELETED' == event_type:
self.on_deleted(obj)
def on_added(self, obj):
pass
def on_present(self, obj):
pass
def on_modified(self, obj):
pass
def on_deleted(self, obj):
pass
def on_finalize(self, obj):
pass
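# --- Illustrative sketch (not part of the original module) ---
# The subclass contract described in the ResourceEventHandler docstring;
# the PodHandler name and its handler body are placeholders.
class PodHandler(ResourceEventHandler):
    OBJECT_KIND = 'Pod'
    OBJECT_WATCH_PATH = '/api/v1/pods'

    def on_present(self, pod):
        # Called for ADDED and MODIFIED events that are not finalizing.
        print('pod present: %s' % pod['metadata'].get('name'))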
| 29.527778 | 78 | 0.648793 |
4a1a8f0f2090f2cc961ba72a704d399369517e0d
| 878 |
py
|
Python
|
Deep-Learning/Licence_Plate_Recognition/character_segmentation.py
|
ghassenetanabene6/Vehicle-Recognition-System-in-Tunisia
|
3d34d8ca535f73a0be4107483c0cc7fcfa1806b3
|
[
"CNRI-Python"
] | 3 |
2020-07-08T10:25:00.000Z
|
2021-06-19T16:24:48.000Z
|
Deep-Learning/Licence_Plate_Recognition/character_segmentation.py
|
ghassenetanabene6/Vehicle-Recognition-System-in-Tunisia
|
3d34d8ca535f73a0be4107483c0cc7fcfa1806b3
|
[
"CNRI-Python"
] | null | null | null |
Deep-Learning/Licence_Plate_Recognition/character_segmentation.py
|
ghassenetanabene6/Vehicle-Recognition-System-in-Tunisia
|
3d34d8ca535f73a0be4107483c0cc7fcfa1806b3
|
[
"CNRI-Python"
] | 2 |
2020-07-20T14:14:27.000Z
|
2021-08-28T06:24:01.000Z
|
import cv2

# Find characters in the resulting images
# (relies on a find_contours(dimensions, img) helper defined elsewhere in this project)
def segment_characters(image):
# Preprocess cropped license plate image
img = cv2.resize(image, (333, 75))
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, img_binary = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
img_erode = cv2.erode(img_binary, (3,3))
img_dilate = cv2.dilate(img_erode, (3,3))
LP_WIDTH = img_dilate.shape[0]
LP_HEIGHT = img_dilate.shape[1]
# Make borders white
img_dilate[0:3,:] = 255
img_dilate[:,0:3] = 255
img_dilate[72:75,:] = 255
img_dilate[:,330:333] = 255
# Estimations of character contours sizes of cropped license plates
dimensions = [LP_WIDTH/6, LP_WIDTH/2, LP_HEIGHT/10, 2*LP_HEIGHT/3]
# Get contours within cropped license plate
char_list = find_contours(dimensions, img_dilate)
return char_list
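# --- Illustrative sketch (not part of the original module) ---
# Rough usage; "plate.png" stands for an already-cropped licence plate image
# and find_contours must be provided by the surrounding project.
if __name__ == '__main__':
    plate = cv2.imread("plate.png")
    characters = segment_characters(plate)
    print("found {} candidate characters".format(len(characters)))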
| 31.357143 | 88 | 0.697039 |
4a1a902f5e2c8fa225633af27feb573ba35259d6
| 2,498 |
py
|
Python
|
nanoblocks/wallet/wallet.py
|
ipazc/nanoblocks
|
d7433b60029e4bcda4c2c802c3ff05c53d7b220a
|
[
"MIT"
] | 3 |
2021-03-16T23:59:37.000Z
|
2021-12-11T13:52:46.000Z
|
nanoblocks/wallet/wallet.py
|
ipazc/nanoblocks
|
d7433b60029e4bcda4c2c802c3ff05c53d7b220a
|
[
"MIT"
] | 1 |
2021-04-02T14:11:02.000Z
|
2021-06-16T00:03:33.000Z
|
nanoblocks/wallet/wallet.py
|
ipazc/nanoblocks
|
d7433b60029e4bcda4c2c802c3ff05c53d7b220a
|
[
"MIT"
] | null | null | null |
from nanoblocks.base import NanoblocksClass
from nanoblocks.protocol.crypto.crypto_functions import make_seed, derive_seed, derive_bip39, fill_bip39_words
from nanoblocks.wallet.wallet_accounts import WalletAccounts
class Wallet(NanoblocksClass):
"""
Represents a Wallet in the Nano ecosystem.
    This class does not use any backend for creating or managing account keys.
    Keep in mind that this class holds private keys and should therefore be kept secure.
"""
def __init__(self, nano_network, seed=None):
"""
Creates a new Wallet.
:param nano_network:
A network object giving access to node and work backends.
:param seed:
Seed to use for the accounts management within the wallet.
If None, a random seed will be sampled from a valid cryptographic randomizer.
"""
super().__init__(nano_network)
if seed is None:
seed = make_seed()
self._seed = seed
@classmethod
def from_mnemonic(cls, words_list, nano_network):
"""
Instantiates this class based on a bip39 mnemonic list of keywords.
This method tolerates missing words in the list (set to None). In case it detects missing words, the method
will attempt to refill them with a random word.
:param words_list:
List of 24 words to use for importing the seed.
:param nano_network:
A network object giving access to node and work backends.
"""
if len(words_list) != 24:
raise KeyError("The length of the list should be 24, no more, no less words")
if any([x is None for x in words_list]):
words_list = fill_bip39_words(words_list)
seed = derive_seed(words_list)
return cls(nano_network=nano_network, seed=seed)
@property
def seed(self):
"""
Retrieves the seed of this wallet
"""
return self._seed
@property
def mnemonic(self):
"""
Derives the bip39 mnemonic for the seed of this wallet.
"""
return derive_bip39(self._seed)
@property
def accounts(self):
"""
Retrieves access to the accounts from this wallet.
"""
return WalletAccounts(self._seed, nano_network=self.network)
def __repr__(self):
return "Nano Wallet (Type wallet.accounts[integer_index] to access an account)."
def __str__(self):
return self.__repr__()
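# --- Illustrative sketch (not part of the original module) ---
# Rough usage, assuming `network` is an already configured nanoblocks
# network object giving access to node and work backends.
def _wallet_sketch(network):
    wallet = Wallet(network)                  # fresh random seed
    words = wallet.mnemonic                   # 24 bip39 words for backup
    restored = Wallet.from_mnemonic(words, network)
    # Seed and mnemonic encode the same secret, so the round-trip should
    # recover the original seed.
    assert restored.seed == wallet.seed
    return wallet.accounts[0]                 # first derived account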
| 29.388235 | 115 | 0.644516 |
4a1a90866677bb7a30d8106651c53c91f9ab4dca
| 3,026 |
py
|
Python
|
meiduo3/apps/users/migrations/0001_initial.py
|
caoyongpeng/CYP_meiduo
|
378cc05a8621b36dc15714a10258606860bb5ad2
|
[
"MIT"
] | null | null | null |
meiduo3/apps/users/migrations/0001_initial.py
|
caoyongpeng/CYP_meiduo
|
378cc05a8621b36dc15714a10258606860bb5ad2
|
[
"MIT"
] | null | null | null |
meiduo3/apps/users/migrations/0001_initial.py
|
caoyongpeng/CYP_meiduo
|
378cc05a8621b36dc15714a10258606860bb5ad2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-10-20 08:21
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('mobile', models.CharField(max_length=11, unique=True, verbose_name='手机号')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
'db_table': 'tb_users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 63.041667 | 329 | 0.66226 |
4a1a91d353a7ac7abd5afa1066c881ea1e33c751
| 1,810 |
py
|
Python
|
customers/migrations/0001_initial.py
|
chorna/taxi24
|
09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c
|
[
"MIT"
] | null | null | null |
customers/migrations/0001_initial.py
|
chorna/taxi24
|
09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c
|
[
"MIT"
] | null | null | null |
customers/migrations/0001_initial.py
|
chorna/taxi24
|
09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-11 04:16
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('drivers', '0002_cab_vehicle'),
]
operations = [
migrations.CreateModel(
name='CustomerCategory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('document_number', models.CharField(max_length=15, unique=True)),
('gener', models.CharField(blank=True, choices=[('F', 'Female'), ('M', 'Male')], max_length=1, null=True)),
('active', models.BooleanField(default=True)),
('picture', models.ImageField(blank=True, null=True, upload_to='')),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('category_id', models.ForeignKey(blank=True, db_column='category_id', null=True, on_delete=django.db.models.deletion.SET_NULL, to='customers.customercategory')),
('document_type_id', models.ForeignKey(blank=True, db_column='document_type_id', null=True, on_delete=django.db.models.deletion.SET_NULL, to='drivers.documenttype')),
],
options={
'abstract': False,
},
),
]
| 42.093023 | 182 | 0.596133 |
4a1a92a68aa930168528884f5808a4870e3e74c8
| 2,271 |
py
|
Python
|
bak/nasa_ngrams.py
|
NorthDecoder/nasaMining
|
81706cb9e48d9469b27314123a4f7b6e063f033e
|
[
"MIT"
] | 18 |
2015-04-16T03:12:57.000Z
|
2021-08-20T08:07:23.000Z
|
bak/nasa_ngrams.py
|
jonroberts/nasaMining
|
32680b58de9111dfa714e355dbc79de3faba59c3
|
[
"MIT"
] | 17 |
2021-05-25T23:45:19.000Z
|
2022-03-31T22:55:06.000Z
|
bak/nasa_ngrams.py
|
NorthDecoder/nasaMining
|
81706cb9e48d9469b27314123a4f7b6e063f033e
|
[
"MIT"
] | 12 |
2015-04-14T20:21:57.000Z
|
2021-05-12T22:01:53.000Z
|
from __future__ import unicode_literals
import json
from gensim.models.phrases import Phrases
from textblob import TextBlob
# from gensim: threshold represents a threshold for forming the phrases (higher means fewer phrases).
# A phrase of words a and b is accepted if (cnt(a, b) - min_count) * N / (cnt(a) * cnt(b)) > threshold, where N is the total vocabulary size.
thresh = 10
# n = 5
if __name__ == '__main__':
data = json.load(open('data/nasa.json'))
dataset = data['dataset']
print len(dataset), 'datasets'
# tokenize description fields
print 'Tokenizing descriptions'
desc = []
doc_id = []
for i, ds in enumerate(dataset):
text = TextBlob(ds['description'])
for sentence in text.sentences:
desc.append(sentence.tokens)
doc_id.append(i)
# text = TextBlob(ds['title'])
# for sentence in text.sentences:
# desc.append(sentence.tokens)
# doc_id.append(i)
print 'Constructing ngrams'
print 'Bigrams'
desc_bigrams = Phrases(desc, threshold=thresh)
bigrams = desc_bigrams[desc]
print 'Trigrams'
desc_trigrams = Phrases(bigrams, threshold=thresh)
trigrams = desc_trigrams[bigrams]
print 'Fourgrams'
desc_fourgrams = Phrases(trigrams, threshold=thresh)
fourgrams = desc_fourgrams[trigrams]
print 'Fivegrams'
desc_fivegrams = Phrases(fourgrams, threshold=thresh)
fivegrams = desc_fivegrams[fourgrams]
# pull out keywords
field = 'gensim_ngram_kw_%s' % thresh
for i, ngram in enumerate(fivegrams):
doc = doc_id[i]
if field not in dataset[doc]:
dataset[doc][field] = set()
for kw in filter(lambda k: '_' in k, ngram):
keyword = kw.replace('_', ' ').lower()
# filter out punctuation, etc (make sure that there are two non-punc words)
if len(TextBlob(keyword).words) < 2:
continue
dataset[doc][field].add(keyword)
# convert set into list for json serialization
for d in dataset:
d[field] = list(d[field])
# update the original data json and save
data['dataset'] = dataset
with open('data/nasa_ngram_%s.json' % thresh, 'w') as f:
json.dump(data, f)
| 29.881579 | 141 | 0.639806 |
4a1a9305f053139b4890f03eaf9f50e093973bce
| 1,165 |
py
|
Python
|
src/airing.py
|
punkhere/HerokuAnimeDLBot
|
90c0e34577d50981f4180f218b91f9bb7ad78a72
|
[
"MIT"
] | null | null | null |
src/airing.py
|
punkhere/HerokuAnimeDLBot
|
90c0e34577d50981f4180f218b91f9bb7ad78a72
|
[
"MIT"
] | null | null | null |
src/airing.py
|
punkhere/HerokuAnimeDLBot
|
90c0e34577d50981f4180f218b91f9bb7ad78a72
|
[
"MIT"
] | null | null | null |
# Copyright © 2021 BaraniARR
# Encoding = 'utf-8'
# Licensed under MIT License
# Special Thanks for gogoanime
from pyrogram import *
from pyrogram.types import *
import requests
from requests_html import HTMLSession
from bs4 import BeautifulSoup
import sys
# Getting currently airing Anime from the API
# Returns an "Inline Keyboard List" of Currently airing Anime
def airing_eps(client, message):
url = f"https://gogoanime.pe/"
session = HTMLSession()
response = session.get(url)
response_html = response.text
soup = BeautifulSoup(response_html, 'html.parser')
anime = soup.find("nav", {"class": "menu_series cron"}).find("ul")
air = []
for link in anime.find_all('a'):
airing_link = link.get('href')
name = link.get('title')
link = airing_link.split('/')
lnk_final = link[2]
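        # Telegram caps callback_data at 64 bytes, so overly long slugs are skipped below;
        # sys.getsizeof is used here as a rough size check rather than the exact encoded length.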
res = sys.getsizeof(lnk_final)
if int(res) > 64:
pass
else:
air.append([InlineKeyboardButton(f"{name}", callback_data=f"dt_{lnk_final}")])
repl = InlineKeyboardMarkup(air)
message.reply_text("**Currently Airing Anime: **", reply_markup=repl, parse_mode="markdown")
| 32.361111 | 96 | 0.671245 |
4a1a940984c95031184ed3256dbf5cad58a1f7fe
| 1,150 |
py
|
Python
|
6/6.py
|
dvento/projectEuler
|
3debbc9453ae50166a91b990145418f3c26fced8
|
[
"MIT"
] | null | null | null |
6/6.py
|
dvento/projectEuler
|
3debbc9453ae50166a91b990145418f3c26fced8
|
[
"MIT"
] | null | null | null |
6/6.py
|
dvento/projectEuler
|
3debbc9453ae50166a91b990145418f3c26fced8
|
[
"MIT"
] | null | null | null |
# coding=utf-8
'''
Daniel Vento, 2020
PROBLEM #6:
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is:
3025 - 385 = 2640
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum of the first one hundred natural numbers
'''
# target number
n = 100
def squaredSum():
    # The sum of the first n natural numbers is (n*(n + 1)) / 2; squaring it gives the square of the sum
res = ((n*(n + 1)) / 2)**2
return res
def sumOfSquares():
# The mathematical formula to calculate the sum of n squared is (n*(n + 1)*(2*n + 1)) / 6
res = (n*(n + 1)*(2*n + 1)) / 6
return res
def diff():
res = squaredSum() - sumOfSquares()
print("Result of the difference between the sum of the squares of the first "
"one hundred natural numbers and the square of the sum of the first one hundredd natural numbers is: ", int(res))
diff()
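# Sanity check against the example in the problem statement: if n were set to 10,
# squaredSum() would return 3025.0, sumOfSquares() would return 385.0, and diff() would print 2640.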
| 28.75 | 159 | 0.66 |
4a1a940ad1729b93c4de0d6cd1355d5abcc91c68
| 724 |
py
|
Python
|
examples/CNCEncoderPad/layers/layer0.py
|
lesley-byte/pykey
|
ce21b5b6c0da938bf24891e5acb196d6779c433a
|
[
"MIT"
] | null | null | null |
examples/CNCEncoderPad/layers/layer0.py
|
lesley-byte/pykey
|
ce21b5b6c0da938bf24891e5acb196d6779c433a
|
[
"MIT"
] | null | null | null |
examples/CNCEncoderPad/layers/layer0.py
|
lesley-byte/pykey
|
ce21b5b6c0da938bf24891e5acb196d6779c433a
|
[
"MIT"
] | null | null | null |
from pykey.keycode import PK_Keycode as KC # REQUIRED if using KC.* values
layer = { # REQUIRED dict, must be named 'layer'
'name' : 'Layer 0', # Application name
'encoder' : [ (0x202000, 'LEFT', [ KC.LEFT ]),
(0x202000, 'RIGHT',[ KC.RIGHT ])
],
'macros' : [ # keys ...
# COLOR LABEL KEY SEQUENCE
(0x202000, '1', [ KC.ONE ]),
(0x202000, '2', [ KC.TWO ]),
(0x202000, '3', [ KC.THREE ]),
(0x202000, '4', [ KC.FOUR ]),
(0x101010, '5', [ KC.FIVE ]),
(0x202000, '6', [ KC.SIX ]),
(0x202000, '7', [ KC.SEVEN ]),
(0x202000, '8', [ KC.EIGHT ]),
(0x101010, '9', [ KC.NINE ])
]
}
| 34.47619 | 74 | 0.453039 |
4a1a964338ba2191de4113e3263f76536df807c8
| 2,445 |
py
|
Python
|
python/tvm/exec/rpc_tracker.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | 40 |
2021-06-14T23:14:46.000Z
|
2022-03-21T14:32:23.000Z
|
python/tvm/exec/rpc_tracker.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | 14 |
2021-06-08T03:15:54.000Z
|
2022-02-01T23:50:24.000Z
|
python/tvm/exec/rpc_tracker.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | 11 |
2021-06-14T05:56:18.000Z
|
2022-02-27T06:52:07.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""Tool to start RPC tracker"""
from __future__ import absolute_import
import logging
import argparse
import multiprocessing
import sys
from ..rpc.tracker import Tracker
def main(args):
"""Main funciton"""
tracker = Tracker(args.host, port=args.port, port_end=args.port_end, silent=args.silent)
tracker.proc.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0", help="the hostname of the tracker")
parser.add_argument("--port", type=int, default=9190, help="The port of the RPC")
parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
parser.add_argument(
"--no-fork",
dest="fork",
action="store_false",
help="Use spawn mode to avoid fork. This option \
is able to avoid potential fork problems with Metal, OpenCL \
and ROCM compilers.",
)
parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.")
parser.set_defaults(fork=True)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.fork is False:
if sys.version_info[0] < 3:
raise RuntimeError("Python3 is required for spawn mode.")
multiprocessing.set_start_method("spawn")
else:
if not args.silent:
logging.info(
"If you are running ROCM/Metal, fork will cause "
"compiler internal error. Try to launch with arg ```--no-fork```"
)
main(args)
| 38.809524 | 100 | 0.685072 |
4a1a96d339e935eb446e26650691412d21a90fd0
| 447 |
py
|
Python
|
code_week10_629_75/pascals_triangle_ii.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week10_629_75/pascals_triangle_ii.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week10_629_75/pascals_triangle_ii.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
'''
Given a non-negative index k, where k <= 33, return the k-th row of Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Example:
Input: 3
Output: [1,3,3,1]
Follow-up:
Could you optimize your algorithm to use only O(k) extra space?
Source: LeetCode
Link: https://leetcode-cn.com/problems/pascals-triangle-ii
'''
class Solution:
def getRow(self, rowIndex: int) -> List[int]:
row = [1 for _ in range(rowIndex+1)]
for i in range(rowIndex + 1):
for j in range(i-1,0,-1):
row[j] = row[j] + row[j-1]
return row
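# Example: getRow(3) fills the single O(k) list in place and returns [1, 3, 3, 1],
# matching the sample output in the problem statement above.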
| 17.88 | 55 | 0.579418 |
4a1a9716a9976e844b69083435237d3f29d0a98d
| 3,530 |
py
|
Python
|
app/template_db/template_engine/model_handler/utils.py
|
Plawn/petit_publipost_gateway
|
e0a09207ae5bcad1623f8e7662e004ad9b59ffbe
|
[
"Apache-2.0"
] | null | null | null |
app/template_db/template_engine/model_handler/utils.py
|
Plawn/petit_publipost_gateway
|
e0a09207ae5bcad1623f8e7662e004ad9b59ffbe
|
[
"Apache-2.0"
] | 7 |
2021-06-22T09:48:59.000Z
|
2022-01-10T16:08:00.000Z
|
app/template_db/template_engine/model_handler/utils.py
|
Plawn/petit_publiposter
|
e0a09207ae5bcad1623f8e7662e004ad9b59ffbe
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from collections.abc import Mapping
from typing import Any, Dict, Iterable, List, Set, Tuple, Callable
from ..adapter_middleware import MultiAdapter
class FallbackAction(ABC):
def __init__(self, field_name: str, replacer: MultiAdapter):
self.field_name = field_name
self.replacer = replacer
@abstractmethod
def prepare_fallback(self, _dict: dict, key: str) -> None:
pass
class MissingPlaceholderFallbackAction(FallbackAction):
def prepare_fallback(self, _dict: dict, key: str) -> None:
"""
        Prevents errors by recreating the missing keys in the input data;
        with no missing fields there is nothing to fail on, and the placeholder is left in place.
"""
new_key = self.replacer.to_doc(key)
_dict[new_key] = _dict[key][self.field_name]
if key != new_key:
del _dict[key]
def merge_dict(d1: dict, d2: dict):
"""
Modifies d1 in-place to contain values from d2. If any value
in d1 is a dictionary (or dict-like), *and* the corresponding
value in d2 is also a dictionary, then merge them in-place.
"""
for key, v2 in d2.items():
v1 = d1.get(key) # returns None if v1 has no value for this key
if (isinstance(v1, Mapping) and isinstance(v2, Mapping)):
merge_dict(v1, v2)
else:
d1[key] = v2
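# Example (hypothetical values) of the in-place merge performed by merge_dict:
#   d1 = {"a": {"x": 1}, "b": 2}
#   d2 = {"a": {"y": 3}, "c": 4}
#   merge_dict(d1, d2)   # d1 is now {"a": {"x": 1, "y": 3}, "b": 2, "c": 4}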
def ensure_keys(d: dict, fallback_action: FallbackAction):
for key, item in d.items():
if isinstance(item, Mapping) and fallback_action.field_name in item:
fallback_action.prepare_fallback(d, key)
else:
if isinstance(item, Mapping):
ensure_keys(item, fallback_action)
def change_keys(obj: dict, convert: Callable) -> dict:
"""
Recursively goes through the dictionary obj and replaces keys with the convert function.
"""
if isinstance(obj, (str, int, float)):
return obj
if isinstance(obj, dict):
new = obj.__class__()
for k, v in obj.items():
new[convert(k)] = change_keys(v, convert)
elif isinstance(obj, (list, set, tuple)):
new = obj.__class__(change_keys(v, convert) for v in obj)
else:
return obj
return new
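# Example (hypothetical convert function): change_keys({"Foo": {"Bar": 1}}, str.lower)
# returns a new mapping {"foo": {"bar": 1}} and leaves the original dictionary untouched.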
def prepare_name(string: str) -> Tuple[str, str]:
top_level, *other_level = string.split('.')
return top_level, '.'.join(other_level)
def prepare_names(strings: Iterable[str]) -> Dict[str, List[str]]:
d: Dict[str, Set[str]] = {}
for string in strings:
top_level, rest = prepare_name(string)
if top_level in d:
d[top_level].add(rest)
else:
d[top_level] = {rest}
return {
i: list(j) for i, j in d.items()
}
def from_strings_to_dict(data: Dict[str, Any]):
"""
    Builds a nested model from a flat mapping of dotted-path strings, e.g.:
"mission.document.name": "test" => {
mission: {
document: {
name: "test"
}
}
}
"""
res = {}
for key, value in data.items():
l = key.split('.')
previous = []
end = len(l) - 1
for i, item in enumerate(l):
d = res
for prev in previous[:-1]:
d = d[prev]
if len(previous) > 0:
d = d[previous[-1]]
if item not in d:
if i != end:
d[item] = {}
else:
d[item] = value
previous.append(item)
return res
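# Example (hypothetical payload) matching the docstring above:
#   from_strings_to_dict({"mission.document.name": "test"})
#   # -> {"mission": {"document": {"name": "test"}}}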
| 29.416667 | 92 | 0.57932 |
4a1a97a393eba06d6fa6498ac5afd499e0b8a29d
| 3,778 |
py
|
Python
|
automl/cloud-client/get_model_evaluation.py
|
summersab/python-docs-samples
|
7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66
|
[
"Apache-2.0"
] | 2 |
2020-09-19T04:22:52.000Z
|
2020-09-23T14:04:17.000Z
|
automl/cloud-client/get_model_evaluation.py
|
summersab/python-docs-samples
|
7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66
|
[
"Apache-2.0"
] | 1 |
2020-07-24T19:18:29.000Z
|
2020-07-24T19:45:23.000Z
|
automl/cloud-client/get_model_evaluation.py
|
summersab/python-docs-samples
|
7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66
|
[
"Apache-2.0"
] | 2 |
2020-11-24T18:20:51.000Z
|
2020-12-12T12:21:52.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_model_evaluation(project_id, model_id, model_evaluation_id):
"""Get model evaluation."""
# [START automl_language_entity_extraction_get_model_evaluation]
# [START automl_language_sentiment_analysis_get_model_evaluation]
# [START automl_language_text_classification_get_model_evaluation]
# [START automl_translate_get_model_evaluation]
# [START automl_vision_classification_get_model_evaluation]
# [START automl_vision_object_detection_get_model_evaluation]
from google.cloud import automl
# TODO(developer): Uncomment and set the following variables
# project_id = "YOUR_PROJECT_ID"
# model_id = "YOUR_MODEL_ID"
# model_evaluation_id = "YOUR_MODEL_EVALUATION_ID"
client = automl.AutoMlClient()
# Get the full path of the model evaluation.
model_evaluation_full_id = client.model_evaluation_path(
project_id, "us-central1", model_id, model_evaluation_id
)
# Get complete detail of the model evaluation.
response = client.get_model_evaluation(model_evaluation_full_id)
print("Model evaluation name: {}".format(response.name))
print("Model annotation spec id: {}".format(response.annotation_spec_id))
print("Create Time:")
print("\tseconds: {}".format(response.create_time.seconds))
print("\tnanos: {}".format(response.create_time.nanos / 1e9))
print(
"Evaluation example count: {}".format(response.evaluated_example_count)
)
# [END automl_language_sentiment_analysis_get_model_evaluation]
# [END automl_language_text_classification_get_model_evaluation]
# [END automl_translate_get_model_evaluation]
# [END automl_vision_classification_get_model_evaluation]
# [END automl_vision_object_detection_get_model_evaluation]
print(
"Entity extraction model evaluation metrics: {}".format(
response.text_extraction_evaluation_metrics
)
)
# [END automl_language_entity_extraction_get_model_evaluation]
# [START automl_language_sentiment_analysis_get_model_evaluation]
print(
"Sentiment analysis model evaluation metrics: {}".format(
response.text_sentiment_evaluation_metrics
)
)
# [END automl_language_sentiment_analysis_get_model_evaluation]
# [START automl_language_text_classification_get_model_evaluation]
# [START automl_vision_classification_get_model_evaluation]
print(
"Classification model evaluation metrics: {}".format(
response.classification_evaluation_metrics
)
)
# [END automl_language_text_classification_get_model_evaluation]
# [END automl_vision_classification_get_model_evaluation]
# [START automl_translate_get_model_evaluation]
print(
"Translation model evaluation metrics: {}".format(
response.translation_evaluation_metrics
)
)
# [END automl_translate_get_model_evaluation]
# [START automl_vision_object_detection_get_model_evaluation]
print(
"Object detection model evaluation metrics: {}".format(
response.image_object_detection_evaluation_metrics
)
)
# [END automl_vision_object_detection_get_model_evaluation]
| 40.623656 | 79 | 0.750926 |
4a1a97e4d96a38157a2896fd70e283df8e7f63ff
| 16,311 |
py
|
Python
|
codebuddy.py
|
davidfurlong/CodeBuddy
|
eb76272987c187a8cd18547348b5fc1fd3009fa3
|
[
"MIT"
] | 1 |
2015-12-06T23:53:24.000Z
|
2015-12-06T23:53:24.000Z
|
codebuddy.py
|
davidfurlong/CodeBuddy
|
eb76272987c187a8cd18547348b5fc1fd3009fa3
|
[
"MIT"
] | null | null | null |
codebuddy.py
|
davidfurlong/CodeBuddy
|
eb76272987c187a8cd18547348b5fc1fd3009fa3
|
[
"MIT"
] | null | null | null |
import sublime, sublime_plugin, math, random
# GNU License Copyright David Furlong
# Used some Scrolling code from
# https://github.com/zzjin/syncViewScroll
# which is licensed under GNU
# Copyright (C) 2012 Tito Bouzout <tito.bouzout@gmail.com>
# TODOS: Auto Language detection + comment syntax {}
# Find, Search, Replace, Save, Save as, Save all
# IE Non text based shortcuts
# Double click to select a line selects the newline char too, so it isn't being counted as "select line"
# Probably falsely.
languagesCommentSymbol = []
keyHistory = []
actionLog = []
actionLineLog = []
specialkey = "cmd" if sublime.platform() == "osx" else "ctrl"
sublime.log_commands(False)
sublime.log_input(False)
hasWarned = False
sublime.run_command('toggle_sync_scroll')
todaysFocus = random.randrange(1,5+1)
if(sublime.active_window().active_view().size() > 50000):
sublime.run_command("sub_notify", {"title": "Welcome to CodeBuddy, Try "+specialkey+" + R", "msg": "This file is big, use "+specialkey+" + R to quickly navigate functions", "sound": False})
# sublime.message_dialog("Welcome to CodeBuddy. This file is sizeable, so remember to use "+specialkey+" + R to quickly navigate functions")
elif(todaysFocus == 1):
sublime.run_command("sub_notify", {"title": "Welcome to CodeBuddy, Try "+specialkey+" + P", "msg": specialkey+" + P to quickly navigate files", "sound": False})
# sublime.message_dialog(". Try to focus on using "+specialkey+" + P to quickly navigate files")
elif(todaysFocus == 2):
sublime.run_command("sub_notify", {"title": "Welcome to CodeBuddy, Try "+specialkey+" + D", "msg": "Did you know you can use "+specialkey+" + D or "+specialkey+" + click to create multiple cursors?", "sound": False})
# sublime.message_dialog("Welcome to CodeBuddy. Did you know you can use "+specialkey+" + D or "+specialkey+" + click to create multiple cursors?")
elif(todaysFocus == 3):
sublime.run_command("sub_notify", {"title": "Welcome to CodeBuddy, Try "+specialkey+" + P then :40", "msg": specialkey+" + P followed by :<line> to navigate by line", "sound": False})
# sublime.message_dialog("Welcome to CodeBuddy. Did you know you can use "+specialkey+" + D or "+specialkey+" + click to create multiple cursors?")
elif(todaysFocus == 4):
sublime.run_command("sub_notify", {"title": "Welcome to CodeBuddy", "msg": "Try "+specialkey+" + K B to toggle sidebar", "sound": False})
# sublime.message_dialog("Welcome to CodeBuddy. Did you know you can use "+specialkey+" + D or "+specialkey+" + click to create multiple cursors?")
class isDeletingLineCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
lineA = self.view.full_line(region.a)
lineB = self.view.full_line(region.b)
posA = self.view.rowcol(region.a)
posB = self.view.rowcol(region.b)
if(posA[0] - posB[0] == 1 or posA[0] - posB[0] == -1):
if(posA[0] > posB[0]):
if(len(self.view.substr(lineA)[:posA[1]].replace(' ', '').replace('\t', '')) == 0 and self.view.substr(lineB)[posB[1]:].replace(' ', '').replace('\t', '') == "\n"):
sublime.run_command("sub_notify", {"title": "Shortcut Tip, "+specialkey+" + J", "msg": "Press Cmd J to delete the new line after the current line", "sound": False})
else:
if(len(self.view.substr(lineB)[:posB[1]].replace(' ', '').replace('\t', '')) == 0 and self.view.substr(lineA)[posA[1]:].replace(' ', '').replace('\t', '') == "\n"):
sublime.run_command("sub_notify", {"title": "Shortcut Tip, "+specialkey+" + J", "msg": "Press Cmd J to delete the new line after the current line", "sound": False})
pos = self.view.rowcol(region.a)[1]
line_contents = self.view.substr(lineA)
if(pos > 0):
l = len(actionLog)
if(l > 1):
if(actionLog[l-1] == "drag_select" or actionLog[l-2] == "drag_select"):
if(line_contents[pos-1] == "{" or line_contents[pos] == "{" or line_contents[pos-1] == "}" or line_contents[pos] == "}"):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": "Press ^ + M to find matching bracket, or ^ + shift + M to select all contents of current parentheses", "sound": False})
class isNextToBracketCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if region.empty():
lines = self.view.line(region)
pos = self.view.rowcol(region.a)[1]
line_contents = self.view.substr(lines) + '\n'
if(pos > 0):
l = len(actionLog)
if(l > 1):
if(actionLog[l-1] == "drag_select" or actionLog[l-2] == "drag_select"):
if(line_contents[pos-1] == "{" or line_contents[pos] == "{" or line_contents[pos-1] == "}" or line_contents[pos] == "}"):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": "Press ^ + M to find matching bracket, or ^ + shift + M to select all contents of current parentheses", "sound": False})
# sublime.message_dialog("Press ^ + M to find matching bracket, or ^ + shift + M to select all contents of current parentheses")
class getSelectedRegionCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if region.empty():
line = self.view.line(region)
line_contents = self.view.substr(line) + '\n'
else:
print(region)
class isRegionWholeLineCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if region.empty():
return
else:
if(self.view.rowcol(region.a)[0] != self.view.rowcol(region.b)[0]):
return
else:
if (self.view.rowcol(region.a)[1] == 0 and len(self.view.substr(self.view.line(region.b))) == self.view.rowcol(region.b)[1]) or (self.view.rowcol(region.b)[1] == 0 and len(self.view.substr(self.view.line(region.a))) == self.view.rowcol(region.a)[1]) or (self.view.rowcol(region.a)[1] == 0 and len(self.view.substr(self.view.full_line(region.b))) == self.view.rowcol(region.b)[1]) or (self.view.rowcol(region.b)[1] == 0 and len(self.view.substr(self.view.full_line(region.a))) == self.view.rowcol(region.a)[1]):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": "Press "+specialkey+" + L to select line", "sound": False})
# sublime.message_dialog("Press "+specialkey+" + L to select line")
actionLog.append('select_line')
actionLineLog.append(self.view.rowcol(region.a)[0])
return
class isAtLineStartCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if region.empty():
x = self.view.line(region.a)
p = self.view.rowcol(region.a)[1]
if len(self.view.substr(x)[:p].replace(" ", "").replace("\t", "").replace("//", "").replace("/*", "").replace("<!--", "").replace('#', '')) != 0:
return
else:
if len(self.view.substr(region).replace(" ", "").replace("\t", "").replace("//", "").replace("/*", "").replace("<!--", "").replace('#', '')) != 0:
return
sublime.run_command("sub_notify", {"title": "Comment current Line "+specialkey+" + /", "msg": "To Comment current line press "+specialkey+" + /", "sound": False})
# sublime.message_dialog("To Comment current line press "+specialkey+" + /")
class isAtLineStartTabCommand(sublime_plugin.TextCommand):
def run(self, edit):
print ('tab')
for region in self.view.sel():
if region.empty():
x = self.view.line(region.a)
p = self.view.rowcol(region.a)[1]
if len(self.view.substr(x)[:p].replace(" ", "").replace("\t", "").replace("//", "").replace("/*", "").replace("<!--", "").replace('#', '')) != 0:
return
else:
if len(self.view.substr(region).replace(" ", "").replace("\t", "").replace("//", "").replace("/*", "").replace("<!--", "").replace('#', '')) != 0:
return
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": specialkey+" + [ to indent line, and "+specialkey+" + ] to unindent line from cursor anywhere in the line", "sound": False})
# sublime.message_dialog("Better than tab to indent is "+specialkey+" + [ to indent line, and "+specialkey+" + ] to unindent line")
class CodeBuddy(sublime_plugin.EventListener):
def on_modified(self, view):
print("on modified")
actionLog.append(view.command_history(0))
actionLineLog.append(view.rowcol(view.sel()[0].a)[0])
# actionLineLog.append(view.)
# view.command_history(0);
keyHistory.append(view.substr(sublime.Region(0, view.size())))
if (view.command_history(0)[1]):
# Comment Line for 2 languages?
if (view.command_history(0)[1]['characters'] == "//" or view.command_history(0)[1]['characters'] == "<!--" or view.command_history(0)[1]['characters'] == "/*" or view.command_history(0)[1]['characters'] == "#"):
view.run_command('is_at_line_start')
def on_text_command(command_name, view, args, ne): #args and command_name is swapped?
print('text command')
if(sublime.active_window().active_view().name() == "Find Results"):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": 'Double click the left gutter in find to go to the file and line number', "sound": False})
# sublime.status_message('Double click the gutter in find to go to the file and line number')
# if args == "drag_select":
# view.run_command('is_region_whole_line')
view.run_command('is_deleting_line')
view.run_command('is_next_to_bracket')
try:
if len(actionLog) > 0 and (actionLog[len(actionLog)-1] == "drag_select" or ne['by'] == 'lines'):
if (args == "left_delete"):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": specialkey+' + X to delete the current line', "sound": False})
else:
view.run_command('is_region_whole_line')
except:
pass
actionLog.append(args)
actionLineLog.append(view.rowcol(view.sel()[0].a)[0])
keyHistory.append(command_name)
try:
if( ne['default'] == "\t"):
view.run_command('is_at_line_start_tab')
except:
pass
l = len(actionLog)
# TODO
if(l > 2 and len(keyHistory) != 0):
# scenario 1 no new line
try:
if(actionLog[l-1]=="paste_and_indent" and actionLog[l-2] == "drag_select" and actionLog[l-3][0]=="cut" and actionLog[l-5]=="drag_select" and (actionLineLog[l-3] == 1+actionLineLog[l-1] or actionLineLog[l-3]+1 == actionLineLog[l-1] or actionLineLog[l-3]+2 == actionLineLog[l-1] or actionLineLog[l-3] == 2+actionLineLog[l-1])):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": "ctrl + "+specialkey+" + ↑ or ↓ to swap lines (transpose)", "sound": False})
except:
pass
# scenario 2 new line
try:
if(actionLog[l-1]=="paste_and_indent" and actionLog[l-2][1]['characters'] == "\n" and actionLog[l-3]=="insert" and actionLog[l-5][0]=="cut" and (actionLineLog[l-5] == 1+actionLineLog[l-1] or actionLineLog[l-5]+1 == actionLineLog[l-1] or actionLineLog[l-5]+2 == actionLineLog[l-1] or actionLineLog[l-5] == 2+actionLineLog[l-1])):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": "ctrl + "+specialkey+" + ↑ or ↓ to swap lines (transpose)", "sound": False})
except:
pass
try:
if(actionLog[l-1]=="paste_and_indent" and actionLog[l-2][1]['characters'] == "\n" and actionLog[l-3]=="insert" and actionLog[l-5]=="copy" and (actionLineLog[l-5] == 1+actionLineLog[l-1] or actionLineLog[l-5]+1 == actionLineLog[l-1])):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": specialkey+" + shift + D to duplicate a line. Remember, D for duplicate", "sound": False})
# sublime.message_dialog(""+specialkey+" + shift + D to duplicate a line. Remember, D for duplicate")
except:
pass
def on_window_command(self, window, command_name, args):
print('window command')
print(window)
print(command_name)
print(args)
if(sublime.active_window().active_view().name() == "Find Results"):
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": 'Double click the gutter in find to go to the file and line number', "sound": False})
# sublime.status_message('Double click the gutter in find to go to the file and line number')
if len(actionLog) > 0 and actionLog[len(actionLog)-1] == "drag_select":
window.active_view().run_command('is_region_whole_line')
actionLog.append(args)
actionLineLog.append(sublime.active_window().active_view().rowcol(sublime.active_window().active_view().sel()[0].a)[0])
# UNUSED. FOR FUTURE EXTENSION INTO SCROLLING
import _thread as thread
import time
synch_scroll_running = False
synch_scroll_current_view_object = None
def updatePos(view):
view.settings().set('origPos',view.viewport_position()[1])
def initialize(view):
#print 'initialize'
if not view.settings().has('syncScroll'):
view.settings().set('syncScroll',False)
#the add on change should be here, it's elsewhere for debug reasons
updatePos(view)
view.settings().clear_on_change('syncScroll') #for debug reasons
view.settings().add_on_change('syncScroll', updateStatus) #when syncScroll is toggled, update status bar
def plugin_loaded():
if not 'running_synch_scroll_loop' in globals():
global running_synch_scroll_loop
running_synch_scroll_loop = True
thread.start_new_thread(synch_scroll_loop, ())
#on startup initialize every view
print ("syncScroll starting")
for window in sublime.windows():
for view in window.views():
initialize(view)
def synch_scroll_loop():
while True:
global synch_scroll_running
if not synch_scroll_running:
synch_scroll_running = True
sublime.set_timeout(lambda: synch_scroll(), 0)
time.sleep(0.08)
def synch_scroll():
global synch_scroll_running
    global synch_scroll_current_view_object
    global hasWarned
# print ("one timeout")
current_view = synch_scroll_current_view_object
try:
if(100 < current_view.viewport_position()[1] and not hasWarned):
hasWarned = True
sublime.run_command("sub_notify", {"title": "Shortcut Tip", "msg": "Stop scrolling! Use "+specialkey+" + P then : for line number, enter or @ for function definitions. You can also try bookmarking by installing the SublimeBookmarks package", "sound": False})
# sublime.message_dialog("Stop scrolling! Use "+specialkey+" + P then : for line number, enter or @ for function definitions. You can also try bookmarking by installing the SublimeBookmarks package")
except:
hasWarned = False
pass
# x = 1
# previousPosition = current_view.viewport_position()[1]
if current_view is None or current_view.is_loading() or not current_view.settings().get('syncScroll'):
synch_scroll_running = False
return
callingViewPos = current_view.viewport_position()[1]
origCallingViewPos = current_view.settings().get('origPos')
# print ('modified. origCallingViewPos=', origCallingViewPos, 'callingViewPos= ', callingViewPos)
if callingViewPos != origCallingViewPos: #and it moved vertically
# print ("it moved")
for view in current_view.window().views():
if view.settings().get('syncScroll') and view.id() != current_view.id(): #if view has syncScroll enabled AND we're not talking about the same view as view
#we move view
viewPos = view.viewport_position()[1]
newViewPos = viewPos+callingViewPos-origCallingViewPos
# print ("moving. viewPos= ",viewPos," newViewPos= ",newViewPos)
view.set_viewport_position((view.viewport_position()[0],newViewPos), True) #move the other view
updatePos(view)
updatePos(current_view) #update original positions
synch_scroll_running = False
def updateStatus():
# print "updateStatus"
for window in sublime.windows():
for view in window.views():
if view.settings().get('syncScroll'):
view.set_status('syncScroll','[Sync ON]')
else:
view.erase_status('syncScroll')
class syncScrollListener(sublime_plugin.EventListener):
def on_activated(self, view):
global synch_scroll_current_view_object
synch_scroll_current_view_object = view
def on_load(self,view):
#on load add settings to a view
# print ("on_load")
initialize(view)
class ToggleSyncScrollCommand(sublime_plugin.TextCommand):
def run(self, edit, setting):
current_state = self.view.settings().get('syncScroll')
self.view.settings().set('syncScroll',not current_state)
def is_checked(self, setting):
if not self.view.settings().has('syncScroll'):
initialize(self.view)
# print ("current setting",self.view.settings().get('syncScroll'))
return self.view.settings().get('syncScroll')
| 50.187692 | 515 | 0.68414 |
4a1a97f286f12baacd82d676d7147ad0f559e5bf
| 2,585 |
py
|
Python
|
bench/benchmark/bm.py
|
gaaalmeida/trab_benchmark
|
4e42c6c34b6859050b792cefcad9627ecc5906bd
|
[
"MIT"
] | null | null | null |
bench/benchmark/bm.py
|
gaaalmeida/trab_benchmark
|
4e42c6c34b6859050b792cefcad9627ecc5906bd
|
[
"MIT"
] | null | null | null |
bench/benchmark/bm.py
|
gaaalmeida/trab_benchmark
|
4e42c6c34b6859050b792cefcad9627ecc5906bd
|
[
"MIT"
] | null | null | null |
from queue import Queue
from threading import Thread
import pandas as pd
import time
import codecs
import os
import numpy as np
_time = [0,0,0]
_words = []
_tf = False
_size = 0
_finished = False
def setup_words(cv):
global _words
for _ in range(cv):
_words.append([0])
def clearResults(names):
for name in names:
try:
os.remove(f'results/{name}.txt')
except FileNotFoundError:
pass
def write(q):
global _words, _time, _size, _tf, _finished
while True:
data = None
x = False
if not q.empty():
data = q.get()
x = True
if x:
_tf = False
ptime_s = time.time()
names = data.columns.values
ptime_e = time.time() - ptime_s
_time[1] += ptime_e
for i in range(_size):
iotime_s = time.time()
file = f"results/{names[i]}.txt"
f = codecs.open(file, 'a', 'utf-8')
f.write(u'\ufeff')
iotime_e = time.time() - iotime_s
_time[2] += iotime_e
                # Check whether the column holds text; word counts only apply to string columns
if pd.api.types.is_string_dtype(data[names[i]]):
ptime_s = time.time()
_words[i][0] += data[names[i]].str.count(' ') + 1
ptime_e = time.time() - ptime_s
_time[1] += ptime_e
                    # Write this column's values for the current chunk to its output file
iotime_s = time.time()
np.savetxt(f, data[names[i]].to_string(index=False, header=False).strip().split('\n'), fmt='%s')
iotime_e = time.time() - iotime_s
_time[2] += iotime_e
else:
iotime_s = time.time()
np.savetxt(f, data[names[i]].to_string(index=False, header=False).strip().split('\n'), fmt='%s')
iotime_e = time.time() - iotime_s
_time[2] += iotime_e
iotime_s = time.time()
f.close()
iotime_e = time.time() - iotime_s
_time[2] += iotime_e
data = None
elif _finished:
_tf = True
def run_benchmark(hw):
global _words, _time, _tf, _size, _finished
q = Queue()
thread = Thread(target = write, daemon=False, args =(q,))
thread.start()
q.join()
i = 0
for chunk in pd.read_csv("bench/benchmark/dataset.csv", chunksize=3000000, encoding='utf-8', low_memory=False):
if i == 0:
_size = len(chunk.columns)
setup_words(_size)
try:
os.mkdir('results')
except FileExistsError:
clearResults(chunk.columns.values)
i += 1
mtime_s = time.time()
q.put((chunk))
mtime_e = time.time() - mtime_s
_time[0] += mtime_e
_finished = True
while True:
if _tf:
break
return _time
| 23.935185 | 113 | 0.57911 |
4a1a980a5355306d5714ee31ec2949c436e3f816
| 7,390 |
py
|
Python
|
user_manager/tests.py
|
MOOCworkbench/MOOCworkbench
|
c478dd4f185c50e0a48319e2b30d418533c32a34
|
[
"MIT"
] | null | null | null |
user_manager/tests.py
|
MOOCworkbench/MOOCworkbench
|
c478dd4f185c50e0a48319e2b30d418533c32a34
|
[
"MIT"
] | 1 |
2017-07-09T17:38:21.000Z
|
2017-07-09T17:38:22.000Z
|
user_manager/tests.py
|
MOOCworkbench/MOOCworkbench
|
c478dd4f185c50e0a48319e2b30d418533c32a34
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.core.management import call_command
from django.shortcuts import reverse
from django.test import Client, TestCase
from dataschema_manager.models import DataSchema
from experiments_manager.models import Experiment
from git_manager.models import GitRepository
from user_manager.models import WorkbenchUser
class UserManagerTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', 'test@test.nl', 'test')
self.workbench_user = WorkbenchUser.objects.get(user=self.user)
self.git_repo = GitRepository.objects.create(name='Experiment',
owner=self.workbench_user,
github_url='https://github')
schema = DataSchema(name='main')
schema.save()
self.experiment = Experiment.objects.create(title='Experiment', description='test',
owner=self.workbench_user,
git_repo=self.git_repo,
language_id=1,
template_id=2,
schema=schema)
self.client = Client()
self.client.login(username='test', password='test')
call_command('loaddata', 'fixtures/steps.json', verbosity=0)
call_command('loaddata', 'fixtures/measures.json', verbosity=0)
call_command('loaddata', 'fixtures/package_categories_languages.json', verbosity=0)
def test_index(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
def test_detail_profile_view(self):
response = self.client.get(reverse('view_my_profile'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['workbench_user'], self.workbench_user)
def test_sign_out(self):
response = self.client.get(reverse('logout'), follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(str(response.context['user']), 'AnonymousUser')
def test_sign_out_without_signed_in(self):
c = Client()
response = c.get(reverse('logout'), follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(str(response.context['user']), 'AnonymousUser')
def test_sign_in_get(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
def test_sign_in_post(self):
c = Client()
data = {'username': 'test', 'password': 'test'}
response = c.post(reverse('login'), data=data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'], self.user)
def test_sign_in_post_incorrect_username(self):
c = Client()
data = {'username': 'NON_EXISTENT_USER', 'password': 'test'}
response = c.post(reverse('login'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
self.assertEqual(str(response.context['user']), 'AnonymousUser')
def test_sign_in_post_incorrect_password(self):
c = Client()
data = {'username': 'test', 'password': 'INCORRECT_PASSWORD'}
response = c.post(reverse('login'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
self.assertEqual(str(response.context['user']), 'AnonymousUser')
def test_sign_in_post_missing_password(self):
c = Client()
data = {'username': 'test'}
response = c.post(reverse('login'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
self.assertEqual(str(response.context['user']), 'AnonymousUser')
def test_sign_in_post_missing_username(self):
c = Client()
data = {'password': 'RANDOM_PASSWORD'}
response = c.post(reverse('login'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
self.assertEqual(str(response.context['user']), 'AnonymousUser')
def test_edit_profile_view_get(self):
response = self.client.get(reverse('edit_profile'))
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
def test_edit_profile_view_post(self):
data = {'netid': '123456789'}
response = self.client.post(reverse('edit_profile'), data=data)
self.assertEqual(response.status_code, 302)
self.workbench_user.refresh_from_db()
self.assertEqual(self.workbench_user.netid, '123456789')
def test_edit_profile_view_post_none(self):
data = {}
response = self.client.post(reverse('edit_profile'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
def test_register_view_get(self):
c = Client()
response = c.get(reverse('register'))
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
def test_register_view_post(self):
c = Client()
data = {'username': 'test3',
'email': 'test2@test2.nl',
'password': 'test',
'password_again': 'test',
'netid': '123456789'}
response = c.post(reverse('register'), data=data)
self.assertEqual(response.status_code, 302)
new_user = User.objects.filter(email=data['email'])
self.assertTrue(new_user)
def test_register_view_post_missing_data(self):
c = Client()
data = {'username': 'test3',
'email': 'test2@test2.nl',
'password': 'test',
'password_again': 'test',}
response = c.post(reverse('register'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
def test_register_view_post_same_username(self):
c = Client()
data = {'username': 'test',
'email': 'test2@test2.nl',
'password': 'DIFFERENT_PASSWORD',
'password_again': 'DIFFERENT_PASSWORD',
'netid': '123456789'}
response = c.post(reverse('register'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
self.user.refresh_from_db()
self.assertNotEqual(self.user.email, data['email'])
def test_register_view_post_same_email(self):
c = Client()
data = {'username': 'test2',
'email': self.user.email,
'password': 'DIFFERENT_PASSWORD',
'password_again': 'DIFFERENT_PASSWORD',
'netid': '123456789'}
response = c.post(reverse('register'), data=data)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context['form'])
new_user = User.objects.filter(username=data['username'])
self.assertFalse(new_user)
| 43.216374 | 91 | 0.623951 |
4a1a98cf424d148f2ae09bad11a3b81f6039a3b8
| 2,952 |
py
|
Python
|
ops.py
|
mato00/TFAN_HS_Segmentation
|
21e95fdcbec8b2c06909bc8cb99cb87727a9da6f
|
[
"MIT"
] | 1 |
2021-07-14T01:36:38.000Z
|
2021-07-14T01:36:38.000Z
|
ops.py
|
mato00/TFAN_HS_Segmentation
|
21e95fdcbec8b2c06909bc8cb99cb87727a9da6f
|
[
"MIT"
] | null | null | null |
ops.py
|
mato00/TFAN_HS_Segmentation
|
21e95fdcbec8b2c06909bc8cb99cb87727a9da6f
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.framework import ops
import tensorflow.contrib.layers as tflayers
from utils import *
from ncausalconv import *
def batch_norm(input, is_training=True, name="batch_norm"):
x = tflayers.batch_norm(inputs=input,
scale=True,
is_training=is_training,
trainable=True,
reuse=None)
return x
def instance_norm(input, name="instance_norm", is_training=True):
with tf.variable_scope(name):
depth = input.get_shape()[2]
scale = tf.get_variable("scale", [depth], initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32))
offset = tf.get_variable("offset", [depth], initializer=tf.constant_initializer(0.0))
mean, variance = tf.nn.moments(input, axes=[1], keep_dims=True)
epsilon = 1e-5
inv = tf.rsqrt(variance + epsilon)
normalized = (input-mean)*inv
return scale*normalized + offset
def conv2d(input_, output_dim, ks=4, s=2, stddev=0.02, padding='SAME', name="conv2d", activation_fn=None):
with tf.variable_scope(name):
return slim.conv2d(input_, output_dim, ks, s, padding=padding, activation_fn=activation_fn,
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
biases_initializer=None)
def deconv2d(input_, output_dim, ks=4, s=2, stddev=0.02, name="deconv2d"):
with tf.variable_scope(name):
input_ = tf.image.resize_images(images=input_,
size=tf.shape(input_)[1:3] * s,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) # That is optional
return conv2d(input_=input_, output_dim=output_dim, ks=ks, s=1, padding='SAME')
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [input_.get_shape()[-1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
def TempBlock(input_, outchannels_, layer_index=0, ks=2, s=1, dilation_rate_=1, dropout_=0.2,
name='TemBlock', is_training=True):
with tf.variable_scope(name):
tb = TemporalBlock(outchannels_, ks, s, dilation_rate_,
dropout_, name="tblock_{}".format(layer_index))
return tb(input_, training=is_training)
| 42.171429 | 120 | 0.629065 |
4a1a99c2f5752f2cefa22247417a3ba3df8f57d8
| 563 |
py
|
Python
|
deployments/migrations/0018_auto_20200319_0431.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 11 |
2018-06-11T06:05:12.000Z
|
2022-03-25T09:31:44.000Z
|
deployments/migrations/0018_auto_20200319_0431.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 498 |
2017-11-07T21:20:13.000Z
|
2022-03-31T14:37:18.000Z
|
deployments/migrations/0018_auto_20200319_0431.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 6 |
2018-04-11T13:29:50.000Z
|
2020-07-16T16:52:11.000Z
|
# Generated by Django 2.0.12 on 2020-03-19 04:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('deployments', '0017_auto_20200122_1434'),
]
operations = [
migrations.AlterField(
model_name='project',
name='project_district',
field=models.ForeignKey(blank=True, help_text='No selection will indicate all districts.', null=True, on_delete=django.db.models.deletion.CASCADE, to='api.District'),
),
]
| 28.15 | 178 | 0.667851 |
4a1a9a16bab51ac31347cc052307597a4e91d441
| 6,888 |
py
|
Python
|
tensorflow_probability/python/distributions/vector_laplace_diag_test.py
|
sanket-kamthe/probability
|
c22b6201155c2e58d08a4ad30641d1aff59fbe7c
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/vector_laplace_diag_test.py
|
sanket-kamthe/probability
|
c22b6201155c2e58d08a4ad30641d1aff59fbe7c
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/vector_laplace_diag_test.py
|
sanket-kamthe/probability
|
c22b6201155c2e58d08a4ad30641d1aff59fbe7c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for VectorLaplaceLinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class VectorLaplaceDiagTest(test_util.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
super(VectorLaplaceDiagTest, self).setUp()
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
tfd.VectorLaplaceDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).shape)
def testDistWithBatchShapeOneThenTransformedThroughSoftplus(self):
# This complex combination of events resulted in a loss of static shape
# information when tf.get_static_value(self._needs_rotation) was
# being used incorrectly (resulting in always rotating).
# Batch shape = [1], event shape = [3]
mu = tf.zeros((1, 3))
diag = tf.ones((1, 3))
base_dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
dist = tfd.TransformedDistribution(
base_dist, validate_args=True, bijector=tfp.bijectors.Softplus())
samps = dist.sample(5) # Shape [5, 1, 3].
self.assertAllEqual([5, 1], dist.log_prob(samps).shape)
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual(mu, self.evaluate(dist.mean()))
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], self.evaluate(dist.mean()))
def testSample(self):
mu = [-1., 1]
diag = [1., -2]
dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
seed = test_util.test_seed()
samps = self.evaluate(dist.sample(int(2e4), seed=seed))
cov_mat = 2. * self.evaluate(tf.linalg.diag(diag))**2
self.assertAllClose(mu, samps.mean(axis=0), atol=0., rtol=0.10)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.15, rtol=0.10)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
self.evaluate(dist.sample())
def testSampleWithBroadcastScale(self):
# mu corresponds to a 2-batch of 3-variate normals
mu = np.zeros([2, 3])
# diag corresponds to no batches of 3-variate normals
diag = np.ones([3])
dist = tfd.VectorLaplaceDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.shape)
self.assertAllClose(mu, self.evaluate(mean))
n = int(1e4)
samps = self.evaluate(dist.sample(n, seed=test_util.test_seed()))
cov_mat = 2. * self.evaluate(tf.linalg.diag(diag))**2
sample_cov = np.matmul(
samps.transpose([1, 2, 0]), samps.transpose([1, 0, 2])) / n
self.assertAllClose(mu, samps.mean(axis=0), atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov, atol=0.10, rtol=0.05)
def testCovariance(self):
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([2, 3], dtype=tf.float32), validate_args=True)
self.assertAllClose(2. * np.diag(np.ones([3], dtype=np.float32)),
self.evaluate(vla.covariance()))
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([3], dtype=tf.float32),
scale_identity_multiplier=[3., 2.],
validate_args=True)
self.assertAllEqual([2], vla.batch_shape)
self.assertAllEqual([3], vla.event_shape)
self.assertAllClose(
2. * np.array([[[3., 0, 0], [0, 3, 0], [0, 0, 3]],
[[2, 0, 0], [0, 2, 0], [0, 0, 2]]])**2.,
self.evaluate(vla.covariance()))
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([3], dtype=tf.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]],
validate_args=True)
self.assertAllEqual([2], vla.batch_shape)
self.assertAllEqual([3], vla.event_shape)
self.assertAllClose(
2. * np.array([[[3., 0, 0], [0, 2, 0], [0, 0, 1]],
[[4, 0, 0], [0, 5, 0], [0, 0, 6]]])**2.,
self.evaluate(vla.covariance()))
def testVariance(self):
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([2, 3], dtype=tf.float32), validate_args=True)
self.assertAllClose(2. * np.ones([3], dtype=np.float32),
self.evaluate(vla.variance()))
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([3], dtype=tf.float32),
scale_identity_multiplier=[3., 2.],
validate_args=True)
self.assertAllClose(2. * np.array([[3., 3, 3], [2, 2, 2]])**2.,
self.evaluate(vla.variance()))
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([3], dtype=tf.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]],
validate_args=True)
self.assertAllClose(2. * np.array([[3., 2, 1], [4, 5, 6]])**2.,
self.evaluate(vla.variance()))
def testStddev(self):
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([2, 3], dtype=tf.float32), validate_args=True)
self.assertAllClose(
np.sqrt(2) * np.ones([3], dtype=np.float32),
self.evaluate(vla.stddev()))
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([3], dtype=tf.float32),
scale_identity_multiplier=[3., 2.],
validate_args=True)
self.assertAllClose(
np.sqrt(2) * np.array([[3., 3, 3], [2, 2, 2]]),
self.evaluate(vla.stddev()))
vla = tfd.VectorLaplaceDiag(
loc=tf.zeros([3], dtype=tf.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]],
validate_args=True)
self.assertAllClose(
np.sqrt(2) * np.array([[3., 2, 1], [4, 5, 6]]),
self.evaluate(vla.stddev()))
if __name__ == "__main__":
tf.test.main()
| 36.444444 | 78 | 0.634146 |
4a1a9a1d21a4a56185df933b79061d6e57745d67
| 27,646 |
py
|
Python
|
sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 1 |
2021-12-07T13:43:54.000Z
|
2021-12-07T13:43:54.000Z
|
sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 1 |
2019-10-14T19:43:52.000Z
|
2019-10-14T19:43:52.000Z
|
sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import platform
import functools
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.pipeline.transport import AioHttpTransport
from azure.core.credentials import AzureKeyCredential
from multidict import CIMultiDict, CIMultiDictProxy
from azure.ai.textanalytics.aio import TextAnalyticsClient
from azure.ai.textanalytics import (
VERSION,
    DetectLanguageInput,
TextAnalyticsApiVersion,
)
from testcase import GlobalTextAnalyticsAccountPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from asynctestcase import AsyncTextAnalyticsTest
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class AiohttpTestTransport(AioHttpTransport):
"""Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
"""
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
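# Illustrative note (not part of the original suite): a transport like the one
# above would normally be injected through the client's ``transport`` keyword,
# e.g. ``TextAnalyticsClient(endpoint, credential, transport=AiohttpTestTransport())``.
# In these tests the preparer decorators construct the client, so this is only a sketch.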
class TestDetectLanguage(AsyncTextAnalyticsTest):
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_no_single_input(self, client):
with self.assertRaises(TypeError):
response = await client.detect_language("hello world")
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_all_successful_passing_dict(self, client):
docs = [{"id": "1", "text": "I should take my cat to the veterinarian."},
{"id": "2", "text": "Este es un document escrito en Español."},
{"id": "3", "text": "猫は幸せ"},
{"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]
response = await client.detect_language(docs, show_stats=True)
self.assertEqual(response[0].primary_language.name, "English")
self.assertEqual(response[1].primary_language.name, "Spanish")
self.assertEqual(response[2].primary_language.name, "Japanese")
self.assertEqual(response[3].primary_language.name, "German")
self.assertEqual(response[0].primary_language.iso6391_name, "en")
self.assertEqual(response[1].primary_language.iso6391_name, "es")
self.assertEqual(response[2].primary_language.iso6391_name, "ja")
self.assertEqual(response[3].primary_language.iso6391_name, "de")
for doc in response:
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
self.assertIsNotNone(doc.primary_language.confidence_score)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_all_successful_passing_text_document_input(self, client):
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian"),
DetectLanguageInput(id="2", text="Este es un document escrito en Español."),
DetectLanguageInput(id="3", text="猫は幸せ"),
DetectLanguageInput(id="4", text="Fahrt nach Stuttgart und dann zum Hotel zu Fu.")
]
response = await client.detect_language(docs)
self.assertEqual(response[0].primary_language.name, "English")
self.assertEqual(response[1].primary_language.name, "Spanish")
self.assertEqual(response[2].primary_language.name, "Japanese")
self.assertEqual(response[3].primary_language.name, "German")
self.assertEqual(response[0].primary_language.iso6391_name, "en")
self.assertEqual(response[1].primary_language.iso6391_name, "es")
self.assertEqual(response[2].primary_language.iso6391_name, "ja")
self.assertEqual(response[3].primary_language.iso6391_name, "de")
for doc in response:
self.assertIsNotNone(doc.primary_language.confidence_score)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_passing_only_string(self, client):
docs = [
u"I should take my cat to the veterinarian.",
u"Este es un document escrito en Español.",
u"猫は幸せ",
u"Fahrt nach Stuttgart und dann zum Hotel zu Fu.",
u""
]
response = await client.detect_language(docs)
self.assertEqual(response[0].primary_language.name, "English")
self.assertEqual(response[1].primary_language.name, "Spanish")
self.assertEqual(response[2].primary_language.name, "Japanese")
self.assertEqual(response[3].primary_language.name, "German")
self.assertTrue(response[4].is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_input_with_some_errors(self, client):
docs = [{"id": "1", "country_hint": "United States", "text": "I should take my cat to the veterinarian."},
{"id": "2", "text": "Este es un document escrito en Español."},
{"id": "3", "text": ""},
{"id": "4", "text": "Fahrt nach Stuttgart und dann zum Hotel zu Fu."}]
response = await client.detect_language(docs)
self.assertTrue(response[0].is_error)
self.assertFalse(response[1].is_error)
self.assertTrue(response[2].is_error)
self.assertFalse(response[3].is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_input_with_all_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "text": ""},
{"id": "3", "text": ""},
{"id": "4", "text": text}]
response = await client.detect_language(docs)
for resp in response:
self.assertTrue(resp.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_output_same_order_as_input(self, client):
docs = [
DetectLanguageInput(id="1", text="one"),
DetectLanguageInput(id="2", text="two"),
DetectLanguageInput(id="3", text="three"),
DetectLanguageInput(id="4", text="four"),
DetectLanguageInput(id="5", text="five")
]
response = await client.detect_language(docs)
for idx, doc in enumerate(response):
self.assertEqual(str(idx + 1), doc.id)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"text_analytics_account_key": ""})
async def test_empty_credential_class(self, client):
with self.assertRaises(ClientAuthenticationError):
response = await client.detect_language(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"text_analytics_account_key": "xxxxxxxxxxxx"})
async def test_bad_credentials(self, client):
with self.assertRaises(ClientAuthenticationError):
response = await client.detect_language(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_bad_document_input(self, client):
docs = "This is the wrong type"
with self.assertRaises(TypeError):
response = await client.detect_language(docs)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_mixing_inputs(self, client):
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
DetectLanguageInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
u"You cannot mix string input with the above documents"
]
with self.assertRaises(TypeError):
response = await client.detect_language(docs)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_out_of_order_ids(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.detect_language(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
self.assertEqual(resp.id, in_order[idx])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_show_stats_and_model_version(self, client):
def callback(response):
self.assertIsNotNone(response)
self.assertIsNotNone(response.model_version, msg=response.raw_response)
self.assertIsNotNone(response.raw_response)
self.assertEqual(response.statistics.document_count, 5)
self.assertEqual(response.statistics.transaction_count, 4)
self.assertEqual(response.statistics.valid_document_count, 4)
self.assertEqual(response.statistics.erroneous_document_count, 1)
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.detect_language(
docs,
show_stats=True,
model_version="latest",
raw_response_hook=callback
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_batch_size_over_limit(self, client):
docs = [u"hello world"] * 1050
with self.assertRaises(HttpResponseError):
response = await client.detect_language(docs)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed at. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = await client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_dont_use_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed at. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = await client.detect_language(docs, country_hint="", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_per_item_dont_use_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 2)
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 1)
docs = [{"id": "1", "country_hint": "", "text": "I will go to the park."},
{"id": "2", "country_hint": "", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.detect_language(docs, raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_country_hint_and_obj_input(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian."),
DetectLanguageInput(id="2", text="Este es un document escrito en Español."),
DetectLanguageInput(id="3", text="猫は幸せ"),
]
response = await client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_country_hint_and_dict_input(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_country_hint_and_obj_per_item_hints(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 2)
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 1)
docs = [
DetectLanguageInput(id="1", text="I should take my cat to the veterinarian.", country_hint="CA"),
DetectLanguageInput(id="4", text="Este es un document escrito en Español.", country_hint="CA"),
DetectLanguageInput(id="3", text="猫は幸せ"),
]
response = await client.detect_language(docs, country_hint="US", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_country_hint_and_dict_per_item_hints(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 1)
country_str = "\"countryHint\": \"US\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 2)
docs = [{"id": "1", "country_hint": "US", "text": "I will go to the park."},
{"id": "2", "country_hint": "US", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.detect_language(docs, country_hint="CA", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"default_country_hint": "CA"})
async def test_client_passed_default_country_hint(self, client):
def callback(resp):
country_str = "\"countryHint\": \"CA\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
def callback_2(resp):
country_str = "\"countryHint\": \"DE\""
country = resp.http_request.body.count(country_str)
self.assertEqual(country, 3)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.detect_language(docs, raw_response_hook=callback)
response = await client.detect_language(docs, country_hint="DE", raw_response_hook=callback_2)
response = await client.detect_language(docs, raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
async def test_rotate_subscription_key(self, resource_group, location, text_analytics_account, text_analytics_account_key):
credential = AzureKeyCredential(text_analytics_account_key)
client = TextAnalyticsClient(text_analytics_account, credential)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.detect_language(docs)
self.assertIsNotNone(response)
credential.update("xxx") # Make authentication fail
with self.assertRaises(ClientAuthenticationError):
response = await client.detect_language(docs)
credential.update(text_analytics_account_key) # Authenticate successfully again
response = await client.detect_language(docs)
self.assertIsNotNone(response)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_user_agent(self, client):
def callback(resp):
self.assertIn("azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()),
resp.http_request.headers["User-Agent"]
)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.detect_language(docs, raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_attribute_error_no_result_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.detect_language(docs)
# Attributes on DocumentError
self.assertTrue(response[0].is_error)
self.assertEqual(response[0].id, "1")
self.assertIsNotNone(response[0].error)
# Result attribute not on DocumentError, custom error message
try:
primary_language = response[0].primary_language
except AttributeError as custom_error:
self.assertEqual(
custom_error.args[0],
'\'DocumentError\' object has no attribute \'primary_language\'. '
'The service was unable to process this document:\nDocument Id: 1\nError: '
'InvalidDocument - Document text is empty.\n'
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_attribute_error_nonexistent_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.detect_language(docs)
# Attribute not found on DocumentError or result obj, default behavior/message
try:
primary_language = response[0].attribute_not_on_result_or_error
except AttributeError as default_behavior:
self.assertEqual(
default_behavior.args[0],
'\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_bad_model_version_error(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.detect_language(docs, model_version="bad")
except HttpResponseError as err:
self.assertEqual(err.error.code, "ModelVersionIncorrect")
self.assertIsNotNone(err.error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "text": text}]
doc_errors = await client.detect_language(docs)
self.assertEqual(doc_errors[0].error.code, "InvalidDocument")
self.assertIsNotNone(doc_errors[0].error.message)
self.assertEqual(doc_errors[1].error.code, "InvalidDocument")
self.assertIsNotNone(doc_errors[1].error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_warnings(self, client):
# No warnings actually returned for detect_language. Will update when they add
docs = [
{"id": "1", "text": "This won't actually create a warning :'("},
]
result = await client.detect_language(docs)
for doc in result:
doc_warnings = doc.warnings
self.assertEqual(len(doc_warnings), 0)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_not_passing_list_for_docs(self, client):
docs = {"id": "1", "text": "hello world"}
with pytest.raises(TypeError) as excinfo:
await client.detect_language(docs)
assert "Input documents cannot be a dict" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_missing_input_records_error(self, client):
docs = []
with pytest.raises(ValueError) as excinfo:
await client.detect_language(docs)
assert "Input documents can not be empty or None" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
await client.detect_language(None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_duplicate_ids_error(self, client):
# Duplicate Ids
docs = [{"id": "1", "text": "hello world"},
{"id": "1", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.detect_language(docs)
except HttpResponseError as err:
self.assertEqual(err.error.code, "InvalidDocument")
self.assertIsNotNone(err.error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_batch_size_over_limit_error(self, client):
# Batch size over limit
docs = [u"hello world"] * 1001
try:
response = await client.detect_language(docs)
except HttpResponseError as err:
self.assertEqual(err.error.code, "InvalidDocumentBatch")
self.assertIsNotNone(err.error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_invalid_country_hint_method(self, client):
docs = [{"id": "1", "text": "hello world"}]
response = await client.detect_language(docs, country_hint="United States")
self.assertEqual(response[0].error.code, "InvalidCountryHint")
self.assertIsNotNone(response[0].error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_invalid_country_hint_docs(self, client):
docs = [{"id": "1", "country_hint": "United States", "text": "hello world"}]
response = await client.detect_language(docs)
self.assertEqual(response[0].error.code, "InvalidCountryHint")
self.assertIsNotNone(response[0].error.message)
@GlobalTextAnalyticsAccountPreparer()
async def test_country_hint_none(self, resource_group, location, text_analytics_account, text_analytics_account_key):
client = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key))
# service will eventually support this and we will not need to send "" for input == "none"
documents = [{"id": "0", "country_hint": "none", "text": "This is written in English."}]
documents2 = [DetectLanguageInput(id="1", country_hint="none", text="This is written in English.")]
def callback(response):
country_str = "\"countryHint\": \"\""
country = response.http_request.body.count(country_str)
self.assertEqual(country, 1)
# test dict
result = await client.detect_language(documents, raw_response_hook=callback)
# test DetectLanguageInput
result2 = await client.detect_language(documents2, raw_response_hook=callback)
# test per-operation
result3 = await client.detect_language(documents=["this is written in english"], country_hint="none", raw_response_hook=callback)
# test client default
new_client = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key), default_country_hint="none")
result4 = await new_client.detect_language(documents=["this is written in english"], raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_country_hint_kwarg(self, client):
def callback(response):
country_str = "\"countryHint\": \"ES\""
self.assertEqual(response.http_request.body.count(country_str), 1)
self.assertIsNotNone(response.model_version)
self.assertIsNotNone(response.statistics)
res = await client.detect_language(
documents=["this is written in english"],
model_version="latest",
show_stats=True,
country_hint="ES",
raw_response_hook=callback
)
@GlobalTextAnalyticsAccountPreparer()
async def test_pass_cls(self, resource_group, location, text_analytics_account, text_analytics_account_key):
def callback(pipeline_response, deserialized, _):
return "cls result"
text_analytics = TextAnalyticsClient(text_analytics_account, AzureKeyCredential(text_analytics_account_key))
res = await text_analytics.detect_language(
documents=["Test passing cls to endpoint"],
cls=callback
)
assert res == "cls result"
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
async def test_string_index_type_not_fail_v3(self, client):
# make sure that the addition of the string_index_type kwarg for v3.1-preview.1 doesn't
# cause v3.0 calls to fail
await client.detect_language(["please don't fail"])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_disable_service_logs(self, client):
def callback(resp):
assert resp.http_request.query['loggingOptOut']
await client.detect_language(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
| 44.2336 | 141 | 0.653657 |
4a1a9a27bb1a4b82e7e785fa4cbafe968a1c3547
| 626 |
py
|
Python
|
runs/par-nobro-iter00600.cfg.py
|
janpawellek/broeval
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
[
"MIT"
] | null | null | null |
runs/par-nobro-iter00600.cfg.py
|
janpawellek/broeval
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
[
"MIT"
] | null | null | null |
runs/par-nobro-iter00600.cfg.py
|
janpawellek/broeval
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
[
"MIT"
] | null | null | null |
# Write results to this file
OUTFILE = 'runs/par-nobro-iter00600.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 600
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
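# (Illustrative note, not part of the original run configuration: with SIZE = 5
#  the requested file is 10**5 bytes, i.e. roughly 100 kB per download.)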
| 21.586207 | 68 | 0.722045 |
4a1a9a3a8ddbff60a412ac97623216b4d3f845c0
| 20,540 |
py
|
Python
|
vino_nsyss2020/utils/GPU_models.py
|
onurbarut/Encrypted_Malware_Detection
|
2d2323c1e9ea3313b76bc2e37b68a9126587c6cd
|
[
"Apache-2.0"
] | 1 |
2022-02-25T00:50:35.000Z
|
2022-02-25T00:50:35.000Z
|
vino_nsyss2020/utils/GPU_models.py
|
onurbarut/Encrypted_Malware_Detection
|
2d2323c1e9ea3313b76bc2e37b68a9126587c6cd
|
[
"Apache-2.0"
] | 1 |
2022-03-12T01:15:51.000Z
|
2022-03-23T07:34:58.000Z
|
vino_nsyss2020/utils/GPU_models.py
|
onurbarut/Encrypted_Malware_Detection
|
2d2323c1e9ea3313b76bc2e37b68a9126587c6cd
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Sequential, Model, model_from_json
from tensorflow.keras.layers import Input, Dense, Dropout, BatchNormalization, Flatten, Conv1D, MaxPooling1D, Conv2D, MaxPooling2D
def one_hot(y_, n_classes=None):
# Function to encode neural one-hot output labels from number indexes
# e.g.:
# one_hot(y_=[[5], [0], [3]], n_classes=6):
# return [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
if n_classes is None:
n_classes = int(int(max(y_))+1)
y_ = y_.reshape(len(y_))
return np.eye(n_classes)[np.array(y_, dtype=np.int32)] # Returns FLOATS
class CNN_1D:
"""docstring for CNN_1D"""
def __init__(self, input_shape,
n_classes,
filters=250,
kernel_size=3,
strides=1,
dense_units=128,
dropout_rate=0.,
CNN_layers=2,
clf_reg=1e-4):
# Model Definition
#raw_inputs = Input(shape=(X_train.shape[1],1,))
raw_inputs = Input(shape=input_shape)
xcnn = Conv1D(filters,
(kernel_size),
padding='same',
activation='relu',
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='Conv1D_1')(raw_inputs)
xcnn = BatchNormalization()(xcnn)
xcnn = MaxPooling1D(pool_size=2, padding='same')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
for i in range(1, CNN_layers):
xcnn = Conv1D(filters,
(kernel_size),
padding='same',
activation='relu',
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='Conv1D_'+str(i+1))(xcnn)
xcnn = BatchNormalization()(xcnn)
xcnn = MaxPooling1D(pool_size=2, padding='same')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
# we flatten for dense layer
xcnn = Flatten()(xcnn)
xcnn = Dense(dense_units, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='FC1_layer')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
xcnn = Dense(dense_units, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='FC2_layer')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
top_level_predictions = Dense(n_classes, activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='top_level_output')(xcnn)
model = Model(inputs=raw_inputs, outputs=top_level_predictions)
self.model = model
self.n_classes = n_classes
def train(self, X_train, y_train, X_val, y_val, n_batch, n_epochs, learning_rate, decay_rate, save_dir):
if len(X_train.shape) < 3:
X_train_1D = X_train.reshape(-1,X_train.shape[1],1)
X_val_1D = X_val.reshape(-1,X_val.shape[1],1)
else:
X_train_1D = X_train
X_val_1D = X_val
print(self.model.summary()) # summarize layers
plot_model(self.model, to_file=save_dir+'/model.png') # plot graph
self.model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr=learning_rate, decay=decay_rate),
metrics=['accuracy'])
# Train the model
return self.model.fit(X_train_1D, one_hot(y_train, self.n_classes),
batch_size=n_batch,
epochs=n_epochs,
validation_data=(X_val_1D, one_hot(y_val, self.n_classes)))
def classify(self, data):
if len(data.shape) < 3:
X_test_1D = data.reshape(-1,data.shape[1],1)
else:
X_test_1D = data
return self.model.predict(X_test_1D)
class CNN_2D:
"""docstring for CNN_2D"""
def __init__(self, input_shape,
n_classes,
filters=250,
kernel_size=3,
strides=1,
dense_units=128,
dropout_rate=0.,
CNN_layers=2,
clf_reg=1e-4):
# Model Definition
#raw_inputs = Input(shape=(X_train.shape[1],1,))
raw_inputs = Input(shape=input_shape)
xcnn = Conv2D(filters,
(kernel_size),
padding='same',
activation='relu',
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='Conv2D_1')(raw_inputs)
xcnn = BatchNormalization()(xcnn)
xcnn = MaxPooling2D(pool_size=2, padding='same')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
for i in range(1, CNN_layers):
xcnn = Conv2D(filters,
(kernel_size),
padding='same',
activation='relu',
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='Conv2D_'+str(i+1))(xcnn)
xcnn = BatchNormalization()(xcnn)
xcnn = MaxPooling2D(pool_size=2, padding='same')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
# we flatten for dense layer
xcnn = Flatten()(xcnn)
xcnn = Dense(dense_units, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='FC1_layer')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
xcnn = Dense(dense_units, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='FC2_layer')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
top_level_predictions = Dense(n_classes, activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='top_level_output')(xcnn)
model = Model(inputs=raw_inputs, outputs=top_level_predictions)
self.model = model
self.n_classes = n_classes
def train(self, X_train, y_train, X_val, y_val, n_batch, n_epochs, learning_rate, decay_rate, save_dir):
if len(X_train.shape) > 2:
X_train = X_train.reshape(-1, X_train.shape[1], X_train.shape[2], 1)
X_val = X_val.reshape(-1, X_val.shape[1], X_val.shape[2], 1)
print(self.model.summary()) # summarize layers
plot_model(self.model, to_file=save_dir+'/model.png') # plot graph
self.model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr=learning_rate, decay=decay_rate),
metrics=['accuracy'])
# Train the model
return self.model.fit(X_train, one_hot(y_train, self.n_classes),
batch_size=n_batch,
epochs=n_epochs,
validation_data=(X_val, one_hot(y_val, self.n_classes)))
def classify(self, data):
if len(data.shape) > 2:
return self.model.predict(data.reshape(-1, data.shape[1], data.shape[2], 1))
else:
return self.model.predict(data)
class LSTM:
"""docstring for LSTM"""
def __init__(self, input_shape,
n_classes,
dense_units=128,
dropout_rate=0.,
LSTM_layers=2,
LSTM_units=128,
lstm_reg=1e-4,
clf_reg=1e-4):
# Model Definition
#raw_inputs = Input(shape=(X_train.shape[1],1,))
raw_inputs = Input(shape=input_shape)
if LSTM_layers == 1:
xlstm = tf.keras.layers.LSTM(LSTM_units, return_sequences=False,
kernel_regularizer=tf.keras.regularizers.l2(lstm_reg),
recurrent_regularizer=tf.keras.regularizers.l2(lstm_reg),
bias_regularizer=tf.keras.regularizers.l2(lstm_reg),
activity_regularizer=tf.keras.regularizers.l1(lstm_reg))(raw_inputs)
if dropout_rate != 0:
xlstm = Dropout(dropout_rate)(xlstm)
else:
xlstm = tf.keras.layers.LSTM(LSTM_units, return_sequences=True,
kernel_regularizer=tf.keras.regularizers.l2(lstm_reg),
recurrent_regularizer=tf.keras.regularizers.l2(lstm_reg),
bias_regularizer=tf.keras.regularizers.l2(lstm_reg),
activity_regularizer=tf.keras.regularizers.l1(lstm_reg))(raw_inputs)
if dropout_rate != 0:
xlstm = Dropout(dropout_rate)(xlstm)
for i in range(1, LSTM_layers-1):
xlstm = tf.keras.layers.LSTM(LSTM_units, return_sequences=True,
kernel_regularizer=tf.keras.regularizers.l2(lstm_reg),
recurrent_regularizer=tf.keras.regularizers.l2(lstm_reg),
bias_regularizer=tf.keras.regularizers.l2(lstm_reg),
activity_regularizer=tf.keras.regularizers.l1(lstm_reg))(xlstm)
if dropout_rate != 0:
xlstm = Dropout(dropout_rate)(xlstm)
xlstm = tf.keras.layers.LSTM(LSTM_units, return_sequences=False,
kernel_regularizer=tf.keras.regularizers.l2(lstm_reg),
recurrent_regularizer=tf.keras.regularizers.l2(lstm_reg),
bias_regularizer=tf.keras.regularizers.l2(lstm_reg),
activity_regularizer=tf.keras.regularizers.l1(lstm_reg))(xlstm)
if dropout_rate != 0:
xlstm = Dropout(dropout_rate)(xlstm)
top_level_predictions = Dense(n_classes, activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='top_level_output')(xlstm)
model = Model(inputs=raw_inputs, outputs=top_level_predictions)
self.model = model
self.n_classes = n_classes
def train(self, X_train, y_train, X_val, y_val, n_batch, n_epochs, learning_rate, decay_rate, save_dir):
print(self.model.summary()) # summarize layers
plot_model(self.model, to_file=save_dir+'/model.png') # plot graph
self.model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr=learning_rate, decay=decay_rate),
metrics=['accuracy'])
# Train the model
return self.model.fit(X_train, one_hot(y_train, self.n_classes),
batch_size=n_batch,
epochs=n_epochs,
validation_data=(X_val, one_hot(y_val, self.n_classes)))
def classify(self, data):
return self.model.predict(data)
class CNN_LSTM:
"""docstring for 1D_CNN_LSTM"""
def __init__(self, input_shape,
n_classes,
filters=32,
kernel_size=5,
strides=1,
dense_units=200,
dropout_rate=0.,
LSTM_units=200,
lstm_reg=1e-4,
clf_reg=1e-4):
# Model Definition
#raw_inputs = Input(shape=(X_train.shape[1],1,))
raw_inputs = Input(shape=input_shape)
xcnn = Conv1D(filters,
(kernel_size),
padding='same',
activation='relu',
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='Conv1D_1')(raw_inputs)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
xcnn = Conv1D(filters,
(kernel_size),
padding='same',
activation='relu',
strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='Conv1D_2')(xcnn)
xcnn = BatchNormalization()(xcnn)
xcnn = MaxPooling1D(pool_size=2, padding='same')(xcnn)
if dropout_rate != 0:
xcnn = Dropout(dropout_rate)(xcnn)
xlstm = tf.keras.layers.LSTM(LSTM_units, return_sequences=False,
kernel_regularizer=tf.keras.regularizers.l2(lstm_reg),
recurrent_regularizer=tf.keras.regularizers.l2(lstm_reg),
bias_regularizer=tf.keras.regularizers.l2(lstm_reg),
activity_regularizer=tf.keras.regularizers.l1(lstm_reg))(xcnn)
if dropout_rate != 0:
xlstm = Dropout(dropout_rate)(xlstm)
# we flatten for dense layer
xlstm = Flatten()(xlstm)
xlstm = Dense(dense_units, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='FC1_layer')(xlstm)
if dropout_rate != 0:
xlstm = Dropout(dropout_rate)(xlstm)
xlstm = Dense(dense_units, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='FC2_layer')(xlstm)
if dropout_rate != 0:
xlstm = Dropout(dropout_rate)(xlstm)
top_level_predictions = Dense(n_classes, activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='top_level_output')(xlstm)
model = Model(inputs=raw_inputs, outputs=top_level_predictions)
self.model = model
self.n_classes = n_classes
def train(self, X_train, y_train, X_val, y_val, n_batch, n_epochs, learning_rate, decay_rate, save_dir):
if len(X_train.shape) < 3:
X_train_1D = X_train.reshape(-1,X_train.shape[1],1)
X_val_1D = X_val.reshape(-1,X_val.shape[1],1)
else:
X_train_1D = X_train
X_val_1D = X_val
print(self.model.summary()) # summarize layers
plot_model(self.model, to_file=save_dir+'/model.png') # plot graph
self.model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr=learning_rate, decay=decay_rate),
metrics=['accuracy'])
# Train the model
return self.model.fit(X_train_1D, one_hot(y_train, self.n_classes),
batch_size=n_batch,
epochs=n_epochs,
validation_data=(X_val_1D, one_hot(y_val, self.n_classes)))
def classify(self, data):
if len(data.shape) < 3:
X_test_1D = data.reshape(-1,data.shape[1],1)
else:
X_test_1D = data
return self.model.predict(X_test_1D)
class ANN:
"""docstring for ANN"""
def __init__(self, input_shape,
n_classes,
dense_units=128,
dropout_rate=0.,
clf_reg=1e-4):
# Model Definition
#raw_inputs = Input(shape=(X_train.shape[1],1,))
raw_inputs = Input(shape=input_shape)
xann = Dense(dense_units, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='FC1_layer')(raw_inputs)
if dropout_rate != 0:
xann = Dropout(dropout_rate)(xann)
top_level_predictions = Dense(n_classes, activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(clf_reg),
bias_regularizer=tf.keras.regularizers.l2(clf_reg),
activity_regularizer=tf.keras.regularizers.l1(clf_reg),
name='top_level_output')(xann)
model = Model(inputs=raw_inputs, outputs=top_level_predictions)
self.model = model
self.n_classes = n_classes
def train(self, X_train, y_train, X_val, y_val, n_batch, n_epochs, learning_rate, decay_rate, save_dir):
if len(X_train.shape) < 3:
X_train_1D = X_train.reshape(-1,X_train.shape[1],)
X_val_1D = X_val.reshape(-1,X_val.shape[1],)
else:
X_train_1D = X_train
X_val_1D = X_val
print(self.model.summary()) # summarize layers
plot_model(self.model, to_file=save_dir+'/model.png') # plot graph
self.model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr=learning_rate, decay=decay_rate),
metrics=['accuracy'])
# Train the model
return self.model.fit(X_train_1D, one_hot(y_train, self.n_classes),
batch_size=n_batch,
epochs=n_epochs,
validation_data=(X_val_1D, one_hot(y_val, self.n_classes)))
def classify(self, data):
if len(data.shape) < 3:
X_test_1D = data.reshape(-1,data.shape[1],)
else:
X_test_1D = data
return self.model.predict(X_test_1D)
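# --- Usage sketch (added for illustration; not part of the original module) ---
# Minimal end-to-end example with random dummy data. The shapes, class count,
# hyperparameters and the './results' directory are assumptions made for this
# sketch; plot_model() inside train() additionally needs pydot/graphviz and an
# existing save_dir.
if __name__ == '__main__':
    X = np.random.rand(256, 40).astype('float32')   # 256 samples, 40 features
    y = np.random.randint(0, 3, size=(256, 1))      # 3 hypothetical classes
    clf = ANN(input_shape=(40,), n_classes=3, dense_units=64, dropout_rate=0.2)
    clf.train(X[:200], y[:200], X[200:], y[200:],
              n_batch=32, n_epochs=2, learning_rate=1e-3, decay_rate=0.0,
              save_dir='./results')
    probs = clf.classify(X[200:])                   # softmax class probabilities
    print(probs.shape)                              # expected: (56, 3)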
| 42.702703 | 130 | 0.558179 |
4a1a9a7064fbb1b719f52728ae7cfe44c41c3b47
| 2,928 |
py
|
Python
|
test_seirmodel.py
|
tomkooij/covid19
|
a7d8a5781ed84b4a59652fc4575c15679de7898a
|
[
"MIT"
] | 6 |
2020-09-27T17:21:23.000Z
|
2022-02-06T11:20:48.000Z
|
test_seirmodel.py
|
tomkooij/covid19
|
a7d8a5781ed84b4a59652fc4575c15679de7898a
|
[
"MIT"
] | 3 |
2020-11-23T13:44:31.000Z
|
2021-07-10T20:10:38.000Z
|
test_seirmodel.py
|
tomkooij/covid19
|
a7d8a5781ed84b4a59652fc4575c15679de7898a
|
[
"MIT"
] | 5 |
2020-11-23T13:29:59.000Z
|
2021-12-25T02:23:32.000Z
|
"""Test cases for code validation."""
import numpy as np
import pandas as pd
from seirmodel import EpidemyModel
def test_EpidemyModel():
"""Test case"""
em = EpidemyModel(
R=2, T_lat=2.2, T_i2h=3.5, T_i2d=5.4,
ihr=0.1, ifr=0.01, dispersion=0.0)
# print(list(em.estate.labels))
expected_labels = [
'Sus', 'La0', 'La1', 'La2', 'Sy0', 'Sy1',
'Ho0', 'Ho1', 'Ded', 'Rec', 'NewL', 'NewH', 'NewD'
]
assert list(em.estate.labels) == expected_labels
matf = em.mat_as_df()
assert list(matf.index) == expected_labels
assert list(matf.columns) == expected_labels
expected_matrix = np.array([
[ 1, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0.2,0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0.8,1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0.03,0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0.07,1, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0.09,0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0.01,1, 1, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0.9, 0, 0.9 ,0, 0, 1, 0, 0, 0 ],
[ 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0.07,1, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0.01,1, 0, 0, 0, 0, 0 ]])
assert np.allclose(matf.to_numpy(), expected_matrix)
# check preservation of number of people (ignore 'newX' rows/columns)
submat = matf.loc['Sus':'Rec', 'Sus':'Rec']
assert np.allclose(submat.sum(axis=0), 1)
def test_EpModel_disp(interactive=False):
n = 4
labels = [f'Foo{i}' for i in range(n)]
matf = pd.DataFrame(np.zeros((n, n)), index=labels, columns=labels)
EpidemyModel._set_transfers_with_dispersion(
matf, 'Foo', 3, 0.0)
if interactive:
        print(f'First matrix:\n{repr(matf.to_numpy())}')
expected_mat = np.array([
[0., 0., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.]])
assert np.allclose(matf.to_numpy(), expected_mat)
EpidemyModel._set_transfers_with_dispersion(matf, 'Foo', 3, 0.2)
if interactive:
        print(f'Second matrix:\n{repr(matf.to_numpy())}')
expected_mat = np.array([
[0., 0., 0., 0.],
[1., 0.02, 0., 0.],
[0., 0.96, 0., 0.],
[0., 0.02, 1., 0.]])
assert np.allclose(matf.to_numpy(), expected_mat)
n = 6
labels = [f'Foo{i}' for i in range(n)]
matf = pd.DataFrame(np.zeros((n, n)), index=labels, columns=labels)
EpidemyModel._set_transfers_with_dispersion(matf, 'Foo', 5, 1)
if interactive:
        print(f'Third matrix:\n{repr(matf.to_numpy())}')
if __name__ == '__main__':
test_EpModel_disp()
test_EpidemyModel()
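# Note (illustrative): the same checks are also collected automatically by
# pytest, e.g. ``pytest test_seirmodel.py -q`` from the repository root.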
| 33.655172 | 73 | 0.486339 |
4a1a9abc02e8fa90f470512daf2d6d720873bc83
| 3,494 |
py
|
Python
|
retired/old_version/original/example/servers/Speech.py
|
gecko-robotics/pygecko
|
a809593a894d8e591e992455a01aa73d8f7b7981
|
[
"MIT"
] | 3 |
2019-06-13T07:52:12.000Z
|
2020-07-05T13:28:43.000Z
|
retired/old_version/original/example/servers/Speech.py
|
walchko/pygecko
|
a809593a894d8e591e992455a01aa73d8f7b7981
|
[
"MIT"
] | 23 |
2017-07-07T01:29:33.000Z
|
2018-11-23T18:41:08.000Z
|
retired/old_version/original/example/servers/Speech.py
|
MomsFriendlyRobotCompany/pygecko
|
a809593a894d8e591e992455a01aa73d8f7b7981
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import logging # logging
import multiprocessing as mp # multiprocess
import pygecko.lib.ZmqClass as zmq
from pygecko.TTS import TTS
from pygecko.Chatbot import Chatbot
import speech_recognition
class SphinxServer(mp.Process):
def __init__(self, host='localhost', port=9000):
"""
"""
# Initialize pyaudio
# self.pyaudio_instance = pyaudio.PyAudio()
# Create a speech recognizer
mp.Process.__init__(self)
self.host = host
self.port = port
logging.basicConfig(level=logging.DEBUG)
self.logger = logging.getLogger(__name__)
self.r = speech_recognition.Recognizer()
# self.logger.info('soundserver stdin: ' + str(sys.stdin.fileno()))
self.pub = zmq.Pub((host, port))
self.sub = zmq.Sub('text', (host, str(port + 1)))
self.tts = TTS()
self.tts.setOptions('-v Karen') # this works on macOS and say
self.chatbot = Chatbot()
print('WARNING ... I am going to move away from this')
def __del__(self):
""" Called when the AlexaAudio object is no longer needed. This closes the PyAudio instance.
"""
# Terminate the pyaudio instance
# self.pyaudio_instance.terminate()
pass
def get_audio(self, timeout=None):
""" Get audio from the microphone. The SpeechRecognition package is used to automatically stop listening
when the user stops speaking. A timeout can also be specified. If the timeout is reached, the function
returns None.
This function can also be used for debugging purposes to read an example audio file.
:param timeout: timeout in seconds, when to give up if the user did not speak.
:return: the raw binary audio string (PCM)
"""
# Create a speech recognizer
# r = speech_recognition.Recognizer()
r = self.r
audio = None
# Open the microphone (and release is when done using "with")
with speech_recognition.Microphone() as source:
if timeout is None:
# Prompt user to say something
print("You can start talking now...")
# TODO add sounds to prompt the user to do something, rather than text
# Record audio until the user stops talking
audio = r.listen(source)
else:
print("Start talking now, you have %d seconds" % timeout)
# TODO add sounds to prompt the user to do something, rather than text
try:
audio = r.listen(source, timeout=timeout)
except speech_recognition.WaitTimeoutError:
return None
if not audio:
print('heard nothing')
return audio
def stt(self, audio):
ret = self.r.recognize_sphinx(audio)
# print('sphinx heard: {}'.format(ret))
return ret
def getPCM(self, audio):
# Convert audio to raw_data (PCM)
raw_audio = audio.get_raw_data()
return raw_audio
def run(self):
"""
Main process run loop
in: none
out: none
"""
# main loop
try:
self.logger.info(str(self.name)+'['+str(self.pid)+'] started on ' +
str(self.host) + ':' + str(self.port) + ', Daemon: '+str(self.daemon))
loop = True
while loop:
print('speak')
audio = self.get_audio(5)
if audio:
txt = self.stt(audio)
print('heard: {}'.format(txt))
txt = self.chatbot.run(txt)
if txt == 'exit_loop':
# self.tts.say('bye')
loop = False
elif txt:
self.logger.debug('response' + txt)
self.tts.say(txt)
self.tts.say('Good bye ...')
except KeyboardInterrupt:
print('{} exiting'.format(__name__))
raise
if __name__ == '__main__':
t = SphinxServer()
t.run()
| 27.296875 | 106 | 0.68403 |
4a1a9ae27be80cada7cfcf91702da5726f58f2ad
| 1,445 |
py
|
Python
|
extensions/jisho.py
|
TuxedoDiscord/TuxedoBot
|
77536b34c6778f3c97353c777cca5cd325bc16d3
|
[
"MIT"
] | 5 |
2017-11-23T06:39:14.000Z
|
2018-02-05T16:03:26.000Z
|
extensions/jisho.py
|
TuxedoDiscord/TuxedoBot
|
77536b34c6778f3c97353c777cca5cd325bc16d3
|
[
"MIT"
] | 11 |
2018-02-09T18:46:15.000Z
|
2018-04-12T19:05:11.000Z
|
extensions/jisho.py
|
TuxedoDiscord/TuxedoBot
|
77536b34c6778f3c97353c777cca5cd325bc16d3
|
[
"MIT"
] | 7 |
2017-11-21T20:58:26.000Z
|
2018-02-05T14:50:52.000Z
|
#!/usr/bin/env python3
"""jisho.org query command."""
import urllib.parse
import requests
import discord
from discord.ext import commands
BASE_URL = "http://jisho.org/api/v1/search/words"
class Jisho:
"""A Japanese translation command."""
@commands.command(aliases=["jp"])
async def jisho(self, ctx, query):
"""Translate a string into Japanese"""
with requests.get(BASE_URL, params={"keyword": query}) as response:
data = response.json()
        if not data["data"]:
            await ctx.send("No matching result found on jisho.org.")
            return
jap = data["data"][0]["japanese"][0]
senses = data["data"][0]["senses"][0]
defs = ", ".join(senses["english_definitions"])
tags = senses["tags"]
embed = discord.Embed(color=discord.Color.blurple())
embed.add_field(name="Kanji", value=str(jap.get("word")))
embed.add_field(name="Kana", value=str(
jap.get("reading")), inline=False)
embed.add_field(name="English", value=defs, inline=False)
embed.add_field(name="Tags", value=tags)
embed.set_thumbnail(
url="https://www.tofugu.com/images/learn-japanese/jisho-org-9a549ffd.jpg")
embed.set_footer(text="Powered by Jisho.org")
await ctx.send(embed=embed)
def setup(bot):
"""Set up the extension."""
bot.add_cog(Jisho())
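# Usage note (illustrative, not part of the original cog): with a discord.py
# commands.Bot this module is typically loaded as an extension, e.g.
# bot.load_extension("extensions.jisho"); the exact dotted path depends on the
# project layout and is an assumption here.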
| 31.413043 | 90 | 0.595156 |
4a1a9bac9d341358010e679e800f98f1451c2093
| 26,510 |
py
|
Python
|
pysolar/util.py
|
matthistuff/Decaying-Shelters-Rhino
|
38f5669f34da886bf4740f7fcaa9383872a1bf5b
|
[
"MIT"
] | null | null | null |
pysolar/util.py
|
matthistuff/Decaying-Shelters-Rhino
|
38f5669f34da886bf4740f7fcaa9383872a1bf5b
|
[
"MIT"
] | 1 |
2019-04-23T02:25:54.000Z
|
2019-04-23T12:41:50.000Z
|
pysolar/util.py
|
matthistuff/Decaying-Shelters-Rhino
|
38f5669f34da886bf4740f7fcaa9383872a1bf5b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009-2010 Brandon Stafford
#
# This file is part of Pysolar.
#
# Pysolar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Pysolar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with Pysolar. If not, see <http://www.gnu.org/licenses/>.
"""Additional support functions for solar geometry, astronomy, radiation correlation
:Original author: Simeon Nwaogaidu
:Contact: SimeonObinna.Nwaogaidu AT lahmeyer DOT de
:Additional author: Holger Zebner
:Contact: holger.zebner AT lahmeyer DOT de
:Additional author: Brandon Stafford
"""
from datetime import datetime as dt
import math
import pytz
from pytz import all_timezones
import solar
# Some default constants
AM_default = 2.0 # Default air mass is 2.0
TL_default = 1.0 # Default Linke turbidity factor is 1.0
SC_default = 1367.0 # Solar constant in W/m^2 is 1367.0. Note that this value could vary by +/-4 W/m^2
TY_default = 365 # Total year number from 1 to 365 days
elevation_default = 0.0 # Default elevation is 0.0
# Useful equations for analysis
def GetSunriseSunset(latitude_deg, longitude_deg, utc_datetime, timezone):
"""This function calculates the astronomical sunrise and sunset times in local time.
WARNING: THIS FUNCTION IS BROKEN. It relies on an unknown library called conversions_time, and another library called decimaldegrees that does not appear to have an active maintainer. TODO: Fix this function so it works without creating unnecessary dependencies.
Parameters
----------
latitude_deg : float
latitude in decimal degree. A geographical term denoting
the north/south angular location of a place on a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location
in an east-west direction,relative to the Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
timezone : float
timezone as numerical value: GMT offset in hours. A time zone is a region of
the earth that has uniform standard time, usually referred to as the local time.
Returns
-------
sunrise_time_dt : datetime.datetime
Sunrise time in local time as datetime_obj.
sunset_time_dt : datetime.datetime
Sunset time in local time as datetime_obj.
References
----------
.. [1] http://www.skypowerinternational.com/pdf/Radiation/7.1415.01.121_cm121_bed-anleitung_engl.pdf
.. [2] http://pysolar.org/
Examples
--------
>>> gmt_offset = 1
>>> lat = 50.111512
>>> lon = 8.680506
>>> timezone_local = 'Europe/Berlin'
>>> utct = dt.datetime.utcnow()
>>> sr, ss = sb.GetSunriseSunset(lat, lon, utct, gmt_offset)
>>> print 'sunrise: ', sr
>>> print 'sunset:', ss
"""
# Day of the year
day = solar.GetDayOfYear(utc_datetime)
# Solar hour angle
SHA = ((timezone) * 15.0 - longitude_deg)
# Time adjustment
TT = (279.134 + 0.985647 * day) * math.pi / 180
# Time adjustment in hours
time_adst = ((5.0323 - 100.976 * math.sin(TT) + 595.275 * math.sin(2 * TT) +
3.6858 * math.sin(3 * TT) - 12.47 * math.sin(4 * TT) - 430.847 * math.cos(TT) +
12.5024 * math.cos(2 * TT) + 18.25 * math.cos(3 * TT)) / 3600)
# Time of noon
TON = (12 + (SHA / 15.0) - time_adst)
sunn = (math.pi / 2 - (23.45 * math.pi / 180) * math.tan(latitude_deg * math.pi / 180) *
math.cos(2 * math.pi * day / 365.25)) * (180 / (math.pi * 15))
# Sunrise_time in hours
sunrise_time = (TON - sunn + time_adst)
# Sunset_time in hours
sunset_time = (TON + sunn - time_adst)
sunrise_time_dt = date_with_decimal_hour(utc_datetime, sunrise_time)
sunset_time_dt = date_with_decimal_hour(utc_datetime, sunset_time)
return sunrise_time_dt, sunset_time_dt
def GetSunriseTime(latitude_deg, longitude_deg, utc_datetime, timezone):
"Wrapper for GetSunriseSunset that returns just the sunrise time"
sr, ss = GetSunriseSunset(latitude_deg, longitude_deg, utc_datetime, timezone)
return sr
def GetSunsetTime(latitude_deg, longitude_deg, utc_datetime, timezone):
"Wrapper for GetSunriseSunset that returns just the sunset time"
sr, ss = GetSunriseSunset(latitude_deg, longitude_deg, utc_datetime, timezone)
return ss
def mean_earth_sun_distance(utc_datetime):
"""Mean Earth-Sun distance is the arithmetical mean of the maximum and minimum distances
between a planet (Earth) and the object about which it revolves (Sun). However,
the function is used to calculate the Mean earth sun distance.
Parameters
----------
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
Returns
-------
KD : float
Mean earth sun distance
References
----------
.. [1] http://sunbird.jrc.it/pvgis/solres/solmod3.htm#clear-sky%20radiation
.. [2] R. aguiar and et al, "The ESRA user guidebook, vol. 2. database", models and exploitation software-Solar
radiation models, p.113
"""
return (1 - (0.0335 * math.sin(360 * ((solar.GetDayOfYear(utc_datetime)) - 94)) / (365)))
def extraterrestrial_irrad(utc_datetime, latitude_deg, longitude_deg, SC=SC_default):
"""Equation calculates Extratrestrial radiation. Solar radiation incident outside the earth's
atmosphere is called extraterrestrial radiation. On average the extraterrestrial irradiance
is 1367 Watts/meter2 (W/m2). This value varies by + or - 3 percent as the earth orbits the sun.
The earth's closest approach to the sun occurs around January 4th and it is furthest
from the sun around July 5th.
Parameters
----------
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location
of a place on a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative
to the Greenwich meridian.
SC : float
The solar constant is the amount of incoming solar electromagnetic radiation per unit area, measured
on the outer surface of Earth's atmosphere in a plane perpendicular to the rays.It is measured by
satellite to be roughly 1366 watts per square meter (W/m^2)
Returns
-------
EXTR1 : float
Extraterrestrial irradiation
References
----------
.. [1] http://solardat.uoregon.edu/SolarRadiationBasics.html
.. [2] Dr. J. Schumacher and et al,"INSEL LE(Integrated Simulation Environment Language)Block reference",p.68
"""
    day = solar.GetDayOfYear(utc_datetime)
    ab = math.cos(2 * math.pi * (day - 1.0) / 365.0)
    bc = math.sin(2 * math.pi * (day - 1.0) / 365.0)
    cd = math.cos(2 * (2 * math.pi * (day - 1.0) / 365.0))
    df = math.sin(2 * (2 * math.pi * (day - 1.0) / 365.0))
decl = solar.GetDeclination(day)
ha = solar.GetHourAngle(utc_datetime, longitude_deg)
ZA = math.sin(latitude_deg) * math.sin(decl) + math.cos(latitude_deg) * math.cos(decl) * math.cos(ha)
return SC * ZA * (1.00010 + 0.034221 * ab + 0.001280 * bc + 0.000719 * cd + 0.000077 * df)
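# Hedged usage sketch (assumes the module-level `solar` import and SC_default defined
# earlier in this file; the location and time are arbitrary examples):
#     import datetime
#     etr = extraterrestrial_irrad(datetime.datetime(2020, 6, 21, 12, 0),
#                                  latitude_deg=50.0, longitude_deg=8.7)
#     # etr approximates the extraterrestrial irradiance in W/m^2 for that time and place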
def declination_degree(utc_datetime, TY=TY_default):
"""The declination of the sun is the angle between Earth's equatorial plane and a line
between the Earth and the sun. It varies between 23.45 degrees and -23.45 degrees,
hitting zero on the equinoxes and peaking on the solstices.
Parameters
----------
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
TY : float
Total number of days in a year. eg. 365 days per year,(no leap days)
Returns
-------
DEC : float
The declination of the Sun
References
----------
.. [1] http://pysolar.org/
"""
return 23.45 * math.sin((2 * math.pi / (TY)) * ((solar.GetDayOfYear(utc_datetime)) - 81))
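# Illustrative sanity check (the dates are arbitrary examples): per the docstring, the
# declination should be near 0 degrees around the equinoxes and approach +/-23.45
# degrees at the solstices.
#     import datetime
#     declination_degree(datetime.datetime(2021, 3, 21))   # roughly 0
#     declination_degree(datetime.datetime(2021, 6, 21))   # roughly +23.4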
def solarelevation_function_clear(latitude_deg, longitude_deg, utc_datetime, temperature_celsius=25,
pressure_millibars=1013.25, elevation=elevation_default):
"""Equation calculates Solar elevation function for clear sky type.
Parameters
----------
latitude_deg : float
latitude in decimal degree. A geographical term denoting
the north/south angular location of a place on a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location
in an east-west direction,relative to the Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
temperature_celsius : float
Temperature is a physical property of a system that underlies the common notions of hot and cold.
pressure_millibars : float
pressure_millibars
elevation : float
The elevation of a geographic location is its height above a fixed reference point, often the mean
sea level.
Returns
-------
SOLALTC : float
Solar elevation function clear sky
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status
and proposed new approaches", energy 30 (2005), pp 1533 - 1549.
"""
altitude = solar.GetAltitude(latitude_deg, longitude_deg, utc_datetime, elevation, temperature_celsius,
pressure_millibars)
return (0.038175 + (1.5458 * (math.sin(altitude))) + ((-0.59980) * (0.5 * (1 - math.cos(2 * (altitude))))))
def solarelevation_function_overcast(latitude_deg, longitude_deg, utc_datetime,
elevation=elevation_default, temperature_celsius=25,
pressure_millibars=1013.25):
""" The function calculates solar elevation function for overcast sky type.
This associated hourly overcast radiation model is based on the estimation of the
overcast sky transmittance with the sun directly overhead combined with the application
    of an overcast sky elevation function to estimate the overcast day global irradiation
value at any solar elevation.
Parameters
----------
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location of a place on a
sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
elevation : float
The elevation of a geographic location is its height above a fixed reference point, often the mean sea level.
temperature_celsius : float
Temperature is a physical property of a system that underlies the common notions of hot and cold.
pressure_millibars : float
pressure_millibars
Returns
-------
SOLALTO : float
Solar elevation function overcast
References
----------
.. [1] Prof. Peter Tregenza,"Solar radiation and daylight models", p.89.
.. [2] Also accessible through Google Books: http://tinyurl.com/5kdbwu
Tariq Muneer, "Solar Radiation and Daylight Models, Second Edition: For the Energy Efficient
Design of Buildings"
"""
altitude = solar.GetAltitude(latitude_deg, longitude_deg, utc_datetime, elevation, temperature_celsius,
pressure_millibars)
return ((-0.0067133) + (0.78600 * (math.sin(altitude)))) + (0.22401 * (0.5 * (1 - math.cos(2 * altitude))))
def diffuse_transmittance(TL=TL_default):
    """Calculates the diffuse transmittance, i.e. the theoretical diffuse irradiance on a horizontal
    surface when the sun is at the zenith.
Parameters
----------
TL : float
Linke turbidity factor
Returns
-------
DT : float
diffuse_transmittance
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status and proposed
new approaches", energy 30 (2005), pp 1533 - 1549.
"""
return ((-21.657) + (41.752 * (TL)) + (0.51905 * (TL) * (TL)))
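# Brief usage sketch (the TL value below is an arbitrary example): the diffuse
# transmittance is a simple quadratic in the Linke turbidity factor.
#     dt_clean = diffuse_transmittance()         # uses TL_default
#     dt_turbid = diffuse_transmittance(TL=5.0)  # a more turbid atmosphere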
def diffuse_underclear(latitude_deg, longitude_deg, utc_datetime, elevation=elevation_default,
temperature_celsius=25, pressure_millibars=1013.25, TL=TL_default):
"""Equation calculates diffuse radiation under clear sky conditions.
Parameters
----------
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location of a place on
a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
elevation : float
The elevation of a geographic location is its height above a fixed reference point, often the mean sea level.
temperature_celsius : float
Temperature is a physical property of a system that underlies the common notions of hot and cold.
pressure_millibars : float
pressure_millibars
TL : float
Linke turbidity factor
Returns
-------
DIFFC : float
Diffuse Irradiation under clear sky
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status and proposed
new approaches", energy 30 (2005), pp 1533 - 1549.
"""
    DT = diffuse_transmittance(TL)
altitude = solar.GetAltitude(latitude_deg, longitude_deg, utc_datetime, elevation, temperature_celsius,
pressure_millibars)
return mean_earth_sun_distance(utc_datetime) * DT * altitude
def diffuse_underovercast(latitude_deg, longitude_deg, utc_datetime, elevation=elevation_default,
temperature_celsius=25, pressure_millibars=1013.25, TL=TL_default):
"""Function calculates the diffuse radiation under overcast conditions.
Parameters
----------
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location of a place on a
sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
elevation : float
The elevation of a geographic location is its height above a fixed reference point, often the mean sea level.
temperature_celsius : float
Temperature is a physical property of a system that underlies the common notions of hot and cold.
pressure_millibars : float
pressure_millibars
TL : float
Linke turbidity factor
Returns
-------
DIFOC : float
Diffuse Irradiation under overcast
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status and proposed
new approaches", energy 30 (2005), pp 1533 - 1549.
"""
    DT = diffuse_transmittance(TL)
DIFOC = ((mean_earth_sun_distance(utc_datetime)
) * (DT) * (solar.GetAltitude(latitude_deg, longitude_deg, utc_datetime, elevation,
temperature_celsius, pressure_millibars)))
return DIFOC
def direct_underclear(latitude_deg, longitude_deg, utc_datetime,
temperature_celsius=25, pressure_millibars=1013.25, TY=TY_default,
AM=AM_default, TL=TL_default, elevation=elevation_default):
"""Equation calculates direct radiation under clear sky conditions.
Parameters
----------
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location of a
place on a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
temperature_celsius : float
Temperature is a physical property of a system that underlies the common notions of hot and cold.
pressure_millibars : float
pressure_millibars
TY : float
Total number of days in a year. eg. 365 days per year,(no leap days)
AM : float
Air mass. An Air Mass is a measure of how far light travels through the Earth's atmosphere. One air mass,
or AM1, is the thickness of the Earth's atmosphere. Air mass zero (AM0) describes solar irradiance in space,
where it is unaffected by the atmosphere. The power density of AM1 light is about 1,000 W/m^2
TL : float
Linke turbidity factor
elevation : float
The elevation of a geographic location is its height above a fixed reference point, often the mean
sea level.
Returns
-------
DIRC : float
Direct Irradiation under clear
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status and proposed
new approaches", energy 30 (2005), pp 1533 - 1549.
"""
KD = mean_earth_sun_distance(utc_datetime)
DEC = declination_degree(utc_datetime, TY)
DIRC = (1367 * KD * math.exp(-0.8662 * (AM) * (TL) * (DEC)
) * math.sin(solar.GetAltitude(latitude_deg, longitude_deg,
utc_datetime, elevation,
temperature_celsius, pressure_millibars)))
return DIRC
def global_irradiance_clear(DIRC, DIFFC, latitude_deg, longitude_deg, utc_datetime,
temperature_celsius=25, pressure_millibars=1013.25, TY=TY_default,
AM=AM_default, TL=TL_default, elevation=elevation_default):
"""Equation calculates global irradiance under clear sky conditions.
Parameters
----------
DIRC : float
Direct Irradiation under clear
DIFFC : float
Diffuse Irradiation under clear sky
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location of a place
on a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative to
the Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
temperature_celsius : float
Temperature is a physical property of a system that underlies the common notions of hot and cold.
pressure_millibars : float
pressure_millibars
TY : float
Total number of days in a year. eg. 365 days per year,(no leap days)
AM : float
Air mass. An Air Mass is a measure of how far light travels through the Earth's atmosphere. One air mass,
or AM1, is the thickness of the Earth's atmosphere. Air mass zero (AM0) describes solar irradiance in
        space, where it is unaffected by the atmosphere. The power density of AM1 light is about 1,000 W/m^2.
TL : float
Linke turbidity factor
elevation : float
The elevation of a geographic location is its height above a fixed reference point, often the mean sea
level.
Returns
-------
ghic : float
Global Irradiation under clear sky
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status and proposed
new approaches", energy 30 (2005), pp 1533 - 1549.
"""
    DIRC = direct_underclear(latitude_deg, longitude_deg, utc_datetime,
                             temperature_celsius=temperature_celsius,
                             pressure_millibars=pressure_millibars,
                             TY=TY, AM=AM, TL=TL, elevation=elevation)
    DIFFC = diffuse_underclear(latitude_deg, longitude_deg, utc_datetime,
                               elevation=elevation, TL=TL,
                               temperature_celsius=temperature_celsius,
                               pressure_millibars=pressure_millibars)
ghic = (DIRC + DIFFC)
return ghic
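# Hedged usage sketch (location and time are arbitrary examples; the DIRC/DIFFC
# placeholders are ignored because the function recomputes both components internally):
#     import datetime
#     when = datetime.datetime(2020, 6, 21, 12, 0)
#     ghic = global_irradiance_clear(0.0, 0.0, 50.0, 8.7, when)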
def global_irradiance_overcast(latitude_deg, longitude_deg, utc_datetime,
elevation=elevation_default, temperature_celsius=25,
                               pressure_millibars=1013.25):
    """The calculated global irradiance is compared to the diffuse irradiance under overcast conditions.
Under overcast skies, global and diffuse are expected to be equal due to the absence of the beam
component.
Parameters
----------
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location of a
place on a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative
to the Greenwich meridian.
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
elevation : float
The elevation of a geographic location is its height above a fixed reference point, often the
mean sea level.
temperature_celsius : float
Temperature is a physical property of a system that underlies the common notions of hot and
cold.
pressure_millibars : float
pressure_millibars
Returns
-------
ghioc : float
Global Irradiation under overcast sky
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality
control of solar radiation data: present status
and proposed new approaches", energy 30
(2005), pp 1533 - 1549.
"""
ghioc = (572 * (solar.GetAltitude(latitude_deg, longitude_deg, utc_datetime,
elevation, temperature_celsius, pressure_millibars)))
return ghioc
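# Illustrative comparison (comment only; inputs are arbitrary examples): per the
# docstring above, the global and diffuse estimates should be close under a fully
# overcast sky, since the beam component is absent.
#     import datetime
#     when = datetime.datetime(2020, 6, 21, 12, 0)
#     ghioc = global_irradiance_overcast(50.0, 8.7, when)
#     difoc = diffuse_underovercast(50.0, 8.7, when)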
def diffuse_ratio(DIFF_data, ghi_data):
"""Function calculates the Diffuse ratio.
Parameters
----------
DIFF_data : array_like
Diffuse horizontal irradiation data
ghi_data : array_like
global horizontal irradiation data array
Returns
-------
K : float
diffuse_ratio
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status and proposed
new approaches", energy 30 (2005), pp 1533 - 1549.
"""
K = DIFF_data / ghi_data
return K
def clear_index(ghi_data, utc_datetime, latitude_deg, longitude_deg):
"""This calculates the clear index ratio.
Parameters
----------
ghi_data : array_like
global horizontal irradiation data array
utc_datetime : date_object
utc_datetime. UTC DateTime is for Universal Time ( i.e. like a GMT+0 )
latitude_deg : float
latitude in decimal degree. A geographical term denoting the north/south angular location of a place
on a sphere.
longitude_deg : float
longitude in decimal degree. Longitude shows your location in an east-west direction,relative to the
Greenwich meridian.
Returns
-------
KT : float
Clear index ratio
References
----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation data: present status and proposed
new approaches", energy 30 (2005), pp 1533 - 1549.
"""
EXTR1 = extraterrestrial_irrad(utc_datetime, latitude_deg, longitude_deg)
KT = (ghi_data / EXTR1)
return KT
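# Hedged usage sketch (the measured GHI value and the location are arbitrary examples;
# ghi_data may also be a NumPy array of measurements):
#     import datetime
#     kt = clear_index(650.0, datetime.datetime(2020, 6, 21, 12, 0),
#                      latitude_deg=50.0, longitude_deg=8.7)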
def date_with_decimal_hour(date_utc, hour_decimal):
    """This converts a date with a decimal hour into a datetime with hours and minutes.
    An improved version of :mod:`conversions_time`.
    Parameters
    ----------
    date_utc : datetime.datetime
        A datetime object is a single object containing all the information from a
        date object and a time object.
    hour_decimal : float
        The hour of the day expressed as a decimal number (e.g. 13.5 for 13:30).
    Returns
    -------
    datetime_hour : datetime.datetime
        datetime_hour
    """
hour_dms = (int(hour_decimal), int((hour_decimal - int(hour_decimal)) * 60), 0,)
datetime_hour = dt(date_utc.year, date_utc.month, date_utc.day,
hour_dms[0], hour_dms[1], int(hour_dms[2]))
return datetime_hour
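# Minimal smoke-test sketch (an illustrative addition, not original code): it assumes the
# module-level imports (math, solar, dt) and the *_default constants defined earlier in
# this file, and reuses the example location from the docstring near the top.
if __name__ == "__main__":
    import datetime
    example_time = datetime.datetime(2020, 6, 21, 12, 0)
    lat, lon = 50.111512, 8.680506
    print("mean earth-sun distance factor: {0}".format(mean_earth_sun_distance(example_time)))
    print("declination (deg): {0}".format(declination_degree(example_time)))
    print("extraterrestrial irradiance (W/m^2): {0}".format(
        extraterrestrial_irrad(example_time, lat, lon)))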
| 39.626308 | 266 | 0.646247 |
4a1a9c316c33e460b47bee597d85a4509e9b9544
| 3,254 |
py
|
Python
|
dataset.py
|
atkirtland/pyffe
|
f6788a70ed495fb7dd3721ad2f8fc876cd09ca13
|
[
"MIT"
] | null | null | null |
dataset.py
|
atkirtland/pyffe
|
f6788a70ed495fb7dd3721ad2f8fc876cd09ca13
|
[
"MIT"
] | null | null | null |
dataset.py
|
atkirtland/pyffe
|
f6788a70ed495fb7dd3721ad2f8fc876cd09ca13
|
[
"MIT"
] | null | null | null |
import os
from collections import Counter
import numpy as np
import pandas as pd
import pyffe
# DataSubset is DEPRECATED
class DataSubset(object):
def __init__(self, parent, list_file_name):
self.parent = parent
self.list_file = list_file_name
self.list_name = os.path.splitext(self.list_file)[0]
self.list_absolute_path = self.parent.path + "/" + self.list_file
self.count = None
# FIXME Vars for deserialization
# Used for transition from DataSubset to ListFile
self.urls = None
self.labels = None
self.abs_path = None
self._loaded = False
self.get_count()
def __getattr__(self, name):
if hasattr(self.__dict__, name):
return self.__dict__[name]
elif 'parent' in self.__dict__ and hasattr(self.__dict__['parent'], name):
return self.__dict__['parent'].__dict__[name]
raise AttributeError("No attribute called {} is present".format(name))
def get_count(self):
if self.count is not None:
return self.count
p = os.path.join(self.parent.path, self.list_file)
with open(p) as f:
for i, l in enumerate(f):
pass
self.count = i + 1
return self.count
def get_list_full_path(self):
p = os.path.join(self.parent.path, self.list_file)
return p
def get_name(self):
return self.parent.name + '_' + self.list_name
def __str__(self):
return self.get_name()
class Dataset(object):
def __init__(self, dataset_path):
self.path = os.path.abspath(dataset_path)
self.name = os.path.basename(self.path)
self.root_folder = None
        config_file = os.path.join(self.path, "config.py")
        if os.path.exists(config_file):
            # Execute config.py in its own namespace; it is expected to define
            # a `config` dict. Using a namespace dict makes this work in
            # Python 3, where exec() cannot create new local variables.
            context = dict()
            with open(config_file) as f:
                code = compile(f.read(), config_file, 'exec')
                exec(code, context)
            self.__dict__.update(context['config'])
self.load_subsets()
def load_subsets(self):
self.subsets = {
os.path.splitext(list_file)[0]: pyffe.ListFile(self.path + '/' + list_file, self)
for list_file in os.listdir(self.path) if list_file.endswith(".txt")
}
# TOFIX: this code is a hack for binary classification only
def get_details(self):
keys = []
frees = []
busys = []
        for k, v in self.subsets.items():
            try:
                c = Counter(v.get_labels())
                frees.append(c[0])
                busys.append(c[1])
                keys.append(k)
            except Exception:
                print("Skipping", k, ": not a list file")
return pd.DataFrame(np.array([frees, busys]).T, index=keys, columns=["free", "busy"])
def __getattr__(self, name):
if hasattr(self.__dict__, name):
return self.__dict__[name]
elif 'subsets' in self.__dict__ and name in self.__dict__['subsets']:
return self.__dict__['subsets'][name]
raise AttributeError("No attribute called {} is present".format(name))
| 31.592233 | 93 | 0.588814 |
4a1a9d557c880d2fceee02a71c80cbc939374dcd
| 5,902 |
py
|
Python
|
pybat/cli/commands/get.py
|
lslap/pybat
|
72fcc703c095ab9841e8b13845c1bea780f02904
|
[
"MIT"
] | null | null | null |
pybat/cli/commands/get.py
|
lslap/pybat
|
72fcc703c095ab9841e8b13845c1bea780f02904
|
[
"MIT"
] | null | null | null |
pybat/cli/commands/get.py
|
lslap/pybat
|
72fcc703c095ab9841e8b13845c1bea780f02904
|
[
"MIT"
] | null | null | null |
# Encoding: UTF-8
# Copyright (c) Marnik Bercx, University of Antwerp
# Distributed under the terms of the MIT License
import os
import pdb
from pybat.core import Cathode, DimerNEBAnalysis
from pymatgen import Structure
from pymatgen.io.vasp.outputs import Outcar
from pymatgen.analysis.transition_state import NEBAnalysis
"""
Set of scripts used to extract information from VASP output files for analysis.
"""
__author__ = "Marnik Bercx"
__copyright__ = "Copyright 2018, Marnik Bercx, University of Antwerp"
__version__ = "0.1"
__maintainer__ = "Marnik Bercx"
__email__ = "marnik.bercx@uantwerpen.be"
__date__ = "May 2018"
# Total Energy per Li of metallic lithium
LI_ENERGY = -1.89
def get_structure(directory, write_cif=False):
"""
Construct a .json file with the structure and magnetic moment from the
output of a VASP calculation, i.e. the CONTCAR and OUTCAR file.
Args:
directory (str): Directory in which the geometry optimization
output files (i.e. CONTCAR and OUTCAR) are stored.
write_cif (bool): Flag that indicates whether the structure should
also be written as a .cif file.
"""
directory = os.path.abspath(directory)
structure = Structure.from_file(os.path.join(directory, "CONTCAR"))
out = Outcar(os.path.join(directory, "OUTCAR"))
magmom = [site["tot"] for site in out.magnetization]
# Add the magnetic moments to the Structure
try:
structure.add_site_property("magmom", magmom)
except ValueError:
# If something goes wrong in assigning the magnetic moments,
# give the user a warning and assign magnetic moment zero to all sites.
print("WARNING: Could not assign the magnetic moments found in the "
"OUTCAR file. They may be missing.")
structure.add_site_property("magmom", len(structure.sites) * [0])
structure.to("json", "structure.json")
if write_cif:
structure.to("cif", "structure.cif")
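# Hedged usage sketch (the directory path is hypothetical; assumes a finished VASP
# geometry optimization whose CONTCAR and OUTCAR live in that directory):
#     get_structure("runs/LiMnO2_relax", write_cif=True)
#     # writes structure.json (and structure.cif) to the current working directory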
def get_cathode(directory, to_current_dir=False, write_cif=False,
ignore_magmom=False):
"""
Construct a .json file of the updated Cathode from a geometry
optimization, based on the initial_cathode.json file and the output of a
VASP calculation, i.e. the CONTCAR and OUTCAR file. All these files must
be present in the directory.
Args:
directory (str): Directory in which the geometry optimization
calculation was performed. Must contain the initial_cathode.json,
OUTCAR and CONTCAR file.
to_current_dir (bool): Write the output final_cathode files to the
current working directory.
write_cif (bool): Flag that determines whether a .cif file of the
cathode structure is written to the directory.
ignore_magmom (bool): Flag that indicates that the final magnetic
moments of the optimized structure should be ignored. This means
that the magnetic moments of the initial structure will be used.
Returns:
None
"""
directory = os.path.abspath(directory)
cathode = Cathode.from_file(os.path.join(directory,
"initial_cathode.json"))
cathode.update_sites(directory, ignore_magmom=ignore_magmom)
if to_current_dir:
filename = os.path.join(os.getcwd(), "final_cathode")
else:
filename = os.path.join(directory, "final_cathode")
cathode.to("json", filename + ".json")
if write_cif:
cathode.to("cif", filename + ".cif")
def get_barrier(directory, method="pymatgen"):
"""
Plot the migration barrier of a transition in a directory.
Args:
directory (str):
method (str):
Returns:
"""
if method == "pymatgen":
# The pymatgen.analysis.transition_state module has an object that
# allows you to
neb = NEBAnalysis.from_dir(directory, relaxation_dirs=('initial',
'final'))
neb.get_plot().show()
if method == "dimers":
# This method makes some assumptions about the directory structure
# for it to work:
#
# - The image directories are two characters long, and there are no
# other directories which are two characters long.
# - The directory in which the nudged elastic band was performed
# contains the dimer indices, delimited by '_', and with no other
# numbers delimited in such a way present.
if os.path.exists(os.path.join(directory, "neb_data.json")):
neb = DimerNEBAnalysis.from_file(
os.path.join(directory, "neb_data.json")
)
else:
neb = DimerNEBAnalysis.from_dir(directory)
neb.to("json", os.path.join(directory, "neb_data.json"))
neb.setup_spline({"saddle_point": "zero_slope"})
neb.get_plot(label_barrier=False).show()
def get_voltage(directory, calculation="relax", functional=None):
"""
Calculate the voltage of a battery consisting of a cathode specified by the
directory versus a metallic Li anode.
Args:
directory:
calculation:
functional:
"""
raise NotImplementedError
def get_endiff(directory):
"""
Calculate the energy difference for a transition in a directory.
Args:
directory:
Returns:
"""
initial_outcar = Outcar(os.path.join(directory, "initial", "OUTCAR"))
final_outcar = Outcar(os.path.join(directory, "final", "OUTCAR"))
initial_energy = initial_outcar.final_energy
final_energy = final_outcar.final_energy
print("The energy difference is: ", end="")
print(str(final_energy - initial_energy) + " eV")
# SO plagiarism
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
| 32.251366 | 79 | 0.659268 |
4a1a9dd890766579078b4b73f34ba0e798ec9fc2
| 1,735 |
py
|
Python
|
autres/gestion-des-mots-de-passe.py
|
ehbc221/apprenez-a-programmer-en-python
|
16ed3ba8b914b77a576ef18a34702179acb25338
|
[
"MIT"
] | null | null | null |
autres/gestion-des-mots-de-passe.py
|
ehbc221/apprenez-a-programmer-en-python
|
16ed3ba8b914b77a576ef18a34702179acb25338
|
[
"MIT"
] | null | null | null |
autres/gestion-des-mots-de-passe.py
|
ehbc221/apprenez-a-programmer-en-python
|
16ed3ba8b914b77a576ef18a34702179acb25338
|
[
"MIT"
] | null | null | null |
# -*-coding:Utf-8 -*
##########################################
# Receive a password entered by the user #
##########################################
from getpass import getpass
mot_de_passe = getpass("Tapez votre mot de passe : ")
###################
# Hash a password #
###################
# Hash a password
import hashlib
# Choose an algorithm: algorithms_available or hashlib.algorithms_guaranteed. Example:
hashlib.algorithms_guaranteed
# -> {'sha1', 'sha224', 'sha384', 'sha256', 'sha512', 'md5'}
# Using an algorithm: SHA1 (bytes string) => use a lowercase b before opening your string
mot_de_passe = hashlib.sha1(b"mot de passe")
# Get the digest associated with this object: digest (returns a bytes object containing our hashed password) and hexdigest (returns a str containing a sequence of hexadecimal symbols => 0 to 9 and A to F)
mot_de_passe.hexdigest()
# -> 'b47ea832576a75814e13351dcc97eaa985b9c6b7'
##############################
# Password verification test #
##############################
import hashlib
from getpass import getpass
chaine_mot_de_passe = b"azerty"
mot_de_passe_chiffre = hashlib.sha1(chaine_mot_de_passe).hexdigest()
verrouille = True
while verrouille:
entre = getpass("Tapez le mot de passe : ") # azerty
    # Encode the input so that we get a bytes object
entre = entre.encode()
entre_chiffre = hashlib.sha1(entre).hexdigest()
if entre_chiffre == mot_de_passe_chiffre:
verrouille = False
else:
print("Mot de passe incorrect")
print("Mot de passe accepté...")
| 34.019608 | 221 | 0.637464 |
4a1a9f173226a6f677f68c6553ed2c29e67eb448
| 342 |
py
|
Python
|
src/main.py
|
codestrange/cool-compiler-2020
|
30508965d75a1a1d1362d0b51bef8da3978fd0c2
|
[
"MIT"
] | 3 |
2020-01-14T04:47:32.000Z
|
2020-09-10T17:57:20.000Z
|
src/main.py
|
codestrange/cool-compiler-2020
|
30508965d75a1a1d1362d0b51bef8da3978fd0c2
|
[
"MIT"
] | 5 |
2020-01-14T06:06:35.000Z
|
2020-02-19T01:01:33.000Z
|
src/main.py
|
codestrange/cool-compiler-2020
|
30508965d75a1a1d1362d0b51bef8da3978fd0c2
|
[
"MIT"
] | 3 |
2020-01-14T04:58:24.000Z
|
2020-01-14T16:23:41.000Z
|
from os import system
from sys import argv
INPUT_FILE = argv[1]
OUTPUT_FILE = f"{INPUT_FILE[0: -2]}mips"
print("CodeStrange Cool Compiler v0.1")
print(
"Copyright (c) 2020: "
+ "Carlos Bermudez Porto, "
+ "Leynier Gutiérrez González, "
+ "Tony Raúl Blanco Fernández"
)
system(f"python coolc.py {INPUT_FILE} {OUTPUT_FILE}")
| 21.375 | 53 | 0.687135 |
4a1aa0f55bb3dcf4ddcef486719549b7ebb8e53d
| 1,025 |
py
|
Python
|
Archive/appendix/Atari/baseline-QR-DQN/utils.py
|
uncharted-technologies/risk-and-uncertainty
|
d6bc518ebd3a661d3de6f298588bec5cc4c96e96
|
[
"MIT"
] | 19 |
2019-05-28T14:30:23.000Z
|
2022-03-31T03:14:31.000Z
|
Archive/appendix/Atari/baseline-QR-DQN/utils.py
|
uncharted-technologies/risk-and-uncertainty
|
d6bc518ebd3a661d3de6f298588bec5cc4c96e96
|
[
"MIT"
] | 4 |
2021-06-08T20:30:58.000Z
|
2022-03-12T00:02:29.000Z
|
Archive/appendix/Atari/baseline-QR-DQN/utils.py
|
uncharted-technologies/risk-and-uncertainty
|
d6bc518ebd3a661d3de6f298588bec5cc4c96e96
|
[
"MIT"
] | 3 |
2019-07-20T14:40:03.000Z
|
2021-02-26T04:09:03.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
import numpy as np
import torch
def set_global_seed(seed, env):
torch.manual_seed(seed)
env.seed(seed)
np.random.seed(seed)
random.seed(seed)
def quantile_huber_loss(x, y, device, kappa=1):
    """Quantile Huber loss between two sets of quantile estimates.
    x and y are tensors of shape (batch_size, num_quantiles); kappa is the
    Huber threshold (kappa=0 reduces to the plain quantile regression loss).
    """
    batch_size = x.shape[0]
    num_quant = x.shape[1]
    # Broadcast x and y against each other to form all pairwise quantile differences
x = x.unsqueeze(2).repeat(1,1,num_quant)
y = y.unsqueeze(2).repeat(1,1,num_quant).transpose(1,2)
tau_hat = torch.linspace(0.0, 1.0 - 1. / num_quant, num_quant) + 0.5 / num_quant
tau_hat = tau_hat.to(device)
tau_hat = tau_hat.unsqueeze(0).unsqueeze(2).repeat(batch_size, 1,num_quant)
diff = y-x
if kappa == 0:
huber_loss = diff.abs()
else:
huber_loss = 0.5 * diff.abs().clamp(min=0.0, max=kappa).pow(2)
huber_loss += kappa * (diff.abs() - diff.abs().clamp(min=0.0, max=kappa))
quantile_loss = (tau_hat - (diff < 0).float()).abs() * huber_loss
return quantile_loss.mean(2).mean(0).sum()
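# Hedged usage sketch (an illustrative addition): both inputs are (batch_size, num_quantiles)
# tensors of quantile estimates, e.g. online network outputs and distributional targets.
if __name__ == "__main__":
    device = torch.device("cpu")
    batch_size, num_quant = 4, 8
    current_quantiles = torch.randn(batch_size, num_quant)  # e.g. Q-network output
    target_quantiles = torch.randn(batch_size, num_quant)   # e.g. Bellman targets
    loss = quantile_huber_loss(current_quantiles, target_quantiles, device)
    print("quantile huber loss: {0:.4f}".format(loss.item()))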
| 24.404762 | 84 | 0.626341 |
4a1aa14630751a073bb1bf18454d0489502307b9
| 2,017 |
py
|
Python
|
AutomatedSpearPhisher/CommonFrame.py
|
tejarrpaladagu/phisher
|
ddfcc10820d6c995a44e7d7c302b5df860140480
|
[
"MIT"
] | 3 |
2021-06-28T19:37:04.000Z
|
2022-03-18T23:27:53.000Z
|
AutomatedSpearPhisher/CommonFrame.py
|
tejarrpaladagu/phisher
|
ddfcc10820d6c995a44e7d7c302b5df860140480
|
[
"MIT"
] | null | null | null |
AutomatedSpearPhisher/CommonFrame.py
|
tejarrpaladagu/phisher
|
ddfcc10820d6c995a44e7d7c302b5df860140480
|
[
"MIT"
] | 4 |
2021-04-22T17:50:20.000Z
|
2021-08-13T01:57:38.000Z
|
from tkinter import Frame, PhotoImage, Label
from warnings import warn
from time import strftime
# Common Frame with header and footer
class CommonFrame(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, bg='#0077e6')
self.createHeading()
self.createFooter()
        self.tick()
#common heading
def createHeading(self):
heading_label = Label(self, text='Spear Phishing Tool', font=('orbitron', 45,'bold'), fg='white', bg='#0077e6')
heading_label.pack(pady=25)
def createFooter(self):
#bottom frame for time and python logo
bottom_frame = Frame(self,relief='raised', borderwidth=3)
bottom_frame.pack(fill='x',side='bottom')
#python develop sentence
python_dev_label = Label(bottom_frame, text='Developed with: ', font=('orbitron', 12,'bold'))
python_dev_label.place(relx=0)
#python symbol
        python_image = PhotoImage(file='images/python.png')
python_label = Label(bottom_frame, image=python_image)
python_label.place(relx=0.11)
python_label.image = python_image
self.time_label = Label(bottom_frame,font=('orbitron-Bold',12))
        self.time_label.pack(side='right')
#time bar at the bottom
def tick(self):
current_time = strftime('%I:%M %p').lstrip('0').replace(' 0',' ')
self.time_label.config(text=current_time)
self.time_label.after(200,self.tick)
#frame for buttons
def createButtonFrame(self):
self.button_frame = Frame(self,bg='#80c1ff')
self.button_frame.pack(fill='both', expand=True)
# make sure button frame exists
def getButtonFrame(self):
try:
self.button_frame
        except AttributeError:
self.createButtonFrame()
warn('WARNING: Main button frame did not exist... Manually creating button frame')
return self.button_frame
def changePages(self, page_name: str):
self.controller.show_frame(page_name)
| 35.385965 | 119 | 0.653941 |
4a1aa29b784174e35a2142b053855a39049f072e
| 2,665 |
py
|
Python
|
wolframclient/serializers/encoders/datetime.py
|
WolframResearch/WolframClientForPython
|
27cffef560eea8d16c02fe4086f42363604284b6
|
[
"MIT"
] | 358 |
2018-10-18T13:39:48.000Z
|
2022-03-26T09:42:53.000Z
|
wolframclient/serializers/encoders/datetime.py
|
WolframResearch/WolframClientForPython
|
27cffef560eea8d16c02fe4086f42363604284b6
|
[
"MIT"
] | 29 |
2018-10-20T09:04:12.000Z
|
2022-03-06T18:36:19.000Z
|
wolframclient/serializers/encoders/datetime.py
|
LaudateCorpus1/WolframClientForPython
|
26f7fa3d81691ba2a63d3eadcd9734b261130b7c
|
[
"MIT"
] | 38 |
2018-10-19T21:52:14.000Z
|
2021-11-21T13:07:04.000Z
|
from __future__ import absolute_import, print_function, unicode_literals
import datetime
from wolframclient.utils.dispatch import Dispatch
encoder = Dispatch()
@encoder.dispatch(datetime.datetime)
def encode_datetime(serializer, o):
return serializer.serialize_function(
serializer.serialize_symbol(b"DateObject"),
(
serializer.serialize_iterable(
(
serializer.serialize_int(o.year),
serializer.serialize_int(o.month),
serializer.serialize_int(o.day),
serializer.serialize_int(o.hour),
serializer.serialize_int(o.minute),
serializer.serialize_float(o.second + o.microsecond / 1000000.0),
)
),
serializer.serialize_string("Instant"),
serializer.serialize_string("Gregorian"),
serializer.serialize_tzinfo(o.tzinfo, o),
),
)
@encoder.dispatch(datetime.tzinfo)
def encode_tzinfo(serializer, o):
return serializer.serialize_tzinfo(o)
@encoder.dispatch(datetime.timedelta)
def encode_timedelta(serializer, o):
return serializer.serialize_function(
serializer.serialize_symbol(b"Quantity"),
(
serializer.serialize_float(o.total_seconds()),
serializer.serialize_string("Seconds"),
),
)
@encoder.dispatch(datetime.date)
def encode_date(serializer, o):
return serializer.serialize_function(
serializer.serialize_symbol(b"DateObject"),
(
serializer.serialize_iterable(
(
serializer.serialize_int(o.year),
serializer.serialize_int(o.month),
serializer.serialize_int(o.day),
)
),
serializer.serialize_string("Day"),
serializer.serialize_string("Gregorian"),
serializer.serialize_symbol(b"None"),
),
)
@encoder.dispatch(datetime.time)
def encode_time(serializer, o):
inner = [
serializer.serialize_iterable(
(
serializer.serialize_int(o.hour),
serializer.serialize_int(o.minute),
serializer.serialize_float(o.second + o.microsecond / 1000000.0),
)
)
]
if o.tzinfo:
inner.append(
serializer.serialize_rule(
serializer.serialize_symbol(b"TimeZone"),
serializer.serialize_tzinfo(o.tzinfo, o, name_match=None),
)
)
return serializer.serialize_function(serializer.serialize_symbol(b"TimeObject"), inner)
| 29.94382 | 91 | 0.605253 |
4a1aa2d9a44249a405091a6c5891bcf0418bb906
| 6,275 |
py
|
Python
|
src/hackeme/frontend/hackeme_parser.py
|
ThomasBollmeier/hackeme-native
|
1bd9eac3eb057661045bcc1a612f8fc704f6d809
|
[
"Apache-2.0"
] | null | null | null |
src/hackeme/frontend/hackeme_parser.py
|
ThomasBollmeier/hackeme-native
|
1bd9eac3eb057661045bcc1a612f8fc704f6d809
|
[
"Apache-2.0"
] | null | null | null |
src/hackeme/frontend/hackeme_parser.py
|
ThomasBollmeier/hackeme-native
|
1bd9eac3eb057661045bcc1a612f8fc704f6d809
|
[
"Apache-2.0"
] | null | null | null |
from .hackeme_base_parser import HackemeBaseParser
from komparse import Ast
class HackemeParser(HackemeBaseParser):
def __init__(self):
HackemeBaseParser.__init__(self)
self._init_transformations()
def parse(self, source):
ast = HackemeBaseParser.parse(self, source)
if ast:
arity_grouping = _ArityGrouping()
ast.walk(arity_grouping)
return arity_grouping.get_grouped_ast()
else:
return None
def _init_transformations(self):
g = self._grammar
g.set_ast_transform('start', self._start)
g.set_ast_transform('definition', lambda ast: ast.get_children()[0])
g.set_ast_transform('vardef', self._vardef)
g.set_ast_transform('fundef', self._fundef)
g.set_ast_transform('expr', lambda ast: ast.get_children()[0])
g.set_ast_transform('no_list', lambda ast: ast.get_children()[0])
g.set_ast_transform('if_expr', self._if_expr)
g.set_ast_transform('cond_expr', self._cond_expr)
g.set_ast_transform('cond_branch', self._cond_branch)
g.set_ast_transform('call', self._call)
g.set_ast_transform('operator', self._operator)
g.set_ast_transform('boolean', self._boolean)
g.set_ast_transform('list', self._list)
g.set_ast_transform('list_item', self._list_item)
# AST transformations:
@staticmethod
def _start(ast):
ret = Ast('hackeme')
for child in ast.get_children():
child.id = ''
ret.add_child(child)
return ret
@staticmethod
def _vardef(ast):
ret = Ast('vardef')
name_node = ast.find_children_by_id('name')[0]
ret.set_attr('name', name_node.value)
ret.add_children_by_id(ast, 'value')
return ret
@staticmethod
def _fundef(ast):
ret = Ast('fundef')
name_node = ast.find_children_by_id('name')[0]
ret.set_attr('name', name_node.value)
params = Ast('parameters')
ret.add_child(params)
param_nodes = ast.find_children_by_id('param')
for param_node in param_nodes:
params.add_child(Ast('parameter', param_node.value))
vararg = ast.find_children_by_id('vararg')
if vararg:
vararg = vararg[0]
params.add_child(Ast('var', vararg.value[:-1]))
localdefs = Ast('localdefs')
ret.add_child(localdefs)
localdefs.add_children_by_id(ast, 'localdef')
body = Ast('body')
ret.add_child(body)
body.add_children_by_id(ast, 'body')
return ret
@staticmethod
def _if_expr(ast):
ret = Ast('if_expr')
test = Ast('test')
ret.add_child(test)
test.add_children_by_id(ast, 'test')
consequent = Ast('consequent')
ret.add_child(consequent)
consequent.add_children_by_id(ast, 'consequent')
alternate = Ast('alternate')
ret.add_child(alternate)
alternate.add_children_by_id(ast, 'alternate')
return ret
@staticmethod
def _cond_expr(ast):
ret = Ast('cond')
ret.add_children_by_id(ast, 'branch')
return ret
@staticmethod
def _cond_branch(ast):
ret = Ast('branch')
test = Ast('test')
ret.add_child(test)
test.add_children_by_id(ast, 'test')
consequent = Ast('consequent')
ret.add_child(consequent)
consequent.add_children_by_id(ast, 'consequent')
return ret
@staticmethod
def _call(ast):
ret = Ast('call')
callee = Ast('callee')
ret.add_child(callee)
callee.add_children_by_id(ast, 'callee')
args = Ast('arguments')
ret.add_child(args)
args.add_children_by_id(ast, 'arg')
return ret
@staticmethod
def _operator(ast):
ret = Ast('operator')
op = ast.get_children()[0].value
ret.set_attr('value', op)
return ret
@staticmethod
def _boolean(ast):
child = ast.get_children()[0]
if child.value == '#t' or child.value == '#true':
return Ast('TRUE')
else:
return Ast('FALSE')
@staticmethod
def _list(ast):
ret = Ast('list')
ret.add_children_by_id(ast, 'li')
return ret
@staticmethod
def _list_item(ast):
children = ast.find_children_by_id('single')
if children:
ret = children[0]
ret.id = ''
return ret
else:
ret = Ast('list')
ret.add_children_by_id(ast, 'li')
return ret
class _ArityGrouping(object):
"""
Group arities into function definition node
"""
def __init__(self):
self._ast = None
self._node_stack = []
self._func_stack = []
def get_grouped_ast(self):
return self._ast
def enter_node(self, node):
if node.has_attr('root'):
self._ast = node.copy()
self._node_stack.append(self._ast)
self._func_stack = [{}]
elif node.name == 'fundef':
arity = Ast("arity")
func_name = node.get_attr('name')
funcs = self._func_stack[-1]
if func_name not in funcs:
func_node = node.copy()
funcs[func_name] = func_node
self._add_to_parent(func_node)
else:
func_node = funcs[func_name]
func_node.add_child(arity)
self._node_stack.append(arity)
self._func_stack.append({})
else:
self._node_stack.append(node.copy())
def exit_node(self, node):
child = self._node_stack.pop()
if node.name != "fundef":
self._add_to_parent(child)
else:
self._func_stack.pop()
def visit_node(self, node):
self._add_to_parent(node.copy())
def _add_to_parent(self, node):
if self._node_stack:
parent = self._node_stack[-1]
parent.add_child(node)
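# Hedged usage sketch (`source` is a hypothetical Hackeme program string):
#     parser = HackemeParser()
#     ast = parser.parse(source)  # grouped Ast on success, None if parsing fails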
| 30.31401 | 76 | 0.567649 |
4a1aa32d3c14b1be6a9b5623dbfba181e538efc0
| 2,367 |
py
|
Python
|
doc/scripts/docgen.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 2,045 |
2015-01-01T14:07:52.000Z
|
2022-03-08T08:56:41.000Z
|
doc/scripts/docgen.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 305 |
2015-01-02T13:18:24.000Z
|
2021-08-20T18:03:28.000Z
|
doc/scripts/docgen.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 976 |
2015-01-01T17:08:51.000Z
|
2022-03-25T19:53:17.000Z
|
from __future__ import print_function
from collections import defaultdict
import inspect
import getopt
import os
import shutil
import sys
if __name__ == '__main__':
throot = "/".join(sys.path[0].split("/")[:-2])
options = defaultdict(bool)
options.update(dict([x, y or True] for x, y in getopt.getopt(sys.argv[1:], 'o:', ['epydoc', 'rst', 'help', 'nopdf', 'test'])[0]))
if options['--help']:
print('Usage: %s [OPTIONS]' % sys.argv[0])
print(' -o <dir>: output the html files in the specified dir')
print(' --rst: only compile the doc (requires sphinx)')
print(' --nopdf: do not produce a PDF file from the doc, only HTML')
print(' --help: this help')
# --test: build the docs with warnings=errors to test them (exclusive)
sys.exit(0)
options['--all'] = not options['--rst']
def mkdir(path):
try:
os.mkdir(path)
except OSError:
pass
outdir = options['-o'] or (throot + '/html')
mkdir(outdir)
os.chdir(outdir)
mkdir("doc")
# Make sure the appropriate 'theano' directory is in the PYTHONPATH
pythonpath = os.environ.get('PYTHONPATH', '')
pythonpath = throot + ':' + pythonpath
os.environ['PYTHONPATH'] = pythonpath
if options['--test']:
import sphinx
sys.path[0:0] = [os.path.join(throot, 'doc')]
        out = sphinx.main(['', '-b', 'text', '-W',
'-E', os.path.join(throot, 'doc'), '.'])
sys.exit(out)
elif options['--all'] or options['--rst']:
import sphinx
sys.path[0:0] = [os.path.join(throot, 'doc')]
sphinx.main(['', '-E', os.path.join(throot, 'doc'), '.'])
if not options['--nopdf']:
# Generate latex file in a temp directory
import tempfile
workdir = tempfile.mkdtemp()
sphinx.main(['', '-E', '-b', 'latex',
os.path.join(throot, 'doc'), workdir])
# Compile to PDF
os.chdir(workdir)
os.system('make')
try:
shutil.copy(os.path.join(workdir, 'pylearn2.pdf'), outdir)
os.chdir(outdir)
shutil.rmtree(workdir)
except OSError as e:
print('OSError:', e)
except IOError as e:
print('IOError:', e)
| 32.424658 | 133 | 0.538657 |