repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
---|---|---|---|---|---|---|---|---|
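Each row of this table stores one Python file split into prefix, middle, and suffix cells (a fill-in-the-middle style layout). A minimal sketch of how such a row would be reassembled into the original source, using a hypothetical row dictionary whose keys are only the column names from the header above:

# Hypothetical row; only the column names are taken from the schema above.
row = {
    "repo_name": "example/repo",
    "path": "pkg/module.py",
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n\nprint(add(1, 2))\n",
}

# The original file is just the concatenation of the three spans.
source = row["prefix"] + row["middle"] + row["suffix"]
print(source)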
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/rest_framework/serializers.py | Python | agpl-3.0 | 41,575 | 0.001034 |
"""
Serializers and ModelSerializers are similar to Forms and ModelForms.
Unlike forms, they are not constrained to dealing with HTML output, and
form encoded input.
Serialization in REST framework is a two-phase process:
1. Serializers marshal between complex types like model instances, and
python primitives.
2. The process of marshalling between python primitives and request and
response content is handled by parsers and renderers.
"""
from __future__ import unicode_literals
import copy
import datetime
import inspect
import types
from decimal import Decimal
from django.contrib.contenttypes.generic import GenericForeignKey
from django.core.paginator import Page
from django.db import models
from django.forms import widgets
from django.utils.datastructures import SortedDict
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.compat import get_concrete_model, six
from rest_framework.settings import api_settings
# Note: We do the following so that users of the framework can use this style:
#
# example_field = serializers.CharField(...)
#
# This helps keep the separation between model fields, form fields, and
# serializer fields more explicit.
from rest_framework.relations import * # NOQA
from rest_framework.fields import * # NOQA
def _resolve_model(obj):
"""
Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName'
"""
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
return models.get_model(app_name, model_name)
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
else:
raise ValueError("{0} is not a Django model".format(obj))
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
class RelationsList(list):
_deleted = []
class NestedValidationError(ValidationError):
"""
The default ValidationError behavior is to stringify each item in the list
if the messages are a list of error messages.
In the case of nested serializers, where the parent has many children,
then the child's `serializer.errors` will be a list of dicts. In the case
of a single child, the `serializer.errors` will be a dict.
We need to override the default behavior to get properly nested error dicts.
"""
def __init__(self, message):
if isinstance(message, dict):
self._messages = [message]
else:
self._messages = message
@property
def messages(self):
return self._messages
class DictWithMetadata(dict):
"""
A dict-like object, that can have additional properties attached.
"""
def __getstate__(self):
"""
Used by pickle (e.g., caching).
Overridden to remove the metadata from the dict, since it shouldn't be
pickled and may in some instances be unpickleable.
"""
return dict(self)
class SortedDictWithMetadata(SortedDict):
"""
A sorted dict-like object, that can have additional properties attached.
"""
def __getstate__(self):
"""
Used by pickle (e.g., caching).
Overridden to remove the metadata from the dict, since it shouldn't be
pickled and may in some instances be unpickleable.
"""
return SortedDict(self).__dict__
def _is_protected_type(obj):
"""
True if the object is a native datatype that does not need to
be serialized further.
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal,
basestring)
)
def _get_declared_fields(bases, attrs):
"""
Create a list of serializer field instances from the passed in 'attrs',
plus any fields on the base classes (in 'bases').
Note that all fields from the base classes are used.
"""
fields = [(field_name, attrs.pop(field_name))
for field_name, obj in list(six.iteritems(attrs))
if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Serializer, add that Serializer's
# fields. Note that we loop over the bases in *reverse*. This is necessary
# in order to maintain the correct order of fields.
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = list(base.base_fields.items()) + fields
return SortedDict(fields)
class SerializerMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = _get_declared_fields(bases, attrs)
return super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class SerializerOptions(object):
"""
Meta class options for Serializer
"""
def __init__(self, meta):
self.depth = getattr(meta, 'depth', 0)
self.fields = getattr(meta, 'fields', ())
self.exclude = getattr(meta, 'exclude', ())
class BaseSerializer(WritableField):
"""
This is the Serializer implementation.
We need to implement it as `BaseSerializer` due to metaclass magicks.
"""
class Meta(object):
pass
_options_class = SerializerOptions
_dict_class = SortedDictWithMetadata
def __init__(self, instance=None, data=None, files=None,
context=None, partial=False, many=None,
allow_add_remove=False, **kwargs):
super(BaseSerializer, self).__init__(**kwargs)
self.opts = self._options_class(self.Meta)
self.parent = None
self.root = None
self.partial = partial
self.many = many
self.allow_add_remove = allow_add_remove
self.context = context or {}
self.init_data = data
self.init_files = files
self.object = instance
self.fields = self.get_fields()
self._data = None
self._files = None
self._errors = None
if many and instance is not None and not hasattr(instance, '__iter__'):
raise ValueError('instance should be a queryset or other iterable with many=True')
if allow_add_remove and not many:
raise ValueError('allow_add_remove should only be used for bulk updates, but you have not set many=True')
#####
# Methods to determine which fields to use when (de)serializing objects.
def get_default_fields(self):
"""
Return the complete set of default fields for the object, as a dict.
"""
return {}
def get_fields(self):
"""
Returns the complete set of fields for the object as a dict.
This will be the set of any explicitly declared fields,
plus the set of fields returned by get_default_fields().
"""
ret = SortedDict()
# Get the explicitly declared fields
base_fields = copy.deepcopy(self.base_fields)
for key, field in base_fields.items():
ret[key] = field
# Add in the default fields
default_fields = self.get_default_fields()
for key, val in default_fields.items():
if key not in ret:
ret[key] = val
# If 'fields' is specified, use those fields, in that order.
if self.opts.fields:
assert isinstance(self.opts.fields, (list, tuple)), '`fields` must be a list or tuple'
new = SortedDict()
for key in self.opts.fields:
new[key] = ret[key]
ret = new
# Remove anything in 'exclude'
if self.opts.exclude:
assert isinstance(self.opts.exclude, (list, tuple)), '`exclude` must be a list or tuple'
for key in self.opts.exclude:
ret.pop(key, None)
for key,
|
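The serializers excerpt above relies on a metaclass that gathers declared Field attributes into base_fields, ordered by creation_counter and merged with fields from base classes. A stand-alone sketch of that pattern (simplified Python 3 stand-ins, not DRF's actual classes):

import itertools

class Field(object):
    _counter = itertools.count()

    def __init__(self):
        # Record declaration order, mirroring DRF's creation_counter.
        self.creation_counter = next(Field._counter)

def _get_declared_fields(bases, attrs):
    fields = [(name, attrs.pop(name))
              for name, obj in list(attrs.items())
              if isinstance(obj, Field)]
    fields.sort(key=lambda item: item[1].creation_counter)
    # Fields declared on base classes come first, as in the excerpt above.
    for base in bases[::-1]:
        if hasattr(base, 'base_fields'):
            fields = list(base.base_fields.items()) + fields
    return dict(fields)

class SerializerMetaclass(type):
    def __new__(cls, name, bases, attrs):
        attrs['base_fields'] = _get_declared_fields(bases, attrs)
        return super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)

class Serializer(metaclass=SerializerMetaclass):
    pass

class CommentSerializer(Serializer):
    email = Field()
    content = Field()

print(list(CommentSerializer.base_fields))  # ['email', 'content']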
Mark-E-Hamilton/tappy | tap/tests/testcase.py | Python | bsd-2-clause | 258 | 0 |
# Copyright (c) 2016, Matt Layman
import unittest
from tap.tests.factory import Factory
class TestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestCase, self).__init__(methodName)
self.factory = Factory()
|
tsu-iscd/lyapas-lcc | lyapas_to_json.py | Python | bsd-3-clause | 753 | 0.010624 |
#!/usr/bin/env python2.7
import json
import argparse
import codecs
import sys
def main(args):
data = args.in_lyapas.read()
data = json.dumps(data, ensure_ascii=False, encoding='utf-8')
json_data = '{"file": "' + args.in_lyapas.name + '",' + ' "source": ' + data +'}'
args.out_filename.write(json_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Getting json from lyapas sources')
parser.add_argument('in_lyapas', help='Path in filesystem for input lyapas-file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('-out_filename', help='Name of output file', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
main(args)
|
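The script above assembles its JSON output by string concatenation, so the quoting of the file name is handled by hand. A small sketch (with a hypothetical make_payload helper) of producing the same {"file": ..., "source": ...} shape with json.dumps, which escapes both fields automatically:

import json
import sys

def make_payload(name, source_text):
    # json.dumps handles quoting/escaping of both the name and the source.
    return json.dumps({"file": name, "source": source_text}, ensure_ascii=False)

if __name__ == "__main__":
    text = sys.stdin.read()
    sys.stdout.write(make_payload("<stdin>", text))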
waterponey/scikit-learn | sklearn/linear_model/ridge.py | Python | bsd-3-clause | 51,357 | 0.000156 |
"""
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, so we can solve the multi-target problem at once.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically ba
|
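The comments in _solve_cholesky above summarize the ridge normal equations, w = inv(X^T X + alpha*Id) X^T y. A small NumPy sketch of that closed form on random data (a numerical illustration, not part of scikit-learn):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
y = rng.randn(20)
alpha = 0.5

# Closed-form ridge: w = inv(X^T X + alpha*Id) X^T y, solved without an
# explicit matrix inverse.
A = X.T.dot(X) + alpha * np.eye(X.shape[1])
w = np.linalg.solve(A, X.T.dot(y))

# w should minimize ||Xw - y||^2 + alpha*||w||^2; perturbing it only
# increases the objective.
def objective(coef):
    return np.sum((X.dot(coef) - y) ** 2) + alpha * np.sum(coef ** 2)

print(w)
print(objective(w) <= objective(w + 1e-3 * rng.randn(3)))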
phac-nml/irida-miseq-uploader | Tests/unitTests/test_directoryscanner.py | Python | apache-2.0 | 1,282 | 0.00234 |
import unittest
from os import path
from API.directoryscanner import find_runs_in_directory
path_to_module = path.abspath(path.dirname(__file__))
class TestDirectoryScanner(unittest.TestCase):
def test_sample_names_spaces(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-names-with-spaces"))
self.assertEqual(1, len(runs))
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertEqual(sample.get_id(), sample.get_id().strip())
def test_single_end(self):
runs = find_runs_in_directory(path.join(path_to_module, "single_end"))
self.assertEqual(1, len(runs))
self.assertEqual("SINGLE_END", runs[0].metadata["layoutType"])
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertFalse(sample.is_paired_end())
def test_completed_upload(self):
runs = find_runs_in_directory(path.join(path_to_module, "completed"))
self.assertEqual(0, len(runs))
def test_find_sample_sheet_name_variations(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-sheet-name-variations"))
self.assertEqual(1, len(runs))
|
1065865483/0python_script | four/Webdriver/screenshot.py | Python | mit | 405 | 0.008264 |
from selenium import webdriver
from time import sleep
driver=webdriver.Firefox()
# Open the 51zxw.net page and take a screenshot
driver.get("http://www.51zxw.net/")
driver.get_screenshot_as_file(r'E:\0python_script\four\Webdriver\zxw.jpg')
sleep(2)
# Open the Baidu page and take a screenshot
driver.get("http://www.baidu.com")
driver.get_screenshot_as_file(r'E:\0python_script\four\Webdriver\baidu.png')
sleep(2)
driver.quit()
|
ys-nuem/project-euler | 003/003.py | Python | mit | 603 | 0.006861 |
import random
N = 600851475143
def gcd(a, b):
while b > 0:
a, b = b, a % b
return a
def factorize(N):
" N の素因数分解を求める (
|
Pollard's rho algorithm) "
factors = []
while N >= 2:
d = 1
while d == 1:
x = random.randint(1, N)
y = random.randint(1, N)
d = gcd(abs(x-y), N)
d = int(d)
if d < N:
factors.append(d)
N /= d
elif d == N:
factors.append(d)
break
return factors
factors = list(sorted(factorize(N)))
print(factors[-1])
|
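The factorize above draws random pairs and keeps gcd divisors; for the fixed N used here, a deterministic trial-division cross-check is short enough to include (an independent sketch, not part of the original script):

def largest_prime_factor(n):
    # Plain trial division: strip each divisor completely before moving on.
    factor = None
    d = 2
    while d * d <= n:
        while n % d == 0:
            factor = d
            n //= d
        d += 1
    if n > 1:
        # Whatever remains after trial division is itself prime.
        factor = n
    return factor

print(largest_prime_factor(600851475143))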
tensorflow/tensorflow | tensorflow/python/kernel_tests/image_ops/decode_jpeg_op_test.py | Python | apache-2.0 | 7,835 | 0.006254 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeJpegOp."""
import os
import time
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeJpegBenchmark(test.Benchmark):
"""Evaluate tensorflow DecodeJpegOp performance."""
def _evalDecodeJpeg(self,
image_name,
parallelism,
num_iters,
crop_during_decode=None,
crop_window=None,
tile=None):
"""Evaluate DecodeJpegOp for the given image.
TODO(tanmingxing): add decoding+cropping as well.
Args:
image_name: a string of image file name (without suffix).
parallelism: the number of concurrent decode_jpeg ops to be run.
num_iters: number of iterations for evaluation.
crop_during_decode: If true, use fused DecodeAndCropJpeg instead of
separate decode and crop ops. It is ignored if crop_window is None.
crop_window: if not None, crop the decoded image. Depending on
crop_during_decode, cropping could happen during or after decoding.
tile: if not None, tile the image to composite a larger fake image.
Returns:
The duration of the run in seconds.
"""
ops.reset_default_graph()
image_file_path = resource_loader.get_path_to_datafile(
os.path.join('core', 'lib', 'jpeg', 'testdata', image_name))
# resource_loader does not seem to work well under benchmark runners.
# So if the above path is not available, try another way to access the file:
if not os.path.exists(image_file_path):
image_file_path = resource_loader.get_path_to_datafile(
os.path.join(
'..', '..', 'core', 'lib', 'jpeg', 'testdata', image_name))
if tile is None:
image_content = variable_scope.get_variable(
'image_%s' % image_name,
initializer=io_ops.read_file(image_file_path))
else:
single_image = image_ops.decode_jpeg(
io_ops.read_file(image_file_path), channels=3, name='single_image')
# Tile the image to composite a new larger image.
tiled_image = array_ops.tile(single_image, tile)
image_content = variable_scope.get_variable(
'tiled_image_%s' % image_name,
initializer=image_ops.encode_jpeg(tiled_image))
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
images = []
for _ in range(parallelism):
if crop_window is None:
# No crop.
image = image_ops.decode_jpeg(image_content, channels=3)
elif crop_during_decode:
# combined decode and crop.
image = image_ops.decode_and_crop_jpeg(
image_content, crop_window, channels=3)
else:
# separate decode and crop.
image = image_ops.decode_jpeg(image_content, channels=3)
image = image_ops.crop_to_bounding_box(
image,
offset_height=crop_window[0],
offset_width=crop_window[1],
target_height=crop_window[2],
target_width=crop_window[3])
images.append(image)
r = control_flow_ops.group(*images)
for _ in range(3):
# Skip warm up time.
self.evaluate(r)
start_time = time.time()
for _ in range(num_iters):
self.evaluate(r)
end_time = time.time()
return end_time - start_time
def benchmarkDecodeJpegSmall(self):
"""Evaluate single DecodeImageOp for small size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'small.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegMedium(self):
"""Evaluate single DecodeImageOp for medium size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegLarge(self):
"""Evaluate single DecodeImageOp for large size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
tile = [4, 4, 1]
for parallelism in [1, 100]:
# Tile the medium size image to composite a larger fake image.
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters, tile)
duration_decode_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, False, crop_window, tile)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window, tile)
self.report_benchmark(
name='decode_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
if __name__ == '__main__':
test.main()
|
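The benchmark above evaluates the op a few times to skip warm-up and then times num_iters runs. The same measurement pattern, reduced to a plain Python callable (a generic sketch, not TensorFlow-specific):

import time

def benchmark(fn, num_iters=10, warmup=3):
    for _ in range(warmup):
        # Skip warm-up time, as the benchmark above does.
        fn()
    start_time = time.time()
    for _ in range(num_iters):
        fn()
    return time.time() - start_time

if __name__ == "__main__":
    wall_time = benchmark(lambda: sum(range(10000)))
    print("wall time: %f seconds" % wall_time)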
mscuthbert/abjad | abjad/tools/timespantools/offset_happens_before_timespan_stops.py | Python | gpl-3.0 | 1,081 | 0.000925 |
# -*- encoding: utf-8 -*-
def offset_happens_before_timespan_stops(
timespan=None,
offset=None,
hold=False,
):
r'''Makes time relation indicating that `offset` happens before `timespan` stops.
::
>>> relation = timespantools.offset_happens_before_timespan_stops()
>>> print(format(relation))
timespantools.OffsetTimespanTimeRelation(
inequality=timespantools.CompoundInequality(
[
timespantools.SimpleInequality('offset < timespan.stop'),
],
logical_operator='and',
),
)
Returns time relation or boolean.
'''
from abjad.tools import timespantools
inequality = timespantools.CompoundInequality([
'offset < timespan.stop',
])
time_relation = timespantools.OffsetTimespanTimeRelation(
inequality,
timespan=timespan,
offset=offset)
if time_relation.is_fully_loaded and not hold:
return time_relation()
else:
return time_relation
|
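Stripped of abjad's relation objects, the function above reduces to the single inequality offset < timespan.stop. A plain-number sketch of that check (the Timespan namedtuple here is a hypothetical stand-in, not an abjad class):

from collections import namedtuple

Timespan = namedtuple('Timespan', ['start', 'stop'])  # hypothetical stand-in

def offset_happens_before_timespan_stops(timespan, offset):
    # The whole relation boils down to this comparison.
    return offset < timespan.stop

span = Timespan(start=0, stop=10)
print(offset_happens_before_timespan_stops(span, 5))   # True
print(offset_happens_before_timespan_stops(span, 12))  # False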
zk33/negi | negi/main.py | Python | mit | 798 | 0.035088 |
# -*- coding: utf-8 -*-
import aaargh
from app import Negi
app = aaargh.App(description="Jinja2+JSON powered static HTML build tool")
@app.cmd(help='Parse JSON and build HTML')
@app.cmd_arg('-d','--data_dir',default='./data',help='JSON data directory (default: ./data)')
@app.cmd_arg('-t','--tmpl_dir',default='./templates',help='Jinja2 template directory (default: ./templates)')
@app.cmd_arg('-o','--out_dir',default='./dist',help='Output directory (default: ./dist)')
@app.cmd_arg('-v','--verbose',nargs='?',const=True,default=False)
def build(data_dir,tmpl_dir,out_dir,verbose):
builder = Negi(
data_dir= data_dir,
tmpl_dir = tmpl_dir,
out_dir = out_dir,
verbose = verbose
)
builder.build()
def main():
app.run()
if __name__ == '__main__':
main()
|
svirt/tp-libvirt | libvirt/tests/src/virsh_cmd/host/virsh_nodecpustats.py | Python | gpl-2.0 | 8,299 | 0.000361 |
import re
from autotest.client.shared import error
from autotest.client import utils
from virttest import virsh
from virttest import utils_libvirtd
def run(test, params, env):
"""
Test the command virsh nodecpustats
(1) Call the virsh nodecpustats command for all cpu host cpus
separately
(2) Get the output
(3) Check against the /proc/stat output (o) for the respective cpu
user: o[0] + o[1]
system: o[2] + o[5] + o[6]
idle: o[3]
iowait: o[4]
(4) Call the virsh nodecpustats command with an unexpected option
(5) Call the virsh nodecpustats command with libvirtd service stop
"""
def virsh_check_nodecpustats_percpu(actual_stats):
"""
Check the actual nodecpustats output value
total time <= system uptime
"""
# Normalise to seconds from nano seconds
total = float((actual_stats['system'] + actual_stats['user'] +
actual_stats['idle'] + actual_stats['iowait']) / (10 ** 9))
uptime = float(utils.get_uptime())
if not total <= uptime:
raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
" as total time: %f is more"
" than uptime: %f" % (total, uptime))
return True
def virsh_check_nodecpustats(actual_stats, cpu_count):
"""
Check the actual nodecpustats output value
total time <= system uptime
"""
# Normalise to seconds from nano seconds and get for one cpu
total = float(((actual_stats['system'] + actual_stats['user'] +
actual_stats['idle'] + actual_stats['iowait']) / (10 ** 9)) / (
cpu_count))
uptime = float(utils.get_uptime())
if not total <= uptime:
raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
" as total time: %f is more"
" than uptime: %f" % (total, uptime))
return True
def virsh_check_nodecpustats_percentage(actual_per):
"""
Check the actual nodecpustats percentage adds up to 100%
"""
total = int(round(actual_per['user'] + actual_per['system'] +
actual_per['idle'] + actual_per['iowait']))
if not total == 100:
raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
" as the total percentage value: %d"
" is not equal 100" % total)
def parse_output(output):
"""
To get the output parsed into a dictionary
:param virsh command output
:return: dict of user,system,idle,iowait times
"""
# From the beginning of a line, group 1 is one or more word-characters,
# followed by zero or more whitespace characters and a ':',
# then one or more whitespace characters,
# followed by group 2, which is one or more digit characters,
# e.g as below
# user: 6163690000000
#
regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)")
actual = {}
for line in output.stdout.split('\n'):
match_obj = regex_obj.search(line)
# Due to the extra space in the list
if match_obj is not None:
name = match_obj.group(1)
value = match_obj.group(2)
actual[name] = int(value)
return actual
def parse_percentage_output(output):
"""
To get the output parsed into a dictionary
:param virsh command output
:return: dict of user,system,idle,iowait times
"""
# From the beginning of a line, group 1 is one or more word-characters,
# followed by zero or more whitespace characters and a ':',
# then one or more whitespace characters,
# followed by group 2, which is one or more digit characters,
# e.g as below
# user: 1.5%
#
regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+.\d+)")
actual_percentage = {}
for line in output.stdout.split('\n'):
match_obj = regex_obj.search(line)
# Due to the extra space in the list
if match_obj is not None:
name = match_obj.group(1)
value = match_obj.group(2)
actual_percentage[name] = float(value)
return actual_percentage
# Initialize the variables
itr = int(params.get("inner_test_iterations"))
option = params.get("virsh_cpunodestats_options")
invalid_cpunum = params.get("invalid_cpunum")
status_error = params.get("status_error")
libvirtd = params.get("libvirtd", "on")
# Prepare libvirtd service
if libvirtd == "off":
utils_libvirtd.libvirtd_stop()
# Get the host cpu list
host_cpus_list = utils.cpu_online_map()
# Run test case for 5 iterations default can be changed in subtests.cfg
# file
for i in range(itr):
if status_error == "yes":
if invalid_cpunum == "yes":
option = "--cpu %s" % (len(host_cpus_list) + 1)
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
raise error.TestFail("Command 'virsh nodecpustats' "
"succeeded with libvirtd service "
"stopped, incorrect")
else:
raise error.TestFail("Command 'virsh nodecpustats %s' "
"succeeded (incorrect command)" % option)
elif status_error == "no":
# Run the testcase for each cpu to get the cpu stats
for cpu in host_cpus_list:
option = "--cpu %s" % cpu
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_output(output)
virsh_check_nodecpustats_percpu(actual_value)
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
"not succeeded" % option)
# Run the test case for each cpu to get the cpu stats in percentage
for cpu in host_cpus_list:
option = "--cpu %s --percen
|
t" % cpu
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_percentage_output(output)
virsh_check_nodecpustats_percentage(actual_value)
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
" not succeeded" % option)
option = ''
# Run the test case for total cpus to get the cpus stats
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_output(output)
virsh_check_nodecpustats(actual_value, len(host_cpus_list))
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
" not succeeded" % option)
# Run the test case for the total cpus to get the stats in
# percentage
option = "--percent"
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_percentage_output(output)
virsh_check_nodecpustats_percentage(actual_value)
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
" not succee
|
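parse_output above extracts name/value pairs from lines such as "user: 6163690000000" with a regex. A self-contained sketch of that parsing on a canned sample (the numbers here are made up):

import re

sample = """user:  6163690000000
system:  1692900000000
idle:  25769640000000
iowait:  638480000000"""

# Same pattern as parse_output: a word, a colon, then a run of digits.
regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)")
stats = {}
for line in sample.split('\n'):
    match_obj = regex_obj.search(line)
    if match_obj is not None:
        stats[match_obj.group(1)] = int(match_obj.group(2))

print(stats)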
ryfeus/lambda-packs | pytorch/source/numpy/f2py/__main__.py | Python | mit | 134 | 0 |
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
from numpy.f2py.f2py2e import main
main()
|
YPCrumble/django-annoying | annoying/tests/models.py | Python | bsd-3-clause | 363 | 0 |
from django.db import models
from annoying.fields import AutoOneToOneField
class SuperVillain(models.Model):
name = models.CharField(max_length="20", default="Dr Horrible")
class SuperHero(models.Model):
name = models.CharField(max_length="20", default="Captain Hammer")
mortal_enemy = AutoOneToOneField(SuperVillain, related_name='mortal_enemy')
|
grlee77/nipype | nipype/interfaces/semtools/brains/classify.py | Python | bsd-3-clause | 2,306 | 0.006071 |
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class BRAINSPosteriorToContinuousClassInputSpec(CommandLineInputSpec):
inputWhiteVolume = File(desc="White Matter Posterior Volume", exists=True, argstr="--inputWhiteVolume %s")
inputBasalGmVolume = File(desc="Basal Grey Matter Posterior Volume", exists=True, argstr="--inputBasalGmVolume %s")
inputSurfaceGmVolume = File(desc="Surface Grey Matter Posterior Volume", exists=True, argstr="--inputSurfaceGmVolume %s")
inputCsfVolume = File(desc="CSF Posterior Volume", exists=True, argstr="--inputCsfVolume %s")
inputVbVolume = File(desc="Venous Blood Posterior Volume", exists=True, argstr="--inputVbVolume %s")
inputCrblGmVolume = File(desc="Cerebellum Grey Matter Posterior Volume", exists=True, argstr="--inputCrblGmVolume %s")
inputCrblWmVolume = File(desc="Cerebellum White Matter Posterior Volume", exists=True, argstr="--inputCrblWmVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output Continuous Tissue Classified Image", argstr="--outputVolume %s")
class BRAINSPosteriorToContinuousClassOutputSpec(TraitedSpec):
outputVolume = File(desc="Output Continuous Tissue Classified Image", exists=True)
class BRAINSPosteriorToContinuousClass(SEMLikeCommandLine):
"""title: Tissue Classification
category: BRAINS.Classify
description: This program will generate an 8-bit continuous tissue classified image based on BRAINSABC posterior images.
version: 3.0
documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSClassify
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Vincent A. Magnotta
acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568
"""
input_spec = BRAINSPosteriorToContinuousClassInputSpec
output_spec = BRAINSPosteriorToContinuousClassOutputSpec
_cmd = " BRAINSPosteriorToContinuousClass "
_outputs_filenames = {'outputVolume': 'outputVolume'}
_redirect_x = False
|
AndyHannon/ctrprogress | wowapi.py | Python | mit | 5,784 | 0.007089 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import json
import logging
import time
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
class APIKey(ndb.Model):
key = ndb.StringProperty(indexed=True,required=True)
class Importer:
def load(self, toonlist, data):
q = APIKey.query()
apikey = q.fetch()[0].key
# Request all of the toon data from the blizzard API and determine the
# group's ilvls, armor type counts and token type counts. subs are not
# included in the counts, since they're not really part of the main
# group.
for toon in toonlist:
try:
# TODO: this object can probably be a class instead of another dict
newdata = dict()
data.append(newdata)
url = 'https://us.api.battle.net/wow/character/aerie-peak/%s?fields=progression,items&locale=en_US&apikey=%s' % (toon, apikey)
# create the rpc object for the fetch method. the deadline
# defaults to 5 seconds, but that seems to be too short for the
# Blizzard API site sometimes. setting it to 10 helps a little
# but it makes page loads a little slower.
rpc = urlfetch.create_rpc(10)
rpc.callback = self.create_callback(rpc, toon, newdata)
urlfetch.make_fetch_call(rpc, url)
newdata['rpc'] = rpc
newdata['toon'] = toon
# The Blizzard API has a limit of 10 calls per second. Sleep here
# for a very brief time to avoid hitting that limit.
time.sleep(0.1)
except:
logging.error('Failed to create rpc for %s' % toon)
# Now that all of the RPC calls have been created, loop through the data
# dictionary one more time and wait for each fetch to be completed. Once
# all of the waits finish, then we have all of the data from the
# Blizzard API and can loop through all of it and build the page.
start = time.time()
for d in data:
try:
d['rpc'].wait()
except:
logging.error('Waiting for rpc failed')
end = time.time()
logging.info("Time spent retrieving data: %f seconds" % (end-start))
# Callback that handles the result of the call to the Blizzard API. This will fill in
# the toondata dict for the requested toon with either data from Battle.net or with an
# error message to display on the page.
def handle_result(self, rpc, name, toondata):
try:
response = rpc.get_result()
except urlfetch_errors.DeadlineExceededError:
logging.error('urlfetch threw DeadlineExceededError on toon %s' % name.encode('ascii','ignore'))
toondata['toon'] = name
toondata['status'] = 'nok'
toondata['reason'] = 'Timeout retrieving data from Battle.net for %s. Refresh page to try again.' % name
return
except urlfetch_errors.DownloadError:
logging.error('urlfetch threw DownloadError on toon %s' % name.encode('ascii','ignore'))
toondata['toon'] = name
toondata['status'] = 'nok'
toondata['reason'] = 'Network error retrieving data from Battle.net for toon %s. Refresh page to try again.' % name
return
except:
logging.error('urlfetch threw unknown exception on toon %s' % name.encode('ascii','ignore'))
toondata['toon'] = name
toondata['status'] = 'nok'
toondata['reason'] = 'Unknown error retrieving data from Battle.net for toon %s. Refresh page to try again.' % name
return
# change the json from the response into a dict of data and store it
# into the toondata object that was passed in.
jsondata = json.loads(response.content)
toondata.update(jsondata);
# Blizzard's API will return an error if it couldn't retrieve the data
# for some reason. Check for this and log it if it fails. Note that
# this response doesn't contain the toon's name so it has to be added
# in afterwards.
if 'status' in jsondata and jsondata['status'] == 'nok':
logging.error('Blizzard API failed to find toon %s for reason: %s' %
(name.encode('ascii','ignore'), jsondata['reason']))
toondata['toon'] = name
toondata['reason'] = "Error retrieving data for %s from Blizzard API: %s" % (name, jsondata['reason'])
return
# we get all of the data here, but we want to filter out just the raids
# we care about so that it's not so much data returned from the importer
validraids = ['Highmaul','Blackrock Foundry']
if toondata['progression'] != None:
toondata['progression']['raids'] = [r for r in toondata['progression']['raids'] if r['name'] in validraids]
del toondata['rpc']
def create_callback(self, rpc, name, toondata):
return lambda: self.handle_result(rpc, name, toondata)
class Setup:
# The new Battle.net Mashery API requires an API key when using it. This
# method stores an API in the datastore so it can used in later page requests.
def setkey(self,apikey):
# Delete all of the entities out of the apikey datastore so fresh entities
# can be loaded.
q = APIKey.query()
result = q.fetch();
if (len(result) == 0):
k = APIKey(key = apikey)
k.put()
else:
k = result[0]
k.key = apikey
k.put()
|
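Importer.load above fires one asynchronous fetch per character, sleeps briefly to stay under the API's 10-calls-per-second limit, and then waits for every call to finish. A generic sketch of that create-then-wait structure using threads instead of App Engine's urlfetch RPCs (the fetch body is a placeholder):

import time
from threading import Thread

def fetch(name, results):
    # Placeholder for the real HTTP request; just record something per name.
    results[name] = 'ok'

def load(names, calls_per_second=10):
    results = {}
    threads = []
    for name in names:
        t = Thread(target=fetch, args=(name, results))
        t.start()
        threads.append(t)
        # Brief pause so we stay under the rate limit, as in load() above.
        time.sleep(1.0 / calls_per_second)
    for t in threads:
        # Second pass: wait for every request, mirroring the rpc.wait() loop.
        t.join()
    return results

print(load(['alpha', 'bravo', 'charlie']))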
nirvaris/nirvaris-djangofence | djangofence/urls.py | Python | mit | 566 | 0.003534 |
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import UploadBlackListView, DemoView, UdateBlackListView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^upload-blacklist$', login_required(UploadBlackListView.as_view()), name='upload-blacklist'),
url(r'^update-blacklist$', UdateBlackListView.as_view(), name='update-blacklist'),
url(r'^profile/', include('n_profile.urls')),
url(r'^demo$', DemoView.as_view(), name='demo'),
]
|
subodhchhabra/airflow | airflow/contrib/hooks/segment_hook.py | Python | apache-2.0 | 3,748 | 0.0008 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Segment Hook
which allows you to connect to your Segment account,
retrieve data from it or write to that file.
NOTE: this hook also relies on the Segment analytics package:
https://github.com/segmentio/analytics-python
"""
import analytics
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
class SegmentHook(BaseHook, LoggingMixin):
def __init__(
self,
segment_conn_id='segment_default',
segment_debug_mode=False,
*args,
**kwargs
):
"""
Create new connection to Segment
and allows you to pull data out of Segment or write to it.
You can then use that file with other
Airflow operators to move the data around or interact with segment.
:param segment_conn_id: the name of the connection that has the parameters
we need to connect to Segment.
The connection should be type `json` and include a
write_key security token in the `Extras` field.
:type segment_conn_id: str
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
:type segment_debug_mode: boolean
.. note::
You must include a JSON structure in the `Extras` field.
We need a user's security token to connect to Segment.
So we define it in the `Extras` field as:
`{"write_key":"YOUR_SECURITY_TOKEN"}`
"""
self.segment_conn_id = segment_conn_id
self.segment_debug_mode = segment_debug_mode
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(self.segment_conn_id)
self.extras = self.connection.extra_dejson
self.write_key = self.extras.get('write_key')
if self.write_key is None:
raise AirflowException('No Segment write key provided')
def get_conn(self):
self.log.info('Setting write key for Segment analytics connection')
analytics.debug = self.segment_debug_mode
if self.segment_debug_mode:
self.log.info('Setting Segment analytics connection to debug mode')
analytics.on_error = self.on_error
analytics.write_key = self.write_key
return analytics
def on_error(self, error, items):
"""
Handles error callbacks when using Segment with segment_debug_mode set to True
"""
self.log.error('Encountered Segment error: {segment_error} with '
'items: {with_items}'.format(segment_error=error,
with_items=items))
raise AirflowException('Segment error: {}'.format(error))
|
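The SegmentHook docstring above requires the connection's Extras field to be a JSON object carrying a write_key. A minimal sketch of pulling the key out of such a blob (the token value is a placeholder):

import json

extras_json = '{"write_key": "YOUR_SECURITY_TOKEN"}'  # placeholder token
extras = json.loads(extras_json)
write_key = extras.get('write_key')
if write_key is None:
    # Mirrors the hook's failure mode when no key is configured.
    raise ValueError('No Segment write key provided')
print(write_key)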
jonparrott/gcloud-python | firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2.py | Python | apache-2.0 | 2,565 | 0.011696 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.firestore_v1beta1.proto import common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2
from google.cloud.firestore_v1beta1.proto import document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto',
package='google.firestore.v1beta1',
syntax='proto3',
serialized_pb=_b('\nEgoogle/cloud/firestore_v1beta1/proto/event_flow_document_change.proto\x12\x18google.firestore.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.protoB\xa2\x01\n\x1c\x63om.google.firestore.v1beta1B\x1c\x45ventFlowDocumentChangeProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR,google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.firestore.v1beta1B\034EventFlowDocumentChangeProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\252\002\036Google.Cloud.Firestore.V1Beta1'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
klausman/scion | python/sciond/sciond.py | Python | apache-2.0 | 30,793 | 0.000974 |
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`sciond` --- Reference endhost SCION Daemon
================================================
"""
# Stdlib
import logging
import os
import errno
import threading
import time
from itertools import product
# External
from external.expiring_dict import ExpiringDict
# SCION
from lib.app.sciond import get_default_sciond_path
from lib.defines import (
GEN_CACHE_PATH,
PATH_FLAG_SIBRA,
PATH_REQ_TOUT,
SCIOND_API_SOCKDIR,
)
from lib.errors import SCIONBaseError, SCIONParseError, SCIONServiceLookupError
from lib.log import log_exception
from lib.msg_meta import SockOnlyMetadata
from lib.path_seg_meta import PathSegMeta
from lib.packet.ctrl_pld import CtrlPayload, mk_ctrl_req_id
from lib.packet.path import SCIONPath
from lib.packet.path_mgmt.base import PathMgmt
from lib.packet.path_mgmt.rev_info import (
SignedRevInfoCertFetchError,
RevInfoExpiredError,
RevInfoValidationError,
RevocationInfo,
SignedRevInfo,
SignedRevInfoVerificationError
)
from lib.packet.path_mgmt.seg_req import PathSegmentReply, PathSegmentReq
from lib.packet.scion_addr import ISD_AS
from lib.packet.scmp.types import SCMPClass, SCMPPathClass
from lib.path_combinator import build_shortcut_paths, tuples_to_full_paths
from lib.path_db import DBResult, PathSegmentDB
from lib.rev_cache import RevCache
from lib.sciond_api.as_req import SCIONDASInfoReply, SCIONDASInfoReplyEntry, SCIONDASInfoRequest
from lib.sciond_api.revocation import SCIONDRevReply, SCIONDRevReplyStatus
from lib.sciond_api.host_info import HostInfo
from lib.sciond_api.if_req import SCIONDIFInfoReply, SCIONDIFInfoReplyEntry, SCIONDIFInfoRequest
from lib.sciond_api.base import SCIONDMsg
from lib.sciond_api.path_meta import FwdPathMeta, PathInterface
from lib.sciond_api.path_req import (
SCIONDPathRequest,
SCIONDPathReplyError,
SCIONDPathReply,
SCIONDPathReplyEntry,
)
from lib.sciond_api.revocation import SCIONDRevNotification
from lib.sciond_api.segment_req import (
SCIONDSegTypeHopReply,
SCIONDSegTypeHopReplyEntry,
SCIONDSegTypeHopRequest,
)
from lib.sciond_api.service_req import (
SCIONDServiceInfoReply,
SCIONDServiceInfoReplyEntry,
SCIONDServiceInfoRequest,
)
from lib.sibra.ext.resv import ResvBlockSteady
from lib.socket import ReliableSocket
from lib.thread import thread_safety_net
from lib.types import (
CertMgmtType,
PathMgmtType as PMT,
PathSegmentType as PST,
PayloadClass,
LinkType,
SCIONDMsgType as SMT,
ServiceType,
TypeBase,
)
from lib.util import SCIONTime
from sciond.req import RequestState
from scion_elem.scion_elem import SCIONElement
_FLUSH_FLAG = "FLUSH"
class SCIONDaemon(SCIONElement):
"""
The SCION Daemon used for retrieving and combining paths.
"""
MAX_REQS = 1024
# Time a path segment is cached at a host (in seconds).
SEGMENT_TTL = 300
# Empty Path TTL
EMPTY_PATH_TTL = SEGMENT_TTL
def __init__(self, conf_dir, addr, api_addr, run_local_api=False,
port=None, spki_cache_dir=GEN_CACHE_PATH, prom_export=None, delete_sock=False):
"""
Initialize an instance of the class SCIONDaemon.
"""
super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir,
prom_export=prom_export, public=(addr, port))
up_labels = {**self._labels, "type": "up"} if self._labels else None
down_labels = {**self._labels, "type": "down"} if self._labels else None
core_labels = {**self._labels, "type": "core"} if self._labels else None
self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=up_labels)
self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=down_labels)
self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=core_labels)
self.rev_cache = RevCache()
# Keep track of requested paths.
self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT)
self.req_path_lock = threading.Lock()
self._api_sock = None
self.daemon_thread = None
os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
self.api_addr = (api_addr or get_default_sciond_path())
if delete_sock:
try:
os.remove(self.api_addr)
except OSError as e:
if e.errno != errno.ENOENT:
logging.error("Could not delete socket %s: %s" % (self.api_addr, e))
self.CTRL_PLD_CLASS_MAP = {
PayloadClass.PATH: {
PMT.REPLY: self.handle_path_reply,
PMT.REVOCATION: self.handle_revocation,
},
PayloadClass.CERT: {
CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
CertMgmtType.TRC_REPLY: self.process_trc_reply,
CertMgmtType.TRC_REQ: self.process_trc_request,
},
}
self.SCMP_PLD_CLASS_MAP = {
SCMPClass.PATH:
{SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation},
}
if run_local_api:
self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond"))
self._socks.add(self._api_sock, self.handle_accept)
@classmethod
def start(cls, conf_dir, addr, api_addr=None, run_local_api=False, port=0):
"""
Initializes and starts a SCIOND instance.
"""
inst = cls(conf_dir, addr, api_addr, run_local_api, port)
name = "SCIONDaemon.run %s" % inst.addr.isd_as
inst.daemon_thread = threading.Thread(
target=thread_safety_net, args=(inst.run,), name=name, daemon=True)
inst.daemon_thread.start()
logging.debug("sciond started with api_addr = %s", inst.api_addr)
def _get_msg_meta(self, packet, addr, sock):
if sock != self._udp_sock:
return packet, SockOnlyMetadata.from_values(sock) # API socket
else:
return super()._get_msg_meta(packet, addr, sock)
def handle_msg_meta(self, msg, meta):
"""
Main routine to handle incoming SCION messages.
"""
if isinstance(meta, SockOnlyMetadata): # From SCIOND API
try:
sciond_msg = SCIONDMsg.from_raw(msg)
except SCIONParseError as err:
logging.error(str(err))
return
self.api_handle_request(sciond_msg, meta)
return
super().handle_msg_meta(msg, meta)
def handle_path_reply(self, cpld, meta):
"""
Handle path reply from local path server.
""
|
"
pmgt = cpld.union
path_reply = pmgt.union
assert isinstance(path_reply, PathSegmentReply), type(path_reply)
recs = path_reply.recs()
for srev_info in recs.iter_srev_infos():
self.check_revocation(srev_info, lambda x: self.continue_revocation_processing(
srev_info) if not x else False, meta)
req = path_reply.req()
key = req.dst_ia(), req.flags()
with self.req_path_lock:
r = self.requested_paths.get(key)
if r:
r.notify_reply(path_reply)
else:
logging.warning("No outstanding request found for %s", key)
for type_, pcb in recs.iter_pcbs():
seg_meta = PathSegMeta(pcb, self.continue_seg_processing,
meta, type_, params=(r,))
self._process_path_seg(seg_meta, cpld.req_id)
def
|
geotagx/geotagx-pybossa-archive | pybossa/hateoas.py | Python | agpl-3.0 | 2,393 | 0 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask import url_for
class Hateoas(object):
def link(self, rel, title, href):
return "<link rel='%s' title='%s' href='%s'/>" % (rel, title, href)
def create_link(self, item, rel='self'):
title = item.__class__.__name__.lower()
method = ".api_%s" % title
href = url_for(method, id=item.id, _external=True)
return self.link(rel, title, href)
def create_links(self, item):
cls = item.__class__.__name__.lower()
if cls == 'taskrun':
link = self.create_link(item)
links = []
if item.app_id is not None:
links.append(self.create_link(item.app, rel='parent'))
if item.task_id is not None:
links.append(self.create_link(item.task, rel='parent'))
return links, link
elif cls == 'task':
link = self.create_link(item)
links = []
if item.app_id is not None:
links = [self.create_link(item.app, rel='parent')]
return links, link
elif cls == 'category':
return None, self.create_link(item)
elif cls == 'app':
link = self.create_link(item)
links = []
if item.category_id is not None:
links.append(self.create_link(item.category, rel='category'))
return links, link
else:
return False
def remove_links(self, item):
"""Remove HATEOAS link and links from item"""
if item.get('link'):
item.pop('link')
if item.get('links'):
item.pop('links')
return item
|
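Hateoas.link above is plain string formatting; a short sketch of the element it produces for sample values (the URLs are placeholders, and create_link only adds Flask's url_for on top of this):

def link(rel, title, href):
    # Same formatting as Hateoas.link above.
    return "<link rel='%s' title='%s' href='%s'/>" % (rel, title, href)

print(link('self', 'app', 'http://example.com/api/app/1'))
print(link('parent', 'task', 'http://example.com/api/task/42'))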
nubakery/smith3 | python/spcaspt2/gen_split.py | Python | gpl-2.0 | 2,179 | 0.001377 |
#!/opt/local/bin/python
import string
import os
def header(n) :
return "//\n\
// BAGEL - Brilliantly Advanced General Electronic Structure Library\n\
// Filename: SPCASPT2_gen" + str(n) + ".cc\n\
// Copyright (C) 2014 Toru Shiozaki\n\
//\n\
// Author: Toru Shiozaki <shiozaki@northwestern.edu>\n\
// Maintainer: Shiozaki group\n\
//\n\
// This file is part of the BAGEL package.\n\
//\n\
// This program is free software: you can redistribute it and/or modify\n\
// it under the terms of the GNU General Public License as published by\n\
// the Free Software Foundation, either version 3 of the License, or\n\
// (at your option) any later version.\n\
//\n\
// This program is distributed in the hope that it will be useful,\n\
// but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
// GNU General Public License for more details.\n\
//\n\
// You should have received a copy of the GNU General Public License\n\
// along with this program. If not, see <http://www.gnu.org/licenses/>.\n\
//\n\
\n\
#include <bagel_config.h>\n\
#ifdef COMPILE_SMITH\n\
\n\
#include <src/smith/caspt2/SPCASPT2_tasks" + str(n) + ".h>\n\
\n\
using namespace std;\n\
using namespace bagel;\n\
using namespace bagel::SMITH;\n\
using namespace bagel::SMITH::SPCASPT2;\n\
\n\
"
footer = "#endif\n"
f = open('SPCASPT2_gen.cc', 'r')
lines = f.read().split("
|
\n")[32:]
tasks = []
tmp = ""
for line in lines:
if (line[0:4] == "Task"):
if (tmp != ""):
tasks.append(tmp)
tmp = ""
if (line != ""):
tmp += line + "\n"
if (line == "}"):
tmp += "\n"
tasks.append(tmp)
tmp = ""
num = 0
chunk = 50
for i in range(len(tasks)):
if (num != 0 and num % chunk == 0):
n = num / chunk
fout = open("SPCASPT2_gen" + str(n) + ".cc", "w")
out = header(n) + tmp + footer
fout.write(out)
fout.close()
tmp = ""
num = num+1
tmp = tmp + tasks[i];
n = (num-1) / chunk + 1
fout = open("SPCASPT2_gen" + str(n) + ".cc", "w")
out = header(n) + tmp + footer
fout.write(out)
fout.close()
os.remove("SPCASPT2_gen.cc")
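The loop above works but is easy to misread because `tmp` carries state across iterations; the same chunking, written as a hedged self-contained sketch that reuses the `header`/`footer` helpers defined above:
def write_chunks(tasks, chunk=50):
    # Write SPCASPT2_gen1.cc, SPCASPT2_gen2.cc, ... with up to `chunk` tasks per file.
    nfiles = (len(tasks) + chunk - 1) / chunk  # ceiling division (Python 2 integer division)
    for n in range(1, nfiles + 1):
        body = "".join(tasks[(n - 1) * chunk:n * chunk])
        fout = open("SPCASPT2_gen" + str(n) + ".cc", "w")
        fout.write(header(n) + body + footer)
        fout.close()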
|
duanx/bdcspider
|
bdmysqlDB.py
|
Python
|
gpl-2.0
| 3,758 | 0.00612 |
#!/usr/bin/env python
# coding:utf-8
__author__ = 'lixin'
'''
Installing MySQL
You can download the latest Community Server 5.6.x release directly from the official MySQL website. MySQL is cross-platform; pick the installer for your platform and install it.
During installation MySQL will prompt for the root user's password -- make sure you remember it. If you are afraid of forgetting it, just set it to "password".
On Windows, choose the UTF-8 encoding during installation so that Chinese text is handled correctly.
On Mac or Linux, edit MySQL's configuration file and change the database's default encoding to UTF-8. The configuration file usually lives at /etc/my.cnf or /etc/mysql/my.cnf:
[client]
default-character-set = utf8
[mysqld]
default-storage-engine = INNODB
character-set-server = utf8
collation-server = utf8_general_ci
After restarting MySQL, you can check the encoding from the MySQL command-line client:
$ mysql -u root -p
Enter password:
Welcome to the MySQL monitor...
...
mysql> show variables like '%char%';
+--------------------------+--------------------------------------------------------+
| Variable_name | Value |
+--------------------------+--------------------------------------------------------+
| character_set_client | utf8 |
| character_set_connection | utf8 |
| character_set_database | utf8 |
| character_set_filesystem | binary |
| character_set_results | utf8 |
| character_set_server | utf8 |
| character_set_system | utf8 |
| character_sets_dir | /usr/local/mysql-5.1.65-osx10.6-x86_64/share/charsets/ |
+--------------------------+--------------------------------------------------------+
8 rows in se
|
t (0.00 sec)
Seeing utf8 in the output means the encoding is set up correctly.
'''
# The MySQL driver needs to be installed first; the command is:
# $ pip install my
|
sql-connector-python --allow-external mysql-connector-python
# Imports:
import uuid
from datetime import datetime
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Create the declarative base class for the models:
Base = declarative_base()
# Initialize the database connection:
engine = create_engine('mysql+mysqlconnector://root:Duanx1234@localhost:3306/test')
# Create the DBSession class:
DBSession = sessionmaker(bind=engine)
class Sourcedata(Base):
    # Table name:
__tablename__ = 'sourcedata'
    # Table structure:
id = Column(String(50), primary_key=True)
name = Column(String(500))
url = Column(String(500))
sharetime = Column(String(20))
createtime = Column(String(20))
class SourcedataDao:
def batchInsert(self, flist):
try:
            # Create a session object:
session = DBSession()
for sd in flist:
                # Create a new Sourcedata object:
new_sourcedata = Sourcedata(id=str(uuid.uuid4()), name=sd.name, url=sd.url, sharetime=sd.sharetime, createtime=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                # Add it to the session:
session.add(new_sourcedata)
print "insert a new_sourcedata"
            # Commit, which saves the data to the database:
session.commit()
except Exception,e:
print e.message
finally:
            # Close the session:
session.close()
class sdata:
def __init__(self,n,u):
self.name=n
self.url=u
self.sharetime=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if __name__ == "__main__":
flist = []
sdDao = SourcedataDao()
for i in range(10):
flist.append(sdata("file" + str(i), "pan.baidu.com/file" + str(i)))
sdDao.batchInsert(flist)
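For symmetry with the insert path above, a hedged sketch of reading rows back through the same `DBSession`/`Sourcedata` mapping (this query helper is not part of the original script):
def list_sourcedata(limit=10):
    # Fetch the most recently created rows via a short-lived session.
    session = DBSession()
    try:
        rows = session.query(Sourcedata).order_by(Sourcedata.createtime.desc()).limit(limit).all()
        for row in rows:
            print row.name, row.url, row.sharetime
        return rows
    finally:
        session.close()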
|
yuxng/Deep_ISM
|
ISM/lib/datasets/rgbd_scenes.py
|
Python
|
mit
| 4,957 | 0.004035 |
__author__ = 'yuxiang'
import os
import datasets
import datasets.rgbd_scenes
import datasets.imdb
import numpy as np
import subprocess
import cPickle
class rgbd_scenes(datasets.imdb):
def __init__(self, image_set, rgbd_scenes_path=None):
datasets.imdb.__init__(self, 'rgbd_scenes_' + image_set)
self._image_set = image_set
self._rgbd_scenes_path = self._get_default_path() if rgbd_scenes_path is None \
else rgbd_scenes_path
self._data_path = os.path.join(self._rgbd_scenes_path, 'imgs')
self._classes = ('__background__', 'bowl', 'cap', 'cereal_box', 'coffee_mug', 'coffee_table', 'office_chair', 'soda_can', 'sofa', 'table')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index()
self._roidb_handler = self.gt_roidb
assert os.path.exists(self._rgbd_scenes_path), \
'rgbd_scenes path does not exist: {}'.format(self._rgbd_scenes_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, index + '-color' + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def depth_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.depth_path_from_index(self.image_index[i])
def depth_path_from_index(self, index):
"""
Construct an depth path from the image's "index" identifier.
"""
depth_path = os.path.join(self._data_path, index + '-depth' + self._image_ext)
assert os.path.exists(depth_path), \
'Path does not exist: {}'.format(depth_path)
return depth_path
def metadata_path_at(self, i):
"""
Return the absolute path to metadata i in the image sequence.
"""
return self.metadata_path_from_index(self.image_index[i])
def metadata_path_from_index(self, index):
"""
Construct an metadata path from the image's "index" identifier.
"""
metadata_path = os.path.join(self._data_path, index + '-meta.mat')
return metadata_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
image_set_file = os.path.join(self._rgbd_scenes_path, self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.rstrip('\n') for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
        Return the default path where the RGB-D Scenes dataset is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'RGBD_Scenes', 'rgbd-scenes-v2')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache
|
_file):
with open(cache_file, 'rb') as fid:
|
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_rgbd_scenes_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def _load_rgbd_scenes_annotation(self, index):
"""
Load class name and meta data
"""
# image path
image_path = self.image_path_from_index(index)
# depth path
depth_path = self.depth_path_from_index(index)
# metadata path
metadata_path = self.metadata_path_from_index(index)
boxes = []
gt_class = []
return {'image': image_path,
'depth': depth_path,
'meta_data': metadata_path,
'boxes': boxes,
'gt_classes': gt_class,
'flipped' : False}
if __name__ == '__main__':
d = datasets.rgbd_scenes('val')
res = d.roidb
from IPython import embed; embed()
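A small, hedged usage sketch of the path helpers above; it assumes the rgbd-scenes-v2 layout and a 'val' split file actually exist under the default path:
# Illustrative only -- mirrors the __main__ block above.
d = datasets.rgbd_scenes('val')
for i in xrange(min(3, len(d.image_index))):
    print d.image_path_at(i), d.depth_path_at(i), d.metadata_path_at(i)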
|
Stanford-Online/edx-ora2
|
openassessment/xblock/resolve_dates.py
|
Python
|
agpl-3.0
| 10,291 | 0.003984 |
"""
Resolve unspecified dates and date strings to datetimes.
"""
import datetime as dt
from dateutil.parser import parse as parse_date
import pytz
class InvalidDateFormat(Exception):
"""
The date string could not be parsed.
"""
pass
class DateValidationError(Exception):
"""
Dates are not semantically valid.
"""
pass
DISTANT_PAST = dt.datetime(dt.MINYEAR, 1, 1, tzinfo=pytz.utc)
DISTANT_FUTURE = dt.datetime(dt.MAXYEAR, 1, 1, tzinfo=pytz.utc)
def _parse_date(value, _):
"""
Parse an ISO formatted datestring into a datetime object with timezone set to UTC.
Args:
value (str or datetime): The ISO formatted date string or datetime object.
_ (function): The i18n service function used to get the appropriate
text for a message.
Returns:
datetime.datetime
Raises:
InvalidDateFormat: The date string could not be parsed.
"""
if isinstance(value, dt.datetime):
return value.replace(tzinfo=pytz.utc)
elif isinstance(value, basestring):
try:
return parse_date(value).replace(tzinfo=pytz.utc)
except ValueError:
raise InvalidDateFormat(
_("'{date}' is an invalid date format. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.").format(
date=value
)
)
else:
raise InvalidDateFormat(_("'{date}' must be a date string or datetime").format(date=value))
def parse_date_value(date, _):
""" Public method for _parse_date """
return _parse_date(date, _)
def resolve_dates(start, end, date_ranges, _):
"""
Resolve date strings (including "default" dates) to datetimes.
The basic rules are:
1) Unset problem start dates default to the distant past.
2) Unset problem end dates default to the distant future.
3) Unset start dates default to the start date of the previous assessment/submission.
(The first submission defaults to the problem start date.)
4) Unset end dates default to the end date of the following assessment/submission.
(The last assessment defaults to the problem end date.)
5) `start` resolves to the earliest start date.
6) `end` resolves to the latest end date.
7) Ensure that `start` is before `end`.
8) Ensure that `start` is before the earliest due date.
9) Ensure that `end` is after the latest start date.
Overriding start/end dates:
* Rules 5-9 may seem strange, but they're necessary. Unlike `date_ranges`,
the `start` and `end` values are inherited by the XBlock from the LMS.
This means that you can set `start` and `end` in Studio, effectively bypassing
our validation rules.
* On the other hand, we *need* the start/due dates so we can resolve unspecified
date ranges to an actual date. For example,
if the problem closes on April 15th, 2014, but the course author hasn't specified
a due date for a submission, we need ensure the submission closes on April 15th.
* For this reason, we use `start` and `end` only if they satisfy our validation
rules. If not (because a course author has changed them to something invalid in Studio),
we use the dates that the course author specified in the problem definition,
which (a) MUST satisfy our ordering constraints, and (b) are probably
what the author intended.
Example:
Suppose I have a problem with a submission and two assessments:
| |
    | |== submission ==| |== peer-assessment ==| |== self-assessment ==| |
| |
and I set start/due dates for the submission and self-assessment, but not for peer-assessment.
Then by default, peer-assessment will "expand":
| |
| |== submission ==| |== self-assessment ==| |
| |============================ peer-assessment ==========================| |
| |
If I then remove the due date for the submission, but add a due date for peer-assessment:
| |
| |== submission =============================| |== self-assessment ==| |
| |============== peer-assessment ============| |
| |
If no dates are set, start dates default to the distant past and end dates default
to the distant future:
| |
| |================= submission ==============| |
| |============== self-assessment ============| |
| |============== peer-assessment ============| |
| |
Args:
start (str, ISO date format, or datetime): When the problem opens.
A value of None indicates that the problem is always open.
end (str, ISO date format, or datetime): When the problem closes.
A value of None indicates that the problem never closes.
date_ranges (list of tuples): list of (start, end) ISO date string tuples indicating
the start/end timestamps (date string or datetime) of each submission/assessment.
_ (function): An i18n service function to use for retrieving the
proper text.
Returns:
start (datetime): The resolved start date
end (datetime): The resolved end date.
list of (start, end) tuples, where both elements are datetime objects.
Raises:
DateValidationError
InvalidDateFormat
"""
# Resolve problem start and end dates to minimum and maximum dates
start = _parse_date(start, _) if start is not None else DISTANT_PAST
end = _parse_date(end, _) if end is not None else DISTANT_FUTURE
resolved_starts = []
resolved_ends = []
# Amazingly, Studio allows the release date to be after the due date!
# This can cause a problem if the course author has configured:
#
# 1) Problem start >= problem due, and
# 2) Start/due dates that resolve to the problem start/due date.
#
# In this case, all submission/assessment start dates
# could default to the problem start while
# due dates default to the problem due date, violating
# the constraint that start dates always precede due dates.
# If we detect that the author has done this,
# we set the start date to just before
    # the due date, so we (just barely) satisfy the validation rules.
if start >= end:
start =
|
end - dt.timedelta(milliseconds=1)
# Override start/end dates if they fail to satisfy our validation rules
# These are the only parameters a course author can change in Studio
# without triggering our validation rule
|
s, so we need to use sensible
# defaults. See the docstring above for a more detailed justification.
for step_start, step_end in date_ranges:
if step_start is not None:
parsed_start = _parse_date(step_start, _)
start = min(start, parsed_start)
end = max(end, parsed_start + dt.timedelta(milliseconds=1))
if step_end is not None:
parsed_end = _parse_date(step_end, _)
end = max(end, parsed_end)
start = min(start, parsed_end - dt.timedelta(milliseconds=1))
# Iterate through the list forwards and backwards simultaneously
# As we iterate forwards, resolve start dates.
# As we iterate backwards, resolve end dates.
prev_start = start
prev_end = end
for index in range(len(date_ranges)):
reverse_index = len(date_
|
road2ge/cyber-defense-scripts
|
main-for-windows.py
|
Python
|
gpl-3.0
| 9,651 | 0.017615 |
# This script is actually for Cyber Security on Windows 7. Should mostly work
# for Windows 8 and 10 too. I just absolutely hate using Windows 8 and refuse
# to test it on any Windows 8 machine.
from __future__ import print_function
from subprocess import call
from subprocess import check_output
import os
############################# User Management #############################
# Get username
username = os.getenv('username')
# Make alphanumeric variable
alpha = 'abcdefghijklmnopqrstuvwxyz'
numbers = '1234567890'
alpha_numeric = alpha + alpha.upper() + numbers
registry_commands = open("commands.txt", "r")
# Initialize important variables
users = []
incoming_user = ''
times_through = 1
temp_users = str(check_output('net user'))
# str.replace() returns a new string, so the result has to be assigned back.
for not_allowed_characters in '"/\[]:;|=,+*?<>':
    temp_users = temp_users.replace(not_allowed_characters, '')
temp_users = temp_users.replace("\r\n","")
temp_users = temp_users.replace("\r","")
temp_users = temp_users.replace("\n","")
# " / \ [ ] : ; | = , + * ? < > are the characters not allowed in usernames
# Get a list of all users on the system
for character in temp_users:
if character in alpha_numeric or character in "-#\'.!@$%^&()}{":
incoming_user += character
elif len(incoming_user) > 0:
if times_through > 5:
users.append(incoming_user)
incoming_user = ''
times_through += 1
# Remove unnecessary stuff at end
users = users[0:len(users)-4]
# Print all users
print('All the users currently on this computer are ' + str(users))
def user_management(users):
def should_be_admin(user):
# Should the user be an admin
should_be_admin = raw_input(user + " is an administrator. Should they be? y/n. ")
if should_be_admin == 'y':
return True
if should_be_admin == 'n':
return False
def should_be_user(user):
# Should the user be a user
should_be_user = raw_input(user + " is a user. Should they be? y/n. ")
if should_be_user == 'y':
return True
if should_be_user == 'n':
return False
for user in users:
# Iterate through user list
if user in check_output('net localgroup Administrators'):
# If user is in the Administrators localgroup
if not should_be_admin(user):
print('Removing ' + user + ' from the Administrators group')
os.system('net localgroup Administrators ' + user + ' /delete')
else:
print('OK. We are keeping ' + user + ' in the Administrators group.')
else:
should_be_user_answer = should_be_user(user)
if not should_be_user_answer:
print('Removing ' + user)
os.system('net user ' + user + ' /delete')
if should_be_admin(user):
if user not in check_output('net localgroup Administrators'):
if should_be_admin(user):
                        print('Adding ' + user + ' to the Administrators group')
os.system('net localgroup Administrators ' + user + ' /add')
# Ask if we should do user management stuff.
do_user_management = raw_input("Shall we manage users? y/n. ")
if do_user_management == 'y':
user_management(users)
############################# Registry keys and such #############################
if raw_input("Shall we change some registry stuff? y/n. ") == 'y':
# Password policy automagic
    print('Changing password policies and such...')
os.system('net accounts /FORCELOGOFF:30 /MINPWLEN:8 /MAXPWAGE:30 /MINPWAGE:10 /UNIQUEPW:5')
# Clean DNS cache, cause why not
print('Bro, I cleaned your DNS cache. Deal with it.')
os.system('ipconfig /flushdns')
# Disable built-in accounts
print('I really hope you weren\'t the default Administrator account')
os.system('net user Guest /active:NO')
os.system('net user Administrator /active:NO')
# Make auditing great again.
print('Auditing now on! Yay!!!!')
os.system('auditpol /set /category:* /success:enable')
os.system('auditpol /set /category:* /failure:enable')
# Enable firewall
print('The firewall torch has been passed on to you')
os.system('netsh advfirewall set allprofiles state on')
os.system('echo You\'re going to have to type exit')
#I have no idea what I was doing here....
os.system('secedit /import /db secedit.sdb /cfg cyber.inf /overwrite /log MyLog.txt')
reg_dir = '"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System\\ '
for command in (('FilterAdministratorToken"','1'),('ConsentPromptBehaviorAdmin"','1'),('ConsentPromptBehaviorUser"','1'),('EnableInstallerDetection"','1'),('ValidateAdminCodeSignatures"','1'),('EnableLUA"','1'),('PromptOnSecureDesktop"','1'),('EnableVirtualization"','1'),):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\\'
for command in (('AUOptions"', '4'),('ElevateNonAdmins"', '1'),('IncludeRecommendedUpdates"', '1'),('ScheduledInstallTime"', '22')):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Terminal Server\\'
for command in (('fDenyTSConnections"', '1'),('AllowRemoteRPC"', '0')):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Control\Remote Assistance\\'
for command in (('fAllowFullControl"','0'),('fAllowToGetHelp"','0')):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp\\'
command = ('UserAuthentication"','1')
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Control\Remote Assistance\\'
command = ('CreateEncryptedOnlyTickets"','1')
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp\\'
command = ('fDisableEncryption"','0')
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
# I have found additional commands. This one 'might' fix the host file.
os.system('attrib -r -s C:\WINDOWS\system32\drivers\etc\hosts')
os.system('echo > C:\Windows\System32\drivers\etc\hosts')
# This isn't really appropriate for this option, but...
os.system('net start > started_services.txt')
# Remote registry
os.system('net stop RemoteRegistry')
os.system('sc config RemoteRegistry start=disabled')
for service in ('RemoteAccess', 'Telephony', 'tlntsvr', 'p2pimsvc', 'simptcp', 'fax', 'msftpsvc'):
os.system('net stop ' + service)
os.system('sc config ' + service + ' start = disabled')
|
for command in registry_commands.readlines():
os.system(command)
############################# Search for media files #############################
if raw_input("Shall we search for media files? y/n. ") == 'y':
file_list = []
# Ask for directory to be scanned.
directory_to_scan = input('What d
|
irectory would you like to scan for media files? Remember to enclose your directory in \'s or "s, and use two \s if your directory ends in a \. ')
# Inefficient but I spent too much time looking how to do this to delete it.
'''for root, dirs, files in os.walk(directory_to_scan):
for f_name in files:
file_path = os.path.join(root, f_name)
# If the file ends with common media extension, add file path to text_file
for extension in ('.mp3','.wav','.png','wmv','.jpg','.jpeg','.mp4','.avi','.mov','.aif','.iff','.php','.m3u','.m4a','.wma','.m4v','.mpg','.bmp','.gif','.bat','.exe','.zip','.7z'):
if ro
|
EderSantana/fuel
|
fuel/datasets/svhn.py
|
Python
|
mit
| 2,213 | 0 |
# -*- coding: utf-8 -*-
import os
from fuel import config
from fuel.datasets import H5PYDataset
from fuel.transformers.defaults import uint8_pixels_to_floatX
class SVHN(H5PYDataset):
"""The Street View House Numbers (SVHN) dataset.
SVHN [SVHN] is
|
a real-world image dataset for developing machine
learning and object recognition algorithms with minimal requirement
on data preprocessing and formatti
|
ng. It can be seen as similar in
flavor to MNIST [LBBH] (e.g., the images are of small cropped
digits), but incorporates an order of magnitude more labeled data
(over 600,000 digit images) and comes from a significantly harder,
unsolved, real world problem (recognizing digits and numbers in
natural scene images). SVHN is obtained from house numbers in
Google Street View images.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
.. [LBBH] Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner,
*Gradient-based learning applied to document recognition*,
Proceedings of the IEEE, November 1998, 86(11):2278-2324.
Parameters
----------
which_format : {1, 2}
SVHN format 1 contains the full numbers, whereas SVHN format 2
contains cropped digits.
which_set : {'train', 'test', 'extra'}
Whether to load the training set (73,257 examples), the test
set (26,032 examples) or the extra set (531,131 examples).
Note that SVHN does not have a validation set; usually you
will create your own training/validation split
using the `subset` argument.
"""
filename = 'svhn_format_{}.hdf5'
default_transformers = uint8_pixels_to_floatX(('features',))
def __init__(self, which_format, which_set, **kwargs):
self.which_format = which_format
super(SVHN, self).__init__(self.data_path, which_set, **kwargs)
@property
def data_path(self):
return os.path.join(
config.data_path, self.filename.format(self.which_format))
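A hedged usage sketch; it assumes `fuel.config.data_path` already contains the converted svhn_format_2.hdf5 file, and the `subset` slice is only there to illustrate the argument mentioned in the docstring (the exact H5PYDataset API may differ between fuel versions):
train = SVHN(which_format=2, which_set='train', subset=slice(0, 1000))
state = train.open()
batch = train.get_data(state, slice(0, 8))  # tuple ordered like train.sources
train.close(state)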
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_01_01/operations/_local_network_gateways_operations.py
|
Python
|
mit
| 27,633 | 0.005139 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.
|
url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(u
|
rl, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.LocalNetworkGateway"]
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2018_01_01.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscrip
|
pvagner/orca
|
test/keystrokes/firefox/aria_combobox_dojo.py
|
Python
|
lgpl-2.1
| 4,695 | 0.000852 |
#!/usr/bin/python
"""Test of Dojo combo box presentation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(PauseAction(5000))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Tab to the first combo box",
["BRAILLE LINE: 'US State test 1 (200% Courier font): California $l'",
" VISIBLE: '(200% Courier font): California ', cursor=32",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"BRAILLE LINE: 'US State test 1 (200% Courier font): California $l'",
" VISIBLE: '(200% Courier font): California ', cursor=32",
"SPEECH OUTPUT: 'collapsed'",
"SPEECH OUTPUT: 'US State test 1 (200% Courier font): entry California selected'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(TypeAction("C"))
sequence.append(utils.AssertPresentationAction(
"2. Replace existing text with a 'C'",
["KNOWN ISSUE: The braille line is not quite right",
"BRAILLE LINE: 'US State test 1 (200% Couri
|
er font): C $l'",
" VISIBLE: '(200% Courie
|
r font): C $l', cursor=23",
"BRAILLE LINE: 'US State test 1 (200% Courier font): US State test 1 (200% Courier font): combo box'",
" VISIBLE: 'te test 1 (200% Courier font): U', cursor=32",
"SPEECH OUTPUT: 'expanded'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Down Arrow",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'California menu'",
"SPEECH OUTPUT: 'C alifornia (CA).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Down Arrow",
["BRAILLE LINE: 'C olorado (CO)'",
" VISIBLE: 'C olorado (CO)', cursor=1",
"SPEECH OUTPUT: 'C olorado (CO).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Down Arrow",
["BRAILLE LINE: 'C onnecticut (CT)'",
" VISIBLE: 'C onnecticut (CT)', cursor=1",
"SPEECH OUTPUT: 'C onnecticut (CT).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Down Arrow",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'C alifornia (CA).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"7. Up Arrow",
["BRAILLE LINE: 'C onnecticut (CT)'",
" VISIBLE: 'C onnecticut (CT)', cursor=1",
"SPEECH OUTPUT: 'C onnecticut (CT).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"8. Up Arrow",
["BRAILLE LINE: 'C olorado (CO)'",
" VISIBLE: 'C olorado (CO)', cursor=1",
"SPEECH OUTPUT: 'C olorado (CO).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"9. Up Arrow",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'C alifornia (CA).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"10. Basic Where Am I - Combo box expanded",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'California menu'",
"SPEECH OUTPUT: 'C alifornia (CA).'",
"SPEECH OUTPUT: '1 of 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Escape"))
sequence.append(utils.AssertPresentationAction(
"11. Escape",
["BRAILLE LINE: 'US State test 1 (200% Courier font): US State test 1 (200% Courier font): combo box'",
" VISIBLE: 'te test 1 (200% Courier font): U', cursor=32",
"BRAILLE LINE: 'US State test 1 (200% Courier font): California $l'",
" VISIBLE: '(200% Courier font): California ', cursor=32",
"SPEECH OUTPUT: 'collapsed'",
"SPEECH OUTPUT: 'US State test 1 (200% Courier font): entry California selected'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
teltek/edx-platform
|
common/djangoapps/student/views/management.py
|
Python
|
agpl-3.0
| 44,012 | 0.002545 |
"""
Student Views
"""
import datetime
import logging
import uuid
from collections import namedtuple
from bulk_email.models import Optout
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.views import password_reset_confirm
from django.contrib.sites.models import Site
from django.core import mail
from django.urls import reverse
from django.core.validators import ValidationError, validate_email
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import Signal, receiver
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.utils.encoding import force_bytes, force_text
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from edx_ace import ace
from edx_ace.recipient import Recipient
from edx_django_utils import monitoring as monitoring_utils
from eventtracking import tracker
from ipware.ip import get_ip
# Note that this lives in LMS, so this dependency should be refactored.
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from six import text_type
from xmodule.modulestore.django import modulestore
import track.views
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_response, render_to_string
from entitlements.models import CourseEntitlement
from openedx.core.djangoapps.ace_common.template_context import get_base_template_context
from openedx.core.djangoapps.catalog.utils import get_programs_with_type
from openedx.core.djangoapps.embargo import api as embargo_api
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.oauth_dispatch.api import destroy_oauth_tokens
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.theming.helpers import get_current_site
from openedx.core.djangoapps.user_api.config.waffle import (
PASSWORD_UNICODE_NORMALIZE_FLAG, PREVENT_AUTH_USER_WRITES, SYSTEM_MAINTENANCE_MSG, waffle
)
from openedx.core.djangoapps.user_api.errors import UserNotFound, UserAPIInternalError
from openedx.core.djangoapps.user_api.models import UserRetirementRequest
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.journals.api import get_journals_context
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from student.helpers import (
DISABLE_UNENROLL_CERT_STATES,
auth_pipeline_urls,
cert_info,
create_or_set_user_attribute_created_on_site,
do_create_account,
generate_activation_email_context,
get_next_url_for_login_page
)
from student.message_types import EmailChange, PasswordReset
from student.models import (
CourseEnrollment,
PasswordHistory,
PendingEmailChange,
Registration,
RegistrationCookieConfiguration,
UserAttribute,
UserProfile,
UserSignupSource,
UserStanding,
create_comments_service_user,
email_exists_or_retired,
)
from student.signals import REFUND_ORDER
from student.tasks import send_activation_email
from student.text_me_the_app import TextMeTheAppFragmentView
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.password_policy_validators import normalize_password, validate_password
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple(
'ReverifyInfo',
'course_id course_name course_number date status display'
)
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
REGISTRATION_UTM_PARAMETERS = {
'utm_source': 'registration_utm_source',
'utm_medium': 'registration_utm_medium',
'utm_campaign': 'registration_utm_campaign',
'utm_term': 'registration_utm_term',
'utm_content': 'registration_utm_content',
}
REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "registration"])
def csrf_token(context):
"""
A csrf token that can be included in a form.
"""
token = context.get('csrf_token', '')
if token == '
|
NOTPROVIDED':
return ''
return
|
(u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="{}" /></div>'.format(token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
courses = get_courses(user)
if configuration_helpers.get_value(
"ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = configuration_helpers.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
# Maximum number of courses to display on the homepage.
context['homepage_course_max'] = configuration_helpers.get_value(
'HOMEPAGE_COURSE_MAX', settings.HOMEPAGE_COURSE_MAX
)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the site configuration workflow, versus living here
youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for theme override of the courses list
context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
# Add marketable programs to the context.
context['programs_list'] = get_programs_with_type(request.site, include_hidden=False)
# TODO: Course Listing Plugin required
context['journal_info'] = get_journals_context(request)
return render_to_response('index.html', context)
def compose_and_send_activation_email(user, profile, user_registration=None):
"""
Construct all the required params and send the activation email
through celery task
Arguments:
user: current logged-in user
profile: pro
|
ASMlover/study
|
python/src/config_parser.py
|
Python
|
bsd-2-clause
| 1,635 | 0 |
#!/usr/bin/env python
# -*- encoding
|
: utf-8 -*-
#
# Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted
|
provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ConfigParser import ConfigParser
CONFIG_FILE = 'config.conf'
config = ConfigParser()
config.read(CONFIG_FILE)
print config.get('DEMO', 'STR_VAL')
print config.getint('DEMO', 'INT_VAL')
print config.getfloat('DEMO', 'FLOAT_VAL')
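The script expects a config.conf next to it; a minimal example with the section and keys read above (the values are placeholders):
[DEMO]
STR_VAL = hello
INT_VAL = 42
FLOAT_VAL = 3.14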
|
whtsky/Dash.py
|
dash_py/__init__.py
|
Python
|
mit
| 109 | 0 |
"""
Create python docs for das
|
h easily.
"""
__version__ = '0.2.1'
__author__ = 'whtsky'
__license__ = 'MIT'
| |
digimarc/django
|
tests/forms_tests/tests/test_fields.py
|
Python
|
bsd-3-clause
| 83,991 | 0.005026 |
# -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import uuid
from decimal import Decimal
from unittest import skipIf
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, ComboField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, Field, FileField, FilePathField,
FloatField, Form, GenericIPAddressField, HiddenInput, ImageField,
IntegerField, MultipleChoiceField, NullBooleanField, NumberInput,
PasswordInput, RadioSelect, RegexField, SlugField, SplitDateTimeField,
Textarea, TextInput, TimeField, TypedChoiceField, TypedMultipleChoiceField,
URLField, UUIDField, ValidationError, Widget, forms,
)
from django.test import SimpleTestCase, ignore_warnings
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.duration import duration_string
try:
from PIL import Image
except ImportError:
Image = None
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Ensure that setting min_length or max_length to something that is not a
number returns an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Otherwise, return a maxlength attribute equal to max_length
f = CharField(max_length=10)
|
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
# IntegerField ################################################################
def
|
test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertEqual(True, isinstance(f.clean('1'), int))
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertEqual(None, f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1,
|
pmuller/django-softdeletion
|
django_softdeletion/models.py
|
Python
|
mit
| 5,119 | 0 |
import logging
from django.db.models import DateTimeField, Model, Manager
from django.db.models.query import QuerySet
from django.db.models.fields.related import \
OneToOneField, ManyToManyField, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
LOGGER = logging.getLogger(__name__)
def _unset_related_one_to_one(obj, field):
old_value = getattr(obj, field.column)
if old_value is not None:
LOGGER.debug(
'Setting %s.%s to None on object %s (old value: %s)',
obj._meta.model.__name__, field.column, obj.pk, old_value)
# Unset the fk field (e.g. Foo.baz_id)
setattr(obj, field.column, None)
# Unset the related object field (e.g. Foo.baz)
setattr(obj, field.name, None)
def _unset_related_many_to_many(obj, field):
manager = getattr(obj, field.name)
old_values = manager.values_list('pk', flat=True)
LOGGER.debug(
'Removing all objects from %s.%s on object %s (old values: %s)',
obj._meta.model.__name__, field.name, obj.pk,
', '.join(str(pk) for pk in old_values))
manager.remove(*manager.all())
def _unset_related_objects_relations(obj):
LOGGER.debug('Soft-deleting object %s %s',
obj._meta.model.__name__, obj.pk)
for field in obj._meta.get_fields():
field_type = type(field)
if field_type is OneToOneField:
_unset_related_one_to_one(obj, field)
elif field_type in (ManyToManyRel, ManyToManyField):
_unset_related_many_to_many(obj, field)
for related in obj._meta.get_all_related_objects():
# Unset related objects' relation
rel_name = related.get_accessor_name()
if related.one_to_one:
# Handle one-to-one relations.
try:
related_object = getattr(obj, rel_name)
except ObjectDoesNotExist:
pass
else:
_unset_related_one_to_one(related_object, related.field)
related_object.save()
else:
# Handle one-to-many and many-to-many relations.
related_objects = getattr(obj, rel_name)
if related_objects.count():
affected_objects_id = ', '.join(
str(pk) for pk in related_objects.values_list(
'pk', flat=True))
old_values = ', '.join(
str(val) for val in related_objects.values_list(
related.field.name, flat=True))
LOGGER.debug(
'Setting %s.%s to None on objects %s (old values: %s)',
related_objects.model.__name__, related.field.name,
affected_objects_id, old_values)
related_objects.update(**{related.field.name: None})
class SoftDeleteQuerySet(QuerySet):
"""This QuerySet subclass implements soft deletion of objects.
|
"""
def delete(self):
"""Soft delete all objects included in this queryset.
"""
for ob
|
j in self:
_unset_related_objects_relations(obj)
self.update(deleted=now())
def undelete(self):
"""Soft undelete all objects included in this queryset.
"""
objects = self.filter(deleted__isnull=False)
if objects.count():
LOGGER.debug(
'Soft undeleting %s objects: %s', self.model.__name__,
', '.join(str(pk)
for pk in objects.values_list('pk', flat=True)))
objects.update(deleted=None)
class SoftDeleteManager(Manager.from_queryset(SoftDeleteQuerySet)):
"""This Manager hides soft deleted objects by default,
and exposes methods to access them.
"""
def _get_base_queryset(self):
return super(SoftDeleteManager, self).get_queryset()
def get_queryset(self):
"""Return NOT DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=True)
def deleted(self):
"""Return DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=False)
def with_deleted(self):
"""Return ALL objects.
"""
return self._get_base_queryset()
class SoftDeleteModel(Model):
"""Simply inherit this class to enable soft deletion on a model.
"""
class Meta:
abstract = True
objects = SoftDeleteManager()
deleted = DateTimeField(verbose_name=_('deleted'), null=True, blank=True)
def delete(self):
"""Soft delete this object.
"""
_unset_related_objects_relations(self)
self.deleted = now()
self.save()
return self
def undelete(self):
"""Undelete this soft-deleted object.
"""
if self.deleted is not None:
LOGGER.debug('Soft-undeleting object %s %s',
self._meta.model.__name__, self.pk)
self.deleted = None
self.save()
return self
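# --- Hedged usage sketch, not part of the original module ---
# Putting the classes above together; ``Article`` is a hypothetical model used purely
# for illustration and assumes a configured Django project.
#
#     class Article(SoftDeleteModel):
#         title = CharField(max_length=100)
#
#     article = Article.objects.create(title='draft')
#     article.delete()                      # row is kept, ``deleted`` is set to now()
#     Article.objects.count()               # 0 -- the default manager hides soft-deleted rows
#     Article.objects.deleted().count()     # 1
#     Article.objects.deleted().undelete()  # clears ``deleted`` via SoftDeleteQuerySet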
|
foobarbazblarg/stayclean
|
stayclean-2020-january/display-final-after-month-is-over.py
|
Python
|
mit
| 3,056 | 0.004254 |
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
|
dntt1/youtube-dl
|
youtube_dl/extractor/jwplatform.py
|
Python
|
unlicense
| 5,161 | 0.002906 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
)
class JWPlatformBaseIE(InfoExtractor):
@staticmethod
def _find_jwplayer_data(webpage):
        # TODO: Merge this with JWPlayer-related codes in generic.py
mobj = re.search(
            r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\((?P<options>[^)]+)\)',
webpage)
if mobj:
return mobj.group('options')
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._parse_json(
self._find_jwplayer_data(webpage), video_id)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id, require_title=True, m3u8_id=None, rtmp_params=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
video_data = jwplayer_data['playlist'][0]
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
formats = []
for source in video_data['sources']:
source_url = self._proto_relative_url(source['file'])
source_type = source.get('type') or ''
if source_type in ('application/vnd.apple.mpegurl', 'hls') or determine_ext(source_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False))
elif source_type.startswith('audio'):
formats.append({
'url': source_url,
'vcodec': 'none',
})
else:
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': int_or_none(source.get('height')),
}
if source_url.startswith('rtmp'):
                    a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
self._sort_formats(formats)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if track.get('file') and track.get('kind') == 'captions':
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track['file'])
})
return {
'id': video_id,
'title': video_data['title'] if require_title else video_data.get('title'),
'description': video_data.get('description'),
'thumbnail': self._proto_relative_url(video_data.get('image')),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration')),
'subtitles': subtitles,
'formats': formats,
}
class JWPlatformIE(JWPlatformBaseIE):
_VALID_URL = r'(?:https?://content\.jwplatform\.com/(?:feeds|players|jw6)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
_TEST = {
'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js',
'md5': 'fa8899fa601eb7c83a64e9d568bdf325',
'info_dict': {
'id': 'nPripu9l',
'ext': 'mov',
'title': 'Big Buck Bunny Trailer',
'description': 'Big Buck Bunny is a short animated film by the Blender Institute. It is made using free and open source software.',
'upload_date': '20081127',
'timestamp': 1227796140,
}
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<script[^>]+?src=["\'](?P<url>(?:https?:)?//content.jwplatform.com/players/[a-zA-Z0-9]{8})',
webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
video_id = self._match_id(url)
json_data = self._download_json('http://content.jwplatform.com/feeds/%s.json' % video_id, video_id)
return self._parse_jwplayer_data(json_data, video_id)
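# --- Hedged, standalone illustration (not part of the original extractor) ---
# A quick check of the setup() pattern used in JWPlatformBaseIE._find_jwplayer_data;
# the HTML snippet below is invented for demonstration only.
if __name__ == '__main__':
    _sample_page = '<script>jwplayer("vid1").setup({"file": "http://example.com/v.mp4"})</script>'
    _mobj = re.search(
        r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\((?P<options>[^)]+)\)',
        _sample_page)
    # The extractor would feed this 'options' blob into _parse_json().
    assert _mobj is not None
    assert _mobj.group('options') == '{"file": "http://example.com/v.mp4"}'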
|
gully/PyKE
|
pyke/kepoutlier.py
|
Python
|
mit
| 14,393 | 0.00139 |
from .utils import PyKEArgumentHelpFormatter
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from tqdm import tqdm
from . import kepio, kepmsg, kepkey, kepfit, kepstat, kepfunc
__all__ = ['kepoutlier']
def kepoutlier(infile, outfile=None, datacol='SAP_FLUX', nsig=3.0, stepsize=1.0,
npoly=3, niter=1, operation='remove', ranges='0,0', plot=False,
plotfit=False, overwrite=False, verbose=False,
logfile='kepoutlier.log'):
"""
kepoutlier -- Remove or replace statistical outliers from time series data
kepoutlier identifies data outliers relative to piecemeal best-fit
polynomials. Outliers are either removed from the output time series or
replaced by a noise-treated value defined by the polynomial fit. Identified
outliers and the best fit functions are optionally plotted for inspection
purposes.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing a Kepler light
curve within the first data extension.
outfile : str
The name of the output FITS file. ``outfile`` will be direct copy of
infile with either data outliers removed (i.e. the table will have
fewer rows) or the outliers will be corrected according to a best-fit
function and a noise model.
datacol : str
The column name containing data stored within extension 1 of infile.
This data will be searched for outliers. Typically this name is
SAP_FLUX (Simple Aperture Photometry fluxes) or PDCSAP_FLUX (Pre-search
Data Conditioning fluxes).
nsig : float
The sigma clipping threshold. Data deviating from a best fit function
by more than the threshold will be either removed or corrected
according to the user selection of operation.
stepsize : float
The data within datacol is unlikely to be well represented by a single
polynomial function. stepsize splits the data up into a series of time
blocks, each is fit independently by a separate function. The user can
provide an informed choice of stepsize after inspecting the data with
the kepdraw tool. Units are days.
npoly : int
The polynomial order of each best-fit function.
niter : int
If outliers are found in a particular data section, that data will be
removed temporarily and the time series fit again. This will be
iterated niter times before freezing upon the best available fit.
operation : str
* ``remove`` throws away outliers. The output data table will smaller
or equal in size to the input table.
* ``replace`` replaces outliers with a value that is consistent with
the best-fit polynomial function and a random component defined by the
rms of the data relative to the fit and calculated using the inverse
normal cumulative function and a random number generator.
ranges : str
The user can choose specific time ranges of data on which to work. This
could, for example, avoid removing known stellar flares from a dataset.
        Time ranges are supplied as comma-separated pairs of Barycentric Julian
        Dates (BJDs). Multiple ranges are separated by a semi-colon. An example
containing two time ranges is::
'2455012.48517,2455014.50072;2455022.63487,2455025.08231'
If the user wants to correct the entire time series then providing
        ``ranges = '0,0'`` will tell the task to operate on the whole time series.
plot : bool
Plot the data and outliers?
plotfit : bool
Overlay the polynomial fits upon the plot?
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepoutlier kplr002437329-2010355172524_llc.fits --datacol SAP_FLUX
--nsig 4 --stepsize 5 --npoly 2 --niter 10 --operation replace
--verbose --plot --plotfit
.. image:: ../_static/images/api/kepoutlier.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPOUTLIER -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' datacol={}'.format(datacol)
+ ' nsig={}'.format(nsig)
+ ' stepsize={}'.format(stepsize)
+ ' npoly={}'.format(npoly)
+ ' niter={}'.format(niter)
+ ' operation={}'.format(operation)
+ ' ranges={}'.format(ranges)
+ ' plot={}'.format(plot)
+ ' plotfit={}'.format(plotfit)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPOUTLIER started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPOUTLIER: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
try:
work = instr[0].header['FILEVER']
cadenom = 1.0
except:
cadenom = cadence
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# read table structure
table = kepio.readfitstab(infile, instr[1], logfile, verbose)
# filter input data table
try:
nanclean = instr[1].header['NANCLEAN']
except:
time = kepio.readtimecol(infile, table, logfile, verbose)
flux = kepio.readfitscol(infile, table, datacol, logfile, verbose)
finite_data_mask = np.isfinite(time) & np.isfinite(flux) & (flux != 0)
table = table[finite_data_mask]
instr[1].data = table
comment = 'NaN cadences removed from data'
kepkey.new('NANCLEAN', True, comment, instr[1], outfile, logfile,
verbose)
# read table columns
try:
intime = instr[1].data.field('barytime') + 2.4e6
except:
intime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
verbose)
indata = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
verbose)
intime = intime + bjdref
indata = indata / cadenom
# time ranges for region to be corrected
t1, t2 = kepio.timeranges(ranges, logfile, verbose)
cadencelis = kepstat.filterOnRange(intime, t1, t2)
# find limits of each time step
tstep1, tstep2 = [], []
work = intime[0]
while work < intime[-1]:
tstep1.append(work)
tstep2.append(np.array([work + stepsize, intime[-1]],
dtype='float64').min())
work += stepsize
# find cadence limits of each time step
cstep1, cstep2 = [], []
work1 = 0
work2 = 0
for i in range(len(intime)):
if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize:
work2 = i
else:
cstep1.append(work1)
cstep2.append(work2)
work1 = i
work2 = i
cstep1.append(work1)
cstep2.append(work2)
outdata = indata * 1.0
# comment keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
# clean up x-axis unit
intime0 = (tstart // 100) * 100.0
ptime = intime - intime0
xlab = 'BJD $-$ {}'.format(intime0)
# clean up y-axis units
pout
|
trthanhquang/bus-assistant
|
webApp/getBusTiming.py
|
Python
|
mit
| 2,827 | 0.038557 |
#!/usr/bin/env python
import urllib2
from bs4 import BeautifulSoup as BS
import re
import time
def getAgenciesList():
    agenciesList_req = urllib2.Request('''http://services.my511.org/Transit2.0/GetAgencies.aspx?token=aeeb38de-5385-482a-abde-692dfb2769e3''')
xml_resp = urllib2.urlopen(agenciesList_req)
soup = BS(xml_resp.read(),'lxml')
print soup.prettify()
agencies = soup.find_all('agency')
    for a in agencies:
print a['name']
def getBusList(busCodes):
api_url = '''http://services.my511.org/Transit2.0/GetRoutesForAgencies.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&agencyNames=SF-MUNI'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
routes = soup.find_all('route')
for route in routes:
if route['code'] in busCodes:
print route.prettify()
def getBusStopsList():
api_url = '''http://services.my511.org/Transit2.0/GetStopsForRoute.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&routeIDF=SF-MUNI~8X~Inbound'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
print soup.prettify()
def getNextDepartures(stopcode,buscode):
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
# print soup.prettify()
route = soup.find('route',{'code':buscode})
l = route.departuretimelist.getText().split()
if l:
print '-- %s\t%s (mins)'%(buscode,', '.join(l))
else:
print '-- %s\tUnavailable'%buscode
return l
class busTime:
def __init__(self,busCode,busTime=[]):
self.busCode = busCode #String
self.busTime = busTime #List of String
def __str__(self):
return self.busCode
class busStopStatus:
def __init__(self,stopcode,description="",departureList=[]):
self.stopcode = stopcode
self.description = description
self.departureList = departureList
def getBusStopStatus(stopcode):
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
description = soup.find('stop')['name']
status = busStopStatus(stopcode,description,[])
for bus in soup.find_all('route'):
departtime = busTime(bus['code'],[])
timeList = bus.departuretimelist.getText().split()
if timeList:
print '-- %s\t%s (mins)'%(bus['code'],', '.join(timeList))
for t in timeList:
departtime.busTime.append(t)
status.departureList.append(departtime)
else:
print '-- %s\tUnavailable'%bus['code']
return status
if __name__ == '__main__':
print 'BUS TIMING... :D\n'
print time.ctime(time.time())
getBusStopStatus(16367)
|
912/M-new
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/db/models/proxy.py
|
Python
|
gpl-2.0
| 2,643 | 0.001892 |
"""
The GeometryProxy object, allows for lazy-geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
from django.contrib.gis import memoryview
from django.utils import six
class GeometryProxy(object):
def __init__(self, klass, field):
"""
Proxy initializes on the given Geometry class (not an instance) and
the GeometryField.
"""
self._field = field
self._klass = klass
def __get__(self, obj, type=None):
"""
This accessor retrieves the geometry, initializing it using the geometry
class specified during initialization and the HEXEWKB value of the field.
Currently, only GEOS or OGR geometries are supported.
"""
if obj is None:
# Accessed on a class, not an instance
return self
        # Getting the value of the field.
geom_value = obj.__dict__[self._field.attname]
if isinstance(geom_value, self._klass):
geom = geom_value
elif (geom_value is None) or (geom_value == ''):
geom = None
else:
# Otherwise, a Geometry object is built using the field's contents,
# and the model's corresponding attribute is set.
geom = self._klass(geom_value)
setattr(obj, self._field.attname, geom)
return geom
def __set__(self, obj, value):
"""
This accessor sets the proxied geometry with the geometry class
specified during initialization. Values of None, HEXEWKB, or WKT may
be used to set the geometry as well.
"""
# The OGC Geometry type of the field.
gtype = self._field.geom_type
# The geometry type must match that of the field -- unless the
# general GeometryField is used.
if isinstance(value, self._klass) and (str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY'):
# Assigning the SRID to the geometry.
if value.srid is None:
value.srid = self._field.srid
elif value is None or isinstance(value, six.string_types + (memoryview,)):
# Set with None, WKT, HEX, or WKB
pass
else:
raise TypeError('Cannot set %s GeometryProxy (%s) with value of type: %s' % (
obj.__class__.__name__, gtype, type(value)))
# Setting the objects dictionary with the value, and returning.
obj.__dict__[self._field.attname] = value
return value
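# --- Hedged, standalone illustration (not part of django.contrib.gis) ---
# The lazy-descriptor pattern used by GeometryProxy, reduced to plain Python so it can
# run without Django; ``FakeGeom`` and ``Location`` are invented stand-ins.
if __name__ == '__main__':
    class FakeGeom(object):
        def __init__(self, wkt):
            self.wkt = wkt
    class LazyProxy(object):
        def __init__(self, klass, attname):
            self._klass, self._attname = klass, attname
        def __get__(self, obj, type=None):
            if obj is None:
                return self
            value = obj.__dict__[self._attname]
            if value is None or isinstance(value, self._klass):
                return value
            geom = self._klass(value)            # build the object lazily
            obj.__dict__[self._attname] = geom   # cache it, as GeometryProxy.__get__ does
            return geom
        def __set__(self, obj, value):
            obj.__dict__[self._attname] = value  # store the raw input, as GeometryProxy.__set__ does
            return value
    class Location(object):
        geom = LazyProxy(FakeGeom, 'geom')
        def __init__(self, wkt):
            self.geom = wkt                      # goes through LazyProxy.__set__
    loc = Location('POINT(1 2)')
    assert isinstance(loc.geom, FakeGeom) and loc.geom.wkt == 'POINT(1 2)'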
|
google-research/google-research
|
kws_streaming/models/att_rnn.py
|
Python
|
apache-2.0
| 5,484 | 0.008388 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BiRNN model with attention."""
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
"""BiRNN attention model parameters."""
parser_nn.add_argument(
'--cnn_filters',
type=str,
default='10,1',
help='Number of output filters in the convolution layers',
)
parser_nn.add_argument(
'--cnn_kernel_size',
type=str,
default='(5,1),(5,1)',
help='Heights and widths of the 2D convolution window',
)
parser_nn.add_argument(
'--cnn_act',
type=str,
default="'relu','relu'",
help='Activation function in the convolution layers',
)
parser_nn.add_argument(
'--cnn_dilation_rate',
type=str,
default='(1,1),(1,1)',
help='Dilation rate to use for dilated convolutions',
)
parser_nn.add_argument(
'--cnn_strides',
type=str,
default='(1,1),(1,1)',
help='Strides of the convolution layers along the height and width',
)
parser_nn.add_argument(
'--rnn_layers',
type=int,
default=2,
      help='Number of RNN layers (each RNN is wrapped by Bidirectional)',
)
parser_nn.add_argument(
'--rnn_type',
type=str,
default='gru',
help='RNN type: it can be gru or lstm',
)
parser_nn.add_argument(
'--rnn_units',
type=int,
default=128,
help='Units number in RNN cell',
)
parser_nn.add_argument(
'--dropout1',
type=float,
default=0.1,
help='Percentage of data dropped',
)
parser_nn.add_argument(
'--units2',
type=str,
default='64,32',
help='Number of units in the last set of hidden layers',
)
parser_nn.add_argument(
'--act2',
type=str,
default="'relu','linear'",
help='Activation function of the last set of hidden layers',
)
def model(flags):
"""BiRNN attention model.
It is based on paper:
A neural attention model for speech command recognition
https://arxiv.org/pdf/1808.08929.pdf
Depending on parameter rnn_type, model can be biLSTM or biGRU
Args:
flags: data/model parameters
Returns:
Keras model for training
"""
rnn_types = {'lstm': tf.keras.layers.LSTM, 'gru': tf.keras.layers.GRU}
if flags.rnn_type not in rnn_types:
    raise ValueError('not supported RNN type ', flags.rnn_type)
rnn = rnn_types[flags.rnn_type]
input_audio = tf.keras.layers.Input(
shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
batch_size=flags.batch_size)
net = input_audio
if flags.preprocess == 'raw':
# it is a self contained model, user need to feed raw audio only
net = speech_features.SpeechFeatures(
speech_features.SpeechFeatures.get_params(flags))(
net)
net = tf.keras.backend.expand_dims(net)
for filters, kernel_size, activation, dilation_rate, strides in zip(
utils.parse(flags.cnn_filters), utils.parse(flags.cnn_kernel_size),
utils.parse(flags.cnn_act), utils.parse(flags.cnn_dilation_rate),
utils.parse(flags.cnn_strides)):
net = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
activation=activation,
dilation_rate=dilation_rate,
strides=strides,
padding='same')(
net)
net = tf.keras.layers.BatchNormalization()(net)
shape = net.shape
# input net dimension: [batch, time, feature, channels]
# reshape dimension: [batch, time, feature * channels]
# so that GRU/RNN can process it
net = tf.keras.layers.Reshape((-1, shape[2] * shape[3]))(net)
# dims: [batch, time, feature]
for _ in range(flags.rnn_layers):
net = tf.keras.layers.Bidirectional(
rnn(flags.rnn_units, return_sequences=True, unroll=True))(
net)
feature_dim = net.shape[-1]
middle = net.shape[1] // 2 # index of middle point of sequence
# feature vector at middle point [batch, feature]
mid_feature = net[:, middle, :]
# apply one projection layer with the same dim as input feature
query = tf.keras.layers.Dense(feature_dim)(mid_feature)
# attention weights [batch, time]
att_weights = tf.keras.layers.Dot(axes=[1, 2])([query, net])
att_weights = tf.keras.layers.Softmax(name='attSoftmax')(att_weights)
# apply attention weights [batch, feature]
net = tf.keras.layers.Dot(axes=[1, 1])([att_weights, net])
net = tf.keras.layers.Dropout(rate=flags.dropout1)(net)
for units, activation in zip(
utils.parse(flags.units2), utils.parse(flags.act2)):
net = tf.keras.layers.Dense(units=units, activation=activation)(net)
net = tf.keras.layers.Dense(units=flags.label_count)(net)
if flags.return_softmax:
net = tf.keras.layers.Activation('softmax')(net)
return tf.keras.Model(input_audio, net)
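# --- Hedged illustration, not part of the original model code ---
# The attention pooling above restated with plain numpy on invented shapes, to make the
# Dot/Softmax wiring explicit: query is [batch, feature], net is [batch, time, feature].
if __name__ == '__main__':
  import numpy as np
  batch, time_steps, feature = 2, 7, 4
  rng = np.random.RandomState(0)
  net_np = rng.randn(batch, time_steps, feature)
  query_np = net_np[:, time_steps // 2, :]                # middle frame as the query
  scores = np.einsum('bf,btf->bt', query_np, net_np)      # Dot(axes=[1, 2])
  att = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)  # Softmax over time
  pooled = np.einsum('bt,btf->bf', att, net_np)           # Dot(axes=[1, 1])
  assert pooled.shape == (batch, feature)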
|
nathanielvarona/airflow
|
airflow/cli/commands/provider_command.py
|
Python
|
apache-2.0
| 3,862 | 0.000259 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Providers sub-commands"""
import re
from airflow.cli.simple_table import AirflowConsole
from airflow.providers_manager import ProvidersManager
from airflow.utils.cli import suppress_logs_and_warning
def _remove_rst_syntax(value: str) -> str:
return re.sub("[`_<>]", "", value.strip(" \n."))
@suppress_logs_and_warning
def provider_get(args):
"""Get a provider info."""
providers = ProvidersManager().providers
if args.provider_name in providers:
provider_version = providers[args.provider_name].version
provider_info = providers[args.provider_name].provider_info
if args.full:
provider_info["description"] = _remove_rst_syntax(provider_info["description"])
AirflowConsole().print_as(
data=[provider_info],
output=args.output,
)
else:
print(f"Provider: {args.provider_name}")
print(f"Version: {provider_version}")
else:
raise SystemExit(f"No such provider installed: {args.provider_name}")
@suppress_logs_and_warning
def providers_list(args):
"""Lists all providers at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().providers.values()),
output=args.output,
mapper=lambda x: {
"package_name": x[1]["package-name"],
"description": _remove_rst_syntax(x[1]["description"]),
"version": x[0],
},
)
@suppress_logs_and_warning
def hooks_list(args):
"""Lists all hooks at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().hooks.items()),
output=args.output,
mapper=lambda x: {
"connection_type": x[0],
"class": x[1].connection_class,
"conn_id_attribute_name": x[1].connection_id_attribute_name,
'package_name': x[1].package_name,
'hook_name': x[1].hook_name,
},
)
@suppress_logs_and_warning
def connection_form_widget_list(args):
"""Lists all custom connection form fields at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().connection_form_widgets.items()),
output=args.output,
mapper=lambda x: {
"connection_parameter_name": x[0],
"class": x[1].connection_class,
'package_name': x[1].package_name,
'field_type': x[1].field.field_class.__name__,
},
)
@suppress_logs_and_warning
def connection_field_behaviours(args):
"""Lists field behaviours"""
AirflowConsole().print_as(
data=list(ProvidersManager().field_behaviours.keys()),
output=args.output,
mapper=lambda x: {
"field_behaviours": x,
},
)
@suppress_logs_and_warning
def extra_links_list(args):
"""Lists all extra links at the command line"""
AirflowConsole().print_as(
data=ProvidersManager().extra_links_class_names,
output=args.output,
mapper=lambda x: {
"extra_link_class_name": x,
},
)
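# --- Hedged, standalone illustration (not part of the original module) ---
# What _remove_rst_syntax above actually strips; the sample description is invented.
if __name__ == '__main__':
    assert _remove_rst_syntax("`Apache Airflow <https://airflow.apache.org/>`__ provider.\n") == \
        "Apache Airflow https://airflow.apache.org/ provider"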
|
cmunk/protwis
|
build_gpcr/management/commands/build_statistics_trees.py
|
Python
|
apache-2.0
| 142 | 0.021429 |
from build.management.commands.build_statistics_trees import Command as BuildStatisticsTrees
class Command(BuildStatisticsTrees):
pass
|
bajibabu/merlin
|
misc/scripts/vocoder/world/extract_features_for_merlin.py
|
Python
|
apache-2.0
| 5,044 | 0.011102 |
import os
import sys
import shutil
import glob
import time
import multiprocessing as mp
if len(sys.argv)!=5:
print("Usage: ")
print("python extract_features_for_merlin.py <path_to_merlin_dir> <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")
sys.exit(1)
# top merlin directory
merlin_dir = sys.argv[1]
# input audio directory
wav_dir = sys.argv[2]
# Output features directory
out_dir = sys.argv[3]
# initializations
fs = int(sys.argv[4])
# tools directory
world = os.path.join(merlin_dir, "tools/bin/WORLD")
sptk = os.path.join(merlin_dir, "tools/bin/SPTK-3.9")
sp_dir = os.path.join(out_dir, 'sp' )
mgc_dir = os.path.join(out_dir, 'mgc')
ap_dir = os.path.join(out_dir, 'ap' )
bap_dir = os.path.join(out_dir, 'bap')
f0_dir = os.path.join(out_dir, 'f0' )
lf0_dir = os.path.join(out_dir, 'lf0')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(sp_dir):
os.mkdir(sp_dir)
if not os.path.exists(mgc_dir):
os.mkdir(mgc_dir)
if not os.path.exists(bap_dir):
os.mkdir(bap_dir)
if not os.path.exists(f0_dir):
os.mkdir(f0_dir)
if not os.path.exists(lf0_dir):
os.mkdir(lf0_dir)
if fs == 16000:
nFFTHalf = 1024
alpha = 0.58
elif fs == 48000:
nFFTHalf = 2048
alpha = 0.77
else:
print("As of now, we don't support %d Hz sampling rate." %(fs))
print("Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz")
sys.exit(1)
#bap order depends on sampling rate.
mcsize=59
def get_wav_filelist(wav_dir):
wav_files = []
for file in os.listdir(wav_dir):
whole_filepath = os.path.join(wav_dir,file)
if os.path.isfile(whole_filepath) and str(whole_filepath).endswith(".wav"):
wav_files.append(whole_filepath)
elif os.path.isdir(whole_filepath):
wav_files += get_wav_filelist(whole_filepath)
wav_files.sort()
return wav_files
def process(filename):
'''
The function decomposes a wav file into F0, mel-cepstral coefficients, and aperiodicity
:param filename: path to wav file
:return: .lf0, .mgc and .bap files
'''
file_id = os.path.basename(filename).split(".")[0]
print('\n' + file_id)
### WORLD ANALYSIS -- extract vocoder parameters ###
### extract f0, sp, ap ###
world_analysis_cmd = "%s %s %s %s %s" % (os.path.join(world, 'analysis'), \
filename,
os.path.join(f0_dir, file_id + '.f0'), \
os.path.join(sp_dir, file_id + '.sp'), \
os.path.join(bap_dir, file_id + '.bapd'))
os.system(world_analysis_cmd)
### convert f0 to lf0 ###
sptk_x2x_da_cmd = "%s +da %s > %s" % (os.path.join(sptk, 'x2x'), \
os.path.join(f0_dir, file_id + '.f0'), \
os.path.join(f0_dir, file_id + '.f0a'))
os.system(sptk_x2x_da_cmd)
sptk_x2x_af_cmd = "%s +af %s | %s > %s " % (os.path.join(sptk, 'x2x'), \
os.path.join(f0_dir, file_id + '.f0a'), \
os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \
os.path.join(lf0_dir, file_id + '.lf0'))
os.system(sptk_x2x_af_cmd)
### convert sp to mgc ###
sptk_x2x_df_cmd1 = "%s +df %s | %s | %s >%s" % (os.path.join(sptk, 'x2x'), \
os.path.join(sp_dir, file_id + '.sp'), \
os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \
os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(
mcsize) + ' -l ' + str(
nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \
os.path.join(mgc_dir, file_id + '.mgc'))
os.system(sptk_x2x_df_cmd1)
### convert bapd to bap ###
sptk_x2x_df_cmd2 = "%s +df %s > %s " % (os.path.join(sptk, "x2x"), \
os.path.join(bap_dir, file_id + ".bapd"), \
os.path.join(bap_dir, file_id + '.bap'))
os.system(sptk_x2x_df_cmd2)
print("--- Feature extraction started ---")
start_time = time.time()
# get wav files list
wav_files = get_wav_filelist(wav_dir)
# do multi-processing
pool = mp.Pool(mp.cpu_count())
pool.map(process, wav_files)
# clean temporal files
shutil.rmtree(sp_dir, ignore_errors=True)
shutil.rmtree(f0_dir, ignore_errors=True)
for zippath in glob.iglob(os.path.join(bap_dir, '*.bapd')):
os.remove(zippath)
print("You should have your features ready in: "+out_dir)
(m, s) = divmod(int(time.time() - start_time), 60)
print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
|
markreidvfx/pyaaf
|
example/aaf2xml.py
|
Python
|
mit
| 281 | 0 |
import aaf
import os
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
if not args:
parser.error("not enough argements")
path = args[0]
name, ext = os.path.splitext(path)
f = aaf.open(path, 'r')
f.save(name + ".xml")
f.close()
|
tensorflow/tensorflow
|
tensorflow/python/profiler/pprof_profiler_test.py
|
Python
|
apache-2.0
| 5,145 | 0.005442 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pprof_profiler."""
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEqual(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEqual(expected_proto, str(profile))
  @test_util.run_v1_only('b/120545219')
def testProfileWithWhileLoop(self):
    options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.cached_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEqual(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEqual(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEqual(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
|
Fizzadar/ElasticQuery
|
setup.py
|
Python
|
mit
| 554 | 0 |
# ElasticQuery
# File: setup.py
# Desc: needed
from setuptools import setup
if __name__ == '__main__':
setup(
version='3.2',
name='ElasticQuery',
description='A simple query builder for Elasticsearch 2',
author='Nick Barrett',
author_email='pointlessrambler@gmail.com',
url='http://github.com/Fizzadar/ElasticQuery',
package_dir={
'ElasticQuery': 'elasticquery',
},
packages=[
'elasticquery',
],
install_requires=['six>=1.4.0'],
)
|
jmchilton/galaxy-central
|
modules/docutils/languages/sv.py
|
Python
|
mit
| 2,135 | 0.001405 |
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision: 2224 $
# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': u'F\u00f6rfattare',
'authors': u'F\u00f6rfattare',
'organization': u'Organisation',
'address': u'Adress',
    'contact': u'Kontakt',
'version': u'Version',
'revision': u'Revision',
'status': u'Status',
'date': u'Datum',
'copyright': u'Copyright',
'dedication': u'Dedikation',
'abstract': u'Sammanfattning',
'attention': u'Observera!',
'caution': u'Varning!',
'danger': u'FARA!',
'error': u'Fel',
'hint': u'V\u00e4gledning',
'important': u'Viktigt',
'note': u'Notera',
'tip': u'Tips',
'warning': u'Varning',
'contents': u'Inneh\u00e5ll' }
"""Mapping of node class name to label text."""
bibliographic_fields = {
# 'Author' and 'Authors' identical in Swedish; assume the plural:
u'f\u00f6rfattare': 'authors',
u' n/a': 'author',
u'organisation': 'organization',
u'adress': 'address',
u'kontakt': 'contact',
u'version': 'version',
u'revision': 'revision',
u'status': 'status',
u'datum': 'date',
u'copyright': 'copyright',
u'dedikation': 'dedication',
u'sammanfattning': 'abstract' }
"""Swedish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
trondhindenes/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_svm.py
|
Python
|
gpl-3.0
| 18,691 | 0.001498 |
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_svm
short_description: Manage NetApp ONTAP svm
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- Create, modify or delete svm on NetApp ONTAP
options:
state:
description:
- Whether the specified SVM should exist or not.
choices: ['present', 'absent']
default: 'present'
name:
description:
- The name of the SVM to manage.
required: true
from_name:
description:
- Name of the SVM to be renamed
version_added: '2.7'
root_volume:
description:
- Root volume of the SVM.
- Cannot be modified after creation.
root_volume_aggregate:
description:
- The aggregate on which the root volume will be created.
- Cannot be modified after creation.
root_volume_security_style:
description:
- Security Style of the root volume.
- When specified as part of the vserver-create,
this field represents the security style for the Vserver root volume.
- When specified as part of vserver-get-iter call,
this will return the list of matching Vservers.
- The 'unified' security style, which applies only to Infinite Volumes,
cannot be applied to a Vserver's root volume.
- Cannot be modified after creation.
choices: ['unix', 'ntfs', 'mixed', 'unified']
allowed_protocols:
description:
- Allowed Protocols.
- When specified as part of a vserver-create,
this field represent the list of protocols allowed on the Vserver.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the protocols specified
as part of the allowed-protocols.
- When part of vserver-modify,
this field should include the existing list
along with new protocol list to be added to prevent data disruptions.
- Possible values
- nfs NFS protocol,
- cifs CIFS protocol,
- fcp FCP protocol,
- iscsi iSCSI protocol,
- ndmp NDMP protocol,
- http HTTP protocol,
- nvme NVMe protocol
aggr_list:
description:
- List of aggregates assigned for volume operations.
- These aggregates could be shared for use with other Vservers.
- When specified as part of a vserver-create,
this field represents the list of aggregates
that are assigned to the Vserver for volume operations.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the aggregates specified as part of the aggr-list.
ipspace:
description:
- IPSpace name
- Cannot be modified after creation.
version_added: '2.7'
snapshot_policy:
description:
- Default snapshot policy setting for all volumes of the Vserver.
This policy will be assigned to all volumes created in this
Vserver unless the volume create request explicitly provides a
snapshot policy or volume is modified later with a specific
snapshot policy. A volume-level snapshot policy always overrides
the default Vserver-wide snapshot policy.
version_added: '2.7'
language:
description:
- Language to use for the SVM
- Default to C.UTF-8
- Possible values Language
- c POSIX
- ar Arabic
- cs Czech
- da Danish
- de German
- en English
- en_us English (US)
- es Spanish
- fi Finnish
- fr French
- he Hebrew
- hr Croatian
- hu Hungarian
- it Italian
- ja Japanese euc-j
- ja_v1 Japanese euc-j
- ja_jp.pck Japanese PCK (sjis)
- ja_jp.932 Japanese cp932
- ja_jp.pck_v2 Japanese PCK (sjis)
- ko Korean
- no Norwegian
- nl Dutch
- pl Polish
- pt Portuguese
- ro Romanian
- ru Russian
- sk Slovak
- sl Slovenian
- sv Swedish
- tr Turkish
- zh Simplified Chinese
- zh.gbk Simplified Chinese (GBK)
- zh_tw Traditional Chinese euc-tw
- zh_tw.big5 Traditional Chinese Big 5
version_added: '2.7'
subtype:
description:
- The subtype for vserver to be created.
- Cannot be modified after creation.
choices: ['default', 'dp_destination', 'sync_source', 'sync_destination']
version_added: '2.7'
'''
EXAMPLES = """
- name: Create SVM
na_ontap_svm:
state: present
name: ansibleVServer
root_volume: vol1
root_volume_aggregate: aggr1
root_volume_security_style: mixed
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapSVM(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=[
'present', 'absent'], default='present'),
name=dict(required=True, type='str'),
from_name=dict(required=False, type='str'),
            root_volume=dict(type='str'),
root_volume_aggregate=dict(type='str'),
root_volume_security_style=dict(type='str', choices=['unix',
'ntfs',
'mixed',
'unified'
]),
allowed_protocols=dict(type='list'),
aggr_list=dict(type='list'),
ipspace=dict(type='str', required=False),
snapshot_policy=dict(type='str', required=False),
language=dict(type='str', required=False),
subtype=dict(choices=['default', 'dp_destination', 'sync_source', 'sync_destination'])
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.from_name = p['from_name']
self.root_volume = p['root_volume']
self.root_volume_aggregate = p['root_volume_aggregate']
self.root_volume_security_style = p['root_volume_security_style']
self.allowed_protocols = p['allowed_protocols']
self.aggr_list = p['aggr_list']
self.language = p['language']
self.ipspace = p['ipspace']
self.snapshot_policy = p['snapshot_policy']
self.subtype = p['subtype']
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_vserver(self, vserver_name=None):
"""
Checks if vserver exists.
:return:
vserver object if vserver found
None if vserver is not found
:rtype: object/None
"""
if vserver_name is None:
|
olduvaihand/ProjectEuler
|
src/python/problem303.py
|
Python
|
mit
| 475 | 0.004228 |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem303.py
#
# Multiples with small digits
# ===========================
# Published on Saturday, 25th September 2010, 10:00 pm
#
# For a positive integer n, define f(n) as the least positive multiple of n
# that, written in base 10, uses only digits <= 2. Thus f(2)=2, f(3)=12, f(7)=21,
# f(42)=210, f(89)=1121222. Also, . Find .
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
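# --- Hedged sketch; the original file above leaves main() as an empty stub ---
# One way to compute f(n): a breadth-first search over remainders mod n, extending
# candidates with the allowed digits 0, 1, 2, so the first multiple found is the smallest.
def least_multiple_with_small_digits(n):
    from collections import deque
    queue = deque((d, d % n) for d in (1, 2))   # the leading digit cannot be 0
    seen = set(r for _, r in queue)
    while queue:
        value, remainder = queue.popleft()
        if remainder == 0:
            return value
        for digit in (0, 1, 2):
            new_remainder = (remainder * 10 + digit) % n
            if new_remainder not in seen:
                seen.add(new_remainder)
                queue.append((value * 10 + digit, new_remainder))
# Spot checks against the values quoted in the comment above:
assert [least_multiple_with_small_digits(n) for n in (2, 3, 7, 42, 89)] == [2, 12, 21, 210, 1121222]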
|
pdxwebdev/yadapy
|
yadapy/friendnode.py
|
Python
|
gpl-3.0
| 3,507 | 0.013117 |
import logging, os, marshal, json, cPickle, time, copy, time, datetime, re, urllib, httplib
from base64 import b64encode, b64decode
from lib.crypt import encrypt, decrypt
from uuid import uuid4
from node import Node, InvalidIdentity
class FriendNode(Node):
def __init__(self, *args, **kwargs):
if 'identityData' in kwargs:
identityData = kwargs['identityData']
else:
identityData = args[0]
kwargs['identityData'] = identityData
try:
newIdentity = args[1]
except:
newIdentity = None
if type(kwargs['identityData']) == type(u'') or type(kwargs['identityData']) == type(''):
identityData = self.getManagedNode(kwargs['identityData'])
elif type(kwargs['identityData']) == type({}):
identityData = kwargs['identityData']
else:
raise InvalidIdentity("A valid server Identity was not given nor was a public_key specified.")
super(FriendNode, self).__init__(*args, **kwargs)
self.set('routed_public_key', kwargs['acceptor']['public_key'], True)
self.set('source_indexer_key', kwargs['requester']['public_key'], True)
if 'connector' in kwargs:
self.set('public_key', kwargs['connector']['public_key'])
self.set('private_key', kwargs['connector']['private_key'])
self.setModifiedToNow()
def validIdentity(self, data):
try:
if 'public_key' in data \
and 'private_key' in data \
and 'modified' in data \
and 'data' in data \
and 'friends' in data['data'] \
            and 'identity' in data['data'] \
and 'name' in data['data']['identity'] \
and 'avatar' in data['data']['identity']:
return True
else:
raise InvalidIdentity("invalid identity dictionary for identity")
except InvalidIdentity:
raise
class RoutedFriendNode(FriendNode):
def __init__(self, *args, **kwargs):
if 'identityData' in kwargs:
identityData = kwargs['identityData']
else:
identityData = args[0]
kwargs['identityData'] = identityData
try:
newIdentity = args[1]
except:
newIdentity = None
if type(kwargs['identityData']) == type(u'') or type(kwargs['identityData']) == type(''):
identityData = self.getFriend(kwargs['identityData'])
elif type(kwargs['identityData']) == type({}):
identityData = kwargs['identityData']
else:
raise InvalidIdentity("A valid server Identity was not given nor was a public_key specified.")
super(RoutedFriendNode, self).__init__(*args, **kwargs)
def validIdentity(self, data):
try:
if 'public_key' in data \
and 'private_key' in data \
and 'source_indexer_key' in data \
and 'routed_public_key' in data \
and 'modified' in data \
and 'data' in data \
and 'friends' in data['data'] \
and 'identity' in data['data'] \
and 'name' in data['data']['identity']:
return True
else:
raise InvalidIdentity("invalid identity dictionary for identity")
except InvalidIdentity:
raise
|
MuhammadVT/davitpy
|
davitpy/pydarn/proc/music/music.py
|
Python
|
gpl-3.0
| 84,879 | 0.014338 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""music processing module
A module for running the MUltiple SIgnal Classification (MUSIC) algorithm for the detection of
MSTIDs and wave-like structures in SuperDARN data.
For usage examples, please see the iPython notebooks included in the docs folder of the DaViTPy distribution.
References
----------
See Samson et al. [1990] and Bristow et al. [1994] for details regarding the MUSIC algorithm and SuperDARN-observed MSTIDs.
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Samson, J. C., R. A. Greenwald, J. M. Ruohoniemi, A. Frey, and K. B. Baker (1990), Goose Bay radar observations of Earth-reflected,
atmospheric gravity waves in the high-latitude ionosphere, J. Geophys. Res., 95(A6), 7693-7709, doi:10.1029/JA095iA06p07693.
Module author:: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------------------------------------------------------------------------------
getDataSet get music data object from music array object
stringify_signal convert dictionary to a string
stringify_signal_list convert list of dictionaries into strings
beamInterpolation interpolate music array object along beams
defineLimits set limits for chosen data set
checkDataQuality mark data as bad base on radar operations
applyLimits remove data outside of limits
determineRelativePosition find center of cell in music array object
timeInterpolation interpolate music array object along time
filterTimes calculate time range for data set
detrend linear detrend of music array/data object
nan_to_num convert undefined numbers to finite numbers
windowData apply window to music array object
calculateFFT calculate spectrum of an object
calculateDlm calculate the cross-spectral matrix of a musicArray/musicDataObj object.
calculateKarr calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
simulator insert a simulated MSTID into the processing chain.
scale_karr scale/normalize kArr for plotting and signal detection.
detectSignals detect local maxima of signals
add_signal add signal to detected signal list
del_signal remove signal from detected signal list
--------------------------------------------------------------------------------------------------------------------------
Classes
-----------------------------------------------------------
emptyObj create an empty object
SigDetect information about detected signals
musicDataObj basic container for holding MUSIC data.
musicArray container object for holding musicDataObj's
filter a filter object for VT sig/siStruct objects
-----------------------------------------------------------
"""
import numpy as np
import datetime
import time
import copy
import logging
Re = 6378 #Earth radius
def getDataSet(dataObj,dataSet='active'):
"""Returns a specified musicDataObj from a musicArray object. If the musicArray object has the exact attribute
specified in the dataSet keyword, then that attribute is returned. If not, all attributes of the musicArray object
will be searched for attributes which contain the string specified in the dataSet keyword. If more than one are
found, the last attribute of a sorted list will be returned. If no attributes are found which contain the specified
string, the 'active' dataSet is returned.
Parameters
---------
|
-
dataObj : musicArray
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj object
    Written by Nathaniel A. Frissell, Fall 2013
"""
lst = dir(dataObj)
if dataSet not in lst:
tmp = []
for item in lst:
if dataSet in item:
tmp.append(item)
if len(tmp) == 0:
dataSet = 'active'
else:
tmp.sort()
dataSet = tmp[-1]
currentData = getattr(dataObj,dataSet)
return currentData
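# --- Hedged illustration, not part of the original module ---
# getDataSet() falls back to substring matching on the musicArray attribute names; with
# hypothetical attributes DS000_originalFit and DS001_beamInterpolated on dataObj:
#   getDataSet(dataObj, 'beamInterpolated')  ->  dataObj.DS001_beamInterpolated
#   getDataSet(dataObj, 'noSuchDataSet')     ->  dataObj.active (the fallback)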
class emptyObj(object):
"""Create an empty object.
"""
def __init__(self):
pass
def stringify_signal(sig):
"""Method to convert a signal information dictionary into a string.
Parameters
----------
sig : dict
Information about a detected signal.
Returns
-------
sigInfo : str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
sigInfo = {}
if sig.has_key('order'):
sigInfo['order'] = '%d' % sig['order'] #Order of signals by strength as detected by image detection algorithm
if sig.has_key('kx'):
sigInfo['kx'] = '%.5f' % sig['kx']
if sig.has_key('ky'):
sigInfo['ky'] = '%.5f' % sig['ky']
if sig.has_key('k'):
sigInfo['k'] = '%.3f' % sig['k']
if sig.has_key('lambda'):
if np.isinf(sig['lambda']):
sigInfo['lambda'] = 'inf'
else:
sigInfo['lambda'] = '%d' % np.round(sig['lambda']) # km
if sig.has_key('lambda_x'):
if np.isinf(sig['lambda_x']):
sigInfo['lambda_x'] = 'inf'
else:
sigInfo['lambda_x'] = '%d' % np.round(sig['lambda_x']) # km
if sig.has_key('lambda_y'):
if np.isinf(sig['lambda_y']):
sigInfo['lambda_y'] = 'inf'
else:
sigInfo['lambda_y'] = '%d' % np.round(sig['lambda_y']) # km
if sig.has_key('azm'):
sigInfo['azm'] = '%d' % np.round(sig['azm']) # degrees
if sig.has_key('freq'):
sigInfo['freq'] = '%.2f' % (sig['freq']*1000.) # mHz
if sig.has_key('period'):
sigInfo['period'] = '%d' % np.round(sig['period']/60.) # minutes
if sig.has_key('vel'):
if np.isinf(np.round(sig['vel'])):
sigInfo['vel'] = 'Inf'
else:
sigInfo['vel'] = '%d' % np.round(sig['vel']) # km/s
if sig.has_key('area'):
sigInfo['area'] = '%d' % sig['area'] # Pixels
if sig.has_key('max'):
sigInfo['max'] = '%.4f' % sig['max'] # Value from kArr in arbitrary units, probably with some normalization
if sig.has_key('maxpos'):
sigInfo['maxpos'] = str(sig['maxpos']) # Index position in kArr of maximum value.
if sig.has_key('labelInx'):
sigInfo['labelInx'] = '%d' % sig['labelInx'] # Label value from image processing
if sig.has_key('serialNr'):
sigInfo['serialNr'] = '%d' % sig['serialNr'] # Label value from image processing
return sigInfo
def stringify_signal_list(signal_list,sort_key='order'):
"""Method to convert a list of signal dictionaries into strings.
Parameters
----------
signal_list : list of dict
Information about a detected signal.
sort_key : Optional[string]
Dictionary key to sort on, or None for no sort. 'order' will sort the signal list
|
mileistone/test
|
utils/test/setup.py
|
Python
|
mit
| 5,817 | 0.003094 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import subprocess
import numpy as np
def find_in_path(name, path):
"Find a file in a search path"
# Adapted fom
# http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on."""
    # tell the compiler it can process .cu
    self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with
# gcc the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
Extension(
'pycocotools._mask',
sources=['pycocotools/maskApi.c', 'pycocotools/_mask.pyx'],
include_dirs = [numpy_include, 'pycocotools'],
extra_compile_args={
'gcc': ['-Wno-cpp', '-Wno-unused-function', '-std=c99']},
),
]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
jamespcole/home-assistant | homeassistant/components/telegram_bot/polling.py | Python | apache-2.0 | 3,026 | 0 |
"""Support for Telegram bot using polling."""
import logging
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from . import (
CONF_ALLOWED_CHAT_IDS, PLATFORM_SCHEMA as TELEGRAM_PLATFORM_SCHEMA,
BaseTelegramBotEntity, initialize_bot)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = TELEGRAM_PLATFORM_SCHEMA
async def async_setup_platform(hass, config):
"""Set up the Telegram polling platform."""
bot = initialize_bot(config)
    pol = TelegramPoll(bot, hass, config[CONF_ALLOWED_CHAT_IDS])
@callback
def _start_bot(_event):
"""Start the bot."""
pol.start_polling()
@callback
def _stop_bot(_event):
"""Stop the bot."""
pol.stop_polling()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _start_bot)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_bot)
return True
def process_error(bot, update, error):
"""Telegram bot error handler."""
from telegram.error import (
TelegramError, TimedOut, NetworkError, RetryAfter)
try:
raise error
except (TimedOut, NetworkError, RetryAfter):
# Long polling timeout or connection problem. Nothing serious.
pass
except TelegramError:
_LOGGER.error('Update "%s" caused error "%s"', update, error)
def message_handler(handler):
"""Create messages handler."""
from telegram import Update
from telegram.ext import Handler
class MessageHandler(Handler):
"""Telegram bot message handler."""
def __init__(self):
"""Initialize the messages handler instance."""
super().__init__(handler)
def check_update(self, update): # pylint: disable=no-self-use
"""Check is update valid."""
return isinstance(update, Update)
def handle_update(self, update, dispatcher):
"""Handle update."""
optional_args = self.collect_optional_args(dispatcher, update)
return self.callback(dispatcher.bot, update, **optional_args)
return MessageHandler()
class TelegramPoll(BaseTelegramBotEntity):
"""Asyncio telegram incoming message handler."""
def __init__(self, bot, hass, allowed_chat_ids):
"""Initialize the polling instance."""
from telegram.ext import Updater
BaseTelegramBotEntity.__init__(self, hass, allowed_chat_ids)
self.updater = Updater(bot=bot, workers=4)
self.dispatcher = self.updater.dispatcher
self.dispatcher.add_handler(message_handler(self.process_update))
self.dispatcher.add_error_handler(process_error)
def start_polling(self):
"""Start the polling task."""
self.updater.start_polling()
def stop_polling(self):
"""Stop the polling task."""
self.updater.stop()
def process_update(self, bot, update):
"""Process incoming message."""
self.process_message(update.to_dict())
scheib/chromium | third_party/blink/renderer/bindings/scripts/bind_gen/style_format.py | Python | bsd-3-clause | 3,817 | 0 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import subprocess
import sys
_enable_style_format = None
_clang_format_command_path = None
_gn_command_path = None
def init(root_src_dir, enable_style_format=True):
assert isinstance(root_src_dir, str)
assert isinstance(enable_style_format, bool)
global _enable_style_format
global _clang_format_command_path
global _gn_command_path
assert _enable_style_format is None
assert _clang_format_command_path is None
    assert _gn_command_path is None
_enable_style_format = enable_style_format
root_src_dir = os.path.abspath(root_src_dir)
# Determine //buildtools/<platform>/ directory
if sys.platform.startswith("linux"):
platform = "linux64"
exe_suffix = ""
elif sys.platform.startswith("darwin"):
platform = "mac"
exe_suffix = ""
elif sys.platform.startswith(("cygwin", "win")):
platform = "win"
exe_suffix = ".exe"
else:
        assert False, "Unknown platform: {}".format(sys.platform)
buildtools_platform_dir = os.path.join(root_src_dir, "buildtools",
platform)
# //buildtools/<platform>/clang-format
_clang_format_command_path = os.path.join(
buildtools_platform_dir, "clang-format{}".format(exe_suffix))
# //buildtools/<platform>/gn
_gn_command_path = os.path.join(buildtools_platform_dir,
"gn{}".format(exe_suffix))
def auto_format(contents, filename):
assert isinstance(filename, str)
_, ext = os.path.splitext(filename)
if ext in (".gn", ".gni"):
return gn_format(contents, filename)
return clang_format(contents, filename)
def clang_format(contents, filename=None):
command_line = [_clang_format_command_path]
if filename is not None:
command_line.append('-assume-filename={}'.format(filename))
return _invoke_format_command(command_line, filename, contents)
def gn_format(contents, filename=None):
command_line = [_gn_command_path, "format", "--stdin"]
if filename is not None:
command_line.append('-assume-filename={}'.format(filename))
return _invoke_format_command(command_line, filename, contents)
def _invoke_format_command(command_line, filename, contents):
if not _enable_style_format:
return StyleFormatResult(stdout_output=contents,
stderr_output="",
exit_code=0,
filename=filename)
kwargs = {}
if sys.version_info.major != 2:
kwargs['encoding'] = 'utf-8'
proc = subprocess.Popen(command_line,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
**kwargs)
stdout_output, stderr_output = proc.communicate(input=contents)
exit_code = proc.wait()
return StyleFormatResult(
stdout_output=stdout_output,
stderr_output=stderr_output,
exit_code=exit_code,
filename=filename)
class StyleFormatResult(object):
def __init__(self, stdout_output, stderr_output, exit_code, filename):
self._stdout_output = stdout_output
self._stderr_output = stderr_output
self._exit_code = exit_code
self._filename = filename
@property
def did_succeed(self):
return self._exit_code == 0
@property
def contents(self):
assert self.did_succeed
return self._stdout_output
@property
def error_message(self):
return self._stderr_output
@property
def filename(self):
return self._filename
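# Editor's addition: a hedged usage sketch, not part of the original Chromium
# module. The root_src_dir path below is a placeholder, and clang-format must
# exist under //buildtools/<platform>/ for the call to succeed.
if __name__ == '__main__':
    init('/path/to/chromium/src', enable_style_format=True)
    result = auto_format('int  main( ){return 0;}\n', filename='example.cc')
    if result.did_succeed:
        print(result.contents)      # formatted source from clang-format
    else:
        print(result.error_message)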
blutack/mavlog | docs/conf.py | Python | bsd-3-clause | 8,361 | 0.005502 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import mavlog
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MAVLog'
copyright = u'2014, Gareth R'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mavlog.__version__
# The full version, including alpha/beta/rc tags.
release = mavlog.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mavlogdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'mavlog.tex',
u'MAVLog Documentation',
u'Gareth R', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mavlog',
u'MAVLog Documentation',
[u'Gareth R'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mavlog',
u'MAVLog Documentation',
u'Gareth R',
'mavlog',
   'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
iseppi/zookeepr | alembic/versions/20_58ee75910929_add_theme_to_config_.py | Python | gpl-2.0 | 603 | 0.006633 |
"""Add theme to config
Revision ID: 58ee75910929
Revises: 1c22ceb384a7
Create Date: 2015-08-28 15:15:47.971807
"""
# revision identifiers, used by Alembic.
revision = '58ee75910929'
down_revision = '1c22ceb384a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.execute("INSERT INTO config (category, key, value, description) VALUES ('general', 'theme', '\"zkpylons\"', 'The enabled theme to use. Should match the theme folder name (requires a server restart to take effect)')")
def downgrade():
    op.execute("DELETE FROM config WHERE category='general' AND key='theme'")
CatoTH/OpenSlides | server/openslides/motions/migrations/0005_auto_20180202_1318.py | Python | mit | 2,199 | 0.00091 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-02 12:18
from __future__ import unicode_literals
from django.contrib.auth.models import Permission
from django.db import migrations
def delete_old_comment_permission(apps, schema_editor):
"""
Deletes the old 'can_see_and_manage_comments' permission which is
    split up into two separate permissions.
"""
perm = Permission.objects.filter(codename="can_see_and_manage_comments")
if len(perm):
perm = perm.get()
# Save content_type for manual creation of new permissions.
content_type = perm.content_type
        # Save groups. list() is necessary to evaluate the database query right now.
groups = list(perm.group_set.all())
# Delete permission
perm.delete()
# Create new permission
perm_see = Permission.objects.create(
codename="can_see_comments",
name="Can see comments",
content_type=content_type,
)
perm_manage = Permission.objects.create(
codename="can_manage_comments",
name="Can manage comments",
content_type=content_type,
)
for group in groups:
group.permissions.add(perm_see)
group.permissions.add(perm_manage)
group.save()
class Migration(migrations.Migration):
dependencies = [("motions", "0004_motionchangerecommendation_other_description")]
operations = [
migrations.AlterModelOptions(
name="motion",
options={
"default_permissions": (),
"ordering": ("identifier",),
"permissions": (
("can_see", "Can see motions"),
("can_create", "Can create motions"),
("can_support", "Can support motions"),
("can_see_comments", "Can see comments"),
("can_manage_comments", "Can manage comments"),
("can_manage", "Can manage motions"),
),
"verbose_name": "Motion",
},
),
migrations.RunPython(delete_old_comment_permission),
]
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/template/backends/jinja2.py | Python | artistic-2.0 | 3,342 | 0.000598 |
# Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
import jinja2
from django.conf import settings
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.utils import six
from django.utils.module_loading import import_string
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class Jinja2(BaseEngine):
app_dirname = 'jinja2'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(Jinja2, self).__init__(params)
environment = options.pop('environment', 'jinja2.Environment')
environment_cls = import_string(environment)
options.setdefault('autoescape', True)
options.setdefault('loader', jinja2.FileSystemLoader(self.template_dirs))
options.setdefault('auto_reload', settings.DEBUG)
options.setdefault('undefined',
jinja2.DebugUndefined if settings.DEBUG else jinja2.Undefined)
self.env = environment_cls(**options)
def from_string(self, template_code):
return Template(self.env.from_string(template_code))
def get_template(self, template_name):
try:
return Template(self.env.get_template(template_name))
except jinja2.TemplateNotFound as exc:
six.reraise(
TemplateDoesNotExist,
TemplateDoesNotExist(exc.name, backend=self),
sys.exc_info()[2],
)
        except jinja2.TemplateSyntaxError as exc:
new = TemplateSyntaxError(exc.args)
            new.template_debug = get_exception_info(exc)
six.reraise(TemplateSyntaxError, new, sys.exc_info()[2])
class Template(object):
def __init__(self, template):
self.template = template
self.origin = Origin(
name=template.filename, template_name=template.name,
)
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template.render(context)
class Origin(object):
"""
A container to hold debug information as described in the template API
documentation.
"""
def __init__(self, name, template_name):
self.name = name
self.template_name = template_name
def get_exception_info(exception):
"""
Formats exception information for display on the debug page using the
structure described in the template API documentation.
"""
context_lines = 10
lineno = exception.lineno
lines = list(enumerate(exception.source.strip().split("\n"), start=1))
during = lines[lineno - 1][1]
total = len(lines)
top = max(0, lineno - context_lines - 1)
bottom = min(total, lineno + context_lines)
return {
'name': exception.filename,
'message': exception.message,
'source_lines': lines[top:bottom],
'line': lineno,
'before': '',
'during': during,
'after': '',
'total': total,
'top': top,
'bottom': bottom,
}
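# Editor's addition: a hedged configuration sketch, not part of the original
# Django source. It shows how this backend is normally selected from a
# project's settings.py; 'myproject.jinja2.environment' is a placeholder dotted
# path to a project-defined environment factory, not a real module.
EXAMPLE_TEMPLATES_SETTING = [
    {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            # Resolved with import_string() in Jinja2.__init__ above.
            'environment': 'myproject.jinja2.environment',
        },
    },
]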
eltonkevani/tempest_el_env | tempest/api/object_storage/test_container_quotas.py | Python | apache-2.0 | 4,616 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import HTTP_SUCCESS
QUOTA_BYTES = 10
QUOTA_COUNT = 3
SKIP_MSG = "Container quotas middleware not available."
class ContainerQuotasTest(base.BaseObjectTest):
    """Attempts to test the perfect behavior of quotas in a container."""
container_quotas_available = \
config.TempestConfig().object_storage_feature_enabled.container_quotas
def setUp(self):
"""Creates and sets a container with quotas.
Quotas are set by adding meta values to the container,
and are validated when set:
- X-Container-Meta-Quota-Bytes:
Maximum size of the container, in bytes.
- X-Container-Meta-Quota-Count:
Maximum object count of the container.
"""
super(ContainerQuotasTest, self).setUp()
self.container_name = data_utils.rand_name(name="TestContainer")
self.container_client.create_container(self.container_name)
metadata = {"quota-bytes": str(QUOTA_BYTES),
"quota-count": str(QUOTA_COUNT), }
self.container_client.update_container_metadata(
self.container_name, metadata)
def tearDown(self):
"""Cleans the container of any object after each test."""
self.delete_containers([self.container_name])
super(ContainerQuotasTest, self).tearDown()
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
    def test_upload_valid_object(self):
        """Attempts to upload an object smaller than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES)
nbefore = self._get_bytes_used()
resp, _ = self.object_client.create_object(
self.container_name, object_name, data)
self.assertIn(int(resp['status']), HTTP_SUCCESS)
nafter = self._get_bytes_used()
self.assertEqual(nbefore + len(data), nafter)
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
    def test_upload_large_object(self):
        """Attempts to upload an object larger than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES + 1)
        nbefore = self._get_bytes_used()
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
nafter = self._get_bytes_used()
self.assertEqual(nbefore, nafter)
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
    def test_upload_too_many_objects(self):
        """Attempts to upload more objects than the count limit allows."""
for _ in range(QUOTA_COUNT):
name = data_utils.rand_name(name="TestObject")
self.object_client.create_object(self.container_name, name, "")
nbefore = self._get_object_count()
self.assertEqual(nbefore, QUOTA_COUNT)
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, "OverQuotaObject", "")
nafter = self._get_object_count()
self.assertEqual(nbefore, nafter)
def _get_container_metadata(self):
resp, _ = self.container_client.list_container_metadata(
self.container_name)
return resp
def _get_object_count(self):
resp = self._get_container_metadata()
return int(resp["x-container-object-count"])
def _get_bytes_used(self):
resp = self._get_container_metadata()
return int(resp["x-container-bytes-used"])
FrostyX/pysysinfo | doc/conf.py | Python | gpl-2.0 | 8,133 | 0.006271 |
# -*- coding: utf-8 -*-
#
# pysysinfo documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 6 16:05:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysysinfo'
copyright = u'2015, FrostyX'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sysinfodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pysysinfo.tex', u'pysysinfo Documentation',
u'FrostyX', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysysinfo', u'pysysinfo Documentation',
[u'FrostyX'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysysinfo', u'pysysinfo Documentation',
u'FrostyX', 'pysysinfo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
TAMU-CPT/galaxy-tools | tools/webapollo/list_organism_data.py | Python | gpl-3.0 | 951 | 0.001052 |
#!/usr/bin/env python
import json
import argparse
from webapollo import WAAuth, WebApolloInstance, AssertUser, accessible_organisms
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="List all organisms available in an Apollo instance"
)
WAAuth(parser)
parser.add_argument("email", help="User Email")
args = parser.parse_args()
wa = WebApolloInstance(args.apollo, args.username, args.password)
gx_user = AssertUser(wa.users.loadUsers(email=args.email))
    all_orgs = wa.organisms.findAllOrganisms()
    orgs = accessible_organisms(gx_user, all_orgs)
cleanedOrgs = []
for organism in all_orgs:
org = {
"name": organism["commonName"],
"id": organism["id"],
"annotations": organism["annotationCount"],
"sequences": organism["sequences"],
}
cleanedOrgs.append(org)
print(json.dumps(cleanedOrgs, indent=2))
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim_parcel/models/building_location_choice_model.py | Python | gpl-2.0 | 5,528 | 0.009949 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.models.building_location_choice_model import BuildingLocationChoiceModel as UrbansimBuildingLocationChoiceModel
from numpy import where, arange, zeros
from numpy import logical_or, logical_not
from opus_core.variables.variable_name import VariableName
from opus_core.resources import Resources
from opus_core.datasets.dataset import Dataset
class BuildingLocationChoiceModel(UrbansimBuildingLocationChoiceModel):
# def get_weights_for_sampling_locations(self, agent_set, agents_index, data_objects=None):
# where_developable = where(self.apply_filter(self.filter, None, agent_set, agents_index, data_objects=data_objects))[0]
# weight_array = ones((where_developable.size), dtype=int8) #.astype(bool8)
# return (weight_array, where_developable)
def get_weights_for_sampling_locations_for_estimation(self, agent_set, agents_index):
if self.run_config.get("agent_units_string", None): # needs to be corrected
agent_set.compute_variables(self.run_config["agent_units_string"], dataset_pool=self.dataset_pool)
return self.get_weights_for_sampling_locations(agent_set, agents_index)
def prepare_for_estimate(self, add_member_prefix=True,
specification_dict=None,
specification_storage=None,
specification_table=None,
building_set=None,
buildings_for_estimation_storage=None,
buildings_for_estimation_table=None,
constants=None, base_year=0,
building_categories=None,
location_id_variable=None,
join_datasets=False,
data_objects=None, **kwargs):
# buildings = None
if (building_set is not None):
if location_id_variable is not None:
building_set.compute_variables(location_id_variable, resources=Resources(data_objects))
# create agents for estimation
if buildings_for_estimation_storage is not None:
estimation_set = Dataset(in_storage=buildings_for_estimation_storage,
in_table_name=buildings_for_estimation_table,
id_name=building_set.get_id_name(),
dataset_name=building_set.get_dataset_name())
if location_id_variable:
estimation_set.compute_variables(location_id_variable,
resources=Resources(data_objects))
# needs to be a primary attribute because of the join method below
estimation_set.add_primary_attribute(estimation_set.get_attribute(location_id_variable),
VariableName(location_id_variable).alias())
years = estimation_set.get_attribute("scheduled_year")
recent_years = constants['recent_years']
indicator = zeros(estimation_set.size(), dtype="int32")
for year in range(base_year-recent_years, base_year+1):
indicator = logical_or(indicator, years==year)
idx = where(logical_not(indicator))[0]
estimation_set.remove_elements(idx)
#if filter:
#estimation_set.compute_variables(filter, resources=Resources(data_objects))
#index = where(estimation_set.get_attribute(filter) > 0)[0]
#estimation_set.subset_by_index(index, flush_attributes_if_not_loaded=False)
if join_datasets:
building_set.join_by_rows(estimation_set,
require_all_attributes=False,
change_ids_if_not_unique=True)
index = arange(building_set.size()-estimation_set.size(), building_set.size())
else:
index = building_set.get_id_index(estimation_set.get_id_attribute())
else:
if building_set is not None:
index = arange(building_set.size())
else:
index = None
if add_member_prefix:
specification_table = self.group_member.add_member_prefix_to_table_names([specification_table])
from opus_core.model import get_specification_for_estimation
#from urbansim.functions import compute_supply_and_add_to_location_set
specification = get_specification_for_estimation(specification_dict,
specification_storage,
specification_table)
#specification, dummy = AgentLocationChoiceModelMember.prepare_for_estimate(self, add_member_prefix,
#specification_dict, specification_storage,
#specification_table,
#location_id_variable=location_id_variable,
#data_objects=data_objects, **kwargs)
return (specification, index)
dugan/coverage-reporter | coverage_reporter/extras/unittest2_plugin.py | Python | mit | 1,511 | 0.003971 |
from unittest2.events import Plugin, addOption
from unittest2.util import getSource
import os
import sys
try:
    import coverage
except ImportError, e:
coverage = None
coverageImportError = e
help_text1 = 'Enable coverage reporting'
class CoveragePlugin(Plugin):
configSection = 'coverage'
commandLineSwitch = ('C', 'coverage', help_text1)
def __init__(self):
self.configFile = self.config.get('config', '').strip() or True
self.branch = self.config.as_bool('branch', default=None)
self.timid = self.config.as_bool('timid', default=False)
self.cover_pylib = self.config.as_bool('cover-pylib', default=False)
self.excludeLines = self.config.as_list('exclude-lines', default=[])
self.ignoreErrors = self.config.as_bool('ignore-errors', default=False)
def register(self):
if coverage is None:
raise coverageImportError
Plugin.register(self)
def pluginsLoaded(self, event):
args = dict(
config_file=self.configFile,
cover_pylib=self.cover_pylib,
branch=self.branch,
timid=self.timid,
)
self.cov = coverage.coverage(**args)
self.cov.erase()
self.cov.exclude('#pragma:? *[nN][oO] [cC][oO][vV][eE][rR]')
for line in self.excludeLines:
self.cov.exclude(line)
self.cov.start()
def stopTestRun(self, event):
self.cov.stop()
self.cov.save()
nuagenetworks/nuage-openstack-horizon | nuage_horizon/dashboards/project/gateways/urls.py | Python | apache-2.0 | 1,078 | 0 |
# Copyright 2020 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import url
from nuage_horizon.dashboards.project.gateways.ports import urls as port_urls
from nuage_horizon.dashboards.project.gateways import views as gw_views
GATEWAY = r'^(?P<gateway_id>[^/]+)/%s'
urlpatterns = [
url(r'^$', gw_views.IndexView.as_view(), name='index'),
url(GATEWAY % '$', gw_views.DetailView.as_view(), name='detail'),
url(r'^ports/', include((port_urls, 'ports'), namespace='ports')),
]
samvarankashyap/linch-pin | linchpin/tests/mockdata/contextdata.py | Python | gpl-3.0 | 4,450 | 0.002472 |
from __future__ import absolute_import
import os
import tempfile
from six.moves import configparser as ConfigParser
from six import iteritems
from linchpin.exceptions import LinchpinError
"""
Provide valid context data to test against.
"""
class ContextData(object):
def __init__(self, parser=ConfigParser.ConfigParser):
self.lib_path = '{0}'.format(os.path.dirname(
os.path.realpath(__file__))).rstrip('/')
current_path = os.path.dirname(os.path.realpath(__file__))
constants_path = '{0}/../../'.format(current_path)
self.constants_path = '{0}'.format(os.path.dirname(
constants_path)).rstrip('/')
self.logfile = tempfile.mktemp(suffix='.log', prefix='linchpin')
self.parser = parser()
self.cfg_data = {}
self._load_constants()
def _load_constants(self):
"""
Create self.cfgs with defaults from the linchpin constants file.
"""
constants_file = '{0}/linchpin.constants'.format(self.constants_path)
constants_file = os.path.realpath(os.path.expanduser(constants_file))
self._parse_config(constants_file)
def load_config_data(self, provider='dummy'):
"""
Load a test-based linchpin.conf into both a configs and evars
dictionary to represent a configuration file
"""
expanded_path = None
config_found = False
# simply modify this variable to adjust where linchpin.conf can be found
CONFIG_PATH = [
'{0}/{1}/conf/linchpin.conf'.format(self.lib_path, provider)
]
for path in CONFIG_PATH:
expanded_path = (
"{0}".format(os.path.realpath(os.path.expanduser(path))))
if os.path.exists(expanded_path):
self._parse_config(expanded_path)
# override logger file
self.cfg_data['logger'] = dict()
self.cfg_data['logger']['file'] = self.logfile
self.evars = self.cfg_data.get('evars', {})
def _parse_config(self, path):
"""
Parse configs into the self.cfg_data dict from provided path.
:param path: A path to a config to parse
"""
try:
config = ConfigParser.ConfigParser()
f = open(path)
config.readfp(f)
f.close()
for section in config.sections():
if not self.cfg_data.get(section):
self.cfg_data[section] = {}
for k in config.options(section):
if section == 'evars':
try:
self.cfg_data[section][k] = (
config.getboolean(section, k)
)
except ValueError:
self.cfg_data[section][k] = config.get(section, k)
else:
try:
self.cfg_data[section][k] = config.get(section, k)
except ConfigParser.InterpolationMissingOptionError:
value = config.get(section, k, raw=True)
self.cfg_data[section][k] = value.replace('%%', '%')
except ConfigParser.InterpolationSyntaxError as e:
raise LinchpinError('Unable to parse configuration file properly:'
' {0}'.format(e))
def get_temp_filename(self):
tmpfile = tempfile.NamedTemporaryFile(delete=False).name
return tmpfile
def write_config_file(self, path):
try:
with open(path, 'a+') as f:
self.parser.write(f)
except Exception as e:
raise LinchpinError('Unable to write configuration file:'
' {0}'.format(e))
def create_config(self, config_data=None):
"""
Creates a config object using ConfigParser from the config_data object
"""
if not config_data:
config_data = self.cfg_data
# we know that data is a dict, containing dicts
try:
for k, v in iteritems(config_data):
self.parser.add_section(k)
for kv, vv in iteritems(v):
if type(vv) is not str:
vv = str(vv)
self.parser.set(k, kv, vv)
except ValueError:
pass
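# Editor's addition: a usage sketch, not part of the original test helpers.
# It assumes the repository layout ContextData expects (a 'dummy' provider
# config and the linchpin.constants file) is present on disk.
if __name__ == '__main__':
    ctx = ContextData()
    ctx.load_config_data(provider='dummy')
    ctx.create_config()                    # mirror cfg_data into the ConfigParser
    out_path = ctx.get_temp_filename()
    ctx.write_config_file(out_path)        # dump the parsed config back to disk
    print('{0}: {1}'.format(out_path, sorted(ctx.cfg_data.keys())))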
jablonskim/jupyweave | jupyweave/settings/output_types.py | Python | mit | 1,920 | 0.000521 |
from enum import Enum
import re
class OutputTypes:
"""Class representing visible output types"""
class Types(Enum):
"""Types"""
Stdout = 1
Stderr = 2
Result = 3
Image = 4
Pdf = 5
def __init__(self, types_str):
"""Initialization from string"""
self.__types = self.__parse_types(types_str)
def is_enabled(self, type_str):
"""Checks if given type is visible"""
type_str = type_str.lower()
if type_str == 'stdout':
return OutputTypes.Types.Stdout in self.__types
if type_str == 'stderr':
return OutputTypes.Types.Stderr in self.__types
if 'text' in type_str:
return OutputTypes.Types.Result in self.__types
if 'image' in type_str:
return OutputTypes.Types.Image in self.__types
if 'application/pdf' in type_str:
return OutputTypes.Types.Pdf in self.__types
@staticmethod
def __parse_types(types_str):
"""Parses types"""
if types_str is None:
types_str = 'All'
types = set()
types_tokens = [token.lower() for token in re.findall(r'\w+', types_str)]
if 'stdout' in types_tokens:
types.add(OutputTypes.Types.Stdout)
if 'stderr' in types_tokens:
types.add(OutputTypes.Types.Stderr)
if 'result' in types_tokens:
types.add(OutputTypes.Types.Result)
if 'image' in types_tokens:
types.add(OutputTypes.Types.Image)
if 'pdf' in types_tokens:
types.add(OutputTypes.Types.Pdf)
if 'all' in types_tokens:
types.add(OutputTypes.Types.Stdout)
types.add(OutputTypes.Types.Stderr)
types.add(OutputTypes.Types.Result)
types.add(OutputTypes.Types.Image)
types.add(OutputTypes.Types.Pdf)
return types
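# Editor's addition: a short usage sketch, not part of the original module,
# showing how the parsed settings string answers is_enabled() queries.
if __name__ == '__main__':
    types = OutputTypes('stdout, image')
    print(types.is_enabled('stdout'))       # True
    print(types.is_enabled('image/png'))    # True, 'image' substring match
    print(types.is_enabled('stderr'))       # False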
tnwhitwell/lexicon | tests/providers/test_nsone.py | Python | mit | 972 | 0.003086 |
# Test for one implementation of the interface
from lexicon.providers.nsone import Provider
from integration_tests import IntegrationTests
from unittest import TestCase
import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
class Ns1ProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'nsone'
domain = 'lexicon-example.com'
def _filter_headers(self):
return ['X-NSONE-Key', 'Authorization']
@pytest.mark.skip(reason="can not set ttl when creating/updating records")
def test_Provider_when_calling_list_records_after_setting_ttl(self):
return
# TODO: this should be enabled
@pytest.mark.skip(reason="regenerating auth keys required")
def test_Provider_when_calling_update_record_should_modify_record_name_specified(self):
return
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/build/android/pylib/local/device/local_device_instrumentation_test_run.py | Python | mit | 6,414 | 0.011381 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import time
from devil.android import device_errors
from pylib import flag_changer
from pylib.base import base_test_result
from pylib.local.device import local_device_test_run
TIMEOUT_ANNOTATIONS = [
('Manual', 10 * 60 * 60),
('IntegrationTest', 30 * 60),
('External', 10 * 60),
('EnormousTest', 10 * 60),
('LargeTest', 5 * 60),
('MediumTest', 3 * 60),
('SmallTest', 1 * 60),
]
# TODO(jbudorick): Make this private once the instrumentation test_runner is
# deprecated.
def DidPackageCrashOnDevice(package_name, device):
# Dismiss any error dialogs. Limit the number in case we have an error
# loop or we are failing to dismiss.
try:
for _ in xrange(10):
      package = device.DismissCrashDialogIfNeeded()
if not package:
return False
# Assume test package convention of ".test" suffix
if package in package_name:
return True
except device_errors.CommandFailedError:
logging.exception('Error while attempting to dismiss crash dialog.')
return False
_CURRENT_FOCUS_CRASH_RE = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
class LocalDeviceInstrumentationTestRun(
    local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
super(LocalDeviceInstrumentationTestRun, self).__init__(env, test_instance)
self._flag_changers = {}
def TestPackage(self):
return None
def SetUp(self):
def substitute_external_storage(d, external_storage):
if not d:
return external_storage
elif isinstance(d, list):
return '/'.join(p if p else external_storage for p in d)
else:
return d
def individual_device_set_up(dev, host_device_tuples):
dev.Install(self._test_instance.apk_under_test,
permissions=self._test_instance.apk_under_test_permissions)
dev.Install(self._test_instance.test_apk,
permissions=self._test_instance.test_permissions)
for apk in self._test_instance.additional_apks:
dev.Install(apk)
external_storage = dev.GetExternalStoragePath()
host_device_tuples = [
(h, substitute_external_storage(d, external_storage))
for h, d in host_device_tuples]
logging.info('instrumentation data deps:')
for h, d in host_device_tuples:
logging.info('%r -> %r', h, d)
dev.PushChangedFiles(host_device_tuples)
if self._test_instance.flags:
if not self._test_instance.package_info:
logging.error("Couldn't set flags: no package info")
elif not self._test_instance.package_info.cmdline_file:
logging.error("Couldn't set flags: no cmdline_file")
else:
self._flag_changers[str(dev)] = flag_changer.FlagChanger(
dev, self._test_instance.package_info.cmdline_file)
logging.debug('Attempting to set flags: %r',
self._test_instance.flags)
self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
self._env.parallel_devices.pMap(
individual_device_set_up,
self._test_instance.GetDataDependencies())
def TearDown(self):
def individual_device_tear_down(dev):
if str(dev) in self._flag_changers:
self._flag_changers[str(dev)].Restore()
self._env.parallel_devices.pMap(individual_device_tear_down)
#override
def _CreateShards(self, tests):
return tests
#override
def _GetTests(self):
return self._test_instance.GetTests()
#override
def _GetTestName(self, test):
return '%s#%s' % (test['class'], test['method'])
#override
def _RunTest(self, device, test):
extras = self._test_instance.GetHttpServerEnvironmentVars()
if isinstance(test, list):
if not self._test_instance.driver_apk:
raise Exception('driver_apk does not exist. '
'Please build it and try again.')
def name_and_timeout(t):
n = self._GetTestName(t)
i = self._GetTimeoutFromAnnotations(t['annotations'], n)
return (n, i)
test_names, timeouts = zip(*(name_and_timeout(t) for t in test))
test_name = ','.join(test_names)
target = '%s/%s' % (
self._test_instance.driver_package,
self._test_instance.driver_name)
extras.update(
self._test_instance.GetDriverEnvironmentVars(
test_list=test_names))
timeout = sum(timeouts)
else:
test_name = self._GetTestName(test)
target = '%s/%s' % (
self._test_instance.test_package, self._test_instance.test_runner)
extras['class'] = test_name
timeout = self._GetTimeoutFromAnnotations(test['annotations'], test_name)
logging.info('preparing to run %s: %s', test_name, test)
time_ms = lambda: int(time.time() * 1e3)
start_ms = time_ms()
output = device.StartInstrumentation(
target, raw=True, extras=extras, timeout=timeout, retries=0)
duration_ms = time_ms() - start_ms
# TODO(jbudorick): Make instrumentation tests output a JSON so this
# doesn't have to parse the output.
logging.debug('output from %s:', test_name)
for l in output:
logging.debug(' %s', l)
result_code, result_bundle, statuses = (
self._test_instance.ParseAmInstrumentRawOutput(output))
results = self._test_instance.GenerateTestResults(
result_code, result_bundle, statuses, start_ms, duration_ms)
if DidPackageCrashOnDevice(self._test_instance.test_package, device):
for r in results:
if r.GetType() == base_test_result.ResultType.UNKNOWN:
r.SetType(base_test_result.ResultType.CRASH)
return results
#override
def _ShouldShard(self):
return True
@staticmethod
def _GetTimeoutFromAnnotations(annotations, test_name):
for k, v in TIMEOUT_ANNOTATIONS:
if k in annotations:
timeout = v
break
else:
logging.warning('Using default 1 minute timeout for %s', test_name)
timeout = 60
try:
scale = int(annotations.get('TimeoutScale', 1))
except ValueError as e:
logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
scale = 1
timeout *= scale
return timeout
fayf/pyload | module/plugins/hooks/SmoozedComHook.py | Python | gpl-3.0 | 876 | 0.025114 |
# -*- coding: utf-8 -*-
from module.plugins.internal.MultiHook import MultiHook
class SmoozedComHook(MultiHook):
__name__ = "SmoozedComHook"
__type__ = "hook"
__version__ = "0.04"
__status__ = "testing"
__config__ = [("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"),
("pluginlist" , "str" , "Plugin list (comma separated)", "" ),
("reload" , "bool" , "Reload plugin list" , True ),
                      ("reloadinterval", "int"                 , "Reload interval in hours"        , 12   )]
__description__ = """Smoozed.com hook plugin"""
__license__ = "GPLv3"
__authors__ = [("", "")]
def get_hosters(self):
user, info = self.account.select()
return self.account.get_data(user)['hosters']
yeleman/snisi | snisi_vacc/indicators.py | Python | mit | 4,346 | 0.00069 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import numpy
from snisi_core.models.Projects import Cluster
# from snisi_core.models.Reporting import ExpectedReporting
# from snisi_tools.caching import descendants_slugs
from snisi_core.indicators import Indicator, gen_report_indicator
from snisi_vacc.models import VaccCovR, AggVaccCovR
from snisi_tools.misc import import_path
logger = logging.getLogger(__name__)
cluster = Cluster.get_or_none("major_vaccine_monthly")
excludes = ['VaccinationIndicator', 'Indicator']
class VaccinationIndicator(Indicator):
INDIVIDUAL_CLS = VaccCovR
AGGREGATED_CLS = AggVaccCovR
def is_hc(self):
''' whether at HealthCenter/Source level or not (above) '''
return self.entity.type.slug == 'health_center'
def should_yesno(self):
return self.is_hc()
def sum_on_hc(self, field):
return sum(self.all_hc_values(field))
def all_hc_values(self, field):
return [getattr(r, field, None)
for r in self.report.indiv_sources.all()]
def sources_average(self, field):
return float(numpy.mean(self.all_hc_values(field)))
def inverse(self, value):
if value < 0:
return float(1 + numpy.abs(value))
else:
return float(1 - value)
gen_shortcut = lambda field, label=None: gen_report_indicator(
field, name=label, report_cls=VaccCovR,
base_indicator_cls=VaccinationIndicator)
gen_shortcut_agg = lambda field, label=None: gen_report_indicator(
field, name=label, report_cls=AggVaccCovR,
base_indicator_cls=VaccinationIndicator)
class BCGCoverage(VaccinationIndicator):
name = ("Taux de couverture BCG")
is_ratio = True
is_geo_friendly = True
geo_section = "Couverture"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.report.bcg_coverage
else:
return self.sources_average('bcg_coverage')
class Polio3Coverage(VaccinationIndicator):
name = ("Taux de couverture Penta-3")
is_ratio = True
is_geo_friendly = True
geo_section = "Couverture"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.report.polio3_coverage
else:
return self.sources_average('polio3_coverage')
class MeaslesCoverage(VaccinationIndicator):
name = ("Taux de couverture VAR-1")
is_ratio = True
is_geo_friendly = True
geo_section = "Couverture"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.report.measles_coverage
else:
return self.sources_average('measles_coverage')
class NonAbandonmentRate(VaccinationIndicator):
name = ("Taux de poursuite (non-abandon) Penta-1 / Penta-3")
is_ratio = True
is_geo_friendly = True
geo_section = "Abandons"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.inverse(self.report.polio3_abandonment_rate)
else:
return self.inverse(
self.sources_average('polio3_abandonment_rate'))
def is_indicator(module, member, only_geo=False):
ind = get_indicator(module, member)
if not getattr(ind, 'SNISI_INDICATOR', None) or member in excludes:
return False
if only_geo and not getattr(ind, 'is_geo_friendly', None):
return False
return True
def get_indicator(module, member):
if module is None:
return member
return getattr(module, member)
def get_geo_indicators():
indicators = {}
# section = get_section("map")
section = import_path('snisi_vacc.indicators')
for indicator_name in dir(section):
if not is_indicator(section, indicator_name, True):
continue
        indicator = import_path('snisi_vacc.indicators.{}'.format(indicator_name))
geo_section = getattr(indicator, 'geo_section', None)
if geo_section not in indicators.keys():
indicators.update({geo_section: []})
spec = indicator.spec()
spec.update({'slug': indicator.__name__})
indicators[geo_section].append(spec)
return indicators
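# Illustrative sketch (not part of the original module): a worked example of the
# ``inverse`` arithmetic used by NonAbandonmentRate above. An abandonment rate
# of 0.25 becomes a continuation rate of 0.75, while a (data-error) negative
# rate is folded back above 1. Values are chosen to be exact in binary floating
# point so the asserts hold.
if __name__ == '__main__':
    def _inverse_demo(value):
        if value < 0:
            return float(1 + numpy.abs(value))
        return float(1 - value)

    assert _inverse_demo(0.25) == 0.75
    assert _inverse_demo(-0.5) == 1.5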
|
TNT-Samuel/Coding-Projects
|
Image Test/_ImageEdit3MultiProcess.py
|
Python
|
gpl-3.0
| 17,506 | 0.00914 |
if __name__ == '__main__':
print("Loading Modules...")
from setuptools.command import easy_install
def install_with_easyinstall(package):
easy_install.main(["-U", package])
imported = False
tries = 0
while not imported:
try:
import socket, importlib
globals()['PIL'] = importlib.import_module('PIL')
imported = True
except Exception as ex:
print("An error occured when importing PIL: " + str(ex))
tries += 1
if tries == 6:
print("Install Failed.")
while True:
pass
print("Installing PIL... [Try " + str(tries) + "/5]")
try:
install_with_easyinstall('Pillow')
import site, imp
imp.reload(site)
print("PIL installed.")
except Exception as ex:
print("An error occured when installing PIL: " + str(ex))
import time, math, os, queue #, threading
from multiprocessing import Process, Queue, Value, Manager, Array
globals()["exitFlag"] = False
from tkinter import *
import PIL.Image
from PIL import ImageTk
if __name__ == '__main__':
print("All Modules Successfully Loaded!")
print("")
threadnumber = 2
time.sleep(0.5)
def process_data(threadName, q1, q2, im, qlock, ima, rfunc, rerrors, gfunc, gerrors, bfunc, berrors, percent, op):
import math
def funct_if(test,var_true,var_false):
if (test):
return var_true
else:
return var_false
def scale(var_old_min, var_old_max, var_new_min, var_new_max, var_value):
OldSRange = (var_old_max - var_old_min)
NewSRange = (var_new_max - var_new_min)
return (((var_value - var_old_min) * NewSRange) / OldSRange) + var_new_min
def is_even(value_to_test):
return value_to_test % 2 == 0
def draw_funct(dfunction, dxmin, dxmax, dymin, dymax, resolution):
dx = scale(0,canvas_width,dxmin,dxmax,x)
cdy = eval(dfunction)
dx = scale(0,canvas_width,dxmin,dxmax,x-resolution)
pdy = eval(dfunction)
dx = scale(0,canvas_width,dxmin,dxmax,x+resolution)
ndy = eval(dfunction)
cdsy = canvas_height - scale(dymin,dymax,0,canvas_height,cdy)
pdsy = canvas_height - scale(dymin,dymax,0,canvas_height,pdy)
ndsy = canvas_height - scale(dymin,dymax,0,canvas_height,ndy)
dyval = scale(0,canvas_height,dymin,dymax,y)
py = scale(dymin,dymax,0,canvas_height,dyval-resolution)
ny = scale(dymin,dymax,0,canvas_height,dyval+resolution)
#if y - cdsy > py - pdsy and y - cdsy < ny - ndsy:
#if (cdsy - y < pdsy - y and cdsy - y > ndsy - y) or (cdsy - y > pdsy - y and cdsy - y < ndsy - y):
if (0 < pdsy - y and 0 > ndsy - y) or (0 > pdsy - y and 0 < ndsy - y) or round(cdsy - y) == 0:
# print("dx: " + str(dx) + " , dy: " + str(dy))
# if y - dsy < resolution + 1 and y - dsy > 0-(resolution + 1): #round(dsy) == y:
return 255
else:
return 0
red = 0
green = 0
blue = 0
canvas_height = im.height
canvas_width = im.width
OldXRange = (canvas_width - 0)
OldYRange = (canvas_height - 0)
NewRange = (255 - 0)
def pix2index(xpix,ypix):
return ((((canvas_height - ypix - 1)*canvas_width) + (xpix)) * 3) - 3
def getpix(xval,yval):
pixindex = pix2index(xval,yval)
try:
rpix = ima[pixindex]
gpix = ima[pixindex + 1]
bpix = ima[pixindex + 2]
except:
print("ERROR WITH INDEX: " + str(pixindex))
while True:
pass
return (rpix,gpix,bpix)
def setpix(xval,yval,val):
pixindex = pix2index(xval,yval)
ima[pixindex] = val[0]
ima[pixindex + 1] = val[1]
ima[pixindex + 2] = val[2]
print("[" + str(threadName) + "] Started.")
# rfunccomp = eval('lambda: ' + globals()["rfunc"], locals())
# gfunccomp = eval('lambda: ' + globals()["gfunc"], locals())
# bfunccomp = eval('lambda: ' + globals()["bfunc"], locals())
while not im.exitFlag:
gotqdata = False
#queueLock.acquire()
if not q1.empty() and im.currq == 1:
try:
qlock.acquire()
datax = q1.get()
qlock.release()
gotqdata = True
except Exception as ex:
print("Q1Error: " +
|
str(ex))
elif not q2.empty() and im.currq == 2:
try:
qlock.acquire()
datax = q2.get()
qlock.release()
gotqdata = True
except Exception as ex:
print("Q2Error: " + str(ex))
else:
time.sleep(0.1)
if gotqdata:
#queueLock.release()
#print ("%s processing %s" % (threadName, data))
x = datax
#print("[" + str(threadName) + "] Processing " + str(x))
y = canvas_height
while y > 0:
y = y - 1
qlock.acquire()
im.tmppix = im.tmppix + 1
qlock.release()
#print("Solving: " + str(x) + "," + str(y))
value = getpix(x,y)
XValue = round((x * NewRange) / OldXRange)
YValue = round((y * NewRange) / OldYRange)
progress = 255 * (percent.value / 100)
if op == 1:
level = round((value[0]+value[1]+value[2]) / 3)
pixval = (level,level,level)
elif op == 2:
red = value[0]
green = value[1]
blue = value[2]
try:
# r = rfunccomp()
r = eval(rfunc, locals())
except Exception as ex:
print("An Error occured at pixel (" + str(x) + "," + str(y) + "), Colour: " + str(value) + " with the red function: " + rfunc)
print("Error: " + str(ex))
r = 0
rerrors.value = rerrors.value + 1
try:
# g = gfunccomp()
g = eval(gfunc, locals())
except Exception as ex:
print("An Error occured at pixel (" + str(x) + "," + str(y) + "), Colour: " + str(value) + " with the green function: " + gfunc)
print("Error: " + str(ex))
g = 0
gerrors.value = gerrors.value + 1
try:
# b = bfunccomp()
b = eval(bfunc, locals())
except Exception as ex:
print("An Error occured at pixel (" + str(x) + "," + str(y) + "), Colour: " + str(value) + " with the blue function: " + bfunc)
print("Error: " + str(ex))
b = 0
berrors.value = berrors.value + 1
if r < 0:
r = 0
if r > 255:
r = 255
if g < 0:
g = 0
if g > 255:
g = 255
if b < 0:
b = 0
if b > 255:
b = 255
#print(str(red) + "," + str(green) + "," + str(blue) + ";" + str(r) + "," + str(g) + "," + str(b))
pixval = (round(r),round(g),round(b))
else:
pixval = value
# print("Changing pixel (" + str(x) + "," + str(y) + ") from " + str(value) + " to " + str(pixval))
#print("Before: " + str(x) + "," + str(y) + ":" + str(getpix(x,y)))
setpix(x,y,pixval)
#print("After: " + str(x) + "," + str(y) + ":" + str(getpix(x,y)))
else:
#queueLock.release()
pass
#time.sleep(1)
print("[" + str(threadName) + "] Exiting.")
if __name__ == '__main__':
print("""Modes:
0: G
|
kalev/anaconda
|
pyanaconda/storage/deviceaction.py
|
Python
|
gpl-2.0
| 20,775 | 0.000722 |
# deviceaction.py
# Device modification action classes for anaconda's storage configuration
# module.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
from udev import *
import math
from devices import StorageDevice
from devices import PartitionDevice
from devices import LVMLogicalVolumeDevice
from formats import getFormat
from errors import *
from parted import partitionFlag, PARTITION_LBA
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("storage")
# The values are just hints as to the ordering.
# Eg: fsmod and devmod ordering depends on the mod (shrink -v- grow)
ACTION_TYPE_NONE = 0
ACTION_TYPE_DESTROY = 1000
ACTION_TYPE_RESIZE = 500
ACTION_TYPE_MIGRATE = 250
ACTION_TYPE_CREATE = 100
action_strings = {ACTION_TYPE_NONE: "None",
ACTION_TYPE_DESTROY: "Destroy",
ACTION_TYPE_RESIZE: "Resize",
ACTION_TYPE_MIGRATE: "Migrate",
ACTION_TYPE_CREATE: "Create"}
ACTION_OBJECT_NONE = 0
ACTION_OBJECT_FORMAT = 1
ACTION_OBJECT_DEVICE = 2
object_strings = {ACTION_OBJECT_NONE: "None",
ACTION_OBJECT_FORMAT: "Format",
ACTION_OBJECT_DEVICE: "Device"}
RESIZE_SHRINK = 88
RESIZE_GROW = 89
resize_strings = {RESIZE_SHRINK: "Shrink",
RESIZE_GROW: "Grow"}
def action_type_from_string(type_string):
if type_string is None:
return None
for (k,v) in action_strings.items():
if v.lower() == type_string.lower():
return k
return resize_type_from_string(type_string)
def action_object_from_string(type_string):
if type_string is None:
return None
for (k,v) in object_strings.items():
if v.lower() == type_string.lower():
return k
def resize_type_from_string(type_string):
if type_string is None:
return None
for (k,v) in resize_strings.items():
if v.lower() == type_string.lower():
return k
class DeviceAction(object):
""" An action that will be carried out in the future on a Device.
These classes represent actions to be performed on devices or
filesystems.
The operand Device instance will be modified according to the
action, but no changes will be made to the underlying device or
filesystem until the DeviceAction instance's execute method is
called. The DeviceAction instance's cancel method should reverse
any modifications made to the Device instance's attributes.
If the Device instance represents a pre-existing device, the
constructor should call any methods or set any attributes that the
action will eventually change. Device/DeviceFormat classes should verify
that the requested modifications are reasonable and raise an
exception if not.
Only one action of any given type/object pair can exist for any
given device at any given time. This is enforced by the
DeviceTree.
Basic usage:
a = DeviceAction(dev)
a.execute()
OR
a = DeviceAction(dev)
a.cancel()
XXX should we back up the device with a deep copy for forcibly
cancelling actions?
The downside is that we lose any checking or verification that
would get done when resetting the Device instance's attributes to
their original values.
The upside is that we would be guaranteed to achieve a total
reversal. No chance of, eg: resizes ending up altering Device
size due to rounding or other miscalculation.
"""
type = ACTION_TYPE_NONE
obj = ACTION_OBJECT_NONE
_id = 0
def __init__(self, device):
if not isinstance(device, StorageDevice):
raise ValueError("arg 1 must be a StorageDevice instance")
self.device = device
# Establish a unique id for each action instance. Making shallow or
# deep copyies of DeviceAction instances will require __copy__ and
# __deepcopy__ methods to handle incrementing the id in the copy
self.id = DeviceAction._id
DeviceAction._id += 1
def execute(self, intf=None):
""" perform the action """
pass
def cancel(self):
""" cancel the action """
pass
@property
def isDestroy(self):
return self.type == ACTION_TYPE_DESTROY
@property
def isCreate(self):
return self.type == ACTION_TYPE_CREATE
@property
def isMigrate(self):
return self.type == ACTION_TYPE_MIGRATE
@property
def isResize(self):
return self.type == ACTION_TYPE_RESIZE
@property
def isShrink(self):
return (self.type == ACTION_TYPE_RESIZE and self.dir == RESIZE_SHRINK)
@property
def isGrow(self):
return (self.type == ACTION_TYPE_RESIZE and self.dir == RESIZE_GROW)
@property
def isDevice(self):
return self.obj == ACTION_OBJECT_DEVICE
@property
def isFormat(self):
return self.obj == ACTION_OBJECT_FORMAT
@property
def format(self):
return self.device.format
def __str__(self):
s = "[%d] %s %s" % (self.id, action_strings[self.type],
object_strings[self.obj])
if self.isResize:
s += " (%s)" % resize_strings[self.dir]
if self.isFormat:
s += " %s" % self.format.desc
if self.isMigrate:
s += " to %s" % self.format.migrationTarget
s += " on"
s += " %s %s (id %d)" % (self.device.type, self.device.name,
self.device.id)
return s
def requires(self, action):
""" Return True if self requires action. """
return False
def obsoletes(self, action):
""" Return True is self obsoletes action.
DeviceAction instances obsolete other DeviceAction instances with
lower id and same device.
"""
return (self.device.id == action.device.id and
self.type == action.type and
self.obj == action.obj and
self.id > action.id)
class ActionCreateDevice(DeviceAction):
""" Action representing the creation of a new device. """
type = ACTION_TYPE_CREATE
obj = ACTION_OBJECT_DEVICE
def __init__(self, device):
if device.exists:
raise ValueError("device already exists")
# FIXME: assert device.fs is None
DeviceAction.__init__(self, device)
def execute(self, intf=None):
self.device.create(intf=intf)
def requires(self, action):
""" Return True if self requires action.
Device create actions require other actions when either of the
following is true:
- this action's device depends on the other action's device
- both actions are partition create actions on the same disk
and this partition has a higher number
"""
rc = False
if self.device.dependsOn(action.device):
rc = True
elif (action.isCreate and action.isDevice and
|
melizeche/PyBlog
|
blog/models.py
|
Python
|
gpl-2.0
| 607 | 0.039539 |
from django.db import models
# Create your models here.
class Autor(models.Model):
nombre = models.CharField(max_length=50)
    edad = models.IntegerField(null=True, blank=True)
email = models.EmailField()
def __unicode__(self):
return self.nombre
class Meta:
verbose_name_plural = "Autores"
class Articulo(models.Model):
    autor = models.ForeignKey('Autor', null=True)
titulo = models.CharField(max_length=100)
texto = models.TextField(blank=True, null=True)
created = models.DateTimeField('Agregado',auto_now_add=True, null=True, blank=True)
def __unicode__(self):
return self.titulo
|
0sm0s1z/subterfuge
|
modules/models.py
|
Python
|
gpl-3.0
| 1,614 | 0.02912 |
from django.db import models
from django import forms
class installed(models.Model):
name = models.CharField(max_length=300)
active = models.CharField(max_length=300)
class vectors(models.Model):
name = models.CharField(max_length=300)
active = models.CharField(max_length=300)
class iptrack(models.Model):
address = models.CharField(max_length=300)
mac = models.CharField(max_length=300)
os = models.CharField(max_length=300, default = 'unknown')
osdetails = models.CharField(max_length=300)
injected = models.CharField(max_length=300)
expand = models.CharField(max_length=300, default = '0')
class scan(models.Model):
address = models.CharField(max_length=300)
ports = models.CharField(max_length=300)
osdetails = models.CharField(max_length=300)
hostname = models.CharField(max_length=300)
scanning = models.CharField(max_length=300, default = '0')
class apgen(models.Model):
    essid = models.CharField(max_length=300)
channel = models.CharField(max_length=300)
atknic = models.CharField(max_length=300)
netnic = models.CharField(max_length=300)
class arppoison(models.Model):
    target = models.CharField(max_length=300, default = 'none')
method = models.CharField(max_length=300, default = 'none')
class sessions(models.Model):
source = models.CharField(max_length=300)
session = models.CharField(max_length=300)
date = models.CharField(max_length=300)
|
ahmadiga/min_edx
|
common/test/acceptance/pages/studio/course_rerun.py
|
Python
|
agpl-3.0
| 971 | 0 |
"""
Course rerun page in Studio
"""
from .course_page import CoursePage
from .utils import set_input_value
class CourseRerunPage(CoursePage):
"""
Course rerun page in Studio
"""
url_path = "course_rerun"
COURSE_RUN_INPUT = '.rerun-course-run'
def is_browser_on_page(self):
"""
Returns True iff the browser has loaded the course rerun page.
"""
        return self.q(css='body.view-course-create-rerun').present
@property
def course_run(self):
"""
        Returns the value of the course run field.
"""
return self.q(css=self.COURSE_RUN_INPUT).text[0]
@course_run.setter
def course_run(self, value):
"""
Sets the value of the course run field.
"""
set_input_value(self, self.COURSE_RUN_INPUT, value)
def create_rerun(self):
"""
Clicks the create rerun button.
"""
self.q(css='.rerun-course-save')[0].click()
|
codeaudit/myriad-toolkit
|
tools/python/myriad/compiler/debug.py
|
Python
|
apache-2.0
| 2,056 | 0.006323 |
'''
Copyright 2010-2013 DIMA Research Group, TU Berlin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Dec 15, 2011
@author: Alexander Alexandrov <alexander.alexandrov@tu-berlin.de>
'''
from myriad.compiler.visitor import AbstractVisitor
class PrintVisitor(AbstractVisitor):
'''
classdocs
'''
__indent = 0
__indentPrefix = " "
def __init__(self, *args, **kwargs):
        super(PrintVisitor, self).__init__(*args, **kwargs)
def traverse(self, node):
print "~" * 160
node.accept(self)
print "~" * 160
def _preVisitAbstractNode(self, node):
if (len(node.allAttributes()) == 0):
# print node with attributes
print "%s+ %s" % (self.__indentPrefix * self.__indent, node.__class__.__name__)
else:
# print node without attributes
print "%s+ %s {" % (self.__indentPrefix * self.__indent, node.__class__.__name__)
for (k, v) in node.allAttributes().iteritems():
print "%s'%s': '%s'," % (self.__indentPrefix * (self.__indent + 3), k, v)
print "%s}" % (self.__indentPrefix * (self.__indent + 2))
self._increaseIndent()
def _postVisitAbstractNode(self, node):
self._decreaseIndent()
# def _preVisitSetItemNode(self, node):
# pass
#
# def _postVisitSetItemNode(self, node):
# pass
def _increaseIndent(self):
self.__indent = self.__indent + 1
def _decreaseIndent(self):
self.__indent = self.__indent - 1
|
kenorb-contrib/BitTorrent
|
python_bt_codebase_library/BTL/ebrpc.py
|
Python
|
gpl-3.0
| 3,425 | 0.012555 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
### ebrpc
## query = ebencode({'y':'q', 'q':'<method>', 'a':[<params>]})
## response = ebencode({'y':'r', 'r':<return value>})
## fault = ebencode({'y':'e','c':'<fault code>', 's':'<fault string>'})
from xmlrpclib import Error, Fault
from types import TupleType
from BTL.ebencode import ebencode, ebdecode
def dump_fault(code, msg):
return ebencode({'y':'e', 'c':code, 's':msg})
def dumps(params, methodname=None, methodresponse=None, encoding=None, allow_none=False):
if methodresponse and isinstance(params, TupleType):
assert len(params) == 1, "response tuple must be a singleton"
if methodname:
out = ebencode({'y':'q', 'q':methodname, 'a':params})
elif isinstance(params, Fault):
out = ebencode({'y':'e', 'c':params.faultCode, 's':params.faultString})
elif methodresponse:
out = ebencode({'y':'r', 'r':params[0]})
else:
raise Error("")
return out
def loads(data):
d = ebdecode(data)
if d['y'] == 'e':
raise Fault(d['c'], d['s']) # the server raised a fault
elif d['y'] == 'r':
# why is this return value so weird?
# because it's the way that loads works in xmlrpclib
return (d['r'],), None
elif d['y'] == 'q':
return d['a'], d['q']
raise ValueError
class DFault(Exception):
"""Indicates an Datagram EBRPC fault package."""
# If you return a DFault with tid=None from within a function called via
# twispread's TEBRPC.callRemote then TEBRPC will insert the tid for the call.
def __init__(self, faultCode, faultString, tid=None):
self.faultCode = faultCode
self.faultString = faultString
self.tid = tid
self.args = (faultCode, faultString)
def __repr__(self):
return (
"<Fault %s: %s>" %
(self.faultCode, repr(self.faultString))
)
### datagram interface
### has transaction ID as third return value
### slightly different API, returns a tid as third argument in query/response
def dumpd(params, methodname=None, methodresponse=None, encoding=None, allow_none=False, tid=None):
assert tid is not None, "need a transaction identifier"
if methodname:
out = ebencode({'y':'q', 't':tid, 'q':methodname, 'a':params})
elif isinstance(params, DFault):
out = ebencode({'y':'e', 't':tid, 'c':params.faultCode, 's':params.faultString})
elif methodresponse:
out = ebencode({'y':'r', 't':tid, 'r':params})
else:
raise Error("")
return out
def loadd(data):
d = ebdecode(data)
if d['y'] == 'e':
raise DFault(d['c'], d['s'], d['t'])
elif d['y'] == 'r':
return d['r'], None, d['t']
elif d['y'] == 'q':
return d['a'], d['q'], d['t']
raise ValueError
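# Illustrative sketch (not part of the original module): a round trip through
# the datagram interface described above. dumpd() encodes a query together with
# its transaction id, and loadd() recovers (params, method, tid). The method
# name and tid are made-up values; running this assumes BTL.ebencode is
# importable, as the module itself already requires.
if __name__ == '__main__':
    wire = dumpd(['alice', 42], methodname='greet', tid='t1')
    params, method, tid = loadd(wire)
    assert (params, method, tid) == (['alice', 42], 'greet', 't1')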
|
Trust-Code/addons-yelizariev
|
reminder_base/reminder_base_models.py
|
Python
|
lgpl-3.0
| 6,555 | 0.001831 |
from openerp import api, models, fields, SUPERUSER_ID
class reminder(models.AbstractModel):
_name = 'reminder'
_reminder_date_field = 'date'
_reminder_description_field = 'description'
# res.users or res.partner fields
_reminder_attendees_fields = ['user_id']
reminder_event_id = fields.Many2one('calendar.event',
string='Reminder Calendar Event')
reminder_alarm_ids = fields.Many2many('calendar.alarm', string='Reminders',
related='reminder_event_id.alarm_ids')
@api.one
def _get_reminder_event_name(self):
return '%s: %s' % (self._description, self.display_name)
@api.model
def _create_reminder_event(self):
vals = {
'reminder_res_model': self._name,
# dummy values
'name': 'TMP NAME',
'allday': True,
'start_date': fields.Date.today(),
'stop_date': fields.Date.today(),
}
event = self.env['calendar.event'].with_context({}).create(vals)
return event
@api.model
def _init_reminder(self):
domain = [(self._reminder_date_field, '!=', False)]
self.search(domain)._do_update_reminder()
@api.one
def _update_reminder(self, vals):
if self._context.get('do_not_update_reminder'):
# ignore own calling of write function
return
if not vals:
return
if not self.reminder_event_id and self._reminder_date_field not in vals:
# don't allow to create reminder if date is not set
return
fields = ['reminder_alarm_ids',
self._reminder_date_field,
self._reminder_description_field]
if not any([k in vals for k in fields if k]):
return
self._do_update_reminder(update_date=self._reminder_date_field in vals)
@api.one
def _do_update_reminder(self, update_date=True):
vals = {'name': self._get_reminder_event_name()[0]}
event = self.reminder_event_id
if not event:
event = self._create_reminder_event()
self.with_context(do_not_update_reminder=True).write({'reminder_event_id': event.id})
if not event.reminder_res_id:
            vals['reminder_res_id'] = self.id
if update_date:
fdate = self._fields[self._reminder_date_field]
fdate_value = getattr(self, self._reminder_date_field)
if not fdate_value:
event.unlink()
return
if fdate.type == 'date':
vals.update({
'allday': True,
'start_date': fdate_value,
'stop_date': fdate_value,
})
elif fdate.type == 'datetime':
vals.update({
'allday': False,
'start_datetime': fdate_value,
'stop_datetime': fdate_value,
})
if self._reminder_description_field:
vals['description'] = getattr(self, self._reminder_description_field)
if self._reminder_attendees_fields:
partner_ids = []
for field_name in self._reminder_attendees_fields:
field = self._columns[field_name]
partner = getattr(self, field_name)
model = None
try:
model = field.comodel_name
except AttributeError:
model = field._obj # v7
if model == 'res.users':
partner = partner.partner_id
if partner.id not in partner_ids:
partner_ids.append(partner.id)
vals['partner_ids'] = [(6, 0, partner_ids)]
event.write(vals)
@api.model
def _check_and_create_reminder_event(self, vals):
fields = [self._reminder_date_field]
if any([k in vals for k in fields]):
event = self._create_reminder_event()
vals['reminder_event_id'] = event.id
return vals
@api.model
def create(self, vals):
vals = self._check_and_create_reminder_event(vals)
res = super(reminder, self).create(vals)
res._update_reminder(vals)
return res
@api.one
def write(self, vals):
if not self.reminder_event_id:
vals = self._check_and_create_reminder_event(vals)
res = super(reminder, self).write(vals)
self._update_reminder(vals)
return res
class calendar_event(models.Model):
_inherit = 'calendar.event'
reminder_res_model = fields.Char('Related Document Model for reminding')
reminder_res_id = fields.Integer('Related Document ID for reminding')
@api.multi
def open_reminder_object(self):
r = self[0]
target = self._context.get('target', 'current')
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': r.reminder_res_model,
'res_id': r.reminder_res_id,
'views': [(False, 'form')],
'target': target,
}
class reminder_admin_wizard(models.TransientModel):
_name = 'reminder.admin'
model = fields.Selection(string='Model', selection='_get_model_list', required=True)
events_count = fields.Integer(string='Count of calendar records', compute='_get_events_count')
action = fields.Selection(string='Action', selection=[('create', 'Create Calendar Records'), ('delete', 'Delete Calendar Records')],
required=True, default='create',)
def _get_model_list(self):
res = []
for r in self.env['ir.model.fields'].search([('name', '=', 'reminder_event_id')]):
if r.model_id.model == 'reminder':
# ignore abstract class
continue
res.append( (r.model_id.model, r.model_id.name) )
return res
@api.onchange('model')
@api.one
def _get_events_count(self):
count = 0
if self.model:
count = self.env['calendar.event'].search_count([('reminder_res_model', '=', self.model)])
self.events_count = count
@api.one
def action_execute(self):
if self.action == 'delete':
self.env['calendar.event'].search([('reminder_res_model', '=', self.model)]).unlink()
elif self.action == 'create':
self.env[self.model]._init_reminder()
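# Illustrative sketch (not part of the original module): how a concrete model
# could plug into the abstract ``reminder`` mixin above. Inherit the mixin,
# point the class attributes at the fields that should drive the calendar
# event, and the create/write overrides take care of the rest. The model name
# and fields below are hypothetical; leaving this class in an installed addon
# would register it for real.
class reminder_demo_task(models.Model):
    _name = 'reminder.demo.task'
    _inherit = ['reminder']
    _reminder_date_field = 'deadline'
    _reminder_description_field = 'notes'
    _reminder_attendees_fields = ['user_id']

    name = fields.Char('Name')
    deadline = fields.Date('Deadline')
    notes = fields.Text('Notes')
    user_id = fields.Many2one('res.users', 'Responsible')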
|
stargaser/astropy
|
astropy/units/decorators.py
|
Python
|
bsd-3-clause
| 9,242 | 0.001839 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from astropy.utils.decorators import wraps
from astropy.utils.misc import isiterable
from .core import Unit, UnitBase, UnitsError, add_enabled_equivalencies
from .physical import _unit_physical_mapping
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try: # unit passed in as a string
target_unit = Unit(target)
except ValueError:
try: # See if the function writer specified a physical type
physical_type_id = _unit_physical_mapping[target]
except KeyError: # Function argument target is invalid
raise ValueError("Invalid unit or physical type '{}'."
.format(target))
# get unit directly from physical type id
target_unit = Unit._from_physical_type_id(physical_type_id)
allowed_units.append(target_unit)
return allowed_units
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute
|
without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError("Argument '{}' to function '{}' has {}. "
"You may want to pass in
|
an astropy Quantity instead."
.format(param_name, func_name, error_msg))
else:
if len(targets) > 1:
raise UnitsError("Argument '{}' to function '{}' must be in units"
" convertible to one of: {}."
.format(param_name, func_name,
[str(targ) for targ in targets]))
else:
raise UnitsError("Argument '{}' to function '{}' must be in units"
" convertible to '{}'."
.format(param_name, func_name,
str(targets[0])))
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the decorator,
or by using function annotation syntax. Arguments to the decorator
take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator
or in the annotation.
If the argument has no unit attribute, i.e. it is not a Quantity object, a
`ValueError` will be raised unless the argument is an annotation. This is to
allow non Quantity annotations to pass through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if param.name not in bound_args.arguments and param.default is not param.empty:
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isiterable(targets):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all
|
ciaranlangton/reddit-cxlive-bot
|
config.py
|
Python
|
mit
| 62 | 0 |
username = "x"
password = "x"
subreddit = "x"
client_id = "x"
|
Comunitea/CMNT_00040_2016_ELN_addons
|
sales_mrp_stock_forecast_link/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,048 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved
# $Pedro Gómez$ <pegomez@elnogal.com>
#
# This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sales_mrp_forecast
import sales_stock_forecast
|
shudwi/CrimeMap
|
Type_Of_Crime/admin.py
|
Python
|
gpl-3.0
| 131 | 0 |
from django.contrib import admin
from .models import Type_Of_Crime
admin.site.register(Type_Of_Crime)
# Register your models here.
|
tpsatish95/Topic-Modeling-Social-Network-Text-Data
|
Kseeds/modifiedCluster.py
|
Python
|
apache-2.0
| 4,434 | 0.016013 |
import numpy as np
from sklearn import cluster, datasets, preprocessing
import pickle
import gensim
import time
import re
import tokenize
from scipy import spatial
def save_obj(obj, name ):
with open( name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
def load_obj(name ):
with open( name + '.pkl', 'rb') as f:
return pickle.load(f)
def combine(v1,v2):
A = np.add(v1,v2)
M = np.multiply(A,A)
lent=0
for i in M:
lent+=i
return np.divide(A,lent)
# 3M word google dataset of pretrained 300D vectors
model = gensim.models.Word2Vec.load_word2vec_format('vectors.bin', binary=True)
model.init_sims(replace=True)
#### getting all vecs from w2v using the inbuilt syn0 list see code
# X_Scaled_Feature_Vecs = []
# for w in model.vocab:
# X_Scaled_Feature_Vecs.append(model.syn0[model.vocab[w].index])
# model.syn0 = X_Scaled_Feature_Vecs
# X_Scaled_Feature_Vecs = None
# X_Scaled_Feature_Vecs = model.syn0
# ### scaling feature vecs
# min_max_scaler = preprocessing.MinMaxScaler()
# X_Scaled_Feature_Vecs = min_max_scaler.fit_transform(X)
# X_Scaled_Feature_Vecs = X
# W2V = dict(zip(model.vocab, X_Scaled_Feature_Vecs))
#Cosine Distance
# from scipy import spatial
# dataSetI = model["travel"]
# dataSetII = model["travelling"]
# result = 1 - spatial.distance.cosine(dataSetI, dataSetII)
# print(result)
X_Scaled_Feature_Vecs=[]
for word in model.vocab:
X_Scaled_Feature_Vecs.append(model[word])
# ######## Interested Categories
cat = ["advertising","beauty","business","celebrity","diy craft","entertainment","family","fashion","food","general","health","lifestyle","music","news","pop","culture","social","media","sports","technology","travel","video games"]
nums = range(0,22)
num2cat = dict(zip(nums, cat))
# new Categories Seeds (787 seeds) DICT [seed: cat]
Word2CatMap = load_obj("baseWord2CatMap")
baseWords = Word2CatMap.keys()
catVec=[]
newBaseWords =[]
# load from C file output
for bw in baseWords:
try:
catVec.append(np.array(model[bw]))
newBaseWords.append(bw)
except:
words = bw.split()
try:
vec = np.array(model[words[0]])
for word in words[1:]:
try:
vec = combine(vec,np.array(model[word]))
except:
#print(word + " Skipped!")
continue
catVec.append(vec)
newBaseWords.append(bw)
except:
#print(words)
continue
# print(len(catVec))
# print(len(newBaseWords))
#cluster Size
# newBaseWords has the list of new base words that are in word2vec vocab
k = len(catVec)
# form a num(k) to cat(22) mapping
numK2CatMap = dict()
for w in newBaseWords:
numK2CatMap[newBaseWords.index(w)] = Word2CatMap[w]
# kmeans
##### better code
t0 = time.time()
# Assign Max_Iter to 1 (ONE) if u just want to fit vectors around seeds
kmeans = cluster.KMeans(n_clusters=k, init=np.array(catVec), max_iter=1).fit(X_Scaled_Feature_Vecs)
#kmeans = cluster.KMeans(n_clusters=22, init=np.array(catVec), max_iter=900).fit(X_Scaled_Feature_Vecs)
print(str(time.time()-t0))
print(kmeans.inertia_)
###### After Fiting the Cluster Centers are recomputed : update catVec (Order Preserved)
catVec = kmeans.cluster_centers_
# #test
# for c in catVec:
# print(num2cat[kmeans.predict(c)[0]])
##### save best for future use
save_obj(kmeans,"clusterLarge")
KM = kmeans
# Cluster_lookUP = dict(zip(model.vocab, KM.labels_))
Cluster_lookUP = dict()
Cluster_KlookUP = dict()
for word in model.vocab:
kmap = KM.predict(model[word])[0]
Cluster_lookUP[word] = numK2CatMap[kmap]
Cluster_KlookUP[word] = kmap
## Precomputing the cosine similarities
Cosine_Similarity = dict()
for k in Cluster_lookUP.keys():
# if len(Cluster_lookUP[k]) == 1:
    Cosine_Similarity[k] = 1 - spatial.distance.cosine(model[k], catVec[Cluster_KlookUP[k]])
# else:
# Cosine_Similarity[k] = [1 - spatial.distance.cosine(model[k], catVec[wk]) for wk in Cluster_KlookUP[k]]
#check
print(num2cat[Cluster_lookUP["flight"][0]] + " "+str(Cosine_Similarity["flight"]))
print(num2cat[Cluster_lookUP["gamecube"][0]] +" "+str(Cosine_Similarity["gamecube"]))
#Saving Models
# for 22 topics
save_obj(Cluster_lookUP,"Cluster_lookUP")
save_obj(Cosine_Similarity,"Cosine_Similarity")
save_obj(num2cat,"num2cat")
save_obj(catVec,"catVec")
save_obj(numK2CatMap,"numK2CatMap")
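# Illustrative sketch (not part of the original script): a toy illustration of
# the cosine-similarity measure precomputed above, using made-up 3-d vectors
# instead of word2vec embeddings. Parallel vectors score ~1.0, orthogonal
# vectors score 0.0.
_v_parallel_a = np.array([1.0, 0.0, 1.0])
_v_parallel_b = np.array([2.0, 0.0, 2.0])
_v_orthogonal = np.array([0.0, 1.0, 0.0])
print(1 - spatial.distance.cosine(_v_parallel_a, _v_parallel_b))  # ~1.0
print(1 - spatial.distance.cosine(_v_parallel_a, _v_orthogonal))  # 0.0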
|
ianloic/fuchsia-sdk
|
scripts/common.py
|
Python
|
apache-2.0
| 848 | 0.004717 |
#!/usr/bin/env python
import json
import os
import subprocess
def normalize_target(target):
if ':' in target: return target
return target + ':' + os.path.basename(target)
def gn_desc(root_out_dir, target, *what_to_show):
# gn desc may fail transiently for an unknown reason; retry loop
for i in xrange(2):
desc = subprocess.check_output([
os.path.join(os.environ['FUCHSIA_DIR'], 'buildtools', 'gn'), 'desc',
            root_out_dir, '--format=json', target
] + list(what_to_show))
try:
output = json.loads(desc)
break
except ValueError:
if i >= 1:
print 'Failed to describe target ', target, '; output: ', desc
raise
    if target not in output:
target = normalize_target(target)
return output[target]
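# Illustrative sketch (not part of the original helper): a hypothetical
# invocation of gn_desc() asking GN for the 'deps' of a target in a given
# output directory. The build directory and target label below are made up,
# and FUCHSIA_DIR must be set in the environment, as the helper assumes.
if __name__ == '__main__':
  deps = gn_desc('out/x64', '//examples/hello:bin', 'deps')
  print json.dumps(deps, indent=2)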
|
bd-j/magellanic
|
magellanic/sfhs/prediction_scripts/predicted_total.py
|
Python
|
gpl-2.0
| 5,894 | 0.009841 |
import sys, pickle, copy
import numpy as np
import matplotlib.pyplot as pl
import astropy.io.fits as pyfits
import magellanic.regionsed as rsed
import magellanic.mcutils as utils
from magellanic.lfutils import *
try:
import fsps
from sedpy import observate
except ImportError:
#you wont be able to predict the integrated spectrum or magnitudes
# filterlist must be set to None in calls to total_cloud_data
sps = None
wlengths = {'2': '{4.5\mu m}',
'4': '{8\mu m}'}
dmod = {'smc':18.9,
'lmc':18.5}
cloud_info = {}
cloud_info['smc'] = [utils.smc_regions(), 20, 23, [7, 13, 16], [3,5,6]]
cloud_info['lmc'] = [utils.lmc_regions(), 48, 38, [7, 11, 13, 16], [3,4,5,6]]
def total_cloud_data(cloud, filternames = None, basti=False,
lfstring=None, agb_dust=1.0,
one_metal=None):
#########
# SPS
#########
#
if filternames is not None:
sps = fsps.StellarPopulation(add_agb_dust_model=True)
sps.params['sfh'] = 0
sps.params['agb_dust'] = agb_dust
dust = ['nodust', 'agbdust']
sps.params['imf_type'] = 0.0 #salpeter
filterlist = observate.load_filters(filternames)
else:
filterlist = None
##########
# SFHs
##########
regions, nx, ny, zlist, zlist_basti = cloud_info[cloud.lower()]
if basti:
zlist = basti_zlist
if 'header' in regions.keys():
rheader = regions.pop('header') #dump the header info from the reg. dict
total_sfhs = None
for n, dat in regions.iteritems():
total_sfhs = sum_sfhs(total_sfhs, dat['sfhs'])
total_zmet = dat['zmet']
#collapse SFHs to one metallicity
if one_metal is not None:
ts = None
for sfh in total_sfhs:
ts = sum_sfhs(ts, sfh)
total_sfh = ts
zlist = [zlist[one_metal]]
total_zmet = [total_zmet[one_metal]]
#############
# LFs
############
bins = rsed.lfbins
if lfstring is not None:
# these are stored as a list of different metallicities
lffiles = [lfstring.format(z) for z in zlist]
lf_base = [read_villaume_lfs(f) for f in lffiles]
#get LFs broken out by age and metallicity as well as the total
lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(total_sfhs), lf_base)
else:
lfs_zt, lf, logages = None, None, None
###########
# SED
############
if filterlist is not None:
spec, wave, mass = rsed.one_region_sed(copy.deepcopy(total_sfhs), total_zmet, sps)
mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist)
maggies = 10**(-0.4 * np.atleast_1d(mags))
else:
maggies, mass = None, None
#############
# Write output
############
total_values = {}
total_values['agb_clf'] = lf
total_values['agb_clfs_zt'] = lfs_zt
total_values['clf_mags'] = bins
total_values['logages'] = logages
total_values['sed_ab_maggies'] = maggies
total_values['sed_filters'] = filternames
total_values['lffile'] = lfstring
total_values['mstar'] = mass
total_values['zlist'] = zlist
return total_values, total_sfhs
def sum_sfhs(sfhs1, sfhs2):
"""
Accumulate individual sets of SFHs into a total set of SFHs. This
assumes that the individual SFH sets all have the same number and
order of metallicities, and the same time binning.
"""
if sfhs1 is None:
return copy.deepcopy(sfhs2)
elif sfhs2 is None:
return copy.deepcopy(sfhs1)
else:
out = copy.deepcopy(sfhs1)
for s1, s2 in zip(out, sfhs2):
s1['sfr'] += s2['sfr']
return out
if __name__ == '__main__':
filters = ['galex_NUV', 'spitzer_irac_ch2',
'spitzer_irac_ch4', 'spitzer_mips_24']
#filters = None
ldir, cdir = 'lf_data/', 'composite_lfs/'
outst = '{0}_n2teffcut.p'
# total_cloud_data will loop over the appropriate (for the
# isochrone) metallicities for a given lfst filename template
lfst = '{0}z{{0:02.0f}}_tau{1:2.1f}_vega_irac{2}_n2_teffcut_lf.txt'
basti = False
agb_dust=1.0
agebins = np.arange(9)*0.3 + 7.4
#loop over clouds (and bands and agb_dust) to produce clfs
for cloud in ['smc']:
rdir = '{0}cclf_{1}_'.format(cdir, cloud)
for band in ['2','4']:
lfstring = lfst.format(ldir, agb_dust, band)
dat, sfhs = total_cloud_data(cloud, filternames=filters, agb_dust=agb_dust,
lfstring=lfstring, basti=basti)
agebins = sfhs[0]['t1'][3:-1]
outfile = lfstring.replace(ldir, rdir).replace('z{0:02.0f}_','').replace('.txt','.dat')
write_clf_many([dat['clf_mags'], dat['agb_clf']], outfile, lfstring)
#fig, ax = plot_weighted_lfs(dat, agebins = agebins, dm=dmod[cloud])
#fig.suptitle('{0} @ IRAC{1}'.format(cloud.upper(), band))
#fig.savefig('byage_clfs/{0}_clfs_by_age_and_Z_irac{1}'.format(cloud, band))
#pl.close(fig)
colheads = (len(agebins)-1) * ' N<m(t={})'
colheads = colheads.format(*(agebins[:-1]+agebins[1:])/2.)
tbin_lfs = np.array([rebin_lfs(lf, ages, agebins) for lf, ages
in zip(dat['agb_clfs_zt'], dat['logages'])])
write_clf_many([dat['clf_mags'], tbin_lfs.sum(axis=0)],
outfile.replace(cdir,'byage_clfs/'), lfstring,
colheads=colheads)
pl.figure()
for s, z in zip(sfhs, dat['zlist']):
pl.step(s['t1'], s['sfr'], where='post', label='zind={0}'.format(z), linewidth=3)
pl.legend(loc=0)
pl.title(cloud.upper())
print(cloud, dat['mstar'])
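    # Illustrative sketch (not part of the original script): a toy check of
    # sum_sfhs() with two fake single-metallicity SFH sets sharing the same
    # time bins; the star formation rates simply add element-wise.
    _toy_a = [{'t1': np.arange(3), 'sfr': np.array([1.0, 2.0, 3.0])}]
    _toy_b = [{'t1': np.arange(3), 'sfr': np.array([0.5, 0.5, 0.5])}]
    print(sum_sfhs(_toy_a, _toy_b)[0]['sfr'])  # -> [ 1.5  2.5  3.5]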
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/analysis/hole2/hole.py
|
Python
|
gpl-2.0
| 67,157 | 0.000626 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2020 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""HOLE Analysis --- :mod:`MDAnalysis.analysis.hole2.hole`
=====================================================================================
:Author: Lily Wang
:Year: 2020
:Copyright: GNU Public License v3
.. versionadded:: 1.0.0
This module contains the tools to interface with HOLE_ [Smart1993]_
[Smart1996]_ to analyse an ion channel pore or transporter pathway [Stelzl2014]_ .
Using HOLE on a PDB file
------------------------
Use the :func:`hole` function to run `HOLE`_ on a single PDB file. For example,
the code below runs the `HOLE`_ program installed at '~/hole2/exe/hole' ::
from MDAnalysis.tests.datafiles import PDB_HOLE
from MDAnalysis.analysis import hole2
profiles = hole2.hole(PDB_HOLE, executable='~/hole2/exe/hole')
# to create a VMD surface of the pore
hole2.create_vmd_surface(filename='hole.vmd')
``profiles`` is a dictionary of HOLE profiles, indexed by the frame number. If only
a PDB file is passed to the function, there will only be one profile at frame 0.
You can visualise the pore by loading your PDB file into VMD, and in
Extensions > Tk Console, type::
source hole.vmd
You can also pass a DCD trajectory with the same atoms in the same order as
your PDB file with the ``dcd`` keyword argument. In that case, ``profiles`` will
contain multiple HOLE profiles, indexed by frame.
The HOLE program will create some output files:
* an output file (default name: hole.out)
* an sphpdb file (default name: hole.sph)
* a file of van der Waals' radii
(if not specified with ``vdwradii_file``. Default name: simple2.rad)
* a symlink of your PDB or DCD files (if the original name is too long)
* the input text (if you specify ``infile``)
By default (`keep_files=True`), these files are kept. If you would like to
delete the files after the function has run, set `keep_files=False`. Keep in
mind that if you delete the sphpdb file, you cannot then create a VMD surface.
Using HOLE on a trajectory
--------------------------
You can also run HOLE on a trajectory through the :class:`HoleAnalysis` class.
This behaves similarly to the ``hole`` function, although arguments such as ``cpoint``
and ``cvect`` become runtime arguments for the :meth:`~HoleAnalysis.run` function.
The class can be set-up and run like a normal MDAnalysis analysis class::
import MDAnalysis as mda
from MDAnalysis.tests.datafiles import MULTIPDB_HOLE
    from MDAnalysis.analysis import hole2
    u = mda.Universe(MULTIPDB_HOLE)
    ha = hole2.HoleAnalysis(u, executable='~/hole2/exe/hole')
    ha.run()
ha.create_vmd_surface(filename='hole.vmd')
The VMD surface created by the class updates the pore for each frame of the trajectory.
Use it as normal by loading your trajectory in VMD and sourcing the file in the Tk Console.
You can access the actual profiles generated in the ``results`` attribute::
print(ha.results.profiles)
Again, HOLE writes out files for each frame. If you would like to delete these files
after the analysis, you can call :meth:`~HoleAnalysis.delete_temporary_files`::
ha.delete_temporary_files()
Alternatively, you can use HoleAnalysis as a context manager that deletes temporary
files when you are finished with the context manager::
with hole2.HoleAnalysis(u, executable='~/hole2/exe/hole') as h2:
h2.run()
h2.create_vmd_surface()
Using HOLE with VMD
-------------------
The :program:`sos_triangle` program that is part of HOLE_ can write an input
file for VMD_ to display a triangulated surface of the pore found by
:program:`hole`. This functionality is available with the
:meth:`HoleAnalysis.create_vmd_surface` method
[#create_vmd_surface_function]_. For an input trajectory MDAnalysis writes a
*trajectory* of pore surfaces that can be animated in VMD together with the
frames from the trajectory.
Analyzing a full trajectory
~~~~~~~~~~~~~~~~~~~~~~~~~~~
To analyze a full trajectory and write pore surfaces for all frames to file
:file:`hole_surface.vmd`, use ::
import MDAnalysis as mda
from MDAnalysis.analysis import hole2
# load example trajectory MULTIPDB_HOLE
from MDAnalysis.tests.datafiles import MULTIPDB_HOLE
u = mda.Universe(MULTIPDB_HOLE)
with hole2.HoleAnalysis(u, executable='~/hole2/exe/hole') as h2:
h2.run()
h2.create_vmd_surface(filename="hole_surface.vmd")
In VMD, load your trajectory and then in the tcl console
(e.g.. :menuselection:`Extensions --> Tk Console`) load the surface
trajectory:
.. code-block:: tcl
source hole_surface.vmd
If you only want to *subsample the trajectory* and only show the surface at
specific frames then you can either load the trajectory with the same
subsampling into VMD or create a subsampled trajectory.
Creating subsampled HOLE surface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For example, if we want to start displaying at frame 1 (i.e., skip frame 0), stop at frame 7, and
only show every other frame (step 2) then the HOLE analysis will be ::
with hole2.HoleAnalysis(u, executable='~/hole2/exe/hole') as h2:
h2.run(start=1, stop=9, step=2)
h2.create_vmd_surface(filename="hole_surface_subsampled.vmd")
The commands produce the file ``hole_surface_subsampled.vmd`` that can be loaded into VMD.
.. Note::
Python (and MDAnalysis) stop indices are *exclusive* so the parameters
``start=1``, ``stop=9``, and ``step=2`` will analyze frames 1, 3, 5, 7.
.. _Loading-a-trajectory-into-VMD-with-subsampling:
Loading a trajectory into VMD with subsampling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Load your system into VMD. This can mean to load the topology file with
:menuselection:`File --> New Molecule` and adding the trajectory with
:menuselection:`File --> Load Data into Molecule` or just :menuselection:`File
--> New Molecule`.
When loading the trajectory, subsample the frames by setting parametes in in
the :guilabel:`Frames` section. Select *First: 1*, *Last: 7*, *Stride: 2*. Then
:guilabel:`Load` everything.
.. Note::
VMD considers the stop/last frame to be *inclusive* so you need to typically
choose one less than the ``stop`` value that you selected in MDAnalysis.
Then load the surface trajectory:
.. code-block:: tcl
source hole_surface_subsampled.vmd
You should see a different surface for each frame in the trajectory. [#vmd_extra_frame]_
Creating a subsampled trajectory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instead of having VMD subsample the trajectory as described in
:ref:`Loading-a-trajectory-into-VMD-with-subsampling` we can write a subsampled
trajectory to a file. Although it requires more disk space, it can be
convenient if we want to visualize the system repeatedly.
The example trajectory comes as a multi-PDB file so we need a suitable topology
file. If you already have a topology file such as a PSF, TPR, or PRMTOP file
then skip this step. We write frame 0 as a PDB :file:`frame0.pdb` (which we
will use as the topology in VMD)::
u.atoms.write("frame0.pdb")
Then write the actual trajectory in a convenient format such as TRR (or
DCD). Note that we apply the same s
|
mjlong/openmc
|
tests/test_filter_material/test_filter_material.py
|
Python
|
mit
| 858 | 0.003497 |
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness, PyAPITestHarness
import openmc
class FilterMaterialTestHarness(PyAPITestHarness):
def _build_inputs(self):
filt = openmc.Filter(type='material', bins=(1, 2, 3, 4))
tally = openmc.Tally(tally_id=1)
tally.add_filter(filt)
tally.add_score('total')
self._input_set.tallies = openmc.TalliesFile()
self._input_set.tallies.add_tally(tally)
super(FilterMaterialTestHarness, self)._build_inputs()
def _cleanup(self):
super(FilterMaterialTestHarness, self)._cleanup()
f = os.path.join(os.getcwd(), 'tallies.xml')
if os.path.exists(f): os.remove(f)
if __name__ == '__main__':
    harness = FilterMaterialTestHarness('statepoint.10.*', True)
harness.main()
|
unlessbamboo/django
|
accounts/models.py
|
Python
|
gpl-3.0
| 1,076 | 0.005576 |
from __future__ import unicode_literals
from django import forms
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class Accounts(User):
class Meta:
proxy = True
class LoginForm(forms.Form):
# This creates two variables called username and password that are assigned form character fields
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput())
class RegistrationForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ('username', 'first_name', 'last_name',
'email', 'password1', 'password2')
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
        user.email = self.cleaned_data['email']
if commit:
user.save()
return user
|
david-ragazzi/nupic
|
examples/opf/simple_server/model_params.py
|
Python
|
gpl-3.0
| 9,288 | 0.001507 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {'days': 0,
'fields': [('consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Include the encoders we use
'encoders': {
u'consumption': {
'fieldname': u'consumption',
'resolution': 0.88,
'seed': 1,
'name': u'consumption',
'type': 'RandomDistributedScalarEncoder',
},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
# Spatial Pooler implementation selector.
# Options: 'py', 'cpp' (speed optimized, new)
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses.
'potentialPct': 0.85,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10.
'synPermConnected': 0.1,
'synPermActiveInc': 0.04,
'synPermInactiveDec': 0.005,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
      # Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
      # NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements
|
wangg12/IRLS_tf_pytorch
|
src/IRLS_tf_v2.py
|
Python
|
apache-2.0
| 6,061 | 0.003135 |
# python 3
# tensorflow 2.0
from __future__ import print_function, division, absolute_import
import os
import argparse
import random
import numpy as np
import datetime
# from numpy import linalg
import os.path as osp
import sys
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(1, osp.join(cur_dir, '.'))
from sklearn.datasets import load_svmlight_file
from scipy.sparse import csr_matrix
# from scipy.sparse import linalg
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tensorflow as tf
from tf_utils import pinv_naive, pinv
path_train = osp.join(cur_dir, "../a9a/a9a")
path_test = osp.join(cur_dir, "../a9a/a9a.t")
MAX_ITER = 100
np_dtype = np.float32
tf_dtype = tf.float32
# manual seed
manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
np.random.seed(manualSeed)
# load all data
X_train, y_train = load_svmlight_file(path_train, n_features=123, dtype=np_dtype)
X_test, y_test = load_svmlight_file(path_test, n_features=123, dtype=np_dtype)
# X: scipy.sparse.csr.csr_matrix
# X_train: (32561, 123), y_train: (32561,)
# X_test: (16281, 123), y_test:(16281,)
# stack a dimension of ones to X to simplify computation
N_train = X_train.shape[0]
N_test = X_test.shape[0]
X_train = np.hstack((np.ones((N_train, 1)), X_train.toarray())).astype(np_dtype)
X_test = np.hstack((np.ones((N_test, 1)), X_test.toarray())).astype(np_dtype)
# print(X_train.shape, X_test.shape)
y_train = y_train.reshape((N_train, 1))
y_test = y_test.reshape((N_test, 1))
# label: -1, +1 ==> 0, 1
y_train = np.where(y_train == -1, 0, 1)
y_test = np.where(y_test == -1, 0, 1)
# NB: here X's shape is (N,d), which differs to the derivation
def neg_log_likelihood(w, X, y, L2_param=None):
"""
w: dx1
X: Nxd
y: Nx1
L2_param: \lambda>0, will introduce -\lambda/2 ||w||_2^2
"""
# print(type(X), X.dtype)
res = tf.matmul(tf.matmul(tf.transpose(w), tf.transpose(X)), y.astype(np_dtype)) - \
tf.reduce_sum(tf.math.log(1 + tf.exp(tf.matmul(X, w))))
    if L2_param is not None and L2_param > 0:
res += -0.5 * L2_param * tf.matmul(tf.transpose(w), w)
return -res[0][0]
def prob(X, w):
"""
X: Nxd
w: dx1
---
prob: N x num_classes(2)"""
y = tf.constant(np.array([0.0, 1.0]), dtype=tf.float32)
prob = tf.exp(tf.matmul(X, w) * y) / (1 + tf.exp(tf.matmul(X, w)))
return prob
def compute_acc(X, y, w):
p = prob(X, w)
y_pred = tf.cast(tf.argmax(p, axis=1), tf.float32)
y = tf.cast(tf.squeeze(y), tf.float32)
acc = tf.reduce_mean(tf.cast(tf.equal(y, y_pred), tf.float32))
return acc
def update(w_old, X, y, L2_param=0):
"""
w_new = w_old - w_update
w_update = (X'RX+lambda*I)^(-1) (X'(mu-y) + lambda*w_old)
lambda is L2_param
w_old: dx1
X: Nxd
y: Nx1
---
w_update: dx1
"""
d = X.shape[1]
mu = tf.sigmoid(tf.matmul(X, w_old)) # Nx1
R_flat = mu * (1 - mu) # element-wise, Nx1
L2_reg_term = L2_param * tf.eye(d)
XRX = tf.matmul(tf.transpose(X), R_flat * X) + L2_reg_term # dxd
# np.save('XRX_tf.npy', XRX.numpy())
# calculate pseudo inverse via SVD
# method 1
# slightly better than tfp.math.pinv when L2_param=0
XRX_pinv = pinv_naive(XRX)
# method 2
# XRX_pinv = pinv(XRX)
# w = w - (X^T R X)^(-1) X^T (mu-y)
# w_new = tf.assign(w_old, w_old - tf.matmul(tf.matmul(XRX_pinv, tf.transpose(X)), mu - y))
y = tf.cast(y, tf_dtype)
w_update = tf.matmul(XRX_pinv, tf.matmul(tf.transpose(X), mu - y) + L2_param * w_old)
return w_update
def optimize(w_old, w_update):
"""custom update op, instead of using SGD variants"""
return w_old.assign(w_old - w_update)
def train_IRLS(X_train, y_train, X_test=None, y_test=None, L2_param=0, max_iter=MAX_ITER):
"""train Logistic Regression via IRLS algorithm
X: Nxd
y: Nx1
---
"""
N, d = X_train.shape
w = tf.Variable(0.01 * tf.ones((d, 1), dtype=tf.float32), name="w")
current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
summary_writer = tf.summary.create_file_writer(f"./logs/{current_time}")
print("start training...")
print("L2 param(lambda): {}".format(L2_param))
i = 0
# iteration
while i <= max_iter:
print("iter: {}".format(i))
# print('\t neg log likelihood: {}'.format(sess.run(neg_L, feed_dict=train_feed_dict)))
neg_L = neg_log_likelihood(w, X_train, y_train, L2_param)
print("\t neg log likelihood: {}".format(neg_L))
train_acc = compute_acc(X_train, y_train, w)
with summary_writer.as_default():
tf.summary.scalar("train_acc", train_acc, step=i)
tf.summary.scalar("train_neg_L", neg_L, step=i)
test_acc = compute_acc(X_test, y_test, w)
with summary_writer.as_default():
tf.summary.scalar("test_acc", test_acc, step=i)
print("\t train acc: {}, test acc: {}".format(train_acc, test_acc))
L2_norm_w = np.linalg.norm(w.numpy())
print("\t L2 norm of w: {}".format(L2_norm_w))
if i > 0:
diff_w = np.linalg.norm(w_update.numpy())
print("\t diff of w_old and w: {}".format(diff_w))
if diff_w < 1e-2:
break
w_update = update(w, X_train, y_train, L2_param)
        w = optimize(w, w_update)
i += 1
print("training done.")
if __name__ == "__main__":
# test_acc should be about 0.85
lambda_ = 20 # 0
train_IRLS(X_train, y_train, X_test, y_test, L2_param=lambda_, max_iter=100)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train.reshape(N_train,))
y_pred_train = classifier.predict(X_train)
train_acc = np.sum(y_train.reshape(N_train,) == y_pred_train)/N_train
print('train_acc: {}'.format(train_acc))
y_pred_test = classifier.predict(X_test)
test_acc = np.sum(y_test.reshape(N_test,) == y_pred_test)/N_test
print('test acc: {}'.format(test_acc))
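# The update rule documented in update() above can be sanity-checked without
# TensorFlow. Below is a minimal NumPy sketch of the same IRLS step on toy data;
# all names and numbers are illustrative assumptions, not part of the original file.
import numpy as np

def irls_step(w, X, y, l2=0.0):
    """One IRLS update: w_new = w - (X'RX + l2*I)^-1 (X'(mu - y) + l2*w)."""
    mu = 1.0 / (1.0 + np.exp(-X @ w))            # Nx1 sigmoid
    R = mu * (1.0 - mu)                          # Nx1 diagonal of the weight matrix
    H = X.T @ (R * X) + l2 * np.eye(X.shape[1])  # dxd
    grad = X.T @ (mu - y) + l2 * w               # dx1
    return w - np.linalg.pinv(H) @ grad

# Toy check on synthetic data.
rng = np.random.default_rng(0)
X_toy = np.hstack([np.ones((50, 1)), rng.normal(size=(50, 2))])
y_toy = (X_toy[:, 1:2] > 0).astype(float)
w_toy = np.zeros((3, 1))
for _ in range(5):
    w_toy = irls_step(w_toy, X_toy, y_toy, l2=1.0)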
|
ragb/sudoaudio
|
sudoaudio/speech/__init__.py
|
Python
|
gpl-3.0
| 1,242 | 0.003221 |
# Copyright (c) 2011 - Rui Batista <ruiandrebatista@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import locale
import logging
from accessible_output import speech
logger = logging.getLogger(__name__)
_speaker = None
def init():
global _speaker
if _speaker:
return
_speaker = speech.Speaker()
def speak(message, cancel=True):
global _speaker
assert _speaker, "Speech module not initialized"
if cancel:
_speaker.silence()
_speaker.output(message)
def cancel():
assert _speaker, "Speech module not initialized"
_speaker.silence()
def quit():
pass
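# Minimal usage sketch of the API defined above (illustrative only; guarded so
# it never runs on import).
if __name__ == "__main__":
    init()                               # must run before speak()/cancel()
    speak("Hello, world")                # silences any ongoing speech first
    speak("No interruption", cancel=False)
    cancel()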
|
dariosena/LearningPython
|
general/dry/test_calc.py
|
Python
|
gpl-3.0
| 794 | 0 |
import unittest
import calc
class CalcTestCase(unittest.TestCase):
"""Test calc.py"""
def setUp(self):
self.num1 = 10
self.num2 = 5
def tearDown(self):
pass
    def test_add(self):
        # assertEqual compares the result with the expected value;
        # assertTrue would only check truthiness of the first argument.
        self.assertEqual(calc.add(self.num1, self.num2), self.num1 + self.num2)
    def test_subtract(self):
        self.assertEqual(calc.subtract(self.num1, self.num2),
                         self.num1 - self.num2)
    def test_multiply(self):
        self.assertEqual(calc.multiply(self.num1, self.num2),
                         self.num1 * self.num2)
    def test_divide(self):
        self.assertEqual(calc.divide(self.num1, self.num2),
                         self.num1 / self.num2)
if __name__ == '__main__':
|
    unittest.main()
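# For reference, a minimal calc.py consistent with the operations the tests call.
# This is purely a hypothetical sketch inferred from the test names; the real
# module is not shown in this file.
def add(a, b):
    return a + b

def subtract(a, b):
    return a - b

def multiply(a, b):
    return a * b

def divide(a, b):
    return a / b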
|
grnet/synnefo
|
snf-cyclades-app/synnefo/app_settings/default/api.py
|
Python
|
gpl-3.0
| 8,466 | 0.000472 |
# -*- coding: utf-8 -*-
#
# API configuration
#####################
DEBUG = False
# Top-level URL for deployment. Numerous other URLs depend on this.
CYCLADES_BASE_URL = "https://compute.example.synnefo.org/compute/"
# The API will return HTTP Bad Request if the ?changes-since
# parameter refers to a point in time more than POLL_LIMIT seconds ago.
POLL_LIMIT = 3600
# Astakos groups that have access to '/admin' views.
ADMIN_STATS_PERMITTED_GROUPS = ["admin-stats"]
# Enable/Disable the snapshots feature altogether at the API level.
# If set to False, Cyclades will not expose the '/snapshots' API URL
# of the 'volume' app.
CYCLADES_SNAPSHOTS_ENABLED = True
# Enable/Disable the feature of a sharing a resource to the members of the
# project to which it belongs, at the API level.
CYCLADES_SHARED_RESOURCES_ENABLED = False
# Enable/Disable the of feature of rescuing a Virtual Machine at the API
# level
RESCUE_ENABLED = False
#
# Network Configuration
#
# CYCLADES_DEFAULT_SERVER_NETWORKS setting contains a list of networks to
# connect a newly created server to, *if the user has not* specified them
# explicitly in the POST /server API call.
# Each member of the list may be a network UUID, a tuple of network UUIDs,
# "SNF:ANY_PUBLIC_IPV4" [any public network with an IPv4 subnet defined],
# "SNF:ANY_PUBLIC_IPV6 [any public network with only an IPV6 subnet defined],
# or "SNF:ANY_PUBLIC" [any public network].
#
# Access control and quota policy are enforced, just as if the user had
# specified the value of CYCLADES_DEFAULT_SERVER_NETWORKS in the content
# of the POST /call, after processing of "SNF:*" directives."
CYCLADES_DEFAULT_SERVER_NETWORKS = []
# This setting contains a list of networks which every new server
# will be forced to connect to, regardless of the contents of the POST
# /servers call, or the value of CYCLADES_DEFAULT_SERVER_NETWORKS.
# Its format is identical to that of CYCLADES_DEFAULT_SERVER_NETWORKS.
# WARNING: No access control or quota policy are enforced.
# The server will get all IPv4/IPv6 addresses needed to connect to the
# networks specified in CYCLADES_FORCED_SERVER_NETWORKS, regardless
# of the state of the floating IP pool of the user, and without
# allocating any floating IPs."
CYCLADES_FORCED_SERVER_NETWORKS = []
# Maximum allowed network size for private networks.
MAX_CIDR_BLOCK = 22
# Default settings used by network flavors
DEFAULT_MAC_PREFIX = 'aa:00:0'
DEFAULT_BRIDGE = 'br0'
# Network flavors that users are allowed to create through API requests
# Available flavors are IP_LESS_ROUTED, MAC_FILTERED, PHYSICAL_VLAN
API_ENABLED_NETWORK_FLAVORS = ['MAC_FILTERED']
# Settings for MAC_FILTERED network:
# ------------------------------------------
# All networks of this type are bridged to the same bridge. Isolation between
# networks is achieved by assigning a unique MAC-prefix to each network and
# filtering packets via ebtables.
DEFAULT_MAC_FILTERED_BRIDGE = 'prv0'
# Firewalling. Firewall tags should contain '%d' to be filled with the NIC
# ID.
GANETI_FIREWALL_ENABLED_TAG = 'synnefo:network:%s:protected'
GANETI_FIREWALL_DISABLED_TAG = 'synnefo:network:%s:unprotected'
GANETI_FIREWALL_PROTECTED_TAG = 'synnefo:network:%s:limited'
# The default firewall profile that will be in effect if no tags are defined
DEFAULT_FIREWALL_PROFILE = 'DISABLED'
# Fixed mapping of user VMs to a specific backend.
# e.g. BACKEND_PER_USER = {'example@synnefo.org': 2}
BACKEND_PER_USER = {}
# Encryption key for the instance hostname in the stat graphs URLs. Set it to
# a random string and update the STATS_SECRET_KEY setting in the snf-stats-app
# host (20-snf-stats-app-settings.conf) accordingly.
CYCLADES_STATS_SECRET_KEY = "secret_key"
# URL templates for the stat graphs.
# The API implementation replaces '%s' with the encrypted backend id.
CPU_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/cpu-bar/%s'
CPU_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/cpu-ts/%s'
NET_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/net-bar/%s'
NET_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/net-ts/%s'
# Recommended refresh period for server stats
STATS_REFRESH_PERIOD = 60
# The maximum number of file path/content pairs that can be supplied on server
# build
MAX_PERSONALITY = 5
# The maximum size, in bytes, for each personality file
MAX_PERSONALITY_SIZE = 10240
# Authentication URL of the astakos instance to be used for user management
ASTAKOS_AUTH_URL = 'https://accounts.example.synnefo.org/identity/v2.0'
# Tune the size of the Astakos http client connection pool
# This limit the number of concurrent requests to Astakos.
CYCLADES_ASTAKOSCLIENT_POOLSIZE = 50
# Key for password encryption-decryption. After changing this setting, synnefo
# will be unable to decrypt all existing Backend passwords. You will need to
# store again the new password by using 'snf-manage backend-modify'.
# SECRET_ENCRYPTION_KEY may up to 32 bytes. Keys bigger than 32 bytes are not
# supported.
SECRET_ENCRYPTION_KEY = "Password Encryption Key"
# Astakos service token
# The token used for astakos service api calls (e.g. api to retrieve user email
# using a user uuid)
CYCLADES_SERVICE_TOKEN = ''
# Template to use to build the FQDN of VMs. The setting will be formated with
# the id of the VM.
CYCLADES_SERVERS_FQDN = 'snf-%(id)s.vm.example.synnefo.org'
# Description of applied port forwarding rules (DNAT) for Cyclades VMs. This
# setting contains a mapping from the port of each VM to a tuple containing the
# destination IP/hostname and the new port: (host, port). Instead of a tuple a
# python callable object may be used which must return such a tuple. The caller
# will pass to the callable the following positional arguments, in the
# following order:
# * server_id: The ID of the VM in the DB
# * ip_address: The IPv4 address of the public VM NIC
# * fqdn: The FQDN of the VM
# * user: The UUID of the owner of the VM
#
# Here is an example describing the mapping of the SSH port of all VMs to
# the external address 'gate.example.synnefo.org' and port 60000+server_id.
# e.g. iptables -t nat -A prerouting -d gate.example.synnefo.org \
# --dport (61000 + $(VM_ID)) -j DNAT --to-destination $(VM_IP):22
#CYCLADES_PORT_FORWARDING = {
# 22: lambda ip_address, server_id, fqdn, user:
# ("gate.example.synnefo.org", 61000 + server_id),
#}
CYCLADES_PORT_FORWARDING = {}
# Extra configuration options required for snf-vncauthproxy (>=1.5). Each dict
# of the list, describes one vncauthproxy instance.
CYCLADES_VNCAUTHPROXY_OPTS = [
{
# These values are required for VNC console support. They should match
# a user / password configured in the snf-vncauthproxy authentication /
# users file (/var/lib/vncauthproxy/users).
'auth_user': 'synnefo',
'auth_password': 'secret_password',
# server_address and server_port should reflect the --listen-address and
# --listen-port options passed to the vncauthproxy daemon
'server_address': '127.0.0.1',
'server_port': 24999,
# Set to True to enable SSL support on the control socket.
'enable_ssl': False,
# If you enabled SSL support for snf-vncauthproxy you can optionally
        # provide a path to a CA file and enable strict checking for the server
        # certificate.
'ca_cert': None,
'strict': False,
},
]
# The maximum allowed size(GB) for a Cyclades Volume
CYCLADES_VOLUME_MAX_SIZE = 200
# The maximum allowed metadata items for a Cyclades Volume
CYCLADES_VOLUME_MAX_METADATA = 10
# The volume types that Cyclades allow to be detached
CYCLADES_DETACHABLE_DISK_TEMPLATES = ("ext_archipelago", "ext_vlmc")
# The maximum number of tags allowed for a Cyclades Virtual Machine
CYCLADES_VM_MAX_TAGS = 50
|
# The maximum allowed metadata items for a Cyclades Virtual Machine
CYCLADES_VM_MAX_METADATA = 10
# Define cache for public stats
PUBLIC_STATS_CACHE = {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
"KEY_PREFIX": "publicstats",
"TIMEOUT": 300,
}
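# To make the CYCLADES_PORT_FORWARDING callable form described further up concrete,
# here is a hedged sketch mirroring the commented example above. The host name and
# port offset are the example values from that comment, and the block is left
# commented out so the effective default stays an empty dict.
# def _ssh_port_forwarding(ip_address, server_id, fqdn, user):
#     # Forward SSH of every VM via gate.example.synnefo.org:61000+server_id
#     return ("gate.example.synnefo.org", 61000 + server_id)
#
# CYCLADES_PORT_FORWARDING = {22: _ssh_port_forwarding}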
# Permit users of specific
|
wmak/mvmv
|
mvmv/cli.py
|
Python
|
mit
| 6,353 | 0.001259 |
#!/usr/bin/env python
from os import path
import sys
import sqlite3
import random
import argparse
import re
import gzip
import mvmv.mvmv as mvmv
import mvmv.mvmvd as mvmvd
import mvmv.parse as parse
class DownloadDB(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(DownloadDB, self).__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
movie_list_name = "movies.list"
list_url = "ftp://ftp.fu-berlin.de/pub/misc/movies/database/movies.list.gz"
sys.stdout.write("Downloading ... ")
sys.stdout.flush()
if sys.version_info >= (3, 0):
import urllib.request
urllib.request.urlretrieve(list_url, movie_list_name + ".gz")
else:
import urllib
urllib.urlretrieve(list_url, movie_list_name + ".gz")
sys.stdout.write("Done\n")
sys.stdout.write("Adding to table ... ")
sys.stdout.flush()
with open(movie_list_name, 'wb') as movie_list:
with gzip.open(movie_list_name + ".gz", 'rb') as decompressed:
movie_list.write(decompressed.read())
parse.create_table(movie_list_name, "movie.db")
sys.stdout.write("Done.\n")
def get_parser():
usage_str = "%(prog)s [OPTIONS] [-r] [-w] [-s] DIRECTORY [DIRECTORY ...] -t DESTDIR"
parser = argparse.ArgumentParser(usage=usage_str)
parser.add_argument("-f", "--file", dest="files", metavar="FILE",
type=str, nargs='*', default=[],
help="Rename this FILE")
parser.add_argument("-s", "--srcdir", dest="srcdirs", metavar="SRCDIR",
type=str, nargs='*', default=[],
help="Rename all files in this DIRECTORY")
parser.add_argument("-t", "--destdir", dest="destdir", metavar="DESTDIR",
type=str, nargs=1, action='store', required=True,
help="Move all the files to this directory.")
parser.add_argument("-e", "--excludes", dest="excludes", metavar="REGEX",
type=str, nargs='*', default=[],
help="Rename all files in this DIRECTORY")
parser.add_argument("-r", "-R", "--recursive", action="store_true",
dest="recursive", default=False,
help="Recursively scan the directories for files." +
"(Unsupported)",)
parser.add_argument("-m", "--max-depth", dest="depth", metavar="DEPTH",
default=None, type=int, nargs='?',
help="Recursively scan the directories for files." +
"(Unsupported)",)
parser.add_argument("-g", "--gui", action="store_true", dest="start_gui",
default=False,
help="Start the program as a GUI." + "(Unsupported)")
parser.add_argument("-w", "--watch", action="store_true", dest="watch",
default=False,
help="Watch the given directories for new files")
parser.add_argument("--stop", action="store_true", dest="stop_daemon",
default=False,
help="Stop the daemon.")
parser.add_argument("--pidfile", dest="pidfile", nargs=1,
metavar="FILE", type=str, default="./mvmvd.pid",
help="The file where the pid is stored for the daemon")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
default=False,
help="Be more verbose." + "(Unsupported)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true",
default=False,
help="Only output errors." + "(Unsupported)")
parser.add_argument("-y", "--always-yes", dest="always_yes",
action="store_true", default=False,
help="Assume yes for every prompt." + "(Unsupported)")
parser.add_argument("-u", "--updatedb", dest="remotedb", default=None,
metavar="PATH", type=str, nargs='?',
action=DownloadDB,
help="Update the movies list from the given DBPATH." +
"(Unsupported custom DBPATH)")
# TODO(pbhandari): default db path should be sane.
parser.add_argument("-p", "--dbpath", dest="dbpath", nargs='?',
metavar="PATH", type=str, default="movie.db",
help="Alternate path for the database of movies.")
parser.add_argument('args', nargs=argparse.REMAINDER)
return parser
def error(message, end='\n'):
sys.stderr.write(sys.argv[0] + ": error: " + message + end)
sys.stderr.flush()
def main():
args = get_parser().parse_args()
args.files = [path.abspath(fname) for fname in args.files
                  if mvmv.is_valid_file(fname, args.excludes)]
args.srcdirs = [path.abspath(sdir) for sdir in args.srcdirs
if path.isdir(sdir)]
args.destdir = path.abspath(args.destdir[0])
for arg in args.args:
if path.isdir(arg):
args.srcdirs.append(path.abspath(arg))
elif mvmv.is_valid_file(arg):
args.files.append(arg)
|
    # args.destdir was already unpacked to a plain path string above
    if not path.isdir(args.destdir):
        error("'%s' is not a directory." % args.destdir)
sys.exit(1)
if not args.srcdirs and not args.files:
error("You must specify a directory or filename in the commandline.")
sys.exit(1)
conn = sqlite3.connect(args.dbpath)
cursor = conn.cursor()
args.excludes = [re.compile(a) for a in args.excludes]
if args.stop_daemon:
mvmvd.mvmvd(args.pidfile).stop()
if args.watch:
mvmvd.mvmvd(args.pidfile,
dirs=args.srcdirs,
dest=args.destdir,
recursive=args.recursive).start()
for query in args.files:
mvmv.movemovie(path.split(path.abspath(query)), args.destdir, cursor)
for dirname in args.srcdirs:
mvmv.movemovies(dirname, args.destdir, cursor, args.excludes)
conn.close()
# TODO(pbhandari): Code is ugly and stupid.
if __name__ == '__main__':
main()
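# Hypothetical helper showing how the parser above can be exercised in isolation;
# the paths are placeholders, and every flag used here is defined in get_parser().
def _example_parse():
    args = get_parser().parse_args(["-s", "/tmp/incoming", "-t", "/tmp/movies", "-r"])
    return args.srcdirs, args.destdir[0], args.recursive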
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/logging/sinks/list.py
|
Python
|
mit
| 4,667 | 0.004928 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging sinks list' command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class List(base.ListCommand):
"""Lists the defined sinks."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
parser.add_argument(
'--only-v2-sinks', required=False, action='store_true',
help='Display only v2 sinks.')
util.AddNonProjectArgs(parser, 'List sinks')
def Collection(self):
return 'logging.sinks'
def ListLogSinks(self, project, log_name):
"""List log sinks from the specified log."""
result = util.GetClientV1().projects_logs_sinks.List(
util.GetMessagesV1().LoggingProjectsLogsSinksListRequest(
projectsId=project, logsId=log_name))
for sink in result.sinks:
yield util.TypedLogSink(sink, log_name=log_name)
def ListLogServiceSinks(self, project, service_name):
"""List log service sinks from the specified service."""
result = util.GetClientV1().projects_logServices_sinks.List(
util.GetMessagesV1().LoggingProjectsLogServicesSinksListRequest(
projectsId=project, logServicesId=service_name))
for sink in result.sinks:
yield util.TypedLogSink(sink, service_name=service_name)
def ListSinks(self, parent):
"""List sinks."""
# Use V2 logging API.
result = util.GetClient().projects_sinks.List(
util.GetMessages().LoggingProjectsSinksListRequest(
parent=parent))
for sink in result.sinks:
yield util.TypedLogSink(sink)
def YieldAllSinks(self, project):
"""Yield all log and log service sinks from the specified project."""
client = util.GetClientV1()
messages = util.GetMessagesV1()
# First get all the log sinks.
response = list_pager.YieldFromList(
client.projects_logs,
messages.LoggingProjectsLogsListRequest(projectsId=project),
field='logs', batch_size=None, batch_size_attribute='pageSize')
for log in response:
# We need only the base log name, not the full resource uri.
log_id = util.ExtractLogId(log.name)
for typed_sink in self.ListLogSinks(project, log_id):
yield typed_sink
# Now get all the log service sinks.
response = list_pager.YieldFromList(
client.projects_logServices,
messages.LoggingProjectsLogServicesListRequest(projectsId=project),
field='logServices', batch_size=None, batch_size_attribute='pageSize')
for service in response:
      # In contrast, service.name correctly contains only the name.
for typed_sink in self.ListLogServiceSinks(project, service.name):
yield typed_sink
# Lastly, get all v2 sinks.
for typed_sink in self.ListSinks(util.GetCurrentProjectParent()):
yield typed_sink
def Run(self, args):
|
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
The list of sinks.
"""
util.CheckLegacySinksCommandArguments(args)
project = properties.VALUES.core.project.Get(required=True)
if args.log:
return self.ListLogSinks(project, args.log)
elif args.service:
return self.ListLogServiceSinks(project, args.service)
elif (args.organization or args.folder or args.billing_account or
args.only_v2_sinks):
return self.ListSinks(util.GetParentFromArgs(args))
else:
return self.YieldAllSinks(project)
List.detailed_help = {
'DESCRIPTION': """\
{index}
If either the *--log* or *--log-service* flags are included, then
the only sinks listed are for that log or that service.
If *--only-v2-sinks* flag is included, then only v2 sinks
are listed.
If none of the flags are included, then all sinks in use are listed.
""",
}
|
HumanCompatibleAI/imitation
|
src/imitation/scripts/config/train_adversarial.py
|
Python
|
mit
| 4,850 | 0.000825 |
"""Configuration for imitation.scripts.train_adversarial."""
import sacred
from imitation.rewards import reward_nets
from imitation.scripts.common import common, demonstrations, reward, rl, train
train_adversarial_ex = sacred.Experiment(
"train_adversarial",
ingredients=[
common.common_ingredient,
demonstrations.demonstrations_ingredient,
reward.reward_ingredient,
rl.rl_ingredient,
train.train_ingredient,
],
)
@train_adversarial_ex.config
def defaults():
show_config = False
total_timesteps = int(1e6) # Num of environment transitions to sample
algorithm_kwargs = dict(
demo_batch_size=1024, # Number of expert samples per discriminator update
n_disc_updates_per_round=4, # Num discriminator updates per generator round
)
algorithm_specific = {} # algorithm_specific[algorithm] is merged with config
checkpoint_interval = 0 # Num epochs between checkpoints (<0 disables)
@train_adversarial_ex.config
def aliases_default_gen_batch_size(algorithm_kwargs, rl):
# Setting generator buffer capacity and discriminator batch size to
# the same number is equivalent to not using a replay buffer at all.
# "Disabling" the replay buffer seems to improve convergence speed, but may
# come at a cost of stability.
algorithm_kwargs["gen_replay_buffer_capacity"] = rl["batch_size"]
# Shared settings
MUJOCO_SHARED_LOCALS = dict(rl=dict(rl_kwargs=dict(ent_coef=0.1)))
ANT_SHARED_LOCALS = dict(
total_timesteps=int(3e7),
algorithm_kwargs=dict(shared=dict(demo_batch_size=8192)),
rl=dict(batch_size=16384),
)
# Classic RL Gym environment named configs
@train_adversarial_ex.named_config
def acrobot():
env_name = "Acrobot-v1"
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def cartpole():
common = dict(env_name="CartPole-v1")
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def seals_cartpole():
common = dict(env_name="seals/CartPole-v0")
total_timesteps = int(1.4e6)
@train_adversarial_ex.named_config
def mountain_car():
common = dict(env_name="MountainCar-v0")
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def seals_mountain_car():
common = dict(env_name="seals/MountainCar-v0")
@train_adversarial_ex.named_config
def pendulum():
common = dict(env_name="Pendulum-v1")
# Standard MuJoCo Gym environment named configs
@train_adversarial_ex.named_config
def seals_ant():
locals().update(**MUJOCO_SHARED_LOCALS)
locals().update(**ANT_SHARED_LOCALS)
common = dict(env_name="seals/Ant-v0")
@train_adversarial_ex.named_config
def half_cheetah():
locals().update(**MUJOCO_SHARED_LOCALS)
    common = dict(env_name="HalfCheetah-v2")
    rl = dict(batch_size=16384, rl_kwargs=dict(batch_size=1024))
algorithm_specific = dict(
airl=dict(total_timesteps=int(5e6)),
gail=dict(total_timesteps=int(8e6)),
)
reward = dict(
algorithm_specific=dict(
airl=dict(
net_cls=reward_nets.BasicShapedRewardNet,
net_kwargs=dict(
reward_hid_sizes=(32,),
potential_hid_sizes=(32,),
),
),
),
)
algorithm_kwargs = dict(
# Number of discriminator updates after each round of generator updates
n_disc_updates_per_round=16,
# Equivalent to no replay buffer if batch size is the same
gen_replay_buffer_capacity=16384,
demo_batch_size=8192,
)
@train_adversarial_ex.named_config
def seals_hopper():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Hopper-v0")
@train_adversarial_ex.named_config
def seals_humanoid():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Humanoid-v0")
total_timesteps = int(4e6)
@train_adversarial_ex.named_config
def reacher():
common = dict(env_name="Reacher-v2")
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def seals_swimmer():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Swimmer-v0")
total_timesteps = int(2e6)
@train_adversarial_ex.named_config
def seals_walker():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Walker2d-v0")
# Debug configs
@train_adversarial_ex.named_config
def fast():
# Minimize the amount of computation. Useful for test cases.
# Need a minimum of 10 total_timesteps for adversarial training code to pass
# "any update happened" assertion inside training loop.
total_timesteps = 10
algorithm_kwargs = dict(
demo_batch_size=1,
n_disc_updates_per_round=4,
)
|
shakamunyi/neutron-vrrp
|
neutron/tests/unit/ml2/test_mechanism_odl.py
|
Python
|
apache-2.0
| 9,006 | 0 |
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
import mock
import requests
from neutron.plugins.common import constants
from neutron.plugins.ml2 import config as config
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mechanism_odl
from neutron.plugins.ml2 import plugin
from neutron.tests import base
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import testlib_api
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class OpenDaylightTestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'opendaylight'],
'ml2')
# Set URL/user/pass so init doesn't throw a cfg required error.
# They are not used in these tests since sendjson is overwritten.
config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
super(OpenDaylightTestCase, self).setUp(PLUGIN_NAME)
self.port_create_status = 'DOWN'
self.segment = {'api.NETWORK_TYPE': ""}
self.mech = mechanism_odl.OpenDaylightMechanismDriver()
mechanism_odl.OpenDaylightMechanismDriver.sendjson = (
self.check_sendjson)
def check_sendjson(self, method, urlpath, obj, ignorecodes=[]):
self.assertFalse(urlpath.startswith("http://"))
def test_check_segment(self):
"""Validate the check_segment call."""
self.segment[api.NETWORK_TYPE] = constants.TYPE_LOCAL
self.assertTrue(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_FLAT
self.assertFalse(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_VLAN
self.assertTrue(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_GRE
self.assertTrue(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_VXLAN
self.assertTrue(self.mech.check_segment(self.segment))
# Validate a network type not currently supported
self.segment[api.NETWORK_TYPE] = 'mpls'
self.assertFalse(self.mech.check_segment(self.segment))
class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
password='somepass'):
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'opendaylight'],
'ml2')
config.cfg.CONF.set_override('url', url, 'ml2_odl')
config.cfg.CONF.set_override('username', username, 'ml2_odl')
config.cfg.CONF.set_override('password', password, 'ml2_odl')
def _test_missing_config(self, **kwargs):
self._set_config(**kwargs)
self.assertRaises(config.cfg.RequiredOptError,
plugin.Ml2Plugin)
def test_valid_config(self):
self._set_config()
|
plugin.Ml2Plugin()
def test_missing_url_raises_exception(self):
self._test_missing_config(url=None)
    def test_missing_username_raises_exception(self):
self._test_missing_config(username=None)
def test_missing_password_raises_exception(self):
self._test_missing_config(password=None)
class OpenDaylightMechanismTestBasicGet(test_plugin.TestBasicGet,
OpenDaylightTestCase):
pass
class OpenDaylightMechanismTestNetworksV2(test_plugin.TestNetworksV2,
OpenDaylightTestCase):
pass
class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestSubnetsV2,
OpenDaylightTestCase):
pass
class OpenDaylightMechanismTestPortsV2(test_plugin.TestPortsV2,
OpenDaylightTestCase):
pass
class AuthMatcher(object):
def __eq__(self, obj):
return (obj.username == config.cfg.CONF.ml2_odl.username and
obj.password == config.cfg.CONF.ml2_odl.password)
class OpenDaylightMechanismDriverTestCase(base.BaseTestCase):
def setUp(self):
super(OpenDaylightMechanismDriverTestCase, self).setUp()
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'opendaylight'], 'ml2')
config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
self.mech = mechanism_odl.OpenDaylightMechanismDriver()
self.mech.initialize()
@staticmethod
def _get_mock_delete_resource_context():
current = {'id': '00000000-1111-2222-3333-444444444444'}
context = mock.Mock(current=current)
return context
_status_code_msgs = {
204: '',
401: '401 Client Error: Unauthorized',
403: '403 Client Error: Forbidden',
404: '404 Client Error: Not Found',
409: '409 Client Error: Conflict',
501: '501 Server Error: Not Implemented'
}
@classmethod
def _get_mock_request_response(cls, status_code):
response = mock.Mock(status_code=status_code)
response.raise_for_status = mock.Mock() if status_code < 400 else (
mock.Mock(side_effect=requests.exceptions.HTTPError(
cls._status_code_msgs[status_code])))
return response
def _test_delete_resource_postcommit(self, object_type, status_code,
exc_class=None):
self.mech.out_of_sync = False
method = getattr(self.mech, 'delete_%s_postcommit' % object_type)
context = self._get_mock_delete_resource_context()
request_response = self._get_mock_request_response(status_code)
with mock.patch('requests.request',
return_value=request_response) as mock_method:
if exc_class is not None:
self.assertRaises(exc_class, method, context)
else:
method(context)
url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type,
context.current['id'])
mock_method.assert_called_once_with(
'delete', url=url, headers={'Content-Type': 'application/json'},
data=None, auth=AuthMatcher(),
timeout=config.cfg.CONF.ml2_odl.timeout)
def test_delete_network_postcommit(self):
self._test_delete_resource_postcommit('network',
requests.codes.no_content)
for status_code in (requests.codes.unauthorized,
requests.codes.not_found,
requests.codes.conflict):
self._test_delete_resource_postcommit(
'network', status_code, requests.exceptions.HTTPError)
def test_delete_subnet_postcommit(self):
self._test_delete_resource_postcommit('subnet',
requests.codes.no_content)
for status_code in (requests
|
edoburu/django-fluent-utils
|
fluent_utils/softdeps/any_imagefield.py
|
Python
|
apache-2.0
| 1,856 | 0.002694 |
"""
Optional integration with django-any-Imagefield
"""
from __future__ import absolute_import
from django.db import models
from fluent_utils.django_compat import is_installed
if is_installed('any_imagefield'):
from any_imagefield.models import AnyFileField as BaseFileField, AnyImageField as BaseImageField
else:
BaseFileField = models.FileField
BaseImageField = models.ImageField
# subclassing here so South or Django migrations detect a single class.
class AnyFileField(BaseFileField):
"""
A FileField that can refer to an uploaded file.
If *django-any-imagefield* is not installed, the filebrowser link will not be displayed.
"""
|
def deconstruct(self):
# For Django migrations, masquerade as normal FileField too
name, path, args, kwargs = super(AnyFileField, self).deconstruct()
# FileField behavior
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
        kwargs['upload_to'] = getattr(self, 'upload_to', None) or getattr(self, 'directory', None) or ''
return name, "django.db.models.FileField", args, kwargs
# subclassing here so South or Django migrations detect a single class.
class AnyImageField(BaseImageField):
"""
An ImageField that can refer to an uploaded image file.
If *django-any-imagefield* is not installed, the filebrowser link will not be displayed.
"""
def deconstruct(self):
# For Django migrations, masquerade as normal ImageField too
name, path, args, kwargs = super(AnyImageField, self).deconstruct()
# FileField behavior
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = getattr(self, 'upload_to', None) or getattr(self, 'directory', None) or ''
return name, "django.db.models.ImageField", args, kwargs
|
indirectlylit/kolibri
|
kolibri/core/content/test/test_deletechannel.py
|
Python
|
mit
| 1,918 | 0.000521 |
from django.core.management import call_command
from django.test import TestCase
from mock import call
from mock import patch
from kolibri.core.content import models as content
class DeleteChannelTestCase(TestCase):
"""
Testcase for delete channel management command
"""
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
def delete_channel(self):
call_command("deletechannel", self.the_channel_id)
def test_channelmetadata_delete_remove_metadata_object(self):
self.delete_channel()
        self.assertEquals(0, content.ChannelMetadata.objects.count())
def test_channelmetadata_delete_remove_contentnodes(self):
self.delete_channel()
self.assertEquals(0, content.ContentNode.objects.count())
def test_channelmetadata_delete_leave_unrelated_contentnodes(self):
c2c1 = content.ContentNode.objects.get(title="c2c1")
new_id = c2c1.id[:-1] + "1"
content.ContentNode.objects.create(
id=new_id,
content_id=c2c1.content_id,
kind=c2c1.kind,
channel_id=c2c1.channel_id,
available=True,
title=c2c1.title,
)
self.delete_channel()
self.assertEquals(1, content.ContentNode.objects.count())
def test_channelmetadata_delete_remove_file_objects(self):
self.delete_channel()
self.assertEquals(0, content.File.objects.count())
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_files(self, os_remove_mock, content_file_path):
path = "testing"
content_file_path.return_value = path
num_files = content.LocalFile.objects.filter(available=True).count()
self.delete_channel()
os_remove_mock.assert_has_calls([call(path)] * num_files)
|
errollw/EyeTab
|
EyeTab_Python/gaze_geometry.py
|
Python
|
mit
| 5,818 | 0.010141 |
import visual as vpy
import numpy as np
import anatomical_constants
from math import sin, cos, acos, atan, radians, sqrt
from conic_section import Ellipse
cam_mat_n7 = np.array([[1062.348, 0.0 , 344.629],
[0.0 , 1065.308, 626.738],
[0.0 , 0.0 , 1.0]])
# [Looking at a camera facing the user]
# z points towards user's face
# x points to the left (same as px coord direction)
# y points downwards (same as px coord direction)
class Limbus:
def __init__(self, centre_mm_param, normal_param, ransac_ellipse_param):
self.center_mm = centre_mm_param
self.normal = normal_param
self.ransac_ellipse = ransac_ellipse_param
def ellipse_to_limbuses_persp_geom(ellipse, device):
limbus_r_mm = anatomical_constants.limbus_r_mm
focal_len_x_px, focal_len_y_px, prin_point_x, prin_point_y = device.get_intrisic_cam_params()
focal_len_z_px = (focal_len_x_px + focal_len_y_px) / 2
(x0_px, y0_px), (_, maj_axis_px), _ = ellipse.rotated_rect
# Using iris_r_px / focal_len_px = iris_r_mm / distance_to_iris_mm
iris_z_mm = (limbus_r_mm * 2 * focal_len_z_px) / maj_axis_px
# Using (x_screen_px - prin_point) / focal_len_px = x_world / z_world
iris_x_mm = -iris_z_mm * (x0_px - prin_point_x) / focal_len_x_px
iris_y_mm = iris_z_mm * (y0_px - prin_point_y) / focal_len_y_px
limbus_center = (iris_x_mm, iris_y_mm, iris_z_mm)
(ell_x0, ell_y0), (ell_w, ell_h), angle = ellipse.rotated_rect
new_rotated_rect = (ell_x0 - prin_point_x, ell_y0 - prin_point_y), (ell_w, ell_h), angle
ell = Ellipse(new_rotated_rect)
f = focal_len_z_px;
Z = np.array([[ell.A, ell.B / 2.0, ell.D / (2.0 * f)],
[ell.B / 2.0, ell.C, ell.E / (2.0 * f)],
[ell.D / (2.0 * f), ell.E / (2.0 * f), ell.F / (f * f)]])
eig_vals, eig_vecs = np.linalg.eig(Z)
idx = eig_vals.argsort()
eig_vals = eig_vals[idx]
eig_vecs = eig_vecs[:, idx]
L1, L2, L3 = eig_vals[2], eig_vals[1], eig_vals[0]
R = np.vstack([eig_vecs[:, 2], eig_vecs[:, 1], eig_vecs[:, 0]])
g = sqrt((L2 - L3) / (L1 - L3))
h = sqrt((L1 - L2) / (L1 - L3))
poss_normals = [R.dot([h, 0, -g]), R.dot([h, 0, g]), R.dot([-h, 0, -g]), R.dot([-h, 0, g])]
# Constraints
nx, ny, nz = poss_normals[0 if iris_x_mm > 0 else 1]
if nz > 0:
nx, ny, nz = -nx, -ny, -nz
if ny * nz < 0:
ny *= -1
if iris_x_mm > 0:
if nx > 0: nx *= -1
elif nx < 0: nx *= -1
return Limbus(limbus_center, [nx, ny, nz], ellipse)
def ellipse_to_limbuses_approx(ellipse, device):
""" Returns 2 ambiguous limbuses
"""
limbus_r_mm = anatomical_constants.limbus_r_mm
focal_len_x_px, focal_len_y_px, prin_point_x, prin_point_y = device.get_intrisic_cam_params()
focal_len_z_px = (focal_len_x_px + focal_len_y_px) / 2
(x0_px, y0_px), (min_axis_px, maj_axis_px), angle = ellipse.rotated_rect
# Using iris_r_px / focal_len_px = iris_r_mm / distance_to_iris_mm
iris_z_mm = (limbus_r_mm * 2 * focal_len_z_px) / maj_axis_px
# Using (x_screen_px - prin_point) / focal_len_px = x_world / z_world
iris_x_mm = -iris_z_mm * (x0_px - prin_point_x) / focal_len_x_px
iris_y_mm = iris_z_mm * (y0_px - prin_point_y) / focal_len_y_px
limbus_center = (iris_x_mm, iris_y_mm, iris_z_mm)
psi = radians(angle) # z-axis rotation (radians)
tht_1 = acos(min_axis_px / maj_axis_px) # y-axis rotation (radians)
tht_2 = -tht_1 # as acos has 2 ambiguous solutions
# Find 2 possible normals for the limbus (weak perspective)
normal_1 = vpy.vector(sin(tht_1) * cos(psi), -sin(tht_1) * sin(psi), -cos(tht_1))
normal_2 = vpy.vector(sin(tht_2) * cos(psi), -sin(tht_2) * sin(psi), -cos(tht_2))
# Now correct for weak perspective by modifying angle by offset between camera axis and limbus
x_correction = -atan(iris_y_mm / iris_z_mm)
y_correction = -atan(iris_x_mm / iris_z_mm)
x_axis, y_axis = vpy.vector(1, 0, 0), vpy.vector(0, -1, 0) # VPython uses different y axis
normal_1 = vpy.rotate(normal_1, y_correction, y_axis)
normal_1 = vpy.rotate(normal_1, x_correction, x_axis).astuple()
normal_2 = vpy.rotate(normal_2, y_correction, y_axis)
normal_2 = vpy.rotate(normal_2, x_correction, x_axis).astuple()
return Limbus(limbus_center, normal_1, ellipse)
def get_gaze_point_px(limbus, device):
    """ Convenience method for getting gaze point on screen in px
    """
    # device is required by convert_gaze_pt_mm_to_px below
    gaze_point_mm = get_gaze_point_mm(limbus)
    return convert_gaze_pt_mm_to_px(gaze_point_mm, device)
def get_gaze_point_mm(limbus):
""" Returns intersection with z-plane of optical axis vector (mm)
"""
# Ray-plane intersection
x0, y0, z0 = limbus.center_mm
dx, dy, dz = limbus.normal
t = -z0 / dz
x_screen_mm, y_screen_mm = x0 + dx * t, y0 + dy * t
return x_screen_mm, y_screen_mm
def convert_gaze_pt_mm_to_px((x_screen_mm, y_screen_mm), device):
""" Returns intersection with screen in coordinates (px)
"""
screen_w_mm, screen_h_mm = device.screen_size_mm
screen_w_px, screen_h_px = device.screen_size_px
    screen_y_offset_px = device.screen_y_offset_px  # height of notification bar
x_offset, y_offset = device.offset_mm # screen offset from camera position
x_screen_px = (x_screen_mm + x_offset) / screen_w_mm * screen_w_px
y_screen_px = (y_screen_mm - y_offset) / screen_h_mm * screen_h_px - screen_y_offset_px
return x_screen_px, y_screen_px
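# The optical-axis/screen intersection in get_gaze_point_mm is an ordinary
# ray-plane intersection with t = -z0/dz. A tiny numeric check with made-up
# limbus values (toy numbers only, not measured data):
x0, y0, z0 = 10.0, 20.0, 300.0       # limbus centre (mm), 300 mm in front of camera
dx, dy, dz = -0.1, 0.05, -0.99       # optical-axis direction, pointing back at the screen plane z = 0
t = -z0 / dz                         # ~303.0
gaze_mm = (x0 + dx * t, y0 + dy * t) # ~(-20.3, 35.2) mm on the screen plane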
|
ConstantinT/jAEk
|
crawler/models/urlstructure.py
|
Python
|
gpl-3.0
| 1,991 | 0.00452 |
'''
Copyright (C) 2015 Constantin Tschuertz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from enum import Enum
import hashlib
from models.parametertype import ParameterType
__author__ = 'constantin'
class UrlStructure():
    def __init__(self, path, parameters=None, url_hash=None):
        self.path = path
        # Dict keyed by parameter name: parametername, parametertype, origin, generating <= change of the param creates a new page
        self.parameters = parameters if parameters is not None else {}
        self.url_hash = url_hash
def get_parameter_type(self, parameter_name):
if parameter_name not in self.parameters:
raise KeyError("{} not found".format(parameter_name))
return ParameterType(self.parameters[parameter_name]['parameter_type'])
def get_parameter_origin(self, parameter_name):
if parameter_name not in self.parameters:
raise KeyError("{} not found".format(parameter_name))
return ParameterType(self.parameters[parameter_name]['origin'])
def toString(self):
msg = "[Url: {} \n".format(self.path)
for param in self.parameters:
msg += "{} - {} - {} - {} \n".format(param, ParameterType(self.parameters[param]['parameter_type']), ParameterOrigin(self.parameters[param]['origin']), self.parameters[param]['generating'])
msg += "Hash: {}]".format(self.url_hash)
return msg
class ParameterOrigin(Enum):
ServerGenerated = 0
ClientGenerated = 1
|