| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 | stringlengths 0–8.16k | stringlengths 3–512 | stringlengths 0–8.17k |
pearsonlab/thunder | thunder/utils/context.py | Python | apache-2.0 | 36,355 | 0.004924
""" Simple wrapper for a Spark Context to provide loading functionality """
import os
from thunder.utils.common import checkParams, handleFormat, raiseErrorIfPathExists
from thunder.utils.datasets import DataSets
from thunder.utils.params import Params
class ThunderContext():
"""
Wrapper for a SparkContext that provides an entry point for loading and saving.
Also supports creation of example datasets, and loading example
data both locally and from EC2.
"""
def __init__(self, sparkcontext):
self._sc = sparkcontext
self._credentials = None
@classmethod
def start(cls, *args, **kwargs):
"""
Starts a ThunderContext using the same arguments as SparkContext
"""
from pyspark import SparkContext
return ThunderContext(SparkContext(*args, **kwargs))
def addPyFile(self, path):
"""
Adds a .zip or .py or .egg dependency for all tasks to be executed
as part of this context.
Uses the corresponding SparkContext method.
Parameters
----------
path : str
Path to a file as either a local file, file in HDFS, or URI.
"""
self._sc.addPyFile(path)
def stop(self):
"""
Shut down the context
"""
self._sc.stop()
def loadSeries(self, dataPath, nkeys=None, nvalues=None, inputFormat='binary', minPartitions=None,
maxPartitionSize='32mb', confFilename='conf.json', keyType=None, valueType=None, keyPath=None,
varName=None):
"""
Loads a Series object from data stored as binary, text, npy, or mat.
For binary and text, supports single files or multiple files stored on a local file system,
a networked file system (mounted and available on all cluster nodes), Amazon S3, or HDFS.
For local formats (npy and mat) only local file systems currently supported.
Parameters
----------
dataPath: string
Path to data files or directory, as either a local filesystem path or a URI.
May include a single '*' wildcard in the filename. Examples of valid dataPaths include
'local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".
nkeys: int, optional (required if `inputFormat` is 'text'), default = None
Number of keys per record (e.g. 3 for (x, y, z) coordinate keys). Must be specified for
text data; can be specified here or in a configuration file for binary data.
nvalues: int, optional (required if `inputFormat` is 'text')
Number of values per record. Must be specified here or in a configuration file for binary data.
inputFormat: {'text', 'binary', 'npy', 'mat'}. optional, default = 'binary'
inputFormat of data to be read.
minPartitions: int, optional, default = SparkContext.minParallelism
Minimum number of Spark partitions to use, only for text.
maxPartitionSize : int, optional, default = '32mb'
Maximum size of partitions as a Java-style memory string, e.g. '32mb' or '64mb',
indirectly controls the number of Spark partitions, only for binary.
confFilename: string, optional, default 'conf.json'
Path to JSON file with configuration options including 'nkeys', 'nvalues',
'keyType', and 'valueType'. If a file is not found at the given path, then the base
directory in 'dataPath' will be checked. Parameters will override the conf file.
keyType: string or numpy dtype, optional, default = None
Numerical type of keys, will override conf file.
valueType: string or numpy dtype, optional, default = None
Numerical type of values, will override conf file.
keyPath: string, optional, default = None
Path to file with keys when loading from npy or mat.
varName : str, optional, default = None
Variable name to load (for MAT files only)
Returns
-------
data: thunder.rdds.Series
A Series object, wrapping an RDD, with (n-tuples of ints) : (numpy array) pairs
"""
checkParams(inputFormat, ['text', 'binary', 'npy', 'mat'])
from thunder.rdds.fileio.seriesloader import SeriesLoader
loader = SeriesLoader(self._sc, minPartitions=minPartitions)
if inputFormat.lower() == 'binary':
data = loader.fromBinary(dataPath, confFilename=confFilename, nkeys=nkeys, nvalues=nvalues,
keyType=keyType, valueType=valueType, maxPartitionSize=maxPartitionSize)
elif inputFormat.lower() == 'text':
if nkeys is None:
raise Exception('Must provide number of keys per record for loading from text')
data = loader.fromText(dataPath, nkeys=nkeys)
elif inputFormat.lower() == 'npy':
data = loader.fromNpyLocal(dataPath, keyPath)
else:
if varName is None:
raise Exception('Must provide variable name for loading MAT files')
data = loader.fromMatLocal(dataPath, varName, keyPath)
return data
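# A minimal usage sketch (not part of the original module; the app name and paths
# below are hypothetical and assume a running Spark cluster with Thunder installed):
#
#     tsc = ThunderContext.start(appName="thunder")
#     series = tsc.loadSeries("s3n://my-bucket/data/", inputFormat="binary")
#     images = tsc.loadImages("/mnt/data/*.tif", inputFormat="tif")   # loadImages is defined below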
def loadImages(self, dataPath, dims=None, dtype=None, inputFormat='stack', ext=None,
startIdx=None, stopIdx=None, recursive=False, nplanes=None, npartitions=None,
renumber=False, confFilename='conf.json'):
"""
Loads an Images object from data stored as a binary image stack, tif, or png files.
Supports single files or multiple files, stored on a local file system, a networked file system
(mounted and available on all nodes), Amazon S3, or Google Storage.
HDFS is not currently supported for image file data.
Parameters
----------
dataPath: string
Path to data files or directory, as either a local filesystem path or a URI.
May include a single '*' wildcard in the filename. Examples of valid dataPaths include
'local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".
dims: tuple of positive int, optional (required if inputFormat is 'stack')
Image dimensions. Binary stack data will be interpreted as a multidimensional array
with the given dimensions, and should be stored in column-major order (Fortran or Matlab convention),
where the first dimension changes most rapidly. For 'png' or 'tif' data dimensions
will be read from the image file headers.
inputFormat: str, optional, default = 'stack'
Expected format of the input data: 'stack', 'png', or 'tif'. 'stack' indicates flat binary stacks.
'png' or 'tif' indicate image format. Pages of a multipage tif file will be extended along
the third dimension. Separate files are interpreted as distinct records, with ordering
given by lexicographic sorting of file names.
ext: string, optional, default = None
File extension, default will be "bin" if inputFormat=="stack", "tif" for inputFormat=='tif',
and 'png' for inputFormat=="png".
dtype: string or numpy dtype, optional, default = 'int16'
Data type of the image files to be loaded, specified as a numpy "dtype" string.
Ignored for 'tif' or 'png' (data will be inferred from image formats).
startIdx: nonnegative int, optional, default = None
Convenience parameters to read only a subset of input files. Uses python slice conventions
(zero-based indexing with exclusive final position). These parameters give the starting
and final index after lexicographic sorting.
stopIdx: nonnegative int, optional, default = None
See startIdx.
recursive: boolean, optional, default = False
If true, will recursively descend directories rooted at dataPath, loading all files
in the tree with an appropriate extension.
nplanes: positive integer, optional, de
ProjectQ-Framework/ProjectQ | projectq/cengines/_main_test.py | Python | apache-2.0 | 7,119 | 0.000421
# -*- coding: utf-8 -*-
# Copyright 2017, 2021 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for projectq.cengines._main.py."""
import sys
import weakref
import pytest
from projectq.backends import Simulator
from projectq.cengines import BasicMapperEngine, DummyEngine, LocalOptimizer, _main
from projectq.ops import AllocateQubitGate, DeallocateQubitGate, FlushGate, H
def test_main_engine_init():
ceng1 = DummyEngine()
ceng2 = DummyEngine()
test_backend = DummyEngine()
engine_list = [ceng1, ceng2]
eng = _main.MainEngine(backend=test_backend, engine_list=engine_list)
assert id(eng.next_engine) == id(ceng1)
assert id(eng.main_engine) == id(eng)
assert not eng.is_last_engine
assert id(ceng1.next_engine) == id(ceng2)
assert id(ceng1.main_engine) == id(eng)
assert not ceng1.is_last_engine
assert id(ceng2.next_engine) == id(test_backend)
assert id(ceng2.main_engine) == id(eng)
assert not ceng2.is_last_engine
assert test_backend.is_last_engine
assert id(test_backend.main_engine) == id(eng)
assert not test_backend.next_engine
assert len(engine_list) == 2
def test_main_engine_init_failure():
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(backend=DummyEngine)
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(engine_list=DummyEngine)
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(engine_list=[DummyEngine(), DummyEngine])
with pytest.raises(_main.UnsupportedEngineError):
engine = DummyEngine()
_main.MainEngine(backend=engine, engine_list=[engine])
def test_main_engine_init_defaults():
eng = _main.MainEngine()
eng_list = []
current_engine = eng.next_engine
while not current_engine.is_last_engine:
eng_list.append(current_engine)
current_engine = current_engine.next_engine
assert isinstance(eng_list[-1].next_engine, Simulator)
import projectq.setups.default
default_engines = projectq.setups.default.get_engine_list()
for engine, expected in zip(eng_list, default_engines):
assert type(engine) == type(expected)
def test_main_engine_too_many_compiler_engines():
old = _main._N_ENGINES_THRESHOLD
_main._N_ENGINES_THRESHOLD = 3
_main.MainEngine(backend=DummyEngine(), engine_list=[DummyEngine(), DummyEngine()])
with pytest.raises(ValueError):
_main.MainEngine(backend=DummyEngine(), engine_list=[DummyEngine(), DummyEngine(), DummyEngine()])
_main._N_ENGINES_THRESHOLD = old
def test_main_engine_init_mapper():
class LinearMapper(BasicMapperEngine):
pass
mapper1 = LinearMapper()
mapper2 = BasicMapperEngine()
engine_list1 = [mapper1]
eng1 = _main.MainEngine(engine_list=engine_list1)
assert eng1.mapper == mapper1
engine_list2 = [mapper2]
eng2 = _main.MainEngine(engine_list=engine_list2)
assert eng2.mapper == mapper2
engine_list3 = [mapper1, mapper2]
with pytest.raises(_main.UnsupportedEngineError):
_main.MainEngine(engine_list=engine_list3)
def test_main_engine_del():
# Clear previous exceptions of other tests
sys.last_type = None
del sys.last_type
# need engine which caches commands to test that del calls flush
caching_engine = LocalOptimizer(cache_size=5)
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[caching_engine])
qubit = eng.allocate_qubit()
H | qubit
assert len(backend.received_commands) == 0
eng.__del__()
# Allocate, H, Deallocate, and Flush Gate
assert len(backend.received_commands) == 4
def test_main_engine_set_and_get_measurement_result():
eng = _main.MainEngine()
qubit0 = eng.allocate_qubit()
qubit1 = eng.allocate_qubit()
with pytest.raises(_main.NotYetMeasuredError):
print(int(qubit0))
eng.set_measurement_result(qubit0[0], True)
eng.set_measurement_result(qubit1[0], False)
assert int(qubit0)
assert not int(qubit1)
def test_main_engine_get_qubit_id():
# Test that ids are not identical
eng = _main.MainEngine()
ids = []
for _ in range(10):
ids.append(eng.get_new_qubit_id())
assert len(set(ids)) == 10
def test_main_engine_flush():
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[DummyEngine()])
qubit = eng.allocate_qubit()
H | qubit
eng.flush()
assert len(backend.received_commands) == 3
assert backend.received_commands[0].gate == AllocateQubitGate()
assert backend.received_commands[1].gate == H
assert backend.received_commands[2].gate == FlushGate()
eng.flush(deallocate_qubits=True)
assert len(backend.received_commands) == 5
assert backend.received_commands[3].gate == DeallocateQubitGate()
# keep the qubit alive until at least here
assert len(str(qubit)) != 0
def test_main_engine_atexit_no_error():
# Clear previous exceptions of other tests
sys.last_type = None
del sys.last_type
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[])
qb = eng.allocate_qubit() # noqa: F841
eng._delfun(weakref.ref(eng))
assert len(backend.received_commands) == 3
assert backend.received_commands[0].gate == AllocateQubitGate()
assert backend.received_commands[1].gate == DeallocateQubitGate()
assert backend.received_commands[2].gate == FlushGate()
def test_main_engine_atexit_with_error():
sys.last_type = "Something"
backend = DummyEngine(save_commands=True)
eng = _main.MainEngine(backend=backend, engine_list=[])
qb = eng.allocate_qubit() # noqa: F841
eng._delfun(weakref.ref(eng))
assert len(backend.received_commands) == 1
assert backend.received_commands[0].gate == AllocateQubitGate()
def test_exceptions_are_forwarded():
class ErrorEngine(DummyEngine):
def receive(self, command_list):
raise TypeError
eng = _main.MainEngine(backend=ErrorEngine(), engine_list=[])
with pytest.raises(TypeError):
qb = eng.allocate_qubit() # noqa: F841
eng2 = _main.MainEngine(backend=ErrorEngine(), engine_list=[])
with pytest.raises(TypeError):
qb = eng2.allocate_qubit() # noqa: F841
# NB: avoid throwing exceptions when destroying the MainEngine
eng.next_engine = DummyEngine()
eng.next_engine.is_last_engine = True
eng2.next_engine = DummyEngine()
eng2.next_engine.is_last_engine = True
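The tests above exercise the full MainEngine lifecycle: construction, qubit allocation, gate application, flushing, and teardown. As a minimal sketch of the same public API (a one-qubit illustration, not taken from the test file):

from projectq import MainEngine
from projectq.ops import H, Measure

eng = MainEngine()            # default engine list with a Simulator backend
qubit = eng.allocate_qubit()  # a one-qubit register
H | qubit                     # apply a Hadamard gate
Measure | qubit               # measure in the computational basis
eng.flush()                   # push all cached commands to the backend
print(int(qubit))             # 0 or 1 after measurement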
BT-ojossen/website | website_logo/models/company.py | Python | agpl-3.0 | 1,095 | 0
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class Company(models.Model):
_inherit = 'res.company'
website_logo = fields.Binary("Website logo")
scjrobertson/xRange | kalman/gaussian.py | Python | gpl-3.0 | 4,271 | 0.012409
'''Module containing a DensityFunc abstract class, with common probability densities
@since: Jan 10, 2013
@author: kroon
'''
from __future__ import division
import numpy as np
class Gaussian(object):
'''
Class for representing a multi-dimensional Gaussian distribution of dimension d,
given mean and covariance.
The covariance matrix has to be positive definite and non-singular.
Parameters
----------
mean : (d,) ndarray
mean of the distribution
cov : (d,d) ndarray
Covariance matrix.
Methods
-------
f
Returns the value of the density function
logf
Returns the log of the density function
likelihood
Returns the likelihood of the data
loglik
Returns the log-likelihood of the data
sample
Returns samples drawn from the normal distribution with the given
mean and covariance
Example
-------
>>> from density import Gaussian
>>> # Scalar example
>>> mean = [10.]
>>> cov = [[1.]]
>>> ga = Gaussian(mean,cov)
>>> ga.f([10.])
0.398942280401
>>> x = np.array([[10.,10.,10.]])
>>> ga.likelihood(x)
0.0634936359342
>>> # Multivariate example
>>> mean = [10.0, 10.0]
>>> cov = [[1., 0.], [0., 10.]]
>>> ga = Gaussian(mean,cov)
>>> ga.f(np.array([10., 10.]))
0.050329212104487035
>>> x = np.array([[10.,10.,10.,10.],[10.,10.,10.,10.]])
>>> ga.likelihood(x)
6.4162389091777101e-06
'''
def __init__(self, mean=[0.,0.], cov=[[1.,0.],[0.,1.]]):
mean = np.array(mean); cov = np.array(cov)
d,n = cov.shape
self._dim = d
self._mean = mean.flatten()
self._cov = cov
self._covdet = np.linalg.det(2.0*np.pi*cov)
if self._covdet < 10e-12:
raise ValueError('The covariance matrix is singular.')
def f(self, x):
'''
Calculate the value of the normal distributions at x
Parameters
----------
x : (d,) ndarray
Evaluate a single d-dimensional samples x
Returns
-------
val : scalar
The value of the normal distribution at x.
'''
return np.exp(self.logf(x))
def logf(self, x):
'''
Calculate the log-density at x
Parameters
----------
x : (d,) ndarray
Evaluate the log-normal distribution at a single d-dimensional
sample x
Returns
-------
val : scalar
The value of the log of the normal distribution at x.
'''
#x = x[:,np.newaxis]
trans = x - self._mean
mal = -trans.dot(np.linalg.solve(self._cov,trans))/2.
return -0.5*np.log(self._covdet) + mal
def likelihood(self, x):
'''
Calculates the likelihood of the data set x for the normal
distribution.
Parameters
----------
x : (d,n) ndarray
Calculate the likelihood of n, d-dimensional samples
Returns
-------
val : scalar
The likelihood value
'''
return np.exp(self.loglik(x))
def loglik(self, x):
'''
Calculates the log-likelihood of the data set x for the normal
distribution.
Parameters
----------
x : (d,n) ndarray
Calculate the likelihood of n, d-dimensional samples
Returns
-------
val : scalar
The log-likelihood value
'''
return np.sum(np.apply_along_axis(self.logf, 0, x))
def sample(self, n=1):
'''
Calculates n independent points sampled from the normal distribution
Parameters
----------
n : int
The number of samples
Returns
-------
samples : (d,n) ndarray
n, d-dimensional samples
'''
return np.random.multivariate_normal(self._mean, self._cov, n).T
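A short standalone sketch of the Gaussian API defined above (assumes the class is in scope, e.g. imported as in its docstring; the inputs are illustrative):

import numpy as np

ga = Gaussian(mean=[0., 0.], cov=[[1., 0.], [0., 1.]])
x = ga.sample(500)                 # (2, 500) array: 500 two-dimensional draws
print(ga.f(np.array([0., 0.])))    # density at the mean, 1/(2*pi) ~= 0.159
print(ga.loglik(x))                # log-likelihood of the sampled data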
ntthuy11/CodeFights | Arcade/04_Python/05_ComplexityOfComprehension/coolPairs.py | Python | mit | 853 | 0.014068
""" A pair of numbers is considered to be cool if their product is divisible by their sum. More formally,
a pair (i, j) is cool if and only if (i * j) % (i + j) = 0.
Given two lists a and b, find cool pairs with the first number in the pair from a, and the second one from b.
Return the number of different sums of elements in such pairs.
Example
For a = [4, 5, 6, 7, 8] and b = [8, 9, 10, 11, 12], the output should be
coolPairs(a, b) = 2.
There are three cool pairs that can be formed from these arrays: (4, 12), (6, 12) and (8, 8). Their respective
sums are 16, 18 and 16, which means that there are just 2 different sums: 16 and 18. Thus, the output should be
equal to 2.
"""
def coolPairs(a, b):
uniqueSums = {i+j for i in a for j in b if i*j%(i+j) == 0} # CodeFights asks to change this line only
return len(uniqueSums)
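A worked check of the docstring example above, using the coolPairs function just defined:

a = [4, 5, 6, 7, 8]
b = [8, 9, 10, 11, 12]
# The cool pairs are (4, 12), (6, 12) and (8, 8); their sums form the set {16, 18}.
print(coolPairs(a, b))  # 2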
MJuddBooth/pandas | pandas/core/computation/ops.py | Python | bsd-3-clause | 16,387 | 0
"""Operator classes for eval.
"""
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import operator as op
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.compat import PY3, string_types, text_type
from pandas.core.dtypes.common import is_list_like, is_scalar
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs', 'log10',
'floor', 'ceil'
)
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def is_scalar(self):
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __unicode__(self):
# in python 2 str() of float
# can truncate shorter than repr()
return repr(self.name)
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(pprint_thing(opr))
for opr in self.operands)
return pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def is_scalar(self):
return all(operand.is_scalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
flyingpoops/kaggle-digit-recognizer-team-learning | plot.py | Python | apache-2.0 | 1,298 | 0.008475
import sys, os, math
import time
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.decomposition import PCA
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn import metrics
import sklearn.svm as svm
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
cut_pt = 1
print ("Reading the file...")
input_res = read_csv(os.path.expanduser("input/train.csv"), nrows=3000) # load pandas dataframe
input_res = input_res.as_matrix()
shape = input_res.shape
number_of_rows = shape[0]
number_of_columns = shape[1]
number_of_fv = number_of_columns - cut_pt
print ("Number of rows: %d (document)" % number_of_rows)
print ("Number of columns: %d (feature vector(preprocessed) + topics class labels(preprocessed))" % number_of_columns)
print ("Number of class_labels: %d" % number_of_fv)
# initialize training x and y's
x = input_res[:,cut_pt:number_of_columns]
y = input_res[:,0:cut_pt].transpose().ravel()
x = x / 255.
data = x[0]
print (data)
print (data.shape[0])
img = data.reshape(28, 28)
img = img.astype(np.float32)
plt.imshow(img, cmap="gray")
plt.show()
github/codeql | python/ql/test/query-tests/Security/lib/fabric/__init__.py | Python | mit | 113 | 0
from .connection import Connection
from .group import Group, SerialGroup, ThreadingGroup
from .tasks import task
thonkify/thonkify | src/lib/libfuturize/fixes/fix_order___future__imports.py | Python | mit | 830 | 0
"""
UNFINISHED
Fixer for turning multiple lines like these:
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
into a single line like this:
from __future__ import (absolute_import, division, print_function)
This helps with testing of ``futurize``.
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import future_import
class FixOrderFutureImports(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "file_input"
run_order = 10
# def match(self, node):
# """
# Match only once per file
# """
# if hasattr(node, 'type') and node.type == syms.file_input:
# return True
# return False
def transform(self, node, results):
# TODO # write me
pass
ruishihan/R7-with-notes | src/python/ioread.py | Python | apache-2.0 | 82 | 0.012195
import axi2s_c
import sys
uut = axi2s_c.axi2s_c()
uut.read(sys.argv[1])
dergraaf/xpcc | tools/system_design/xmlparser/event.py | Python | bsd-3-clause | 1,538 | 0.048114
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import utils
import xml_utils
from parser_exception import ParserException
class Event(object):
def __init__(self, node):
""" Constructor
Keyword arguments:
node -- XML node defining this event
"""
self.node = node
self.name = node.get('name')
utils.check_name(self.name)
self.id = None
self.description = None
self.rate = None
self.type = None
def evaluate(self, tree):
if self.node is None:
return
self.id = xml_utils.get_identifier(self.node)
self.description = xml_utils.get_description(self.node)
self.rate = self.node.get('rate')
type = self.node.get('type')
if type is None:
self.type = None
else:
try:
self.type = tree.types[type]
except KeyError as e:
raise ParserException("Type '%s' is not defined. Used by Event '%s')" % (type, self.name))
self.node = None
def update(self, other):
""" Update events with the values from another event
Events are guaranteed to be unique within the evaluated tree. Therefore
an update demand can only be issued for the same events, one declared
in the super-class and the other in the sub-class.
The assert statement checks this, nothing else needs to be done.
"""
assert id(self) == id(other)
def __cmp__(self, other):
return cmp(self.id, other.id) or cmp(self.name, other.name)
def __str__(self):
if self.type is None:
type = None
else:
type = self.type.name
return "[%02x] %s : %s" % (self.id, self.name, type)
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/numpy/testing/utils.py | Python | mit | 66,431 | 0.001114
"""
Utility function to facilitate testing.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import re
import operator
import warnings
from functools import partial
import shutil
import contextlib
from tempfile import mkdtemp, mkstemp
from .nosetester import import_nose
from numpy.core import float32, empty, arange, array_repr, ndarray
from numpy.lib.utils import deprecate
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
__all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir']
class KnownFailureException(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
KnownFailureTest = KnownFailureException # backwards compat
# nose.SkipTest is unittest.case.SkipTest
# import it into the namespace, so that it's available as np.testing.SkipTest
try:
from unittest.case import SkipTest
except ImportError:
# on py2.6 unittest.case is not available. Ask nose for a replacement.
SkipTest = import_nose().SkipTest
verbose = 0
def assert_(val, msg=''):
"""
Assert that works in release mode.
Accepts callable msg to allow deferring evaluation until failure.
The Python built-in ``assert`` does not work when executing code in
optimized mode (the ``-O`` flag) - no byte-code is generated for it.
For documentation on usage, refer to the Python documentation.
"""
if not val:
try:
smsg = msg()
except TypeError:
smsg = msg
raise AssertionError(smsg)
def gisnan(x):
"""like isnan, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isnan and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isnan
st = isnan(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isnan not supported for this type")
return st
def gisfinite(x):
"""like isfinite, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isfinite and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isfinite, errstate
with errstate(invalid='ignore'):
st = isfinite(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isfinite not supported for this type")
return st
def gisinf(x):
"""like isinf, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isinf and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isinf, errstate
with errstate(invalid='ignore'):
st = isinf(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isinf not supported for this type")
return st
@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
"Use numpy.random.rand instead.")
def rand(*args):
"""Returns an array of random numbers with the given shape.
This only uses the standard library, so it is useful for testing purposes.
"""
import random
from numpy.core import zeros, float64
results = zeros(args, float64)
f = results.flat
for i in range(len(f)):
f[i] = random.random()
return results
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
inum=-1, format=None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
# My older explanation for this was that the "AddCounter" process forced
# the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None:
format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
elif sys.platform[:5] == 'linux':
def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
"""
Return virtual memory size in bytes of the running python.
"""
try:
f = open(_proc_pid_stat, 'r')
l = f.readline().split(' ')
f.close()
return int(l[22])
except:
return
else:
def memusage():
"""
Return memory usage of running python. [Not implemented]
"""
raise NotImplementedError
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
f = open(_proc_pid_stat, 'r')
l = f.readline().split(' ')
f.close()
return int(l[13])
except:
return int(100*(time.time()-_load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
msg = ['\n' + header]
if err_msg:
if err_msg.find('\n') == -1 and len(
ishahid/django-blogg | source/blogg/migrations/0002_comment.py | Python | mit | 1,206 | 0.004146
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blogg', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.TextField(max_length=1000L)),
('author', models.CharField(default=b'Anonymous', max_length=100, blank=True)),
('ip_address', models.GenericIPAddressField(null=True, blank=True)),
('user_agent', models.CharField(max_length=500L, blank=True)),
('published', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True, auto_now_add=True)),
('post', models.ForeignKey(related_name='comments', to='blogg.Post')),
],
options={
'ordering': ['-created'],
},
bases=(models.Model,),
),
]
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/raw_building.py | Python | apache-2.0 | 15,733 | 0.001525
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""this module contains a set of functions to create astroid trees from scratch
(build_* functions) or from living object (object_build_* functions)
"""
import inspect
import logging
import os
import sys
import types
import six
from astroid import bases
from astroid import manager
from astroid import node_classes
from astroid import nodes
MANAGER = manager.AstroidManager()
# the keys of CONST_CLS eg python builtin types
_CONSTANTS = tuple(node_classes.CONST_CLS)
_JYTHON = os.name == 'java'
_BUILTINS = vars(six.moves.builtins)
_LOG = logging.getLogger(__name__)
def _io_discrepancy(member):
# _io module names itself `io`: http://bugs.python.org/issue18602
member_self = getattr(member, '__self__', None)
return (member_self and
inspect.ismodule(member_self) and
member_self.__name__ == '_io' and
member.__module__ == 'io')
def _attach_local_node(parent, node, name):
node.name = name # needed by add_local_node
parent.add_local_node(node)
def _add_dunder_class(func, member):
"""Add a __class__ member to the given func node, if we can determine it."""
python_cls = member.__class__
cls_name = getattr(python_cls, '__name__', None)
if not cls_name:
return
cls_bases = [ancestor.__name__ for ancestor in python_cls.__bases__]
ast_klass = build_class(cls_name, cls_bases, python_cls.__doc__)
func.instance_attrs['__class__'] = [ast_klass]
_marker = object()
def attach_dummy_node(node, name, runtime_object=_marker):
"""create a dummy node and register it in the locals of the given
node with the specified name
"""
enode = nodes.EmptyNode()
enode.object = runtime_object
_attach_local_node(node, enode, name)
def _has_underlying_object(self):
return self.object is not None and self.object is not _marker
nodes.EmptyNode.has_underlying_object = _has_underlying_object
def attach_const_node(node, name, value):
"""create a Const node and register it in the locals of the given
node with the specified name
"""
if name not in node.special_attributes:
_attach_local_node(node, nodes.const_factory(value), name)
def attach_import_node(node, modname, membername):
"""create a ImportFrom node and register it in the locals of the given
node with the specified name
"""
from_node = nodes.ImportFrom(modname, [(membername, None)])
_attach_local_node(node, from_node, membername)
def build_module(name, doc=None):
"""create and initialize a astroid Module node"""
node = nodes.Module(name, doc, pure_python=False)
node.package = False
node.parent = None
return node
def build_class(name, basenames=(), doc=None):
"""create and initialize a astroid ClassDef node"""
node = nodes.ClassDef(name, doc)
for base in basenames:
basenode = nodes.Name()
basenode.name = base
node.bases.append(basenode)
basenode.parent = node
return node
def build_function(name, args=None, defaults=None, doc=None):
"""create and initialize a astroid FunctionDef node"""
args, defaults = args or [], defaults or []
# first argument is now a list of decorators
func = nodes.FunctionDef(name, doc)
func.args = argsnode = nodes.Arguments()
argsnode.args = []
for arg in args:
argsnode.args.append(nodes.Name())
argsnode.args[-1].name = arg
argsnode.args[-1].parent = argsnode
argsnode.defaults = []
for default in defaults:
argsnode.defaults.append(nodes.const_factory(default))
argsnode.defaults[-1].parent = argsnode
argsnode.kwarg = None
argsnode.vararg = None
argsnode.parent = func
if args:
register_arguments(func)
return func
def build_from_import(fromname, names):
"""create and initialize an astroid ImportFrom import statement"""
return nodes.ImportFrom(fromname, [(name, None) for name in names])
def register_arguments(func, args=None):
"""add given arguments to local
args is a list that may contains nested lists
(i.e. def func(a, (b, c, d)): ...)
"""
if args is None:
args = func.args.args
if func.args.vararg:
func.set_local(func.args.vararg, func.args)
if func.args.kwarg:
func.set_local(func.args.kwarg, func.args)
for arg in args:
if isinstance(arg, nodes.Name):
func.set_local(arg.name, arg)
else:
register_arguments(func, arg.elts)
def object_build_class(node, member, localname):
"""create astroid for a living class object"""
basenames = [base.__name__ for base in member.__bases__]
return _base_class_object_build(node, member, basenames,
localname=localname)
def object_build_function(node, member, localname):
"""create astroid for a living function object"""
# pylint: disable=deprecated-method; completely removed in 2.0
args, varargs, varkw, defaults = inspect.getargspec(member)
if varargs is not None:
args.append(varargs)
if varkw is not None:
args.append(varkw)
func = build_function(getattr(member, '__name__', None) or localname, args,
defaults, member.__doc__)
node.add_local_node(func, localname)
def object_build_datadescriptor(node, member, name):
"""create astroid for a living data descriptor object"""
return _base_class_object_build(node, member, [], name)
def object_build_methoddescriptor(node, member, localname):
"""create astroid for a living method descriptor object"""
# FIXME get arguments ?
func = build_function(getattr(member, '__name__', None) or localname,
doc=member.__doc__)
# set node's arguments to None to notice that we have no information, not
# an empty argument list
func.args.args = None
node.add_local_node(func, localname)
_add_dunder_class(func, member)
def _base_class_object_build(node, member, basenames, name=None, localname=None):
"""create astroid for a living class object, with a given set of base names
(e.g. ancestors)
"""
klass = build_class(name or getattr(member, '__name__', None) or localname,
basenames, member.__doc__)
klass._newstyle = isinstance(member, type)
node.add_local_node(klass, localname)
try:
# limit the instantiation trick since it's too dangerous
# (such as infinite test execution...)
# this at least resolves common case such as Exception.args,
# OSError.errno
if issubclass(member, Exception):
instdict = member().__dict__
else:
raise TypeError
except: # pylint: disable=bare-except
pass
else:
for item_name, obj in instdict.items():
valnode = nodes.EmptyNode()
valnode.object = obj
valnode.parent = klass
valnode.lineno = 1
klass.instance_attrs[item_name] = [valnode]
return klass
def _build_from_function(node, name, member, module):
# verify this is not an imported function
try:
code = six.get_function_code(member)
except AttributeError:
# Some implementations don't provide the code object,
# such as Jython.
code = None
filename = getattr(code, 'co_filename', None)
if filename is None:
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif filename != getattr(module, '__file__', None):
attach_dummy_node(node, name, member)
else:
object_build_function(node, member, name)
class InspectBuilder(object):
"""class for building nodes from living object
this is actually a real
autosub-team/autosub | src/plugins/vels_ob/test/test_task.py | Python | gpl-2.0 | 776 | 0
# coding: utf-8
"""
HDL Testing Platform
REST API for HDL TP # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.task import Task # noqa: E501
from swagger_client.rest import ApiException
class TestTask(unittest.TestCase):
"""Task unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTask(self):
"""Test Task"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.task.Task() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
ZackYovel/studybuddy | server/studybuddy/discussions/migrations/0005_auto_20150430_1645.py | Python | mit | 459 | 0.002179
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('discussions', '0004_auto_20150430_1641'),
]
operations = [
migrations.AlterField(
model_name='discussion',
name='original_post',
field=models.OneToOneField(null=True, to='discussions.Post', related_name='OP'),
),
]
Scoudem/audiolyze | inputhandler.py | Python | mit | 1,620 | 0
'''
File: input.py
Author: Tristan van Vaalen
Handles user input
'''
import signal
import sys
import verbose
v = verbose.Verbose()
class InputHandler():
def __init__(self):
v.debug('Initializing input handler').indent()
self.running = True
self.signal_level = 0
v.debug('Registering signal handler').unindent()
signal.signal(signal.SIGINT, self.signal_handler)
def test(self):
pass
def signal_handler(self, signal, frame):
self.signal_level += 1
if self.signal_level == 1:
self.running = False
else:
sys.exit(0)
def output_options(self):
v.write(
'Available options:\n' +
' - help: prints this message\n' +
' - exit: exit program'
' - test: magic'
)
def get(self):
v.debug('Entering input loop')
v.write('AUDIOLYZE v0.01\nPress ctrl+D to exit')
while self.running:
try:
self._parse_input(raw_input('>>> '))
except EOFError:
v.write('EOF received')
self.running = False
v.write('Goodbye')
def _parse_input(self, raw):
raw = raw.strip()
if raw in ['help', 'h', '?']:
self.output_options()
elif raw in ['quit', 'exit', 'stop', 'abort']:
self.running = False
elif raw in ['test']:
self.test()
else:
v.write(
'Invalid command \'{}\'. Try \'help\' for a list of commands'
.format(raw)
)
CSB-IG/natk | ninnx/pruning/mi_triangles.py | Python | gpl-3.0 | 1,793 | 0.026771
import networkx as nx
import itertools
import matplotlib.pyplot as plt
fig = plt.figure()
fig.subplots_adjust(left=0.2, wspace=0.6)
G = nx.Graph()
G.add_edges_from([(1,2,{'w': 6}),
(2,3,{'w': 3}),
(3,1,{'w': 4}),
(3,4,{'w': 12}),
(4,5,{'w': 13}),
(5,3,{'w': 11}),
])
import pprint
# detect triangles
triangles = []
for trio in itertools.combinations(G.nodes(), 3):
vertices = []
for v in itertools.combinations(trio, 2):
vertice = G.get_edge_data(*v)
if vertice:
vertices.append(v)
if len(vertices)==3:
triangles.append(vertices)
pos = nx.spring_layout(G)
graph1 = fig.add_subplot(121)
# graph1.plot(nx.draw_networkx_nodes(G, pos=pos, node_size=[G.degree(n) for n in G.nodes()], label=True, alpha=0.75),
# nx.draw_networkx_edges(G, pos=pos, width=[G.get_edge_data(*e)['w'] for e in G.edges()], alpha=0.75))
graph1.plot(nx.draw(G,
pos=pos,
node_size = [G.degree(n) for n in G.nodes()],
width = [G.get_edge_data(*e)['w'] for e in G.edges()],
edge_color = [G.get_edge_data(*e)['w'] for e in G.edges()] ))
#plt.show()
for t in triangles:
weights = {}
for v in t:
k = (G.get_edge_data(*v)['w'])
weights[k]=v
l = weights.keys()
if len(l) != 1:
l.sort()
l.reverse()
pprint.pprint(l)
quitar = l.pop()
G.remove_edge(*weights[quitar])
graph2 = fig.add_subplot(122)
graph2.plot(nx.draw(G,
pos=pos,
node_size = [G.degree(n) for n in G.nodes()],
width = [G.get_edge_data(*e)['w'] for e in G.edges()],
edge_color = [G.get_edge_data(*e)['w'] for e in G.edges()] ))
plt.show()
dboddie/python-diana | configure.py | Python | gpl-2.0 | 5,321 | 0.008081
#!/usr/bin/env python
import glob, os, sys
import sipconfig
from PyQt4 import pyqtconfig
def get_diana_version():
depends = filter(lambda line: line.startswith("Depends:"),
open("debian/control").readlines())
for line in depends:
pieces = line.split()
for piece in pieces:
name_pieces = piece.strip(",").split("-")
if len(name_pieces) == 2 and name_pieces[0] == "diana":
return name_pieces[1]
return None
def get_python_diana_version():
line = open("debian/changelog").readline()
pieces = line.split()
return pieces[1][1:-1]
if __name__ == "__main__":
if len(sys.argv) not in (1, 3, 5):
sys.stderr.write("Usage: %s [<directory containing diana headers> <directory containing libdiana>] "
"[<directory containing metlibs headers> <directory containing metlibs libraries>]\n" % sys.argv[0])
sys.exit(1)
if len(sys.argv) == 5:
metlibs_inc_dir = sys.argv[3]
metlibs_lib_dir = sys.argv[4]
else:
metlibs_inc_dir = "/usr/include/metlibs"
metlibs_lib_dir = "/usr/lib"
if len(sys.argv) >= 3:
diana_inc_dir = sys.argv[1]
diana_lib_dir = sys.argv[2]
else:
diana_inc_dir = "/usr/include/diana"
diana_lib_dir = "/usr/lib"
qt_pkg_dir = os.getenv("qt_pkg_dir")
python_diana_pkg_dir = os.getenv("python_diana_pkg_dir")
dest_pkg_dir = os.path.join(python_diana_pkg_dir, "metno")
config = pyqtconfig.Configuration()
# The name of the SIP build file generated by SIP and used by the build
# system.
sip_files_dir = "sip"
modules = ["std", "metlibs", "diana"]
if not os.path.exists("modules"):
os.mkdir("modules")
# Run SIP to generate the code.
output_dirs = []
for module in modules:
output_dir = os.path.join("modules", module)
build_file = module + ".sbf"
build_path = os.path.join(output_dir, build_file)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
sip_file = os.path.join("sip", module, module+".sip")
command = " ".join([config.sip_bin,
"-c", output_dir,
"-b", build_path,
"-I"+config.sip_inc_dir,
"-I"+config.pyqt_sip_dir,
"-I"+diana_inc_dir,
"-I/usr/include",
"-I"+metlibs_inc_dir,
"-I"+qt_pkg_dir+"/include",
"-I"+qt_pkg_dir+"/share/sip/PyQt4",
"-Isip",
config.pyqt_sip_flags,
"-w",
"-o", # generate docstrings for signatures
sip_file])
sys.stdout.write(command+"\n")
sys.stdout.flush()
if os.system(command) != 0:
sys.exit(1)
# Create the Makefile (within the diana directory).
makefile = pyqtconfig.QtGuiModuleMakefile(
config, build_file, dir=output_dir,
install_dir=dest_pkg_dir,
qt=["QtCore", "QtGui", "QtNetwork", "QtXml", "QtXmlPatterns"]
)
if module == "diana":
makefile.extra_include_dirs += [
diana_inc_dir,
os.path.join(diana_inc_dir, "PaintGL"),
metlibs_inc_dir,
qt_pkg_dir+"/include"
]
makefile.extra_lib_dirs += [diana_lib_dir, qt_pkg_dir+"/lib"]
makefile.extra_lflags += ["-Wl,-rpath="+diana_lib_dir, "-Wl,-fPIC"]
makefile.extra_libs += ["diana"]
if module == "metlibs":
makefile.extra_include_dirs.append(diana_inc_dir)
makefile.extra_include_dirs.append("/usr/include/metlibs")
makefile.extra_lib_dirs += [diana_lib_dir, "/usr/lib", metlibs_lib_dir, qt_pkg_dir+"/lib"]
makefile.extra_lflags += ["-Wl,-rpath="+diana_lib_dir, "-Wl,-fPIC"]
makefile.extra_libs += ["miLogger", "coserver", "diana"]
makefile.generate()
output_dirs.append(output_dir)
# Update the metno package version.
diana_version = get_diana_version()
python_diana_version = get_python_diana_version()
if not diana_version or not python_diana_version:
sys.stderr.write("Failed to find version information for Diana (%s) "
"or python-diana (%s)\n" % (repr(diana_version),
repr(python_diana_version)))
sys.exit(1)
f = open("python/metno/versions.py", "w")
f.write('\ndiana_version = "%s"\npython_diana_version = "%s"\n' % (
diana_version, python_diana_version))
# Generate the top-level Makefile.
python_files = glob.glob(os.path.join("python", "metno", "*.py"))
sipconfig.ParentMakefile(
configuration = config,
subdirs = output_dirs,
installs = [(python_files, dest_pkg_dir)]
).generate()
sys.exit()
heitorschueroff/ctci | ch5/5.03_Flip_Bit_To_Win/test_flip_bit_to_win.py | Python | mit | 269 | 0
import unittest
from flip_bit_to_win import flip_bit
class TestFlipBit(unittest.TestCase):
def test_flip_bit(self):
self.assertEquals(flip_bit(0b1011100101), 4)
self.assertEquals(flip_bit(1775), 8)
if __name__ == '__main__':
unittest.main()
mvdroest/RTLSDR-Scanner | src/misc.py | Python | gpl-3.0 | 6,123 | 0.000327
#
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import json
from math import radians, sin, cos, asin, sqrt
import math
import os
import socket
import sys
from threading import Thread
import time
import urllib
import serial.tools.list_ports
from constants import SAMPLE_RATE, TIMESTAMP_FILE
class RemoteControl(object):
def __init__(self):
self.connected = False
self.socket = None
def __connect(self):
if not self.connected:
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.connect(('localhost', 3382))
self.connected = True
except socket.error:
self.connected = False
def __thread(self, command):
self.__connect()
if self.connected:
try:
self.socket.send(json.dumps(command))
self.socket.send('\r\n')
except socket.error:
self.socket.close()
self.connected = False
def __send(self, command):
thread = Thread(target=self.__thread, args=(command,))
thread.daemon = True
thread.start()
def tune(self, frequency):
command = {'Command': 'Set',
'Method': 'Frequency',
'Value': frequency}
self.__send(command)
def get_script_dir():
if not hasattr(sys, 'frozen'):
scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
scriptDir = sys._MEIPASS
return scriptDir
def get_resdir():
scriptDir = get_script_dir()
if os.path.isdir(os.path.join(scriptDir, 'res')):
resDir = os.path.join(scriptDir, 'res')
else:
resDir = os.path.join(scriptDir, '..', 'res')
return resDir
def get_resource_path(resource):
return os.path.join(get_resdir(), resource)
def limit(value, minimum, maximum):
return max(min(maximum, value), minimum)
def level_to_db(level):
return 10 * math.log10(level)
def db_to_level(dB):
return math.pow(10, dB / 10.0)
def next_2_to_pow(val):
val -= 1
val |= val >> 1
val |= val >> 2
val |= val >> 4
val |= val >> 8
val |= val >> 16
return val + 1
def calc_samples(dwell):
samples = dwell * SAMPLE_RATE
samples = next_2_to_pow(int(samples))
return samples
def calc_real_dwell(dwell):
samples = calc_samples(dwell)
dwellReal = samples / SAMPLE_RATE
return (int)(dwellReal * 1000.0) / 1000.0
def nearest(value, values):
offset = [abs(value - v) for v in values]
return values[offset.index(min(offset))]
def haversine(lat1, lat2, lon1, lon2):
lat1, lat2, lon1, lon2 = map(radians, [lat1, lat2, lon1, lon2])
dlon = lon1 - lon2
dlat = lat1 - lat2
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
b = asin(sqrt(a))
return 2 * b * 6371000
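# Rough sanity check for the formula above: one degree of latitude is about
# 111 km, so haversine(0, 1, 0, 0) returns roughly 111195 (metres).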
def format_precision(settings, freq=None, level=None,
units=True, fancyUnits=False):
textFreq = None
textLevel = None
if freq is not None:
prec = settings.precisionFreq
width = 4 + prec
textFreq = '{:{width}.{prec}f}'.format(freq, width=width, prec=prec)
if units or fancyUnits:
textFreq += " MHz"
if level is not None:
prec = settings.precisionLevel
width = 4 + prec
textLevel = '{:.{prec}f}'.format(level, width=width, prec=prec)
if fancyUnits:
textLevel += r" $\mathsf{{dB/\sqrt{{Hz}}}}$"
elif units:
textLevel += " dB/Hz"
if textFreq and textLevel:
return (textFreq, textLevel)
if textFreq:
return textFreq
if textLevel:
return textLevel
return None
def format_time(timeStamp, withDate=False):
if timeStamp <= 1:
return 'Unknown'
if withDate:
return time.strftime('%c', time.localtime(timeStamp))
return time.strftime('%H:%M:%S', time.localtime(timeStamp))
def format_iso_time(timeStamp):
dt = datetime.datetime.utcfromtimestamp(timeStamp)
return dt.isoformat() + 'Z'
def set_version_timestamp():
scriptDir = get_script_dir()
timeStamp = str(int(time.time()))
f = open(os.path.join(scriptDir, TIMESTAMP_FILE), 'w')
f.write(timeStamp)
f.close()
def get_version_timestamp(asSeconds=False):
scriptDir = get_script_dir()
f = open(os.path.join(scriptDir, TIMESTAMP_FILE), 'r')
timeStamp = int(f.readline())
f.close()
if asSeconds:
return timeStamp
else:
return format_time(timeStamp, True)
def get_version_timestamp_repo():
f = urllib.urlopen('https://raw.github.com/EarToEarOak/RTLSDR-Scanner/master/src/version-timestamp')
timeStamp = int(f.readline())
f.close()
return timeStamp
def get_serial_ports():
ports = [port[0] for port in serial.tools.list_ports.comports()]
if len(ports) == 0:
if os.name == 'nt':
ports.append('COM1')
else:
ports.append('/dev/ttyS0')
return ports
def limit_to_ascii(text):
return ''.join([i if ord(i) < 128 else '' for i in text])
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
|
gpospelov/BornAgain
|
Tests/Functional/PyFit/minimizer_api.py
|
Python
|
gpl-3.0
| 2,947 | 0.000679 |
"""
Testing python specific API for M
|
inimizer related classes.
"""
import sys
import os
import unittest
import bornagain as ba
class TestMinimizerHelper:
def __init__(self):
self.m_ncalls = 0
self.m_pars =
|
None
def objective_function(self, pars):
self.m_ncalls += 1
self.m_pars = pars
return 42.0
class MinimizerAPITest(unittest.TestCase):
def test_ParameterAttribute(self):
"""
Testing p.value attribute
"""
par = ba.Parameter("par", 1.0)
self.assertEqual(par.value, 1.0)
par.value = 42.0
self.assertEqual(par.value, 42.0)
def test_ParametersSetIterator(self):
"""
Testing of python iterator over defined fit parameters.
"""
pars = ba.Parameters()
self.assertEqual(pars.size(), 0)
pars.add(ba.Parameter("par0", 1.0, ba.AttLimits.limitless()))
pars.add(ba.Parameter("par1", 2.0, ba.AttLimits.limitless()))
expected_names = ["par0", "par1"]
for index, p in enumerate(pars):
self.assertEqual(p.name(), expected_names[index])
def test_ParametersAdd(self):
"""
Testing Parameters::add method
"""
params = ba.Parameters()
params.add("par0", 0.0)
params.add("par1", 1.0, min=1.0)
params.add("par2", 2.0, max=2.0)
params.add("par3", 3.0, min=1.0, max=2.0)
params.add("par4", 4.0, vary=False)
self.assertTrue(params["par0"].limits().isLimitless())
self.assertTrue(params["par1"].limits().isLowerLimited())
self.assertEqual(params["par1"].limits().lowerLimit(), 1.0)
self.assertTrue(params["par2"].limits().isUpperLimited())
self.assertEqual(params["par2"].limits().upperLimit(), 2.0)
self.assertTrue(params["par3"].limits().isLimited())
self.assertEqual(params["par3"].limits().lowerLimit(), 1.0)
self.assertEqual(params["par3"].limits().upperLimit(), 2.0)
self.assertTrue(params["par4"].limits().isFixed())
def test_SimpleMinimizer(self):
minimizer = ba.Minimizer()
minimizer.setMinimizer("Test")
pars = ba.Parameters()
pars.add(ba.Parameter("par0", 0.0))
pars.add(ba.Parameter("par1", 1.0))
pars.add(ba.Parameter("par2", 2.0))
helper = TestMinimizerHelper()
result = minimizer.minimize(helper.objective_function, pars)
# return value of objective function was propagated to MinimizerResult
self.assertEqual(result.minValue(), 42.0)
# objective function was called twice
# (once by the test minimizer, and a second time during return type deduction)
self.assertEqual(helper.m_ncalls, 2)
# starting values of fit parameters were correctly sent to objective func
self.assertEqual(list(helper.m_pars.values()), [0.0, 1.0, 2.0])
if __name__ == '__main__':
unittest.main()
|
ar4s/django
|
tests/messages_tests/base.py
|
Python
|
bsd-3-clause
| 14,187 | 0.000705 |
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.http import HttpRequest, HttpResponse
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import gettext_lazy
def add_level_messages(storage):
"""
Add 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super().enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, update that constant also.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super().disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests:
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS={},
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return HttpRequest()
def get_response(self):
return HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_repr(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(
|
repr(storage),
f'<{self.storage_class.__qualname__}: request=<HttpRequest>>',
)
def test_add(self):
storage = self.get_storage()
self.assertFalse
|
(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, gettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, messages are properly stored and
retrieved across the full request/redirect/response cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels:
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Messages persist properly when multiple POSTs are made before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url
|
endlessm/chromium-browser
|
chrome/updater/win/installer/create_installer_archive.py
|
Python
|
bsd-3-clause
| 13,828 | 0.011065 |
# Copyrig
|
ht 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to create the Chrome Updater Installer archive.
This script is used to create a
|
n archive of all the files required for a
Chrome Updater install in the appropriate directory structure. It reads the
updater.release file as input, creates the uncompressed updater.7z archive, and
generates the updater.packed.7z compressed archive.
"""
import ConfigParser
import glob
import optparse
import os
import shutil
import subprocess
import sys
# Directory name inside the uncompressed archive where all the files are.
UPDATER_DIR = "bin"
# Suffix to uncompressed full archive file, appended to options.output_name.
ARCHIVE_SUFFIX = ".7z"
# compressed full archive suffix, will be prefixed by options.output_name.
COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z"
TEMP_ARCHIVE_DIR = "temp_installer_archive"
g_archive_inputs = []
def CompressUsingLZMA(build_dir, compressed_file, input_file, verbose):
lzma_exec = GetLZMAExec(build_dir)
cmd = [lzma_exec,
'a', '-t7z',
# Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe
# pre-filter). These arguments are the similar to what the Chrome mini
# installer is using.
'-m0=BCJ2',
'-m1=LZMA:d27:fb128',
'-m2=LZMA:d22:fb128:mf=bt2',
'-m3=LZMA:d22:fb128:mf=bt2',
'-mb0:1',
'-mb0s1:2',
'-mb0s2:3',
os.path.abspath(compressed_file),
os.path.abspath(input_file),]
if os.path.exists(compressed_file):
os.remove(compressed_file)
RunSystemCommand(cmd, verbose)
def CopyAllFilesToStagingDir(config, staging_dir, build_dir, timestamp):
"""Copies the files required for installer archive.
"""
CopySectionFilesToStagingDir(config, 'GENERAL', staging_dir, build_dir,
timestamp)
def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir,
timestamp):
"""Copies installer archive files specified in section from src_dir to
staging_dir. This method reads section from config and copies all the
files specified from src_dir to staging dir.
"""
for option in config.options(section):
src_subdir = option.replace('\\', os.sep)
dst_dir = os.path.join(staging_dir, config.get(section, option))
dst_dir = dst_dir.replace('\\', os.sep)
src_paths = glob.glob(os.path.join(src_dir, src_subdir))
if src_paths and not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for src_path in src_paths:
dst_path = os.path.join(dst_dir, os.path.basename(src_path))
if not os.path.exists(dst_path):
g_archive_inputs.append(src_path)
shutil.copy(src_path, dst_dir)
os.utime(dst_path, (os.stat(dst_path).st_atime, timestamp))
os.utime(dst_dir, (os.stat(dst_dir).st_atime, timestamp))
def GetLZMAExec(build_dir):
if sys.platform == 'win32':
lzma_exec = os.path.join(build_dir, "..", "..", "third_party",
"lzma_sdk", "Executable", "7za.exe")
else:
lzma_exec = '7zr' # Use system 7zr.
return lzma_exec
def MakeStagingDirectory(staging_dir):
"""Creates a staging path for installer archive. If directory exists already,
deletes the existing directory.
"""
file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path)
return file_path
def Readconfig(input_file):
"""Reads config information from input file after setting default value of
global variables.
"""
variables = {}
variables['UpdaterDir'] = UPDATER_DIR
config = ConfigParser.SafeConfigParser(variables)
config.read(input_file)
return config
def RunSystemCommand(cmd, verbose):
"""Runs |cmd|, prints the |cmd| and its output if |verbose|; otherwise
captures its output and only emits it on failure.
"""
if verbose:
print 'Running', cmd
try:
# Run |cmd|, redirecting stderr to stdout in order for captured errors to be
# inline with corresponding stdout.
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if verbose:
print output
except subprocess.CalledProcessError as e:
raise Exception("Error while running cmd: %s\n"
"Exit code: %s\n"
"Command output:\n%s" %
(e.cmd, e.returncode, e.output))
def CreateArchiveFile(options, staging_dir, timestamp):
"""Creates a new installer archive file after deleting any existing old file.
"""
# First create an uncompressed archive file for the current build (updater.7z)
lzma_exec = GetLZMAExec(options.build_dir)
archive_file = os.path.join(options.output_dir,
options.output_name + ARCHIVE_SUFFIX)
if options.depfile:
# If a depfile was requested, do the glob of the staging dir and generate
# a list of dependencies in .d format. We list the files that were copied
# into the staging dir, not the files that are actually in the staging dir
# because the ones in the staging dir will never be edited, and we want
# to have the build be triggered when the thing-that-was-copied-there
# changes.
def PathFixup(path):
"""Fixes path for depfile format: backslash to forward slash, and
backslash escaping for spaces."""
return path.replace('\\', '/').replace(' ', '\\ ')
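# For example, PathFixup(r'out\Default\a file.exe') -> 'out/Default/a\ file.exe',
# which is the slash and space escaping that depfile (.d) consumers expect.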
# Gather the list of files in the staging dir that will be zipped up. We
# only gather this list to make sure that g_archive_inputs is complete (i.e.
# that there's not file copies that got missed).
staging_contents = []
for root, dirs, files in os.walk(os.path.join(staging_dir, UPDATER_DIR)):
for filename in files:
staging_contents.append(PathFixup(os.path.join(root, filename)))
# Make sure there's an archive_input for each staging dir file.
for staging_file in staging_contents:
for archive_input in g_archive_inputs:
archive_rel = PathFixup(archive_input)
if (os.path.basename(staging_file).lower() ==
os.path.basename(archive_rel).lower()):
break
else:
raise Exception('Did not find an archive input file for "%s"' %
staging_file)
# Finally, write the depfile referencing the inputs.
with open(options.depfile, 'wb') as f:
f.write(PathFixup(os.path.relpath(archive_file, options.build_dir)) +
': \\\n')
f.write(' ' + ' \\\n '.join(PathFixup(x) for x in g_archive_inputs))
# It is important to use abspath to create the path to the directory because
# if you use a relative path without any .. sequences then 7za.exe uses the
# entire relative path as part of the file paths in the archive. If you have
# a .. sequence or an absolute path then only the last directory is stored as
# part of the file paths in the archive, which is what we want.
cmd = [lzma_exec,
'a',
'-t7z',
archive_file,
os.path.abspath(os.path.join(staging_dir, UPDATER_DIR)),
'-mx0',]
# There does not seem to be any way in 7za.exe to overwrite an existing file so
# we always delete before creating a new one.
if not os.path.exists(archive_file):
RunSystemCommand(cmd, options.verbose)
elif options.skip_rebuild_archive != "true":
os.remove(archive_file)
RunSystemCommand(cmd, options.verbose)
# Do not compress the archive when skip_archive_compression is specified.
if options.skip_archive_compression:
compressed_file = os.path.join(
options.output_dir, options.output_name + COMPRESSED_ARCHIVE_SUFFIX)
if os.path.exists(compressed_file):
os.remove(compressed_file)
return os.path.basename(archive_file)
compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX
compressed_archive_file_path = os.path.join(options.output_dir,
compressed_archive_file)
os.utime(archive_file, (os.stat(archive_file).st_atime, timestamp))
CompressUsingLZMA(options.build_dir, com
|
juan-cardelino/matlab_demos
|
ipol_demo-light-1025b85/app_available/75/app.py
|
Python
|
gpl-2.0
| 17,698 | 0.00825 |
""""
Meaningful Scales Detection: an Unsupervised Noise Detection Algorithm for \
Digital Contours
Demo Editor: B. Kerautret
"""
from lib import base_app, build, http, image, config
from lib.misc import app_expose, ctime
from lib.base_app import init_app
import cherrypy
from cherrypy import TimeoutError
import os.path
import shutil
import time
class app(base_app):
""" template demo app """
title = "Meaningful Scales Detection: an Unsupervised Noise "+\
"Detection Algorithm for Digital Contours"
xlink_article = 'http://www.ipol.im/pub/pre/75/'
xlink_src = 'http://www.ipol.im/pub/pre/75/meaningfulscaleDemo.tgz'
demo_src_filename = 'meaningfulscaleDemo.tgz'
demo_src_dir = 'meaningfulscaleDemo'
input_nb = 1 # number of input images
input_max_pixels = 4096 * 4096 # max size (in pixels) of an input image
input_max_weight = 1 * 4096 * 4096 # max size (in bytes) of an input file
input_dtype = '3x8i' # input image expected data type
input_ext = '.png' # input image expected extension (ie file format)
is_test = False # switch to False for deployment
list_commands = []
def __init__(self):
"""
app setup
"""
# setup the parent class
base_dir = os.path.dirname(os.path.abspath(__file__))
base_app.__init__(self, base_dir)
# select the base_app steps to expose
# index() is generic
app_expose(base_app.index)
app_expose(base_app.input_select)
app_expose(base_app.input_upload)
# params() is modified from the template
app_expose(base_app.params)
# run() and result() must be defined here
def build(self):
"""
program build/update
"""
# store common file path in variables
tgz_file = self.dl_dir + self.demo_src_filename
prog_names = ["meaningfulScaleEstim"]
script_names = ["applyMS.sh", "convert.sh", "convertFig.sh", \
"transformBG.sh"]
prog_bin_files = []
for f in prog_names:
prog_bin_files.append(self.bin_dir+ f)
log_file = self.base_dir + "build.log"
# get the latest source archive
build.download(self.xlink_src, tgz_file)
# test if the dest file is missing, or too old
if (os.path.isfile(prog_bin_files[0])
and ctime(tgz_file) < ctime(prog_bin_files[0])):
cherrypy.log("not rebuild needed",
context='BUILD', traceback=False)
else:
# extract the archive
build.extract(tgz_file, self.src_dir)
# build the program
build.run("mkdir %s;" %(self.src_dir+ self.demo_src_dir+"/build"), \
stdout=log_file)
build.run("cd %s; cmake .. -DCMAKE_BUILD_TYPE=Release \
-DBUILD_TESTING=false ; make -j 4" %(self.src_dir+ \
self.demo_src_dir+\
"/build"),
stdout=log_file)
# save into bin dir
if os.path.isdir(self.bin_dir):
shutil.rmtree(self.bin_dir)
os.mkdir(self.bin_dir)
shutil.copy(self.src_dir + self.demo_src_dir + \
"/build/demoIPOL/meaningfulScaleEstim", self.bin_dir)
for f in script_names :
shutil.copy(self.src_dir + os.path.join(self.demo_src_dir, \
"demoIPOL", f), self.bin_dir)
# copy annex file : pgm2freeman (extraction of contours)
shutil.copy(self.src_dir + self.demo_src_dir+ \
"/build/bin/pgm2freeman", self.bin_dir)
# copy Dynamic lib
shutil.copy(self.src_dir + self.demo_src_dir+ \
"/build/src/libImaGene.so", self.bin_dir)
# cleanup the source dir
shutil.rmtree(self.src_dir)
return
@cherrypy.expose
@init_app
def input_select(self, **kwargs):
"""
use the selected available input images
"""
self.init_cfg()
#kwargs contains input_id.x and input_id.y
input_id = kwargs.keys()[0].split('.')[0]
assert input_id == kwargs.keys()[1].split('.')[0]
# get the images
input_dict = config.file_dict(self.input_dir)
fnames = input_dict[input_id]['files'].split()
for i in range(len(fnames)):
shutil.copy(self.input_dir + fnames[i],
self.work_dir + 'input_%i' % i)
msg = self.process_input()
self.log("input selected : %s" % input_id)
self.cfg['meta']['original'] = False
self.cfg.save()
# jump to the params page
return self.params(msg=msg, key=self.key)
#---------------------------------------------------------------------------
# Parameter handling (an optional crop).
#---------------------------------------------------------------------------
@cherrypy.expose
@init_app
def params(self, newrun=False, msg=None):
"""Parameter handling (optional crop)."""
# if a new experiment on the same image, clone data
if newrun:
self.clone_input()
# save the input image as 'input_0_selection.png', the one to be used
|
img = image(self.work_dir + 'input_0.png')
img.save(self.work_dir + 'input_0_selection.png')
img.save(self.work_dir + 'input_0_selection.pgm')
# initialize subimage parameters
self.cfg['param'] = {'x1
|
':-1, 'y1':-1, 'x2':-1, 'y2':-1}
self.cfg.save()
return self.tmpl_out('params.html')
@cherrypy.expose
@init_app
def wait(self, **kwargs):
"""
params handling and run redirection
"""
# save and validate the parameters
# handle image crop if used
if not 'action' in kwargs:
# read click coordinates
x = kwargs['click.x']
y = kwargs['click.y']
x1 = self.cfg['param']['x1']
y1 = self.cfg['param']['y1']
img = image(self.work_dir + 'input_0.png')
# check if the click is inside the image
if int(x) >= 0 and int(y) >= 0 and \
int(x) < img.size[0] and int(y) < img.size[1]:
if int(x1) < 0 or int(y1) < 0 : # first click
# update (x1,y1)
self.cfg['param']['x1'] = int(x)
self.cfg['param']['y1'] = int(y)
self.cfg.save()
# draw cross
img.convert('3x8i')
img.draw_cross((int(x), int(y)), size=9, color="red")
img.save(self.work_dir + 'input_0_selection.png')
elif int(x1) != int(x) and int(y1) != int(y) : # second click
# update (x2,y2)
self.cfg['param']['x2'] = int(x)
self.cfg['param']['y2'] = int(y)
self.cfg.save()
# order points such that (x1,y1) is the lower left corner
(x1, x2) = sorted((int(x1), int(x)))
(y1, y2) = sorted((int(y1), int(y)))
assert (x2 - x1) > 0 and (y2 - y1) > 0
# crop the image
img.crop((x1, y1, x2+1, y2+1))
img.save(self.work_dir + 'input_0_selection.png')
img.save(self.work_dir + 'input_0_selection.pgm')
return self.tmpl_out('params.html')
try:
self.cfg['param'] = {'tmax' : float(kwargs['tmax']),
'm' : float(kwargs['m'])}
except ValueError:
return self.error(errcode='badparams',
errmsg="The parameters must be numeric.")
self.cfg['param']['autothreshold'] = kwargs['thresholdtype'] == 'True'
http.refresh(self.base_url + 'run?key=%s' % self.key)
return self.tmpl_out("wait.html")
@cherryp
|
shreya2111/Recommender-System
|
outgoing_call_count.py
|
Python
|
gpl-2.0
| 1,175 | 0.095319 |
import json
import os
import datetime
for i in range(9):
try:
os.chdir('../call_record/')
with open('callsSortedperson_'+str(i)+'.json','r') as f:
data=json.load(f)
print 'User: ',i
#print len(data)
friends=[]
for j in range(len(data)):
#print data[j][0]
friends.append(data[j][0])
friends=list(set(friends))
#print len(friends)
#24 hour loop
time=data[0][2]
t=datetime.datetime.fromtimestamp(data[0][2]).strftime("%Y-%m-%d %H:%M:%S") #IST
#print t
calls=[]
for k in friends:
c
|
=0
for j in range(len(data)):
if data[j][1]==2:
#In 86400 seconds all outgoing calls to one person
if k==data[j][0]:
#if da
|
ta[j][2]<=(float(time)+86400):
t=datetime.datetime.fromtimestamp(data[j][2]).strftime("%Y-%m-%d %H:%M:%S") #IST
#print t
c+=1
#print c,k
calls.append(c)
#print len(calls)
k=[]
c=0
for j in range(len(friends)):
k.append(j+1)
if calls[j]==0:
c+=1
print c
#print zip(k, calls)
f=open('#CallsVsContacts'+str(i)+'.json','w')
json.dump(zip(k,calls),f,indent=2)
except Exception as e:
continue
|
Jelby/Hatalogico
|
ledKnightRider16.py
|
Python
|
mit
| 9,635 | 0.019512 |
#!/usr/bin/python
# ===========================================================================
# Hatalogico KNIGHT RIDER for 16 LEDs - powered by Adafruit's Libraries
# -------------------------------------------------
# Date: 12/4/2015
# Author: John Lumley
#
# BIG THANKS TO ADAFRUIT INDUSTRIES FOR MAKING THIS POSSIBLE AND EASY
# ===========================================================================
import time, os, sys
# DETERMINE CURRENT PATH
scriptPath = os.path.realpath(os.path.dirname(sys.argv[0]))
os.chdir(scriptPath)
# APPEN
|
D FOLDER OF REQUIRED LIBRARY
sys.path.append("Adafruit/Adafruit_PWM_Servo_Driver")
# FINALLY LOAD THE LIBRARY
from Adafruit_PWM_Servo_Driver import PWM
LED_PIN_0 = 0
LED_PIN_1 = 2
LED_PIN_2 = 4
LED_PIN_3 = 6
LED_PIN_4 = 8
LED_PIN_5 = 10
LED_PIN_6 = 12
LED_PIN_7 = 14
LED_PIN_8 = 1
LED_PIN_9 = 3
LED_PIN_10 = 5
LED_PIN_11 = 7
LED_PIN_12 = 9
LED_PIN_13 = 11
LED_PIN_14 = 13
LED_PIN_15 = 15
BRIGHT_0=0
BRIGHT_1=1800
BRIGHT_2=2400
BRIGHT_3=3000
BRIGHT_4=3500
BRI
|
GHT_5=3800
BRIGHT_6=4000
BRIGHT_7=4095
# BUILD ARRAYS OF 16 BRIGHTNESS VALUES (ONE PER PWM CHANNEL) FOR THE LOOP
position = {}
position[0] = [ BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[1] = [ BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[2] = [ BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[3] = [ BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[4] = [ BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[5] = [ BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[6] = [ BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[7] = [ BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[8] = [ BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[9] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[10] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[11] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[12] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[13] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7, BRIGHT_7 ]
position[14] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0, BRIGHT_7 ]
position[15] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_1, BRIGHT_0 ]
position[16] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_2, BRIGHT_0 ]
position[17] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_3, BRIGHT_0 ]
position[18] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_4, BRIGHT_0 ]
position[19] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_5, BRIGHT_0 ]
position[20] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_6, BRIGHT_0 ]
position[21] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0 ]
position[22] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1 ]
position[23] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2 ]
position[24] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3 ]
position[25] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4 ]
position[26] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5 ]
position[27] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6 ]
position[28] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7 ]
position[29] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7 ]
position[30] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[31] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[32] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[33] = [ BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[34] = [ BRIGHT_7, BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[35] = [ BRIGHT_7, BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[36] = [ BRIGHT_0, BRIGHT_1, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[37] = [ BRIGHT_0, BRIGHT_2, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[38] = [ BRIGHT_0, BRIGHT_3, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7 ]
position[39] = [ BRIGHT_0, BRIGHT_4, BRIGHT_5, BRIGHT_6, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT_7, BRIGHT
|
pradeepnazareth/NS-3-begining
|
src/lte/bindings/callbacks_list.py
|
Python
|
gpl-2.0
| 6,324 | 0.006167 |
callback_classes = [
['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'std::list<ns3::Ptr<ns3::LteControlMessage>, std::allocator<ns3::Ptr<ns3::LteControlMessage> > >', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'ns3::Ptr<ns3::SpectrumValue>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::DlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::UlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::PacketBurst>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::PhyReceptionStatParameters', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::PhyTransmissionStatParameters', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'ns3::LteUePhy::State', 'ns3::LteUePhy::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'double', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns
|
3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'double', 'bool', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::
|
empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Packet>', 'const ns3::Address &', 'const ns3::Address &', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::EpcUeNas::State', 'ns3::EpcUeNas::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'const ns3::SpectrumValue &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned char', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned char', 'unsigned int', 'unsigned long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::UeManager::State', 'ns3::UeManager::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::LteRrcSap::MeasurementReport', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::DlSchedulingCallbackInfo', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned int', 'unsigned int', 'unsigned short', 'unsigned char', 'unsigned short', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::LteUeRrc::State', 'ns3::LteUeRrc::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
torchmed/torchmed
|
torchmed/views.py
|
Python
|
mit
| 364 | 0.002747 |
from django.shortcuts import render
from djan
|
go.utils.translation import activate
def inde
|
x(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# context = {'latest_question_list': latest_question_list}
# activate('pt-br')
# print(request.LANGUAGE_CODE)
context = {}
return render(request, 'index.html', context)
|
yoshinarikou/MilleFeuilleRaspberryPi
|
milpython/MoistureTest.py
|
Python
|
mit
| 835 | 0.027545 |
########################################################################
# MCU Gear(R) system Sample Code
# Author: y.kou.
# web site: http://www.milletool.com/
# Date : 8/OCT/2016
#
########################################################################
#Revision Information
#
########################################################################
#!/usr/bin/python
from milpy import
|
mil
from milpy import milMod
from milpy import wiringdata
from milpy import Moisuture
import time
wiringdata.initIO()
modA = milMod.milMod(Moisuture.getIn
|
fo(0))
if __name__=='__main__':
try:
while(1):
modA.connect()
readData = Moisuture.read(modA)
print "readData = ",readData
time.sleep(1)
modA.disconnect()
except KeyboardInterrupt:
print("detect key interrupt [ctrl]+ [C] \n")
mil.cleanup()
wiringdata.cleanup()
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/botocore/retryhandler.py
|
Python
|
mit
| 13,631 | 0.000147 |
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import functools
import logging
from binascii import crc32
from botocore.vendored.requests import ConnectionError, Timeout
from botocore.vendored.requests.packages.urllib3.exceptions import ClosedPoolError
from botocore.exceptions import ChecksumError, EndpointConnectionError
logger = logging.getLogger(__name__)
# The only supported error for now is GENERAL_CONNECTION_ERROR
# which maps to requests generic ConnectionError. If we're able
# to get more specific exceptions from requests we can update
# this mapping with more specific exceptions.
EXCEPTION_MAP = {
'GENERAL_CONNECTION_ERROR': [
ConnectionError, ClosedPoolError, Timeout,
EndpointConnectionError
],
}
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: %s" % base)
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
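# Worked example: with base=1 and growth_factor=2 the sleep times for
# attempts 1, 2, 3 and 4 are 1, 2, 4 and 8 seconds; base='rand' scales the
# same progression by a random factor in [0, 1).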
def create_exponential_delay_function(base, growth_factor):
"""Create an exponential delay function based on the attempts.
This is used so that you only have to pass it the attempts
parameter to calculate the delay.
"""
return functools.partial(
delay_exponential, base=base, growth_factor=growth_factor)
def create_retry_handler(config, operation_name=None):
checker = create_checker_from_retry_config(
config, operation_name=operation_name)
action = create_retry_action_from_config(
config, operation_name=operation_name)
return RetryHandler(checker=checker, action=action)
def create_retry_action_from_config(config, operation_name=None):
# The spec has the possibility of supporting per policy
# actions, but right now, we assume this comes from the
# default section, which means that delay functions apply
# for every policy in the retry config (per service).
delay_config = config['__default__']['delay']
if delay_config['type'] == 'exponential':
return create_exponential_delay_function(
base=delay_config['base'],
growth_factor=delay_config['growth_factor'])
def create_checker_from_retry_config(config, operation_name=None):
checkers = []
max_attempts = None
retryable_exceptions = []
if '__default__' in config:
policies = config['__default__'].get('policies', [])
max_attempts = config['__default__']['max_attempts']
for key in policies:
current_conf
|
ig = policies[key]
checkers.append(_create_single_checker(current_config))
retry_exception = _extract_retryable_exception(current_config)
if retry_exception is not None:
retryable_exceptions.extend(retry_excep
|
tion)
if operation_name is not None and config.get(operation_name) is not None:
operation_policies = config[operation_name]['policies']
for key in operation_policies:
checkers.append(_create_single_checker(operation_policies[key]))
retry_exception = _extract_retryable_exception(
operation_policies[key])
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if len(checkers) == 1:
# Don't need to use a MultiChecker
return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts)
else:
multi_checker = MultiChecker(checkers)
return MaxAttemptsDecorator(
multi_checker, max_attempts=max_attempts,
retryable_exceptions=tuple(retryable_exceptions))
def _create_single_checker(config):
if 'response' in config['applies_when']:
return _create_single_response_checker(
config['applies_when']['response'])
elif 'socket_errors' in config['applies_when']:
return ExceptionRaiser()
def _create_single_response_checker(response):
if 'service_error_code' in response:
checker = ServiceErrorCodeChecker(
status_code=response['http_status_code'],
error_code=response['service_error_code'])
elif 'http_status_code' in response:
checker = HTTPStatusCodeChecker(
status_code=response['http_status_code'])
elif 'crc32body' in response:
checker = CRC32Checker(header=response['crc32body'])
else:
# TODO: send a signal.
raise ValueError("Unknown retry policy: %s" % config)
return checker
def _extract_retryable_exception(config):
applies_when = config['applies_when']
if 'crc32body' in applies_when.get('response', {}):
return [ChecksumError]
elif 'socket_errors' in applies_when:
exceptions = []
for name in applies_when['socket_errors']:
exceptions.extend(EXCEPTION_MAP[name])
return exceptions
class RetryHandler(object):
"""Retry handler.
The retry handler takes two params, ``checker`` object
and an ``action`` object.
The ``checker`` object must be a callable object and based on a response
and an attempt number, determines whether or not sufficient criteria for
a retry has been met. If this is the case then the ``action`` object
(which also is a callable) determines what needs to happen in the event
of a retry.
"""
def __init__(self, checker, action):
self._checker = checker
self._action = action
def __call__(self, attempts, response, caught_exception, **kwargs):
"""Handler for a retry.
Intended to be hooked up to an event handler (hence the **kwargs),
this will process retries appropriately.
"""
if self._checker(attempts, response, caught_exception):
result = self._action(attempts=attempts)
logger.debug("Retry needed, action of: %s", result)
return result
logger.debug("No retry needed.")
class BaseChecker(object):
"""Base class for retry checkers.
Each class is responsible for checking a single criteria that determines
whether or not a retry should not happen.
"""
def __call__(self, attempt_number, response, caught_exception):
"""Determine if retry criteria matches.
Note that either ``response`` is not None and ``caught_exception`` is
None or ``response`` is None and ``caught_exception`` is not None.
:type attempt_number: int
:param attempt_number: The total number of times we've attempted
to send the request.
:param response: The HTTP response (if one was received).
:type caught_exception: Exception
:param caught_exception: Any exception that was caught while trying to
send the HTTP response.
:return: True, if the retry criteria matches (and therefore a retry
should occur). False if the criteria does not match.
"""
# The default implementation allows subclasses to not have to check
# whether or not response is None or not.
if response is not None:
return self._check_response(attempt_number, response)
elif caught_exception is not None:
return sel
|
hyoklee/siphon
|
siphon/cdmr/ncstream.py
|
Python
|
mit
| 5,147 | 0.000971 |
from __future__ import print_function
import zlib
import numpy as np
from . import ncStream_pb2 as stream # noqa
MAGIC_HEADER = b'\xad\xec\xce\xda'
MAGIC_DATA = b'\xab\xec\xce\xba'
MAGIC_VDATA = b'\xab\xef\xfe\xba'
MAGIC_VEND = b'\xed\xef\xfe\xda'
MAGIC_ERR = b'\xab\xad\xba\xda'
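# Each ncstream message on the wire is framed as a 4-byte magic marker from the
# set above followed, for most block types, by a protobuf-style varint length
# and that many payload bytes (see read_block and read_var_int below).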
def read_ncstream_messages(fobj):
messages = []
while True:
magic = read_magic(fobj)
if not magic:
break
if magic == MAGIC_HEADER:
messages.append(stream.Header())
messages[0].ParseFromString(read_block(fobj))
elif magic == MAGIC_DATA:
data = stream.Data()
|
data.ParseFromString(read_block(fobj))
if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
dt = _dtypeLookup.get(data.dataType, np.object_)
num_obj = read_var_int(fobj)
|
blocks = np.array([read_block(fobj) for _ in range(num_obj)], dtype=dt)
messages.append(blocks)
elif data.dataType in _dtypeLookup:
data_block = read_numpy_block(fobj, data)
messages.append(data_block)
elif data.dataType in (stream.STRUCTURE, stream.SEQUENCE):
blocks = []
magic = read_magic(fobj)
while magic != MAGIC_VEND:
assert magic == MAGIC_VDATA, 'Bad magic for struct/seq data!'
blocks.append(stream.StructureData())
blocks[0].ParseFromString(read_block(fobj))
magic = read_magic(fobj)
messages.append((data, blocks))
else:
raise NotImplementedError("Don't know how to handle data type: %d" %
data.dataType)
elif magic == MAGIC_ERR:
err = stream.Error()
err.ParseFromString(read_block(fobj))
raise RuntimeError(err.message)
else:
print('Unknown magic: ' + str(' '.join('%02x' % b for b in magic)))
return messages
def read_magic(fobj):
return fobj.read(4)
def read_block(fobj):
num = read_var_int(fobj)
return fobj.read(num)
def read_numpy_block(fobj, data_header):
dt = data_type_to_numpy(data_header.dataType)
dt = dt.newbyteorder('>' if data_header.bigend else '<')
shape = tuple(r.size for r in data_header.section.range)
buf = read_block(fobj)
if data_header.compress == stream.DEFLATE:
buf = zlib.decompress(buf)
assert len(buf) == data_header.uncompressedSize
elif data_header.compress != stream.NONE:
raise NotImplementedError('Compression type %d not implemented!' %
data_header.compress)
return np.frombuffer(bytearray(buf), dtype=dt).reshape(*shape)
# STRUCTURE = 8;
# SEQUENCE = 9;
_dtypeLookup = {stream.CHAR: 'b', stream.BYTE: 'b', stream.SHORT: 'i2',
stream.INT: 'i4', stream.LONG: 'i8', stream.FLOAT: 'f4',
stream.DOUBLE: 'f8', stream.STRING: np.string_,
stream.ENUM1: 'B', stream.ENUM2: 'u2', stream.ENUM4: 'u4',
stream.OPAQUE: 'O'}
def data_type_to_numpy(datatype, unsigned=False):
basic_type = _dtypeLookup[datatype]
if datatype in (stream.STRING, stream.OPAQUE):
return np.dtype(basic_type)
if unsigned:
basic_type = basic_type.replace('i', 'u')
return np.dtype('>' + basic_type)
def unpack_variable(var):
dt = data_type_to_numpy(var.dataType, var.unsigned)
if var.dataType == stream.OPAQUE:
type_name = 'opaque'
elif var.dataType == stream.STRING:
type_name = 'string'
else:
type_name = dt.type.__name__
if var.data:
if var.dataType is str:
data = var.data
else:
data = np.fromstring(var.data, dtype=dt)
else:
data = None
return data, dt, type_name
_attrConverters = {stream.Attribute.BYTE: np.dtype('>b'),
stream.Attribute.SHORT: np.dtype('>i2'),
stream.Attribute.INT: np.dtype('>i4'),
stream.Attribute.LONG: np.dtype('>i8'),
stream.Attribute.FLOAT: np.dtype('>f4'),
stream.Attribute.DOUBLE: np.dtype('>f8')}
def unpack_attribute(att):
if att.unsigned:
print('Warning: Unsigned attribute!')
if att.len == 0:
val = None
elif att.type == stream.Attribute.STRING:
val = att.sdata
else:
val = np.fromstring(att.data,
dtype=_attrConverters[att.type], count=att.len)
if att.len == 1:
val = val[0]
return att.name, val
def read_var_int(file_obj):
'Read a variable-length integer'
# Read all bytes from here, stopping with the first one that does not have
# the MSB set. Save the lower 7 bits, and keep stacking to the *left*.
val = 0
shift = 0
while True:
# Read next byte
next_val = ord(file_obj.read(1))
val = ((next_val & 0x7F) << shift) | val
shift += 7
if not next_val & 0x80:
break
return val
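# Example: the byte sequence 0x96 0x01 decodes to 150, since 0x96 supplies the
# low 7 bits (0x16) and 0x01 the next 7 bits, i.e.
# read_var_int(io.BytesIO(b'\x96\x01')) == 150 (assuming io is imported).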
|
Eigenstate/msmbuilder
|
runtests.py
|
Python
|
lgpl-2.1
| 6,293 | 0.000953 |
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -t {SAMPLE_TEST}
"""
from __future__ import division, print_function
PROJECT_MODULE = "msmbuilder"
PROJECT_ROOT_FILES = ['msmbuilder', 'LICENSE', 'setup.py']
SAMPLE_TEST = "msmbuilder.tests.test_msm:test_ergodic_cutoff"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project "
"(use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true",
default=False,
help="just build, do not run any tests")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-verbose", action='store_true', default=False,
help="Default nose verbosity is -v. "
"This turns that off")
parser.add_argument("
|
--ipython", action='store_true', default=False,
help="Launch an ipython shell instead of nose")
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose")
args = parser.parse_args(argv)
if not
|
args.no_build:
site_dir, dst_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = site_dir
os.environ['PATH'] = dst_dir + "/bin:" + os.environ['PATH']
if args.build_only:
sys.exit(0)
if args.ipython:
commands = ['ipython']
else:
commands = ['nosetests', '--with-timer', '--timer-top-n', '5']
if args.verbose > 0 and not args.no_verbose:
verbosity = "-{vs}".format(vs="v" * args.verbose)
commands += [verbosity]
if args.tests:
commands += args.tests[:]
else:
commands += ["{}.tests".format(PROJECT_MODULE)]
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
commands += extra_argv
# Run the tests under build/test
test_dir = os.path.join("build", "test")
try:
shutil.rmtree(test_dir)
except OSError:
pass
try:
os.makedirs(test_dir)
except OSError:
pass
cwd = os.getcwd()
try:
os.chdir(test_dir)
result = subprocess.call(commands)
finally:
os.chdir(cwd)
sys.exit(result)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
prev_path = env.get('PATH', '').split(os.pathsep)
env['PATH'] = os.pathsep.join(EXTRA_PATH + prev_path)
if args.debug:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
cmd += ["build"]
# Install; avoid producing eggs so numpy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
            '--record=' + os.path.join(dst_dir, 'tmp_install_log.txt')]
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
            # allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
from distutils.sysconfig import get_python_lib
return get_python_lib(prefix=dst_dir, plat_specific=True), dst_dir
if __name__ == "__main__":
main(argv=sys.argv[1:])
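# --- Editor's note: hedged sketch, not part of the original runtests.py ---
# The loop in build_project() above prints a heartbeat only while the build log
# keeps growing, so a CI system can tell a hung build from a merely quiet one.
# A minimal standalone version of that pattern; the function name, command,
# log path and interval below are arbitrary examples, not values used here:
def run_with_heartbeat(cmd, log_path, interval=60):
    """Run cmd, send its output to log_path, and report progress only while the log grows."""
    with open(log_path, 'w') as log:
        proc = subprocess.Popen(cmd, stdout=log, stderr=log)
        last_blip = time.time()
        last_size = os.stat(log_path).st_size
        while proc.poll() is None:
            time.sleep(0.5)
            if time.time() - last_blip > interval:
                size = os.stat(log_path).st_size
                if size > last_size:  # output was produced since the last check
                    print(" ... still running")
                    last_blip = time.time()
                    last_size = size
    return proc.wait()
# Example (hypothetical): run_with_heartbeat(['python', 'setup.py', 'build'], 'build.log')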
|
izapolsk/integration_tests
|
cfme/tests/cloud/test_tag_mapping.py
|
Python
|
gpl-2.0
| 9,719 | 0.001338 |
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from wrapanapi.exceptions import ImageNotFoundError
from cfme import test_requirements
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.exceptions import ItemNotFound
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.provider([EC2Provider], scope='function'),
pytest.mark.usefixtures('setup_provider', 'refresh_provider'),
test_requirements.tag
]
@pytest.fixture(scope='function')
def map_tags(appliance, provider, request):
tag = appliance.collections.map_tags.create(entity_type=partial_match(provider.name.title()),
label='test',
category='Testing')
yield tag
request.addfinalizer(lambda: tag.delete())
@pytest.fixture(scope='function')
def tagged_vm(provider):
# cu-24x7 vm is tagged with test:testing in provider
tag_vm = provider.data.cap_and_util.capandu_vm
collection = provider.appliance.provider_based_collection(provider)
try:
return collection.instantiate(tag_vm, provider)
except IndexError:
raise ItemNotFound('VM for tag mapping not found!')
@pytest.fixture(scope='function')
def refresh_provider(provider):
provider.refresh_provider_relationships(wait=600)
return True
@pytest.fixture(params=['instances', 'images'])
def tag_mapping_items(request, appliance, provider):
entity_type = request.param
collection = getattr(appliance.collections, 'cloud_{}'.format(entity_type))
collection.filters = {'provider': provider}
view = navigate_to(collection, 'AllForProvider')
name = view.entities.get_first_entity().name
try:
mgmt_item = (
provider.mgmt.get_template(name)
if entity_type == 'images'
else provider.mgmt.get_vm(name)
)
    except ImageNotFoundError:
        msg = 'Failed looking up template [{}] from CFME on provider: {}'.format(name, provider)
        logger.exception(msg)
        pytest.skip(msg)
return collection.instantiate(name=name, provider=provider), mgmt_item, entity_type
def tag_components():
# Return tuple with random tag_label and tag_value
return (
fauxfactory.gen_alphanumeric(15, start="tag_label_"),
fauxfactory.gen_alphanumeric(15, start="tag_value_")
)
@pytest.mark.provider([AzureProvider], selector=ONE_PER_TYPE, scope='function')
def test_tag_mapping_azure_instances(tagged_vm, map_tags):
    """
Polarion:
assignee: anikifor
casecomponent: Cloud
caseimportance: high
initialEstimate: 1/12h
testSteps:
1. Find Instance that tagged with test:testing in Azure (cu-24x7)
2. Create tag mapping for Azure instances
3. Refresh Provider
4. Go to Summary of the Instance and read Smart Management field
expectedResults:
1.
2.
3.
4. Field value is "My Company Tags Testing: testing"
"""
tagged_vm.provider.refresh_provider_relationships()
view = navigate_to(tagged_vm, 'Details')
def my_company_tags():
return view.tag.get_text_of('My Company Tags') != 'No My Company Tags have been assigned'
# sometimes it's not updated immediately after provider refresh
wait_for(
my_company_tags,
timeout=600,
delay=45,
fail_func=view.toolbar.reload.click
)
assert view.tag.get_text_of('My Company Tags')[0] == 'Testing: testing'
# TODO: Azure needs tagging support in wrapanapi
def test_labels_update(provider, tag_mapping_items, soft_assert):
    """ Test updates of tag labels on entity details
Polarion:
assignee: anikifor
casecomponent: Cloud
caseimportance: high
initialEstimate: 1/12h
testSteps:
1. Set a tag through provider mgmt interface
2. Refresh Provider
3. Go to entity details and get labels
4. unset tag through provider mgmt interface
5. Go to entity details and get labels
expectedResults:
1.
2.
3. labels includes label + tag
4.
5. labels should not include tag label
"""
entity, mgmt_entity, entity_type = tag_mapping_items
tag_label, tag_value = tag_components()
mgmt_entity.set_tag(tag_label, tag_value)
provider.refresh_provider_relationships(method='ui')
view = navigate_to(entity, 'Details')
# get_tags() doesn't work here as we're looking at labels, not smart management
current_tag_value = view.entities.summary('Labels').get_text_of(tag_label)
soft_assert(
current_tag_value == tag_value, (
'Tag values is not that expected, actual - {}, expected - {}'.format(
current_tag_value, tag_value
)
)
)
mgmt_entity.unset_tag(tag_label, tag_value)
provider.refresh_provider_relationships(method='ui')
view = navigate_to(entity, 'Details', force=True)
fields = view.entities.summary('Labels').fields
soft_assert(
tag_label not in fields,
'{} label was not removed from details page'.format(tag_label)
)
# TODO: Azure needs tagging support in wrapanapi
def test_mapping_tags(
appliance, provider, tag_mapping_items, soft_assert, category, request
):
"""Test mapping tags on provider instances and images
Polarion:
assignee: anikifor
casecomponent: Cloud
caseimportance: high
initialEstimate: 1/12h
testSteps:
1. Set a tag through provider mgmt interface
2. create a CFME tag map for entity type
3. Go to entity details and get smart management table
4. Delete the tag map
5. Go to entity details and get smart management table
expectedResults:
1.
2.
3. smart management should include category name and tag
4.
5. smart management table should NOT include category name and tag
"""
entity, mgmt_entity, entity_type = tag_mapping_items
tag_label, tag_value = tag_components()
mgmt_entity.set_tag(tag_label, tag_value)
request.addfinalizer(
lambda: mgmt_entity.unset_tag(tag_label, tag_value)
)
provider_type = provider.discover_name.split(' ')[0]
# Check the add form to find the correct resource entity type selection string
view = navigate_to(appliance.collections.map_tags, 'Add')
select_text = None # init this since we set it within if, and reference it in for/else:
options = [] # track the option strings for logging in failure
for option in view.resource_entity.all_options:
option_text = option.text # read it once since its used multiple times
options.append(option_text)
if provider_type in option_text and entity_type.capitalize()[:-1] in option_text:
select_text = option_text
break
else:
# no match / break for select_text
if select_text is None:
pytest.fail(
'Failed to match the entity type [{e}] and provider type [{p}] in options: [{o}]'
.format(e=entity_type, p=provider_type, o=options)
)
view.cancel_button.click() # close the open form
map_tag = appliance.collections.map_tags.create(
entity_type=select_text,
label=tag_label,
category=category.name
)
# check the tag shows up
provider.refresh_provider_relationships(method='ui')
soft_assert(any(
tag.category.display_name == category.name and tag.display_name == tag_value
for tag in entity.get_tags()
), '{}: {} was not found in tags'.format(category.name, tag_value))
# delete it
map_tag.delete()
# check the tag goes away
provider.refresh_provider_relationships(method='ui')
|
jawilson/home-assistant
|
tests/components/metoffice/const.py
|
Python
|
apache-2.0
| 2,161 | 0.000463 |
"""Helpers for testing Met Office DataPoint."""
from homeassistant.components.metoffice.const import DOMAIN
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
TEST_DATETIME_STRING = "2020-04-25T12:00:00+00:00"
TEST_API_KEY = "test-metoffice-api-key"
TEST_LATITUDE_WAVERTREE = 53.38374
TEST_LONGITUDE_WAVERTREE = -2.90929
TEST_SITE_NAME_WAVERTREE = "Wavertree"
TEST_LATITUDE_KINGSLYNN = 52.75556
TEST_LONGITUDE_KINGSLYNN = 0.44231
TEST_SITE_NAME_KINGSLYNN = "King's Lynn"
METOFFICE_CONFIG_WAVERTREE = {
CONF_API_KEY: TEST_API_KEY,
CONF_LATITUDE: TEST_LATITUDE_WAVERTREE,
CONF_LONGITUDE: TEST_LONGITUDE_WAVERTREE,
CONF_NAME: TEST_SITE_NAME_WAVERTREE,
}
METOFFICE_CONFIG_KINGSLYNN = {
CONF_API_KEY: TEST_API_KEY,
CONF_LATITUDE: TEST_LATITUDE_KINGSLYNN,
CONF_LONGITUDE: TEST_LONGITUDE_KINGSLYNN,
CONF_NAME: TEST_SITE_NAME_KINGSLYNN,
}
KINGSLYNN_SENSOR_RESULTS = {
    "weather": ("weather", "sunny"),
    "visibility": ("visibility", "Very Good"),
    "visibility_distance": ("visibility_distance", "20-40"),
    "temperature": ("temperature", "14"),
    "feels_like_temperature": ("feels_like_temperature", "13"),
    "uv": ("uv_index", "6"),
    "precipitation": ("probability_of_precipitation", "0"),
    "wind_direction": ("wind_direction", "E"),
"wind_gust": ("wind_gust", "7"),
"wind_speed": ("wind_speed", "2"),
"humidity": ("humidity", "60"),
}
WAVERTREE_SENSOR_RESULTS = {
"weather": ("weather", "sunny"),
"visibility": ("visibility", "Good"),
"visibility_distance": ("visibility_distance", "10-20"),
"temperature": ("temperature", "17"),
"feels_like_temperature": ("feels_like_temperature", "14"),
"uv": ("uv_index", "5"),
"precipitation": ("probability_of_precipitation", "0"),
"wind_direction": ("wind_direction", "SSE"),
"wind_gust": ("wind_gust", "16"),
"wind_speed": ("wind_speed", "9"),
"humidity": ("humidity", "50"),
}
DEVICE_KEY_KINGSLYNN = {
(DOMAIN, f"{TEST_LATITUDE_KINGSLYNN}_{TEST_LONGITUDE_KINGSLYNN}")
}
DEVICE_KEY_WAVERTREE = {
(DOMAIN, f"{TEST_LATITUDE_WAVERTREE}_{TEST_LONGITUDE_WAVERTREE}")
}
|
mikebsg01/Contests-Online
|
UVa/12015-GoogleisFeelingLucky.py
|
Python
|
mit
| 604 | 0.038079 |
from sys import stdin
def readLine():
return stdin.readline().strip()
def readInt():
return int(readLine())
def readInts():
return list(map(int, readLine().split()))
def main():
T = readInt()
for i in range(T):
pages = [{'url': None, 'v': 0} for j in range(10)]
for j in range(10):
pages[j]['url'], pages[j]['v'] = readLine().split()
pages[j]['v'] = int(pages[j]['v'])
        maxVal = max(pages, key=lambda x: x['v'])['v']
pages = list(filter(lambda x: x['v'] == maxVal, pages))
print('Case #%d:' %(i + 1))
        for p in pages:
print(p['url'])
if __name__ == '__main__':
main()
|
Gibbsdavidl/miergolf
|
src/corEdges.py
|
Python
|
bsd-3-clause
| 3,914 | 0.020184 |
import sys
import numpy as np
from copy import copy, deepcopy
import multiprocessing as mp
from numpy.random import shuffle, random, normal
from math import log, sqrt, exp, pi
import itertools as it
from scipy.stats import gaussian_kde, pearsonr
from scipy.stats import ttest_1samp
from itertools import product
try:
from Crypto.pct_warnings import PowmInsecureWarning
import warnings
warnings.simplefilter("ignore", PowmInsecureWarning)
except:
pass
# In this work, I am computing transfer entropies
# by, first, discretizing expression values into a given
# number of bins. Using those bins, the probability of a given
# interval is computed, and the joint probability over time
# can also be computed (given two time series).
# Want sum of P(X_t+1, X_k2, Y_k1) * log [ (P(X_t+1, X_k2, Y_k1) * P(X_k2)) / (P(X_t+1, X_k2) * P(X_k2, Y_k1)) ]
# just get the joint, then get the others by marginalization
# parameters:
# yk: the markov order for Y = let it be 1
# xk: the markov order for x = let it be 1
# yl: the time delay for y
# xl: the time delay for x
# b : the number of bins
# autoTE is
# FOR TE (Y -> X)
def autoshuff((x,y)):
permutedY = deepcopy(y)
shuffle(permutedY)
return(pearsonr(x, permutedY)[0])
def autoCorr(x,y,reps1, cpus):
pool = mp.Pool(cpus)
observed = pearsonr(x,y)[0]
permutedList = it.repeat( (x,y), reps1)
permutedCor = pool.map(autoshuff, permutedList)
pool.close()
return([observed] + permutedCor)
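# --- Editor's note: hedged sketch, not part of the original corEdges.py ---
# autoCorr() above returns the observed Pearson correlation followed by reps1
# correlations recomputed against shuffled copies of y (a permutation null).
# One conventional way to reduce that list to an empirical two-sided p-value;
# the helper name is ours, not the module's:
def empirical_pvalue(corr_list):
    """corr_list = [observed, perm_1, ..., perm_n]; fraction of permutations at least as extreme."""
    observed = abs(corr_list[0])
    perms = [abs(c) for c in corr_list[1:]]
    # add-one smoothing so the estimate is never exactly zero
    return (1.0 + sum(1 for c in perms if c >= observed)) / (1.0 + len(perms))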
def geneindex(gene, genes):
for i in range(0,len(genes)):
if gene in genes[i]:
return(i)
return(-1)
def prepGeneDataGG(dats, genes, g1, g2):
i = geneindex(g1, genes) # from
j = geneindex(g2, genes) # to
if (i > -1 and j > -1):
x = map(float,dats[i]) #from
y = map(float,dats[j]) # to
x = np.array(x); x = (x-x.mean())/max(1,(x-x.mean()).max())
y = np.array(y); y = (y-y.mean())/max(1,(y-y.mean()).max())
        return((x,y))
    else:
return( ([],[]) )
def corEdges(exprfile, genefile, fileout, reps, cpus, g1, g2):
genes = open(genefile,'r').read().strip().split("\n")
dat = open(exprfile,'r').read().strip().split("\n")
dats = map(lambda x: x.split("\t"), dat)
fout = open(fileout,'w')
(fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
res0 = autoCorr(fromx,toy,reps, cpus)
    fout.write(g1 +"\t"+ g2 +"\t"+ "\t".join(map(str,res0)) +"\n")
fout.close()
def maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, ylmax, g1, g2):
genes = open(genefile,'r').read().strip().split("\n")
dat = open(exprfile,'r').read().strip().split("\n")
dats = map(lambda x: x.split("\t"), dat)
fout = open(fileout,'w')
(fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
maxCorr = 0.0
maxLag = 0.0
for yl in range(0,(ylmax+1)):
try:
res0 = autoCorr(fromx,toy,reps, cpus)
if (res0[0] > maxCorr):
                maxCorr = res0[0]
maxLag = yl
except:
e = sys.exc_info()
sys.stderr.write(str(e)+"\n")
fout.write(g1 +"\t"+ g2 +"\t"+ str(maxLag) +"\t"+ str(maxCorr) +"\t"+ "\t".join(map(str,res0)) +"\n")
fout.close()
def main(argv):
#for i in range(1,len(argv)):
# print(str(i) +" "+ argv[i])
exprfile = argv[1]
genefile = argv[2]
fileout = argv[3]
reps = int(argv[4])
cpus = int(argv[5])
g1 = argv[6]
g2 = argv[7]
maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, 6, g1, g2)
if __name__ == "__main__":
main(sys.argv)
#pref="/Users/davidlgibbs/Dropbox/Research/Projects/Influence_Maximization_Problem/EserData/"
#pref = "/users/dgibbs/EserData/"
#genes = pref +"yeast_array_genesymbols.csv"
#gexpr = pref +"Eser_Averaged_Expression.txt"
#tout = "/Users/davidlgibbs/Desktop/x.txt"
#corEdges(gexpr, genes, tout, 20, 2, "YOX1", "MBP1")
|
CharlesGust/django-imagr
|
imagr_site/imagr_app/migrations/0001_initial.py
|
Python
|
mit
| 5,413 | 0.00388 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ImagrUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('our_date_joined_field', models.DateField(auto_now_add=True)),
('our_is_active_field', models.BooleanField(default=False)),
('following', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
('image_url', models.CharField(default=b'Photo Not Found', max_length=1024)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='album',
name='cover',
field=models.ForeignKey(related_name='Album_cover', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='photos',
field=models.ManyToManyField(related_name='Album_photos', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
geobricks/geobricks_raster_correlation
|
geobricks_raster_correlation/cli/cli_argh.py
|
Python
|
gpl-2.0
| 493 | 0.006085 |
from argh import dispatch_commands
from argh.decorators import named, arg
from geobricks_raster_correlation.core.raster_correlation_core import get_correlation
@named('corr')
@arg('--bins', default=150, help='Bins')
def cli_get_correlation(file1, file2, **kwargs):
    corr = get_correlation(file1, file2, kwargs['bins'])
print "Series: ", corr['series']
print "Stats: ", corr['stats']
def main():
dispatch_commands([cli_get_correlation])
if __name__ == '__main__':
main()
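# --- Editor's note: hedged usage example, not part of the original module ---
# With the argh dispatcher above, the sub-command is named 'corr' and takes two
# raster paths plus the optional --bins flag, so an invocation would look
# roughly like this (file names are placeholders):
#
#   python cli_argh.py corr /data/raster_a.tif /data/raster_b.tif --bins 100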
| |
icloudrnd/automation_tools
|
openstack_dashboard/dashboards/groups/instances/panel.py
|
Python
|
apache-2.0
| 250 | 0.004 |
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.groups import dashboard
class Instances(horizon.Panel):
    name = _("Groups")
    slug = "instances"
dashboard.Groups.register(Instances)
|
ateska/striga2-sampleapp
|
app/appweb/context.py
|
Python
|
unlicense
| 58 | 0.034483 |
import striga
class CustomContext(striga.context):
    pass
|
ryfeus/lambda-packs
|
HDF4_H5_NETCDF/source2.7/h5py/tests/old/test_h5f.py
|
Python
|
mit
| 2,360 | 0.002119 |
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.

from __future__ import absolute_import
import tempfile
import shutil
import os
from h5py import File
from ..common import TestCase


class TestFileID(TestCase):
def test_descriptor_core(self):
with File('TestFileID.test_descriptor_core', driver='core', backing_store=False) as f:
with self.assertRaises(NotImplementedError):
f.id.get_vfd_handle()
def test_descriptor_sec2(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.test_descriptor_sec2')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5, driver='sec2') as f:
descriptor = f.id.get_vfd_handle()
self.assertNotEqual(descriptor, 0)
os.fsync(descriptor)
finally:
shutil.rmtree(dn_tmp)
class TestCacheConfig(TestCase):
def test_simple_gets(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_simple_gets')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5) as f:
hit_rate = f._id.get_mdc_hit_rate()
mdc_size = f._id.get_mdc_size()
finally:
shutil.rmtree(dn_tmp)
def test_hitrate_reset(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_hitrate_reset')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5) as f:
hit_rate = f._id.get_mdc_hit_rate()
f._id.reset_mdc_hit_rate_stats()
hit_rate = f._id.get_mdc_hit_rate()
assert hit_rate == 0
finally:
shutil.rmtree(dn_tmp)
def test_mdc_config_get(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.TestCacheConfig.test_mdc_config_get')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5) as f:
conf = f._id.get_mdc_config()
f._id.set_mdc_config(conf)
finally:
shutil.rmtree(dn_tmp)
|
isaac-s/cloudify-manager
|
tests/integration_tests/tests/agentless_tests/scale/test_scale_in.py
|
Python
|
apache-2.0
| 9,915 | 0 |
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from . import TestScaleBase
class TestScaleCompute(TestScaleBase):
def test_compute_scale_in_compute(self):
expectations = self.deploy_app('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_compute_ignore_failure_true(self):
expectations = self.deploy_app('scale_ignore_failure')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'ignore_failure': True,
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_compute_ignore_failure_false(self):
expectations = self.deploy_app('scale_ignore_failure')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
try:
self.scale(parameters={
'scalable_entity_name': 'compute',
'ignore_failure': False,
'delta': -1})
except RuntimeError as e:
self.assertIn(
"RuntimeError: Workflow failed: Task failed "
"'testmockoperations.tasks.mock_stop_failure'",
str(e))
else:
self.fail()
def test_compute_scale_out_and_in_compute_from_0(self):
expectations = self.deploy_app('scale10')
expectations['compute']['new']['install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute'})
expectations['compute']['new']['install'] = 1
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
self.deployment_assertions(expectations)
def test_compute_scale_in_2_compute(self):
expectations = self.deploy_app('scale4')
expectations['compute']['new']['install'] = 3
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -2})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 2
expectations['compute']['removed']['uninstall'] = 2
self.deployment_assertions(expectations)
    def test_db_contained_in_compute_scale_in_compute(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 4
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
            'delta': -1})
        expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 2
expectations['db']['removed']['uninstall'] = 2
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_db(self):
expectations = self.deploy_app('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'db',
'delta': -1})
expectations['compute']['existing']['install'] = 2
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 4
expectations['db']['removed']['install'] = 1
expectations['db']['removed']['uninstall'] = 1
expectations['db']['removed']['rel_install'] = 4
expectations['db']['removed']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_compute(self):
expectations = self.deploy_app('scale6')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['install'] = 2
expectations['db']['new']['rel_install'] = 8
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['existing']['install'] = 1
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 2
expectations['db']['existing']['rel_install'] = 8
expectations['db']['existing']['rel_uninstall'] = 4
self.deployment_assertions(expectations)
def test_db_connected_to_compute_scale_in_and_out_compute_from_0(self):
expectations = self.deploy_app('scale11')
expectations['compute']['new']['install'] = 0
expectations['db']['new']['install'] = 1
expectations['db']['new']['rel_install'] = 0
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': 1})
expectations['compute']['new']['install'] = 1
expectations['compute']['existing']['install'] = 0
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['rel_install'] = 0
expectations['db']['existing']['scale_rel_install'] = 2
self.deployment_assertions(expectations)
expectations = self.scale(parameters={
'scalable_entity_name': 'compute',
'delta': -1})
expectations['compute']['new']['install'] = 0
expectations['compute']['existing']['install'] = 0
expectations['compute']['removed']['install'] = 1
expectations['compute']['removed']['uninstall'] = 1
expectations['db']['existing']['install'] = 1
expectations['db']['existing']['scale_rel_install'] = 2
expectations['db']['existing']['rel_uninstall'] = 2
self.deployment_assertions(expectations)
def test_db_contained_in_compute_scale_in_db_scale_db(self):
expectations = self.deploy_app('scale5')
expectations['compute']['new']['install'] = 2
expectations['db']['new']['i
|
woodenbrick/mtp-lastfm
|
mtplastfm/webservices.py
|
Python
|
gpl-3.0
| 7,361 | 0.01277 |
# Copyright 2009 Daniel Woodhouse
#
#This file is part of mtp-lastfm.
#
#mtp-lastfm is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#mtp-lastfm is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with mtp-lastfm. If not, see http://www.gnu.org/licenses/
import hashlib
import urllib2
import urllib
import webbrowser
import httplib
import xml.etree.ElementTree as ET
from httprequest import HttpRequest
import localisation
_ = localisation.set_get_text()
class LastfmWebService(object):
def __init__(self):
self.api_key = "2d21a4ab6f049a413eb27dbf9af10579"
self.api_2 = "6146d36f59da8720cd5f3dd2c8422da0"
self.url = "http://ws.audioscrobbler.com/2.0/"
def request_session_token(self):
"""returns a token which is used authenticate mtp-lastfm with the users account"""
data = {"api_key" : self.api_key, "method" : "auth.gettoken"}
data['api_sig'] = self.create_api_sig(data)
encoded_data = urllib.urlencode(data)
url = self.url + "?" + encoded_data
conn = urllib2.urlopen(url)
return self.parse_xml(conn, "token")
def parse_xml(self, conn, tag):
"""Searches an XML document for a single tag and returns its value"""
tree = ET.parse(conn)
iter = tree.getiterator()
for child in iter:
if child.tag == tag:
token = child.text
break
try:
return token
except:
return False
def parse_xml_doc(self, doc, tag):
"""Search an XML doc for tags and returns them all as a list"""
tree = ET.parse(doc)
iter = tree.getiterator()
tags = []
for child in iter:
if child.tag == tag:
tags.append(child.text)
return tags
    def create_api_sig(self, dict):
        """dict maps param_name -> value; items are concatenated in sorted key order."""
data = ""
items = dict.items()
items.sort()
for i in items:
for j in i:
data += j
data += self.api_2
api_sig = hashlib.md5(data.encode('UTF-8')).hexdigest()
return api_sig
def request_authorisation(self, token):
"""Opens a browser to request users authentication"""
encoded_values = urllib.urlencode({
"api_key" : self.api_key,
"token" : token
})
webbrowser.open("http://www.last.fm/api/auth/?" + encoded_values)
def create_web_service_session(self, token):
"""The final step, this creates a token with infinite lifespan store in db"""
data = {
"api_key" : self.api_key,
"method" : "auth.getsession",
"token" : token }
data['api_sig'] = self.create_api_sig(data)
encode_values = urllib.urlencode(data)
url = self.url + "?" + encode_values
try:
conn = urllib2.urlopen(url)
self.key = self.parse_xml(conn, "key")
return True, self.key
except urllib2.HTTPError:
return False, _("A problem occurred during authentication")
def love_track(self, artist, track, sk):
#Params
#track (Required) : A track name (utf8 encoded)
#artist (Required) : An artist name (utf8 encoded)
#api_key (Required) : A Last.fm API key.
#api_sig (Required) : A Last.fm method signature.
#sk (Required) : A session key generated by authenticating a user.
post_values = {
"track" : track,
"artist" : artist,
"api_key" : self.api_key,
"method" : "track.love",
"sk" : sk}
post_values['api_sig'] = self.create_api_sig(post_values)
post_values = urllib.urlencode(post_values)
        req = urllib2.Request(url=self.url, data=post_values)
try:
url_handle = urllib2.urlopen(req)
response = url_handle.readlines()[1]
l = response.find('"') + 1
            r = response.rfind('"')
response = response[l:r]
return response
except urllib2.URLError, error:
return error
except httplib.BadStatusLine, error:
return error
def get_user_top_tags(self, username, limit=15):
#method user.getTopTags
#Params
#user (Required) : The user name
#limit (Optional) : Limit the number of tags returned
#api_key (Required) : A Last.fm API key.
encoded_values = urllib.urlencode(
{"method" : "user.gettoptags",
"user" : username,
"limit" : limit,
"api_key" : self.api_key}
)
url = self.url + "?" + encoded_values
conn = HttpRequest(url)
xml_doc = conn.connect(xml=True)
return self.parse_xml_doc(xml_doc, "name")
def get_popular_tags(self, method, info_dict):
"""method is either artist.gettoptags or track.gettoptags"""
#Params
#track (Optional) : The track name in question
#artist (Required) : The artist name in question
#api_key (Required) : A Last.fm API key.
dict = {"method" : method,
"artist" : info_dict['Artist'],
"api_key" : self.api_key}
if method == "track.gettoptags":
dict['track'] = info_dict['Track']
encoded_values = urllib.urlencode(dict)
url = self.url + "?" + encoded_values
conn = HttpRequest(url)
xml_doc = conn.connect(xml=True)
return self.parse_xml_doc(xml_doc, "name")
def send_tags(self, method, info, tags, sk):
"""Sends tags to last.fm. method is one of:
album.addtags, artist.addtags or track.addtags
info_dict is the artist, track and album info
tags is a comma delimited list of no more than 10 tags"""
#All methods require these parameters:
#tags (Required) : A comma delimited list of user supplied tags to apply
#to this album. Accepts a maximum of 10 tags.
#api_key (Required) : A Last.fm API key.
#api_sig (Required)
#sk (Required)
#artist (Required) : The artist name in question
post_values = {
"method" : method,
"tags" : tags,
"api_key" : self.api_key,
"sk" : sk,
"artist" : info['Artist']}
#these methods require additional info:
#album.addTags -> album
#track.addTags -> track
if method == "album.addtags":
post_values['album'] = info['Album']
if method == "track.addtags":
post_values['track'] = info['Track']
post_values['api_sig'] = self.create_api_sig(post_values)
conn = HttpRequest(self.url, urllib.urlencode(post_values))
response = conn.connect()
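# --- Editor's note: hedged sketch appended by the editor, not part of mtp-lastfm ---
# Every request above is signed the way create_api_sig() does it: parameter
# names and values are concatenated in sorted key order, the shared secret is
# appended, and the result is MD5-hashed. A standalone equivalent; the function
# name and sample values are ours, and hashlib is already imported at the top
# of this module:
def make_api_sig(params, secret):
    """Return the hex MD5 signature for a dict of string request parameters."""
    raw = "".join(key + value for key, value in sorted(params.items()))
    return hashlib.md5((raw + secret).encode('UTF-8')).hexdigest()
# Example: make_api_sig({"api_key": "some-key", "method": "auth.gettoken"}, "some-secret")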
|
jiangzhuo/kbengine
|
kbe/src/lib/python/Lib/test/test_random.py
|
Python
|
lgpl-3.0
| 31,638 | 0.002497 |
import unittest
import unittest.mock
import random
import time
import pickle
import warnings
from functools import partial
from math import log, exp, pi, fsum, sin
from test import support
class TestBasicOps:
# Superclass with tests common to all generators.
# Subclasses must arrange for self.gen to retrieve the Random instance
# to be tested.
def randomlist(self, n):
"""Helper function to make a list of random numbers"""
return [self.gen.random() for i in range(n)]
def test_autoseed(self):
self.gen.seed()
state1 = self.gen.getstate()
time.sleep(0.1)
        self.gen.seed() # different seeds at different times
state2 = self.gen.getstate()
self.assertNotEqual(state1, state2)
def test_saverestore(self):
N = 1000
self.gen.seed()
state = self.gen.getstate()
randseq = self.randomlist(N)
self.gen.setstate(state) # should regenerate the same sequence
self.assertEqual(randseq, self.randomlist(N))
def test_seedargs(self):
# Seed value with a negative hash.
class MySeed(object):
def __hash__(self):
return -1729
for arg in [None, 0, 0, 1, 1, -1, -1, 10**20, -(10**20),
3.14, 1+2j, 'a', tuple('abc'), MySeed()]:
self.gen.seed(arg)
for arg in [list(range(3)), dict(one=1)]:
self.assertRaises(TypeError, self.gen.seed, arg)
self.assertRaises(TypeError, self.gen.seed, 1, 2, 3, 4)
self.assertRaises(TypeError, type(self.gen), [])
@unittest.mock.patch('random._urandom') # os.urandom
def test_seed_when_randomness_source_not_found(self, urandom_mock):
# Random.seed() uses time.time() when an operating system specific
# randomness source is not found. To test this on machines were it
# exists, run the above test, test_seedargs(), again after mocking
# os.urandom() so that it raises the exception expected when the
# randomness source is not available.
urandom_mock.side_effect = NotImplementedError
self.test_seedargs()
def test_shuffle(self):
shuffle = self.gen.shuffle
lst = []
shuffle(lst)
self.assertEqual(lst, [])
lst = [37]
shuffle(lst)
self.assertEqual(lst, [37])
seqs = [list(range(n)) for n in range(10)]
shuffled_seqs = [list(range(n)) for n in range(10)]
for shuffled_seq in shuffled_seqs:
shuffle(shuffled_seq)
for (seq, shuffled_seq) in zip(seqs, shuffled_seqs):
self.assertEqual(len(seq), len(shuffled_seq))
self.assertEqual(set(seq), set(shuffled_seq))
# The above tests all would pass if the shuffle was a
# no-op. The following non-deterministic test covers that. It
# asserts that the shuffled sequence of 1000 distinct elements
# must be different from the original one. Although there is
# mathematically a non-zero probability that this could
# actually happen in a genuinely random shuffle, it is
# completely negligible, given that the number of possible
# permutations of 1000 objects is 1000! (factorial of 1000),
# which is considerably larger than the number of atoms in the
# universe...
lst = list(range(1000))
shuffled_lst = list(range(1000))
shuffle(shuffled_lst)
self.assertTrue(lst != shuffled_lst)
shuffle(lst)
self.assertTrue(lst != shuffled_lst)
def test_choice(self):
choice = self.gen.choice
with self.assertRaises(IndexError):
choice([])
self.assertEqual(choice([50]), 50)
self.assertIn(choice([25, 75]), [25, 75])
def test_sample(self):
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
N = 100
population = range(N)
for k in range(N+1):
s = self.gen.sample(population, k)
self.assertEqual(len(s), k)
uniq = set(s)
            self.assertEqual(len(uniq), k)
self.assertTrue(uniq <= set(population))
self.assertEqual(self.gen.sample([], 0), []) # test edge case N==k==0
# Exception raised if size of sample exceeds that of population
self.assertRaises(ValueError, self.gen.sample, population, N+1)
def test_sample_distribution(self):
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n = 5
pop = range(n)
trials = 10000 # large num prevents false negatives without slowing normal case
def factorial(n):
if n == 0:
return 1
return n * factorial(n - 1)
for k in range(n):
expected = factorial(n) // factorial(n-k)
perms = {}
for i in range(trials):
perms[tuple(self.gen.sample(pop, k))] = None
if len(perms) == expected:
break
else:
self.fail()
def test_sample_inputs(self):
# SF bug #801342 -- population can be any iterable defining __len__()
self.gen.sample(set(range(20)), 2)
self.gen.sample(range(20), 2)
self.gen.sample(range(20), 2)
self.gen.sample(str('abcdefghijklmnopqrst'), 2)
self.gen.sample(tuple('abcdefghijklmnopqrst'), 2)
def test_sample_on_dicts(self):
self.assertRaises(TypeError, self.gen.sample, dict.fromkeys('abcdef'), 2)
def test_gauss(self):
# Ensure that the seed() method initializes all the hidden state. In
# particular, through 2.2.1 it failed to reset a piece of state used
# by (and only by) the .gauss() method.
for seed in 1, 12, 123, 1234, 12345, 123456, 654321:
self.gen.seed(seed)
x1 = self.gen.random()
y1 = self.gen.gauss(0, 1)
self.gen.seed(seed)
x2 = self.gen.random()
y2 = self.gen.gauss(0, 1)
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
def test_pickling(self):
state = pickle.dumps(self.gen)
origseq = [self.gen.random() for i in range(10)]
newgen = pickle.loads(state)
restoredseq = [newgen.random() for i in range(10)]
self.assertEqual(origseq, restoredseq)
def test_bug_1727780(self):
# verify that version-2-pickles can be loaded
# fine, whether they are created on 32-bit or 64-bit
# platforms, and that version-3-pickles load fine.
files = [("randv2_32.pck", 780),
("randv2_64.pck", 866),
("randv3.pck", 343)]
for file, value in files:
f = open(support.findfile(file),"rb")
r = pickle.load(f)
f.close()
self.assertEqual(int(r.random()*1000), value)
def test_bug_9025(self):
# Had problem with an uneven distribution in int(n*random())
# Verify the fix by checking that distributions fall within expectations.
n = 100000
randrange = self.gen.randrange
k = sum(randrange(6755399441055744) % 3 == 2 for i in range(n))
self.assertTrue(0.30 < k/n < .37, (k/n))
try:
random.SystemRandom().random()
except NotImplementedError:
SystemRandom_available = False
else:
SystemRandom_available = True
@unittest.skipUnless(SystemRandom_available, "random.SystemRandom not available")
class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase):
gen = random.SystemRandom()
def test_autoseed(self):
# Doesn't need to do anything except not fail
self.gen.seed()
def test_saverestore(self):
self.assertRaises(NotImplementedError, self.gen.getstate)
self.assertRaises(NotImplementedError, self.gen.setstate, None)
def test_seedargs(self):
# Doesn't need to do anything except not fail
self.gen.seed(100)
def test_gauss(self):
self.gen.gauss_next = None
self
|
hzlf/openbroadcast.org
|
website/apps/ac_tagging/widgets.py
|
Python
|
gpl-3.0
| 4,147 | 0.001206 |
from django.forms.widgets import TextInput
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.safestring import mark_safe
class TagAutocompleteTagIt(TextInput):
def __init__(self, max_tags, *args, **kwargs):
self.max_tags = (
max_tags
if max_tags
else getattr(settings, "TAGGING_AUTOCOMPLETE_MAX_TAGS", 20)
)
super(TagAutocompleteTagIt, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None):
""" Render HTML code """
# django-tagging
case_sensitive = (
"false" if not getattr(settings, "FORCE_LOWERCASE_TAGS", False) else "false"
)
max_tag_lentgh = getattr(settings, "MAX_TAG_LENGTH", 50)
# django-tagging-autocomplete-tagit
autocomplete_min_length = getattr(
settings, "TAGGING_AUTOCOMPLETE_MIN_LENGTH", 1
)
remove_confirmation = (
"true"
if getattr(settings, "TAGGING_AUTOCOMPLETE_REMOVE_CONFIRMATION", True)
else "false"
)
animate = (
"true"
if getattr(settings, "TAGGING_AUTOCOMPLETE_ANIMATE", True)
else "false"
)
list_view = reverse("ac_tagging-list")
html = super(TagAutocompleteTagIt, self).render(name, value, attrs)
# Subclass this field in case you need to add some custom behaviour like custom callbacks
# js = u"""<script type="text/javascript">
# $(document).ready(function() {
# init_jQueryTagit({
# objectId: '%s',
# sourceUrl: '%s',
# fieldName: '%s',
# minLength: %s,
# removeConfirmation: %s,
# caseSensitive: %s,
# animate: %s,
# maxLength: %s,
# maxTags: %s,
# //onTagAdded : ac_tagginc_clean,
# //onTagRemoved: ac_tagginc_clean,
# onTagClicked: null,
# onMaxTagsExceeded: null,
# })
# });
# </script>""" % (attrs['id'], list_view, name, autocomplete_min_length, remove_confirmation, case_sensitive,
# animate, max_tag_lentgh, self.max_tags)
js = ""
return mark_safe("\n".join([html, js]))
class Media:
# JS Base url defaults to STATIC_URL/jquery-autocomplete/
js_base_url = getattr(
settings,
"TAGGING_AUTOCOMPLETE_JS_BASE_URL",
"%sjs/jquery-tag-it/" % settings.STATIC_URL,
)
# jQuery ui is loaded from google's CDN by default
        jqueryui_default = (
            "https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.12/jquery-ui.min.js"
)
jqueryui_file = getattr(
settings, "TAGGING_AUTOCOMPLETE_JQUERY_UI_FILE", jqueryui_default
)
# if a custom jquery ui file has been specified
if jqueryui_file != jqueryui_default:
# determine path
jqueryui_file = "%s%s" % (js_base_url, jqueryui_file)
# load js
        js = (
            "%sac_tagging.js" % js_base_url,
            jqueryui_file,
"%sjquery.tag-it.js" % js_base_url,
)
# custom css can also be overriden in settings
css_list = getattr(
settings,
"TAGGING_AUTOCOMPLETE_CSS",
["%scss/ui-autocomplete-tag-it.css" % js_base_url],
)
        # make sure css_list is a list; wrap a single string in a list
if type(css_list) != list and type(css_list) == str:
css_list = [css_list]
css = {"screen": css_list}
def _format_value(self, value):
return value.replace(",", ", ")
def value_from_datadict(self, data, files, name):
current_value = data.get(name, None)
if current_value and current_value[-1] != ",":
current_value = u"%s," % current_value
# current_value = u'"%s"' % current_value
# current_value = u'%s' % current_value
return current_value
|
sinnwerkstatt/landmatrix
|
apps/grid/forms/deal_action_comment_form.py
|
Python
|
agpl-3.0
| 3,733 | 0.001607 |
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from apps.grid.fields import TitleField, UserModelChoiceField
from apps.grid.widgets import CommentInput
from .base_form import BaseForm
class DealActionCommentForm(BaseForm):
exclude_in_export = (
"tg_action_comment",
"source",
"id",
"assign_to_user",
"tg_feedback_comment",
"fully_updated",
)
NOT_PUBLIC_REASON_CHOICES = (
("", _("---------")),
(
"Temporary removal from PI after criticism",
_("Temporary removal from PI after criticism"),
),
("Research in progress", _("Research in progress")),
("Land Observatory Import", _("Land Observatory Import")),
)
form_title = _("Meta info")
tg_action = TitleField(required=False, label="", initial=_("Fully updated"))
tg_action_comment = forms.CharField(
required=True, label=_("Action comment"), widget=CommentInput
)
fully_updated = forms.BooleanField(required=False, label=_("Fully updated"))
    # fully_updated_history = forms.CharField(
# required=False, label=_("Fully updated history"),
# widget=forms.Textarea(attrs={"readonly":True, "cols": 80, "rows": 5}))
    tg_not_public = TitleField(required=False, label="", initial=_("Public deal"))
not_public = forms.BooleanField(
required=False,
label=_("Not public"),
help_text=_("Please specify in additional comment field"),
)
not_public_reason = forms.ChoiceField(
required=False, label=_("Reason"), choices=NOT_PUBLIC_REASON_CHOICES
)
tg_not_public_comment = forms.CharField(
required=False, label=_("Comment on not public"), widget=CommentInput
)
tg_imported = TitleField(required=False, label="", initial=_("Import history"))
# source = forms.CharField(
# required=False, label=_("Import source"),
# widget=forms.TextInput(attrs={'readonly': True}))
previous_identifier = forms.CharField(
required=False,
label=_("Previous identifier"),
widget=forms.TextInput(attrs={"size": "64", "readonly": True}),
)
tg_feedback = TitleField(required=False, label="", initial=_("Feedback"))
assign_to_user = UserModelChoiceField(
required=False,
label=_("Assign to"),
queryset=get_user_model().objects.none(),
empty_label=_("Unassigned"),
)
tg_feedback_comment = forms.CharField(
required=False, label=_("Feedback comment"), widget=CommentInput
)
class Meta:
name = "action_comment"
def __init__(self, *args, **kwargs):
super(DealActionCommentForm, self).__init__(*args, **kwargs)
self.fields["assign_to_user"].queryset = (
get_user_model()
.objects.filter(
is_active=True, groups__name__in=("Editors", "Administrators")
)
.order_by("first_name", "last_name")
)
def get_attributes(self, request=None):
# Remove action comment, this field is handled separately in DealBaseView
attributes = super(DealActionCommentForm, self).get_attributes(request)
del attributes["tg_action_comment"]
return attributes
@classmethod
def get_data(cls, activity, group=None, prefix=""):
data = super().get_data(activity, group, prefix)
# Remove action comment, due to an old bug it seems to exist as an attribute too
if "tg_action_comment" in data:
del data["tg_action_comment"]
# Get action comment
data["tg_action_comment"] = activity.comment or ""
return data
|
microdee/IronHydra
|
src/IronHydra/Lib/tarfile.py
|
Python
|
mit
| 88,997 | 0.001888 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
# $Source$
version = "0.9.0"
__author__ = "Lars Gustäbel (lars@gustaebel.de)"
__date__ = "$Date$"
__cvsid__ = "$Id$"
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
import operator
try:
import grp, pwd
except ImportError:
grp = pwd = None
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = "ustar \0" # magic gnu tar string
POSIX_MAGIC = "ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = "0" # regular file
AREGTYPE = "\0" # regular file
LNKTYPE = "1" # link (inside tarfile)
SYMTYPE = "2" # symbolic link
CHRTYPE = "3" # character special device
BLKTYPE = "4" # block special device
DIRTYPE = "5" # directory
FIFOTYPE = "6" # fifo special device
CONTTYPE = "7" # contiguous file
GNUTYPE_LONGNAME = "L" # GNU tar longname
GNUTYPE_LONGLINK = "K" # GNU tar longlink
GNUTYPE_SPARSE = "S" # GNU tar sparse file
XHDTYPE = "x" # POSIX.1-2001 extended header
XGLTYPE = "g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0120000 # symbolic link
S_IFREG = 0100000 # regular file
S_IFBLK = 0060000 # block device
S_IFDIR = 0040000 # directory
S_IFCHR = 0020000 # character device
S_IFIFO = 0010000 # fifo
TSUID = 04000 # set UID on execution
TSGID = 02000 # set GID on execution
TSVTX = 01000 # reserved
TUREAD = 0400 # read by owner
TUWRITE = 0200 # write by owner
TUEXEC = 0100 # execute/search by owner
TGREAD = 0040 # read by group
TGWRITE = 0020 # write by group
TGEXEC = 0010 # execute/search by group
TOREAD = 0004 # read by other
TOWRITE = 0002 # write by other
TOEXEC = 0001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
ENCODING = sys.getdefaultencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length):
"""Convert a python string to a null-terminated string buffer.
"""
return s[:length] + (length - len(s)) * NUL
def nts(s):
"""Convert a null-terminated string field to a python string.
"""
# Use the string up to the first null char.
p = s.find("\0")
if p == -1:
return s
return s[:p]
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0200):
try:
n = int(nts(s) or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0L
for i in xrange(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = "%0*o" % (digits - 1, n) + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = ""
for i in xrange(digits - 1):
s = chr(n & 0377) + s
n >>= 8
s = chr(0200) + s
return s
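# --- Editor's note: hedged illustration, not part of the original tarfile.py ---
# itn()/nti() round-trip a tar header number field: values below 8**(digits-1)
# are written as NUL-terminated octal text, larger values fall back to the GNU
# base-256 form flagged by a leading chr(0200) byte (see the comment in itn()).
def _demo_number_field_roundtrip():
    """Tiny self-check of both encodings (example values only)."""
    assert itn(1024) == "0002000" + NUL           # small value -> octal text
    assert nti(itn(1024)) == 1024
    assert nti(itn(10 ** 12)) == 10 ** 12         # large value -> GNU base-256 form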
def uts(s, encoding, errors):
"""Convert a unicode object to a string.
"""
if errors == "utf-8":
# An extra error handler similar to the -o invalid=UTF-8 option
# in POSIX.1-2001. Replace untranslatable characters with their
# UTF-8 representation.
try:
return s.encode(encoding, "strict")
exc
|
python-control/python-control
|
control/tests/config_test.py
|
Python
|
bsd-3-clause
| 12,736 | 0.000157 |
"""config_test.py - test config module
RMM, 25 may 2019
This test suite checks the functionality of the config module
"""
from math import pi, log10
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup as mplcleanup
import numpy as np
import pytest
import control as ct
@pytest.mark.usefixtures("editsdefaults") # makes sure to reset the defaults
# to the test configuration
class TestConfig:
# Create a simple second order system to use for testing
sys = ct.tf([10], [1, 2, 1])
def test_set_defaults(self):
ct.config.set_defaults('config', test1=1, test2=2, test3=None)
assert ct.config.defaults['config.test1'] == 1
assert ct.config.defaults['config.test2'] == 2
assert ct.config.defaults['config.test3'] is None
@mplcleanup
def test_get_param(self):
assert ct.config._get_param('freqplot', 'dB')\
== ct.config.defaults['freqplot.dB']
assert ct.config._get_param('freqplot', 'dB', 1) == 1
ct.config.defaults['config.test1'] = 1
assert ct.config._get_param('config', 'test1', None) == 1
assert ct.config._get_param('config', 'test1', None, 1) == 1
ct.config.defaults['config.test3'] = None
assert ct.config._get_param('config', 'test3') is None
assert ct.config._get_param('config', 'test3', 1) == 1
assert ct.config._get_param('config', 'test3', None, 1) is None
assert ct.config._get_param('config', 'test4') is None
assert ct.config._get_param('config', 'test4', 1) == 1
assert ct.config._get_param('config', 'test4', 2, 1) == 2
assert ct.config._get_param('config', 'test4', None, 3) == 3
assert ct.config._get_param('config', 'test4', {'test4': 1}, None) == 1
def test_default_deprecation(self):
ct.config.defaults['deprecated.config.oldkey'] = 'config.newkey'
ct.config.defaults['deprecated.config.oldmiss'] = 'config.newmiss'
msgpattern = r'config\.oldkey.* has been renamed to .*config\.newkey'
ct.config.defaults['config.newkey'] = 1
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults['config.oldkey'] == 1
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.defaults['config.oldkey'] = 2
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults['config.oldkey'] == 2
assert ct.config.defaults['config.newkey'] == 2
ct.config.set_defaults('config', newkey=3)
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config._get_param('config', 'oldkey') == 3
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.set_defaults('config', oldkey=4)
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults['config.oldkey'] == 4
assert ct.config.defaults['config.newkey'] == 4
ct.config.defaults.update({'config.newkey': 5})
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.defaults.update({'config.oldkey': 6})
with pytest.warns(FutureWarning, match=msgpattern):
assert ct.config.defaults.get('config.oldkey') == 6
with pytest.raises(KeyError):
with pytest.warns(FutureWarning, match=msgpattern):
ct.config.defaults['config.oldmiss']
with pytest.raises(KeyError):
ct.config.defaults['config.neverdefined']
# assert that reset defaults keeps the custom type
ct.config.reset_defaults()
with pytest.warns(FutureWarning,
match='bode.* has been renamed to.*freqplot'):
assert ct.config.defaults['bode.Hz'] \
== ct.config.defaults['freqplot.Hz']
@mplcleanup
def test_fbs_bode(self):
ct.use_fbs_defaults()
# Generate a Bode plot
plt.figure()
omega = np.logspace(-3, 3, 100)
ct.bode_plot(self.sys, omega)
# Get the magnitude line
mag_axis = plt.gcf().axes[0]
mag_line = mag_axis.get_lines()
mag_data = mag_line[0].get_data()
mag_x, mag_y = mag_data
# Make sure the x-axis is in rad/sec and y-axis is in natural units
np.testing.assert_almost_equal(mag_x[0], 0.001, decimal=6)
np.testing.assert_almost_equal(mag_y[0], 10, decimal=3)
# Get the phase line
phase_axis = plt.gcf().axes[1]
phase_line = phase_axis.get_lines()
phase_data = phase_line[0].get_data()
phase_x, phase_y = phase_data
# Make sure the x-axis is in rad/sec and y-axis is in degrees
np.testing.assert_almost_equal(phase_x[-1], 1000, decimal=0)
np.testing.assert_almost_equal(phase_y[-1], -180, decimal=0)
# Override the defaults and make sure that works as well
plt.figure()
ct.bode_plot(self.sys, omega, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
plt.figure()
ct.bode_plot(self.sys, omega, Hz=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_x[0], 0.001 / (2*pi), decimal=6)
plt.figure()
ct.bode_plot(self.sys, omega, deg=False)
phase_x, phase_y = (((plt.gcf().axes[1]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(phase_y[-1], -pi, decimal=2)
@mplcleanup
def test_matlab_bode(self):
ct.use_matlab_defaults()
# Generate a Bode plot
plt.figure()
omega = np.logspace(-3, 3, 100)
ct.bode_plot(self.sys, omega)
# Get the magnitude line
mag_axis = plt.gcf().axes[0]
mag_line = mag_axis.get_lines()
mag_data = mag_line[0].get_data()
mag_x, mag_y = mag_data
# Make sure the x-axis is in rad/sec and y-axis is in dB
np.testing.assert_almost_equal(mag_x[0], 0.001, decimal=6)
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
# Get the phase line
phase_axis = plt.gcf().axes[1]
phase_line = phase_axis.get_lines()
phase_data = phase_line[0].get_data()
phase_x, phase_y = phase_data
# Make sure the x-axis is in rad/sec and y-axis is in degrees
np.testing.assert_almost_equal(phase_x[-1], 1000, decimal=1)
np.testing.assert_almost_equal(phase_y[-1], -180, decimal=0)
# Override the defaults and make sure that works as well
plt.figure()
ct.bode_plot(self.sys, omega, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
plt.figure()
ct.bode_plot(self.sys, omega, Hz=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_x[0], 0.001 / (2*pi), decimal=6)
plt.figure()
ct.bode_plot(self.sys, omega, deg=False)
phase_x, phase_y = (((plt.gcf().axes[1]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(phase_y[-1], -pi, decimal=2)
@mplcleanup
|
def test_custom_bode_default(self):
ct.config.defaults['freqplot.dB'] = True
ct.config.defaults['freqplot.deg'] = True
ct.config.defaults['freqplot.Hz'] = True
# Generate a Bode plot
plt.figure()
omega = np.logspace(-3, 3, 100)
ct.bode_plot(self.sys, omega, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_y[0], 20*log10(10), decimal=3)
# Override defaults
plt.figure()
ct.bode_plot(self.sys, omega, Hz=True, deg=False, dB=True)
mag_x, mag_y = (((plt.gcf().axes[0]).get_lines())[0]).get_data()
phase_x, phase_y = (((plt.gcf().axes[1]).get_lines())[0]).get_data()
np.testing.assert_almost_equal(mag_x[0], 0.001 / (2*pi), decimal=6)
np.test
|
auto-mat/klub
|
local_migrations/migrations_helpdesk/0028_auto_20190826_2034.py
|
Python
|
gpl-3.0
| 2,149 | 0.002792 |
# Generated by Django 2.2.4 on 2019-08-26 18:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('helpdesk', '0027_auto_20190826_0700'),
]
operations = [
migrations.AlterField(
model_name='followup',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='kbitem',
name='voted_by',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='queue',
name='default_owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='default_owner', to=settings.AUTH_USER_MODEL, verbose_name='Default owner'),
),
migrations.AlterField(
model_name='savedsearch',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='ticket',
name='assigned_to',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assigned_to', to=settings.AUTH_USER_MODEL, verbose_name='Assigned to'),
),
migrations.AlterField(
model_name='ticketcc',
name='user',
field=models.ForeignKey(blank=True, help_text='User who wishes to receive updates for this ticket.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='usersettings',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='usersettings_helpdesk', to=settings.AUTH_USER_MODEL),
),
]
|
ksetyadi/Sahana-Eden
|
models/delphi.py
|
Python
|
mit
| 9,575 | 0.014413 |
# coding: utf8
"""
Delphi decision maker
"""
module = "delphi"
if deployment_settings.has_module(module):
########
# Groups
########
resourcename = "group"
tablename = module + "_" + resourcename
table = db.define_table(tablename, timestamp,
Field("name", notnull=True),
Field("description", "text"),
Field("active", "boolean", default=True),
migrate=migrate)
table.name.label = T("Group Title")
table.name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, "delphi_group.name")]
# CRUD Strings
ADD_GROUP = T("Add Group")
LIST_GROUPS = T("List Groups")
s3.crud_strings[tablename] = Storage(
title_create = ADD_GROUP,
title_display = T("Group Details"),
title_list = LIST_GROUPS,
title_update = T("Edit Group"),
title_search = T("Search Groups"),
subtitle_create = T("Add New Group"),
subtitle_list = T("Groups"),
label_list_button = LIST_GROUPS,
label_create_button = ADD_GROUP,
msg_record_created = T("Group added"),
msg_record_modified = T("Group updated"),
msg_record_deleted = T("Group deleted"),
msg_list_empty = T("No Groups currently defined"))
s3xrc.model.configure(table, list_fields=["id", "name", "description"])
##################
# Group Membership
##################
delphi_role_opts = {
1:T("Guest"),
2:T("Contributor"),
3:T("Participant"),
4:T("Moderator")
}
resourcename = "user_to_group"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("group_id", db.delphi_group, notnull=True),
Field("user_id", db.auth_user, notnull=True),
Field("description"),
Field("req", "boolean", default=False),
Field("status", "integer", default=1),
migrate=migrate)
table.group_id.label = T("Problem Group")
table.group_id.requires = IS_IN_DB(db, "delphi_group.id", "%(name)s")
table.group_id.represent = lambda id: (id and [db(db.delphi_group.id == id).select(limitby=(0, 1)).first().name] or ["None"])[0]
table.user_id.label = T("User")
table.user_id.represent = lambda user_id: (user_id == 0) and "-" or "%(first_name)s %(last_name)s [%(id)d]" % db(db.auth_user.id==user_id).select()[0]
#table.user_id.requires = IS_IN_DB(db, "auth_user.id", "%(first_name)s %(last_name)s [%(id)d]")
table.user_id.requires = IS_IN_DB(db, "auth_user.id", shn_user_represent)
table.status.requires = IS_IN_SET(delphi_role_opts, zero=None)
table.status.represent = lambda opt: delphi_role_opts.get(opt, UNKNOWN_OPT)
# CRUD Strings
ADD_MEMBERSHIP = T("Add Membership")
LIST_MEMBERSHIPS = T("List Memberships")
s3.crud_strings[tablename] = Storage(
title_create = ADD_MEMBERSHIP,
title_display = T("Membership Details"),
title_list = LIST_MEMBERSHIPS,
title_update = T("Edit Membership"),
title_search = T("Search Memberships"),
subtitle_create = T("Add New Membership"),
subtitle_list = T("Memberships"),
label_list_button = LIST_MEMBERSHIPS,
label_create_button = ADD_MEMBERSHIP,
msg_record_created = T("Membership added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Memberships currently defined"))
s3xrc.model.configure(table, list_fields=["id", "group_id", "user_id", "status", "req"])
##########
# Problems
##########
resourcename = "problem"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("group_id", db.delphi_group, notnull=True),
Field("name", notnull=True),
Field("description", "text"),
|
Field("criteria", "text", notnull=True),
Field("active", "boolean", default=True),
Field("created_by", db.auth_user, writable=False, readable=False),
Field("last_modification", "datetime", default=request.now, writable=False),
migrate=migrate)
table.name.label = T("Problem Title")
table.name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, "delphi_problem.name")]
table.created_by.default = auth.user.id if auth.user else 0
table.group_id.label = T("Problem Group")
table.group_id.requires = IS_IN_DB(db, "delphi_group.id", "%(name)s")
table.group_id.represent = lambda id: (id and [db(db.delphi_group.id == id).select(limitby=(0, 1)).first().name] or ["None"])[0]
# CRUD Strings
ADD_PROBLEM = T("Add Problem")
LIST_PROBLEMS = T("List Problems")
s3.crud_strings[tablename] = Storage(
title_create = ADD_PROBLEM,
title_display = T("Problem Details"),
title_list = LIST_PROBLEMS,
title_update = T("Edit Problem"),
title_search = T("Search Problems"),
subtitle_create = T("Add New Problem"),
subtitle_list = T("Problems"),
label_list_button = LIST_PROBLEMS,
label_create_button = ADD_PROBLEM,
msg_record_created = T("Problem added"),
msg_record_modified = T("Problem updated"),
msg_record_deleted = T("Problem deleted"),
msg_list_empty = T("No Problems currently defined"))
s3xrc.model.configure(table, list_fields=["id", "group_id", "name", "created_by", "last_modification"])
def get_last_problem_id():
last_problems = db(db.delphi_problem.id > 0).select(db.delphi_problem.id, orderby =~ db.delphi_problem.id, limitby = (0, 1))
if last_problems:
return last_problems[0].id
###########
# Solutions
###########
resourcename = "solution"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("problem_id", db.delphi_problem, notnull=True),
Field("name"),
Field("description", "text"),
Field("suggested_by", db.auth_user, writable=False, readable=False),
Field("last_modification", "datetime", default=request.now, writable=False),
migrate=migrate)
table.name.requires = IS_NOT_EMPTY()
table.name.label = T("Title")
table.suggested_by.default = auth.user.id if auth.user else 0
table.problem_id.label = T("Problem")
# Breaks on 1st_run with prepopulate=False, so moved to controller
#table.problem_id.default = get_last_problem_id()
table.problem_id.requires = IS_IN_DB(db, "delphi_problem.id", "%(id)s: %(name)s")
table.problem_id.represent = lambda id: (id and [db(db.delphi_problem.id == id).select(limitby=(0, 1)).first().name] or ["None"])[0]
# CRUD Strings
ADD_SOLUTION = T("Add Solution")
LIST_SOLUTIONS = T("List Solutions")
s3.crud_strings[tablename] = Storage(
title_create = ADD_SOLUTION,
title_display = T("Solution Details"),
title_list = LIST_SOLUTIONS,
title_update = T("Edit Solution"),
title_search = T("Search Solutions"),
subtitle_create = T("Add New Solution"),
subtitle_list = T("Solutions"),
label_list_button = LIST_SOLUTIONS,
label_create_button = ADD_SOLUTION,
msg_record_created = T("Solution added"),
msg_record_modified = T("Solution updated"),
msg_record_deleted = T("Solution deleted"),
msg_list_empty = T("No Solutions currently defined"))
s3xrc.model.configure(table, list_fields=["id", "problem_id", "name", "suggested_by", "last_modification"])
#######
# Votes
#######
resourcename = "vote"
tablename = module + "_" + resourcename
table = db.define_table(tablename,
Field("problem_id", db.delphi_problem,
|
Djabbz/wakatime
|
tests/utils.py
|
Python
|
bsd-3-clause
| 1,355 | 0.002214 |
# -*- coding: utf-8 -*-
import logging
from wakatime.compat import u
try:
import mock
except ImportError:
import unittest.mock as mock
try:
# Python 2.6
import unittest2 as unittest
except ImportError:
# Python >= 2.7
import unittest
class TestCase(unittest.TestCase):
patch_these = []
def setUp(self):
# disable logging while testing
logging.disable(logging.CRITICAL)
self.patched = {}
if hasattr(self, 'patch_these'):
for patch_this in self.patch_these:
namespace = patch_this[0] if isinstance(patch_this, (list, set)) else patch_this
patcher = mock.patch(namespace)
mocked = patcher.start()
mocked.reset_mock()
self.patched[namespace] = mocked
if isinstance(patch_this, (list, set)) and len(patch_this) > 0:
retval = patch_this[1]
if callable(retval):
retval = retval()
mocked.return_value = retval
def tearDown(self):
mock.patch.stopall()
def normalize_list(self, items):
return sorted([u(x) for x in items])
def assertListsEqual(self, first_list, second_list):
self.assertEquals(self.normalize_list(first_list), self.normalize_list(second_list))
|
CKPalk/ProbabilisticMethods
|
A5/hw5_start.py
|
Python
|
mit
| 4,767 | 0.064821 |
# CIS 410/510pm
# Homework 5 beta 0.0.1
# Cameron Palk
# May 2016
#
# Special thanks to Daniel Lowd for the skeletor code
import sys
import tokenize
from functools import reduce
global_card = []
num_vars = 0
''' Calc Strides
'''
def calcStrides( scope ):
rev_scope = list( reversed( scope ) )
res = [ 1 ] + [ 0 ] * ( len( scope ) - 1 )
for idx in range( 1, len( rev_scope ) ):
res[ idx ] = res[ idx - 1 ] * global_card[ rev_scope[ idx - 1 ] ]
stride = list( reversed( res ) )
return { scope[i] : stride[i] for i in range( len( scope ) ) }
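# Hedged sketch with assumed cardinalities (not from the assignment data): with
# variables 0, 1, 2 having cardinalities 2, 3, 4, the last variable in the
# scope varies fastest, so it gets stride 1 and each earlier variable's stride
# is the product of the cardinalities of the variables after it.
def _calc_strides_demo():
    global global_card
    saved = global_card
    global_card = [2, 3, 4]
    try:
        assert calcStrides([0, 1, 2]) == {0: 12, 1: 4, 2: 1}
    finally:
        global_card = saved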
# FACTOR CLASS DEFINITION
class Factor( dict ):
# Constructor
def __init__(self, scope_, vals_):
self.scope = scope_
self.vals = vals_
self.stride = calcStrides( scope_ )
#
# Are two object EQual, True of False
def __eq__(self, other):
return (self.scope == other.scope and
self.vals == other.vals and
self.stride == other.stride )
#
# A string used for printing the Factor Objects
def __repr__( self ):
style = "\n{0}\nScope: {1}\nStride: {2}\nCard: {3}\nVals:\n{4}\n{0}\n"
vertBar = ''.join( ['-'] * 50 )
return style.format( vertBar, self.scope, self.stride,
{ v : global_card[v] for v in self.scope },
'\n'.join( [ str( round( e, 3 ) ) for e in self.vals ] ) )
#
# What the '*' character does between our objects
def __mul__( self, other ):
new_scope = list( set( self.scope ).union( set( other.scope ) ) )
assignment = { e : 0 for e in new_scope }
card = { u : global_card[ u ] for u in new_scope }
val_count = reduce( lambda agg, x: agg * global_card[x], new_scope, 1 )
new_vals = [ 0 ] * val_count
idx1 = idx2 = 0
for i in range( 0, val_count ):
new_vals[ i ] = self.vals[ idx1 ] * other.vals[ idx2 ]
for rv in reversed( new_scope ):
if assignment[ rv ] == card[ rv ] - 1:
idx1 -= assignment[ rv ] * self.stride [ rv ] if rv in self.stride else 0
idx2 -= assignment[ rv ] * other.stride[ rv ] if rv in other.stride else 0
assignment[ rv ] = 0
else:
idx1 += self.stride [ rv ] if rv in self.scope else 0
idx2 += other.stride[ rv ] if rv in other.scope else 0
assignment[ rv ] += 1
break
#
return Factor( new_scope, new_vals )
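#
# Worked sketch of the product (assumed numbers, not from the input file):
# with global_card = [2, 2], Factor([0], [0.1, 0.9]) * Factor([1], [0.4, 0.6])
# has scope [0, 1] and vals [0.04, 0.06, 0.36, 0.54] -- each entry is
# f1(x0) * f2(x1), with the last scope variable varying fastest.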
#
# Sum out the variable and return a new Factor
def sumOut( self ):
# TODO Sum out a RV
return
#
# Helper Functions:
def containsRV( self, rv ):
return rv in self.scope
#
# END FACTOR CLASS DEFINITION
# IGNORE DANIELS READER BELOW
#
# Read in all tokens from stdin. Save it to a (global) buf that we use
# later. (Is there a better way to do this? Almost certainly.)
curr_token = 0
token_buf = []
def read_tokens():
global token_buf
for line in sys.stdin:
token_buf.extend(line.strip().split())
#
def next_token():
global curr_token
global token_buf
curr_token += 1
return token_buf[ curr_token - 1 ]
#
def next_int():
return int( next_token() )
#
def next_float():
return float( next_token() )
#
def read_model():
# Read in all tokens and throw away the first (expected to be "MARKOV")
read_tokens()
s = next_token()
# Get number of vars, followed by their ranges
global num_vars
num_vars = next_int()
global global_card
global_card = [ next_int() for i in range( num_vars ) ]
# Get number and scopes of factors
num_factors = int(next_token())
factor_scopes = []
for i in range(num_factors):
factor_scopes.append( [ next_int() for i in range( next_int() ) ] )
# Read in all factor values
factor_vals = []
for i in range(num_factors):
factor_vals.append( [ next_float() for i in range( next_int() ) ] )
return [ Factor(s,v) for (s,v) in zip( factor_scopes, factor_vals ) ]
#
# IGNORE DANIELS READER ABOVE
''' Factor Count With Var
@input factors Factors we want to look through
@input rv A RV
@return [int] The number of times the rv occurs in the factors' scopes
'''
def factorCountWithVar( factors, rv ):
return sum( [ 1 if f.containsRV( rv ) else 0 for f in factors ] )
''' Factor Stats
'''
def factorStats( factors, possibleVariables ):
return { v: factorCountWithVar(factors,v) for v in range( num_vars ) if v in possibleVariables }
''' Compute Partition Function
@input factors An array of Factor objects representing the graph
@return [float] The partition function ( why is it called a function? )
'''
def computePartitionFunction( factors ):
# TODO: Implement a faster way to compute the partition function by summing out variables
f = reduce( Factor.__mul__, factors )
z = sum( f.vals )
return z
#
''' Main '''
def main():
# Read file
factors = read_model()
# Compute the partition function
z = computePartitionFunction( factors )
# Print results
print( "Z =", z )
return
# Run main if this module is being run directly
if __name__ == '__main__':
main()
|
mlperf/training_results_v0.7
|
NVIDIA/benchmarks/maskrcnn/implementations/pytorch/demo/predictor.py
|
Python
|
apache-2.0
| 15,180 | 0.000791 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.utils import cv2_util
class COCODemo(object):
# COCO categories for pretty print
CATEGORIES = [
"__background",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
def __init__(
self,
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=224,
):
self.cfg = cfg.clone()
self.model = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
self.min_image_size = min_image_size
save_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
self.transforms = self.build_transform()
mask_threshold = -1 if show_mask_heatmaps else 0.5
self.masker = Masker(threshold=mask_threshold, padding=1)
# used to make colors for each class
self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
self.show_mask_heatmaps = show_mask_heatmaps
self.masks_per_dim = masks_per_dim
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
predictions = self.compute_prediction(image)
top_predictions = self.select_top_predictions(predictions)
result = image.copy()
if self.show_mask_heatmaps:
return self.create_mask_montage(result, top_predictions)
result = self.overlay_boxes(result, top_predictions)
if self.cfg.MODEL.MASK_ON:
result = self.overlay_mask(result, top_predictions)
if self.cfg.MODEL.KEYPOINT_ON:
result = self.overlay_keypoints(result, top_predictions)
result = self.overlay_class_names(result, top_predictions)
return result
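# Hedged usage sketch (the config file path and image name below are
# assumptions, not taken from this file):
#
#   from maskrcnn_benchmark.config import cfg
#   cfg.merge_from_file("e2e_mask_rcnn_R_50_FPN_1x.yaml")   # hypothetical config
#   demo = COCODemo(cfg, confidence_threshold=0.7)
#   composite = demo.run_on_opencv_image(cv2.imread("image.jpg"))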
def compute_prediction(self, original_image):
"""
Arguments:
original_image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions = self.model(image_list)
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
def select_top_predictions(self, predictions):
"""
Select only predictions which have a `score` > self.confidence_threshold,
and returns the predictions in descending order of score
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def compute_colors_for_labels(self, labels):
"""
Simple function that adds fixed colors depending on the class
"""
colors = labels[:, None] * self.palette
colors = (colors % 255).numpy().astype("uint8")
return colors
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computatio
|
simpeg/simpeg
|
SimPEG/electromagnetics/utils/EMUtils.py
|
Python
|
mit
| 149 | 0.006711 |
from ...utils.code_utils import deprecate_module
deprecate_module("EMUtils", "waveform_utils", "0.16.0", error=True)
from .waveform_utils import *
|
aLaix2/O-Nes-Sama
|
DebuggerClient/Breakpoint.py
|
Python
|
gpl-3.0
| 1,060 | 0.00283 |
class Breakpoint():
def __init__(self, breakpointNumber):
self.breakpointNumber = breakpointNumber
class BreakpointPPUByTime(Breakpoint):
def __init__(self, breakpointNumber, scanline, tick):
Breakpoint.__init__(self, breakpointNumber)
self._scanline = scanline
self._tick = tick
def toString(self):
|
return 'Scanline = {self._scanline:s}, Tick = {self._tick:s}'.format(**locals())
class BreakpointPPUByAddress(Breakpoint):
def __init__(self, breakpointNumber, address):
Breakpoint.__init__(self, breakpointNumber)
self._address = address
def toString(self):
return 'Address = {self._address:s}'.format(**locals())
class BreakpointPPUByValue(Breakpoint):
def __init__(self, breakpointNumber, address, value):
Breakpoint.__init__(self, breakpointNumber)
self._address = address
self._value = value
def toString(self):
return 'Address = {self._address:s}, Value = {self._value:s}'.format(**locals())
|
mattiasgiese/squeezie
|
app/master.py
|
Python
|
mit
| 476 | 0.021008 |
#!/usr/bin/env python
from flask import Flask, jsonify, request, abort, render_template
app = Flask(__name__)
@app.route("/",methods=['GET'])
def index():
if request.method == 'GET':
return render_template('index.html')
else:
abort(400)
@app.route("/devices",methods=['GET'])
def devices():
if request.method == 'GET':
return render_template('devices.html')
else:
abort(400)
if __name__ == "__
|
main__":
app.debug = True
app.run(host='0.0.0.0')
|
ConeyLiu/spark
|
python/pyspark/ml/tests/test_param.py
|
Python
|
apache-2.0
| 16,252 | 0.002031 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import sys
import array as pyarray
import unittest
import numpy as np
from pyspark import keyword_only
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import Binarizer, Bucketizer, ElementwiseProduct, IndexToString, \
MaxAbsScaler, VectorSlicer, Word2Vec
from pyspark.ml.linalg import DenseVector, SparseVector, Vectors
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasMaxIter, HasSeed
from pyspark.ml.wrapper import JavaParams
from pyspark.testing.mlutils import check_params, PySparkTestCase, SparkSessionTestCase
if sys.version > '3':
xrange = range
class ParamTypeConversionTests(PySparkTestCase):
"""
Test that param type conversion happens.
"""
def test_int(self):
lr = LogisticRegression(maxIter=5.0)
self.assertEqual(lr.getMaxIter(), 5)
self.assertTrue(type(lr.getMaxIter()) == int)
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
def test_float(self):
lr = LogisticRegression(tol=1)
self.assertEqual(lr.getTol(), 1.0)
self.assertTrue(type(lr.getTol()) == float)
self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
def test_vector(self):
ewp = ElementwiseProduct(scalingVec=[1, 3])
self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
def test_list(self):
l = [0, 1]
for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l), range(len(l)), l),
pyarray.array('l', l), xrange(2), tuple(l)]:
converted = TypeConverters.toList(lst_like)
self.assertEqual(type(converted), list)
self.assertListEqual(converted, l)
def test_list_int(self):
for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
pyarray.array('d', [1.0, 2.0])]:
vs = VectorSlicer(indices=indices)
self.assertListEqual(vs.getIndices(), [1, 2])
|
self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
|
def test_list_float(self):
b = Bucketizer(splits=[1, 4])
self.assertEqual(b.getSplits(), [1.0, 4.0])
self.assertTrue(all([type(v) == float for v in b.getSplits()]))
self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
def test_list_list_float(self):
b = Bucketizer(splitsArray=[[-0.1, 0.5, 3], [-5, 1.5]])
self.assertEqual(b.getSplitsArray(), [[-0.1, 0.5, 3.0], [-5.0, 1.5]])
self.assertTrue(all([type(v) == list for v in b.getSplitsArray()]))
self.assertTrue(all([type(v) == float for v in b.getSplitsArray()[0]]))
self.assertTrue(all([type(v) == float for v in b.getSplitsArray()[1]]))
self.assertRaises(TypeError, lambda: Bucketizer(splitsArray=["a", 1.0]))
self.assertRaises(TypeError, lambda: Bucketizer(splitsArray=[[-5, 1.5], ["a", 1.0]]))
def test_list_string(self):
for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
idx_to_string = IndexToString(labels=labels)
self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
def test_string(self):
lr = LogisticRegression()
for col in ['features', u'features', np.str_('features')]:
lr.setFeaturesCol(col)
self.assertEqual(lr.getFeaturesCol(), 'features')
self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
def test_bool(self):
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class HasThrowableProperty(Params):
def __init__(self):
super(HasThrowableProperty, self).__init__()
self.p = Param(self, "none", "empty param")
@property
def test_property(self):
raise RuntimeError("Test property to raise error when invoked")
class ParamTests(SparkSessionTestCase):
def test_copy_new_parent(self):
testParams = TestParams()
# Copying an instantiated param should fail
with self.assertRaises(ValueError):
testParams.maxIter._copy_new_parent(testParams)
# Copying a dummy param should succeed
TestParams.maxIter._copy_new_parent(testParams)
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
self.assertTrue(testParams.hasParam(u"maxIter"))
def test_resolveparam(self):
testParams = TestParams()
self.assertEqual(testParams._resolveParam(testParams.maxIter), testParams.maxIter)
self.assertEqual(testParams._resolveParam("maxIter"), testParams.maxIter)
self.assertEqual(testParams._resolveParam(u"maxIter"), testParams.maxIter)
if sys.version_info[0] >= 3:
# In Python 3, it is allowed to get/set attributes with non-ascii characters.
|
iemejia/incubator-beam
|
sdks/python/apache_beam/io/external/xlang_kafkaio_it_test.py
|
Python
|
apache-2.0
| 4,950 | 0.005253 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integrat
|
ion test for Python cross-language pipelines for Java KafkaIO."""
from __future__ import absolute_import
import contextlib
import logging
import os
import socket
import subprocess
import time
import typing
import unittest
import apache_beam as beam
from apache_beam.io.external.kafka import ReadFromKafka
from apache_beam.io.external.kafka import WriteToKafka
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
class CrossLanguageKafkaIO(object):
def __init__(self, bootstrap_servers, topic, expansion_service=None):
self.bootstrap_servers = bootstrap_servers
self.topic = topic
self.expansion_service = expansion_service
self.sum_counter = Metrics.counter('source', 'elements_sum')
def build_write_pipeline(self, pipeline):
_ = (
pipeline
| 'Impulse' >> beam.Impulse()
| 'Generate' >> beam.FlatMap(lambda x: range(1000)) # pylint: disable=range-builtin-not-iterating
| 'Reshuffle' >> beam.Reshuffle()
| 'MakeKV' >> beam.Map(lambda x:
(b'', str(x).encode())).with_output_types(
typing.Tuple[bytes, bytes])
| 'WriteToKafka' >> WriteToKafka(
producer_config={'bootstrap.servers': self.bootstrap_servers},
topic=self.topic,
expansion_service=self.expansion_service))
def build_read_pipeline(self, pipeline):
_ = (
pipeline
| 'ReadFromKafka' >> ReadFromKafka(
consumer_config={
'bootstrap.servers': self.bootstrap_servers,
'auto.offset.reset': 'earliest'
},
topics=[self.topic],
expansion_service=self.expansion_service)
| 'Windowing' >> beam.WindowInto(
beam.window.FixedWindows(300),
trigger=beam.transforms.trigger.AfterProcessingTime(60),
accumulation_mode=beam.transforms.trigger.AccumulationMode.
DISCARDING)
| 'DecodingValue' >> beam.Map(lambda elem: int(elem[1].decode()))
| 'CombineGlobally' >> beam.CombineGlobally(sum).without_defaults()
| 'SetSumCounter' >> beam.Map(self.sum_counter.inc))
def run_xlang_kafkaio(self, pipeline):
self.build_write_pipeline(pipeline)
self.build_read_pipeline(pipeline)
pipeline.run(False)
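# Hedged usage sketch (broker address and topic name are assumptions): wire the
# cross-language write and read transforms above into a single Beam pipeline.
#
#   pipeline = beam.Pipeline(options=PipelineOptions(['--runner', 'FlinkRunner']))
#   CrossLanguageKafkaIO('localhost:9092', 'demo_topic').run_xlang_kafkaio(pipeline)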
@unittest.skipUnless(
os.environ.get('LOCAL_KAFKA_JAR'),
"LOCAL_KAFKA_JAR environment var is not provided.")
class CrossLanguageKafkaIOTest(unittest.TestCase):
def get_open_port(self):
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except: # pylint: disable=bare-except
# Above call will fail for nodes that only support IPv6.
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.bind(('localhost', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
@contextlib.contextmanager
def local_kafka_service(self, local_kafka_jar_file):
kafka_port = str(self.get_open_port())
zookeeper_port = str(self.get_open_port())
kafka_server = None
try:
kafka_server = subprocess.Popen(
['java', '-jar', local_kafka_jar_file, kafka_port, zookeeper_port])
time.sleep(3)
yield kafka_port
finally:
if kafka_server:
kafka_server.kill()
def get_options(self):
options = PipelineOptions([
'--runner',
'FlinkRunner',
'--parallelism',
'2',
'--experiment',
'beam_fn_api'
])
return options
def test_kafkaio_write(self):
local_kafka_jar = os.environ.get('LOCAL_KAFKA_JAR')
with self.local_kafka_service(local_kafka_jar) as kafka_port:
options = self.get_options()
p = TestPipeline(options=options)
p.not_use_test_runner_api = True
CrossLanguageKafkaIO('localhost:%s' % kafka_port,
'xlang_kafkaio_test').build_write_pipeline(p)
job = p.run()
job.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
ManiacalLabs/BiblioPixelAnimations
|
BiblioPixelAnimations/circle/swirl.py
|
Python
|
mit
| 586 | 0 |
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import palettes
|
class Swirl(Circle):
COLOR_DEFAULTS = ('palette', palettes.get('three_sixty')),
def __init__(self, layout, angle=12, **kwds):
super().__init__(layout, **kwds)
self.angle = angle
def pre_run(self):
self._step = 0
def step(self, amt=1):
for a in range(0, 360, self.angle):
c = self.palette(self._step)
for i in range(self.ringCount):
self.layout.set(i, a, c)
|
self._step += amt
|
cbentes/texta
|
dataset_importer/document_preprocessor/preprocessors/text_tagger.py
|
Python
|
gpl-3.0
| 3,426 | 0.007589 |
from task_manager.tag_manager.tag_manager import TaggingModel
from task_manager.models import Task
import numpy as np
import json
enabled_tagger_ids = [tagger.pk for tagger in Task.objects.filter(task_type='train_tagger').filter(status='completed')]
enabled_taggers = {}
# Load Tagger models
for _id in enabled_tagger_ids:
tm = TaggingModel()
tm.load(_id)
enabled_taggers[_id] = tm
class TextTaggerPreprocessor(object):
"""Preprocessor implementation for running TEXTA Text Taggers on the selected documents.
"""
def __init__(self, feature_map={}):
self._feature_map = feature_map
def transform(self, documents, **kwargs):
input_features = json.loads(kwargs['text_tagger_preprocessor_feature_names'])
tagger_ids_to_apply = [int(_id) for _id in json.loads(kwargs['text_tagger_preprocessor_taggers'])]
taggers_to_apply = []
if not kwargs.get('text_tagger_preprocessor_feature_names', None):
return documents
# Load tagger models
for _id in tagger_ids_to_apply:
tm = TaggingModel()
tm.load(_id)
taggers_to_apply.append(tm)
for input_feature in input_features:
texts = []
for document in documents:
# Take into account nested fields encoded as: 'field.sub_field'
decoded_text = document
for k in input_feature.split('.'):
# Field might be empty and not included in document
if k in decoded_text:
decoded_text = decoded_text[k]
else:
decoded_text = ''
break
try:
decoded_text.strip().decode()
except AttributeError:
decoded_text.strip()
texts.append(decoded_text)
if not texts:
return documents
## Dies with empty text!
results = []
tagger_descriptions = []
for tagger in taggers_to_apply:
tagger_descriptions.append(tagger.description)
result_vector = tagger.tag(texts)
results.append(result_vector)
results_transposed = np.array(results).transpose()
for i,tagger_ids in enumerate(results_transposed):
positive_tag_ids = np.nonzero(tagger_ids)
positive_tags = [tagger_descriptions[positive_tag_id] for positive_tag_id in positive_tag_ids[0]]
texta_facts = []
if positive_tags:
if 'texta_facts' not in documents[i]:
documents[i]['texta_facts'] = []
for tag in positive_tags:
new_fact = {'fact': 'TEXTA_TAG', 'str_val': tag, 'doc_path': input_feature, 'spans': json.dumps([[0,len(texts[i])]])}
texta_facts.append(new_fact)
documents[i]['texta_facts'].extend(texta_facts)
# Get total tagged documents, get np array of results
total_positives = np.count_nonzero(results)
return {"documents":documents, "meta": {'documents_tagged': total_positives}}
|
kmahyyg/learn_py3
|
adv_feature/adv_feature_iterable.py
|
Python
|
agpl-3.0
| 894 | 0.001119 |
#!/usr/bin/env python3
# -*- coding : utf-8 -*-
from collections import Iterable
from collections import Iterator
isinstance([], Iterable)
isinstance({}, Iterable)
isinstance((), Iterable)
isinstance('abc', Iterable)
isinstance((x for x in range(10)), Iterable)
isinstance(100, Iterable)
# Iterable but not Iterator
isinstance([], Iterator)
isinstance({}, Iterator)
isinstance((), Iterator)
isinstance('abc', Iterator)
isinstance((x for x in range(10)), Iterator)
isinstance(100, Iterator)
# use iter() to turn an Iterable into an Iterator
# an Iterator is a data stream: it has no fixed length and is evaluated lazily
# if you can loop over it with 'for', it is an Iterable
# if you can call next() on it, it is an Iterator
for x in [1, 2, 3, 4, 5]:
pass
# Equals to
it = iter([1, 2, 3, 4, 5])
while True:
try:
x = next(it)
except StopIteration:
break
|
ctmunwebmaster/huxley
|
huxley/utils/zoho.py
|
Python
|
bsd-3-clause
| 1,767 | 0.01245 |
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json
import requests
from django.conf import settings
def get_contact(school):
if not settings.ZOHO_CREDENTIALS:
return
list_url = 'https://invoice.zoho.com/api/v3/contacts?organization_id=' + settings.ORGANIZATION_ID + '&authtoken=' + settings.AUTHTOKEN
contact = {
"company_name_contains": school.name
}
return requests.get(list_url, params=contact).json()["contacts"][0]["contact_id"]
def generate_contact_attributes(school):
return {
"contact_name": school.primary_name,
"company_name": school.name,
"payment_terms": "",
"payment_terms_label": "Due on Receipt",
"currency_id": "",
"website": "",
"custom_fields": [
],
"billing_address": {
"address": "",
"city": "",
"state": "",
"zip": "",
"country": "",
"fax": ""
},
"shipping_address": {
"address": "",
"city": "",
"state": "",
"zip": "",
"country": "",
"fax": ""
},
"contact_persons": [{
"salutation": "",
"first_name": "",
"last_name": "",
"email": school.primary_email,
"phone": "",
"mobile": "",
"is_primary_contact": True
}],
"default_templates": {
"invoice_template_id": "",
"estimate_template_id": "",
"creditnote_template_id": "",
"invoice_email_template_id": "",
"estimate_email_template_id": "",
"creditnote_email_template_id": ""
},
"notes": ""
}
|
yterauchi/primecloud-controller
|
iaas-gw/src/iaasgw/controller/ec2/ec2VolumController.py
|
Python
|
gpl-2.0
| 17,993 | 0.013944 |
# coding: UTF-8
#
# Copyright 2014 by SCSK Corporation.
#
# This file is part of PrimeCloud Controller(TM).
#
# PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# PrimeCloud Controller(TM) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PrimeCloud Controller(TM). If not, see <http://www.gnu.org/licenses/>.
#
from iaasgw.exception.iaasException import IaasException
from iaasgw.log.log import IaasLogger
from iaasgw.module.ec2.ec2module import TagSet
from iaasgw.utils.stringUtils import isNotEmpty, isEmpty
import time
import traceback
class ec2VolumController(object):
logger = IaasLogger()
client = None
conn = None
platforminfo = None
def __init__(self, platforminfo, ec2iaasclient, conn):
self.client = ec2iaasclient
self.conn = conn
self.platforminfo = platforminfo
def startVolumes(self, instanceNo) :
# Fetch the volume information
table = self.conn.getTable("AWS_VOLUME")
volumes = self.conn.select(table.select(table.c.INSTANCE_NO==instanceNo))
for volume in volumes :
self.startVolume(instanceNo, volume["VOLUME_NO"])
def startVolume(self, instanceNo, volumeNo) :
table = self.conn.getTable("AWS_VOLUME")
volume = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
# Skip if an instance ID is already set
if (isNotEmpty(volume["INSTANCE_ID"])) :
return
if (isEmpty(volume["VOLUME_ID"])) :
# ボリュームIDがない場合は新規作成
self.createVolume(instanceNo, volumeNo)
# Wait for the volume to be created
self.waitCreateVolume(instanceNo, volumeNo)
# Tag the volume
self.createTag(volumeNo)
# Attach the volume
self.attachVolume(instanceNo, volumeNo)
# Wait for the volume to attach
self.waitAttachVolume(instanceNo, volumeNo)
def stopVolumes(self, instanceNo) :
# Fetch the volume information
awsVolumes = self.getAwsVolumes(instanceNo)
for volume in awsVolumes :
self.stopVolume(instanceNo, volume["VOLUME_NO"])
def stopVolume(self, instanceNo, volumeNo):
table = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
# Skip if there is no volume ID
if (isEmpty(awsVolume["VOLUME_ID"])):
return
# Skip if there is no instance ID
if (isEmpty(awsVolume["INSTANCE_ID"])) :
|
return;
try :
# Detach the volume
self.detachVolume(instanceNo, volumeNo)
# Wait for the volume to detach
self.waitDetachVolume(instanceNo, volumeNo)
except Exception, e:
self.logger.error(traceback.format_exc())
# If the data is inconsistent (e.g. the instance terminated abnormally), only log a warning and clean up
self.logger.warn(e.message);
table = self.conn.getTable("AWS_VOLUME")
updateDict = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = "error"
updateDict["INSTANCE_ID"] = None
sql = table.update(table.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def getAwsVolumes(self, instanceNo) :
table = self.conn.getTable("AWS_VOLUME")
awsVolumes = self.conn.select(table.select(table.c.INSTANCE_NO==instanceNo))
if (awsVolumes or len(awsVolumes) < 1) :
return awsVolumes;
# Check the platform
retVolumes = []
for awsVolume in awsVolumes:
# If the platform number differs, the data is inconsistent, so log a warning
if (self.client.getPlatformNo() != awsVolume["PLATFORM_NO"]) :
self.logger.warn(None, "EPROCESS-000201",[awsVolume["VOLUME_NAME"], awsVolume["PLATFORM_NO"], self.client.getPlatformNo()])
else :
retVolumes.append(awsVolume)
return retVolumes;
def waitVolume(self, volumeId) :
# Wait for the snapshot processing to finish
volume = None
while (True):
volume = self.client.describeVolume(volumeId);
status = volume.status
if status == "available" or status == "in-use" or status == "error":
break
if status != "creating" and status != "deleting" :
# Unexpected status
raise IaasException("EPROCESS-000112", [volumeId, status,])
return volume;
def createVolume(self, instanceNo, volumeNo) :
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
# Create the volume
volume = self.client.createVolume(awsVolume["AVAILABILITY_ZONE"], awsVolume["SIZE"], awsVolume["SNAPSHOT_ID"])
# Write the event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"], "AwsEbsCreate",["EC2",])
# Update the database
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["VOLUME_ID"] = volume.volumeId
updateDict["STATUS"] = volume.status
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def waitCreateVolume(self, instanceNo, volumeNo) :
tableAWSVOL = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
volumeId = awsVolume["VOLUME_ID"]
# Wait for the volume to be created
volume = None
try :
volume = self.waitVolume(volumeId)
if volume.status != "available":
# Volume creation failed
raise IaasException("EPROCESS-000113", [volumeId, volume.status,])
# Write a log message
self.logger.info(None, "IPROCESS-100122", [volumeId,])
except Exception:
self.logger.error(traceback.format_exc())
# Volume creation failed
awsVolume["VOLUME_ID"] = None
awsVolume["STATUS"] = None
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==awsVolume["VOLUME_NO"], values=awsVolume)
self.conn.execute(sql)
raise
# Write the event log
tableCPNT = self.conn.getTable("COMPONENT")
component = self.conn.selectOne(tableCPNT.select(tableCPNT.c.COMPONENT_NO==awsVolume["COMPONENT_NO"]))
componentName = None
if component:
componentName = component["COMPONENT_NAME"]
tableINS = self.conn.getTable("INSTANCE")
instance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
self.conn.debug(instance["FARM_NO"], awsVolume["COMPONENT_NO"], componentName, instanceNo, instance["INSTANCE_NAME"],
"AwsEbsCreateFinish",["EC2", awsVolume["VOLUME_ID"], awsVolume["SIZE"]])
# Update the database
updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==volumeNo))
updateDict["STATUS"] = volume.status
sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict["VOLUME_NO"], values=updateDict)
self.conn.execute(sql)
def checkAvailableVolume(self, instanceNo, volumeNo) :
table = self.conn.getTable("AWS_VOLUME")
awsVolume = self.conn.selectOne(table.select(table.c.VOLUME_NO==volumeNo))
volumeId = aws
|
nextsmsversion/macchina.io
|
platform/JS/V8/v8-3.28.4/PRESUBMIT.py
|
Python
|
apache-2.0
| 7,096 | 0.008737 |
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Top-level presubmit script for V8.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
def _V8PresubmitChecks(input_api, output_api):
"""Runs the V8 presubmit checks."""
import sys
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
from presubmit import CheckGeneratedRuntimeTests
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError("C++ lint check failed"))
if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
if not CheckGeneratedRuntimeTests(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"Generated runtime tests check failed"))
return results
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for line_num, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.CheckOwners(
input_api, output_api, source_file_filter=None))
results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
return results
def _SkipTreeCheck(input_api, output_api):
"""Check the env var whether we want to skip tree check.
Only skip if src/version.cc has been updated."""
src_version = 'src/version.cc'
FilterFile = lambda file: file.LocalPath() == src_version
  if not input_api.AffectedSourceFiles(FilterFile):
return False
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api):
"""Checks usage of LOG= flag in the commit message."""
results = []
if input_api.change.BUG and not 'LOG' in input_api.change.tags:
results.append(output_api.PresubmitError(
'An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.'))
return results
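# Illustrative only (added comment, not part of the original V8 script): a
# commit message that satisfies this check might look like
#
#   Fix off-by-one in the parser
#
#   BUG=v8:1234
#   LOG=Y
#
# where BUG= references an issue and LOG=Y opts the change into the change log.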
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
  if not _SkipTreeCheck(input_api, output_api):
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
def GetPreferredTryMasters(project, change):
return {
'tryserver.v8': {
'v8_linux_rel': set(['defaulttests']),
'v8_linux_dbg': set(['defaulttests']),
'v8_linux_nosnap_rel': set(['defaulttests']),
'v8_linux_nosnap_dbg': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),
'v8_linux_arm_dbg': set(['defaulttests']),
'v8_linux_arm64_rel': set(['defaulttests']),
'v8_linux_layout_dbg': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
},
}
| subeax/grab | grab/spider/cache_backend/postgresql.py | Python | mit | 6,990 | 0.001001 |
"""
CacheItem interface:
'_id': string,
'url': string,
'response_url': string,
'body': string,
'head': string,
'response_code': int,
'cookies': None,#grab.response.cookies,
"""
from hashlib import sha1
import zlib
import logging
import marshal
import time
from grab.response import Response
from grab.cookie import CookieManager
from grab.util.py3k_support import *
logger = logging.getLogger('grab.spider.cache_backend.postgresql')
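# Illustrative only (added comment): a concrete cache item matching the
# interface described in the module docstring might look like
#
#   {'_id': '9f2c...', 'url': 'http://example.com/',
#    'response_url': 'http://example.com/', 'body': '<html>...</html>',
#    'head': 'HTTP/1.1 200 OK\r\n...', 'response_code': 200, 'cookies': None}
#
# (all values are made up, not taken from the original project).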
class CacheBackend(object):
def __init__(self, database, use_compression=True, spider=None, **kwargs):
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED
self.spider = spider
self.conn = psycopg2.connect(dbname=database, **kwargs)
self.conn.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
self.cursor = self.conn.cursor()
res = self.cursor.execute("""
SELECT
TABLE_NAME
FROM
INFORMATION_SCHEMA.TABLES
WHERE
TABLE_TYPE = 'BASE TABLE'
AND
table_schema NOT IN ('pg_catalog', 'information_schema')"""
)
found = False
for row in self.cursor:
if row[0] == 'cache':
found = True
break
if not found:
self.create_cache_table()
def create_cache_table(self):
self.cursor.execute('BEGIN')
self.cursor.execute('''
CREATE TABLE cache (
id BYTEA NOT NULL CONSTRAINT primary_key PRIMARY KEY,
timestamp INT NOT NULL,
                data BYTEA NOT NULL
);
CREATE INDEX timestamp_idx ON cache (timestamp);
''')
self.cursor.execute('COMMIT')
def get_item(self, url, timeout=None):
"""
Returned item should have specific interface. See module docstring.
"""
_hash = self.build_hash(url)
with self.spider.save_timer('cache.read.postgresql_query'):
self.cursor.execute('BEGIN')
if timeout is None:
query = ""
else:
ts = int(time.time()) - timeout
query = " AND timestamp > %d" % ts
# py3 hack
if PY3K:
sql = '''
SELECT data
FROM cache
WHERE id = {0} %(query)s
''' % {'query': query}
else:
sql = '''
SELECT data
FROM cache
WHERE id = %%s %(query)s
''' % {'query': query}
res = self.cursor.execute(sql, (_hash,))
row = self.cursor.fetchone()
self.cursor.execute('COMMIT')
if row:
data = row[0]
return self.unpack_database_value(data)
else:
return None
def unpack_database_value(self, val):
with self.spider.save_timer('cache.read.unpack_data'):
dump = zlib.decompress(str(val))
return marshal.loads(dump)
def build_hash(self, url):
with self.spider.save_timer('cache.read.build_hash'):
if isinstance(url, unicode):
utf_url = url.encode('utf-8')
else:
utf_url = url
return sha1(utf_url).hexdigest()
def remove_cache_item(self, url):
_hash = self.build_hash(url)
self.cursor.execute('begin')
self.cursor.execute('''
DELETE FROM cache WHERE id = x%s
''', (_hash,))
self.cursor.execute('commit')
def load_response(self, grab, cache_item):
grab.fake_response(cache_item['body'])
body = cache_item['body']
def custom_prepare_response_func(transport, g):
response = Response()
response.head = cache_item['head']
response.body = body
response.code = cache_item['response_code']
response.download_size = len(body)
response.upload_size = 0
response.download_speed = 0
# Hack for deprecated behaviour
if 'response_url' in cache_item:
response.url = cache_item['response_url']
else:
                logger.debug('Your cache contains items without a `response_url` key. This is a deprecated data format. Please re-download your cache or manually build the `response_url` keys.')
response.url = cache_item['url']
response.parse()
response.cookies = CookieManager(transport.extract_cookiejar())
return response
grab.process_request_result(custom_prepare_response_func)
def save_response(self, url, grab):
body = grab.response.body
item = {
'url': url,
'response_url': grab.response.url,
'body': body,
'head': grab.response.head,
'response_code': grab.response.code,
'cookies': None,
}
self.set_item(url, item)
def set_item(self, url, item):
import psycopg2
_hash = self.build_hash(url)
data = self.pack_database_value(item)
self.cursor.execute('BEGIN')
ts = int(time.time())
# py3 hack
if PY3K:
sql = '''
UPDATE cache SET timestamp = {0}, data = {1} WHERE id = {2};
INSERT INTO cache (id, timestamp, data)
SELECT {2}, {0}, {1} WHERE NOT EXISTS (SELECT 1 FROM cache WHERE id = {2});
'''
else:
sql = '''
UPDATE cache SET timestamp = %s, data = %s WHERE id = %s;
INSERT INTO cache (id, timestamp, data)
SELECT %s, %s, %s WHERE NOT EXISTS (SELECT 1 FROM cache WHERE id = %s);
'''
        res = self.cursor.execute(sql, (ts, psycopg2.Binary(data), _hash, _hash, ts, psycopg2.Binary(data), _hash))
self.cursor.execute('COMMIT')
def pack_database_value(self, val):
dump = marshal.dumps(val)
return zlib.compress(dump)
def clear(self):
self.cursor.execute('BEGIN')
self.cursor.execute('TRUNCATE cache')
self.cursor.execute('COMMIT')
def has_item(self, url, timeout=None):
"""
Test if required item exists in the cache.
"""
_hash = self.build_hash(url)
with self.spider.save_timer('cache.read.postgresql_query'):
if timeout is None:
query = ""
else:
ts = int(time.time()) - timeout
query = " AND timestamp > %d" % ts
res = self.cursor.execute('''
SELECT id
FROM cache
WHERE id = %%s %(query)s
LIMIT 1
''' % {'query': query},
(_hash,))
row = self.cursor.fetchone()
return True if row else False
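# Illustrative usage sketch (added comment; the database name, credentials and
# URL are hypothetical, and a configured spider object is assumed):
#
#   backend = CacheBackend('grab_cache', user='grab', spider=spider)
#   if not backend.has_item('http://example.com/', timeout=3600):
#       ...  # fetch the page, then backend.save_response(url, grab)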
| spacemeowx2/remote-web | client/client.py | Python | mit | 1,269 | 0.01576 |
import websocket
import package
import thread
import time
import run
import random
import config
import dht
import logging
logging.basicConfig()
def on_message(ws, message):
#d = package.LoadPackage(message)
#res = run.PackageParser(d)
#ws.send(package.DumpPackage(res))
print message
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
deviceConfig = config.DeviceConfig()
deviceConfig.Update("device.conf")
HSPackage = package.GenSH(deviceConfig)
#print HSPackage,123
ws.send(HSPackage)
def SendRandomData(*args):
while True:
humdi, temp = dht.GetData()
if ( humdi == -1 or temp == -1):
continue
dump = package.SensorDump(0, temp)
dump1 = package.SensorDump(1, humdi)
ws.send(dump)
ws.send(dump1)
time.sleep(1)
thread.start_new_thread(SendRandomData, ())
if __name__ == "__main__":
ws = websocket.WebSocketApp("ws://ali.imspace.cn:3000/device",
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever()
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/firewall_cmds.py | Python | gpl-3.0 | 12,747 | 0.005021 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google Compute Engine firewalls."""
import socket
from google.apputils import appcommands
import gflags as flags
from gcutil_lib import command_base
from gcutil_lib import gcutil_errors
from gcutil_lib import utils
FLAGS = flags.FLAGS
class FirewallCommand(command_base.GoogleComputeCommand):
"""Base command for working with the firewalls collection."""
print_spec = command_base.ResourcePrintSpec(
summary=['name', 'network'],
field_mappings=(
('name', 'name'),
('description', 'description'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
detail=(
('name', 'name'),
('description', 'description'),
('creation-time', 'creationTimestamp'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
sort_by='name')
resource_collection_name = 'firewalls'
def __init__(self, name, flag_values):
super(FirewallCommand, self).__init__(name, flag_values)
def GetDetailRow(self, result):
"""Returns an associative list of items for display in a detail table.
Args:
result: A dict returned by the server.
Returns:
A list.
"""
data = []
# Add the rules
for allowed in result.get('allowed', []):
as_string = str(allowed['IPProtocol'])
if allowed.get('ports'):
as_string += ': %s' % ', '.join(allowed['ports'])
data.append(('allowed', as_string))
return data
class FirewallRules(object):
"""Class representing the list of a firewall's rules.
This class is only used for parsing a firewall from command-line flags,
for printing the firewall, we simply dump the JSON.
"""
@staticmethod
def ParsePortSpecs(port_spec_strings):
"""Parse the port-specification portion of firewall rules.
This takes the value of the 'allowed' flag and builds the
corresponding firewall rules, excluding the 'source' fields.
Args:
port_spec_strings: A list of strings specifying the port-specific
components of a firewall rule. These are of the form
"(<protocol>)?(:<port>('-'<port>)?)?"
Returns:
A list of dict values containing a protocol string and a list
of port range strings. This is a substructure of the firewall
rule dictionaries, which additionally contain a 'source' field.
Raises:
ValueError: If any of the input strings are malformed.
"""
def _AddToPortSpecs(protocol, port_string, port_specs):
"""Ensure the specified rule for this protocol allows the given port(s).
If there is no port_string specified it implies all ports are allowed,
and whatever is in the port_specs map for that protocol get clobbered.
This method also makes sure that any protocol entry without a ports
member does not get further restricted.
Args:
protocol: The protocol under which the given port range is allowed.
port_string: The string specification of what ports are allowed.
port_specs: The mapping from protocols to firewall rules.
"""
port_spec_entry = port_specs.setdefault(protocol,
{'IPProtocol': str(protocol),
'ports': []})
if 'ports' in port_spec_entry:
# We only handle the 'then' case because in the other case the
# existing entry already allows all ports.
if not port_string:
# A missing 'ports' field indicates all ports are allowed.
port_spec_entry.pop('ports')
else:
port_spec_entry['ports'].append(port_string)
port_specs = {}
for port_spec_string in port_spec_strings:
protocol = None
port_string = None
parts = port_spec_string.split(':')
if len(parts) > 2:
raise ValueError('Invalid allowed entry: %s' %
port_spec_string)
elif len(parts) == 2:
if parts[0]:
protocol = utils.ParseProtocol(parts[0])
port_string = utils.ReplacePortNames(parts[1])
else:
protocol = utils.ParseProtocol(parts[0])
if protocol:
_AddToPortSpecs(protocol, port_string, port_specs)
else:
# Add entries for both UPD and TCP
_AddToPortSpecs(socket.getprotobyname('tcp'), port_string, port_specs)
_AddToPortSpecs(socket.getprotobyname('udp'), port_string, port_specs)
return port_specs.values()
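  # Worked example (added comment, not from the original source): an input of
  # ['tcp:80', 'udp:5000-6000', 'icmp'] yields rules roughly like
  #   [{'IPProtocol': '6', 'ports': ['80']},
  #    {'IPProtocol': '17', 'ports': ['5000-6000']},
  #    {'IPProtocol': '1'}]
  # where the numeric protocol values come from utils.ParseProtocol /
  # socket.getprotobyname; the exact representation depends on those helpers.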
def __init__(self, allowed, allowed_ip_sources):
self.port_specs = FirewallRules.ParsePortSpecs(allowed)
self.source_ranges = allowed_ip_sources
self.source_tags = []
self.target_tags = []
def SetTags(self, source_tags, target_tags):
self.source_tags = sorted(set(source_tags))
self.target_tags = sorted(set(target_tags))
def AddToFirewall(self, firewall):
if self.source_ranges:
firewall['sourceRanges'] = self.source_ranges
if self.source_tags:
firewall['sourceTags'] = self.source_tags
if self.target_tags:
firewall['targetTags'] = self.target_tags
firewall['allowed'] = self.port_specs
class AddFirewall(FirewallCommand):
"""Create a new firewall rule to allow incoming traffic to a network."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(AddFirewall, self).__init__(name, flag_values)
flags.DEFINE_string('description',
'',
'An optional Firewall description.',
flag_values=flag_values)
flags.DEFINE_string('network',
'default',
'Specifies which network this firewall applies to.',
flag_values=flag_values)
flags.DEFINE_list('allowed',
None,
'[Required] Specifies a list of allowed ports for this '
'firewall. Each entry must be a combination of the '
'protocol and the port or port range in the following '
'form: \'<protocol>:<port>-<port>\' or '
'\'<protocol>:<port>\'. To specify multiple ports, '
'protocols, or ranges, provide them as comma'
'-separated entries. For example: '
'\'--allowed=tcp:ssh,udp:5000-6000,tcp:80,icmp\'.',
flag_values=flag_values)
flags.DEFINE_list('allowed_ip_sources',
[],
'Specifies a list of IP addresses that are allowed '
'to talk to instances within the network, through the '
                      '<protocols>:<ports> described by the \'--allowed\' '
'flag. If no IP or tag sources are listed, all sources '
'will be allowed.',
flag_values=flag_values)
flags.DEFINE_list('allowed_tag_sources',
[],
'Specifies a list of instance tags that are allowed to '
'talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If specifying multiple tags, provide them as '
'comma-separated entries. For example, '
'\'--allowed_tag_sources=www,databas
| getsentry/freight | freight/vcs/base.py | Python | apache-2.0 | 1,987 | 0 |
import os
import os.path
from freight.constants import PROJECT_ROOT
from freight.exceptions import CommandError
class UnknownRevision(CommandError):
pass
class Vcs(object):
ssh_connect_path = os.path.join(PROJECT_ROOT, "bin", "ssh-connect")
def __init__(self, workspace, url, username=None):
self.url = url
self.username = username
self.workspace = workspace
self._path_exists = None
@property
def path(self):
return self.workspace.path
def get_default_env(self):
return {}
def run(self, command, capture=False, workspace=None, *args, **kwargs):
if workspace is None:
workspace = self.workspace
if not self.exists(workspace=workspace):
            kwargs.setdefault("cwd", None)
env = kwargs.pop("env", {})
for key, value in self.get_default_env().items():
            env.setdefault(key, value)
env.setdefault("FREIGHT_SSH_REPO", self.url)
kwargs["env"] = env
if capture:
handler = workspace.capture
else:
handler = workspace.run
rv = handler(command, *args, **kwargs)
if isinstance(rv, bytes):
rv = rv.decode("utf8")
if isinstance(rv, str):
return rv.strip()
return rv
def exists(self, workspace=None):
if workspace is None:
workspace = self.workspace
return os.path.exists(workspace.path)
def clone_or_update(self):
if self.exists():
self.update()
else:
self.clone()
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def checkout(self, ref):
raise NotImplementedError
def get_sha(self, ref):
"""
Given a `ref` return the fully qualified version.
"""
raise NotImplementedError
def get_default_revision(self):
raise NotImplementedError
| hillscottc/quiz2 | quiz2/urls.py | Python | agpl-3.0 | 1,275 | 0.002353 |
"""Top level site urls."""
from django.conf.urls import patterns, include, url
from quiz2 import views
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', views.home, name='home'),
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
url(r'^admin/', include(admin.site.urls)),
url(r'^quiz/', include('quiz2.apps.quiz.urls',
app_name='quizapp', namespace='quizapp'
)),
url(r'^user/account/$', views.user_account, name='user_account'),
url(r'^user/password/reset/$',
'django.contrib.auth.views.password_reset',
{'post_reset_redirect' : '/user/password/reset/done/'},
name="password_reset"),
(r'^user/password/reset/done/$',
'django.contrib.auth.views.password_reset_done'),
(r'^user/password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm',
{'post_reset_redirect' : '/user/password/done/'}),
(r'^user/password/done/$',
'django.contrib.auth.views.password_reset_complete'),
)
| simontakite/sysadmin | pythonscripts/practicalprogramming/functions/days_bad.py | Python | gpl-2.0 | 1,684 | 0.000594 |
def get_weekday(current_weekday, days_ahead):
""" (int, int) -> int
Return which day of the week it will be days_ahead days from
current_weekday.
current_weekday is the current day of the week and is in the range 1-7,
indicating whether today is Sunday (1), Monday (2), ..., Saturday (7).
days_ahead is the number of days after today.
>>> get_weekday(3, 1)
4
>>> get_weekday(6, 1)
7
>>> get_weekday(7, 1)
1
>>> get_weekday(1, 0)
1
>>> get_weekday(4, 7)
4
>>> get_weekday(7, 72)
2
"""
return current_weekday + days_ahead % 7
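# Note (added comment, not in the original file): `%` binds tighter than `+`,
# so this returns current_weekday + (days_ahead % 7) and never wraps back into
# the 1-7 range -- e.g. get_weekday(7, 1) gives 8 rather than the documented 1,
# which appears to be the deliberate bug in this "days_bad" exercise.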
def days_difference(day1, day2):
""" (int, int) -> int
Return the number of days between day1 and day2, which are both
in the range 1-365 (thus indicating the day of the year).
>>> days_difference(200, 224)
24
>>> days_difference(50, 50)
0
>>> days_difference(100, 99)
-1
"""
return day2 - day1
def get_birthday_weekday(current_weekday, current_day, birthday_day):
""" (int, int, int) -> int
Return the day of the week it will be on birthday_day, given that
the day of the week is current_weekday and the day of the year is
current_day.
current_weekday is the current day of the week and is in the range 1-7,
indicating whether today is Sunday (1), Monday (2), ..., Saturday (7).
current_day and birthday_day are both in the range 1-365.
>>> get_birthday_weekday(5, 3, 4)
6
>>> get_birthday_weekday(5, 3, 116)
6
>>> get_birthday_weekday(6, 116, 3)
5
"""
days_diff = days_difference(current_day, birthday_day)
    return get_weekday(current_weekday, days_diff)
| kingsdigitallab/kdl-django | kdl/settings/liv.py | Python | mit | 1,127 | 0 |
from .base import * # noqa
INTERNAL_IPS = INTERNAL_IPS + ('', )
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'app_kdl_liv',
'USER': 'app_kdl',
'PASSWORD': '',
'HOST': ''
},
}
# -----------------------------------------------------------------------------
# GLOBALS FOR JS
# -----------------------------------------------------------------------------
# Google Analytics ID
GA_ID = 'UA-67707155-1'
# -----------------------------------------------------------------------------
# Django Extensions
# http://django-extensions.readthedocs.org/en/latest/
# -----------------------------------------------------------------------------
try:
import django_extensions # noqa
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
except ImportError:
    pass
# -----------------------------------------------------------------------------
# Local settings
# -----------------------------------------------------------------------------
try:
from .local import * # noqa
except ImportError:
pass
| XeryusTC/projman | projects/urls.py | Python | mit | 1,477 | 0 |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from projects import views
urlpatterns = [
url(r'^$', views.MainPageView.as_view(), name='main'),
# Inlist
url(r'^inlist/$', views.InlistView.as_view(), name='inlist'),
url(r'^inlist/(?P<pk>[0-9]+)/delete/$', views.InlistItemDelete.as_view(),
name='delete_inlist'),
url(r'^inlist/(?P<pk>[0-9]+)/convert/action/$',
views.InlistItemToActionView.as_view(), name='convert_inlist_action'),
url(r'^inlist/(?P<inlistitem>[0-9]+)/convert/project/$',
views.CreateProjectView.as_view(), name='convert_inlist_project'),
# Actions
url(r'^actions/(?P<pk>[0-9]+)/delete/$',
views.ActionlistItemDelete.as_view(), name='delete_actionlist'),
url(r'^actions/(?P<pk>[0-9]+)/complete/$',
views.ActionCompleteView.as_view(), name='complete_action'),
url(r'^actions/(?P<pk>[0-9]+)/edit/$', views.EditActionView.as_view(),
name='edit_action'),
# Projects
    url(r'^project/(?P<pk>[0-9]+)/$', views.ProjectView.as_view(),
name='project'),
url(r'^project/create/$', views.CreateProjectView.as_view(),
name='create_project'),
url(r'project/(?P<pk>[0-9]+)/edit/$', views.EditProjectView.as_view(),
name='edit_project'),
url(r'project/(?P<pk>[0-9]+)/delete/$', views.DeleteProjectView.as_view(),
name='delete'),
url(r'sort/actions/$', views.ActionlistSortView.as_view(),
name='sort_actions'),
]
| QJonny/spin_emulator | pydevin/devinManager.py | Python | lgpl-2.1 | 2,573 | 0.026817 |
from pydevin import *
import math
# ball parameters definitions
BALL_POS_Y_MAX = 115
BALL_POS_Y_MIN = 5
BALL_POS_Y_CENTER = (BALL_POS_Y_MAX + BALL_POS_Y_MIN) / 2.0
BALL_POS_X_MAX = 125
BALL_POS_X_MIN = 20
BALL_POS_X_CENTER = (BALL_POS_X_MAX + BALL_POS_X_MIN) / 2.0
A_X = -1.0/(BALL_POS_X_MAX - BALL_POS_X_CENTER)
B_X = -(A_X)*BALL_POS_X_CENTER
A_Y = -1.0/(BALL_POS_Y_MIN - BALL_POS_Y_CENTER)
B_Y = -(A_Y)*BALL_POS_Y_CENTER
# ball tracking
x_buffer = [ 0 for i in range(16) ]
y_buffer = [ 0 for i in range(16) ]
total_sum_x = 0
total_sum_y = 0
curr_index = 0
# end of ball tracking
# ball parameters
x_pos = 0
y_pos = 0
f_x_pos = 0.0
f_y_pos = 0.0
# end of ball parameters
pos_computed = 0
# motor params
alpha_x = 1 # l=1500, h=2500
beta_x = 0
alpha_y = 1 # l=1500, h=2500
beta_y = 0
pdev = PyDevin()
pdev.init()
def norm(x, y):
return math.sqrt(x*x + y*y)
def r_range(v, l, h):
if(v < l):
return l
elif(v > h):
return h
return v
# ball tracking
# normalizes ball position
def normalize_ball_params():
global A_X, A_Y, B_X, B_Y, f_x_pos, f_y_pos
f_x_pos = r_range(A_X*x_pos + B_X, -1.0, 1.0)
f_y_pos = r_range(A_Y*y_pos + B_Y, -1.0, 1.0)
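# Worked example (added comment; the numbers follow from the constants above):
# BALL_POS_X_CENTER = 72.5 and BALL_POS_X_MAX = 125 give A_X = -1/52.5 and
# B_X = 72.5/52.5, so x_pos = 72.5 maps to 0.0 and x_pos = 125 maps to -1.0;
# r_range then clips anything outside [-1.0, 1.0].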
def compute_pos(x_cur, y_cur):
global pos_computed, x_pos, y_pos, curr_index, total_sum_x, total_sum_y, x_buffer, y_buffer
if(pos_computed == 0 or norm(x_pos - x_cur, y_pos - y_cur) < 100):
# this is a very efficient way to average
# over 16 position samples without any sum
# or division
total_sum_x = total_sum_x - x_buffer[curr_index] + x_cur
x_buffer[curr_index] = x_cur
        total_sum_y = total_sum_y - y_buffer[curr_index] + y_cur
y_buffer[curr_index] = y_cur
        x_pos = total_sum_x >> 4 # division by 16
y_pos = total_sum_y >> 4
normalize_ball_params()
if(pos_computed == 0 and curr_index == 15):
pos_computed = 1
curr_index = (curr_index + 1) % 16
def cameraEvent():
global pdev
key = pdev.get_camera()
# raw position extraction
y_cur = ((key & 0x7F))
x_cur = (((key >> 8) & 0x7F))
pol = (key >> 7) & 0x01
check = (key >> 15) & 0x01
if(pol == 1):
compute_pos(x_cur, y_cur)
normalize_ball_params()
def getCamera():
global f_x_pos, f_y_pos
return (f_x_pos, f_y_pos)
def setMotorRange(l, h):
global alpha_x, beta_x, alpha_y, beta_y
alpha_x = 1000.0 / (h - l)
beta_x = 1680.0 - 1000.0*(l/(h-l))
alpha_y = 1000.0 / (h - l)
beta_y = 1450.0 - 1000.0*(l/(h-l))
def sendCommand(x, y):
global alpha_x, beta_x, alpha_y, beta_y
f_x = alpha_x*x + beta_x
f_y = alpha_y*y + beta_y
pdev.send_motor(int(f_y) | (int(f_x) << 16 ))
| houssine78/addons | mrp_bom_dismantling/models/res_config.py | Python | agpl-3.0 | 1,008 | 0 |
# -*- coding: utf-8 -*-
# © 2016 Cyril Gaudin (Camptocamp)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models
class MrpConfigSettings(models.TransientModel):
""" Add settings for dismantling BOM.
"""
_inherit = 'mrp.config.settings'
    dismantling_product_choice = fields.Selection([
(0, "Main BOM product will be set randomly"),
(1, "User have to choose which component to set as main BOM product")
], "Dismantling BOM")
@api.multi
def get_default_dismantling_product_choice(self, fields):
product_choice = self.env["ir.config_parameter"].get_param(
'mrp.bom.dismantling.product_choice', default=0
)
return {'dismantling_product_choice': product_choice}
@api.multi
def set_dismantling_product_choice(self):
self.env["ir.config_parameter"].set_param(
'mrp.bom.dismantling.product_choice',
self.dismantling_product_choice
)
| hosseinoliabak/learningpy | 09_5_dictionary.py | Python | gpl-3.0 | 877 | 0.004561 |
'''
9.5 First, you have to resolve assignment9_3. This is slightly different.
This program records the domain name (instead of the address) where the message
was sent from instead of who the mail came from (i.e., the whole email address).
At the end of the program, print out the contents of your dictionary.
Sample:
python assignment9_5_dictionary.py
{'media.berkeley.edu': 4, 'uct.ac.za': 6, 'umich.edu': 7,
'gmail.com': 1, 'caret.cam.ac.uk': 1, 'iupui.edu': 8}
'''
dDomain = dict()
try:
flHand = open("mbox-short.txt")
except:
print('There is no "mbox-short.txt" file in the same folder as this script.')
else:
for sLine in flHand:
        if not sLine.startswith('From '):
continue
lWords = sLine.split()
lEmailDomain = lWords[1].split('@')
dDomain[lEmailDomain[1]] = dDomain.get(lEmailDomain[1], 0) + 1
print (dDomain)
| jonathanmorgan/django_reference_data | migrations/0005_auto__add_field_reference_domain_external_id__add_field_reference_doma.py | Python | gpl-3.0 | 5,852 | 0.00769 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Reference_Domain.external_id'
db.add_column(u'django_reference_data_reference_domain', 'external_id',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Reference_Domain.guid'
db.add_column(u'django_reference_data_reference_domain', 'guid',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Reference_Domain.external_id'
db.delete_column(u'django_reference_data_reference_domain', 'external_id')
# Deleting field 'Reference_Domain.guid'
        db.delete_column(u'django_reference_data_reference_domain', 'guid')
models = {
u'django_reference_data.postal_code': {
'Meta': {'object_name': 'Postal_Code'},
'admin_code1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_code2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_code3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_name1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_name2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_name3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'lat_long_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'django_reference_data.reference_domain': {
'Meta': {'object_name': 'Reference_Domain'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'domain_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'domain_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'domain_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_multimedia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_news': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'long_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'source_details': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['django_reference_data']
| SpheMakh/Stimela | stimela/utils/__init__.py | Python | gpl-2.0 | 11,642 | 0.002233 |
import os
import sys
import json
import yaml
import time
import tempfile
import inspect
import warnings
import re
import math
import codecs
class StimelaCabRuntimeError(RuntimeError):
pass
class StimelaProcessRuntimeError(RuntimeError):
pass
CPUS = 1
from .xrun_poll import xrun
def assign(key, value):
frame = inspect.currentframe().f_back
frame.f_globals[key] = value
def readJson(conf):
with open(conf, "r") as _std:
jdict = yaml.safe_load(_std)
return jdict
def writeJson(config, dictionary):
with codecs.open(config, 'w', 'utf8') as std:
std.write(json.dumps(dictionary, ensure_ascii=False))
def get_Dockerfile_base_image(image):
if os.path.isfile(image):
dockerfile = image
else:
dockerfile = "{:s}/Dockerfile".format(image)
with open(dockerfile, "r") as std:
_from = ""
for line in std.readlines():
if line.startswith("FROM"):
_from = line
return _from
def change_Dockerfile_base_image(path, _from, label, destdir="."):
if os.path.isfile(path):
dockerfile = path
dirname = os.path.dirname(path)
else:
dockerfile = "{:s}/Dockerfile".format(path)
dirname = path
with open(dockerfile, "r") as std:
lines = std.readlines()
for line in lines:
if line.startswith("FROM"):
lines.remove(line)
temp_dir = tempfile.mkdtemp(
prefix="tmp-stimela-{:s}-".format(label), dir=destdir)
xrun(
"cp", ["-r", "{:s}/Dockerfile {:s}/src".format(dirname, dirname), temp_dir])
dockerfile = "{:s}/Dockerfile".format(temp_dir)
with open(dockerfile, "w") as std:
std.write("{:s}\n".format(_from))
for line in lines:
std.write(line)
return temp_dir, dockerfile
def get_base_images(logfile, index=1):
with opEn(logfile, "r") as std:
string = std.read()
separator = "[================================DONE==========================]"
log = string.split(separator)[index-1]
images = []
for line in log.split("\n"):
if line.find("<=BASE_IMAGE=>") > 0:
tmp = line.split("<=BASE_IMAGE=>")[-1]
image, base = tmp.split("=")
images.append((image.strip(), base))
return images
def icasa(taskname, mult=None, clearstart=False, loadthese=[], **kw0):
"""
runs a CASA task given a list of options.
A given task can be run multiple times with a different options,
in this case the options must be parsed as a list/tuple of dictionaries via mult, e.g
icasa('exportfits',mult=[{'imagename':'img1.image','fitsimage':'image1.fits},{'imagename':'img2.image','fitsimage':'image2.fits}]).
Options you want be common between the multiple commands should be specified as key word args.
"""
# create temp directory from which to run casapy
td = tempfile.mkdtemp(dir='.')
# we want get back to the working directory once casapy is launched
cdir = os.path.realpath('.')
# load modules in loadthese
_load = ""
if "os" not in loadthese or "import os" not in loadthese:
loadthese.append("os")
if loadthese:
exclude = filter(lambda line: line.startswith("import")
or line.startswith("from"), loadthese)
for line in loadthese:
if line not in exclude:
line = "import %s" % line
_load += "%s\n" % line
if mult:
if isinstance(mult, (tuple, list)):
for opts in mult:
opts.update(kw0)
else:
            mult.update(kw0)
mult = [mult]
else:
        mult = [kw0]
run_cmd = """ """
for kw in mult:
task_cmds = []
for key, val in kw.items():
if isinstance(val, (str, unicode)):
val = '"%s"' % val
task_cmds .append('%s=%s' % (key, val))
task_cmds = ", ".join(task_cmds)
run_cmd += """
%s
os.chdir('%s')
%s
%s(%s)
""" % (_load, cdir, "clearstart()" if clearstart else "", taskname, task_cmds)
tf = tempfile.NamedTemporaryFile(suffix='.py')
tf.write(run_cmd)
tf.flush()
t0 = time.time()
# all logging information will be in the pyxis log files
print("Running {}".format(run_cmd))
xrun("cd", [td, "&& casa --nologger --log2term --nologfile -c", tf.name])
# log taskname.last
task_last = '%s.last' % taskname
if os.path.exists(task_last):
with opEn(task_last, 'r') as last:
print('%s.last is: \n %s' % (taskname, last.read()))
# remove temp directory. This also gets rid of the casa log files; so long suckers!
xrun("rm", ["-fr ", td, task_last])
tf.close()
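# Illustrative usage sketch (added comment; image and FITS file names are
# hypothetical, not from the original module):
#
#   icasa('exportfits',
#         mult=[{'imagename': 'img1.image', 'fitsimage': 'img1.fits'},
#               {'imagename': 'img2.image', 'fitsimage': 'img2.fits'}],
#         overwrite=True)
#
# This runs CASA's exportfits once per dict in `mult`, with the keyword
# argument overwrite=True merged into both calls, as the docstring describes.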
def stack_fits(fitslist, outname, axis=0, ctype=None, keep_old=False, fits=False):
""" Stack a list of fits files along a given axiis.
fitslist: list of fits file to combine
outname: output file name
axis: axis along which to combine the files
fits: If True will axis FITS ordering axes
ctype: Axis label in the fits header (if given, axis will be ignored)
keep_old: Keep component files after combining?
"""
import numpy
try:
import pyfits
except ImportError:
warnings.warn(
"Could not find pyfits on this system. FITS files will not be stacked")
sys.exit(0)
hdu = pyfits.open(fitslist[0])[0]
hdr = hdu.header
naxis = hdr['NAXIS']
# find axis via CTYPE key
if ctype is not None:
for i in range(1, naxis+1):
if hdr['CTYPE%d' % i].lower().startswith(ctype.lower()):
axis = naxis - i # fits to numpy convention
elif fits:
axis = naxis - axis
fits_ind = abs(axis-naxis)
crval = hdr['CRVAL%d' % fits_ind]
imslice = [slice(None)]*naxis
_sorted = sorted([pyfits.open(fits) for fits in fitslist],
key=lambda a: a[0].header['CRVAL%d' % (naxis-axis)])
# define structure of new FITS file
nn = [hd[0].header['NAXIS%d' % (naxis-axis)] for hd in _sorted]
shape = list(hdu.data.shape)
shape[axis] = sum(nn)
data = numpy.zeros(shape, dtype=float)
for i, hdu0 in enumerate(_sorted):
h = hdu0[0].header
d = hdu0[0].data
imslice[axis] = range(sum(nn[:i]), sum(nn[:i+1]))
data[imslice] = d
if crval > h['CRVAL%d' % fits_ind]:
crval = h['CRVAL%d' % fits_ind]
# update header
hdr['CRVAL%d' % fits_ind] = crval
hdr['CRPIX%d' % fits_ind] = 1
pyfits.writeto(outname, data, hdr, clobber=True)
print("Successfully stacked images. Output image is %s" % outname)
# remove old files
if not keep_old:
for fits in fitslist:
os.system('rm -f %s' % fits)
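# Illustrative usage sketch (added comment; file names are hypothetical):
#
#   stack_fits(['chan1.fits', 'chan2.fits'], 'cube.fits', ctype='FREQ')
#
# This concatenates the two images along the frequency axis (located via the
# CTYPE header keys) and, because keep_old defaults to False, removes the
# input files afterwards.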
def substitute_globals(string, globs=None):
sub = set(re.findall('\{(.*?)\}', string))
globs = globs or inspect.currentframe().f_back.f_globals
if sub:
for item in map(str, sub):
string = string.replace("${%s}" % item, globs[item])
return string
else:
return False
def get_imslice(ndim):
imslice = []
for i in xrange(ndim):
if i < ndim-2:
imslice.append(0)
else:
imslice.append(slice(None))
return imslice
def addcol(msname, colname=None, shape=None,
data_desc_type='array', valuetype=None, init_with=0, **kw):
""" add column to MS
msanme : MS to add colmn to
colname : column name
shape : shape
valuetype : data type
data_desc_type : 'scalar' for scalar elements and array for 'array' elements
init_with : value to initialise the column with
"""
import numpy
import pyrap.tables
tab = pyrap.tables.table(msname, readonly=False)
try:
tab.getcol(colname)
print('Column already exists')
except RuntimeError:
print('Attempting to add %s column to %s' % (colname, msname))
from pyrap.tables import maketabdesc
valuetype = valuetype or 'complex'
if shape is None:
dshape = list(tab.getcol('DA
| eawerbaneth/Scoreboard | achievs/admin.py | Python | bsd-3-clause | 1,116 | 0.031362 |
from django.contrib import admin
from achievs.models import Achievement
# from achievs.models import Gold
# from achievs.models import Silver
# from achievs.models import Bronze
# from achievs.models import Platinum
from achievs.models import Level
# class PlatinumInline(admin.StackedInline):
# model=Platinum
# class GoldInline(admin.StackedInline):
# model=Gold
# class SilverInline(admin.StackedInline):
# model=Silver
# class BronzeInline(admin.StackedInline):
# model=Bronze
class LevelInline(admin.StackedInline):
model=Level
class AchievementAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name']}),
('Date information', {'fields': ['pub_date']}),
]
#inlines=[GoldInline, SilverInline, BronzeInline, PlatinumInline]
inlines=[LevelInline]
list_display = ('name', 'pub_date')
list_filter=['pub_date']
    search_fields=['name']
date_hierarchy='pub_date'
# admin.site.register(Gold)
# admin.site.register(Silver)
# admin.site.register(Bronze)
# admin.site.register(Platinum)
admin.site.register(Level)
admin.site.register(Achievement, AchievementAdmin)
| Integral-Technology-Solutions/ConfigNOW | wlst/persist.py | Python | mit | 8,036 | 0.015928 |
# ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
##
## persist.py
##
## This script contains functions that manipulate persistent stores.
#=======================================================================================
# Global variables
#=======================================================================================
persistModule = '1.2.0'
log.debug('Loading module [persist.py] version [' + persistModule + ']')
#=======================================================================================
# Configure filestores
#=======================================================================================
def createFileStores(resourcesProperties, domainProperties):
fileStores=resourcesProperties.getProperty('persistent.filestores')
if fileStores is None or len(fileStores)==0:
log.info('Persistent Store is not specified, skipping.')
else:
fileStoreList=fileStores.split(',')
for fileStore in fileStoreList:
__createFileStore(fileStore, resourcesProperties, domainProperties)
#=======================================================================================
# Configure filestore
#=======================================================================================
def __createFileStore(fileStore, resourcesProperties, domainProperties):
fileStoreName=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Name')
fileStoreLocation=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Location')
tmpTarget=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Target')
migratable=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Migratable')
replaceFlag=resourcesProperties.getProperty('persistent.filestore.' + str(fileStore) + '.Replace')
if replaceFlag is None:
replaceFlag = 'false'
targetServerName = None
try:
fileStore = None
fileStoreExist = 0
try:
cd('/')
fileStore = lookup(fileStoreName, 'FileStore')
except Exception, error:
log.info('Unable to find filestore [' + str(fileStoreName) + '], trying to create new one.')
if fileStore is None:
cd('/')
fileStore = create(fileStoreName, 'FileStore')
if tmpTarget is None or len(tmpTarget)==0:
targetServerName=domainProperties.getProperty('wls.admin.name')
targetServer = lookup(targetServerName, 'Server')
else:
targetServerName=domainProperties.getProperty('wls.server.' + str(tmpTarget) + '.name')
if migratable.upper()=='TRUE':
targetServerName = targetServerName + ' (migratable)'
targetServer = lookup(targetServerName, 'MigratableTarget')
else:
targetServer = lookup(targetServerName, 'Server')
try:
fileStore.addTarget(targetServer)
except Exception, error:
cancelEdit('y')
raise ScriptError, 'Unable to add filestore [' + str(fileStoreName) + '] to target server [' + str(targetServerName) + '] : ' + str(error)
else:
if not migratable is None and migratable.upper()=='TRUE' and isUpdateToPreviouslyCreatedDomain().upper()=='TRUE':
targetsArray = fileStore.getTargets()
for i in range(0, len(targetsArray)):
targetName = targetsArray[i].getName()
# If current target is not migratable
if targetName.find("(migratable)") < 0:
newTargetName = targetName + ' (migratable)'
targetServer = lookup(newTargetName, 'MigratableTarget')
jmsServersArray = cmo.getJMSServers()
for j in range(0, len(jmsServersArray)):
currentJMSServer = jmsServersArray[j]
currentPersistentStore = currentJMSServer.getPersistentStore()
if not currentPersistentStore is None and currentPersistentStore.getName()==fileStore.getName():
log.info('Upgrading target [' + targetName + '] in JMS Server [' + currentJMSServer.getName() + '] to migratable')
currentJMSServer.setTargets(jarray.array([targetServer], weblogic.management.configuration.MigratableTargetMBean))
log.info('Upgrading target [' + targetName + '] to migratable for persistent store [' + str(fileStore.getName()) + ']')
fileStore.setTargets(jarray.array([targetServer], weblogic.management.configuration.MigratableTargetMBean))
safAgents = cmo.getSAFAgents()
for k in range(0, len(safAgents)):
safAgent = safAgents[k]
safAgentTargets = safAgent.getTargets()
newSafAgentsArray = zeros(len(safAgentTargets), weblogic.management.configuration.MigratableTargetMBean)
for l in range(0, len(safAgentTargets)):
safAgentTarget = safAgentTargets[l]
safAgentTargetName = safAgentTarget.getName()
# If current target is not migratable
if safAgentTargetName.find("(migratable)") < 0:
newSafAgentTargetName = safAgentTargetName + ' (migratable)'
newSafAgentTarget = lookup(newSafAgentTargetName, 'MigratableTarget')
log.info('Setting migratable target [' + newSafAgentTarget.getName() + '] for SAF Agent [' + safAgent.getName() + '].')
newSafAgentsArray[l] = newSafAgentTarget
else:
log.info('Setting migratable target [' + safAgentTarget.getName() + '] for SAF Agent [' + safAgent.getName() + '].')
newSafAgentsArray[l] = safAgentTarget
log.info('Updating migratable targets for SAF Agent [' + safAgent.getName() + '].')
safAgent.setTargets(newSafAgentsArray)
fileStoreExist = 1
log.info('FileStore [' + str(fileStoreName) + '] already exists, checking REPLACE flag.')
if not fileStoreExist or isReplaceRequired(domainProperties.getProperty('REPLACE')) or replaceFlag.upper()=='TRUE':
if fileStoreExist and isReplaceRequired(domainProperties.getProperty('REPLACE')):
log.info('REPLACE flag is specified, start replacing FileStore [' + str(fileStoreName) + '] properties.')
file = File(fileStoreLocation)
if not file.exists():
if file.mkdirs():
log.info('File store directory [' + str(fileStoreLocation) + '] has been created successfully.')
fileStore.setDirectory(fileStoreLocation)
except Exception, error:
cancelEdit('y')
raise ScriptError, 'Unable to create filestore [' + str(fileStoreName) + '] for target server [' + str(targetServerName) + '] : ' + str(error)
| kikocorreoso/brython | www/speed/benchmarks/add_dict.py | Python | bsd-3-clause | 124 | 0 |
d = {}
for i in range(100000):
    d[i] = i
JS_CODE = '''
var d = {};
for (var i = 0; i < 100000; i++) {
    d[i] = i;
}
'''
| linkingcharities/linkingcharities | Linking_Charities/payment/apps.py | Python | mit | 128 | 0 |
from __future__ import unicode_literals
from django.apps import AppConfig
class PaypalConfig(AppConfig):
    name = 'paypal'
| CoolProp/CoolProp | Web/scripts/logo_2013.py | Python | mit | 1,263 | 0.005542 |
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import CoolProp
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(2, 2))
ax = fig.add_subplot(111, projection='3d')
NT = 1000
NR = 1000
rho, t = np.logspace(np.log10(2e-3), np.log10(1100), NR), np.linspace(275.15, 700, NT)
RHO, T = np.meshgrid(rho, t)
P = CoolProp.CoolProp.PropsSI('P', 'D', RHO.reshape((NR * NT, 1)), 'T', T.reshape((NR * NT, 1)), 'REFPROP-Water').reshape(NT, NR)
Tsat = np.linspace(273.17, 647.0, 100)
psat = CoolProp.CoolProp.PropsSI('P', 'Q', 0, 'T', Tsat, 'Water')
rhoL = CoolProp.CoolProp.PropsSI('D', 'Q', 0, 'T', Tsat, 'Water')
rhoV = CoolProp.CoolProp.PropsSI('D', 'Q', 1, 'T', Tsat, 'Water')
ax.plot_surface(np.log(RHO), T, np.log(P), cmap=cm.jet, edgecolor='none')
ax.plot(np.log(rhoL), Tsat, np.log(psat), color='k', lw=2)
ax.plot(np.log(rhoV), Tsat, np.log(psat), color='k', lw=2)
ax.text(0.3, 800, 22, "CoolProp", size=12)
ax.set_frame_on(False)
ax.set_axis_off()
ax.view_init(22, -136)
ax.set_xlabel(r'$\ln\rho$ ')
ax.set_ylabel('$T$')
ax.set_zlabel('$p$')
plt.tight_layout()
plt.savefig('_static/PVTCP.png', transparent=True)
plt.savefig('_static/PVTCP.pdf', transparent=True)
plt.close()
| thedrow/cython | Cython/Build/Cythonize.py | Python | apache-2.0 | 6,882 | 0.001453 |
#!/usr/bin/env python
from __future__ import absolute_import
import os
import shutil
import tempfile
from distutils.core import setup
from .Dependencies import cythonize, extended_iglob
from ..Utils import is_package_dir
from ..Compiler import Options
try:
import multiprocessing
parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
except ImportError:
multiprocessing = None
parallel_compiles = 0
class _FakePool(object):
def map_async(self, func, args):
from itertools import imap
for _ in imap(func, args):
pass
def close(self): pass
def terminate(self): pass
def join(self): pass
def parse_directives(option, name, value, parser):
dest = option.dest
old_directives = dict(getattr(parser.values, dest,
Options.directive_defaults))
directives = Options.parse_directive_list(
value, relaxed_bool=True, current_settings=old_directives)
setattr(parser.values, dest, directives)
def parse_options(option, name, value, parser):
dest = option.dest
options = dict(getattr(parser.values, dest, {}))
for opt in value.split(','):
if '=' in opt:
n, v = opt.split('=', 1)
v = v.lower() not in ('false', 'f', '0', 'no')
else:
n, v = opt, True
options[n] = v
setattr(parser.values, dest, options)
def find_package_base(path):
base_dir, package_path = os.path.split(path)
while os.path.isfile(os.path.join(base_dir, '__init__.py')):
base_dir, parent = os.path.split(base_dir)
package_path = '%s/%s' % (parent, package_path)
return base_dir, package_path
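# Example (added comment; paths are hypothetical): for /src/pkg/sub/mod.py,
# with /src/pkg/__init__.py and /src/pkg/sub/__init__.py both present, the
# walk stops at /src and the function returns ('/src', 'pkg/sub/mod.py').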
def cython_compile(path_pattern, options):
pool = None
paths = map(os.path.abspath, extended_iglob(path_pattern))
try:
for path in paths:
if options.build_inplace:
base_dir = path
while not os.path.isdir(base_dir) or is_package_dir(base_dir):
base_dir = os.path.dirname(base_dir)
else:
base_dir = None
if os.path.isdir(path):
# recursively compiling a package
paths = [os.path.join(path, '**', '*.%s' % ext)
for ext in ('py', 'pyx')]
else:
# assume it's a file(-like thing)
paths = [path]
ext_modules = cythonize(
paths,
nthreads=options.parallel,
exclude_failures=options.keep_going,
exclude=options.excludes,
compiler_directives=options.directives,
force=options.force,
quiet=options.quiet,
**options.options)
if ext_modules and options.build:
if len(ext_modules) > 1 and options.parallel > 1:
if pool is None:
try:
pool = multiprocessing.Pool(options.parallel)
except OSError:
pool = _FakePool()
pool.map_async(run_distutils, [
(base_dir, [ext]) for ext in ext_modules])
else:
run_distutils((base_dir, ext_modules))
except:
if pool is not None:
pool.terminate()
raise
else:
if pool is not None:
pool.close()
pool.join()
def run_distutils(args):
base_dir, ext_modules = args
script_args = ['build_ext', '-i']
cwd = os.getcwd()
temp_dir = None
try:
if base_dir:
os.chdir(base_dir)
temp_dir = tempfile.mkdtemp(dir=base_dir)
script_args.extend(['--build-temp', temp_dir])
setup(
script_name='setup.py',
script_args=script_args,
ext_modules=ext_modules,
)
finally:
if base_dir:
os.chdir(cwd)
if temp_dir and os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
def parse_args(args):
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] [sources and packages]+')
parser.add_option('-X', '--directive', metavar='NAME=VALUE,...', dest='directives',
type=str, action='callback', callback=parse_directives, default={},
help='set a compiler directive')
parser.add_option('-s', '--option', metavar='NAME=VALUE', dest='options',
type=str, action='callback', callback=parse_options, default={},
help='set a cythonize option')
parser.add_option('-3', dest='python3_mode', action='store_true',
help='use Python 3 syntax mode by default')
parser.add_option('-x', '--exclude', metavar='PATTERN', dest='excludes',
action='append', default=[],
help='exclude certain file patterns from the compilation')
parser.add_option('-b', '--build', dest='build', action='store_true',
help='build extension modules using distutils')
parser.add_option('-i', '--inplace', dest='build_inplace', action='store_true',
help='build extension modules in place using distutils (implies -b)')
parser.add_option('-j', '--parallel', dest='parallel', metavar='N',
type=int, default=parallel_compiles,
help=('run builds in N parallel jobs (default: %d)' %
parallel_compiles or 1))
parser.add_option('-f', '--force', dest='force', action='store_true',
help='force recompilation')
    parser.add_option('-q', '--quiet', dest='quiet', action='store_true',
                      help='be less verbose during compilation')
parser.add_option('--lenient', dest='lenient', action='store_true',
help='increase Python compatibility by ignoring some compile time errors')
parser.add_option('-k', '--keep-going', dest='keep_going', action='store_true',
help='compile as much as possible, ignore compilation failures')
options, args = parser.parse_args(args)
if not args:
parser.error("no source files provided")
if options.build_inplace:
options.build = True
if multiprocessing is None:
options.parallel = 0
if options.python3_mode:
options.options['language_level'] = 3
return options, args
def main(args=None):
options, paths = parse_args(args)
if options.lenient:
# increase Python compatibility by ignoring compile time errors
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
for path in paths:
cython_compile(path, options)
if __name__ == '__main__':
main()
| JonasWallin/BayesFlow | examples/flowcymetry_normalMixture.py | Python | gpl-2.0 | 5,242 | 0.014117 |
# -*- coding: utf-8 -*-
"""
Running the Gibbs sampler on flowcymetry data
http://www.physics.orst.edu/~rubin/nacphy/lapack/linear.html
matlab time: Elapsed time is 1.563538 seconds.
improve sample_mu:
python: 0.544 0.005 0.664
cython_admi: 0.469 0.005 0.493
moved_index_in_cython: 0.148 0.002 0.217 (most time is highmem)
changed_index 0.136 0.001 0.174
removed_higmem: 0.048 0.000 0.048
improve sample_sigma:
python: 0.544 0.005 0.664
cython_admi: 0.313 0.003 0.364
moved_index_in_cython: 0.145 0.001 0.199
changed_index : 0.074 0.000 0.081 (used BLAS matrix calc)
changed to syrk : 0.060 0.000 0.067
to profile use:
%prun main(K=5):
ncalls tottime percall cumtime percall filename:lineno(function)
500 0.358 0.001 0.358 0.001 rng_cython.pyx:262(calc_lik)
100 0.297 0.003 0.297 0.003 rng_cython.pyx:291(calc_exp_normalize)
500 0.159 0.000 0.167 0.000 rng_cython.pyx:129(sample_mix_sigma_zero_mean)
100 0.145 0.001 0.199 0.002 GMM.py:40(sample_mu)
1 0.099 0.099 0.218 0.218 npyio.py:628(loadtxt)
500 0.053 0.000 0.053 0.000 rng_cython.pyx:169(sample_mu)
100 0.052 0.001 0.052 0.001 rng_cython.pyx:238(draw_x)
100 0.045 0.000 0.700 0.007 GMM.py:90(compute_ProbX)
59998/29999 0.037 0.000 0.040 0.000 npyio.py:772(pack_items)
30000 0.026 0.000 0.048 0.000 npyio.py:788(split_line)
507 0.018 0.000 0.018 0.000 {method 'reduce' of 'numpy.ufunc' objects}
60000 0.017 0.000 0.017 0.000 {method 'split' of 'str' objects}
100 0.015 0.000 0.034 0.000 GMM.py:208(sample_p)
12 0.014 0.001 0.014 0.001 {numpy.core.multiarray.array}
29999 0.012 0.000 0.012 0.000 {zip}
%prun main_python(K=5)
ncalls tottime percall cumtime percall filename:lineno(function)
10707 0.584 0.000 0.584 0.000 {method 'reduce' of 'numpy.ufunc' objects}
100 0.574 0.006 2.195 0.022 GMM.py:149(sample_x)
100 0.544 0.005 0.664 0.007 GMM.py:176(sample_mu)
100 0.499 0.005 1.295 0.013 GMM.py:219(compute_ProbX)
100 0.334 0.003 0.549 0.005 GMM.py:189(sample_sigma)
3501 0.310 0.000 0.310 0.000 {numpy.core._dotblas.dot}
16112 0.252 0.000 0.252 0.000 {numpy.core.multiarray.array}
1 0.101 0.101 0.223 0.223 npyio.py:628(loadtxt)
100 0.048 0.000 0.048 0.000 {method 'cumsum' of 'numpy.ndarray' objects}
59998/29999 0.038 0.000 0.041 0.000 npyio.py:772(pack_items)
Created on Fri Jun 20 16:52:31 2014
@author: jonaswallin
"""
from __future__ import division
import numpy as np
from BayesFlow import mixture
import BayesFlow.PurePython.GMM as GMM
from matplotlib import pyplot as plt
import numpy.random as npr
import time
K = 5
def main(K= 5):
sim = 100
data = np.ascontiguousarray(np.loadtxt('../data/flowcym.dat',skiprows=1,usecols=(1,2,3,4,5,6)))
mix = mixture(data,K,high_memory=True)
t0 = time.time()
for i in range(sim): # @UnusedVariable
mix.sample()
t1 = time.time()
print("mixture took %.4f sec"%(t1-t0))
def main_python(K = 5):
sim = 100
data = np.ascontiguousarray(np.loadtxt('../data/flowcym.dat',skiprows=1,usecols=(1,2,3,4,5,6)))
mix = GMM.mixture(data,K)
t0 = time.time()
for i in range(sim): # @UnusedVariable
mix.sample()
t1 = time.time()
print("mixture took %.4f sec"%(t1-t0))
if __name__ == '__main__':
sim = 10
data = np.ascontiguousarray(np.loadtxt('../data/flowcym.dat',skiprows=1,usecols=(1,2,3,4,5,6)))
mix = mixture(data, K)
mus = np.zeros((sim,2*data.shape[1]))
t0 = time.time()
for i in range(sim):
mix.sample()
mus[i,:data.shape[1]] = mix.mu[0]
mus[i,data.shape[1]:] = mix.mu[1]
t1 = time.time()
if 1:
for k in range(mix.K):
plt.plot(mix.data[mix.x==k,0],mix.data[mix.x==k,1],'o')
plt.figure()
for k in range(mix.K):
plt.plot(mus[:,(2*k):(2*(k+1))])
plt.show()
print("mixture took %.4f sec"%(t1-t0))
mix2 = GMM.mixture(data,K)
mus = np.zeros((sim,4))
t0 = time.time()
for i in range(sim):
mix2.sample()
t1 = time.time()
print("Python mixture took %.4f sec"%(t1-t0))
if 0:
import pstats, cProfile
import pyximport
pyximport.install()
import bayesianmixture.distributions.rng_cython as rng_cython
#cProfile.runctx("rng_cython.sample_mu_rep(np.sum(mix.data[mix.x == 0 ,:],1),mix.sigma[0],mix.prior[0]['mu']['theta'].reshape(mix.d),mix.prior[0]['mu']['sigma'],npr.rand(mix.d),10000)", globals(), locals(), "Profile.prof")
cProfile.runctx("for k in range(100): mix.sample_mu()", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
|
fossevents/fossevents.in
|
fossevents/users/migrations/0005_auto_20170212_1138.py
|
Python
|
mit
| 504 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-02-12 06:08
from __future__ import unicode_literals
from django.db import migrations
import fossevents.users.models
class Migration(migrations.Migration):
    dependencies = [
('users', '0004_user_is_moderator'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', fossevents.users.models.CustomUserManager()),
],
),
]
|
rileymjohnson/fbla
|
app/main.py
|
Python
|
mit
| 3,324 | 0.008724 |
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template, Markup
from . import public, admin
from .extensions import *
from .config import Config
#extensions
def getPackage(num):
packages = {
"0": "No Package",
"1": "Basic Package",
"2": "Deluxe Package",
"3": "Ultimate Blast Package",
"4": "Party Package",
"5": "Holiday Package",
"6": "Behind the Scenes Package"
}
return packages[str(num)]
def formatHours(hour):
if hour <= 12:
return str(hour) + " A.M."
else:
return str(hour - 12) + " P.M."
_js_escapes = {
'\\': '\\u005C',
'\'': '\\u0027',
'"': '\\u0022',
'>': '\\u003E',
'<': '\\u003C',
'&': '\\u0026',
'=': '\\u003D',
'-': '\\u002D',
';': '\\u003B',
u'\u2028': '\\u2028',
u'\u2029': '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update(('%c' % z, '\\u%04X' % z) for z in xrange(32))
def jinja2_escapejs_filter(value):
retval = []
for letter in value:
if _js_escapes.has_key(letter):
retval.append(_js_escapes[letter])
else:
retval.append(letter)
return Markup("".join(retval))
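# Hypothetical usage of the filter above (not part of the original file): when a
# template embeds a value inside an inline <script> block, characters listed in
# _js_escapes are replaced by their six-character \uXXXX sequences, so a payload
# such as u'</script>' comes out as u'\u003C/script\u003E' and cannot terminate
# the script tag early.
#   var userName = "{{ user_supplied_value | escapejs }}";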
#creates and returns a flask app instance
def create_app(config_object=Config):
app = Flask(__name__)
app.config.from_object(config_object)
register_extensions(app)
register_jinja_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
setup_logging(app)
return app
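# Illustrative use of the factory above (assumed entry point, not in the original
# file): a WSGI module or run script would typically do something like
#   app = create_app()
#   app.run(debug=True)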
#set up gunicorn logging in production
def setup_logging(app):
if not app.debug:
# In production mode, add log handler to sys.stderr.
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
return None
#register flask extensions
def register_extensions(app):
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
return None
#register blueprints to the app
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
#add jinja extensions
def register_jinja_extensions(app):
def get_year(*args): #returns the current year
import datetime
now = datetime.datetime.now()
return now.year
app.jinja_env.filters['currentYear'] = get_year #creates a filter that returns the current year
app.jinja_env.filters['escapejs'] = jinja2_escapejs_filter
app.jinja_env.globals.update(formatHours=formatHours)
app.jinja_env.globals.update(getPackage=getPackage)
return None
#register error handlers
def register_errorhandlers(app):
def render_error(error):
error_code = getattr(error, 'code', 500)
print error_code
if error_code == 404:
return render_template("notfound.html", error=error_code), error_code
else:
return render_template("error.html", error=error_code), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
|
dstockwell/catapult
|
tracing/third_party/tvcm/tvcm/module_unittest.py
|
Python
|
bsd-3-clause
| 3,914 | 0.005876 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the module module, which contains Module and related classes."""
import os
import unittest
from tvcm import fake_fs
from tvcm import module
from tvcm import resource_loader
from tvcm import project as project_module
class ModuleIntegrationTests(unittest.TestCase):
def test_module(self):
    fs = fake_fs.FakeFS()
fs.AddFile('/src/x.html', """
<!DOCTYPE html>
      <link rel="import" href="/y.html">
<link rel="import" href="/z.html">
<script>
'use strict';
</script>
""")
fs.AddFile('/src/y.html', """
<!DOCTYPE html>
<link rel="import" href="/z.html">
""")
fs.AddFile('/src/z.html', """
<!DOCTYPE html>
""")
fs.AddFile('/src/tvcm.html', '<!DOCTYPE html>')
with fs:
project = project_module.Project([os.path.normpath('/src/')])
loader = resource_loader.ResourceLoader(project)
x_module = loader.LoadModule('x')
self.assertEquals([loader.loaded_modules['y'],
loader.loaded_modules['z']],
x_module.dependent_modules)
already_loaded_set = set()
load_sequence = []
x_module.ComputeLoadSequenceRecursive(load_sequence, already_loaded_set)
self.assertEquals([loader.loaded_modules['z'],
loader.loaded_modules['y'],
x_module],
load_sequence)
def testBasic(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/src/my_module.html', """
<!DOCTYPE html>
<link rel="import" href="/tvcm/foo.html">
});
""")
fs.AddFile('/x/tvcm/foo.html', """
<!DOCTYPE html>
});
""")
project = project_module.Project([os.path.normpath('/x')])
loader = resource_loader.ResourceLoader(project)
with fs:
my_module = loader.LoadModule(module_name='src.my_module')
dep_names = [x.name for x in my_module.dependent_modules]
self.assertEquals(['tvcm.foo'], dep_names)
def testDepsExceptionContext(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/src/my_module.html', """
<!DOCTYPE html>
<link rel="import" href="/tvcm/foo.html">
""")
fs.AddFile('/x/tvcm/foo.html', """
<!DOCTYPE html>
<link rel="import" href="missing.html">
""")
project = project_module.Project([os.path.normpath('/x')])
loader = resource_loader.ResourceLoader(project)
with fs:
exc = None
try:
loader.LoadModule(module_name='src.my_module')
assert False, 'Expected an exception'
except module.DepsException, e:
exc = e
self.assertEquals(
['src.my_module', 'tvcm.foo'],
exc.context)
def testGetAllDependentFilenamesRecursive(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/y/z/foo.html', """
<!DOCTYPE html>
<link rel="import" href="/z/foo2.html">
<link rel="stylesheet" href="/z/foo.css">
<script src="/bar.js"></script>
""")
fs.AddFile('/x/y/z/foo.css', """
.x .y {
background-image: url(foo.jpeg);
}
""")
fs.AddFile('/x/y/z/foo.jpeg', '')
fs.AddFile('/x/y/z/foo2.html', """
<!DOCTYPE html>
""")
fs.AddFile('/x/raw/bar.js', 'hello')
project = project_module.Project([
os.path.normpath('/x/y'), os.path.normpath('/x/raw/')])
loader = resource_loader.ResourceLoader(project)
with fs:
my_module = loader.LoadModule(module_name='z.foo')
self.assertEquals(1, len(my_module.dependent_raw_scripts))
dependent_filenames = my_module.GetAllDependentFilenamesRecursive()
self.assertEquals(
[
os.path.normpath('/x/y/z/foo.html'),
os.path.normpath('/x/raw/bar.js'),
os.path.normpath('/x/y/z/foo.css'),
os.path.normpath('/x/y/z/foo.jpeg'),
os.path.normpath('/x/y/z/foo2.html'),
],
dependent_filenames)
|
richard-willowit/odoo
|
addons/account/models/account_payment.py
|
Python
|
gpl-3.0
| 35,920 | 0.004928 |
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
MAP_INVOICE_TYPE_PARTNER_TYPE = {
'out_invoice': 'customer',
'out_refund': 'customer',
'in_invoice': 'supplier',
'in_refund': 'supplier',
}
# Since invoice amounts are unsigned, this is how we know if money comes in or goes out
MAP_INVOICE_TYPE_PAYMENT_SIGN = {
'out_invoice': 1,
'in_refund': -1,
'in_invoice': -1,
'out_refund': 1,
}
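# Worked example of the sign convention above (illustrative, not part of the
# original module): a customer invoice ('out_invoice') of 100.0 contributes
# +100.0 (money coming in), while a vendor bill ('in_invoice') of 100.0
# contributes -100.0 (money going out):
#   MAP_INVOICE_TYPE_PAYMENT_SIGN['in_invoice'] * 100.0  # -> -100.0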
class account_payment_method(models.Model):
_name = "account.payment.method"
_description = "Payment Methods"
name = fields.Char(required=True, translate=True)
code = fields.Char(required=True) # For internal identification
payment_type = fields.Selection([('inbound', 'Inbound'), ('outbound', 'Outbound')], required=True)
class account_abstract_payment(models.AbstractModel):
_name = "account.abstract.payment"
_description = "Contains the logic shared between models which allows to register payments"
payment_type = fields.Selection([('outbound', 'Send Money'), ('inbound', 'Receive Money')], string='Payment Type', required=True)
payment_method_id = fields.Many2one('account.payment.method', string='Payment Method Type', required=True, oldname="payment_method",
help="Manual: Get paid by cash, check or any other method outside of Odoo.\n"\
"Electronic: Get paid automatically through a payment acquirer by requesting a transaction on a card saved by the customer when buying or subscribing online (payment token).\n"\
"Check: Pay bill by check and print it from Odoo.\n"\
"Batch Deposit: Encash several customer checks at once by generating a batch deposit to submit to your bank. When encoding the bank statement in Odoo, you are suggested to reconcile the transaction with the batch deposit.To enable batch deposit,module account_batch_deposit must be installed.\n"\
"SEPA Credit Transfer: Pay bill from a SEPA Credit Transfer file you submit to your bank. To enable sepa credit transfer, module account_sepa must be installed ")
payment_method_code = fields.Char(related='payment_method_id.code',
help="Technical field used to adapt the interface to the payment type selected.", readonly=True)
partner_type = fields.Selection([('customer', 'Customer'), ('supplier', 'Vendor')])
partner_id = fields.Many2one('res.partner', string='Partner')
amount = fields.Monetary(string='Payment Amount', required=True)
currency_id = fields.Many2one('res.currency', string='Currency', required=True, default=lambda self: self.env.user.company_id.currency_id)
payment_date = fields.Date(string='Payment Date', default=fields.Date.context_today, required=True, copy=False)
communication = fields.Char(string='Memo')
journal_id = fields.Many2one('account.journal', string='Payment Journal', required=True, domain=[('type', 'in', ('bank', 'cash'))])
company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', readonly=True)
hide_payment_method = fields.Boolean(compute='_compute_hide_payment_method',
help="Technical field used to hide the payment method if the selected journal has only one available which is 'manual'")
@api.one
@api.constrains('amount')
def _check_amount(self):
if self.amount < 0:
raise ValidationError(_('The payment amount cannot be negative.'))
@api.multi
@api.depends('payment_type', 'journal_id')
def _compute_hide_payment_method(self):
for payment in self:
if not payment.journal_id:
payment.hide_payment_method = True
continue
journal_payment_methods = payment.payment_type == 'inbound'\
and payment.journal_id.inbound_payment_method_ids\
or payment.journal_id.outbound_payment_method_ids
payment.hide_payment_method = len(journal_payment_methods) == 1 and journal_payment_methods[0].code == 'manual'
@api.onchange('journal_id')
def _onchange_journal(self):
if self.journal_id:
self.currency_id = self.journal_id.currency_id or self.company_id.currency_id
# Set default payment method (we consider the first to be the default one)
payment_methods = self.payment_type == 'inbound' and self.journal_id.inbound_payment_method_ids or self.journal_id.outbound_payment_method_ids
self.payment_method_id = payment_methods and payment_methods[0] or False
# Set payment method domain (restrict to methods enabled for the journal and to selected payment type)
payment_type = self.payment_type in ('outbound', 'transfer') and 'outbound' or 'inbound'
return {'domain': {'payment_method_id': [('payment_type', '=', payment_type), ('id', 'in', payment_methods.ids)]}}
return {}
@api.model
def _compute_total_invoices_amount(self):
""" Compute the sum of the residual of invoices, expressed in the payment currency """
payment_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id or self.env.user.company_id.currency_id
total = 0
for inv in self.invoice_ids:
if inv.currency_id == payment_currency:
total += inv.residual_signed
else:
total += inv.company_currency_id.with_context(date=self.payment_date).compute(
inv.residual_company_signed, payment_currency)
return abs(total)
class account_register_payments(models.TransientModel):
_name = "account.register.payments"
_inherit = 'account.abstract.payment'
_description = "Register payments on multiple invoices"
invoice_ids = fields.Many2many('account.invoice', string='Invoices', copy=False)
multi = fields.Boolean(string='Multi', help='Technical field indicating if the user selected invoices from multiple partners or from different types.')
@api.onchange('payment_type')
def _onchange_payment_type(self):
if self.payment_type:
return {'domain': {'payment_method_id': [('payment_type', '=', self.payment_type)]}}
@api.model
def _compute_payment_amount(self, invoice_ids):
payment_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
total = 0
for inv in invoice_ids:
if inv.currency_id == payment_currency:
total += MAP_INVOICE_TYPE_PAYMENT_SIGN[inv.type] * inv.residual_company_signed
else:
amount_residual = inv.company_currency_id.with_context(date=self.payment_date).compute(
inv.residual_company_signed, payment_currency)
                total += MAP_INVOICE_TYPE_PAYMENT_SIGN[inv.type] * amount_residual
        return total
@api.model
def default_get(self, fields):
rec = super(account_register_payments, self).default_get(fields)
active_ids = self._context.get('active_ids')
# Check for selected invoices ids
if not active_ids:
            raise UserError(_("Programming error: wizard action executed without active_ids in context."))
invoices = self.env['account.invoice'].browse(active_ids)
        # Check all invoices are open
if any(invoice.state != 'open' for invoice in invoices):
raise UserError(_("You can only register payments for open invoices"))
# Check all invoices have the same currency
if any(inv.currency_id != invoices[0].currency_id for inv in invoices):
raise UserError(_("In order to pay multiple invoices at once, they must use the same currency."))
        # Look if we are mixing multiple commercial_partner or customer invoices with vendor bills
multi = any(inv.commercial_partner_id != invoices[0].commercial_partner_id
or MAP_INVOICE_TYPE_PARTNER_TYPE[inv.type] != MAP_INVOICE_TYPE_PARTNER_TYPE[invoices[0].type]
for inv in invoices)
total_amount = self._compute_payment_amount(invoices)
rec.update({
'amount':
|
pabulumm/neighbors
|
lib/python3.4/site-packages/django_extensions/db/fields/json.py
|
Python
|
bsd-3-clause
| 3,459 | 0.000289 |
"""
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
from __future__ import absolute_import
from decimal import Decimal
import six
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
try:
# Django >= 1.7
import json
except ImportError:
# Django <= 1.6 backwards compatibility
from django.utils import simplejson as json
def dumps(value):
return DjangoJSONEncoder().encode(value)
def loads(txt):
value = json.loads(
txt,
parse_float=Decimal,
encoding=settings.DEFAULT_CHARSET
)
return value
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
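# Illustrative behaviour of the hack above (not part of the original file):
#   repr(JSONDict(a=1))  # -> '{"a": 1}' rather than "{'a': 1}",
# so fixtures written by dumpdata contain valid JSON that loads() can parse back.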
class JSONUnicode(six.text_type):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONList(list):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
def __init__(self, *args, **kwargs):
default = kwargs.get('default', None)
if default is None:
kwargs['default'] = '{}'
elif isinstance(default, (list, dict)):
kwargs['default'] = dumps(default)
models.TextField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if value is None or value == '':
return {}
elif isinstance(value, six.string_types):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
elif isinstance(res, six.string_types):
return JSONUnicode(res)
elif isinstance(res, list):
return JSONList(res)
return res
else:
return value
def get_db_prep_save(self, value, connection, **kwargs):
"""Convert our JSON object to a string before we save"""
if value is None and self.null:
return None
# default values come in as strings; only non-strings should be
# run through `dumps`
if not isinstance(value, six.string_types):
value = dumps(value)
return super(JSONField, self).get_db_prep_save(value, connection=connection, **kwargs)
def south_field_triple(self):
"""Returns a suitable description of this field for South."""
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(JSONField, self).deconstruct()
if self.default == '{}':
del kwargs['default']
return name, path, args, kwargs
|
franckinux/django-openzoom
|
setup.py
|
Python
|
gpl-3.0
| 925 | 0.035676 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""setup
(C) Franck Barbenoire <fbarbenoire@yahoo.fr>
License : GPL v3"""
from distutils.core import setup
from setuptools import find_packages
setup(name = "django-openzoom",
version = "0.1.1",
      description = "Django application for displaying very high resolution images",
author = "Franck Barbenoire",
author_email = "fbarbenoire@yahoo.fr",
url = "https://github.com/franckinux/django-openzoom",
packages = find_packages(),
include_package_data = True,
      zip_safe = False,
classifiers = ['Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Framework :: Django',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
)
|
boyska/libreant
|
libreantdb/api.py
|
Python
|
agpl-3.0
| 7,780 | 0.002314 |
from __future__ import print_function
def validate_book(body):
'''
This does not only accept/refuse a book. It also returns an ENHANCED
version of body, with (mostly fts-related) additional fields.
This function is idempotent.
'''
if '_language' not in body:
raise ValueError('language needed')
if len(body['_language']) > 2:
raise ValueError('invalid language: %s' % body['_language'])
allfields = collectStrings(body)
body['_text_%s' % body['_language']] = ' '.join(allfields)
return body
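# Illustrative example (not part of the original file): validate_book collects all
# non-underscore string values into a language-specific full-text field, e.g.
#   validate_book({'_language': 'en', 'title': 'Moby Dick'})
#   # -> {'_language': 'en', 'title': 'Moby Dick', '_text_en': 'Moby Dick'}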
def collectStrings(leftovers):
strings = []
if isinstance(leftovers, basestring):
return leftovers.split()
elif isinstance(leftovers, list):
for l in leftovers:
strings.extend(collectStrings(l))
return strings
elif isinstance(leftovers, dict):
for key, value in leftovers.items():
if not key.startswith('_'):
                strings.extend(collectStrings(value))
return strings
else:
return strings
class DB(object):
    '''
    this class contains every query method and every operation on the index
'''
# Setup {{{2
def __init__(self, es, index_name):
self.es = es
self.index_name = index_name
# book_validator can adjust the book, and raise if it's not valid
self.book_validator = validate_book
def setup_db(self):
maps = {
'book': { # this need to be the document type!
# special elasticsearch field
# http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-timestamp-field.html
# initialized with element creation date, hidden by default in query result
"_timestamp" : { "enabled" : "true",
"store": "yes"},
"properties": {
"_text_en": {
"type": "string",
"analyzer": "english"
},
"_text_it": {
"type": "string",
"analyzer": "it_analyzer"
}
}
}
}
# Just like the default one
# http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html#italian-analyzer
# but the stemmer changed from light_italian to italian
settings = {"analysis": {
"filter": {
"italian_elision": {
"type": "elision",
"articles": [
"c", "l", "all", "dall", "dell",
"nell", "sull", "coll", "pell",
"gl", "agl", "dagl", "degl", "negl",
"sugl", "un", "m", "t", "s", "v", "d"
]
},
"italian_stop": {
"type": "stop", "stopwords": "_italian_"},
"italian_stemmer": {
"type": "stemmer", "language": "italian"}
},
"analyzer": {
"it_analyzer": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"italian_elision",
"lowercase",
"italian_stop",
"italian_stemmer"
]
}
}
}}
if not self.es.indices.exists(self.index_name):
self.es.indices.create(index=self.index_name,
body={'settings': settings,
'mappings': maps})
    # End setup }}}
# Queries {{{2
def __len__(self):
stats = self.es.indices.stats()
return stats['indices'][self.index_name]['total']['docs']['count']
def _search(self, body, size=30):
return self.es.search(index=self.index_name, body=body, size=size)
def _get_search_field(self, field, value):
return {'query':
{'match': {field: value}}
}
def mlt(self, _id):
'''
High-level method to do "more like this".
Its exact implementation can vary.
'''
query = {'more_like_this': {
# FIXME: text_* does not seem to work, so we're relying on listing
# them manually
'fields': ['book._text_it', 'book._text_en'],
'ids': [_id],
'min_term_freq': 1,
'min_doc_freq': 1,
}}
return self._search(dict(query=query))
def get_all_books(self, size=30):
return self._search({}, size=size)
def get_last_inserted(self, size=30):
query = { "fields": [ "_timestamp", "_source"],
"query" : { "match_all" : {} },
"sort" : [ {"_timestamp": "desc"} ] }
return self._search(body=query, size=size)
def get_books_simplequery(self, query):
return self._search(self._get_search_field('_all', query))
def get_books_multilanguage(self, query):
return self._search({'query': {'multi_match':
{'query': query, 'fields': '_text_*'}
}})
def get_books_by_title(self, title):
return self._search(self._get_search_field('title', title))
def get_books_by_actor(self, authorname):
return self._search(self._get_search_field('actors', authorname))
def get_book_by_id(self, id):
return self.es.get(index=self.index_name, id=id)
def get_books_querystring(self, query):
q = {'query': query, 'fields': ['_text_*']}
return self._search({'query': dict(query_string=q)})
def user_search(self, query):
'''
        This acts like a "wrapper" that always points to the recommended
function for user searching.
'''
return self.get_books_querystring(query)
def autocomplete(self, fieldname, start):
raise NotImplementedError()
# End queries }}}
# Operations {{{2
def add_book(self, **book):
'''
Call it like this:
db.add_book(doc_type='book',
body={'title': 'foobar', '_language': 'it'})
'''
if 'doc_type' not in book:
book['doc_type'] = 'book'
book['body'] = validate_book(book['body'])
return self.es.create(index=self.index_name, **book)
def update_book(self, id, doc_type='book', body={}):
'''
Update a book. The "body" is merged with the current one.
Yes, it is NOT overwritten.
'''
# note that we are NOT overwriting all the _source, just merging
doc = {'doc': body}
ret = self.es.update(index=self.index_name, id=id,
doc_type=doc_type, body=doc)
# text_* fields need to be "updated"; atomicity is provided by the
# idempotency of validate_book
book = self.get_book_by_id(ret['_id'])['_source']
book = validate_book(book)
ret = self.es.update(index=self.index_name, id=id,
doc_type=doc_type, body={'doc': book})
return ret
def increment_download_count(self, id, fileIndex, doc_type='book'):
'''
Increment the download counter of a specific file
'''
body = self.es.get(index=self.index_name, id=id, doc_type='book', _source_include='_files')['_source']
body['_files'][fileIndex]['download_count'] += 1
self.es.update(index=self.index_name, id=id,
doc_type=doc_type, body={"doc":body})
# End operations }}}
# vim: set fdm=marker fdl=1:
|
etsinko/oerplib
|
oerplib/rpc/xmlrpclib_custom.py
|
Python
|
lgpl-3.0
| 5,967 | 0.000168 |
# -*- coding: UTF-8 -*-
##############################################################################
#
# OERPLib
# Copyright (C) 2012-2013 Sébastien Alix.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xmlrpclib
import httplib
import socket
import sys
from urlparse import urlparse
# Defined later following the version of Python used
TimeoutTransport = None
TimeoutSafeTransport = None
class TimeoutServerProxy(xmlrpclib.ServerProxy):
"""xmlrpclib.ServerProxy overload to manage the timeout of the socket."""
def __init__(self, *args, **kwargs):
url = args[0]
https_ok = urlparse(url).scheme == 'https'
t = https_ok and TimeoutSafeTransport() or TimeoutTransport()
t.timeout = kwargs.get('timeout', 120)
if 'timeout' in kwargs:
del kwargs['timeout']
kwargs['transport'] = t
xmlrpclib.ServerProxy.__init__(self, *args, **kwargs)
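# Minimal usage sketch (endpoint URL is assumed, not part of the original file):
# the extra 'timeout' keyword is consumed by TimeoutServerProxy above and applied
# to the socket of the transport selected below (HTTP or HTTPS).
#   proxy = TimeoutServerProxy('http://localhost:8069/xmlrpc/common', timeout=30)
#   proxy.version()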
if sys.version_info <= (2, 7):
# Python 2.5 and 2.6
# -- xmlrpclib.Transport with timeout support --
class TimeoutHTTPPy26(httplib.HTTP):
def __init__(self, host='', port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
if port == 0:
port = None
self._setup(self._connection_class(host, port, strict, timeout))
class TimeoutTransportPy26(xmlrpclib.Transport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
host, extra_headers, x509 = self.get_host_info(host)
conn = TimeoutHTTPPy26(host, timeout=self.timeout)
return conn
# -- xmlrpclib.SafeTransport with timeout support --
class TimeoutHTTPSPy26(httplib.HTTPS):
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
            if port == 0:
                port = None
            self._setup(self._connection_class(
host, port, key_file, cert_file, strict, timeout))
self.key_file = key_file
self.cert_file = cert_file
class TimeoutSafeTransportPy26(xmlrpclib.SafeTransport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
host, extra_headers, x509 = self.get_host_info(host)
conn = TimeoutHTTPSPy26(host, timeout=self.timeout)
return conn
# Define the TimeTransport and TimeSafeTransport class version to use
TimeoutTransport = TimeoutTransportPy26
TimeoutSafeTransport = TimeoutSafeTransportPy26
else:
# Python 2.7 and 3.X
# -- xmlrpclib.Transport with timeout support --
class TimeoutHTTPConnectionPy27(httplib.HTTPConnection):
def __init__(self, timeout, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
self.timeout = timeout
def connect(self):
httplib.HTTPConnection.connect(self)
self.sock.settimeout(self.timeout)
class TimeoutTransportPy27(xmlrpclib.Transport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, TimeoutHTTPConnectionPy27(
self.timeout, chost)
return self._connection[1]
# -- xmlrpclib.SafeTransport with timeout support --
class TimeoutHTTPSConnectionPy27(httplib.HTTPSConnection):
def __init__(self, timeout, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self.timeout = timeout
def connect(self):
httplib.HTTPSConnection.connect(self)
self.sock.settimeout(self.timeout)
class TimeoutSafeTransportPy27(xmlrpclib.SafeTransport):
def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*args, **kwargs):
xmlrpclib.SafeTransport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, TimeoutHTTPSConnectionPy27(
self.timeout, chost)
return self._connection[1]
# Define the TimeTransport and TimeSafeTransport class version to use
TimeoutTransport = TimeoutTransportPy27
TimeoutSafeTransport = TimeoutSafeTransportPy27
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
toastedcornflakes/scikit-learn
|
sklearn/feature_selection/univariate_selection.py
|
Python
|
bsd-3-clause
| 25,381 | 0.000394 |
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
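# Illustrative call (not part of scikit-learn): f_oneway takes one array of samples
# per group and returns per-feature F statistics and p-values; the data below are
# arbitrary random draws used only to show the calling convention.
#   rng = np.random.RandomState(0)
#   a, b, c = rng.randn(20, 3), rng.randn(25, 3) + 0.5, rng.randn(30, 3)
#   F, p = f_oneway(a, b, c)   # F.shape == p.shape == (3,)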
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The data matrix.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
    Returns
    -------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
    # XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
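# Illustrative call (not part of scikit-learn): chi2 expects non-negative feature
# values such as term counts; the numbers below are arbitrary.
#   X = np.array([[1, 0, 3], [0, 2, 1], [4, 0, 0], [0, 1, 2]])
#   y = np.array([0, 1, 0, 1])
#   scores, pvalues = chi2(X, y)   # one chi-squared score / p-value per feature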
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of shape(n_samples).
The data matrix
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, sha
|
Gerapy/Gerapy
|
setup.py
|
Python
|
mit
| 3,683 | 0.000544 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join, isfile
from os import walk
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
def read_file(filename):
with open(filename) as fp:
return fp.read().strip()
def read_requirements(filename):
return [line.strip() for line in read_file(filename).splitlines()
if not line.startswith('#')]
NAME = 'gerapy'
FOLDER = 'gerapy'
DESCRIPTION = 'Distributed Crawler Management Framework Based on Scrapy, Scrapyd, Scrapyd-Client, Scrapyd-API, Django and Vue.js'
URL = 'https://github.com/Gerapy/Gerapy'
EMAIL = 'cqc@cuiqingcai.com'
AUTHOR = 'Germey'
REQUIRES_PYTHON = '>=3.5.0'
VERSION = None
REQUIRED = read_requirements('requirements.txt')
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
about = {}
if not VERSION:
with open(os.path.join(here, FOLDER, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
def package_files(directories):
paths = []
for item in directories:
if isfile(item):
paths.append(join('..', item))
continue
for (path, directories, filenames) in walk(item):
for filename in filenames:
paths.append(join('..', path, filename))
return paths
class UploadCommand(Command):
description = 'Build and publish the package.'
    user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
            pass
self.status('Building Source and Wheel (universal) distribution…')
os.system(
'{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
entry_points={
'console_scripts': ['gerapy = gerapy.cmd:cmd']
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
package_data={
'': package_files([
'gerapy/server/static',
'gerapy/server/core/templates',
'gerapy/templates',
])
},
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
openstack/networking-odl
|
networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py
|
Python
|
apache-2.0
| 25,998 | 0 |
#
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from unittest import mock
import urllib
from oslotest import base
from ceilometer import service
from networking_odl.ceilometer.network.statistics.opendaylight_v2 import driver
from oslo_utils import uuidutils
ADMIN_ID = str(uuidutils.generate_uuid())
PORT_1_TENANT_ID = str(uuidutils.generate_uuid())
PORT_2_TENANT_ID = str(uuidutils.generate_uuid())
PORT_1_ID = str(uuidutils.generate_uuid())
PORT_2_ID = str(uuidutils.generate_uuid())
class _Base(base.BaseTestCase, metaclass=abc.ABCMeta):
@abc.abstractmethod
def switch_data(self):
pass
fake_odl_url = urllib.parse.ParseResult('opendaylight.v2',
'localhost:8080',
'controller/statistics',
None,
None,
None)
fake_params = urllib.parse.parse_qs(
'user=admin&password=admin&scheme=http&auth=basic')
def setUp(self):
super(_Base, self).setUp()
self.addCleanup(mock.patch.stopall)
conf = service.prepare_service([], [])
self.driver = driver.OpenDaylightDriver(conf)
ks_client = mock.Mock(auth_token='fake_token')
ks_client.projects.find.return_value = mock.Mock(name='admin',
id=ADMIN_ID)
self.ks_client = mock.patch('ceilometer.keystone_client.get_client',
return_value=ks_client).start()
self.get_statistics = mock.patch(
            'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
            'client.SwitchStatisticsAPIClient.get_statistics',
return_value=self.switch_data).start()
    def _test_for_meter(self, meter_name, expected_data):
sample_data = self.driver.get_sample_data(meter_name,
self.fake_odl_url,
self.fake_params,
{})
self.assertEqual(expected_data, list(sample_data))
class TestOpenDayLightDriverInvalid(_Base):
switch_data = {"flow_capable_switches": []}
def test_not_implemented_meter(self):
sample_data = self.driver.get_sample_data('egg',
self.fake_odl_url,
self.fake_params,
{})
self.assertIsNone(sample_data)
sample_data = self.driver.get_sample_data('switch.table.egg',
self.fake_odl_url,
self.fake_params,
{})
self.assertIsNone(sample_data)
def test_cache(self):
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertEqual(1, self.get_statistics.call_count)
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertEqual(2, self.get_statistics.call_count)
def test_http_error(self):
mock.patch(
'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
'client.SwitchStatisticsAPIClient.get_statistics',
side_effect=Exception()).start()
sample_data = self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
{})
self.assertEqual(0, len(sample_data))
mock.patch(
'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
'client.SwitchStatisticsAPIClient.get_statistics',
side_effect=[Exception(), self.switch_data]).start()
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertIn('network.statistics.opendaylight_v2', cache)
class TestOpenDayLightDriverSimple(_Base):
switch_data = {
"flow_capable_switches": [{
"packet_in_messages_received": "501",
"packet_out_messages_sent": "300",
"ports": "1",
"flow_datapath_id": "55120148545607",
"switch_port_counters": [{
"bytes_received": "0",
"bytes_sent": "0",
"duration": "600",
"packets_internal_received": "444",
"packets_internal_sent": "0",
"packets_received": "0",
"packets_received_drop": "0",
"packets_received_error": "0",
"packets_sent": "0",
"port_id": "4",
"tenant_id": PORT_1_TENANT_ID,
"uuid": PORT_1_ID
}],
"table_counters": [{
"flow_count": "90",
"table_id": "0"
}]
}]
}
def test_meter_switch(self):
expected_data = [
(1, "55120148545607",
{'controller': 'OpenDaylight_V2'},
ADMIN_ID),
]
self._test_for_meter('switch', expected_data)
def test_meter_switch_ports(self):
expected_data = [
(1, "55120148545607",
{'controller': 'OpenDaylight_V2'},
ADMIN_ID)
]
self._test_for_meter('switch.ports', expected_data)
def test_meter_switch_port(self):
expected_data = [
(1, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port', expected_data)
def test_meter_switch_port_uptime(self):
expected_data = [
(600, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.uptime', expected_data)
def test_meter_switch_port_receive_packets(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
'neutron_port_id': PORT_1_ID,
'switch': '55120148545607'
}, ADMIN_ID),
]
self._test_for_meter('switch.port.receive.packets', expected_data)
def test_meter_switch_port_transmit_packets(self):
expected_data = [
(0, '55120148545607:4', {
'controller': 'OpenDaylight_V2',
'port_number_on_switch': 4,
|
andrewsomething/libcloud
|
libcloud/test/dns/test_route53.py
|
Python
|
apache-2.0
| 14,213 | 0.000211 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.route53 import Route53DNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_ROUTE53
class Route53Tests(unittest.TestCase):
def setUp(self):
Route53DNSDriver.connectionCls.conn_class = Route53MockHttp
Route53MockHttp.type = None
self.driver = Route53DNSDriver(*DNS_PARAMS_ROUTE53)
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 10)
self.assertTrue(RecordType.A in record_types)
def test_list_zones(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 5)
zone = zones[0]
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 't.com')
def test_list_records(self):
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 10)
record = records[1]
self.assertEqual(record.name, 'www')
self.assertEqual(record.id, 'A:www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '208.111.35.173')
self.assertEqual(record.extra['ttl'], 86400)
record = records[3]
self.assertEqual(record.type, RecordType.MX)
self.assertEqual(record.data, 'ASPMX.L.GOOGLE.COM.')
self.assertEqual(record.extra['priority'], 1)
record = records[4]
self.assertEqual(record.type, RecordType.MX)
self.assertEqual(record.data, 'ALT1.ASPMX.L.GOOGLE.COM.')
self.assertEqual(record.extra['priority'], 5)
record = records[8]
self.assertEqual(record.type, RecordType.SRV)
self.assertEqual(record.data, 'xmpp-server.example.com.')
self.assertEqual(record.extra['priority'], 1)
self.assertEqual(record.extra['weight'], 10)
self.assertEqual(record.extra['port'], 5269)
def test_get_zone(self):
zone = self.driver.get_zone(zone_id='47234')
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 't.com')
def test_get_record(self):
record = self.driver.get_record(zone_id='47234',
record_id='CNAME:wibble')
self.assertEqual(record.name, 'wibble')
self.assertEqual(record.type, RecordType.CNAME)
self.assertEqual(record.data, 't.com')
def test_list_records_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.list_records(zone=zone)
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
    def test_get_zone_does_not_exist(self):
        Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.get_zone(zone_id='47234')
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, '47234')
else:
self.fail('Exception was not thrown')
def test_get_record_zone_does_not_exist(self):
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
        try:
            self.driver.get_record(zone_id='4444', record_id='28536')
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_record_record_does_not_exist(self):
Route53MockHttp.type = 'RECORD_DOES_NOT_EXIST'
rid = 'CNAME:doesnotexist.t.com'
try:
self.driver.get_record(zone_id='47234',
record_id=rid)
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_zone(self):
zone = self.driver.create_zone(domain='t.com', type='master',
ttl=None, extra=None)
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.domain, 't.com')
def test_create_record(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='www', zone=zone,
type=RecordType.A, data='127.0.0.1',
extra={'ttl': 0}
)
self.assertEqual(record.id, 'A:www')
self.assertEqual(record.name, 'www')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_create_record_zone_name(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.A, data='127.0.0.1',
extra={'ttl': 0}
)
self.assertEqual(record.id, 'A:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_create_TXT_record(self):
"""
Check that TXT records are created in quotes
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.TXT, data='test'
)
self.assertEqual(record.id, 'TXT:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.TXT)
self.assertEqual(record.data, '"test"')
def test_create_TXT_record_quoted(self):
"""
Check that TXT values already quoted are not changed
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.TXT, data='"test"'
)
self.assertEqual(record.id, 'TXT:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.TXT)
self.assertEqual(record.data, '"test"')
def test_create_SPF_record(self):
"""
Check that SPF records are created in quotes
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.SPF, data='test'
)
self.assertEqual(record.id, 'SPF:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.SPF)
self.assertEqual(record.data, '"test"')
def test_create_SPF_record_quoted(self):
"""
Check that SPF values already quoted are not changed
"""
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.SPF, data='"test"'
)
self.assertEqual(record.id, 'SPF:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
|
the-zebulan/CodeWars
|
katas/kyu_7/sum_of_all_arguments.py
|
Python
|
mit
| 42 | 0 |
def sum_args(*args):
    return sum(args)
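# Example calls (illustrative): the *args packing lets the kata accept any arity.
#   sum_args(1, 2, 3)  # -> 6
#   sum_args()         # -> 0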
|
boltnev/iktomi
|
iktomi/utils/i18n.py
|
Python
|
mit
| 2,033 | 0.001476 |
# i18n markers
def N_(msg):
'''
Single translatable string marker.
Does nothing, just a marker for \\*.pot file compilers.
Usage::
n = N_('translate me')
translated = env.gettext(n)
'''
return msg
class M_(object):
'''
Marker for translatable string with plural form.
    Does not make a translation, just encapsulates data about
the translatable string.
:param single: a single form
:param plural: a plural form. Count can be included in %\-format syntax
:param count_field: a key used to format
Usage::
message = M_(u'max length is %(max)d symbol',
u'max length is %(max)d symbols',
count_field="max")
m = message % {'max': 10}
trans = env.ngettext(m.single,
                             m.plural,
                             m.count
                             ) % m.format_args
'''
def __init__(self, single, plural, count_field='count', format_args=None):
self.single = single
self.plural = plural
self.count_field = count_field
self.format_args = format_args
def __mod__(self, format_args):
'''
Returns a copy of the object with bound formatting args (as dict).
A key equal to `count_field` must be in `format_args`.
'''
return self.__class__(self.single, self.plural, count_field=self.count_field,
format_args=format_args)
@property
def count(self):
'''
A count based on `count_field` and `format_args`.
'''
args = self.format_args
if args is None or \
(isinstance(args, dict) and self.count_field not in args):
raise TypeError("count is required")
return args[self.count_field] if isinstance(args, dict) else args
def __unicode__(self):
args = self.format_args
if self.count == 1:
return self.single % args
return self.plural % args
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/imp.py
|
Python
|
gpl-3.0
| 10,631 | 0.000094 |
"""This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import create_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
create_dynamic = None
from importlib._bootstrap import _ERR_MSG, _exec, _load, _builtin_from_name
from importlib._bootstrap_external import SourcelessFileLoader
from importlib import machinery
from importlib import util
import importlib
import os
import sys
import tokenize
import types
import warnings
warnings.warn("the imp module is deprecated in favour of importlib; "
"see the module's documentation for alternative uses",
PendingDeprecationWarning, stacklevel=2)
# DEPRECATED
SEARCH_ERROR = 0
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PY_RESOURCE = 4
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
PY_CODERESOURCE = 8
IMP_HOOK = 9
def new_module(name):
"""**DEPRECATED**
Create a new module.
The module is not entered into sys.modules.
"""
return types.ModuleType(name)
def get_magic():
"""**DEPRECATED**
Return the magic number for .pyc files.
"""
return util.MAGIC_NUMBER
def get_tag():
"""Return the magic tag for .pyc files."""
return sys.implementation.cache_tag
def cache_from_source(path, debug_override=None):
"""**DEPRECATED**
Given the path to a .py file, return the path to its .pyc file.
The .py file does not need to exist; this simply returns the path to the
.pyc file calculated as if the .py file were imported.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return util.cache_from_source(path, debug_override)
def source_from_cache(path):
"""**DEPRECATED**
Given the path to a .pyc. file, return the path to its .py file.
The .pyc
|
file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
return util.source_from_cache(path)
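def _demo_cache_paths(source_path='/tmp/pkg/mod.py'):
    # Hedged usage sketch, not part of the original imp module: round-trips a
    # hypothetical source path through the PEP 3147 cache-path helpers above.
    # Neither file needs to exist for the path computation.
    cached = cache_from_source(source_path)   # e.g. '/tmp/pkg/__pycache__/mod.<tag>.pyc'
    original = source_from_cache(cached)      # back to '/tmp/pkg/mod.py'
    return cached, original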
def get_suffixes():
"""**DEPRECATED**"""
extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
source = [(
|
s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
return extensions + source + bytecode
class NullImporter:
"""**DEPRECATED**
Null import object.
"""
def __init__(self, path):
if path == '':
raise ImportError('empty pathname', path='')
elif os.path.isdir(path):
raise ImportError('existing directory', path=path)
def find_module(self, fullname):
"""Always returns None."""
return None
class _HackedGetData:
"""Compatibility support for 'file' arguments of various load_*()
functions."""
def __init__(self, fullname, path, file=None):
super().__init__(fullname, path)
self.file = file
def get_data(self, path):
"""Gross hack to contort loader to deal w/ load_*()'s bad API."""
if self.file and path == self.path:
if not self.file.closed:
file = self.file
else:
self.file = file = open(self.path, 'r')
with file:
# Technically should be returning bytes, but
# SourceLoader.get_code() just passed what is returned to
# compile() which can handle str. And converting to bytes would
# require figuring out the encoding to decode to and
# tokenize.detect_encoding() only accepts bytes.
return file.read()
else:
return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader):
"""Compatibility support for implementing load_source()."""
def load_source(name, pathname, file=None):
loader = _LoadSourceCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = machinery.SourceFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
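def _demo_load_source():
    # Hedged usage sketch, not part of the original imp module: writes a tiny
    # module to a temporary file, imports it with load_source() above, and
    # returns a value defined in the freshly loaded module.
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write('ANSWER = 42\n')
        path = f.name
    try:
        module = load_source('_load_source_demo_mod', path)
    finally:
        os.remove(path)
    return module.ANSWER  # -> 42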
class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader):
"""Compatibility support for implementing load_compiled()."""
def load_compiled(name, pathname, file=None):
"""**DEPRECATED**"""
loader = _LoadCompiledCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = SourcelessFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
def load_package(name, path):
"""**DEPRECATED**"""
if os.path.isdir(path):
extensions = (machinery.SOURCE_SUFFIXES[:] +
machinery.BYTECODE_SUFFIXES[:])
for extension in extensions:
path = os.path.join(path, '__init__'+extension)
if os.path.exists(path):
break
else:
raise ValueError('{!r} is not a package'.format(path))
spec = util.spec_from_file_location(name, path,
submodule_search_locations=[])
if name in sys.modules:
return _exec(spec, sys.modules[name])
else:
return _load(spec)
def load_module(name, file, filename, details):
"""**DEPRECATED**
Load a module, given information returned by find_module().
The module name must include the full package name, if any.
"""
suffix, mode, type_ = details
if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
raise ValueError('invalid file open mode {!r}'.format(mode))
elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
msg = 'file object required for import (type code {})'.format(type_)
raise ValueError(msg)
elif type_ == PY_SOURCE:
return load_source(name, filename, file)
elif type_ == PY_COMPILED:
return load_compiled(name, filename, file)
elif type_ == C_EXTENSION and load_dynamic is not None:
if file is None:
with open(filename, 'rb') as opened_file:
return load_dynamic(name, filename, opened_file)
else:
return load_dynamic(name, filename, file)
elif type_ == PKG_DIRECTORY:
return load_package(name, filename)
elif type_ == C_BUILTIN:
return init_builtin(name)
elif type_ == PY_FROZEN:
return init_frozen(name)
else:
msg = "Don't know how to import {} (type code {})".format(name, type_)
raise ImportError(msg, name=name)
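# Hedged usage sketch, not part of the original module: the classic pairing of
# find_module() below with load_module() above; 'tokenize' is only an
# illustrative module name, and the caller is responsible for closing the
# returned file object.
#     file, pathname, details = find_module('tokenize')
#     try:
#         mod = load_module('tokenize', file, pathname, details)
#     finally:
#         if file is not None:
#             file.close()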
def find_module(name, path=None):
"""**DEPRECATED**
Search for a module.
If path is omitted or None, search for a built-in, frozen or special
module and continue search in sys.path. The module name cannot
contain '.'; to search for a submodule of a package, pass the
submodule name and the package's __path__.
"""
if not isinstance(name, str):
        raise TypeError("'name' must be a str, not {}".format(type(name)))
|