repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---|
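The rows that follow use this schema; each code listing below is one row's prefix, middle, and suffix columns rendered in sequence. As a minimal sketch of how a fill-in-the-middle dataset with this schema could be inspected with the Hugging Face `datasets` library (the dataset identifier below is a placeholder, not a real dataset name):

from datasets import load_dataset

# Placeholder dataset id; substitute the real one that carries this schema.
ds = load_dataset("your-org/your-fim-dataset", split="train")
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
# The original source file is the concatenation of the three text columns.
source = row["prefix"] + row["middle"] + row["suffix"]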
python-cmd2/cmd2 | cmd2/py_bridge.py | Python | mit | 4,605 | 0.001303 |
# coding=utf-8
"""
Bridges calls made inside of a Python environment to the Cmd2 host app
while maintaining a reasonable degree of isolation between the two.
"""
import sys
from contextlib import (
redirect_stderr,
redirect_stdout,
)
from typing import (
IO,
TYPE_CHECKING,
Any,
List,
NamedTuple,
Optional,
TextIO,
Union,
cast,
)
from .utils import ( # namedtuple_with_defaults,
StdSim,
)
if TYPE_CHECKING: # pragma: no cover
import cmd2
class CommandResult(NamedTuple):
"""Encapsulates the results from a cmd2 app command
:stdout: str - output captured from stdout while this command is executing
:stderr: str - output captured from stderr while this command is executing
:stop: bool - return value of onecmd_plus_hooks after it runs the given
command line.
:data: possible data populated by the command.
Any combination of these fields can be used when developing a scripting API
for a given command. By default stdout, stderr, and stop will be captured
for you. If there is additional command specific data, then write that to
cmd2's last_result member. That becomes the data member of this tuple.
In some cases, the data member may contain everything needed for a command
and storing stdout and stderr might just be a duplication of data that
wastes memory. In that case, the StdSim can be told not to store output
with its pause_storage member. While this member is True, any output sent
to StdSim won't be saved in its buffer.
The code would look like this::
if isinstance(self.stdout, StdSim):
self.stdout.pause_storage = True
if isinstance(sys.stderr, StdSim):
sys.stderr.pause_storage = True
See :class:`~cmd2.utils.StdSim` for more information.
.. note::
Named tuples are immutable. The contents are there for access,
not for modification.
"""
stdout: str = ''
stderr: str = ''
stop: bool = False
data: Any = None
def __bool__(self) -> bool:
|
"""Returns True if the command succeeded, otherwise False"""
# If data was set, then use it to determine success
if self.data is not None:
return bool(self.data)
# Otherwise check if stderr was filled out
else:
return not self.stderr
class PyBridge:
"""Provides a Python API wrapper for application commands."""
    def __init__(self, cmd2_app: 'cmd2.Cmd') -> None:
self._cmd2_app = cmd2_app
self.cmd_echo = False
# Tells if any of the commands run via __call__ returned True for stop
self.stop = False
def __dir__(self) -> List[str]:
"""Return a custom set of attribute names"""
attributes: List[str] = []
attributes.insert(0, 'cmd_echo')
return attributes
def __call__(self, command: str, *, echo: Optional[bool] = None) -> CommandResult:
"""
Provide functionality to call application commands by calling PyBridge
ex: app('help')
:param command: command line being run
:param echo: If provided, this temporarily overrides the value of self.cmd_echo while the
command runs. If True, output will be echoed to stdout/stderr. (Defaults to None)
"""
if echo is None:
echo = self.cmd_echo
# This will be used to capture _cmd2_app.stdout and sys.stdout
copy_cmd_stdout = StdSim(cast(Union[TextIO, StdSim], self._cmd2_app.stdout), echo=echo)
# Pause the storing of stdout until onecmd_plus_hooks enables it
copy_cmd_stdout.pause_storage = True
# This will be used to capture sys.stderr
copy_stderr = StdSim(sys.stderr, echo=echo)
self._cmd2_app.last_result = None
stop = False
try:
self._cmd2_app.stdout = cast(TextIO, copy_cmd_stdout)
with redirect_stdout(cast(IO[str], copy_cmd_stdout)):
with redirect_stderr(cast(IO[str], copy_stderr)):
stop = self._cmd2_app.onecmd_plus_hooks(command, py_bridge_call=True)
finally:
with self._cmd2_app.sigint_protection:
self._cmd2_app.stdout = cast(IO[str], copy_cmd_stdout.inner_stream)
self.stop = stop or self.stop
# Save the result
result = CommandResult(
stdout=copy_cmd_stdout.getvalue(),
stderr=copy_stderr.getvalue(),
stop=stop,
data=self._cmd2_app.last_result,
)
return result
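A hypothetical usage sketch (not part of py_bridge.py): CommandResult's __bool__ lets callers test success directly, with data taking precedence over stderr.

# Assumes the CommandResult class defined above is importable.
ok = CommandResult(stdout='done\n', data=42)
failed = CommandResult(stderr='error: no such command\n')
assert bool(ok) and not bool(failed)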
|
pordnajela/AlgoritmosCriptografiaClasica | Transposicion/TransposicionGrupo.py | Python | apache-2.0 | 3,845 | 0.036468 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
class TransposicionGrupo(object):
"""
"""
def __init__(self, cadena=None, clave=None):
        self.cadena = cadena #Receives a list; the length of each element is the length of the key
self.clave = clave
self.textoClaro = ""
self.textoCifrado = ""
        self.caracterRelleno = "₫" #₫
def cifrar(self, cantidadRellenoB64=0):
textoCifrado = ""
linea_a_cifrar = None
saltosLinea = len(self.cadena)-1
i = 0
for linea in self.cadena:
if i < saltosLinea:
linea_a_cifrar = self.dividirGrupos(linea,cantidadRellenoB64)
textoCifrado = textoCifrado + self.__cifrar(linea_a_cifrar) + "\n"
i += 1
else:
linea_a_cifrar = self.dividirGrupos(linea, cantidadRellenoB64)
textoCifrado = textoCifrado + self.__cifrar(linea_a_cifrar)
self.textoCifrado = textoCifrado
def descifrar(self, cantidadRellenoB64=0):
textoDescifrado = ""
linea_a_descifrar = None
saltosLinea = len(self.cadena)-1
i = 0
for linea in self.cadena:
if i < saltosLinea:
linea_a_descifrar = self.dividirGrupos(linea)
textoDescifrado = textoDescifrado + self.__descifrar(linea_a_descifrar) + "\n"
i += 1
else:
linea_a_descifrar = self.dividirGrupos(linea, cantidadRellenoB64)
textoDescifrado = textoDescifrado + self.__descifrar(linea_a_descifrar)
self.textoClaro = textoDescifrado
    #---------------------------------------------------------- Helper methods
def dividirGrupos(self, linea, cantidadRellenoB64=0):
lineaNueva = linea
tamanioLinea = len(linea)-cantidadRellenoB64
tamanioBloque = len(str(self.clave))
#print(tamanioLinea, tamanioBloque)
if tamanioLinea % tamanioBloque != 0:
lineaNueva = self.adicionarRelleno(linea, tamanioLinea, tamanioBloque)
tamanioLinea = len(lineaNueva)
nuevaCadena = list()
bloque = ""
i = 0
while i < tamanioLinea:
bloque = bloque + lineaNueva[i]
i += 1
if i % tamanioBloque == 0 and i > 0:
nuevaCadena.append(bloque)
bloque = ""
return nuevaCadena
def adicionarRelleno(self, linea, tamanioLinea, tamanioBloque):
if tamanioLinea % tamanioBloque == 0:
return linea
else:
linea = linea + self.caracterRelleno
return self.adicionarRelleno(linea ,len(linea), tamanioBloque)
def eliminarRelleno(self, cadena):
apareceRelleno = 0
nuevaLinea = ""
if len(cadena) > 1:
cadena.pop()
for linea in cadena:
apareceRelleno = linea.find(self.caracterRelleno)
nuevaLinea += linea[0:int(apareceRelleno)] +"\n"
return nuevaLinea
def intercambiar_cifrar(self, bloque, clave):
tamanioBloque = len(bloque)
claveStr = str(clave)
nuevoBloque = list()
i = 0
pos = 0
while i < tamanioBloque:
pos = int(claveStr[i])-1
nuevoBloque.insert(i, bloque[pos])
i += 1
nuevoBloque = ''.join(nuevoBloque)
return nuevoBloque
def intercambiar_descifrar(self, bloque, clave):
tamanioBloque = len(bloque)
claveStr = str(clave)
nuevoBloque = {}
bloqueDescifrado = list()
i = 0
pos = 0
while i < tamanioBloque:
pos = int(claveStr[i])-1
nuevoBloque.update({pos:bloque[i]})
i += 1
for llave, valor in nuevoBloque.items():
bloqueDescifrado.append(valor)
bloqueDescifrado = ''.join(bloqueDescifrado)
return bloqueDescifrado
    #----------------------------------------------------------------- Private methods
def __cifrar(self, linea_a_cifrar, cantidadRellenoB64=0):
lineaNueva = list()
for bloque in linea_a_cifrar:
lineaNueva.append(self.intercambiar_cifrar(bloque, self.clave))
lineaNueva = ''.join(lineaNueva)
return lineaNueva
def __descifrar(self, linea_a_descifrar, cantidadRellenoB64=0):
lineaNueva = list()
for bloque in linea_a_descifrar:
lineaNueva.append(self.intercambiar_descifrar(bloque, self.clave))
lineaNueva = ''.join(lineaNueva)
return lineaNueva
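A hypothetical usage sketch for the class above; the 4-digit key 2413 and the sample line are illustrative, and the number of digits in the key sets the block size.

# cadena is a list of lines; each block of len(str(clave)) characters is permuted by the key digits.
tg = TransposicionGrupo(cadena=["HOLAMUNDOCRIPTO"], clave=2413)
tg.cifrar()
print(tg.textoCifrado)          # ciphertext, padded with "₫" up to a multiple of 4
tg2 = TransposicionGrupo(cadena=[tg.textoCifrado], clave=2413)
tg2.descifrar()
print(tg2.textoClaro)           # original text plus any trailing padding characters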
|
daryl314/markdown-browser | pycmark/cmarkgfm/CmarkDocument.py | Python | mit | 3,664 | 0.002729 |
import ctypes
from . import cmarkgfm
from ..util.TypedTree import TypedTree
cmarkgfm.document_to_html.restype = ctypes.POINTER(ctypes.c_char)
class CmarkDocument(object):
def __init__(self, txt, encoding='utf_8'):
if not isinstance(txt, bytes):
txt = txt.encode(encoding=encoding)
self._doc = cmarkgfm.string_to_document(txt)
def toHTML(self):
result = cmarkgfm.document_to_html(self._doc)
out = ctypes.cast(result, ctypes.c_char_p).value.decode()
cmarkgfm.cmark_get_default_mem_allocator().contents.free(result)
return out
def toLatex(self):
result = cmarkgfm.document_to_latex(self._doc)
out = ctypes.cast(result, ctypes.c_char_p).value.decode()
cmarkgfm.cmark_get_default_mem_allocator().contents.free(result)
return out
def toAST(self):
return TypedTree.Build('Document', nodes=[self._toAST(c) for c in self._children(self._doc)])
##### AST GENERATION #####
@classmethod
def _children(cls, node):
out = [cmarkgfm.cmark_node_first_child(node)]
while out[-1]: # iterate until null pointer
out.append(cmarkgfm.cmark_node_next(out[-1]))
return tuple(out[:-1])
@classmethod
def _position(cls, node):
return TypedTree.Build('position',
r1=cmarkgfm.cmark_node_get_start_line(node),
c1=cmarkgfm.cmark_node_get_start_column(node),
r2=cmarkgfm.cmark_node_get_end_line(node),
c2=cmarkgfm.cmark_node_get_end_column(node))
@classmethod
    def _toAST(cls, node, children=None, **attr):
tag = cmarkgfm.cmark_node_get_type_string(node).decode()
if tag == 'table' and children is None:
return cls._tableToAST(node)
elif tag == 'list' and len(attr) == 0:
return cls._listToAST(node)
if children is None:
children = [cls._toAST(c) for c in cls._children(node)]
if tag in {'text', 'code_block', 'code', 'html_block', 'html_inline', 'latex_block', 'latex_inline'}:
            attr['Text'] = cmarkgfm.cmark_node_get_literal(node).decode()
if tag == 'heading':
attr['Level'] = cmarkgfm.cmark_node_get_heading_level(node)
if tag == 'code_block':
attr['Info'] = cmarkgfm.cmark_node_get_fence_info(node).decode()
if tag in {'link', 'image'}:
attr['Destination'] = cmarkgfm.cmark_node_get_url(node).decode()
attr['Title'] = cmarkgfm.cmark_node_get_title(node).decode()
return TypedTree.Build(tag, position=cls._position(node), children=children, **attr)
@classmethod
def _listToAST(cls, node):
attr = {
'Type': ['None', 'Bullet', 'Ordered'][cmarkgfm.cmark_node_get_list_type(node)],
'Tight': cmarkgfm.cmark_node_get_list_tight(node) != 0
}
if attr['Type'] == 'Ordered':
attr['Start'] = cmarkgfm.cmark_node_get_list_start(node)
attr['Delim'] = ["None", "Period", "Paren"][cmarkgfm.cmark_node_get_list_delim(node)]
return cls._toAST(node, **attr)
@classmethod
def _tableToAST(cls, node):
align = cmarkgfm.cmark_gfm_extensions_get_table_alignments(node)
rows = []
for tr in cls._children(node):
cols = []
for td, a in zip(cls._children(tr), align):
cols.append(cls._toAST(td, Alignment={'l': "Left", 'c': "Center", 'r': "Right"}.get(a,'Left')))
rows.append(cls._toAST(tr, children=cols))
return cls._toAST(node, children=rows)
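A hypothetical usage sketch (assumes the bundled cmarkgfm bindings import cleanly):

doc = CmarkDocument("# Title\n\nSome *emphasis* and a [link](https://example.com).")
print(doc.toHTML())    # rendered HTML string
ast = doc.toAST()      # TypedTree of node types, positions, and attributes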
|
augustoqm/MCLRE | src/recommender_execution/run_rec_mrbpr.py | Python | gpl-3.0 | 5,871 | 0.002044 |
#!/usr/bin/python
# =======================================================================
# This file is part of MCLRE.
#
# MCLRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MCLRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCLRE. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2015 Augusto Queiroz de Macedo <augustoqmacedo@gmail.com>
# =======================================================================
"""
MRBPR Runner
"""
from os import path
from argparse import ArgumentParser
import shlex
import subprocess
import multiprocessing
import logging
from run_rec_functions import read_experiment_atts
from mrbpr.mrbpr_runner import create_meta_file, run
##############################################################################
# GLOBAL VARIABLES
##############################################################################
# Define the Logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(name)s : %(message)s',
level=logging.INFO)
LOGGER = logging.getLogger('mrbpr.run_rec_mrbpr')
LOGGER.setLevel(logging.INFO)
##############################################################################
# AUXILIARY FUNCTIONS
##############################################################################
def get_mrbpr_confs():
""" Yield the MRBPR Models Configurations """
pass
##############################################################################
# MAIN
##############################################################################
if __name__ == '__main__':
|
# ------------------------------------------------------------------------
# Define the argument parser
PARSER = ArgumentParser(description="Script that runs the mrbpr event recommender algorithms for" \
" a given 'experiment_name' with data from a given 'region'")
PARSER.add_argument("-e", "--experiment_name", type=str, required=True,
help="The Experiment Name (e.g. recsys-15)")
PARSER.add_argument("-r", "--region", type=str, required=True,
help="The data Region (e.g. san_jose)")
PARSER.add_argument("-a", "--algorithm", type=str, required=True,
help="The algorithm name (used only to differenciate our proposed MRBPR to the others")
ARGS = PARSER.parse_args()
EXPERIMENT_NAME = ARGS.experiment_name
REGION = ARGS.region
ALGORITHM_NAME = ARGS.algorithm
LOGGER.info(ALGORITHM_NAME)
DATA_DIR = "data"
PARTITIONED_DATA_DIR = path.join(DATA_DIR, "partitioned_data")
PARTITIONED_REGION_DATA_DIR = path.join(PARTITIONED_DATA_DIR, REGION)
EXPERIMENT_DIR = path.join(DATA_DIR, "experiments", EXPERIMENT_NAME)
EXPERIMENT_REGION_DATA_DIR = path.join(EXPERIMENT_DIR, REGION)
# LOGGER.info('Defining the MRBPR relation weights file...')
subprocess.call(shlex.split("Rscript %s %s %s" %
(path.join("src", "recommender_execution", "mrbpr", "mrbpr_relation_weights.R"),
EXPERIMENT_NAME, ALGORITHM_NAME)))
# ------------------------------------------------------------------------
# Reading and Defining the Experiment Attributes
EXPERIMENT_ATTS = read_experiment_atts(EXPERIMENT_DIR)
PARALLEL_RUNS = multiprocessing.cpu_count() - 1
TRAIN_RELATION_NAMES = EXPERIMENT_ATTS['%s_relation_names' % ALGORITHM_NAME.lower()]
TRAIN_RELATION_FILES = ["%s_train.tsv" % name for name in TRAIN_RELATION_NAMES]
PARTITIONS = reversed(EXPERIMENT_ATTS['partitions'])
# ------------------------------------------------------------------------
# Reading and Defining the Experiment Attributes
META_FILE = path.join(EXPERIMENT_DIR, "%s_meetup.meta" % ALGORITHM_NAME.lower())
LOGGER.info('Creating the META relations file...')
create_meta_file(TRAIN_RELATION_NAMES, META_FILE, PARTITIONED_DATA_DIR)
# ------------------------------------------------------------------------
# Fixed parameters
# ------------------------------------------------------------------------
# Algorithm (0 - MRBPR)
ALGORITHM = 0
# Size of the Ranked list of events per User
RANK_SIZE = 100
# Save Parameters
SAVE_MODEL = 0
# Hyper Parameters
REGULARIZATION_PER_ENTITY = ""
REGULARIZATION_PER_RELATION = ""
RELATION_WEIGHTS_FILE = path.join(EXPERIMENT_DIR, "%s_relation_weights.txt" % ALGORITHM_NAME.lower())
# ------------------------------------------------------------------------
if ALGORITHM_NAME == "MRBPR":
LEARN_RATES = [0.1]
NUM_FACTORS = [300]
NUM_ITERATIONS = [1500]
elif ALGORITHM_NAME == "BPR-NET":
LEARN_RATES = [0.1]
NUM_FACTORS = [200]
NUM_ITERATIONS = [600]
else:
LEARN_RATES = [0.1]
NUM_FACTORS = [10]
NUM_ITERATIONS = [10]
MRBPR_BIN_PATH = path.join("src", "recommender_execution", "mrbpr", "mrbpr.bin")
LOGGER.info("Start running MRBPR Process Scheduler!")
run(PARTITIONED_REGION_DATA_DIR, EXPERIMENT_REGION_DATA_DIR,
REGION, ALGORITHM, RANK_SIZE, SAVE_MODEL, META_FILE,
REGULARIZATION_PER_ENTITY, REGULARIZATION_PER_RELATION,
RELATION_WEIGHTS_FILE, TRAIN_RELATION_FILES,
PARTITIONS, NUM_ITERATIONS, NUM_FACTORS, LEARN_RATES,
MRBPR_BIN_PATH, PARALLEL_RUNS, ALGORITHM_NAME)
LOGGER.info("DONE!")
|
honzajavorek/tipi | tipi/repl.py | Python | mit | 2,146 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tipi.compat import unicode
from tipi.html import HTMLFragment
__all__ = ('Replacement', 'replace')
class Replacement(object):
"""Replacement representation."""
skipped_tags = (
'code', 'kbd', 'pre', 'samp', 'script', 'style', 'tt', 'xmp'
)
textflow_tags = (
'b', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'cite',
'dfn', 'em', 'kbd', 'strong', 'samp', 'var', 'a', 'bdo', 'q', 'script',
'span', 'sub', 'sup'
)
    def __init__(self, pattern, replacement):
self.pattern = pattern
self.replacement = replacement
def _is_replacement_allowed(self, s):
"""Tests whether replacement is allowed on given piece of HTML text."""
if any(tag in s.parent_tags for tag in self.skipped_tags):
return False
if any(tag not in self.textflow_tags for tag in s.involved_tags):
return False
return True
def replace(self, html):
"""Perform replacements on given HTML fragment."""
self.html = html
text = html.text()
positions = []
def perform_replacement(match):
offset = sum(positions)
start, stop = match.start() + offset, match.end() + offset
s = self.html[start:stop]
if self._is_replacement_allowed(s):
repl = match.expand(self.replacement)
self.html[start:stop] = repl
else:
repl = match.group() # no replacement takes place
positions.append(match.end())
return repl
while True:
if positions:
text = text[positions[-1]:]
text, n = self.pattern.subn(perform_replacement, text, count=1)
if not n: # all is already replaced
break
def replace(html, replacements=None):
"""Performs replacements on given HTML string."""
if not replacements:
return html # no replacements
html = HTMLFragment(html)
for r in replacements:
r.replace(html)
return unicode(html)
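A hypothetical usage sketch for the API above; the pattern and the curly-quote replacement are illustrative.

import re

# Curl straight double quotes, but leave <code>-like content alone.
quotes = Replacement(re.compile(r'"([^"]*)"'), '\u201c\\1\u201d')
print(replace('He said "hello" in <code>print("hi")</code>', [quotes]))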
|
Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_recoverable_databases_operations.py | Python | mit | 10,559 | 0.004072 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2014-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recoverableDatabases/{databaseName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_server_request(
    subscription_id: str,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> HttpRequest:
    api_version = "2014-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recoverableDatabases')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class RecoverableDatabasesOperations(object):
"""RecoverableDatabasesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> "_models.RecoverableDatabase":
"""Gets a recoverable database, which is a resource representing a database's geo backup.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RecoverableDatabase, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.RecoverableDatabase
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoverableDatabase"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RecoverableDatabase', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recoverableDatabases/{databaseName}'} # type: ignore
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> Iterable["_models.RecoverableDatabaseListResult"]:
"""Gets a list of recoverable databases.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RecoverableDatabaseListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.RecoverableDatabaseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kw
|
explosion/spaCy | spacy/tests/lang/da/test_exceptions.py | Python | mit | 1,824 | 0.000551 |
import pytest
@pytest.mark.parametrize("text", ["ca.", "m.a.o.", "Jan.", "Dec.", "kr.", "jf."])
def test_da_tokenizer_handles_abbr(da_tokenizer, text):
tokens = da_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["Jul.", "jul.", "Tor.", "Tors."])
def test_da_tokenizer_handles_ambiguous_abbr(da_tokenizer, text):
tokens = da_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["1.", "10.", "31."])
def test_da_tokenizer_handles_dates(da_tokenizer, text):
tokens = da_tokenizer(text)
assert len(tokens) == 1
def test_da_tokenizer_handles_exc_in_text(da_tokenizer):
text = "Det er bl.a. ikke meningen"
tokens = da_tokenizer(text)
assert len(tokens) == 5
assert tokens[2].text == "bl.a."
def test_da_tokenizer_handles_custom_base_exc(da_tokenizer):
text = "Her er noget du kan kigge i."
tokens = da_tokenizer(text)
assert len(tokens) == 8
assert tokens[6].text == "i"
assert tokens[7].text == "."
@pytest.mark.parametrize(
"text,n_tokens",
[
("Godt og/eller skidt", 3),
("Kør 4 km/t på vejen", 5),
("Det blæser 12 m/s.", 5),
("Det blæser 12 m/sek. på havnen", 6),
("Windows 8/Windows 10", 5),
("Billeten virker til bus/tog/metro", 8),
("26/02/2019", 1),
("Kristiansen c/o Madsen", 3),
("Sprogteknologi a/s", 2),
("De boede i A/B Bellevue", 5),
# note: skipping due to weirdness in UD_Danish-DDT
# ("Rotorhastigheden er 3400 o/m.", 5),
("Jeg købte billet t/r.", 5),
("Murerarbejdsmand m/k søges", 3),
        ("Netværket kører over TCP/IP", 4),
],
)
def test_da_tokenizer_slash(da_tokenizer, text, n_tokens):
tokens = da_tokenizer(text)
    assert len(tokens) == n_tokens
|
plotly/plotly.py | packages/python/plotly/plotly/validators/scatter/_legendrank.py | Python | mit | 406 | 0.002463 |
import _plotly_utils.basevalidators
class LegendrankValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="legendrank", parent_name="scatter", **kwargs):
super(LegendrankValidator, self).__init__(
|
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
|
chenc10/Spark-PAF | examples/src/main/python/mllib/gradient_boosting_regression_example.py | Python | apache-2.0 | 2,443 | 0.001637 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Gradient Boosted Trees Regression Example.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonGradientBoostedTreesRegressionExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a GradientBoostedTrees model.
# Notes: (a) Empty categoricalFeaturesInfo indicates all features are continuous.
# (b) Use more iterations in practice.
model = GradientBoostedTrees.trainRegressor(trainingData,
categoricalFeaturesInfo={}, numIterations=3)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testMSE = labelsAndPredictions.map(lambda (v, p): (v - p) * (v - p)).sum() /\
float(testData.count())
print('Test Mean Squared Error = ' + str(testMSE))
print('Learned regression GBT model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myGradientBoostingRegressionModel")
sameModel = GradientBoostedTreesModel.load(sc, "target/tmp/myGradientBoostingRegressionModel")
# $example off$
|
django-bmf/django-bmf | tests/appapis/test_sites_setting.py | Python | bsd-3-clause | 285 | 0 |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
# flake8: noqa
from __future__ import unicode_literals
from django.test import TestCase
from unittest import expectedFailure
class SettingTests(TestCase):
@expectedFailure
def test_fails(self):
self.assertTrue(False)
| |
keighrim/kaldi-yesno-tutorial | steps/nnet3/chain/gen_topo3.py | Python | apache-2.0 | 1,879 | 0.008515 |
#!/usr/bin/env python
# Copyright 2012 Johns Hopkins University (author: Daniel Povey)
# Generate a topology file. This allows control of the number of states in the
# non-silence HMMs, and in the silence HMMs. This is a modified version of
# 'utils/gen_topo.pl' that generates a different type of topology, one that we
# believe should be useful in the 'chain' model. Note: right now it doesn't
# have any real options, and it treats silence and nonsilence the same. The
# intention is that you write different versions of this script, or add options,
# if you experiment with it.
from __future__ import print_function
import argparse
parser = argparse.ArgumentParser(description="Usage: steps/nnet3/chain/gen_topo.py "
"<colon-separated-nonsilence-phones> <colon-separated-silence-phones>"
"e.g.: steps/nnet3/chain/gen_topo.pl 4:5:6:7:8:9:
|
10 1:2:3\n",
epilog="See egs/swbd/s5c/local/chain/train_tdnn_a.sh for example of usage.");
parser.add_argument("nonsilence_phones", type=str,
help="List of non-silence phones as integers, separated by colons, e.g. 4:5:6:7:8:9");
parser.add_argument("silence_phones", type=str,
help="List of silence phones as integers, separated by colons, e.g. 1:2:3");
args = parser.parse_args()
silence_phones = [ int(x) for x in args.silence_phones.split(":") ]
nonsilence_phones = [ int(x) for x in args.nonsilence_phones.split(":") ]
all_phones = silence_phones + nonsilence_phones
print("<Topology>")
print("<TopologyEntry>")
print("<ForPhones>")
print(" ".join([str(x) for x in all_phones]))
print("</ForPhones>")
print("<State> 0 <PdfClass> 0 <Transition> 0 0.5 <Transition> 1 0.5 </State>")
print("<State> 1 </State>")
print("</TopologyEntry>")
print("</Topology>")
|
rchakra3/generic-experiment-loop | model/helpers/decision.py | Python | gpl-2.0 | 339 | 0 |
import random
class Decision(object):
    def __init__(self, name, min_val, max_val):
self.name = name
self.min_val = min_val
self.max_val = max_val
def generate_valid_val(self):
return random.uniform(self.min_val, self.max_val)
def get_range(self):
        return (self.min_val, self.max_val)
|
sou-komatsu/checkgear | checkgear/checkgear.py | Python | mit | 138 | 0.007246 |
#!/usr/bin/env python
# coding: utf-8
class CheckGear():
def __init__(self):
pass
def proc(self):
        print 'test'
|
bgxavier/nova | nova/objects/instance.py | Python | apache-2.0 | 57,332 | 0.000157 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.i18n import _LE
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups',
'pci_devices', 'tags']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'flavor', 'old_flavor',
'new_flavor', 'ec2_ids']
# These are fields that are optional and in instance_extra
_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests',
'flavor', 'vcpu_model']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS +
_INSTANCE_EXTRA_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups']
def _expected_cols(expected_attrs):
"""Return expected_attrs that are columns needing joining.
NB: This function may modify expected_attrs if one
requested attribute requires another.
"""
if not expected_attrs:
return expected_attrs
if ('system_metadata' in expected_attrs and
'flavor' not in expected_attrs):
# NOTE(danms): If the client asked for sysmeta, we have to
# pull flavor so we can potentially provide compatibility
expected_attrs.append('flavor')
simple_cols = [attr for attr in expected_attrs
if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
complex_cols = ['extra.%s' % field
for field in _INSTANCE_EXTRA_FIELDS
if field in expected_attrs]
if complex_cols:
simple_cols.append('extra')
simple_cols = filter(lambda x: x not in _INSTANCE_EXTRA_FIELDS,
simple_cols)
if (any([flavor in expected_attrs
for flavor in ['flavor', 'old_flavor', 'new_flavor']]) and
'system_metadata' not in simple_cols):
# NOTE(danms): While we're maintaining compatibility with
# flavor data being stored in system_metadata, we need to
# ask for it any time flavors are requested.
simple_cols.append('system_metadata')
expected_attrs.append('system_metadata')
return simple_cols + complex_cols
def compat_instance(instance):
"""Create a dict-like instance structure from an objects.Instance.
This is basically the same as nova.objects.base.obj_to_primitive(),
except that it includes some instance-specific details, like stashing
flavor information in system_metadata.
If you have a function (or RPC client) that needs to see the instance
as a dict that has flavor information in system_metadata, use this
to appease it (while you fix said thing).
:param instance: a nova.objects.Instance instance
:returns: a dict-based instance structure
"""
if not isinstance(instance, objects.Instance):
return instance
db_instance = copy.deepcopy(base.obj_to_primitive(instance))
flavor_attrs = [('', 'flavor'), ('old_', 'old_flavor'),
('new_', 'new_flavor')]
for prefix, attr in flavor_attrs:
flavor = (instance.obj_attr_is_set(attr) and
getattr(instance, attr) or None)
if flavor:
# NOTE(danms): If flavor is unset or None, don't
# copy it into the primitive's system_metadata
db_instance['system_metadata'] = \
flavors.save_flavor_info(
db_instance.get('system_metadata', {}),
flavor, prefix)
if attr in db_instance:
del db_instance[attr]
return db_instance
# TODO(berrange): Remove NovaObjectDictCompat
class Instance(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added info_cache
# Version 1.2: Added security_groups
# Version 1.3: Added expected_vm_state and admin_state_reset to
# save()
# Version 1.4: Added locked_by and deprecated locked
# Version 1.5: Added cleaned
# Version 1.6: Added pci_devices
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
# Version 1.9: Make uuid a non-None real string
# Version 1.10: Added use_slave to refresh and get_by_uuid
# Version 1.11: Update instance from database during destroy
    # Version 1.12: Added ephemeral_key_uuid
# Version 1.13: Added delete_metadata_key()
# Version 1.14: Added numa_topology
# Version 1.15: PciDeviceList 1.1
    # Version 1.16: Added pci_requests
# Version 1.17: Added tags
# Version 1.18: Added flavor, old_flavor, new_flavor
# Version 1.19: Added vcpu_model
# Version 1.20: Added ec2_ids
VERSION = '1.20'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'image_ref': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'hostname': fields.StringField(nullable=True),
'launch_index': fields.IntegerField(nullable=True),
'key_name': fields.StringField(nullable=True),
'key_data': fields.StringField(nullable=True),
'power_state': fields.IntegerField(nullable=True),
'vm_state': fields.StringField(nullable=True),
'task_state': fields.StringField(nullable=True),
'memory_mb': fields.IntegerField(nullable=True),
'vcpus': fields.IntegerField(nullable=True),
'root_gb': fields.IntegerField(nullable=True),
'ephemeral_gb': fields.IntegerField(nullable=True),
'ephemeral_key_uuid': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'launched_on': fields.StringField(nullable=True),
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
'locked': fields.BooleanField(default=Fa
|
mozilla/normandy | normandy/recipes/management/commands/update_recipe_signatures.py | Python | mpl-2.0 | 2,612 | 0.001914 |
from datetime import timedelta
import markus
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from normandy.recipes.models import Recipe
from normandy.recipes.exports import RemoteSettings
metrics = markus.get_metrics("normandy.signing.recipes")
class Command(BaseCommand):
"""
Update signatures for enabled Recipes that have no signature or an old signature
"""
help = "Update Recipe signatures"
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
"-f", "--force", action="store_true", help="Update signatures for all recipes"
)
def handle(self, *args, force=False, **options):
remote_settings = RemoteSettings()
if force:
recipes_to_update = Recipe.objects.only_enabled()
else:
recipes_to_update = self.get_outdated_recipes()
count = recipes_to_update.count()
if count == 0:
self.stdout.write("No out of date recipes to sign")
else:
self.stdout.write(f"Signing {count} recipes:")
for recipe in recipes_to_update:
self.stdout.write(" * " + recipe.approved_revision.name)
recipe.update_signature()
recipe.save()
remote_settings.publish(recipe, approve_changes=False)
# Approve all Remote Settings changes.
            remote_settings.approve_changes()
metrics.gauge("signed", count, tags=["force"] if force else [])
recipes_to_unsign = Recipe.objects.only_disabled().exclude(signature=None)
count = recipes_to_unsign.count()
if count == 0:
self.stdout.write("No disabled recipes to unsign")
else:
self.stdout.write(f"Unsigning {count} disabled recipes:")
for recipe in recipes_to_unsign:
self.stdout.write(" * "
|
+ recipe.approved_revision.name)
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
metrics.gauge("unsigned", count, tags=["force"] if force else [])
self.stdout.write("all signing done")
def get_outdated_recipes(self):
outdated_age = timedelta(seconds=settings.AUTOGRAPH_SIGNATURE_MAX_AGE)
outdated_filter = Q(signature__timestamp__lt=timezone.now() - outdated_age)
missing_filter = Q(signature=None)
return Recipe.objects.only_enabled().filter(outdated_filter | missing_filter)
|
huvermann/MyPiHomeAutomation | HomeAutomation/thingUtils.py | Python | mit | 125 | 0.016 |
import platform
def is_windows():
"""Return
|
s true if current platform is windows"""
|
return any(platform.win32_ver())
|
dowski/statvent | tests/test_thread_safety.py | Python | bsd-2-clause | 677 | 0.004431 |
import threading
import Queue
import statvent
jobs = Queue.Queue()
done = Queue.Queue()
def do_inc():
while True:
job = jobs.get()
if job is None:
|
done.put(None)
break
statvent.incr('thread.test')
def test_10k_iterations_in_N_threads_results_in_10k_incrs():
n = 25
threads = []
for i in xrange(n):
t = threading.Thread(target=do_inc)
t.start()
threads.append(t)
for i in xrange(5000):
jobs.put(i)
for i in xrange(n):
jobs.put(None)
for i in xrange(n):
done.get()
    actual = statvent.get_all()['thread.test']
assert actual == 5000, actual
|
balbinot/arghphot | arghphot/logutil.py | Python | mit | 652 | 0.010736 |
import logging
import colorlog
from logging.config import fileConfig
import json
class mylogger():
def __init__(self, sdict, logfn):
fileConfig('./logging_config.ini', defaults={'logfilename': logfn})
self.logger = logging.getLogger()
self.sdict = sdict
        #save or open from json file
def __call__(self, type, key, value, msg):
if key:
self.sdict[key] = value
if type==1:
self.logger.info(msg)
        elif type==2:
self.logger.warning(msg)
elif type==3:
self.logger.error(msg)
elif type==4:
self.logger.exception(msg)
|
JiaminXuan/leetcode-python | best_time_to_buy_and_sell_stock_iii/solution.py | Python | bsd-2-clause | 846 | 0 |
class Solution:
    # @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
if not prices:
return 0
        n = len(prices)
m1 = [0] * n
m2 = [0] * n
max_profit1 = 0
min_price1 = prices[0]
max_profit2 = 0
max_price2 = prices[-1]
for i in range(n):
max_profit1 = max(max_profit1, prices[i] - min_price1)
m1[i] = max_profit1
min_price1 = min(min_price1, prices[i])
for i in range(n):
max_profit2 = max(max_profit2, max_price2 - prices[n - 1 - i])
m2[n - 1 - i] = max_profit2
max_price2 = max(max_price2, prices[n - 1 - i])
max_profit = 0
for i in range(n):
max_profit = max(m1[i] + m2[i], max_profit)
return max_profit
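A hypothetical sanity check of the two-pass solution above on standard inputs:

s = Solution()
assert s.maxProfit([3, 3, 5, 0, 0, 3, 1, 4]) == 6   # two transactions: 3 + 3
assert s.maxProfit([1, 2, 3, 4, 5]) == 4            # one transaction is optimal
assert s.maxProfit([]) == 0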
|
imk1/IMKTFBindingCode | processLolaResults.py | Python | mit | 4,260 | 0.026761 |
import sys
import argparse
import numpy as np
def parseArgument():
# Parse the input
parser = argparse.ArgumentParser(description="Process results from LOLA")
parser.add_argument("--lolaResultsFileNameListFileName", required=True, help="List of file names with LOLA results")
parser.add_argument("--lolaHeadersFileName", required=Tr
|
ue, help="Headers for database region lists used in LOLA")
parser.add_argument("--lolaHeadersExcludeFileName", required=False, default=None, \
help="Headers for database region lists used in LOLA that sh
|
ould not be included, should be subset of lolaHeadersFileName")
parser.add_argument("--fileNamePartsInHeader", action='append', type=int, required=False, default=[1], \
help="Parts of the file name that are in the lola headers, where parts are separated by .'s")
parser.add_argument("--outputFileName", required=True, help="Name where p-values from LOLA will be recorded")
parser.add_argument("--singleFile", action='store_true', required=False, \
help="lolaResultsFileNameListFileName is the name of a file with LOLA results and not a file with a list of LOLA results file names")
parser.add_argument("--outputLog", action='store_true', required=False, \
help="Output the -log10 of the p-values and no headers")
options = parser.parse_args()
return options
def processLolaResults(options):
# Process results from LOLA
outputFile = open(options.outputFileName, 'w+')
if not options.outputLog:
# Include the headers
outputFile.write("TF")
lolaHeadersFile = open(options.lolaHeadersFileName)
lolaHeaders = [line.strip() for line in lolaHeadersFile]
lolaHeadersFile.close()
lolaHeadersExclude = []
if options.lolaHeadersExcludeFileName != None:
# There are headers that should be excluded
lolaHeadersExcludeFile = open(options.lolaHeadersExcludeFileName)
lolaHeadersExclude = [line.strip() for line in lolaHeadersExcludeFile]
lolaHeadersExcludeFile.close()
if not options.outputLog:
# Include the headers
for lh in lolaHeaders:
# Iterate through the headers and record each
if lh not in lolaHeadersExclude:
# The current header should be included
outputFile.write("\t" + lh)
outputFile.write("\n")
lolaResultsFileNameList = []
if options.singleFile:
# The inputted file name is a file with LOLA results
lolaResultsFileNameList = [options.lolaResultsFileNameListFileName]
else:
# The inputted file name is a list of files with LOLA results
lolaResultsFileNameListFile = open(options.lolaResultsFileNameListFileName)
lolaResultsFileNameList = [line.strip() for line in lolaResultsFileNameListFile]
lolaResultsFileNameListFile.close()
numTests = (len(lolaHeaders) - len(lolaHeadersExclude)) * len(lolaResultsFileNameList)
for lolaResultsFileName in lolaResultsFileNameList:
# Iterate through the results files and record the p-value for each TF in each category
if not options.outputLog:
# Include the headers
TF = lolaResultsFileName.split("/")[-1].split(".")[0].split("_")[0]
outputFile.write(TF + "\t")
lolaResultsFile = open(lolaResultsFileName)
for line in lolaResultsFile:
# Iterate through the categories and record the Bonferroni-corrected p-value for each
lineElements = line.strip().split("\t")
currentHeaderElements = lineElements[20].split(".")
currentHeaderElementsFilt = [currentHeaderElements[fnp] for fnp in options.fileNamePartsInHeader]
currentHeader = ".".join(currentHeaderElementsFilt)
if currentHeader in lolaHeadersExclude:
# Skip the current category
continue
if not options.outputLog:
# Output the p-value
pVal = (10 ** (0 - float(lineElements[3]))) * numTests
pValStr = str(pVal)
if pVal > 1:
# Change the p-value string to be > 1
pValStr = "> 1"
outputFile.write(pValStr + "\t")
else:
# Output the -log10 of the p-value
pVal = float(lineElements[3]) - np.log10(numTests)
if pVal < 0:
# The p-value is > 1, so set its -log10 to 0
pVal = 0.0
if pVal > 250:
# The p-value is really small, so set its -log10 to 250
pVal = 250.0
outputFile.write(str(pVal) + "\t")
outputFile.write("\n")
outputFile.close()
if __name__ == "__main__":
options = parseArgument()
processLolaResults(options)
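A small worked example of the two output modes above (illustrative numbers): LOLA reports -log10(p) in the fourth column, so a reported value of 5.0 under 100 tests becomes either a Bonferroni-corrected p-value or a corrected -log10 value.

import numpy as np

reported = 5.0       # -log10(p) read from the LOLA results
num_tests = 100      # (categories kept) * (number of results files)
p_corrected = (10 ** (0 - reported)) * num_tests        # 1e-05 * 100 = 0.001
neg_log10_corrected = reported - np.log10(num_tests)    # 5.0 - 2.0 = 3.0
print(p_corrected, neg_log10_corrected)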
|
bankonme/MUE-Src | qa/rpc-tests/test_framework.py | Python | mit | 4,729 | 0.003383 |
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import shutil
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_monetaryunitds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_monetaryunitds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave monetaryunitds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing monetaryunitd/monetaryunit-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
|
import logging
logging.basicConfig(level=logging.DEBUG)
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
|
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.nocleanup:
print("Cleaning up")
#stop_nodes(self.nodes)
#wait_monetaryunitds()
#shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
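A hypothetical subclass sketch showing how the framework above is used; the RPC call is illustrative and assumes a regtest chain at height 200, as in the default run_test.

class ExampleTest(BitcoinTestFramework):
    def run_test(self):
        # Mine one block on node 0 and check that every node sees it.
        self.nodes[0].generate(1)
        self.sync_all()
        for node in self.nodes:
            assert_equal(node.getblockcount(), 201)

if __name__ == '__main__':
    ExampleTest().main()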
|
cjlee112/socraticqs2 | mysite/ct/signals.py | Python | apache-2.0 | 5,199 | 0.002693 |
"""
Django signals for the app.
"""
import logging
from django.db.models.signals import post_save
from django.conf import settings
from django.contrib.sites.models import Site
from .models import Response, UnitLesson
from .ct_util import get_middle_indexes
from core.common.mongo import c_milestone_orct
from core.common.utils import send_email, suspending_receiver
log = logging.getLogger(__name__)
@suspending_receiver(post_save, sender=Response)
def run_courselet_notif_flow(sender, instance, **kwargs):
# TODO: add check that Response has a text, as an obj can be created before a student submits
# TODO: exclude self eval submissions other than a response submission (e.g. "just guessing")
if (instance.kind == Response.ORCT_RESPONSE and not
(instance.unitLesson.kind == UnitLesson.RESOLVES or
instance.is_test or instance.is_preview or not instance.unitLesson.order)):
course = instance.course
course_id = course.id if course else None
instructors = course.get_users(role="prof")
lesson = instance.lesson
lesson_id = lesson.id if lesson else None
student = instance.author
        student_id = student.id if student else None
unit_lesson = instance.unitLesson
unit_lesson_id = unit_lesson.id if unit_lesson else None # it's a thread
        # Exclude instructors, e.g. the ones submitting in preview mode
for instructor in instructors:
if student_id == instructor.id:
return
# Define if it's a milestone question (either first, middle, or last)
milestone = None
questions = unit_lesson.unit.all_orct()
i = [_[0] for _ in questions.values_list('id')].index(unit_lesson_id)
if i == 0:
milestone = "first"
elif i == len(questions) - 1:
milestone = "last"
elif i in get_middle_indexes(questions):
milestone = "middle" # TODO consider returning a single number
# If milestone, store the record
if milestone:
to_save = {
"milestone": milestone,
"lesson_title": lesson.title if lesson else None,
"lesson_id": lesson_id,
"unit_lesson_id": unit_lesson_id,
"course_title": course.title if course else None,
"course_id": course_id,
"student_username": student.username if student else None,
"student_id": student_id,
# "datetime": datetime.datetime.now() # TODO: consider changing to UTC (and making it a timestamp)
}
# Do not store if such `student_id`-`lesson_id` row is already present
milestone_orct_answers_cursor = c_milestone_orct(use_secondary=False).find({
"milestone": milestone,
"lesson_id": lesson_id
})
initial_milestone_orct_answers_number = milestone_orct_answers_cursor.count()
milestone_orct_answers = (a for a in milestone_orct_answers_cursor)
already_exists = False
for answer in milestone_orct_answers:
if answer.get("student_id") == student_id:
already_exists = True
break
if not already_exists:
c_milestone_orct(use_secondary=False).save(to_save)
milestone_orct_answers_number = initial_milestone_orct_answers_number + 1
# If N students responded to a milestone question, send an email.
# The threshold holds for each milestone separately.
if milestone_orct_answers_number == settings.MILESTONE_ORCT_NUMBER:
context_data = {
"milestone": milestone,
"students_number": milestone_orct_answers_number,
"course_title": course.title if course else None,
"lesson_title": lesson.title if lesson else None,
"current_site": Site.objects.get_current(),
"course_id": course_id,
"unit_lesson_id": unit_lesson_id,
"courselet_pk": unit_lesson.unit.id if unit_lesson.unit else None
} # pragma: no cover
log.info("""Courselet notification with data:
Course title - {course_title},
Lesson title - {lesson_title},
Students number - {students_number},
Unit lesson id - {unit_lesson_id},
Course id - {course_id},
Milestone - {milestone}
""".format(**context_data)) # pragma: no cover
send_email(
context_data=context_data,
from_email=settings.EMAIL_FROM,
to_email=[instructor.email for instructor in instructors],
template_subject="ct/email/milestone_ortc_notify_subject",
template_text="ct/email/milestone_ortc_notify_text"
)
|
melqkiades/yelp | source/python/recommenders/multicriteria/multicriteria_base_recommender.py | Python | lgpl-2.1 | 3,524 | 0.001703 |
from abc import ABCMeta
from recommenders.similarity.weights_similarity_matrix_builder import \
WeightsSimilarityMatrixBuilder
from tripadvisor.fourcity import extractor
from recommenders.base_recommender import BaseRecommender
from utils import dictionary_utils
__author__ = 'fpena'
class MultiCriteriaBaseRecommender(BaseRecommender):
__metaclass__ = ABCMeta
def __init__(
self, name, similarity_metric=None,
significant_criteria_ranges=None):
super(MultiCriteriaBaseRecommender, self).__init__(name, None)
self._significant_criteria_ranges = significant_criteria_ranges
self._similarity_matrix_builder = WeightsSimilarityMatrixBuilder(similarity_metric)
self.user_cluster_dictionary = None
def load(self, reviews):
self.reviews = reviews
self.user_ids = extractor.get_groupby_list(self.reviews, 'user_id')
self.user_dictionary =\
extractor.initialize_cluster_users(self.reviews, self._significant_criteria_ranges)
self.user_cluster_dictionary = self.build_user_clusters(
self.reviews, self._significant_criteria_ranges)
if self._similarity_matrix_builder._similarity_metric is not None:
self.user_similarity_matrix =\
self._similarity_matrix_builder.build_similarity_matrix(
self.user_dictionary, self.user_ids)
def clear(self):
super(MultiCriteriaBaseRecommender, self).clear()
self.user_cluster_dictionary = None
# TODO: Add the item_id as a parameter in order to optimize the method
def get_neighbourhood(self, user_id):
cluster_name = self.user_dictionary[user_id].cluster
cluster_users = list(self.user_cluster_dictionary[cluster_name])
cluster_users.remove(user_id)
        # We remove the given user from the cluster in order to avoid bias
if self._num_neighbors is None:
return cluster_users
similarity_matrix = self.user_similarity_matrix[user_id].copy()
similarity_matrix.pop(user_id, None)
ordered_similar_users = dictionary_utils.sort_dictionary_keys(
similarity_matrix)
intersection_set = set.intersection(set(ordered_similar_users), set(cluster_users))
intersection_lst = [t for t in ordered_similar_users if t in intersection_set]
return intersection_lst # [:self._num_neighbors]
@staticmethod
def build_user_clusters(reviews, significant_criteria_ranges=None):
"""
Builds a series of clusters for users according to their significant
criteria. Users that have exactly the same significant criteria will belong
to the same cluster.
:param reviews: the list of reviews
:return: a dictionary where all the keys are the cluster names and the
values for those keys are list of users that belong to that cluster
"""
user_list = extractor.get_groupby_list(reviews, 'user_id')
user_cluster_dictionary = {}
for user in user_list:
weights = extractor.get_criteria_weights(reviews, user)
significant_criteria, cluster_name =\
extractor.get_significant_criteria(weights, significant_criteria_ranges)
if cluster_name in user_cluster_dictionary:
user_cluster_dictionary[cluster_name].append(user)
else:
user_cluster_dictionary[cluster_name] = [user]
return user_cluster_dictionary
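# Illustrative sketch (editor's addition, not part of the original module): users whose
# significant-criteria signature is identical end up in the same cluster. A minimal
# standalone version of that grouping, with hypothetical user/signature data:
#
#   signatures = {'u1': (1, 0, 1), 'u2': (1, 0, 1), 'u3': (0, 1, 0)}
#   clusters = {}
#   for user, signature in signatures.items():
#       clusters.setdefault(signature, []).append(user)
#   # clusters == {(1, 0, 1): ['u1', 'u2'], (0, 1, 0): ['u3']}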
|
squadran2003/filtering-searching-mineral-catalogue
|
filtering-searching-mineral-catalogue/minerals/apps.py
|
Python
|
mit
| 91 | 0 |
from django.apps import AppConfig
class MineralsConfig(AppConfig):
name = 'minerals'
|
hltbra/pycukes
|
specs/console_examples/stories_with_hooks/support/env.py
|
Python
|
mit
| 392 | 0.005102 |
from pycukes import BeforeAll, AfterAll, BeforeEach, AfterEach
@BeforeAll
def add_message1_attr(context):
context.counter = 1
@BeforeEach
def add_message_attr(context):
context.counter += 1
setattr(context, 'message%d' % context.counter, 'msg')
@AfterEach
def increment_one(context):
context.counter += 1
@AfterAll
def show_hello_world(context):
print 'hello world'
|
josephlewis42/magpie
|
magpie/lib/jinja2/testsuite/filters.py
|
Python
|
bsd-3-clause
| 19,379 | 0.000929 |
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.filters
~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Markup, Environment
from jinja2._compat import text_type, implements_to_string
env = Environment()
class FilterTestCase(JinjaTestCase):
def test_filter_calling(self):
rv = env.call_filter('sum', [1, 2, 3])
self.assert_equal(rv, 6)
def test_capitalize(self):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == 'Foo bar'
def test_center(self):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == ' foo '
def test_default(self):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given='yes') == 'no|False|no|yes'
def test_dictsort(self):
tmpl = env.from_string(
'{{ foo|dictsort }}|'
'{{ foo|dictsort(true) }}|'
|
'{{ foo|dictsort(false, "value") }}'
)
out = tmpl.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == ("[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]|"
"[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]|"
"[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]")
def test_batch(self):
tmpl = env.from_string("{{ foo|batch(3)|list }}|"
"{{ foo|batch(3, 'X')|list }}")
        out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]")
def test_slice(self):
tmpl = env.from_string('{{ foo|slice(3)|list }}|'
'{{ foo|slice(3, "X")|list }}')
out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]")
def test_escape(self):
tmpl = env.from_string('''{{ '<">&'|escape }}''')
out = tmpl.render()
assert out == '<">&'
def test_striptags(self):
tmpl = env.from_string('''{{ foo|striptags }}''')
out = tmpl.render(foo=' <p>just a small \n <a href="#">'
'example</a> link</p>\n<p>to a webpage</p> '
'<!-- <p>and some commented stuff</p> -->')
assert out == 'just a small example link to a webpage'
def test_filesizeformat(self):
tmpl = env.from_string(
'{{ 100|filesizeformat }}|'
'{{ 1000|filesizeformat }}|'
'{{ 1000000|filesizeformat }}|'
'{{ 1000000000|filesizeformat }}|'
'{{ 1000000000000|filesizeformat }}|'
'{{ 100|filesizeformat(true) }}|'
'{{ 1000|filesizeformat(true) }}|'
'{{ 1000000|filesizeformat(true) }}|'
'{{ 1000000000|filesizeformat(true) }}|'
'{{ 1000000000000|filesizeformat(true) }}'
)
out = tmpl.render()
self.assert_equal(out, (
'100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|'
'1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB'
))
def test_filesizeformat_issue59(self):
tmpl = env.from_string(
'{{ 300|filesizeformat }}|'
'{{ 3000|filesizeformat }}|'
'{{ 3000000|filesizeformat }}|'
'{{ 3000000000|filesizeformat }}|'
'{{ 3000000000000|filesizeformat }}|'
'{{ 300|filesizeformat(true) }}|'
'{{ 3000|filesizeformat(true) }}|'
'{{ 3000000|filesizeformat(true) }}'
)
out = tmpl.render()
self.assert_equal(out, (
'300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|'
'2.9 KiB|2.9 MiB'
))
def test_first(self):
tmpl = env.from_string('{{ foo|first }}')
out = tmpl.render(foo=list(range(10)))
assert out == '0'
def test_float(self):
tmpl = env.from_string('{{ "42"|float }}|'
'{{ "ajsghasjgd"|float }}|'
'{{ "32.32"|float }}')
out = tmpl.render()
assert out == '42.0|0.0|32.32'
def test_format(self):
tmpl = env.from_string('''{{ "%s|%s"|format("a", "b") }}''')
out = tmpl.render()
assert out == 'a|b'
def test_indent(self):
tmpl = env.from_string('{{ foo|indent(2) }}|{{ foo|indent(2, true) }}')
text = '\n'.join([' '.join(['foo', 'bar'] * 2)] * 2)
out = tmpl.render(foo=text)
assert out == ('foo bar foo bar\n foo bar foo bar| '
'foo bar foo bar\n foo bar foo bar')
def test_int(self):
tmpl = env.from_string('{{ "42"|int }}|{{ "ajsghasjgd"|int }}|'
'{{ "32.32"|int }}')
out = tmpl.render()
assert out == '42|0|32'
def test_join(self):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == '1|2|3'
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render() == '<foo><span>foo</span>'
def test_join_attribute(self):
class User(object):
def __init__(self, username):
self.username = username
tmpl = env.from_string('''{{ users|join(', ', 'username') }}''')
assert tmpl.render(users=map(User, ['foo', 'bar'])) == 'foo, bar'
def test_last(self):
tmpl = env.from_string('''{{ foo|last }}''')
out = tmpl.render(foo=list(range(10)))
assert out == '9'
def test_length(self):
tmpl = env.from_string('''{{ "hello world"|length }}''')
out = tmpl.render()
assert out == '11'
def test_lower(self):
tmpl = env.from_string('''{{ "FOO"|lower }}''')
out = tmpl.render()
assert out == 'foo'
def test_pprint(self):
from pprint import pformat
tmpl = env.from_string('''{{ data|pprint }}''')
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self):
tmpl = env.from_string('''{{ seq|random }}''')
seq = list(range(100))
for _ in range(10):
assert int(tmpl.render(seq=seq)) in seq
def test_reverse(self):
tmpl = env.from_string('{{ "foobar"|reverse|join }}|'
'{{ [1, 2, 3]|reverse|list }}')
assert tmpl.render() == 'raboof|[3, 2, 1]'
def test_string(self):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string('''{{ obj|string }}''')
assert tmpl.render(obj=x) == text_type(x)
def test_title(self):
tmpl = env.from_string('''{{ "foo bar"|title }}''')
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string('''{{ "foo's bar"|title }}''')
assert tmpl.render() == "Foo's Bar"
tmpl = env.from_string('''{{ "foo bar"|title }}''')
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string('''{{ "f bar f"|title }}''')
assert tmpl.render() == "F Bar F"
tmpl = env.from_string('''{{ "foo-bar"|title }}''')
assert tmpl.render() == "Foo-Bar"
tmpl = env.from_string('''{{ "foo\tbar"|title }}''')
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string('''{{ "FOO\tBAR"|title }}''')
assert tmpl.render() == "Foo\tBar"
class Foo:
def __str__(self):
return 'foo-bar'
tmpl = env.from_string('''{{ data|title }}''')
out = tmpl.render(data=Foo())
assert out == 'Foo-Bar'
def test_truncate(self):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
'{{ smalld
|
gregorschatz/pymodbus3
|
pymodbus3/bit_read_message.py
|
Python
|
bsd-3-clause
| 8,239 | 0 |
# -*- coding: utf-8 -*-
"""
Bit Reading Request/Response messages
--------------------------------------
"""
import struct
from pymodbus3.pdu import ModbusRequest
from pymodbus3.pdu import ModbusResponse
from pymodbus3.pdu import ModbusExceptions
from pymodbus3.utilities import pack_bitstring, unpack_bitstring
class ReadBitsRequestBase(ModbusRequest):
""" Base class for Messages Requesting bit values """
_rtu_frame_size = 8
def __init__(self, address, count, **kwargs):
""" Initializes the read request data
:param address: The start address to read from
:param count: The number of bits after 'address' to read
"""
ModbusRequest.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self):
""" Encodes a request pdu
:returns: The encoded pdu
"""
return struct.pack('>HH', self.address, self.count)
def decode(self, data):
""" Decodes a request pdu
:param data: The packet data to decode
"""
self.address, self.count = struct.unpack('>HH', data)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'ReadBitRequest({0},{1})'.format(self.address, self.count)
class ReadBitsResponseBase(ModbusResponse):
""" Base class for Messages responding to bit-reading values """
_rtu_byte_count_pos = 2
def __init__(self, values, **kwargs):
""" Initializes a new instance
:param values: The requested values to be returned
"""
self.byte_count = None
ModbusResponse.__init__(self, **kwargs)
self.bits = values or []
def encode(self):
""" Encodes response pdu
:returns: The encoded packet message
"""
result = pack_bitstring(self.bits)
packet = struct.pack('>B', len(result)) + result
return packet
def decode(self, data):
""" Decodes response pdu
:param data: The packet data to decode
"""
self.byte_count = data[0]
self.bits = unpack_bitstring(data[1:])
|
def set_bit(self, address, value=1):
""" Helper function to set the specified bit
:param address: The bit to set
:param value: The value to set the bit to
"""
self.bits[address] = (value != 0)
    def reset_bit(self, address):
""" Helper function to set the specified bit to 0
:param address: The bit to reset
"""
self.set_bit(address, 0)
def get_bit(self, address):
""" Helper function to get the specified bit's value
:param address: The bit to query
:returns: The value of the requested bit
"""
return self.bits[address]
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'ReadBitResponse({0})'.format(len(self.bits))
class ReadCoilsRequest(ReadBitsRequestBase):
"""
This function code is used to read from 1 to 2000(0x7d0) contiguous status
of coils in a remote device. The Request PDU specifies the starting
address, ie the address of the first coil specified, and the number of
coils. In the PDU Coils are addressed starting at zero. Therefore coils
numbered 1-16 are addressed as 0-15.
"""
function_code = 1
def __init__(self, address=None, count=None, **kwargs):
""" Initializes a new instance
:param address: The address to start reading from
:param count: The number of bits to read
"""
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
def execute(self, context):
""" Run a read coils request against a datastore
Before running the request, we make sure that the request is in
the max valid range (0x001-0x7d0). Next we make sure that the
request is valid against the current datastore.
:param context: The datastore to request from
:returns: The initializes response message, exception message otherwise
"""
if not (1 <= self.count <= 0x7d0):
return self.do_exception(ModbusExceptions.IllegalValue)
if not context.validate(self.function_code, self.address, self.count):
return self.do_exception(ModbusExceptions.IllegalAddress)
values = context.get_values(
self.function_code, self.address, self.count
)
return ReadCoilsResponse(values)
class ReadCoilsResponse(ReadBitsResponseBase):
"""
The coils in the response message are packed as one coil per bit of
the data field. Status is indicated as 1= ON and 0= OFF. The LSB of the
first data byte contains the output addressed in the query. The other
coils follow toward the high order end of this byte, and from low order
to high order in subsequent bytes.
If the returned output quantity is not a multiple of eight, the
remaining bits in the final data byte will be padded with zeros
(toward the high order end of the byte). The Byte Count field specifies
the quantity of complete bytes of data.
"""
function_code = 1
def __init__(self, values=None, **kwargs):
""" Initializes a new instance
:param values: The request values to respond with
"""
ReadBitsResponseBase.__init__(self, values, **kwargs)
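# Worked example (editor's sketch, not part of pymodbus3): ten coil values
#   [1, 0, 1, 1, 0, 0, 1, 1, 1, 0]
# are packed LSB-first into two data bytes, exactly as the docstring above describes:
#   byte 0 -> 0b11001101 = 0xCD  (coils 1-8, coil 1 in the least significant bit)
#   byte 1 -> 0b00000001 = 0x01  (coils 9-10, high-order bits padded with zeros)
# so the encoded response data is: byte count 0x02, then 0xCD 0x01.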
class ReadDiscreteInputsRequest(ReadBitsRequestBase):
"""
This function code is used to read from 1 to 2000(0x7d0) contiguous status
of discrete inputs in a remote device. The Request PDU specifies the
starting address, ie the address of the first input specified, and the
number of inputs. In the PDU Discrete Inputs are addressed starting at
zero. Therefore Discrete inputs numbered 1-16 are addressed as 0-15.
"""
function_code = 2
def __init__(self, address=None, count=None, **kwargs):
""" Initializes a new instance
:param address: The address to start reading from
:param count: The number of bits to read
"""
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
def execute(self, context):
""" Run a read discrete input request against a datastore
Before running the request, we make sure that the request is in
the max valid range (0x001-0x7d0). Next we make sure that the
request is valid against the current datastore.
:param context: The datastore to request from
:returns: The initializes response message, exception message otherwise
"""
if not (1 <= self.count <= 0x7d0):
return self.do_exception(ModbusExceptions.IllegalValue)
if not context.validate(
self.function_code, self.address, self.count
):
return self.do_exception(ModbusExceptions.IllegalAddress)
values = context.get_values(
self.function_code, self.address, self.count
)
return ReadDiscreteInputsResponse(values)
class ReadDiscreteInputsResponse(ReadBitsResponseBase):
"""
The discrete inputs in the response message are packed as one input per
bit of the data field. Status is indicated as 1= ON; 0= OFF. The LSB of
the first data byte contains the input addressed in the query. The other
inputs follow toward the high order end of this byte, and from low order
to high order in subsequent bytes.
If the returned input quantity is not a multiple of eight, the
remaining bits in the final data byte will be padded with zeros
(toward the high order end of the byte). The Byte Count field specifies
the quantity of complete bytes of data.
"""
function_code = 2
def __init__(self, values=None, **kwargs):
""" Initializes a new instance
:param values: The request values to respond with
"""
ReadBitsResponseBase.__init__(self, values, **kwargs)
# Exported symbols
__all__ = [
'ReadCoilsRequest',
'ReadCoilsResponse',
'ReadD
|
kuiche/chromium
|
third_party/scons/scons-local/SCons/Debug.py
|
Python
|
bsd-3-clause
| 6,593 | 0.00546 |
"""SCons.Debug
Code for debugging SCons internal things. Not everything here is
guaranteed to work all the way back to Python 1.5.2, and shouldn't be
needed by most users.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py 3897 2009/01/13 06:45:54 scons"
import os
import string
import sys
# Recipe 14.10 from the Python Cookbook.
try:
import weakref
except ImportError:
def logInstanceCreation(instance, name=None):
pass
else:
def logInstanceCreation(instance, name=None):
if name is None:
name = instance.__class__.__name__
if not tracked_classes.has_key(name):
tracked_classes[name] = []
tracked_classes[name].append(weakref.ref(instance))
tracked_classes = {}
def string_to_classes(s):
if s == '*':
c = tracked_classes.keys()
c.sort()
return c
else:
return string.split(s)
def fetchLoggedInstances(classes="*"):
classnames = string_to_classes(classes)
return map(lambda cn: (cn, len(tracked_classes[cn])), classnames)
def countLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))
def listLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s:\n' % obj)
for key, value in obj.__dict__.items():
file.write(' %20s : %s\n' % (key, value))
if sys.platform[:5] == "linux":
# Linux doesn't actually support memory usage stats from getrusage().
def memory():
mstr = open('/proc/self/stat').read()
mstr = string.split(mstr)[22]
return int(mstr)
else:
try:
import resource
except ImportError:
try:
import win32process
import win32api
except ImportError:
def memory():
return 0
else:
def memory():
process_handle = win32api.GetCurrentProcess()
memory_info = win32process.GetProcessMemoryInfo( process_handle )
return memory_info['PeakWorkingSetSize']
else:
def memory():
res = resource.getrusage(resource.RUSAGE_SELF)
return res[4]
# returns caller's stack
def caller_stack(*backlist):
import traceback
if not backlist:
backlist = [0]
result = []
for back in backlist:
tb = traceback.extract_stack(limit=3+back)
key = tb[0][:3]
result.append('%s:%d(%s)' % func_shorten(key))
return result
caller_bases = {}
caller_dicts = {}
# trace a caller's stack
def caller_trace(back=0):
import traceback
tb = traceback.extract_stack(limit=3+back)
tb.reverse()
callee = tb[1][:3]
caller_bases[callee] = caller_bases.get(callee, 0) + 1
for caller in tb[2:]:
caller = callee + caller[:3]
try:
entry = caller_dicts[callee]
except KeyError:
caller_dicts[callee] = entry = {}
entry[caller] = entry.get(caller, 0) + 1
callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
l = []
for c,v in caller_dicts[key].items():
l.append((-v,c))
l.sort()
leader = ' '*level
for v,c in l:
file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
if caller_dicts.has_key(c):
_dump_one_caller(c, file, level+1)
# print each call tree
def dump_caller_counts(file=sys.stdout):
keys = caller_bases.keys()
keys.sort()
for k in keys:
file.write("Callers of %s:%d(%s), %d calls:\n"
% (func_shorten(k) + (caller_bases[k],)))
_dump_one_caller(k, file)
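# Typical use (editor's note): sprinkle caller_trace() inside a function you want to
# profile, then call dump_caller_counts() once at the end of the run to print, for each
# instrumented call site, the call paths that reached it and how often.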
shorten_list = [
( '/scons/SCons/', 1),
( '/src/engine/SCons/', 1),
( '/usr/lib/python', 0),
]
if os.sep != '/':
def platformize(t):
return (string.replace(t[0], '/', os.sep), t[1])
shorten_list = map(platformize, shorten_list)
    del platformize
def func_shorten(func_tuple):
f = func_tuple[0]
for t in shorten_list:
i = string.find(f, t[0])
if i >= 0:
if t[1]:
i = i + len(t[0])
return (f[i:],)+func_tuple[1:]
return func_tuple
TraceFP = {}
if sys.platform == 'win32':
TraceDefault = 'con'
else:
TraceDefault = '/dev/tty'
def Trace(msg, file=None, mode='w'):
"""Write a trace message to a file. Whenever a file is specified,
    it becomes the default for the next call to Trace()."""
global TraceDefault
if file is None:
file = TraceDefault
else:
TraceDefault = file
try:
fp = TraceFP[file]
except KeyError:
try:
fp = TraceFP[file] = open(file, mode)
except TypeError:
# Assume we were passed an open file pointer.
fp = file
fp.write(msg)
fp.flush()
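# Usage sketch (editor's note): the first call below opens /tmp/scons-trace.log and makes
# it the default; the second call reuses the same cached file object.
#
#   Trace('begin\n', '/tmp/scons-trace.log')
#   Trace('still tracing\n')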
|
mouseratti/guake
|
guake/main.py
|
Python
|
gpl-2.0
| 15,344 | 0.001369 |
# -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2013 Guake authors
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
import inspect
import time
# You can put calls to p() everywhere in this page to inspect timing
# g_start = time.time()
# def p():
# print(time.time() - g_start, __file__, inspect.currentframe().f_back.f_lineno)
import logging
import os
import signal
import subprocess
import sys
import uuid
from locale import gettext as _
from optparse import OptionParser
log = logging.getLogger(__name__)
from guake.globals import NAME
from guake.globals import bindtextdomain
from guake.support import print_support
from guake.utils import restore_preferences
from guake.utils import save_preferences
# When we are in the document generation on readthedocs, we do not have paths.py generated
try:
from guake.paths import LOCALE_DIR
bindtextdomain(NAME, LOCALE_DIR)
except: # pylint: disable=bare-except
pass
def main():
"""Parses the command line parameters and decide if dbus methods
should be called or not. If there is already a guake instance
running it will be used and a True value will be returned,
otherwise, false will be returned.
"""
    # Force to xterm-256 colors for compatibility with some old command line programs
os.environ["TERM"] = "xterm-256color"
    # Force use X11 backend under Wayland
os.environ["GDK_BACKEND"] = "x11"
# do not use version keywords here, pbr might be slow to find the version of Guake module
parser = OptionParser()
parser.add_option(
'-V',
'--version',
dest='version',
action='store_true',
default=False,
help=_('Show Guake version number and exit')
)
parser.add_option(
'-v',
'--verbose',
dest='verbose',
action='store_true',
default=False,
help=_('Enable verbose logging')
)
parser.add_option(
'-f',
'--fullscreen',
dest='fullscreen',
action='store_true',
default=False,
help=_('Put Guake in fullscreen mode')
)
parser.add_option(
'-t',
'--toggle-visibility',
dest='show_hide',
action='store_true',
default=False,
help=_('Toggles the visibility of the terminal window')
)
parser.add_option(
'--show',
dest="show",
action='store_true',
default=False,
help=_('Shows Guake main window')
)
parser.add_option(
'--hide',
dest='hide',
action='store_true',
default=False,
help=_('Hides Guake main window')
)
parser.add_option(
'-p',
'--preferences',
dest='show_preferences',
action='store_true',
default=False,
help=_('Shows Guake preference window')
)
parser.add_option(
'-a',
'--about',
dest='show_about',
action='store_true',
default=False,
help=_('Shows Guake\'s about info')
)
parser.add_option(
'-n',
'--new-tab',
dest='new_tab',
action='store',
default='',
help=_('Add a new tab (with current directory set to NEW_TAB)')
)
parser.add_option(
'-s',
'--select-tab',
dest='select_tab',
action='store',
default='',
help=_('Select a tab (SELECT_TAB is the index of the tab)')
)
parser.add_option(
'-g',
'--selected-tab',
dest='selected_tab',
action='store_true',
default=False,
help=_('Return the selected tab index.')
)
parser.add_option(
'-l',
'--selected-tablabel',
dest='selected_tablabel',
action='store_true',
default=False,
help=_('Return the selected tab label.')
)
parser.add_option(
'--split-vertical',
dest='split_vertical',
action='store_true',
default=False,
help=_('Split the selected tab vertically.')
)
parser.add_option(
'--split-horizontal',
dest='split_horizontal',
action='store_true',
default=False,
help=_('Split the selected tab horizontally.')
)
parser.add_option(
'-e',
'--execute-command',
dest='command',
action='store',
default='',
help=_('Execute an arbitrary command in the selected tab.')
)
parser.add_option(
'-i',
'--tab-index',
dest='tab_index',
action='store',
default='0',
help=_('Specify the tab to rename. Default is 0. Can be used to select tab by UUID.')
)
parser.add_option(
'--bgcolor',
dest='bgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) background color of '
'the selected tab.')
)
parser.add_option(
'--fgcolor',
dest='fgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) foreground color of the '
'selected tab.')
)
parser.add_option(
'--change-palette',
dest='palette_name',
action='store',
default='',
help=_('Change Guake palette scheme')
)
parser.add_option(
'--rename-tab',
dest='rename_tab',
metavar='TITLE',
action='store',
default='',
help=_(
'Rename the specified tab by --tab-index. Reset to default if TITLE is '
'a single dash "-".'
)
)
parser.add_option(
'-r',
'--rename-current-tab',
dest='rename_current_tab',
metavar='TITLE',
action='store',
default='',
help=_('Rename the current tab. Reset to default if TITLE is a '
'single dash "-".')
)
parser.add_option(
'-q',
'--quit',
dest='quit',
action='store_true',
default=False,
help=_('Says to Guake go away =(')
)
parser.add_option(
'-u',
'--no-startup-script',
dest='execute_startup_script',
action='store_false',
default=True,
help=_('Do not execute the start up script')
)
parser.add_option(
'--save-preferences',
dest='save_preferences',
action='store',
default=None,
help=_('Save Guake preferences to this filename')
)
parser.add_option(
'--restore-preferences',
dest='restore_preferences',
action='store',
default=None,
help=_('Restore Guake preferences from this file')
)
parser.add_option(
'--support',
dest='support',
action='store_true',
default=False,
        help=_('Show support information')
)
# checking mandatory dependencies
missing_deps = False
try:
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
except ValueError:
print("[ERROR] missing mandatory dependency: GtK 3.0")
missing_deps = True
try:
gi.require_version('Vte', '2.91') # vte-0.42
except ValueError:
print("[ERROR] missing mandatory dependency: Vte >= 0.42")
missing_deps = True
try:
gi.require_version('Keybinder', '3.0')
except ValueError:
print("[ERROR] missing mandatory dependency: Keybinder 3")
missing_deps = True
try:
import cairo
except Impo
|
adamcharnock/django-su
|
django_su/templatetags/su_tags.py
|
Python
|
mit
| 270 | 0 |
# -*- coding: utf-8 -*-
from django import template
from ..utils import su_login_callback
register = template.Library()
@register.inclusion_tag('su/login_link.html', takes_context=False)
def login_su_link(user):
return {'can_su_login': su_login_callback(user)}
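# Typical template usage (editor's note; the library name su_tags follows from this
# module's filename):
#   {% load su_tags %}
#   {% login_su_link request.user %}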
|
WillWeatherford/mars-rover
|
photos/migrations/0003_rover.py
|
Python
|
mit
| 852 | 0.001174 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-19 07:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('photos', '0002_auto_20160919_0737'),
]
operations = [
migrations.CreateModel(
name='Rover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nasa_id', models.IntegerField(unique=True)),
('name', models.CharField(max_length=30)),
('landing_date', models.DateField()),
('max_date', models.DateField()),
('max_sol', models.IntegerField()),
('total_photos', models.IntegerField()),
],
),
]
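# Editor's note: a migration like this is normally generated with
# "python manage.py makemigrations photos" and applied with "python manage.py migrate photos".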
|
destroy/SleekXMPP-gevent
|
sleekxmpp/stanza/__init__.py
|
Python
|
mit
| 399 | 0 |
"""
|
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza.error import Error
from sleekxmpp.stanza.stream_error import StreamError
from sleekxmpp.stanza.iq import Iq
from sleekxmpp.stanza.message import Message
from sleekxmpp.stanza.presence import Presence
|
whummer/moto
|
moto/kms/responses.py
|
Python
|
apache-2.0
| 14,169 | 0.003882 |
from __future__ import unicode_literals
import base64
import json
import re
import six
from moto.core.responses import BaseResponse
from .models import kms_backends
from .exceptions import NotFoundException, ValidationException, AlreadyExistsException, NotAuthorizedException
reserved_aliases = [
'alias/aws/ebs',
'alias/aws/s3',
'alias/aws/redshift',
'alias/aws/rds',
]
class KmsResponse(BaseResponse):
@property
def parameters(self):
return json.loads(self.body)
@property
def kms_backend(self):
return kms_backends[self.region]
def create_key(self):
policy = self.parameters.get('Policy')
key_usage = self.parameters.get('KeyUsage')
        description = self.parameters.get('Description')
tags = self.parameters.get('Tags')
key = self.kms_backend.create_key(
policy, key_usage, description, tags, self.region)
return json.dumps(key.to_dict())
def update_key_description(self):
key_id = self.parameters.get('KeyId')
description = self.parameters.get('Description')
self.kms_backend.update_key_description(key_id, description)
return json.dumps(None)
def tag_resource(self):
key_id = self.parameters.get('KeyId')
tags = self.parameters.get('Tags')
self.kms_backend.tag_resource(key_id, tags)
return json.dumps({})
def list_resource_tags(self):
key_id = self.parameters.get('KeyId')
tags = self.kms_backend.list_resource_tags(key_id)
return json.dumps({
"Tags": tags,
"NextMarker": None,
"Truncated": False,
})
def describe_key(self):
key_id = self.parameters.get('KeyId')
try:
key = self.kms_backend.describe_key(
self.kms_backend.get_key_id(key_id))
except KeyError:
headers = dict(self.headers)
headers['status'] = 404
return "{}", headers
return json.dumps(key.to_dict())
def list_keys(self):
keys = self.kms_backend.list_keys()
return json.dumps({
"Keys": [
{
"KeyArn": key.arn,
"KeyId": key.id,
} for key in keys
],
"NextMarker": None,
"Truncated": False,
})
def create_alias(self):
alias_name = self.parameters['AliasName']
target_key_id = self.parameters['TargetKeyId']
if not alias_name.startswith('alias/'):
raise ValidationException('Invalid identifier')
if alias_name in reserved_aliases:
raise NotAuthorizedException()
if ':' in alias_name:
raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name))
if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name):
raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' "
"failed to satisfy constraint: Member must satisfy regular "
"expression pattern: ^[a-zA-Z0-9:/_-]+$"
.format(alias_name=alias_name))
if self.kms_backend.alias_exists(target_key_id):
raise ValidationException('Aliases must refer to keys. Not aliases')
if self.kms_backend.alias_exists(alias_name):
raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} '
'already exists'.format(region=self.region, alias_name=alias_name))
self.kms_backend.add_alias(target_key_id, alias_name)
return json.dumps(None)
def delete_alias(self):
alias_name = self.parameters['AliasName']
if not alias_name.startswith('alias/'):
raise ValidationException('Invalid identifier')
if not self.kms_backend.alias_exists(alias_name):
raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:'
'{alias_name} is not found.'.format(region=self.region, alias_name=alias_name))
self.kms_backend.delete_alias(alias_name)
return json.dumps(None)
def list_aliases(self):
region = self.region
response_aliases = [
{
'AliasArn': u'arn:aws:kms:{region}:012345678912:{reserved_alias}'.format(region=region,
reserved_alias=reserved_alias),
'AliasName': reserved_alias
} for reserved_alias in reserved_aliases
]
backend_aliases = self.kms_backend.get_all_aliases()
for target_key_id, aliases in backend_aliases.items():
for alias_name in aliases:
response_aliases.append({
'AliasArn': u'arn:aws:kms:{region}:012345678912:{alias_name}'.format(region=region,
alias_name=alias_name),
'AliasName': alias_name,
'TargetKeyId': target_key_id,
})
return json.dumps({
'Truncated': False,
'Aliases': response_aliases,
})
def enable_key_rotation(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.enable_key_rotation(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def disable_key_rotation(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.disable_key_rotation(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def get_key_rotation_status(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
rotation_enabled = self.kms_backend.get_key_rotation_status(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'KeyRotationEnabled': rotation_enabled})
def put_key_policy(self):
key_id = self.parameters.get('KeyId')
policy_name = self.parameters.get('PolicyName')
policy = self.parameters.get('Policy')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
_assert_default_policy(policy_name)
try:
self.kms_backend.put_key_policy(key_id, policy)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def get_key_policy(self):
key_id = self.parameters.get('KeyId')
policy_name = self.parameters.get('PolicyName')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
_assert_default_policy(policy_name)
try:
return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)})
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
def list_key_policies(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
|
benallard/pythoncard
|
test/testCipher.py
|
Python
|
lgpl-3.0
| 3,613 | 0.016883 |
import unittest
from pythoncardx.crypto import Cipher
from pythoncard.security import CryptoException, RSAPublicKey, KeyBuilder, KeyPair
class testCipher(unittest.TestCase):
def testInit(self):
        c = Cipher.getInstance(Cipher.ALG_RSA_NOPAD, False)
self.assertEqual(Cipher.ALG_RSA_NOPAD, c.getAlgorithm())
try:
c.update([], 0, 0, [], 0)
self.fail()
except CryptoException as ce:
self.assertEqual(CryptoException.INVALID_INIT, ce.getReason())
try:
c.init("abcd", Cipher.MODE_ENCRYPT)
self.fail()
except CryptoException as ce:
self.assertEqual(CryptoException.ILLEGAL_VALUE, ce.getReason())
pbk = KeyBuilder.buildKey(KeyBuilder.TYPE_RSA_PUBLIC, KeyBuilder.LENGTH_RSA_1024, False)
try:
c.init(pbk, Cipher.MODE_ENCRYPT)
self.fail()
except CryptoException as ce:
self.assertEqual(CryptoException.UNINITIALIZED_KEY, ce.getReason())
pbk.setExponent([0,1,2,3,4,5,6,7,8,9], 5, 5)
pbk.setModulus([7]*128, 0, 128) # 1024 // 8
c.init(pbk, Cipher.MODE_ENCRYPT)
def testRSAEncryptDecrypt(self):
kp = KeyPair(KeyPair.ALG_RSA, KeyBuilder.LENGTH_RSA_1024)
kp.genKeyPair()
pubk = kp.getPublic()
self.assertEqual(1024, pubk.getSize())
privk = kp.getPrivate()
self.assertEqual(1024, privk.getSize())
c = Cipher.getInstance(Cipher.ALG_RSA_PKCS1, False)
c.init(pubk, Cipher.MODE_ENCRYPT)
res = [0]*1024
l = c.doFinal([0,1,2,3,4,5], 0, 6, res, 0)
c.init(privk, Cipher.MODE_DECRYPT)
res2 = [0]*1024
l = c.doFinal(res, 0, l, res2, 0)
self.assertEqual([0,1,2,3,4,5], res2[:l])
def testRSASignVerify(self):
kp = KeyPair(KeyPair.ALG_RSA, KeyBuilder.LENGTH_RSA_1024)
kp.genKeyPair()
pubk = kp.getPublic()
self.assertEqual(1024, pubk.getSize())
privk = kp.getPrivate()
self.assertEqual(1024, privk.getSize())
c = Cipher.getInstance(Cipher.ALG_RSA_PKCS1, False)
c.init(privk, Cipher.MODE_ENCRYPT)
res = [0]*1024
l = c.doFinal([0,1,2,3,4,5], 0, 6, res, 0)
c.init(pubk, Cipher.MODE_DECRYPT)
res2 = [0]*1024
l = c.doFinal(res, 0, l, res2, 0)
self.assertEqual([0,1,2,3,4,5], res2[:l])
def GemaltoSample(self):
try:
rsa = javacardx.crypto.Cipher.getInstance( javacardx.crypto.Cipher.ALG_RSA_NOPAD , False )
pubkey = javacard.security.KeyBuilder.buildKey(TYPE_RSA_PUBLIC, LENGTH_RSA_512, False )
except javacardx.crypto.CryptoException as e:
#... RSA crypto engine not supported by this card
pass
pubkey.setModulus( modulus, 0, modulus_len)
pubkey.setExponent( exponent, 0, expo_len)
rsa.init(pubkey, MODE_ENCRYPT)
rsa.doFinal(buffer2encrypt, 0, 64, output_buffer, 0)
def testDES(self):
KeyArray = [1,2,3,4,5,6,7,8]
bytBuffer = [0 for i in range(8)]
MyBuffer = [7,5,6,8]
MyDesKey = KeyBuilder.buildKey(KeyBuilder.TYPE_DES, KeyBuilder.LENGTH_DES, False)
crypt_des = Cipher.getInstance(Cipher.ALG_DES_ECB_PKCS5, False)
MyDesKey.setKey(KeyArray, 0)
crypt_des.init(MyDesKey, Cipher.MODE_ENCRYPT)
length = crypt_des.doFinal(MyBuffer, 0, len(MyBuffer), bytBuffer, 0)
crypt_des.init(MyDesKey, Cipher.MODE_DECRYPT)
crypt_des.doFinal(bytBuffer, 0, length, MyBuffer, 0)
self.assertEqual([7,5,6,8], MyBuffer)
|
hubert667/AIR
|
src/python/ranker/AbstractRankingFunction.py
|
Python
|
gpl-3.0
| 2,935 | 0.003407 |
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
from utils import get_class
class AbstractRankingFunction:
"""Abstract base class for ranking functions."""
def __init__(self,
ranker_arg_str,
ties,
feature_count,
init=None,
sample=None):
self.feature_count = feature_count
ranking_model_str = "ranker.model.Linear"
for arg in ranker_arg_str:
if arg.startswith("ranker.model"):
ranking_model_str = arg
else:
self.ranker_type = float(arg)
self.ranking_model = get_class(ranking_model_str)(feature_count)
self.sample = getattr(__import__("utils"), sample)
self.ties = ties
self.w = self.ranking_model.initialize_weights(init)
def score(self, features):
return self.ranking_model.score(features, self.w.transpose())
def get_candidate_weight(self, delta):
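        # Editor's note: this samples an exploration direction u and returns the
        # perturbed candidate weights w + delta * u together with u itself, which the
        # caller presumably needs when deciding how to update self.w afterwards.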
u = self.sample(self.ranking_model.get_feature_count())
return self.w + delta * u, u
def init_ranking(self, query):
raise NotImplementedError("Derived class needs to implement "
"init_ranking.")
|
def next(self):
raise NotImplementedError("Derived class needs to implement "
"next.")
def next_det(self):
        raise NotImplementedError("Derived class needs to implement "
"next_det.")
def next_random(self):
raise NotImplementedError("Derived class needs to implement "
"next_random.")
def get_document_probability(self, docid):
raise NotImplementedError("Derived class needs to implement "
"get_document_probability.")
def getDocs(self, numdocs=None):
if numdocs != None:
return self.docids[:numdocs]
return self.docids
def rm_document(self, docid):
raise NotImplementedError("Derived class needs to implement "
"rm_document.")
def document_count(self):
raise NotImplementedError("Derived class needs to implement "
"document_count.")
def update_weights(self, w, alpha=None):
"""update weight vector"""
if alpha == None:
self.w = w
else:
self.w = self.w + alpha * w
|
dz0/websheets
|
grade_java.py
|
Python
|
agpl-3.0
| 5,860 | 0.014164 |
import config, json, cgi, sys, Websheet, re, os
def grade(reference_solution, student_solution, translate_line, websheet, student):
if not re.match(r"^\w+$", websheet.classname):
return ("Internal Error (Compiling)", "Invalid overridden classname <tt>" + websheet.classname + " </tt>")
dump = {
"reference." + websheet.classname : reference_solution,
"student." + websheet.classname : student_solution[1],
"tester." + websheet.classname : websheet.make_tester()
}
# print(student_solution[1])
# print(reference_solution)
# print(websheet.make_tester())
for clazz in ["Grader", "Options", "Utils"]:
dump["websheets."+clazz] = "".join(open("grade_java_files/"+clazz+".java"))
for dep in websheet.dependencies:
depws = Websheet.Websheet.from_name(dep)
if depws == None:
return ("Internal Error", "Dependent websheet " + dep + " does not exist");
submission = config.load_submission(student, dep, True)
if submission == False:
return("Dependency Error",
"<div class='dependency-error'><i>Dependency error</i>: " +
"You need to successfully complete the <a href='javascript:websheets.load(\""+dep+"\")'><tt>"+dep+"</tt></a> websheet first (while logged in).</div>") # error text
submission = [{'code': x, 'from': {'line': 0, 'ch':0}, 'to': {'line': 0, 'ch': 0}} for x in submission]
dump["student."+dep] = depws.combine_with_template(submission, "student")[1]
dump["reference."+dep] = depws.get_reference_solution("reference")
compileRun = config.run_java(["traceprinter/ramtools/CompileToBytes"], json.dumps(dump))
compileResult = compileRun.stdout
if (compileResult==""):
return ("Internal Error (Compiling)", "<pre>\n" +
cgi.escape(compileRun.stderr) +
"</pre>"+"<!--"+compileRun._toString()+"-->")
compileObj = json.loads(compileResult)
# print(compileObj['status'])
if compileObj['status'] == 'Internal Error':
return ("Internal Error (Compiling)", "<pre>\n" +
cgi.escape(compileObj["errmsg"]) +
"</pre>")
elif compileObj['status'] == 'Compile-time Error':
errorObj = compileObj['error']
if errorObj['filename'] == ("student." + websheet.classname + ".java"):
result = "Syntax error (could not compile):"
result += "<br>"
result += '<tt>'+errorObj['filename'].split('.')[-2]+'.java</tt>, line '
result += str(translate_line(errorObj['row'])) + ':'
#result += str(errorObj['row']) + ':'
result += "<pre>\n"
#remove the safeexec bits
result += cgi.escape(errorObj["errmsg"]
.replace("stdlibpack.", "")
.replace("student.", "")
)
result += "</pre>"
return ("Syntax Error", result)
else:
return("Internal Error (Compiling reference solution and testing suite)",
'<b>File: </b><tt>'+errorObj['filename']+'</tt><br><b>Line number: '
+str(errorObj['row'])+"</b><pre>"
+errorObj['errmsg']+":\n"+dump[errorObj['filename'][:-5]].split("\n")[errorObj['row']-1]+"</pre>")
#print(compileResult)
# prefetch all urls, pass them to the grader on stdin
compileObj["stdin"] = json.dumps({
"fetched_urls":websheet.prefetch_urls(True)
})
compileResult = json.dumps(compileObj)
runUser = config.run_java(["traceprinter/ramtools/RAMRun", "tester." + websheet.classname], compileResult)
#runUser = config.run_java("tester." + websheet.classname + " " + student)
#print(runUser.stdout)
RAMRunError = runUser.stdout.startswith("Error")
RAMRunErrmsg = runUser.stdout[:runUser.stdout.index('\n')]
runUser.stdout = runUser.stdout[runUser.stdout.index('\n')+1:]
#print(runUser.stdout)
#print(runUser.stderr)
if runUser.returncode != 0 or runUser.stdout.startswith("Time Limit Exceeded"):
errmsg = runUser.stderr.split('\n')[0]
result = runUser.stdout
result += "<div class='safeexec'>Crashed! The grader reported "
result += "<code>"
result += cgi.escape(errmsg)
result += "</code>"
result += "</div>"
result += "<!--" + runUser.stderr + "-->"
return ("Sandbox Limit", result)
if RAMRunError:
        result = "<div class='safeexec'>Could not execute! "
result += "<code>"
result += cgi.escape(RAMRunErrmsg)
result += "</code>"
result += "</div>"
return ("Internal Error (RAMRun)", result)
runtimeOutput = re.sub(
re.compile("(at|from) line (\d+) "),
lambda match: match.group(1)+" line " + translate_line(match.group(2)) + " ",
runUser.stdout)
#print(runtimeOutput)
    def ssf(s, t, u):  # substring of s, from after t to before u
if t not in s: raise ValueError("Can't ssf("+s+","+t+","+u+")")
s = s[s.index(t)+len(t) : ]
return s[ : s.index(u)]
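    # e.g. ssf("x<b>mid</b>y", "<b>", "</b>") == "mid"   (editor's illustration)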
if "<div class='error'>Runtime error:" in runtimeOutput:
|
category = "Runtime Error"
errmsg = ssf(runtimeOutput[runtimeOutput.index("<div class='error'>Runtime error:"):], "<pre >", "\n")
elif "<div class='all-passed'>" in runtimeOutput:
category = "Passed"
epilogue = websheet.epilogue
else:
category = "Failed Tests"
        if "<div class='error'>" in runtimeOutput:
errmsg = ssf(runtimeOutput, "<div class='error'>", '</div>')
else:
return ("Internal Error", "<b>stderr</b><pre>" + runUser.stderr + "</pre><b>stdout</b><br>" + runUser.stdout)
return (category, runtimeOutput)
|
chrippa/livestreamer
|
src/livestreamer/plugins/servustv.py
|
Python
|
bsd-2-clause
| 758 | 0.001319 |
#!/usr/bin/env python
import re
from livestreamer.plugin import Plugin
from livestreamer.stream import HDSStream
_channel = dict(
at="servustvhd_1@51229",
de="servustvhdde_1@75540"
)
STREAM_INFO_URL = "http://hdiosstv-f.akamaihd.net/z/{channel}/manifest.f4m"
_url_re = re.compile(r"http://(?:www.)?servustv.com/(de|at)/.*")
class ServusTV(Plugin):
@classmethod
def can_handle_url(cls, url):
match = _url_re.match(url)
return match
    def _get_streams(self):
url_match = _url_re.match(self.url)
if url_match:
            if url_match.group(1) in _channel:
return HDSStream.parse_manifest(self.session, STREAM_INFO_URL.format(channel=_channel[url_match.group(1)]))
__plugin__ = ServusTV
|
arenadata/ambari
|
ambari-server/src/main/python/ambari_server/BackupRestore.py
|
Python
|
apache-2.0
| 6,601 | 0.009998 |
#!/usr/bin/env ambari-python-wrap
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import zipfile
import os
from ambari_server.ambariPath import AmbariPath
# Default values are hardcoded here
BACKUP_PROCESS = 'backup'
RESTORE_PROCESS = 'restore'
SUPPORTED_PROCESSES = [BACKUP_PROCESS, RESTORE_PROCESS]
# The list of files where the ambari server state is kept on the filesystem
AMBARI_FILESYSTEM_STATE = [AmbariPath.get("/etc/ambari-server/conf"),
AmbariPath.get("/var/lib/ambari-server/resources"),
AmbariPath.get("/var/run/ambari-server/bootstrap/"),
AmbariPath.get("/var/run/ambari-server/stack-recommendations")]
# What to use when no path/archive is specified
DEFAULT_ARCHIVE = AmbariPath.get("/var/lib/ambari-server/Ambari_State_Backup.zip")
# Responsible for managing the Backup/Restore process
class BackupRestore:
def __init__(self, state_file_list, zipname, zip_folder_path):
"""
Zip file creator
:param state_file_list: the list of files where the Ambari State is kept on the filesystem
:param zipname: the name of the archive to use
:param zip_folder_path: the path of the archive
:return:
"""
self.state_file_list = state_file_list
self.zipname = zipname
self.zip_folder_path = zip_folder_path
def perform_backup(self):
"""
Used to perform the actual backup, by creating the zip archive
:return:
"""
try:
print("Creating zip file...")
# Use allowZip64=True to allow sizes greater than 4GB
zipf = zipfile.ZipFile(self.zip_folder_path + self.zipname, 'w', allowZip64=True)
zipdir(zipf, self.state_file_list, self.zipname)
except Exception, e:
sys.exit("Could not create zip file. Details: " + str(e))
print("Zip file created at " + self.zip_folder_path + self.zipname)
def perform_restore(self):
"""
Used to perform the restore process
:return:
"""
try:
print("Extracting the archive " + self.zip_folder_path + self.zipname)
unzip(self.zip_folder_path + self.zipname, '/')
except Exception, e:
sys.exit("Could not extract the zipfile " + self.zip_folder_path + self.zipname
+ " Details: " + str(e))
def unzip(source_filename, dest_dir):
"""
Zip archive extractor
:param source_filename: the absolute path of the file to unzip
:param dest_dir: the destination of the zip content
:return:
"""
zf = zipfile.ZipFile(source_filename)
try:
zf.extractall(dest_dir)
except Exception, e:
print("A problem occurred while unzipping. Details: " + str(e))
raise e
finally:
zf.close()
def zipdir(zipf, state_file_list, zipname):
"""
Used to archive the specified directory
:param zipf: the zipfile
:param state_file_list: the file list to archive
:param zipname: the name of the zip
:return:
"""
try:
for path in state_file_list:
for root, dirs, files in os.walk(path):
for file in files:
if not file == zipname:
zipf.write(os.path.join(root, file))
except Exception, e:
print("A problem occurred while unzipping. Details: " + str(e))
raise e
finally:
zipf.close()
def print_usage():
"""
Usage instructions
:return:
"""
print("Usage: python BackupRestore.py <processType> [zip-folder-path|zip-file-path]\n\n"
+ " processType - backup : backs up the filesystem state of the Ambari server into a zip file\n"
+ " processType - restore : restores the filesystem state of the Ambari server\n"
+ " [zip-folder-path] used with backup specifies the path of the folder where the zip file to be created\n"
+ " [zip-folder-path] used with restore specifies the path of the Ambari folder where the zip file to restore from is located\n")
def validate_folders(folders):
"""
Used to validate folder existence on the machine
:param folders: folder list containing paths to validate
:return:
"""
for folder in folders:
if not os.path.isdir(folder):
sys.exit("Error while validating folders. Folder " + folder + " does not exist.")
def retrieve_path_and_zipname(archive_absolute_path):
target = {'path': None , 'zipname': None}
try:
elements = archive_absolute_path.split("/")
if elements is not None and len(elements)>0:
target['zipname'] = elements[len(elements)-1]
target['path'] = archive_absolute_path.replace(elements[len(elements)-1], "")
except Exception, e:
sys.exit("Could not retrieve path and zipname from the absolute path " + archive_absolute_path + ". Please check arguments."
+ " Details: " + str(e))
return target
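# Example (editor's illustration):
#   retrieve_path_and_zipname("/var/lib/ambari-server/Ambari_State_Backup.zip")
#   returns {'path': '/var/lib/ambari-server/', 'zipname': 'Ambari_State_Backup.zip'}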
def main(argv=None):
# Arg checks
if len(argv) != 3 and len(argv) != 2:
print_usage()
sys.exit("Invalid usage.")
else:
process_type = argv[1]
if not (SUPPORTED_PROCESSES.__contains__(process_type)):
sys.exit("Unsupported process type: " + process_type)
# if no archive is specified
if len(argv) == 2:
print "No path specified. Will use " + DEFAULT_ARCHIVE
location_data = retrieve_path_and_zipname(DEFAULT_ARCHIVE)
else:
location_data = retrieve_path_and_zipname(argv[2])
validate_folders([location_data['path']])
zip_file_path = location_data['path']
ambari_backup_zip_filename = location_data['zipname']
backup_restore = BackupRestore(AMBARI_FILESYSTEM_STATE, ambari_backup_zip_filename, zip_file_path)
print(process_type.title() + " process initiated.")
if process_type == BACKUP_PROCESS:
validate_folders(AMBARI_FILESYSTEM_STATE)
backup_restore.perform_backup()
print(BACKUP_PROCESS.title() + " complete.")
if process_type == RESTORE_PROCESS:
backup_restore.perform_restore()
print(RESTORE_PROCESS.title() + " complete.")
if __name__ == '__main__':
main(sys.argv)
|
SpamScope/spamscope
|
tests/test_phishing.py
|
Python
|
apache-2.0
| 4,631 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2017 Fedele Mantuano (https://www.linkedin.com/in/fmantuano/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import copy
import os
import unittest
import mailparser
from pyfaup.faup import Faup
from context import mails
from context import utils
phishing = mails.phishing
base_path = os.path.realpath(os.path.dirname(__file__))
mail_thug = os.path.join(base_path, 'samples', 'mail_thug')
mail_form = os.path.join(base_path, 'samples', 'mail_form')
mail_test_5 = os.path.join(base_path, 'samples', 'mail_test_5')
mail_test_6 = os.path.join(base_path, 'samples', 'mail_test_6')
logging.getLogger().addHandler(logging.NullHandler())
class TestPhishing(unittest.TestCase):
faup = Faup()
def setUp(self):
parser = mailparser.parse_from_file(mail_thug)
self.email = parser.mail
self.attachments = parser.attachments
parser = mailparser.parse_from_file(mail_form)
self.email_form = parser.mail
body = self.email_form.get("body")
self.urls = utils.urls_extractor(body, self.faup)
d = {"generic": "conf/keywords/targets.example.yml",
"custom": "conf/keywords/targets_english.example.yml"}
self.targets = utils.load_keywords_dict(d)
d = {"generic": "conf/keywords/subjects.example.yml",
"custom": "conf/keywords/subjects_english.example.yml"}
self.subjects = utils.load_keywords_list(d)
def test_ParserError(self):
parser = mailparser.parse_from_file(mail_test_6)
body = parser.mail.get("body")
flag_form = phishing.check_form(body)
self.assertFalse(flag_form)
def test_none_values(self):
email = copy.deepcopy(self.email)
        email.pop("body", None)
email.pop("subjects", None)
email.pop("from", None)
phishing.check_phishing(
email=email,
attachments=self.attachments,
urls_body=self.urls,
urls_attachments=self.urls,
target_keys=self.targets,
subject_keys=self.subjects)
def test_check_form(self):
body = self.email_form.get("body")
        flag_form = phishing.check_form(body)
self.assertTrue(flag_form)
body = self.email.get("body")
flag_form = phishing.check_form(body)
self.assertFalse(flag_form)
def test_form_value_error(self):
parser = mailparser.parse_from_file(mail_test_5)
body = parser.mail.get("body")
flag_form = phishing.check_form(body)
self.assertFalse(flag_form)
def test_check_urls(self):
flag = False
if any(phishing.check_urls(self.urls, i)
for i in self.targets.values()):
flag = True
self.assertTrue(flag)
def test_check_phishing(self):
results = phishing.check_phishing(
email=self.email,
attachments=self.attachments,
urls_body=self.urls,
urls_attachments=self.urls,
target_keys=self.targets,
subject_keys=self.subjects)
self.assertIsInstance(results, dict)
self.assertEqual(results["score"], 123)
self.assertIn("filename_attachments", results["score_expanded"])
self.assertIn("mail_subject", results["score_expanded"])
self.assertIn("mail_body", results["score_expanded"])
self.assertIn("mail_from", results["score_expanded"])
self.assertIn("urls_body", results["score_expanded"])
self.assertIn("urls_attachments", results["score_expanded"])
self.assertIn("Test", results["targets"])
self.assertTrue(results["with_phishing"])
def test_check_phishing_form(self):
results = phishing.check_phishing(
email=self.email_form,
attachments=self.attachments,
urls_body=self.urls,
urls_attachments=self.urls,
target_keys=self.targets,
subject_keys=self.subjects)
self.assertIn("mail_form", results["score_expanded"])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Kesel/django
|
demo/views.py
|
Python
|
mit
| 459 | 0.002179 |
import platform
import pip
from django import get_version
from django.shortcuts import render
def home(request):
"""
renders the deployment server details on the screen.
:param request: The django formatted HttpRequest
:return: renders context c with the demo template.
"""
    c = dict(python_version=platform.python_version(), django_version=get_version(), pip_version=pip.__version__)
return render(request, 'demo/demo.html', c)
|
|
18F/regulations-site
|
regulations/generator/layers/definitions.py
|
Python
|
cc0-1.0
| 1,632 | 0 |
from django.template import loader
from regulations.generator.layers.base import InlineLayer
from regulations.generator.section_url import SectionUrl
from regulations.generator.layers import utils
from ..node_types import to_markup_id
class DefinitionsLayer(InlineLayer):
shorthand = 'terms'
    data_source = 'terms'
def __init__(self, layer):
self.layer = layer
self.template = loader.get_template(
'regulations/layers/definition_citation.html')
self.sectional = False
self.version = None
self.rev_urls = SectionUrl()
self.rendered = {}
        # precomputation
        for def_struct in self.layer['referenced'].values():
def_struct['reference_split'] = def_struct['reference'].split('-')
def replacement_for(self, original, data):
""" Create the link that takes you to the definition of the term. """
citation = data['ref']
# term = term w/o pluralization
term = self.layer['referenced'][citation]['term']
citation = self.layer['referenced'][citation]['reference_split']
key = (original, tuple(citation))
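        # Cache the rendered citation link per (link text, citation) pair so repeated terms render only once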
if key not in self.rendered:
context = {'citation': {
'url': self.rev_urls.fetch(citation, self.version,
self.sectional),
'label': original,
'term': term,
'definition_reference': '-'.join(to_markup_id(citation))}}
rendered = utils.render_template(self.template, context)
self.rendered[key] = rendered
return self.rendered[key]
|
jslhs/sunpy
|
sunpy/gui/__init__.py
|
Python
|
bsd-2-clause
| 1,811 | 0.005522 |
#-*- coding: utf-8 -*-
# Author: Matt Earnshaw <matt@earnshaw.org.uk>
from __future__ import absolute_import
import os
import sys
import sunpy
from PyQt4.QtGui import QApplication
from sunpy.gui.mainwindow import MainWindow
from sunpy.io import UnrecognizedFileTypeError
class Plotman(object):
""" Wraps a MainWindow so PlotMan instances can be created via the CLI.
Examples
--------
from sunpy.gui import Plotman
plots = Plotman("data/examples")
plots.show()
"""
def __init__(self, *paths):
""" *paths: directories containing FITS paths
or FITS paths to be opened in PlotMan """
self.app = QApplication(sys.argv)
        self.main = MainWindow()
self.open_files(paths)
def open_files(self, inputs):
VALID_EXTENSIONS = [".jp2", ".fits", ".fts"]
        to_open = []
# Determine files to process
for input_ in inputs:
if os.path.isfile(input_):
to_open.append(input_)
elif os.path.isdir(input_):
for file_ in os.listdir(input_):
                    to_open.append(os.path.join(input_, file_))
else:
raise IOError("Path " + input_ + " does not exist.")
# Load files
for filepath in to_open:
name, ext = os.path.splitext(filepath) #pylint: disable=W0612
if ext.lower() in VALID_EXTENSIONS:
try:
self.main.add_tab(filepath, os.path.basename(filepath))
except UnrecognizedFileTypeError:
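                    # Silently skip files that sunpy.io cannot parse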
pass
def show(self):
self.main.show()
self.app.exec_()
if __name__=="__main__":
from sunpy.gui import Plotman
plots = Plotman(sunpy.AIA_171_IMAGE)
plots.show()
|
sdoumbouya/ovirt-node
|
src/ovirt/node/setup/core/status_page.py
|
Python
|
gpl-2.0
| 8,877 | 0.000113 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# status_page.py - Copyright (C) 2012 Red Hat, Inc.
# Written by Fabian Deutsch <fabiand@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
from ovirt.node import ui, plugins, utils
from ovirt.node.config import defaults
from ovirt.node.utils import security, virt, system
import os
import textwrap
"""
Status page plugin
"""
class Plugin(plugins.NodePlugin):
    """This is the summary page, summarizing all sorts of information
There are no validators, as there is no input.
"""
_model = None
def name(self):
return "Status"
def rank(self):
return 0
def model(self):
        net_status, net_br, net_addrs = utils.network.networking_status()
net_addrs_str = ""
if net_addrs:
net_addrs_str = "\nIPv4: {inet}\nIPv6: {inet6}".format(**net_addrs)
num_domains = virt.number_of_domains()
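        # Keys in this dict map to the widget paths declared in ui_content()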
return {
"status": virt.hardware_status(),
"networking": net_status,
"networking.bridge": "%s %s" % (net_br, net_addrs_str),
"logs": self._logging_summary(),
"libvirt.num_guests": num_domains,
}
def validators(self):
return {}
def ui_content(self):
"""Describes the UI this plugin requires
This is an ordered list of (path, widget) tuples.
"""
# Function to expand all "keywords" to the same length
aligned = lambda l: l.ljust(14)
# Network related widgets, appearing in one row
network_widgets = [ui.KeywordLabel("networking",
aligned("Networking: ")),
ui.KeywordLabel("networking.bridge",
"Bridge: "),
]
action_widgets = [ui.Button("action.lock", "Lock"),
ui.Button("action.logoff", "Log Off"),
ui.Button("action.restart", "Restart"),
ui.Button("action.poweroff", "Power Off")
]
widgets = [ui.Header("header[0]", "System Information"),
ui.KeywordLabel("status", aligned("Status: ")),
ui.Divider("divider[0]"),
ui.Row("row[0]", network_widgets),
ui.Divider("divider[1]"),
ui.KeywordLabel("logs", aligned("Logs: ")),
ui.Divider("divider[2]"),
ui.KeywordLabel("libvirt.num_guests",
aligned("Running VMs: ")),
ui.Divider("divider[3]"),
ui.Label("support.hint", "Press F8 for support menu"),
ui.Divider("divider[4]"),
ui.Row("row[1]",
[ui.Button("action.hostkey", "View Host Key"),
ui.Button("action.cpu_details",
"View CPU Details"),
]),
ui.Row("row[2]", action_widgets),
]
self.widgets.add(widgets)
page = ui.Page("page", widgets)
page.buttons = []
return page
def on_change(self, changes):
pass
def on_merge(self, changes):
# Handle button presses
number_of_vm = "There are %s Virtual Machines running." \
% (virt.number_of_domains())
if "action.lock" in changes:
self.logger.info("Locking screen")
self._lock_dialog = LockDialog()
self.application.ui.hotkeys_enabled(False)
self.widgets.add(self._lock_dialog)
return self._lock_dialog
elif "action.unlock" in changes and "password" in changes:
self.logger.info("UnLocking screen")
pam = security.PAM()
if pam.authenticate(os.getlogin(), changes["password"]):
self._lock_dialog.close()
self.application.ui.hotkeys_enabled(True)
else:
self.application.notice("The provided password was incorrect.")
self.widgets["password"].text("")
elif "action.logoff" in changes:
self.logger.info("Logging off")
self.application.quit()
elif "action.restart" in changes:
self.logger.info("Restarting")
return ui.ConfirmationDialog("confirm.reboot",
"Confirm System Restart",
number_of_vm +
"\nThis will restart the system,"
"proceed?")
elif "confirm.reboot.yes" in changes:
self.logger.info("Confirm Restarting")
self.dry_or(lambda: system.reboot())
elif "action.poweroff" in changes:
self.logger.info("Shutting down")
return ui.ConfirmationDialog("confirm.shutdown",
"Confirm System Poweroff",
number_of_vm +
"\nThis will shut down the system,"
"proceed?")
elif "confirm.shutdown.yes" in changes:
self.logger.info("Confirm Shutting down")
self.dry_or(lambda: system.poweroff())
elif "action.hostkey" in changes:
self.logger.info("Showing hostkey")
return HostkeyDialog("dialog.hostkey", "Host Key")
elif "action.cpu_details" in changes:
self.logger.info("Showing CPU details")
return CPUFeaturesDialog("dialog.cpu_details", "CPU Details")
elif "_save" in changes:
self.widgets["dialog.hostkey"].close()
def _logging_summary(self):
"""Return a textual summary of the current log configuration
"""
netconsole = defaults.Netconsole().retrieve()
syslog = defaults.Syslog().retrieve()
destinations = []
if syslog["server"]:
destinations.append("Rsyslog: %s:%s" % (syslog["server"],
syslog["port"] or "514"))
if netconsole["server"]:
destinations.append("Netconsole: %s:%s" %
(netconsole["server"],
netconsole["port"] or "6666"))
return ", ".join(destinations) if destinations else "Local Only"
class HostkeyDialog(ui.Dialog):
def __init__(self, path, title):
super(HostkeyDialog, self).__init__(path, title, [])
ssh = security.Ssh()
fp, hk = ssh.get_hostkey()
self.children = [ui.Label("hostkey.label[0]",
"RSA Host Key Fingerprint:"),
ui.Label("hostkey.fp", fp),
ui.Divider("hostkey.divider[0]"),
ui.Label("hostkey.label[1]",
"RSA Host Key:"),
ui.Label("hostkey", "\n".join(textwrap.wrap(hk, 64))),
]
self.buttons = [ui.CloseButton("dialog.close")]
class CPUFeaturesDialog(ui.InfoDialog):
"""The dialog beeing displayed when th euser clicks CPU Details
"""
def __init__(self, path, title):
msg = utils.system.cpu_details()
        super(CPUFeaturesDialog, self).__init__(path, title, msg)
|
wcmitchell/insights-core
|
insights/parsers/ceilometer_conf.py
|
Python
|
apache-2.0
| 1,890 | 0.000529 |
"""
CeilometerConf - file ``/etc/ceilometer/ceilometer.conf``
=========================================================
The ``/etc/ceilometer/ceilometer.conf`` file is in a standard '.ini' format,
and this parser uses the IniConfigFile base class to read this.
Given a file containing the following test data::
[DEFAULT]
#
# From ceilometer
http_timeout = 600
debug = False
verbose = False
log_dir = /var/log/ceilometer
meter_dispatcher=database
event_dispatcher=database
[alarm]
evaluation_interval = 60
evaluation_service=ceilometer.alarm.service.SingletonAlarmService
    partition_rpc_topic=alarm_partition_coordination
[api]
port = 8777
host = 192.0.2.10
[central]
[collector]
udp_address = 0.0.0.0
udp_port = 4952
[compute]
[coordination]
    backend_url = redis://:chDWmHdH8dyjsmpCWfCEpJR87@192.0.2.7:6379/
Example:
>>> config = shared[CeilometerConf]
>>> config.sections()
['DEFAULT', 'alarm', 'api', 'central', 'collector', 'compute', 'coordination']
>>> config.items('api')
['port', 'host']
>>> config.has_option('alarm', 'evaluation_interval')
True
>>> config.get('coordination', 'backend_url')
'redis://:chDWmHdH8dyjsmpCWfCEpJR87@192.0.2.7:6379/'
>>> config.getint('collector', 'udp_port')
4952
>>> config.getboolean('DEFAULT', 'debug')
False
"""
from .. import parser, IniConfigFile
from insights.specs import ceilometer_conf
@parser(ceilometer_conf)
class CeilometerConf(IniConfigFile):
"""
A dict of the content of the ``ceilometer.conf`` configuration file.
Example selection of dictionary contents::
{
"DEFAULT": {
"http_timeout":"600",
"debug": "False"
},
"api": {
"port":"8877",
},
}
"""
pass
|
timopulkkinen/BubbleFish
|
ppapi/generators/idl_thunk.py
|
Python
|
bsd-3-clause
| 16,821 | 0.009036 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C++ style thunks """
import glob
import os
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_proto import CGen, GetNodeComments, CommentLines, Comment
from idl_generator import Generator, GeneratorByFile
Option('thunkroot', 'Base directory of output',
default=os.path.join('..', 'thunk'))
class TGenError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
class ThunkBodyMetadata(object):
"""Metadata about thunk body. Used for selecting which headers to emit."""
def __init__(self):
self._apis = set()
self._includes = set()
def AddApi(self, api):
self._apis.add(api)
def Apis(self):
return self._apis
def AddInclude(self, include):
self._includes.add(include)
def Includes(self):
return self._includes
def _GetBaseFileName(filenode):
"""Returns the base name for output files, given the filenode.
Examples:
'dev/ppb_find_dev.h' -> 'ppb_find_dev'
'trusted/ppb_buffer_trusted.h' -> 'ppb_buffer_trusted'
"""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
return name
def _GetHeaderFileName(filenode):
"""Returns the name for the header for this file."""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
if path:
header = "ppapi/c/%s/%s.h" % (path, name)
else:
header = "ppapi/c/%s.h" % name
return header
def _GetThunkFileName(filenode, relpath):
"""Returns the thunk file name."""
path = os.path.split(filenode.GetProperty('NAME'))[0]
name = _GetBaseFileName(filenode)
# We don't reattach the path for thunk.
if relpath: name = os.path.join(relpath, name)
name = '%s%s' % (name, '_thunk.cc')
return name
def _MakeEnterLine(filenode, interface, arg, handle_errors, callback, meta):
"""Returns an EnterInstance/EnterResource string for a function."""
if arg[0] == 'PP_Instance':
if callback is None:
return 'EnterInstance enter(%s);' % arg[1]
else:
return 'EnterInstance enter(%s, %s);' % (arg[1], callback)
elif arg[0] == 'PP_Resource':
api_name = interface.GetName()
if api_name.endswith('Trusted'):
api_name = api_name[:-len('Trusted')]
if api_name.endswith('_Dev'):
api_name = api_name[:-len('_Dev')]
api_name += '_API'
enter_type = 'EnterResource<%s>' % api_name
# The API header matches the file name, not the interface name.
api_basename = _GetBaseFileName(filenode)
if api_basename.endswith('_dev'):
# Clip off _dev suffix.
api_basename = api_basename[:-len('_dev')]
if api_basename.endswith('_trusted'):
# Clip off _trusted suffix.
api_basename = api_basename[:-len('_trusted')]
meta.AddApi(api_basename + '_api')
if callback is None:
return '%s enter(%s, %s);' % (enter_type, arg[1],
str(handle_errors).lower())
else:
return '%s enter(%s, %s, %s);' % (enter_type, arg[1],
callback,
str(handle_errors).lower())
else:
raise TGenError("Unknown type for _MakeEnterLine: %s" % arg[0])
def _GetShortName(interface, filter_suffixes):
"""Return a shorter interface name that matches Is* and Create* functions."""
parts = interface.GetName().split('_')[1:]
tail = parts[len(parts) - 1]
if tail in filter_suffixes:
parts = parts[:-1]
return ''.join(parts)
def _IsTypeCheck(interface, node):
"""Returns true if node represents a type-checking function."""
return node.GetName() == 'Is%s' % _GetShortName(interface, ['Dev', 'Private'])
def _GetCreateFuncName(interface):
"""Returns the creation function name for an interface."""
return 'Create%s' % _GetShortName(interface, ['Dev'])
def _GetDefaultFailureValue(t):
"""Returns the default failure value for a given type.
Returns None if no default failure value exists for the type.
"""
values = {
'PP_Bool': 'PP_FALSE',
'PP_Resource': '0',
'struct PP_Var': 'PP_MakeUndefined()',
'float': '0.0f',
'int32_t': 'enter.retval()',
'uint16_t': '0',
'uint32_t': '0',
'uint64_t': '0',
}
if t in values:
return values[t]
return None
def _MakeCreateMemberBody(interface, member, args):
"""Returns the body of a Create() function.
Args:
interface - IDLNode for the interface
member - IDLNode for member function
args - List of arguments for the Create() function
"""
if args[0][0] == 'PP_Resource':
body = 'Resource* object =\n'
body += ' PpapiGlobals::Get()->GetResourceTracker()->'
body += 'GetResource(%s);\n' % args[0][1]
body += 'if (!object)\n'
body += ' return 0;\n'
body += 'EnterResourceCreation enter(object->pp_instance());\n'
elif args[0][0] == 'PP_Instance':
body = 'EnterResourceCreation enter(%s);\n' % args[0][1]
else:
raise TGenError('Unknown arg type for Create(): %s' % args[0][0])
body += 'if (enter.failed())\n'
body += ' return 0;\n'
arg_list = ', '.join([a[1] for a in args])
if member.GetProperty('create_func'):
create_func = member.GetProperty('create_func')
else:
create_func = _GetCreateFuncName(interface)
body += 'return enter.functions()->%s(%s);' % (create_func,
arg_list)
return body
def _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
include_version, meta):
"""Returns the body of a typical function.
Args:
filenode - IDLNode for the file
release - release to generate body for
node - IDLNode for the interface
member - IDLNode for the member function
rtype - Return type for the member function
args - List of 4-tuple arguments for the member function
include_version - whether to include the version in the invocation
meta - ThunkBodyMetadata for header hints
"""
  is_callback_func = args[len(args) - 1][0] == 'struct PP_CompletionCallback'
if is_callback_func:
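    # Replace the trailing PP_CompletionCallback argument with enter.callback() in the generated call.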
call_args = args[:-1] + [('', 'enter.callback()', '', '')]
meta.AddInclude('ppapi/c/pp_completion_callback.h')
else:
call_args = args
if args[0][0] == 'PP_Instance':
call_arglist = ', '.join(a[1] for a in call_args)
function_container = 'functions'
else:
call_arglist = ', '.join(a[1] for a in call_args[1:])
function_container = 'object'
function_name = member.GetName()
if include_version:
    version = node.GetVersion(release).replace('.', '_')
function_name += version
invocation = 'enter.%s()->%s(%s)' % (function_container,
function_name,
call_arglist)
handle_errors = not (member.GetProperty('report_errors') == 'False')
if is_callback_func:
body = '%s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
args[len(args) - 1][1], meta)
body += 'if (enter.failed())\n'
value = member.GetProperty('on_failure')
if value is None:
value = 'enter.retval()'
body += ' return %s;\n' % value
body += 'return enter.SetResult(%s);' % invocation
elif rtype == 'void':
body = '%s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
None, meta)
body += 'if (enter.succeeded())\n'
body += ' %s;' % invocation
else:
value = member.GetProperty('on_failure')
if value is None:
value = _GetDefaultFailureValue(rtype)
if value is None:
raise TGenError('No default value for rtype %s' % rtype)
body = '%s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
                                   None, meta)
|
google/grumpy
|
lib/itertools_test.py
|
Python
|
apache-2.0
| 5,660 | 0.011131 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import weetest
def TestCycle():
want = []
got = []
for x in itertools.cycle(()):
got.append(x)
assert got == want, 'empty cycle yields no elements'
arg = (0, 1, 2)
want = (0, 1, 2) * 10
got = []
limit = 10 * len(arg)
counter = 0
for x in itertools.cycle((0, 1, 2)):
got.append(x)
counter += 1
if counter == limit:
break
assert tuple(got) == want, 'tuple(cycle%s) == %s, want %s' % (arg, tuple(got), want)
def TestDropwhile():
r = range(10)
cases = [
((lambda x: x < 5, r), (5, 6, 7, 8, 9)),
((lambda x: True, r), ()),
((lambda x: False, r), tuple(r)),
]
for args, want in cases:
got = tuple(itertools.dropwhile(*args))
assert got == want, 'tuple(dropwhile%s) == %s, want %s' % (args, got, want)
def TestChain():
r = range(10)
cases = [
([r], tuple(r)),
([r, r], tuple(r) + tuple(r)),
([], ())
]
for args, want in cases:
got = tuple(itertools.chain(*args))
assert got == want, 'tuple(chain%s) == %s, want %s' % (args, got, want)
def TestFromIterable():
r = range(10)
cases = [
([r], tuple(r)),
([r, r], tuple(r) + tuple(r)),
([], ())
]
for args, want in cases:
got = tuple(itertools.chain.from_iterable(args))
assert got == want, 'tuple(from_iterable%s) == %s, want %s' % (args, got, want)
def TestIFilter():
r = range(10)
cases = [
((lambda x: x < 5, r), (0, 1, 2, 3, 4)),
((lambda x: False, r), ()),
((lambda x: True, r), tuple(r)),
((None, r), (1, 2, 3, 4, 5, 6, 7, 8, 9))
]
for args, want in cases:
got = tuple(itertools.ifilter(*args))
assert got == want, 'tuple(ifilter%s) == %s, want %s' % (args, got, want)
def TestIFilterFalse():
r = range(10)
cases = [
((lambda x: x < 5, r), (5, 6, 7, 8, 9)),
((lambda x: False, r), tuple(r)),
((lambda x: True, r), ()),
((None, r), (0,))
]
for args, want in cases:
got = tuple(itertools.ifilterfalse(*args))
assert got == want, 'tuple(ifilterfalse%s) == %s, want %s' % (args, got, want)
def TestISlice():
r = range(10)
cases = [
((r, 5), (0, 1, 2, 3, 4)),
((r, 25, 30), ()),
((r, 1, None, 3), (1, 4, 7)),
]
for args, want in cases:
got = tuple(itertools.islice(*args))
assert got == want, 'tuple(islice%s) == %s, want %s' % (args, got, want)
def TestIZipLongest():
cases = [
(('abc', range(6)), (('a', 0), ('b', 1), ('c', 2), (None, 3), (None, 4), (None, 5))),
((range(6), 'abc'), ((0, 'a'), (1, 'b'), (2, 'c'), (3, None), (4, None), (5, None))),
(([1, None, 3], 'ab', range(1)), ((1, 'a', 0), (None, 'b', None), (3, None, None))),
]
for args, want in cases:
got = tuple(itertools.izip_longest(*args))
assert got == want, 'tuple(izip_longest%s) == %s, want %s' % (args, got, want)
def TestProduct():
cases = [
(([1, 2], ['a', 'b']), ((1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'))),
(([1], ['a', 'b']), ((1, 'a'), (1, 'b'))),
(([],), ()),
]
  for args, want in cases:
    got = tuple(itertools.product(*args))
assert got == want, 'tuple(product%s) == %s, want %s' % (args, got, want)
def TestPermutations():
cases = [
(('AB',), (('A', 'B'), ('B', 'A'))),
(('ABC', 2), (('A', 'B'), ('A', 'C'), ('B', 'A'), ('B', 'C'), ('C', 'A'), ('C', 'B'))),
((range(3),), ((0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0))),
(([],), ((),)),
(([], 0), ((),)),
      ((range(3), 4), ()),
]
for args, want in cases:
got = tuple(itertools.permutations(*args))
assert got == want, 'tuple(permutations%s) == %s, want %s' % (args, got, want)
def TestCombinations():
cases = [
((range(4), 3), ((0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3))),
]
for args, want in cases:
got = tuple(itertools.combinations(*args))
assert got == want, 'tuple(combinations%s) == %s, want %s' % (args, got, want)
def TestCombinationsWithReplacement():
cases = [
(([-12], 2), (((-12, -12),))),
(('AB', 3), (('A', 'A', 'A'), ('A', 'A', 'B'), ('A', 'B', 'B'), ('B', 'B', 'B'))),
(([], 2), ()),
(([], 0), ((),))
]
for args, want in cases:
got = tuple(itertools.combinations_with_replacement(*args))
assert got == want, 'tuple(combinations_with_replacement%s) == %s, want %s' % (args, got, want)
def TestGroupBy():
cases = [
(([1, 2, 2, 3, 3, 3, 4, 4, 4, 4],), [(1, [1]), (2, [2, 2]), (3, [3, 3, 3]), (4, [4, 4, 4, 4])]),
((['aa', 'ab', 'abc', 'bcd', 'abcde'], len), [(2, ['aa', 'ab']), (3, ['abc', 'bcd']), (5, ['abcde'])]),
]
for args, want in cases:
got = [(k, list(v)) for k, v in itertools.groupby(*args)]
assert got == want, 'groupby %s == %s, want %s' % (args, got, want)
def TestTakewhile():
r = range(10)
cases = [
((lambda x: x % 2 == 0, r), (0,)),
((lambda x: True, r), tuple(r)),
((lambda x: False, r), ())
]
for args, want in cases:
got = tuple(itertools.takewhile(*args))
assert got == want, 'tuple(takewhile%s) == %s, want %s' % (args, got, want)
if __name__ == '__main__':
weetest.RunTests()
|
BackupTheBerlios/xml2ddl-svn
|
xml2ddl/FirebirdInterface.py
|
Python
|
gpl-2.0
| 15,609 | 0.010635 |
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
from downloadCommon import DownloadCommon, getSeqName
from DdlCommonInterface import DdlCommonInterface
import re
class FbDownloader(DownloadCommon):
def __init__(self):
self.strDbms = 'firebird'
def connect(self, info):
try:
import kinterbasdb
except:
print "Missing Firebird support through kinterbasdb"
return
self.strDbms = 'firebird'
self.version = info['version']
self.conn = kinterbasdb.connect(
dsn='localhost:%s' % info['dbname'],
user = info['user'],
password = info['pass'])
self.cursor = self.conn.cursor()
def useConnection(self, con, version):
self.conn = con
self.version = version
self.cursor = self.conn.cursor()
def getTables(self, tableList):
""" Returns the list of tables as a array of strings """
strQuery = "SELECT RDB$RELATION_NAME FROM RDB$RELATIONS WHERE RDB$SYSTEM_FLAG=0 AND RDB$VIEW_SOURCE IS NULL;"
self.cursor.execute(strQuery)
return self._confirmReturns([x[0].strip() for x in self.cursor.fetchall() ], tableList)
def getTableColumns(self, strTable):
""" Returns column in this format
(nColIndex, strColumnName, strColType, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, bNotNull, strDefault, auto_increment)
"""
strSql = """
SELECT RF.RDB$FIELD_POSITION, RF.RDB$FIELD_NAME, RDB$FIELD_TYPE, F.RDB$FIELD_LENGTH,
RDB$FIELD_PRECISION, RDB$FIELD_SCALE, RF.RDB$NULL_FLAG, RF.RDB$DEFAULT_SOURCE, F.RDB$FIELD_SUB_TYPE
FROM RDB$RELATION_FIELDS RF, RDB$FIELDS F
WHERE RF.RDB$RELATION_NAME = ?
AND RF.RDB$FIELD_SOURCE = F.RDB$FIELD_NAME
ORDER BY RF.RDB$FIELD_POSITION;"""
self.cursor.execute(strSql, [strTable])
rows = self.cursor.fetchall()
ret = []
# TODO auto_increment
bAutoIncrement = False
for row in rows:
attnum, name, nType, size, numsize, scale, attnull, default, sub_type = row
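            # Firebird reports negative scales; normalise them to positive values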
if scale and scale < 0:
scale = -scale
if not size and numprecradix == 10:
size = numsize
strType = self.convertTypeId(nType)
if sub_type == 1:
strType = 'numeric'
elif sub_type == 2:
strType = 'decimal'
if numsize > 0:
size = numsize
numsize = None
if strType == 'integer' and size == 4:
size = None
elif strType == 'date' and size == 4:
size = None
elif strType == 'float' and size == 4:
size = None
if default:
# Remove the 'DEFAULT ' part of the SQL
default = default.replace('DEFAULT ', '')
if self.hasAutoincrement(strTable, name):
bAutoIncrement = True
else:
bAutoIncrement = False
ret.append((name.strip(), strType, size, scale, attnull, default, bAutoIncrement))
return ret
def convertTypeId(self, nType):
types = {
261: 'blob',
14 : 'char',
40 : 'cstring',
11 : 'd_float',
27 : 'double',
10 : 'float',
16 : 'int64',
            8 : 'integer',
9 : 'quad',
7 : 'smallint',
12 : 'date',
13 : 'time',
35 : 'timestamp',
37 : 'varchar',
}
strType = ''
if nType in types:
strType = types[nType]
if nType not in [14, 40, 37]:
size = None
else:
print "Uknown type %d" % (nType)
|
return strType
def hasAutoincrement(self, strTableName, strColName):
strSql = "SELECT RDB$GENERATOR_NAME FROM RDB$GENERATORS WHERE UPPER(RDB$GENERATOR_NAME)=UPPER(?);"
self.cursor.execute(strSql, [getSeqName(strTableName, strColName)[0:31]])
rows = self.cursor.fetchall()
if rows:
return True
return False
def getTableComment(self, strTableName):
""" Returns the comment as a string """
strSql = "SELECT RDB$DESCRIPTION FROM RDB$RELATIONS WHERE RDB$RELATION_NAME=?;"
self.cursor.execute(strSql, [strTableName])
rows = self.cursor.fetchall()
if rows:
return rows[0][0]
return None
def getColumnComment(self, strTableName, strColumnName):
""" Returns the comment as a string """
strSql = """SELECT RDB$DESCRIPTION
FROM RDB$RELATION_FIELDS
WHERE RDB$RELATION_NAME = ? AND RDB$FIELD_NAME = ?"""
self.cursor.execute(strSql, [strTableName, strColumnName])
rows = self.cursor.fetchall()
if rows:
return rows[0][0]
return None
def getTableIndexes(self, strTableName):
""" Returns
(strIndexName, [strColumns,], bIsUnique, bIsPrimary, bIsClustered)
or []
Warning the Primary key constraint cheats by knowing the name probably starts with pk_
"""
strSql = """SELECT RDB$INDEX_NAME, RDB$UNIQUE_FLAG
FROM RDB$INDICES
WHERE RDB$RELATION_NAME = '%s'
""" % (strTableName)
self.cursor.execute(strSql)
rows = self.cursor.fetchall()
ret = []
if not rows:
return ret
for row in rows:
(strIndexName, bIsUnique) = row
colList = self._fetchTableColumnsForIndex(strIndexName)
if strIndexName.lower().startswith('pk_'):
bIsPrimary = True
else:
bIsPrimary = False
strIndexName = strIndexName.strip()
ret.append((strIndexName, colList, bIsUnique, bIsPrimary, None))
return ret
def _fetchTableColumnsForIndex(self, strIndexName):
strSql = """SELECT RDB$FIELD_NAME
FROM RDB$INDEX_SEGMENTS
WHERE RDB$INDEX_NAME = ?
ORDER BY RDB$FIELD_POSITION
"""
self.cursor.execute(strSql, [strIndexName.strip()])
rows = self.cursor.fetchall()
return [row[0].strip() for row in rows]
def getTableRelations(self, strTableName):
""" Returns
(strConstraintName, colName, fk_table, fk_columns)
or []
"""
strSql = """SELECT RDB$CONSTRAINT_NAME
FROM RDB$RELATION_CONSTRAINTS
WHERE RDB$RELATION_NAME = '%s'
""" % (strTableName)
self.cursor.execute(strSql)
rows = self.cursor.fetchall()
ret = []
if not rows:
return ret
return ret
def _fetchTableColumnsNamesByNums(self, strTableName, nums):
strSql = """
SELECT pa.attname
FROM pg_attribute pa, pg_class pc
WHERE pa.attrelid = pc.oid
AND pa.attisdropped = 'f'
AND pc.relname = %s
AND pc.relkind = 'r'
AND pa.attnum in (%s)
ORDER BY pa.attnum
""" % ( '%s', ','.join(['%s' for num in nums]) )
self.cursor.execute(strSql, [strTableName] + nums)
rows = self.cursor.fetchall()
return [row[0] for row in rows]
def _decodeLength(self, type, atttypmod):
# gleamed from http://www.postgresql-websource.com/psql713/source-format_type.htm
VARHDRSZ = 4
if type == 'varchar':
return (atttypmod - VARHDRSZ, None)
if type == 'numeric':
atttypmod -= VARHDRSZ
return ( (atttypmod >> 16) & 0xffff, atttypmod & 0xffff)
|
grigoryk/calory-game-server
|
dishes/admin.py
|
Python
|
gpl-2.0
| 727 | 0 |
from django.contrib import admin
from .models import (
    Image, NutritionalDataDish, NutritionalDataGuess, Dish, Guess)
class ImageInline(admin.StackedInline):
model = Image
class NutritionalDataDishInline(admin.StackedInline):
model = NutritionalDataDish
class DishAdmin(admin.ModelAdmin):
list_display = ('description', 'is_vegetarian', 'created_at')
inlines = [
ImageInline,
        NutritionalDataDishInline
]
class NutritionalDataGuessInline(admin.StackedInline):
model = NutritionalDataGuess
class GuessAdmin(admin.ModelAdmin):
inlines = [NutritionalDataGuessInline]
admin.site.register(Image)
admin.site.register(Dish, DishAdmin)
admin.site.register(Guess, GuessAdmin)
|
stephenhelms/WormTracker
|
python/tsstats.py
|
Python
|
apache-2.0
| 8,100 | 0.006667 |
import numpy as np
import numpy.ma as ma
from numpy import linalg as LA
import matplotlib.pyplot as plt
import itertools
import collections
from scipy import stats
def acf(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
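    # Cumulative exclusion counts let us mask lagged pairs that span an excluded sample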
# from stackexchange
x = x - x.mean() # remove mean
if type(lags) is int:
lags = range(lags)
C = ma.zeros((len(lags),))
sigma2 = x.var()
for i, l in enumerate(lags):
if l == 0:
C[i] = 1
elif l >= x.shape[0]:
C[i] = ma.masked
else:
x0 = x[:-l].copy()
x1 = x[l:].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject] = ma.masked
x1[reject] = ma.masked
C[i] = (x0*x1).mean()/sigma2
return C
def ccf(x, y, lags, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
x = x - x.mean() # remove mean
y = y - y.mean()
if type(lags) is int:
lags = np.arange(-lags,lags)
C = ma.zeros((len(lags),))
sigma2 = x.std()*y.std()
for i, l in enumerate(lags):
if l == 0:
C[i] = (x*y).mean()/sigma2
else:
if l > 0:
x0 = x[:-l].copy()
y1 = y[l:].copy()
else:
x0 = y[:l].copy()
y1 = x[-l:].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject] = ma.masked
y1[reject] = ma.masked
C[i] = (x0*y1).mean()/sigma2
return C
def acv(k, List):
'''
Autocovariance
k is the lag order
'''
y = List.copy()
y = y - y.mean()
if k == 0:
return (y*y).mean()
else:
return (y[:-k]*y[k:]).mean()
def dotacf(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
if type(lags) is int:
lags = xrange(lags)
C = ma.zeros((len(lags),))
for i, l in enumerate(lags):
if l == 0:
C[i] = (x*x).sum(axis=1).mean()
else:
x0 = x[:-l, :].copy()
x1 = x[l:, :].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject, :] = ma.masked
x1[reject, :] = ma.masked
C[i] = (x0*x1).sum(axis=1).mean()
return C
def pacfe(p,j,List):
'''
Partial autocorrelation function estimates
p is the order of the AR(p) process
j is the coefficient in an AR(p) process
'''
if p==2 and j==1:
return (acf(j,List)*(1-acf(p,List)))/(1-(acf(j,List))**2)
elif p==2 and j==2:
return (acf(2,List)-(acf(1,List))**2)/(1-(acf(1,List))**2)
elif p==j and p!=2 and j!=2:
c=0
for a in range(1,p):
c+=pacfe(p-1,a,List)*acf(p-a,List)
d=0
for b in range(1,p):
d+=pacfe(p-1,b,List)*acf(b,List)
return (acf(p,List)-c)/(1-d)
else:
return pacfe(p-1,j,List)-pacfe(p,p,List)*pacfe(p-1,p-j,List)
def drift(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
if type(lags) is int:
lags = xrange(lags)
mu = ma.zeros((len(lags),))
for i, lag in enumerate(lags):
if lag==0:
mu[i] = 0
elif lag >= x.shape[0]:
mu[i] = ma.masked
else:
x0 = x[lag:].copy()
x1 = x[:-lag].copy()
reject = (exclude[lag:]-exclude[:-lag])>0
x0[reject] = ma.masked
x1[reject] = ma.masked
displacements = x0 - x1
mu[i] = displacements.mean()
return mu
def unwrapma(x):
# Adapted from numpy unwrap, this version ignores missing data
idx = ma.array(np.arange(0,x.shape[0]), mask=x.mask)
idxc = idx.compressed()
xc = x.compressed()
dd = np.diff(xc)
ddmod = np.mod(dd+np.pi, 2*np.pi)-np.pi
    ddmod[(ddmod==-np.pi) & (dd > 0)] = np.pi
phc_correct = ddmod - dd
phc_correct[np.abs(dd)<np.pi] = 0
ph_correct = np.zeros(x.shape)
ph_correct[idxc[1:]] = phc_correct
up = x + ph_correct.cumsum()
return up
def nextpow2(n):
'''
Returns the next highest power of 2 from n
'''
m_f = np.log2(n)
m_i = np.ceil(m_f)
return 2**m_i
def phaserand(X, independent=False, reduceHighFreqNoise=True):
'''
    Generates a randomized surrogate dataset for X, preserving linear temporal
correlations. If independent is False (default), linear correlations
between columns of x are also preserved.
If X contains missing values, they are filled with the mean of that
channel.
The algorithm works by randomizing the phases in the Fourier domain. For
non-independent shuffling, the same random phases are used for each
channel.
References:
Theiler, J., Eubank, S., Longtin, A., Galdrikian, B., & Doyne Farmer, J.
(1992). Testing for nonlinearity in time series: the method of
surrogate data. Physica D: Nonlinear Phenomena, 58(1), 77-94.
Prichard, D. and Theiler, J. (1994) Generating surrogate data for time
series with several simultaneously measured variables. Phys. Rev.
Lett. 73(7), 951-954.
Podobnik, B., Fu, D. F., Stanley, H. E., & Ivanov, P. C. (2007).
Power-law autocorrelated stochastic processes with long-range
cross-correlations. The European Physical Journal B, 56(1), 47-52.
'''
# Deal with array vs matrix by adding new axis
if len(X.shape) == 1:
X = X[:, np.newaxis]
# Deal with missing data
if isinstance(X, ma.MaskedArray):
# truncate all missing data at beginning and end
idxNotAllMissing = (~np.all(X.mask, axis=1)).nonzero()[0]
X = X[idxNotAllMissing[0]:idxNotAllMissing[-1], :]
X = X.filled(X.mean(axis=0)) # fill interior mask with the mean
# Reduce high-frequency noise by min difference between first and last
if reduceHighFreqNoise:
delta = X - X[0, :]
threshold = 1e-3*np.std(X, axis=0)
# find last pt in which all the channels are about the same as the beginning
# and also the index is even
goodEndPt = np.nonzero((np.all(np.abs(delta) < threshold, axis=1)) &
(np.arange(0, X.shape[1]) % 2 == 0))[0][-1]
if goodEndPt > X.shape[0]/2: # make sure we keep at least half the data
X = X[:goodEndPt, :]
# Fourier transform and extract amplitude and phases
# The frequencies are shifted so 0 is centered (fftshift)
N = X.shape[0] #int(nextpow2(X.shape[0])) # size for FFT
if N % 2 != 0:
N = N-1
h = np.floor(N/2) # half the length of the data
Z = np.fft.fft(X, N, axis=0)
M = np.fft.fftshift(np.abs(Z), axes=0) # the amplitudes
phase = np.fft.fftshift(np.angle(Z), axes=0) # the original phases
# Randomize the phases. The phases need to be symmetric for postivie and
# negative frequencies.
if independent: # generate random phases for each channel
randphase = 2.*np.pi*np.random.rand((h-1, X.shape[1])) # random phases
newphase = np.zeros((N, X.shape[1])) # new phases to use
newphase[0, :] = phase[0, :] # keep the zero freq (don't know why)
newphase[1:h, :] = randphase[::-1, :]
newphase[h, :] = phase[h, :]
newphase[h+1:, :] = -randphase
else: # generate one set of random phases (same as above)
randphase = 2.*np.pi*np.random.rand(h-1)
newphase = np.zeros((N, X.shape[1]))
newphase[0, :] = phase[0, :]
newphase[1:h, :] = randphase[::-1, np.newaxis]
newphase[h, :] = phase[h, :]
newphase[h+1:, :] = -randphase[:, np.newaxis]
# Reconstruct the signal from the original amplitude and the new phases
z2 = M*np.exp(newphase*1.j)
# Return the time-domain signal
return np.fft.ifft(np.fft.ifftshift(z2, axes=0),
axis=0).real.squeeze()
|
nipe0324/kaggle-keypoints-detection-keras
|
plotter.py
|
Python
|
apache-2.0
| 1,057 | 0.02176 |
import os
import matplotlib.pyplot as plt
def plot_hist(history, model_name=None):
    plt.plot(history['loss'], linewidth=3, label='train')
plt.plot(history['val_loss'], linewidth=3, label='valid')
plt.grid()
plt.legend()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.ylim(1e-4, 1e-2)
plt.yscale('log')
if model_name:
        path = os.path.join('images', model_name + '-loss.png')
plt.savefig(path)
else:
plt.show()
def plot_model_arch(model, model_name):
from keras.utils.visualize_util import plot
path = os.path.join('images', model_name + '.png')
plot(model, to_file=path, show_shapes=True)
def plot_samples(X, y):
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(16):
axis = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
_plot_sample(X[i], y[i], axis)
plt.show()
def _plot_sample(x, y, axis):
img = x.reshape(96, 96)
axis.imshow(img, cmap='gray')
axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)
|
BenjaminSchubert/Pyptables
|
pyptables/executors.py
|
Python
|
mit
| 6,181 | 0.00178 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Defines several helpers to add rules to Iptables
"""
from configparser import SectionProxy
from contextlib import suppress
from ipaddress import ip_address, ip_network
import re
import socket
from pyptables.iptables import Iptables, Ip6tables, IptablesRule
__author__ = 'Benjamin Schubert, ben.c.schubert@gmail.com'
ipv4_handler = Iptables()
ipv6_handler = Ip6tables()
def get_ip_address(name: str):
"""
Tries to convert the input to an ip address
:param name: the input to convert
:return: the correct ip address or None if unable to convert
"""
with suppress(ValueError):
return ip_address(name)
with suppress(ValueError):
return ip_network(name)
with suppress(socket.gaierror, ValueError):
        return ip_address(socket.gethostbyname(name))
return None
def setup_global_begin(config: SectionProxy) -> None:
"""
Sets up the tables globally for ipv4 and ipv6
:param config: the configuration used
"""
# noinspection PyUnresolvedReferences
def setup(handler: Iptables, _config: SectionProxy) -> None:
"""
Sets up the tables to accept new rules : resets all rules, set defaults and allow global traffic
:param handler: the Iptables instance on which to operate
:param _config: the configuration used
"""
handler.reset()
for chain in _config.getlist("closed_chains", []):
handler.set_default(chain, "DROP")
if _config.getboolean("allow_established_traffic", False):
handler.allow_existing_traffic()
for interface in _config.getlist("allow_traffic_on_interface", []):
handler.allow_traffic_on_interface(interface)
if _config.getboolean("drop_invalid_traffic", False):
handler.drop_invalid_traffic()
if config.getboolean("ipv4", False):
setup(ipv4_handler, config)
if config.getboolean("ipv6", False):
setup(ipv6_handler, config)
def setup_global_end(config: SectionProxy) -> None:
"""
Sets up the last things : logging, drops and ssh knocking
:param config: the config to use
"""
def setup(handler: Iptables, _config: SectionProxy, version) -> None:
"""
Ties up the settings : logging, drops and ssh knocking
:param handler: the Iptables instance on which to operate
:param _config: the configuration used
:param version: the version of ip protocol used (4 or 6)
"""
if _config.parser.has_section("logging"):
for entry in _config.parser.items("logging"):
if not entry[0].startswith("ignore_"):
continue
chain = entry[0].replace("ignore_", "").upper()
values = [item for item in re.split(r";\s*", entry[1]) if item != ""]
for value in values:
data = [item if item != "" else None for item in re.split(r",\s*", value.strip())]
address1, address2 = data[4:6]
if address1 is not None:
address1 = get_ip_address(address1)
if address2 is not None:
address2 = get_ip_address(address2)
if (address1 is not None and address1.version != version) or (
address2 is not None and address2.version != version):
continue
handler.no_log(chain, *data)
if _config.getboolean("ssh_knocking"):
handler.enable_ssh_knocking(_config.parser["ssh_knocking"])
if _config.parser.has_section("logging"):
section = _config.parser["logging"]
for chain in section.getlist("log"):
handler.log(chain, section.get("prefix"), section.get("rate", None), section.getint("level", 4))
if config.getboolean("ipv4", False):
setup(ipv4_handler, config, version=4)
if config.getboolean("ipv6", False):
setup(ipv6_handler, config, version=6)
# noinspection PyUnresolvedReferences
def handle_service(config: SectionProxy) -> None:
"""
Sets a rule or a service
:param config: the configuration for the rule
"""
for src in config.getlist("source", [None]):
for dst in config.getlist("destination", [None]):
source = None
destination = None
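            # Resolve source/destination names to ip_address/ip_network objects; skip rules we cannot resolve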
if src is not None:
source = get_ip_address(src)
if source is None:
print("[ERROR] Could not determine ip address for {} : skipping".format(src))
continue
if dst is not None:
destination = get_ip_address(dst)
if destination is None:
print("[ERROR] Could not determine ip address for {} : skipping".format(dst))
continue
rule = IptablesRule(
name=config.name,
interface=config.get("interface"),
chain=config.get("chain"),
protocol=config.get("protocol"),
action=config.get("action"),
source=source,
destination=destination,
sport=config.get("sport"),
dport=config.get("dport"),
remote=config.get("remote", None)
)
if config.getboolean("ipv4", False) and (rule.source is None or rule.source.version == 4) and \
(rule.destination is None or rule.source.version == 4):
ipv4_handler.add_rule(rule)
if config.getboolean("ipv6") and (rule.source is None or rule.source.version == 6) and \
(rule.destination is None or rule.source.version == 6):
ipv6_handler.add_rule(rule)
if (rule.source is not None and rule.destination is not None) and \
rule.destination.version != rule.source.version:
print("[ERROR] Could not add rule with ip versions no matching: {} and {}".format(
str(rule.source, rule.destination)
))
|
davidam/python-examples
|
security/md5/test/test_md5py.py
|
Python
|
gpl-3.0
| 352 | 0.008523 |
#!/usr/bin/env python
import unittest
from app.md5py import MD5
class TddInPythonExample(unittest.TestCase):
def test_object_program(self):
m = MD5()
m.update("1234")
        hexdigest = m.hexdigest()
        self.assertEqual("81dc9bdb52d04dc20036dbd8313ed055", hexdigest)
if __name__ == '__main__':
unittest.main()
|
tysonholub/twilio-python
|
twilio/base/list_resource.py
|
Python
|
mit
| 180 | 0 |
class ListResource(object):
def __init__(self, version):
"""
:param Version version:
"""
self._version = version
""" :type: Ver
|
sion """
|
kikinteractive/MaxMind-DB-Reader-python
|
maxminddb/decoder.py
|
Python
|
apache-2.0
| 5,904 | 0 |
"""
maxminddb.decoder
~~~~~~~~~~~~~~~~~
This package contains code for decoding the MaxMind DB data section.
"""
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object): # pylint: disable=too-few-public-methods
"""Decoder for the data section of the MaxMind DB"""
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
"""Created a Decoder for a MaxMind DB
Arguments:
database_buffer -- an mmap'd MaxMind DB file.
pointer_base -- the base number to use when decoding a pointer
pointer_test -- used for internal unit testing of pointer code
"""
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
return size != 0, offset
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# pylint: disable=no-self-argument
# |-> I am open to better ways of doing this as long as it doesn't involve
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + type_size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value,) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def _decode_map(self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
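        # The pointer's byte length (1-4) is encoded in bits 3-4 of the control byte's size field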
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
"""Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
"""
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset)
def _read_extended(self, offset):
        (next_byte,) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
        type_num = next_byte + 7
if type_num < 7:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
        return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)'
)
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
# Using unpack rather than int_from_bytes as it is about 200 lookups
# per second faster here.
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
size = 285 + struct.unpack(b'!H', size_bytes)[0]
elif size > 30:
size = struct.unpack(
b'!I', size_bytes.rjust(4, b'\x00'))[0] + 65821
return size, new_offset
|
cs-shadow/phabricator-tools
|
py/abd/abdt_branch__t.py
|
Python
|
apache-2.0
| 10,066 | 0 |
"""Test suite for abdt_branch."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [XB] can test is_abandoned, is_null, is_new
# [XC] can move between all states without error
# [XD] can set and retrieve repo name, branch link
# [ C] can move bad_pre_review -> 'new' states without duplicating branches
# [ D] unique names and emails are returned in the order of most recent first
# [ E] all commits are shown when no arguments are supplied
# [ E] number of commits can be limited by max_commits argument
# [ E] number of commits can be limited by max_size argument
# [ ] can detect if review branch has new commits (after ff, merge, rebase)
# [ ] can get raw diff from branch
# [ ] can get author names and emails from branch
# [ ] raise if get author names and emails from branch with no history
# [ ] raise if get author names and emails from branch with invalid base
# [ ] can 'get_any_author_emails', raise if no emails ever
# [ ] bad unicode chars in diffs
# [ ] bad unicode chars in commit messages
# [ ] can land an uncomplicated review
# [ ] XXX: withReservedBranch
# [ ] XXX: emptyMergeWorkflow
# [ ] XXX: mergeConflictWorkflow
# [ ] XXX: changeAlreadyMergedOnBase
# [ ] XXX: commandeeredLand
# [ ] XXX: createHugeReview
# [ ] XXX: hugeUpdateToReview
# [ ] XXX: empty repository, no history
# [ ] XXX: landing when origin has been updated underneath us
# [ ] XXX: moving tracker branches when there's something in the way
# -----------------------------------------------------------------------------
# Tests:
# [ A] test_A_Breathing
# [ B] test_B_Empty
# [ C] test_C_BadPreReviewToNew
# [ D] test_D_AlternatingAuthors
# [ E] test_E_NewCommitsDescription
# [XB] test_XB_UntrackedBranch
# [XC] test_XC_MoveBetweenAllMarkedStates
# [XD] check_XD_SetRetrieveRepoNameBranchLink
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import phlgit_branch
import phlgit_push
import phlgit_revparse
import phlgitu_fixture
import phlgitx_refcache
import abdt_branch
import abdt_branchtester
import abdt_classicnaming
import abdt_differresultcache
import abdt_git
import abdt_naming
class Test(unittest.TestCase):
def __init__(self, data):
super(Test, self).__init__(data)
self.repos = None
self.repo_central = None
self.repo_dev = None
self.repo_arcyd = None
def setUp(self):
self.repos = phlgitu_fixture.CentralisedWithTwoWorkers()
self.repo_central = self.repos.central_repo
self.repo_dev = self.repos.w0.repo
sys_repo = self.repos.w1.repo
refcache_repo = phlgitx_refcache.Repo(sys_repo)
differ_cache = abdt_differresultcache.Cache(refcache_repo)
self.repo_arcyd = abdt_git.Repo(
refcache_repo, differ_cache, 'origin', 'myrepo')
def tearDown(self):
self.repos.close()
def test_A_Breathing(self):
pass
def test_B_Empty(self):
pass
def test_C_BadPreReviewToNew(self):
# can move bad_pre_review -> 'new' states without duplicating branches
base, branch_name, branch = self._setup_for_untracked_branch()
transition_list = [
branch.mark_ok_new_review, branch.mark_new_bad_in_review
]
for do_transition in transition_list:
branches = phlgit_branch.get_remote(self.repo_arcyd, 'origin')
branch.mark_bad_pre_review()
branches_bad_pre = phlgit_branch.get_remote(
self.repo_arcyd, 'origin')
do_transition(102)
branches_new = phlgit_branch.get_remote(self.repo_arcyd, 'origin')
# we expect to have gained one branch when starting to track as
# 'bad_pre_review'.
self.assertEqual(len(branches_bad_pre), len(branches) + 1)
# we expect to have the same number of branches after moving with
# 'mark_ok_new_review'
self.assertEqual(len(branches_bad_pre), len(branches_new))
# remove the tracking branch and make sure the count has gone down
branch.clear_mark()
branches_cleared = phlgit_branch.get_remote(
self.repo_arcyd, 'origin')
self.assertEqual(len(branches_cleared), len(branches))
def test_D_AlternatingAuthors(self):
base, branch_name, branch = self._setup_for_untracked_branch()
alice_user = 'Alice'
        alice_email = 'alice@server.test'
bob_user = 'Bob'
bob_email = 'bob@server.test'
self._dev_commit_new_empty_file('ALICE1', alice_user, alice_email)
        self._dev_commit_new_empty_file('BOB1', bob_user, bob_email)
self._dev_commit_new_empty_file('ALICE2', alice_user, alice_email)
phlgit_push.push(self.repo_dev, branch_name, 'origin')
self.repo_arcyd('fetch', 'origin')
author_names_emails = branch.get_author_names_emails()
self.assertTupleEqual(
author_names_emails[0],
(bob_user, bob_email))
self.assertTupleEqual(
author_names_emails[1],
(alice_user, alice_email))
# any_author_emails = branch.get_any_author_emails()
# self.assertEqual(any_author_emails[-1], alice_email)
# self.assertEqual(any_author_emails[-2], bob_email)
def test_E_NewCommitsDescription(self):
base, branch_name, branch = self._setup_for_untracked_branch()
user = 'Alice'
email = 'alice@server.test'
self._dev_commit_new_empty_file('Commit 1', user, email)
self._dev_commit_new_empty_file('Commit 2', user, email)
self._dev_commit_new_empty_file('Commit 3', user, email)
self._dev_commit_new_empty_file('Commit 4', user, email)
phlgit_push.push(self.repo_dev, branch_name, 'origin')
self.repo_arcyd('fetch', 'origin')
# [ E] all commits are shown when no arguments are supplied
new_commits_str = branch.describe_new_commits()
new_commits = new_commits_str.splitlines()
self.assertEqual(4, len(new_commits))
count = 4
for line in new_commits:
self.assertTrue(line.endswith('Commit {}'.format(count)))
count -= 1
# [ E] number of commits can be limited by max_commits argument
new_commits_str = branch.describe_new_commits(2)
new_commits = new_commits_str.splitlines()
self.assertEqual(3, len(new_commits))
self.assertTrue(new_commits[0].endswith('Commit 4'))
self.assertTrue(new_commits[1].endswith('Commit 3'))
self.assertEqual(new_commits[2], '...2 commits not shown.')
# [ E] number of commits can be limited by max_size argument
new_commits_str = branch.describe_new_commits(3, 20)
new_commits = new_commits_str.splitlines()
self.assertEqual(2, len(new_commits))
self.assertTrue(new_commits[0].endswith('Commit 4'))
self.assertEqual(new_commits[1], '...3 commits not shown.')
def _dev_commit_new_empty_file(self, filename, user, email):
self._create_new_file(self.repo_dev, filename)
self.repo_dev('add', filename)
self.repo_dev(
'commit',
'-m',
filename,
'--author=' + '{} <{}>'.format(user, email))
def test_XB_UntrackedBranch(self):
abdt_branchtester.check_XB_UntrackedBranch(self)
def test_XC_MoveBetweenAllMarkedStates(self):
abdt_branchtester.check_XC_MoveBetweenAllMarkedStates(self)
def check_D_SetRetrieveRepoNameBranchLink(self):
abdt_branchtester.check_XD_SetRetrieveRepoNameBranchLink(self)
def _create_new_file(self, repo, filename):
self.assertFalse(os.path.isfile(filename))
open(os.path.join(repo.working_dir, filename)
|
jemenake/LogicProjectTools
|
AudioArchiver.py
|
Python
|
mit
| 5,075 | 0.021084 |
#! /usr/bin/python
# Derived from dupinator.py.
#
# This program takes a list of pathnames to audio files and moves them to a central archive.
# It replaces the original with a symbolic link to the archived version.
# The archived version will have several names (all hard-linked): the MD5 hash (with the extension)
# appended to it, *plus* all names that the file has been archived as.
#
# For example:
# Audio#123.aif
# might get archived to:
# /all_hashed_audio/a/f/af171f6a82b3caf793d3b3ac3.aif
# /all_hashed_audio/a/f/af171f6a82b3caf793d3b3ac3.aliases/Audio#123.aif
# if the same audio is encountered in a file called:
# Audio#987.aif
# then it will be replaced by a symlink to the MD5-named file and an alias will be added:
# /all_hashed_audio/a/f/af171f6a82b3caf793d3b3ac3.aliases/Audio#987.aif
#
#
# WHAT IS THIS FOR?
#
# This program is for filesystems where there are a lot of large audio files and there is a
# high incidence of duplicates. This program allows for a great deal of space to be reclaimed.
#
# 2015-04-26 - joe@emenaker.com
import os
import hashlib
import pickle
from collections import defaultdict
REPOSITORY_BASE = "/Volumes/Old Time Machine/all_hashed_audio"
# ROOTS = ( "/Users/jemenake", )
ROOTS = ("/Volumes/Old Macintosh HD", "/Volumes/Old Time Machine")
def pickle_data(data, pathname):
picklefile = file(pathname, "w")
pickle.dump(data, picklefile)
picklefile.close()
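###
### NOTE: get_archive_md5_name() and get_archive_alias_name() are called below but
### their definitions are not part of this excerpt. A plausible sketch, assuming the
### REPOSITORY/<digit>/<digit>/<md5> layout described in the header comments
### (extension handling is omitted; these helper bodies are assumptions, not the original):
###
def get_archive_md5_name(md5):
    return REPOSITORY + "/" + md5[0] + "/" + md5[1] + "/" + md5

def get_archive_alias_name(md5, alias):
    return get_archive_md5_name(md5) + ".aliases/" + os.path.basename(alias)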
###
### If a directory doesn't exist, create it
###
def ensuredir(pathname):
if not os.path.isdir(pathname):
try:
os.mkdir(pathname)
except:
print "Can't create mandatory directory: " + pathname + " : Does it exist? Do we have permissions?"
exit()
###
### If a file is in the archive
###
def is_in_archive(md5):
pathname = get_archive_md5_name(md5)
return os.path.isfile(pathname)
###
### If an archived file with a MD5 is listed with a particular name
###
def has_alias(md5, alias):
pathname = get_archive_alias_name(md5, alias)
return os.path.isfile(pathname)
###
### Do we want this file?
### (Used to indicate if a file qualifies as an audio file)
###
def want(pathname):
return pathname.endswith(".aif")
###
###
###
pathnames = list()
for rootname in ROOTS:
print 'Scanning directory "%s"....' % rootname
for (dirpath, dirnames, filenames) in os.walk(rootname):
pathnames.extend([ dirpath + "/" + a for a in filenames if want(dirpath + "/" + a)])
REPOSITORY = REPOSITORY_BASE
PICKLE_FILE = REPOSITORY + "/" + "hash_values.pickle"
print " creating hash folders..."
# Make sure that we have a place to stick all of the links for the hashes
ensuredir(REPOSITORY)
## Make a two-deep folder tree for holding all of the hashes
digits = range(10)
digits.extend([ 'a', 'b', 'c', 'd', 'e', 'f' ])
for digit1 in digits:
dir1 = REPOSITORY + "/" + str(digit1)
ensuredir(dir1)
for digit2 in digits:
dir2 = dir1 + "/" + str(digit2)
ensuredir(dir2)
print " calculating hashes..."
# Calc the hash-value of every file
thehashes = defaultdict(list)
hashes_by_pathname = dict()
for pathname in pathnames:
print pathname
hashValue = hashlib.md5(open(pathname, 'rb').read()).hexdigest()  # hash the file contents, not the pathname string
thehashes[hashValue].append(pathname)
basename = os.path.basename(pathname)
if basename in hashes_by_pathname and hashes_by_pathname[basename] != hashValue:
    print "There are multiple files named " + basename + " and they have different hash values!"
hashes_by_pathname[basename] = hashValue  # record the name so later collisions can be detected
pickle_data(thehashes, PICKLE_FILE)
print " making the hard-links..."
# Make the hash links
for hash in thehashes.keys():
print hash
hash_pathname = REPOSITORY + "/" + hash[0] + "/" + hash[1] + "/" + hash
# Link the first pathname in our list of files with this hash to a file with the hashvalue
if not os.path.isfile(hash_pathname):
os.link(thehashes[hash][0], hash_pathname)
alias_dir = hash_pathname + ".aliases"
ensuredir(alias_dir)
for pathname in thehashes[hash]:
alias_pathname = alias_dir + "/" + os.path.basename(pathname)
if not os.path.isfile(alias_pathname):
os.link(hash_pathname, alias_pathname)
print " " + pathname
exit()
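# NOTE: nothing below this exit() is reachable. The remaining duplicate-scanning code
# is carried over from dupinator.py and refers to names (filesBySize, dupes) that are
# never defined in this script.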
print 'Finding potential dupes...'
potentialDupes = []
potentialCount = 0
trueType = type(True)
sizes = filesBySize.keys()
sizes.sort()
for k in sizes:
inFiles = filesBySize[k]
outFiles = []
hashes = {}
if len(inFiles) is 1: continue
print 'Testing %d files of size %d...' % (len(inFiles), k)
for fileName in inFiles:
if not os.path.isfile(fileName):
continue
aFile = file(fileName, 'r')
hasher = hashlib.md5(aFile.read(1024))
hashValue = hasher.digest()
if hashes.has_key(hashValue):
x = hashes[hashValue]
if type(x) is not trueType:
outFiles.append(hashes[hashValue])
hashes[hashValue] = True
outFiles.append(fileName)
else:
hashes[hashValue] = fileName
aFile.close()
if len(outFiles):
potentialDupes.append(outFiles)
potentialCount += len(outFiles)
del filesBySize
print 'Found %d sets of potential dupes...' % potentialCount
print 'Scanning for real dupes...'
dupdump = file("dupedump", "w")
pickle.dump(dupes, dupdump)
dupdump.close()
|
wknet123/harbor
|
tools/migration/migration_harbor/versions/0_4_0.py
|
Python
|
apache-2.0
| 2,016 | 0.008433 |
# Copyright (c) 2008-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""0.3.0 to 0.4.0
Revision ID: 0.4.0
Revises: 0.3.0
"""
# revision identifiers, used by Alembic.
revision = '0.4.0'
down_revision = '0.3.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
#alter column user.username, alter column user.email, project.name and add column replication_policy.deleted
op.alter_column('user', 'username', type_=sa.String(32), existing_type=sa.String(15))
op.alter_column('user', 'email', type_=sa.String(255), existing_type=sa.String(128))
op.alter_column('project', 'name', type_=sa.String(41), existing_type=sa.String(30), nullable=False)
op.alter_column('replication_target', 'password', type_=sa.String(128), existing_type=sa.String(40))
op.add_column('replication_policy', sa.Column('deleted', mysql.TINYINT(1), nullable=False, server_default=sa.text("'0'")))
#create index pid_optime (project_id, op_time) on table access_log, poid_uptime (policy_id, update_time) on table replication_job
op.create_index('pid_optime', 'access_log', ['project_id', 'op_time'])
op.create_index('poid_uptime', 'replication_job', ['policy_id', 'update_time'])
#create tables: repository
Repository.__table__.create(bind)
def downgrade():
"""
Downgrade has been disabled.
"""
pass
|
gmarke/erpnext
|
erpnext/accounts/utils.py
|
Python
|
agpl-3.0
| 16,635 | 0.025008 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import nowdate, cstr, flt, now, getdate, add_months
from frappe import throw, _
from frappe.utils import formatdate
import frappe.desk.reportview
class FiscalYearError(frappe.ValidationError): pass
class BudgetError(frappe.ValidationError): pass
@frappe.whitelist()
def get_fiscal_year(date=None, fiscal_year=None, label="Date", verbose=1, company=None):
return get_fiscal_years(date, fiscal_year, label, verbose, company)[0]
def get_fiscal_years(transaction_date=None, fiscal_year=None, label="Date", verbose=1, company=None):
# if year start date is 2012-04-01, year end date should be 2013-03-31 (hence subdate)
cond = " ifnull(disabled, 0) = 0"
if fiscal_year:
cond += " and fy.name = %(fiscal_year)s"
else:
cond += " and %(transaction_date)s >= fy.year_start_date and %(transaction_date)s <= fy.year_end_date"
if company:
cond += """ and (not exists(select name from `tabFiscal Year Company` fyc where fyc.parent = fy.name)
or exists(select company from `tabFiscal Year Company` fyc where fyc.parent = fy.name and fyc.company=%(company)s ))"""
fy = frappe.db.sql("""select fy.name, fy.year_start_date, fy.year_end_date from `tabFiscal Year` fy
where %s order by fy.year_start_date desc""" % cond, {
"fiscal_year": fiscal_year,
"transaction_date": transaction_date,
"company": company
})
if not fy:
error_msg = _("""{0} {1} not in any active Fiscal Year. For more details check {2}.""").format(label, formatdate(transaction_date), "https://erpnext.com/kb/accounts/fiscal-year-error")
if verbose==1: frappe.msgprint(error_msg)
raise FiscalYearError, error_msg
return fy
def validate_fiscal_year(date, fiscal_year, label=_("Date"), doc=None):
years = [f[0] for f in get_fiscal_years(date, label=label)]
if fiscal_year not in years:
if doc:
doc.fiscal_year = years[0]
else:
throw(_("{0} '{1}' not in Fiscal Year {2}").format(label, formatdate(date), fiscal_year))
@frappe.whitelist()
def get_balance_on(account=None, date=None, party_type=None, party=None, in_account_currency=True):
if not account and frappe.form_dict.get("account"):
account = frappe.form_dict.get("account")
if not date and frappe.form_dict.get("date"):
date = frappe.form_dict.get("date")
if not party_type and frappe.form_dict.get("party_type"):
party_type = frappe.form_dict.get("party_type")
if not party and frappe.form_dict.get("party"):
party = frappe.form_dict.get("party")
cond = []
if date:
cond.append("posting_date <= '%s'" % date)
else:
# get balance of all entries that exist
date = nowdate()
try:
year_start_date = get_fiscal_year(date, verbose=0)[1]
except FiscalYearError:
if getdate(date) > getdate(nowdate()):
# if fiscal year not found and the date is greater than today
# get fiscal year for today's date and its corresponding year start date
year_start_date = get_fiscal_year(nowdate(), verbose=1)[1]
else:
# this indicates that it is a date older than any existing fiscal year.
# hence, assuming balance as 0.0
return 0.0
if account:
acc = frappe.get_doc("Account", account)
acc.check_permission("read")
# for pl accounts, get balance within a fiscal year
if acc.report_type == 'Profit and Loss':
cond.append("posting_date >= '%s' and voucher_type != 'Period Closing Voucher'" \
% year_start_date)
# different filter for group and ledger - improved performance
if acc.is_group:
cond.append("""exists (
select name from `tabAccount` ac where ac.name = gle.account
and ac.lft >= %s and ac.rgt <= %s
)""" % (acc.lft, acc.rgt))
# If group and currency same as company,
# always return balance based on debit and credit in company currency
if acc.account_currency == frappe.db.get_value("Company", acc.company, "default_currency"):
in_account_currency = False
else:
cond.append("""gle.account = "%s" """ % (account.replace('"', '\\"'), ))
if party_type and party:
cond.append("""gle.party_type = "%s" and gle.party = "%s" """ %
(party_type.replace('"', '\\"'), party.replace('"', '\\"')))
if account or (party_type and party):
if in_account_currency:
select_field = "sum(ifnull(debit_in_account_currency, 0)) - sum(ifnull(credit_in_account_currency, 0))"
else:
select_field = "sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))"
bal = frappe.db.sql("""
SELECT {0}
FROM `tabGL Entry` gle
WHERE {1}""".format(select_field, " and ".join(cond)))[0][0]
# if bal is None, return 0
return flt(bal)
@frappe.whitelist()
def add_ac(args=None):
if not args:
args = frappe.local.form_dict
args.pop("cmd")
ac = frappe.new_doc("Account")
ac.update(args)
ac.old_parent = ""
ac.freeze_account = "No"
ac.insert()
return ac.name
@frappe.whitelist()
def add_cc(args=None):
if not args:
args = frappe.local.form_dict
args.pop("cmd")
cc = frappe.new_doc("Cost Center")
cc.update(args)
cc.old_parent = ""
cc.insert()
return cc.name
def reconcile_against_document(args):
"""
Cancel JV, Update against document, split if required and resubmit jv
"""
for d in args:
check_if_jv_modified(d)
validate_allocated_amount(d)
# cancel JV
jv_obj = frappe.get_doc('Journal Entry', d['voucher_no'])
jv_obj.make_gl_entries(cancel=1, adv_adj=1)
# update ref in JV Detail
update_against_doc(d, jv_obj)
# re-submit JV
jv_obj = frappe.get_doc('Journal Entry', d['voucher_no'])
jv_obj.make_gl_entries(cancel = 0, adv_adj =1)
def check_if_jv_modified(args):
"""
check if there is already a voucher reference
check if amount is same
check if jv is submitted
"""
ret = frappe.db.sql("""
select t2.{dr_or_cr} from `tabJournal Entry` t1, `tabJournal Entry Account` t2
where t1.name = t2.parent and t2.account = %(account)s
and t2.party_type = %(party_type)s and t2.party = %(party)s
and ifnull(t2.reference_type, '') in ("", "Sales Order", "Purchase Order")
and t1.name = %(voucher_no)s and t2.name = %(voucher_detail_no)s
and t1.docstatus=1 """.format(dr_or_cr = args.get("dr_or_cr")), args)
if not ret:
throw(_("""Payment Entry has been modified after you pulled it. Please pull it again."""))
def validate_allocated_amount(args):
if args.get("allocated_amt") < 0:
throw(_("Allocated amount can not be negative"))
elif args.get("allocated_amt") > args.get("unadjusted_amt"):
throw(_("Allocated amount can not be greater than unadjusted amount"))
def update_against_doc(d, jv_obj):
"""
Updates against document, if partial amount splits into rows
"""
jv_detail = jv_obj.get("accounts", {"name": d["voucher_detail_no"]})[0]
jv_detail.set(d["dr_or_cr"], d["allocated_amt"])
original_reference_type = jv_detail.reference_type
original_reference_name = jv_detail.reference_name
jv_detail.set("reference_type", d["against_voucher_type"])
jv_detail.set("reference_name", d["against_voucher"])
if d['allocated_amt'] < d['unadjusted_amt']:
jvd = frappe.db.sql("""
select cost_center, balance, against_account, is_advance, account_type, exchange_rate
from `tabJournal Entry Account` where name = %s
""", d['voucher_detail_no'], as_dict=True)
# new entry with balance amount
ch = jv_obj.append("accounts")
ch.account = d['account']
ch.account_type = jvd[0]['account_type']
ch.exchange_rate = jvd[0]['exchange_rate']
ch.party_type = d["party_type"]
ch.party = d["party"]
ch.cost_center = cstr(jvd[0]["cost_center"])
ch.balance = flt(jvd[0]["balance"])
ch.set(d['dr_or_cr'], flt(d['unadjusted_amt']) - flt(d['allocated_amt']))
ch.set(d['dr_or_cr']== 'debit' and 'credit' or 'debit', 0)
ch.against_account = cstr(jvd[0]["against_account"])
ch.reference_type = original_reference_type
ch.reference_name = original_reference_name
ch.is_advance = cstr(jvd[0]["is_advance"])
ch.docstatus = 1
# will work as update after submit
jv_obj.flags.ignore_validate_update_after_submit = True
jv_obj.save()
def remove_against_link_from_jv(ref_type, ref_no):
linked_jv = frappe.db.sql_list("""select parent from `tabJ
|
JuezUN/INGInious
|
inginious/frontend/pages/course.py
|
Python
|
agpl-3.0
| 4,665 | 0.00493 |
# -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Course page """
import web
from inginious.frontend.pages.utils import INGIniousPage
class CoursePage(INGIniousPage):
""" Course page """
def get_course(self, courseid):
""" Return the course """
try:
course = self.course_factory.get_course(courseid)
except:
raise web.notfound()
return course
def POST(self, courseid): # pylint: disable=arguments-differ
""" POST request """
course = self.get_course(courseid)
user_input = web.input()
if "unregister" in user_input and course.allow_unregister():
self.user_manager.course_unregister_user(course, self.user_manager.session_username())
raise web.seeother(self.app.get_homepath() + '/mycourses')
return self.show_page(course)
def GET(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course = self.get_course(courseid)
user_input = web.input()
page = int(user_input.get("page", 1)) - 1
tag = user_input.get("tag", "")
return self.show_page(course, page, tag)
def show_page(self, course, current_page=0, current_tag=""):
""" Prepares and shows the course page """
username = self.user_manager.session_username()
if not self.user_manager.course_is_open_to_user(course, lti=False):
return self.template_helper.get_renderer().course_unavailable()
tasks = course.get_tasks()
last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": course.get_id(),
"taskid": {"$in": list(tasks.keys())}})
for submission in last_submissions:
submission["taskname"] = tasks[submission['taskid']].get_name_or_id(self.user_manager.session_language())
tasks_data = {}
user_tasks = self.database.user_tasks.find(
{"username": username, "courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
is_admin = self.user_manager.has_staff_rights_on_course(course, username)
tasks_score = [0.0, 0.0]
for taskid, task in tasks.items():
tasks_data[taskid] = {"visible": task.get_accessible_time().after_start() or is_admin, "succeeded": False,
"grade": 0.0}
tasks_score[1] += task.get_grading_weight() if tasks_data[taskid]["visible"] else 0
for user_task in user_tasks:
tasks_data[user_task["taskid"]]["succeeded"] = user_task["succeeded"]
tasks_data[user_task["taskid"]]["grade"] = user_task["grade"]
weighted_score = user_task["grade"] * tasks[user_task["taskid"]].get_grading_weight()
tasks_score[0] += weighted_score if tasks_data[user_task["taskid"]]["visible"] else 0
course_grade = round(tasks_score[0] / tasks_score[1]) if tasks_score[1] > 0 else 0
tag_list = course.get_all_tags_names_as_list(is_admin, self.user_manager.session_language())
user_info = self.database.users.find_one({"username": username})
# Filter tasks with the tag in case the tasks are filtered
if not current_tag:
filtered_tasks = tasks
else:
filtered_tasks = {task_id: task for task_id, task in tasks.items() if
current_tag in map(lambda x: x.get_name(), task.get_tags()[2] + task.get_tags()[0])}
# Manage tasks pagination
page_limit = 20
total_tasks = len(filtered_tasks)
pages = total_tasks // page_limit
if (total_tasks % page_limit) != 0 or pages == 0:
pages += 1
if (page_limit * current_page + page_limit) < total_tasks:
page_tasks_ids = list(filtered_tasks.keys())[page_limit * current_page:
page_limit * current_page + page_limit]
else:
page_tasks_ids = list(filtered_tasks.keys())[page_limit * current_page:]
filtered_tasks = {task_id: tasks_data[task_id] for task_id, __ in filtered_tasks.items() if
task_id in page_tasks_ids}
return self.template_helper.get_renderer().course(user_info, course, last_submissions, tasks,
filtered_tasks, course_grade, tag_list, pages,
current_page + 1, current_tag)
|
padilin/Discord-RPG-Bot
|
character.py
|
Python
|
mit
| 2,055 | 0.008273 |
import discord
import os.path
import json
from discord.ext import commands
class Character():
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context = True)
async def char (self, ctx):
"""Character Creation. Asks for all information then builds Json file."""
userid = ctx.message.author.id
print(userid)
if os.path.isfile('cs_{}'.format(userid)) is True:
await self.bot.say('You\'re already on my list!')
else:
def checkname(msg):
return msg.content.startswith('name: ')
def checkclass(msg):
return msg.content.startswith('class: ')
await self.bot.say('You look new here, what\'s your name? \nname: First Last')
entername = await self.bot.wait_for_message(timeout=60.0, author=ctx.message.author, check=checkname)
if entername is None:
await self.bot.say('\nSorry')
return
cs_name = entername.content[6:]
print(cs_name)
await self.bot.say('Well Hello there {}. \nWhat\'s your class? \nclass: Title'.format(entername.content[6:]))
enterclass = await self.bot.wait_for_message(timeout=60.0, author=ctx.message.author, check=checkclass)
if enterclass is None:
await self.bot.say('\nSorry, ask someone to lenghen the timeout!')
return
cs_class = enterclass.content[7:]
print(cs_class)
cp = {str(userid): {'name': str(cs_name), 'class': str(cs_class), 'atk': 10, 'def': 10, 'spd': 10, 'skills': {}}} # current player temp dict to add to end of charactersheet.json, should have checked before !char
print(cp)
with open('cs_{}'.format(userid), 'w') as f:
json.dump(cp, f) #write json file
await self.bot.say('Got it, {} the {}'.format(cs_name.title(), cs_class.title()))
def setup(bot):
bot.add_cog(Character(bot))
|
F5Networks/f5-common-python
|
f5/bigip/tm/net/tunnels.py
|
Python
|
apache-2.0
| 3,153 | 0 |
# coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® Network tunnels module.
REST URI
``http://localhost/mgmt/tm/net/tunnels``
GUI Path
``Network --> tunnels``
REST Kind
``tm:net:tunnels:*``
"""
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class TunnelS(OrganizingCollection):
"""BIG-IP® network tunnels collection"""
def __init__(self, net):
super(TunnelS, self).__init__(net)
self._meta_data['allowed_lazy_attributes'] = [
Gres,
Tunnels,
Vxlans,
]
class Tunnels(Collection):
    """BIG-IP® network tunnels resource (collection for GRE, Tunnel, VXLANs)"""
def __init__(self, tunnelS):
super(Tunnels, self).__init__(tunnelS)
self._meta_data['allowed_lazy_attributes'] = [Gres, Tunnel, Vxlans]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:tunnel:tunnelstate': Tunnel}
class Tunnel(Resource):
"""BIG-IP® tunnels tunnel resource"""
def __init__(self, tunnels):
super(Tunnel, self).__init__(tunnels)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:tunnel:tunnelstate'
class Gres(Collection):
"""BIG-IP® tunnels GRE sub-collection"""
def __init__(self, tunnels):
super(Gres, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Gre]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:gre:grestate': Gre}
class Gre(Resource):
"""BIG-IP® tunnels GRE sub-collection resource"""
def __init__(self, gres):
super(Gre, self).__init__(gres)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:gre:grestate'
class Vxlans(Collection):
"""BIG-IP® tunnels VXLAN sub-collection"""
def __init__(self, tunnels):
super(Vxlans, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Vxlan]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:vxlan:vxlanstate': Vxlan}
class Vxlan(Resource):
"""BIG-IP® tunnels VXLAN sub-collection resource"""
def __init__(self, vxlans):
super(Vxlan, self).__init__(vxlans)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:vxlan:vxlanstate'
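# A minimal usage sketch (not part of this module): how the lazy-attribute chain
# declared above is typically navigated with f5-common-python's ManagementRoot.
# The host, credentials and tunnel properties below are placeholder assumptions.
from f5.bigip import ManagementRoot

def example_tunnel_usage():
    mgmt = ManagementRoot('192.0.2.10', 'admin', 'secret')
    # TunnelS (organizing collection) -> Tunnels (collection) -> Tunnel (resource)
    existing = mgmt.tm.net.tunnels.tunnels.get_collection()
    tunnel = mgmt.tm.net.tunnels.tunnels.tunnel.create(
        name='gre1', partition='Common', profile='gre',
        localAddress='192.0.2.10', remoteAddress='192.0.2.20')
    return existing, tunnel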
|
arruda/pyfuzzy
|
fuzzy/set/ZFunction.py
|
Python
|
lgpl-3.0
| 1,985 | 0.009068 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details
|
.
#
# You should have received a copy of the GNU Lesser G
|
eneral Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: ZFunction.py,v 1.13 2009/08/07 07:19:19 rliebscher Exp $"
from fuzzy.set.SFunction import SFunction
class ZFunction(SFunction):
r"""Realize a Z-shaped fuzzy set::
__
\
|\
| \
| |\
| | \__
| a |
| |
delta
see also U{http://pyfuzzy.sourceforge.net/test/set/ZFunction.png}
@ivar a: center of set.
@type a: float
@ivar delta: absolute distance between x-values for minimum and maximum.
@type delta: float
"""
def __init__(self,a=0.0,delta=1.0):
"""Initialize a Z-shaped fuzzy set.
@param a: center of set
@type a: float
@param delta: absolute distance between x-values for minimum and maximum
@type delta: float
"""
super(ZFunction, self).__init__(a,delta)
def __call__(self,x):
"""Return membership of x in this fuzzy set.
This method makes the set work like a function.
@param x: value for which the membership is to calculate
@type x: float
@return: membership
@rtype: float
"""
return 1.0 - SFunction.__call__(self,x)
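# A short usage sketch (not part of the original module): because ZFunction simply
# returns 1 - SFunction for the same parameters, a set built with a=0.0 and delta=1.0
# should give full membership well below the centre, roughly 0.5 at the centre and no
# membership well above it (assuming the usual S-function shape).
if __name__ == "__main__":
    z = ZFunction(a=0.0, delta=1.0)
    for x in (-1.0, -0.5, 0.0, 0.5, 1.0):
        print("z(%.1f) = %.3f" % (x, z(x)))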
|
moxuanchen/BMS
|
core/views/admin/login.py
|
Python
|
apache-2.0
| 1,670 | 0.000599 |
# -*- coding: utf-8 -*-
import urllib
from . import admin
from flask import request
from flask import url_for
from flask import redirect
from flask import render_template
from flask_login import UserMixin
from flask_login import login_user
from flask_login import logout_user
from flask_login import login_required
from core.extension import login_manager
from core.views.common import render_json
from core.models import AdminUser
class LoginUser(UserMixin):
def __init__(self, user):
self.user = user
def get_id(self):
return unicode(self.user.id)
@login_manager.user_loader
def load_user(userid):
user = AdminUser.get_by_id(int(userid))
return LoginUser(user)
@admin.route('/signin', methods=['GET', 'POST'])
def signin():
if request.method == 'POST':
user = AdminUser.query.filter_by(
active=True,
username=request.form['username'],
password=request.form['pwd']
).first()
if not user:
return render_json(1, {'err_no': 'pwd_error', 'input': 'pwd'})
login_user(LoginUser(user))
next = request.form.get('next', '')
if next:
next = urllib.unquote(next)
return render_json(0, {'href': next, 'delaySuccess': True})
return render_json(0, {'href': '/admin/dashboard', 'delaySuccess': True})
return render_template('/admin/signin.html')
@admin.route('/signout', methods=['GET'])
def signout():
logout_user()
return redirect(url_for('admin.signin'))
@admin.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard():
return render_template('/admin/dashboard.html')
|
Morteo/kiot
|
devices/thermostat/config.py
|
Python
|
mit
| 1,329 | 0.009782 |
#!/usr/bin/python3
import machine
from update_display import update_display as update_display_function
config = {
"gateway": {
# "type": "electrodragon-wifi-iot-relay-board-spdt-based-esp8266"
"id": "thermostat"
# "description": "Thermostat Control near Kitchen"
},
"devices": [
{
"type": "DisplayDevice",
"id": "display",
# "description": "OLED IC2 128 x 64 display",
"update_display_function": update_display_function,
"width": 128,
"height": 64,
"display_type": "SSD1306_I2C",
"i2c": {
"bus": -1,
"gpio_scl": 4,
"gpio_sda": 5
}
},
{
"type": "DHTDevice",
"id": "dht",
# "description": "Digital Humidity and Temperature sensor",
"dht_type": 22,
"gpio": 14,
"just_changes": True,
"freq": 60
},
{
"type": "SwitchDevice",
"id": "heating_relay",
# "description": "Relay corner out controls central heating on/off",
"gpio": 13,
"state": False
},
{
"type": "SwitchDevice",
"id": "red_button",
# "description": "Physical impulse switch",
"switch_type": machine.Pin.IN,
"gpio": 15,
# "gpio": 2,
"freq": 0,
"debounce": 20,
"just_changes": True,
"state": False
}
]
}
|
xArm-Developer/xArm-Python-SDK
|
xarm/x3/base.py
|
Python
|
bsd-3-clause
| 102,034 | 0.002388 |
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2020, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
import re
import time
import math
import threading
try:
from multiprocessing.pool import ThreadPool
except:
ThreadPool = None
try:
import asyncio
except:
asyncio = None
from .events import Events
from ..core.config.x_config import XCONF
from ..core.comm import SerialPort, SocketPort
from ..core.wrapper import UxbusCmdSer, UxbusCmdTcp
from ..core.utils.log import logger, pretty_print
from ..core.utils import convert
from ..core.config.x_code import ControllerWarn, ControllerError, ControllerErrorCodeMap, ControllerWarnCodeMap
from .utils import xarm_is_connected, compare_time, compare_version, xarm_is_not_simulation_mode, filter_invaild_number, xarm_is_pause, xarm_wait_until_cmdnum_lt_max
from .code import APIState
from ..tools.threads import ThreadManage
from ..version import __version__
controller_error_keys = ControllerErrorCodeMap.keys()
controller_warn_keys = ControllerWarnCodeMap.keys()
print('SDK_VERSION: {}'.format(__version__))
class Base(Events):
def __init__(self, port=None, is_radian=False, do_not_open=False, **kwargs):
if kwargs.get('init', False):
super(Base, self).__init__()
self._port = port
self._debug = kwargs.get('debug', False)
self._baudrate = kwargs.get('baudrate', XCONF.SerialConf.SERIAL_BAUD)
self._timeout = kwargs.get('timeout', None)
self._filters = kwargs.get('filters', None)
self._enable_heartbeat = kwargs.get('enable_heartbeat', False)
self._enable_report = kwargs.get('enable_report', True)
self._report_type = kwargs.get('report_type', 'rich')
self._forbid_uds = kwargs.get('forbid_uds', False)
self._check_tcp_limit = kwargs.get('check_tcp_limit', False)
self._check_joint_limit = kwargs.get('check_joint_limit', True)
self._check_cmdnum_limit = kwargs.get('check_cmdnum_limit', True)
self._check_simulation_mode = kwargs.get('check_simulation_mode', True)
self._max_cmd_num = kwargs.get('max_cmdnum', 512)
if not isinstance(self._max_cmd_num, int):
self._max_cmd_num = 512
self._max_cmd_num = min(XCONF.MAX_CMD_NUM, self._max_cmd_num)
self._check_robot_sn = kwargs.get('check_robot_sn', False)
self._check_is_ready = kwargs.get('check_is_ready', True)
self._check_is_pause = kwargs.get('check_is_pause', True)
self._timed_comm = kwargs.get('timed_comm', True)
self._timed_comm_interval = kwargs.get('timed_comm_interval', 30)
self._timed_comm_t = None
self._timed_comm_t_alive = False
self._max_callback_thread_count = kwargs.get('max_callback_thread_count', 0)
self._asyncio_loop = None
self._asyncio_loop_alive = False
self._asyncio_loop_thread = None
self._pool = None
self._thread_manage = ThreadManage()
self._rewrite_modbus_baudrate_method = kwargs.get('rewrite_modbus_baudrate_method', True)
self._min_tcp_speed, self._max_tcp_speed = 0.1, 1000 # mm/s
self._min_tcp_acc, self._max_tcp_acc = 1.0, 50000 # mm/s^2
self._tcp_jerk = 1000 # mm/s^3
self._min_joint_speed, self._max_joint_speed = 0.01, 4.0 # rad/s
self._min_joint_acc, self._max_joint_acc = 0.01, 20.0 # rad/s^2
self._joint_jerk = 20.0 # rad/s^3
self._rot_jerk = 2.3
self._max_rot_acc = 2.7
self._stream_type = 'serial'
self._stream = None
self.arm_cmd = None
self._stream_report = None
self._report_thread = None
self._only_report_err_warn_changed = True
self._last_position = [201.5, 0, 140.5, 3.1415926, 0, 0] # [x(mm), y(mm), z(mm), roll(rad), pitch(rad), yaw(rad)]
self._last_angles = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # [servo_1(rad), servo_2(rad), servo_3(rad), servo_4(rad), servo_5(rad), servo_6(rad), servo_7(rad)]
self._last_tcp_speed = 100 # mm/s, rad/s
self._last_tcp_acc = 2000 # mm/s^2, rad/s^2
self._last_joint_speed = 0.3490658503988659 # 20 °/s
self._last_joint_acc = 8.726646259971648 # 500 °/s^2
self._mvtime = 0
self._version = None
self._robot_sn = None
self._control_box_sn = None
self._position = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._pose_aa = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._angles = [0] * 7
self._position_offset = [0] * 6
self._world_offset = [0] * 6
self._state = 4
self._mode = 0
self._joints_torque = [0, 0, 0, 0, 0, 0, 0]  # joint torques
self._tcp_load = [0, [0, 0, 0]]  # payload [weight, centre of gravity], [weight, [x, y, z]]
self._collision_sensitivity = 0  # collision sensitivity
self._teach_sensitivity = 0  # teach sensitivity
self._error_code = 0
self._warn_code = 0
self._servo_codes = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
self._cmd_num = 0
self._arm_type = XCONF.Robot.Type.XARM7_X4
self._arm_axis = XCONF.Robot.Axis.XARM7
axis = kwargs.get('axis', self._arm_axis)
if axis in [5, 6, 7]:
self._arm_axis = axis
arm_type = kwargs.get('type', self._arm_type)
if arm_type in [3, 5, 6, 7, 8]:
self._arm_type = arm_type
self._arm_master_id = 0
self._arm_slave_id = 0
self._arm_motor_tid = 0
self._arm_motor_fid = 0
self._arm_motor_brake_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-brake-state, ..., motor-7-brake, reserved]
self._arm_motor_enable_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-enable-state, ..., motor-7-enable, reserved]
self._gravity_direction = [0, 0, -1]
self._is_ready = False
self._is_sync = False
self._is_first_report = True
self._first_report_over = False
self._default_is_radian = is_radian
self._sleep_finish_time = time.time()
self._is_old_protocol = False
self._major_version_number = 0  # firmware major version number
self._minor_version_number = 0  # firmware minor version number
self._revision_version_number = 0  # firmware revision version number
self._temperatures = [0, 0, 0, 0, 0, 0, 0]
self._voltages = [0, 0, 0, 0, 0, 0, 0]
self._currents = [0, 0, 0, 0, 0, 0, 0]
self._is_set_move = False
self._pause_cond = threading.Condition()
self._pause_lock = threading.Lock()
self._pause_cnts = 0
self._realtime_tcp_speed = 0
self._realtime_joint_speeds = [0, 0, 0, 0, 0, 0, 0]
self._count = -1
self._last_report_time = time.time()
self._max_report_interval = 0
self._cgpio_reset_enable = 0
self._tgpio_reset_enable = 0
self._cgpio_states = [0, 0, 256, 65533, 0, 65280, 0, 0, 0.0, 0.0, [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
self._iden_progress = 0
self._ignore_error = False
self._ignore_state = False
self.modbus_baud = -1
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.bio_gripper_error_code = 0
self.robotiq_is_activated = False
self._cmd_timeout = XCONF.UxbusConf.SET_TIMEOUT / 1000
|
plotly/python-api
|
packages/python/plotly/plotly/validators/violin/_width.py
|
Python
|
mit
| 472 | 0 |
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="width", parent_name="violin", **kwargs):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
|
ghislainv/deforestprob
|
test/test_get_started.py
|
Python
|
gpl-3.0
| 9,570 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# author :Ghislain Vieilledent
# email :ghislain.vieilledent@cirad.fr, ghislainv@gmail.com
# web :https://ecology.ghislainv.fr
# python_version :>=2.7
# license :GPLv3
# ==============================================================================
import os
import numpy as np
def test_make_dir():
assert os.path.exists("output")
def test_data():
assert os.path.exists("data")
def test_plot_fcc23():
assert os.path.exists("output/fcc23.png")
def test_sample():
assert os.path.exists("output/sample.txt")
def test_dataset(gstart):
assert gstart["dataset"].iloc[0, 0] == 30.0
def test_cellneigh(gstart):
a = np.array([3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 8, 8, 8, 8, 8,
8, 8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5,
8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8,
8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 8,
8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8, 8,
8, 8, 5, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3])
b = np.array([1, 11, 12, 0, 2, 11, 12, 13, 1, 3, 12, 13, 14, 2, 4,
13, 14, 15, 3, 5, 14, 15, 16, 4, 6, 15, 16, 17, 5,
7, 16, 17, 18, 6, 8, 17, 18, 19, 7, 9, 18, 19, 20,
8, 10, 19, 20, 21, 9, 20, 21, 0, 1, 12, 22, 23, 0,
1, 2, 11, 13, 22, 23, 24, 1, 2, 3, 12, 14, 23, 24,
25, 2, 3, 4, 13, 15, 24, 25, 26, 3, 4, 5, 14, 16,
25, 26, 27, 4, 5, 6, 15, 17, 26, 27, 28, 5, 6, 7,
16, 18, 27, 28, 29, 6, 7, 8, 17, 19, 28, 29, 30, 7,
8, 9, 18, 20, 29, 30, 31, 8, 9, 10, 19, 21, 30, 31,
32, 9, 10, 20, 31, 32, 11, 12, 23, 33, 34, 11, 12,
13, 22, 24, 33, 34, 35, 12, 13, 14, 23, 25, 34, 35,
36, 13, 14, 15, 24, 26, 35, 36, 37, 14, 15, 16, 25,
27, 36, 37, 38, 15, 16, 17, 26, 28, 37, 38, 39, 16,
17, 18, 27, 29, 38, 39, 40, 17, 18, 19, 28, 30, 39,
40, 41, 18, 19, 20, 29, 31, 40, 41, 42, 19, 20, 21,
30, 32, 41, 42, 43, 20, 21, 31, 42, 43, 22, 23, 34,
44, 45, 22, 23, 24, 33, 35, 44, 45, 46, 23, 24, 25,
34, 36, 45, 46, 47, 24, 25, 26, 35, 37, 46, 47, 48,
25, 26, 27, 36, 38, 47, 48, 49, 26, 27, 28, 37, 39,
48, 49, 50, 27, 28, 29, 38, 40, 49, 50, 51, 28, 29,
30, 39, 41, 50, 51, 52, 29, 30, 31, 40, 42, 51, 52,
53, 30, 31, 32, 41, 43, 52, 53, 54, 31, 32, 42, 53,
54, 33, 34, 45, 55, 56, 33, 34, 35, 44, 46, 55, 56,
57, 34, 35, 36, 45, 47, 56, 57, 58, 35, 36, 37, 46,
48, 57, 58, 59, 36, 37, 38, 47, 49, 58, 59, 60, 37,
38, 39, 48, 50, 59, 60, 61, 38, 39, 40, 49, 51, 60,
61, 62, 39, 40, 41, 50, 52, 61, 62, 63, 40, 41, 42,
51, 53, 62, 63, 64, 41, 42, 43, 52, 54, 63, 64, 65,
42, 43, 53, 64, 65, 44, 45, 56, 66, 67, 44, 45, 46,
55, 57, 66, 67, 68, 45, 46, 47, 56, 58, 67, 68, 69,
46, 47, 48, 57, 59, 68, 69, 70, 47, 48, 49, 58, 60,
69, 70, 71, 48, 49, 50, 59, 61, 70, 71, 72, 49, 50,
51, 60, 62, 71, 72, 73, 50, 51, 52, 61, 63, 72, 73,
74, 51, 52, 53, 62, 64, 73, 74, 75, 52, 53, 54, 63,
65, 74, 75, 76, 53, 54, 64, 75, 76, 55, 56, 67, 77,
78, 55, 56, 57, 66, 68, 77, 78, 79, 56, 57, 58, 67,
69, 78, 79, 80, 57, 58, 59, 68, 70, 79, 80, 81, 58,
59, 60, 69, 71, 80, 81, 82, 59, 60, 61, 70, 72, 81,
82, 83, 60, 61, 62, 71, 73, 82, 83, 84, 61, 62, 63,
72, 74, 83, 84, 85, 62, 63, 64, 73, 75, 84, 85, 86,
63, 64, 65, 74, 76, 85, 86, 87, 64, 65, 75, 86, 87,
66, 67, 78, 88, 89, 66, 67, 68, 77, 79, 88, 89, 90,
67, 68, 69, 78, 80, 89, 90, 91, 68, 69, 70, 79, 81,
90, 91, 92, 69, 70, 71, 80, 82, 91, 92, 93, 70, 71,
72, 81, 83, 92, 93, 94, 71, 72, 73, 82, 84, 93, 94,
95, 72, 73, 74, 83, 85, 94, 95, 96, 73, 74, 75, 84,
86, 95, 96, 97, 74, 75, 76, 85, 87, 96, 97, 98, 75,
76, 86, 97, 98, 77, 78, 89, 77, 78, 79, 88, 90, 78,
79, 80, 89, 91, 79, 80, 81, 90, 92, 80, 81, 82, 91,
93, 81, 82, 83, 92, 94, 82, 83, 84, 93, 95, 83, 84,
85, 94, 96, 84, 85, 86, 95, 97, 85, 86, 87, 96, 98,
86, 87, 97])
assert (np.array_equal(gstart["nneigh"], a) and
np.array_equal(gstart["adj"], b))
def test_model_binomial_iCAR(gstart):
p = np.array([0.34388896, 0.29002158, 0.51594223, 0.48436339,
0.60838453, 0.61257058, 0.55034979, 0.58819568,
0.51087469, 0.58819568, 0.64149789, 0.57400436,
0.59570952, 0.63212285, 0.566676, 0.62562204,
0.55379459, 0.15644965, 0.61284327, 0.36638686,
0.55439297, 0.57325744, 0.62562204, 0.17995823,
0.4930868, 0.54641479, 0.59782004, 0.48159526,
0.62882886, 0.59831051, 0.76245777, 0.74576097,
0.77356767, 0.73863295, 0.78188891, 0.75056545,
0.60775752, 0.64978574, 0.74654465, 0.77378323,
0.53994416, 0.75852715, 0.77754366, 0.60053684,
0.71543739, 0.74565542, 0.7555028, 0.44598923,
0.76401273, 0.75953027, 0.49027142, 0.69610182,
0.75679461, 0.78543649, 0.76863321, 0.6209473,
0.77653139, 0.76182804, 0.78169681, 0.58816002,
0.50453473, 0.77980428, 0.76084413, 0.73054832,
0.78289747, 0.71858934, 0.78362842, 0.74702923,
0.67357571, 0.78940242, 0.75358937, 0.66791346,
0.75602843, 0.42494845, 0.77653139, 0.60509306,
0.60846943, 0.76187008, 0.73278992, 0.72792572,
0.47661681, 0.59456417, 0.71894598, 0.6731302,
0.74964489, 0.77247818, 0.78289747, 0.74200682,
0.78940242, 0.78508877, 0.73153419, 0.65636031,
0.78607775, 0.59738545, 0.72596162, 0.78216462,
0.75078253, 0.77527468, 0.69907386, 0.71991522])
assert np.allclose(gstart["pred_icar"][0:100], p)
def test_rho(gstart):
r = np.array([-3.72569484e-02, -1.16871478e-01, -1.82400711e-01,
2.13446770e-01, -6.44591325e-01, -9.89850864e-02,
1.10439030e-01, -2.31551563e-02, -3.30273946e-01,
-2.66995061e-01, -3.84426210e-01, 5.73572517e-02,
-5.73353804e-02, -3.12497338e-01, -8.37127591e-01,
7.62072575e-02, 3.86361945e-01, 1.26487021e-02,
-8.22069815e-02, -3.60656850e-01, -5.46586761e-01,
-4.17346094e-01, 1.05212875e+00, -4.32508096e-02,
-4.49589533e-01, -6.89872259e-01, -4.91230799e-01,
-3.84040358e-01, 5.67299746e-01, -2.10071117e-01,
-1.07456253e+00, -6.69339978e-01, -6.21974970e-01,
2.15020267e+00, -7.16437085e-02, -4.46424607e-01,
-2.17259138e-01, -3.30043032e-01, -2.59613996e-01,
2.68845283e-01, -3.78046974e-01, -5.18108829e-01,
-6.18235133e-01, -7.59652734e-01, 1.51771355e+00,
1.75357016e+00, -8.01814048e-02, 1.99270623e-01,
-1.75157345e-01, -6.10561635e-02, -1.26099802e-01,
-1.77864133e-01, -3.03381214e-01, -5.29892286e-01,
-5.47125418e-01, 1.30320979e+00, 2.37670385e+00,
4.97829325e-01, 8.88668246e-01, 3.92682659e-01,
-6.56913949e-03, -2.95774565e-01, -5.15489012e-01,
-6.01407176e-01, -5.67695385e-01, -6.48479745e-01,
|
MadeiraCloud/salt
|
sources/salt/utils/validate/path.py
|
Python
|
apache-2.0
| 1,466 | 0 |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
salt.utils.validate.path
~~~~~~~~~~~~~~~~~~~~~~~~
Several path related validators
'''
# Import python libs
import os
def is_writeable(path, check_parent=False):
'''
Check if a given path is writeable by the current user.
:param path: The path to check
:param check_parent: If the path to check does not exist, check for the
ability to write to the parent directory instead
:returns: True or False
'''
if os.access(path, os.F_OK) and os.access(path, os.W_OK):
# The path exists and is writeable
return True
if os.access(path, os.F_OK) and not os.access(path, os.W_OK):
# The path exists and is not writeable
return False
# The path does not exists or is not writeable
if check_parent is False:
# We're not allowed to check the parent directory of the provided path
return False
# Lets get the parent directory of the provided path
parent_dir = os.path.dirname(path)
if not os.access(parent_dir, os.F_OK):
# Parent directory does not exist
return False
# Finally, return if we're allowed to write in the parent directory of the
# provided path
return os.access(parent_dir, os.W_OK)
|
Debian/openjfx
|
modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/checkpatchrelevance.py
|
Python
|
gpl-2.0
| 2,708 | 0 |
# Copyright (C) 2017 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class CheckPatchRelevance(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.group,
]
jsc_paths = [
    "JSTests/",
    "Source/JavaScriptCore/",
    "Source/WTF/",
    "Source/bmalloc/",
]
group_to_paths_mapping = {
'jsc': jsc_paths,
}
def _changes_are_relevant(self, changed_files):
# In the default case, all patches are relevant
if self._options.group != 'jsc':
return True
patterns = self.group_to_paths_mapping[self._options.group]
for changed_file in changed_files:
for pattern in patterns:
if re.search(pattern, changed_file, re.IGNORECASE):
return True
return False
def run(self, state):
_log.info("Checking relevance of patch")
change_list = self._tool.scm().changed_files()
if self._changes_are_relevant(change_list):
return True
_log.info("This patch does not have relevant changes.")
raise ScriptError(message="This patch does not have relevant changes.")
|
matrix-org/synapse
|
synapse/handlers/read_marker.py
|
Python
|
apache-2.0
| 2,249 | 0.001334 |
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING
from synapse.util.async_helpers import Linearizer
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ReadMarkerHandler:
def __init__(self, hs: "HomeServer"):
self.server_name = hs.config.server.server_name
self.store = hs.get_datastores().main
self.account_data_handler = hs.get_account_data_handler()
self.read_marker_linearizer = Linearizer(name="read_marker")
async def received_client_read_marker(
self, room_id: str, user_id: str, event_id: str
) -> None:
"""Updates the read marker for a given user in a given room if the event ID given
is ahead in the stream relative to the current read marker.
This uses a notifier to indicate that account data should be sent down /sync if
the read marker has changed.
"""
with await self.read_marker_linearizer.queue((room_id, user_id)):
existing_read_marker = await self.store.get_account_data_for_room_and_type(
user_id, room_id, "m.fully_read"
)
should_update = True
if existing_read_marker:
# Only update if the new marker is ahead in the stream
should_update = await self.store.is_event_after(
event_id, existing_read_marker["event_id"]
)
if should_update:
content = {"event_id": event_id}
await self.account_data_handler.add_account_data_to_room(
user_id, room_id, "m.fully_read", content
)
|
maurobaraldi/ll_interview_application
|
luizalabs/employees/tests/tests_models.py
|
Python
|
gpl-3.0
| 866 | 0.001155 |
from model_mommy import mommy
from django.test import TestCase
from ..models import Department, Employee
class DepartmentTestMommy(TestCase):
    """Department's model test case."""
def test_department_creation_mommy(self):
"""Test create department's model."""
new_department = mommy.make('employees.Department')
self.assertTrue(isinstance(new_department, Department))
self.assertEqual(new_department.__str__(), new_department.name)
class EmployeeTestMommy(TestCase):
    """Employee's model test case."""
def test_employee_creation_mommy(self):
    """Test create employee's model."""
new_employee = mommy.make('employees.Employee')
self.assertTrue(isinstance(new_employee, Employee))
self.assertEqual(new_employee.__str__(), '%s %s' % (new_employee.first_name, new_employee.last_name))
|
raccoongang/socraticqs2
|
mysite/mysite/tests/celery.py
|
Python
|
apache-2.0
| 2,987 | 0.002343 |
import datetime
import mock
from django.utils import timezone
from mock import Mock, call, PropertyMock
from django.test import TestCase
from django.contrib.sessions.models import Session
from mysite.celery import send_outcome, check_anonymous
class CeleryTasksTest(TestCase):
@mock.patch('mysite.celery.UserSession.objects.filter')
@mock.patch('mysite.celery.User.objects.filter')
def test_check_anonymous_user_session_no_session(self, mock_User_filter, mock_UserSession_filter):
mock_user = Mock(id=1)
call_mock_User_filter = [mock_user]
mock_session = Mock(id=2)
# user_session.session
p = PropertyMock(return_value=3, side_effect=Session.DoesNotExist('Object Does not exist'))
type(mock_session).session = p
call_mock_UserSession_filter = [mock_session]
mock_User_filter.return_value = call_mock_User_filter
mock_UserSession_filter.return_value = call_mock_UserSession_filter
mock_user_del = Mock()
mock_user.delete = mock_user_del
response = check_anonymous()
mock_user_del.assert_called_once_with()
mock_User_filter.assert_called_with(groups__name='Temporary')
mock_UserSession_filter.assert_called_with(user__groups__name='Temporary')
@mock.patch('mysite.celery.UserSession.objects.filter')
@mock.patch('mysite.celery.User.objects.filter')
def test_check_anonymous_user_session_has_session(self, mock_User_filter, mock_UserSession_filter):
mock_user = Mock(id=1)
call_mock_User_filter = [mock_user]
mock_session = Mock(id=2)
# user_session.session
mock_session.session.expire_date = timezone.now() - datetime.timedelta(days=1)
sess_session_del = Mock()
sess_user_del = Mock()
mock_session.session.delete = sess_session_del
mock_session.user.delete = sess_user_del
call_mock_UserSession_filter = [mock_session]
mock_User_filter.return_value = call_mock_User_filter
mock_UserSession_filter.return_value = call_mock_UserSession_filter
mock_user_del = Mock()
mock_user.delete = mock_user_del
response = check_anonymous()
sess_session_del.assert_called_once_with()
sess_user_del.assert_called_once_with()
mock_user_del.assert_called_once_with()
mock_User_filter.assert_called_with(groups__name='Temporary')
mock_UserSession_filter.assert_called_with(user__groups__name='Temporary')
@mock.patch('mysite.celery.GradedLaunch.objects.get')
@mock.patch('mysite.celery.send_score_update')
def test_send_outcome(self, mock_send_score_update, mock_GradedLaunch_get):
get_mock_ret_val = Mock()
mock_GradedLaunch_get.return_value = get_mock_ret_val
result = send_outcome('0', assignment_id=1)
mock_GradedLaunch_get.assert_called_once_with(id=1)
mock_send_score_update.assert_called_once_with(get_mock_ret_val, '0')
|
weewx/weewx
|
bin/weeimport/cumulusimport.py
|
Python
|
gpl-3.0
| 20,404 | 0.000784 |
#
# Copyright (c) 2009-2019 Tom Keffer <tkeffer@gmail.com> and
# Gary Roderick
#
# See the file LICENSE.txt for your full rights.
#
"""Module to interact with Cumulus monthly log files and import raw
observational data for use with weeimport.
"""
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
# Python imports
import csv
import glob
import io
import logging
import os
import time
# WeeWX imports
from . import weeimport
import weewx
from weeutil.weeutil import timestamp_to_string
from weewx.units import unit_nicknames
log = logging.getLogger(__name__)
# Dict to lookup rainRate units given rain units
rain_units_dict = {'inch': 'inch_per_hour', 'mm': 'mm_per_hour'}
# ============================================================================
# class CumulusSource
# ============================================================================
class CumulusSource(weeimport.Source):
    """Class to interact with Cumulus-generated monthly log files.
Handles the import of data from Cumulus monthly log files. Cumulus stores
observation data in monthly log files. Each log file contains a month of
data in CSV format. The format of the CSV data (eg date separator, field
delimiter, decimal point character) depends upon the settings used in
Cumulus.
Data is imported from all month log files found in the source directory one
log file at a time. Units of measure are not specified in the monthly log
files so the units of measure must be specified in the wee_import config
file. Whilst the Cumulus monthly log file format is well defined, some
pre-processing of the data is required to provide data in a format
suitable for use in the wee_import mapping methods.
"""
# List of field names used during import of Cumulus log files. These field
# names are for internal wee_import use only as Cumulus monthly log files
# do not have a header line with defined field names. Cumulus monthly log
# field 0 and field 1 are date and time fields respectively. getRawData()
# combines these fields to return a formatted date-time string that is later
# converted into a unix epoch timestamp.
_field_list = ['datetime', 'cur_out_temp', 'cur_out_hum',
'cur_dewpoint', 'avg_wind_speed', 'gust_wind_speed',
'avg_wind_bearing', 'cur_rain_rate', 'day_rain', 'cur_slp',
'rain_counter', 'curr_in_temp', 'cur_in_hum',
'lastest_wind_gust', 'cur_windchill', 'cur_heatindex',
'cur_uv', 'cur_solar', 'cur_et', 'annual_et',
'cur_app_temp', 'cur_tmax_solar', 'day_sunshine_hours',
'cur_wind_bearing', 'day_rain_rg11', 'midnight_rain']
# Dict to map all possible Cumulus field names (refer _field_list) to WeeWX
# archive field names and units.
_header_map = {'datetime': {'units': 'unix_epoch', 'map_to': 'dateTime'},
'cur_out_temp': {'map_to': 'outTemp'},
'curr_in_temp': {'map_to': 'inTemp'},
'cur_dewpoint': {'map_to': 'dewpoint'},
'cur_slp': {'map_to': 'barometer'},
'avg_wind_bearing': {'units': 'degree_compass',
'map_to': 'windDir'},
'avg_wind_speed': {'map_to': 'windSpeed'},
'cur_heatindex': {'map_to': 'heatindex'},
'gust_wind_speed': {'map_to': 'windGust'},
'cur_windchill': {'map_to': 'windchill'},
'cur_out_hum': {'units': 'percent', 'map_to': 'outHumidity'},
'cur_in_hum': {'units': 'percent', 'map_to': 'inHumidity'},
'midnight_rain': {'map_to': 'rain'},
'cur_rain_rate': {'map_to': 'rainRate'},
'cur_solar': {'units': 'watt_per_meter_squared',
'map_to': 'radiation'},
'cur_uv': {'units': 'uv_index', 'map_to': 'UV'},
'cur_app_temp': {'map_to': 'appTemp'}
}
def __init__(self, config_dict, config_path, cumulus_config_dict, import_config_path, options):
# call our parents __init__
super(CumulusSource, self).__init__(config_dict,
cumulus_config_dict,
options)
# save our import config path
self.import_config_path = import_config_path
# save our import config dict
self.cumulus_config_dict = cumulus_config_dict
# wind dir bounds
self.wind_dir = [0, 360]
# field delimiter used in monthly log files, default to comma
self.delimiter = str(cumulus_config_dict.get('delimiter', ','))
# decimal separator used in monthly log files, default to decimal point
self.decimal = cumulus_config_dict.get('decimal', '.')
# date separator used in monthly log files, default to solidus
separator = cumulus_config_dict.get('separator', '/')
# we combine Cumulus date and time fields to give a fixed format
# date-time string
self.raw_datetime_format = separator.join(('%d', '%m', '%y %H:%M'))
# Cumulus log files provide a number of cumulative rainfall fields. We
# cannot use the daily rainfall as this may reset at some time of day
# other than midnight (as required by WeeWX). So we use field 26, total
# rainfall since midnight and treat it as a cumulative value.
self.rain = 'cumulative'
# initialise our import field-to-WeeWX archive field map
self.map = None
# Cumulus log files have a number of 'rain' fields that can be used to
# derive the WeeWX rain field. Which one is available depends on the
# Cumulus version that created the logs. The preferred field is field
# 26(AA) - total rainfall since midnight but it is only available in
# Cumulus v1.9.4 or later. If that field is not available then the
# preferred field in field 09(J) - total rainfall today then field
# 11(L) - total rainfall counter. Initialise the rain_source_confirmed
# property now and we will deal with it later when we have some source
# data.
self.rain_source_confirmed = None
# Units of measure for some obs (eg temperatures) cannot be derived from
        # the Cumulus monthly log files. These units must be specified by the
# user in the import config file. Read these units and fill in the
# missing unit data in the header map. Do some basic error checking and
# validation, if one of the fields is missing or invalid then we need
# to catch the error and raise it as we can't go on.
# Temperature
try:
temp_u = cumulus_config_dict['Units'].get('temperature')
except KeyError:
_msg = "No units specified for Cumulus temperature " \
"fields in %s." % (self.import_config_path, )
raise weewx.UnitError(_msg)
else:
# temperature units vary between unit systems so we can verify a
# valid temperature unit simply by checking for membership of
# weewx.units.conversionDict keys
if temp_u in weewx.units.conversionDict.keys():
self._header_map['cur_out_temp']['units'] = temp_u
self._header_map['curr_in_temp']['units'] = temp_u
self._header_map['cur_dewpoint']['units'] = temp_u
self._header_map['cur_heatindex']['units'] = temp_u
self._header_map['cur_windchill']['units'] = temp_u
self._header_map['cur_app_temp']['units'] = temp_u
else:
_msg = "Unknown units '%s' specified for Cumulus " \
"temperature fields in %s." % (temp_u,
self.import_config_path)
raise weewx.UnitError(_msg)
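        # A minimal sketch of the import config stanza read above (assumed
        # layout, values are examples only):
        #
        #   [Units]
        #       temperature = degree_C
        #       pressure = hPa
        #       rain = mm
        #       speed = km_per_hour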
|
jchrismer/PiQuad
|
Calibration/Inertial_Calibration.py
|
Python
|
gpl-3.0
| 9,316 | 0.012988 |
__author__ = 'joseph'
import statistics
import numpy as np
class AccelData(object):
def __init__(self,Accel):
#Static accelerometer data
self.Accel = Accel
def applyCalib(self,params,Accel):
ax = params['ax']
ay = params['ay']
az = params['az']
scaling_Matrix = np.diag([params['kx'], params['ky'],params['kz']])
misalignment_Matrix = np.array([[1.0, -ax,ay],
[0, 1.0, -az],
[0,0,1.0]])
adjustment_matrix = np.dot(misalignment_Matrix,scaling_Matrix)
bx = params['bx']
by = params['by']
bz = params['bz']
# apply theta to the accelerometer
Accel[0,:] = Accel[0,:] - bx
Accel[1,:] = Accel[1,:] - by
Accel[2,:] = Accel[2,:] - bz
Accel = np.dot(adjustment_matrix,Accel)
        return Accel  # probably not necessary, as it may have been passed by reference
class GyroData(object):
def __init__(self,Gyro,bx,by,bz):
self.bx = bx
self.by = by
self.bz = bz
self.Gyro = Gyro
def applyCalib(self,params,Gyro):
scaling_Matrix = np.diag([params['sx'], params['sy'],params['sz']])
misalignment_Matrix = np.array([
[1, params['gamma_yz'],params['gamma_zy']],
[params['gamma_xz'], 1, params['gamma_zx']],
[params['gamma_xy'],params['gamma_yx'],1]])
adjustment_matrix = np.dot(misalignment_Matrix,scaling_Matrix)
Gyro[0,:] = Gyro[0,:] - self.bx
Gyro[1,:] = Gyro[1,:] - self.by
Gyro[2,:] = Gyro[2,:] - self.bz
Gyro = np.dot(adjustment_matrix,Gyro)
return Gyro
class RollingStatistic(object):
def __init__(self, window_size):
self.N = window_size
self.window = window_size * [0]
self.average = 0
self.variance = 0
self.stddev = 0
self.index = 0
def update(self,new):
# Preload
if(self.index < self.N):
self.window[self.index] = new
self.index += 1
# If Window preloaded - start rolling statistics
if(self.index == self.N):
self.average = statistics.mean(self.window)
self.variance = statistics.variance(self.window)
return
# Push element into window list and remove the old element
old = self.window[0]
self.window.pop(0)
self.window.append(new)
oldavg = self.average
newavg = oldavg + (new - old)/self.N
self.average = newavg
if(self.N > 1):
self.variance += (new-old)*(new-newavg+old-oldavg)/(self.N-1)
def getVar(self):
if(self.index == 1):
return 0
elif(self.index < self.N):
return statistics.variance(self.window[0:self.index]) # Make return 0?
return self.variance
def reset(self):
self.index = 0
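# Illustrative sketch (not part of the original source): update() above uses
# the standard rolling-window identities
#   new_mean = old_mean + (x_new - x_old) / N
#   new_var  = old_var + (x_new - x_old) * (x_new - new_mean + x_old - old_mean) / (N - 1)
# For example, RollingStatistic(3) fed 1.0, 2.0 and 3.0 reports average == 2.0
# and getVar() == 1.0 once the window is full.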
def static_invertal_detection(Data_in, Time, options,var_mult):
total_samples = len(Time)
Initial_Static = options[0]
index = 0
static_timer = 0
static_window = options[1]
running_var_x = RollingStatistic(25)
running_var_y = RollingStatistic(25)
running_var_z = RollingStatistic(25)
# Find the total number of entries in the initial wait period
while (static_timer <= Initial_Static):
static_timer = static_timer + Time[index]
index = index +1
Static_var_X = statistics.variance(Data_in[0:index,0])
Static_var_Y = statistics.variance(Data_in[0:index,1])
Static_var_Z = statistics.variance(Data_in[0:index,2])
Static_Th = Static_var_X**2 + Static_var_Y**2 + Static_var_Z**2 #Static threshold
static_timer = 0
current_interval_start = 1
current_interval_end = current_interval_start + 1
Valid_intervals_starts = []
Valid_intervals_ends = []
num_static = 0
Max = -999999
Min = 999999
#loop through the dataset and map the static intervals
for i in range(0,total_samples):
# update time
static_timer = static_timer + Time[i]
running_var_x.update(Data_in[i,0])
running_var_y.update(Data_in[i,1])
running_var_z.update(Data_in[i,2])
m = max([Data_in[i,0],Data_in[i,1],Data_in[i,2]])
mn = min([Data_in[i,0],Data_in[i,1],Data_in[i,2]])
# Store maximum for constructing the visualization of this later
if(m > Max):
Max = m
if(mn < Min):
Min = mn
# Check current (rolling) variance
current_norm = running_var_x.getVar()**2 + running_var_y.getVar()**2 + running_var_z.getVar()**2
if(current_norm > Static_Th*var_mult):
#check if the latest interval is valid length
if(static_timer >= static_window):
num_static += 1
current_interval_end = i -1 # skip the point that caused it to go beyond threshold
Valid_intervals_starts.append(current_interval_start)
Valid_intervals_ends.append(current_interval_end)
# Reset running variances
running_var_x.reset()
running_var_y.reset()
running_var_z.reset()
# Reset the current static interval starting and ending index
current_interval_end = i
current_interval_start = current_interval_end
# Reset timer
static_timer = 0
# Main loop ended
visualize = total_samples * [28000]
for i in range(0,num_static):
length = Valid_intervals_ends[i] - Valid_intervals_starts[i] + 1
visualize[Valid_intervals_starts[i]:(Valid_intervals_ends[i]+1)] = [.6*Max]*length
return Valid_intervals_starts, Valid_intervals_ends, visualize, index
def accel_resid(params, accel_staticx,accel_staticy,accel_staticz):
scaling_Matrix = np.diag([params['kx'], params['ky'],params['kz']])
misalignment_Matrix = np.array([[1, -params['ax'],params['ay']],
[0, 1, -params['az']],
[0,0,1]])
adjustment_matrix = np.dot(misalignment_Matrix,scaling_Matrix)
local_gravity = 9.81744
bx = params['bx']
by = params['by']
bz = params['bz']
# apply theta to the accelerometer
accel_static = np.zeros((3,len(accel_staticx)))
accel_static[0,:] = accel_staticx - bx
accel_static[1,:] = accel_staticy - by
accel_static[2,:] = accel_staticz - bz
accel_static = np.dot(adjustment_matrix,accel_static)
residual = len(accel_staticx)*[0.0]
for i in range (0,len(accel_staticx)):
residual[i] = (local_gravity**2 - (accel_static[0,i]**2 + accel_static[1,i]**2 + accel_static[2,i]**2))
return residual
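# Illustrative note (assumption, not from the original source): accel_resid()
# returns g**2 - |a_calibrated|**2 for each static sample, so it can be handed
# to a least-squares solver, e.g. with lmfit (assuming params is an
# lmfit.Parameters object holding kx..bz):
#   out = lmfit.minimize(accel_resid, params,
#                        args=(accel_x_static, accel_y_static, accel_z_static))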
def gyro_resid(params,GyroData,AccelData,Time):
index = 0
interval_count = len(GyroData.Gyro)
resid = interval_count*[0.0]
for Gyro in GyroData.Gyro:
# Apply calibration of the gyroscope
dt = Time[index]
G = np.array(Gyro)
G_calib = GyroData.applyCalib(params,G.transpose())
R = quaternion_RK4(G_calib,dt)
# Extract gravity vector from accelerometer
a = AccelData.Accel[:,index]
|
Ua = AccelData.Accel[:,index+1]
        # Apply predicted rotation to accelerometer and compare to observed
Ug = np.dot(R,a)
diff = Ua - Ug
# store the magnitude of the difference and update the static interval index
resid[index] = diff[0]**2 + diff[1]**2 + diff[2]**2
index += 1
return resid
#TODO: Move to misc. kinematics
def quaternion_RK4(gyro,dt):
num_samples = gyro.shape[1]
q_k = np.array([1,0,0,0])
# RK loop
for i in range(0,(num_samples-1)):
q1 = q_k
S1 = gyro_cross4(gyro[:,i])
k_1 = 1.0/2.0*np.dot(S1,q1)
q2 = q_k + dt*1.0/2.0*k_1
half_gyro_left = 1.0/2.0*(gyro[:,i] + gyro[:,i+1])
S_half = gyro_cross4(half_gyro_left)
k_2 = 1.0/2.0*np.dot(S_half,q2)
q3 = q_k + dt*1.0/2.0*k_2
k_3 = 1.0/2.0*np.dot(S_half,q3)
q4 = q_k + dt*k_3
S_2 = gyro_cro
|
lidavidm/mathics-heroku
|
venv/lib/python2.7/site-packages/sympy/functions/special/tests/test_gamma_functions.py
|
Python
|
gpl-3.0
| 12,392 | 0.000484 |
from sympy import (
Symbol, gamma, I, oo, nan, zoo, factorial, sqrt, Rational, log,
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin,
cos, O, cancel, lowergamma, exp, erf, beta, exp_polar, harmonic, zeta,
factorial)
from sympy.core.function import ArgumentIndexError
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
test_numerically as tn)
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
def test_gamma():
assert gamma(nan) == nan
assert gamma(oo) == oo
assert gamma(-100) == zoo
assert gamma(0) == zoo
assert gamma(1) == 1
assert gamma(2) == 1
assert gamma(3) == 2
assert gamma(102) == factorial(101)
assert gamma(Rational(1, 2)) == sqrt(pi)
    assert gamma(Rational(3, 2)) == Rational(1, 2)*sqrt(pi)
assert gamma(Rational(5, 2)) == Rational(3, 4)*sqrt(pi)
assert gamma(Rational(7, 2)) == Rational(15, 8)*sqrt(pi)
assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
|
assert gamma(Rational(-3, 2)) == Rational(4, 3)*sqrt(pi)
assert gamma(Rational(-5, 2)) == -Rational(8, 15)*sqrt(pi)
assert gamma(Rational(-15, 2)) == Rational(256, 2027025)*sqrt(pi)
assert gamma(Rational(
-11, 8)).expand(func=True) == Rational(64, 33)*gamma(Rational(5, 8))
assert gamma(Rational(
-10, 3)).expand(func=True) == Rational(81, 280)*gamma(Rational(2, 3))
assert gamma(Rational(
14, 3)).expand(func=True) == Rational(880, 81)*gamma(Rational(2, 3))
assert gamma(Rational(
17, 7)).expand(func=True) == Rational(30, 49)*gamma(Rational(3, 7))
assert gamma(Rational(
19, 8)).expand(func=True) == Rational(33, 64)*gamma(Rational(3, 8))
assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
assert gamma(x - 1).expand(func=True) == gamma(x)/(x - 1)
assert gamma(x + 2).expand(func=True, mul=False) == x*(x + 1)*gamma(x)
assert expand_func(gamma(x + Rational(3, 2))) == \
(x + Rational(1, 2))*gamma(x + Rational(1, 2))
assert expand_func(gamma(x - Rational(1, 2))) == \
gamma(Rational(1, 2) + x)/(x - Rational(1, 2))
# Test a bug:
assert expand_func(gamma(x + Rational(3, 4))) == gamma(x + Rational(3, 4))
assert gamma(3*exp_polar(I*pi)/4).is_nonnegative is False
assert gamma(3*exp_polar(I*pi)/4).is_nonpositive is True
def test_gamma_series():
assert gamma(x + 1).series(x, 0, 3) == \
1 - EulerGamma*x + x**2*(EulerGamma**2/2 + pi**2/12) + O(x**3)
assert gamma(x).series(x, -1, 3) == \
-1/x + EulerGamma - 1 + x*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma) \
+ x**2*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma**3/6 -
polygamma(2, 1)/6 + EulerGamma*pi**2/12 + EulerGamma) + O(x**3)
def tn_branch(s, func):
from sympy import I, pi, exp_polar
from random import uniform
c = uniform(1, 5)
expr = func(s, c*exp_polar(I*pi)) - func(s, c*exp_polar(-I*pi))
eps = 1e-15
expr2 = func(s + eps, -c + eps*I) - func(s + eps, -c - eps*I)
return abs(expr.n() - expr2.n()).n() < 1e-10
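# Illustrative note (not part of the original source): tn_branch() checks branch
# behaviour numerically by computing the jump of func across the negative real
# axis two ways - once with exp_polar(+/-I*pi) arguments and once with points
# displaced just above and below the axis - and requiring the two to agree.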
def test_lowergamma():
from sympy import meijerg, exp_polar, I, expint
assert lowergamma(x, y).diff(y) == y**(x - 1)*exp(-y)
assert td(lowergamma(randcplx(), y), y)
assert lowergamma(x, y).diff(x) == \
gamma(x)*polygamma(0, x) - uppergamma(x, y)*log(y) \
+ meijerg([], [1, 1], [0, 0, x], [], y)
assert lowergamma(S.Half, x) == sqrt(pi)*erf(sqrt(x))
assert not lowergamma(S.Half - 3, x).has(lowergamma)
assert not lowergamma(S.Half + 3, x).has(lowergamma)
assert lowergamma(S.Half, x, evaluate=False).has(lowergamma)
assert tn(lowergamma(S.Half + 3, x, evaluate=False),
lowergamma(S.Half + 3, x), x)
assert tn(lowergamma(S.Half - 3, x, evaluate=False),
lowergamma(S.Half - 3, x), x)
assert lowergamma(x, y).rewrite(uppergamma) == gamma(x) - uppergamma(x, y)
assert tn_branch(-3, lowergamma)
assert tn_branch(-4, lowergamma)
assert tn_branch(S(1)/3, lowergamma)
assert tn_branch(pi, lowergamma)
assert lowergamma(3, exp_polar(4*pi*I)*x) == lowergamma(3, x)
assert lowergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*lowergamma(y, x*exp_polar(pi*I))
assert lowergamma(-2, exp_polar(5*pi*I)*x) == \
lowergamma(-2, x*exp_polar(I*pi)) + 2*pi*I
assert lowergamma(
x, y).rewrite(expint) == -y**x*expint(-x + 1, y) + gamma(x)
k = Symbol('k', integer=True)
assert lowergamma(
k, y).rewrite(expint) == -y**k*expint(-k + 1, y) + gamma(k)
k = Symbol('k', integer=True, positive=False)
assert lowergamma(k, y).rewrite(expint) == lowergamma(k, y)
def test_uppergamma():
from sympy import meijerg, exp_polar, I, expint
assert uppergamma(4, 0) == 6
assert uppergamma(x, y).diff(y) == -y**(x - 1)*exp(-y)
assert td(uppergamma(randcplx(), y), y)
assert uppergamma(x, y).diff(x) == \
uppergamma(x, y)*log(y) + meijerg([], [1, 1], [0, 0, x], [], y)
assert td(uppergamma(x, randcplx()), x)
assert uppergamma(S.Half, x) == sqrt(pi)*(1 - erf(sqrt(x)))
assert not uppergamma(S.Half - 3, x).has(uppergamma)
assert not uppergamma(S.Half + 3, x).has(uppergamma)
assert uppergamma(S.Half, x, evaluate=False).has(uppergamma)
assert tn(uppergamma(S.Half + 3, x, evaluate=False),
uppergamma(S.Half + 3, x), x)
assert tn(uppergamma(S.Half - 3, x, evaluate=False),
uppergamma(S.Half - 3, x), x)
assert uppergamma(x, y).rewrite(lowergamma) == gamma(x) - lowergamma(x, y)
assert tn_branch(-3, uppergamma)
assert tn_branch(-4, uppergamma)
assert tn_branch(S(1)/3, uppergamma)
assert tn_branch(pi, uppergamma)
assert uppergamma(3, exp_polar(4*pi*I)*x) == uppergamma(3, x)
assert uppergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*uppergamma(y, x*exp_polar(pi*I)) + \
gamma(y)*(1 - exp(4*pi*I*y))
assert uppergamma(-2, exp_polar(5*pi*I)*x) == \
uppergamma(-2, x*exp_polar(I*pi)) - 2*pi*I
assert uppergamma(-2, x) == expint(3, x)/x**2
assert uppergamma(x, y).rewrite(expint) == y**x*expint(-x + 1, y)
def test_polygamma():
from sympy import I
assert polygamma(n, nan) == nan
assert polygamma(0, oo) == oo
assert polygamma(1, oo) == 0
assert polygamma(5, oo) == 0
assert polygamma(0, -9) == zoo
assert polygamma(0, -9) == zoo
assert polygamma(0, -1) == zoo
assert polygamma(0, 0) == zoo
assert polygamma(0, 1) == -EulerGamma
assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
assert polygamma(1, 1) == pi**2/6
assert polygamma(1, 2) == pi**2/6 - 1
assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
assert polygamma(3, 1) == pi**4 / 15
assert polygamma(3, 5) == 6*(Rational(-22369, 20736) + pi**4/90)
assert polygamma(5, 1) == 8 * pi**6 / 63
def t(m, n):
x = S(m)/n
r = polygamma(0, x)
if r.has(polygamma):
return False
return abs(polygamma(0, x.n()).n() - r.n()).n() < 1e-10
assert t(1, 2)
assert t(3, 2)
assert t(-1, 2)
assert t(1, 4)
assert t(-3, 4)
assert t(1, 3)
assert t(4, 3)
assert t(3, 4)
assert t(2, 3)
assert polygamma(0, x).rewrite(zeta) == polygamma(0, x)
assert polygamma(1, x).rewrite(zeta) == zeta(2, x)
assert polygamma(2, x).rewrite(zeta) == -2*zeta(3, x)
assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
assert polygamma(0, x).rewrite(harmonic) == harmonic(x - 1) - EulerGamma
assert polygamma(2, x).rewrite(harmonic) == 2*harmonic(x - 1, 3) - 2*zeta(3)
ni = Symbol("n", integer=True)
assert polygamma(ni, x).rewrite(harmonic) == (-1)**(ni + 1)*(-harmonic(x - 1, ni + 1)
+ zeta(ni + 1))*factorial(ni)
# Polygamma of non-negative integer order is unbranched:
from sympy import exp_polar
k = Symbol('n', integer=True, nonnegative=True)
assert polygamma(k,
|
obi-two/Rebelion
|
data/scripts/templates/object/building/lok/shared_mining_cave_01.py
|
Python
|
mit
| 440 | 0.047727 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	result = Building()
result.template = "object/building/lok/shared_mining_cave_01.iff"
result.attribute_template_id = -1
result.stfName("building_name","cave")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
privacyidea/privacyidea
|
tests/ldap3mock.py
|
Python
|
agpl-3.0
| 28,972 | 0.002106 |
# -*- coding: utf-8 -*-
"""
2020-09-07 Cornelius Kölbel <cornelius.koelbel@netknights.it>
Add exception
2017-04-26 Friedrich Weber <friedrich.weber@netknights.it>
Make it possible to check for correct LDAPS/STARTTLS settings
2017-01-08 Cornelius Kölbel <cornelius.koelbel@netknights.it>
Remove objectGUID. Since we stick with ldap3 version 2.1,
the objectGUID is returned in a human readable format.
2016-12-05 Martin Wheldon <martin.wheldon@greenhills-it.co.uk>
Fixed issue creating ldap entries with objectClasses defined
Fix problem when searching for attribute values containing the
space character.
2016-05-26 Martin Wheldon <martin.wheldon@greenhills-it.co.uk>
Rewrite of search functionality to add recursive parsing
of ldap search filters
Fixed issue searching for attributes with multiple values
Added ability to use ~= in searches
Created unittests for mock
2016-02-19 Cornelius Kölbel <cornelius.koelbel@netknights.it>
Add the possibility to check objectGUID
2015-01-31 Change responses.py to be able to run with SMTP
Cornelius Kölbel <cornelius@privacyidea.org>
Original responses.py is:
Copyright 2013 Dropbox, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import (
    absolute_import, division, unicode_literals
)
from passlib.hash import ldap_salted_sha1
from ast import literal_eval
import uuid
from ldap3.utils.conv import escape_bytes
import ldap3
import re
import pyparsing
from .smtpmock import get_wrapped
from collections import namedtuple, Sequence, Sized
from privacyidea.lib.utils import to_bytes, to_unicode
DIRECTORY = "tests/testdata/tmp_directory"
Call = namedtuple('Call', ['request', 'response'])
_wrapper_template = """\
def wrapper%(signature)s:
with ldap3mock:
return func%(funcargs)s
"""
def _convert_objectGUID(item):
item = uuid.UUID("{{{0!s}}}".format(item)).bytes_le
item = escape_bytes(item)
return item
class CallList(Sequence, Sized):
def __init__(self):
self._calls = []
def __iter__(self):
return iter(self._calls)
def __len__(self):
return len(self._calls)
def __getitem__(self, idx):
return self._calls[idx]
def setdata(self, request, response):
self._calls.append(Call(request, response))
def reset(self):
self._calls = []
class Connection(object):
class Extend(object):
class Standard(object):
def __init__(self, connection):
self.connection = connection
def paged_search(self, **kwargs):
self.connection.search(search_base=kwargs.get("search_base"),
search_scope=kwargs.get("search_scope"),
search_filter=kwargs.get(
"search_filter"),
attributes=kwargs.get("attributes"),
paged_size=kwargs.get("page_size"),
size_limit=kwargs.get("size_limit"),
paged_cookie=None)
result = self.connection.response
if kwargs.get("generator", False):
# If ``generator=True`` is passed, ``paged_search`` should return an iterator.
result = iter(result)
return result
def __init__(self, connection):
self.standard = self.Standard(connection)
def __init__(self, directory=None):
if directory is None:
directory = []
import copy
self.directory = copy.deepcopy(directory)
self.bound = False
self.start_tls_called = False
self.extend = self.Extend(self)
self.operation = {
"!" : self._search_not,
"&" : self._search_and,
"|" : self._search_or,
}
def set_directory(self, directory):
self.directory = directory
def _find_user(self, dn):
return next(i for (i, d) in enumerate(self.directory) if d["dn"] == dn)
@staticmethod
def open(read_server_info=True):
return
def bind(self, read_server_info=True):
return self.bound
def start_tls(self, read_server_info=True):
self.start_tls_called = True
def add(self, dn, object_class=None, attributes=None):
self.result = { 'dn' : '',
'referrals' : None,
'description' : 'success',
'result' : 0,
'message' : '',
'type' : 'addResponse'}
# Check to see if the user exists in the directory
try:
index = self._find_user(dn)
except StopIteration:
# If we get here the user doesn't exist so continue
# Create a entry object for the new user
entry = {}
entry['dn'] = dn
entry['attributes'] = attributes
if object_class != None:
entry['attributes'].update( {'objectClass': object_class} )
else:
# User already exists
self.result["description"] = "failure"
self.result["result"] = 68
self.result["message"] = \
"Error entryAlreadyExists for {0}".format(dn)
return False
# Add the user entry to the directory
self.directory.append(entry)
# Attempt to write changes to disk
with open(DIRECTORY, 'w+') as f:
f.write(str(self.directory))
return True
def delete(self, dn, controls=None):
self.result = { 'dn' : '',
'referrals' : None,
'description' : 'success',
'result' : 0,
'message' : '',
'type' : 'addResponse'}
# Check to see if the user exists in the directory
try:
index = self._find_user(dn)
except StopIteration:
# If we get here the user doesn't exist so continue
self.result["description"] = "failure"
self.result["result"] = 32
self.result["message"] = "Error no such object: {0}".format(dn)
return False
# Delete the entry object for the user
self.directory.pop(index)
# Attempt to write changes to disk
with open(DIRECTORY, 'w+') as f:
f.write(str(self.directory))
return True
def modify(self, dn, changes, controls=None):
self.result = { 'dn' : '',
'referrals' : None,
'description' : 'success',
'result' : 0,
'message' : '',
'type' : 'modifyResponse'}
# Check to see if the user exists in the directory
try:
index = self._find_user(dn)
except StopIteration:
# If we get here the user doesn't exist so continue
self.result["description"] = "failure"
self.result["result"] = 32
self.result["message"] = "Error no such object: {0!s}".format(dn)
return False
# extract the hash we are interested in
entry = self.directory[index].get("attributes")
# Loop over the changes hash and apply them
for k, v in changes.items():
if v[0] == "MODIFY_DELETE":
entry.pop(k)
elif v[0] == "MOD
|
Bergurth/aes_cmdl.py
|
aes_cmdl.py
|
Python
|
gpl-3.0
| 2,518 | 0.004369 |
import os, random, struct, sys
from Crypto.Cipher import AES
import getpass
from optparse import OptionParser
import hashlib
parser = OptionParser()
parser.add_option("-p")
(options, args) = parser.parse_args()
if(len(sys.argv) < 2):
print "usage: python aes_cmdl.py input_file_name <output_file_name> -p <password>"
sys.exit()
in_file = sys.argv[1]
if(len(sys.argv) == 3):
    out_file = sys.argv[2]
    no_out_file = False
else:
    no_out_file = True
out_filename = in_file + '3125-9680.enc'
cwd = os.getcwd()
if(options.p):
password = options.p
else:
#password = raw_input("please specify your password")
password = getpass.getpass("please specify your password")
key = hashlib.sha256(password).digest()
def encrypt_file(key, in_filename, out_filename=None, chunksize=64*1024):
""" Encrypts a file using AES (CBC mode) with the
|
given key.
key:
The encryption key - a string that must be
either 16, 24 or 32 bytes long. Longer keys
are more secure.
in_filename:
            Name of the input file
out_filename:
If None, '<in_filename>.enc' will be used.
chunksize:
Sets the size of the chunk which the function
uses to read and encrypt the file. Larger chunk
sizes can be faster for some files and machines.
chunksize must be divisible by 16.
"""
if not out_filename:
#no_out = True
out_filename = in_filename + '3125-9680.enc'
iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))
encryptor = AES.new(key, AES.MODE_CBC, iv)
filesize = os.path.getsize(in_filename)
with open(in_filename, 'rb') as infile:
with open(out_filename, 'wb') as outfile:
outfile.write(struct.pack('<Q', filesize))
outfile.write(iv)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += ' ' * (16 - len(chunk) % 16)
outfile.write(encryptor.encrypt(chunk))
encrypt_file(key, in_file)
with open(cwd + "/" + out_filename,"r") as f:
#minlen = 12
for line in f:
sys.stdout.write(line)
if(no_out_file):
if sys.platform.startswith("linux"):
os.system("shred "+ cwd + "/" + out_filename)
os.remove(cwd + "/" + out_filename)
else:
os.remove(cwd + "/" + out_filename)
sys.exit(0)
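# Illustrative counterpart (assumption, not part of the original script): the
# file written above is 8-byte little-endian filesize, then the 16-byte IV,
# then CBC ciphertext, so a matching decryptor would look roughly like:
#   with open(enc_name, 'rb') as f:
#       origsize = struct.unpack('<Q', f.read(struct.calcsize('Q')))[0]
#       iv = f.read(16)
#       decryptor = AES.new(key, AES.MODE_CBC, iv)
#       with open(out_name, 'wb') as out:
#           while True:
#               chunk = f.read(64 * 1024)
#               if len(chunk) == 0:
#                   break
#               out.write(decryptor.decrypt(chunk))
#           out.truncate(origsize)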
|
mbareta/edx-platform-ft
|
lms/djangoapps/ccx/migrations/0027_merge.py
|
Python
|
agpl-3.0
| 289 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ccx', '0026_auto_20170831_0420'),
('ccx', '0026_auto_20170831_0554'),
]
operations = [
]
| |
gregpuzzles1/Sandbox
|
htmllib-example-1.py
|
Python
|
gpl-3.0
| 812 | 0.002463 |
# File: htmllib-example-1.py
import htmllib
import formatter
import string
class Parser(htmllib.HTMLParser):
    # return a dictionary mapping anchor texts to lists
# of associated hyperlinks
def __init__(self, verbose=0):
self.anchors = {}
f = formatter.NullFormatter()
htmllib.HTMLParser.__init__(self, f, verbose)
def anchor_bgn(self, href, name, type):
self.save_bgn()
        self.anchor = href
def anchor_end(self):
text = string.strip(self.save_end())
if self.anchor and text:
            self.anchors[text] = self.anchors.get(text, []) + [self.anchor]
file = open("contemplate_his_majestic_personhood.html")
html = file.read()
file.close()
p = Parser()
p.feed(html)
p.close()
for k, v in p.anchors.items():
print k, "=>", v
print
|
lizardsystem/lizard-damage
|
lizard_damage/migrations/0004_auto__del_field_geoimage_name__add_field_damageevent_landuse_slugs__ad.py
|
Python
|
gpl-3.0
| 10,066 | 0.007451 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeoImage.name'
db.delete_column('lizard_damage_geoimage', 'name')
# Adding field 'DamageEvent.landuse_slugs'
db.add_column('lizard_damage_damageevent', 'landuse_slugs', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Adding field 'DamageEvent.height_slugs'
db.add_column('lizard_damage_damageevent', 'height_slugs', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding field 'GeoImage.name'
db.add_column('lizard_damage_geoimage', 'name', self.gf('django.db.models.fields.CharField')(default='name', max_length=80), keep_default=False)
# Deleting field 'DamageEvent.landuse_slugs'
db.delete_column('lizard_damage_damageevent', 'landuse_slugs')
# Deleting field 'DamageEvent.height_slugs'
db.delete_column('lizard_damage_damageevent', 'height_slugs')
models = {
'lizard_damage.ahnindex': {
'Meta': {'object_name': 'AhnIndex', 'db_table': "u'data_index'"},
'ar': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bladnr': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'cellsize': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'datum': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'lo_x': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'lo_y': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'max_datum': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'min_datum': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'the_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '28992', 'null': 'True', 'blank': 'True'}),
'update': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_damage.benefitscenario': {
'Meta': {'object_name': 'BenefitScenario'},
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'zip_result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zip_risk_a': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'zip_risk_b': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'lizard_damage.benefitscenarioresult': {
'Meta': {'object_name': 'BenefitScenarioResult'},
'benefit_scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.BenefitScenario']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.damageevent': {
'Meta': {'object_name': 'DamageEvent'},
'floodmonth': ('django.db.models.fields.IntegerField', [], {'default': '9'}),
'floodtime': ('django.db.models.fields.FloatField', [], {}),
'height_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'landuse_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'repairtime_buildings': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repairtime_roads': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repetition_time': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'table': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_damage.damageeventresult': {
'Meta': {'object_name': 'DamageEventResult'},
'damage_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.damageeventwaterlevel': {
'Meta': {'ordering': "(u'index',)", 'object_name': 'DamageEventWaterlevel'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'waterlevel': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'lizard_damage.damagescenario': {
'Meta': {'object_name': 'DamageScenario'},
'calc_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'damagetable': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scenario_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdecore/KTzfileTimeZone.py
|
Python
|
gpl-2.0
| 414 | 0.009662 |
# encoding: utf-8
|
# module PyKDE4.kdecore
# from /usr/lib/python3/dist-packages/PyKDE4/kdecore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtNetwork as __PyQt4_QtNetwork
from .KTimeZone import KTimeZone
class KTzfileTimeZone(KTimeZone):
|
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
|
gxx/auto_pull_request
|
auto_pull_request/plugins/pep8_info.py
|
Python
|
gpl-2.0
| 1,315 | 0.001521 |
# coding=utf-8
"""Auto pull request pep8 plugin"""
import subprocess
from git import Repo
from . import MASTER_BRANCH
from .base import AutoPullRequestPluginInterface, section_order
from ..nodes import NumberedList, DescriptionNode, CodeNode, NodeList, HeaderNode
class Pep8Plugin(AutoPullRequestPluginInterface):
def _get_diff_against_master(self):
repo = Repo('.git')
return repo.git.diff(MASTER_BRANCH)
def _get_pep8_compliance(self, diff):
process = subprocess.Popen(['pep8', '--diff'], stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = process.communicate(diff)
if errors:
            raise Exception(errors)
else:
return filter(None, output.strip().split('\n'))
@section_order(10)
def section_pep8_standards_compliance(self):
diff = self._get_diff_against_master()
pep8_compliance = self._get_pep8_compliance(diff)
if pep8_compliance:
value = NodeList([
HeaderNode('%d pep8 errors' % len(pep8_compliance), level=4),
NumberedList([CodeNode(item) for item in pep8_compliance])
])
else:
value = DescriptionNode('100% pep8 compliant!')
return value
|
amarquand/nispat
|
pcntoolkit/model/rfa.py
|
Python
|
gpl-3.0
| 7,985 | 0.007013 |
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
class GPRRFA:
"""Random Feature Approximation for Gaussian Process Regression
Estimation and prediction of Bayesian linear regression models
Basic usage::
R = GPRRFA()
hyp = R.estimate(hyp0, X, y)
ys,s2 = R.predict(hyp, X, y, Xs)
where the variables are
:param hyp: vector of hyperparmaters.
:param X: N x D data array
:param y: 1D Array of targets (length N)
:param Xs: Nte x D array of test cases
:param hyp0: starting estimates for hyperparameter optimisation
:returns: * ys - predictive mean
* s2 - predictive variance
The hyperparameters are::
hyp = [ log(sn), log(ell), log(sf) ] # hyp is a numpy array
where sn^2 is the noise variance, ell are lengthscale parameters and
sf^2 is the signal variance. This provides an approximation to the
covariance function::
k(x,z) = x'*z + sn2*exp(0.5*(x-z)'*Lambda*(x-z))
where Lambda = diag((ell_1^2, ... ell_D^2))
Written by A. Marquand
"""
def __init__(self, hyp=None, X=None, y=None, n_feat=None,
n_iter=100, tol=1e-3, verbose=False):
self.hyp = np.nan
self.nlZ = np.nan
self.tol = tol # not used at present
self.Nf = n_feat
self.n_iter = n_iter
self.verbose = verbose
self._n_restarts = 5
        if (hyp is not None) and (X is not None) and (y is not None):
self.post(hyp, X, y)
    def _numpy2torch(self, X, y=None, hyp=None):
if type(X) is torch.Tensor:
pass
elif type(X) is np.ndarray:
X = torch.from_numpy(X)
else:
            raise ValueError('Unknown data type (X)')
X = X.double()
if y is not None:
if type(y) is torch.Tensor:
pass
elif type(y) is np.ndarray:
y = torch.from_numpy(y)
else:
                raise ValueError('Unknown data type (y)')
if len(y.shape) == 1:
y.resize_(y.shape[0],1)
y = y.double()
if hyp is not None:
if type(hyp) is torch.Tensor:
pass
else:
hyp = torch.tensor(hyp, requires_grad=True)
return X, y, hyp
def get_n_params(self, X):
return X.shape[1] + 2
def post(self, hyp, X, y):
""" Generic function to compute posterior distribution.
This function will save the posterior mean and precision matrix as
self.m and self.A and will also update internal parameters (e.g.
N, D and the prior covariance (Sigma) and precision (iSigma).
"""
# make sure all variables are the right type
X, y, hyp = self._numpy2torch(X, y, hyp)
self.N, self.Dx = X.shape
# ensure the number of features is specified (use 75% as a default)
if self.Nf is None:
self.Nf = int(0.75 * self.N)
self.Omega = torch.zeros((self.Dx, self.Nf), dtype=torch.double)
for f in range(self.Nf):
self.Omega[:,f] = torch.exp(hyp[1:-1]) * \
torch.randn((self.Dx, 1), dtype=torch.double).squeeze()
XO = torch.mm(X, self.Omega)
self.Phi = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \
torch.cat((torch.cos(XO), torch.sin(XO)), 1)
# concatenate linear weights
self.Phi = torch.cat((self.Phi, X), 1)
self.D = self.Phi.shape[1]
if self.verbose:
print("estimating posterior ... | hyp=", hyp)
self.A = torch.mm(torch.t(self.Phi), self.Phi) / torch.exp(2*hyp[0]) + \
torch.eye(self.D, dtype=torch.double)
self.m = torch.mm(torch.solve(torch.t(self.Phi), self.A)[0], y) / \
torch.exp(2*hyp[0])
# save hyperparameters
self.hyp = hyp
# update optimizer iteration count
if hasattr(self,'_iterations'):
self._iterations += 1
def loglik(self, hyp, X, y):
""" Function to compute compute log (marginal) likelihood """
X, y, hyp = self._numpy2torch(X, y, hyp)
# always recompute the posterior
self.post(hyp, X, y)
#logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A))))
try:
# compute the log determinants in a numerically stable way
logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A))))
except Exception as e:
print("Warning: Estimation of posterior distribution failed")
print(e)
#nlZ = torch.tensor(1/np.finfo(float).eps)
nlZ = torch.tensor(np.nan)
self._optim_failed = True
return nlZ
# compute negative marginal log likelihood
nlZ = -0.5 * (self.N*torch.log(1/torch.exp(2*hyp[0])) -
self.N*np.log(2*np.pi) -
torch.mm(torch.t(y - torch.mm(self.Phi,self.m)),
(y - torch.mm(self.Phi,self.m))) /
torch.exp(2*hyp[0]) -
torch.mm(torch.t(self.m), self.m) - logdetA)
if self.verbose:
print("nlZ= ", nlZ, " | hyp=", hyp)
# save marginal likelihood
self.nlZ = nlZ
return nlZ
def dloglik(self, hyp, X, y):
""" Function to compute derivatives """
print("derivatives not available")
return
def estimate(self, hyp0, X, y, optimizer='lbfgs'):
""" Function to estimate the model """
if type(hyp0) is torch.Tensor:
hyp = hyp0
hyp0.requires_grad_()
else:
hyp = torch.tensor(hyp0, requires_grad=True)
# save the starting values
self.hyp0 = hyp
if optimizer.lower() == 'lbfgs':
opt = torch.optim.LBFGS([hyp])
else:
raise(ValueError, "Optimizer " + " not implemented")
self._iterations = 0
def closure():
opt.zero_grad()
nlZ = self.loglik(hyp, X, y)
if not torch.isnan(nlZ):
nlZ.backward()
return nlZ
for r in range(self._n_restarts):
self._optim_failed = False
nlZ = opt.step(closure)
if self._optim_failed:
print("optimization failed. retrying (", r+1, "of",
self._n_restarts,")")
hyp = torch.randn_like(hyp, requires_grad=True)
self.hyp0 = hyp
else:
print("Optimzation complete after", self._iterations,
"evaluations. Function value =",
nlZ.detach().numpy().squeeze())
break
return self.hyp.detach().numpy()
def predict(self, hyp, X, y, Xs):
""" Function to make predictions from the model """
X, y, hyp = self._numpy2torch(X, y, hyp)
Xs, *_ = self._numpy2torch(Xs)
if (hyp != self.hyp).all() or not(hasattr(self, 'A')):
self.post(hyp, X, y)
# generate prediction tensors
XsO = torch.mm(Xs, self.Omega)
Phis = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \
torch.cat((torch.cos(XsO), torch.sin(XsO)), 1)
# add linear component
Phis = torch.cat((Phis, Xs), 1)
ys = torch.mm(Phis, self.m)
# compute diag(Phis*(Phis'\A)) avoiding computing off-diagonal entries
s2 = torch.exp(2*hyp[0]) + \
torch.sum(Phis * torch.t(torch.solve(torch.t(Phis), self.A)[0]), 1)
# return output as numpy arrays
return ys.detach().numpy().squeeze(), s2.detach().numpy().squeeze()
|
zeickan/Django-Store
|
store/api.py
|
Python
|
apache-2.0
| 3,203 | 0.029044 |
# -*- coding: utf-8 -*-
from django.http import HttpResponse, HttpRequest, QueryDict, HttpResponseRedirect
import json
import conekta
from store.models import *
from store.forms import *
### PETICIONES API PARA EL CARRITO
def delBasket(request):
id = str(request.GET.get('id'))
if request.GET.get('id'):
liston = request.session['basket']
if id in liston:
liston.remove(id)
request.session['basket'] = liston
msg = 'Success'
status = 'ok'
else:
msg = 'Error: Product not found in basket'
status = 'failed'
else:
msg = "Success"
status = 'ok'
try:
del request.session['basket']
except KeyError:
msg = 'Error: Cant delete basket'
status = 'failed'
"""
response_data = {}
response_data['result'] = status
response_data['message'] = msg
callback = request.GET.get('callback', '')
response = json.dumps(response_data)
response = callback + '(' + response + ');'
return HttpResponse(response,content_type="application/json")
"""
return HttpResponseRedirect("/store/checkout/")
def setBasket(request):
id = str(request.GET.get('id'))
if id.isdigit():
if request.session.get('basket',False):
# Se ha definido anteriormente
liston = request.session['basket']
if id in liston:
msg = 'Error: product already exists'
status = 'failed'
else:
liston.append(id)
request.session['basket'] = liston
msg = 'Success'
status = 'ok'
else:
# No se ha definido
msg = 'Success'
status = 'ok'
request.session['basket'] = [id]
else:
msg = 'Error en la peticion'
status = 'failed'
response_data = {}
response_data['result'] = status
response_data['message'] = msg
callback = request.GET.get('callback', '')
response = json.dumps(response_data)
|
response = callback + '(' + response + ');'
    return HttpResponse(response,content_type="application/json")
import pprint
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def conektaio(request):
try:
data = json.loads(request.body)
except:
data = False
if data:
try:
pedido = Pedido.objects.get(custom=data['data']['object']['reference_id'])
except:
pedido = False
if pedido:
dato = { "status": "success" ,"id": pedido.id, "nombre":pedido.payment }
if data['data']['object']['status'] == "paid":
pedido.paid=True
pedido.save()
numero = 200
else:
debug = Debug.objects.create(texto=data)
debug.save()
dato = { "status":"ergo" }
numero = 200
else:
dato = { "status":"error" }
numero = 400
return HttpResponse(dato['status'],content_type="application/json",status=numero)
#### END API
|
larsbutler/swift
|
swift/container/replicator.py
|
Python
|
apache-2.0
| 12,083 | 0 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import itertools
import json
import time
from collections import defaultdict
from eventlet import Timeout
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
get_reconciler_container_name, get_row_to_q_entry_translator)
from swift.common import db_replicator
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import DeviceUnavailable
from swift.common.http import is_success
from swift.common.db import DatabaseAlreadyExists
from swift.common.utils import (Timestamp, hash_path,
storage_directory, majority_size)
class ContainerReplicator(db_replicator.Replicator):
server_type = 'container'
brokerclass = ContainerBroker
datadir = DATADIR
default_port = 6201
def report_up_to_date(self, full_info):
reported_key_map = {
'reported_put_timestamp': 'put_timestamp',
'reported_delete_timestamp': 'delete_timestamp',
'reported_bytes_used': 'bytes_used',
'reported_object_count': 'count',
}
for reported, value_key in reported_key_map.items():
if full_info[reported] != full_info[value_key]:
return False
return True
def _gather_sync_args(self, replication_info):
parent = super(ContainerReplicator, self)
        sync_args = parent._gather_sync_args(replication_info)
if len(POLICIES) > 1:
sync_args += tuple(replication_info[k] for k in
('status_changed_at', 'count',
'storage_policy_index'))
return sync_args
    def _handle_sync_response(self, node, response, info, broker, http,
different_region):
parent = super(ContainerReplicator, self)
if is_success(response.status):
remote_info = json.loads(response.data)
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp(time.time())
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at.internal)
sync_timestamps = ('created_at', 'put_timestamp',
'delete_timestamp')
if any(info[key] != remote_info[key] for key in sync_timestamps):
broker.merge_timestamps(*(remote_info[key] for key in
sync_timestamps))
rv = parent._handle_sync_response(
node, response, info, broker, http, different_region)
return rv
def find_local_handoff_for_part(self, part):
"""
Look through devices in the ring for the first handoff device that was
identified during job creation as available on this node.
:returns: a node entry from the ring
"""
nodes = self.ring.get_part_nodes(part)
more_nodes = self.ring.get_more_nodes(part)
for node in itertools.chain(nodes, more_nodes):
if node['id'] in self._local_device_ids:
return node
return None
def get_reconciler_broker(self, timestamp):
"""
Get a local instance of the reconciler container broker that is
appropriate to enqueue the given timestamp.
:param timestamp: the timestamp of the row to be enqueued
:returns: a local reconciler broker
"""
container = get_reconciler_container_name(timestamp)
if self.reconciler_containers and \
container in self.reconciler_containers:
return self.reconciler_containers[container][1]
account = MISPLACED_OBJECTS_ACCOUNT
part = self.ring.get_part(account, container)
node = self.find_local_handoff_for_part(part)
if not node:
raise DeviceUnavailable(
'No mounted devices found suitable to Handoff reconciler '
'container %s in partition %s' % (container, part))
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = os.path.join(self.root, node['device'], db_dir, hsh + '.db')
broker = ContainerBroker(db_path, account=account, container=container)
if not os.path.exists(broker.db_file):
try:
broker.initialize(timestamp, 0)
except DatabaseAlreadyExists:
pass
if self.reconciler_containers is not None:
self.reconciler_containers[container] = part, broker, node['id']
return broker
def feed_reconciler(self, container, item_list):
"""
Add queue entries for rows in item_list to the local reconciler
container database.
:param container: the name of the reconciler container
:param item_list: the list of rows to enqueue
:returns: True if successfully enqueued
"""
try:
reconciler = self.get_reconciler_broker(container)
except DeviceUnavailable as e:
self.logger.warning('DeviceUnavailable: %s', e)
return False
self.logger.debug('Adding %d objects to the reconciler at %s',
len(item_list), reconciler.db_file)
try:
reconciler.merge_items(item_list)
except (Exception, Timeout):
self.logger.exception('UNHANDLED EXCEPTION: trying to merge '
'%d items to reconciler container %s',
len(item_list), reconciler.db_file)
return False
return True
def dump_to_reconciler(self, broker, point):
"""
Look for object rows for objects updates in the wrong storage policy
in broker with a ``ROWID`` greater than the rowid given as point.
:param broker: the container broker with misplaced objects
:param point: the last verified ``reconciler_sync_point``
:returns: the last successful enqueued rowid
"""
max_sync = broker.get_max_row()
misplaced = broker.get_misplaced_since(point, self.per_diff)
if not misplaced:
return max_sync
translator = get_row_to_q_entry_translator(broker)
errors = False
low_sync = point
while misplaced:
batches = defaultdict(list)
for item in misplaced:
container = get_reconciler_container_name(item['created_at'])
batches[container].append(translator(item))
for container, item_list in batches.items():
success = self.feed_reconciler(container, item_list)
if not success:
errors = True
point = misplaced[-1]['ROWID']
if not errors:
low_sync = point
misplaced = broker.get_misplaced_since(point, self.per_diff)
return low_sync
def _post_replicate_hook(self, broker, info, responses):
if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
return
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s' %
broker.db_file)
point = broker.get_reconciler_sync()
if not broker.has_multiple_policies() and info['max_row'] != point:
|
vCentre/vFRP-6233
|
frappe/desk/page/messages/messages.py
|
Python
|
mit
| 3,886 | 0.024447 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.notifications import delete_notification_count_for
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.utils.user import get_enabled_system_users
from frappe.utils import cint
@frappe.whitelist()
def get_list(arg=None):
"""get list of messages"""
frappe.form_dict['limit_start'] = int(frappe.form_dict['limit_start'])
frappe.form_dict['limit_page_length'] = int(frappe.form_dict['limit_page_length'])
frappe.form_dict['user'] = frappe.session['user']
# set all messages as read
frappe.db.begin()
frappe.db.sql("""UPDATE `tabCommunication` set seen = 1
where
communication_type in ('Chat', 'Notification')
and reference_doctype = 'User'
and reference_name = %s""", frappe.session.user)
delete_notification_count_for("Messages")
frappe.local.flags.commit = True
	if frappe.form_dict['contact'] == frappe.session['user']:
# return messages
return frappe.db.sql("""select * from `tabCommunication`
where
communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and (owner=%(contact)s
or reference_name=%(user)s
or owner=reference_name)
order by creation desc
			limit %(limit_start)s, %(limit_page_length)s
			""", frappe.local.form_dict, as_dict=1)
else:
return frappe.db.sql("""select * from `tabCommunication`
where
communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and ((owner=%(contact)s and reference_name=%(user)s)
or (owner=%(contact)s and reference_name=%(contact)s))
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
@frappe.whitelist()
def get_active_users():
data = frappe.db.sql("""select name,
(select count(*) from tabSessions where user=tabUser.name
and timediff(now(), lastupdate) < time("01:00:00")) as has_session
from tabUser
where enabled=1 and
ifnull(user_type, '')!='Website User' and
name not in ({})
order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
# make sure current user is at the top, using has_session = 100
users = [d.name for d in data]
if frappe.session.user in users:
data[users.index(frappe.session.user)]["has_session"] = 100
else:
# in case of administrator
data.append({"name": frappe.session.user, "has_session": 100})
return data
@frappe.whitelist()
def post(txt, contact, parenttype=None, notify=False, subject=None):
"""post message"""
d = frappe.new_doc('Communication')
d.communication_type = 'Notification' if parenttype else 'Chat'
d.subject = subject
d.content = txt
d.reference_doctype = 'User'
d.reference_name = contact
d.sender = frappe.session.user
d.insert(ignore_permissions=True)
delete_notification_count_for("Messages")
if notify and cint(notify):
if contact==frappe.session.user:
_notify([user.name for user in get_enabled_system_users()], txt)
else:
_notify(contact, txt, subject)
return d
@frappe.whitelist()
def delete(arg=None):
frappe.get_doc("Communication", frappe.form_dict['name']).delete()
def _notify(contact, txt, subject=None):
from frappe.utils import get_fullname, get_url
try:
if not isinstance(contact, list):
contact = [frappe.db.get_value("User", contact, "email") or contact]
frappe.sendmail(\
recipients=contact,
sender= frappe.db.get_value("User", frappe.session.user, "email"),
subject=subject or "New Message from " + get_fullname(frappe.session.user),
message=frappe.get_template("templates/emails/new_message.html").render({
"from": get_fullname(frappe.session.user),
"message": txt,
"link": get_url()
}),
bulk=True)
except frappe.OutgoingEmailError:
pass
|
koery/win-sublime
|
Data/Packages/Package Control/package_control/commands/add_channel_command.py
|
Python
|
mit
| 1,328 | 0.003012 |
import re
import sublime
import sublime_plugin
from ..show_error import show_error
from ..settings import pc_settings_filename
class AddChannelCommand(sublime_plugin.WindowCommand):
"""
A command to add a new channel (list of repositories) to the user's machine
"""
def run(self):
self.window.show_input_panel('Channel JSON URL', '',
self.on_done, self.on_change, self.on_cancel)
def on_done(self, input):
"""
        Input panel handler - adds the provided URL as a channel
:param input:
            A string of the URL to the new channel
"""
input = input.strip()
if re.match('https?://', input, re.I) == None:
show_error(u"Unable to add the channel \"%s\" since it does not appear to be served via HTTP (http:// or https://)." % input)
return
settings = sublime.load_settings(pc_settings_filename())
channels = settings.get('channels', [])
if not channels:
channels = []
channels.append(input)
settings.set('channels', channels)
sublime.save_settings(pc_settings_filename())
sublime.status_message(('Channel %s successfully ' +
'added') % input)
def on_change(self, input):
pass
def on_cancel(self):
pass
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/absl/logging/__init__.py
|
Python
|
mit
| 35,420 | 0.007143 |
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abseil Python logging module implemented on top of standard logging.
Simple usage:
from absl import logging
logging.info('Interesting Stuff')
logging.info('Interesting Stuff with Arguments: %d', 42)
logging.set_verbosity(logging.INFO)
logging.log(logging.DEBUG, 'This will *not* be printed')
logging.set_verbosity(logging.DEBUG)
logging.log(logging.DEBUG, 'This will be printed')
logging.warning('Worrying Stuff')
logging.error('Alarming Stuff')
logging.fatal('AAAAHHHHH!!!!') # Process exits.
Usage note: Do not pre-format the strings in your program code.
Instead, let the logging module perform argument interpolation.
This saves cycles because strings that don't need to be printed
are never formatted. Note that this module does not attempt to
interpolate arguments when no arguments are given. In other words
logging.info('Interesting Stuff: %s')
does not raise an exception because logging.info() has only one
argument, the message string.
"Lazy" evaluation for debugging:
If you do something like this:
logging.debug('Thing: %s', thing.ExpensiveOp())
then the ExpensiveOp will be evaluated even if nothing
is printed to the log. To avoid this, use the level_debug() function:
if logging.level_debug():
logging.debug('Thing: %s', thing.ExpensiveOp())
Notes on Unicode:
The log output is encoded as UTF-8. Don't pass data in other encodings in
bytes() instances -- instead pass unicode string instances when you need to
(for both the format string and arguments).
Note on critical and fatal:
Standard logging module defines fatal as an alias to critical, but it's not
documented, and it does NOT actually terminate the program.
This module only defines fatal but not critical, and it DOES terminate the
program.
The differences in behavior are historical and unfortunate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import getpass
import io
import itertools
import logging
import os
import socket
import struct
import sys
import time
import traceback
import warnings
from absl import flags
from absl.logging import converter
import six
if six.PY2:
import thread as _thread_lib # For .get_ident().
else:
import threading as _thread_lib # For .get_ident().
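# Editor's note: a small sketch of the "lazy evaluation" idiom described in the
# module docstring above; it is not part of the original file, and ``expensive``
# stands in for any costly call you only want to make when debug output is enabled.
def _example_lazy_debug(expensive):
  if level_debug():
    debug('Thing: %s', expensive())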
FLAGS = flags.FLAGS
# Logging levels.
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING  # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG
# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
r'^(?P<severity>[IWEF])'
r'(?P<month>\d\d)(?P<day>\d\d) '
r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
r'\.(?P<microsecond>\d\d\d\d\d\d) +'
r'(?P<thread_id>-?\d+) '
r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')
# Mask to convert integer thread ids to unsigned quantities for logging purposes
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1
# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '
# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')
# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None
_CPP_NAME_TO_LEVELS = {
'debug': '0', # Abseil C++ has no DEBUG level, mapping it to INFO here.
'info': '0',
'warning': '1',
'warn': '1',
'error': '2',
'fatal': '3'
}
_CPP_LEVEL_TO_NAMES = {
'0': 'info',
'1': 'warning',
'2': 'error',
'3': 'fatal',
}
class _VerbosityFlag(flags.Flag):
"""Flag class for -v/--verbosity."""
def __init__(self, *args, **kwargs):
super(_VerbosityFlag, self).__init__(
flags.IntegerParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
self._value = v
self._update_logging_levels()
def _update_logging_levels(self):
"""Updates absl logging levels to the current verbosity."""
if not _absl_logger:
return
if self._value <= converter.ABSL_DEBUG:
standard_verbosity = converter.absl_to_standard(self._value)
else:
# --verbosity is set to higher than 1 for vlog.
standard_verbosity = logging.DEBUG - (self._value - 1)
# Also update root level when absl_handler is used.
if _absl_handler in logging.root.handlers:
logging.root.setLevel(standard_verbosity)
class _StderrthresholdFlag(flags.Flag):
"""Flag class for --stderrthreshold."""
def __init__(self, *args, **kwargs):
super(_StderrthresholdFlag, self).__init__(
flags.ArgumentParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
if v in _CPP_LEVEL_TO_NAMES:
# --stderrthreshold also accepts numberic strings whose values are
# Abseil C++ log levels.
cpp_value = int(v)
v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings.
elif v.lower() in _CPP_NAME_TO_LEVELS:
v = v.lower()
if v == 'warn':
v = 'warning' # Use 'warning' as the canonical name.
cpp_value = int(_CPP_NAME_TO_LEVELS[v])
else:
raise ValueError(
'--stderrthreshold must be one of (case-insensitive) '
"'debug', 'info', 'warning', 'error', 'fatal', "
"or '0', '1', '2', '3', not '%s'" % v)
self._value = v
flags.DEFINE_boolean('logtostderr',
False,
'Should only log to stderr?', allow_override_cpp=True)
flags.DEFINE_boolean('alsologtostderr',
False,
'also log to stderr?', allow_override_cpp=True)
flags.DEFINE_string('log_dir',
os.getenv('TEST_TMPDIR', ''),
'directory to write logfiles into',
allow_override_cpp=True)
flags.DEFINE_flag(_VerbosityFlag(
'verbosity', -1,
'Logging verbosity level. Messages logged at this level or lower will '
'be included. Set to 1 for debug logging. If the flag was not set or '
'supplied, the value will be changed from the default of -1 (warning) to '
'0 (info) after flags are parsed.',
short_name='v', allow_hide_cpp=True))
flags.DEFINE_flag(_StderrthresholdFlag(
'stderrthreshold', 'fatal',
'log messages at this level, or more severe, to stderr in '
'addition to the logfile. Possible values are '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'Obsoletes --alsologtostderr. Using --alsologtostderr '
'cancels the effect of this flag. Please also note that '
'this flag is subject to --verbosity and requires logfile '
'not be stderr.', allow_hide_cpp=True))
flags.DEFINE_boolean('showprefixforinfo', True,
'If False, do not prepend prefix to info messages '
'when it\'s logged to stderr, '
'--verbosity is set to INFO level, '
'and python logging is used.')
def get_verbosity():
"""Returns the logging verbosity."""
return FLAGS['verbosity'].value
def set_verbosity(v):
"""Sets the logging verbosity.
Causes all messages of level <= v to be logged,
and all messages of
|
endlessm/chromium-browser
|
third_party/llvm/lldb/test/API/macosx/queues/TestQueues.py
|
Python
|
bsd-3-clause
| 17,019 | 0.001351 |
"""Test queues inspection SB APIs."""
from __future__ import print_function
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestQueues(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues()
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues_with_backtrace(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues_with_libBacktraceRecording()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
        self.main_source = "main.c"
def check_queue_for_valid_queue_id(self, queue):
self.assertTrue(
queue.GetQueueID() != 0, "Check queue %s for valid QueueID (got 0x%x)" %
            (queue.GetName(), queue.GetQueueID()))
def check_running_and_pending_items_on_queue(
self, queue, expected_running, expected_pending):
self.assertTrue(
queue.GetNumPendingItems() == expected_pending,
"queue %s should have %d pending items, instead has %d pending items" %
(queue.GetName(),
expected_pending,
(queue.GetNumPendingItems())))
self.assertTrue(
queue.GetNumRunningItems() == expected_running,
"queue %s should have %d running items, instead has %d running items" %
(queue.GetName(),
expected_running,
(queue.GetNumRunningItems())))
def describe_threads(self):
desc = []
for x in self.inferior_process:
id = x.GetIndexID()
reason_str = lldbutil.stop_reason_to_str(x.GetStopReason())
location = "\t".join([lldbutil.get_description(
x.GetFrameAtIndex(i)) for i in range(x.GetNumFrames())])
desc.append(
"thread %d: %s (queue id: %s) at\n\t%s" %
(id, reason_str, x.GetQueueID(), location))
print('\n'.join(desc))
def check_number_of_threads_owned_by_queue(self, queue, number_threads):
if (queue.GetNumThreads() != number_threads):
self.describe_threads()
self.assertTrue(
queue.GetNumThreads() == number_threads,
"queue %s should have %d thread executing, but has %d" %
(queue.GetName(),
number_threads,
queue.GetNumThreads()))
def check_queue_kind(self, queue, kind):
expected_kind_string = "Unknown"
if kind == lldb.eQueueKindSerial:
expected_kind_string = "Serial queue"
if kind == lldb.eQueueKindConcurrent:
expected_kind_string = "Concurrent queue"
actual_kind_string = "Unknown"
if queue.GetKind() == lldb.eQueueKindSerial:
actual_kind_string = "Serial queue"
if queue.GetKind() == lldb.eQueueKindConcurrent:
actual_kind_string = "Concurrent queue"
self.assertTrue(
queue.GetKind() == kind,
"queue %s is expected to be a %s but it is actually a %s" %
(queue.GetName(),
expected_kind_string,
actual_kind_string))
def check_queues_threads_match_queue(self, queue):
for idx in range(0, queue.GetNumThreads()):
t = queue.GetThreadAtIndex(idx)
self.assertTrue(
t.IsValid(), "Queue %s's thread #%d must be valid" %
(queue.GetName(), idx))
self.assertTrue(
t.GetQueueID() == queue.GetQueueID(),
"Queue %s has a QueueID of %d but its thread #%d has a QueueID of %d" %
(queue.GetName(),
queue.GetQueueID(),
idx,
t.GetQueueID()))
self.assertTrue(
t.GetQueueName() == queue.GetName(),
"Queue %s has a QueueName of %s but its thread #%d has a QueueName of %s" %
(queue.GetName(),
queue.GetName(),
idx,
t.GetQueueName()))
self.assertTrue(
t.GetQueue().GetQueueID() == queue.GetQueueID(),
"Thread #%d's Queue's QueueID of %d is not the same as the QueueID of its owning queue %d" %
(idx,
t.GetQueue().GetQueueID(),
queue.GetQueueID()))
def queues(self):
"""Test queues inspection SB APIs without libBacktraceRecording."""
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
break1 = target.BreakpointCreateByName("stopper", 'a.out')
self.assertTrue(break1, VALID_BREAKPOINT)
process = target.LaunchSimple(
[], None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
self.inferior_process = process
queue_submittor_1 = lldb.SBQueue()
queue_performer_1 = lldb.SBQueue()
queue_performer_2 = lldb.SBQueue()
queue_performer_3 = lldb.SBQueue()
for idx in range(0, process.GetNumQueues()):
q = process.GetQueueAtIndex(idx)
if q.GetName() == "com.apple.work_submittor_1":
queue_submittor_1 = q
if q.GetName() == "com.apple.work_performer_1":
queue_performer_1 = q
if q.GetName() == "com.apple.work_performer_2":
queue_performer_2 = q
if q.GetName() == "com.apple.work_performer_3":
queue_performer_3 = q
self.assertTrue(
queue_submittor_1.IsValid() and queue_performer_1.IsValid() and queue_performer_2.IsValid() and queue_performer_3.IsValid(),
"Got all four expected queues: %s %s %s %s" %
(queue_submittor_1.IsValid(),
queue_performer_1.IsValid(),
queue_performer_2.IsValid(),
queue_performer_3.IsValid()))
self.check_queue_for_valid_queue_id(queue_submittor_1)
self.check_queue_for_valid_queue_id(queue_performer_1)
self.check_queue_for_valid_queue_id(queue_performer_2)
self.check_queue_for_valid_queue_id(queue_performer_3)
self.check_number_of_threads_owned_by_queue(queue_submittor_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_2, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_3, 4)
self.check_queue_kind(queue_submittor_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_2, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_3, lldb.eQueueKindConcurrent)
self.check_queues_threads_match_queue(queue_submittor_1)
self.check_queues_threads_match_queue(queue_performer_1)
self.check_queues_threads_match_queue(queue_performer_2)
self.check_queues_threads_match_queue(queue_performer_3)
# We have threads running with all the different dispatch QoS service
# levels - find those threads and check that we can get the correct
# QoS name for each of them.
user_initiated_thread = lldb.SBThread()
user_interactive_thread = lldb.SBThread()
utility_thread = lldb.SBThread()
unspecified_thread = lldb.SBThread()
background_thread = lldb.SBThread()
for th in process.threads:
if th.GetName() == "user initiated QoS":
user_initiated_thread = th
if th.GetName() == "us
|
innoteq/devpi-slack
|
devpi_slack/main.py
|
Python
|
bsd-3-clause
| 1,761 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from devpi_common.request import new_requests_session
from devpi_slack import __version__
def devpiserver_indexconfig_defaults():
return {"slack_icon": None, "slack_hook": None, "slack_user": None}
def devpiserver_on_upload_sync(log, application_url, stage, project, version):
slack_hook = stage.ixconfig.get("slack_hook") or os.getenv("SLACK_HOOK")
slack_icon = stage.ixconfig.get("slack_icon") or os.getenv(
"SLACK_ICON", "http://doc.devpi.net/latest/_static/devpicat.jpg")
slack_user = stage.ixconfig.get(
"slack_user") or os.getenv("SLACK
|
_USER", "devpi")
if not slack_hook:
return
session = new_requests_session(agent=("devpi-slack", __version__))
try:
r = session.post(
slack_hook,
data={
'payload': json.dumps({
"text": "Uploaded {}=={} to {}".format(
project,
version,
application_url
),
"icon_ur
|
l": slack_icon,
"username": slack_user,
})
})
except session.Errors:
raise RuntimeError("%s: failed to send Slack notification %s",
project, slack_hook)
if 200 <= r.status_code < 300:
log.info("successfully sent Slack notification: %s", slack_hook)
else:
log.error("%s: failed to send Slack notification: %s", r.status_code,
slack_hook)
log.debug(r.content.decode('utf-8'))
raise RuntimeError("%s: failed to send Slack notification: %s",
project, slack_hook)
|
Pikecillo/genna
|
external/4Suite-XML-1.0.2/Ft/Lib/Terminal.py
|
Python
|
gpl-2.0
| 11,043 | 0.002083 |
########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Lib/Terminal.py,v 1.6.4.1 2006/09/18 17:05:25 jkloth Exp $
"""
Provides some of the information from the terminfo database.
Copyright 2005 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
import os, re, sys
from Ft.Lib.Terminfo import TERMTYPES as _ANSITERMS
from Ft.Lib.Terminfo import DEFAULT_LINES as _LINES
from Ft.Lib.Terminfo import DEFAULT_COLUMNS as _COLUMNS
if sys.platform == 'win32':
import msvcrt
from Ft.Lib import _win32con
elif os.name == 'posix':
_HAVE_TIOCGWINSZ = False
try:
import fcntl, termios, struct
except ImportError:
pass
else:
_HAVE_TIOCGWINSZ = hasattr(termios, 'TIOCGWINSZ')
# ISO 6429 color sequences are composed of sequences of numbers
# separated by semicolons. The most common codes are:
#
# 0 to restore default color
# 1 for brighter colors
# 4 for underlined text
# 5 for flashing text
# 30 for black foreground
# 31 for red foreground
# 32 for green foreground
# 33 for yellow (or brown) foreground
# 34 for blue foreground
# 35 for purple foreground
# 36 for cyan foreground
# 37 for white (or gray) foreground
# 40 for black background
# 41 for red background
# 42 for green background
# 43 for yellow (or brown) background
# 44 for blue background
# 45 for purple background
# 46 for cyan background
# 47 for white (or gray) background
class AnsiEscapes:
class Colors:
DEFAULT = '\033[0m'
BOLD = '\033[1m'
FOREGROUND_BLACK = '\033[30m'
FOREGROUND_MAROON = '\033[31m'
FOREGROUND_GREEN = '\033[32m'
FOREGROUND_BROWN = FOREGROUND_OLIVE = '\033[33m'
FOREGROUND_NAVY = '\033[34m'
FOREGROUND_PURPLE = '\033[35m'
FOREGROUND_TEAL = '\033[36m'
FOREGROUND_SILVER = '\033[37m'
FOREGROUND_GRAY = '\033[1;30m'
FOREGROUND_RED = '\033[1;31m'
FOREGROUND_LIME = '\033[1;32m'
FOREGROUND_YELLOW = '\033[1;33m'
FOREGROUND_BLUE = '\033[1;34m'
FOREGROUND_MAGENTA = FOREGROUND_FUCHSIA = '\033[1;35m'
FOREGROUND_CYAN = FOREGROUND_AQUA = '\033[1;36m'
FOREGROUND_WHITE = '\033[1;37m'
BACKGROUND_BLACK = '\033[40m'
BACKGROUND_MAROON = '\033[41m'
BACKGROUND_GREEN = '\033[42m'
BACKGROUND_BROWN = BACKGROUND_OLIVE = '\033[43m'
BACKGROUND_NAVY = '\033[44m'
BACKGROUND_PURPLE = '\033[45m'
BACKGROUND_TEAL = '\033[46m'
BACKGROUND_SILVER = '\033[47m'
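# Editor's note: a minimal usage sketch, not part of the original module. It assumes
# an ANSI-capable (or Windows console) stream; the message text and colors chosen
# below are illustrative only.
def _example_colored_line(stream=None):
    # Wrap the stream so the ISO 6429 sequences above are passed through, translated
    # to Win32 console attributes, or stripped, depending on the terminal.
    term = Terminal(stream if stream is not None else sys.stdout)
    term.writetty(AnsiEscapes.Colors.FOREGROUND_RED + 'error: something went wrong' +
                  AnsiEscapes.Colors.DEFAULT + '\n')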
# Methods/members a Terminal instance should expose from its underly stream.
_file_methods = ('flush', 'write', 'read', 'isatty', 'encoding')
class Terminal:
def __init__(self, stream, keepAnsiEscapes=True):
self._stream = stream
for name in _file_methods:
method = getattr(stream, name, None)
if method is not None:
setattr(self, name, method)
if self.isatty():
if sys.platform == 'win32':
self._init_win32(stream, keepAnsiEscapes)
elif os.name == 'posix' and os.environ.get('TERM') in _ANSITERMS:
self._init_posix(stream, keepAnsiEscapes)
return
def _init_win32(self, stream, keepAnsiEscapes):
# Apparently there exists an IDE where isatty() is True, but
# the stream doesn't have a backing file descriptor.
try:
fileno = stream.fileno()
except AttributeError:
return
# Get the Windows console handle of the file descriptor.
try:
self._handle = msvcrt.get_osfhandle(fileno)
except IOError:
return
if keepAnsiEscapes:
self._write_escape = self._escape_win32
self._default_attribute = \
_win32con.GetConsoleScreenBufferInfo(self._handle)[2]
self.size = self._size_win32
return
def _init_posix(self, stream, keepAnsiEscapes):
if keepAnsiEscapes:
# stream handles ANSI escapes natively
self.writetty = stream.write
if _HAVE_TIOCGWINSZ:
self.size = self._size_termios
return
def lines(self):
return self.size()[0]
def columns(self):
return self.size()[1]
def size(self):
return (_LINES, _COLUMNS)
# noop method for underlying streams which do not implement it
def flush(self):
return
# noop method for underlying streams which do not implement it
def write(self, str):
return
# noop method for underlying streams which do not implement it
def read(self, size=-1):
return ''
# noop method for underlying streams which do not implement it
def isatty(self):
return False
def close(self):
# don't attempt to close a tty streams
if self.isatty():
return
# ignore any errors closing the underlying stream
try:
self._stream.close()
except:
pass
return
# ANSI Set Display Mode: ESC[#;...;#m
_ansi_sdm = re.compile('\033\\[([0-9]+)(?:;([0-9]+))*m')
def writetty(self, bytes):
start = 0
match = self._ansi_sdm.search(bytes)
while match is not None:
# write everything up to the escape sequence
self._stream.write(bytes[start:match.start()])
# process the color codes
self._write_escape(match.groups())
# skip over the escape sequence
start = match.end()
# find the next sequence
match = self._ansi_sdm.search(bytes, start)
# write the remainder
self._stream.write(bytes[start:])
return
def _write_escape(self, codes):
"""
Escape function for handling ANSI Set Display Mode.
Default behavior is to simply ignore the call (e.g. nothing is added
to the output).
"""
return
# -- Terminal specific functions -----------------------------------
def _size_termios(self):
ws = struct.pack("HHHH", 0, 0, 0, 0)
ws = fcntl.ioctl(self._stream.fileno(), termios.TIOCGWINSZ, ws)
lines, columns, x, y = struct.unpack("HHHH", ws)
        return (lines, columns)
def _escape_win32(self, codes):
"""Translates the ANSI color codes into the Win32 API equivalents."""
# get the current text attributes for the stream
size, cursor, attributes, window = \
_win32con.GetConsoleScreenBufferInfo(self._handle)
for code in map(int, filter(None, codes)):
if code == 0: # normal
# the default attribute
                attributes = self._default_attribute
elif code == 1: # bold
# bold only applies to the foreground color
attributes |= _win32con.FOREGROUND_INTENSITY
elif code == 30: # black
attributes &= _win32con.BACKGROUND
elif code == 31: # red
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= _win32con.FOREGROUND_RED
elif code == 32: # green
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= _win32con.FOREGROUND_GREEN
elif code == 33: # brown (bold: yellow)
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= (_win32con.FOREGROUND_RED |
_win32con.FOREGROUND_GREEN)
elif code == 34: # blue
attributes &= (_win32con.FOREGROUND_INTENSITY |
|
annarev/tensorflow
|
tensorflow/python/keras/regularizers.py
|
Python
|
apache-2.0
| 12,860 | 0.003421 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in regularizers.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
import math
import six
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
def _check_penalty_number(x):
"""check penalty number availability, raise ValueError if failed."""
|
if not isinstance(x, (float, int)):
raise ValueError(('Value: {} is not a valid regularization penalty number, '
'expected an int or float value').format(x))
if math.isinf(x) or math.isnan(x):
raise ValueError(
('Value: {} is not a valid regularization penalty number, '
'a positive/negative infinity or NaN is not a property value'
).format(x))
def _none_to_default(inputs, default):
  return default if inputs is None else inputs
@keras_export('keras.regularizers.Regularizer')
class Regularizer(object):
"""Regularizer base class.
Regularizers allow you to apply penalties on layer parameters or layer
activity during optimization. These penalties are summed into the loss
function that the network optimizes.
Regularization penalties are applied on a per-layer basis. The exact API will
depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D` and
`Conv3D`) have a unified API.
These layers expose 3 keyword arguments:
- `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
- `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
- `activity_regularizer`: Regularizer to apply a penalty on the layer's output
All layers (including custom layers) expose `activity_regularizer` as a
settable property, whether or not it is in the constructor arguments.
The value returned by the `activity_regularizer` is divided by the input
batch size so that the relative weighting between the weight regularizers and
the activity regularizers does not change with the batch size.
You can access a layer's regularization penalties by calling `layer.losses`
after calling the layer on inputs.
## Example
>>> layer = tf.keras.layers.Dense(
... 5, input_dim=5,
... kernel_initializer='ones',
... kernel_regularizer=tf.keras.regularizers.L1(0.01),
... activity_regularizer=tf.keras.regularizers.L2(0.01))
>>> tensor = tf.ones(shape=(5, 5)) * 2.0
>>> out = layer(tensor)
>>> # The kernel regularization term is 0.25
>>> # The activity regularization term (after dividing by the batch size) is 5
>>> tf.math.reduce_sum(layer.losses)
<tf.Tensor: shape=(), dtype=float32, numpy=5.25>
## Available penalties
```python
tf.keras.regularizers.L1(0.3) # L1 Regularization Penalty
tf.keras.regularizers.L2(0.1) # L2 Regularization Penalty
tf.keras.regularizers.L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties
```
## Directly calling a regularizer
Compute a regularization loss on a tensor by directly calling a regularizer
as if it is a one-argument function.
E.g.
>>> regularizer = tf.keras.regularizers.L2(2.)
>>> tensor = tf.ones(shape=(5, 5))
>>> regularizer(tensor)
<tf.Tensor: shape=(), dtype=float32, numpy=50.0>
## Developing new regularizers
Any function that takes in a weight matrix and returns a scalar
tensor can be used as a regularizer, e.g.:
>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
... def l1_reg(weight_matrix):
... return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
...
>>> layer = tf.keras.layers.Dense(5, input_dim=5,
... kernel_initializer='ones', kernel_regularizer=l1_reg)
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]
Alternatively, you can write your custom regularizers in an
object-oriented way by extending this regularizer base class, e.g.:
>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
... class L2Regularizer(tf.keras.regularizers.Regularizer):
... def __init__(self, l2=0.): # pylint: disable=redefined-outer-name
... self.l2 = l2
...
... def __call__(self, x):
... return self.l2 * tf.math.reduce_sum(tf.math.square(x))
...
... def get_config(self):
... return {'l2': float(self.l2)}
...
>>> layer = tf.keras.layers.Dense(
... 5, input_dim=5, kernel_initializer='ones',
... kernel_regularizer=L2Regularizer(l2=0.5))
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]
### A note on serialization and deserialization:
Registering the regularizers as serializable is optional if you are just
training and executing models, exporting to and from SavedModels, or saving
and loading weight checkpoints.
Registration is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON. If using this functionality,
you must make sure any python process running your model has also defined
and registered your custom regularizer.
`tf.keras.utils.register_keras_serializable` is only available in TF 2.1 and
beyond. In earlier versions of TensorFlow you must pass your custom
regularizer to the `custom_objects` argument of methods that expect custom
regularizers to be registered as serializable.
"""
def __call__(self, x):
"""Compute a regularization penalty from an input tensor."""
return 0.
@classmethod
def from_config(cls, config):
"""Creates a regularizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same regularizer from the config
dictionary.
This method is used by Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A regularizer instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config of the regularizer.
An regularizer config is a Python dictionary (serializable)
containing all configuration parameters of the regularizer.
The same regularizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Returns:
Python dictionary.
"""
raise NotImplementedError(str(self) + ' does not implement get_config()')
@keras_export('keras.regularizers.L1L2')
class L1L2(Regularizer):
"""A regularizer that applies both L1 and L2 regularization penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
The L2 regularization penalty is computed as
`loss = l2 * reduce_sum(squa
|
joelwilliamson/cs234
|
a1/a01q2b.py
|
Python
|
gpl-2.0
| 3,280 | 0.051829 |
#!/usr/bin/python2
import check
from fractions import gcd
# Algorithm taken from en.wikipedia.org/wiki/Line-line-_intersection
# All code written by Joel Williamson
## intersection: Int Int Int Int Int Int Int Int -> (union "parallel" (tuple Int Int Int Int))
##
## Purpose: Treating the input as 4 pairs of integers, each representing the
## endpoint of a line, returns the intersection of the two lines, or
## "parallel" if they are parallel
##
## Effects:
##
## Example: intersection(-15,15,15,-15,-10,-10,10,10) => [0,1,0,1]
def intersection(x1, y1, x2, y2, x3, y3, x4, y4):
x_numerator = ((x1*y2-y1*x2)*(x3-x4) - (x1-x2)*(x3*y4-y3*x4))
denominator = (x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)
if (denominator == 0) :
return "parallel"
x_gcd = gcd(x_numerator,denominator)
y_numerator = (x1*y2-y1*x2)*(y3-y4)-(y1-y2)*(x3*y4-y3*x4)
y_gcd = gcd(y_numerator,denominator)
return (x_numerator/x_gcd,denominator/x_gcd,
y_numerator/y_gcd,denominator/y_gcd)
## Tests:
check.expect('Sample test', intersection(-15,15,15,-15,-10,-10,10,10), (0,1,0,1))
check.expect('Parallel', intersection(-10,-10,10,10,-20,-10,0,10),"parallel")
## point_range: (listof Int) (listof Int) (listof Int) (listof Int) (optional (tuple Int Int Int Int))
## -> (iterable (tuple Int Int Int Int))
##
## Purpose: Merges four lists of equal length into an iterable of points,
## optionally starting after the point specified by (init_x1,init_y1,initx2,inity2)
##
## Example: i_p = point_range([1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16])
## i_p.next() = (1,5,9,13)
## i_p.next() = (2,6,10,14)
def point_range(X1,Y1,X2,Y2,(init_x1 ,init_y1, init_x2, init_y2 )= (None,None,None,None)) :
if (init_x1 == None) :
started = True
else :
started = False
for i in range(len(X1)) :
if (not started and not((X1[i],Y1[i],X2[i],Y2[i]) == (init_x1,init_y1,init_x2,init_y2))) :
continue
elif (not started) :
started = True
continue
yield (X1[i],Y1[i],X2[i],Y2[i])
## pieces: Int Int (listof Int) (listof Int) (listof Int) (listof Int) -> Int
##
## Purpose: pieces takes the radius of a circle, N is the number of lines dividing
## the circle and the four lists correspond to the endpoints of the lines
## It produces the number of segments the lines divide the circle into.
##
## Effects:
##
## Examples: pieces(10,3,[-15,1,10],[15,12,4],[15,-6,-10],[-15,-12,-8]) => 7
## pieces(10,3,[0,-11,-11],[11,3,-1],[0,11,11],[-11,3,7]) => 6
def pieces(R, N, X1, Y1, X2, Y2):
segments = 1
for l1 in point_range(X1,Y1,X2,Y2) :
segments += 1
intersections = {}
for l2 in point_range(X1,Y1,X2,Y2,(l1[0],l1[1],l1[2],l1[3])) :
inter = intersection(l1[0],l1[1],l1[2],l1[3],l2[0],l2[1],l2[2],l2[3])
            if (inter == "parallel") :
continue
if inter in intersections :
continue
if ((inter[0]*inter[0])/(inter[1]*inter[1]) + (inter[2]*inter[2])/(inter[3]*inter[3]) >= R*R) :
continue
intersections[inter] = True
segments += 1
return segments
## Tests:
check.expect('Example 1',pieces(10,3,[-15,1,10],[15,12,4],[15,-6,-10],[-15,-12,-8]),7)
check.expect('Example 2',pieces(10,3,[0,-11,-11],[11,3,-1],[0,11,11],[-11,3,7]),6)
|
# Be sure to do lots more of your own testing!
|
neo4j/neo4j-python-driver
|
tests/integration/examples/test_config_unencrypted_example.py
|
Python
|
apache-2.0
| 1,486 | 0.000673 |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from neo4j._exceptions import BoltHandshakeError
from neo4j.exceptions import ServiceUnavailable
from tests.integration.examples import DriverSetupExample
# isort: off
# tag::config-unencrypted-import[]
from neo4j import GraphDatabase
# end::config-unencrypted-import[]
# isort: on
# python -m pytest tests/integration/examples/test_config_unencrypted_example.py -s -v
class ConfigUnencryptedExample(DriverSetupExample):
# tag::config-unencrypted[]
def __init__(self, uri, auth):
self.driver = GraphDatabase.driver(uri, auth=auth, encrypted=False)
# end::config-unencrypted[]
def test_example(uri, auth):
try:
ConfigUnencryptedExample.test(uri, auth)
    except ServiceUnavailable as error:
if isinstance(error.__cause__, BoltHandshakeError):
pytest.skip(error.args[0])
|
miraculixx/geocoder
|
geocoder/api.py
|
Python
|
mit
| 11,482 | 0 |
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.osm import Osm
from geocoder.w3w import W3W
from geocoder.bing import Bing
from geocoder.here import Here
from geocoder.yahoo import Yahoo
from geocoder.baidu import Baidu
from geocoder.tomtom import Tomtom
from geocoder.arcgis import Arcgis
from geocoder.ottawa import Ottawa
from geocoder.yandex import Yandex
from geocoder.google import Google
from geocoder.mapbox import Mapbox
from geocoder.maxmind import Maxmind
from geocoder.location import Location
from geocoder.opencage import OpenCage
from geocoder.geonames import Geonames
from geocoder.mapquest import Mapquest
from geocoder.distance import Distance
from geocoder.geolytica import Geolytica
from geocoder.freegeoip import FreeGeoIP
from geocoder.canadapost import Canadapost
from geocoder.w3w_reverse import W3WReverse
from geocoder.here_reverse import HereReverse
from geocoder.bing_reverse import BingReverse
from geocoder.yandex_reverse import YandexReverse
from geocoder.mapbox_reverse import MapboxReverse
from geocoder.google_reverse import GoogleReverse
from geocoder.google_timezone import Timezone
from geocoder.google_elevation import Elevation
from geocoder.mapquest_reverse import MapquestReverse
from geocoder.opencage_reverse import OpenCageReverse
def get(location, **kwargs):
"""Get Geocode
:param ``location``: Your search location you want geocoded.
:param ``provider``: The geocoding engine you want to use.
:param ``method``: Define the method (geocode, method).
"""
provider = kwargs.get('provider', 'bing').lower().strip()
method = kwargs.get('method', 'geocode').lower().strip()
options = {
'osm': {'geocode': Osm},
'here': {
'geocode': Here,
'reverse': HereReverse,
},
'baidu': {'geocode': Baidu},
'yahoo': {'geocode': Yahoo},
'tomtom': {'geocode': Tomtom},
'arcgis': {'geocode': Arcgis},
'ottawa': {'geocode': Ottawa},
'mapbox': {
'geocode': Mapbox,
'reverse': MapboxReverse,
},
'maxmind': {'geocode': Maxmind},
'geonames': {'geocode': Geonames},
'freegeoip': {'geocode': FreeGeoIP},
'w3w': {
|
'geocode': W3W,
'reverse': W3WReverse,
},
'yandex': {
'geocode': Yandex,
'reverse': YandexReverse,
},
'mapquest': {
'geocode': Mapquest,
'reverse': MapquestReverse,
},
'geolytica': {'geocode': Geolytica},
'canadapost': {'geocode': Canadapost},
'opencage': {
'geocode': OpenCage,
'reverse': OpenCageReverse,
},
|
'bing': {
'geocode': Bing,
'reverse': BingReverse,
},
'google': {
'geocode': Google,
'reverse': GoogleReverse,
'timezone': Timezone,
'elevation': Elevation,
},
}
if isinstance(location, (list, dict)) and method == 'geocode':
raise ValueError("Location should be a string")
if provider not in options:
raise ValueError("Invalid provider")
else:
if method not in options[provider]:
raise ValueError("Invalid method")
return options[provider][method](location, **kwargs)
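# Editor's note: a brief illustrative sketch, not part of the original API surface.
# The location string and provider below are arbitrary examples, and most providers
# other than OSM also expect an API key passed as ``key=...``.
def _example_get_usage():
    g = get('Ottawa, Ontario', provider='osm', method='geocode')
    return g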
def distance(*args, **kwargs):
"""Distance tool measures the distance between two or multiple points.
:param location: (min 2x locations) Your search location you want geocoded.
:param units: (default=kilometers) Unit of measurement.
> kilometers
> miles
> feet
> meters
"""
return Distance(*args, **kwargs)
def location(location, **kwargs):
"""Parser for different location formats
"""
return Location(location, **kwargs)
def google(location, **kwargs):
"""Google Provider
:param location: Your search location you want geocoded.
:param method: (default=geocode) Use the following:
> geocode
> reverse
> batch
> timezone
> elevation
"""
return get(location, provider='google', **kwargs)
def mapbox(location, **kwargs):
"""Mapbox Provider
:param location: Your search location you want geocoded.
:param proximity: Search nearby [lat, lng]
:param method: (default=geocode) Use the following:
> geocode
> reverse
> batch
"""
return get(location, provider='mapbox', **kwargs)
def yandex(location, **kwargs):
"""Yandex Provider
:param location: Your search location you want geocoded.
:param lang: Chose the following language:
> ru-RU — Russian (by default)
> uk-UA — Ukrainian
> be-BY — Belarusian
> en-US — American English
> en-BR — British English
> tr-TR — Turkish (only for maps of Turkey)
:param kind: Type of toponym (only for reverse geocoding):
> house - house or building
> street - street
> metro - subway station
> district - city district
> locality - locality (city, town, village, etc.)
"""
return get(location, provider='yandex', **kwargs)
def w3w(location, **kwargs):
"""what3words Provider
:param location: Your search location you want geocoded.
:param key: W3W API key.
:param method: Chose a method (geocode, method)
"""
return get(location, provider='w3w', **kwargs)
def baidu(location, **kwargs):
"""Baidu Provider
:param location: Your search location you want geocoded.
:param key: Baidu API key.
:param referer: Baidu API referer website.
"""
return get(location, provider='baidu', **kwargs)
def ottawa(location, **kwargs):
"""Ottawa Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='ottawa', **kwargs)
def elevation(location, **kwargs):
"""Elevation - Google Provider
:param location: Your search location you want to retrieve elevation data.
"""
return get(location, method='elevation', provider='google', **kwargs)
def timezone(location, **kwargs):
"""Timezone - Google Provider
:param location: Your search location you want to retrieve timezone data.
:param timestamp: Define your own specified time to calculate timezone.
"""
return get(location, method='timezone', provider='google', **kwargs)
def reverse(location, provider='google', **kwargs):
"""Reverse Geocoding
:param location: Your search location you want to reverse geocode.
:param key: (optional) use your own API Key from Bing.
:param provider: (default=google) Use the following:
> google
> bing
"""
return get(location, method='reverse', provider=provider, **kwargs)
def bing(location, **kwargs):
"""Bing Provider
:param location: Your search location you want geocoded.
:param key: (optional) use your own API Key from Bing.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='bing', **kwargs)
def yahoo(location, **kwargs):
"""Yahoo Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='yahoo', **kwargs)
def geolytica(location, **kwargs):
"""Geolytica (Geocoder.ca) Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='geolytica', **kwargs)
def opencage(location, **kwargs):
"""Opencage Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from OpenCage.
"""
return get(location, provider='opencage', **kwargs)
def arcgis(location, **kwargs):
"""ArcGIS Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='arcgis', **kwargs)
def here(location, **kwargs):
"""HERE Provider
:param location: Your search location you want geocoded.
:param app_code: (optional) use your own Application Code from H
|
tebeka/arrow
|
python/examples/plasma/sorting/sort_df.py
|
Python
|
apache-2.0
| 6,843 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from multiprocessing import Pool
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.plasma as plasma
import subprocess
import time
import multimerge
# To run this example, you will first need to run "python setup.py install" in
# this directory to build the Cython module.
#
# You will only see speedups if you run this code on more data, this is just a
# small example that can run on a laptop.
#
# The values we used to get a speedup (on a m4.10xlarge instance on EC2) were
# object_store_size = 84 * 10 ** 9
# num_cores = 20
# num_rows = 10 ** 9
# num_cols = 1
client = None
object_store_size = 2 * 10 ** 9 # 2 GB
num_cores = 8
num_rows = 200000
num_cols = 2
column_names = [str(i) for i in range(num_cols)]
column_to_sort = column_names[0]
# Connect to clients
def connect():
global client
client = plasma.connect('/tmp/store')
np.random.seed(int(time.time() * 10e7) % 10000000)
def put_df(df):
record_batch = pa.RecordBatch.from_pandas(df)
# Get size of record batch and schema
mock_sink = pa.MockOutputStream()
stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
stream_writer.write_batch(record_batch)
data_size = mock_sink.size()
# Generate an ID and allocate a buffer in the object store for the
# serialized DataFrame
object_id = plasma.ObjectID(np.random.bytes(20))
buf = client.create(object_id, data_size)
# Write the serialized DataFrame to the object store
sink = pa.FixedSizeBufferWriter(buf)
stream_writer = pa.RecordBatchStreamWriter(sink, record_batch.schema)
stream_writer.write_batch(record_batch)
# Seal the object
client.seal(object_id)
return object_id
def get_dfs(object_ids):
"""Retrieve dataframes from the object store given their object IDs."""
buffers = client.get_buffers(object_ids)
return [pa.RecordBatchStreamReader(buf).read_next_batch().to_pandas()
for buf in buffers]
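# Editor's note: a minimal round-trip sketch, not part of the original example. It
# assumes the plasma store is already running at /tmp/store and that connect() has
# been called in this process; the small random frame is purely illustrative.
def _example_round_trip():
    df_in = pd.DataFrame(np.random.randn(8, num_cols), columns=column_names)
    object_id = put_df(df_in)  # serialize the frame into the object store
    [df_out] = get_dfs([object_id])  # read it back as a pandas DataFrame
    assert np.allclose(df_in.values, df_out.values)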
def local_sort(object_id):
"""Sort a partition of a dataframe."""
# Get the dataframe from the object store.
[df] = get_dfs([object_id])
# Sort the dataframe.
sorted_df = df.sort_values(by=column_to_sort)
# Get evenly spaced values from the dataframe.
indices = np.linspace(0, len(df) - 1, num=num_cores, dtype=np.int64)
# Put the sorted dataframe in the object store and return the corresponding
# object ID as well as the sampled values.
return put_df(sorted_df), sorted_df.as_matrix().take(indices)
def local_partitions(object_id_and_pivots):
"""Take a sorted partition of a dataframe and split it into more pieces."""
object_id, pivots = object_id_and_pivots
[df] = get_dfs([object_id])
split_at = df[column_to_sort].searchsorted(pivots)
split_at = [0] + list(split_at) + [len(df)]
# Partition the sorted dataframe and put each partition into the object
# store.
return [put_df(df[i:j]) for i, j in zip(split_at[:-1], split_at[1:])]
def merge(object_ids):
"""Merge a number of sorted dataframes into a single sorted dataframe."""
dfs = get_dfs(object_ids)
# In order to use our multimerge code, we have to convert the arrays from
# the Fortran format to the C format.
arrays = [np.ascontiguousarray(df.as_matrix()) for df in dfs]
for a in arrays:
assert a.dtype == np.float64
assert not np.isfortran(a)
# Filter out empty arrays.
arrays = [a for a in arrays if a.shape[0] > 0]
if len(arrays) == 0:
return None
resulting_array = multimerge.multimerge2d(*arrays)
merged_df2 = pd.DataFrame(resulting_array, columns=column_names)
return put_df(merged_df2)
if __name__ == '__main__':
# Start the plasma store.
p = subprocess.Popen(['plasma_store',
'-s', '/tmp/store',
'-m', str(object_store_size)])
# Connect to the plasma store.
connect()
# Connect the processes in the pool.
pool = Pool(initializer=connect, initargs=(), processes=num_cores)
# Create a DataFrame from a numpy array.
df = pd.DataFrame(np.random.randn(num_rows, num_cols),
columns=column_names)
partition_ids = [put_df(partition) for partition
in np.split(df, num_cores)]
# Begin timing the parallel sort example.
parallel_sort_start = time.time()
# Sort each partition and subsample them. The subsampled values will be
# used to create buckets.
sorted_df_ids, pivot_groups = list(zip(*pool.map(local_sort,
partition_ids)))
# Choose the pivots.
all_pivots = np.concatenate(pivot_groups)
indices = np.linspace(0, len(all_pivots) - 1, num=num_cores,
dtype=np.int64)
pivots = np.take(np.sort(all_pivots), indices)
# Break all of the sorted partitions into even smaller partitions. Group
# the object IDs from each bucket together.
results = list(zip(*pool.map(local_partitions,
zip(sorted_df_ids,
len(sorted_df_ids) * [pivots]))))
# Merge each of the buckets and store the results in the object store.
object_ids = pool.map(merge, results)
resulting_ids = [object_id for object_id in object_ids
if object_id is not None]
# Stop timing the paralle sort example.
parallel_sort_end = time.time()
print('Parallel sort took {} seconds.'
.format(parallel_sort_end - parallel_sort_start))
serial_sort_start = time.time()
original_sorted_df = df.sort_values(by=column_to_sort)
serial_sort_end = time.time()
# Check that we sorted the DataFrame properly.
sorted_dfs = get_dfs(resulting_ids)
sorted_df = pd.concat(sorted_dfs)
print('Serial sort took {} seconds.'
.format(serial_sort_end - serial_sort_start))
assert np.allclose(sorted_df.values, original_sorted_df.values)
# Kill the object store.
p.kill()
|
tallakahath/pymatgen
|
pymatgen/io/vasp/sets.py
|
Python
|
mit
| 58,824 | 0.000357 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import abc
import re
import os
import glob
import shutil
import warnings
from itertools import chain
from copy import deepcopy
import six
import numpy as np
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.sites import PeriodicSite
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
   from_dict for derivative sets unless you know what you are doing.
Improper overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings and user_kpoints_settings are absolute. Any new sets you
implement must obey this. If a user wants to override your settings,
you assume he knows what he is doing. Do not magically override user
supplied settings. You can issue a warning if you think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
__author__ = "Shyue Ping Ong, Wei Chen, Will Richards, Geoffroy Hautier, Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 28 2016"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class VaspInputSet(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Base class representing a set of Vasp input parameters with a structure
supplied as init parameters. Typically, you should not inherit from this
class. Start from DictSet or MPRelaxSet or MITRelaxSet.
"""
@abc.abstractproperty
def incar(self):
"""Incar object"""
pass
@abc.abstractproperty
def kpoints(self):
"""Kpoints object"""
pass
@abc.abstractproperty
def poscar(self):
"""Poscar object"""
pass
@property
def potcar_symbols(self):
"""
List of POTCAR symbols.
"""
elements = self.poscar.site_symbols
potcar_symbols = []
settings = self.config_dict["POTCAR"]
if isinstance(settings[elements[-1]], dict):
for el in elements:
potcar_symbols.append(settings[el]['symbol']
if el in settings else el)
else:
for el in elements:
potcar_symbols.append(settings.get(el, el))
return potcar_symbols
@property
def potcar(self):
"""
Potcar object.
"""
return Potcar(self.potcar_symbols, functional=self.potcar_functional)
@property
def all_input(self):
"""
Returns all input files as a dict of {filename: vasp object}
Returns:
dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
"""
kpoints = self.kpoints
incar = self.incar
if np.product(kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
incar["ISMEAR"] = 0
return {'INCAR': incar,
'KPOINTS': kpoints,
'POSCAR': self.poscar,
'POTCAR': self.potcar}
def write_input(self, output_dir,
make_dir_if_not_present=True, include_cif=False):
"""
Writes a set of VASP input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.all_input.items():
v.write_file(os.path.join(output_dir, k))
if include_cif:
s = self.all_input["POSCAR"].structure
fname = os.path.join(output_dir, "%s.cif" % re.sub(r'\s', "",
s.formula))
s.to(filename=fname)
def as_dict(self, verbosity=2):
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
class DictSet(VaspInputSet):
"""
Concrete implementation of VaspInputSet that is initialized from a dict
settings. This allows arbitrary settings to be input. In general,
this is rarely used directly unless there is a source of settings in yaml
format (e.g., from a REST interface). It is typically used by other
VaspInputSets for initialization.
Special consideration should be paid to the way the MAGMOM initialization
for the INCAR is done. The initialization differs depending on the type of
structure and the configuration settings. The order in which the magmom is
determined is as follows:
1. If the site itself has a magmom setting, that is used.
2. If the species on the site has a spin setting, that is used.
3. If the species itself has a particular setting in the config file, that
is used, e.g., Mn3+ may have a different magmom than Mn4+.
4. Lastly, the element symbol itself is checked in the config file. If
there are no settings, VASP's default of 0.6 is used.
Args:
structure (Structure): The Structure to create inputs for.
config_dict (dict): The config dictionary to use.
files_to_transfer (dict): A dictionary of {filename: filepath}. This
allows the transfer of files from a previous calculation.
user_incar_settings (dict): User INCAR settings. This allows a user
to override INCAR settings, e.g., setting a different MAGMOM for
various elements or species. Note that in the new scheme,
ediff_per_atom and hubbard_u are no longer args. Instead, the
config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
scales with # of atoms, the latter does not. If both are
present, EDIFF is preferred. To force such settings, just supply
user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
pymatgen defines different values d
|
Tehsmash/networking-cisco
|
networking_cisco/db/migration/alembic_migrations/versions/mitaka/expand/b29f1026b281_add_support_for_ucsm_vnic_templates.py
|
Python
|
apache-2.0
| 1,374 | 0.002183 |
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add support for UCSM VNIC Templates
Revision ID: b29f1026b281
Revises: 13bd9ebffbf5
Create Date: 2016-02-18 15:12:31.294651
"""
# revision identifiers, used by Alembic.
revision = 'b29f1026b281'
down_revision = '13bd9ebffbf5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('ml2_ucsm_vnic_templates',
    sa.Column('vlan_id', sa.Integer(), nullable=False),
sa.Column('vnic_template', sa.String(length=64), nullable=False),
sa.Column('device_id', sa.String(length=64), nullable=False),
sa.Column('physnet', sa.String(length=32), nullable=False),
sa.Column('updated_on_ucs', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('vlan_id', 'vnic_template', 'device_id')
)
|
mixedup4x4/Speedy
|
Contents/LanScan.py
|
Python
|
gpl-3.0
| 7,956 | 0.007793 |
import subprocess
import time
import sys
import re
class checkIfUp:
__shellPings = []
__shell2Nbst = []
__ipsToCheck = []
checkedIps = 0
onlineIps = 0
unreachable = 0
timedOut = 0
upIpsAddress = []
computerName = []
    completeMacAddress = []
executionTime = 0
def __init__(self,fromIp,toIp):
startTime = time.time()
self.fromIp = fromIp # from 192.168.1.x
self.toIp = toIp # to 192.168.x.x
self.__checkIfIpIsValid(fromIp)
|
self.__checkIfIpIsValid(toIp)
self.__getRange(fromIp,toIp)
self.__shellToQueue()
#self.__checkIfUp() # run by the shellToQueue queue organizer
self.__computerInfoInQueue()
endTime = time.time()
self.executionTime = round(endTime - startTime,3)
def __checkIfIpIsValid(self,ip):
def validateRange(val):
# valid range => 1 <-> 255
try:
val = int(val)
if val < 0 or val > 255:
print "Invalid IP Range ("+str(val)+")"
sys.exit(0)
except:
print "Invalid IP"
sys.exit(0)
ip = ip.split(".")
firstVal = validateRange(ip[0])
secondVal = validateRange(ip[1])
thirdVal = validateRange(ip[2])
fourthVal = validateRange(ip[3])
return True
def __getRange(self,fromIp,toIp):
fromIp = fromIp.split(".")
toIp = toIp.split(".")
# toIp must be > fromIp
def ip3chars(ipBlock):
# input 1; output 001
ipBlock = str(ipBlock)
while len(ipBlock) != 3:
ipBlock = "0"+ipBlock
return ipBlock
fromIpRaw = ip3chars(fromIp[0])+ip3chars(fromIp[1])+ip3chars(fromIp[2])+ip3chars(fromIp[3])
toIpRaw = ip3chars(toIp[0])+ip3chars(toIp[1])+ip3chars(toIp[2])+ip3chars(toIp[3])
if fromIpRaw > toIpRaw:
# if from is bigger switch the order
temp = fromIp
fromIp = toIp
toIp = temp
currentIp = [0,0,0,0]
# all to integers
currentIp0 = int(fromIp[0])
currentIp1 = int(fromIp[1])
currentIp2 = int(fromIp[2])
currentIp3 = int(fromIp[3])
toIp0 = int(toIp[0])
toIp1 = int(toIp[1])
toIp2 = int(toIp[2])
toIp3 = int(toIp[3])
firstIp = str(currentIp0)+"."+str(currentIp1)+"."+str(currentIp2)+"."+str(currentIp3)
self.__ipsToCheck = [firstIp]
while currentIp3 != toIp3 or currentIp2 != toIp2 or currentIp1 != toIp1 or currentIp0 != toIp0:
currentIp3 += 1
if currentIp3 > 255:
currentIp3 = 0
currentIp2 += 1
if currentIp2 > 255:
currentIp2 = 0
currentIp1 += 1
if currentIp1 > 255:
currentIp1 = 0
currentIp0 += 1
addIp = str(currentIp0)+"."+str(currentIp1)+"."+str(currentIp2)+"."+str(currentIp3)
self.__ipsToCheck.append(addIp)
def __shellToQueue(self):
# write them in the shell queue
maxPingsAtOnce = 200
currentQueuedPings = 0
for pingIp in self.__ipsToCheck:
proc = subprocess.Popen(['ping','-n','1',pingIp],stdout=subprocess.PIPE,shell=True)
self.__shellPings.append(proc)
currentQueuedPings += 1
if currentQueuedPings >= maxPingsAtOnce:
#execute shells
self.__checkIfUp()
currentQueuedPings = 0
self.__shellPings = []
self.__checkIfUp() # execute last queue
def __checkIfUp(self):
# execute the shells & determine whether the host is up or not
for shellInQueue in self.__shellPings:
pingResult = ""
shellInQueue.wait()
while True:
line = shellInQueue.stdout.readline()
if line != "":
pingResult += line
else:
break;
self.checkedIps += 1
if 'unreachable' in pingResult:
self.unreachable += 1
elif 'timed out' in pingResult:
self.timedOut += 1
else:
self.onlineIps += 1
currentIp = self.__ipsToCheck[self.checkedIps-1]
self.upIpsAddress.append(currentIp)
def __computerInfoInQueue(self):
# shell queue for online hosts
maxShellsAtOnce = 255
currentQueuedNbst = 0
for onlineIp in self.upIpsAddress:
proc = subprocess.Popen(['\\Windows\\sysnative\\nbtstat.exe','-a',onlineIp],stdout=subprocess.PIPE,shell=True)
self.__shell2Nbst.append(proc)
currentQueuedNbst += 1
if currentQueuedNbst >= maxShellsAtOnce:
# execute shells
self.__gatherComputerInfo()
currentQueuedNbst = 0
self.__shell2Nbst = []
self.__gatherComputerInfo() # execute last queue
def __gatherComputerInfo(self):
# execute the shells and find host Name and MAC
for shellInQueue in self.__shell2Nbst:
nbstResult = ""
shellInQueue.wait()
computerNameLine = ""
macAddressLine = ""
computerName = ""
macAddress = ""
while True:
line = shellInQueue.stdout.readline()
if line != "":
if '<00>' in line and 'UNIQUE' in line:
computerNameLine = line
if 'MAC Address' in line:
macAddressLine = line
else:
break;
computerName = re.findall('([ ]+)(.*?)([ ]+)<00>', computerNameLine)
macAddress = re.findall('([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)',macAddressLine)
try:
self.computerName.append(computerName[0][1])
except:
self.computerName.append("")
completeMacAddress = ""
firstMacElement = 0
try:
for macEach in macAddress[0]:
if firstMacElement == 0:
firstMacElement += 1
else:
completeMacAddress += ":"
completeMacAddress += macEach
firstMacElement = 0
except:
completeMacAddress = ""
self.completeMacAddress.append(completeMacAddress)
def readValue(self):
# debugging use only
ips = []
for ip in self.completeMacAddress:
ips.append(ip)
return ips
print "\t\t---LANScanner v1.0---\n"
# brief tutorial
print "Sample input data:"
print "FromIP: 192.168.1.50"
print "ToIP: 192.168.1.20"
print "---"
# input
fromIp = raw_input("From: ")
toIp = raw_input("To: ")
# enter values to class
userRange = checkIfUp(fromIp,toIp)
# read class values
print ""
#print userRange.readValue() # debugging use only
print "Checked",userRange.checkedIps,"IPs"
print ""
print "Online:",str(userRange.onlineIps)+"/"+str(userRange.checkedIps)
print "Unreachable:",userRange.unreachable,"Timed out:",userRange.timedOut
print "" # newline
print "Online IPs:"
print "IP\t\tNAME\t\tMAC"
counter = 0
for onlineIp in userRange.upIpsAddress:
print onlineIp+"\t"+userRange.computerName[counter]+"\t"+userRange.completeMacAddress[counter]
counter += 1
print ""
print "Took",userRange.executionTime,"seconds"
|