| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
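
Each row below pairs a source file's contents (the `text` column) with its repository metadata. The sketch that follows is a minimal illustration of filtering rows of this shape by the columns above; the `rows` iterable of dicts is an assumption for illustration, not part of any dataset tooling.

```python
def select_rows(rows, language="Python", max_score=0.05):
    """Yield (repo_name, path, text) for rows matching the schema above."""
    for row in rows:
        if row["language"] == language and row["score"] <= max_score:
            yield row["repo_name"], row["path"], row["text"]
```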
# coding=utf-8
"""
Bridges calls made inside of a Python environment to the Cmd2 host app
while maintaining a reasonable degree of isolation between the two.
"""
import sys
from contextlib import (
redirect_stderr,
redirect_stdout,
)
from typing import (
IO,
TYPE_CHECKING,
Any,
List,
NamedTuple,
Optional,
TextIO,
Union,
cast,
)
from .utils import ( # namedtuple_with_defaults,
StdSim,
)
if TYPE_CHECKING: # pragma: no cover
import cmd2
class CommandResult(NamedTuple):
"""Encapsulates the results from a cmd2 app command
:stdout: str - output captured from stdout while this command is executing
:stderr: str - output captured from stderr while this command is executing
:stop: bool - return value of onecmd_plus_hooks after it runs the given
command line.
:data: possible data populated by the command.
Any combination of these fields can be used when developing a scripting API
for a given command. By default stdout, stderr, and stop will be captured
for you. If there is additional command specific data, then write that to
cmd2's last_result member. That becomes the data member of this tuple.
In some cases, the data member may contain everything needed for a command
and storing stdout and stderr might just be a duplication of data that
wastes memory. In that case, the StdSim can be told not to store output
with its pause_storage member. While this member is True, any output sent
to StdSim won't be saved in its buffer.
The code would look like this::
if isinstance(self.stdout, StdSim):
self.stdout.pause_storage = True
if isinstance(sys.stderr, StdSim):
sys.stderr.pause_storage = True
See :class:`~cmd2.utils.StdSim` for more information.
.. note::
Named tuples are immutable. The contents are there for access,
not for modification.
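
    A hypothetical round trip through the bridge (``app`` stands for whatever
    name the embedding Python environment exposes for the PyBridge instance)::

        result = app('help')
        if result:
            print(result.stdout)
        else:
            print(result.stderr)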
"""
stdout: str = ''
stderr: str = ''
stop: bool = False
data: Any = None
def __bool__(self) -> bool:
"""Returns True if the command succeeded, otherwise False"""
# If data was set, then use it to determine success
if self.data is not None:
return bool(self.data)
# Otherwise check if stderr was filled out
else:
return not self.stderr
class PyBridge:
"""Provides a Python API wrapper for application commands."""
def __init__(self, cmd2_app: 'cmd2.Cmd') -> None:
self._cmd2_app = cmd2_app
self.cmd_echo = False
# Tells if any of the commands run via __call__ returned True for stop
self.stop = False
def __dir__(self) -> List[str]:
"""Return a custom set of attribute names"""
attributes: List[str] = []
attributes.insert(0, 'cmd_echo')
return attributes
def __call__(self, command: str, *, echo: Optional[bool] = None) -> CommandResult:
"""
Provide functionality to call application commands by calling PyBridge
ex: app('help')
:param command: command line being run
:param echo: If provided, this temporarily overrides the value of self.cmd_echo while the
command runs. If True, output will be echoed to stdout/stderr. (Defaults to None)
"""
if echo is None:
echo = self.cmd_echo
# This will be used to capture _cmd2_app.stdout and sys.stdout
copy_cmd_stdout = StdSim(cast(Union[TextIO, StdSim], self._cmd2_app.stdout), echo=echo)
# Pause the storing of stdout until onecmd_plus_hooks enables it
copy_cmd_stdout.pause_storage = True
# This will be used to capture sys.stderr
copy_stderr = StdSim(sys.stderr, echo=echo)
self._cmd2_app.last_result = None
stop = False
try:
self._cmd2_app.stdout = cast(TextIO, copy_cmd_stdout)
with redirect_stdout(cast(IO[str], copy_cmd_stdout)):
with redirect_stderr(cast(IO[str], copy_stderr)):
stop = self._cmd2_app.onecmd_plus_hooks(command, py_bridge_call=True)
finally:
with self._cmd2_app.sigint_protection:
self._cmd2_app.stdout = cast(IO[str], copy_cmd_stdout.inner_stream)
self.stop = stop or self.stop
# Save the result
result = CommandResult(
stdout=copy_cmd_stdout.getvalue(),
stderr=copy_stderr.getvalue(),
stop=stop,
data=self._cmd2_app.last_result,
)
return result
| python-cmd2/cmd2 | cmd2/py_bridge.py | Python | mit | 4,605 | 0.001303 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
class TransposicionGrupo(object):
"""
"""
def __init__(self, cadena=None, clave=None):
        self.cadena = cadena  # Receives a list; the length of each element is the length of the key
self.clave = clave
self.textoClaro = ""
self.textoCifrado = ""
        self.caracterRelleno = "₫"  # padding character
def cifrar(self, cantidadRellenoB64=0):
textoCifrado = ""
linea_a_cifrar = None
saltosLinea = len(self.cadena)-1
i = 0
for linea in self.cadena:
if i < saltosLinea:
linea_a_cifrar = self.dividirGrupos(linea,cantidadRellenoB64)
textoCifrado = textoCifrado + self.__cifrar(linea_a_cifrar) + "\n"
i += 1
else:
linea_a_cifrar = self.dividirGrupos(linea, cantidadRellenoB64)
textoCifrado = textoCifrado + self.__cifrar(linea_a_cifrar)
self.textoCifrado = textoCifrado
def descifrar(self, cantidadRellenoB64=0):
textoDescifrado = ""
linea_a_descifrar = None
saltosLinea = len(self.cadena)-1
i = 0
for linea in self.cadena:
if i < saltosLinea:
linea_a_descifrar = self.dividirGrupos(linea)
textoDescifrado = textoDescifrado + self.__descifrar(linea_a_descifrar) + "\n"
i += 1
else:
linea_a_descifrar = self.dividirGrupos(linea, cantidadRellenoB64)
textoDescifrado = textoDescifrado + self.__descifrar(linea_a_descifrar)
self.textoClaro = textoDescifrado
    #---------------------------------------------------------- Helper methods
def dividirGrupos(self, linea, cantidadRellenoB64=0):
lineaNueva = linea
tamanioLinea = len(linea)-cantidadRellenoB64
tamanioBloque = len(str(self.clave))
#print(tamanioLinea, tamanioBloque)
if tamanioLinea % tamanioBloque != 0:
lineaNueva = self.adicionarRelleno(linea, tamanioLinea, tamanioBloque)
tamanioLinea = len(lineaNueva)
nuevaCadena = list()
bloque = ""
i = 0
while i < tamanioLinea:
bloque = bloque + lineaNueva[i]
i += 1
if i % tamanioBloque == 0 and i > 0:
nuevaCadena.append(bloque)
bloque = ""
return nuevaCadena
def adicionarRelleno(self, linea, tamanioLinea, tamanioBloque):
if tamanioLinea % tamanioBloque == 0:
return linea
else:
linea = linea + self.caracterRelleno
return self.adicionarRelleno(linea ,len(linea), tamanioBloque)
def eliminarRelleno(self, cadena):
apareceRelleno = 0
nuevaLinea = ""
if len(cadena) > 1:
cadena.pop()
for linea in cadena:
apareceRelleno = linea.find(self.caracterRelleno)
nuevaLinea += linea[0:int(apareceRelleno)] +"\n"
return nuevaLinea
def intercambiar_cifrar(self, bloque, clave):
tamanioBloque = len(bloque)
claveStr = str(clave)
nuevoBloque = list()
i = 0
pos = 0
while i < tamanioBloque:
pos = int(claveStr[i])-1
nuevoBloque.insert(i, bloque[pos])
i += 1
nuevoBloque = ''.join(nuevoBloque)
return nuevoBloque
def intercambiar_descifrar(self, bloque, clave):
tamanioBloque = len(bloque)
claveStr = str(clave)
nuevoBloque = {}
bloqueDescifrado = list()
i = 0
pos = 0
while i < tamanioBloque:
pos = int(claveStr[i])-1
nuevoBloque.update({pos:bloque[i]})
i += 1
for llave, valor in nuevoBloque.items():
bloqueDescifrado.append(valor)
bloqueDescifrado = ''.join(bloqueDescifrado)
return bloqueDescifrado
    #----------------------------------------------------------------- Private methods
def __cifrar(self, linea_a_cifrar, cantidadRellenoB64=0):
lineaNueva = list()
for bloque in linea_a_cifrar:
lineaNueva.append(self.intercambiar_cifrar(bloque, self.clave))
lineaNueva = ''.join(lineaNueva)
return lineaNueva
def __descifrar(self, linea_a_descifrar, cantidadRellenoB64=0):
lineaNueva = list()
for bloque in linea_a_descifrar:
lineaNueva.append(self.intercambiar_descifrar(bloque, self.clave))
lineaNueva = ''.join(lineaNueva)
return lineaNueva | pordnajela/AlgoritmosCriptografiaClasica | Transposicion/TransposicionGrupo.py | Python | apache-2.0 | 3,845 | 0.036468 |
import ctypes
from . import cmarkgfm
from ..util.TypedTree import TypedTree
cmarkgfm.document_to_html.restype = ctypes.POINTER(ctypes.c_char)
class CmarkDocument(object):
def __init__(self, txt, encoding='utf_8'):
if not isinstance(txt, bytes):
txt = txt.encode(encoding=encoding)
self._doc = cmarkgfm.string_to_document(txt)
def toHTML(self):
result = cmarkgfm.document_to_html(self._doc)
out = ctypes.cast(result, ctypes.c_char_p).value.decode()
cmarkgfm.cmark_get_default_mem_allocator().contents.free(result)
return out
def toLatex(self):
result = cmarkgfm.document_to_latex(self._doc)
out = ctypes.cast(result, ctypes.c_char_p).value.decode()
cmarkgfm.cmark_get_default_mem_allocator().contents.free(result)
return out
def toAST(self):
return TypedTree.Build('Document', nodes=[self._toAST(c) for c in self._children(self._doc)])
##### AST GENERATION #####
@classmethod
def _children(cls, node):
out = [cmarkgfm.cmark_node_first_child(node)]
while out[-1]: # iterate until null pointer
out.append(cmarkgfm.cmark_node_next(out[-1]))
return tuple(out[:-1])
@classmethod
def _position(cls, node):
return TypedTree.Build('position',
r1=cmarkgfm.cmark_node_get_start_line(node),
c1=cmarkgfm.cmark_node_get_start_column(node),
r2=cmarkgfm.cmark_node_get_end_line(node),
c2=cmarkgfm.cmark_node_get_end_column(node))
@classmethod
def _toAST(cls, node, children=None, **attr):
tag = cmarkgfm.cmark_node_get_type_string(node).decode()
if tag == 'table' and children is None:
return cls._tableToAST(node)
elif tag == 'list' and len(attr) == 0:
return cls._listToAST(node)
if children is None:
children = [cls._toAST(c) for c in cls._children(node)]
if tag in {'text', 'code_block', 'code', 'html_block', 'html_inline', 'latex_block', 'latex_inline'}:
attr['Text'] = cmarkgfm.cmark_node_get_literal(node).decode()
if tag == 'heading':
attr['Level'] = cmarkgfm.cmark_node_get_heading_level(node)
if tag == 'code_block':
attr['Info'] = cmarkgfm.cmark_node_get_fence_info(node).decode()
if tag in {'link', 'image'}:
attr['Destination'] = cmarkgfm.cmark_node_get_url(node).decode()
attr['Title'] = cmarkgfm.cmark_node_get_title(node).decode()
return TypedTree.Build(tag, position=cls._position(node), children=children, **attr)
@classmethod
def _listToAST(cls, node):
attr = {
'Type': ['None', 'Bullet', 'Ordered'][cmarkgfm.cmark_node_get_list_type(node)],
'Tight': cmarkgfm.cmark_node_get_list_tight(node) != 0
}
if attr['Type'] == 'Ordered':
attr['Start'] = cmarkgfm.cmark_node_get_list_start(node)
attr['Delim'] = ["None", "Period", "Paren"][cmarkgfm.cmark_node_get_list_delim(node)]
return cls._toAST(node, **attr)
@classmethod
def _tableToAST(cls, node):
align = cmarkgfm.cmark_gfm_extensions_get_table_alignments(node)
rows = []
for tr in cls._children(node):
cols = []
for td, a in zip(cls._children(tr), align):
cols.append(cls._toAST(td, Alignment={'l': "Left", 'c': "Center", 'r': "Right"}.get(a,'Left')))
rows.append(cls._toAST(tr, children=cols))
return cls._toAST(node, children=rows) | daryl314/markdown-browser | pycmark/cmarkgfm/CmarkDocument.py | Python | mit | 3,664 | 0.002729 |
#!/usr/bin/python
# =======================================================================
# This file is part of MCLRE.
#
# MCLRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MCLRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCLRE. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2015 Augusto Queiroz de Macedo <augustoqmacedo@gmail.com>
# =======================================================================
"""
MRBPR Runner
"""
from os import path
from argparse import ArgumentParser
import shlex
import subprocess
import multiprocessing
import logging
from run_rec_functions import read_experiment_atts
from mrbpr.mrbpr_runner import create_meta_file, run
##############################################################################
# GLOBAL VARIABLES
##############################################################################
# Define the Logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(name)s : %(message)s',
level=logging.INFO)
LOGGER = logging.getLogger('mrbpr.run_rec_mrbpr')
LOGGER.setLevel(logging.INFO)
##############################################################################
# AUXILIAR FUNCTIONS
##############################################################################
def get_mrbpr_confs():
""" Yield the MRBPR Models Configurations """
pass
##############################################################################
# MAIN
##############################################################################
if __name__ == '__main__':
# ------------------------------------------------------------------------
# Define the argument parser
PARSER = ArgumentParser(description="Script that runs the mrbpr event recommender algorithms for" \
" a given 'experiment_name' with data from a given 'region'")
PARSER.add_argument("-e", "--experiment_name", type=str, required=True,
help="The Experiment Name (e.g. recsys-15)")
PARSER.add_argument("-r", "--region", type=str, required=True,
help="The data Region (e.g. san_jose)")
PARSER.add_argument("-a", "--algorithm", type=str, required=True,
help="The algorithm name (used only to differenciate our proposed MRBPR to the others")
ARGS = PARSER.parse_args()
EXPERIMENT_NAME = ARGS.experiment_name
REGION = ARGS.region
ALGORITHM_NAME = ARGS.algorithm
LOGGER.info(ALGORITHM_NAME)
DATA_DIR = "data"
PARTITIONED_DATA_DIR = path.join(DATA_DIR, "partitioned_data")
PARTITIONED_REGION_DATA_DIR = path.join(PARTITIONED_DATA_DIR, REGION)
EXPERIMENT_DIR = path.join(DATA_DIR, "experiments", EXPERIMENT_NAME)
EXPERIMENT_REGION_DATA_DIR = path.join(EXPERIMENT_DIR, REGION)
# LOGGER.info('Defining the MRBPR relation weights file...')
subprocess.call(shlex.split("Rscript %s %s %s" %
(path.join("src", "recommender_execution", "mrbpr", "mrbpr_relation_weights.R"),
EXPERIMENT_NAME, ALGORITHM_NAME)))
# ------------------------------------------------------------------------
# Reading and Defining the Experiment Attributes
EXPERIMENT_ATTS = read_experiment_atts(EXPERIMENT_DIR)
PARALLEL_RUNS = multiprocessing.cpu_count() - 1
TRAIN_RELATION_NAMES = EXPERIMENT_ATTS['%s_relation_names' % ALGORITHM_NAME.lower()]
TRAIN_RELATION_FILES = ["%s_train.tsv" % name for name in TRAIN_RELATION_NAMES]
PARTITIONS = reversed(EXPERIMENT_ATTS['partitions'])
# ------------------------------------------------------------------------
# Reading and Defining the Experiment Attributes
META_FILE = path.join(EXPERIMENT_DIR, "%s_meetup.meta" % ALGORITHM_NAME.lower())
LOGGER.info('Creating the META relations file...')
create_meta_file(TRAIN_RELATION_NAMES, META_FILE, PARTITIONED_DATA_DIR)
# ------------------------------------------------------------------------
# Fixed parameters
# ------------------------------------------------------------------------
# Algorithm (0 - MRBPR)
ALGORITHM = 0
# Size of the Ranked list of events per User
RANK_SIZE = 100
# Save Parameters
SAVE_MODEL = 0
# Hyper Parameters
REGULARIZATION_PER_ENTITY = ""
REGULARIZATION_PER_RELATION = ""
RELATION_WEIGHTS_FILE = path.join(EXPERIMENT_DIR, "%s_relation_weights.txt" % ALGORITHM_NAME.lower())
# ------------------------------------------------------------------------
if ALGORITHM_NAME == "MRBPR":
LEARN_RATES = [0.1]
NUM_FACTORS = [300]
NUM_ITERATIONS = [1500]
elif ALGORITHM_NAME == "BPR-NET":
LEARN_RATES = [0.1]
NUM_FACTORS = [200]
NUM_ITERATIONS = [600]
else:
LEARN_RATES = [0.1]
NUM_FACTORS = [10]
NUM_ITERATIONS = [10]
MRBPR_BIN_PATH = path.join("src", "recommender_execution", "mrbpr", "mrbpr.bin")
LOGGER.info("Start running MRBPR Process Scheduler!")
run(PARTITIONED_REGION_DATA_DIR, EXPERIMENT_REGION_DATA_DIR,
REGION, ALGORITHM, RANK_SIZE, SAVE_MODEL, META_FILE,
REGULARIZATION_PER_ENTITY, REGULARIZATION_PER_RELATION,
RELATION_WEIGHTS_FILE, TRAIN_RELATION_FILES,
PARTITIONS, NUM_ITERATIONS, NUM_FACTORS, LEARN_RATES,
MRBPR_BIN_PATH, PARALLEL_RUNS, ALGORITHM_NAME)
LOGGER.info("DONE!")
| augustoqm/MCLRE | src/recommender_execution/run_rec_mrbpr.py | Python | gpl-3.0 | 5,871 | 0.002044 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tipi.compat import unicode
from tipi.html import HTMLFragment
__all__ = ('Replacement', 'replace')
class Replacement(object):
"""Replacement representation."""
skipped_tags = (
'code', 'kbd', 'pre', 'samp', 'script', 'style', 'tt', 'xmp'
)
textflow_tags = (
'b', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'cite',
'dfn', 'em', 'kbd', 'strong', 'samp', 'var', 'a', 'bdo', 'q', 'script',
'span', 'sub', 'sup'
)
def __init__(self, pattern, replacement):
self.pattern = pattern
self.replacement = replacement
def _is_replacement_allowed(self, s):
"""Tests whether replacement is allowed on given piece of HTML text."""
if any(tag in s.parent_tags for tag in self.skipped_tags):
return False
if any(tag not in self.textflow_tags for tag in s.involved_tags):
return False
return True
def replace(self, html):
"""Perform replacements on given HTML fragment."""
self.html = html
text = html.text()
positions = []
def perform_replacement(match):
offset = sum(positions)
start, stop = match.start() + offset, match.end() + offset
s = self.html[start:stop]
if self._is_replacement_allowed(s):
repl = match.expand(self.replacement)
self.html[start:stop] = repl
else:
repl = match.group() # no replacement takes place
positions.append(match.end())
return repl
while True:
if positions:
text = text[positions[-1]:]
text, n = self.pattern.subn(perform_replacement, text, count=1)
if not n: # all is already replaced
break
def replace(html, replacements=None):
"""Performs replacements on given HTML string."""
if not replacements:
return html # no replacements
html = HTMLFragment(html)
for r in replacements:
r.replace(html)
return unicode(html)
| honzajavorek/tipi | tipi/repl.py | Python | mit | 2,146 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2014-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recoverableDatabases/{databaseName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_server_request(
subscription_id: str,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2014-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recoverableDatabases')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class RecoverableDatabasesOperations(object):
"""RecoverableDatabasesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> "_models.RecoverableDatabase":
"""Gets a recoverable database, which is a resource representing a database's geo backup.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RecoverableDatabase, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.RecoverableDatabase
:raises: ~azure.core.exceptions.HttpResponseError
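
        A hypothetical call, assuming a management client that exposes this
        operations group as ``recoverable_databases`` (resource names below
        are illustrative only)::

            database = client.recoverable_databases.get(
                resource_group_name='my-rg',
                server_name='my-server',
                database_name='my-db',
            )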
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoverableDatabase"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RecoverableDatabase', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recoverableDatabases/{databaseName}'} # type: ignore
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> Iterable["_models.RecoverableDatabaseListResult"]:
"""Gets a list of recoverable databases.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RecoverableDatabaseListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.RecoverableDatabaseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoverableDatabaseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
template_url=self.list_by_server.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_server_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("RecoverableDatabaseListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/recoverableDatabases'} # type: ignore
| Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_recoverable_databases_operations.py | Python | mit | 10,559 | 0.004072 |
import pytest
@pytest.mark.parametrize("text", ["ca.", "m.a.o.", "Jan.", "Dec.", "kr.", "jf."])
def test_da_tokenizer_handles_abbr(da_tokenizer, text):
tokens = da_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["Jul.", "jul.", "Tor.", "Tors."])
def test_da_tokenizer_handles_ambiguous_abbr(da_tokenizer, text):
tokens = da_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["1.", "10.", "31."])
def test_da_tokenizer_handles_dates(da_tokenizer, text):
tokens = da_tokenizer(text)
assert len(tokens) == 1
def test_da_tokenizer_handles_exc_in_text(da_tokenizer):
text = "Det er bl.a. ikke meningen"
tokens = da_tokenizer(text)
assert len(tokens) == 5
assert tokens[2].text == "bl.a."
def test_da_tokenizer_handles_custom_base_exc(da_tokenizer):
text = "Her er noget du kan kigge i."
tokens = da_tokenizer(text)
assert len(tokens) == 8
assert tokens[6].text == "i"
assert tokens[7].text == "."
@pytest.mark.parametrize(
"text,n_tokens",
[
("Godt og/eller skidt", 3),
("Kør 4 km/t på vejen", 5),
("Det blæser 12 m/s.", 5),
("Det blæser 12 m/sek. på havnen", 6),
("Windows 8/Windows 10", 5),
("Billeten virker til bus/tog/metro", 8),
("26/02/2019", 1),
("Kristiansen c/o Madsen", 3),
("Sprogteknologi a/s", 2),
("De boede i A/B Bellevue", 5),
# note: skipping due to weirdness in UD_Danish-DDT
# ("Rotorhastigheden er 3400 o/m.", 5),
("Jeg købte billet t/r.", 5),
("Murerarbejdsmand m/k søges", 3),
("Netværket kører over TCP/IP", 4),
],
)
def test_da_tokenizer_slash(da_tokenizer, text, n_tokens):
tokens = da_tokenizer(text)
assert len(tokens) == n_tokens
| explosion/spaCy | spacy/tests/lang/da/test_exceptions.py | Python | mit | 1,824 | 0.000551 |
import _plotly_utils.basevalidators
class LegendrankValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="legendrank", parent_name="scatter", **kwargs):
super(LegendrankValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scatter/_legendrank.py | Python | mit | 406 | 0.002463 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Gradient Boosted Trees Regression Example.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonGradientBoostedTreesRegressionExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a GradientBoostedTrees model.
# Notes: (a) Empty categoricalFeaturesInfo indicates all features are continuous.
# (b) Use more iterations in practice.
model = GradientBoostedTrees.trainRegressor(trainingData,
categoricalFeaturesInfo={}, numIterations=3)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testMSE = labelsAndPredictions.map(lambda vp: (vp[0] - vp[1]) * (vp[0] - vp[1])).sum() /\
        float(testData.count())
print('Test Mean Squared Error = ' + str(testMSE))
print('Learned regression GBT model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myGradientBoostingRegressionModel")
sameModel = GradientBoostedTreesModel.load(sc, "target/tmp/myGradientBoostingRegressionModel")
# $example off$
| chenc10/Spark-PAF | examples/src/main/python/mllib/gradient_boosting_regression_example.py | Python | apache-2.0 | 2,443 | 0.001637 |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
# flake8: noqa
from __future__ import unicode_literals
from django.test import TestCase
from unittest import expectedFailure
class SettingTests(TestCase):
@expectedFailure
def test_fails(self):
self.assertTrue(False)
| django-bmf/django-bmf | tests/appapis/test_sites_setting.py | Python | bsd-3-clause | 285 | 0 |
#!/usr/bin/env python
# Copyright 2012 Johns Hopkins University (author: Daniel Povey)
# Generate a topology file. This allows control of the number of states in the
# non-silence HMMs, and in the silence HMMs. This is a modified version of
# 'utils/gen_topo.pl' that generates a different type of topology, one that we
# believe should be useful in the 'chain' model. Note: right now it doesn't
# have any real options, and it treats silence and nonsilence the same. The
# intention is that you write different versions of this script, or add options,
# if you experiment with it.
from __future__ import print_function
import argparse
parser = argparse.ArgumentParser(description="Usage: steps/nnet3/chain/gen_topo.py "
"<colon-separated-nonsilence-phones> <colon-separated-silence-phones>"
"e.g.: steps/nnet3/chain/gen_topo.pl 4:5:6:7:8:9:10 1:2:3\n",
epilog="See egs/swbd/s5c/local/chain/train_tdnn_a.sh for example of usage.");
parser.add_argument("nonsilence_phones", type=str,
help="List of non-silence phones as integers, separated by colons, e.g. 4:5:6:7:8:9");
parser.add_argument("silence_phones", type=str,
help="List of silence phones as integers, separated by colons, e.g. 1:2:3");
args = parser.parse_args()
silence_phones = [ int(x) for x in args.silence_phones.split(":") ]
nonsilence_phones = [ int(x) for x in args.nonsilence_phones.split(":") ]
all_phones = silence_phones + nonsilence_phones
print("<Topology>")
print("<TopologyEntry>")
print("<ForPhones>")
print(" ".join([str(x) for x in all_phones]))
print("</ForPhones>")
print("<State> 0 <PdfClass> 0 <Transition> 0 0.5 <Transition> 1 0.5 </State>")
print("<State> 1 </State>")
print("</TopologyEntry>")
print("</Topology>")
| keighrim/kaldi-yesno-tutorial | steps/nnet3/chain/gen_topo3.py | Python | apache-2.0 | 1,879 | 0.008515 |
import random
class Decision(object):
def __init__(self, name, min_val, max_val):
self.name = name
self.min_val = min_val
self.max_val = max_val
def generate_valid_val(self):
return random.uniform(self.min_val, self.max_val)
def get_range(self):
return (self.min_val, self.max_val)
| rchakra3/generic-experiment-loop | model/helpers/decision.py | Python | gpl-2.0 | 339 | 0 |
#!/usr/bin/env python
# coding: utf-8
class CheckGear():
def __init__(self):
pass
def proc(self):
        print('test')
| sou-komatsu/checkgear | checkgear/checkgear.py | Python | mit | 138 | 0.007246 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.i18n import _LE
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups',
'pci_devices', 'tags']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'flavor', 'old_flavor',
'new_flavor', 'ec2_ids']
# These are fields that are optional and in instance_extra
_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests',
'flavor', 'vcpu_model']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS +
_INSTANCE_EXTRA_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups']
def _expected_cols(expected_attrs):
"""Return expected_attrs that are columns needing joining.
NB: This function may modify expected_attrs if one
requested attribute requires another.
"""
if not expected_attrs:
return expected_attrs
if ('system_metadata' in expected_attrs and
'flavor' not in expected_attrs):
# NOTE(danms): If the client asked for sysmeta, we have to
# pull flavor so we can potentially provide compatibility
expected_attrs.append('flavor')
simple_cols = [attr for attr in expected_attrs
if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
complex_cols = ['extra.%s' % field
for field in _INSTANCE_EXTRA_FIELDS
if field in expected_attrs]
if complex_cols:
simple_cols.append('extra')
simple_cols = filter(lambda x: x not in _INSTANCE_EXTRA_FIELDS,
simple_cols)
if (any([flavor in expected_attrs
for flavor in ['flavor', 'old_flavor', 'new_flavor']]) and
'system_metadata' not in simple_cols):
# NOTE(danms): While we're maintaining compatibility with
# flavor data being stored in system_metadata, we need to
# ask for it any time flavors are requested.
simple_cols.append('system_metadata')
expected_attrs.append('system_metadata')
return simple_cols + complex_cols
def compat_instance(instance):
"""Create a dict-like instance structure from an objects.Instance.
This is basically the same as nova.objects.base.obj_to_primitive(),
except that it includes some instance-specific details, like stashing
flavor information in system_metadata.
If you have a function (or RPC client) that needs to see the instance
as a dict that has flavor information in system_metadata, use this
to appease it (while you fix said thing).
:param instance: a nova.objects.Instance instance
:returns: a dict-based instance structure
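
    A hypothetical sketch of the intended call pattern (the receiving RPC
    client is illustrative, not part of nova)::

        legacy_dict = compat_instance(instance)
        legacy_rpc_client.update_instance(ctxt, legacy_dict)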
"""
if not isinstance(instance, objects.Instance):
return instance
db_instance = copy.deepcopy(base.obj_to_primitive(instance))
flavor_attrs = [('', 'flavor'), ('old_', 'old_flavor'),
('new_', 'new_flavor')]
for prefix, attr in flavor_attrs:
flavor = (instance.obj_attr_is_set(attr) and
getattr(instance, attr) or None)
if flavor:
# NOTE(danms): If flavor is unset or None, don't
# copy it into the primitive's system_metadata
db_instance['system_metadata'] = \
flavors.save_flavor_info(
db_instance.get('system_metadata', {}),
flavor, prefix)
if attr in db_instance:
del db_instance[attr]
return db_instance
# TODO(berrange): Remove NovaObjectDictCompat
class Instance(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added info_cache
# Version 1.2: Added security_groups
# Version 1.3: Added expected_vm_state and admin_state_reset to
# save()
# Version 1.4: Added locked_by and deprecated locked
# Version 1.5: Added cleaned
# Version 1.6: Added pci_devices
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
# Version 1.9: Make uuid a non-None real string
# Version 1.10: Added use_slave to refresh and get_by_uuid
# Version 1.11: Update instance from database during destroy
# Version 1.12: Added ephemeral_key_uuid
# Version 1.13: Added delete_metadata_key()
# Version 1.14: Added numa_topology
# Version 1.15: PciDeviceList 1.1
# Version 1.16: Added pci_requests
# Version 1.17: Added tags
# Version 1.18: Added flavor, old_flavor, new_flavor
# Version 1.19: Added vcpu_model
# Version 1.20: Added ec2_ids
VERSION = '1.20'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'image_ref': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'hostname': fields.StringField(nullable=True),
'launch_index': fields.IntegerField(nullable=True),
'key_name': fields.StringField(nullable=True),
'key_data': fields.StringField(nullable=True),
'power_state': fields.IntegerField(nullable=True),
'vm_state': fields.StringField(nullable=True),
'task_state': fields.StringField(nullable=True),
'memory_mb': fields.IntegerField(nullable=True),
'vcpus': fields.IntegerField(nullable=True),
'root_gb': fields.IntegerField(nullable=True),
'ephemeral_gb': fields.IntegerField(nullable=True),
'ephemeral_key_uuid': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'launched_on': fields.StringField(nullable=True),
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
'locked': fields.BooleanField(default=False),
'locked_by': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'vm_mode': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'root_device_name': fields.StringField(nullable=True),
'default_ephemeral_device': fields.StringField(nullable=True),
'default_swap_device': fields.StringField(nullable=True),
'config_drive': fields.StringField(nullable=True),
'access_ip_v4': fields.IPV4AddressField(nullable=True),
'access_ip_v6': fields.IPV6AddressField(nullable=True),
'auto_disk_config': fields.BooleanField(default=False),
'progress': fields.IntegerField(nullable=True),
'shutdown_terminate': fields.BooleanField(default=False),
'disable_terminate': fields.BooleanField(default=False),
'cell_name': fields.StringField(nullable=True),
'metadata': fields.DictOfStringsField(),
'system_metadata': fields.DictOfNullableStringsField(),
'info_cache': fields.ObjectField('InstanceInfoCache',
nullable=True),
'security_groups': fields.ObjectField('SecurityGroupList'),
'fault': fields.ObjectField('InstanceFault', nullable=True),
'cleaned': fields.BooleanField(default=False),
'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
'numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
'tags': fields.ObjectField('TagList'),
'flavor': fields.ObjectField('Flavor'),
'old_flavor': fields.ObjectField('Flavor', nullable=True),
'new_flavor': fields.ObjectField('Flavor', nullable=True),
'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True),
'ec2_ids': fields.ObjectField('EC2Ids'),
}
obj_extra_fields = ['name']
obj_relationships = {
'fault': [('1.0', '1.0'), ('1.13', '1.2')],
'info_cache': [('1.1', '1.0'), ('1.9', '1.4'), ('1.10', '1.5')],
'security_groups': [('1.2', '1.0')],
'pci_devices': [('1.6', '1.0'), ('1.15', '1.1')],
'numa_topology': [('1.14', '1.0'), ('1.16', '1.1')],
'pci_requests': [('1.16', '1.1')],
'tags': [('1.17', '1.0')],
'flavor': [('1.18', '1.1')],
'old_flavor': [('1.18', '1.1')],
'new_flavor': [('1.18', '1.1')],
'vcpu_model': [('1.19', '1.0')],
'ec2_ids': [('1.20', '1.0')],
}
def __init__(self, *args, **kwargs):
super(Instance, self).__init__(*args, **kwargs)
self._reset_metadata_tracking()
def _reset_metadata_tracking(self, fields=None):
if fields is None or 'system_metadata' in fields:
self._orig_system_metadata = (dict(self.system_metadata) if
'system_metadata' in self else {})
if fields is None or 'metadata' in fields:
self._orig_metadata = (dict(self.metadata) if
'metadata' in self else {})
def obj_reset_changes(self, fields=None):
super(Instance, self).obj_reset_changes(fields)
self._reset_metadata_tracking(fields=fields)
def obj_what_changed(self):
changes = super(Instance, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if 'system_metadata' in self and (self.system_metadata !=
self._orig_system_metadata):
changes.add('system_metadata')
return changes
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = super(Instance, cls)._obj_from_primitive(context, objver,
primitive)
self._reset_metadata_tracking()
return self
def obj_make_compatible(self, primitive, target_version):
super(Instance, self).obj_make_compatible(primitive, target_version)
target_version = utils.convert_version_to_tuple(target_version)
unicode_attributes = ['user_id', 'project_id', 'image_ref',
'kernel_id', 'ramdisk_id', 'hostname',
'key_name', 'key_data', 'host', 'node',
'user_data', 'availability_zone',
'display_name', 'display_description',
'launched_on', 'locked_by', 'os_type',
'architecture', 'vm_mode', 'root_device_name',
'default_ephemeral_device',
'default_swap_device', 'config_drive',
'cell_name']
if target_version < (1, 7):
# NOTE(danms): Before 1.7, we couldn't handle unicode in
# string fields, so squash it here
for field in [x for x in unicode_attributes if x in primitive
and primitive[x] is not None]:
primitive[field] = primitive[field].encode('ascii', 'replace')
if target_version < (1, 18):
if 'system_metadata' in primitive:
for ftype in ('', 'old_', 'new_'):
attrname = '%sflavor' % ftype
primitive.pop(attrname, None)
if self[attrname] is not None:
flavors.save_flavor_info(
primitive['system_metadata'],
getattr(self, attrname), ftype)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for key in self.fields:
if key == 'name':
# NOTE(danms): prevent recursion
continue
elif not self.obj_attr_is_set(key):
# NOTE(danms): Don't trigger lazy-loads
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@staticmethod
def _migrate_flavor(instance):
"""Migrate a fractional flavor to a full one stored in extra.
This method migrates flavor information stored in an instance's
system_metadata to instance_extra. Since the information in the
former is not complete, we must attempt to fetch the original
flavor by id to merge its extra_specs with what we store.
This is a transitional tool and can be removed in a later release
once we can ensure that everyone has migrated their instances
(likely the L release).
"""
# NOTE(danms): Always use admin context and read_deleted=yes here
# because we need to make sure we can look up our original flavor
# and try to reconstruct extra_specs, even if it has been deleted
ctxt = context.get_admin_context(read_deleted='yes')
instance.flavor = flavors.extract_flavor(instance)
flavors.delete_flavor_info(instance.system_metadata, '')
for ftype in ('old', 'new'):
attrname = '%s_flavor' % ftype
prefix = '%s_' % ftype
try:
flavor = flavors.extract_flavor(instance, prefix)
setattr(instance, attrname, flavor)
flavors.delete_flavor_info(instance.system_metadata, prefix)
except KeyError:
setattr(instance, attrname, None)
# NOTE(danms): Merge in the extra_specs from the original flavor
# since they weren't stored with the instance.
for flv in (instance.flavor, instance.new_flavor, instance.old_flavor):
if flv is not None:
try:
db_flavor = objects.Flavor.get_by_flavor_id(ctxt,
flv.flavorid)
except exception.FlavorNotFound:
continue
extra_specs = dict(db_flavor.extra_specs)
extra_specs.update(flv.get('extra_specs', {}))
flv.extra_specs = extra_specs
def _flavor_from_db(self, db_flavor):
"""Load instance flavor information from instance_extra."""
flavor_info = jsonutils.loads(db_flavor)
self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur'])
if flavor_info['old']:
self.old_flavor = objects.Flavor.obj_from_primitive(
flavor_info['old'])
else:
self.old_flavor = None
if flavor_info['new']:
self.new_flavor = objects.Flavor.obj_from_primitive(
flavor_info['new'])
else:
self.new_flavor = None
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
def _maybe_migrate_flavor(self, db_inst, expected_attrs):
"""Determine the proper place and format for flavor loading.
This method loads the flavor information into the instance. If
the information is already migrated to instance_extra, then we
load that. If it is in system_metadata, we migrate it to extra.
If, however, we're loading an instance for an older client and
the flavor has already been migrated, we need to stash it back
into system metadata, which we do here.
This is transitional and can be removed when we remove
_migrate_flavor().
"""
version = utils.convert_version_to_tuple(self.VERSION)
flavor_requested = any(
[flavor in expected_attrs
for flavor in ('flavor', 'old_flavor', 'new_flavor')])
flavor_implied = (version < (1, 18) and
'system_metadata' in expected_attrs)
# NOTE(danms): This is compatibility logic. If the flavor
# attributes were requested, then we do this load/migrate
# logic. However, if the instance is old, we might need to
# do it anyway in order to satisfy our sysmeta-based contract.
if not (flavor_requested or flavor_implied):
return False
migrated_flavor = False
if flavor_implied:
# This instance is from before flavors were migrated out of
# system_metadata. Make sure that we honor that.
if db_inst['extra']['flavor'] is not None:
self._flavor_from_db(db_inst['extra']['flavor'])
sysmeta = self.system_metadata
flavors.save_flavor_info(sysmeta, self.flavor)
del self.flavor
if self.old_flavor:
flavors.save_flavor_info(sysmeta, self.old_flavor, 'old_')
del self.old_flavor
if self.new_flavor:
flavors.save_flavor_info(sysmeta, self.new_flavor, 'new_')
del self.new_flavor
self.system_metadata = sysmeta
else:
# Migrate the flavor from system_metadata to extra,
# if needed
instance_extra = db_inst.get('extra') or {}
if instance_extra.get('flavor') is not None:
self._flavor_from_db(db_inst['extra']['flavor'])
elif 'instance_type_id' in self.system_metadata:
self._migrate_flavor(self)
migrated_flavor = True
return migrated_flavor
@staticmethod
def _from_db_object(context, instance, db_inst, expected_attrs=None):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
instance._context = context
if expected_attrs is None:
expected_attrs = []
# Most of the field names match right now, so be quick
for field in instance.fields:
if field in INSTANCE_OPTIONAL_ATTRS:
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
elif field == 'cleaned':
instance.cleaned = db_inst['cleaned'] == 1
else:
instance[field] = db_inst[field]
if 'metadata' in expected_attrs:
instance['metadata'] = utils.instance_meta(db_inst)
if 'system_metadata' in expected_attrs:
instance['system_metadata'] = utils.instance_sys_meta(db_inst)
if 'fault' in expected_attrs:
instance['fault'] = (
objects.InstanceFault.get_latest_for_instance(
context, instance.uuid))
if 'numa_topology' in expected_attrs:
instance._load_numa_topology(
db_inst.get('extra').get('numa_topology'))
if 'pci_requests' in expected_attrs:
instance._load_pci_requests(
db_inst.get('extra').get('pci_requests'))
if 'vcpu_model' in expected_attrs:
instance._load_vcpu_model(db_inst.get('extra').get('vcpu_model'))
if 'ec2_ids' in expected_attrs:
instance._load_ec2_ids()
if 'info_cache' in expected_attrs:
if db_inst['info_cache'] is None:
instance.info_cache = None
elif not instance.obj_attr_is_set('info_cache'):
# TODO(danms): If this ever happens on a backlevel instance
# passed to us by a backlevel service, things will break
instance.info_cache = objects.InstanceInfoCache(context)
if instance.info_cache is not None:
instance.info_cache._from_db_object(context,
instance.info_cache,
db_inst['info_cache'])
migrated_flavor = instance._maybe_migrate_flavor(db_inst,
expected_attrs)
# TODO(danms): If we are updating these on a backlevel instance,
# we'll end up sending back new versions of these objects (see
# above note for new info_caches
if 'pci_devices' in expected_attrs:
pci_devices = base.obj_make_list(
context, objects.PciDeviceList(context),
objects.PciDevice, db_inst['pci_devices'])
instance['pci_devices'] = pci_devices
if 'security_groups' in expected_attrs:
sec_groups = base.obj_make_list(
context, objects.SecurityGroupList(context),
objects.SecurityGroup, db_inst['security_groups'])
instance['security_groups'] = sec_groups
if 'tags' in expected_attrs:
tags = base.obj_make_list(
context, objects.TagList(context),
objects.Tag, db_inst['tags'])
instance['tags'] = tags
instance.obj_reset_changes()
if migrated_flavor:
# NOTE(danms): If we migrated the flavor above, we need to make
# sure we know that flavor and system_metadata have been
# touched so that the next save will update them. We can remove
# this when we remove _migrate_flavor().
instance._changed_fields.add('system_metadata')
instance._changed_fields.add('flavor')
return instance
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable_classmethod
def get_by_id(cls, context, inst_id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get(context, inst_id,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
if attr in updates]
if 'security_groups' in updates:
updates['security_groups'] = [x.name for x in
updates['security_groups']]
if 'info_cache' in updates:
updates['info_cache'] = {
'network_info': updates['info_cache'].network_info.json()
}
updates['extra'] = {}
numa_topology = updates.pop('numa_topology', None)
if numa_topology:
expected_attrs.append('numa_topology')
updates['extra']['numa_topology'] = numa_topology._to_json()
pci_requests = updates.pop('pci_requests', None)
if pci_requests:
expected_attrs.append('pci_requests')
updates['extra']['pci_requests'] = (
pci_requests.to_json())
flavor = updates.pop('flavor', None)
if flavor:
expected_attrs.append('flavor')
old = ((self.obj_attr_is_set('old_flavor') and
self.old_flavor) and
self.old_flavor.obj_to_primitive() or None)
new = ((self.obj_attr_is_set('new_flavor') and
self.new_flavor) and
self.new_flavor.obj_to_primitive() or None)
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': old,
'new': new,
}
updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
vcpu_model = updates.pop('vcpu_model', None)
if vcpu_model:
expected_attrs.append('vcpu_model')
updates['extra']['vcpu_model'] = (
jsonutils.dumps(vcpu_model.obj_to_primitive()))
db_inst = db.instance_create(self._context, updates)
self._from_db_object(self._context, self, db_inst, expected_attrs)
@base.remotable
def destroy(self):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
if not self.obj_attr_is_set('uuid'):
raise exception.ObjectActionError(action='destroy',
reason='no uuid')
if not self.obj_attr_is_set('host') or not self.host:
# NOTE(danms): If our host is not set, avoid a race
constraint = db.constraint(host=db.equal_any(None))
else:
constraint = None
try:
db_inst = db.instance_destroy(self._context, self.uuid,
constraint=constraint)
self._from_db_object(self._context, self, db_inst)
except exception.ConstraintNotMet:
raise exception.ObjectActionError(action='destroy',
reason='host changed')
delattr(self, base.get_attrname('id'))
def _save_info_cache(self, context):
if self.info_cache:
with self.info_cache.obj_alternate_context(context):
self.info_cache.save()
def _save_security_groups(self, context):
security_groups = self.security_groups or []
for secgroup in security_groups:
with secgroup.obj_alternate_context(context):
secgroup.save()
self.security_groups.obj_reset_changes()
def _save_fault(self, context):
# NOTE(danms): I don't think we need to worry about this, do we?
pass
def _save_numa_topology(self, context):
if self.numa_topology:
self.numa_topology.instance_uuid = self.uuid
with self.numa_topology.obj_alternate_context(context):
self.numa_topology._save()
else:
objects.InstanceNUMATopology.delete_by_instance_uuid(
context, self.uuid)
def _save_pci_requests(self, context):
# NOTE(danms): No need for this yet.
pass
def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only
        # the PCI tracker is permitted to update the DB. Any changes to
        # devices made from here would be dropped.
pass
def _save_flavor(self, context):
        # FIXME(danms): We could do this more cleverly by updating this
        # with all the other extra things at the same time
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': (self.old_flavor and
self.old_flavor.obj_to_primitive() or None),
'new': (self.new_flavor and
self.new_flavor.obj_to_primitive() or None),
}
db.instance_extra_update_by_uuid(
context, self.uuid,
{'flavor': jsonutils.dumps(flavor_info)})
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
def _save_old_flavor(self, context):
if 'old_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_new_flavor(self, context):
if 'new_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_vcpu_model(self, context):
# TODO(yjiang5): should merge the db accesses for all the extra
# fields
if 'vcpu_model' in self.obj_what_changed():
if self.vcpu_model:
update = jsonutils.dumps(self.vcpu_model.obj_to_primitive())
else:
update = None
db.instance_extra_update_by_uuid(
context, self.uuid,
{'vcpu_model': update})
def _save_ec2_ids(self, context):
# NOTE(hanlind): Read-only so no need to save this.
pass
def _maybe_upgrade_flavor(self):
# NOTE(danms): We may have regressed to flavors stored in sysmeta,
# so we have to merge back in here. That could happen if we pass
# a converted instance to an older node, which still stores the
# flavor in sysmeta, which then calls save(). We need to not
# store that flavor info back into sysmeta after we've already
# converted it.
if (not self.obj_attr_is_set('system_metadata') or
'instance_type_id' not in self.system_metadata):
return
LOG.debug('Transforming legacy flavors on save', instance=self)
for ftype in ('', 'old_', 'new_'):
attr = '%sflavor' % ftype
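            # Update the flavor in place when one is already set; otherwise
            # set the attribute, or store None when sysmeta has no flavor
            # info for this prefix.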
try:
flavor = flavors.extract_flavor(self, prefix=ftype)
flavors.delete_flavor_info(self.system_metadata, ftype)
# NOTE(danms): This may trigger a lazy-load of the flavor
# information, but only once and it avoids re-fetching and
# re-migrating the original flavor.
getattr(self, attr).update(flavor)
except AttributeError:
setattr(self, attr, flavor)
except KeyError:
setattr(self, attr, None)
@base.remotable
def save(self, expected_vm_state=None,
expected_task_state=None, admin_state_reset=False):
"""Save updates to this instance
Column-wise updates will be made based on the result of
self.what_changed(). If expected_task_state is provided,
it will be checked against the in-database copy of the
instance before updates are made.
:param:context: Security context
:param:expected_task_state: Optional tuple of valid task states
for the instance to be in
:param:expected_vm_state: Optional tuple of valid vm states
for the instance to be in
:param admin_state_reset: True if admin API is forcing setting
of task_state/vm_state
"""
        # Store this on the object because _cell_name_blocks_sync is useless
# after the db update call below.
self._sync_cells = not self._cell_name_blocks_sync()
context = self._context
cell_type = cells_opts.get_cell_type()
if cell_type == 'api' and self.cell_name:
# NOTE(comstud): We need to stash a copy of ourselves
# before any updates are applied. When we call the save
# methods on nested objects, we will lose any changes to
# them. But we need to make sure child cells can tell
# what is changed.
#
# We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True. Compute cells are
# authoritative for their view of vm_state and task_state.
stale_instance = self.obj_clone()
def _handle_cell_update_from_api():
if self._sync_cells:
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_from_api(context, stale_instance,
expected_vm_state,
expected_task_state,
admin_state_reset)
else:
stale_instance = None
self._maybe_upgrade_flavor()
updates = {}
changes = self.obj_what_changed()
for field in self.fields:
# NOTE(danms): For object fields, we construct and call a
# helper method like self._save_$attrname()
if (self.obj_attr_is_set(field) and
isinstance(self.fields[field], fields.ObjectField)):
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
LOG.exception(_LE('No save handler for %s'), field,
instance=self)
elif field in changes:
if (field == 'cell_name' and self[field] is not None and
self[field].startswith(cells_utils.BLOCK_SYNC_FLAG)):
updates[field] = self[field].replace(
cells_utils.BLOCK_SYNC_FLAG, '', 1)
else:
updates[field] = self[field]
if not updates:
if stale_instance:
_handle_cell_update_from_api()
return
# Cleaned needs to be turned back into an int here
if 'cleaned' in updates:
if updates['cleaned']:
updates['cleaned'] = 1
else:
updates['cleaned'] = 0
if expected_task_state is not None:
if (self.VERSION == '1.9' and
expected_task_state == 'image_snapshot'):
# NOTE(danms): Icehouse introduced a pending state which
# Havana doesn't know about. If we're an old instance,
# tolerate the pending state as well
expected_task_state = [
expected_task_state, 'image_snapshot_pending']
updates['expected_task_state'] = expected_task_state
if expected_vm_state is not None:
updates['expected_vm_state'] = expected_vm_state
expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
if self.obj_attr_is_set(attr)]
if 'pci_devices' in expected_attrs:
# NOTE(danms): We don't refresh pci_devices on save right now
expected_attrs.remove('pci_devices')
# NOTE(alaski): We need to pull system_metadata for the
# notification.send_update() below. If we don't there's a KeyError
# when it tries to extract the flavor.
# NOTE(danms): If we have sysmeta, we need flavor since the caller
# might be expecting flavor information as a result
if 'system_metadata' not in expected_attrs:
expected_attrs.append('system_metadata')
expected_attrs.append('flavor')
old_ref, inst_ref = db.instance_update_and_get_original(
context, self.uuid, updates, update_cells=False,
columns_to_join=_expected_cols(expected_attrs))
self._from_db_object(context, self, inst_ref,
expected_attrs=expected_attrs)
# NOTE(danms): We have to be super careful here not to trigger
# any lazy-loads that will unmigrate or unbackport something. So,
# make a copy of the instance for notifications first.
new_ref = self.obj_clone()
if stale_instance:
_handle_cell_update_from_api()
elif cell_type == 'compute':
if self._sync_cells:
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_at_top(context,
base.obj_to_primitive(new_ref))
notifications.send_update(context, old_ref, new_ref)
self.obj_reset_changes()
@base.remotable
def refresh(self, use_slave=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(self._context, uuid=self.uuid,
expected_attrs=extra,
use_slave=use_slave)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
current._context = None
for field in self.fields:
if self.obj_attr_is_set(field):
if field == 'info_cache':
self.info_cache.refresh()
elif self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def _load_generic(self, attrname):
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
# NOTE(danms): Never allow us to recursively-load
if instance.obj_attr_is_set(attrname):
self[attrname] = instance[attrname]
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='loading %s requires recursion' % attrname)
def _load_fault(self):
self.fault = objects.InstanceFault.get_latest_for_instance(
self._context, self.uuid)
def _load_numa_topology(self, db_topology=None):
if db_topology is not None:
self.numa_topology = \
objects.InstanceNUMATopology.obj_from_db_obj(self.uuid,
db_topology)
else:
try:
self.numa_topology = \
objects.InstanceNUMATopology.get_by_instance_uuid(
self._context, self.uuid)
except exception.NumaTopologyNotFound:
self.numa_topology = None
def _load_pci_requests(self, db_requests=None):
# FIXME: also do this if none!
if db_requests is not None:
self.pci_requests = objects.InstancePCIRequests.obj_from_db(
self._context, self.uuid, db_requests)
else:
self.pci_requests = \
objects.InstancePCIRequests.get_by_instance_uuid(
self._context, self.uuid)
def _load_flavor(self):
try:
instance = self.__class__.get_by_uuid(
self._context, uuid=self.uuid,
expected_attrs=['flavor', 'system_metadata'])
except exception.InstanceNotFound:
# NOTE(danms): Before we had instance types in system_metadata,
# we just looked up the instance_type_id. Since we could still
            # have an instance in the database that doesn't have either of
            # the newer setups, mirror the original behavior here if the
            # instance is deleted.
if not self.deleted:
raise
self.flavor = objects.Flavor.get_by_id(self._context,
self.instance_type_id)
self.old_flavor = None
self.new_flavor = None
return
# NOTE(danms): Orphan the instance to make sure we don't lazy-load
# anything below
instance._context = None
self.flavor = instance.flavor
self.old_flavor = instance.old_flavor
self.new_flavor = instance.new_flavor
# NOTE(danms): The query above may have migrated the flavor from
# system_metadata. Since we have it anyway, go ahead and refresh
# our system_metadata from it so that a save will be accurate.
instance.system_metadata.update(self.get('system_metadata', {}))
self.system_metadata = instance.system_metadata
def _load_vcpu_model(self, db_vcpu_model=None):
if db_vcpu_model is None:
self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
self._context, self.uuid)
else:
db_vcpu_model = jsonutils.loads(db_vcpu_model)
self.vcpu_model = objects.VirtCPUModel.obj_from_primitive(
db_vcpu_model)
def _load_ec2_ids(self):
self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)
def obj_load_attr(self, attrname):
if attrname not in INSTANCE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if ('flavor' in attrname and
self.obj_attr_is_set('system_metadata') and
'instance_type_id' in self.system_metadata):
# NOTE(danms): Looks like we're loading a flavor, and that
# should be doable without a context, so do this before the
# orphan check below.
self._migrate_flavor(self)
if self.obj_attr_is_set(attrname):
return
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
# NOTE(danms): We handle some fields differently here so that we
# can be more efficient
if attrname == 'fault':
self._load_fault()
elif attrname == 'numa_topology':
self._load_numa_topology()
elif attrname == 'pci_requests':
self._load_pci_requests()
elif attrname == 'vcpu_model':
self._load_vcpu_model()
elif attrname == 'ec2_ids':
self._load_ec2_ids()
elif 'flavor' in attrname:
self._load_flavor()
else:
# FIXME(comstud): This should be optimized to only load the attr.
self._load_generic(attrname)
self.obj_reset_changes([attrname])
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
try:
return getattr(self, attr)
except exception.FlavorNotFound:
# NOTE(danms): This only happens in the case where we don't
# have flavor information in sysmeta or extra, and doing
# this triggers a lookup based on our instance_type_id for
# (very) legacy instances. That legacy code expects a None here,
# so emulate it for this helper, even though the actual attribute
# is not nullable.
return None
def set_flavor(self, flavor, namespace=None):
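        # Accepts either a Flavor object or a dict of flavor fields; the
        # instance is saved immediately after the attribute is set.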
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
setattr(self, attr, flavor)
self.save()
def delete_flavor(self, namespace):
prefix = ('%s_' % namespace) if namespace else ''
attr = '%sflavor' % prefix
setattr(self, attr, None)
self.save()
@base.remotable
def delete_metadata_key(self, key):
"""Optimized metadata delete method.
This provides a more efficient way to delete a single metadata
key, instead of just calling instance.save(). This should be called
with the key still present in self.metadata, which it will update
after completion.
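
        Illustrative usage (assuming 'foo' is currently a key in
        self.metadata)::

            instance.delete_metadata_key('foo')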
"""
db.instance_metadata_delete(self._context, self.uuid, key)
md_was_changed = 'metadata' in self.obj_what_changed()
del self.metadata[key]
self._orig_metadata.pop(key, None)
notifications.send_update(self._context, self, self)
if not md_was_changed:
self.obj_reset_changes(['metadata'])
def _cell_name_blocks_sync(self):
if (self.obj_attr_is_set('cell_name') and
self.cell_name is not None and
self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG)):
return True
return False
def _normalize_cell_name(self):
"""Undo skip_cell_sync()'s cell_name modification if applied"""
if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
return
cn_changed = 'cell_name' in self.obj_what_changed()
if self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG):
self.cell_name = self.cell_name.replace(
cells_utils.BLOCK_SYNC_FLAG, '', 1)
            # cell_name is not normally an empty string; if it is, it was None
            # or unset before cells_utils.BLOCK_SYNC_FLAG was applied.
if len(self.cell_name) == 0:
self.cell_name = None
if not cn_changed:
self.obj_reset_changes(['cell_name'])
@contextlib.contextmanager
def skip_cells_sync(self):
"""Context manager to save an instance without syncing cells.
Temporarily disables the cells syncing logic, if enabled. This should
only be used when saving an instance that has been passed down/up from
another cell in order to avoid passing it back to the originator to be
re-saved.
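
        Illustrative usage, where ``instance`` was received from another
        cell::

            with instance.skip_cells_sync():
                instance.save()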
"""
cn_changed = 'cell_name' in self.obj_what_changed()
if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
self.cell_name = ''
self.cell_name = '%s%s' % (cells_utils.BLOCK_SYNC_FLAG, self.cell_name)
if not cn_changed:
self.obj_reset_changes(['cell_name'])
try:
yield
finally:
self._normalize_cell_name()
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
inst_faults = {}
if get_fault:
# Build an instance_uuid:latest-fault mapping
expected_attrs.remove('fault')
instance_uuids = [inst['uuid'] for inst in db_inst_list]
faults = objects.InstanceFaultList.get_by_instance_uuids(
context, instance_uuids)
for fault in faults:
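            # Keep only the first fault seen for each instance; the query is
            # expected to return the newest faults first.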
if fault.instance_uuid not in inst_faults:
inst_faults[fault.instance_uuid] = fault
inst_list.objects = []
for db_inst in db_inst_list:
inst_obj = objects.Instance._from_db_object(
context, objects.Instance(context), db_inst,
expected_attrs=expected_attrs)
if get_fault:
inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
inst_list.objects.append(inst_obj)
inst_list.obj_reset_changes()
return inst_list
class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added use_slave to get_by_host
# Instance <= version 1.9
# Version 1.2: Instance <= version 1.11
# Version 1.3: Added use_slave to get_by_filters
# Version 1.4: Instance <= version 1.12
# Version 1.5: Added method get_active_by_window_joined.
# Version 1.6: Instance <= version 1.13
# Version 1.7: Added use_slave to get_active_by_window_joined
# Version 1.8: Instance <= version 1.14
# Version 1.9: Instance <= version 1.15
# Version 1.10: Instance <= version 1.16
# Version 1.11: Added sort_keys and sort_dirs to get_by_filters
# Version 1.12: Pass expected_attrs to instance_get_active_by_window_joined
# Version 1.13: Instance <= version 1.17
# Version 1.14: Instance <= version 1.18
# Version 1.15: Instance <= version 1.19
# Version 1.16: Added get_all() method
# Version 1.17: Instance <= version 1.20
VERSION = '1.17'
fields = {
'objects': fields.ListOfObjectsField('Instance'),
}
child_versions = {
'1.1': '1.9',
# NOTE(danms): Instance was at 1.9 before we added this
'1.2': '1.11',
'1.3': '1.11',
'1.4': '1.12',
'1.5': '1.12',
'1.6': '1.13',
'1.7': '1.13',
'1.8': '1.14',
'1.9': '1.15',
'1.10': '1.16',
'1.11': '1.16',
'1.12': '1.16',
'1.13': '1.17',
'1.14': '1.18',
'1.15': '1.19',
'1.16': '1.19',
'1.17': '1.20',
}
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None, use_slave=False,
sort_keys=None, sort_dirs=None):
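        # Multi-key sorting goes through the *_sort variant of the DB API;
        # otherwise fall back to the single sort_key/sort_dir call.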
if sort_keys or sort_dirs:
db_inst_list = db.instance_get_all_by_filters_sort(
context, filters, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
else:
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit,
marker=marker, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
db_inst_list = db.instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_node(
context, host, node,
columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_not_type(cls, context, host, type_id=None,
expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_not_type(
context, host, type_id=type_id)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_all(cls, context, expected_attrs=None):
"""Returns all instances on all nodes."""
db_instances = db.instance_get_all(
context, columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_instances,
expected_attrs)
@base.remotable_classmethod
def get_hung_in_rebooting(cls, context, reboot_window,
expected_attrs=None):
db_inst_list = db.instance_get_all_hung_in_rebooting(context,
reboot_window)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def _get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None,
use_slave=False):
# NOTE(mriedem): We need to convert the begin/end timestamp strings
# to timezone-aware datetime objects for the DB API call.
begin = timeutils.parse_isotime(begin)
end = timeutils.parse_isotime(end) if end else None
db_inst_list = db.instance_get_active_by_window_joined(
context, begin, end, project_id, host,
columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@classmethod
def get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None,
use_slave=False):
"""Get instances and joins active during a certain time window.
        :param context: nova request context
        :param begin: datetime for the start of the time window
        :param end: datetime for the end of the time window
        :param project_id: used to filter instances by project
        :param host: used to filter instances on a given compute host
        :param expected_attrs: list of related fields that can be joined
            in the database layer when querying for instances
        :param use_slave: if True, ship this query off to a DB slave
        :returns: InstanceList
"""
# NOTE(mriedem): We have to convert the datetime objects to string
# primitives for the remote call.
begin = timeutils.isotime(begin)
end = timeutils.isotime(end) if end else None
return cls._get_active_by_window_joined(context, begin, end,
project_id, host,
expected_attrs,
use_slave=use_slave)
@base.remotable_classmethod
def get_by_security_group_id(cls, context, security_group_id):
db_secgroup = db.security_group_get(
context, security_group_id,
columns_to_join=['instances.info_cache',
'instances.system_metadata'])
return _make_instance_list(context, cls(), db_secgroup['instances'],
['info_cache', 'system_metadata'])
@classmethod
def get_by_security_group(cls, context, security_group):
return cls.get_by_security_group_id(context, security_group.id)
def fill_faults(self):
"""Batch query the database for our instances' faults.
:returns: A list of instance uuids for which faults were found.
"""
uuids = [inst.uuid for inst in self]
faults = objects.InstanceFaultList.get_by_instance_uuids(
self._context, uuids)
faults_by_uuid = {}
for fault in faults:
if fault.instance_uuid not in faults_by_uuid:
faults_by_uuid[fault.instance_uuid] = fault
for instance in self:
if instance.uuid in faults_by_uuid:
instance.fault = faults_by_uuid[instance.uuid]
else:
# NOTE(danms): Otherwise the caller will cause a lazy-load
# when checking it, and we know there are none
instance.fault = None
instance.obj_reset_changes(['fault'])
return faults_by_uuid.keys()
| bgxavier/nova | nova/objects/instance.py | Python | apache-2.0 | 57,332 | 0.000157 |
from datetime import timedelta
import markus
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from normandy.recipes.models import Recipe
from normandy.recipes.exports import RemoteSettings
metrics = markus.get_metrics("normandy.signing.recipes")
class Command(BaseCommand):
"""
Update signatures for enabled Recipes that have no signature or an old signature
"""
help = "Update Recipe signatures"
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
"-f", "--force", action="store_true", help="Update signatures for all recipes"
)
def handle(self, *args, force=False, **options):
remote_settings = RemoteSettings()
if force:
recipes_to_update = Recipe.objects.only_enabled()
else:
recipes_to_update = self.get_outdated_recipes()
count = recipes_to_update.count()
if count == 0:
self.stdout.write("No out of date recipes to sign")
else:
self.stdout.write(f"Signing {count} recipes:")
for recipe in recipes_to_update:
self.stdout.write(" * " + recipe.approved_revision.name)
recipe.update_signature()
recipe.save()
remote_settings.publish(recipe, approve_changes=False)
# Approve all Remote Settings changes.
remote_settings.approve_changes()
metrics.gauge("signed", count, tags=["force"] if force else [])
recipes_to_unsign = Recipe.objects.only_disabled().exclude(signature=None)
count = recipes_to_unsign.count()
if count == 0:
self.stdout.write("No disabled recipes to unsign")
else:
self.stdout.write(f"Unsigning {count} disabled recipes:")
for recipe in recipes_to_unsign:
self.stdout.write(" * " + recipe.approved_revision.name)
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
metrics.gauge("unsigned", count, tags=["force"] if force else [])
self.stdout.write("all signing done")
def get_outdated_recipes(self):
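        # A recipe is outdated when its signature is older than the
        # configured max age or missing entirely.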
outdated_age = timedelta(seconds=settings.AUTOGRAPH_SIGNATURE_MAX_AGE)
outdated_filter = Q(signature__timestamp__lt=timezone.now() - outdated_age)
missing_filter = Q(signature=None)
return Recipe.objects.only_enabled().filter(outdated_filter | missing_filter)
| mozilla/normandy | normandy/recipes/management/commands/update_recipe_signatures.py | Python | mpl-2.0 | 2,612 | 0.001914 |
import platform
def is_windows():
"""Returns true if current platform is windows"""
return any(platform.win32_ver()) | huvermann/MyPiHomeAutomation | HomeAutomation/thingUtils.py | Python | mit | 125 | 0.016 |
import threading
import Queue
import statvent
jobs = Queue.Queue()
done = Queue.Queue()
def do_inc():
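    # Worker loop: consume jobs until a None sentinel arrives, incrementing
    # the shared 'thread.test' stat once per job.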
while True:
job = jobs.get()
if job is None:
done.put(None)
break
statvent.incr('thread.test')
def test_10k_iterations_in_N_threads_results_in_10k_incrs():
n = 25
threads = []
for i in xrange(n):
t = threading.Thread(target=do_inc)
t.start()
threads.append(t)
for i in xrange(5000):
jobs.put(i)
for i in xrange(n):
jobs.put(None)
for i in xrange(n):
done.get()
actual = statvent.get_all()['thread.test']
assert actual == 5000, actual
| dowski/statvent | tests/test_thread_safety.py | Python | bsd-2-clause | 677 | 0.004431 |
import logging
import colorlog
from logging.config import fileConfig
import json
class mylogger():
def __init__(self, sdict, logfn):
fileConfig('./logging_config.ini', defaults={'logfilename': logfn})
self.logger = logging.getLogger()
self.sdict = sdict
        # save or open from json file
def __call__(self, type, key, value, msg):
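        # type selects the log level: 1=info, 2=warning, 3=error, 4=exception.
        # When key is truthy, the (key, value) pair is also stored in sdict.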
if key:
self.sdict[key] = value
if type==1:
self.logger.info(msg)
elif type==2:
self.logger.warning(msg)
elif type==3:
self.logger.error(msg)
elif type==4:
self.logger.exception(msg)
| balbinot/arghphot | arghphot/logutil.py | Python | mit | 652 | 0.010736 |
class Solution:
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
if not prices:
return 0
n = len(prices)
m1 = [0] * n
m2 = [0] * n
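        # m1[i]: best single-transaction profit within prices[0..i]
        # m2[i]: best single-transaction profit within prices[i..n-1]
        # The answer is the best split, max(m1[i] + m2[i]).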
max_profit1 = 0
min_price1 = prices[0]
max_profit2 = 0
max_price2 = prices[-1]
for i in range(n):
max_profit1 = max(max_profit1, prices[i] - min_price1)
m1[i] = max_profit1
min_price1 = min(min_price1, prices[i])
for i in range(n):
max_profit2 = max(max_profit2, max_price2 - prices[n - 1 - i])
m2[n - 1 - i] = max_profit2
max_price2 = max(max_price2, prices[n - 1 - i])
max_profit = 0
for i in range(n):
max_profit = max(m1[i] + m2[i], max_profit)
return max_profit
| JiaminXuan/leetcode-python | best_time_to_buy_and_sell_stock_iii/solution.py | Python | bsd-2-clause | 846 | 0 |
import sys
import argparse
import numpy as np
def parseArgument():
# Parse the input
parser = argparse.ArgumentParser(description="Process results from LOLA")
parser.add_argument("--lolaResultsFileNameListFileName", required=True, help="List of file names with LOLA results")
parser.add_argument("--lolaHeadersFileName", required=True, help="Headers for database region lists used in LOLA")
parser.add_argument("--lolaHeadersExcludeFileName", required=False, default=None, \
help="Headers for database region lists used in LOLA that should not be included, should be subset of lolaHeadersFileName")
parser.add_argument("--fileNamePartsInHeader", action='append', type=int, required=False, default=[1], \
help="Parts of the file name that are in the lola headers, where parts are separated by .'s")
parser.add_argument("--outputFileName", required=True, help="Name where p-values from LOLA will be recorded")
parser.add_argument("--singleFile", action='store_true', required=False, \
help="lolaResultsFileNameListFileName is the name of a file with LOLA results and not a file with a list of LOLA results file names")
parser.add_argument("--outputLog", action='store_true', required=False, \
help="Output the -log10 of the p-values and no headers")
options = parser.parse_args()
return options
def processLolaResults(options):
# Process results from LOLA
outputFile = open(options.outputFileName, 'w+')
if not options.outputLog:
# Include the headers
outputFile.write("TF")
lolaHeadersFile = open(options.lolaHeadersFileName)
lolaHeaders = [line.strip() for line in lolaHeadersFile]
lolaHeadersFile.close()
lolaHeadersExclude = []
if options.lolaHeadersExcludeFileName != None:
# There are headers that should be excluded
lolaHeadersExcludeFile = open(options.lolaHeadersExcludeFileName)
lolaHeadersExclude = [line.strip() for line in lolaHeadersExcludeFile]
lolaHeadersExcludeFile.close()
if not options.outputLog:
# Include the headers
for lh in lolaHeaders:
# Iterate through the headers and record each
if lh not in lolaHeadersExclude:
# The current header should be included
outputFile.write("\t" + lh)
outputFile.write("\n")
lolaResultsFileNameList = []
if options.singleFile:
# The inputted file name is a file with LOLA results
lolaResultsFileNameList = [options.lolaResultsFileNameListFileName]
else:
# The inputted file name is a list of files with LOLA results
lolaResultsFileNameListFile = open(options.lolaResultsFileNameListFileName)
lolaResultsFileNameList = [line.strip() for line in lolaResultsFileNameListFile]
lolaResultsFileNameListFile.close()
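    # Bonferroni correction: the number of tests is (number of included
    # categories) x (number of results files).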
numTests = (len(lolaHeaders) - len(lolaHeadersExclude)) * len(lolaResultsFileNameList)
for lolaResultsFileName in lolaResultsFileNameList:
# Iterate through the results files and record the p-value for each TF in each category
if not options.outputLog:
# Include the headers
TF = lolaResultsFileName.split("/")[-1].split(".")[0].split("_")[0]
outputFile.write(TF + "\t")
lolaResultsFile = open(lolaResultsFileName)
for line in lolaResultsFile:
# Iterate through the categories and record the Bonferroni-corrected p-value for each
lineElements = line.strip().split("\t")
currentHeaderElements = lineElements[20].split(".")
currentHeaderElementsFilt = [currentHeaderElements[fnp] for fnp in options.fileNamePartsInHeader]
currentHeader = ".".join(currentHeaderElementsFilt)
if currentHeader in lolaHeadersExclude:
# Skip the current category
continue
if not options.outputLog:
# Output the p-value
pVal = (10 ** (0 - float(lineElements[3]))) * numTests
pValStr = str(pVal)
if pVal > 1:
# Change the p-value string to be > 1
pValStr = "> 1"
outputFile.write(pValStr + "\t")
else:
# Output the -log10 of the p-value
pVal = float(lineElements[3]) - np.log10(numTests)
if pVal < 0:
# The p-value is > 1, so set its -log10 to 0
pVal = 0.0
if pVal > 250:
# The p-value is really small, so set its -log10 to 250
pVal = 250.0
outputFile.write(str(pVal) + "\t")
outputFile.write("\n")
outputFile.close()
if __name__ == "__main__":
options = parseArgument()
processLolaResults(options)
| imk1/IMKTFBindingCode | processLolaResults.py | Python | mit | 4,260 | 0.026761 |
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import shutil
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_monetaryunitds()
self.setup_network(True)
def sync_all(self):
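        # When the network is split, sync each half separately; otherwise
        # sync all nodes together.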
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_monetaryunitds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave monetaryunitds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing monetaryunitd/monetaryunit-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.nocleanup:
print("Cleaning up")
#stop_nodes(self.nodes)
#wait_monetaryunitds()
#shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
| bankonme/MUE-Src | qa/rpc-tests/test_framework.py | Python | mit | 4,729 | 0.003383 |
"""
Django signals for the app.
"""
import logging
from django.db.models.signals import post_save
from django.conf import settings
from django.contrib.sites.models import Site
from .models import Response, UnitLesson
from .ct_util import get_middle_indexes
from core.common.mongo import c_milestone_orct
from core.common.utils import send_email, suspending_receiver
log = logging.getLogger(__name__)
@suspending_receiver(post_save, sender=Response)
def run_courselet_notif_flow(sender, instance, **kwargs):
# TODO: add check that Response has a text, as an obj can be created before a student submits
# TODO: exclude self eval submissions other than a response submission (e.g. "just guessing")
if (instance.kind == Response.ORCT_RESPONSE and not
(instance.unitLesson.kind == UnitLesson.RESOLVES or
instance.is_test or instance.is_preview or not instance.unitLesson.order)):
course = instance.course
course_id = course.id if course else None
instructors = course.get_users(role="prof")
lesson = instance.lesson
lesson_id = lesson.id if lesson else None
student = instance.author
student_id = student.id if student else None
unit_lesson = instance.unitLesson
unit_lesson_id = unit_lesson.id if unit_lesson else None # it's a thread
# Exclude instructors, e.g. the ones submitting in preview mode
for instructor in instructors:
if student_id == instructor.id:
return
# Define if it's a milestone question (either first, middle, or last)
milestone = None
questions = unit_lesson.unit.all_orct()
i = [_[0] for _ in questions.values_list('id')].index(unit_lesson_id)
if i == 0:
milestone = "first"
elif i == len(questions) - 1:
milestone = "last"
elif i in get_middle_indexes(questions):
milestone = "middle" # TODO consider returning a single number
# If milestone, store the record
if milestone:
to_save = {
"milestone": milestone,
"lesson_title": lesson.title if lesson else None,
"lesson_id": lesson_id,
"unit_lesson_id": unit_lesson_id,
"course_title": course.title if course else None,
"course_id": course_id,
"student_username": student.username if student else None,
"student_id": student_id,
# "datetime": datetime.datetime.now() # TODO: consider changing to UTC (and making it a timestamp)
}
# Do not store if such `student_id`-`lesson_id` row is already present
milestone_orct_answers_cursor = c_milestone_orct(use_secondary=False).find({
"milestone": milestone,
"lesson_id": lesson_id
})
initial_milestone_orct_answers_number = milestone_orct_answers_cursor.count()
milestone_orct_answers = (a for a in milestone_orct_answers_cursor)
already_exists = False
for answer in milestone_orct_answers:
if answer.get("student_id") == student_id:
already_exists = True
break
if not already_exists:
c_milestone_orct(use_secondary=False).save(to_save)
milestone_orct_answers_number = initial_milestone_orct_answers_number + 1
# If N students responded to a milestone question, send an email.
# The threshold holds for each milestone separately.
if milestone_orct_answers_number == settings.MILESTONE_ORCT_NUMBER:
context_data = {
"milestone": milestone,
"students_number": milestone_orct_answers_number,
"course_title": course.title if course else None,
"lesson_title": lesson.title if lesson else None,
"current_site": Site.objects.get_current(),
"course_id": course_id,
"unit_lesson_id": unit_lesson_id,
"courselet_pk": unit_lesson.unit.id if unit_lesson.unit else None
} # pragma: no cover
log.info("""Courselet notification with data:
Course title - {course_title},
Lesson title - {lesson_title},
Students number - {students_number},
Unit lesson id - {unit_lesson_id},
Course id - {course_id},
Milestone - {milestone}
""".format(**context_data)) # pragma: no cover
send_email(
context_data=context_data,
from_email=settings.EMAIL_FROM,
to_email=[instructor.email for instructor in instructors],
template_subject="ct/email/milestone_ortc_notify_subject",
template_text="ct/email/milestone_ortc_notify_text"
)
| cjlee112/socraticqs2 | mysite/ct/signals.py | Python | apache-2.0 | 5,199 | 0.002693 |
from abc import ABCMeta
from recommenders.similarity.weights_similarity_matrix_builder import \
WeightsSimilarityMatrixBuilder
from tripadvisor.fourcity import extractor
from recommenders.base_recommender import BaseRecommender
from utils import dictionary_utils
__author__ = 'fpena'
class MultiCriteriaBaseRecommender(BaseRecommender):
__metaclass__ = ABCMeta
def __init__(
self, name, similarity_metric=None,
significant_criteria_ranges=None):
super(MultiCriteriaBaseRecommender, self).__init__(name, None)
self._significant_criteria_ranges = significant_criteria_ranges
self._similarity_matrix_builder = WeightsSimilarityMatrixBuilder(similarity_metric)
self.user_cluster_dictionary = None
def load(self, reviews):
self.reviews = reviews
self.user_ids = extractor.get_groupby_list(self.reviews, 'user_id')
self.user_dictionary =\
extractor.initialize_cluster_users(self.reviews, self._significant_criteria_ranges)
self.user_cluster_dictionary = self.build_user_clusters(
self.reviews, self._significant_criteria_ranges)
if self._similarity_matrix_builder._similarity_metric is not None:
self.user_similarity_matrix =\
self._similarity_matrix_builder.build_similarity_matrix(
self.user_dictionary, self.user_ids)
def clear(self):
super(MultiCriteriaBaseRecommender, self).clear()
self.user_cluster_dictionary = None
# TODO: Add the item_id as a parameter in order to optimize the method
def get_neighbourhood(self, user_id):
cluster_name = self.user_dictionary[user_id].cluster
cluster_users = list(self.user_cluster_dictionary[cluster_name])
cluster_users.remove(user_id)
# We remove the given user from the cluster in order to avoid bias
if self._num_neighbors is None:
return cluster_users
similarity_matrix = self.user_similarity_matrix[user_id].copy()
similarity_matrix.pop(user_id, None)
ordered_similar_users = dictionary_utils.sort_dictionary_keys(
similarity_matrix)
intersection_set = set.intersection(set(ordered_similar_users), set(cluster_users))
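        # Restrict the neighbourhood to users in the same cluster while
        # preserving the similarity ordering.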
intersection_lst = [t for t in ordered_similar_users if t in intersection_set]
return intersection_lst # [:self._num_neighbors]
@staticmethod
def build_user_clusters(reviews, significant_criteria_ranges=None):
"""
Builds a series of clusters for users according to their significant
criteria. Users that have exactly the same significant criteria will belong
to the same cluster.
:param reviews: the list of reviews
        :return: a dictionary where all the keys are the cluster names and the
        values for those keys are lists of users that belong to that cluster
"""
user_list = extractor.get_groupby_list(reviews, 'user_id')
user_cluster_dictionary = {}
for user in user_list:
weights = extractor.get_criteria_weights(reviews, user)
significant_criteria, cluster_name =\
extractor.get_significant_criteria(weights, significant_criteria_ranges)
if cluster_name in user_cluster_dictionary:
user_cluster_dictionary[cluster_name].append(user)
else:
user_cluster_dictionary[cluster_name] = [user]
return user_cluster_dictionary
| melqkiades/yelp | source/python/recommenders/multicriteria/multicriteria_base_recommender.py | Python | lgpl-2.1 | 3,524 | 0.001703 |
from django.apps import AppConfig
class MineralsConfig(AppConfig):
name = 'minerals'
| squadran2003/filtering-searching-mineral-catalogue | filtering-searching-mineral-catalogue/minerals/apps.py | Python | mit | 91 | 0 |
from pycukes import BeforeAll, AfterAll, BeforeEach, AfterEach
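# Example hook definitions: each hook receives the shared story context
# object and can attach state to it.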
@BeforeAll
def add_message1_attr(context):
context.counter = 1
@BeforeEach
def add_message_attr(context):
context.counter += 1
setattr(context, 'message%d' % context.counter, 'msg')
@AfterEach
def increment_one(context):
context.counter += 1
@AfterAll
def show_hello_world(context):
print 'hello world'
| hltbra/pycukes | specs/console_examples/stories_with_hooks/support/env.py | Python | mit | 392 | 0.005102 |
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.filters
~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Markup, Environment
from jinja2._compat import text_type, implements_to_string
env = Environment()
class FilterTestCase(JinjaTestCase):
def test_filter_calling(self):
rv = env.call_filter('sum', [1, 2, 3])
self.assert_equal(rv, 6)
def test_capitalize(self):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == 'Foo bar'
def test_center(self):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == ' foo '
def test_default(self):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given='yes') == 'no|False|no|yes'
def test_dictsort(self):
tmpl = env.from_string(
'{{ foo|dictsort }}|'
'{{ foo|dictsort(true) }}|'
'{{ foo|dictsort(false, "value") }}'
)
out = tmpl.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == ("[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]|"
"[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]|"
"[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]")
def test_batch(self):
tmpl = env.from_string("{{ foo|batch(3)|list }}|"
"{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]")
def test_slice(self):
tmpl = env.from_string('{{ foo|slice(3)|list }}|'
'{{ foo|slice(3, "X")|list }}')
out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]")
def test_escape(self):
tmpl = env.from_string('''{{ '<">&'|escape }}''')
out = tmpl.render()
        assert out == '&lt;&#34;&gt;&amp;'
def test_striptags(self):
tmpl = env.from_string('''{{ foo|striptags }}''')
out = tmpl.render(foo=' <p>just a small \n <a href="#">'
'example</a> link</p>\n<p>to a webpage</p> '
'<!-- <p>and some commented stuff</p> -->')
assert out == 'just a small example link to a webpage'
def test_filesizeformat(self):
tmpl = env.from_string(
'{{ 100|filesizeformat }}|'
'{{ 1000|filesizeformat }}|'
'{{ 1000000|filesizeformat }}|'
'{{ 1000000000|filesizeformat }}|'
'{{ 1000000000000|filesizeformat }}|'
'{{ 100|filesizeformat(true) }}|'
'{{ 1000|filesizeformat(true) }}|'
'{{ 1000000|filesizeformat(true) }}|'
'{{ 1000000000|filesizeformat(true) }}|'
'{{ 1000000000000|filesizeformat(true) }}'
)
out = tmpl.render()
self.assert_equal(out, (
'100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|'
'1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB'
))
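    # Regression test for issue 59: decimal (kB/MB) and binary (KiB/MiB)
    # prefixes must not be mixed up.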
def test_filesizeformat_issue59(self):
tmpl = env.from_string(
'{{ 300|filesizeformat }}|'
'{{ 3000|filesizeformat }}|'
'{{ 3000000|filesizeformat }}|'
'{{ 3000000000|filesizeformat }}|'
'{{ 3000000000000|filesizeformat }}|'
'{{ 300|filesizeformat(true) }}|'
'{{ 3000|filesizeformat(true) }}|'
'{{ 3000000|filesizeformat(true) }}'
)
out = tmpl.render()
self.assert_equal(out, (
'300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|'
'2.9 KiB|2.9 MiB'
))
def test_first(self):
tmpl = env.from_string('{{ foo|first }}')
out = tmpl.render(foo=list(range(10)))
assert out == '0'
def test_float(self):
tmpl = env.from_string('{{ "42"|float }}|'
'{{ "ajsghasjgd"|float }}|'
'{{ "32.32"|float }}')
out = tmpl.render()
assert out == '42.0|0.0|32.32'
def test_format(self):
tmpl = env.from_string('''{{ "%s|%s"|format("a", "b") }}''')
out = tmpl.render()
assert out == 'a|b'
def test_indent(self):
tmpl = env.from_string('{{ foo|indent(2) }}|{{ foo|indent(2, true) }}')
text = '\n'.join([' '.join(['foo', 'bar'] * 2)] * 2)
out = tmpl.render(foo=text)
assert out == ('foo bar foo bar\n foo bar foo bar| '
'foo bar foo bar\n foo bar foo bar')
def test_int(self):
tmpl = env.from_string('{{ "42"|int }}|{{ "ajsghasjgd"|int }}|'
'{{ "32.32"|int }}')
out = tmpl.render()
assert out == '42|0|32'
def test_join(self):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == '1|2|3'
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
        assert tmpl.render() == '&lt;foo&gt;<span>foo</span>'
def test_join_attribute(self):
class User(object):
def __init__(self, username):
self.username = username
tmpl = env.from_string('''{{ users|join(', ', 'username') }}''')
assert tmpl.render(users=map(User, ['foo', 'bar'])) == 'foo, bar'
def test_last(self):
tmpl = env.from_string('''{{ foo|last }}''')
out = tmpl.render(foo=list(range(10)))
assert out == '9'
def test_length(self):
tmpl = env.from_string('''{{ "hello world"|length }}''')
out = tmpl.render()
assert out == '11'
def test_lower(self):
tmpl = env.from_string('''{{ "FOO"|lower }}''')
out = tmpl.render()
assert out == 'foo'
def test_pprint(self):
from pprint import pformat
tmpl = env.from_string('''{{ data|pprint }}''')
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self):
tmpl = env.from_string('''{{ seq|random }}''')
seq = list(range(100))
for _ in range(10):
assert int(tmpl.render(seq=seq)) in seq
def test_reverse(self):
tmpl = env.from_string('{{ "foobar"|reverse|join }}|'
'{{ [1, 2, 3]|reverse|list }}')
assert tmpl.render() == 'raboof|[3, 2, 1]'
def test_string(self):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string('''{{ obj|string }}''')
assert tmpl.render(obj=x) == text_type(x)
def test_title(self):
tmpl = env.from_string('''{{ "foo bar"|title }}''')
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string('''{{ "foo's bar"|title }}''')
assert tmpl.render() == "Foo's Bar"
tmpl = env.from_string('''{{ "foo bar"|title }}''')
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string('''{{ "f bar f"|title }}''')
assert tmpl.render() == "F Bar F"
tmpl = env.from_string('''{{ "foo-bar"|title }}''')
assert tmpl.render() == "Foo-Bar"
tmpl = env.from_string('''{{ "foo\tbar"|title }}''')
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string('''{{ "FOO\tBAR"|title }}''')
assert tmpl.render() == "Foo\tBar"
class Foo:
def __str__(self):
return 'foo-bar'
tmpl = env.from_string('''{{ data|title }}''')
out = tmpl.render(data=Foo())
assert out == 'Foo-Bar'
def test_truncate(self):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
'{{ smalldata|truncate(15) }}'
)
out = tmpl.render(data='foobar baz bar' * 1000,
smalldata='foobar baz bar')
assert out == 'foobar baz barf>>>|foobar baz >>>|foobar baz bar'
def test_upper(self):
tmpl = env.from_string('{{ "foo"|upper }}')
assert tmpl.render() == 'FOO'
def test_urlize(self):
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == 'foo <a href="http://www.example.com/">'\
'http://www.example.com/</a> bar'
def test_wordcount(self):
tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
assert tmpl.render() == '3'
def test_block(self):
tmpl = env.from_string('{% filter lower|escape %}<HEHE>{% endfilter %}')
        assert tmpl.render() == '&lt;hehe&gt;'
def test_chaining(self):
tmpl = env.from_string('''{{ ['<foo>', '<bar>']|first|upper|escape }}''')
        assert tmpl.render() == '&lt;FOO&gt;'
def test_sum(self):
tmpl = env.from_string('''{{ [1, 2, 3, 4, 5, 6]|sum }}''')
assert tmpl.render() == '21'
def test_sum_attributes(self):
tmpl = env.from_string('''{{ values|sum('value') }}''')
assert tmpl.render(values=[
{'value': 23},
{'value': 1},
{'value': 18},
]) == '42'
def test_sum_attributes_nested(self):
tmpl = env.from_string('''{{ values|sum('real.value') }}''')
assert tmpl.render(values=[
{'real': {'value': 23}},
{'real': {'value': 1}},
{'real': {'value': 18}},
]) == '42'
def test_sum_attributes_tuple(self):
tmpl = env.from_string('''{{ values.items()|sum('1') }}''')
assert tmpl.render(values={
'foo': 23,
'bar': 1,
'baz': 18,
}) == '42'
def test_abs(self):
tmpl = env.from_string('''{{ -1|abs }}|{{ 1|abs }}''')
assert tmpl.render() == '1|1', tmpl.render()
def test_round_positive(self):
tmpl = env.from_string('{{ 2.7|round }}|{{ 2.1|round }}|'
"{{ 2.1234|round(3, 'floor') }}|"
"{{ 2.1|round(0, 'ceil') }}")
assert tmpl.render() == '3.0|2.0|2.123|3.0', tmpl.render()
def test_round_negative(self):
tmpl = env.from_string('{{ 21.3|round(-1)}}|'
"{{ 21.3|round(-1, 'ceil')}}|"
"{{ 21.3|round(-1, 'floor')}}")
assert tmpl.render() == '20.0|30.0|20.0',tmpl.render()
def test_xmlattr(self):
tmpl = env.from_string("{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}")
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
        assert 'blub:blub="&lt;?&gt;"' in out
def test_sort1(self):
tmpl = env.from_string('{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}')
assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]'
def test_sort2(self):
tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
assert tmpl.render() == 'AbcD'
def test_sort3(self):
tmpl = env.from_string('''{{ ['foo', 'Bar', 'blah']|sort }}''')
assert tmpl.render() == "['Bar', 'blah', 'foo']"
def test_sort4(self):
@implements_to_string
class Magic(object):
def __init__(self, value):
self.value = value
def __str__(self):
return text_type(self.value)
tmpl = env.from_string('''{{ items|sort(attribute='value')|join }}''')
assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == '1234'
def test_groupby(self):
tmpl = env.from_string('''
{%- for grouper, list in [{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}]|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render().split('|') == [
"1: 1, 2: 1, 1",
"2: 2, 3",
"3: 3, 4",
""
]
def test_groupby_tuple_index(self):
tmpl = env.from_string('''
{%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render() == 'a:1:2|b:1|'
def test_groupby_multidot(self):
class Date(object):
def __init__(self, day, month, year):
self.day = day
self.month = month
self.year = year
class Article(object):
def __init__(self, title, *date):
self.date = Date(*date)
self.title = title
articles = [
Article('aha', 1, 1, 1970),
Article('interesting', 2, 1, 1970),
Article('really?', 3, 1, 1970),
Article('totally not', 1, 1, 1971)
]
tmpl = env.from_string('''
{%- for year, list in articles|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}''')
assert tmpl.render(articles=articles).split('|') == [
'1970[aha][interesting][really?]',
'1971[totally not]',
''
]
def test_filtertag(self):
tmpl = env.from_string("{% filter upper|replace('FOO', 'foo') %}"
"foobar{% endfilter %}")
assert tmpl.render() == 'fooBAR'
def test_replace(self):
env = Environment()
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string='<foo>') == '<f4242>'
env = Environment(autoescape=True)
tmpl = env.from_string('{{ string|replace("o", 42) }}')
        assert tmpl.render(string='<foo>') == '&lt;f4242&gt;'
tmpl = env.from_string('{{ string|replace("<", 42) }}')
        assert tmpl.render(string='<foo>') == '42foo&gt;'
tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
        assert tmpl.render(string=Markup('foo')) == 'f&gt;x&lt;&gt;x&lt;'
def test_forceescape(self):
tmpl = env.from_string('{{ x|forceescape }}')
        assert tmpl.render(x=Markup('<div />')) == u'&lt;div /&gt;'
def test_safe(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
assert tmpl.render() == '<div>foo</div>'
tmpl = env.from_string('{{ "<div>foo</div>" }}')
        assert tmpl.render() == '&lt;div&gt;foo&lt;/div&gt;'
def test_urlencode(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "Hello, world!"|urlencode }}')
assert tmpl.render() == 'Hello%2C%20world%21'
tmpl = env.from_string('{{ o|urlencode }}')
assert tmpl.render(o=u"Hello, world\u203d") == "Hello%2C%20world%E2%80%BD"
assert tmpl.render(o=(("f", 1),)) == "f=1"
assert tmpl.render(o=(('f', 1), ("z", 2))) == "f=1&z=2"
assert tmpl.render(o=((u"\u203d", 1),)) == "%E2%80%BD=1"
assert tmpl.render(o={u"\u203d": 1}) == "%E2%80%BD=1"
assert tmpl.render(o={0: 1}) == "0=1"
def test_simple_map(self):
env = Environment()
tmpl = env.from_string('{{ ["1", "2", "3"]|map("int")|sum }}')
self.assertEqual(tmpl.render(), '6')
def test_attribute_map(self):
class User(object):
def __init__(self, name):
self.name = name
env = Environment()
users = [
User('john'),
User('jane'),
User('mike'),
]
tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'john|jane|mike')
def test_empty_map(self):
env = Environment()
tmpl = env.from_string('{{ none|map("upper")|list }}')
self.assertEqual(tmpl.render(), '[]')
def test_simple_select(self):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}')
self.assertEqual(tmpl.render(), '1|3|5')
def test_bool_select(self):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}')
self.assertEqual(tmpl.render(), '1|2|3|4|5')
def test_simple_reject(self):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}')
self.assertEqual(tmpl.render(), '2|4')
def test_bool_reject(self):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}')
self.assertEqual(tmpl.render(), 'None|False|0')
def test_simple_select_attr(self):
class User(object):
def __init__(self, name, is_active):
self.name = name
self.is_active = is_active
env = Environment()
users = [
User('john', True),
User('jane', True),
User('mike', False),
]
tmpl = env.from_string('{{ users|selectattr("is_active")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'john|jane')
def test_simple_reject_attr(self):
class User(object):
def __init__(self, name, is_active):
self.name = name
self.is_active = is_active
env = Environment()
users = [
User('john', True),
User('jane', True),
User('mike', False),
]
tmpl = env.from_string('{{ users|rejectattr("is_active")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'mike')
def test_func_select_attr(self):
class User(object):
def __init__(self, id, name):
self.id = id
self.name = name
env = Environment()
users = [
User(1, 'john'),
User(2, 'jane'),
User(3, 'mike'),
]
tmpl = env.from_string('{{ users|selectattr("id", "odd")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'john|mike')
def test_func_reject_attr(self):
class User(object):
def __init__(self, id, name):
self.id = id
self.name = name
env = Environment()
users = [
User(1, 'john'),
User(2, 'jane'),
User(3, 'mike'),
]
tmpl = env.from_string('{{ users|rejectattr("id", "odd")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'jane')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FilterTestCase))
return suite
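# Editor's note (not part of the original test module): a minimal sketch showing how
# this suite can be run directly. It assumes `unittest` and `FilterTestCase` are
# imported/defined earlier in the full upstream file, as they are used above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')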
| josephlewis42/magpie | magpie/lib/jinja2/testsuite/filters.py | Python | bsd-3-clause | 19,379 | 0.000929 |
# -*- coding: utf-8 -*-
"""
Bit Reading Request/Response messages
--------------------------------------
"""
import struct
from pymodbus3.pdu import ModbusRequest
from pymodbus3.pdu import ModbusResponse
from pymodbus3.pdu import ModbusExceptions
from pymodbus3.utilities import pack_bitstring, unpack_bitstring
class ReadBitsRequestBase(ModbusRequest):
""" Base class for Messages Requesting bit values """
_rtu_frame_size = 8
def __init__(self, address, count, **kwargs):
""" Initializes the read request data
:param address: The start address to read from
:param count: The number of bits after 'address' to read
"""
ModbusRequest.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self):
""" Encodes a request pdu
:returns: The encoded pdu
"""
return struct.pack('>HH', self.address, self.count)
def decode(self, data):
""" Decodes a request pdu
:param data: The packet data to decode
"""
self.address, self.count = struct.unpack('>HH', data)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'ReadBitRequest({0},{1})'.format(self.address, self.count)
class ReadBitsResponseBase(ModbusResponse):
""" Base class for Messages responding to bit-reading values """
_rtu_byte_count_pos = 2
def __init__(self, values, **kwargs):
""" Initializes a new instance
:param values: The requested values to be returned
"""
self.byte_count = None
ModbusResponse.__init__(self, **kwargs)
self.bits = values or []
def encode(self):
""" Encodes response pdu
:returns: The encoded packet message
"""
result = pack_bitstring(self.bits)
packet = struct.pack('>B', len(result)) + result
return packet
def decode(self, data):
""" Decodes response pdu
:param data: The packet data to decode
"""
self.byte_count = data[0]
self.bits = unpack_bitstring(data[1:])
def set_bit(self, address, value=1):
""" Helper function to set the specified bit
:param address: The bit to set
:param value: The value to set the bit to
"""
self.bits[address] = (value != 0)
def reset_bit(self, address):
""" Helper function to set the specified bit to 0
:param address: The bit to reset
"""
self.set_bit(address, 0)
def get_bit(self, address):
""" Helper function to get the specified bit's value
:param address: The bit to query
:returns: The value of the requested bit
"""
return self.bits[address]
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'ReadBitResponse({0})'.format(len(self.bits))
class ReadCoilsRequest(ReadBitsRequestBase):
"""
    This function code is used to read from 1 to 2000 (0x7d0) contiguous
    statuses of coils in a remote device. The Request PDU specifies the
    starting address, i.e. the address of the first coil specified, and the
    number of coils. In the PDU, coils are addressed starting at zero.
    Therefore coils numbered 1-16 are addressed as 0-15.
"""
function_code = 1
def __init__(self, address=None, count=None, **kwargs):
""" Initializes a new instance
:param address: The address to start reading from
:param count: The number of bits to read
"""
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
def execute(self, context):
""" Run a read coils request against a datastore
Before running the request, we make sure that the request is in
the max valid range (0x001-0x7d0). Next we make sure that the
request is valid against the current datastore.
:param context: The datastore to request from
        :returns: The initialized response message, or an exception message otherwise
"""
if not (1 <= self.count <= 0x7d0):
return self.do_exception(ModbusExceptions.IllegalValue)
if not context.validate(self.function_code, self.address, self.count):
return self.do_exception(ModbusExceptions.IllegalAddress)
values = context.get_values(
self.function_code, self.address, self.count
)
return ReadCoilsResponse(values)
class ReadCoilsResponse(ReadBitsResponseBase):
"""
The coils in the response message are packed as one coil per bit of
the data field. Status is indicated as 1= ON and 0= OFF. The LSB of the
first data byte contains the output addressed in the query. The other
coils follow toward the high order end of this byte, and from low order
to high order in subsequent bytes.
If the returned output quantity is not a multiple of eight, the
remaining bits in the final data byte will be padded with zeros
(toward the high order end of the byte). The Byte Count field specifies
the quantity of complete bytes of data.
"""
function_code = 1
def __init__(self, values=None, **kwargs):
""" Initializes a new instance
:param values: The request values to respond with
"""
ReadBitsResponseBase.__init__(self, values, **kwargs)
class ReadDiscreteInputsRequest(ReadBitsRequestBase):
"""
    This function code is used to read from 1 to 2000 (0x7d0) contiguous
    statuses of discrete inputs in a remote device. The Request PDU specifies
    the starting address, i.e. the address of the first input specified, and
    the number of inputs. In the PDU, discrete inputs are addressed starting
    at zero. Therefore discrete inputs numbered 1-16 are addressed as 0-15.
"""
function_code = 2
def __init__(self, address=None, count=None, **kwargs):
""" Initializes a new instance
:param address: The address to start reading from
:param count: The number of bits to read
"""
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
def execute(self, context):
""" Run a read discrete input request against a datastore
Before running the request, we make sure that the request is in
the max valid range (0x001-0x7d0). Next we make sure that the
request is valid against the current datastore.
:param context: The datastore to request from
        :returns: The initialized response message, or an exception message otherwise
"""
if not (1 <= self.count <= 0x7d0):
return self.do_exception(ModbusExceptions.IllegalValue)
if not context.validate(
self.function_code, self.address, self.count
):
return self.do_exception(ModbusExceptions.IllegalAddress)
values = context.get_values(
self.function_code, self.address, self.count
)
return ReadDiscreteInputsResponse(values)
class ReadDiscreteInputsResponse(ReadBitsResponseBase):
"""
The discrete inputs in the response message are packed as one input per
bit of the data field. Status is indicated as 1= ON; 0= OFF. The LSB of
the first data byte contains the input addressed in the query. The other
inputs follow toward the high order end of this byte, and from low order
to high order in subsequent bytes.
If the returned input quantity is not a multiple of eight, the
remaining bits in the final data byte will be padded with zeros
(toward the high order end of the byte). The Byte Count field specifies
the quantity of complete bytes of data.
"""
function_code = 2
def __init__(self, values=None, **kwargs):
""" Initializes a new instance
:param values: The request values to respond with
"""
ReadBitsResponseBase.__init__(self, values, **kwargs)
# Exported symbols
__all__ = [
'ReadCoilsRequest',
'ReadCoilsResponse',
'ReadDiscreteInputsRequest',
'ReadDiscreteInputsResponse',
]
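# Editor's note (not part of the original module): a minimal round-trip sketch of the
# PDU classes defined above. It assumes ModbusRequest/ModbusResponse can be built with
# their default keyword arguments; in practice these PDUs are framed and parsed by the
# pymodbus3 transaction layer rather than by hand.
def _example_round_trip():
    request = ReadCoilsRequest(address=0, count=5)
    frame = request.encode()             # struct-packed '>HH' -> b'\x00\x00\x00\x05'
    parsed_request = ReadCoilsRequest(0, 0)
    parsed_request.decode(frame)         # restores address=0, count=5

    response = ReadCoilsResponse([True, False, True, False, True])
    payload = response.encode()          # byte count followed by the packed bit string
    parsed_response = ReadCoilsResponse([])
    parsed_response.decode(payload)      # parsed_response.get_bit(0) should be True again
    return parsed_request, parsed_response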
| gregorschatz/pymodbus3 | pymodbus3/bit_read_message.py | Python | bsd-3-clause | 8,239 | 0 |
"""SCons.Debug
Code for debugging SCons internal things. Not everything here is
guaranteed to work all the way back to Python 1.5.2, and shouldn't be
needed by most users.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py 3897 2009/01/13 06:45:54 scons"
import os
import string
import sys
# Recipe 14.10 from the Python Cookbook.
try:
import weakref
except ImportError:
def logInstanceCreation(instance, name=None):
pass
else:
def logInstanceCreation(instance, name=None):
if name is None:
name = instance.__class__.__name__
if not tracked_classes.has_key(name):
tracked_classes[name] = []
tracked_classes[name].append(weakref.ref(instance))
tracked_classes = {}
def string_to_classes(s):
if s == '*':
c = tracked_classes.keys()
c.sort()
return c
else:
return string.split(s)
def fetchLoggedInstances(classes="*"):
classnames = string_to_classes(classes)
return map(lambda cn: (cn, len(tracked_classes[cn])), classnames)
def countLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))
def listLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s:\n' % obj)
for key, value in obj.__dict__.items():
file.write(' %20s : %s\n' % (key, value))
if sys.platform[:5] == "linux":
# Linux doesn't actually support memory usage stats from getrusage().
    def memory():
        # field 23 (index 22 after split) of /proc/self/stat is the process
        # virtual memory size in bytes
        mstr = open('/proc/self/stat').read()
        mstr = string.split(mstr)[22]
        return int(mstr)
else:
try:
import resource
except ImportError:
try:
import win32process
import win32api
except ImportError:
def memory():
return 0
else:
def memory():
process_handle = win32api.GetCurrentProcess()
memory_info = win32process.GetProcessMemoryInfo( process_handle )
return memory_info['PeakWorkingSetSize']
else:
def memory():
res = resource.getrusage(resource.RUSAGE_SELF)
return res[4]
# returns caller's stack
def caller_stack(*backlist):
import traceback
if not backlist:
backlist = [0]
result = []
for back in backlist:
tb = traceback.extract_stack(limit=3+back)
key = tb[0][:3]
result.append('%s:%d(%s)' % func_shorten(key))
return result
caller_bases = {}
caller_dicts = {}
# trace a caller's stack
def caller_trace(back=0):
import traceback
tb = traceback.extract_stack(limit=3+back)
tb.reverse()
callee = tb[1][:3]
caller_bases[callee] = caller_bases.get(callee, 0) + 1
for caller in tb[2:]:
caller = callee + caller[:3]
try:
entry = caller_dicts[callee]
except KeyError:
caller_dicts[callee] = entry = {}
entry[caller] = entry.get(caller, 0) + 1
callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
l = []
for c,v in caller_dicts[key].items():
l.append((-v,c))
l.sort()
leader = ' '*level
for v,c in l:
file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
if caller_dicts.has_key(c):
_dump_one_caller(c, file, level+1)
# print each call tree
def dump_caller_counts(file=sys.stdout):
keys = caller_bases.keys()
keys.sort()
for k in keys:
file.write("Callers of %s:%d(%s), %d calls:\n"
% (func_shorten(k) + (caller_bases[k],)))
_dump_one_caller(k, file)
shorten_list = [
( '/scons/SCons/', 1),
( '/src/engine/SCons/', 1),
( '/usr/lib/python', 0),
]
if os.sep != '/':
def platformize(t):
return (string.replace(t[0], '/', os.sep), t[1])
shorten_list = map(platformize, shorten_list)
del platformize
def func_shorten(func_tuple):
f = func_tuple[0]
for t in shorten_list:
i = string.find(f, t[0])
if i >= 0:
if t[1]:
i = i + len(t[0])
return (f[i:],)+func_tuple[1:]
return func_tuple
TraceFP = {}
if sys.platform == 'win32':
TraceDefault = 'con'
else:
TraceDefault = '/dev/tty'
def Trace(msg, file=None, mode='w'):
"""Write a trace message to a file. Whenever a file is specified,
it becomes the default for the next call to Trace()."""
global TraceDefault
if file is None:
file = TraceDefault
else:
TraceDefault = file
try:
fp = TraceFP[file]
except KeyError:
try:
fp = TraceFP[file] = open(file, mode)
except TypeError:
# Assume we were passed an open file pointer.
fp = file
fp.write(msg)
fp.flush()
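# Editor's note (not part of the original module): a small usage sketch, written in the
# same old-style Python as the rest of this file. It assumes writing to the default
# trace target (/dev/tty, or 'con' on win32) is acceptable; pass a filename or an open
# file object to Trace() to redirect the output.
def _example_debug_usage():
    Trace('peak memory: %s\n' % memory())   # first call opens TraceDefault
    Trace('still tracing\n')                # later calls reuse the same target
    caller_trace()                          # record this call site and its callers
    dump_caller_counts()                    # print the accumulated call trees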
| kuiche/chromium | third_party/scons/scons-local/SCons/Debug.py | Python | bsd-3-clause | 6,593 | 0.00546 |
# -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2013 Guake authors
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
import inspect
import time
# You can put calls to p() everywhere in this page to inspect timing
# g_start = time.time()
# def p():
# print(time.time() - g_start, __file__, inspect.currentframe().f_back.f_lineno)
import logging
import os
import signal
import subprocess
import sys
import uuid
from locale import gettext as _
from optparse import OptionParser
log = logging.getLogger(__name__)
from guake.globals import NAME
from guake.globals import bindtextdomain
from guake.support import print_support
from guake.utils import restore_preferences
from guake.utils import save_preferences
# When we are in the document generation on readthedocs, we do not have paths.py generated
try:
from guake.paths import LOCALE_DIR
bindtextdomain(NAME, LOCALE_DIR)
except: # pylint: disable=bare-except
pass
def main():
"""Parses the command line parameters and decide if dbus methods
should be called or not. If there is already a guake instance
running it will be used and a True value will be returned,
otherwise, false will be returned.
"""
# Force to xterm-256 colors for compatibility with some old command line programs
os.environ["TERM"] = "xterm-256color"
    # Force use of the X11 backend under Wayland
os.environ["GDK_BACKEND"] = "x11"
# do not use version keywords here, pbr might be slow to find the version of Guake module
parser = OptionParser()
parser.add_option(
'-V',
'--version',
dest='version',
action='store_true',
default=False,
help=_('Show Guake version number and exit')
)
parser.add_option(
'-v',
'--verbose',
dest='verbose',
action='store_true',
default=False,
help=_('Enable verbose logging')
)
parser.add_option(
'-f',
'--fullscreen',
dest='fullscreen',
action='store_true',
default=False,
help=_('Put Guake in fullscreen mode')
)
parser.add_option(
'-t',
'--toggle-visibility',
dest='show_hide',
action='store_true',
default=False,
help=_('Toggles the visibility of the terminal window')
)
parser.add_option(
'--show',
dest="show",
action='store_true',
default=False,
help=_('Shows Guake main window')
)
parser.add_option(
'--hide',
dest='hide',
action='store_true',
default=False,
help=_('Hides Guake main window')
)
parser.add_option(
'-p',
'--preferences',
dest='show_preferences',
action='store_true',
default=False,
help=_('Shows Guake preference window')
)
parser.add_option(
'-a',
'--about',
dest='show_about',
action='store_true',
default=False,
help=_('Shows Guake\'s about info')
)
parser.add_option(
'-n',
'--new-tab',
dest='new_tab',
action='store',
default='',
help=_('Add a new tab (with current directory set to NEW_TAB)')
)
parser.add_option(
'-s',
'--select-tab',
dest='select_tab',
action='store',
default='',
help=_('Select a tab (SELECT_TAB is the index of the tab)')
)
parser.add_option(
'-g',
'--selected-tab',
dest='selected_tab',
action='store_true',
default=False,
help=_('Return the selected tab index.')
)
parser.add_option(
'-l',
'--selected-tablabel',
dest='selected_tablabel',
action='store_true',
default=False,
help=_('Return the selected tab label.')
)
parser.add_option(
'--split-vertical',
dest='split_vertical',
action='store_true',
default=False,
help=_('Split the selected tab vertically.')
)
parser.add_option(
'--split-horizontal',
dest='split_horizontal',
action='store_true',
default=False,
help=_('Split the selected tab horizontally.')
)
parser.add_option(
'-e',
'--execute-command',
dest='command',
action='store',
default='',
help=_('Execute an arbitrary command in the selected tab.')
)
parser.add_option(
'-i',
'--tab-index',
dest='tab_index',
action='store',
default='0',
help=_('Specify the tab to rename. Default is 0. Can be used to select tab by UUID.')
)
parser.add_option(
'--bgcolor',
dest='bgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) background color of '
'the selected tab.')
)
parser.add_option(
'--fgcolor',
dest='fgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) foreground color of the '
'selected tab.')
)
parser.add_option(
'--change-palette',
dest='palette_name',
action='store',
default='',
help=_('Change Guake palette scheme')
)
parser.add_option(
'--rename-tab',
dest='rename_tab',
metavar='TITLE',
action='store',
default='',
help=_(
'Rename the specified tab by --tab-index. Reset to default if TITLE is '
'a single dash "-".'
)
)
parser.add_option(
'-r',
'--rename-current-tab',
dest='rename_current_tab',
metavar='TITLE',
action='store',
default='',
help=_('Rename the current tab. Reset to default if TITLE is a '
'single dash "-".')
)
parser.add_option(
'-q',
'--quit',
dest='quit',
action='store_true',
default=False,
help=_('Says to Guake go away =(')
)
parser.add_option(
'-u',
'--no-startup-script',
dest='execute_startup_script',
action='store_false',
default=True,
help=_('Do not execute the start up script')
)
parser.add_option(
'--save-preferences',
dest='save_preferences',
action='store',
default=None,
help=_('Save Guake preferences to this filename')
)
parser.add_option(
'--restore-preferences',
dest='restore_preferences',
action='store',
default=None,
help=_('Restore Guake preferences from this file')
)
parser.add_option(
'--support',
dest='support',
action='store_true',
default=False,
        help=_('Show support information')
)
# checking mandatory dependencies
missing_deps = False
try:
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
except ValueError:
print("[ERROR] missing mandatory dependency: GtK 3.0")
missing_deps = True
try:
gi.require_version('Vte', '2.91') # vte-0.42
except ValueError:
print("[ERROR] missing mandatory dependency: Vte >= 0.42")
missing_deps = True
try:
gi.require_version('Keybinder', '3.0')
except ValueError:
print("[ERROR] missing mandatory dependency: Keybinder 3")
missing_deps = True
try:
import cairo
except ImportError:
print("[ERROR] missing mandatory dependency: cairo")
missing_deps = True
if missing_deps:
print(
"[ERROR] missing at least one system dependencies. "
"You need to install additional packages for Guake to run"
)
print(
"[ERROR] On Debian/Ubuntu you need to install the following libraries:\n"
" sudo apt-get install -y --no-install-recommends \\\n"
" gir1.2-keybinder-3.0 \\\n"
" gir1.2-notify-0.7 \\\n"
" gir1.2-vte-2.91 \\\n"
" gir1.2-wnck-3.0 \\\n"
" libkeybinder-3.0-0 \\\n"
" libutempter0 \\\n"
" python3 \\\n"
" python3-cairo \\\n"
" python3-dbus \\\n"
" python3-gi \\\n"
" python3-pbr \\\n"
" python3-pip"
)
sys.exit(1)
options = parser.parse_args()[0]
if options.version:
from guake import gtk_version
from guake import guake_version
from guake import vte_version
from guake import vte_runtime_version
print('Guake Terminal: {}'.format(guake_version()))
print('VTE: {}'.format(vte_version()))
print('VTE runtime: {}'.format(vte_runtime_version()))
print('Gtk: {}'.format(gtk_version()))
sys.exit(0)
if options.save_preferences and options.restore_preferences:
parser.error('options --save-preferences and --restore-preferences are mutually exclusive')
if options.save_preferences:
save_preferences(options.save_preferences)
sys.exit(0)
elif options.restore_preferences:
restore_preferences(options.restore_preferences)
sys.exit(0)
if options.support:
print_support()
sys.exit(0)
import dbus
from guake.dbusiface import DBUS_NAME
from guake.dbusiface import DBUS_PATH
from guake.dbusiface import DbusManager
from guake.guake_logging import setupLogging
instance = None
# Trying to get an already running instance of guake. If it is not
# possible, lets create a new instance. This function will return
# a boolean value depending on this decision.
try:
bus = dbus.SessionBus()
remote_object = bus.get_object(DBUS_NAME, DBUS_PATH)
already_running = True
except dbus.DBusException:
# can now configure the logging
setupLogging(options.verbose)
# COLORTERM is an environment variable set by some terminal emulators such as
# gnome-terminal.
# To avoid confusing applications running inside Guake, clean up COLORTERM at startup.
if "COLORTERM" in os.environ:
del os.environ['COLORTERM']
log.info("Guake not running, starting it")
# late loading of the Guake object, to speed up dbus comm
from guake.guake_app import Guake
instance = Guake()
remote_object = DbusManager(instance)
already_running = False
only_show_hide = True
if options.fullscreen:
remote_object.fullscreen()
if options.show:
remote_object.show_from_remote()
if options.hide:
remote_object.hide_from_remote()
if options.show_preferences:
remote_object.show_prefs()
only_show_hide = options.show
if options.new_tab:
remote_object.add_tab(options.new_tab)
only_show_hide = options.show
if options.select_tab:
selected = int(options.select_tab)
tab_count = int(remote_object.get_tab_count())
if 0 <= selected < tab_count:
remote_object.select_tab(selected)
else:
sys.stderr.write('invalid index: %d\n' % selected)
only_show_hide = options.show
if options.selected_tab:
selected = remote_object.get_selected_tab()
sys.stdout.write('%d\n' % selected)
only_show_hide = options.show
if options.selected_tablabel:
selectedlabel = remote_object.get_selected_tablabel()
sys.stdout.write('%s\n' % selectedlabel)
only_show_hide = options.show
if options.split_vertical:
remote_object.v_split_current_terminal()
only_show_hide = options.show
if options.split_horizontal:
remote_object.h_split_current_terminal()
only_show_hide = options.show
if options.command:
remote_object.execute_command(options.command)
only_show_hide = options.show
if options.tab_index and options.rename_tab:
try:
remote_object.rename_tab_uuid(str(uuid.UUID(options.tab_index)), options.rename_tab)
except ValueError:
remote_object.rename_tab(int(options.tab_index), options.rename_tab)
only_show_hide = options.show
if options.bgcolor:
remote_object.set_bgcolor(options.bgcolor)
only_show_hide = options.show
if options.fgcolor:
remote_object.set_fgcolor(options.fgcolor)
only_show_hide = options.show
if options.palette_name:
remote_object.change_palette_name(options.palette_name)
only_show_hide = options.show
if options.rename_current_tab:
remote_object.rename_current_tab(options.rename_current_tab)
only_show_hide = options.show
if options.show_about:
remote_object.show_about()
only_show_hide = options.show
if options.quit:
try:
remote_object.quit()
return True
except dbus.DBusException:
return True
if already_running and only_show_hide:
# here we know that guake was called without any parameter and
# it is already running, so, lets toggle its visibility.
remote_object.show_hide()
if options.execute_startup_script:
if not already_running:
startup_script = instance.settings.general.get_string("startup-script")
if startup_script:
log.info("Calling startup script: %s", startup_script)
pid = subprocess.Popen([startup_script],
shell=True,
stdin=None,
stdout=None,
stderr=None,
close_fds=True)
log.info("Startup script started with pid: %s", pid)
# Please ensure this is the last line !!!!
else:
log.info("--no-startup-script argument defined, so don't execute the startup script")
if already_running:
log.info("Guake is already running")
return already_running
def exec_main():
if not main():
log.debug("Running main gtk loop")
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Load gi pretty late, to speed up as much as possible the parsing of the option for DBus
# comm through command line
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
Gtk.main()
if __name__ == '__main__':
exec_main()
| mouseratti/guake | guake/main.py | Python | gpl-2.0 | 15,344 | 0.001369 |
# -*- coding: utf-8 -*-
from django import template
from ..utils import su_login_callback
register = template.Library()
@register.inclusion_tag('su/login_link.html', takes_context=False)
def login_su_link(user):
return {'can_su_login': su_login_callback(user)}
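# Editor's note (not part of the original module): typical template usage, assuming the
# tag library is loaded under the name implied by this file's path (su_tags):
#
#     {% load su_tags %}
#     {% login_su_link user %}
#
# which renders su/login_link.html with the can_su_login flag computed above.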
| adamcharnock/django-su | django_su/templatetags/su_tags.py | Python | mit | 270 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-19 07:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('photos', '0002_auto_20160919_0737'),
]
operations = [
migrations.CreateModel(
name='Rover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nasa_id', models.IntegerField(unique=True)),
('name', models.CharField(max_length=30)),
('landing_date', models.DateField()),
('max_date', models.DateField()),
('max_sol', models.IntegerField()),
('total_photos', models.IntegerField()),
],
),
]
| WillWeatherford/mars-rover | photos/migrations/0003_rover.py | Python | mit | 852 | 0.001174 |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza.error import Error
from sleekxmpp.stanza.stream_error import StreamError
from sleekxmpp.stanza.iq import Iq
from sleekxmpp.stanza.message import Message
from sleekxmpp.stanza.presence import Presence
| destroy/SleekXMPP-gevent | sleekxmpp/stanza/__init__.py | Python | mit | 399 | 0 |
from __future__ import unicode_literals
import base64
import json
import re
import six
from moto.core.responses import BaseResponse
from .models import kms_backends
from .exceptions import NotFoundException, ValidationException, AlreadyExistsException, NotAuthorizedException
reserved_aliases = [
'alias/aws/ebs',
'alias/aws/s3',
'alias/aws/redshift',
'alias/aws/rds',
]
class KmsResponse(BaseResponse):
@property
def parameters(self):
return json.loads(self.body)
@property
def kms_backend(self):
return kms_backends[self.region]
def create_key(self):
policy = self.parameters.get('Policy')
key_usage = self.parameters.get('KeyUsage')
description = self.parameters.get('Description')
tags = self.parameters.get('Tags')
key = self.kms_backend.create_key(
policy, key_usage, description, tags, self.region)
return json.dumps(key.to_dict())
def update_key_description(self):
key_id = self.parameters.get('KeyId')
description = self.parameters.get('Description')
self.kms_backend.update_key_description(key_id, description)
return json.dumps(None)
def tag_resource(self):
key_id = self.parameters.get('KeyId')
tags = self.parameters.get('Tags')
self.kms_backend.tag_resource(key_id, tags)
return json.dumps({})
def list_resource_tags(self):
key_id = self.parameters.get('KeyId')
tags = self.kms_backend.list_resource_tags(key_id)
return json.dumps({
"Tags": tags,
"NextMarker": None,
"Truncated": False,
})
def describe_key(self):
key_id = self.parameters.get('KeyId')
try:
key = self.kms_backend.describe_key(
self.kms_backend.get_key_id(key_id))
except KeyError:
headers = dict(self.headers)
headers['status'] = 404
return "{}", headers
return json.dumps(key.to_dict())
def list_keys(self):
keys = self.kms_backend.list_keys()
return json.dumps({
"Keys": [
{
"KeyArn": key.arn,
"KeyId": key.id,
} for key in keys
],
"NextMarker": None,
"Truncated": False,
})
def create_alias(self):
alias_name = self.parameters['AliasName']
target_key_id = self.parameters['TargetKeyId']
if not alias_name.startswith('alias/'):
raise ValidationException('Invalid identifier')
if alias_name in reserved_aliases:
raise NotAuthorizedException()
if ':' in alias_name:
raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name))
if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name):
raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' "
"failed to satisfy constraint: Member must satisfy regular "
"expression pattern: ^[a-zA-Z0-9:/_-]+$"
.format(alias_name=alias_name))
if self.kms_backend.alias_exists(target_key_id):
raise ValidationException('Aliases must refer to keys. Not aliases')
if self.kms_backend.alias_exists(alias_name):
raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} '
'already exists'.format(region=self.region, alias_name=alias_name))
self.kms_backend.add_alias(target_key_id, alias_name)
return json.dumps(None)
def delete_alias(self):
alias_name = self.parameters['AliasName']
if not alias_name.startswith('alias/'):
raise ValidationException('Invalid identifier')
if not self.kms_backend.alias_exists(alias_name):
raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:'
'{alias_name} is not found.'.format(region=self.region, alias_name=alias_name))
self.kms_backend.delete_alias(alias_name)
return json.dumps(None)
def list_aliases(self):
region = self.region
response_aliases = [
{
'AliasArn': u'arn:aws:kms:{region}:012345678912:{reserved_alias}'.format(region=region,
reserved_alias=reserved_alias),
'AliasName': reserved_alias
} for reserved_alias in reserved_aliases
]
backend_aliases = self.kms_backend.get_all_aliases()
for target_key_id, aliases in backend_aliases.items():
for alias_name in aliases:
response_aliases.append({
'AliasArn': u'arn:aws:kms:{region}:012345678912:{alias_name}'.format(region=region,
alias_name=alias_name),
'AliasName': alias_name,
'TargetKeyId': target_key_id,
})
return json.dumps({
'Truncated': False,
'Aliases': response_aliases,
})
def enable_key_rotation(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.enable_key_rotation(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def disable_key_rotation(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.disable_key_rotation(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def get_key_rotation_status(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
rotation_enabled = self.kms_backend.get_key_rotation_status(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'KeyRotationEnabled': rotation_enabled})
def put_key_policy(self):
key_id = self.parameters.get('KeyId')
policy_name = self.parameters.get('PolicyName')
policy = self.parameters.get('Policy')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
_assert_default_policy(policy_name)
try:
self.kms_backend.put_key_policy(key_id, policy)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def get_key_policy(self):
key_id = self.parameters.get('KeyId')
policy_name = self.parameters.get('PolicyName')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
_assert_default_policy(policy_name)
try:
return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)})
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
def list_key_policies(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.describe_key(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'Truncated': False, 'PolicyNames': ['default']})
def encrypt(self):
"""
We perform no encryption, we just encode the value as base64 and then
decode it in decrypt().
"""
value = self.parameters.get("Plaintext")
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'})
def decrypt(self):
# TODO refuse decode if EncryptionContext is not the same as when it was encrypted / generated
value = self.parameters.get("CiphertextBlob")
try:
return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")})
except UnicodeDecodeError:
# Generate data key will produce random bytes which when decrypted is still returned as base64
return json.dumps({"Plaintext": value})
def disable_key(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.disable_key(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def enable_key(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.enable_key(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def cancel_key_deletion(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.cancel_key_deletion(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'KeyId': key_id})
def schedule_key_deletion(self):
key_id = self.parameters.get('KeyId')
if self.parameters.get('PendingWindowInDays') is None:
pending_window_in_days = 30
else:
pending_window_in_days = self.parameters.get('PendingWindowInDays')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
return json.dumps({
'KeyId': key_id,
'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days)
})
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
def generate_data_key(self):
key_id = self.parameters.get('KeyId')
encryption_context = self.parameters.get('EncryptionContext')
number_of_bytes = self.parameters.get('NumberOfBytes')
key_spec = self.parameters.get('KeySpec')
grant_tokens = self.parameters.get('GrantTokens')
# Param validation
if key_id.startswith('alias'):
if self.kms_backend.get_key_id_from_alias(key_id) is None:
raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(
region=self.region, alias_name=key_id))
else:
if self.kms_backend.get_key_id(key_id) not in self.kms_backend.keys:
raise NotFoundException('Invalid keyId')
if number_of_bytes and (number_of_bytes > 1024 or number_of_bytes < 0):
raise ValidationException("1 validation error detected: Value '2048' at 'numberOfBytes' failed "
"to satisfy constraint: Member must have value less than or "
"equal to 1024")
if key_spec and key_spec not in ('AES_256', 'AES_128'):
raise ValidationException("1 validation error detected: Value 'AES_257' at 'keySpec' failed "
"to satisfy constraint: Member must satisfy enum value set: "
"[AES_256, AES_128]")
if not key_spec and not number_of_bytes:
raise ValidationException("Please specify either number of bytes or key spec.")
if key_spec and number_of_bytes:
raise ValidationException("Please specify either number of bytes or key spec.")
plaintext, key_arn = self.kms_backend.generate_data_key(key_id, encryption_context,
number_of_bytes, key_spec, grant_tokens)
plaintext = base64.b64encode(plaintext).decode()
return json.dumps({
'CiphertextBlob': plaintext,
'Plaintext': plaintext,
'KeyId': key_arn # not alias
})
def generate_data_key_without_plaintext(self):
result = json.loads(self.generate_data_key())
del result['Plaintext']
return json.dumps(result)
def _assert_valid_key_id(key_id):
if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE):
raise NotFoundException('Invalid keyId')
def _assert_default_policy(policy_name):
if policy_name != 'default':
raise NotFoundException("No such policy exists")
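# Editor's note (not part of the original module): a minimal sketch of the behaviour
# documented in KmsResponse.encrypt()/decrypt() above -- moto performs no real
# encryption, it only base64-encodes the plaintext. Names below are illustrative.
def _example_fake_encryption_round_trip():
    plaintext = u"hello kms"
    ciphertext_blob = base64.b64encode(plaintext.encode('utf-8')).decode('utf-8')
    recovered = base64.b64decode(ciphertext_blob).decode('utf-8')
    assert recovered == plaintext
    return ciphertext_blob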
| whummer/moto | moto/kms/responses.py | Python | apache-2.0 | 14,169 | 0.003882 |
import unittest
from pythoncardx.crypto import Cipher
from pythoncard.security import CryptoException, RSAPublicKey, KeyBuilder, KeyPair
class testCipher(unittest.TestCase):
def testInit(self):
c = Cipher.getInstance(Cipher.ALG_RSA_NOPAD, False)
self.assertEqual(Cipher.ALG_RSA_NOPAD, c.getAlgorithm())
try:
c.update([], 0, 0, [], 0)
self.fail()
except CryptoException as ce:
self.assertEqual(CryptoException.INVALID_INIT, ce.getReason())
try:
c.init("abcd", Cipher.MODE_ENCRYPT)
self.fail()
except CryptoException as ce:
self.assertEqual(CryptoException.ILLEGAL_VALUE, ce.getReason())
pbk = KeyBuilder.buildKey(KeyBuilder.TYPE_RSA_PUBLIC, KeyBuilder.LENGTH_RSA_1024, False)
try:
c.init(pbk, Cipher.MODE_ENCRYPT)
self.fail()
except CryptoException as ce:
self.assertEqual(CryptoException.UNINITIALIZED_KEY, ce.getReason())
pbk.setExponent([0,1,2,3,4,5,6,7,8,9], 5, 5)
pbk.setModulus([7]*128, 0, 128) # 1024 // 8
c.init(pbk, Cipher.MODE_ENCRYPT)
def testRSAEncryptDecrypt(self):
kp = KeyPair(KeyPair.ALG_RSA, KeyBuilder.LENGTH_RSA_1024)
kp.genKeyPair()
pubk = kp.getPublic()
self.assertEqual(1024, pubk.getSize())
privk = kp.getPrivate()
self.assertEqual(1024, privk.getSize())
c = Cipher.getInstance(Cipher.ALG_RSA_PKCS1, False)
c.init(pubk, Cipher.MODE_ENCRYPT)
res = [0]*1024
l = c.doFinal([0,1,2,3,4,5], 0, 6, res, 0)
c.init(privk, Cipher.MODE_DECRYPT)
res2 = [0]*1024
l = c.doFinal(res, 0, l, res2, 0)
self.assertEqual([0,1,2,3,4,5], res2[:l])
def testRSASignVerify(self):
kp = KeyPair(KeyPair.ALG_RSA, KeyBuilder.LENGTH_RSA_1024)
kp.genKeyPair()
pubk = kp.getPublic()
self.assertEqual(1024, pubk.getSize())
privk = kp.getPrivate()
self.assertEqual(1024, privk.getSize())
c = Cipher.getInstance(Cipher.ALG_RSA_PKCS1, False)
c.init(privk, Cipher.MODE_ENCRYPT)
res = [0]*1024
l = c.doFinal([0,1,2,3,4,5], 0, 6, res, 0)
c.init(pubk, Cipher.MODE_DECRYPT)
res2 = [0]*1024
l = c.doFinal(res, 0, l, res2, 0)
self.assertEqual([0,1,2,3,4,5], res2[:l])
    def GemaltoSample(self):
        # Illustrative snippet adapted from Gemalto JavaCard documentation. The method
        # name does not start with "test", so unittest never executes it and the
        # unresolved names (javacardx, modulus, exponent, ...) are never evaluated.
try:
rsa = javacardx.crypto.Cipher.getInstance( javacardx.crypto.Cipher.ALG_RSA_NOPAD , False )
pubkey = javacard.security.KeyBuilder.buildKey(TYPE_RSA_PUBLIC, LENGTH_RSA_512, False )
except javacardx.crypto.CryptoException as e:
#... RSA crypto engine not supported by this card
pass
pubkey.setModulus( modulus, 0, modulus_len)
pubkey.setExponent( exponent, 0, expo_len)
rsa.init(pubkey, MODE_ENCRYPT)
rsa.doFinal(buffer2encrypt, 0, 64, output_buffer, 0)
def testDES(self):
KeyArray = [1,2,3,4,5,6,7,8]
bytBuffer = [0 for i in range(8)]
MyBuffer = [7,5,6,8]
MyDesKey = KeyBuilder.buildKey(KeyBuilder.TYPE_DES, KeyBuilder.LENGTH_DES, False)
crypt_des = Cipher.getInstance(Cipher.ALG_DES_ECB_PKCS5, False)
MyDesKey.setKey(KeyArray, 0)
crypt_des.init(MyDesKey, Cipher.MODE_ENCRYPT)
length = crypt_des.doFinal(MyBuffer, 0, len(MyBuffer), bytBuffer, 0)
crypt_des.init(MyDesKey, Cipher.MODE_DECRYPT)
crypt_des.doFinal(bytBuffer, 0, length, MyBuffer, 0)
self.assertEqual([7,5,6,8], MyBuffer)
| benallard/pythoncard | test/testCipher.py | Python | lgpl-3.0 | 3,613 | 0.016883 |
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
from utils import get_class
class AbstractRankingFunction:
"""Abstract base class for ranking functions."""
def __init__(self,
ranker_arg_str,
ties,
feature_count,
init=None,
sample=None):
self.feature_count = feature_count
ranking_model_str = "ranker.model.Linear"
for arg in ranker_arg_str:
if arg.startswith("ranker.model"):
ranking_model_str = arg
else:
self.ranker_type = float(arg)
self.ranking_model = get_class(ranking_model_str)(feature_count)
self.sample = getattr(__import__("utils"), sample)
self.ties = ties
self.w = self.ranking_model.initialize_weights(init)
def score(self, features):
return self.ranking_model.score(features, self.w.transpose())
def get_candidate_weight(self, delta):
u = self.sample(self.ranking_model.get_feature_count())
return self.w + delta * u, u
def init_ranking(self, query):
raise NotImplementedError("Derived class needs to implement "
"init_ranking.")
def next(self):
raise NotImplementedError("Derived class needs to implement "
"next.")
def next_det(self):
raise NotImplementedError("Derived class needs to implement "
"next_det.")
def next_random(self):
raise NotImplementedError("Derived class needs to implement "
"next_random.")
def get_document_probability(self, docid):
raise NotImplementedError("Derived class needs to implement "
"get_document_probability.")
def getDocs(self, numdocs=None):
        if numdocs is not None:
return self.docids[:numdocs]
return self.docids
def rm_document(self, docid):
raise NotImplementedError("Derived class needs to implement "
"rm_document.")
def document_count(self):
raise NotImplementedError("Derived class needs to implement "
"document_count.")
def update_weights(self, w, alpha=None):
"""update weight vector"""
        if alpha is None:
self.w = w
else:
self.w = self.w + alpha * w
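# Editor's note (not part of the Lerot API): a minimal sketch of a concrete subclass,
# showing which hooks a ranking function is expected to provide. The ranking logic is
# a placeholder (a fixed docid list that ignores the query); a real ranker would
# compute scores with self.score() on the query's feature vectors. Construction still
# goes through AbstractRankingFunction.__init__ and its ranker_arg_str setup.
class _ExampleStaticRanker(AbstractRankingFunction):
    def init_ranking(self, query):
        self.docids = list(range(10))

    def document_count(self):
        return len(self.docids)

    def next(self):
        return self.docids.pop(0)

    def next_det(self):
        return self.next()

    def next_random(self):
        return self.next()

    def get_document_probability(self, docid):
        return 1.0 / max(1, len(self.docids))

    def rm_document(self, docid):
        self.docids.remove(docid)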
| hubert667/AIR | src/python/ranker/AbstractRankingFunction.py | Python | gpl-3.0 | 2,935 | 0.003407 |
import config, json, cgi, sys, Websheet, re, os
def grade(reference_solution, student_solution, translate_line, websheet, student):
if not re.match(r"^\w+$", websheet.classname):
return ("Internal Error (Compiling)", "Invalid overridden classname <tt>" + websheet.classname + " </tt>")
dump = {
"reference." + websheet.classname : reference_solution,
"student." + websheet.classname : student_solution[1],
"tester." + websheet.classname : websheet.make_tester()
}
# print(student_solution[1])
# print(reference_solution)
# print(websheet.make_tester())
for clazz in ["Grader", "Options", "Utils"]:
dump["websheets."+clazz] = "".join(open("grade_java_files/"+clazz+".java"))
for dep in websheet.dependencies:
depws = Websheet.Websheet.from_name(dep)
if depws == None:
return ("Internal Error", "Dependent websheet " + dep + " does not exist");
submission = config.load_submission(student, dep, True)
if submission == False:
return("Dependency Error",
"<div class='dependency-error'><i>Dependency error</i>: " +
"You need to successfully complete the <a href='javascript:websheets.load(\""+dep+"\")'><tt>"+dep+"</tt></a> websheet first (while logged in).</div>") # error text
submission = [{'code': x, 'from': {'line': 0, 'ch':0}, 'to': {'line': 0, 'ch': 0}} for x in submission]
dump["student."+dep] = depws.combine_with_template(submission, "student")[1]
dump["reference."+dep] = depws.get_reference_solution("reference")
compileRun = config.run_java(["traceprinter/ramtools/CompileToBytes"], json.dumps(dump))
compileResult = compileRun.stdout
if (compileResult==""):
return ("Internal Error (Compiling)", "<pre>\n" +
cgi.escape(compileRun.stderr) +
"</pre>"+"<!--"+compileRun._toString()+"-->")
compileObj = json.loads(compileResult)
# print(compileObj['status'])
if compileObj['status'] == 'Internal Error':
return ("Internal Error (Compiling)", "<pre>\n" +
cgi.escape(compileObj["errmsg"]) +
"</pre>")
elif compileObj['status'] == 'Compile-time Error':
errorObj = compileObj['error']
if errorObj['filename'] == ("student." + websheet.classname + ".java"):
result = "Syntax error (could not compile):"
result += "<br>"
result += '<tt>'+errorObj['filename'].split('.')[-2]+'.java</tt>, line '
result += str(translate_line(errorObj['row'])) + ':'
#result += str(errorObj['row']) + ':'
result += "<pre>\n"
#remove the safeexec bits
result += cgi.escape(errorObj["errmsg"]
.replace("stdlibpack.", "")
.replace("student.", "")
)
result += "</pre>"
return ("Syntax Error", result)
else:
return("Internal Error (Compiling reference solution and testing suite)",
'<b>File: </b><tt>'+errorObj['filename']+'</tt><br><b>Line number: '
+str(errorObj['row'])+"</b><pre>"
+errorObj['errmsg']+":\n"+dump[errorObj['filename'][:-5]].split("\n")[errorObj['row']-1]+"</pre>")
#print(compileResult)
# prefetch all urls, pass them to the grader on stdin
compileObj["stdin"] = json.dumps({
"fetched_urls":websheet.prefetch_urls(True)
})
compileResult = json.dumps(compileObj)
runUser = config.run_java(["traceprinter/ramtools/RAMRun", "tester." + websheet.classname], compileResult)
#runUser = config.run_java("tester." + websheet.classname + " " + student)
#print(runUser.stdout)
RAMRunError = runUser.stdout.startswith("Error")
RAMRunErrmsg = runUser.stdout[:runUser.stdout.index('\n')]
runUser.stdout = runUser.stdout[runUser.stdout.index('\n')+1:]
#print(runUser.stdout)
#print(runUser.stderr)
if runUser.returncode != 0 or runUser.stdout.startswith("Time Limit Exceeded"):
errmsg = runUser.stderr.split('\n')[0]
result = runUser.stdout
result += "<div class='safeexec'>Crashed! The grader reported "
result += "<code>"
result += cgi.escape(errmsg)
result += "</code>"
result += "</div>"
result += "<!--" + runUser.stderr + "-->"
return ("Sandbox Limit", result)
    if RAMRunError:
        # note: unlike the crash branch above, `result` has not been initialized on
        # this path, so start the message here instead of appending to it
        result = "<div class='safeexec'>Could not execute! "
        result += "<code>"
        result += cgi.escape(RAMRunErrmsg)
        result += "</code>"
        result += "</div>"
        return ("Internal Error (RAMRun)", result)
runtimeOutput = re.sub(
re.compile("(at|from) line (\d+) "),
lambda match: match.group(1)+" line " + translate_line(match.group(2)) + " ",
runUser.stdout)
#print(runtimeOutput)
    def ssf(s, t, u):  # substring of s between the first t and the next u
if t not in s: raise ValueError("Can't ssf("+s+","+t+","+u+")")
s = s[s.index(t)+len(t) : ]
return s[ : s.index(u)]
if "<div class='error'>Runtime error:" in runtimeOutput:
category = "Runtime Error"
errmsg = ssf(runtimeOutput[runtimeOutput.index("<div class='error'>Runtime error:"):], "<pre >", "\n")
elif "<div class='all-passed'>" in runtimeOutput:
category = "Passed"
epilogue = websheet.epilogue
else:
category = "Failed Tests"
if "<div class='error'>" in runtimeOutput:
errmsg = ssf(runtimeOutput, "<div class='error'>", '</div>')
else:
return ("Internal Error", "<b>stderr</b><pre>" + runUser.stderr + "</pre><b>stdout</b><br>" + runUser.stdout)
return (category, runtimeOutput)
| dz0/websheets | grade_java.py | Python | agpl-3.0 | 5,860 | 0.014164 |
#!/usr/bin/env python
import re
from livestreamer.plugin import Plugin
from livestreamer.stream import HDSStream
_channel = dict(
at="servustvhd_1@51229",
de="servustvhdde_1@75540"
)
STREAM_INFO_URL = "http://hdiosstv-f.akamaihd.net/z/{channel}/manifest.f4m"
_url_re = re.compile(r"http://(?:www\.)?servustv\.com/(de|at)/.*")
class ServusTV(Plugin):
@classmethod
def can_handle_url(cls, url):
match = _url_re.match(url)
return match
def _get_streams(self):
url_match = _url_re.match(self.url)
if url_match:
if url_match.group(1) in _channel:
return HDSStream.parse_manifest(self.session, STREAM_INFO_URL.format(channel=_channel[url_match.group(1)]))
__plugin__ = ServusTV
| chrippa/livestreamer | src/livestreamer/plugins/servustv.py | Python | bsd-2-clause | 758 | 0.001319 |
#!/usr/bin/env ambari-python-wrap
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import zipfile
import os
from ambari_server.ambariPath import AmbariPath
# Default values are hardcoded here
BACKUP_PROCESS = 'backup'
RESTORE_PROCESS = 'restore'
SUPPORTED_PROCESSES = [BACKUP_PROCESS, RESTORE_PROCESS]
# The list of files where the ambari server state is kept on the filesystem
AMBARI_FILESYSTEM_STATE = [AmbariPath.get("/etc/ambari-server/conf"),
AmbariPath.get("/var/lib/ambari-server/resources"),
AmbariPath.get("/var/run/ambari-server/bootstrap/"),
AmbariPath.get("/var/run/ambari-server/stack-recommendations")]
# What to use when no path/archive is specified
DEFAULT_ARCHIVE = AmbariPath.get("/var/lib/ambari-server/Ambari_State_Backup.zip")
# Responsible for managing the Backup/Restore process
class BackupRestore:
def __init__(self, state_file_list, zipname, zip_folder_path):
"""
Zip file creator
:param state_file_list: the list of files where the Ambari State is kept on the filesystem
:param zipname: the name of the archive to use
:param zip_folder_path: the path of the archive
:return:
"""
self.state_file_list = state_file_list
self.zipname = zipname
self.zip_folder_path = zip_folder_path
def perform_backup(self):
"""
Used to perform the actual backup, by creating the zip archive
:return:
"""
try:
print("Creating zip file...")
# Use allowZip64=True to allow sizes greater than 4GB
zipf = zipfile.ZipFile(self.zip_folder_path + self.zipname, 'w', allowZip64=True)
zipdir(zipf, self.state_file_list, self.zipname)
except Exception, e:
sys.exit("Could not create zip file. Details: " + str(e))
print("Zip file created at " + self.zip_folder_path + self.zipname)
def perform_restore(self):
"""
Used to perform the restore process
:return:
"""
try:
print("Extracting the archive " + self.zip_folder_path + self.zipname)
unzip(self.zip_folder_path + self.zipname, '/')
except Exception, e:
sys.exit("Could not extract the zipfile " + self.zip_folder_path + self.zipname
+ " Details: " + str(e))
def unzip(source_filename, dest_dir):
"""
Zip archive extractor
:param source_filename: the absolute path of the file to unzip
:param dest_dir: the destination of the zip content
:return:
"""
zf = zipfile.ZipFile(source_filename)
try:
zf.extractall(dest_dir)
except Exception, e:
print("A problem occurred while unzipping. Details: " + str(e))
raise e
finally:
zf.close()
def zipdir(zipf, state_file_list, zipname):
"""
Used to archive the specified directory
:param zipf: the zipfile
:param state_file_list: the file list to archive
:param zipname: the name of the zip
:return:
"""
try:
for path in state_file_list:
for root, dirs, files in os.walk(path):
for file in files:
if not file == zipname:
zipf.write(os.path.join(root, file))
except Exception, e:
print("A problem occurred while unzipping. Details: " + str(e))
raise e
finally:
zipf.close()
def print_usage():
"""
Usage instructions
:return:
"""
print("Usage: python BackupRestore.py <processType> [zip-folder-path|zip-file-path]\n\n"
+ " processType - backup : backs up the filesystem state of the Ambari server into a zip file\n"
+ " processType - restore : restores the filesystem state of the Ambari server\n"
+ " [zip-folder-path] used with backup specifies the path of the folder where the zip file to be created\n"
+ " [zip-folder-path] used with restore specifies the path of the Ambari folder where the zip file to restore from is located\n")
def validate_folders(folders):
"""
Used to validate folder existence on the machine
:param folders: folder list containing paths to validate
:return:
"""
for folder in folders:
if not os.path.isdir(folder):
sys.exit("Error while validating folders. Folder " + folder + " does not exist.")
def retrieve_path_and_zipname(archive_absolute_path):
target = {'path': None , 'zipname': None}
try:
elements = archive_absolute_path.split("/")
if elements is not None and len(elements)>0:
target['zipname'] = elements[len(elements)-1]
target['path'] = archive_absolute_path.replace(elements[len(elements)-1], "")
except Exception, e:
sys.exit("Could not retrieve path and zipname from the absolute path " + archive_absolute_path + ". Please check arguments."
+ " Details: " + str(e))
return target
def main(argv=None):
# Arg checks
if len(argv) != 3 and len(argv) != 2:
print_usage()
sys.exit("Invalid usage.")
else:
process_type = argv[1]
if not (SUPPORTED_PROCESSES.__contains__(process_type)):
sys.exit("Unsupported process type: " + process_type)
# if no archive is specified
if len(argv) == 2:
print "No path specified. Will use " + DEFAULT_ARCHIVE
location_data = retrieve_path_and_zipname(DEFAULT_ARCHIVE)
else:
location_data = retrieve_path_and_zipname(argv[2])
validate_folders([location_data['path']])
zip_file_path = location_data['path']
ambari_backup_zip_filename = location_data['zipname']
backup_restore = BackupRestore(AMBARI_FILESYSTEM_STATE, ambari_backup_zip_filename, zip_file_path)
print(process_type.title() + " process initiated.")
if process_type == BACKUP_PROCESS:
validate_folders(AMBARI_FILESYSTEM_STATE)
backup_restore.perform_backup()
print(BACKUP_PROCESS.title() + " complete.")
if process_type == RESTORE_PROCESS:
backup_restore.perform_restore()
print(RESTORE_PROCESS.title() + " complete.")
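# Editor's note (not part of the original script): a small programmatic sketch of the
# same flow that main() drives from the command line; the archive path below is
# illustrative only.
def _example_programmatic_backup():
    target = retrieve_path_and_zipname('/tmp/Ambari_State_Backup.zip')
    helper = BackupRestore(AMBARI_FILESYSTEM_STATE, target['zipname'], target['path'])
    helper.perform_backup()  # equivalent to: python BackupRestore.py backup /tmp/Ambari_State_Backup.zip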
if __name__ == '__main__':
main(sys.argv)
| arenadata/ambari | ambari-server/src/main/python/ambari_server/BackupRestore.py | Python | apache-2.0 | 6,601 | 0.009998 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2017 Fedele Mantuano (https://www.linkedin.com/in/fmantuano/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import copy
import os
import unittest
import mailparser
from pyfaup.faup import Faup
from context import mails
from context import utils
phishing = mails.phishing
base_path = os.path.realpath(os.path.dirname(__file__))
mail_thug = os.path.join(base_path, 'samples', 'mail_thug')
mail_form = os.path.join(base_path, 'samples', 'mail_form')
mail_test_5 = os.path.join(base_path, 'samples', 'mail_test_5')
mail_test_6 = os.path.join(base_path, 'samples', 'mail_test_6')
logging.getLogger().addHandler(logging.NullHandler())
class TestPhishing(unittest.TestCase):
faup = Faup()
def setUp(self):
parser = mailparser.parse_from_file(mail_thug)
self.email = parser.mail
self.attachments = parser.attachments
parser = mailparser.parse_from_file(mail_form)
self.email_form = parser.mail
body = self.email_form.get("body")
self.urls = utils.urls_extractor(body, self.faup)
d = {"generic": "conf/keywords/targets.example.yml",
"custom": "conf/keywords/targets_english.example.yml"}
self.targets = utils.load_keywords_dict(d)
d = {"generic": "conf/keywords/subjects.example.yml",
"custom": "conf/keywords/subjects_english.example.yml"}
self.subjects = utils.load_keywords_list(d)
def test_ParserError(self):
parser = mailparser.parse_from_file(mail_test_6)
body = parser.mail.get("body")
flag_form = phishing.check_form(body)
self.assertFalse(flag_form)
def test_none_values(self):
email = copy.deepcopy(self.email)
email.pop("body", None)
email.pop("subjects", None)
email.pop("from", None)
phishing.check_phishing(
email=email,
attachments=self.attachments,
urls_body=self.urls,
urls_attachments=self.urls,
target_keys=self.targets,
subject_keys=self.subjects)
def test_check_form(self):
body = self.email_form.get("body")
flag_form = phishing.check_form(body)
self.assertTrue(flag_form)
body = self.email.get("body")
flag_form = phishing.check_form(body)
self.assertFalse(flag_form)
def test_form_value_error(self):
parser = mailparser.parse_from_file(mail_test_5)
body = parser.mail.get("body")
flag_form = phishing.check_form(body)
self.assertFalse(flag_form)
def test_check_urls(self):
flag = False
if any(phishing.check_urls(self.urls, i)
for i in self.targets.values()):
flag = True
self.assertTrue(flag)
def test_check_phishing(self):
results = phishing.check_phishing(
email=self.email,
attachments=self.attachments,
urls_body=self.urls,
urls_attachments=self.urls,
target_keys=self.targets,
subject_keys=self.subjects)
self.assertIsInstance(results, dict)
self.assertEqual(results["score"], 123)
self.assertIn("filename_attachments", results["score_expanded"])
self.assertIn("mail_subject", results["score_expanded"])
self.assertIn("mail_body", results["score_expanded"])
self.assertIn("mail_from", results["score_expanded"])
self.assertIn("urls_body", results["score_expanded"])
self.assertIn("urls_attachments", results["score_expanded"])
self.assertIn("Test", results["targets"])
self.assertTrue(results["with_phishing"])
def test_check_phishing_form(self):
results = phishing.check_phishing(
email=self.email_form,
attachments=self.attachments,
urls_body=self.urls,
urls_attachments=self.urls,
target_keys=self.targets,
subject_keys=self.subjects)
self.assertIn("mail_form", results["score_expanded"])
if __name__ == '__main__':
unittest.main(verbosity=2)
| SpamScope/spamscope | tests/test_phishing.py | Python | apache-2.0 | 4,631 | 0 |
import platform
import pip
from django import get_version
from django.shortcuts import render
def home(request):
"""
    Renders the deployment server details on the screen.
    :param request: the Django-formatted HttpRequest
    :return: the demo template rendered with context c.
"""
c = dict(python_version=platform.python_version(), django_version=get_version(), pip_version=pip.__version__)
return render(request, 'demo/demo.html', c)
| Kesel/django | demo/views.py | Python | mit | 459 | 0.002179 |
from django.template import loader
from regulations.generator.layers.base import InlineLayer
from regulations.generator.section_url import SectionUrl
from regulations.generator.layers import utils
from ..node_types import to_markup_id
class DefinitionsLayer(InlineLayer):
shorthand = 'terms'
data_source = 'terms'
def __init__(self, layer):
self.layer = layer
self.template = loader.get_template(
'regulations/layers/definition_citation.html')
self.sectional = False
self.version = None
self.rev_urls = SectionUrl()
self.rendered = {}
# precomputation
for def_struct in self.layer['referenced'].values():
def_struct['reference_split'] = def_struct['reference'].split('-')
def replacement_for(self, original, data):
""" Create the link that takes you to the definition of the term. """
citation = data['ref']
# term = term w/o pluralization
term = self.layer['referenced'][citation]['term']
citation = self.layer['referenced'][citation]['reference_split']
key = (original, tuple(citation))
if key not in self.rendered:
context = {'citation': {
'url': self.rev_urls.fetch(citation, self.version,
self.sectional),
'label': original,
'term': term,
'definition_reference': '-'.join(to_markup_id(citation))}}
rendered = utils.render_template(self.template, context)
self.rendered[key] = rendered
return self.rendered[key]
| 18F/regulations-site | regulations/generator/layers/definitions.py | Python | cc0-1.0 | 1,632 | 0 |
#-*- coding: utf-8 -*-
# Author: Matt Earnshaw <matt@earnshaw.org.uk>
from __future__ import absolute_import
import os
import sys
import sunpy
from PyQt4.QtGui import QApplication
from sunpy.gui.mainwindow import MainWindow
from sunpy.io import UnrecognizedFileTypeError
class Plotman(object):
""" Wraps a MainWindow so PlotMan instances can be created via the CLI.
Examples
--------
from sunpy.gui import Plotman
plots = Plotman("data/examples")
plots.show()
"""
def __init__(self, *paths):
""" *paths: directories containing FITS paths
or FITS paths to be opened in PlotMan """
self.app = QApplication(sys.argv)
self.main = MainWindow()
self.open_files(paths)
def open_files(self, inputs):
VALID_EXTENSIONS = [".jp2", ".fits", ".fts"]
to_open = []
# Determine files to process
for input_ in inputs:
if os.path.isfile(input_):
to_open.append(input_)
elif os.path.isdir(input_):
                for file_ in os.listdir(input_):
                    # join with the directory so the file can be located later
                    to_open.append(os.path.join(input_, file_))
else:
raise IOError("Path " + input_ + " does not exist.")
# Load files
for filepath in to_open:
name, ext = os.path.splitext(filepath) #pylint: disable=W0612
if ext.lower() in VALID_EXTENSIONS:
try:
self.main.add_tab(filepath, os.path.basename(filepath))
except UnrecognizedFileTypeError:
pass
def show(self):
self.main.show()
self.app.exec_()
if __name__=="__main__":
from sunpy.gui import Plotman
plots = Plotman(sunpy.AIA_171_IMAGE)
plots.show()
| jslhs/sunpy | sunpy/gui/__init__.py | Python | bsd-2-clause | 1,811 | 0.005522 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# status_page.py - Copyright (C) 2012 Red Hat, Inc.
# Written by Fabian Deutsch <fabiand@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
from ovirt.node import ui, plugins, utils
from ovirt.node.config import defaults
from ovirt.node.utils import security, virt, system
import os
import textwrap
"""
Status page plugin
"""
class Plugin(plugins.NodePlugin):
"""This is the summary page, summarizing all sorts of informations
There are no validators, as there is no input.
"""
_model = None
def name(self):
return "Status"
def rank(self):
return 0
def model(self):
net_status, net_br, net_addrs = utils.network.networking_status()
net_addrs_str = ""
if net_addrs:
net_addrs_str = "\nIPv4: {inet}\nIPv6: {inet6}".format(**net_addrs)
num_domains = virt.number_of_domains()
return {
"status": virt.hardware_status(),
"networking": net_status,
"networking.bridge": "%s %s" % (net_br, net_addrs_str),
"logs": self._logging_summary(),
"libvirt.num_guests": num_domains,
}
def validators(self):
return {}
def ui_content(self):
"""Describes the UI this plugin requires
This is an ordered list of (path, widget) tuples.
"""
# Function to expand all "keywords" to the same length
aligned = lambda l: l.ljust(14)
# Network related widgets, appearing in one row
network_widgets = [ui.KeywordLabel("networking",
aligned("Networking: ")),
ui.KeywordLabel("networking.bridge",
"Bridge: "),
]
action_widgets = [ui.Button("action.lock", "Lock"),
ui.Button("action.logoff", "Log Off"),
ui.Button("action.restart", "Restart"),
ui.Button("action.poweroff", "Power Off")
]
widgets = [ui.Header("header[0]", "System Information"),
ui.KeywordLabel("status", aligned("Status: ")),
ui.Divider("divider[0]"),
ui.Row("row[0]", network_widgets),
ui.Divider("divider[1]"),
ui.KeywordLabel("logs", aligned("Logs: ")),
ui.Divider("divider[2]"),
ui.KeywordLabel("libvirt.num_guests",
aligned("Running VMs: ")),
ui.Divider("divider[3]"),
ui.Label("support.hint", "Press F8 for support menu"),
ui.Divider("divider[4]"),
ui.Row("row[1]",
[ui.Button("action.hostkey", "View Host Key"),
ui.Button("action.cpu_details",
"View CPU Details"),
]),
ui.Row("row[2]", action_widgets),
]
self.widgets.add(widgets)
page = ui.Page("page", widgets)
page.buttons = []
return page
def on_change(self, changes):
pass
def on_merge(self, changes):
# Handle button presses
number_of_vm = "There are %s Virtual Machines running." \
% (virt.number_of_domains())
if "action.lock" in changes:
self.logger.info("Locking screen")
self._lock_dialog = LockDialog()
self.application.ui.hotkeys_enabled(False)
self.widgets.add(self._lock_dialog)
return self._lock_dialog
elif "action.unlock" in changes and "password" in changes:
self.logger.info("UnLocking screen")
pam = security.PAM()
if pam.authenticate(os.getlogin(), changes["password"]):
self._lock_dialog.close()
self.application.ui.hotkeys_enabled(True)
else:
self.application.notice("The provided password was incorrect.")
self.widgets["password"].text("")
elif "action.logoff" in changes:
self.logger.info("Logging off")
self.application.quit()
elif "action.restart" in changes:
self.logger.info("Restarting")
return ui.ConfirmationDialog("confirm.reboot",
"Confirm System Restart",
number_of_vm +
"\nThis will restart the system,"
"proceed?")
elif "confirm.reboot.yes" in changes:
self.logger.info("Confirm Restarting")
self.dry_or(lambda: system.reboot())
elif "action.poweroff" in changes:
self.logger.info("Shutting down")
return ui.ConfirmationDialog("confirm.shutdown",
"Confirm System Poweroff",
number_of_vm +
"\nThis will shut down the system,"
"proceed?")
elif "confirm.shutdown.yes" in changes:
self.logger.info("Confirm Shutting down")
self.dry_or(lambda: system.poweroff())
elif "action.hostkey" in changes:
self.logger.info("Showing hostkey")
return HostkeyDialog("dialog.hostkey", "Host Key")
elif "action.cpu_details" in changes:
self.logger.info("Showing CPU details")
return CPUFeaturesDialog("dialog.cpu_details", "CPU Details")
elif "_save" in changes:
self.widgets["dialog.hostkey"].close()
def _logging_summary(self):
"""Return a textual summary of the current log configuration
"""
netconsole = defaults.Netconsole().retrieve()
syslog = defaults.Syslog().retrieve()
destinations = []
if syslog["server"]:
destinations.append("Rsyslog: %s:%s" % (syslog["server"],
syslog["port"] or "514"))
if netconsole["server"]:
destinations.append("Netconsole: %s:%s" %
(netconsole["server"],
netconsole["port"] or "6666"))
return ", ".join(destinations) if destinations else "Local Only"
class HostkeyDialog(ui.Dialog):
def __init__(self, path, title):
super(HostkeyDialog, self).__init__(path, title, [])
ssh = security.Ssh()
fp, hk = ssh.get_hostkey()
self.children = [ui.Label("hostkey.label[0]",
"RSA Host Key Fingerprint:"),
ui.Label("hostkey.fp", fp),
ui.Divider("hostkey.divider[0]"),
ui.Label("hostkey.label[1]",
"RSA Host Key:"),
ui.Label("hostkey", "\n".join(textwrap.wrap(hk, 64))),
]
self.buttons = [ui.CloseButton("dialog.close")]
class CPUFeaturesDialog(ui.InfoDialog):
"""The dialog beeing displayed when th euser clicks CPU Details
"""
def __init__(self, path, title):
msg = utils.system.cpu_details()
super(CPUFeaturesDialog, self).__init__(path, title, msg)
class LockDialog(ui.Dialog):
"""The dialog beeing displayed when the srceen is locked
"""
def __init__(self, path="lock.dialog", title="This screen is locked."):
super(LockDialog, self).__init__(path, title, [])
self.children = [ui.Header("lock.label[0]",
"Enter the admin password to unlock"),
ui.KeywordLabel("username", "Username: ",
os.getlogin()),
ui.PasswordEntry("password", "Password:")
]
self.buttons = [ui.Button("action.unlock", "Unlock")]
self.escape_key = None
| sdoumbouya/ovirt-node | src/ovirt/node/setup/core/status_page.py | Python | gpl-2.0 | 8,877 | 0.000113 |
"""
CeilometerConf - file ``/etc/ceilometer/ceilometer.conf``
=========================================================
The ``/etc/ceilometer/ceilometer.conf`` file is in a standard '.ini' format,
and this parser uses the IniConfigFile base class to read this.
Given a file containing the following test data::
[DEFAULT]
#
# From ceilometer
http_timeout = 600
debug = False
verbose = False
log_dir = /var/log/ceilometer
meter_dispatcher=database
event_dispatcher=database
[alarm]
evaluation_interval = 60
evaluation_service=ceilometer.alarm.service.SingletonAlarmService
partition_rpc_topic=alarm_partition_coordination
[api]
port = 8777
host = 192.0.2.10
[central]
[collector]
udp_address = 0.0.0.0
udp_port = 4952
[compute]
[coordination]
backend_url = redis://:chDWmHdH8dyjsmpCWfCEpJR87@192.0.2.7:6379/
Example:
>>> config = shared[CeilometerConf]
>>> config.sections()
['DEFAULT', 'alarm', 'api', 'central', 'collector', 'compute', 'coordination']
>>> config.items('api')
['port', 'host']
>>> config.has_option('alarm', 'evaluation_interval')
True
>>> config.get('coordination', 'backend_url')
'redis://:chDWmHdH8dyjsmpCWfCEpJR87@192.0.2.7:6379/'
>>> config.getint('collector', 'udp_port')
4952
>>> config.getboolean('DEFAULT', 'debug')
False
"""
from .. import parser, IniConfigFile
from insights.specs import ceilometer_conf
@parser(ceilometer_conf)
class CeilometerConf(IniConfigFile):
"""
A dict of the content of the ``ceilometer.conf`` configuration file.
Example selection of dictionary contents::
{
"DEFAULT": {
"http_timeout":"600",
"debug": "False"
},
"api": {
"port":"8877",
},
}
"""
pass
| wcmitchell/insights-core | insights/parsers/ceilometer_conf.py | Python | apache-2.0 | 1,890 | 0.000529 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C++ style thunks """
import glob
import os
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_proto import CGen, GetNodeComments, CommentLines, Comment
from idl_generator import Generator, GeneratorByFile
Option('thunkroot', 'Base directory of output',
default=os.path.join('..', 'thunk'))
class TGenError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
class ThunkBodyMetadata(object):
"""Metadata about thunk body. Used for selecting which headers to emit."""
def __init__(self):
self._apis = set()
self._includes = set()
def AddApi(self, api):
self._apis.add(api)
def Apis(self):
return self._apis
def AddInclude(self, include):
self._includes.add(include)
def Includes(self):
return self._includes
def _GetBaseFileName(filenode):
"""Returns the base name for output files, given the filenode.
Examples:
'dev/ppb_find_dev.h' -> 'ppb_find_dev'
'trusted/ppb_buffer_trusted.h' -> 'ppb_buffer_trusted'
"""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
return name
def _GetHeaderFileName(filenode):
"""Returns the name for the header for this file."""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
if path:
header = "ppapi/c/%s/%s.h" % (path, name)
else:
header = "ppapi/c/%s.h" % name
return header
def _GetThunkFileName(filenode, relpath):
"""Returns the thunk file name."""
path = os.path.split(filenode.GetProperty('NAME'))[0]
name = _GetBaseFileName(filenode)
# We don't reattach the path for thunk.
if relpath: name = os.path.join(relpath, name)
name = '%s%s' % (name, '_thunk.cc')
return name
def _MakeEnterLine(filenode, interface, arg, handle_errors, callback, meta):
"""Returns an EnterInstance/EnterResource string for a function."""
if arg[0] == 'PP_Instance':
if callback is None:
return 'EnterInstance enter(%s);' % arg[1]
else:
return 'EnterInstance enter(%s, %s);' % (arg[1], callback)
elif arg[0] == 'PP_Resource':
api_name = interface.GetName()
if api_name.endswith('Trusted'):
api_name = api_name[:-len('Trusted')]
if api_name.endswith('_Dev'):
api_name = api_name[:-len('_Dev')]
api_name += '_API'
enter_type = 'EnterResource<%s>' % api_name
# The API header matches the file name, not the interface name.
api_basename = _GetBaseFileName(filenode)
if api_basename.endswith('_dev'):
# Clip off _dev suffix.
api_basename = api_basename[:-len('_dev')]
if api_basename.endswith('_trusted'):
# Clip off _trusted suffix.
api_basename = api_basename[:-len('_trusted')]
meta.AddApi(api_basename + '_api')
if callback is None:
return '%s enter(%s, %s);' % (enter_type, arg[1],
str(handle_errors).lower())
else:
return '%s enter(%s, %s, %s);' % (enter_type, arg[1],
callback,
str(handle_errors).lower())
else:
raise TGenError("Unknown type for _MakeEnterLine: %s" % arg[0])
def _GetShortName(interface, filter_suffixes):
"""Return a shorter interface name that matches Is* and Create* functions."""
parts = interface.GetName().split('_')[1:]
  tail = parts[-1]
if tail in filter_suffixes:
parts = parts[:-1]
return ''.join(parts)
def _IsTypeCheck(interface, node):
"""Returns true if node represents a type-checking function."""
return node.GetName() == 'Is%s' % _GetShortName(interface, ['Dev', 'Private'])
def _GetCreateFuncName(interface):
"""Returns the creation function name for an interface."""
return 'Create%s' % _GetShortName(interface, ['Dev'])
def _GetDefaultFailureValue(t):
"""Returns the default failure value for a given type.
Returns None if no default failure value exists for the type.
"""
values = {
'PP_Bool': 'PP_FALSE',
'PP_Resource': '0',
'struct PP_Var': 'PP_MakeUndefined()',
'float': '0.0f',
'int32_t': 'enter.retval()',
'uint16_t': '0',
'uint32_t': '0',
'uint64_t': '0',
}
if t in values:
return values[t]
return None
def _MakeCreateMemberBody(interface, member, args):
"""Returns the body of a Create() function.
Args:
interface - IDLNode for the interface
member - IDLNode for member function
args - List of arguments for the Create() function
"""
if args[0][0] == 'PP_Resource':
body = 'Resource* object =\n'
body += ' PpapiGlobals::Get()->GetResourceTracker()->'
body += 'GetResource(%s);\n' % args[0][1]
body += 'if (!object)\n'
body += ' return 0;\n'
body += 'EnterResourceCreation enter(object->pp_instance());\n'
elif args[0][0] == 'PP_Instance':
body = 'EnterResourceCreation enter(%s);\n' % args[0][1]
else:
raise TGenError('Unknown arg type for Create(): %s' % args[0][0])
body += 'if (enter.failed())\n'
body += ' return 0;\n'
arg_list = ', '.join([a[1] for a in args])
if member.GetProperty('create_func'):
create_func = member.GetProperty('create_func')
else:
create_func = _GetCreateFuncName(interface)
body += 'return enter.functions()->%s(%s);' % (create_func,
arg_list)
return body
def _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
include_version, meta):
"""Returns the body of a typical function.
Args:
filenode - IDLNode for the file
release - release to generate body for
node - IDLNode for the interface
member - IDLNode for the member function
rtype - Return type for the member function
args - List of 4-tuple arguments for the member function
include_version - whether to include the version in the invocation
meta - ThunkBodyMetadata for header hints
"""
is_callback_func = args[len(args) - 1][0] == 'struct PP_CompletionCallback'
if is_callback_func:
call_args = args[:-1] + [('', 'enter.callback()', '', '')]
meta.AddInclude('ppapi/c/pp_completion_callback.h')
else:
call_args = args
if args[0][0] == 'PP_Instance':
call_arglist = ', '.join(a[1] for a in call_args)
function_container = 'functions'
else:
call_arglist = ', '.join(a[1] for a in call_args[1:])
function_container = 'object'
function_name = member.GetName()
if include_version:
version = node.GetVersion(release).replace('.', '_')
function_name += version
invocation = 'enter.%s()->%s(%s)' % (function_container,
function_name,
call_arglist)
handle_errors = not (member.GetProperty('report_errors') == 'False')
if is_callback_func:
body = '%s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
args[len(args) - 1][1], meta)
body += 'if (enter.failed())\n'
value = member.GetProperty('on_failure')
if value is None:
value = 'enter.retval()'
body += ' return %s;\n' % value
body += 'return enter.SetResult(%s);' % invocation
elif rtype == 'void':
body = '%s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
None, meta)
body += 'if (enter.succeeded())\n'
body += ' %s;' % invocation
else:
value = member.GetProperty('on_failure')
if value is None:
value = _GetDefaultFailureValue(rtype)
if value is None:
raise TGenError('No default value for rtype %s' % rtype)
body = '%s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
None, meta)
body += 'if (enter.failed())\n'
body += ' return %s;\n' % value
body += 'return %s;' % invocation
return body
def DefineMember(filenode, node, member, release, include_version, meta):
"""Returns a definition for a member function of an interface.
Args:
filenode - IDLNode for the file
node - IDLNode for the interface
member - IDLNode for the member function
release - release to generate
include_version - include the version in emitted function name.
meta - ThunkMetadata for header hints
Returns:
A string with the member definition.
"""
cgen = CGen()
rtype, name, arrays, args = cgen.GetComponents(member, release, 'return')
if _IsTypeCheck(node, member):
body = '%s\n' % _MakeEnterLine(filenode, node, args[0], False, None, meta)
body += 'return PP_FromBool(enter.succeeded());'
elif member.GetName() == 'Create':
body = _MakeCreateMemberBody(node, member, args)
else:
body = _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
include_version, meta)
signature = cgen.GetSignature(member, release, 'return', func_as_ptr=False,
include_version=include_version)
return '%s\n%s\n}' % (cgen.Indent('%s {' % signature, tabs=0),
cgen.Indent(body, tabs=1))
def _IsNewestMember(member, members, releases):
"""Returns true if member is the newest node with its name in members.
Currently, every node in the AST only has one version. This means that we
will have two sibling nodes with the same name to represent different
versions.
See http://crbug.com/157017 .
Special handling is required for nodes which share their name with others,
but aren't the newest version in the IDL.
Args:
member - The member which is checked if it's newest
members - The list of members to inspect
releases - The set of releases to check for versions in.
"""
build_list = member.GetUniqueReleases(releases)
release = build_list[0] # Pick the oldest release.
same_name_siblings = filter(
lambda n: str(n) == str(member) and n != member, members)
for s in same_name_siblings:
sibling_build_list = s.GetUniqueReleases(releases)
sibling_release = sibling_build_list[0]
if sibling_release > release:
return False
return True
class TGen(GeneratorByFile):
def __init__(self):
Generator.__init__(self, 'Thunk', 'tgen', 'Generate the C++ thunk.')
def GenerateFile(self, filenode, releases, options):
savename = _GetThunkFileName(filenode, GetOption('thunkroot'))
my_min, my_max = filenode.GetMinMax(releases)
if my_min > releases[-1] or my_max < releases[0]:
if os.path.isfile(savename):
print "Removing stale %s for this range." % filenode.GetName()
os.remove(os.path.realpath(savename))
return False
do_generate = filenode.GetProperty('generate_thunk')
if not do_generate:
return False
thunk_out = IDLOutFile(savename)
body, meta = self.GenerateBody(thunk_out, filenode, releases, options)
self.WriteHead(thunk_out, filenode, releases, options, meta)
thunk_out.Write('\n\n'.join(body))
self.WriteTail(thunk_out, filenode, releases, options)
return thunk_out.Close()
def WriteHead(self, out, filenode, releases, options, meta):
__pychecker__ = 'unusednames=options'
cgen = CGen()
cright_node = filenode.GetChildren()[0]
assert(cright_node.IsA('Copyright'))
out.Write('%s\n' % cgen.Copyright(cright_node, cpp_style=True))
# Wrap the From ... modified ... comment if it would be >80 characters.
from_text = 'From %s' % (
filenode.GetProperty('NAME').replace(os.sep,'/'))
modified_text = 'modified %s.' % (
filenode.GetProperty('DATETIME'))
if len(from_text) + len(modified_text) < 74:
out.Write('// %s %s\n\n' % (from_text, modified_text))
else:
out.Write('// %s,\n// %s\n\n' % (from_text, modified_text))
# TODO(teravest): Don't emit includes we don't need.
includes = ['ppapi/c/pp_errors.h',
'ppapi/shared_impl/tracked_callback.h',
'ppapi/thunk/enter.h',
'ppapi/thunk/ppb_instance_api.h',
'ppapi/thunk/resource_creation_api.h',
'ppapi/thunk/thunk.h']
includes.append(_GetHeaderFileName(filenode))
for api in meta.Apis():
includes.append('ppapi/thunk/%s.h' % api.lower())
for i in meta.Includes():
includes.append(i)
for include in sorted(includes):
out.Write('#include "%s"\n' % include)
out.Write('\n')
out.Write('namespace ppapi {\n')
out.Write('namespace thunk {\n')
out.Write('\n')
out.Write('namespace {\n')
out.Write('\n')
def GenerateBody(self, out, filenode, releases, options):
"""Generates a member function lines to be written and metadata.
Returns a tuple of (body, meta) where:
body - a list of lines with member function bodies
meta - a ThunkMetadata instance for hinting which headers are needed.
"""
__pychecker__ = 'unusednames=options'
out_members = []
meta = ThunkBodyMetadata()
for node in filenode.GetListOf('Interface'):
# Skip if this node is not in this release
if not node.InReleases(releases):
print "Skipping %s" % node
continue
# Generate Member functions
if node.IsA('Interface'):
members = node.GetListOf('Member')
for child in members:
build_list = child.GetUniqueReleases(releases)
# We have to filter out releases this node isn't in.
build_list = filter(lambda r: child.InReleases([r]), build_list)
if len(build_list) == 0:
continue
assert(len(build_list) == 1)
release = build_list[-1]
include_version = not _IsNewestMember(child, members, releases)
member = DefineMember(filenode, node, child, release, include_version,
meta)
if not member:
continue
out_members.append(member)
return (out_members, meta)
def WriteTail(self, out, filenode, releases, options):
__pychecker__ = 'unusednames=options'
cgen = CGen()
version_list = []
out.Write('\n\n')
for node in filenode.GetListOf('Interface'):
build_list = node.GetUniqueReleases(releases)
for build in build_list:
version = node.GetVersion(build).replace('.', '_')
thunk_name = 'g_' + node.GetName().lower() + '_thunk_' + \
version
thunk_type = '_'.join((node.GetName(), version))
version_list.append((thunk_type, thunk_name))
declare_line = 'const %s %s = {' % (thunk_type, thunk_name)
if len(declare_line) > 80:
declare_line = 'const %s\n %s = {' % (thunk_type, thunk_name)
out.Write('%s\n' % declare_line)
generated_functions = []
members = node.GetListOf('Member')
for child in members:
rtype, name, arrays, args = cgen.GetComponents(
child, build, 'return')
if not _IsNewestMember(child, members, releases):
version = node.GetVersion(build).replace('.', '_')
name += '_' + version
if child.InReleases([build]):
generated_functions.append(name)
out.Write(',\n'.join([' &%s' % f for f in generated_functions]))
out.Write('\n};\n\n')
out.Write('} // namespace\n')
out.Write('\n')
for thunk_type, thunk_name in version_list:
thunk_decl = 'const %s* Get%s_Thunk() {\n' % (thunk_type, thunk_type)
if len(thunk_decl) > 80:
thunk_decl = 'const %s*\n Get%s_Thunk() {\n' % (thunk_type,
thunk_type)
out.Write(thunk_decl)
out.Write(' return &%s;\n' % thunk_name)
out.Write('}\n')
out.Write('\n')
out.Write('} // namespace thunk\n')
out.Write('} // namespace ppapi\n')
tgen = TGen()
def Main(args):
# Default invocation will verify the golden files are unchanged.
failed = 0
if not args:
args = ['--wnone', '--diff', '--test', '--thunkroot=.']
ParseOptions(args)
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_thunk', '*.idl')
filenames = glob.glob(idldir)
ast = ParseFiles(filenames)
if tgen.GenerateRange(ast, ['M13', 'M14'], {}):
print "Golden file for M13-M14 failed."
failed = 1
else:
print "Golden file for M13-M14 passed."
return failed
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| timopulkkinen/BubbleFish | ppapi/generators/idl_thunk.py | Python | bsd-3-clause | 16,821 | 0.009036 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import weetest
def TestCycle():
want = []
got = []
for x in itertools.cycle(()):
got.append(x)
assert got == want, 'empty cycle yields no elements'
arg = (0, 1, 2)
want = (0, 1, 2) * 10
got = []
limit = 10 * len(arg)
counter = 0
for x in itertools.cycle((0, 1, 2)):
got.append(x)
counter += 1
if counter == limit:
break
assert tuple(got) == want, 'tuple(cycle%s) == %s, want %s' % (arg, tuple(got), want)
def TestDropwhile():
r = range(10)
cases = [
((lambda x: x < 5, r), (5, 6, 7, 8, 9)),
((lambda x: True, r), ()),
((lambda x: False, r), tuple(r)),
]
for args, want in cases:
got = tuple(itertools.dropwhile(*args))
assert got == want, 'tuple(dropwhile%s) == %s, want %s' % (args, got, want)
def TestChain():
r = range(10)
cases = [
([r], tuple(r)),
([r, r], tuple(r) + tuple(r)),
([], ())
]
for args, want in cases:
got = tuple(itertools.chain(*args))
assert got == want, 'tuple(chain%s) == %s, want %s' % (args, got, want)
def TestFromIterable():
r = range(10)
cases = [
([r], tuple(r)),
([r, r], tuple(r) + tuple(r)),
([], ())
]
for args, want in cases:
got = tuple(itertools.chain.from_iterable(args))
assert got == want, 'tuple(from_iterable%s) == %s, want %s' % (args, got, want)
def TestIFilter():
r = range(10)
cases = [
((lambda x: x < 5, r), (0, 1, 2, 3, 4)),
((lambda x: False, r), ()),
((lambda x: True, r), tuple(r)),
((None, r), (1, 2, 3, 4, 5, 6, 7, 8, 9))
]
for args, want in cases:
got = tuple(itertools.ifilter(*args))
assert got == want, 'tuple(ifilter%s) == %s, want %s' % (args, got, want)
def TestIFilterFalse():
r = range(10)
cases = [
((lambda x: x < 5, r), (5, 6, 7, 8, 9)),
((lambda x: False, r), tuple(r)),
((lambda x: True, r), ()),
((None, r), (0,))
]
for args, want in cases:
got = tuple(itertools.ifilterfalse(*args))
assert got == want, 'tuple(ifilterfalse%s) == %s, want %s' % (args, got, want)
def TestISlice():
r = range(10)
cases = [
((r, 5), (0, 1, 2, 3, 4)),
((r, 25, 30), ()),
((r, 1, None, 3), (1, 4, 7)),
]
for args, want in cases:
got = tuple(itertools.islice(*args))
assert got == want, 'tuple(islice%s) == %s, want %s' % (args, got, want)
def TestIZipLongest():
cases = [
(('abc', range(6)), (('a', 0), ('b', 1), ('c', 2), (None, 3), (None, 4), (None, 5))),
((range(6), 'abc'), ((0, 'a'), (1, 'b'), (2, 'c'), (3, None), (4, None), (5, None))),
(([1, None, 3], 'ab', range(1)), ((1, 'a', 0), (None, 'b', None), (3, None, None))),
]
for args, want in cases:
got = tuple(itertools.izip_longest(*args))
assert got == want, 'tuple(izip_longest%s) == %s, want %s' % (args, got, want)
def TestProduct():
cases = [
(([1, 2], ['a', 'b']), ((1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'))),
(([1], ['a', 'b']), ((1, 'a'), (1, 'b'))),
(([],), ()),
]
for args, want in cases:
got = tuple(itertools.product(*args))
assert got == want, 'tuple(product%s) == %s, want %s' % (args, got, want)
def TestPermutations():
cases = [
(('AB',), (('A', 'B'), ('B', 'A'))),
(('ABC', 2), (('A', 'B'), ('A', 'C'), ('B', 'A'), ('B', 'C'), ('C', 'A'), ('C', 'B'))),
((range(3),), ((0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0))),
(([],), ((),)),
(([], 0), ((),)),
((range(3), 4), ()),
]
for args, want in cases:
got = tuple(itertools.permutations(*args))
assert got == want, 'tuple(permutations%s) == %s, want %s' % (args, got, want)
def TestCombinations():
cases = [
((range(4), 3), ((0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3))),
]
for args, want in cases:
got = tuple(itertools.combinations(*args))
assert got == want, 'tuple(combinations%s) == %s, want %s' % (args, got, want)
def TestCombinationsWithReplacement():
cases = [
(([-12], 2), (((-12, -12),))),
(('AB', 3), (('A', 'A', 'A'), ('A', 'A', 'B'), ('A', 'B', 'B'), ('B', 'B', 'B'))),
(([], 2), ()),
(([], 0), ((),))
]
for args, want in cases:
got = tuple(itertools.combinations_with_replacement(*args))
assert got == want, 'tuple(combinations_with_replacement%s) == %s, want %s' % (args, got, want)
def TestGroupBy():
cases = [
(([1, 2, 2, 3, 3, 3, 4, 4, 4, 4],), [(1, [1]), (2, [2, 2]), (3, [3, 3, 3]), (4, [4, 4, 4, 4])]),
((['aa', 'ab', 'abc', 'bcd', 'abcde'], len), [(2, ['aa', 'ab']), (3, ['abc', 'bcd']), (5, ['abcde'])]),
]
for args, want in cases:
got = [(k, list(v)) for k, v in itertools.groupby(*args)]
assert got == want, 'groupby %s == %s, want %s' % (args, got, want)
def TestTakewhile():
r = range(10)
cases = [
((lambda x: x % 2 == 0, r), (0,)),
((lambda x: True, r), tuple(r)),
((lambda x: False, r), ())
]
for args, want in cases:
got = tuple(itertools.takewhile(*args))
assert got == want, 'tuple(takewhile%s) == %s, want %s' % (args, got, want)
if __name__ == '__main__':
weetest.RunTests()
| google/grumpy | lib/itertools_test.py | Python | apache-2.0 | 5,660 | 0.011131 |
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
from downloadCommon import DownloadCommon, getSeqName
from DdlCommonInterface import DdlCommonInterface
import re
class FbDownloader(DownloadCommon):
def __init__(self):
self.strDbms = 'firebird'
def connect(self, info):
try:
import kinterbasdb
except:
print "Missing Firebird support through kinterbasdb"
return
self.strDbms = 'firebird'
self.version = info['version']
self.conn = kinterbasdb.connect(
dsn='localhost:%s' % info['dbname'],
user = info['user'],
password = info['pass'])
self.cursor = self.conn.cursor()
def useConnection(self, con, version):
self.conn = con
self.version = version
self.cursor = self.conn.cursor()
def getTables(self, tableList):
""" Returns the list of tables as a array of strings """
strQuery = "SELECT RDB$RELATION_NAME FROM RDB$RELATIONS WHERE RDB$SYSTEM_FLAG=0 AND RDB$VIEW_SOURCE IS NULL;"
self.cursor.execute(strQuery)
return self._confirmReturns([x[0].strip() for x in self.cursor.fetchall() ], tableList)
def getTableColumns(self, strTable):
""" Returns column in this format
(nColIndex, strColumnName, strColType, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, bNotNull, strDefault, auto_increment)
"""
strSql = """
SELECT RF.RDB$FIELD_POSITION, RF.RDB$FIELD_NAME, RDB$FIELD_TYPE, F.RDB$FIELD_LENGTH,
RDB$FIELD_PRECISION, RDB$FIELD_SCALE, RF.RDB$NULL_FLAG, RF.RDB$DEFAULT_SOURCE, F.RDB$FIELD_SUB_TYPE
FROM RDB$RELATION_FIELDS RF, RDB$FIELDS F
WHERE RF.RDB$RELATION_NAME = ?
AND RF.RDB$FIELD_SOURCE = F.RDB$FIELD_NAME
ORDER BY RF.RDB$FIELD_POSITION;"""
self.cursor.execute(strSql, [strTable])
rows = self.cursor.fetchall()
ret = []
# TODO auto_increment
bAutoIncrement = False
for row in rows:
attnum, name, nType, size, numsize, scale, attnull, default, sub_type = row
if scale and scale < 0:
scale = -scale
            # The original condition referenced 'numprecradix', which is never defined
            # here (a leftover from a PostgreSQL downloader); fall back to the precision
            # when no length is reported.
            if not size and numsize:
                size = numsize
strType = self.convertTypeId(nType)
if sub_type == 1:
strType = 'numeric'
elif sub_type == 2:
strType = 'decimal'
if numsize > 0:
size = numsize
numsize = None
if strType == 'integer' and size == 4:
size = None
elif strType == 'date' and size == 4:
size = None
elif strType == 'float' and size == 4:
size = None
if default:
# Remove the 'DEFAULT ' part of the SQL
default = default.replace('DEFAULT ', '')
if self.hasAutoincrement(strTable, name):
bAutoIncrement = True
else:
bAutoIncrement = False
ret.append((name.strip(), strType, size, scale, attnull, default, bAutoIncrement))
return ret
def convertTypeId(self, nType):
types = {
261: 'blob',
14 : 'char',
40 : 'cstring',
11 : 'd_float',
27 : 'double',
10 : 'float',
16 : 'int64',
8 : 'integer',
9 : 'quad',
7 : 'smallint',
12 : 'date',
13 : 'time',
35 : 'timestamp',
37 : 'varchar',
}
strType = ''
if nType in types:
strType = types[nType]
if nType not in [14, 40, 37]:
size = None
else:
print "Uknown type %d" % (nType)
return strType
def hasAutoincrement(self, strTableName, strColName):
strSql = "SELECT RDB$GENERATOR_NAME FROM RDB$GENERATORS WHERE UPPER(RDB$GENERATOR_NAME)=UPPER(?);"
self.cursor.execute(strSql, [getSeqName(strTableName, strColName)[0:31]])
rows = self.cursor.fetchall()
if rows:
return True
return False
def getTableComment(self, strTableName):
""" Returns the comment as a string """
strSql = "SELECT RDB$DESCRIPTION FROM RDB$RELATIONS WHERE RDB$RELATION_NAME=?;"
self.cursor.execute(strSql, [strTableName])
rows = self.cursor.fetchall()
if rows:
return rows[0][0]
return None
def getColumnComment(self, strTableName, strColumnName):
""" Returns the comment as a string """
strSql = """SELECT RDB$DESCRIPTION
FROM RDB$RELATION_FIELDS
WHERE RDB$RELATION_NAME = ? AND RDB$FIELD_NAME = ?"""
self.cursor.execute(strSql, [strTableName, strColumnName])
rows = self.cursor.fetchall()
if rows:
return rows[0][0]
return None
def getTableIndexes(self, strTableName):
""" Returns
(strIndexName, [strColumns,], bIsUnique, bIsPrimary, bIsClustered)
or []
Warning the Primary key constraint cheats by knowing the name probably starts with pk_
"""
strSql = """SELECT RDB$INDEX_NAME, RDB$UNIQUE_FLAG
FROM RDB$INDICES
WHERE RDB$RELATION_NAME = '%s'
""" % (strTableName)
self.cursor.execute(strSql)
rows = self.cursor.fetchall()
ret = []
if not rows:
return ret
for row in rows:
(strIndexName, bIsUnique) = row
colList = self._fetchTableColumnsForIndex(strIndexName)
if strIndexName.lower().startswith('pk_'):
bIsPrimary = True
else:
bIsPrimary = False
strIndexName = strIndexName.strip()
ret.append((strIndexName, colList, bIsUnique, bIsPrimary, None))
return ret
def _fetchTableColumnsForIndex(self, strIndexName):
strSql = """SELECT RDB$FIELD_NAME
FROM RDB$INDEX_SEGMENTS
WHERE RDB$INDEX_NAME = ?
ORDER BY RDB$FIELD_POSITION
"""
self.cursor.execute(strSql, [strIndexName.strip()])
rows = self.cursor.fetchall()
return [row[0].strip() for row in rows]
def getTableRelations(self, strTableName):
""" Returns
(strConstraintName, colName, fk_table, fk_columns)
or []
"""
strSql = """SELECT RDB$CONSTRAINT_NAME
FROM RDB$RELATION_CONSTRAINTS
WHERE RDB$RELATION_NAME = '%s'
""" % (strTableName)
self.cursor.execute(strSql)
rows = self.cursor.fetchall()
ret = []
if not rows:
return ret
return ret
def _fetchTableColumnsNamesByNums(self, strTableName, nums):
strSql = """
SELECT pa.attname
FROM pg_attribute pa, pg_class pc
WHERE pa.attrelid = pc.oid
AND pa.attisdropped = 'f'
AND pc.relname = %s
AND pc.relkind = 'r'
AND pa.attnum in (%s)
ORDER BY pa.attnum
""" % ( '%s', ','.join(['%s' for num in nums]) )
self.cursor.execute(strSql, [strTableName] + nums)
rows = self.cursor.fetchall()
return [row[0] for row in rows]
def _decodeLength(self, type, atttypmod):
        # gleaned from http://www.postgresql-websource.com/psql713/source-format_type.htm
VARHDRSZ = 4
if type == 'varchar':
return (atttypmod - VARHDRSZ, None)
if type == 'numeric':
atttypmod -= VARHDRSZ
return ( (atttypmod >> 16) & 0xffff, atttypmod & 0xffff)
if type == 'varbit' or type == 'bit':
return (atttypmod, None)
return (None, None)
def getViews(self, viewList):
strQuery = "SELECT RDB$VIEW_NAME FROM RDB$VIEW_RELATIONS"
#TODO add viewList constraint
self.cursor.execute(strQuery)
return self._confirmReturns([x[0].strip() for x in self.cursor.fetchall() ], viewList)
def getViewDefinition(self, strViewName):
strQuery = "SELECT RDB$RELATION_NAME, RDB$VIEW_SOURCE FROM RDB$RELATIONS WHERE RDB$RELATION_NAME = UPPER(?)"
self.cursor.execute(strQuery, [strViewName])
rows = self.cursor.fetchall()
if rows:
ret = rows[0][1].strip()
return ret
return ''
def getFunctions(self, functionList):
#strQuery = "SELECT RDB$FUNCTION_NAME FROM RDB$FUNCTIONS WHERE RDB$SYSTEM_FLAG = 0"
#TODO add functionList constraint
strQuery = "SELECT RDB$PROCEDURE_NAME FROM RDB$PROCEDURES WHERE RDB$SYSTEM_FLAG = 0"
self.cursor.execute(strQuery)
rows = self.cursor.fetchall()
return self._confirmReturns([x[0].strip() for x in rows], functionList)
def getFunctionDefinition(self, strSpecifiName):
""" Returns (routineName, parameters, return, language, definition) """
strQuery = "SELECT RDB$PROCEDURE_NAME, RDB$PROCEDURE_SOURCE FROM RDB$PROCEDURES WHERE RDB$SYSTEM_FLAG = 0 AND RDB$PROCEDURE_NAME = upper(?)"
self.cursor.execute(strQuery, [strSpecifiName])
rows = self.cursor.fetchall()
strProcName, strDefinition = rows[0]
strDefinition = strDefinition.strip()
strProcName = strProcName.strip()
strQuery = """SELECT PP.RDB$PARAMETER_NAME, PP.RDB$FIELD_SOURCE, PP.RDB$PARAMETER_TYPE, F.RDB$FIELD_TYPE, F.RDB$FIELD_LENGTH, F.RDB$FIELD_PRECISION, RDB$FIELD_SCALE
FROM RDB$PROCEDURE_PARAMETERS PP, RDB$FIELDS F
WHERE PP.RDB$PROCEDURE_NAME = upper(?)
AND PP.RDB$FIELD_SOURCE = F.RDB$FIELD_NAME
ORDER BY PP.RDB$PARAMETER_NUMBER"""
self.cursor.execute(strQuery, [strSpecifiName])
rows = self.cursor.fetchall()
args = []
rets = []
for row in rows:
strParamName, strSrc, nParamType, nType, nLen, nPrecision, nScale = row
strParamName = strParamName.strip().lower()
strSrc = strSrc.strip()
strType = self.convertTypeId(nType)
if nParamType == 0:
args.append(strParamName + ' ' + strType)
else:
if strParamName.lower() == 'ret':
rets.append(strType)
else:
rets.append(strParamName + ' ' + strType)
return (strProcName.lower(), args, ','.join(rets), '', strDefinition)
class DdlFirebird(DdlCommonInterface):
def __init__(self):
DdlCommonInterface.__init__(self, 'firebird')
self.params['max_id_len'] = { 'default' : 256 }
self.params['table_desc'] = ["UPDATE RDB$RELATIONS SET RDB$DESCRIPTION = %(desc)s\n\tWHERE RDB$RELATION_NAME = upper('%(table)s')"]
self.params['column_desc'] = ["UPDATE RDB$RELATION_FIELDS SET RDB$DESCRIPTION = %(desc)s\n\tWHERE RDB$RELATION_NAME = upper('%(table)s') AND RDB$FIELD_NAME = upper('%(column)s')"]
self.params['drop_constraints_on_col_rename'] = True
self.params['drop_table_has_cascade'] = False
self.params['alter_default'] = ['ALTER TABLE %(table_name)s ALTER %(column_name)s TYPE %(column_type)s']
self.params['rename_column'] = ['ALTER TABLE %(table_name)s ALTER %(old_col_name)s TO %(new_col_name)s']
self.params['alter_default'] = ['ALTER TABLE %(table_name)s ALTER COLUMN %(column_name)s SET DEFAULT %(new_default)s']
self.params['keywords'] = """
ACTION ACTIVE ADD ADMIN AFTER ALL ALTER AND ANY AS ASC ASCENDING AT AUTO AUTODDL AVG BASED BASENAME BASE_NAME
BEFORE BEGIN BETWEEN BLOB BLOBEDIT BUFFER BY CACHE CASCADE CAST CHAR CHARACTER CHARACTER_LENGTH CHAR_LENGTH
CHECK CHECK_POINT_LEN CHECK_POINT_LENGTH COLLATE COLLATION COLUMN COMMIT COMMITTED COMPILETIME COMPUTED CLOSE
CONDITIONAL CONNECT CONSTRAINT CONTAINING CONTINUE COUNT CREATE CSTRING CURRENT CURRENT_DATE CURRENT_TIME
CURRENT_TIMESTAMP CURSOR DATABASE DATE DAY DB_KEY DEBUG DEC DECIMAL DECLARE DEFAULT
DELETE DESC DESCENDING DESCRIBE DESCRIPTOR DISCONNECT DISPLAY DISTINCT DO DOMAIN DOUBLE DROP ECHO EDIT ELSE
END ENTRY_POINT ESCAPE EVENT EXCEPTION EXECUTE EXISTS EXIT EXTERN EXTERNAL EXTRACT FETCH FILE FILTER FLOAT
FOR FOREIGN FOUND FREE_IT FROM FULL FUNCTION GDSCODE GENERATOR GEN_ID GLOBAL GOTO GRANT GROUP GROUP_COMMIT_WAIT
GROUP_COMMIT_ WAIT_TIME HAVING HELP HOUR IF IMMEDIATE IN INACTIVE INDEX INDICATOR INIT INNER INPUT INPUT_TYPE
INSERT INT INTEGER INTO IS ISOLATION ISQL JOIN KEY LC_MESSAGES LC_TYPE LEFT LENGTH LEV LEVEL LIKE LOGFILE
LOG_BUFFER_SIZE LOG_BUF_SIZE LONG MANUAL MAX MAXIMUM MAXIMUM_SEGMENT MAX_SEGMENT MERGE MESSAGE MIN MINIMUM
MINUTE MODULE_NAME MONTH NAMES NATIONAL NATURAL NCHAR NO NOAUTO NOT NULL NUMERIC NUM_LOG_BUFS NUM_LOG_BUFFERS
OCTET_LENGTH OF ON ONLY OPEN OPTION OR ORDER OUTER OUTPUT OUTPUT_TYPE OVERFLOW PAGE PAGELENGTH PAGES PAGE_SIZE
PARAMETER PASSWORD PLAN POSITION POST_EVENT PRECISION PREPARE PROCEDURE PROTECTED PRIMARY PRIVILEGES PUBLIC QUIT
RAW_PARTITIONS RDB$DB_KEY READ REAL RECORD_VERSION REFERENCES RELEASE RESERV RESERVING RESTRICT RETAIN RETURN
RETURNING_VALUES RETURNS REVOKE RIGHT ROLE ROLLBACK RUNTIME SCHEMA SECOND SEGMENT SELECT SET SHADOW SHARED SHELL
SHOW SINGULAR SIZE SMALLINT SNAPSHOT SOME SORT SQLCODE SQLERROR SQLWARNING STABILITY STARTING STARTS STATEMENT
STATIC STATISTICS SUB_TYPE SUM SUSPEND TABLE TERMINATOR THEN TIME TIMESTAMP TO TRANSACTION TRANSLATE TRANSLATION
TRIGGER TRIM TYPE UNCOMMITTED UNION UNIQUE UPDATE UPPER USER USING VALUE VALUES VARCHAR VARIABLE VARYING VERSION
VIEW WAIT WEEKDAY WHEN WHENEVER WHERE WHILE WITH WORK WRITE YEAR YEARDAY""".split()
# Note you need to remove the constraints like:
# alter table table1 drop constraint pk_table1;
# before dropping the table (what a pain)
def addFunction(self, strNewFunctionName, argumentList, strReturn, strContents, attribs, diffs):
argumentList = [ '%s' % arg for arg in argumentList ]
info = {
'functionname' : self.quoteName(strNewFunctionName),
'arguments' : ', '.join(argumentList),
'returns' : strReturn,
'contents' : strContents.replace("'", "''"),
'language' : '',
}
if 'language' in attribs:
info['language'] = ' LANGUAGE %s' % (attribs['language'])
diffs.append(('Add function', # OR REPLACE
"CREATE PROCEDURE %(functionname)s(%(arguments)s) RETURNS (ret %(returns)s) AS \n%(contents)s;" % info )
)
def dropFunction(self, strOldFunctionName, argumentList, diffs):
info = {
'functionname' : self.quoteName(strOldFunctionName),
}
diffs.append(('Drop function',
'DROP PROCEDURE %(functionname)s' % info )
)
| BackupTheBerlios/xml2ddl-svn | xml2ddl/FirebirdInterface.py | Python | gpl-2.0 | 15,609 | 0.010635 |
from django.contrib import admin
from .models import (
Image, NutritionalDataDish, NutritionalDataGuess, Dish, Guess)
class ImageInline(admin.StackedInline):
model = Image
class NutritionalDataDishInline(admin.StackedInline):
model = NutritionalDataDish
class DishAdmin(admin.ModelAdmin):
list_display = ('description', 'is_vegetarian', 'created_at')
inlines = [
ImageInline,
NutritionalDataDishInline
]
class NutritionalDataGuessInline(admin.StackedInline):
model = NutritionalDataGuess
class GuessAdmin(admin.ModelAdmin):
inlines = [NutritionalDataGuessInline]
admin.site.register(Image)
admin.site.register(Dish, DishAdmin)
admin.site.register(Guess, GuessAdmin)
| grigoryk/calory-game-server | dishes/admin.py | Python | gpl-2.0 | 727 | 0 |
import numpy as np
import numpy.ma as ma
from numpy import linalg as LA
import matplotlib.pyplot as plt
import itertools
import collections
from scipy import stats
def acf(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
# from stackexchange
x = x - x.mean() # remove mean
if type(lags) is int:
lags = range(lags)
C = ma.zeros((len(lags),))
sigma2 = x.var()
for i, l in enumerate(lags):
if l == 0:
C[i] = 1
elif l >= x.shape[0]:
C[i] = ma.masked
else:
x0 = x[:-l].copy()
x1 = x[l:].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject] = ma.masked
x1[reject] = ma.masked
C[i] = (x0*x1).mean()/sigma2
return C
def ccf(x, y, lags, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
x = x - x.mean() # remove mean
y = y - y.mean()
if type(lags) is int:
lags = np.arange(-lags,lags)
C = ma.zeros((len(lags),))
sigma2 = x.std()*y.std()
for i, l in enumerate(lags):
if l == 0:
C[i] = (x*y).mean()/sigma2
else:
if l > 0:
x0 = x[:-l].copy()
y1 = y[l:].copy()
else:
x0 = y[:l].copy()
y1 = x[-l:].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject] = ma.masked
y1[reject] = ma.masked
C[i] = (x0*y1).mean()/sigma2
return C
def acv(k, List):
'''
Autocovariance
k is the lag order
'''
y = List.copy()
y = y - y.mean()
if k == 0:
return (y*y).mean()
else:
return (y[:-k]*y[k:]).mean()
def dotacf(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
if type(lags) is int:
lags = xrange(lags)
C = ma.zeros((len(lags),))
for i, l in enumerate(lags):
if l == 0:
C[i] = (x*x).sum(axis=1).mean()
else:
x0 = x[:-l, :].copy()
x1 = x[l:, :].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject, :] = ma.masked
x1[reject, :] = ma.masked
C[i] = (x0*x1).sum(axis=1).mean()
return C
def pacfe(p,j,List):
'''
Partial autocorrelation function estimates
p is the order of the AR(p) process
j is the coefficient in an AR(p) process
'''
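    # NOTE: the acf(...) calls below assume an acf(lag, series) convention, i.e. the
    # normalized autocovariance at a single lag (as in acv(k, List) / acv(0, List)),
    # which differs from the acf(x, lags, exclude) helper defined at the top of this
    # module.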
if p==2 and j==1:
return (acf(j,List)*(1-acf(p,List)))/(1-(acf(j,List))**2)
elif p==2 and j==2:
return (acf(2,List)-(acf(1,List))**2)/(1-(acf(1,List))**2)
elif p==j and p!=2 and j!=2:
c=0
for a in range(1,p):
c+=pacfe(p-1,a,List)*acf(p-a,List)
d=0
for b in range(1,p):
d+=pacfe(p-1,b,List)*acf(b,List)
return (acf(p,List)-c)/(1-d)
else:
return pacfe(p-1,j,List)-pacfe(p,p,List)*pacfe(p-1,p-j,List)
def drift(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
if type(lags) is int:
lags = xrange(lags)
mu = ma.zeros((len(lags),))
for i, lag in enumerate(lags):
if lag==0:
mu[i] = 0
elif lag >= x.shape[0]:
mu[i] = ma.masked
else:
x0 = x[lag:].copy()
x1 = x[:-lag].copy()
reject = (exclude[lag:]-exclude[:-lag])>0
x0[reject] = ma.masked
x1[reject] = ma.masked
displacements = x0 - x1
mu[i] = displacements.mean()
return mu
def unwrapma(x):
# Adapted from numpy unwrap, this version ignores missing data
idx = ma.array(np.arange(0,x.shape[0]), mask=x.mask)
idxc = idx.compressed()
xc = x.compressed()
dd = np.diff(xc)
ddmod = np.mod(dd+np.pi, 2*np.pi)-np.pi
ddmod[(ddmod==-np.pi) & (dd > 0)] = np.pi
phc_correct = ddmod - dd
phc_correct[np.abs(dd)<np.pi] = 0
ph_correct = np.zeros(x.shape)
ph_correct[idxc[1:]] = phc_correct
up = x + ph_correct.cumsum()
return up
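# Illustrative sketch (assumed usage, not from the original source): unwrap a
# masked phase series while ignoring missing samples, e.g.
#   theta = ma.masked_invalid(np.arctan2(dy, dx))   # dy, dx are hypothetical inputs
#   theta_unwrapped = unwrapma(theta)
# The 2*pi jump corrections are computed only between consecutive unmasked points.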
def nextpow2(n):
'''
Returns the next highest power of 2 from n
'''
m_f = np.log2(n)
m_i = np.ceil(m_f)
return 2**m_i
def phaserand(X, independent=False, reduceHighFreqNoise=True):
'''
Generates a randomized surrogate dataset for X, preserving linear temporal
correlations. If independent is False (default), linear correlations
between columns of x are also preserved.
If X contains missing values, they are filled with the mean of that
channel.
The algorithm works by randomizing the phases in the Fourier domain. For
non-independent shuffling, the same random phases are used for each
channel.
References:
Theiler, J., Eubank, S., Longtin, A., Galdrikian, B., & Doyne Farmer, J.
(1992). Testing for nonlinearity in time series: the method of
surrogate data. Physica D: Nonlinear Phenomena, 58(1), 77-94.
Prichard, D. and Theiler, J. (1994) Generating surrogate data for time
series with several simultaneously measured variables. Phys. Rev.
Lett. 73(7), 951-954.
Podobnik, B., Fu, D. F., Stanley, H. E., & Ivanov, P. C. (2007).
Power-law autocorrelated stochastic processes with long-range
cross-correlations. The European Physical Journal B, 56(1), 47-52.
'''
# Deal with array vs matrix by adding new axis
if len(X.shape) == 1:
X = X[:, np.newaxis]
# Deal with missing data
if isinstance(X, ma.MaskedArray):
# truncate all missing data at beginning and end
idxNotAllMissing = (~np.all(X.mask, axis=1)).nonzero()[0]
X = X[idxNotAllMissing[0]:idxNotAllMissing[-1], :]
X = X.filled(X.mean(axis=0)) # fill interior mask with the mean
# Reduce high-frequency noise by min difference between first and last
if reduceHighFreqNoise:
delta = X - X[0, :]
threshold = 1e-3*np.std(X, axis=0)
# find last pt in which all the channels are about the same as the beginning
# and also the index is even
        goodEndPt = np.nonzero((np.all(np.abs(delta) < threshold, axis=1)) &
                               (np.arange(0, X.shape[0]) % 2 == 0))[0][-1]
if goodEndPt > X.shape[0]/2: # make sure we keep at least half the data
X = X[:goodEndPt, :]
# Fourier transform and extract amplitude and phases
# The frequencies are shifted so 0 is centered (fftshift)
N = X.shape[0] #int(nextpow2(X.shape[0])) # size for FFT
if N % 2 != 0:
N = N-1
    h = N // 2  # half the length of the data (kept as an int for indexing)
Z = np.fft.fft(X, N, axis=0)
M = np.fft.fftshift(np.abs(Z), axes=0) # the amplitudes
phase = np.fft.fftshift(np.angle(Z), axes=0) # the original phases
# Randomize the phases. The phases need to be symmetric for postivie and
# negative frequencies.
if independent: # generate random phases for each channel
        randphase = 2.*np.pi*np.random.rand(h-1, X.shape[1])  # random phases
newphase = np.zeros((N, X.shape[1])) # new phases to use
newphase[0, :] = phase[0, :] # keep the zero freq (don't know why)
newphase[1:h, :] = randphase[::-1, :]
newphase[h, :] = phase[h, :]
newphase[h+1:, :] = -randphase
else: # generate one set of random phases (same as above)
randphase = 2.*np.pi*np.random.rand(h-1)
newphase = np.zeros((N, X.shape[1]))
newphase[0, :] = phase[0, :]
newphase[1:h, :] = randphase[::-1, np.newaxis]
newphase[h, :] = phase[h, :]
newphase[h+1:, :] = -randphase[:, np.newaxis]
# Reconstruct the signal from the original amplitude and the new phases
z2 = M*np.exp(newphase*1.j)
# Return the time-domain signal
return np.fft.ifft(np.fft.ifftshift(z2, axes=0),
axis=0).real.squeeze()
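# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It builds a synthetic AR(1) series, computes its autocorrelation
# with acf(), and generates a phase-randomized surrogate with phaserand().
# The AR coefficient (0.9) and the series length are arbitrary demo values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n = 2048
    x = np.zeros(n)
    for t in range(1, n):
        # AR(1): each sample is 0.9 times the previous one plus white noise
        x[t] = 0.9 * x[t - 1] + rng.randn()
    C = acf(ma.array(x), lags=20)
    print('acf of original, lags 0..4:  %s' % np.round(C[:5], 3))
    # surrogate with the same linear correlations but randomized Fourier phases
    x_surr = phaserand(x, reduceHighFreqNoise=False)
    C_surr = acf(ma.array(x_surr), lags=20)
    print('acf of surrogate, lags 0..4: %s' % np.round(C_surr[:5], 3))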
| stephenhelms/WormTracker | python/tsstats.py | Python | apache-2.0 | 8,100 | 0.006667 |
import os
import matplotlib.pyplot as plt
def plot_hist(history, model_name=None):
plt.plot(history['loss'], linewidth=3, label='train')
plt.plot(history['val_loss'], linewidth=3, label='valid')
plt.grid()
plt.legend()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.ylim(1e-4, 1e-2)
plt.yscale('log')
if model_name:
path = os.path.join('images', model_name + '-loss.png')
plt.savefig(path)
else:
plt.show()
def plot_model_arch(model, model_name):
from keras.utils.visualize_util import plot
path = os.path.join('images', model_name + '.png')
plot(model, to_file=path, show_shapes=True)
def plot_samples(X, y):
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(16):
axis = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
_plot_sample(X[i], y[i], axis)
plt.show()
def _plot_sample(x, y, axis):
img = x.reshape(96, 96)
axis.imshow(img, cmap='gray')
axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)
| nipe0324/kaggle-keypoints-detection-keras | plotter.py | Python | apache-2.0 | 1,057 | 0.02176 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Defines several helpers to add rules to Iptables
"""
from configparser import SectionProxy
from contextlib import suppress
from ipaddress import ip_address, ip_network
import re
import socket
from pyptables.iptables import Iptables, Ip6tables, IptablesRule
__author__ = 'Benjamin Schubert, ben.c.schubert@gmail.com'
ipv4_handler = Iptables()
ipv6_handler = Ip6tables()
def get_ip_address(name: str):
"""
Tries to convert the input to an ip address
:param name: the input to convert
:return: the correct ip address or None if unable to convert
"""
with suppress(ValueError):
return ip_address(name)
with suppress(ValueError):
return ip_network(name)
with suppress(socket.gaierror, ValueError):
return ip_address(socket.gethostbyname(name))
return None
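# Illustrative behaviour of get_ip_address() (hedged examples, not from the original
# source; the hostname case depends on the local resolver):
#   get_ip_address("192.0.2.1")      -> IPv4Address('192.0.2.1')
#   get_ip_address("2001:db8::/32")  -> IPv6Network('2001:db8::/32')
#   get_ip_address("localhost")      -> e.g. IPv4Address('127.0.0.1')
#   get_ip_address("no-such-host.")  -> None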
def setup_global_begin(config: SectionProxy) -> None:
"""
Sets up the tables globally for ipv4 and ipv6
:param config: the configuration used
"""
# noinspection PyUnresolvedReferences
def setup(handler: Iptables, _config: SectionProxy) -> None:
"""
        Sets up the tables to accept new rules: resets all rules, sets defaults and allows global traffic
:param handler: the Iptables instance on which to operate
:param _config: the configuration used
"""
handler.reset()
for chain in _config.getlist("closed_chains", []):
handler.set_default(chain, "DROP")
if _config.getboolean("allow_established_traffic", False):
handler.allow_existing_traffic()
for interface in _config.getlist("allow_traffic_on_interface", []):
handler.allow_traffic_on_interface(interface)
if _config.getboolean("drop_invalid_traffic", False):
handler.drop_invalid_traffic()
if config.getboolean("ipv4", False):
setup(ipv4_handler, config)
if config.getboolean("ipv6", False):
setup(ipv6_handler, config)
def setup_global_end(config: SectionProxy) -> None:
"""
    Sets up the last things: logging, drops and ssh knocking
:param config: the config to use
"""
def setup(handler: Iptables, _config: SectionProxy, version) -> None:
"""
        Ties up the settings: logging, drops and ssh knocking
:param handler: the Iptables instance on which to operate
:param _config: the configuration used
:param version: the version of ip protocol used (4 or 6)
"""
if _config.parser.has_section("logging"):
for entry in _config.parser.items("logging"):
if not entry[0].startswith("ignore_"):
continue
chain = entry[0].replace("ignore_", "").upper()
values = [item for item in re.split(r";\s*", entry[1]) if item != ""]
for value in values:
data = [item if item != "" else None for item in re.split(r",\s*", value.strip())]
address1, address2 = data[4:6]
if address1 is not None:
address1 = get_ip_address(address1)
if address2 is not None:
address2 = get_ip_address(address2)
if (address1 is not None and address1.version != version) or (
address2 is not None and address2.version != version):
continue
handler.no_log(chain, *data)
if _config.getboolean("ssh_knocking"):
handler.enable_ssh_knocking(_config.parser["ssh_knocking"])
if _config.parser.has_section("logging"):
section = _config.parser["logging"]
for chain in section.getlist("log"):
handler.log(chain, section.get("prefix"), section.get("rate", None), section.getint("level", 4))
if config.getboolean("ipv4", False):
setup(ipv4_handler, config, version=4)
if config.getboolean("ipv6", False):
setup(ipv6_handler, config, version=6)
# noinspection PyUnresolvedReferences
def handle_service(config: SectionProxy) -> None:
"""
Sets a rule or a service
:param config: the configuration for the rule
"""
for src in config.getlist("source", [None]):
for dst in config.getlist("destination", [None]):
source = None
destination = None
if src is not None:
source = get_ip_address(src)
if source is None:
print("[ERROR] Could not determine ip address for {} : skipping".format(src))
continue
if dst is not None:
destination = get_ip_address(dst)
if destination is None:
print("[ERROR] Could not determine ip address for {} : skipping".format(dst))
continue
rule = IptablesRule(
name=config.name,
interface=config.get("interface"),
chain=config.get("chain"),
protocol=config.get("protocol"),
action=config.get("action"),
source=source,
destination=destination,
sport=config.get("sport"),
dport=config.get("dport"),
remote=config.get("remote", None)
)
if config.getboolean("ipv4", False) and (rule.source is None or rule.source.version == 4) and \
(rule.destination is None or rule.source.version == 4):
ipv4_handler.add_rule(rule)
if config.getboolean("ipv6") and (rule.source is None or rule.source.version == 6) and \
(rule.destination is None or rule.source.version == 6):
ipv6_handler.add_rule(rule)
if (rule.source is not None and rule.destination is not None) and \
rule.destination.version != rule.source.version:
print("[ERROR] Could not add rule with ip versions no matching: {} and {}".format(
str(rule.source, rule.destination)
))
| BenjaminSchubert/Pyptables | pyptables/executors.py | Python | mit | 6,181 | 0.00178 |
#!/usr/bin/env python
import unittest
from app.md5py import MD5
class TddInPythonExample(unittest.TestCase):
def test_object_program(self):
m = MD5()
m.update("1234")
hexdigest = m.hexdigest()
self.assertEqual("81dc9bdb52d04dc20036dbd8313ed055", hexdigest)
if __name__ == '__main__':
unittest.main()
| davidam/python-examples | security/md5/test/test_md5py.py | Python | gpl-3.0 | 352 | 0.008523 |
class ListResource(object):
def __init__(self, version):
"""
:param Version version:
"""
self._version = version
""" :type: Version """
| tysonholub/twilio-python | twilio/base/list_resource.py | Python | mit | 180 | 0 |
"""
maxminddb.decoder
~~~~~~~~~~~~~~~~~
This package contains code for decoding the MaxMind DB data section.
"""
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object): # pylint: disable=too-few-public-methods
"""Decoder for the data section of the MaxMind DB"""
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
"""Created a Decoder for a MaxMind DB
Arguments:
database_buffer -- an mmap'd MaxMind DB file.
pointer_base -- the base number to use when decoding a pointer
pointer_test -- used for internal unit testing of pointer code
"""
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
return size != 0, offset
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# pylint: disable=no-self-argument
# |-> I am open to better ways of doing this as long as it doesn't involve
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + type_size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value,) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def _decode_map(self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
"""Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
"""
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset)
def _read_extended(self, offset):
(next_byte,) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
type_num = next_byte + 7
        if type_num < 8:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)'
)
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
# Using unpack rather than int_from_bytes as it is about 200 lookups
# per second faster here.
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
size = 285 + struct.unpack(b'!H', size_bytes)[0]
elif size > 30:
size = struct.unpack(
b'!I', size_bytes.rjust(4, b'\x00'))[0] + 65821
return size, new_offset
| kikinteractive/MaxMind-DB-Reader-python | maxminddb/decoder.py | Python | apache-2.0 | 5,904 | 0 |
"""Test suite for abdt_branch."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [XB] can test is_abandoned, is_null, is_new
# [XC] can move between all states without error
# [XD] can set and retrieve repo name, branch link
# [ C] can move bad_pre_review -> 'new' states without duplicating branches
# [ D] unique names and emails are returned in the order of most recent first
# [ E] all commits are shown when no arguments are supplied
# [ E] number of commits can be limited by max_commits argument
# [ E] number of commits can be limited by max_size argument
# [ ] can detect if review branch has new commits (after ff, merge, rebase)
# [ ] can get raw diff from branch
# [ ] can get author names and emails from branch
# [ ] raise if get author names and emails from branch with no history
# [ ] raise if get author names and emails from branch with invalid base
# [ ] can 'get_any_author_emails', raise if no emails ever
# [ ] bad unicode chars in diffs
# [ ] bad unicode chars in commit messages
# [ ] can land an uncomplicated review
# [ ] XXX: withReservedBranch
# [ ] XXX: emptyMergeWorkflow
# [ ] XXX: mergeConflictWorkflow
# [ ] XXX: changeAlreadyMergedOnBase
# [ ] XXX: commandeeredLand
# [ ] XXX: createHugeReview
# [ ] XXX: hugeUpdateToReview
# [ ] XXX: empty repository, no history
# [ ] XXX: landing when origin has been updated underneath us
# [ ] XXX: moving tracker branches when there's something in the way
# -----------------------------------------------------------------------------
# Tests:
# [ A] test_A_Breathing
# [ B] test_B_Empty
# [ C] test_C_BadPreReviewToNew
# [ D] test_D_AlternatingAuthors
# [ E] test_E_NewCommitsDescription
# [XB] test_XB_UntrackedBranch
# [XC] test_XC_MoveBetweenAllMarkedStates
# [XD] check_XD_SetRetrieveRepoNameBranchLink
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import phlgit_branch
import phlgit_push
import phlgit_revparse
import phlgitu_fixture
import phlgitx_refcache
import abdt_branch
import abdt_branchtester
import abdt_classicnaming
import abdt_differresultcache
import abdt_git
import abdt_naming
class Test(unittest.TestCase):
def __init__(self, data):
super(Test, self).__init__(data)
self.repos = None
self.repo_central = None
self.repo_dev = None
self.repo_arcyd = None
def setUp(self):
self.repos = phlgitu_fixture.CentralisedWithTwoWorkers()
self.repo_central = self.repos.central_repo
self.repo_dev = self.repos.w0.repo
sys_repo = self.repos.w1.repo
refcache_repo = phlgitx_refcache.Repo(sys_repo)
differ_cache = abdt_differresultcache.Cache(refcache_repo)
self.repo_arcyd = abdt_git.Repo(
refcache_repo, differ_cache, 'origin', 'myrepo')
def tearDown(self):
self.repos.close()
def test_A_Breathing(self):
pass
def test_B_Empty(self):
pass
def test_C_BadPreReviewToNew(self):
# can move bad_pre_review -> 'new' states without duplicating branches
base, branch_name, branch = self._setup_for_untracked_branch()
transition_list = [
branch.mark_ok_new_review, branch.mark_new_bad_in_review
]
for do_transition in transition_list:
branches = phlgit_branch.get_remote(self.repo_arcyd, 'origin')
branch.mark_bad_pre_review()
branches_bad_pre = phlgit_branch.get_remote(
self.repo_arcyd, 'origin')
do_transition(102)
branches_new = phlgit_branch.get_remote(self.repo_arcyd, 'origin')
# we expect to have gained one branch when starting to track as
# 'bad_pre_review'.
self.assertEqual(len(branches_bad_pre), len(branches) + 1)
# we expect to have the same number of branches after moving with
# 'mark_ok_new_review'
self.assertEqual(len(branches_bad_pre), len(branches_new))
# remove the tracking branch and make sure the count has gone down
branch.clear_mark()
branches_cleared = phlgit_branch.get_remote(
self.repo_arcyd, 'origin')
self.assertEqual(len(branches_cleared), len(branches))
def test_D_AlternatingAuthors(self):
base, branch_name, branch = self._setup_for_untracked_branch()
alice_user = 'Alice'
alice_email = 'alice@server.test'
bob_user = 'Bob'
bob_email = 'bob@server.test'
self._dev_commit_new_empty_file('ALICE1', alice_user, alice_email)
self._dev_commit_new_empty_file('BOB1', bob_user, bob_email)
self._dev_commit_new_empty_file('ALICE2', alice_user, alice_email)
phlgit_push.push(self.repo_dev, branch_name, 'origin')
self.repo_arcyd('fetch', 'origin')
author_names_emails = branch.get_author_names_emails()
self.assertTupleEqual(
author_names_emails[0],
(bob_user, bob_email))
self.assertTupleEqual(
author_names_emails[1],
(alice_user, alice_email))
# any_author_emails = branch.get_any_author_emails()
# self.assertEqual(any_author_emails[-1], alice_email)
# self.assertEqual(any_author_emails[-2], bob_email)
def test_E_NewCommitsDescription(self):
base, branch_name, branch = self._setup_for_untracked_branch()
user = 'Alice'
email = 'alice@server.test'
self._dev_commit_new_empty_file('Commit 1', user, email)
self._dev_commit_new_empty_file('Commit 2', user, email)
self._dev_commit_new_empty_file('Commit 3', user, email)
self._dev_commit_new_empty_file('Commit 4', user, email)
phlgit_push.push(self.repo_dev, branch_name, 'origin')
self.repo_arcyd('fetch', 'origin')
# [ E] all commits are shown when no arguments are supplied
new_commits_str = branch.describe_new_commits()
new_commits = new_commits_str.splitlines()
self.assertEqual(4, len(new_commits))
count = 4
for line in new_commits:
self.assertTrue(line.endswith('Commit {}'.format(count)))
count -= 1
# [ E] number of commits can be limited by max_commits argument
new_commits_str = branch.describe_new_commits(2)
new_commits = new_commits_str.splitlines()
self.assertEqual(3, len(new_commits))
self.assertTrue(new_commits[0].endswith('Commit 4'))
self.assertTrue(new_commits[1].endswith('Commit 3'))
self.assertEqual(new_commits[2], '...2 commits not shown.')
# [ E] number of commits can be limited by max_size argument
new_commits_str = branch.describe_new_commits(3, 20)
new_commits = new_commits_str.splitlines()
self.assertEqual(2, len(new_commits))
self.assertTrue(new_commits[0].endswith('Commit 4'))
self.assertEqual(new_commits[1], '...3 commits not shown.')
def _dev_commit_new_empty_file(self, filename, user, email):
self._create_new_file(self.repo_dev, filename)
self.repo_dev('add', filename)
self.repo_dev(
'commit',
'-m',
filename,
'--author=' + '{} <{}>'.format(user, email))
def test_XB_UntrackedBranch(self):
abdt_branchtester.check_XB_UntrackedBranch(self)
def test_XC_MoveBetweenAllMarkedStates(self):
abdt_branchtester.check_XC_MoveBetweenAllMarkedStates(self)
def check_D_SetRetrieveRepoNameBranchLink(self):
abdt_branchtester.check_XD_SetRetrieveRepoNameBranchLink(self)
def _create_new_file(self, repo, filename):
self.assertFalse(os.path.isfile(filename))
open(os.path.join(repo.working_dir, filename), 'a').close()
def _setup_for_tracked_branch(self):
base, branch_name, branch = self._setup_for_untracked_branch()
branch.mark_ok_new_review(101)
return base, branch_name, branch
def _setup_for_untracked_branch(self, repo_name='name', branch_url=None):
base = abdt_naming.EXAMPLE_REVIEW_BRANCH_BASE
naming = abdt_classicnaming.Naming()
branch_name = abdt_classicnaming.EXAMPLE_REVIEW_BRANCH_NAME
self.repo_dev('checkout', '-b', branch_name)
phlgit_push.push(self.repo_dev, branch_name, 'origin')
self.repo_arcyd('fetch', 'origin')
review_branch = naming.make_review_branch_from_name(branch_name)
review_hash = phlgit_revparse.get_sha1_or_none(
self.repo_arcyd, review_branch.branch)
branch = abdt_branch.Branch(
self.repo_arcyd,
review_branch,
review_hash,
None,
None,
None,
repo_name,
branch_url)
# should not raise
branch.verify_review_branch_base()
return base, branch_name, branch
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| cs-shadow/phabricator-tools | py/abd/abdt_branch__t.py | Python | apache-2.0 | 10,066 | 0 |
#! /usr/bin/python
# Derived from dupinator.py.
#
# This program takes a list of pathnames to audio files and moves them to a central archive.
# It replaces the original with a symbolic link to the archived version.
# The archived version will have several names (all hard-linked): the MD5 hash (with the extension)
# appended to it, *plus* all names that the file has been archived as.
#
# For example:
# Audio#123.aif
# might get archived to:
# /all_hashed_audio/a/f/af171f6a82b3caf793d3b3ac3.aif
# /all_hashed_audio/a/f/af171f6a82b3caf793d3b3ac3.aliases/Audio#123.aif
# if the same audio is encountered in a file called:
# Audio#987.aif
# then it will be replaced by a symlink to the MD5-named file and an alias will be added:
# /all_hashed_audio/a/f/af171f6a82b3caf793d3b3ac3.aliases/Audio#987.aif
#
#
# WHAT IS THIS FOR?
#
# This program is for filesystems where there are a lot of large audio files and there is a
# high incidence of duplicates. This program allows for a great deal of space to be reclaimed.
#
# 2015-04-26 - joe@emenaker.com
import os
import hashlib
import pickle
from collections import defaultdict
REPOSITORY_BASE = "/Volumes/Old Time Machine/all_hashed_audio"
# ROOTS = ( "/Users/jemenake", )
ROOTS = ("/Volumes/Old Macintosh HD", "/Volumes/Old Time Machine")
def pickle_data(data, pathname):
picklefile = file(pathname, "w")
pickle.dump(data, picklefile)
picklefile.close()
###
### If a directory doesn't exist, create it
###
def ensuredir(pathname):
if not os.path.isdir(pathname):
try:
os.mkdir(pathname)
except:
print "Can't create mandatory directory: " + pathname + " : Does it exist? Do we have permissions?"
exit()
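###
### The two helpers below are referenced later but are not defined anywhere in this
### script; these are assumed implementations, sketched to match the repository
### layout used further down (REPOSITORY/<hex 1>/<hex 2>/<md5>, plus a
### "<md5>.aliases" directory holding one hard link per original file name).
###
def get_archive_md5_name(md5):
    return REPOSITORY + "/" + md5[0] + "/" + md5[1] + "/" + md5
def get_archive_alias_name(md5, alias):
    return get_archive_md5_name(md5) + ".aliases/" + alias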
###
### If a file is in the archive
###
def is_in_archive(md5):
pathname = get_archive_md5_name(md5)
return os.path.isfile(pathname)
###
### If an archived file with a MD5 is listed with a particular name
###
def has_alias(md5, alias):
pathname = get_archive_alias_name(md5, alias)
return os.path.isfile(pathname)
###
### Do we want this file?
### (Used to indicate if a file qualifies as an audio file)
###
def want(pathname):
return pathname.endswith(".aif")
###
###
###
pathnames = list()
for rootname in ROOTS:
print 'Scanning directory "%s"....' % rootname
for (dirpath, dirnames, filenames) in os.walk(rootname):
pathnames.extend([ dirpath + "/" + a for a in filenames if want(dirpath + "/" + a)])
REPOSITORY = REPOSITORY_BASE
PICKLE_FILE = REPOSITORY + "/" + "hash_values.pickle"
print " creating hash folders..."
# Make sure that we have a place to stick all of the links for the hashes
ensuredir(REPOSITORY)
## Make a two-deep folder tree for holding all of the hashes
digits = range(10)
digits.extend([ 'a', 'b', 'c', 'd', 'e', 'f' ])
for digit1 in digits:
dir1 = REPOSITORY + "/" + str(digit1)
ensuredir(dir1)
for digit2 in digits:
dir2 = dir1 + "/" + str(digit2)
ensuredir(dir2)
print " calcuating hashes..."
# Calc the hash-value of every file
thehashes = defaultdict(list)
hashes_by_pathname = dict()
for pathname in pathnames:
print pathname
    hashValue = hashlib.md5(file(pathname, 'rb').read()).hexdigest()  # hash the file contents, not the pathname
thehashes[hashValue].append(pathname)
basename = os.path.basename(pathname)
    if basename in hashes_by_pathname.keys() and hashes_by_pathname[basename] != hashValue:
        print "There are multiple files named " + basename + " and they have different hash values!"
    hashes_by_pathname[basename] = hashValue
pickle_data(thehashes, PICKLE_FILE)
print " making the hard-links..."
# Make the hash links
for hash in thehashes.keys():
print hash
hash_pathname = REPOSITORY + "/" + hash[0] + "/" + hash[1] + "/" + hash
# Link the first pathname in our list of files with this hash to a file with the hashvalue
if not os.path.isfile(hash_pathname):
os.link(thehashes[hash][0], hash_pathname)
alias_dir = hash_pathname + ".aliases"
ensuredir(alias_dir)
for pathname in thehashes[hash]:
alias_pathname = alias_dir + "/" + os.path.basename(pathname)
if not os.path.isfile(alias_pathname):
os.link(hash_pathname, alias_pathname)
print " " + pathname
exit()
print 'Finding potential dupes...'
potentialDupes = []
potentialCount = 0
trueType = type(True)
sizes = filesBySize.keys()
sizes.sort()
for k in sizes:
inFiles = filesBySize[k]
outFiles = []
hashes = {}
if len(inFiles) is 1: continue
print 'Testing %d files of size %d...' % (len(inFiles), k)
for fileName in inFiles:
if not os.path.isfile(fileName):
continue
aFile = file(fileName, 'r')
hasher = hashlib.md5(aFile.read(1024))
hashValue = hasher.digest()
if hashes.has_key(hashValue):
x = hashes[hashValue]
if type(x) is not trueType:
outFiles.append(hashes[hashValue])
hashes[hashValue] = True
outFiles.append(fileName)
else:
hashes[hashValue] = fileName
aFile.close()
if len(outFiles):
potentialDupes.append(outFiles)
potentialCount += len(outFiles)
del filesBySize
print 'Found %d sets of potential dupes...' % potentialCount
print 'Scanning for real dupes...'
dupdump = file("dupedump", "w")
pickle.dump(dupes, dupdump)
dupdump.close()
| jemenake/LogicProjectTools | AudioArchiver.py | Python | mit | 5,075 | 0.021084 |
# Copyright (c) 2008-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""0.3.0 to 0.4.0
Revision ID: 0.3.0
Revises:
"""
# revision identifiers, used by Alembic.
revision = '0.4.0'
down_revision = '0.3.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
#alter column user.username, alter column user.email, project.name and add column replication_policy.deleted
op.alter_column('user', 'username', type_=sa.String(32), existing_type=sa.String(15))
op.alter_column('user', 'email', type_=sa.String(255), existing_type=sa.String(128))
op.alter_column('project', 'name', type_=sa.String(41), existing_type=sa.String(30), nullable=False)
op.alter_column('replication_target', 'password', type_=sa.String(128), existing_type=sa.String(40))
op.add_column('replication_policy', sa.Column('deleted', mysql.TINYINT(1), nullable=False, server_default=sa.text("'0'")))
#create index pid_optime (project_id, op_time) on table access_log, poid_uptime (policy_id, update_time) on table replication_job
op.create_index('pid_optime', 'access_log', ['project_id', 'op_time'])
op.create_index('poid_uptime', 'replication_job', ['policy_id', 'update_time'])
#create tables: repository
Repository.__table__.create(bind)
def downgrade():
"""
Downgrade has been disabled.
"""
pass
| wknet123/harbor | tools/migration/migration_harbor/versions/0_4_0.py | Python | apache-2.0 | 2,016 | 0.008433 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import nowdate, cstr, flt, now, getdate, add_months
from frappe import throw, _
from frappe.utils import formatdate
import frappe.desk.reportview
class FiscalYearError(frappe.ValidationError): pass
class BudgetError(frappe.ValidationError): pass
@frappe.whitelist()
def get_fiscal_year(date=None, fiscal_year=None, label="Date", verbose=1, company=None):
return get_fiscal_years(date, fiscal_year, label, verbose, company)[0]
def get_fiscal_years(transaction_date=None, fiscal_year=None, label="Date", verbose=1, company=None):
# if year start date is 2012-04-01, year end date should be 2013-03-31 (hence subdate)
cond = " ifnull(disabled, 0) = 0"
if fiscal_year:
cond += " and fy.name = %(fiscal_year)s"
else:
cond += " and %(transaction_date)s >= fy.year_start_date and %(transaction_date)s <= fy.year_end_date"
if company:
cond += """ and (not exists(select name from `tabFiscal Year Company` fyc where fyc.parent = fy.name)
or exists(select company from `tabFiscal Year Company` fyc where fyc.parent = fy.name and fyc.company=%(company)s ))"""
fy = frappe.db.sql("""select fy.name, fy.year_start_date, fy.year_end_date from `tabFiscal Year` fy
where %s order by fy.year_start_date desc""" % cond, {
"fiscal_year": fiscal_year,
"transaction_date": transaction_date,
"company": company
})
if not fy:
error_msg = _("""{0} {1} not in any active Fiscal Year. For more details check {2}.""").format(label, formatdate(transaction_date), "https://erpnext.com/kb/accounts/fiscal-year-error")
if verbose==1: frappe.msgprint(error_msg)
raise FiscalYearError, error_msg
return fy
def validate_fiscal_year(date, fiscal_year, label=_("Date"), doc=None):
years = [f[0] for f in get_fiscal_years(date, label=label)]
if fiscal_year not in years:
if doc:
doc.fiscal_year = years[0]
else:
throw(_("{0} '{1}' not in Fiscal Year {2}").format(label, formatdate(date), fiscal_year))
@frappe.whitelist()
def get_balance_on(account=None, date=None, party_type=None, party=None, in_account_currency=True):
if not account and frappe.form_dict.get("account"):
account = frappe.form_dict.get("account")
if not date and frappe.form_dict.get("date"):
date = frappe.form_dict.get("date")
if not party_type and frappe.form_dict.get("party_type"):
party_type = frappe.form_dict.get("party_type")
if not party and frappe.form_dict.get("party"):
party = frappe.form_dict.get("party")
cond = []
if date:
cond.append("posting_date <= '%s'" % date)
else:
# get balance of all entries that exist
date = nowdate()
try:
year_start_date = get_fiscal_year(date, verbose=0)[1]
except FiscalYearError:
if getdate(date) > getdate(nowdate()):
# if fiscal year not found and the date is greater than today
# get fiscal year for today's date and its corresponding year start date
year_start_date = get_fiscal_year(nowdate(), verbose=1)[1]
else:
# this indicates that it is a date older than any existing fiscal year.
# hence, assuming balance as 0.0
return 0.0
if account:
acc = frappe.get_doc("Account", account)
acc.check_permission("read")
# for pl accounts, get balance within a fiscal year
if acc.report_type == 'Profit and Loss':
cond.append("posting_date >= '%s' and voucher_type != 'Period Closing Voucher'" \
% year_start_date)
# different filter for group and ledger - improved performance
if acc.is_group:
cond.append("""exists (
select name from `tabAccount` ac where ac.name = gle.account
and ac.lft >= %s and ac.rgt <= %s
)""" % (acc.lft, acc.rgt))
# If group and currency same as company,
# always return balance based on debit and credit in company currency
if acc.account_currency == frappe.db.get_value("Company", acc.company, "default_currency"):
in_account_currency = False
else:
cond.append("""gle.account = "%s" """ % (account.replace('"', '\\"'), ))
if party_type and party:
cond.append("""gle.party_type = "%s" and gle.party = "%s" """ %
(party_type.replace('"', '\\"'), party.replace('"', '\\"')))
if account or (party_type and party):
if in_account_currency:
select_field = "sum(ifnull(debit_in_account_currency, 0)) - sum(ifnull(credit_in_account_currency, 0))"
else:
select_field = "sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))"
bal = frappe.db.sql("""
SELECT {0}
FROM `tabGL Entry` gle
WHERE {1}""".format(select_field, " and ".join(cond)))[0][0]
# if bal is None, return 0
return flt(bal)
@frappe.whitelist()
def add_ac(args=None):
if not args:
args = frappe.local.form_dict
args.pop("cmd")
ac = frappe.new_doc("Account")
ac.update(args)
ac.old_parent = ""
ac.freeze_account = "No"
ac.insert()
return ac.name
@frappe.whitelist()
def add_cc(args=None):
if not args:
args = frappe.local.form_dict
args.pop("cmd")
cc = frappe.new_doc("Cost Center")
cc.update(args)
cc.old_parent = ""
cc.insert()
return cc.name
def reconcile_against_document(args):
"""
	Cancel JV, update against document, split if required and resubmit JV
"""
for d in args:
check_if_jv_modified(d)
validate_allocated_amount(d)
# cancel JV
jv_obj = frappe.get_doc('Journal Entry', d['voucher_no'])
jv_obj.make_gl_entries(cancel=1, adv_adj=1)
# update ref in JV Detail
update_against_doc(d, jv_obj)
# re-submit JV
jv_obj = frappe.get_doc('Journal Entry', d['voucher_no'])
jv_obj.make_gl_entries(cancel = 0, adv_adj =1)
def check_if_jv_modified(args):
"""
check if there is already a voucher reference
check if amount is same
check if jv is submitted
"""
ret = frappe.db.sql("""
select t2.{dr_or_cr} from `tabJournal Entry` t1, `tabJournal Entry Account` t2
where t1.name = t2.parent and t2.account = %(account)s
and t2.party_type = %(party_type)s and t2.party = %(party)s
and ifnull(t2.reference_type, '') in ("", "Sales Order", "Purchase Order")
and t1.name = %(voucher_no)s and t2.name = %(voucher_detail_no)s
and t1.docstatus=1 """.format(dr_or_cr = args.get("dr_or_cr")), args)
if not ret:
throw(_("""Payment Entry has been modified after you pulled it. Please pull it again."""))
def validate_allocated_amount(args):
if args.get("allocated_amt") < 0:
throw(_("Allocated amount can not be negative"))
elif args.get("allocated_amt") > args.get("unadjusted_amt"):
throw(_("Allocated amount can not greater than unadusted amount"))
def update_against_doc(d, jv_obj):
"""
Updates against document, if partial amount splits into rows
"""
jv_detail = jv_obj.get("accounts", {"name": d["voucher_detail_no"]})[0]
jv_detail.set(d["dr_or_cr"], d["allocated_amt"])
original_reference_type = jv_detail.reference_type
original_reference_name = jv_detail.reference_name
jv_detail.set("reference_type", d["against_voucher_type"])
jv_detail.set("reference_name", d["against_voucher"])
if d['allocated_amt'] < d['unadjusted_amt']:
jvd = frappe.db.sql("""
select cost_center, balance, against_account, is_advance, account_type, exchange_rate
from `tabJournal Entry Account` where name = %s
""", d['voucher_detail_no'], as_dict=True)
# new entry with balance amount
ch = jv_obj.append("accounts")
ch.account = d['account']
ch.account_type = jvd[0]['account_type']
ch.exchange_rate = jvd[0]['exchange_rate']
ch.party_type = d["party_type"]
ch.party = d["party"]
ch.cost_center = cstr(jvd[0]["cost_center"])
ch.balance = flt(jvd[0]["balance"])
ch.set(d['dr_or_cr'], flt(d['unadjusted_amt']) - flt(d['allocated_amt']))
ch.set(d['dr_or_cr']== 'debit' and 'credit' or 'debit', 0)
ch.against_account = cstr(jvd[0]["against_account"])
ch.reference_type = original_reference_type
ch.reference_name = original_reference_name
ch.is_advance = cstr(jvd[0]["is_advance"])
ch.docstatus = 1
# will work as update after submit
jv_obj.flags.ignore_validate_update_after_submit = True
jv_obj.save()
def remove_against_link_from_jv(ref_type, ref_no):
linked_jv = frappe.db.sql_list("""select parent from `tabJournal Entry Account`
where reference_type=%s and reference_name=%s and docstatus < 2""", (ref_type, ref_no))
if linked_jv:
frappe.db.sql("""update `tabJournal Entry Account`
set reference_type=null, reference_name = null,
modified=%s, modified_by=%s
where reference_type=%s and reference_name=%s
and docstatus < 2""", (now(), frappe.session.user, ref_type, ref_no))
frappe.db.sql("""update `tabGL Entry`
set against_voucher_type=null, against_voucher=null,
modified=%s, modified_by=%s
where against_voucher_type=%s and against_voucher=%s
and voucher_no != ifnull(against_voucher, '')""",
(now(), frappe.session.user, ref_type, ref_no))
frappe.msgprint(_("Journal Entries {0} are un-linked".format("\n".join(linked_jv))))
@frappe.whitelist()
def get_company_default(company, fieldname):
value = frappe.db.get_value("Company", company, fieldname)
if not value:
throw(_("Please set default value {0} in Company {1}").format(frappe.get_meta("Company").get_label(fieldname), company))
return value
def fix_total_debit_credit():
vouchers = frappe.db.sql("""select voucher_type, voucher_no,
sum(debit) - sum(credit) as diff
from `tabGL Entry`
group by voucher_type, voucher_no
having sum(ifnull(debit, 0)) != sum(ifnull(credit, 0))""", as_dict=1)
for d in vouchers:
if abs(d.diff) > 0:
dr_or_cr = d.voucher_type == "Sales Invoice" and "credit" or "debit"
frappe.db.sql("""update `tabGL Entry` set %s = %s + %s
where voucher_type = %s and voucher_no = %s and %s > 0 limit 1""" %
(dr_or_cr, dr_or_cr, '%s', '%s', '%s', dr_or_cr),
(d.diff, d.voucher_type, d.voucher_no))
def get_stock_and_account_difference(account_list=None, posting_date=None):
from erpnext.stock.utils import get_stock_value_on
if not posting_date: posting_date = nowdate()
difference = {}
account_warehouse = dict(frappe.db.sql("""select name, warehouse from tabAccount
where account_type = 'Warehouse' and ifnull(warehouse, '') != ''
and name in (%s)""" % ', '.join(['%s']*len(account_list)), account_list))
for account, warehouse in account_warehouse.items():
account_balance = get_balance_on(account, posting_date, in_account_currency=False)
stock_value = get_stock_value_on(warehouse, posting_date)
if abs(flt(stock_value) - flt(account_balance)) > 0.005:
difference.setdefault(account, flt(stock_value) - flt(account_balance))
return difference
def validate_expense_against_budget(args):
args = frappe._dict(args)
if frappe.db.get_value("Account", {"name": args.account, "report_type": "Profit and Loss"}):
budget = frappe.db.sql("""
select bd.budget_allocated, cc.distribution_id
from `tabCost Center` cc, `tabBudget Detail` bd
where cc.name=bd.parent and cc.name=%s and account=%s and bd.fiscal_year=%s
""", (args.cost_center, args.account, args.fiscal_year), as_dict=True)
if budget and budget[0].budget_allocated:
yearly_action, monthly_action = frappe.db.get_value("Company", args.company,
["yearly_bgt_flag", "monthly_bgt_flag"])
action_for = action = ""
if monthly_action in ["Stop", "Warn"]:
budget_amount = get_allocated_budget(budget[0].distribution_id,
args.posting_date, args.fiscal_year, budget[0].budget_allocated)
args["month_end_date"] = frappe.db.sql("select LAST_DAY(%s)",
args.posting_date)[0][0]
action_for, action = _("Monthly"), monthly_action
elif yearly_action in ["Stop", "Warn"]:
budget_amount = budget[0].budget_allocated
action_for, action = _("Annual"), yearly_action
if action_for:
actual_expense = get_actual_expense(args)
if actual_expense > budget_amount:
frappe.msgprint(_("{0} budget for Account {1} against Cost Center {2} will exceed by {3}").format(
_(action_for), args.account, args.cost_center, cstr(actual_expense - budget_amount)))
if action=="Stop":
raise BudgetError
def get_allocated_budget(distribution_id, posting_date, fiscal_year, yearly_budget):
if distribution_id:
distribution = {}
for d in frappe.db.sql("""select mdp.month, mdp.percentage_allocation
from `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md
where mdp.parent=md.name and md.fiscal_year=%s""", fiscal_year, as_dict=1):
distribution.setdefault(d.month, d.percentage_allocation)
dt = frappe.db.get_value("Fiscal Year", fiscal_year, "year_start_date")
budget_percentage = 0.0
while(dt <= getdate(posting_date)):
if distribution_id:
budget_percentage += distribution.get(getdate(dt).strftime("%B"), 0)
else:
budget_percentage += 100.0/12
dt = add_months(dt, 1)
return yearly_budget * budget_percentage / 100
def get_actual_expense(args):
args["condition"] = " and posting_date<='%s'" % args.month_end_date \
if args.get("month_end_date") else ""
return flt(frappe.db.sql("""
select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
from `tabGL Entry`
where account='%(account)s' and cost_center='%(cost_center)s'
and fiscal_year='%(fiscal_year)s' and company='%(company)s' %(condition)s
""" % (args))[0][0])
def get_currency_precision(currency=None):
if not currency:
currency = frappe.db.get_value("Company",
frappe.db.get_default("company"), "default_currency", cache=True)
currency_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
from frappe.utils import get_number_format_info
return get_number_format_info(currency_format)[2]
def get_stock_rbnb_difference(posting_date, company):
stock_items = frappe.db.sql_list("""select distinct item_code
from `tabStock Ledger Entry` where company=%s""", company)
pr_valuation_amount = frappe.db.sql("""
select sum(ifnull(pr_item.valuation_rate, 0) * ifnull(pr_item.qty, 0) * ifnull(pr_item.conversion_factor, 0))
from `tabPurchase Receipt Item` pr_item, `tabPurchase Receipt` pr
where pr.name = pr_item.parent and pr.docstatus=1 and pr.company=%s
and pr.posting_date <= %s and pr_item.item_code in (%s)""" %
('%s', '%s', ', '.join(['%s']*len(stock_items))), tuple([company, posting_date] + stock_items))[0][0]
pi_valuation_amount = frappe.db.sql("""
select sum(ifnull(pi_item.valuation_rate, 0) * ifnull(pi_item.qty, 0) * ifnull(pi_item.conversion_factor, 0))
from `tabPurchase Invoice Item` pi_item, `tabPurchase Invoice` pi
where pi.name = pi_item.parent and pi.docstatus=1 and pi.company=%s
and pi.posting_date <= %s and pi_item.item_code in (%s)""" %
('%s', '%s', ', '.join(['%s']*len(stock_items))), tuple([company, posting_date] + stock_items))[0][0]
# Balance should be
stock_rbnb = flt(pr_valuation_amount, 2) - flt(pi_valuation_amount, 2)
# Balance as per system
stock_rbnb_account = "Stock Received But Not Billed - " + frappe.db.get_value("Company", company, "abbr")
sys_bal = get_balance_on(stock_rbnb_account, posting_date, in_account_currency=False)
# Amount should be credited
return flt(stock_rbnb) + flt(sys_bal)
def get_outstanding_invoices(amount_query, account, party_type, party):
all_outstanding_vouchers = []
outstanding_voucher_list = frappe.db.sql("""
select
voucher_no, voucher_type, posting_date,
ifnull(sum({amount_query}), 0) as invoice_amount
from
`tabGL Entry`
where
account = %s and party_type=%s and party=%s and {amount_query} > 0
and (CASE
WHEN voucher_type = 'Journal Entry'
THEN ifnull(against_voucher, '') = ''
ELSE 1=1
END)
group by voucher_type, voucher_no
""".format(amount_query = amount_query), (account, party_type, party), as_dict = True)
for d in outstanding_voucher_list:
payment_amount = frappe.db.sql("""
select ifnull(sum({amount_query}), 0)
from
`tabGL Entry`
where
account = %s and party_type=%s and party=%s and {amount_query} < 0
and against_voucher_type = %s and ifnull(against_voucher, '') = %s
""".format(**{
"amount_query": amount_query
}), (account, party_type, party, d.voucher_type, d.voucher_no))
payment_amount = -1*payment_amount[0][0] if payment_amount else 0
precision = frappe.get_precision("Sales Invoice", "outstanding_amount")
if d.invoice_amount > payment_amount:
all_outstanding_vouchers.append({
'voucher_no': d.voucher_no,
'voucher_type': d.voucher_type,
'posting_date': d.posting_date,
'invoice_amount': flt(d.invoice_amount, precision),
'outstanding_amount': flt(d.invoice_amount - payment_amount, precision)
})
return all_outstanding_vouchers
| gmarke/erpnext | erpnext/accounts/utils.py | Python | agpl-3.0 | 16,635 | 0.025008 |
# -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Course page """
import web
from inginious.frontend.pages.utils import INGIniousPage
class CoursePage(INGIniousPage):
""" Course page """
def get_course(self, courseid):
""" Return the course """
try:
course = self.course_factory.get_course(courseid)
except:
raise web.notfound()
return course
def POST(self, courseid): # pylint: disable=arguments-differ
""" POST request """
course = self.get_course(courseid)
user_input = web.input()
if "unregister" in user_input and course.allow_unregister():
self.user_manager.course_unregister_user(course, self.user_manager.session_username())
raise web.seeother(self.app.get_homepath() + '/mycourses')
return self.show_page(course)
def GET(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course = self.get_course(courseid)
user_input = web.input()
page = int(user_input.get("page", 1)) - 1
tag = user_input.get("tag", "")
return self.show_page(course, page, tag)
def show_page(self, course, current_page=0, current_tag=""):
""" Prepares and shows the course page """
username = self.user_manager.session_username()
if not self.user_manager.course_is_open_to_user(course, lti=False):
return self.template_helper.get_renderer().course_unavailable()
tasks = course.get_tasks()
last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": course.get_id(),
"taskid": {"$in": list(tasks.keys())}})
for submission in last_submissions:
submission["taskname"] = tasks[submission['taskid']].get_name_or_id(self.user_manager.session_language())
tasks_data = {}
user_tasks = self.database.user_tasks.find(
{"username": username, "courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
is_admin = self.user_manager.has_staff_rights_on_course(course, username)
tasks_score = [0.0, 0.0]
for taskid, task in tasks.items():
tasks_data[taskid] = {"visible": task.get_accessible_time().after_start() or is_admin, "succeeded": False,
"grade": 0.0}
tasks_score[1] += task.get_grading_weight() if tasks_data[taskid]["visible"] else 0
for user_task in user_tasks:
tasks_data[user_task["taskid"]]["succeeded"] = user_task["succeeded"]
tasks_data[user_task["taskid"]]["grade"] = user_task["grade"]
weighted_score = user_task["grade"] * tasks[user_task["taskid"]].get_grading_weight()
tasks_score[0] += weighted_score if tasks_data[user_task["taskid"]]["visible"] else 0
course_grade = round(tasks_score[0] / tasks_score[1]) if tasks_score[1] > 0 else 0
tag_list = course.get_all_tags_names_as_list(is_admin, self.user_manager.session_language())
user_info = self.database.users.find_one({"username": username})
# Filter tasks with the tag in case the tasks are filtered
if not current_tag:
filtered_tasks = tasks
else:
filtered_tasks = {task_id: task for task_id, task in tasks.items() if
current_tag in map(lambda x: x.get_name(), task.get_tags()[2] + task.get_tags()[0])}
# Manage tasks pagination
page_limit = 20
total_tasks = len(filtered_tasks)
pages = total_tasks // page_limit
if (total_tasks % page_limit) != 0 or pages == 0:
pages += 1
if (page_limit * current_page + page_limit) < total_tasks:
page_tasks_ids = list(filtered_tasks.keys())[page_limit * current_page:
page_limit * current_page + page_limit]
else:
page_tasks_ids = list(filtered_tasks.keys())[page_limit * current_page:]
filtered_tasks = {task_id: tasks_data[task_id] for task_id, __ in filtered_tasks.items() if
task_id in page_tasks_ids}
return self.template_helper.get_renderer().course(user_info, course, last_submissions, tasks,
filtered_tasks, course_grade, tag_list, pages,
current_page + 1, current_tag)
| JuezUN/INGInious | inginious/frontend/pages/course.py | Python | agpl-3.0 | 4,665 | 0.00493 |
import discord
import os.path
import json
from discord.ext import commands
class Character():
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context = True)
async def char (self, ctx):
"""Character Creation. Asks for all information then builds Json file."""
userid = ctx.message.author.id
print(userid)
if os.path.isfile('cs_{}'.format(userid)) is True:
await self.bot.say('You\'re already on my list!')
else:
def checkname(msg):
return msg.content.startswith('name: ')
def checkclass(msg):
return msg.content.startswith('class: ')
await self.bot.say('You look new here, what\'s your name? \nname: First Last')
entername = await self.bot.wait_for_message(timeout=60.0, author=ctx.message.author, check=checkname)
if entername is None:
await self.bot.say('\nSorry')
return
cs_name = entername.content[6:]
print(cs_name)
await self.bot.say('Well Hello there {}. \nWhat\'s your class? \nclass: Title'.format(entername.content[6:]))
enterclass = await self.bot.wait_for_message(timeout=60.0, author=ctx.message.author, check=checkclass)
if enterclass is None:
                await self.bot.say('\nSorry, ask someone to lengthen the timeout!')
return
cs_class = enterclass.content[7:]
print(cs_class)
            cp = {str(userid): {'name': str(cs_name), 'class': str(cs_class), 'atk': 10, 'def': 10, 'spd': 10, 'skills': {}}}  # temp dict for the current player, dumped to the cs_<userid> file below (its absence was already checked at the start of !char)
print(cp)
with open('cs_{}'.format(userid), 'w') as f:
json.dump(cp, f) #write json file
await self.bot.say('Got it, {} the {}'.format(cs_name.title(), cs_class.title()))
def setup(bot):
bot.add_cog(Character(bot)) | padilin/Discord-RPG-Bot | character.py | Python | mit | 2,055 | 0.008273 |
# coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® Network tunnels module.
REST URI
``http://localhost/mgmt/tm/net/tunnels``
GUI Path
``Network --> tunnels``
REST Kind
``tm:net:tunnels:*``
"""
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class TunnelS(OrganizingCollection):
"""BIG-IP® network tunnels collection"""
def __init__(self, net):
super(TunnelS, self).__init__(net)
self._meta_data['allowed_lazy_attributes'] = [
Gres,
Tunnels,
Vxlans,
]
class Tunnels(Collection):
"""BIG-IP® network tunnels resource (collection for GRE, Tunnel, VXLANs"""
def __init__(self, tunnelS):
super(Tunnels, self).__init__(tunnelS)
self._meta_data['allowed_lazy_attributes'] = [Gres, Tunnel, Vxlans]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:tunnel:tunnelstate': Tunnel}
class Tunnel(Resource):
"""BIG-IP® tunnels tunnel resource"""
def __init__(self, tunnels):
super(Tunnel, self).__init__(tunnels)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:tunnel:tunnelstate'
class Gres(Collection):
"""BIG-IP® tunnels GRE sub-collection"""
def __init__(self, tunnels):
super(Gres, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Gre]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:gre:grestate': Gre}
class Gre(Resource):
"""BIG-IP® tunnels GRE sub-collection resource"""
def __init__(self, gres):
super(Gre, self).__init__(gres)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:gre:grestate'
class Vxlans(Collection):
"""BIG-IP® tunnels VXLAN sub-collection"""
def __init__(self, tunnels):
super(Vxlans, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Vxlan]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:vxlan:vxlanstate': Vxlan}
class Vxlan(Resource):
"""BIG-IP® tunnels VXLAN sub-collection resource"""
def __init__(self, vxlans):
super(Vxlan, self).__init__(vxlans)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:vxlan:vxlanstate'
| F5Networks/f5-common-python | f5/bigip/tm/net/tunnels.py | Python | apache-2.0 | 3,153 | 0 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: ZFunction.py,v 1.13 2009/08/07 07:19:19 rliebscher Exp $"
from fuzzy.set.SFunction import SFunction
class ZFunction(SFunction):
r"""Realize a Z-shaped fuzzy set::
__
\
|\
| \
| |\
| | \__
| a |
| |
delta
see also U{http://pyfuzzy.sourceforge.net/test/set/ZFunction.png}
@ivar a: center of set.
@type a: float
@ivar delta: absolute distance between x-values for minimum and maximum.
@type delta: float
"""
def __init__(self,a=0.0,delta=1.0):
"""Initialize a Z-shaped fuzzy set.
@param a: center of set
@type a: float
@param delta: absolute distance between x-values for minimum and maximum
@type delta: float
"""
super(ZFunction, self).__init__(a,delta)
def __call__(self,x):
"""Return membership of x in this fuzzy set.
This method makes the set work like a function.
@param x: value for which the membership is to calculate
@type x: float
@return: membership
@rtype: float
"""
return 1.0 - SFunction.__call__(self,x)
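# A minimal usage sketch (not part of the original module). The values assume the
# linear ramp suggested by the diagram above, i.e. membership falls from 1 to 0
# between a - delta/2 and a + delta/2:
#
#     z = ZFunction(a=0.0, delta=2.0)
#     z(-2.0)  # -> 1.0, well below the falling edge
#     z(0.0)   # -> 0.5, at the center a
#     z(2.0)   # -> 0.0, well above the falling edge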
| arruda/pyfuzzy | fuzzy/set/ZFunction.py | Python | lgpl-3.0 | 1,985 | 0.009068 |
# -*- coding: utf-8 -*-
import urllib
from . import admin
from flask import request
from flask import url_for
from flask import redirect
from flask import render_template
from flask_login import UserMixin
from flask_login import login_user
from flask_login import logout_user
from flask_login import login_required
from core.extension import login_manager
from core.views.common import render_json
from core.models import AdminUser
class LoginUser(UserMixin):
def __init__(self, user):
self.user = user
def get_id(self):
return unicode(self.user.id)
@login_manager.user_loader
def load_user(userid):
user = AdminUser.get_by_id(int(userid))
return LoginUser(user)
@admin.route('/signin', methods=['GET', 'POST'])
def signin():
if request.method == 'POST':
user = AdminUser.query.filter_by(
active=True,
username=request.form['username'],
password=request.form['pwd']
).first()
if not user:
return render_json(1, {'err_no': 'pwd_error', 'input': 'pwd'})
login_user(LoginUser(user))
next = request.form.get('next', '')
if next:
next = urllib.unquote(next)
return render_json(0, {'href': next, 'delaySuccess': True})
return render_json(0, {'href': '/admin/dashboard', 'delaySuccess': True})
return render_template('/admin/signin.html')
@admin.route('/signout', methods=['GET'])
def signout():
logout_user()
return redirect(url_for('admin.signin'))
@admin.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard():
return render_template('/admin/dashboard.html')
| moxuanchen/BMS | core/views/admin/login.py | Python | apache-2.0 | 1,670 | 0.000599 |
#!/usr/bin/python3
import machine
from update_display import update_display as update_display_function
config = {
"gateway": {
# "type": "electrodragon-wifi-iot-relay-board-spdt-based-esp8266"
"id": "thermostat"
# "description": "Thermostat Control near Kitchen"
},
"devices": [
{
"type": "DisplayDevice",
"id": "display",
# "description": "OLED IC2 128 x 64 display",
"update_display_function": update_display_function,
"width": 128,
"height": 64,
"display_type": "SSD1306_I2C",
"i2c": {
"bus": -1,
"gpio_scl": 4,
"gpio_sda": 5
}
},
{
"type": "DHTDevice",
"id": "dht",
# "description": "Digital Humidity and Temperature sensor",
"dht_type": 22,
"gpio": 14,
"just_changes": True,
"freq": 60
},
{
"type": "SwitchDevice",
"id": "heating_relay",
# "description": "Relay corner out controls central heating on/off",
"gpio": 13,
"state": False
},
{
"type": "SwitchDevice",
"id": "red_button",
# "description": "Physical impulse switch",
"switch_type": machine.Pin.IN,
"gpio": 15,
# "gpio": 2,
"freq": 0,
"debounce": 20,
"just_changes": True,
"state": False
}
]
}
| Morteo/kiot | devices/thermostat/config.py | Python | mit | 1,329 | 0.009782 |
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2020, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
import re
import time
import math
import threading
try:
from multiprocessing.pool import ThreadPool
except:
ThreadPool = None
try:
import asyncio
except:
asyncio = None
from .events import Events
from ..core.config.x_config import XCONF
from ..core.comm import SerialPort, SocketPort
from ..core.wrapper import UxbusCmdSer, UxbusCmdTcp
from ..core.utils.log import logger, pretty_print
from ..core.utils import convert
from ..core.config.x_code import ControllerWarn, ControllerError, ControllerErrorCodeMap, ControllerWarnCodeMap
from .utils import xarm_is_connected, compare_time, compare_version, xarm_is_not_simulation_mode, filter_invaild_number, xarm_is_pause, xarm_wait_until_cmdnum_lt_max
from .code import APIState
from ..tools.threads import ThreadManage
from ..version import __version__
controller_error_keys = ControllerErrorCodeMap.keys()
controller_warn_keys = ControllerWarnCodeMap.keys()
print('SDK_VERSION: {}'.format(__version__))
class Base(Events):
def __init__(self, port=None, is_radian=False, do_not_open=False, **kwargs):
if kwargs.get('init', False):
super(Base, self).__init__()
self._port = port
self._debug = kwargs.get('debug', False)
self._baudrate = kwargs.get('baudrate', XCONF.SerialConf.SERIAL_BAUD)
self._timeout = kwargs.get('timeout', None)
self._filters = kwargs.get('filters', None)
self._enable_heartbeat = kwargs.get('enable_heartbeat', False)
self._enable_report = kwargs.get('enable_report', True)
self._report_type = kwargs.get('report_type', 'rich')
self._forbid_uds = kwargs.get('forbid_uds', False)
self._check_tcp_limit = kwargs.get('check_tcp_limit', False)
self._check_joint_limit = kwargs.get('check_joint_limit', True)
self._check_cmdnum_limit = kwargs.get('check_cmdnum_limit', True)
self._check_simulation_mode = kwargs.get('check_simulation_mode', True)
self._max_cmd_num = kwargs.get('max_cmdnum', 512)
if not isinstance(self._max_cmd_num, int):
self._max_cmd_num = 512
self._max_cmd_num = min(XCONF.MAX_CMD_NUM, self._max_cmd_num)
self._check_robot_sn = kwargs.get('check_robot_sn', False)
self._check_is_ready = kwargs.get('check_is_ready', True)
self._check_is_pause = kwargs.get('check_is_pause', True)
self._timed_comm = kwargs.get('timed_comm', True)
self._timed_comm_interval = kwargs.get('timed_comm_interval', 30)
self._timed_comm_t = None
self._timed_comm_t_alive = False
self._max_callback_thread_count = kwargs.get('max_callback_thread_count', 0)
self._asyncio_loop = None
self._asyncio_loop_alive = False
self._asyncio_loop_thread = None
self._pool = None
self._thread_manage = ThreadManage()
self._rewrite_modbus_baudrate_method = kwargs.get('rewrite_modbus_baudrate_method', True)
self._min_tcp_speed, self._max_tcp_speed = 0.1, 1000 # mm/s
self._min_tcp_acc, self._max_tcp_acc = 1.0, 50000 # mm/s^2
self._tcp_jerk = 1000 # mm/s^3
self._min_joint_speed, self._max_joint_speed = 0.01, 4.0 # rad/s
self._min_joint_acc, self._max_joint_acc = 0.01, 20.0 # rad/s^2
self._joint_jerk = 20.0 # rad/s^3
self._rot_jerk = 2.3
self._max_rot_acc = 2.7
self._stream_type = 'serial'
self._stream = None
self.arm_cmd = None
self._stream_report = None
self._report_thread = None
self._only_report_err_warn_changed = True
self._last_position = [201.5, 0, 140.5, 3.1415926, 0, 0] # [x(mm), y(mm), z(mm), roll(rad), pitch(rad), yaw(rad)]
self._last_angles = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # [servo_1(rad), servo_2(rad), servo_3(rad), servo_4(rad), servo_5(rad), servo_6(rad), servo_7(rad)]
self._last_tcp_speed = 100 # mm/s, rad/s
self._last_tcp_acc = 2000 # mm/s^2, rad/s^2
self._last_joint_speed = 0.3490658503988659 # 20 °/s
self._last_joint_acc = 8.726646259971648 # 500 °/s^2
self._mvtime = 0
self._version = None
self._robot_sn = None
self._control_box_sn = None
self._position = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._pose_aa = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._angles = [0] * 7
self._position_offset = [0] * 6
self._world_offset = [0] * 6
self._state = 4
self._mode = 0
        self._joints_torque = [0, 0, 0, 0, 0, 0, 0]  # joint torques
        self._tcp_load = [0, [0, 0, 0]]  # payload [weight, center of gravity], [weight, [x, y, z]]
        self._collision_sensitivity = 0  # collision sensitivity
        self._teach_sensitivity = 0  # teach sensitivity
self._error_code = 0
self._warn_code = 0
self._servo_codes = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
self._cmd_num = 0
self._arm_type = XCONF.Robot.Type.XARM7_X4
self._arm_axis = XCONF.Robot.Axis.XARM7
axis = kwargs.get('axis', self._arm_axis)
if axis in [5, 6, 7]:
self._arm_axis = axis
arm_type = kwargs.get('type', self._arm_type)
if arm_type in [3, 5, 6, 7, 8]:
self._arm_type = arm_type
self._arm_master_id = 0
self._arm_slave_id = 0
self._arm_motor_tid = 0
self._arm_motor_fid = 0
self._arm_motor_brake_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-brake-state, ..., motor-7-brake, reserved]
self._arm_motor_enable_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-enable-state, ..., motor-7-enable, reserved]
self._gravity_direction = [0, 0, -1]
self._is_ready = False
self._is_sync = False
self._is_first_report = True
self._first_report_over = False
self._default_is_radian = is_radian
self._sleep_finish_time = time.time()
self._is_old_protocol = False
            self._major_version_number = 0  # firmware major version number
            self._minor_version_number = 0  # firmware minor version number
            self._revision_version_number = 0  # firmware revision version number
self._temperatures = [0, 0, 0, 0, 0, 0, 0]
self._voltages = [0, 0, 0, 0, 0, 0, 0]
self._currents = [0, 0, 0, 0, 0, 0, 0]
self._is_set_move = False
self._pause_cond = threading.Condition()
self._pause_lock = threading.Lock()
self._pause_cnts = 0
self._realtime_tcp_speed = 0
self._realtime_joint_speeds = [0, 0, 0, 0, 0, 0, 0]
self._count = -1
self._last_report_time = time.time()
self._max_report_interval = 0
self._cgpio_reset_enable = 0
self._tgpio_reset_enable = 0
self._cgpio_states = [0, 0, 256, 65533, 0, 65280, 0, 0, 0.0, 0.0, [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
self._iden_progress = 0
self._ignore_error = False
self._ignore_state = False
self.modbus_baud = -1
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.bio_gripper_error_code = 0
self.robotiq_is_activated = False
self._cmd_timeout = XCONF.UxbusConf.SET_TIMEOUT / 1000
self._is_collision_detection = 1
self._collision_tool_type = 0
self._collision_tool_params = [0, 0, 0, 0, 0, 0]
self._is_simulation_robot = False
self._last_update_err_time = 0
self._last_update_state_time = 0
self._last_update_cmdnum_time = 0
self._arm_type_is_1300 = False
self._control_box_type_is_1300 = False
self.linear_track_baud = -1
self.linear_track_speed = 1
self.linear_track_is_enabled = False
self._ft_ext_force = [0, 0, 0, 0, 0, 0]
self._ft_raw_force = [0, 0, 0, 0, 0, 0]
self._has_motion_cmd = False
self._need_sync = False
if not do_not_open:
self.connect()
def _init(self):
self._last_position = [201.5, 0, 140.5, 3.1415926, 0, 0] # [x(mm), y(mm), z(mm), roll(rad), pitch(rad), yaw(rad)]
self._last_angles = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # [servo_1(rad), servo_2(rad), servo_3(rad), servo_4(rad), servo_5(rad), servo_6(rad), servo_7(rad)]
self._last_tcp_speed = 100 # mm/s, rad/s
self._last_tcp_acc = 2000 # mm/s^2, rad/s^2
self._last_joint_speed = 0.3490658503988659 # 20 °/s
self._last_joint_acc = 8.726646259971648 # 500 °/s^2
self._mvtime = 0
self._version = None
self._robot_sn = None
self._control_box_sn = None
self._position = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._pose_aa = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._angles = [0] * 7
self._position_offset = [0] * 6
self._world_offset = [0] * 6
self._state = 4
self._mode = 0
        self._joints_torque = [0, 0, 0, 0, 0, 0, 0]  # joint torques
        self._tcp_load = [0, [0, 0, 0]]  # load [weight, center of gravity], i.e. [weight, [x, y, z]]
        self._collision_sensitivity = 0  # collision sensitivity
        self._teach_sensitivity = 0  # teach sensitivity
self._error_code = 0
self._warn_code = 0
self._servo_codes = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
self._cmd_num = 0
self._arm_master_id = 0
self._arm_slave_id = 0
self._arm_motor_tid = 0
self._arm_motor_fid = 0
self._arm_motor_brake_states = [-1, -1, -1, -1, -1, -1, -1,
-1] # [motor-1-brake-state, ..., motor-7-brake, reserved]
self._arm_motor_enable_states = [-1, -1, -1, -1, -1, -1, -1,
-1] # [motor-1-enable-state, ..., motor-7-enable, reserved]
self._gravity_direction = [0, 0, -1]
self._is_ready = False
self._is_sync = False
self._is_first_report = True
self._first_report_over = False
self._sleep_finish_time = time.time()
self._is_old_protocol = False
        self._major_version_number = 0  # firmware major version number
        self._minor_version_number = 0  # firmware minor version number
        self._revision_version_number = 0  # firmware revision version number
self._temperatures = [0, 0, 0, 0, 0, 0, 0]
self._voltages = [0, 0, 0, 0, 0, 0, 0]
self._currents = [0, 0, 0, 0, 0, 0, 0]
self._is_set_move = False
self._pause_cond = threading.Condition()
self._pause_lock = threading.Lock()
self._pause_cnts = 0
self._realtime_tcp_speed = 0
self._realtime_joint_speeds = [0, 0, 0, 0, 0, 0, 0]
self._count = -1
self._last_report_time = time.time()
self._max_report_interval = 0
self._cgpio_reset_enable = 0
self._tgpio_reset_enable = 0
self._cgpio_states = [0, 0, 256, 65533, 0, 65280, 0, 0, 0.0, 0.0, [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
self._iden_progress = 0
self._ignore_error = False
self._ignore_state = False
self.modbus_baud = -1
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.bio_gripper_error_code = 0
self.robotiq_is_activated = False
self._cmd_timeout = XCONF.UxbusConf.SET_TIMEOUT / 1000
self._is_collision_detection = 1
self._collision_tool_type = 0
self._collision_tool_params = [0, 0, 0, 0, 0, 0]
self._is_simulation_robot = False
self._last_update_err_time = 0
self._last_update_state_time = 0
self._last_update_cmdnum_time = 0
self._arm_type_is_1300 = False
self._control_box_type_is_1300 = False
self.linear_track_baud = -1
self.linear_track_speed = 1
self.linear_track_is_enabled = False
self._ft_ext_force = [0, 0, 0, 0, 0, 0]
self._ft_raw_force = [0, 0, 0, 0, 0, 0]
self._has_motion_cmd = False
self._need_sync = False
@staticmethod
def log_api_info(msg, *args, code=0, **kwargs):
if code == 0:
logger.info(msg, *args, **kwargs)
else:
logger.error(msg, *args, **kwargs)
def _check_version(self, is_first=False):
if is_first:
self._version = None
self._robot_sn = None
self._control_box_sn = None
try:
if not self._version:
self.get_version()
if is_first:
fail_cnt = 0
while not self._version and fail_cnt < 100:
code, _ = self.get_version()
fail_cnt += 1 if code != 0 else 0
if code != 0 or not self._version:
time.sleep(0.1)
if not self._version and fail_cnt >= 100:
logger.error('failed to get version')
return -2
if self._version and isinstance(self._version, str):
pattern = re.compile(
r'.*(\d+),(\d+),(\S+),(\S+),.*[vV](\d+)\.(\d+)\.(\d+)')
m = re.match(pattern, self._version)
if m:
(xarm_axis, xarm_type, xarm_sn, ac_version,
major_version_number,
minor_version_number,
revision_version_number) = m.groups()
self._arm_axis = int(xarm_axis)
self._arm_type = int(xarm_type)
self._major_version_number = int(major_version_number)
self._minor_version_number = int(minor_version_number)
self._revision_version_number = int(revision_version_number)
self._robot_sn = xarm_sn
self._control_box_sn = ac_version.strip()
self._arm_type_is_1300 = int(xarm_sn[2:6]) >= 1300 if xarm_sn[2:6].isdigit() else False
self._control_box_type_is_1300 = int(ac_version[2:6]) >= 1300 if ac_version[2:6].isdigit() else False
else:
pattern = re.compile(r'.*[vV](\d+)\.(\d+)\.(\d+)')
m = re.match(pattern, self._version)
if m:
(self._major_version_number,
self._minor_version_number,
self._revision_version_number) = map(int, m.groups())
else:
version_date = '-'.join(self._version.split('-')[-3:])
self._is_old_protocol = compare_time('2019-02-01', version_date)
if self._is_old_protocol:
self._major_version_number = 0
self._minor_version_number = 0
self._revision_version_number = 1
else:
self._major_version_number = 0
self._minor_version_number = 1
self._revision_version_number = 0
if is_first:
if self._check_robot_sn:
count = 2
self.get_robot_sn()
while not self._robot_sn and count and self.warn_code == 0:
self.get_robot_sn()
self.get_err_warn_code()
if not self._robot_sn and self.warn_code == 0 and count:
time.sleep(0.1)
count -= 1
if self.warn_code != 0:
self.clean_warn()
print('FIRMWARE_VERSION: v{}, PROTOCOL: {}, DETAIL: {}'.format(
'{}.{}.{}'.format(self._major_version_number, self._minor_version_number, self._revision_version_number),
'V0' if self._is_old_protocol else 'V1', self._version
))
return 0
except Exception as e:
            print('_check_version exception, version={}, exception={}'.format(self._version, e))
return -1
@property
def realtime_tcp_speed(self):
return self._realtime_tcp_speed
@property
def realtime_joint_speeds(self):
return [speed if self._default_is_radian else math.degrees(speed) for speed in self._realtime_joint_speeds]
@property
def version_number(self):
return self._major_version_number, self._minor_version_number, self._revision_version_number
@property
def connected(self):
return self._stream and self._stream.connected
@property
def ready(self):
return self._is_ready
@property
def default_is_radian(self):
return self._default_is_radian
@property
def is_simulation_robot(self):
return self._is_simulation_robot
def check_is_simulation_robot(self):
return self._check_simulation_mode and self.is_simulation_robot
# return self._check_simulation_mode and self.mode != 4
@property
def version(self):
if not self._version:
self.get_version()
return self._version
# return 'v' + '.'.join(map(str, self.version_number))
@property
def sn(self):
return self._robot_sn
@property
def control_box_sn(self):
return self._control_box_sn
@property
def position(self):
if not self._enable_report:
self.get_position()
return [math.degrees(self._position[i]) if 2 < i < 6 and not self._default_is_radian
else self._position[i] for i in range(len(self._position))]
@property
def position_aa(self):
if not self._enable_report:
self.get_position_aa()
return [math.degrees(self._pose_aa[i]) if 2 < i < 6 and not self._default_is_radian
else self._pose_aa[i] for i in range(len(self._pose_aa))]
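    # Note (added for clarity; not in the original source): position and position_aa return x/y/z in mm
    # and the orientation components (indices 3..5) in degrees unless default_is_radian is True, mirroring
    # the math.degrees() conversion applied above.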
@property
def tcp_jerk(self):
return self._tcp_jerk
@property
def tcp_speed_limit(self):
return [self._min_tcp_speed, self._max_tcp_speed]
@property
def tcp_acc_limit(self):
return [self._min_tcp_acc, self._max_tcp_acc]
@property
def last_used_position(self):
return [math.degrees(self._last_position[i]) if 2 < i < 6 and not self._default_is_radian
else self._last_position[i] for i in range(len(self._last_position))]
@property
def last_used_tcp_speed(self):
return self._last_tcp_speed
@property
def last_used_tcp_acc(self):
return self._last_tcp_acc
@property
def angles(self):
if not self._enable_report:
self.get_servo_angle()
return [angle if self._default_is_radian else math.degrees(angle) for angle in self._angles]
@property
def joint_jerk(self):
return self._joint_jerk if self._default_is_radian else math.degrees(self._joint_jerk)
@property
def joint_speed_limit(self):
limit = [self._min_joint_speed, self._max_joint_speed]
if not self._default_is_radian:
limit = [math.degrees(i) for i in limit]
return limit
@property
def joint_acc_limit(self):
limit = [self._min_joint_acc, self._max_joint_acc]
if not self._default_is_radian:
limit = [math.degrees(i) for i in limit]
return limit
@property
def last_used_angles(self):
return [angle if self._default_is_radian else math.degrees(angle) for angle in self._last_angles]
@property
def last_used_joint_speed(self):
return self._last_joint_speed if self._default_is_radian else math.degrees(self._last_joint_speed)
@property
def last_used_joint_acc(self):
return self._last_joint_acc if self._default_is_radian else math.degrees(self._last_joint_acc)
@property
def position_offset(self):
return [math.degrees(self._position_offset[i]) if 2 < i < 6 and not self._default_is_radian
else self._position_offset[i] for i in range(len(self._position_offset))]
@property
def world_offset(self):
return [math.degrees(self._world_offset[i]) if 2 < i < 6 and not self._default_is_radian
else self._world_offset[i] for i in range(len(self._world_offset))]
@property
def state(self):
if not self._enable_report:
self.get_state()
return self._state
@property
def mode(self):
return self._mode
@property
def joints_torque(self):
return self._joints_torque
@property
def tcp_load(self):
return self._tcp_load
@property
def collision_sensitivity(self):
return self._collision_sensitivity
@property
def teach_sensitivity(self):
return self._teach_sensitivity
@property
def motor_brake_states(self):
return self._arm_motor_brake_states
@property
def motor_enable_states(self):
return self._arm_motor_enable_states
@property
def temperatures(self):
return self._temperatures
@property
def error_code(self):
if not self._enable_report:
self.get_err_warn_code()
return self._error_code
@property
def warn_code(self):
if not self._enable_report:
self.get_err_warn_code()
return self._warn_code
@property
def has_error(self):
return self.error_code != 0
@property
def has_warn(self):
return self.warn_code != 0
@property
def has_err_warn(self):
return self.has_error or self._warn_code != 0 or (self.arm_cmd and self.arm_cmd.has_err_warn)
@property
def cmd_num(self):
if not self._enable_report:
self.get_cmdnum()
return self._cmd_num
@property
def device_type(self):
return self._arm_type
@property
def axis(self):
return self._arm_axis
@property
def master_id(self):
return self._arm_master_id
@property
def slave_id(self):
return self._arm_slave_id
@property
def motor_tid(self):
return self._arm_motor_tid
@property
def motor_fid(self):
return self._arm_motor_fid
@property
def gravity_direction(self):
return self._gravity_direction
@property
def gpio_reset_config(self):
return [self._cgpio_reset_enable, self._tgpio_reset_enable]
@property
def count(self):
return self._count
@property
def servo_codes(self):
return self._servo_codes
@property
def is_stop(self):
return self.state in [4, 5]
@property
def voltages(self):
return self._voltages
@property
def currents(self):
return self._currents
@property
def cgpio_states(self):
return self._cgpio_states
@property
def self_collision_params(self):
return [self._is_collision_detection, self._collision_tool_type, self._collision_tool_params]
@property
def ft_ext_force(self):
return self._ft_ext_force
@property
def ft_raw_force(self):
return self._ft_raw_force
def version_is_ge(self, major, minor=0, revision=0):
if self._version is None:
self._check_version()
return self._major_version_number > major or (
self._major_version_number == major and self._minor_version_number > minor) or (
self._major_version_number == major and self._minor_version_number == minor and
self._revision_version_number >= revision)
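    # Illustrative behaviour (added; example numbers are hypothetical): with firmware 1.6.9 parsed,
    # version_is_ge(1, 5, 20) and version_is_ge(1, 6, 9) both return True, while version_is_ge(1, 7) returns False.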
def check_is_pause(self):
if self._check_is_pause:
if self.state == 3 and self._enable_report:
with self._pause_cond:
with self._pause_lock:
self._pause_cnts += 1
self._pause_cond.wait()
with self._pause_lock:
self._pause_cnts -= 1
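    # Note (added): check_is_pause blocks the calling thread on _pause_cond while the controller reports
    # state 3 (paused); the report handlers call notifyAll() on the same condition once the state leaves 3,
    # releasing any waiters counted in _pause_cnts.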
@property
def state_is_ready(self):
if self._check_is_ready and not self.version_is_ge(1, 5, 20):
return self.ready
else:
return True
def _timed_comm_thread(self):
self._timed_comm_t_alive = True
cnt = 0
while self.connected and self._timed_comm_t_alive:
if self.arm_cmd and time.time() - self.arm_cmd.last_comm_time > self._timed_comm_interval:
try:
if cnt == 0:
code, _ = self.get_cmdnum()
elif cnt == 1:
code, _ = self.get_state()
else:
code, _ = self.get_err_warn_code()
cnt = (cnt + 1) % 3
except:
pass
time.sleep(0.5)
def _clean_thread(self):
self._thread_manage.join(1)
if self._pool:
try:
self._pool.close()
self._pool.join()
except:
pass
def connect(self, port=None, baudrate=None, timeout=None, axis=None, arm_type=None):
if self.connected:
return
if axis in [5, 6, 7]:
self._arm_axis = axis
if arm_type in [3, 5, 6, 7]:
self._arm_type = arm_type
self._is_ready = True
self._port = port if port is not None else self._port
self._baudrate = baudrate if baudrate is not None else self._baudrate
self._timeout = timeout if timeout is not None else self._timeout
if not self._port:
raise Exception('can not connect to port/ip {}'.format(self._port))
if self._timed_comm_t is not None:
try:
self._timed_comm_t_alive = False
self._timed_comm_t.join()
self._timed_comm_t = None
except:
pass
self._is_first_report = True
self._first_report_over = False
self._init()
if isinstance(self._port, (str, bytes)):
if self._port == 'localhost' or re.match(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
self._port):
self._stream = SocketPort(self._port, XCONF.SocketConf.TCP_CONTROL_PORT,
heartbeat=self._enable_heartbeat,
buffer_size=XCONF.SocketConf.TCP_CONTROL_BUF_SIZE, forbid_uds=self._forbid_uds)
if not self.connected:
raise Exception('connect socket failed')
self._report_error_warn_changed_callback()
self.arm_cmd = UxbusCmdTcp(self._stream)
self._stream_type = 'socket'
try:
if self._timed_comm:
self._timed_comm_t = threading.Thread(target=self._timed_comm_thread, daemon=True)
self._timed_comm_t.start()
except:
pass
self._stream_report = None
try:
self._connect_report()
except:
self._stream_report = None
if self._check_version(is_first=True) < 0:
self.disconnect()
raise Exception('failed to check version, close')
self.arm_cmd.set_debug(self._debug)
if self._max_callback_thread_count < 0 and asyncio is not None:
self._asyncio_loop = asyncio.new_event_loop()
self._asyncio_loop_thread = threading.Thread(target=self._run_asyncio_loop, daemon=True)
self._thread_manage.append(self._asyncio_loop_thread)
self._asyncio_loop_thread.start()
elif self._max_callback_thread_count > 0 and ThreadPool is not None:
self._pool = ThreadPool(self._max_callback_thread_count)
if self._stream.connected and self._enable_report:
self._report_thread = threading.Thread(target=self._report_thread_handle, daemon=True)
self._report_thread.start()
self._thread_manage.append(self._report_thread)
self._report_connect_changed_callback()
else:
self._stream = SerialPort(self._port)
if not self.connected:
                raise Exception('connect serial failed')
self._report_error_warn_changed_callback()
self.arm_cmd = UxbusCmdSer(self._stream)
self._stream_type = 'serial'
if self._max_callback_thread_count < 0 and asyncio is not None:
self._asyncio_loop = asyncio.new_event_loop()
self._asyncio_loop_thread = threading.Thread(target=self._run_asyncio_loop, daemon=True)
self._thread_manage.append(self._asyncio_loop_thread)
self._asyncio_loop_thread.start()
elif self._max_callback_thread_count > 0 and ThreadPool is not None:
self._pool = ThreadPool(self._max_callback_thread_count)
if self._enable_report:
self._report_thread = threading.Thread(target=self._auto_get_report_thread, daemon=True)
self._report_thread.start()
self._report_connect_changed_callback(True, True)
self._thread_manage.append(self._report_thread)
else:
self._report_connect_changed_callback(True, False)
self._check_version(is_first=True)
self.arm_cmd.set_debug(self._debug)
self.set_timeout(self._cmd_timeout)
if self._rewrite_modbus_baudrate_method:
setattr(self.arm_cmd, 'set_modbus_baudrate_old', self.arm_cmd.set_modbus_baudrate)
setattr(self.arm_cmd, 'set_modbus_baudrate', self._core_set_modbus_baudrate)
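    # Usage sketch (added; the IP address and keyword values are placeholders, not from the original source):
    #   arm = Base(port='192.168.1.222', is_radian=False, init=True)  # connects unless do_not_open=True
    #   arm.connect()  # returns immediately if already connected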
if asyncio:
def _run_asyncio_loop(self):
@asyncio.coroutine
def _asyncio_loop():
logger.debug('asyncio thread start ...')
while self.connected:
yield from asyncio.sleep(0.001)
logger.debug('asyncio thread exit ...')
try:
asyncio.set_event_loop(self._asyncio_loop)
self._asyncio_loop_alive = True
self._asyncio_loop.run_until_complete(_asyncio_loop())
except Exception as e:
pass
self._asyncio_loop_alive = False
@staticmethod
@asyncio.coroutine
def _async_run_callback(callback, msg):
yield from callback(msg)
def _run_callback(self, callback, msg, name='', enable_callback_thread=True):
try:
if self._asyncio_loop_alive and enable_callback_thread:
coroutine = self._async_run_callback(callback, msg)
asyncio.run_coroutine_threadsafe(coroutine, self._asyncio_loop)
elif self._pool is not None and enable_callback_thread:
self._pool.apply_async(callback, args=(msg,))
else:
callback(msg)
except Exception as e:
logger.error('run {} callback exception: {}'.format(name, e))
def _core_set_modbus_baudrate(self, baudrate, use_old=False):
"""
此函数是用于覆盖core.set_modbus_baudrate方法,主要用于兼容旧代码
新代码建议直接使用set_tgpio_modbus_baudrate此接口
:param baudrate:
:param use_old:
为True时调用原来的core.set_modbus_baudrate方法
为False时使用新的set_tgpio_modbus_baudrate
:return [code, ...]
"""
if not use_old:
ret = self.set_tgpio_modbus_baudrate(baudrate)
return [ret, self.modbus_baud]
else:
return self.arm_cmd.set_modbus_baudrate_old(baudrate)
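    # Usage sketch (added; 'arm' is a hypothetical connected instance):
    #   code, baud = arm._core_set_modbus_baudrate(115200)         # new path via set_tgpio_modbus_baudrate
    #   ret = arm._core_set_modbus_baudrate(115200, use_old=True)  # legacy core.set_modbus_baudrate path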
def disconnect(self):
try:
self._stream.close()
except:
pass
if self._stream_report:
try:
self._stream_report.close()
except:
pass
self._is_ready = False
try:
self._stream.join()
except:
pass
if self._stream_report:
try:
self._stream_report.join()
except:
pass
self._report_connect_changed_callback(False, False)
with self._pause_cond:
self._pause_cond.notifyAll()
self._clean_thread()
def set_timeout(self, timeout):
self._cmd_timeout = timeout
if self.arm_cmd is not None:
self._cmd_timeout = self.arm_cmd.set_timeout(self._cmd_timeout)
return self._cmd_timeout
def _connect_report(self):
if self._enable_report:
if self._stream_report:
try:
self._stream_report.close()
except:
pass
time.sleep(2)
if self._report_type == 'real':
self._stream_report = SocketPort(
self._port, XCONF.SocketConf.TCP_REPORT_REAL_PORT,
buffer_size=1024 if not self._is_old_protocol else 87,
forbid_uds=self._forbid_uds)
elif self._report_type == 'normal':
self._stream_report = SocketPort(
self._port, XCONF.SocketConf.TCP_REPORT_NORM_PORT,
buffer_size=XCONF.SocketConf.TCP_REPORT_NORMAL_BUF_SIZE if not self._is_old_protocol else 87,
forbid_uds=self._forbid_uds)
else:
self._stream_report = SocketPort(
self._port, XCONF.SocketConf.TCP_REPORT_RICH_PORT,
buffer_size=1024 if not self._is_old_protocol else 187,
forbid_uds=self._forbid_uds)
def __report_callback(self, report_id, item, name=''):
if report_id in self._report_callbacks.keys():
for callback in self._report_callbacks[report_id]:
self._run_callback(callback, item, name=name)
def _report_connect_changed_callback(self, main_connected=None, report_connected=None):
if self.REPORT_CONNECT_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_CONNECT_CHANGED_ID]:
self._run_callback(callback, {
'connected': self._stream and self._stream.connected if main_connected is None else main_connected,
'reported': self._stream_report and self._stream_report.connected if report_connected is None else report_connected,
}, name='connect_changed')
def _report_state_changed_callback(self):
if self._ignore_state:
return
self.__report_callback(self.REPORT_STATE_CHANGED_ID, {'state': self._state}, name='state_changed')
def _report_mode_changed_callback(self):
self.__report_callback(self.REPORT_MODE_CHANGED_ID, {'mode': self._mode}, name='mode_changed')
def _report_mtable_mtbrake_changed_callback(self):
self.__report_callback(self.REPORT_MTABLE_MTBRAKE_CHANGED_ID, {
'mtable': [bool(i) for i in self._arm_motor_enable_states],
'mtbrake': [bool(i) for i in self._arm_motor_brake_states]
}, name='(mtable/mtbrake)_changed')
def _report_error_warn_changed_callback(self):
if self._ignore_error:
return
self.__report_callback(self.REPORT_ERROR_WARN_CHANGED_ID, {
'warn_code': self._warn_code,
'error_code': self._error_code,
}, name='(error/warn)_changed')
def _report_cmdnum_changed_callback(self):
self.__report_callback(self.REPORT_CMDNUM_CHANGED_ID, {
'cmdnum': self._cmd_num
}, name='cmdnum_changed')
def _report_temperature_changed_callback(self):
self.__report_callback(self.REPORT_TEMPERATURE_CHANGED_ID, {
'temperatures': self.temperatures
}, name='temperature_changed')
def _report_count_changed_callback(self):
self.__report_callback(self.REPORT_COUNT_CHANGED_ID, {'count': self._count}, name='count_changed')
def _report_iden_progress_changed_callback(self):
self.__report_callback(self.REPORT_IDEN_PROGRESS_CHANGED_ID, {'progress': self._iden_progress}, name='iden_progress_changed')
def _report_location_callback(self):
if self.REPORT_LOCATION_ID in self._report_callbacks.keys():
for item in self._report_callbacks[self.REPORT_LOCATION_ID]:
callback = item['callback']
ret = {}
if item['cartesian']:
ret['cartesian'] = self.position.copy()
if item['joints']:
ret['joints'] = self.angles.copy()
self._run_callback(callback, ret, name='location')
def _report_callback(self):
if self.REPORT_ID in self._report_callbacks.keys():
for item in self._report_callbacks[self.REPORT_ID]:
callback = item['callback']
ret = {}
if item['cartesian']:
ret['cartesian'] = self.position.copy()
if item['joints']:
ret['joints'] = self.angles.copy()
if item['error_code']:
ret['error_code'] = self._error_code
if item['warn_code']:
ret['warn_code'] = self._warn_code
if item['state']:
ret['state'] = self._state
if item['mtable']:
mtable = [bool(i) for i in self._arm_motor_enable_states]
ret['mtable'] = mtable.copy()
if item['mtbrake']:
mtbrake = [bool(i) for i in self._arm_motor_brake_states]
ret['mtbrake'] = mtbrake.copy()
if item['cmdnum']:
ret['cmdnum'] = self._cmd_num
self._run_callback(callback, ret, name='report')
def _report_thread_handle(self):
main_socket_connected = self._stream and self._stream.connected
report_socket_connected = self._stream_report and self._stream_report.connected
while self.connected:
try:
if not self._stream_report or not self._stream_report.connected:
self.get_err_warn_code()
if report_socket_connected:
report_socket_connected = False
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
self._connect_report()
continue
if not report_socket_connected:
report_socket_connected = True
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
recv_data = self._stream_report.read(1)
if recv_data != -1:
size = convert.bytes_to_u32(recv_data)
if self._is_old_protocol and size > 256:
self._is_old_protocol = False
self._handle_report_data(recv_data)
else:
if self.connected:
code, err_warn = self.get_err_warn_code()
if code == -1 or code == 3:
break
if not self.connected:
break
elif not self._stream_report or not self._stream_report.connected:
self._connect_report()
except Exception as e:
logger.error(e)
if self.connected:
code, err_warn = self.get_err_warn_code()
if code == -1 or code == 3:
break
if not self.connected:
break
if not self._stream_report or not self._stream_report.connected:
self._connect_report()
time.sleep(0.001)
self.disconnect()
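    # Note (added): while the main connection is alive this thread keeps re-opening the report socket and
    # falls back to polling get_err_warn_code() when report data is unavailable; once the main connection
    # drops, or get_err_warn_code returns -1 or 3, the loop exits and disconnect() tears everything down.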
def _handle_report_data(self, data):
def __handle_report_normal_old(rx_data):
# print('length:', convert.bytes_to_u32(rx_data[0:4]))
state, mtbrake, mtable, error_code, warn_code = rx_data[4:9]
angles = convert.bytes_to_fp32s(rx_data[9:7 * 4 + 9], 7)
pose = convert.bytes_to_fp32s(rx_data[37:6 * 4 + 37], 6)
cmd_num = convert.bytes_to_u16(rx_data[61:63])
pose_offset = convert.bytes_to_fp32s(rx_data[63:6 * 4 + 63], 6)
if error_code != self._error_code or warn_code != self._warn_code:
if error_code != self._error_code:
self._error_code = error_code
if self._error_code != 0:
pretty_print('Error, code: {}'.format(self._error_code), color='red')
else:
                        pretty_print('Error has been cleared', color='blue')
if warn_code != self._warn_code:
self._warn_code = warn_code
if self._warn_code != 0:
pretty_print('Warn, code: {}'.format(self._warn_code), color='yellow')
else:
                        pretty_print('Warning has been cleared', color='blue')
self._report_error_warn_changed_callback()
logger.info('OnReport -> err={}, warn={}, state={}, cmdnum={}, mtbrake={}, mtable={}'.format(
error_code, warn_code, state, cmd_num, mtbrake, mtable
))
elif not self._only_report_err_warn_changed:
self._report_error_warn_changed_callback()
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
self._state = state
self._report_state_changed_callback()
mtbrake = [mtbrake & 0x01, mtbrake >> 1 & 0x01, mtbrake >> 2 & 0x01, mtbrake >> 3 & 0x01,
mtbrake >> 4 & 0x01, mtbrake >> 5 & 0x01, mtbrake >> 6 & 0x01, mtbrake >> 7 & 0x01]
mtable = [mtable & 0x01, mtable >> 1 & 0x01, mtable >> 2 & 0x01, mtable >> 3 & 0x01,
mtable >> 4 & 0x01, mtable >> 5 & 0x01, mtable >> 6 & 0x01, mtable >> 7 & 0x01]
if mtbrake != self._arm_motor_brake_states or mtable != self._arm_motor_enable_states:
self._arm_motor_enable_states = mtable
self._arm_motor_brake_states = mtbrake
self._report_mtable_mtbrake_changed_callback()
if not self._is_first_report:
if state in [4, 5] or not all([bool(item[0] & item[1]) for item in zip(mtbrake, mtable)][:self.axis]):
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
else:
self._is_ready = False
self._is_first_report = False
if not self._is_ready:
self._sleep_finish_time = 0
reset_tgpio_params = False
reset_linear_track_params = False
if 0 < error_code <= 17:
reset_tgpio_params = True
reset_linear_track_params = True
elif error_code in [19, 28]:
reset_tgpio_params = True
elif error_code == 111:
reset_linear_track_params = True
if reset_tgpio_params:
self.modbus_baud = -1
self.robotiq_is_activated = False
self.gripper_is_enabled = False
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
if reset_linear_track_params:
self.linear_track_baud = -1
self.linear_track_is_enabled = False
self.linear_track_speed = 1
# if error_code in [1, 10, 11, 12, 13, 14, 15, 16, 17, 19, 28]:
# self.modbus_baud = -1
# self.robotiq_is_activated = False
# self.gripper_is_enabled = False
# self.bio_gripper_is_enabled = False
# self.bio_gripper_speed = 0
# self.gripper_is_enabled = False
# self.gripper_speed = 0
# self.gripper_version_numbers = [-1, -1, -1]
# self.linear_track_is_enabled = False
# self.linear_track_speed = 0
self._error_code = error_code
self._warn_code = warn_code
self.arm_cmd.has_err_warn = error_code != 0 or warn_code != 0
_state = self._state
self._state = state
if self.state != 3 and (_state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
self._cmd_num = cmd_num
self._arm_motor_brake_states = mtbrake
self._arm_motor_enable_states = mtable
update_time = time.time()
self._last_update_cmdnum_time = update_time
self._last_update_state_time = update_time
self._last_update_err_time = update_time
for i in range(len(pose)):
pose[i] = filter_invaild_number(pose[i], 3 if i < 3 else 6, default=self._position[i])
for i in range(len(angles)):
angles[i] = filter_invaild_number(angles[i], 6, default=self._angles[i])
for i in range(len(pose_offset)):
pose_offset[i] = filter_invaild_number(pose_offset[i], 3 if i < 3 else 6, default=self._position_offset[i])
if not (0 < self._error_code <= 17):
self._position = pose
if not (0 < self._error_code <= 17):
self._angles = angles
if not (0 < self._error_code <= 17):
self._position_offset = pose_offset
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._error_code == 0 and self._state not in [4, 5]:
self._sync()
self._is_sync = True
def __handle_report_rich_old(rx_data):
report_time = time.time()
interval = report_time - self._last_report_time
self._max_report_interval = max(self._max_report_interval, interval)
self._last_report_time = report_time
            __handle_report_normal_old(rx_data)
(self._arm_type,
arm_axis,
self._arm_master_id,
self._arm_slave_id,
self._arm_motor_tid,
self._arm_motor_fid) = rx_data[87:93]
if 7 >= arm_axis >= 5:
self._arm_axis = arm_axis
if self._arm_type == 5:
self._arm_axis = 5
elif self._arm_type == 6:
self._arm_axis = 6
elif self._arm_type == 3:
self._arm_axis = 7
ver_msg = rx_data[93:122]
# self._version = str(ver_msg, 'utf-8')
trs_msg = convert.bytes_to_fp32s(rx_data[123:143], 5)
# trs_msg = [i[0] for i in trs_msg]
(self._tcp_jerk,
self._min_tcp_acc,
self._max_tcp_acc,
self._min_tcp_speed,
self._max_tcp_speed) = trs_msg
# print('tcp_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._tcp_jerk, self._min_tcp_acc, self._max_tcp_acc, self._min_tcp_speed, self._max_tcp_speed
# ))
p2p_msg = convert.bytes_to_fp32s(rx_data[143:163], 5)
# p2p_msg = [i[0] for i in p2p_msg]
(self._joint_jerk,
self._min_joint_acc,
self._max_joint_acc,
self._min_joint_speed,
self._max_joint_speed) = p2p_msg
# print('joint_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._joint_jerk, self._min_joint_acc, self._max_joint_acc,
# self._min_joint_speed, self._max_joint_speed
# ))
rot_msg = convert.bytes_to_fp32s(rx_data[163:171], 2)
# rot_msg = [i[0] for i in rot_msg]
self._rot_jerk, self._max_rot_acc = rot_msg
# print('rot_jerk: {}, mac_acc: {}'.format(self._rot_jerk, self._max_rot_acc))
sv3_msg = convert.bytes_to_u16s(rx_data[171:187], 8)
self._first_report_over = True
def __handle_report_real(rx_data):
state, mode = rx_data[4] & 0x0F, rx_data[4] >> 4
cmd_num = convert.bytes_to_u16(rx_data[5:7])
angles = convert.bytes_to_fp32s(rx_data[7:7 * 4 + 7], 7)
pose = convert.bytes_to_fp32s(rx_data[35:6 * 4 + 35], 6)
torque = convert.bytes_to_fp32s(rx_data[59:7 * 4 + 59], 7)
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
self._state = state
self._report_state_changed_callback()
if state in [4, 5]:
self._is_ready = False
else:
self._is_ready = True
if mode != self._mode:
self._mode = mode
self._report_mode_changed_callback()
for i in range(len(pose)):
pose[i] = filter_invaild_number(pose[i], 3 if i < 3 else 6, default=self._position[i])
for i in range(len(angles)):
angles[i] = filter_invaild_number(angles[i], 6, default=self._angles[i])
if not (0 < self._error_code <= 17):
self._position = pose
if not (0 < self._error_code <= 17):
self._angles = angles
self._joints_torque = torque
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._state not in [4, 5]:
self._sync()
self._is_sync = True
length = len(rx_data)
if length >= 135:
# FT_SENSOR
self._ft_ext_force = convert.bytes_to_fp32s(rx_data[87:111], 6)
self._ft_raw_force = convert.bytes_to_fp32s(rx_data[111:135], 6)
def __handle_report_normal(rx_data):
# print('length:', convert.bytes_to_u32(rx_data[0:4]), len(rx_data))
state, mode = rx_data[4] & 0x0F, rx_data[4] >> 4
# if state != self._state or mode != self._mode:
# print('mode: {}, state={}, time={}'.format(mode, state, time.time()))
cmd_num = convert.bytes_to_u16(rx_data[5:7])
angles = convert.bytes_to_fp32s(rx_data[7:7 * 4 + 7], 7)
pose = convert.bytes_to_fp32s(rx_data[35:6 * 4 + 35], 6)
torque = convert.bytes_to_fp32s(rx_data[59:7 * 4 + 59], 7)
mtbrake, mtable, error_code, warn_code = rx_data[87:91]
pose_offset = convert.bytes_to_fp32s(rx_data[91:6 * 4 + 91], 6)
tcp_load = convert.bytes_to_fp32s(rx_data[115:4 * 4 + 115], 4)
collis_sens, teach_sens = rx_data[131:133]
# if (collis_sens not in list(range(6)) or teach_sens not in list(range(6))) \
# and ((error_code != 0 and error_code not in controller_error_keys) or (warn_code != 0 and warn_code not in controller_warn_keys)):
# self._stream_report.close()
# logger.warn('ReportDataException: data={}'.format(rx_data))
# return
length = convert.bytes_to_u32(rx_data[0:4])
data_len = len(rx_data)
if (length != data_len and (length != 233 or data_len != 245)) or collis_sens not in list(range(6)) or teach_sens not in list(range(6)) \
or mode not in list(range(12)) or state not in list(range(10)):
self._stream_report.close()
logger.warn('ReportDataException: length={}, data_len={}, '
'state={}, mode={}, collis_sens={}, teach_sens={}, '
'error_code={}, warn_code={}'.format(
length, data_len,
state, mode, collis_sens, teach_sens, error_code, warn_code
))
return
self._gravity_direction = convert.bytes_to_fp32s(rx_data[133:3*4 + 133], 3)
reset_tgpio_params = False
reset_linear_track_params = False
if 0 < error_code <= 17:
reset_tgpio_params = True
reset_linear_track_params = True
elif error_code in [19, 28]:
reset_tgpio_params = True
elif error_code == 111:
reset_linear_track_params = True
if reset_tgpio_params:
self.modbus_baud = -1
self.robotiq_is_activated = False
self.gripper_is_enabled = False
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
if reset_linear_track_params:
self.linear_track_baud = -1
self.linear_track_is_enabled = False
self.linear_track_speed = 0
# if error_code in [1, 10, 11, 12, 13, 14, 15, 16, 17, 19, 28]:
# self.modbus_baud = -1
# self.robotiq_is_activated = False
# self.gripper_is_enabled = False
# self.bio_gripper_is_enabled = False
# self.bio_gripper_speed = -1
# self.gripper_speed = -1
# self.gripper_version_numbers = [-1, -1, -1]
# self.linear_track_is_enabled = False
# self.linear_track_speed = -1
# print('torque: {}'.format(torque))
# print('tcp_load: {}'.format(tcp_load))
# print('collis_sens: {}, teach_sens: {}'.format(collis_sens, teach_sens))
if error_code != self._error_code or warn_code != self._warn_code:
if error_code != self._error_code:
self._error_code = error_code
if self._error_code != 0:
pretty_print('ControllerError, code: {}'.format(self._error_code), color='red')
else:
                        pretty_print('ControllerError has been cleared', color='blue')
if warn_code != self._warn_code:
self._warn_code = warn_code
if self._warn_code != 0:
pretty_print('ControllerWarning, code: {}'.format(self._warn_code), color='yellow')
else:
                        pretty_print('ControllerWarning has been cleared', color='blue')
self._report_error_warn_changed_callback()
logger.info('OnReport -> err={}, warn={}, state={}, cmdnum={}, mtbrake={}, mtable={}, mode={}'.format(
error_code, warn_code, state, cmd_num, mtbrake, mtable, mode
))
elif not self._only_report_err_warn_changed:
self._report_error_warn_changed_callback()
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
if not self._has_motion_cmd and self._state in [0, 1] and state not in [0, 1]:
self._need_sync = True
if self._state in [0, 1] and state not in [0, 1]:
self._has_motion_cmd = False
# print('old_state: {}, new_state: {}, has_motion_cmd={}, need_sync: {}'.format(self._state, state, self._has_motion_cmd, self._need_sync))
self._state = state
self._report_state_changed_callback()
if mode != self._mode:
self._mode = mode
self._report_mode_changed_callback()
mtbrake = [mtbrake & 0x01, mtbrake >> 1 & 0x01, mtbrake >> 2 & 0x01, mtbrake >> 3 & 0x01,
mtbrake >> 4 & 0x01, mtbrake >> 5 & 0x01, mtbrake >> 6 & 0x01, mtbrake >> 7 & 0x01]
mtable = [mtable & 0x01, mtable >> 1 & 0x01, mtable >> 2 & 0x01, mtable >> 3 & 0x01,
mtable >> 4 & 0x01, mtable >> 5 & 0x01, mtable >> 6 & 0x01, mtable >> 7 & 0x01]
if mtbrake != self._arm_motor_brake_states or mtable != self._arm_motor_enable_states:
self._arm_motor_enable_states = mtable
self._arm_motor_brake_states = mtbrake
self._report_mtable_mtbrake_changed_callback()
if not self._is_first_report:
if state in [4, 5] or not all([bool(item[0] & item[1]) for item in zip(mtbrake, mtable)][:self.axis]):
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
else:
self._is_ready = False
self._is_first_report = False
if not self._is_ready:
self._sleep_finish_time = 0
self._error_code = error_code
self._warn_code = warn_code
self.arm_cmd.has_err_warn = error_code != 0 or warn_code != 0
_state = self._state
self._state = state
if self.state != 3 and (_state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
self._mode = mode
self._cmd_num = cmd_num
update_time = time.time()
self._last_update_cmdnum_time = update_time
self._last_update_state_time = update_time
self._last_update_err_time = update_time
self._arm_motor_brake_states = mtbrake
self._arm_motor_enable_states = mtable
self._joints_torque = torque
if compare_version(self.version_number, (0, 2, 0)):
self._tcp_load = [float('{:.3f}'.format(tcp_load[0])), [float('{:.3f}'.format(i)) for i in tcp_load[1:]]]
else:
self._tcp_load = [float('{:.3f}'.format(tcp_load[0])), [float('{:.3f}'.format(i * 1000)) for i in tcp_load[1:]]]
self._collision_sensitivity = collis_sens
self._teach_sensitivity = teach_sens
for i in range(len(pose)):
pose[i] = filter_invaild_number(pose[i], 3 if i < 3 else 6, default=self._position[i])
for i in range(len(angles)):
angles[i] = filter_invaild_number(angles[i], 6, default=self._angles[i])
for i in range(len(pose_offset)):
pose_offset[i] = filter_invaild_number(pose_offset[i], 3 if i < 3 else 6, default=self._position_offset[i])
if not (0 < self._error_code <= 17):
self._position = pose
if not (0 < self._error_code <= 17):
self._angles = angles
if not (0 < self._error_code <= 17):
self._position_offset = pose_offset
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._error_code == 0 and self._state not in [4, 5]:
self._sync()
self._is_sync = True
elif self._need_sync:
self._need_sync = False
# self._sync()
def __handle_report_rich(rx_data):
report_time = time.time()
interval = report_time - self._last_report_time
self._max_report_interval = max(self._max_report_interval, interval)
self._last_report_time = report_time
# print('interval={}, max_interval={}'.format(interval, self._max_report_interval))
__handle_report_normal(rx_data)
(self._arm_type,
arm_axis,
self._arm_master_id,
self._arm_slave_id,
self._arm_motor_tid,
self._arm_motor_fid) = rx_data[145:151]
if 7 >= arm_axis >= 5:
self._arm_axis = arm_axis
# self._version = str(rx_data[151:180], 'utf-8')
trs_msg = convert.bytes_to_fp32s(rx_data[181:201], 5)
# trs_msg = [i[0] for i in trs_msg]
(self._tcp_jerk,
self._min_tcp_acc,
self._max_tcp_acc,
self._min_tcp_speed,
self._max_tcp_speed) = trs_msg
# print('tcp_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._tcp_jerk, self._min_tcp_acc, self._max_tcp_acc, self._min_tcp_speed, self._max_tcp_speed
# ))
p2p_msg = convert.bytes_to_fp32s(rx_data[201:221], 5)
# p2p_msg = [i[0] for i in p2p_msg]
(self._joint_jerk,
self._min_joint_acc,
self._max_joint_acc,
self._min_joint_speed,
self._max_joint_speed) = p2p_msg
# print('joint_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._joint_jerk, self._min_joint_acc, self._max_joint_acc,
# self._min_joint_speed, self._max_joint_speed
# ))
rot_msg = convert.bytes_to_fp32s(rx_data[221:229], 2)
# rot_msg = [i[0] for i in rot_msg]
self._rot_jerk, self._max_rot_acc = rot_msg
# print('rot_jerk: {}, mac_acc: {}'.format(self._rot_jerk, self._max_rot_acc))
servo_codes = [val for val in rx_data[229:245]]
for i in range(self.axis):
if self._servo_codes[i][0] != servo_codes[i * 2] or self._servo_codes[i][1] != servo_codes[i * 2 + 1]:
print('servo_error_code, servo_id={}, status={}, code={}'.format(i + 1, servo_codes[i * 2], servo_codes[i * 2 + 1]))
self._servo_codes[i][0] = servo_codes[i * 2]
self._servo_codes[i][1] = servo_codes[i * 2 + 1]
self._first_report_over = True
# length = convert.bytes_to_u32(rx_data[0:4])
length = len(rx_data)
if length >= 252:
temperatures = list(map(int, rx_data[245:252]))
if temperatures != self.temperatures:
self._temperatures = temperatures
self._report_temperature_changed_callback()
if length >= 284:
speeds = convert.bytes_to_fp32s(rx_data[252:8 * 4 + 252], 8)
self._realtime_tcp_speed = speeds[0]
self._realtime_joint_speeds = speeds[1:]
# print(speeds[0], speeds[1:])
if length >= 288:
count = convert.bytes_to_u32(rx_data[284:288])
# print(count, rx_data[284:288])
if self._count != -1 and count != self._count:
self._count = count
self._report_count_changed_callback()
self._count = count
if length >= 312:
world_offset = convert.bytes_to_fp32s(rx_data[288:6 * 4 + 288], 6)
for i in range(len(world_offset)):
if i < 3:
world_offset[i] = float('{:.3f}'.format(world_offset[i]))
else:
world_offset[i] = float('{:.6f}'.format(world_offset[i]))
if math.inf not in world_offset and -math.inf not in world_offset and not (10 <= self._error_code <= 17):
self._world_offset = world_offset
if length >= 314:
self._cgpio_reset_enable, self._tgpio_reset_enable = rx_data[312:314]
if length >= 417:
self._is_simulation_robot = bool(rx_data[314])
self._is_collision_detection, self._collision_tool_type = rx_data[315:317]
self._collision_tool_params = convert.bytes_to_fp32s(rx_data[317:341], 6)
voltages = convert.bytes_to_u16s(rx_data[341:355], 7)
voltages = list(map(lambda x: x / 100, voltages))
self._voltages = voltages
currents = convert.bytes_to_fp32s(rx_data[355:383], 7)
self._currents = currents
cgpio_states = []
cgpio_states.extend(rx_data[383:385])
cgpio_states.extend(convert.bytes_to_u16s(rx_data[385:401], 8))
cgpio_states[6:10] = list(map(lambda x: x / 4095.0 * 10.0, cgpio_states[6:10]))
cgpio_states.append(list(map(int, rx_data[401:409])))
cgpio_states.append(list(map(int, rx_data[409:417])))
if self._control_box_type_is_1300 and length >= 433:
cgpio_states[-2].extend(list(map(int, rx_data[417:425])))
cgpio_states[-1].extend(list(map(int, rx_data[425:433])))
self._cgpio_states = cgpio_states
if length >= 481:
# FT_SENSOR
self._ft_ext_force = convert.bytes_to_fp32s(rx_data[433:457], 6)
self._ft_raw_force = convert.bytes_to_fp32s(rx_data[457:481], 6)
if length >= 482:
iden_progress = rx_data[481]
if iden_progress != self._iden_progress:
self._iden_progress = iden_progress
self._report_iden_progress_changed_callback()
if length >= 494:
pose_aa = convert.bytes_to_fp32s(rx_data[482:494], 3)
for i in range(len(pose_aa)):
pose_aa[i] = filter_invaild_number(pose_aa[i], 6, default=self._pose_aa[i])
self._pose_aa = self._position[:3] + pose_aa
try:
if self._report_type == 'real':
__handle_report_real(data)
elif self._report_type == 'rich':
if self._is_old_protocol:
__handle_report_rich_old(data)
else:
__handle_report_rich(data)
else:
if self._is_old_protocol:
__handle_report_normal_old(data)
else:
__handle_report_normal(data)
except Exception as e:
logger.error(e)
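    # Note (added): report packets are dispatched by the configured report_type ('real'/'rich'/'normal'),
    # with the *_old variants selected when the firmware predates the protocol change detected in
    # _check_version().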
def _auto_get_report_thread(self):
logger.debug('get report thread start')
while self.connected:
try:
cmd_num = self._cmd_num
state = self._state
error_code = self._error_code
warn_code = self._warn_code
self.get_cmdnum()
time.sleep(0.01)
self.get_state()
time.sleep(0.01)
self.get_err_warn_code()
time.sleep(0.01)
self.get_servo_angle()
time.sleep(0.01)
self.get_position()
if self.state != 3 and (state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
if cmd_num != self._cmd_num:
self._report_cmdnum_changed_callback()
if state != self._state:
self._report_state_changed_callback()
if state in [4, 5]:
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._sleep_finish_time = 0
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
if error_code != self._error_code or warn_code != self._warn_code:
self._report_error_warn_changed_callback()
elif not self._only_report_err_warn_changed and (self._error_code != 0 or self._warn_code != 0):
self._report_error_warn_changed_callback()
self._report_location_callback()
self._report_callback()
if self._cmd_num >= self._max_cmd_num:
time.sleep(1)
self._first_report_over = True
time.sleep(0.1)
except:
pass
self.disconnect()
logger.debug('get report thread stopped')
def _sync_tcp(self, index=None):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_angles = self._angles.copy()
if index is None:
self._last_position = self._position.copy()
elif isinstance(index, int) and 0 <= index < 6:
self._last_position[index] = self._position[index]
# print('=============sync_tcp: index={}'.format(index))
def _sync_joints(self, index=None):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_position = self._position.copy()
if index is None:
self._last_angles = self._angles.copy()
elif isinstance(index, int) and 0 <= index < 7:
self._last_angles[index] = self._angles[index]
# print('=============sync_joint: index={}'.format(index))
def _sync(self):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_position = self._position.copy()
self._last_angles = self._angles.copy()
# print('=============sync_all')
def _set_params(self, **kwargs):
is_radian = kwargs.get('is_radian', self._default_is_radian)
if 'X' in kwargs and isinstance(kwargs['X'], (int, float)):
self._last_position[0] = kwargs.get('X')
if 'Y' in kwargs and isinstance(kwargs['Y'], (int, float)):
self._last_position[1] = kwargs.get('Y')
if 'Z' in kwargs and isinstance(kwargs['Z'], (int, float)):
self._last_position[2] = kwargs.get('Z')
if 'A' in kwargs and isinstance(kwargs['A'], (int, float)):
self._last_position[3] = kwargs.get('A') if is_radian else math.radians(kwargs.get('A'))
if 'B' in kwargs and isinstance(kwargs['B'], (int, float)):
self._last_position[4] = kwargs.get('B') if is_radian else math.radians(kwargs.get('B'))
if 'C' in kwargs and isinstance(kwargs['C'], (int, float)):
self._last_position[5] = kwargs.get('C') if is_radian else math.radians(kwargs.get('C'))
# if 'R' in kwargs and isinstance(kwargs['R'], (int, float)):
# self._last_position[6] = kwargs.get('R')
if 'I' in kwargs and isinstance(kwargs['I'], (int, float)):
self._last_angles[0] = kwargs.get('I') if is_radian else math.radians(kwargs.get('I'))
if 'J' in kwargs and isinstance(kwargs['J'], (int, float)):
self._last_angles[1] = kwargs.get('J') if is_radian else math.radians(kwargs.get('J'))
if 'K' in kwargs and isinstance(kwargs['K'], (int, float)):
self._last_angles[2] = kwargs.get('K') if is_radian else math.radians(kwargs.get('K'))
if 'L' in kwargs and isinstance(kwargs['L'], (int, float)):
self._last_angles[3] = kwargs.get('L') if is_radian else math.radians(kwargs.get('L'))
if 'M' in kwargs and isinstance(kwargs['M'], (int, float)):
self._last_angles[4] = kwargs.get('M') if is_radian else math.radians(kwargs.get('M'))
if 'N' in kwargs and isinstance(kwargs['N'], (int, float)):
self._last_angles[5] = kwargs.get('N') if is_radian else math.radians(kwargs.get('N'))
if 'O' in kwargs and isinstance(kwargs['O'], (int, float)):
self._last_angles[6] = kwargs.get('O') if is_radian else math.radians(kwargs.get('O'))
if 'F' in kwargs and isinstance(kwargs['F'], (int, float)):
self._last_tcp_speed = kwargs.get('F')
self._last_tcp_speed = min(max(self._last_tcp_speed, self._min_tcp_speed), self._max_tcp_speed)
if 'Q' in kwargs and isinstance(kwargs['Q'], (int, float)):
self._last_tcp_acc = kwargs.get('Q')
self._last_tcp_acc = min(max(self._last_tcp_acc, self._min_tcp_acc), self._max_tcp_acc)
if 'F2' in kwargs and isinstance(kwargs['F2'], (int, float)):
self._last_joint_speed = kwargs.get('F2')
if not is_radian:
self._last_joint_speed = math.radians(self._last_joint_speed)
self._last_joint_speed = min(max(self._last_joint_speed, self._min_joint_speed), self._max_joint_speed)
if 'Q2' in kwargs and isinstance(kwargs['Q2'], (int, float)):
self._last_joint_acc = kwargs.get('Q2')
if not is_radian:
self._last_joint_acc = math.radians(self._last_joint_acc)
self._last_joint_acc = min(max(self._last_joint_acc, self._min_joint_acc), self._max_joint_acc)
if 'T' in kwargs and isinstance(kwargs['T'], (int, float)):
self._mvtime = kwargs.get('T')
if 'LIMIT_VELO' in kwargs and isinstance(kwargs['LIMIT_VELO'], (list, tuple)) \
and len(kwargs['LIMIT_VELO']) == 2 and isinstance(kwargs['LIMIT_VELO'][0], (int, float)) \
and isinstance(kwargs['LIMIT_VELO'][1], (int, float)) \
and kwargs['LIMIT_VELO'][0] <= kwargs['LIMIT_VELO'][1]:
self._min_tcp_speed, self._max_tcp_speed = kwargs.get('LIMIT_VELO')
if 'LIMIT_ACC' in kwargs and isinstance(kwargs['LIMIT_ACC'], (list, tuple)) \
and len(kwargs['LIMIT_ACC']) == 2 and isinstance(kwargs['LIMIT_ACC'][0], (int, float)) \
and isinstance(kwargs['LIMIT_ACC'][1], (int, float)) \
and kwargs['LIMIT_ACC'][0] <= kwargs['LIMIT_ACC'][1]:
self._min_tcp_acc, self._max_tcp_acc = kwargs.get('LIMIT_ACC')
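    # Illustrative call (added; values are hypothetical): _set_params(X=300, Y=0, Z=200, F=200, is_radian=False)
    # updates the cached values exposed via last_used_position / last_used_tcp_speed.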
def _get_params(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
if is_radian:
return {
'lastPosition': self._last_position,
'lastAngles': self._last_angles,
'mvvelo': self._last_tcp_speed,
'mvacc': self._last_tcp_acc,
'tcpJerk': self._tcp_jerk,
'jointJerk': self._joint_jerk,
'angle_mvvelo': self._last_joint_speed,
'angle_mvacc': self._last_joint_acc,
'mvtime': self._mvtime,
'LIMIT_VELO': [self._min_tcp_speed, self._max_tcp_speed],
'LIMIT_ACC': [self._min_tcp_acc, self._max_tcp_acc],
'LIMIT_ANGLE_VELO': [self._min_joint_speed, self._max_joint_speed],
'LIMIT_ANGLE_ACC': [self._min_joint_acc, self._max_joint_acc],
}
else:
return {
'lastPosition': [math.degrees(self._last_position[i]) if 2 < i < 6 else self._last_position[i] for i in range(len(self._last_position))],
'lastAngles': [math.degrees(angle) for angle in self._last_angles],
'mvvelo': round(self._last_tcp_speed),
'mvacc': round(self._last_tcp_acc),
'tcpJerk': round(self._tcp_jerk),
'jointJerk': round(math.degrees(self._joint_jerk)),
'angle_mvvelo': round(math.degrees(self._last_joint_speed)),
'angle_mvacc': round(math.degrees(self._last_joint_acc)),
'mvtime': self._mvtime,
'LIMIT_VELO': list(map(round, [self._min_tcp_speed, self._max_tcp_speed])),
'LIMIT_ACC': list(map(round, [self._min_tcp_acc, self._max_tcp_acc])),
'LIMIT_ANGLE_VELO': list(map(round, [math.degrees(self._min_joint_speed), math.degrees(self._max_joint_speed)])),
'LIMIT_ANGLE_ACC': list(map(round, [math.degrees(self._min_joint_acc), math.degrees(self._max_joint_acc)])),
}
def _check_code(self, code, is_move_cmd=False):
if is_move_cmd:
if code in [0, XCONF.UxbusState.WAR_CODE]:
if self.arm_cmd.state_is_ready:
return 0
else:
return XCONF.UxbusState.STATE_NOT_READY
else:
return code
# return 0 if code in [0, XCONF.UxbusState.WAR_CODE] and self.arm_cmd.state_is_ready else XCONF.UxbusState.STATE_NOT_READY if not self.arm_cmd.state_is_ready else code
else:
return 0 if code in [0, XCONF.UxbusState.ERR_CODE, XCONF.UxbusState.WAR_CODE, XCONF.UxbusState.STATE_NOT_READY] else code
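    # Note (added): for non-motion commands, codes 0 / ERR_CODE / WAR_CODE / STATE_NOT_READY all normalize
    # to 0; for motion commands, 0 or WAR_CODE normalize to 0 only while arm_cmd.state_is_ready holds,
    # otherwise STATE_NOT_READY is returned.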
def _check_mode_is_correct(self, mode, timeout=1):
if self._enable_report and self._stream_type == 'socket':
cnt = int(10 * timeout)
while cnt > 0 and self.mode != mode:
time.sleep(0.1)
cnt -= 1
if self.mode != mode:
return False
return True
def wait_until_cmdnum_lt_max(self):
if not self._check_cmdnum_limit or self._stream_type != 'socket' or not self._enable_report:
return
# if time.time() - self._last_report_time > 0.4:
# self.get_cmdnum()
if self._max_cmd_num / 2 < self.cmd_num < self._max_cmd_num:
self.get_cmdnum()
while self.cmd_num >= self._max_cmd_num:
if not self.connected:
return APIState.NOT_CONNECTED
elif self.has_error:
return APIState.HAS_ERROR
elif not self.state_is_ready:
return APIState.NOT_READY
elif self.is_stop:
return APIState.EMERGENCY_STOP
time.sleep(0.05)
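    # Note (added): this guard only applies to TCP connections with reporting enabled; it blocks until the
    # controller's queued command count drops below max_cmdnum, returning an APIState error early if the
    # connection is lost, an error is active, the state is not ready, or the arm is stopped.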
@xarm_is_connected(_type='get')
def get_version(self):
ret = self.arm_cmd.get_version()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
version = ''.join(list(map(chr, ret[1:])))
self._version = version[:version.find('\0')]
return ret[0], self._version
@xarm_is_connected(_type='get')
def get_robot_sn(self):
ret = self.arm_cmd.get_robot_sn()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
robot_sn = ''.join(list(map(chr, ret[1:])))
split_inx = robot_sn.find('\0')
self._robot_sn = robot_sn[:split_inx]
control_box_sn = robot_sn[split_inx+1:]
self._control_box_sn = control_box_sn[:control_box_sn.find('\0')].strip()
self._arm_type_is_1300 = int(self._robot_sn[2:6]) >= 1300 if self._robot_sn[2:6].isdigit() else False
self._control_box_type_is_1300 = int(self._control_box_sn[2:6]) >= 1300 if self._control_box_sn[2:6].isdigit() else False
return ret[0], self._robot_sn
@xarm_is_connected(_type='get')
def check_verification(self):
ret = self.arm_cmd.check_verification()
ret[0] = self._check_code(ret[0])
return ret[0], ret[1]
@xarm_is_connected(_type='get')
def get_position(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_tcp_pose()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
self._position = [filter_invaild_number(ret[i], 6, default=self._position[i-1]) for i in range(1, 7)]
return ret[0], [float(
'{:.6f}'.format(math.degrees(self._position[i]) if 2 < i < 6 and not is_radian else self._position[i])) for
i in range(len(self._position))]
@xarm_is_connected(_type='get')
def get_servo_angle(self, servo_id=None, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_joint_pos()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 7:
self._angles = [filter_invaild_number(ret[i], 6, default=self._angles[i-1]) for i in range(1, 8)]
if servo_id is None or servo_id == 8 or len(self._angles) < servo_id:
return ret[0], list(
map(lambda x: float('{:.6f}'.format(x if is_radian else math.degrees(x))), self._angles))
else:
return ret[0], float(
'{:.6f}'.format(self._angles[servo_id - 1] if is_radian else math.degrees(self._angles[servo_id - 1])))
@xarm_is_connected(_type='get')
def get_position_aa(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_position_aa()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
self._pose_aa = [filter_invaild_number(ret[i], 6, default=self._pose_aa[i - 1]) for i in range(1, 7)]
return ret[0], [float(
'{:.6f}'.format(math.degrees(self._pose_aa[i]) if 2 < i < 6 and not is_radian else self._pose_aa[i]))
for i in range(len(self._pose_aa))]
@xarm_is_connected(_type='get')
def get_pose_offset(self, pose1, pose2, orient_type_in=0, orient_type_out=0, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
_pose1 = [pose1[i] if i <= 2 or is_radian else math.radians(pose1[i]) for i in range(6)]
_pose2 = [pose2[i] if i <= 2 or is_radian else math.radians(pose2[i]) for i in range(6)]
ret = self.arm_cmd.get_pose_offset(_pose1, _pose2, orient_type_in, orient_type_out)
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
pose = [float('{:.6f}'.format(ret[i] if i <= 3 or is_radian else math.degrees(ret[i]))) for i in
range(1, 7)]
return ret[0], pose
return ret[0], ret[1:7]
def get_is_moving(self):
self.get_state()
return self._state == 1
@xarm_is_connected(_type='get')
def get_state(self):
ret = self.arm_cmd.get_state()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
# if ret[1] != self._state:
# self._state = ret[1]
# self._report_state_changed_callback()
self._state = ret[1]
self._last_update_state_time = time.time()
return ret[0], ret[1] if ret[0] == 0 else self._state
@xarm_is_connected(_type='set')
def set_state(self, state=0):
_state = self._state
ret = self.arm_cmd.set_state(state)
ret[0] = self._check_code(ret[0])
if state == 4 and ret[0] == 0:
# self._last_position[:6] = self.position
# self._last_angles = self.angles
self._sleep_finish_time = 0
# self._is_sync = False
self.get_state()
if _state != self._state:
self._report_state_changed_callback()
if self.state != 3 and (_state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[set_state], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[set_state], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> set_state({}) -> code={}, state={}'.format(state, ret[0], self._state), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_mode(self, mode=0):
ret = self.arm_cmd.set_mode(mode)
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_mode({}) -> code={}'.format(mode, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='get')
def get_cmdnum(self):
ret = self.arm_cmd.get_cmdnum()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
if ret[1] != self._cmd_num:
self._report_cmdnum_changed_callback()
self._cmd_num = ret[1]
self._last_update_cmdnum_time = time.time()
return ret[0], self._cmd_num
@xarm_is_connected(_type='get')
def get_err_warn_code(self, show=False, lang='en'):
ret = self.arm_cmd.get_err_code()
lang = lang if lang == 'cn' else 'en'
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
# if ret[1] != self._error_code or ret[2] != self._warn_code:
# self._error_code, self._warn_code = ret[1:3]
# self._report_error_warn_changed_callback()
self._error_code, self._warn_code = ret[1:3]
self._last_update_err_time = time.time()
if show:
pretty_print('************* {}, {}: {} **************'.format(
'获取控制器错误警告码' if lang == 'cn' else 'GetErrorWarnCode',
'状态' if lang == 'cn' else 'Status',
ret[0]), color='light_blue')
controller_error = ControllerError(self._error_code, status=0)
controller_warn = ControllerWarn(self._warn_code, status=0)
pretty_print('* {}: {}, {}: {}'.format(
'错误码' if lang == 'cn' else 'ErrorCode',
controller_error.code,
'信息' if lang == 'cn' else 'Info',
controller_error.title[lang]),
color='red' if self._error_code != 0 else 'white')
pretty_print('* {}: {}, {}: {}'.format(
'警告码' if lang == 'cn' else 'WarnCode',
controller_warn.code,
'信息' if lang == 'cn' else 'Info',
controller_warn.title[lang]),
color='yellow' if self._warn_code != 0 else 'white')
pretty_print('*' * 50, color='light_blue')
return ret[0], ret[1:3] if ret[0] == 0 else [self._error_code, self._warn_code]
@xarm_is_connected(_type='set')
def clean_error(self):
ret = self.arm_cmd.clean_err()
self.get_state()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[clean_error], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[clean_error], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> clean_error -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def clean_warn(self):
ret = self.arm_cmd.clean_war()
self.log_api_info('API -> clean_warn -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
@xarm_is_not_simulation_mode(ret=0)
def motion_enable(self, enable=True, servo_id=None):
assert servo_id is None or (isinstance(servo_id, int) and 1 <= servo_id <= 8)
if servo_id is None or servo_id == 8:
ret = self.arm_cmd.motion_en(8, int(enable))
else:
ret = self.arm_cmd.motion_en(servo_id, int(enable))
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
self._is_ready = bool(enable)
self.get_state()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[motion_enable], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[motion_enable], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> motion_enable -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
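    # Note (added summary): wait_move() polls the controller until motion has
    # finished. It returns 0 immediately in the velocity-control modes (4/5),
    # returns an error or emergency-stop code when one is detected, keeps
    # waiting while paused (state 3) or inside a programmed sleep, and only
    # reports success after the state has stayed out of the moving state (1)
    # for several consecutive polls. If a timeout is given and expires first,
    # APIState.WAIT_FINISH_TIMEOUT is returned.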
def wait_move(self, timeout=None):
if timeout is not None:
expired = time.time() + timeout + (self._sleep_finish_time if self._sleep_finish_time > time.time() else 0)
else:
expired = 0
count = 0
_, state = self.get_state()
max_cnt = 4 if _ == 0 and state == 1 else 10
while timeout is None or time.time() < expired:
if not self.connected:
                self.log_api_info('wait_move, xarm is disconnected', code=APIState.NOT_CONNECTED)
return APIState.NOT_CONNECTED
if time.time() - self._last_report_time > 0.4:
self.get_state()
self.get_err_warn_code()
if self.error_code != 0:
self.log_api_info('wait_move, xarm has error, error={}'.format(self.error_code), code=APIState.HAS_ERROR)
return APIState.HAS_ERROR
# no wait in velocity mode
if self.mode in [4, 5]:
return 0
if self.is_stop:
_, state = self.get_state()
if _ != 0 or state not in [4, 5]:
time.sleep(0.02)
continue
self._sleep_finish_time = 0
                self.log_api_info('wait_move, xarm is stopped, state={}'.format(self.state), code=APIState.EMERGENCY_STOP)
return APIState.EMERGENCY_STOP
if time.time() < self._sleep_finish_time or self.state == 3:
time.sleep(0.02)
count = 0
continue
if self.state != 1:
count += 1
if count >= max_cnt:
_, state = self.get_state()
self.get_err_warn_code()
if _ == 0 and state != 1:
return 0
else:
count = 0
# return 0
# if count % 4 == 0:
# self.get_state()
# self.get_err_warn_code()
else:
count = 0
time.sleep(0.05)
return APIState.WAIT_FINISH_TIMEOUT
@xarm_is_connected(_type='set')
def _check_modbus_code(self, ret, length=2, only_check_code=False, host_id=XCONF.TGPIO_HOST_ID):
code = ret[0]
if self._check_code(code) == 0:
if not only_check_code:
if len(ret) < length:
return APIState.MODBUS_ERR_LENG
if ret[1] != host_id:
return APIState.HOST_ID_ERR
if code != 0:
if host_id == XCONF.TGPIO_HOST_ID:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code != 19 and self.error_code != 28:
code = 0
else:
if self.error_code != 100 + host_id:
self.get_err_warn_code()
if self.error_code != 100 + host_id:
code = 0
return code
@xarm_is_connected(_type='set')
def checkset_modbus_baud(self, baudrate, check=True, host_id=XCONF.TGPIO_HOST_ID):
if check and ((host_id == XCONF.TGPIO_HOST_ID and self.modbus_baud == baudrate) or (host_id == XCONF.LINEER_TRACK_HOST_ID and self.linear_track_baud == baudrate)):
return 0
if baudrate not in self.arm_cmd.BAUDRATES:
return APIState.MODBUS_BAUD_NOT_SUPPORT
ret, cur_baud_inx = self._get_modbus_baudrate_inx(host_id=host_id)
if ret == 0:
baud_inx = self.arm_cmd.BAUDRATES.index(baudrate)
if cur_baud_inx != baud_inx:
try:
self._ignore_error = True
self._ignore_state = True if self.state not in [4, 5] else False
state = self.state
# self.arm_cmd.tgpio_addr_w16(XCONF.ServoConf.MODBUS_BAUDRATE, baud_inx)
self.arm_cmd.tgpio_addr_w16(0x1A0B, baud_inx, bid=host_id)
time.sleep(0.3)
self.arm_cmd.tgpio_addr_w16(XCONF.ServoConf.SOFT_REBOOT, 1, bid=host_id)
if host_id == XCONF.TGPIO_HOST_ID:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code == 19 or self.error_code == 28:
self.clean_error()
if self._ignore_state:
self.set_state(state if state >= 3 else 0)
time.sleep(1)
else:
if self.error_code != 100 + host_id:
self.get_err_warn_code()
if self.error_code == 100 + host_id:
self.clean_error()
if self._ignore_state:
self.set_state(state if state >= 3 else 0)
time.sleep(1)
except Exception as e:
self._ignore_error = False
self._ignore_state = False
logger.error('checkset_modbus_baud error: {}'.format(e))
return APIState.API_EXCEPTION
self._ignore_error = False
self._ignore_state = False
ret, cur_baud_inx = self._get_modbus_baudrate_inx(host_id=host_id)
self.log_api_info('API -> checkset_modbus_baud -> code={}, baud_inx={}'.format(ret, cur_baud_inx), code=ret)
# if ret == 0 and cur_baud_inx < len(self.arm_cmd.BAUDRATES):
# self.modbus_baud = self.arm_cmd.BAUDRATES[cur_baud_inx]
if host_id == XCONF.TGPIO_HOST_ID:
return 0 if self.modbus_baud == baudrate else APIState.MODBUS_BAUD_NOT_CORRECT
elif host_id == XCONF.LINEER_TRACK_HOST_ID:
return 0 if self.linear_track_baud == baudrate else APIState.MODBUS_BAUD_NOT_CORRECT
else:
if ret == 0 and 0 <= cur_baud_inx < len(self.arm_cmd.BAUDRATES):
return 0 if self.arm_cmd.BAUDRATES[cur_baud_inx] == baudrate else APIState.MODBUS_BAUD_NOT_CORRECT
return APIState.MODBUS_BAUD_NOT_CORRECT
@xarm_is_connected(_type='get')
def _get_modbus_baudrate_inx(self, host_id=XCONF.TGPIO_HOST_ID):
ret = self.arm_cmd.tgpio_addr_r16(XCONF.ServoConf.MODBUS_BAUDRATE & 0x0FFF, bid=host_id)
if ret[0] in [XCONF.UxbusState.ERR_CODE, XCONF.UxbusState.WAR_CODE]:
if host_id == XCONF.TGPIO_HOST_ID:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code != 19 and self.error_code != 28:
ret[0] = 0
else:
if self.error_code != 100 + host_id:
self.get_err_warn_code()
if self.error_code != 100 + host_id:
ret[0] = 0
if ret[0] == 0 and 0 <= ret[1] < len(self.arm_cmd.BAUDRATES):
if host_id == XCONF.TGPIO_HOST_ID:
self.modbus_baud = self.arm_cmd.BAUDRATES[ret[1]]
elif host_id == XCONF.LINEER_TRACK_HOST_ID:
self.linear_track_baud = self.arm_cmd.BAUDRATES[ret[1]]
return ret[0], ret[1]
@xarm_is_connected(_type='set')
def set_tgpio_modbus_timeout(self, timeout):
ret = self.arm_cmd.set_modbus_timeout(timeout)
self.log_api_info('API -> set_tgpio_modbus_timeout -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_tgpio_modbus_baudrate(self, baud):
code = self.checkset_modbus_baud(baud, check=False)
self.log_api_info('API -> set_tgpio_modbus_baudrate -> code={}'.format(code), code=code)
return code
@xarm_is_connected(_type='get')
def get_tgpio_modbus_baudrate(self):
code, baud_inx = self._get_modbus_baudrate_inx()
# if code == 0 and baud_inx < len(self.arm_cmd.BAUDRATES):
# self.modbus_baud = self.arm_cmd.BAUDRATES[baud_inx]
return code, self.modbus_baud
def getset_tgpio_modbus_data(self, datas, min_res_len=0, ignore_log=False):
if not self.connected:
return APIState.NOT_CONNECTED, []
ret = self.arm_cmd.tgpio_set_modbus(datas, len(datas))
ret[0] = self._check_modbus_code(ret, min_res_len + 2)
if not ignore_log:
self.log_api_info('API -> getset_tgpio_modbus_data -> code={}, response={}'.format(ret[0], ret[2:]), code=ret[0])
return ret[0], ret[2:]
@xarm_is_connected(_type='set')
def set_simulation_robot(self, on_off):
ret = self.arm_cmd.set_simulation_robot(on_off)
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_simulation_robot({}) -> code={}'.format(on_off, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
@xarm_is_pause(_type='set')
@xarm_wait_until_cmdnum_lt_max(only_wait=False)
def set_tcp_load(self, weight, center_of_gravity):
if compare_version(self.version_number, (0, 2, 0)):
_center_of_gravity = center_of_gravity
else:
_center_of_gravity = [item / 1000.0 for item in center_of_gravity]
ret = self.arm_cmd.set_tcp_load(weight, _center_of_gravity)
self.log_api_info('API -> set_tcp_load -> code={}, weight={}, center={}'.format(ret[0], weight, _center_of_gravity), code=ret[0])
return ret[0]
| xArm-Developer/xArm-Python-SDK | xarm/x3/base.py | Python | bsd-3-clause | 102,034 | 0.002388 |
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="width", parent_name="violin", **kwargs):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/violin/_width.py | Python | mit | 472 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# author :Ghislain Vieilledent
# email :ghislain.vieilledent@cirad.fr, ghislainv@gmail.com
# web :https://ecology.ghislainv.fr
# python_version :>=2.7
# license :GPLv3
# ==============================================================================
import os
import numpy as np
def test_make_dir():
assert os.path.exists("output")
def test_data():
assert os.path.exists("data")
def test_plot_fcc23():
assert os.path.exists("output/fcc23.png")
def test_sample():
assert os.path.exists("output/sample.txt")
def test_dataset(gstart):
assert gstart["dataset"].iloc[0, 0] == 30.0
def test_cellneigh(gstart):
a = np.array([3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 8, 8, 8, 8, 8,
8, 8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5,
8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8,
8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 8,
8, 8, 8, 8, 8, 8, 8, 8, 5, 5, 8, 8, 8, 8, 8, 8, 8,
8, 8, 5, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3])
b = np.array([1, 11, 12, 0, 2, 11, 12, 13, 1, 3, 12, 13, 14, 2, 4,
13, 14, 15, 3, 5, 14, 15, 16, 4, 6, 15, 16, 17, 5,
7, 16, 17, 18, 6, 8, 17, 18, 19, 7, 9, 18, 19, 20,
8, 10, 19, 20, 21, 9, 20, 21, 0, 1, 12, 22, 23, 0,
1, 2, 11, 13, 22, 23, 24, 1, 2, 3, 12, 14, 23, 24,
25, 2, 3, 4, 13, 15, 24, 25, 26, 3, 4, 5, 14, 16,
25, 26, 27, 4, 5, 6, 15, 17, 26, 27, 28, 5, 6, 7,
16, 18, 27, 28, 29, 6, 7, 8, 17, 19, 28, 29, 30, 7,
8, 9, 18, 20, 29, 30, 31, 8, 9, 10, 19, 21, 30, 31,
32, 9, 10, 20, 31, 32, 11, 12, 23, 33, 34, 11, 12,
13, 22, 24, 33, 34, 35, 12, 13, 14, 23, 25, 34, 35,
36, 13, 14, 15, 24, 26, 35, 36, 37, 14, 15, 16, 25,
27, 36, 37, 38, 15, 16, 17, 26, 28, 37, 38, 39, 16,
17, 18, 27, 29, 38, 39, 40, 17, 18, 19, 28, 30, 39,
40, 41, 18, 19, 20, 29, 31, 40, 41, 42, 19, 20, 21,
30, 32, 41, 42, 43, 20, 21, 31, 42, 43, 22, 23, 34,
44, 45, 22, 23, 24, 33, 35, 44, 45, 46, 23, 24, 25,
34, 36, 45, 46, 47, 24, 25, 26, 35, 37, 46, 47, 48,
25, 26, 27, 36, 38, 47, 48, 49, 26, 27, 28, 37, 39,
48, 49, 50, 27, 28, 29, 38, 40, 49, 50, 51, 28, 29,
30, 39, 41, 50, 51, 52, 29, 30, 31, 40, 42, 51, 52,
53, 30, 31, 32, 41, 43, 52, 53, 54, 31, 32, 42, 53,
54, 33, 34, 45, 55, 56, 33, 34, 35, 44, 46, 55, 56,
57, 34, 35, 36, 45, 47, 56, 57, 58, 35, 36, 37, 46,
48, 57, 58, 59, 36, 37, 38, 47, 49, 58, 59, 60, 37,
38, 39, 48, 50, 59, 60, 61, 38, 39, 40, 49, 51, 60,
61, 62, 39, 40, 41, 50, 52, 61, 62, 63, 40, 41, 42,
51, 53, 62, 63, 64, 41, 42, 43, 52, 54, 63, 64, 65,
42, 43, 53, 64, 65, 44, 45, 56, 66, 67, 44, 45, 46,
55, 57, 66, 67, 68, 45, 46, 47, 56, 58, 67, 68, 69,
46, 47, 48, 57, 59, 68, 69, 70, 47, 48, 49, 58, 60,
69, 70, 71, 48, 49, 50, 59, 61, 70, 71, 72, 49, 50,
51, 60, 62, 71, 72, 73, 50, 51, 52, 61, 63, 72, 73,
74, 51, 52, 53, 62, 64, 73, 74, 75, 52, 53, 54, 63,
65, 74, 75, 76, 53, 54, 64, 75, 76, 55, 56, 67, 77,
78, 55, 56, 57, 66, 68, 77, 78, 79, 56, 57, 58, 67,
69, 78, 79, 80, 57, 58, 59, 68, 70, 79, 80, 81, 58,
59, 60, 69, 71, 80, 81, 82, 59, 60, 61, 70, 72, 81,
82, 83, 60, 61, 62, 71, 73, 82, 83, 84, 61, 62, 63,
72, 74, 83, 84, 85, 62, 63, 64, 73, 75, 84, 85, 86,
63, 64, 65, 74, 76, 85, 86, 87, 64, 65, 75, 86, 87,
66, 67, 78, 88, 89, 66, 67, 68, 77, 79, 88, 89, 90,
67, 68, 69, 78, 80, 89, 90, 91, 68, 69, 70, 79, 81,
90, 91, 92, 69, 70, 71, 80, 82, 91, 92, 93, 70, 71,
72, 81, 83, 92, 93, 94, 71, 72, 73, 82, 84, 93, 94,
95, 72, 73, 74, 83, 85, 94, 95, 96, 73, 74, 75, 84,
86, 95, 96, 97, 74, 75, 76, 85, 87, 96, 97, 98, 75,
76, 86, 97, 98, 77, 78, 89, 77, 78, 79, 88, 90, 78,
79, 80, 89, 91, 79, 80, 81, 90, 92, 80, 81, 82, 91,
93, 81, 82, 83, 92, 94, 82, 83, 84, 93, 95, 83, 84,
85, 94, 96, 84, 85, 86, 95, 97, 85, 86, 87, 96, 98,
86, 87, 97])
assert (np.array_equal(gstart["nneigh"], a) and
np.array_equal(gstart["adj"], b))
def test_model_binomial_iCAR(gstart):
p = np.array([0.34388896, 0.29002158, 0.51594223, 0.48436339,
0.60838453, 0.61257058, 0.55034979, 0.58819568,
0.51087469, 0.58819568, 0.64149789, 0.57400436,
0.59570952, 0.63212285, 0.566676, 0.62562204,
0.55379459, 0.15644965, 0.61284327, 0.36638686,
0.55439297, 0.57325744, 0.62562204, 0.17995823,
0.4930868, 0.54641479, 0.59782004, 0.48159526,
0.62882886, 0.59831051, 0.76245777, 0.74576097,
0.77356767, 0.73863295, 0.78188891, 0.75056545,
0.60775752, 0.64978574, 0.74654465, 0.77378323,
0.53994416, 0.75852715, 0.77754366, 0.60053684,
0.71543739, 0.74565542, 0.7555028, 0.44598923,
0.76401273, 0.75953027, 0.49027142, 0.69610182,
0.75679461, 0.78543649, 0.76863321, 0.6209473,
0.77653139, 0.76182804, 0.78169681, 0.58816002,
0.50453473, 0.77980428, 0.76084413, 0.73054832,
0.78289747, 0.71858934, 0.78362842, 0.74702923,
0.67357571, 0.78940242, 0.75358937, 0.66791346,
0.75602843, 0.42494845, 0.77653139, 0.60509306,
0.60846943, 0.76187008, 0.73278992, 0.72792572,
0.47661681, 0.59456417, 0.71894598, 0.6731302,
0.74964489, 0.77247818, 0.78289747, 0.74200682,
0.78940242, 0.78508877, 0.73153419, 0.65636031,
0.78607775, 0.59738545, 0.72596162, 0.78216462,
0.75078253, 0.77527468, 0.69907386, 0.71991522])
assert np.allclose(gstart["pred_icar"][0:100], p)
def test_rho(gstart):
r = np.array([-3.72569484e-02, -1.16871478e-01, -1.82400711e-01,
2.13446770e-01, -6.44591325e-01, -9.89850864e-02,
1.10439030e-01, -2.31551563e-02, -3.30273946e-01,
-2.66995061e-01, -3.84426210e-01, 5.73572517e-02,
-5.73353804e-02, -3.12497338e-01, -8.37127591e-01,
7.62072575e-02, 3.86361945e-01, 1.26487021e-02,
-8.22069815e-02, -3.60656850e-01, -5.46586761e-01,
-4.17346094e-01, 1.05212875e+00, -4.32508096e-02,
-4.49589533e-01, -6.89872259e-01, -4.91230799e-01,
-3.84040358e-01, 5.67299746e-01, -2.10071117e-01,
-1.07456253e+00, -6.69339978e-01, -6.21974970e-01,
2.15020267e+00, -7.16437085e-02, -4.46424607e-01,
-2.17259138e-01, -3.30043032e-01, -2.59613996e-01,
2.68845283e-01, -3.78046974e-01, -5.18108829e-01,
-6.18235133e-01, -7.59652734e-01, 1.51771355e+00,
1.75357016e+00, -8.01814048e-02, 1.99270623e-01,
-1.75157345e-01, -6.10561635e-02, -1.26099802e-01,
-1.77864133e-01, -3.03381214e-01, -5.29892286e-01,
-5.47125418e-01, 1.30320979e+00, 2.37670385e+00,
4.97829325e-01, 8.88668246e-01, 3.92682659e-01,
-6.56913949e-03, -2.95774565e-01, -5.15489012e-01,
-6.01407176e-01, -5.67695385e-01, -6.48479745e-01,
1.47482553e+00, 1.45848019e+00, 4.05321503e-01,
1.06327906e+00, 4.37780456e-01, -1.12202021e-01,
-7.22139489e-01, -7.33312519e-01, -6.68442058e-01,
-7.76218335e-01, -8.02763852e-01, 1.41620727e+00,
1.56564133e+00, 1.24252305e+00, 9.07095194e-01,
4.38959947e-01, -2.95546782e-01, -4.92024764e-01,
-9.62965263e-01, -8.93107795e-01, -9.80673724e-01,
-9.94878624e-01, 1.41460696e+00, 1.38942057e+00,
1.97092977e+00, 1.06797639e+00, 4.36803818e-01,
2.15296806e-03, -6.14110567e-01, -7.76157636e-01,
-9.47693103e-01, -1.05424592e+00, -1.12226096e+00])
assert np.allclose(gstart["rho"], r)
def test_interpolate_rho():
assert os.path.exists("output/rho.tif")
def test_predict_raster_binomial_iCAR():
assert os.path.exists("output/prob.tif")
def test_countpix(gstart):
assert gstart["fc"] == [83999.25, 79015.5]
def test_deforest():
assert os.path.exists("output/fcc_2050.tif")
def test_plot_fcc123():
assert os.path.exists("output/fcc123.png")
def test_plot_rho():
assert os.path.exists("output/rho_orig.png")
def test_plot_prob():
assert os.path.exists("output/prob.png")
def test_plot_fcc():
assert os.path.exists("output/fcc_2050.png")
# End Of File
| ghislainv/deforestprob | test/test_get_started.py | Python | gpl-3.0 | 9,570 | 0 |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
salt.utils.validate.path
~~~~~~~~~~~~~~~~~~~~~~~~
Several path related validators
'''
# Import python libs
import os
def is_writeable(path, check_parent=False):
'''
Check if a given path is writeable by the current user.
:param path: The path to check
:param check_parent: If the path to check does not exist, check for the
ability to write to the parent directory instead
:returns: True or False
'''
if os.access(path, os.F_OK) and os.access(path, os.W_OK):
# The path exists and is writeable
return True
if os.access(path, os.F_OK) and not os.access(path, os.W_OK):
# The path exists and is not writeable
return False
    # The path does not exist or is not writeable
if check_parent is False:
# We're not allowed to check the parent directory of the provided path
return False
# Lets get the parent directory of the provided path
parent_dir = os.path.dirname(path)
if not os.access(parent_dir, os.F_OK):
        # Parent directory does not exist
return False
# Finally, return if we're allowed to write in the parent directory of the
# provided path
return os.access(parent_dir, os.W_OK)
| MadeiraCloud/salt | sources/salt/utils/validate/path.py | Python | apache-2.0 | 1,466 | 0 |
# Copyright (C) 2017 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class CheckPatchRelevance(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.group,
]
jsc_paths = [
"JSTests/",
"Source/JavaScriptCore/",
"Source/WTF/"
"Source/bmalloc/",
]
group_to_paths_mapping = {
'jsc': jsc_paths,
}
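    # Illustrative note (added, not part of the original tool): with
    # --group=jsc a patch is relevant only if at least one changed file path
    # contains one of the patterns above (matched case-insensitively with
    # re.search). For example, a patch touching only Source/WebCore/ would be
    # reported as not relevant, while one touching JSTests/ would be relevant.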
def _changes_are_relevant(self, changed_files):
# In the default case, all patches are relevant
if self._options.group != 'jsc':
return True
patterns = self.group_to_paths_mapping[self._options.group]
for changed_file in changed_files:
for pattern in patterns:
if re.search(pattern, changed_file, re.IGNORECASE):
return True
return False
def run(self, state):
_log.info("Checking relevance of patch")
change_list = self._tool.scm().changed_files()
if self._changes_are_relevant(change_list):
return True
_log.info("This patch does not have relevant changes.")
raise ScriptError(message="This patch does not have relevant changes.")
| Debian/openjfx | modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/checkpatchrelevance.py | Python | gpl-2.0 | 2,708 | 0 |
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING
from synapse.util.async_helpers import Linearizer
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ReadMarkerHandler:
def __init__(self, hs: "HomeServer"):
self.server_name = hs.config.server.server_name
self.store = hs.get_datastores().main
self.account_data_handler = hs.get_account_data_handler()
self.read_marker_linearizer = Linearizer(name="read_marker")
async def received_client_read_marker(
self, room_id: str, user_id: str, event_id: str
) -> None:
"""Updates the read marker for a given user in a given room if the event ID given
is ahead in the stream relative to the current read marker.
This uses a notifier to indicate that account data should be sent down /sync if
the read marker has changed.
"""
with await self.read_marker_linearizer.queue((room_id, user_id)):
existing_read_marker = await self.store.get_account_data_for_room_and_type(
user_id, room_id, "m.fully_read"
)
should_update = True
if existing_read_marker:
# Only update if the new marker is ahead in the stream
should_update = await self.store.is_event_after(
event_id, existing_read_marker["event_id"]
)
if should_update:
content = {"event_id": event_id}
await self.account_data_handler.add_account_data_to_room(
user_id, room_id, "m.fully_read", content
)
| matrix-org/synapse | synapse/handlers/read_marker.py | Python | apache-2.0 | 2,249 | 0.001334 |
from model_mommy import mommy
from django.test import TestCase
from ..models import Department, Employee
class DepartmentTestMommy(TestCase):
"""Department's modle test case."""
def test_department_creation_mommy(self):
"""Test create department's model."""
new_department = mommy.make('employees.Department')
self.assertTrue(isinstance(new_department, Department))
self.assertEqual(new_department.__str__(), new_department.name)
class EmployeeTestMommy(TestCase):
"""Employee's model test cazse."""
def test_employee_creation_mommy(self):
"""Test create department's model."""
new_employee = mommy.make('employees.Employee')
self.assertTrue(isinstance(new_employee, Employee))
self.assertEqual(new_employee.__str__(), '%s %s' % (new_employee.first_name, new_employee.last_name))
| maurobaraldi/ll_interview_application | luizalabs/employees/tests/tests_models.py | Python | gpl-3.0 | 866 | 0.001155 |
import datetime
import mock
from django.utils import timezone
from mock import Mock, call, PropertyMock
from django.test import TestCase
from django.contrib.sessions.models import Session
from mysite.celery import send_outcome, check_anonymous
class CeleryTasksTest(TestCase):
@mock.patch('mysite.celery.UserSession.objects.filter')
@mock.patch('mysite.celery.User.objects.filter')
def test_check_anonymous_user_session_no_session(self, mock_User_filter, mock_UserSession_filter):
mock_user = Mock(id=1)
call_mock_User_filter = [mock_user]
mock_session = Mock(id=2)
# user_session.session
p = PropertyMock(return_value=3, side_effect=Session.DoesNotExist('Object Does not exist'))
type(mock_session).session = p
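        # Note (added): attaching the PropertyMock to the *type* means that
        # reading mock_session.session raises Session.DoesNotExist, which is
        # what drives the "session missing" branch of check_anonymous().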
call_mock_UserSession_filter = [mock_session]
mock_User_filter.return_value = call_mock_User_filter
mock_UserSession_filter.return_value = call_mock_UserSession_filter
mock_user_del = Mock()
mock_user.delete = mock_user_del
response = check_anonymous()
mock_user_del.assert_called_once_with()
mock_User_filter.assert_called_with(groups__name='Temporary')
mock_UserSession_filter.assert_called_with(user__groups__name='Temporary')
@mock.patch('mysite.celery.UserSession.objects.filter')
@mock.patch('mysite.celery.User.objects.filter')
def test_check_anonymous_user_session_has_session(self, mock_User_filter, mock_UserSession_filter):
mock_user = Mock(id=1)
call_mock_User_filter = [mock_user]
mock_session = Mock(id=2)
# user_session.session
mock_session.session.expire_date = timezone.now() - datetime.timedelta(days=1)
sess_session_del = Mock()
sess_user_del = Mock()
mock_session.session.delete = sess_session_del
mock_session.user.delete = sess_user_del
call_mock_UserSession_filter = [mock_session]
mock_User_filter.return_value = call_mock_User_filter
mock_UserSession_filter.return_value = call_mock_UserSession_filter
mock_user_del = Mock()
mock_user.delete = mock_user_del
response = check_anonymous()
sess_session_del.assert_called_once_with()
sess_user_del.assert_called_once_with()
mock_user_del.assert_called_once_with()
mock_User_filter.assert_called_with(groups__name='Temporary')
mock_UserSession_filter.assert_called_with(user__groups__name='Temporary')
@mock.patch('mysite.celery.GradedLaunch.objects.get')
@mock.patch('mysite.celery.send_score_update')
def test_send_outcome(self, mock_send_score_update, mock_GradedLaunch_get):
get_mock_ret_val = Mock()
mock_GradedLaunch_get.return_value = get_mock_ret_val
result = send_outcome('0', assignment_id=1)
mock_GradedLaunch_get.assert_called_once_with(id=1)
mock_send_score_update.assert_called_once_with(get_mock_ret_val, '0')
| raccoongang/socraticqs2 | mysite/mysite/tests/celery.py | Python | apache-2.0 | 2,987 | 0.002343 |
#
# Copyright (c) 2009-2019 Tom Keffer <tkeffer@gmail.com> and
# Gary Roderick
#
# See the file LICENSE.txt for your full rights.
#
"""Module to interact with Cumulus monthly log files and import raw
observational data for use with weeimport.
"""
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
# Python imports
import csv
import glob
import io
import logging
import os
import time
# WeeWX imports
from . import weeimport
import weewx
from weeutil.weeutil import timestamp_to_string
from weewx.units import unit_nicknames
log = logging.getLogger(__name__)
# Dict to lookup rainRate units given rain units
rain_units_dict = {'inch': 'inch_per_hour', 'mm': 'mm_per_hour'}
# ============================================================================
# class CumulusSource
# ============================================================================
class CumulusSource(weeimport.Source):
"""Class to interact with a Cumulus generated monthly log files.
Handles the import of data from Cumulus monthly log files.Cumulus stores
observation data in monthly log files. Each log file contains a month of
data in CSV format. The format of the CSV data (eg date separator, field
delimiter, decimal point character) depends upon the settings used in
Cumulus.
Data is imported from all month log files found in the source directory one
log file at a time. Units of measure are not specified in the monthly log
files so the units of measure must be specified in the wee_import config
file. Whilst the Cumulus monthly log file format is well defined, some
pre-processing of the data is required to provide data in a format the
suitable for use in the wee_import mapping methods.
"""
# List of field names used during import of Cumulus log files. These field
# names are for internal wee_import use only as Cumulus monthly log files
# do not have a header line with defined field names. Cumulus monthly log
# field 0 and field 1 are date and time fields respectively. getRawData()
# combines these fields to return a formatted date-time string that is later
# converted into a unix epoch timestamp.
_field_list = ['datetime', 'cur_out_temp', 'cur_out_hum',
'cur_dewpoint', 'avg_wind_speed', 'gust_wind_speed',
'avg_wind_bearing', 'cur_rain_rate', 'day_rain', 'cur_slp',
'rain_counter', 'curr_in_temp', 'cur_in_hum',
'lastest_wind_gust', 'cur_windchill', 'cur_heatindex',
'cur_uv', 'cur_solar', 'cur_et', 'annual_et',
'cur_app_temp', 'cur_tmax_solar', 'day_sunshine_hours',
'cur_wind_bearing', 'day_rain_rg11', 'midnight_rain']
# Dict to map all possible Cumulus field names (refer _field_list) to WeeWX
# archive field names and units.
_header_map = {'datetime': {'units': 'unix_epoch', 'map_to': 'dateTime'},
'cur_out_temp': {'map_to': 'outTemp'},
'curr_in_temp': {'map_to': 'inTemp'},
'cur_dewpoint': {'map_to': 'dewpoint'},
'cur_slp': {'map_to': 'barometer'},
'avg_wind_bearing': {'units': 'degree_compass',
'map_to': 'windDir'},
'avg_wind_speed': {'map_to': 'windSpeed'},
'cur_heatindex': {'map_to': 'heatindex'},
'gust_wind_speed': {'map_to': 'windGust'},
'cur_windchill': {'map_to': 'windchill'},
'cur_out_hum': {'units': 'percent', 'map_to': 'outHumidity'},
'cur_in_hum': {'units': 'percent', 'map_to': 'inHumidity'},
'midnight_rain': {'map_to': 'rain'},
'cur_rain_rate': {'map_to': 'rainRate'},
'cur_solar': {'units': 'watt_per_meter_squared',
'map_to': 'radiation'},
'cur_uv': {'units': 'uv_index', 'map_to': 'UV'},
'cur_app_temp': {'map_to': 'appTemp'}
}
def __init__(self, config_dict, config_path, cumulus_config_dict, import_config_path, options):
# call our parents __init__
super(CumulusSource, self).__init__(config_dict,
cumulus_config_dict,
options)
# save our import config path
self.import_config_path = import_config_path
# save our import config dict
self.cumulus_config_dict = cumulus_config_dict
# wind dir bounds
self.wind_dir = [0, 360]
# field delimiter used in monthly log files, default to comma
self.delimiter = str(cumulus_config_dict.get('delimiter', ','))
# decimal separator used in monthly log files, default to decimal point
self.decimal = cumulus_config_dict.get('decimal', '.')
# date separator used in monthly log files, default to solidus
separator = cumulus_config_dict.get('separator', '/')
# we combine Cumulus date and time fields to give a fixed format
# date-time string
self.raw_datetime_format = separator.join(('%d', '%m', '%y %H:%M'))
# Cumulus log files provide a number of cumulative rainfall fields. We
# cannot use the daily rainfall as this may reset at some time of day
# other than midnight (as required by WeeWX). So we use field 26, total
# rainfall since midnight and treat it as a cumulative value.
self.rain = 'cumulative'
# initialise our import field-to-WeeWX archive field map
self.map = None
# Cumulus log files have a number of 'rain' fields that can be used to
# derive the WeeWX rain field. Which one is available depends on the
# Cumulus version that created the logs. The preferred field is field
# 26(AA) - total rainfall since midnight but it is only available in
# Cumulus v1.9.4 or later. If that field is not available then the
        # preferred field is field 09(J) - total rainfall today, then field
# 11(L) - total rainfall counter. Initialise the rain_source_confirmed
# property now and we will deal with it later when we have some source
# data.
self.rain_source_confirmed = None
        # Units of measure for some obs (e.g. temperatures) cannot be derived from
# the Cumulus monthly log files. These units must be specified by the
# user in the import config file. Read these units and fill in the
# missing unit data in the header map. Do some basic error checking and
# validation, if one of the fields is missing or invalid then we need
# to catch the error and raise it as we can't go on.
# Temperature
try:
temp_u = cumulus_config_dict['Units'].get('temperature')
except KeyError:
_msg = "No units specified for Cumulus temperature " \
"fields in %s." % (self.import_config_path, )
raise weewx.UnitError(_msg)
else:
# temperature units vary between unit systems so we can verify a
# valid temperature unit simply by checking for membership of
# weewx.units.conversionDict keys
if temp_u in weewx.units.conversionDict.keys():
self._header_map['cur_out_temp']['units'] = temp_u
self._header_map['curr_in_temp']['units'] = temp_u
self._header_map['cur_dewpoint']['units'] = temp_u
self._header_map['cur_heatindex']['units'] = temp_u
self._header_map['cur_windchill']['units'] = temp_u
self._header_map['cur_app_temp']['units'] = temp_u
else:
_msg = "Unknown units '%s' specified for Cumulus " \
"temperature fields in %s." % (temp_u,
self.import_config_path)
raise weewx.UnitError(_msg)
# Pressure
try:
press_u = cumulus_config_dict['Units'].get('pressure')
except KeyError:
_msg = "No units specified for Cumulus pressure " \
"fields in %s." % (self.import_config_path, )
raise weewx.UnitError(_msg)
else:
if press_u in ['inHg', 'mbar', 'hPa']:
self._header_map['cur_slp']['units'] = press_u
else:
_msg = "Unknown units '%s' specified for Cumulus " \
"pressure fields in %s." % (press_u,
self.import_config_path)
raise weewx.UnitError(_msg)
# Rain
try:
rain_u = cumulus_config_dict['Units'].get('rain')
except KeyError:
_msg = "No units specified for Cumulus " \
"rain fields in %s." % (self.import_config_path, )
raise weewx.UnitError(_msg)
else:
if rain_u in rain_units_dict:
self._header_map['midnight_rain']['units'] = rain_u
self._header_map['cur_rain_rate']['units'] = rain_units_dict[rain_u]
else:
_msg = "Unknown units '%s' specified for Cumulus " \
"rain fields in %s." % (rain_u,
self.import_config_path)
raise weewx.UnitError(_msg)
# Speed
try:
speed_u = cumulus_config_dict['Units'].get('speed')
except KeyError:
_msg = "No units specified for Cumulus " \
"speed fields in %s." % (self.import_config_path, )
raise weewx.UnitError(_msg)
else:
# speed units vary between unit systems so we can verify a valid
# speed unit simply by checking for membership of
# weewx.units.conversionDict keys
if speed_u in weewx.units.conversionDict.keys():
self._header_map['avg_wind_speed']['units'] = speed_u
self._header_map['gust_wind_speed']['units'] = speed_u
else:
_msg = "Unknown units '%s' specified for Cumulus " \
"speed fields in %s." % (speed_u,
self.import_config_path)
raise weewx.UnitError(_msg)
# get our source file path
try:
self.source = cumulus_config_dict['directory']
except KeyError:
_msg = "Cumulus monthly logs directory not specified in '%s'." % import_config_path
raise weewx.ViolatedPrecondition(_msg)
# get the source file encoding, default to utf-8-sig
self.source_encoding = self.cumulus_config_dict.get('source_encoding',
'utf-8-sig')
# property holding the current log file name being processed
self.file_name = None
# Now get a list on monthly log files sorted from oldest to newest
month_log_list = glob.glob(self.source + '/?????log.txt')
_temp = [(fn, fn[-9:-7], time.strptime(fn[-12:-9], '%b').tm_mon) for fn in month_log_list]
self.log_list = [a[0] for a in sorted(_temp,
key=lambda el: (el[1], el[2]))]
if len(self.log_list) == 0:
raise weeimport.WeeImportIOError(
"No Cumulus monthly logs found in directory '%s'." % self.source)
# tell the user/log what we intend to do
_msg = "Cumulus monthly log files in the '%s' directory will be imported" % self.source
print(_msg)
log.info(_msg)
_msg = "The following options will be used:"
if self.verbose:
print(_msg)
log.debug(_msg)
_msg = " config=%s, import-config=%s" % (config_path,
self.import_config_path)
if self.verbose:
print(_msg)
log.debug(_msg)
if options.date:
_msg = " date=%s" % options.date
else:
# we must have --from and --to
_msg = " from=%s, to=%s" % (options.date_from, options.date_to)
if self.verbose:
print(_msg)
log.debug(_msg)
_msg = " dry-run=%s, calc_missing=%s, " \
"ignore_invalid_data=%s" % (self.dry_run,
self.calc_missing,
self.ignore_invalid_data)
if self.verbose:
print(_msg)
log.debug(_msg)
_msg = " tranche=%s, interval=%s" % (self.tranche,
self.interval)
if self.verbose:
print(_msg)
log.debug(_msg)
_msg = " UV=%s, radiation=%s" % (self.UV_sensor, self.solar_sensor)
if self.verbose:
print(_msg)
log.debug(_msg)
_msg = "Using database binding '%s', which is bound " \
"to database '%s'" % (self.db_binding_wx,
self.dbm.database_name)
print(_msg)
log.info(_msg)
_msg = "Destination table '%s' unit system " \
"is '%#04x' (%s)." % (self.dbm.table_name,
self.archive_unit_sys,
unit_nicknames[self.archive_unit_sys])
print(_msg)
log.info(_msg)
if self.calc_missing:
print("Missing derived observations will be calculated.")
if not self.UV_sensor:
print("All WeeWX UV fields will be set to None.")
if not self.solar_sensor:
print("All WeeWX radiation fields will be set to None.")
if options.date or options.date_from:
print("Observations timestamped after %s and "
"up to and" % timestamp_to_string(self.first_ts))
print("including %s will be imported." % timestamp_to_string(self.last_ts))
if self.dry_run:
print("This is a dry run, imported data will not be saved to archive.")
def getRawData(self, period):
"""Get raw observation data and construct a map from Cumulus monthly
log fields to WeeWX archive fields.
Obtain raw observational data from Cumulus monthly logs. This raw data
needs to be cleaned of unnecessary characters/codes, a date-time field
generated for each row and an iterable returned.
Input parameters:
period: the file name, including path, of the Cumulus monthly log
file from which raw obs data will be read.
"""
# period holds the filename of the monthly log file that contains our
# data. Does our source exist?
if os.path.isfile(period):
# It exists. The source file may use some encoding, if we can't
# decode it raise a WeeImportDecodeError.
try:
with io.open(period, mode='r', encoding=self.source_encoding) as f:
_raw_data = f.readlines()
except UnicodeDecodeError as e:
# not a utf-8 based encoding, so raise a WeeImportDecodeError
raise weeimport.WeeImportDecodeError(e)
else:
# If it doesn't we can't go on so raise it
raise weeimport.WeeImportIOError(
"Cumulus monthly log file '%s' could not be found." % period)
# Our raw data needs a bit of cleaning up before we can parse/map it.
_clean_data = []
for _row in _raw_data:
# check for and remove any null bytes
clean_row = _row
if "\x00" in _row:
clean_row = clean_row.replace("\x00", "")
_msg = "One or more null bytes found in and removed " \
"from monthly log file '%s'" % (period, )
print(_msg)
log.info(_msg)
# make sure we have full stops as decimal points
_line = clean_row.replace(self.decimal, '.')
# ignore any blank lines
if _line != "\n":
# Cumulus has separate date and time fields as the first 2
# fields of a row. It is easier to combine them now into a
# single date-time field that we can parse later when we map the
# raw data.
_datetime_line = _line.replace(self.delimiter, ' ', 1)
# Save what's left
_clean_data.append(_datetime_line)
# if we haven't confirmed our source for the WeeWX rain field we need
# to do so now
if self.rain_source_confirmed is None:
# The Cumulus source field depends on the Cumulus version that
# created the log files. Unfortunately, we can only determine
# which field to use by looking at the mapped Cumulus data. If we
# look at our DictReader we have no way to reset it, so we create
# a one off DictReader to use instead.
_rain_reader = csv.DictReader(_clean_data, fieldnames=self._field_list,
delimiter=self.delimiter)
# now that we know what Cumulus fields are available we can set our
# rain source appropriately
self.set_rain_source(_rain_reader)
# Now create a dictionary CSV reader
_reader = csv.DictReader(_clean_data, fieldnames=self._field_list,
delimiter=self.delimiter)
# Finally, get our database-source mapping
self.map = self.parseMap('Cumulus', _reader, self.cumulus_config_dict)
# Return our dict reader
return _reader
def period_generator(self):
"""Generator function yielding a sequence of monthly log file names.
This generator controls the FOR statement in the parents run() method
that loops over the monthly log files to be imported. The generator
yields a monthly log file name from the list of monthly log files to
be imported until the list is exhausted.
"""
# step through each of our file names
for self.file_name in self.log_list:
# yield the file name
yield self.file_name
@property
def first_period(self):
"""True if current period is the first period otherwise False.
Return True if the current file name being processed is the first in
the list or it is None (the initialisation value).
"""
return self.file_name == self.log_list[0] if self.file_name is not None else True
@property
def last_period(self):
"""True if current period is the last period otherwise False.
Return True if the current file name being processed is the last in
the list.
"""
return self.file_name == self.log_list[-1]
def set_rain_source(self, _data):
"""Set the Cumulus field to be used as the WeeWX rain field source.
"""
_row = next(_data)
if _row['midnight_rain'] is not None:
# we have data in midnight_rain, our default source, so leave
# things as they are and return
pass
elif _row['day_rain'] is not None:
# we have data in day_rain so use that as our rain source
self._header_map['day_rain'] = self._header_map['midnight_rain']
del self._header_map['midnight_rain']
elif _row['rain_counter'] is not None:
# we have data in rain_counter so use that as our rain source
self._header_map['rain_counter'] = self._header_map['midnight_rain']
del self._header_map['midnight_rain']
else:
# We should never end up in this state but....
# We have no suitable rain source so we can't import so remove the
# rain field entry from the header map.
del self._header_map['midnight_rain']
# we only need to do this once so set our flag to True
self.rain_source_confirmed = True
return
| weewx/weewx | bin/weeimport/cumulusimport.py | Python | gpl-3.0 | 20,404 | 0.000784 |
__author__ = 'joseph'
import statistics
import numpy as np
class AccelData(object):
def __init__(self,Accel):
#Static accelerometer data
self.Accel = Accel
def applyCalib(self,params,Accel):
ax = params['ax']
ay = params['ay']
az = params['az']
scaling_Matrix = np.diag([params['kx'], params['ky'],params['kz']])
misalignment_Matrix = np.array([[1.0, -ax,ay],
[0, 1.0, -az],
[0,0,1.0]])
adjustment_matrix = np.dot(misalignment_Matrix,scaling_Matrix)
bx = params['bx']
by = params['by']
bz = params['bz']
# apply theta to the accelerometer
Accel[0,:] = Accel[0,:] - bx
Accel[1,:] = Accel[1,:] - by
Accel[2,:] = Accel[2,:] - bz
Accel = np.dot(adjustment_matrix,Accel)
        return Accel  # np.dot creates a new array, so the caller needs this return value
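# Note (added): AccelData.applyCalib and GyroData.applyCalib apply the same
# sensor-error model: the per-axis bias is subtracted from the raw samples and
# the result is multiplied by misalignment_Matrix . scaling_Matrix, i.e.
#     calibrated = T * K * (raw - b)
# where K is the diagonal scale-factor matrix and T the near-identity
# misalignment matrix built from the fitted parameters.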
class GyroData(object):
def __init__(self,Gyro,bx,by,bz):
self.bx = bx
self.by = by
self.bz = bz
self.Gyro = Gyro
def applyCalib(self,params,Gyro):
scaling_Matrix = np.diag([params['sx'], params['sy'],params['sz']])
misalignment_Matrix = np.array([
[1, params['gamma_yz'],params['gamma_zy']],
[params['gamma_xz'], 1, params['gamma_zx']],
[params['gamma_xy'],params['gamma_yx'],1]])
adjustment_matrix = np.dot(misalignment_Matrix,scaling_Matrix)
Gyro[0,:] = Gyro[0,:] - self.bx
Gyro[1,:] = Gyro[1,:] - self.by
Gyro[2,:] = Gyro[2,:] - self.bz
Gyro = np.dot(adjustment_matrix,Gyro)
return Gyro
class RollingStatistic(object):
def __init__(self, window_size):
self.N = window_size
self.window = window_size * [0]
self.average = 0
self.variance = 0
self.stddev = 0
self.index = 0
def update(self,new):
# Preload
if(self.index < self.N):
self.window[self.index] = new
self.index += 1
# If Window preloaded - start rolling statistics
if(self.index == self.N):
self.average = statistics.mean(self.window)
self.variance = statistics.variance(self.window)
return
# Push element into window list and remove the old element
old = self.window[0]
self.window.pop(0)
self.window.append(new)
oldavg = self.average
newavg = oldavg + (new - old)/self.N
self.average = newavg
if(self.N > 1):
self.variance += (new-old)*(new-newavg+old-oldavg)/(self.N-1)
def getVar(self):
if(self.index == 1):
return 0
elif(self.index < self.N):
return statistics.variance(self.window[0:self.index]) # Make return 0?
return self.variance
def reset(self):
self.index = 0
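# Illustrative usage sketch (names below are examples, not part of this module):
#
#     rs = RollingStatistic(25)
#     for sample in accel_x_samples:   # assumed iterable of floats
#         rs.update(sample)
#     windowed_variance = rs.getVar()
#
# update() preloads the first window_size samples, then maintains the mean and
# variance incrementally in O(1) per new sample as the oldest sample drops out.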
def static_invertal_detection(Data_in, Time, options,var_mult):
total_samples = len(Time)
Initial_Static = options[0]
index = 0
static_timer = 0
static_window = options[1]
running_var_x = RollingStatistic(25)
running_var_y = RollingStatistic(25)
running_var_z = RollingStatistic(25)
# Find the total number of entries in the initial wait period
while (static_timer <= Initial_Static):
static_timer = static_timer + Time[index]
index = index +1
Static_var_X = statistics.variance(Data_in[0:index,0])
Static_var_Y = statistics.variance(Data_in[0:index,1])
Static_var_Z = statistics.variance(Data_in[0:index,2])
Static_Th = Static_var_X**2 + Static_var_Y**2 + Static_var_Z**2 #Static threshold
static_timer = 0
current_interval_start = 1
current_interval_end = current_interval_start + 1
Valid_intervals_starts = []
Valid_intervals_ends = []
num_static = 0
Max = -999999
Min = 999999
#loop through the dataset and map the static intervals
for i in range(0,total_samples):
# update time
static_timer = static_timer + Time[i]
running_var_x.update(Data_in[i,0])
running_var_y.update(Data_in[i,1])
running_var_z.update(Data_in[i,2])
m = max([Data_in[i,0],Data_in[i,1],Data_in[i,2]])
mn = min([Data_in[i,0],Data_in[i,1],Data_in[i,2]])
# Store maximum for constructing the visualization of this later
if(m > Max):
Max = m
if(mn < Min):
Min = mn
# Check current (rolling) variance
current_norm = running_var_x.getVar()**2 + running_var_y.getVar()**2 + running_var_z.getVar()**2
if(current_norm > Static_Th*var_mult):
            # check if the latest interval is of valid length
if(static_timer >= static_window):
num_static += 1
current_interval_end = i -1 # skip the point that caused it to go beyond threshold
Valid_intervals_starts.append(current_interval_start)
Valid_intervals_ends.append(current_interval_end)
# Reset running variances
running_var_x.reset()
running_var_y.reset()
running_var_z.reset()
# Reset the current static interval starting and ending index
current_interval_end = i
current_interval_start = current_interval_end
# Reset timer
static_timer = 0
# Main loop ended
visualize = total_samples * [28000]
for i in range(0,num_static):
length = Valid_intervals_ends[i] - Valid_intervals_starts[i] + 1
visualize[Valid_intervals_starts[i]:(Valid_intervals_ends[i]+1)] = [.6*Max]*length
return Valid_intervals_starts, Valid_intervals_ends, visualize, index
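# Illustrative usage sketch (argument names/values are assumptions, not from
# this module): Data_in is an (N, 3) array of samples, Time holds per-sample
# time deltas, options = [initial_static_seconds, static_window_seconds], and
# var_mult scales the variance threshold derived from the initial rest period.
#
#     starts, ends, visualize, init_end = static_invertal_detection(
#         accel_data, dt_list, [4.0, 1.0], var_mult=2.0)
#
# starts/ends hold the index ranges of the detected static intervals and
# visualize is a same-length trace for plotting which regions were flagged.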
def accel_resid(params, accel_staticx,accel_staticy,accel_staticz):
scaling_Matrix = np.diag([params['kx'], params['ky'],params['kz']])
misalignment_Matrix = np.array([[1, -params['ax'],params['ay']],
[0, 1, -params['az']],
[0,0,1]])
adjustment_matrix = np.dot(misalignment_Matrix,scaling_Matrix)
local_gravity = 9.81744
bx = params['bx']
by = params['by']
bz = params['bz']
# apply theta to the accelerometer
accel_static = np.zeros((3,len(accel_staticx)))
accel_static[0,:] = accel_staticx - bx
accel_static[1,:] = accel_staticy - by
accel_static[2,:] = accel_staticz - bz
accel_static = np.dot(adjustment_matrix,accel_static)
residual = len(accel_staticx)*[0.0]
for i in range (0,len(accel_staticx)):
residual[i] = (local_gravity**2 - (accel_static[0,i]**2 + accel_static[1,i]**2 + accel_static[2,i]**2))
return residual
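# Note (added): accel_resid() is the cost function for the static accelerometer
# calibration: for each static sample it returns
#     g**2 - || T * K * (a - b) ||**2
# with local gravity g = 9.81744, so a perfectly calibrated static measurement
# has zero residual. A least-squares optimiser minimising these residuals fits
# the bias, scale and misalignment parameters; the name-indexed `params`
# mapping is compatible with e.g. lmfit.Parameters (an assumption - this file
# does not show which optimiser is used).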
def gyro_resid(params,GyroData,AccelData,Time):
index = 0
interval_count = len(GyroData.Gyro)
resid = interval_count*[0.0]
for Gyro in GyroData.Gyro:
# Apply calibration of the gyroscope
dt = Time[index]
G = np.array(Gyro)
G_calib = GyroData.applyCalib(params,G.transpose())
R = quaternion_RK4(G_calib,dt)
# Extract gravity vector from accelerometer
a = AccelData.Accel[:,index]
Ua = AccelData.Accel[:,index+1]
# Apply predicted rotation to accelerometer and compare to observed
Ug = np.dot(R,a)
diff = Ua - Ug
# store the magnitude of the difference and update the static interval index
resid[index] = diff[0]**2 + diff[1]**2 + diff[2]**2
index += 1
return resid
#TODO: Move to misc. kinematics
def quaternion_RK4(gyro,dt):
num_samples = gyro.shape[1]
q_k = np.array([1,0,0,0])
# RK loop
for i in range(0,(num_samples-1)):
q1 = q_k
S1 = gyro_cross4(gyro[:,i])
k_1 = 1.0/2.0*np.dot(S1,q1)
q2 = q_k + dt*1.0/2.0*k_1
half_gyro_left = 1.0/2.0*(gyro[:,i] + gyro[:,i+1])
S_half = gyro_cross4(half_gyro_left)
k_2 = 1.0/2.0*np.dot(S_half,q2)
q3 = q_k + dt*1.0/2.0*k_2
k_3 = 1.0/2.0*np.dot(S_half,q3)
q4 = q_k + dt*k_3
S_2 = gyro_cross4(gyro[:,i+1])
k_4 = 1.0/2.0*np.dot(S_2,q4)
q_k = q_k + dt*(1.0/6.0*k_1 + 1.0/3.0*k_2 + 1.0/3.0*k_3 + 1.0/6.0*k_4)
q_k = q_k*1.0/np.linalg.norm(q_k)
# Convert quaternion to rotation matrix
# TODO: MOVE TO MISC KIN
r11 = q_k[0]**2 + q_k[1]**2 - q_k[2]**2 - q_k[3]**2
r12 = 2.0*(q_k[1]*q_k[2] - q_k[0]*q_k[3])
r13 = 2.0*(q_k[1]*q_k[3] + q_k[0]*q_k[2])
r21 = 2.0*(q_k[1]*q_k[2] + q_k[0]*q_k[3])
r22 = q_k[0]**2 - q_k[1]**2 + q_k[2]**2 - q_k[3]**2
r23 = 2.0*(q_k[2]*q_k[3] - q_k[0]*q_k[1])
r31 = 2.0*(q_k[1]*q_k[3] - q_k[0]*q_k[2])
r32 = 2.0*(q_k[2]*q_k[3] + q_k[0]*q_k[1])
r33 = q_k[0]**2 - q_k[1]**2 - q_k[2]**2 + q_k[3]**2
# Note that R is actually the transpose of what it would normally be
R = np.array([[r11, r21, r31],
[r12, r22, r32],
[r13, r23, r33]])
return R
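# Note (added): quaternion_RK4() integrates the attitude-quaternion kinematics
#     dq/dt = 1/2 * Omega(w) * q
# over the gyro samples of one interval with classical RK4, using the average
# of consecutive samples as the half-step angular rate and renormalising q
# after every step. The final quaternion is converted to a direction-cosine
# matrix; as noted in the in-function comment, the transpose of the
# conventional matrix is returned.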
def gyro_cross4(gyro):
gx = gyro[0]
gy = gyro[1]
gz = gyro[2]
Sx = np.array([[0, -gx, -gy, -gz],
[gx, 0, gz, -gy],
[gy, -gz, 0, gx],
[gz, gy, -gx, 0]])
return Sx | jchrismer/PiQuad | Calibration/Inertial_Calibration.py | Python | gpl-3.0 | 9,316 | 0.012988 |
from sympy import (
Symbol, gamma, I, oo, nan, zoo, factorial, sqrt, Rational, log,
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin,
cos, O, cancel, lowergamma, exp, erf, beta, exp_polar, harmonic, zeta,
factorial)
from sympy.core.function import ArgumentIndexError
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
test_numerically as tn)
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
def test_gamma():
assert gamma(nan) == nan
assert gamma(oo) == oo
assert gamma(-100) == zoo
assert gamma(0) == zoo
assert gamma(1) == 1
assert gamma(2) == 1
assert gamma(3) == 2
assert gamma(102) == factorial(101)
assert gamma(Rational(1, 2)) == sqrt(pi)
assert gamma(Rational(3, 2)) == Rational(1, 2)*sqrt(pi)
assert gamma(Rational(5, 2)) == Rational(3, 4)*sqrt(pi)
assert gamma(Rational(7, 2)) == Rational(15, 8)*sqrt(pi)
assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
assert gamma(Rational(-3, 2)) == Rational(4, 3)*sqrt(pi)
assert gamma(Rational(-5, 2)) == -Rational(8, 15)*sqrt(pi)
assert gamma(Rational(-15, 2)) == Rational(256, 2027025)*sqrt(pi)
assert gamma(Rational(
-11, 8)).expand(func=True) == Rational(64, 33)*gamma(Rational(5, 8))
assert gamma(Rational(
-10, 3)).expand(func=True) == Rational(81, 280)*gamma(Rational(2, 3))
assert gamma(Rational(
14, 3)).expand(func=True) == Rational(880, 81)*gamma(Rational(2, 3))
assert gamma(Rational(
17, 7)).expand(func=True) == Rational(30, 49)*gamma(Rational(3, 7))
assert gamma(Rational(
19, 8)).expand(func=True) == Rational(33, 64)*gamma(Rational(3, 8))
assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
assert gamma(x - 1).expand(func=True) == gamma(x)/(x - 1)
assert gamma(x + 2).expand(func=True, mul=False) == x*(x + 1)*gamma(x)
assert expand_func(gamma(x + Rational(3, 2))) == \
(x + Rational(1, 2))*gamma(x + Rational(1, 2))
assert expand_func(gamma(x - Rational(1, 2))) == \
gamma(Rational(1, 2) + x)/(x - Rational(1, 2))
# Test a bug:
assert expand_func(gamma(x + Rational(3, 4))) == gamma(x + Rational(3, 4))
assert gamma(3*exp_polar(I*pi)/4).is_nonnegative is False
assert gamma(3*exp_polar(I*pi)/4).is_nonpositive is True
def test_gamma_series():
assert gamma(x + 1).series(x, 0, 3) == \
1 - EulerGamma*x + x**2*(EulerGamma**2/2 + pi**2/12) + O(x**3)
assert gamma(x).series(x, -1, 3) == \
-1/x + EulerGamma - 1 + x*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma) \
+ x**2*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma**3/6 -
polygamma(2, 1)/6 + EulerGamma*pi**2/12 + EulerGamma) + O(x**3)
def tn_branch(s, func):
from sympy import I, pi, exp_polar
from random import uniform
c = uniform(1, 5)
expr = func(s, c*exp_polar(I*pi)) - func(s, c*exp_polar(-I*pi))
eps = 1e-15
expr2 = func(s + eps, -c + eps*I) - func(s + eps, -c - eps*I)
return abs(expr.n() - expr2.n()).n() < 1e-10
def test_lowergamma():
from sympy import meijerg, exp_polar, I, expint
assert lowergamma(x, y).diff(y) == y**(x - 1)*exp(-y)
assert td(lowergamma(randcplx(), y), y)
assert lowergamma(x, y).diff(x) == \
gamma(x)*polygamma(0, x) - uppergamma(x, y)*log(y) \
+ meijerg([], [1, 1], [0, 0, x], [], y)
assert lowergamma(S.Half, x) == sqrt(pi)*erf(sqrt(x))
assert not lowergamma(S.Half - 3, x).has(lowergamma)
assert not lowergamma(S.Half + 3, x).has(lowergamma)
assert lowergamma(S.Half, x, evaluate=False).has(lowergamma)
assert tn(lowergamma(S.Half + 3, x, evaluate=False),
lowergamma(S.Half + 3, x), x)
assert tn(lowergamma(S.Half - 3, x, evaluate=False),
lowergamma(S.Half - 3, x), x)
assert lowergamma(x, y).rewrite(uppergamma) == gamma(x) - uppergamma(x, y)
assert tn_branch(-3, lowergamma)
assert tn_branch(-4, lowergamma)
assert tn_branch(S(1)/3, lowergamma)
assert tn_branch(pi, lowergamma)
assert lowergamma(3, exp_polar(4*pi*I)*x) == lowergamma(3, x)
assert lowergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*lowergamma(y, x*exp_polar(pi*I))
assert lowergamma(-2, exp_polar(5*pi*I)*x) == \
lowergamma(-2, x*exp_polar(I*pi)) + 2*pi*I
assert lowergamma(
x, y).rewrite(expint) == -y**x*expint(-x + 1, y) + gamma(x)
k = Symbol('k', integer=True)
assert lowergamma(
k, y).rewrite(expint) == -y**k*expint(-k + 1, y) + gamma(k)
k = Symbol('k', integer=True, positive=False)
assert lowergamma(k, y).rewrite(expint) == lowergamma(k, y)
def test_uppergamma():
from sympy import meijerg, exp_polar, I, expint
assert uppergamma(4, 0) == 6
assert uppergamma(x, y).diff(y) == -y**(x - 1)*exp(-y)
assert td(uppergamma(randcplx(), y), y)
assert uppergamma(x, y).diff(x) == \
uppergamma(x, y)*log(y) + meijerg([], [1, 1], [0, 0, x], [], y)
assert td(uppergamma(x, randcplx()), x)
assert uppergamma(S.Half, x) == sqrt(pi)*(1 - erf(sqrt(x)))
assert not uppergamma(S.Half - 3, x).has(uppergamma)
assert not uppergamma(S.Half + 3, x).has(uppergamma)
assert uppergamma(S.Half, x, evaluate=False).has(uppergamma)
assert tn(uppergamma(S.Half + 3, x, evaluate=False),
uppergamma(S.Half + 3, x), x)
assert tn(uppergamma(S.Half - 3, x, evaluate=False),
uppergamma(S.Half - 3, x), x)
assert uppergamma(x, y).rewrite(lowergamma) == gamma(x) - lowergamma(x, y)
assert tn_branch(-3, uppergamma)
assert tn_branch(-4, uppergamma)
assert tn_branch(S(1)/3, uppergamma)
assert tn_branch(pi, uppergamma)
assert uppergamma(3, exp_polar(4*pi*I)*x) == uppergamma(3, x)
assert uppergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*uppergamma(y, x*exp_polar(pi*I)) + \
gamma(y)*(1 - exp(4*pi*I*y))
assert uppergamma(-2, exp_polar(5*pi*I)*x) == \
uppergamma(-2, x*exp_polar(I*pi)) - 2*pi*I
assert uppergamma(-2, x) == expint(3, x)/x**2
assert uppergamma(x, y).rewrite(expint) == y**x*expint(-x + 1, y)
def test_polygamma():
from sympy import I
assert polygamma(n, nan) == nan
assert polygamma(0, oo) == oo
assert polygamma(1, oo) == 0
assert polygamma(5, oo) == 0
assert polygamma(0, -9) == zoo
assert polygamma(0, -9) == zoo
assert polygamma(0, -1) == zoo
assert polygamma(0, 0) == zoo
assert polygamma(0, 1) == -EulerGamma
assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
assert polygamma(1, 1) == pi**2/6
assert polygamma(1, 2) == pi**2/6 - 1
assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
assert polygamma(3, 1) == pi**4 / 15
assert polygamma(3, 5) == 6*(Rational(-22369, 20736) + pi**4/90)
assert polygamma(5, 1) == 8 * pi**6 / 63
def t(m, n):
x = S(m)/n
r = polygamma(0, x)
if r.has(polygamma):
return False
return abs(polygamma(0, x.n()).n() - r.n()).n() < 1e-10
assert t(1, 2)
assert t(3, 2)
assert t(-1, 2)
assert t(1, 4)
assert t(-3, 4)
assert t(1, 3)
assert t(4, 3)
assert t(3, 4)
assert t(2, 3)
assert polygamma(0, x).rewrite(zeta) == polygamma(0, x)
assert polygamma(1, x).rewrite(zeta) == zeta(2, x)
assert polygamma(2, x).rewrite(zeta) == -2*zeta(3, x)
assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
assert polygamma(0, x).rewrite(harmonic) == harmonic(x - 1) - EulerGamma
assert polygamma(2, x).rewrite(harmonic) == 2*harmonic(x - 1, 3) - 2*zeta(3)
ni = Symbol("n", integer=True)
assert polygamma(ni, x).rewrite(harmonic) == (-1)**(ni + 1)*(-harmonic(x - 1, ni + 1)
+ zeta(ni + 1))*factorial(ni)
# Polygamma of non-negative integer order is unbranched:
from sympy import exp_polar
k = Symbol('n', integer=True, nonnegative=True)
assert polygamma(k, exp_polar(2*I*pi)*x) == polygamma(k, x)
# but negative integers are branched!
k = Symbol('n', integer=True)
assert polygamma(k, exp_polar(2*I*pi)*x).args == (k, exp_polar(2*I*pi)*x)
# Polygamma of order -1 is loggamma:
assert polygamma(-1, x) == loggamma(x)
# But smaller orders are iterated integrals and don't have a special name
assert polygamma(-2, x).func is polygamma
# Test a bug
assert polygamma(0, -x).expand(func=True) == polygamma(0, -x)
def test_polygamma_expand_func():
assert polygamma(0, x).expand(func=True) == polygamma(0, x)
assert polygamma(0, 2*x).expand(func=True) == \
polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
assert polygamma(1, 2*x).expand(func=True) == \
polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
assert polygamma(2, x).expand(func=True) == \
polygamma(2, x)
assert polygamma(0, -1 + x).expand(func=True) == \
polygamma(0, x) - 1/(x - 1)
assert polygamma(0, 1 + x).expand(func=True) == \
1/x + polygamma(0, x )
assert polygamma(0, 2 + x).expand(func=True) == \
1/x + 1/(1 + x) + polygamma(0, x)
assert polygamma(0, 3 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
assert polygamma(0, 4 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
assert polygamma(1, 1 + x).expand(func=True) == \
polygamma(1, x) - 1/x**2
assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
1/(2 + x)**2 - 1/(3 + x)**2
assert polygamma(0, x + y).expand(func=True) == \
polygamma(0, x + y)
assert polygamma(1, x + y).expand(func=True) == \
polygamma(1, x + y)
assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4
e = polygamma(3, 4*x + y + S(3)/2)
assert e.expand(func=True) == e
e = polygamma(3, x + y + S(3)/4)
assert e.expand(func=True, basic=False) == e
def test_loggamma():
raises(TypeError, lambda: loggamma(2, 3))
raises(ArgumentIndexError, lambda: loggamma(x).fdiff(2))
assert loggamma(x).diff(x) == polygamma(0, x)
s1 = loggamma(1/(x + sin(x)) + cos(x)).nseries(x, n=4)
s2 = (-log(2*x) - 1)/(2*x) - log(x/pi)/2 + (4 - log(2*x))*x/24 + O(x**2)
assert (s1 - s2).expand(force=True).removeO() == 0
s1 = loggamma(1/x).series(x)
s2 = (1/x - S(1)/2)*log(1/x) - 1/x + log(2*pi)/2 + \
x/12 - x**3/360 + x**5/1260 + O(x**7)
assert ((s1 - s2).expand(force=True)).removeO() == 0
assert loggamma(x).rewrite('intractable') == log(gamma(x))
assert loggamma(x).is_real is None
y, z = Symbol('y', real=True), Symbol('z', imaginary=True)
assert loggamma(y).is_real
assert loggamma(z).is_real is False
def tN(N, M):
assert loggamma(1/x)._eval_nseries(x, n=N, logx=None).getn() == M
tN(0, 0)
tN(1, 1)
tN(2, 3)
tN(3, 3)
tN(4, 5)
tN(5, 5)
def test_polygamma_expansion():
    # A. & S., pp. 259 and 260
assert polygamma(0, 1/x).nseries(x, n=3) == \
-log(x) - x/2 - x**2/12 + O(x**4)
assert polygamma(1, 1/x).series(x, n=5) == \
x + x**2/2 + x**3/6 + O(x**5)
assert polygamma(3, 1/x).nseries(x, n=8) == \
2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
def test_beta_function():
x, y = Symbol('x'), Symbol('y')
assert beta(x, y) == gamma(x)*gamma(y)/gamma(x + y)
assert beta(x, y) == beta(y, x) # Symmetric
| lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/functions/special/tests/test_gamma_functions.py | Python | gpl-3.0 | 12,392 | 0.000484 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/lok/shared_mining_cave_01.iff"
result.attribute_template_id = -1
result.stfName("building_name","cave")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| obi-two/Rebelion | data/scripts/templates/object/building/lok/shared_mining_cave_01.py | Python | mit | 440 | 0.047727 |
# -*- coding: utf-8 -*-
"""
2020-09-07 Cornelius Kölbel <cornelius.koelbel@netknights.it>
Add exception
2017-04-26 Friedrich Weber <friedrich.weber@netknights.it>
Make it possible to check for correct LDAPS/STARTTLS settings
2017-01-08 Cornelius Kölbel <cornelius.koelbel@netknights.it>
Remove objectGUID. Since we stick with ldap3 version 2.1,
the objectGUID is returned in a human readable format.
2016-12-05 Martin Wheldon <martin.wheldon@greenhills-it.co.uk>
Fixed issue creating ldap entries with objectClasses defined
Fix problem when searching for attribute values containing the
space character.
2016-05-26 Martin Wheldon <martin.wheldon@greenhills-it.co.uk>
Rewrite of search functionality to add recursive parsing
of ldap search filters
Fixed issue searching for attributes with multiple values
Added ability to use ~= in searches
Created unittests for mock
2016-02-19 Cornelius Kölbel <cornelius.koelbel@netknights.it>
Add the possibility to check objectGUID
2015-01-31 Change responses.py to be able to run with SMTP
Cornelius Kölbel <cornelius@privacyidea.org>
Original responses.py is:
Copyright 2013 Dropbox, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import (
absolute_import, division, unicode_literals
)
from passlib.hash import ldap_salted_sha1
from ast import literal_eval
import uuid
from ldap3.utils.conv import escape_bytes
import ldap3
import re
import pyparsing
from .smtpmock import get_wrapped
from collections import namedtuple, Sequence, Sized
from privacyidea.lib.utils import to_bytes, to_unicode
DIRECTORY = "tests/testdata/tmp_directory"
Call = namedtuple('Call', ['request', 'response'])
_wrapper_template = """\
def wrapper%(signature)s:
with ldap3mock:
return func%(funcargs)s
"""
def _convert_objectGUID(item):
item = uuid.UUID("{{{0!s}}}".format(item)).bytes_le
item = escape_bytes(item)
return item
class CallList(Sequence, Sized):
def __init__(self):
self._calls = []
def __iter__(self):
return iter(self._calls)
def __len__(self):
return len(self._calls)
def __getitem__(self, idx):
return self._calls[idx]
def setdata(self, request, response):
self._calls.append(Call(request, response))
def reset(self):
self._calls = []
class Connection(object):
class Extend(object):
class Standard(object):
def __init__(self, connection):
self.connection = connection
def paged_search(self, **kwargs):
self.connection.search(search_base=kwargs.get("search_base"),
search_scope=kwargs.get("search_scope"),
search_filter=kwargs.get(
"search_filter"),
attributes=kwargs.get("attributes"),
paged_size=kwargs.get("page_size"),
size_limit=kwargs.get("size_limit"),
paged_cookie=None)
result = self.connection.response
if kwargs.get("generator", False):
# If ``generator=True`` is passed, ``paged_search`` should return an iterator.
result = iter(result)
return result
def __init__(self, connection):
self.standard = self.Standard(connection)
def __init__(self, directory=None):
if directory is None:
directory = []
import copy
self.directory = copy.deepcopy(directory)
self.bound = False
self.start_tls_called = False
self.extend = self.Extend(self)
self.operation = {
"!" : self._search_not,
"&" : self._search_and,
"|" : self._search_or,
}
def set_directory(self, directory):
self.directory = directory
def _find_user(self, dn):
return next(i for (i, d) in enumerate(self.directory) if d["dn"] == dn)
@staticmethod
def open(read_server_info=True):
return
def bind(self, read_server_info=True):
return self.bound
def start_tls(self, read_server_info=True):
self.start_tls_called = True
def add(self, dn, object_class=None, attributes=None):
self.result = { 'dn' : '',
'referrals' : None,
'description' : 'success',
'result' : 0,
'message' : '',
'type' : 'addResponse'}
# Check to see if the user exists in the directory
try:
index = self._find_user(dn)
except StopIteration:
# If we get here the user doesn't exist so continue
# Create a entry object for the new user
entry = {}
entry['dn'] = dn
entry['attributes'] = attributes
if object_class != None:
entry['attributes'].update( {'objectClass': object_class} )
else:
# User already exists
self.result["description"] = "failure"
self.result["result"] = 68
self.result["message"] = \
"Error entryAlreadyExists for {0}".format(dn)
return False
# Add the user entry to the directory
self.directory.append(entry)
# Attempt to write changes to disk
with open(DIRECTORY, 'w+') as f:
f.write(str(self.directory))
return True
def delete(self, dn, controls=None):
self.result = { 'dn' : '',
'referrals' : None,
'description' : 'success',
'result' : 0,
'message' : '',
'type' : 'addResponse'}
# Check to see if the user exists in the directory
try:
index = self._find_user(dn)
except StopIteration:
# If we get here the user doesn't exist so continue
self.result["description"] = "failure"
self.result["result"] = 32
self.result["message"] = "Error no such object: {0}".format(dn)
return False
# Delete the entry object for the user
self.directory.pop(index)
# Attempt to write changes to disk
with open(DIRECTORY, 'w+') as f:
f.write(str(self.directory))
return True
def modify(self, dn, changes, controls=None):
self.result = { 'dn' : '',
'referrals' : None,
'description' : 'success',
'result' : 0,
'message' : '',
'type' : 'modifyResponse'}
# Check to see if the user exists in the directory
try:
index = self._find_user(dn)
except StopIteration:
# If we get here the user doesn't exist so continue
self.result["description"] = "failure"
self.result["result"] = 32
self.result["message"] = "Error no such object: {0!s}".format(dn)
return False
# extract the hash we are interested in
entry = self.directory[index].get("attributes")
# Loop over the changes hash and apply them
for k, v in changes.items():
if v[0] == "MODIFY_DELETE":
entry.pop(k)
elif v[0] == "MODIFY_REPLACE" or v[0] == "MODIFY_ADD":
entry[k] = v[1][0]
else:
self.result["result"] = 2
self.result["message"] = "Error bad/missing/not implemented" \
"modify operation: %s" % k[1]
# Place the attributes back into the directory hash
self.directory[index]["attributes"] = entry
# Attempt to write changes to disk
with open(DIRECTORY, 'w+') as f:
f.write(str(self.directory))
return True
@staticmethod
def _match_greater_than_or_equal(search_base, attribute, value, candidates):
matches = list()
for entry in candidates:
dn = entry.get("dn")
if not dn.endswith(search_base):
continue
value_from_directory = entry.get("attributes").get(attribute)
if str(value_from_directory) >= str(value):
entry["type"] = "searchResEntry"
matches.append(entry)
return matches
@staticmethod
def _match_greater_than(search_base, attribute, value, candidates):
matches = list()
for entry in candidates:
dn = entry.get("dn")
if not dn.endswith(search_base):
continue
value_from_directory = entry.get("attributes").get(attribute)
if str(value_from_directory) > str(value):
entry["type"] = "searchResEntry"
matches.append(entry)
return matches
@staticmethod
def _match_less_than_or_equal(search_base, attribute, value, candidates):
matches = list()
for entry in candidates:
dn = entry.get("dn")
if not dn.endswith(search_base):
continue
value_from_directory = entry.get("attributes").get(attribute)
if str(value_from_directory) <= str(value):
entry["type"] = "searchResEntry"
matches.append(entry)
return matches
@staticmethod
def _match_less_than(search_base, attribute, value, candidates):
matches = list()
for entry in candidates:
dn = entry.get("dn")
if not dn.endswith(search_base):
continue
value_from_directory = entry.get("attributes").get(attribute)
if str(value_from_directory) < str(value):
entry["type"] = "searchResEntry"
matches.append(entry)
return matches
@staticmethod
def _match_equal_to(search_base, attribute, value, candidates):
matches = list()
match_using_regex = False
if "*" in value:
match_using_regex = True
#regex = check_escape(value)
regex = value.replace('*', '.*')
regex = "^{0}$".format(regex)
for entry in candidates:
dn = to_unicode(entry.get("dn"))
if attribute not in entry.get("attributes") or not dn.endswith(search_base):
continue
values_from_directory = entry.get("attributes").get(attribute)
if isinstance(values_from_directory, list):
for item in values_from_directory:
if attribute == "objectGUID":
item = _convert_objectGUID(item)
if match_using_regex:
m = re.match(regex, str(item), re.I)
if m:
entry["type"] = "searchResEntry"
matches.append(entry)
else:
if item == value:
entry["type"] = "searchResEntry"
matches.append(entry)
else:
if attribute == "objectGUID":
values_from_directory = _convert_objectGUID(values_from_directory)
if match_using_regex:
m = re.match(regex, str(values_from_directory), re.I)
if m:
entry["type"] = "searchResEntry"
matches.append(entry)
else:
# The value, which we compare is unicode, so we convert
# the values_from_directory to unicode rather than str.
if isinstance(values_from_directory, bytes):
values_from_directory = values_from_directory.decode(
"utf-8")
elif type(values_from_directory) == int:
values_from_directory = u"{0!s}".format(values_from_directory)
if value == values_from_directory:
entry["type"] = "searchResEntry"
matches.append(entry)
return matches
@staticmethod
def _match_notequal_to(search_base, attribute, value, candidates):
matches = list()
match_using_regex = False
if "*" in value:
match_using_regex = True
#regex = check_escape(value)
regex = value.replace('*', '.*')
regex = "^{0}$".format(regex)
for entry in candidates:
found = False
dn = entry.get("dn")
if not dn.endswith(search_base):
continue
values_from_directory = entry.get("attributes").get(attribute)
if isinstance(values_from_directory, list):
for item in values_from_directory:
if attribute == "objectGUID":
item = _convert_objectGUID(item)
if match_using_regex:
m = re.match(regex, str(item), re.I)
if m:
found = True
else:
if item == value:
found = True
if found is False:
entry["type"] = "searchResEntry"
matches.append(entry)
else:
if attribute == "objectGUID":
values_from_directory = _convert_objectGUID(values_from_directory)
if match_using_regex:
m = re.match(regex, str(values_from_directory), re.I)
if not m:
entry["type"] = "searchResEntry"
matches.append(entry)
else:
if str(value) != str(values_from_directory):
entry["type"] = "searchResEntry"
matches.append(entry)
return matches
@staticmethod
def _parse_filter():
op = pyparsing.oneOf('! & |')
lpar = pyparsing.Literal('(').suppress()
rpar = pyparsing.Literal(')').suppress()
k = pyparsing.Word(pyparsing.alphanums)
# NOTE: We may need to expand on this list, but as this is not a real
# LDAP server we should be OK.
# Value to contain:
# numbers, upper/lower case letters, astrisk, at symbol, minus, full
# stop, backslash or a space
v = pyparsing.Word(pyparsing.alphanums + "-*@.\\ äöü")
rel = pyparsing.oneOf("= ~= >= <=")
expr = pyparsing.Forward()
atom = pyparsing.Group(lpar + op + expr + rpar) \
| pyparsing.Combine(lpar + k + rel + v + rpar)
expr << atom + pyparsing.ZeroOrMore( expr )
return expr
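    # Illustrative note (editorial, not part of the original mock): for a
    # compound filter such as u"(&(cn=alice)(objectClass=person))" the grammar
    # above yields ['&', 'cn=alice', 'objectClass=person'] from
    # expr.parseString(...).asList()[0]. That nested-list form is what
    # search() walks when it dispatches on self.operation.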
@staticmethod
def _deDuplicate(results):
found = dict()
deDuped = list()
for entry in results:
dn = entry.get("dn")
if not dn in found:
found[dn] = 1
deDuped.append(entry)
return deDuped
def _invert_results(self, candidates):
inverted_candidates = list(self.directory)
for candidate in candidates:
try:
inverted_candidates.remove(candidate)
except ValueError:
pass
return inverted_candidates
def _search_not(self, base, search_filter, candidates=None):
# Create empty candidates list as we need to use self.directory for
# each search
candidates = list()
this_filter = list()
index = 0
search_filter.remove("!")
for condition in search_filter:
if not isinstance(condition, list):
this_filter.append(condition)
index +=1
# Remove this_filter items from search_filter list
for condition in this_filter:
search_filter.remove(condition)
try:
search_filter = list(search_filter[0])
for sub_filter in search_filter:
if not isinstance(sub_filter, list):
candidates = self.operation.get(sub_filter)(base,
search_filter,
candidates)
else:
candidates = self.operation.get(sub_filter[0])(base,
sub_filter,
candidates)
except IndexError:
pass
candidates = self._invert_results(candidates)
for item in this_filter:
if ">=" in item:
k, v = item.split(">=")
candidates = Connection._match_less_than(base, k, v,
self.directory)
elif "<=" in item:
k, v = item.split("<=")
candidates = Connection._match_greater_than(base, k, v,
self.directory)
# Emulate AD functionality, same as "="
elif "~=" in item:
k, v = item.split("~=")
candidates = Connection._match_notequal_to(base, k, v,
self.directory)
elif "=" in item:
k, v = item.split("=")
candidates = Connection._match_notequal_to(base, k, v,
self.directory)
return candidates
def _search_and(self, base, search_filter, candidates=None):
# Load the data from the directory, if we aren't passed any
if candidates == [] or candidates is None:
candidates = self.directory
this_filter = list()
index = 0
search_filter.remove("&")
for condition in search_filter:
if not isinstance(condition, list):
this_filter.append(condition)
index +=1
# Remove this_filter items from search_filter list
for condition in this_filter:
search_filter.remove(condition)
try:
search_filter = list(search_filter[0])
for sub_filter in search_filter:
if not isinstance(sub_filter, list):
candidates = self.operation.get(sub_filter)(base,
search_filter,
candidates)
else:
candidates = self.operation.get(sub_filter[0])(base,
sub_filter,
candidates)
except IndexError:
pass
for item in this_filter:
if ">=" in item:
k, v = item.split(">=")
candidates = Connection._match_greater_than_or_equal(base, k, v,
candidates)
elif "<=" in item:
k, v = item.split("<=")
candidates = Connection._match_less_than_or_equal(base, k, v,
candidates)
# Emulate AD functionality, same as "="
elif "~=" in item:
k, v = item.split("~=")
candidates = Connection._match_equal_to(base, k, v,
candidates)
elif "=" in item:
k, v = item.split("=")
candidates = Connection._match_equal_to(base, k, v,
candidates)
return candidates
def _search_or(self, base, search_filter, candidates=None):
# Create empty candidates list as we need to use self.directory for
# each search
candidates = list()
this_filter = list()
index = 0
search_filter.remove("|")
for condition in search_filter:
if not isinstance(condition, list):
this_filter.append(condition)
index +=1
# Remove this_filter items from search_filter list
for condition in this_filter:
search_filter.remove(condition)
try:
search_filter = list(search_filter[0])
for sub_filter in search_filter:
if not isinstance(sub_filter, list):
candidates += self.operation.get(sub_filter)(base,
search_filter,
candidates)
else:
candidates += self.operation.get(sub_filter[0])(base,
sub_filter,
candidates)
except IndexError:
pass
for item in this_filter:
if ">=" in item:
k, v = item.split(">=")
candidates += Connection._match_greater_than_or_equal(base, k, v,
self.directory)
elif "<=" in item:
k, v = item.split("<=")
candidates += Connection._match_less_than_or_equal(base, k, v,
self.directory)
# Emulate AD functionality, same as "="
elif "~=" in item:
k, v = item.split("~=")
candidates += Connection._match_equal_to(base, k, v,
self.directory)
elif "=" in item:
k, v = item.split("=")
candidates += Connection._match_equal_to(base, k, v,
self.directory)
return candidates
def search(self, search_base=None, search_scope=None,
search_filter=None, attributes=None, paged_size=5,
size_limit=0, paged_cookie=None):
s_filter = list()
candidates = list()
self.response = list()
self.result = dict()
try:
if isinstance(search_filter, bytes):
# We need to convert to unicode otherwise pyparsing will not
# find the u"ö"
search_filter = to_unicode(search_filter)
expr = Connection._parse_filter()
s_filter = expr.parseString(search_filter).asList()[0]
except pyparsing.ParseBaseException as exx:
# Just for debugging purposes
s = "{!s}".format(exx)
for item in s_filter:
if item[0] in self.operation:
candidates = self.operation.get(item[0])(search_base,
s_filter)
self.response = Connection._deDuplicate(candidates)
return True
def unbind(self):
return True
class Ldap3Mock(object):
def __init__(self):
self._calls = CallList()
self._server_mock = None
self.directory = []
self.exception = None
self.reset()
def reset(self):
self._calls.reset()
def setLDAPDirectory(self, directory=None):
if directory is None:
self.directory = []
else:
try:
with open(DIRECTORY, 'w+') as f:
f.write(str(directory))
self.directory = directory
except OSError as e:
raise
def set_exception(self, exc=True):
self.exception = exc
def _load_data(self, directory):
try:
with open(directory, 'r') as f:
data = f.read()
return literal_eval(data)
except OSError as e:
raise
@property
def calls(self):
return self._calls
def __enter__(self):
self.start()
def __exit__(self, *args):
self.stop()
self.reset()
def activate(self, func):
evaldict = {'ldap3mock': self, 'func': func}
return get_wrapped(func, _wrapper_template, evaldict)
def _on_Server(self, host, port, use_ssl, connect_timeout, get_info=None,
tls=None):
# mangle request packet
return "FakeServerObject"
def _on_Connection(self, server, user, password,
auto_bind=None, client_strategy=None,
authentication=None, check_names=None,
auto_referrals=None, receive_timeout=None):
"""
We need to create a Connection object with
methods:
add()
modify()
search()
unbind()
and object
response
"""
# Raise an exception, if we are told to do so
if self.exception:
raise Exception("LDAP request failed")
# check the password
correct_password = False
# Anonymous bind
# Reload the directory just in case a change has been made to
# user credentials
self.directory = self._load_data(DIRECTORY)
if authentication == ldap3.ANONYMOUS and user == "":
correct_password = True
for entry in self.directory:
if to_unicode(entry.get("dn")) == user:
pw = entry.get("attributes").get("userPassword")
# password can be unicode
if to_bytes(pw) == to_bytes(password):
correct_password = True
elif pw.startswith('{SSHA}'):
correct_password = ldap_salted_sha1.verify(password, pw)
else:
correct_password = False
self.con_obj = Connection(self.directory)
self.con_obj.bound = correct_password
return self.con_obj
def start(self):
import mock
def unbound_on_Server(host, port,
use_ssl,
connect_timeout, *a, **kwargs):
return self._on_Server(host, port,
use_ssl,
connect_timeout, *a, **kwargs)
self._server_mock = mock.MagicMock()
self._server_mock.side_effect = unbound_on_Server
self._patcher = mock.patch('ldap3.Server',
self._server_mock)
self._patcher.start()
def unbound_on_Connection(server, user,
password,
auto_bind,
client_strategy,
authentication,
check_names,
auto_referrals, *a, **kwargs):
return self._on_Connection(server, user,
password,
auto_bind,
client_strategy,
authentication,
check_names,
auto_referrals, *a,
**kwargs)
self._patcher2 = mock.patch('ldap3.Connection',
unbound_on_Connection)
self._patcher2.start()
def stop(self):
self._patcher.stop()
self._patcher2.stop()
self._server_mock = None
def get_server_mock(self):
return self._server_mock
# expose default mock namespace
mock = _default_mock = Ldap3Mock()
__all__ = []
for __attr in (a for a in dir(_default_mock) if not a.startswith('_')):
__all__.append(__attr)
globals()[__attr] = getattr(_default_mock, __attr)
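# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the module).
# A test would typically populate the fake directory, activate the patcher and
# then use ldap3 as usual; the DN, password and filter below are made up, and
# setLDAPDirectory() expects the DIRECTORY path above to be writable.
#
#   import ldap3
#   from . import ldap3mock
#
#   DIRECTORY_DATA = [
#       {"dn": "cn=alice,ou=example,o=test",
#        "attributes": {"cn": "alice",
#                       "objectClass": ["top", "person"],
#                       "userPassword": "alicepw"}},
#   ]
#
#   @ldap3mock.activate
#   def test_bind_and_search():
#       ldap3mock.setLDAPDirectory(DIRECTORY_DATA)
#       srv = ldap3.Server("ldap://fake", port=389, use_ssl=False,
#                          connect_timeout=5)
#       conn = ldap3.Connection(srv, user="cn=alice,ou=example,o=test",
#                               password="alicepw", auto_bind=True,
#                               client_strategy=ldap3.SYNC,
#                               authentication=ldap3.SIMPLE,
#                               check_names=True, auto_referrals=False)
#       assert conn.bind()
#       conn.search(search_base="ou=example,o=test",
#                   search_scope=ldap3.SUBTREE,
#                   search_filter="(&(cn=alice)(objectClass=person))",
#                   attributes=["cn"])
#       assert conn.response
# ---------------------------------------------------------------------------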
| privacyidea/privacyidea | tests/ldap3mock.py | Python | agpl-3.0 | 28,972 | 0.002106 |
import os, random, struct, sys
from Crypto.Cipher import AES
import getpass
from optparse import OptionParser
import hashlib
parser = OptionParser()
parser.add_option("-p")
(options, args) = parser.parse_args()
if(len(sys.argv) < 2):
print "usage: python aes_cmdl.py input_file_name <output_file_name> -p <password>"
sys.exit()
in_file = sys.argv[1]
no_out_file = False
if(len(sys.argv) == 3):
    out_file = sys.argv[2]
else:
    no_out_file = True
out_filename = in_file + '3125-9680.enc'
cwd = os.getcwd()
if(options.p):
password = options.p
else:
#password = raw_input("please specify your password")
password = getpass.getpass("please specify your password")
key = hashlib.sha256(password).digest()
def encrypt_file(key, in_filename, out_filename=None, chunksize=64*1024):
""" Encrypts a file using AES (CBC mode) with the
given key.
key:
The encryption key - a string that must be
either 16, 24 or 32 bytes long. Longer keys
are more secure.
in_filename:
Name of the input file
out_filename:
If None, '<in_filename>.enc' will be used.
chunksize:
Sets the size of the chunk which the function
uses to read and encrypt the file. Larger chunk
sizes can be faster for some files and machines.
chunksize must be divisible by 16.
"""
if not out_filename:
#no_out = True
out_filename = in_filename + '3125-9680.enc'
iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))
encryptor = AES.new(key, AES.MODE_CBC, iv)
filesize = os.path.getsize(in_filename)
with open(in_filename, 'rb') as infile:
with open(out_filename, 'wb') as outfile:
outfile.write(struct.pack('<Q', filesize))
outfile.write(iv)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += ' ' * (16 - len(chunk) % 16)
outfile.write(encryptor.encrypt(chunk))
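# Editorial addition, not part of the original script: a minimal decryption
# sketch for the format written by encrypt_file() above (8-byte little-endian
# plaintext size, 16-byte IV, space-padded AES-CBC ciphertext). The name
# decrypt_file and its arguments are illustrative only.
def decrypt_file(key, in_filename, out_filename, chunksize=24*1024):
    with open(in_filename, 'rb') as infile:
        # recover the original plaintext length and the IV written up front
        origsize = struct.unpack('<Q', infile.read(struct.calcsize('<Q')))[0]
        iv = infile.read(16)
        decryptor = AES.new(key, AES.MODE_CBC, iv)
        with open(out_filename, 'wb') as outfile:
            while True:
                chunk = infile.read(chunksize)
                if len(chunk) == 0:
                    break
                outfile.write(decryptor.decrypt(chunk))
            # drop the space padding appended during encryption
            outfile.truncate(origsize)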
encrypt_file(key, in_file)
with open(cwd + "/" + out_filename, "rb") as f:
#minlen = 12
for line in f:
sys.stdout.write(line)
if(no_out_file):
if sys.platform.startswith("linux"):
os.system("shred "+ cwd + "/" + out_filename)
os.remove(cwd + "/" + out_filename)
else:
os.remove(cwd + "/" + out_filename)
sys.exit(0)
| Bergurth/aes_cmdl.py | aes_cmdl.py | Python | gpl-3.0 | 2,518 | 0.004369 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ccx', '0026_auto_20170831_0420'),
('ccx', '0026_auto_20170831_0554'),
]
operations = [
]
| mbareta/edx-platform-ft | lms/djangoapps/ccx/migrations/0027_merge.py | Python | agpl-3.0 | 289 | 0 |
# File: htmllib-example-1.py
import htmllib
import formatter
import string
class Parser(htmllib.HTMLParser):
# return a dictionary mapping anchor texts to lists
# of associated hyperlinks
def __init__(self, verbose=0):
self.anchors = {}
f = formatter.NullFormatter()
htmllib.HTMLParser.__init__(self, f, verbose)
def anchor_bgn(self, href, name, type):
self.save_bgn()
        self.anchor = href
def anchor_end(self):
text = string.strip(self.save_end())
if self.anchor and text:
self.anchors[text] = self.anchors.get(text, []) + [self.anchor]
file = open("contemplate_his_majestic_personhood.html")
html = file.read()
file.close()
p = Parser()
p.feed(html)
p.close()
for k, v in p.anchors.items():
print k, "=>", v
print
| gregpuzzles1/Sandbox | htmllib-example-1.py | Python | gpl-3.0 | 812 | 0.002463 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeoImage.name'
db.delete_column('lizard_damage_geoimage', 'name')
# Adding field 'DamageEvent.landuse_slugs'
db.add_column('lizard_damage_damageevent', 'landuse_slugs', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Adding field 'DamageEvent.height_slugs'
db.add_column('lizard_damage_damageevent', 'height_slugs', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding field 'GeoImage.name'
db.add_column('lizard_damage_geoimage', 'name', self.gf('django.db.models.fields.CharField')(default='name', max_length=80), keep_default=False)
# Deleting field 'DamageEvent.landuse_slugs'
db.delete_column('lizard_damage_damageevent', 'landuse_slugs')
# Deleting field 'DamageEvent.height_slugs'
db.delete_column('lizard_damage_damageevent', 'height_slugs')
models = {
'lizard_damage.ahnindex': {
'Meta': {'object_name': 'AhnIndex', 'db_table': "u'data_index'"},
'ar': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bladnr': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'cellsize': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'datum': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'lo_x': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'lo_y': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'max_datum': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'min_datum': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'the_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '28992', 'null': 'True', 'blank': 'True'}),
'update': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_damage.benefitscenario': {
'Meta': {'object_name': 'BenefitScenario'},
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'zip_result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zip_risk_a': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'zip_risk_b': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'lizard_damage.benefitscenarioresult': {
'Meta': {'object_name': 'BenefitScenarioResult'},
'benefit_scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.BenefitScenario']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.damageevent': {
'Meta': {'object_name': 'DamageEvent'},
'floodmonth': ('django.db.models.fields.IntegerField', [], {'default': '9'}),
'floodtime': ('django.db.models.fields.FloatField', [], {}),
'height_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'landuse_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'repairtime_buildings': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repairtime_roads': ('django.db.models.fields.FloatField', [], {'default': '432000'}),
'repetition_time': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'table': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_damage.damageeventresult': {
'Meta': {'object_name': 'DamageEventResult'},
'damage_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
'east': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.damageeventwaterlevel': {
'Meta': {'ordering': "(u'index',)", 'object_name': 'DamageEventWaterlevel'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'waterlevel': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'lizard_damage.damagescenario': {
'Meta': {'object_name': 'DamageScenario'},
'calc_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'damagetable': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scenario_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'lizard_damage.geoimage': {
'Meta': {'object_name': 'GeoImage'},
'east': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'north': ('django.db.models.fields.FloatField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'south': ('django.db.models.fields.FloatField', [], {}),
'west': ('django.db.models.fields.FloatField', [], {})
},
'lizard_damage.roads': {
'Meta': {'object_name': 'Roads', 'db_table': "u'data_roads'"},
'gid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'gridcode': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'the_geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '28992', 'null': 'True', 'blank': 'True'}),
'typeinfr_1': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'typeweg': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'})
},
'lizard_damage.unit': {
'Meta': {'object_name': 'Unit'},
'factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_damage']
| lizardsystem/lizard-damage | lizard_damage/migrations/0004_auto__del_field_geoimage_name__add_field_damageevent_landuse_slugs__ad.py | Python | gpl-3.0 | 10,066 | 0.007451 |
# encoding: utf-8
# module PyKDE4.kdecore
# from /usr/lib/python3/dist-packages/PyKDE4/kdecore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtNetwork as __PyQt4_QtNetwork
from .KTimeZone import KTimeZone
class KTzfileTimeZone(KTimeZone):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdecore/KTzfileTimeZone.py | Python | gpl-2.0 | 414 | 0.009662 |
# coding=utf-8
"""Auto pull request pep8 plugin"""
import subprocess
from git import Repo
from . import MASTER_BRANCH
from .base import AutoPullRequestPluginInterface, section_order
from ..nodes import NumberedList, DescriptionNode, CodeNode, NodeList, HeaderNode
class Pep8Plugin(AutoPullRequestPluginInterface):
def _get_diff_against_master(self):
repo = Repo('.git')
return repo.git.diff(MASTER_BRANCH)
def _get_pep8_compliance(self, diff):
process = subprocess.Popen(['pep8', '--diff'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = process.communicate(diff)
if errors:
raise Exception(errors)
else:
return filter(None, output.strip().split('\n'))
@section_order(10)
def section_pep8_standards_compliance(self):
diff = self._get_diff_against_master()
pep8_compliance = self._get_pep8_compliance(diff)
if pep8_compliance:
value = NodeList([
HeaderNode('%d pep8 errors' % len(pep8_compliance), level=4),
NumberedList([CodeNode(item) for item in pep8_compliance])
])
else:
value = DescriptionNode('100% pep8 compliant!')
return value
| gxx/auto_pull_request | auto_pull_request/plugins/pep8_info.py | Python | gpl-2.0 | 1,315 | 0.001521 |
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
class GPRRFA:
"""Random Feature Approximation for Gaussian Process Regression
Estimation and prediction of Bayesian linear regression models
Basic usage::
R = GPRRFA()
hyp = R.estimate(hyp0, X, y)
ys,s2 = R.predict(hyp, X, y, Xs)
where the variables are
:param hyp: vector of hyperparmaters.
:param X: N x D data array
:param y: 1D Array of targets (length N)
:param Xs: Nte x D array of test cases
:param hyp0: starting estimates for hyperparameter optimisation
:returns: * ys - predictive mean
* s2 - predictive variance
The hyperparameters are::
hyp = [ log(sn), log(ell), log(sf) ] # hyp is a numpy array
where sn^2 is the noise variance, ell are lengthscale parameters and
sf^2 is the signal variance. This provides an approximation to the
covariance function::
        k(x,z) = x'*z + sf2*exp(-0.5*(x-z)'*Lambda*(x-z))
where Lambda = diag((ell_1^2, ... ell_D^2))
Written by A. Marquand
"""
def __init__(self, hyp=None, X=None, y=None, n_feat=None,
n_iter=100, tol=1e-3, verbose=False):
self.hyp = np.nan
self.nlZ = np.nan
self.tol = tol # not used at present
self.Nf = n_feat
self.n_iter = n_iter
self.verbose = verbose
self._n_restarts = 5
if (hyp is not None) and (X is not None) and (y is not None):
self.post(hyp, X, y)
def _numpy2torch(self, X, y=None, hyp=None):
if type(X) is torch.Tensor:
pass
elif type(X) is np.ndarray:
X = torch.from_numpy(X)
else:
            raise ValueError('Unknown data type (X)')
X = X.double()
if y is not None:
if type(y) is torch.Tensor:
pass
elif type(y) is np.ndarray:
y = torch.from_numpy(y)
else:
                raise ValueError('Unknown data type (y)')
if len(y.shape) == 1:
y.resize_(y.shape[0],1)
y = y.double()
if hyp is not None:
if type(hyp) is torch.Tensor:
pass
else:
hyp = torch.tensor(hyp, requires_grad=True)
return X, y, hyp
def get_n_params(self, X):
return X.shape[1] + 2
def post(self, hyp, X, y):
""" Generic function to compute posterior distribution.
This function will save the posterior mean and precision matrix as
self.m and self.A and will also update internal parameters (e.g.
N, D and the prior covariance (Sigma) and precision (iSigma).
"""
# make sure all variables are the right type
X, y, hyp = self._numpy2torch(X, y, hyp)
self.N, self.Dx = X.shape
# ensure the number of features is specified (use 75% as a default)
if self.Nf is None:
self.Nf = int(0.75 * self.N)
self.Omega = torch.zeros((self.Dx, self.Nf), dtype=torch.double)
for f in range(self.Nf):
self.Omega[:,f] = torch.exp(hyp[1:-1]) * \
torch.randn((self.Dx, 1), dtype=torch.double).squeeze()
XO = torch.mm(X, self.Omega)
self.Phi = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \
torch.cat((torch.cos(XO), torch.sin(XO)), 1)
# concatenate linear weights
self.Phi = torch.cat((self.Phi, X), 1)
self.D = self.Phi.shape[1]
if self.verbose:
print("estimating posterior ... | hyp=", hyp)
self.A = torch.mm(torch.t(self.Phi), self.Phi) / torch.exp(2*hyp[0]) + \
torch.eye(self.D, dtype=torch.double)
self.m = torch.mm(torch.solve(torch.t(self.Phi), self.A)[0], y) / \
torch.exp(2*hyp[0])
# save hyperparameters
self.hyp = hyp
# update optimizer iteration count
if hasattr(self,'_iterations'):
self._iterations += 1
def loglik(self, hyp, X, y):
""" Function to compute compute log (marginal) likelihood """
X, y, hyp = self._numpy2torch(X, y, hyp)
# always recompute the posterior
self.post(hyp, X, y)
#logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A))))
try:
# compute the log determinants in a numerically stable way
logdetA = 2*torch.sum(torch.log(torch.diag(torch.cholesky(self.A))))
except Exception as e:
print("Warning: Estimation of posterior distribution failed")
print(e)
#nlZ = torch.tensor(1/np.finfo(float).eps)
nlZ = torch.tensor(np.nan)
self._optim_failed = True
return nlZ
# compute negative marginal log likelihood
nlZ = -0.5 * (self.N*torch.log(1/torch.exp(2*hyp[0])) -
self.N*np.log(2*np.pi) -
torch.mm(torch.t(y - torch.mm(self.Phi,self.m)),
(y - torch.mm(self.Phi,self.m))) /
torch.exp(2*hyp[0]) -
torch.mm(torch.t(self.m), self.m) - logdetA)
if self.verbose:
print("nlZ= ", nlZ, " | hyp=", hyp)
# save marginal likelihood
self.nlZ = nlZ
return nlZ
def dloglik(self, hyp, X, y):
""" Function to compute derivatives """
print("derivatives not available")
return
def estimate(self, hyp0, X, y, optimizer='lbfgs'):
""" Function to estimate the model """
if type(hyp0) is torch.Tensor:
hyp = hyp0
hyp0.requires_grad_()
else:
hyp = torch.tensor(hyp0, requires_grad=True)
# save the starting values
self.hyp0 = hyp
if optimizer.lower() == 'lbfgs':
opt = torch.optim.LBFGS([hyp])
else:
raise(ValueError, "Optimizer " + " not implemented")
self._iterations = 0
def closure():
opt.zero_grad()
nlZ = self.loglik(hyp, X, y)
if not torch.isnan(nlZ):
nlZ.backward()
return nlZ
for r in range(self._n_restarts):
self._optim_failed = False
nlZ = opt.step(closure)
if self._optim_failed:
print("optimization failed. retrying (", r+1, "of",
self._n_restarts,")")
hyp = torch.randn_like(hyp, requires_grad=True)
self.hyp0 = hyp
else:
print("Optimzation complete after", self._iterations,
"evaluations. Function value =",
nlZ.detach().numpy().squeeze())
break
return self.hyp.detach().numpy()
def predict(self, hyp, X, y, Xs):
""" Function to make predictions from the model """
X, y, hyp = self._numpy2torch(X, y, hyp)
Xs, *_ = self._numpy2torch(Xs)
if (hyp != self.hyp).all() or not(hasattr(self, 'A')):
self.post(hyp, X, y)
# generate prediction tensors
XsO = torch.mm(Xs, self.Omega)
Phis = torch.exp(hyp[-1])/np.sqrt(self.Nf) * \
torch.cat((torch.cos(XsO), torch.sin(XsO)), 1)
# add linear component
Phis = torch.cat((Phis, Xs), 1)
ys = torch.mm(Phis, self.m)
# compute diag(Phis*(Phis'\A)) avoiding computing off-diagonal entries
s2 = torch.exp(2*hyp[0]) + \
torch.sum(Phis * torch.t(torch.solve(torch.t(Phis), self.A)[0]), 1)
# return output as numpy arrays
return ys.detach().numpy().squeeze(), s2.detach().numpy().squeeze()
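# Editorial usage sketch (not part of the original module): fit the model to a
# toy 1-D regression problem, mirroring the "Basic usage" docstring above. The
# data, seed and starting hyperparameters below are illustrative only.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.uniform(-3.0, 3.0, (200, 1))
    y = np.sin(X[:, 0]) + 0.1 * np.random.randn(200)
    Xs = np.linspace(-3.0, 3.0, 50)[:, np.newaxis]

    R = GPRRFA(n_feat=100)
    hyp0 = np.zeros(R.get_n_params(X))      # [log(sn), log(ell), log(sf)]
    hyp = R.estimate(hyp0, X, y)
    ys, s2 = R.predict(hyp, X, y, Xs)
    print("predictive mean:", ys[:5])
    print("predictive variance:", s2[:5])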
| amarquand/nispat | pcntoolkit/model/rfa.py | Python | gpl-3.0 | 7,985 | 0.007013 |
# -*- coding: utf-8 -*-
from django.http import HttpResponse, HttpRequest, QueryDict, HttpResponseRedirect
import json
import conekta
from store.models import *
from store.forms import *
### API REQUESTS FOR THE SHOPPING CART
def delBasket(request):
id = str(request.GET.get('id'))
if request.GET.get('id'):
liston = request.session['basket']
if id in liston:
liston.remove(id)
request.session['basket'] = liston
msg = 'Success'
status = 'ok'
else:
msg = 'Error: Product not found in basket'
status = 'failed'
else:
msg = "Success"
status = 'ok'
try:
del request.session['basket']
except KeyError:
msg = 'Error: Cant delete basket'
status = 'failed'
"""
response_data = {}
response_data['result'] = status
response_data['message'] = msg
callback = request.GET.get('callback', '')
response = json.dumps(response_data)
response = callback + '(' + response + ');'
return HttpResponse(response,content_type="application/json")
"""
return HttpResponseRedirect("/store/checkout/")
def setBasket(request):
id = str(request.GET.get('id'))
if id.isdigit():
if request.session.get('basket',False):
            # The basket has already been defined in the session
liston = request.session['basket']
if id in liston:
msg = 'Error: product already exists'
status = 'failed'
else:
liston.append(id)
request.session['basket'] = liston
msg = 'Success'
status = 'ok'
else:
# No se ha definido
msg = 'Success'
status = 'ok'
request.session['basket'] = [id]
else:
        msg = 'Error in request'
status = 'failed'
response_data = {}
response_data['result'] = status
response_data['message'] = msg
callback = request.GET.get('callback', '')
response = json.dumps(response_data)
response = callback + '(' + response + ');'
return HttpResponse(response,content_type="application/json")
import pprint
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def conektaio(request):
try:
data = json.loads(request.body)
except:
data = False
if data:
try:
pedido = Pedido.objects.get(custom=data['data']['object']['reference_id'])
except:
pedido = False
if pedido:
dato = { "status": "success" ,"id": pedido.id, "nombre":pedido.payment }
if data['data']['object']['status'] == "paid":
pedido.paid=True
pedido.save()
numero = 200
else:
debug = Debug.objects.create(texto=data)
debug.save()
dato = { "status":"ergo" }
numero = 200
else:
dato = { "status":"error" }
numero = 400
return HttpResponse(dato['status'],content_type="application/json",status=numero)
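# Editorial note: conektaio() only inspects data['data']['object']['reference_id']
# and data['data']['object']['status'], so the minimal JSON body it expects looks
# like the following (values are illustrative and not Conekta's full webhook
# schema):
#
#   {"data": {"object": {"reference_id": "<Pedido.custom>", "status": "paid"}}}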
#### END API
| zeickan/Django-Store | store/api.py | Python | apache-2.0 | 3,203 | 0.029044 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import itertools
import json
import time
from collections import defaultdict
from eventlet import Timeout
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
get_reconciler_container_name, get_row_to_q_entry_translator)
from swift.common import db_replicator
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import DeviceUnavailable
from swift.common.http import is_success
from swift.common.db import DatabaseAlreadyExists
from swift.common.utils import (Timestamp, hash_path,
storage_directory, majority_size)
class ContainerReplicator(db_replicator.Replicator):
server_type = 'container'
brokerclass = ContainerBroker
datadir = DATADIR
default_port = 6201
def report_up_to_date(self, full_info):
reported_key_map = {
'reported_put_timestamp': 'put_timestamp',
'reported_delete_timestamp': 'delete_timestamp',
'reported_bytes_used': 'bytes_used',
'reported_object_count': 'count',
}
for reported, value_key in reported_key_map.items():
if full_info[reported] != full_info[value_key]:
return False
return True
def _gather_sync_args(self, replication_info):
parent = super(ContainerReplicator, self)
sync_args = parent._gather_sync_args(replication_info)
if len(POLICIES) > 1:
sync_args += tuple(replication_info[k] for k in
('status_changed_at', 'count',
'storage_policy_index'))
return sync_args
def _handle_sync_response(self, node, response, info, broker, http,
different_region):
parent = super(ContainerReplicator, self)
if is_success(response.status):
remote_info = json.loads(response.data)
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp(time.time())
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at.internal)
sync_timestamps = ('created_at', 'put_timestamp',
'delete_timestamp')
if any(info[key] != remote_info[key] for key in sync_timestamps):
broker.merge_timestamps(*(remote_info[key] for key in
sync_timestamps))
rv = parent._handle_sync_response(
node, response, info, broker, http, different_region)
return rv
def find_local_handoff_for_part(self, part):
"""
Look through devices in the ring for the first handoff device that was
identified during job creation as available on this node.
:returns: a node entry from the ring
"""
nodes = self.ring.get_part_nodes(part)
more_nodes = self.ring.get_more_nodes(part)
for node in itertools.chain(nodes, more_nodes):
if node['id'] in self._local_device_ids:
return node
return None
def get_reconciler_broker(self, timestamp):
"""
Get a local instance of the reconciler container broker that is
appropriate to enqueue the given timestamp.
:param timestamp: the timestamp of the row to be enqueued
:returns: a local reconciler broker
"""
container = get_reconciler_container_name(timestamp)
if self.reconciler_containers and \
container in self.reconciler_containers:
return self.reconciler_containers[container][1]
account = MISPLACED_OBJECTS_ACCOUNT
part = self.ring.get_part(account, container)
node = self.find_local_handoff_for_part(part)
if not node:
raise DeviceUnavailable(
'No mounted devices found suitable to Handoff reconciler '
'container %s in partition %s' % (container, part))
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = os.path.join(self.root, node['device'], db_dir, hsh + '.db')
broker = ContainerBroker(db_path, account=account, container=container)
if not os.path.exists(broker.db_file):
try:
broker.initialize(timestamp, 0)
except DatabaseAlreadyExists:
pass
if self.reconciler_containers is not None:
self.reconciler_containers[container] = part, broker, node['id']
return broker
def feed_reconciler(self, container, item_list):
"""
Add queue entries for rows in item_list to the local reconciler
container database.
:param container: the name of the reconciler container
:param item_list: the list of rows to enqueue
:returns: True if successfully enqueued
"""
try:
reconciler = self.get_reconciler_broker(container)
except DeviceUnavailable as e:
self.logger.warning('DeviceUnavailable: %s', e)
return False
self.logger.debug('Adding %d objects to the reconciler at %s',
len(item_list), reconciler.db_file)
try:
reconciler.merge_items(item_list)
except (Exception, Timeout):
self.logger.exception('UNHANDLED EXCEPTION: trying to merge '
'%d items to reconciler container %s',
len(item_list), reconciler.db_file)
return False
return True
def dump_to_reconciler(self, broker, point):
"""
Look for object rows for objects updates in the wrong storage policy
in broker with a ``ROWID`` greater than the rowid given as point.
:param broker: the container broker with misplaced objects
:param point: the last verified ``reconciler_sync_point``
:returns: the last successful enqueued rowid
"""
max_sync = broker.get_max_row()
misplaced = broker.get_misplaced_since(point, self.per_diff)
if not misplaced:
return max_sync
translator = get_row_to_q_entry_translator(broker)
errors = False
low_sync = point
while misplaced:
batches = defaultdict(list)
for item in misplaced:
container = get_reconciler_container_name(item['created_at'])
batches[container].append(translator(item))
for container, item_list in batches.items():
success = self.feed_reconciler(container, item_list)
if not success:
errors = True
point = misplaced[-1]['ROWID']
if not errors:
low_sync = point
misplaced = broker.get_misplaced_since(point, self.per_diff)
return low_sync
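# Rough illustration (assumption, not taken from this file): misplaced rows
# are grouped into hourly reconciler containers, so with a divisor of 3600 a
# row whose created_at is '1433786569.12345' would be queued under the
# container named '1433786400' (1433786569 // 3600 * 3600). The divisor is
# defined in swift.container.reconciler and is only assumed here.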
def _post_replicate_hook(self, broker, info, responses):
if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
return
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s' %
broker.db_file)
point = broker.get_reconciler_sync()
if not broker.has_multiple_policies() and info['max_row'] != point:
broker.update_reconciler_sync(info['max_row'])
return
max_sync = self.dump_to_reconciler(broker, point)
success = responses.count(True) >= majority_size(len(responses))
if max_sync > point and success:
# to be safe, only slide up the sync point with a majority on
# replication
broker.update_reconciler_sync(max_sync)
def delete_db(self, broker):
"""
Ensure that reconciler databases are only cleaned up at the end of the
replication run.
"""
if (self.reconciler_cleanups is not None and
broker.account == MISPLACED_OBJECTS_ACCOUNT):
# this container shouldn't be here, make sure it's cleaned up
self.reconciler_cleanups[broker.container] = broker
return
try:
# DB is going to get deleted. Be preemptive about it
self.sync_store.remove_synced_container(broker)
except Exception:
self.logger.exception('Failed to remove sync_store entry %s' %
broker.db_file)
return super(ContainerReplicator, self).delete_db(broker)
def replicate_reconcilers(self):
"""
Ensure any items merged to reconciler containers during replication
are pushed out to correct nodes and any reconciler containers that do
not belong on this node are removed.
"""
self.logger.info('Replicating %d reconciler containers',
len(self.reconciler_containers))
for part, reconciler, node_id in self.reconciler_containers.values():
self.cpool.spawn_n(
self._replicate_object, part, reconciler.db_file, node_id)
self.cpool.waitall()
# wipe out the cache to disable bypass in delete_db
cleanups = self.reconciler_cleanups
self.reconciler_cleanups = self.reconciler_containers = None
self.logger.info('Cleaning up %d reconciler containers',
len(cleanups))
for reconciler in cleanups.values():
self.cpool.spawn_n(self.delete_db, reconciler)
self.cpool.waitall()
self.logger.info('Finished reconciler replication')
def run_once(self, *args, **kwargs):
self.reconciler_containers = {}
self.reconciler_cleanups = {}
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
rv = super(ContainerReplicator, self).run_once(*args, **kwargs)
if any([self.reconciler_containers, self.reconciler_cleanups]):
self.replicate_reconcilers()
return rv
class ContainerReplicatorRpc(db_replicator.ReplicatorRpc):
def _parse_sync_args(self, args):
parent = super(ContainerReplicatorRpc, self)
remote_info = parent._parse_sync_args(args)
if len(args) > 9:
remote_info['status_changed_at'] = args[7]
remote_info['count'] = args[8]
remote_info['storage_policy_index'] = args[9]
return remote_info
def _get_synced_replication_info(self, broker, remote_info):
"""
Sync the remote_info storage_policy_index if needed and return the
newly synced replication info.
:param broker: the database broker
:param remote_info: the remote replication info
:returns: local broker replication info
"""
info = broker.get_replication_info()
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp(time.time()).internal
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at)
info = broker.get_replication_info()
return info
| larsbutler/swift | swift/container/replicator.py | Python | apache-2.0 | 12,083 | 0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.notifications import delete_notification_count_for
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.utils.user import get_enabled_system_users
from frappe.utils import cint
@frappe.whitelist()
def get_list(arg=None):
"""get list of messages"""
frappe.form_dict['limit_start'] = int(frappe.form_dict['limit_start'])
frappe.form_dict['limit_page_length'] = int(frappe.form_dict['limit_page_length'])
frappe.form_dict['user'] = frappe.session['user']
# set all messages as read
frappe.db.begin()
frappe.db.sql("""UPDATE `tabCommunication` set seen = 1
where
communication_type in ('Chat', 'Notification')
and reference_doctype = 'User'
and reference_name = %s""", frappe.session.user)
delete_notification_count_for("Messages")
frappe.local.flags.commit = True
if frappe.form_dict['contact'] == frappe.session['user']:
# return messages
return frappe.db.sql("""select * from `tabCommunication`
where
communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and (owner=%(contact)s
or reference_name=%(user)s
or owner=reference_name)
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
else:
return frappe.db.sql("""select * from `tabCommunication`
where
communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and ((owner=%(contact)s and reference_name=%(user)s)
or (owner=%(contact)s and reference_name=%(contact)s))
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
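# Hedged usage sketch (not part of the original file): since get_list is
# whitelisted it is normally called over the API using this module's dotted
# path; server-side the equivalent would look roughly like this, where the
# contact address is hypothetical.
#
#   frappe.form_dict.update({"contact": "someone@example.com",
#                            "limit_start": 0, "limit_page_length": 20})
#   messages = get_list()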
@frappe.whitelist()
def get_active_users():
data = frappe.db.sql("""select name,
(select count(*) from tabSessions where user=tabUser.name
and timediff(now(), lastupdate) < time("01:00:00")) as has_session
from tabUser
where enabled=1 and
ifnull(user_type, '')!='Website User' and
name not in ({})
order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
# make sure current user is at the top, using has_session = 100
users = [d.name for d in data]
if frappe.session.user in users:
data[users.index(frappe.session.user)]["has_session"] = 100
else:
# in case of administrator
data.append({"name": frappe.session.user, "has_session": 100})
return data
@frappe.whitelist()
def post(txt, contact, parenttype=None, notify=False, subject=None):
"""post message"""
d = frappe.new_doc('Communication')
d.communication_type = 'Notification' if parenttype else 'Chat'
d.subject = subject
d.content = txt
d.reference_doctype = 'User'
d.reference_name = contact
d.sender = frappe.session.user
d.insert(ignore_permissions=True)
delete_notification_count_for("Messages")
if notify and cint(notify):
if contact==frappe.session.user:
_notify([user.name for user in get_enabled_system_users()], txt)
else:
_notify(contact, txt, subject)
return d
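# Minimal usage sketch (assumption, not from the original file): posting a
# chat message to another user and notifying them by email; the recipient
# is a hypothetical User name the Communication gets linked to.
#
#   post("Build finished", "someone@example.com", notify=True,
#        subject="CI status")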
@frappe.whitelist()
def delete(arg=None):
frappe.get_doc("Communication", frappe.form_dict['name']).delete()
def _notify(contact, txt, subject=None):
from frappe.utils import get_fullname, get_url
try:
if not isinstance(contact, list):
contact = [frappe.db.get_value("User", contact, "email") or contact]
frappe.sendmail(
recipients=contact,
sender= frappe.db.get_value("User", frappe.session.user, "email"),
subject=subject or "New Message from " + get_fullname(frappe.session.user),
message=frappe.get_template("templates/emails/new_message.html").render({
"from": get_fullname(frappe.session.user),
"message": txt,
"link": get_url()
}),
bulk=True)
except frappe.OutgoingEmailError:
pass
| vCentre/vFRP-6233 | frappe/desk/page/messages/messages.py | Python | mit | 3,886 | 0.024447 |
import re
import sublime
import sublime_plugin
from ..show_error import show_error
from ..settings import pc_settings_filename
class AddChannelCommand(sublime_plugin.WindowCommand):
"""
A command to add a new channel (list of repositories) to the user's machine
"""
def run(self):
self.window.show_input_panel('Channel JSON URL', '',
self.on_done, self.on_change, self.on_cancel)
def on_done(self, input):
"""
Input panel handler - adds the provided URL as a channel
:param input:
A string of the URL to the new channel
"""
input = input.strip()
if re.match('https?://', input, re.I) == None:
show_error(u"Unable to add the channel \"%s\" since it does not appear to be served via HTTP (http:// or https://)." % input)
return
settings = sublime.load_settings(pc_settings_filename())
channels = settings.get('channels', [])
if not channels:
channels = []
channels.append(input)
settings.set('channels', channels)
sublime.save_settings(pc_settings_filename())
sublime.status_message(('Channel %s successfully ' +
'added') % input)
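# Illustrative note (assumption): after on_done runs, the user's
# "Package Control.sublime-settings" ends up with the new URL appended to
# its "channels" list, e.g.:
#
#   {
#       "channels": ["https://example.com/channel_v3.json"]
#   }
#
# The example URL is hypothetical; any http(s) URL passes the check above.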
def on_change(self, input):
pass
def on_cancel(self):
pass
| koery/win-sublime | Data/Packages/Package Control/package_control/commands/add_channel_command.py | Python | mit | 1,328 | 0.003012 |
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abseil Python logging module implemented on top of standard logging.
Simple usage:
from absl import logging
logging.info('Interesting Stuff')
logging.info('Interesting Stuff with Arguments: %d', 42)
logging.set_verbosity(logging.INFO)
logging.log(logging.DEBUG, 'This will *not* be printed')
logging.set_verbosity(logging.DEBUG)
logging.log(logging.DEBUG, 'This will be printed')
logging.warning('Worrying Stuff')
logging.error('Alarming Stuff')
logging.fatal('AAAAHHHHH!!!!') # Process exits.
Usage note: Do not pre-format the strings in your program code.
Instead, let the logging module perform argument interpolation.
This saves cycles because strings that don't need to be printed
are never formatted. Note that this module does not attempt to
interpolate arguments when no arguments are given. In other words
logging.info('Interesting Stuff: %s')
does not raise an exception because logging.info() has only one
argument, the message string.
"Lazy" evaluation for debugging:
If you do something like this:
logging.debug('Thing: %s', thing.ExpensiveOp())
then the ExpensiveOp will be evaluated even if nothing
is printed to the log. To avoid this, use the level_debug() function:
if logging.level_debug():
logging.debug('Thing: %s', thing.ExpensiveOp())
Notes on Unicode:
The log output is encoded as UTF-8. Don't pass data in other encodings in
bytes() instances -- instead pass unicode string instances when you need to
(for both the format string and arguments).
Note on critical and fatal:
Standard logging module defines fatal as an alias to critical, but it's not
documented, and it does NOT actually terminate the program.
This module only defines fatal but not critical, and it DOES terminate the
program.
The differences in behavior are historical and unfortunate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import getpass
import io
import itertools
import logging
import os
import socket
import struct
import sys
import time
import traceback
import warnings
from absl import flags
from absl.logging import converter
import six
if six.PY2:
import thread as _thread_lib # For .get_ident().
else:
import threading as _thread_lib # For .get_ident().
FLAGS = flags.FLAGS
# Logging levels.
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG
# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
r'^(?P<severity>[IWEF])'
r'(?P<month>\d\d)(?P<day>\d\d) '
r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
r'\.(?P<microsecond>\d\d\d\d\d\d) +'
r'(?P<thread_id>-?\d+) '
r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')
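# Example (not part of the original source) of a line the prefix regex above
# matches, given the named groups it defines:
#
#   "I0604 13:45:21.123456  4567 my_module.py:42] something happened"
#
# severity='I', month='06', day='04', time=13:45:21.123456,
# thread_id='4567', filename='my_module.py', line='42'.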
# Mask to convert integer thread ids to unsigned quantities for logging purposes
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1
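# Worked example (illustrative only): on a platform where
# struct.calcsize('L') == 8 the mask is 2 ** 64 - 1, i.e.
# 0xFFFFFFFFFFFFFFFF, so negative thread ids wrap to their unsigned 64-bit
# representation; with a 4-byte long the mask is 2 ** 32 - 1 instead.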
# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '
# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')
# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None
_CPP_NAME_TO_LEVELS = {
'debug': '0', # Abseil C++ has no DEBUG level, mapping it to INFO here.
'info': '0',
'warning': '1',
'warn': '1',
'error': '2',
'fatal': '3'
}
_CPP_LEVEL_TO_NAMES = {
'0': 'info',
'1': 'warning',
'2': 'error',
'3': 'fatal',
}
class _VerbosityFlag(flags.Flag):
"""Flag class for -v/--verbosity."""
def __init__(self, *args, **kwargs):
super(_VerbosityFlag, self).__init__(
flags.IntegerParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
self._value = v
self._update_logging_levels()
def _update_logging_levels(self):
"""Updates absl logging levels to the current verbosity."""
if not _absl_logger:
return
if self._value <= converter.ABSL_DEBUG:
standard_verbosity = converter.absl_to_standard(self._value)
else:
# --verbosity is set to higher than 1 for vlog.
standard_verbosity = logging.DEBUG - (self._value - 1)
# Also update root level when absl_handler is used.
if _absl_handler in logging.root.handlers:
logging.root.setLevel(standard_verbosity)
class _StderrthresholdFlag(flags.Flag):
"""Flag class for --stderrthreshold."""
def __init__(self, *args, **kwargs):
super(_StderrthresholdFlag, self).__init__(
flags.ArgumentParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
if v in _CPP_LEVEL_TO_NAMES:
# --stderrthreshold also accepts numeric strings whose values are
# Abseil C++ log levels.
cpp_value = int(v)
v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings.
elif v.lower() in _CPP_NAME_TO_LEVELS:
v = v.lower()
if v == 'warn':
v = 'warning' # Use 'warning' as the canonical name.
cpp_value = int(_CPP_NAME_TO_LEVELS[v])
else:
raise ValueError(
'--stderrthreshold must be one of (case-insensitive) '
"'debug', 'info', 'warning', 'error', 'fatal', "
"or '0', '1', '2', '3', not '%s'" % v)
self._value = v
flags.DEFINE_boolean('logtostderr',
False,
'Should only log to stderr?', allow_override_cpp=True)
flags.DEFINE_boolean('alsologtostderr',
False,
'also log to stderr?', allow_override_cpp=True)
flags.DEFINE_string('log_dir',
os.getenv('TEST_TMPDIR', ''),
'directory to write logfiles into',
allow_override_cpp=True)
flags.DEFINE_flag(_VerbosityFlag(
'verbosity', -1,
'Logging verbosity level. Messages logged at this level or lower will '
'be included. Set to 1 for debug logging. If the flag was not set or '
'supplied, the value will be changed from the default of -1 (warning) to '
'0 (info) after flags are parsed.',
short_name='v', allow_hide_cpp=True))
flags.DEFINE_flag(_StderrthresholdFlag(
'stderrthreshold', 'fatal',
'log messages at this level, or more severe, to stderr in '
'addition to the logfile. Possible values are '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'Obsoletes --alsologtostderr. Using --alsologtostderr '
'cancels the effect of this flag. Please also note that '
'this flag is subject to --verbosity and requires logfile '
'not be stderr.', allow_hide_cpp=True))
flags.DEFINE_boolean('showprefixforinfo', True,
'If False, do not prepend prefix to info messages '
'when it\'s logged to stderr, '
'--verbosity is set to INFO level, '
'and python logging is used.')
def get_verbosity():
"""Returns the logging verbosity."""
return FLAGS['verbosity'].value
def set_verbosity(v):
"""Sets the logging verbosity.
Causes all messages of level <= v to be logged,
and all messages of level > v to be silently discarded.
Args:
v: int|str, the verbosity level as an integer or string. Legal string values
are those that can be coerced to an integer as well as case-insensitive
'debug', 'info', 'warning', 'error', and 'fatal'.
"""
try:
new_level = int(v)
except ValueError:
new_level = converter.ABSL_NAMES[v.upper()]
FLAGS.verbosity = new_level
def set_stderrthreshold(s):
"""Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
"""
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
'set_stderrthreshold only accepts integer absl logging level '
'from -3 to 1, or case-insensitive string values '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'But found "{}" ({}).'.format(s, type(s)))
def fatal(msg, *args, **kwargs):
"""Logs a fatal message."""
log(FATAL, msg, *args, **kwargs)
def error(msg, *args, **kwargs):
"""Logs an error message."""
log(ERROR, msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""Logs a warning message."""
log(WARNING, msg, *args, **kwargs)
if six.PY2:
warn = warning # Deprecated function.
else:
def warn(msg, *args, **kwargs):
"""Deprecated, use 'warning' instead."""
warnings.warn("The 'warn' function is deprecated, use 'warning' instead",
DeprecationWarning, 2)
log(WARNING, msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""Logs an info message."""
log(INFO, msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""Logs a debug message."""
log(DEBUG, msg, *args, **kwargs)
def exception(msg, *args):
"""Logs an exception, with traceback and message."""
error(msg, *args, exc_info=True)
# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}
def _get_next_log_count_per_token(token):
"""Wrapper for _log_counter_per_token. Thread-safe.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0).
"""
# Can't use a defaultdict because defaultdict isn't atomic, whereas
# setdefault is.
return next(_log_counter_per_token.setdefault(token, itertools.count()))
def log_every_n(level, msg, n, *args):
"""Logs 'msg % args' at level 'level' once per 'n' times.
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n: int, the number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
"""
count = _get_next_log_count_per_token(get_absl_logger().findCaller())
log_if(level, msg, not (count % n), *args)
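# Usage sketch (not from the original module): inside a tight loop this logs
# only the 1st, 11th, 21st, ... iteration.
#
#   for i in range(100):
#       log_every_n(INFO, 'processed %d items', 10, i)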
def log_first_n(level, msg, n, *args):
"""Logs 'msg % args' at level 'level' only first 'n' times.
Not threadsafe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n: int, the maximal number of times the message is logged.
*args: The args to be substituted into the msg.
"""
count = _get_next_log_count_per_token(get_absl_logger().findCaller())
log_if(level, msg, count < n, *args)
def log_if(level, msg, condition, *args):
"""Logs 'msg % args' at level 'level' only if condition is fulfilled."""
if condition:
log(level, msg, *args)
def log(level, msg, *args, **kwargs):
"""Logs 'msg % args' at absl logging level 'level'.
If no args are given just print msg, ignoring any interpolation specifiers.
Args:
level: int, the absl logging level at which to log the message
(logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
level constants are also supported, callers should prefer explicit
logging.vlog() calls for such purpose.
msg: str, the message to be logged.
*args: The args to be substituted into the msg.
**kwargs: May contain exc_info to add exception traceback to message.
"""
if level > converter.ABSL_DEBUG:
# Even though this function supports level that is greater than 1, users
# should use logging.vlog instead for such cases.
# Treat this as vlog, 1 is equivalent to DEBUG.
standard_level = converter.STANDARD_DEBUG - (level - 1)
else:
if level < converter.ABSL_FATAL:
level = converter.ABSL_FATAL
standard_level = converter.absl_to_standard(level)
_absl_logger.log(standard_level, msg, *args, **kwargs)
def vlog(level, msg, *args, **kwargs):
"""Log 'msg % args' at C++ vlog level 'level'.
Args:
level: int, the C++ verbose logging level at which to log the message,
e.g. 1, 2, 3, 4... While absl level constants are also supported,
callers should prefer logging.log|debug|info|... calls for such purpose.
msg: str, the message to be logged.
*args: The args to be substituted into the msg.
**kwargs: May contain exc_info to add exception traceback to message.
"""
log(level, msg, *args, **kwargs)
def flush():
"""Flushes all log files."""
get_absl_handler().flush()
def level_debug():
"""Returns True if debug logging is turned on."""
return get_verbosity() >= DEBUG
def level_info():
"""Returns True if info logging is turned on."""
return get_verbosity() >= INFO
def level_warning():
"""Returns True if warning logging is turned on."""
return get_verbosity() >= WARNING
level_warn = level_warning # Deprecated function.
def level_error():
"""Returns True if error logging is turned on."""
return get_verbosity() >= ERROR
def get_log_file_name(level=INFO):
"""Returns the name of the log file.
For Python logging, only one file is used and level is ignored. An empty
string is returned if it logs to stderr/stdout or the log stream has no
`name` attribute.
Args:
level: int, the absl.logging level.
Raises:
ValueError: Raised when `level` has an invalid value.
"""
if level not in converter.ABSL_LEVELS:
raise ValueError('Invalid absl.logging level {}'.format(level))
stream = get_absl_handler().python_handler.stream
if (stream == sys.stderr or stream == sys.stdout or
not hasattr(stream, 'name')):
return ''
else:
return stream.name
def find_log_dir_and_names(program_name=None, log_dir=None):
"""Computes the directory and filename prefix for log file.
Args:
program_name: str|None, the filename part of the path to the program that
is running without its extension. e.g: if your program is called
'usr/bin/foobar.py' this method should probably be called with
program_name='foobar' However, this is just a convention, you can
pass in any string you want, and it will be used as part of the
log filename. If you don't pass in anything, the default behavior
is as described in the example. In python standard logging mode,
the program_name will be prepended with py_ if the program_name
argument is omitted.
log_dir: str|None, the desired log directory.
Returns:
(log_dir, file_prefix, symlink_prefix)
"""
if not program_name:
# Strip the extension (foobar.par becomes foobar, and
# fubar.py becomes fubar). We do this so that the log
# file names are similar to C++ log file names.
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# Prepend py_ to files so that python code gets a unique file, and
# so that C++ libraries do not try to write to the same log files as us.
program_name = 'py_%s' % program_name
actual_log_dir = find_log_dir(log_dir=log_dir)
username = getpass.getuser()
hostname = socket.gethostname()
file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)
return actual_log_dir, file_prefix, program_name
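# Illustrative example (values are assumptions): for a program invoked as
# /usr/bin/foobar.py on host "worker1" by user "alice" with --log_dir unset,
# this returns roughly
#
#   ('/tmp/', 'py_foobar.worker1.alice.log', 'py_foobar')
#
# assuming /tmp/ is the first writable candidate chosen by find_log_dir().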
def find_log_dir(log_dir=None):
"""Returns the most suitable directory to put log files into.
Args:
log_dir: str|None, if specified, the logfile(s) will be created in that
directory. Otherwise if the --log_dir command-line flag is provided,
the logfile will be created in that directory. Otherwise the logfile
will be created in a standard location.
"""
# Get a list of possible log dirs (will try to use them in order).
if log_dir:
# log_dir was explicitly specified as an arg, so use it and it alone.
dirs = [log_dir]
elif FLAGS['log_dir'].value:
# log_dir flag was provided, so use it and it alone (this mimics the
# behavior of the same flag in logging.cc).
dirs = [FLAGS['log_dir'].value]
else:
dirs = ['/tmp/', './']
# Find the first usable log dir.
for d in dirs:
if os.path.isdir(d) and os.access(d, os.W_OK):
return d
_absl_logger.fatal("Can't find a writable directory for logs, tried %s", dirs)
def get_absl_log_prefix(record):
"""Returns the absl log prefix for the log record.
Args:
record: logging.LogRecord, the record to get prefix for.
"""
created_tuple = time.localtime(record.created)
created_microsecond = int(record.created % 1.0 * 1e6)
critical_prefix = ''
level = record.levelno
if _is_non_absl_fatal_record(record):
# When the level is FATAL, but not logged from absl, lower the level so
# it's treated as ERROR.
level = logging.ERROR
critical_prefix = _CRITICAL_PREFIX
severity = converter.get_initial_for_level(level)
return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
severity,
created_tuple.tm_mon,
created_tuple.tm_mday,
created_tuple.tm_hour,
created_tuple.tm_min,
created_tuple.tm_sec,
created_microsecond,
_get_thread_id(),
record.filename,
record.lineno,
critical_prefix)
def skip_log_prefix(func):
"""Skips reporting the prefix of a given function or name by ABSLLogger.
This is a convenience wrapper function / decorator for
`ABSLLogger.register_frame_to_skip`.
If a callable function is provided, only that function will be skipped.
If a function name is provided, all functions with the same name in the
file that this is called in will be skipped.
This can be used as a decorator of the intended function to be skipped.
Args:
func: Callable function or its name as a string.
Returns:
func (the input, unchanged).
Raises:
ValueError: The input is callable but does not have a function code object.
TypeError: The input is neither callable nor a string.
"""
if callable(func):
func_code = getattr(func, '__code__', None)
if func_code is None:
raise ValueError('Input callable does not have a function code object.')
file_name = func_code.co_filename
func_name = func_code.co_name
func_lineno = func_code.co_firstlineno
elif isinstance(func, six.string_types):
file_name = get_absl_logger().findCaller()[0]
func_name = func
func_lineno = None
else:
raise TypeError('Input is neither callable nor a string.')
ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)
return func
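# Decorator usage sketch (assumption, not part of the original file): log
# lines emitted from inside helper() are attributed to helper's caller
# rather than to helper itself.
#
#   @skip_log_prefix
#   def helper():
#       info('reporting for my caller')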
def _is_non_absl_fatal_record(log_record):
return (log_record.levelno >= logging.FATAL and
not log_record.__dict__.get(_ABSL_LOG_FATAL, False))
def _is_absl_fatal_record(log_record):
return (log_record.levelno >= logging.FATAL and
log_record.__dict__.get(_ABSL_LOG_FATAL, False))
# Indicates if we still need to warn about pre-init logs going to stderr.
_warn_preinit_stderr = True
class PythonHandler(logging.StreamHandler):
"""The handler class used by Abseil Python logging implementation."""
def __init__(self, stream=None, formatter=None):
super(PythonHandler, self).__init__(stream)
self.setFormatter(formatter or PythonFormatter())
def start_logging_to_file(self, program_name=None, log_dir=None):
"""Starts logging messages to files instead of standard error."""
FLAGS.logtostderr = False
actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
program_name=program_name, log_dir=log_dir)
basename = '%s.INFO.%s.%d' % (
file_prefix,
time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
os.getpid())
filename = os.path.join(actual_log_dir, basename)
if six.PY2:
self.stream = open(filename, 'a')
else:
self.stream = open(filename, 'a', encoding='utf-8')
# os.symlink is not available on Windows Python 2.
if getattr(os, 'symlink', None):
# Create a symlink to the log file with a canonical name.
symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
try:
if os.path.islink(symlink):
os.unlink(symlink)
os.symlink(os.path.basename(filename), symlink)
except EnvironmentError:
# If it fails, we're sad but it's no error. Commonly, this
# fails because the symlink was created by another user and so
# we can't modify it
pass
def use_absl_log_file(self, program_name=None, log_dir=None):
"""Conditionally logs to files, based on --logtostderr."""
if FLAGS['logtostderr'].value:
self.stream = sys.stderr
else:
self.start_logging_to_file(program_name=program_name, log_dir=log_dir)
def flush(self):
"""Flushes all log files."""
self.acquire()
try:
self.stream.flush()
except (EnvironmentError, ValueError):
# A ValueError is thrown if we try to flush a closed file.
pass
finally:
self.release()
def _log_to_stderr(self, record):
"""Emits the record to stderr.
This temporarily sets the handler stream to stderr, calls
StreamHandler.emit, then reverts the stream back.
Args:
record: logging.LogRecord, the record to log.
"""
# emit() is protected by a lock in logging.Handler, so we don't need to
# protect here again.
old_stream = self.stream
self.stream = sys.stderr
try:
super(PythonHandler, self).emit(record)
finally:
self.stream = old_stream
def emit(self, record):
"""Prints a record out to some streams.
If FLAGS.logtostderr is set, it will print to sys.stderr ONLY.
If FLAGS.alsologtostderr is set, it will print to sys.stderr.
If FLAGS.logtostderr is not set, it will log to the stream
associated with the current thread.
Args:
record: logging.LogRecord, the record to emit.
"""
# People occasionally call logging functions at import time before
# our flags may even have been defined, let alone parsed, as we
# rely on the C++ side to define some flags for us and app init to
# deal with parsing. Match the C++ library behavior of notify and emit
# such messages to stderr. It encourages people to clean-up and does
# not hide the message.
level = record.levelno
if not FLAGS.is_parsed(): # Also implies "before flag has been defined".
global _warn_preinit_stderr
if _warn_preinit_stderr:
sys.stderr.write(
'WARNING: Logging before flag parsing goes to stderr.\n')
_warn_preinit_stderr = False
self._log_to_stderr(record)
elif FLAGS['logtostderr'].value:
self._log_to_stderr(record)
else:
super(PythonHandler, self).emit(record)
stderr_threshold = converter.string_to_standard(
FLAGS['stderrthreshold'].value)
if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
self.stream != sys.stderr):
self._log_to_stderr(record)
# Die when the record is created from ABSLLogger and level is FATAL.
if _is_absl_fatal_record(record):
self.flush() # Flush the log before dying.
# In threaded python, sys.exit() from a non-main thread only
# exits the thread in question.
os.abort()
def close(self):
"""Closes the stream to which we are writing."""
self.acquire()
try:
self.flush()
try:
# Do not close the stream if it's sys.stderr|stdout. They may be
# redirected or overridden to files, which should be managed by users
# explicitly.
if self.stream not in (sys.stderr, sys.stdout) and (
not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
self.stream.close()
except ValueError:
# A ValueError is thrown if we try to run isatty() on a closed file.
pass
super(PythonHandler, self).close()
finally:
self.release()
class ABSLHandler(logging.Handler):
"""Abseil Python logging module's log handler."""
def __init__(self, python_logging_formatter):
super(ABSLHandler, self).__init__()
self._python_handler = PythonHandler(formatter=python_logging_formatter)
self.activate_python_handler()
def format(self, record):
return self._current_handler.format(record)
def setFormatter(self, fmt):
self._current_handler.setFormatter(fmt)
def emit(self, record):
self._current_handler.emit(record)
def flush(self):
self._current_handler.flush()
def close(self):
super(ABSLHandler, self).close()
self._current_handler.close()
def handle(self, record):
rv = self.filter(record)
if rv:
return self._current_handler.handle(record)
return rv
@property
def python_handler(self):
return self._python_handler
def activate_python_handler(self):
"""Uses the Python logging handler as the current logging handler."""
self._current_handler = self._python_handler
def use_absl_log_file(self, program_name=None, log_dir=None):
self._current_handler.use_absl_log_file(program_name, log_dir)
def start_logging_to_file(self, program_name=None, log_dir=None):
self._current_handler.start_logging_to_file(program_name, log_dir)
class PythonFormatter(logging.Formatter):
"""Formatter class used by PythonHandler."""
def format(self, record):
"""Appends the message from the record to the results of the prefix.
Args:
record: logging.LogRecord, the record to be formatted.
Returns:
The formatted string representing the record.
"""
if (not FLAGS['showprefixforinfo'].value and
FLAGS['verbosity'].value == converter.ABSL_INFO and
record.levelno == logging.INFO and
_absl_handler.python_handler.stream == sys.stderr):
prefix = ''
else:
prefix = get_absl_log_prefix(record)
return prefix + super(PythonFormatter, self).format(record)
class ABSLLogger(logging.getLoggerClass()):
"""A logger that will create LogRecords while skipping some stack frames.
This class maintains an internal list of filenames and method names
for use when determining who called the currently executing stack
frame. Any method names from specific source files are skipped when
walking backwards through the stack.
Client code should use the register_frame_to_skip method to let the
ABSLLogger know which method from which file should be
excluded from the walk backwards through the stack.
"""
_frames_to_skip = set()
def findCaller(self, stack_info=False):
"""Finds the frame of the calling method on the stack.
This method skips any frames registered with the
ABSLLogger and any methods from this file, and whatever
method is currently being used to generate the prefix for the log
line. Then it returns the file name, line number, and method name
of the calling method.
Args:
stack_info: bool, when using Python 3 and True, include the stack trace as
the fourth item returned instead of None.
Returns:
(filename, lineno, methodname[, sinfo]) of the calling method.
"""
f_to_skip = ABSLLogger._frames_to_skip
# Use sys._getframe(2) instead of logging.currentframe(), it's slightly
# faster because there is one less frame to traverse.
frame = sys._getframe(2) # pylint: disable=protected-access
while frame:
code = frame.f_code
if (_LOGGING_FILE_PREFIX not in code.co_filename and
(code.co_filename, code.co_name,
code.co_firstlineno) not in f_to_skip and
(code.co_filename, code.co_name) not in f_to_skip):
if six.PY2:
return (code.co_filename, frame.f_lineno, code.co_name)
else:
sinfo = None
if stack_info:
out = io.StringIO()
out.write('Stack (most recent call last):\n')
traceback.print_stack(frame, file=out)
sinfo = out.getvalue().rstrip('\n')
out.close()
return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
frame = frame.f_back
def critical(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'CRITICAL'."""
self.log(logging.CRITICAL, msg, *args, **kwargs)
def fatal(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'FATAL'."""
self.log(logging.FATAL, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'ERROR'."""
self.log(logging.ERROR, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'WARN'."""
if six.PY3:
warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
DeprecationWarning, 2)
self.log(logging.WARN, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'WARNING'."""
self.log(logging.WARNING, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'INFO'."""
self.log(logging.INFO, msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'DEBUG'."""
self.log(logging.DEBUG, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""Logs a message at a cetain level substituting in the supplied arguments.
This method behaves differently in python and c++ modes.
Args:
level: int, the standard logging level at which to log the message.
msg: str, the text of the message to log.
*args: The arguments to substitute in the message.
**kwargs: The keyword arguments to substitute in the message.
"""
if level >= logging.FATAL:
# Add property to the LogRecord created by this logger.
# This will be used by the ABSLHandler to determine whether it should
# treat CRITICAL/FATAL logs as really FATAL.
extra = kwargs.setdefault('extra', {})
extra[_ABSL_LOG_FATAL] = True
super(ABSLLogger, self).log(level, msg, *args, **kwargs)
def handle(self, record):
"""Calls handlers without checking Logger.disabled.
Non-root loggers are set to disabled after setup with logging.config if
it's not explicitly specified. Historically, absl logging will not be
disabled by that. To maintain this behavior, this function skips
checking the Logger.disabled bit.
This logger can still be disabled by adding a filter that filters out
everything.
Args:
record: logging.LogRecord, the record to handle.
"""
if self.filter(record):
self.callHandlers(record)
@classmethod
def register_frame_to_skip(cls, file_name, function_name, line_number=None):
"""Registers a function name to skip when walking the stack.
The ABSLLogger sometimes skips method calls on the stack
to make the log messages meaningful in their appropriate context.
This method registers a function from a particular file as one
which should be skipped.
Args:
file_name: str, the name of the file that contains the function.
function_name: str, the name of the function to skip.
line_number: int, if provided, only the function with this starting line
number will be skipped. Otherwise, all functions with the same name
in the file will be skipped.
"""
if line_number is not None:
cls._frames_to_skip.add((file_name, function_name, line_number))
else:
cls._frames_to_skip.add((file_name, function_name))
def _get_thread_id():
"""Gets id of current thread, suitable for logging as an unsigned quantity.
If pywrapbase is linked, returns GetTID() for the thread ID to be
consistent with C++ logging. Otherwise, returns the numeric thread id.
The quantities are made unsigned by masking with 2*sys.maxint + 1.
Returns:
Thread ID unique to this process (unsigned)
"""
thread_id = _thread_lib.get_ident()
return thread_id & _THREAD_ID_MASK
def get_absl_logger():
"""Returns the absl logger instance."""
return _absl_logger
def get_absl_handler():
"""Returns the absl handler instance."""
return _absl_handler
def use_python_logging(quiet=False):
"""Uses the python implementation of the logging code.
Args:
quiet: No logging message about switching logging type.
"""
get_absl_handler().activate_python_handler()
if not quiet:
info('Restoring pure python logging')
def use_absl_handler():
"""Uses the ABSL logging handler for logging if not yet configured.
The absl handler is already attached to root if there are no other handlers
attached when importing this module.
Otherwise, this method is called in app.run() so absl handler is used.
"""
absl_handler = get_absl_handler()
if absl_handler not in logging.root.handlers:
logging.root.addHandler(absl_handler)
FLAGS['verbosity']._update_logging_levels() # pylint: disable=protected-access
def _initialize():
"""Initializes loggers and handlers."""
global _absl_logger, _absl_handler
if _absl_logger:
return
original_logger_class = logging.getLoggerClass()
logging.setLoggerClass(ABSLLogger)
_absl_logger = logging.getLogger('absl')
logging.setLoggerClass(original_logger_class)
python_logging_formatter = PythonFormatter()
_absl_handler = ABSLHandler(python_logging_formatter)
# The absl handler logs to stderr by default. To prevent double logging to
# stderr, the following code tries its best to remove other handlers that emit
# to stderr. Those handlers are most commonly added when logging.info/debug is
# called before importing this module.
handlers = [
h for h in logging.root.handlers
if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
for h in handlers:
logging.root.removeHandler(h)
# The absl handler will always be attached to root, not the absl logger.
if not logging.root.handlers:
# Attach the absl handler at import time when there are no other handlers.
# Otherwise it means users have explicitly configured logging, and the absl
# handler will only be attached later in app.run(). For App Engine apps,
# the absl handler is not used.
logging.root.addHandler(_absl_handler)
# Initialize absl logger.
# Must be called after logging flags in this module are defined.
_initialize()
| ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/absl/logging/__init__.py | Python | mit | 35,420 | 0.007143 |
"""Test queues inspection SB APIs."""
from __future__ import print_function
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestQueues(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues()
@skipUnlessDarwin
@add_test_categories(['pyapi'])
def test_with_python_api_queues_with_backtrace(self):
"""Test queues inspection SB APIs."""
self.build()
self.queues_with_libBacktraceRecording()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "main.c"
def check_queue_for_valid_queue_id(self, queue):
self.assertTrue(
queue.GetQueueID() != 0, "Check queue %s for valid QueueID (got 0x%x)" %
(queue.GetName(), queue.GetQueueID()))
def check_running_and_pending_items_on_queue(
self, queue, expected_running, expected_pending):
self.assertTrue(
queue.GetNumPendingItems() == expected_pending,
"queue %s should have %d pending items, instead has %d pending items" %
(queue.GetName(),
expected_pending,
(queue.GetNumPendingItems())))
self.assertTrue(
queue.GetNumRunningItems() == expected_running,
"queue %s should have %d running items, instead has %d running items" %
(queue.GetName(),
expected_running,
(queue.GetNumRunningItems())))
def describe_threads(self):
desc = []
for x in self.inferior_process:
id = x.GetIndexID()
reason_str = lldbutil.stop_reason_to_str(x.GetStopReason())
location = "\t".join([lldbutil.get_description(
x.GetFrameAtIndex(i)) for i in range(x.GetNumFrames())])
desc.append(
"thread %d: %s (queue id: %s) at\n\t%s" %
(id, reason_str, x.GetQueueID(), location))
print('\n'.join(desc))
def check_number_of_threads_owned_by_queue(self, queue, number_threads):
if (queue.GetNumThreads() != number_threads):
self.describe_threads()
self.assertTrue(
queue.GetNumThreads() == number_threads,
"queue %s should have %d thread executing, but has %d" %
(queue.GetName(),
number_threads,
queue.GetNumThreads()))
def check_queue_kind(self, queue, kind):
expected_kind_string = "Unknown"
if kind == lldb.eQueueKindSerial:
expected_kind_string = "Serial queue"
if kind == lldb.eQueueKindConcurrent:
expected_kind_string = "Concurrent queue"
actual_kind_string = "Unknown"
if queue.GetKind() == lldb.eQueueKindSerial:
actual_kind_string = "Serial queue"
if queue.GetKind() == lldb.eQueueKindConcurrent:
actual_kind_string = "Concurrent queue"
self.assertTrue(
queue.GetKind() == kind,
"queue %s is expected to be a %s but it is actually a %s" %
(queue.GetName(),
expected_kind_string,
actual_kind_string))
def check_queues_threads_match_queue(self, queue):
for idx in range(0, queue.GetNumThreads()):
t = queue.GetThreadAtIndex(idx)
self.assertTrue(
t.IsValid(), "Queue %s's thread #%d must be valid" %
(queue.GetName(), idx))
self.assertTrue(
t.GetQueueID() == queue.GetQueueID(),
"Queue %s has a QueueID of %d but its thread #%d has a QueueID of %d" %
(queue.GetName(),
queue.GetQueueID(),
idx,
t.GetQueueID()))
self.assertTrue(
t.GetQueueName() == queue.GetName(),
"Queue %s has a QueueName of %s but its thread #%d has a QueueName of %s" %
(queue.GetName(),
queue.GetName(),
idx,
t.GetQueueName()))
self.assertTrue(
t.GetQueue().GetQueueID() == queue.GetQueueID(),
"Thread #%d's Queue's QueueID of %d is not the same as the QueueID of its owning queue %d" %
(idx,
t.GetQueue().GetQueueID(),
queue.GetQueueID()))
def queues(self):
"""Test queues inspection SB APIs without libBacktraceRecording."""
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
break1 = target.BreakpointCreateByName("stopper", 'a.out')
self.assertTrue(break1, VALID_BREAKPOINT)
process = target.LaunchSimple(
[], None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
self.inferior_process = process
queue_submittor_1 = lldb.SBQueue()
queue_performer_1 = lldb.SBQueue()
queue_performer_2 = lldb.SBQueue()
queue_performer_3 = lldb.SBQueue()
for idx in range(0, process.GetNumQueues()):
q = process.GetQueueAtIndex(idx)
if q.GetName() == "com.apple.work_submittor_1":
queue_submittor_1 = q
if q.GetName() == "com.apple.work_performer_1":
queue_performer_1 = q
if q.GetName() == "com.apple.work_performer_2":
queue_performer_2 = q
if q.GetName() == "com.apple.work_performer_3":
queue_performer_3 = q
self.assertTrue(
queue_submittor_1.IsValid() and queue_performer_1.IsValid() and queue_performer_2.IsValid() and queue_performer_3.IsValid(),
"Got all four expected queues: %s %s %s %s" %
(queue_submittor_1.IsValid(),
queue_performer_1.IsValid(),
queue_performer_2.IsValid(),
queue_performer_3.IsValid()))
self.check_queue_for_valid_queue_id(queue_submittor_1)
self.check_queue_for_valid_queue_id(queue_performer_1)
self.check_queue_for_valid_queue_id(queue_performer_2)
self.check_queue_for_valid_queue_id(queue_performer_3)
self.check_number_of_threads_owned_by_queue(queue_submittor_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_2, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_3, 4)
self.check_queue_kind(queue_submittor_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_2, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_3, lldb.eQueueKindConcurrent)
self.check_queues_threads_match_queue(queue_submittor_1)
self.check_queues_threads_match_queue(queue_performer_1)
self.check_queues_threads_match_queue(queue_performer_2)
self.check_queues_threads_match_queue(queue_performer_3)
# We have threads running with all the different dispatch QoS service
# levels - find those threads and check that we can get the correct
# QoS name for each of them.
user_initiated_thread = lldb.SBThread()
user_interactive_thread = lldb.SBThread()
utility_thread = lldb.SBThread()
unspecified_thread = lldb.SBThread()
background_thread = lldb.SBThread()
for th in process.threads:
if th.GetName() == "user initiated QoS":
user_initiated_thread = th
if th.GetName() == "user interactive QoS":
user_interactive_thread = th
if th.GetName() == "utility QoS":
utility_thread = th
if th.GetName() == "unspecified QoS":
unspecified_thread = th
if th.GetName() == "background QoS":
background_thread = th
self.assertTrue(
user_initiated_thread.IsValid(),
"Found user initiated QoS thread")
self.assertTrue(
user_interactive_thread.IsValid(),
"Found user interactive QoS thread")
self.assertTrue(utility_thread.IsValid(), "Found utility QoS thread")
self.assertTrue(
unspecified_thread.IsValid(),
"Found unspecified QoS thread")
self.assertTrue(
background_thread.IsValid(),
"Found background QoS thread")
stream = lldb.SBStream()
self.assertTrue(
user_initiated_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for user initiated QoS thread")
self.assertTrue(
stream.GetData() == "User Initiated",
"user initiated QoS thread name is valid")
stream.Clear()
self.assertTrue(
user_interactive_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for user interactive QoS thread")
self.assertTrue(
stream.GetData() == "User Interactive",
"user interactive QoS thread name is valid")
stream.Clear()
self.assertTrue(
utility_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for utility QoS thread")
self.assertTrue(
stream.GetData() == "Utility",
"utility QoS thread name is valid")
stream.Clear()
self.assertTrue(
unspecified_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for unspecified QoS thread")
qosName = stream.GetData()
self.assertTrue(
qosName == "User Initiated" or qosName == "Default",
"unspecified QoS thread name is valid")
stream.Clear()
self.assertTrue(
background_thread.GetInfoItemByPathAsString(
"requested_qos.printable_name",
stream),
"Get QoS printable string for background QoS thread")
self.assertTrue(
stream.GetData() == "Background",
"background QoS thread name is valid")
@skipIfDarwin # rdar://50379398
def queues_with_libBacktraceRecording(self):
"""Test queues inspection SB APIs with libBacktraceRecording present."""
exe = self.getBuildArtifact("a.out")
if not os.path.isfile(
'/Applications/Xcode.app/Contents/Developer/usr/lib/libBacktraceRecording.dylib'):
self.skipTest(
"Skipped because libBacktraceRecording.dylib was present on the system.")
if not os.path.isfile(
'/usr/lib/system/introspection/libdispatch.dylib'):
self.skipTest(
"Skipped because introspection libdispatch dylib is not present.")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
break1 = target.BreakpointCreateByName("stopper", 'a.out')
self.assertTrue(break1, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
libbtr_path = "/Applications/Xcode.app/Contents/Developer/usr/lib/libBacktraceRecording.dylib"
if self.getArchitecture() in ['arm', 'arm64', 'arm64e', 'arm64_32', 'armv7', 'armv7k']:
libbtr_path = "/Developer/usr/lib/libBacktraceRecording.dylib"
process = target.LaunchSimple(
[],
[
'DYLD_INSERT_LIBRARIES=%s' % (libbtr_path),
'DYLD_LIBRARY_PATH=/usr/lib/system/introspection'],
self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
self.inferior_process = process
libbtr_module_filespec = lldb.SBFileSpec("libBacktraceRecording.dylib")
libbtr_module = target.FindModule(libbtr_module_filespec)
if not libbtr_module.IsValid():
self.skipTest(
"Skipped because libBacktraceRecording.dylib was not loaded into the process.")
self.assertTrue(
process.GetNumQueues() >= 4,
"Found the correct number of queues.")
queue_submittor_1 = lldb.SBQueue()
queue_performer_1 = lldb.SBQueue()
queue_performer_2 = lldb.SBQueue()
queue_performer_3 = lldb.SBQueue()
for idx in range(0, process.GetNumQueues()):
q = process.GetQueueAtIndex(idx)
if "LLDB_COMMAND_TRACE" in os.environ:
print("Queue with id %s has name %s" % (q.GetQueueID(), q.GetName()))
if q.GetName() == "com.apple.work_submittor_1":
queue_submittor_1 = q
if q.GetName() == "com.apple.work_performer_1":
queue_performer_1 = q
if q.GetName() == "com.apple.work_performer_2":
queue_performer_2 = q
if q.GetName() == "com.apple.work_performer_3":
queue_performer_3 = q
if q.GetName() == "com.apple.main-thread":
if q.GetNumThreads() == 0:
print("Cannot get thread <=> queue associations")
return
self.assertTrue(
queue_submittor_1.IsValid() and queue_performer_1.IsValid() and queue_performer_2.IsValid() and queue_performer_3.IsValid(),
"Got all four expected queues: %s %s %s %s" %
(queue_submittor_1.IsValid(),
queue_performer_1.IsValid(),
queue_performer_2.IsValid(),
queue_performer_3.IsValid()))
self.check_queue_for_valid_queue_id(queue_submittor_1)
self.check_queue_for_valid_queue_id(queue_performer_1)
self.check_queue_for_valid_queue_id(queue_performer_2)
self.check_queue_for_valid_queue_id(queue_performer_3)
self.check_running_and_pending_items_on_queue(queue_submittor_1, 1, 0)
self.check_running_and_pending_items_on_queue(queue_performer_1, 1, 3)
self.check_running_and_pending_items_on_queue(
queue_performer_2, 1, 9999)
self.check_running_and_pending_items_on_queue(queue_performer_3, 4, 0)
self.check_number_of_threads_owned_by_queue(queue_submittor_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_1, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_2, 1)
self.check_number_of_threads_owned_by_queue(queue_performer_3, 4)
self.check_queue_kind(queue_submittor_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_1, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_2, lldb.eQueueKindSerial)
self.check_queue_kind(queue_performer_3, lldb.eQueueKindConcurrent)
self.check_queues_threads_match_queue(queue_submittor_1)
self.check_queues_threads_match_queue(queue_performer_1)
self.check_queues_threads_match_queue(queue_performer_2)
self.check_queues_threads_match_queue(queue_performer_3)
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
0).IsValid(), "queue 2's pending item #0 is valid")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(0).GetAddress().GetSymbol(
).GetName() == "doing_the_work_2", "queue 2's pending item #0 should be doing_the_work_2")
self.assertTrue(
queue_performer_2.GetNumPendingItems() == 9999,
"verify that queue 2 still has 9999 pending items")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
9998).IsValid(), "queue 2's pending item #9998 is valid")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(9998).GetAddress().GetSymbol(
).GetName() == "doing_the_work_2", "queue 2's pending item #0 should be doing_the_work_2")
self.assertTrue(queue_performer_2.GetPendingItemAtIndex(
9999).IsValid() == False, "queue 2's pending item #9999 is invalid")
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/macosx/queues/TestQueues.py | Python | bsd-3-clause | 17,019 | 0.001351 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from devpi_common.request import new_requests_session
from devpi_slack import __version__
def devpiserver_indexconfig_defaults():
return {"slack_icon": None, "slack_hook": None, "slack_user": None}
def devpiserver_on_upload_sync(log, application_url, stage, project, version):
slack_hook = stage.ixconfig.get("slack_hook") or os.getenv("SLACK_HOOK")
slack_icon = stage.ixconfig.get("slack_icon") or os.getenv(
"SLACK_ICON", "http://doc.devpi.net/latest/_static/devpicat.jpg")
slack_user = stage.ixconfig.get(
"slack_user") or os.getenv("SLACK_USER", "devpi")
if not slack_hook:
return
session = new_requests_session(agent=("devpi-slack", __version__))
try:
r = session.post(
slack_hook,
data={
'payload': json.dumps({
"text": "Uploaded {}=={} to {}".format(
project,
version,
application_url
),
"icon_url": slack_icon,
"username": slack_user,
})
})
except session.Errors:
raise RuntimeError("%s: failed to send Slack notification %s",
project, slack_hook)
if 200 <= r.status_code < 300:
log.info("successfully sent Slack notification: %s", slack_hook)
else:
log.error("%s: failed to send Slack notification: %s", r.status_code,
slack_hook)
log.debug(r.content.decode('utf-8'))
raise RuntimeError("%s: failed to send Slack notification: %s",
project, slack_hook)
| innoteq/devpi-slack | devpi_slack/main.py | Python | bsd-3-clause | 1,761 | 0 |
########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Lib/Terminal.py,v 1.6.4.1 2006/09/18 17:05:25 jkloth Exp $
"""
Provides some of the information from the terminfo database.
Copyright 2005 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
import os, re, sys
from Ft.Lib.Terminfo import TERMTYPES as _ANSITERMS
from Ft.Lib.Terminfo import DEFAULT_LINES as _LINES
from Ft.Lib.Terminfo import DEFAULT_COLUMNS as _COLUMNS
if sys.platform == 'win32':
import msvcrt
from Ft.Lib import _win32con
elif os.name == 'posix':
_HAVE_TIOCGWINSZ = False
try:
import fcntl, termios, struct
except ImportError:
pass
else:
_HAVE_TIOCGWINSZ = hasattr(termios, 'TIOCGWINSZ')
# ISO 6429 color sequences are composed of sequences of numbers
# separated by semicolons. The most common codes are:
#
# 0 to restore default color
# 1 for brighter colors
# 4 for underlined text
# 5 for flashing text
# 30 for black foreground
# 31 for red foreground
# 32 for green foreground
# 33 for yellow (or brown) foreground
# 34 for blue foreground
# 35 for purple foreground
# 36 for cyan foreground
# 37 for white (or gray) foreground
# 40 for black background
# 41 for red background
# 42 for green background
# 43 for yellow (or brown) background
# 44 for blue background
# 45 for purple background
# 46 for cyan background
# 47 for white (or gray) background
class AnsiEscapes:
class Colors:
DEFAULT = '\033[0m'
BOLD = '\033[1m'
FOREGROUND_BLACK = '\033[30m'
FOREGROUND_MAROON = '\033[31m'
FOREGROUND_GREEN = '\033[32m'
FOREGROUND_BROWN = FOREGROUND_OLIVE = '\033[33m'
FOREGROUND_NAVY = '\033[34m'
FOREGROUND_PURPLE = '\033[35m'
FOREGROUND_TEAL = '\033[36m'
FOREGROUND_SILVER = '\033[37m'
FOREGROUND_GRAY = '\033[1;30m'
FOREGROUND_RED = '\033[1;31m'
FOREGROUND_LIME = '\033[1;32m'
FOREGROUND_YELLOW = '\033[1;33m'
FOREGROUND_BLUE = '\033[1;34m'
FOREGROUND_MAGENTA = FOREGROUND_FUCHSIA = '\033[1;35m'
FOREGROUND_CYAN = FOREGROUND_AQUA = '\033[1;36m'
FOREGROUND_WHITE = '\033[1;37m'
BACKGROUND_BLACK = '\033[40m'
BACKGROUND_MAROON = '\033[41m'
BACKGROUND_GREEN = '\033[42m'
BACKGROUND_BROWN = BACKGROUND_OLIVE = '\033[43m'
BACKGROUND_NAVY = '\033[44m'
BACKGROUND_PURPLE = '\033[45m'
BACKGROUND_TEAL = '\033[46m'
BACKGROUND_SILVER = '\033[47m'
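# Illustrative sketch (editor's addition): composing an ISO 6429 sequence from
# the constants above; "1;31" selects a bold red foreground and "0" restores
# the default attributes afterwards.
def _example_red(text):
    return AnsiEscapes.Colors.FOREGROUND_RED + text + AnsiEscapes.Colors.DEFAULT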
# Methods/members a Terminal instance should expose from its underlying stream.
_file_methods = ('flush', 'write', 'read', 'isatty', 'encoding')
class Terminal:
def __init__(self, stream, keepAnsiEscapes=True):
self._stream = stream
for name in _file_methods:
method = getattr(stream, name, None)
if method is not None:
setattr(self, name, method)
if self.isatty():
if sys.platform == 'win32':
self._init_win32(stream, keepAnsiEscapes)
elif os.name == 'posix' and os.environ.get('TERM') in _ANSITERMS:
self._init_posix(stream, keepAnsiEscapes)
return
def _init_win32(self, stream, keepAnsiEscapes):
# Apparently there exists an IDE where isatty() is True, but
# the stream doesn't have a backing file descriptor.
try:
fileno = stream.fileno()
except AttributeError:
return
# Get the Windows console handle of the file descriptor.
try:
self._handle = msvcrt.get_osfhandle(fileno)
except IOError:
return
if keepAnsiEscapes:
self._write_escape = self._escape_win32
self._default_attribute = \
_win32con.GetConsoleScreenBufferInfo(self._handle)[2]
self.size = self._size_win32
return
def _init_posix(self, stream, keepAnsiEscapes):
if keepAnsiEscapes:
# stream handles ANSI escapes natively
self.writetty = stream.write
if _HAVE_TIOCGWINSZ:
self.size = self._size_termios
return
def lines(self):
return self.size()[0]
def columns(self):
return self.size()[1]
def size(self):
return (_LINES, _COLUMNS)
# noop method for underlying streams which do not implement it
def flush(self):
return
# noop method for underlying streams which do not implement it
def write(self, str):
return
# noop method for underlying streams which do not implement it
def read(self, size=-1):
return ''
# noop method for underlying streams which do not implement it
def isatty(self):
return False
def close(self):
        # don't attempt to close a tty stream
if self.isatty():
return
# ignore any errors closing the underlying stream
try:
self._stream.close()
except:
pass
return
# ANSI Set Display Mode: ESC[#;...;#m
_ansi_sdm = re.compile('\033\\[([0-9]+)(?:;([0-9]+))*m')
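    # Illustrative note (editor's addition): for input such as
    # '\033[1;31merror\033[0m ok', writetty() below writes the escape-free
    # pieces ('error', ' ok') to the underlying stream and hands the matched
    # code groups ('1', '31') and ('0', None) to _write_escape().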
def writetty(self, bytes):
start = 0
match = self._ansi_sdm.search(bytes)
while match is not None:
# write everything up to the escape sequence
self._stream.write(bytes[start:match.start()])
# process the color codes
self._write_escape(match.groups())
# skip over the escape sequence
start = match.end()
# find the next sequence
match = self._ansi_sdm.search(bytes, start)
# write the remainder
self._stream.write(bytes[start:])
return
def _write_escape(self, codes):
"""
Escape function for handling ANSI Set Display Mode.
Default behavior is to simply ignore the call (e.g. nothing is added
to the output).
"""
return
# -- Terminal specific functions -----------------------------------
def _size_termios(self):
ws = struct.pack("HHHH", 0, 0, 0, 0)
ws = fcntl.ioctl(self._stream.fileno(), termios.TIOCGWINSZ, ws)
lines, columns, x, y = struct.unpack("HHHH", ws)
return (lines, columns)
def _escape_win32(self, codes):
"""Translates the ANSI color codes into the Win32 API equivalents."""
# get the current text attributes for the stream
size, cursor, attributes, window = \
_win32con.GetConsoleScreenBufferInfo(self._handle)
for code in map(int, filter(None, codes)):
if code == 0: # normal
# the default attribute
attributes = self._default_attribute
elif code == 1: # bold
# bold only applies to the foreground color
attributes |= _win32con.FOREGROUND_INTENSITY
elif code == 30: # black
attributes &= _win32con.BACKGROUND
elif code == 31: # red
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= _win32con.FOREGROUND_RED
elif code == 32: # green
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= _win32con.FOREGROUND_GREEN
elif code == 33: # brown (bold: yellow)
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= (_win32con.FOREGROUND_RED |
_win32con.FOREGROUND_GREEN)
elif code == 34: # blue
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= _win32con.FOREGROUND_BLUE
elif code == 35: # purple (bold: magenta)
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= (_win32con.FOREGROUND_RED |
_win32con.FOREGROUND_BLUE)
elif code == 36: # cyan
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= (_win32con.FOREGROUND_BLUE |
_win32con.FOREGROUND_GREEN)
elif code == 37: # gray (bold: white)
attributes &= (_win32con.FOREGROUND_INTENSITY |
_win32con.BACKGROUND)
attributes |= (_win32con.FOREGROUND_RED |
_win32con.FOREGROUND_GREEN |
_win32con.FOREGROUND_BLUE)
elif code == 40: # black
attributes &= _win32con.FOREGROUND
elif code == 41: # red
attributes &= _win32con.FOREGROUND
attributes |= _win32con.BACKGROUND_RED
elif code == 42: # green
attributes &= _win32con.FOREGROUND
attributes |= _win32con.BACKGROUND_GREEN
elif code == 43: # brown
attributes &= _win32con.FOREGROUND
attributes |= (_win32con.BACKGROUND_RED |
_win32con.BACKGROUND_GREEN)
elif code == 44: # blue
attributes &= _win32con.FOREGROUND
attributes |= _win32con.BACKGROUND_BLUE
elif code == 45: # purple
attributes &= _win32con.FOREGROUND
attributes |= (_win32con.BACKGROUND_RED |
_win32con.BACKGROUND_BLUE)
elif code == 46: # cyan
attributes &= _win32con.FOREGROUND
attributes |= (_win32con.BACKGROUND_BLUE |
_win32con.BACKGROUND_GREEN)
elif code == 47: # gray
attributes &= _win32con.FOREGROUND
attributes |= (_win32con.BACKGROUND_RED |
_win32con.BACKGROUND_GREEN |
_win32con.BACKGROUND_BLUE)
_win32con.SetConsoleTextAttribute(self._handle, attributes)
return
def _size_win32(self):
size, cursor, attributes, window = \
_win32con.GetConsoleScreenBufferInfo(self._handle)
left, top, right, bottom = window
# use the buffer size for the column width as Windows wraps text
# there instead of at the displayed window size
columns, lines = size
return (bottom - top, columns - 1)
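# Illustrative sketch (editor's addition): wrapping a stream and emitting a
# colored message.  On a POSIX ANSI terminal the escapes pass through, on a
# win32 console they are translated by _escape_win32, and on a non-tty stream
# they are silently dropped by the no-op _write_escape.
def _example_write_colored(message):
    term = Terminal(sys.stdout)
    term.writetty(AnsiEscapes.Colors.FOREGROUND_RED + message +
                  AnsiEscapes.Colors.DEFAULT)
    return term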
| Pikecillo/genna | external/4Suite-XML-1.0.2/Ft/Lib/Terminal.py | Python | gpl-2.0 | 11,043 | 0.002083 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in regularizers.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
def _check_penalty_number(x):
"""check penalty number availability, raise ValueError if failed."""
if not isinstance(x, (float, int)):
raise ValueError(('Value: {} is not a valid regularization penalty number, '
'expected an int or float value').format(x))
if math.isinf(x) or math.isnan(x):
raise ValueError(
('Value: {} is not a valid regularization penalty number, '
         'a positive/negative infinity or NaN is not a proper value'
).format(x))
def _none_to_default(inputs, default):
  return default if inputs is None else inputs
@keras_export('keras.regularizers.Regularizer')
class Regularizer(object):
"""Regularizer base class.
Regularizers allow you to apply penalties on layer parameters or layer
activity during optimization. These penalties are summed into the loss
function that the network optimizes.
Regularization penalties are applied on a per-layer basis. The exact API will
depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D` and
`Conv3D`) have a unified API.
These layers expose 3 keyword arguments:
- `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
- `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
- `activity_regularizer`: Regularizer to apply a penalty on the layer's output
All layers (including custom layers) expose `activity_regularizer` as a
settable property, whether or not it is in the constructor arguments.
The value returned by the `activity_regularizer` is divided by the input
batch size so that the relative weighting between the weight regularizers and
the activity regularizers does not change with the batch size.
You can access a layer's regularization penalties by calling `layer.losses`
after calling the layer on inputs.
## Example
>>> layer = tf.keras.layers.Dense(
... 5, input_dim=5,
... kernel_initializer='ones',
... kernel_regularizer=tf.keras.regularizers.L1(0.01),
... activity_regularizer=tf.keras.regularizers.L2(0.01))
>>> tensor = tf.ones(shape=(5, 5)) * 2.0
>>> out = layer(tensor)
>>> # The kernel regularization term is 0.25
>>> # The activity regularization term (after dividing by the batch size) is 5
>>> tf.math.reduce_sum(layer.losses)
<tf.Tensor: shape=(), dtype=float32, numpy=5.25>
## Available penalties
```python
tf.keras.regularizers.L1(0.3) # L1 Regularization Penalty
tf.keras.regularizers.L2(0.1) # L2 Regularization Penalty
tf.keras.regularizers.L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties
```
## Directly calling a regularizer
Compute a regularization loss on a tensor by directly calling a regularizer
as if it is a one-argument function.
E.g.
>>> regularizer = tf.keras.regularizers.L2(2.)
>>> tensor = tf.ones(shape=(5, 5))
>>> regularizer(tensor)
<tf.Tensor: shape=(), dtype=float32, numpy=50.0>
## Developing new regularizers
Any function that takes in a weight matrix and returns a scalar
tensor can be used as a regularizer, e.g.:
>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
... def l1_reg(weight_matrix):
... return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
...
>>> layer = tf.keras.layers.Dense(5, input_dim=5,
... kernel_initializer='ones', kernel_regularizer=l1_reg)
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]
Alternatively, you can write your custom regularizers in an
object-oriented way by extending this regularizer base class, e.g.:
>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
... class L2Regularizer(tf.keras.regularizers.Regularizer):
... def __init__(self, l2=0.): # pylint: disable=redefined-outer-name
... self.l2 = l2
...
... def __call__(self, x):
... return self.l2 * tf.math.reduce_sum(tf.math.square(x))
...
... def get_config(self):
... return {'l2': float(self.l2)}
...
>>> layer = tf.keras.layers.Dense(
... 5, input_dim=5, kernel_initializer='ones',
... kernel_regularizer=L2Regularizer(l2=0.5))
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]
### A note on serialization and deserialization:
Registering the regularizers as serializable is optional if you are just
training and executing models, exporting to and from SavedModels, or saving
and loading weight checkpoints.
Registration is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON. If using this functionality,
you must make sure any python process running your model has also defined
and registered your custom regularizer.
`tf.keras.utils.register_keras_serializable` is only available in TF 2.1 and
beyond. In earlier versions of TensorFlow you must pass your custom
regularizer to the `custom_objects` argument of methods that expect custom
regularizers to be registered as serializable.
"""
def __call__(self, x):
"""Compute a regularization penalty from an input tensor."""
return 0.
@classmethod
def from_config(cls, config):
"""Creates a regularizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same regularizer from the config
dictionary.
This method is used by Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A regularizer instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config of the regularizer.
    A regularizer config is a Python dictionary (serializable)
containing all configuration parameters of the regularizer.
The same regularizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Returns:
Python dictionary.
"""
raise NotImplementedError(str(self) + ' does not implement get_config()')
@keras_export('keras.regularizers.L1L2')
class L1L2(Regularizer):
"""A regularizer that applies both L1 and L2 regularization penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
  The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
L1L2 may be passed to a layer as a string identifier:
>>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2')
In this case, the default values used are `l1=0.01` and `l2=0.01`.
Attributes:
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
"""
def __init__(self, l1=0., l2=0.): # pylint: disable=redefined-outer-name
# The default value for l1 and l2 are different from the value in l1_l2
# for backward compatibility reason. Eg, L1L2(l2=0.1) will only have l2
# and no l1 penalty.
l1 = 0. if l1 is None else l1
l2 = 0. if l2 is None else l2
_check_penalty_number(l1)
_check_penalty_number(l2)
self.l1 = backend.cast_to_floatx(l1)
self.l2 = backend.cast_to_floatx(l2)
def __call__(self, x):
regularization = backend.constant(0., dtype=x.dtype)
if self.l1:
regularization += self.l1 * math_ops.reduce_sum(math_ops.abs(x))
if self.l2:
regularization += self.l2 * math_ops.reduce_sum(math_ops.square(x))
return regularization
def get_config(self):
return {'l1': float(self.l1), 'l2': float(self.l2)}
@keras_export('keras.regularizers.L1', 'keras.regularizers.l1')
class L1(Regularizer):
"""A regularizer that applies a L1 regularization penalty.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
L1 may be passed to a layer as a string identifier:
>>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1')
In this case, the default value used is `l1=0.01`.
Attributes:
l1: Float; L1 regularization factor.
"""
def __init__(self, l1=0.01, **kwargs): # pylint: disable=redefined-outer-name
l1 = kwargs.pop('l', l1) # Backwards compatibility
if kwargs:
raise TypeError('Argument(s) not recognized: %s' % (kwargs,))
l1 = 0.01 if l1 is None else l1
_check_penalty_number(l1)
self.l1 = backend.cast_to_floatx(l1)
def __call__(self, x):
return self.l1 * math_ops.reduce_sum(math_ops.abs(x))
def get_config(self):
return {'l1': float(self.l1)}
@keras_export('keras.regularizers.L2', 'keras.regularizers.l2')
class L2(Regularizer):
"""A regularizer that applies a L2 regularization penalty.
The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
L2 may be passed to a layer as a string identifier:
>>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2')
In this case, the default value used is `l2=0.01`.
Attributes:
l2: Float; L2 regularization factor.
"""
def __init__(self, l2=0.01, **kwargs): # pylint: disable=redefined-outer-name
l2 = kwargs.pop('l', l2) # Backwards compatibility
if kwargs:
raise TypeError('Argument(s) not recognized: %s' % (kwargs,))
l2 = 0.01 if l2 is None else l2
_check_penalty_number(l2)
self.l2 = backend.cast_to_floatx(l2)
def __call__(self, x):
return self.l2 * math_ops.reduce_sum(math_ops.square(x))
def get_config(self):
return {'l2': float(self.l2)}
@keras_export('keras.regularizers.l1_l2')
def l1_l2(l1=0.01, l2=0.01): # pylint: disable=redefined-outer-name
r"""Create a regularizer that applies both L1 and L2 penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
Args:
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
Returns:
An L1L2 Regularizer with the given regularization factors.
"""
return L1L2(l1=l1, l2=l2)
# Deserialization aliases.
l1 = L1
l2 = L2
@keras_export('keras.regularizers.serialize')
def serialize(regularizer):
return serialize_keras_object(regularizer)
@keras_export('keras.regularizers.deserialize')
def deserialize(config, custom_objects=None):
if config == 'l1_l2':
# Special case necessary since the defaults used for "l1_l2" (string)
# differ from those of the L1L2 class.
return L1L2(l1=0.01, l2=0.01)
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='regularizer')
@keras_export('keras.regularizers.get')
def get(identifier):
"""Retrieve a regularizer instance from a config or identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError(
'Could not interpret regularizer identifier: {}'.format(identifier))
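# Illustrative sketch (editor's addition, not part of the TensorFlow module):
# the three identifier forms accepted by get() above.
def _example_get_identifiers():
  by_string = get('l1_l2')  # string identifier -> L1L2(l1=0.01, l2=0.01)
  by_config = get({'class_name': 'L1', 'config': {'l1': 0.02}})  # config dict
  by_instance = get(L2(1e-4))  # an existing Regularizer is returned unchanged
  return by_string, by_config, by_instance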
| annarev/tensorflow | tensorflow/python/keras/regularizers.py | Python | apache-2.0 | 12,860 | 0.003421 |
#!/usr/bin/python2
import check
from fractions import gcd
# Algorithm taken from en.wikipedia.org/wiki/Line-line_intersection
# All code written by Joel Williamson
## intersection: Int Int Int Int Int Int Int Int -> (union "parallel" (tuple Int Int Int Int))
##
## Purpose: Treating the input as 4 pairs of integers, each representing the
## endpoint of a line, returns the intersection of the two lines, or
## "parallel" if they are parallel
##
## Effects:
##
## Example: intersection(-15,15,15,-15,-10,-10,10,10) => [0,1,0,1]
def intersection(x1, y1, x2, y2, x3, y3, x4, y4):
x_numerator = ((x1*y2-y1*x2)*(x3-x4) - (x1-x2)*(x3*y4-y3*x4))
denominator = (x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)
if (denominator == 0) :
return "parallel"
x_gcd = gcd(x_numerator,denominator)
y_numerator = (x1*y2-y1*x2)*(y3-y4)-(y1-y2)*(x3*y4-y3*x4)
y_gcd = gcd(y_numerator,denominator)
return (x_numerator/x_gcd,denominator/x_gcd,
y_numerator/y_gcd,denominator/y_gcd)
## Tests:
check.expect('Sample test', intersection(-15,15,15,-15,-10,-10,10,10), (0,1,0,1))
check.expect('Parallel', intersection(-10,-10,10,10,-20,-10,0,10),"parallel")
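## as_floats: (tuple Int Int Int Int) -> (tuple Float Float)
##
## Purpose: Illustrative helper (editor's addition, not part of the original
## submission): convert the exact rational point returned by intersection
## into floating-point coordinates, e.g. as_floats((0, 1, 0, 1)) => (0.0, 0.0)
def as_floats((x_num, x_den, y_num, y_den)):
    return (float(x_num) / x_den, float(y_num) / y_den)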
## point_range: (listof Int) (listof Int) (listof Int) (listof Int) (optional (tuple Int Int Int Int))
## -> (iterable (tuple Int Int Int Int))
##
## Purpose: Merges four lists of equal length into an iterable of points,
## optionally starting after the point specified by (init_x1,init_y1,initx2,inity2)
##
## Example: i_p = point_range([1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16])
## i_p.next() = (1,5,9,13)
## i_p.next() = (2,6,10,14)
def point_range(X1, Y1, X2, Y2, (init_x1, init_y1, init_x2, init_y2)=(None, None, None, None)):
if (init_x1 == None) :
started = True
else :
started = False
for i in range(len(X1)) :
if (not started and not((X1[i],Y1[i],X2[i],Y2[i]) == (init_x1,init_y1,init_x2,init_y2))) :
continue
elif (not started) :
started = True
continue
yield (X1[i],Y1[i],X2[i],Y2[i])
## pieces: Int Int (listof Int) (listof Int) (listof Int) (listof Int) -> Int
##
## Purpose: pieces takes the radius R of a circle, the number N of lines
##          dividing the circle, and four lists giving the endpoints of those
##          lines. It produces the number of pieces the lines divide the circle into.
##
## Effects:
##
## Examples: pieces(10,3,[-15,1,10],[15,12,4],[15,-6,-10],[-15,-12,-8]) => 7
## pieces(10,3,[0,-11,-11],[11,3,-1],[0,11,11],[-11,3,7]) => 6
def pieces(R, N, X1, Y1, X2, Y2):
segments = 1
for l1 in point_range(X1,Y1,X2,Y2) :
segments += 1
intersections = {}
for l2 in point_range(X1,Y1,X2,Y2,(l1[0],l1[1],l1[2],l1[3])) :
inter = intersection(l1[0],l1[1],l1[2],l1[3],l2[0],l2[1],l2[2],l2[3])
if (inter == "parallel") :
continue
if inter in intersections :
continue
if ((inter[0]*inter[0])/(inter[1]*inter[1]) + (inter[2]*inter[2])/(inter[3]*inter[3]) >= R*R) :
continue
intersections[inter] = True
segments += 1
return segments
## Tests:
check.expect('Example 1',pieces(10,3,[-15,1,10],[15,12,4],[15,-6,-10],[-15,-12,-8]),7)
check.expect('Example 2',pieces(10,3,[0,-11,-11],[11,3,-1],[0,11,11],[-11,3,7]),6)
# Be sure to do lots more of your own testing!
| joelwilliamson/cs234 | a1/a01q2b.py | Python | gpl-2.0 | 3,280 | 0.051829 |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from neo4j._exceptions import BoltHandshakeError
from neo4j.exceptions import ServiceUnavailable
from tests.integration.examples import DriverSetupExample
# isort: off
# tag::config-unencrypted-import[]
from neo4j import GraphDatabase
# end::config-unencrypted-import[]
# isort: on
# python -m pytest tests/integration/examples/test_config_unencrypted_example.py -s -v
class ConfigUnencryptedExample(DriverSetupExample):
# tag::config-unencrypted[]
def __init__(self, uri, auth):
self.driver = GraphDatabase.driver(uri, auth=auth, encrypted=False)
# end::config-unencrypted[]
def test_example(uri, auth):
try:
ConfigUnencryptedExample.test(uri, auth)
except ServiceUnavailable as error:
if isinstance(error.__cause__, BoltHandshakeError):
pytest.skip(error.args[0])
| neo4j/neo4j-python-driver | tests/integration/examples/test_config_unencrypted_example.py | Python | apache-2.0 | 1,486 | 0.000673 |
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.osm import Osm
from geocoder.w3w import W3W
from geocoder.bing import Bing
from geocoder.here import Here
from geocoder.yahoo import Yahoo
from geocoder.baidu import Baidu
from geocoder.tomtom import Tomtom
from geocoder.arcgis import Arcgis
from geocoder.ottawa import Ottawa
from geocoder.yandex import Yandex
from geocoder.google import Google
from geocoder.mapbox import Mapbox
from geocoder.maxmind import Maxmind
from geocoder.location import Location
from geocoder.opencage import OpenCage
from geocoder.geonames import Geonames
from geocoder.mapquest import Mapquest
from geocoder.distance import Distance
from geocoder.geolytica import Geolytica
from geocoder.freegeoip import FreeGeoIP
from geocoder.canadapost import Canadapost
from geocoder.w3w_reverse import W3WReverse
from geocoder.here_reverse import HereReverse
from geocoder.bing_reverse import BingReverse
from geocoder.yandex_reverse import YandexReverse
from geocoder.mapbox_reverse import MapboxReverse
from geocoder.google_reverse import GoogleReverse
from geocoder.google_timezone import Timezone
from geocoder.google_elevation import Elevation
from geocoder.mapquest_reverse import MapquestReverse
from geocoder.opencage_reverse import OpenCageReverse
def get(location, **kwargs):
"""Get Geocode
:param ``location``: Your search location you want geocoded.
:param ``provider``: The geocoding engine you want to use.
    :param ``method``: Define the method (e.g. geocode, reverse).
"""
provider = kwargs.get('provider', 'bing').lower().strip()
method = kwargs.get('method', 'geocode').lower().strip()
options = {
'osm': {'geocode': Osm},
'here': {
'geocode': Here,
'reverse': HereReverse,
},
'baidu': {'geocode': Baidu},
'yahoo': {'geocode': Yahoo},
'tomtom': {'geocode': Tomtom},
'arcgis': {'geocode': Arcgis},
'ottawa': {'geocode': Ottawa},
'mapbox': {
'geocode': Mapbox,
'reverse': MapboxReverse,
},
'maxmind': {'geocode': Maxmind},
'geonames': {'geocode': Geonames},
'freegeoip': {'geocode': FreeGeoIP},
'w3w': {
'geocode': W3W,
'reverse': W3WReverse,
},
'yandex': {
'geocode': Yandex,
'reverse': YandexReverse,
},
'mapquest': {
'geocode': Mapquest,
'reverse': MapquestReverse,
},
'geolytica': {'geocode': Geolytica},
'canadapost': {'geocode': Canadapost},
'opencage': {
'geocode': OpenCage,
'reverse': OpenCageReverse,
},
'bing': {
'geocode': Bing,
'reverse': BingReverse,
},
'google': {
'geocode': Google,
'reverse': GoogleReverse,
'timezone': Timezone,
'elevation': Elevation,
},
}
if isinstance(location, (list, dict)) and method == 'geocode':
raise ValueError("Location should be a string")
if provider not in options:
raise ValueError("Invalid provider")
else:
if method not in options[provider]:
raise ValueError("Invalid method")
return options[provider][method](location, **kwargs)
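# Illustrative sketch (editor's addition): dispatching through get() directly.
# The address, coordinates and providers are examples only; real calls need
# network access and, for some providers, an API key.
def _example_get_usage():
    forward = get('453 Booth Street, Ottawa ON', provider='osm')
    backward = get([45.4215, -75.6972], provider='google', method='reverse')
    return forward, backward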
def distance(*args, **kwargs):
"""Distance tool measures the distance between two or multiple points.
:param location: (min 2x locations) Your search location you want geocoded.
:param units: (default=kilometers) Unit of measurement.
> kilometers
> miles
> feet
> meters
"""
return Distance(*args, **kwargs)
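# Illustrative sketch (editor's addition): measuring between two coordinate
# pairs with the Distance tool above.  The coordinates are examples, and it is
# assumed here that Distance accepts [lat, lng] pairs like Location does.
def _example_distance():
    return distance([45.4215, -75.6972], [43.6532, -79.3832], units='kilometers')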
def location(location, **kwargs):
"""Parser for different location formats
"""
return Location(location, **kwargs)
def google(location, **kwargs):
"""Google Provider
:param location: Your search location you want geocoded.
:param method: (default=geocode) Use the following:
> geocode
> reverse
> batch
> timezone
> elevation
"""
return get(location, provider='google', **kwargs)
def mapbox(location, **kwargs):
"""Mapbox Provider
:param location: Your search location you want geocoded.
:param proximity: Search nearby [lat, lng]
:param method: (default=geocode) Use the following:
> geocode
> reverse
> batch
"""
return get(location, provider='mapbox', **kwargs)
def yandex(location, **kwargs):
"""Yandex Provider
:param location: Your search location you want geocoded.
    :param lang: Choose the following language:
> ru-RU — Russian (by default)
> uk-UA — Ukrainian
> be-BY — Belarusian
> en-US — American English
> en-BR — British English
> tr-TR — Turkish (only for maps of Turkey)
:param kind: Type of toponym (only for reverse geocoding):
> house - house or building
> street - street
> metro - subway station
> district - city district
> locality - locality (city, town, village, etc.)
"""
return get(location, provider='yandex', **kwargs)
def w3w(location, **kwargs):
"""what3words Provider
:param location: Your search location you want geocoded.
:param key: W3W API key.
    :param method: Choose a method (geocode, reverse)
"""
return get(location, provider='w3w', **kwargs)
def baidu(location, **kwargs):
"""Baidu Provider
:param location: Your search location you want geocoded.
:param key: Baidu API key.
:param referer: Baidu API referer website.
"""
return get(location, provider='baidu', **kwargs)
def ottawa(location, **kwargs):
"""Ottawa Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='ottawa', **kwargs)
def elevation(location, **kwargs):
"""Elevation - Google Provider
:param location: Your search location you want to retrieve elevation data.
"""
return get(location, method='elevation', provider='google', **kwargs)
def timezone(location, **kwargs):
"""Timezone - Google Provider
:param location: Your search location you want to retrieve timezone data.
:param timestamp: Define your own specified time to calculate timezone.
"""
return get(location, method='timezone', provider='google', **kwargs)
def reverse(location, provider='google', **kwargs):
"""Reverse Geocoding
:param location: Your search location you want to reverse geocode.
    :param key: (optional) use your own API Key for the chosen provider.
:param provider: (default=google) Use the following:
> google
> bing
"""
return get(location, method='reverse', provider=provider, **kwargs)
def bing(location, **kwargs):
"""Bing Provider
:param location: Your search location you want geocoded.
:param key: (optional) use your own API Key from Bing.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='bing', **kwargs)
def yahoo(location, **kwargs):
"""Yahoo Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='yahoo', **kwargs)
def geolytica(location, **kwargs):
"""Geolytica (Geocoder.ca) Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='geolytica', **kwargs)
def opencage(location, **kwargs):
"""Opencage Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from OpenCage.
"""
return get(location, provider='opencage', **kwargs)
def arcgis(location, **kwargs):
"""ArcGIS Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='arcgis', **kwargs)
def here(location, **kwargs):
"""HERE Provider
:param location: Your search location you want geocoded.
:param app_code: (optional) use your own Application Code from HERE.
:param app_id: (optional) use your own Application ID from HERE.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='here', **kwargs)
def nokia(location, **kwargs):
"""HERE Provider
:param location: Your search location you want geocoded.
:param app_code: (optional) use your own Application Code from HERE.
:param app_id: (optional) use your own Application ID from HERE.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='here', **kwargs)
def tomtom(location, **kwargs):
"""TomTom Provider
:param location: Your search location you want geocoded.
:param key: (optional) use your own API Key from TomTom.
"""
return get(location, provider='tomtom', **kwargs)
def mapquest(location, **kwargs):
"""MapQuest Provider
:param location: Your search location you want geocoded.
:param key: (optional) use your own API Key from MapQuest.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='mapquest', **kwargs)
def osm(location, **kwargs):
"""OSM Provider
:param location: Your search location you want geocoded.
:param url: Custom OSM Server URL location
(ex: http://nominatim.openstreetmap.org/search)
"""
return get(location, provider='osm', **kwargs)
def maxmind(location='me', **kwargs):
"""MaxMind Provider
:param location: Your search IP Address you want geocoded.
:param location: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='maxmind', **kwargs)
def freegeoip(location, **kwargs):
"""FreeGeoIP Provider
:param location: Your search IP Address you want geocoded.
:param location: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='freegeoip', **kwargs)
def ip(location, **kwargs):
"""IP Address lookup
:param location: Your search IP Address you want geocoded.
:param location: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='maxmind', **kwargs)
def canadapost(location, **kwargs):
"""CanadaPost Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) API Key from CanadaPost Address Complete.
"""
return get(location, provider='canadapost', **kwargs)
def postal(location, **kwargs):
"""CanadaPost Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from
CanadaPost Address Complete.
"""
return get(location, provider='canadapost', **kwargs)
def geonames(location, **kwargs):
"""GeoNames Provider
:param ``location``: Your search location you want geocoded.
:param ``username``: (required) needs to be passed with each request.
"""
return get(location, provider='geonames', **kwargs)
| miraculixx/geocoder | geocoder/api.py | Python | mit | 11,482 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from multiprocessing import Pool
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.plasma as plasma
import subprocess
import time
import multimerge
# To run this example, you will first need to run "python setup.py install" in
# this directory to build the Cython module.
#
# You will only see speedups if you run this code on more data; this is just a
# small example that can run on a laptop.
#
# The values we used to get a speedup (on a m4.10xlarge instance on EC2) were
# object_store_size = 84 * 10 ** 9
# num_cores = 20
# num_rows = 10 ** 9
# num_cols = 1
client = None
object_store_size = 2 * 10 ** 9 # 2 GB
num_cores = 8
num_rows = 200000
num_cols = 2
column_names = [str(i) for i in range(num_cols)]
column_to_sort = column_names[0]
# Connect to clients
def connect():
global client
client = plasma.connect('/tmp/store')
np.random.seed(int(time.time() * 10e7) % 10000000)
def put_df(df):
record_batch = pa.RecordBatch.from_pandas(df)
# Get size of record batch and schema
mock_sink = pa.MockOutputStream()
stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
stream_writer.write_batch(record_batch)
data_size = mock_sink.size()
# Generate an ID and allocate a buffer in the object store for the
# serialized DataFrame
object_id = plasma.ObjectID(np.random.bytes(20))
buf = client.create(object_id, data_size)
# Write the serialized DataFrame to the object store
sink = pa.FixedSizeBufferWriter(buf)
stream_writer = pa.RecordBatchStreamWriter(sink, record_batch.schema)
stream_writer.write_batch(record_batch)
# Seal the object
client.seal(object_id)
return object_id
def get_dfs(object_ids):
"""Retrieve dataframes from the object store given their object IDs."""
buffers = client.get_buffers(object_ids)
return [pa.RecordBatchStreamReader(buf).read_next_batch().to_pandas()
for buf in buffers]
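# Illustrative sketch (editor's addition): a put/get round trip through the two
# helpers above, assuming connect() has already been called in this process.
def _example_roundtrip(df):
    object_id = put_df(df)
    [restored] = get_dfs([object_id])
    return restored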
def local_sort(object_id):
"""Sort a partition of a dataframe."""
# Get the dataframe from the object store.
[df] = get_dfs([object_id])
# Sort the dataframe.
sorted_df = df.sort_values(by=column_to_sort)
# Get evenly spaced values from the dataframe.
indices = np.linspace(0, len(df) - 1, num=num_cores, dtype=np.int64)
# Put the sorted dataframe in the object store and return the corresponding
# object ID as well as the sampled values.
return put_df(sorted_df), sorted_df.as_matrix().take(indices)
def local_partitions(object_id_and_pivots):
"""Take a sorted partition of a dataframe and split it into more pieces."""
object_id, pivots = object_id_and_pivots
[df] = get_dfs([object_id])
split_at = df[column_to_sort].searchsorted(pivots)
split_at = [0] + list(split_at) + [len(df)]
# Partition the sorted dataframe and put each partition into the object
# store.
return [put_df(df[i:j]) for i, j in zip(split_at[:-1], split_at[1:])]
def merge(object_ids):
"""Merge a number of sorted dataframes into a single sorted dataframe."""
dfs = get_dfs(object_ids)
# In order to use our multimerge code, we have to convert the arrays from
# the Fortran format to the C format.
arrays = [np.ascontiguousarray(df.as_matrix()) for df in dfs]
for a in arrays:
assert a.dtype == np.float64
assert not np.isfortran(a)
# Filter out empty arrays.
arrays = [a for a in arrays if a.shape[0] > 0]
if len(arrays) == 0:
return None
resulting_array = multimerge.multimerge2d(*arrays)
merged_df2 = pd.DataFrame(resulting_array, columns=column_names)
return put_df(merged_df2)
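# Illustrative sketch (editor's addition): how the driver code below picks the
# bucket boundaries, shown on a plain numpy array instead of object-store data.
def _example_choose_pivots(samples, num_buckets):
    indices = np.linspace(0, len(samples) - 1, num=num_buckets, dtype=np.int64)
    return np.take(np.sort(samples), indices)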
if __name__ == '__main__':
# Start the plasma store.
p = subprocess.Popen(['plasma_store',
'-s', '/tmp/store',
'-m', str(object_store_size)])
# Connect to the plasma store.
connect()
# Connect the processes in the pool.
pool = Pool(initializer=connect, initargs=(), processes=num_cores)
# Create a DataFrame from a numpy array.
df = pd.DataFrame(np.random.randn(num_rows, num_cols),
columns=column_names)
partition_ids = [put_df(partition) for partition
in np.split(df, num_cores)]
# Begin timing the parallel sort example.
parallel_sort_start = time.time()
# Sort each partition and subsample them. The subsampled values will be
# used to create buckets.
sorted_df_ids, pivot_groups = list(zip(*pool.map(local_sort,
partition_ids)))
# Choose the pivots.
all_pivots = np.concatenate(pivot_groups)
indices = np.linspace(0, len(all_pivots) - 1, num=num_cores,
dtype=np.int64)
pivots = np.take(np.sort(all_pivots), indices)
# Break all of the sorted partitions into even smaller partitions. Group
# the object IDs from each bucket together.
results = list(zip(*pool.map(local_partitions,
zip(sorted_df_ids,
len(sorted_df_ids) * [pivots]))))
# Merge each of the buckets and store the results in the object store.
object_ids = pool.map(merge, results)
resulting_ids = [object_id for object_id in object_ids
if object_id is not None]
    # Stop timing the parallel sort example.
parallel_sort_end = time.time()
print('Parallel sort took {} seconds.'
.format(parallel_sort_end - parallel_sort_start))
serial_sort_start = time.time()
original_sorted_df = df.sort_values(by=column_to_sort)
serial_sort_end = time.time()
# Check that we sorted the DataFrame properly.
sorted_dfs = get_dfs(resulting_ids)
sorted_df = pd.concat(sorted_dfs)
print('Serial sort took {} seconds.'
.format(serial_sort_end - serial_sort_start))
assert np.allclose(sorted_df.values, original_sorted_df.values)
# Kill the object store.
p.kill()
| tebeka/arrow | python/examples/plasma/sorting/sort_df.py | Python | apache-2.0 | 6,843 | 0 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import abc
import re
import os
import glob
import shutil
import warnings
from itertools import chain
from copy import deepcopy
import six
import numpy as np
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.sites import PeriodicSite
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
   Improperly overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings and user_kpoints_settings are absolute. Any new sets you
implement must obey this. If a user wants to override your settings,
you assume he knows what he is doing. Do not magically override user
supplied settings. You can issue a warning if you think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
__author__ = "Shyue Ping Ong, Wei Chen, Will Richards, Geoffroy Hautier, Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 28 2016"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class VaspInputSet(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Base class representing a set of Vasp input parameters with a structure
supplied as init parameters. Typically, you should not inherit from this
class. Start from DictSet or MPRelaxSet or MITRelaxSet.
"""
@abc.abstractproperty
def incar(self):
"""Incar object"""
pass
@abc.abstractproperty
def kpoints(self):
"""Kpoints object"""
pass
@abc.abstractproperty
def poscar(self):
"""Poscar object"""
pass
@property
def potcar_symbols(self):
"""
List of POTCAR symbols.
"""
elements = self.poscar.site_symbols
potcar_symbols = []
settings = self.config_dict["POTCAR"]
if isinstance(settings[elements[-1]], dict):
for el in elements:
potcar_symbols.append(settings[el]['symbol']
if el in settings else el)
else:
for el in elements:
potcar_symbols.append(settings.get(el, el))
return potcar_symbols
@property
def potcar(self):
"""
Potcar object.
"""
return Potcar(self.potcar_symbols, functional=self.potcar_functional)
@property
def all_input(self):
"""
Returns all input files as a dict of {filename: vasp object}
Returns:
dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
"""
kpoints = self.kpoints
incar = self.incar
if np.product(kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
incar["ISMEAR"] = 0
return {'INCAR': incar,
'KPOINTS': kpoints,
'POSCAR': self.poscar,
'POTCAR': self.potcar}
def write_input(self, output_dir,
make_dir_if_not_present=True, include_cif=False):
"""
Writes a set of VASP input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.all_input.items():
v.write_file(os.path.join(output_dir, k))
if include_cif:
s = self.all_input["POSCAR"].structure
fname = os.path.join(output_dir, "%s.cif" % re.sub(r'\s', "",
s.formula))
s.to(filename=fname)
def as_dict(self, verbosity=2):
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
class DictSet(VaspInputSet):
"""
Concrete implementation of VaspInputSet that is initialized from a dict
settings. This allows arbitrary settings to be input. In general,
this is rarely used directly unless there is a source of settings in yaml
format (e.g., from a REST interface). It is typically used by other
VaspInputSets for initialization.
Special consideration should be paid to the way the MAGMOM initialization
for the INCAR is done. The initialization differs depending on the type of
structure and the configuration settings. The order in which the magmom is
determined is as follows:
1. If the site itself has a magmom setting, that is used.
2. If the species on the site has a spin setting, that is used.
3. If the species itself has a particular setting in the config file, that
is used, e.g., Mn3+ may have a different magmom than Mn4+.
4. Lastly, the element symbol itself is checked in the config file. If
there are no settings, VASP's default of 0.6 is used.
Args:
structure (Structure): The Structure to create inputs for.
config_dict (dict): The config dictionary to use.
files_to_transfer (dict): A dictionary of {filename: filepath}. This
allows the transfer of files from a previous calculation.
user_incar_settings (dict): User INCAR settings. This allows a user
to override INCAR settings, e.g., setting a different MAGMOM for
various elements or species. Note that in the new scheme,
ediff_per_atom and hubbard_u are no longer args. Instead, the
config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
scales with # of atoms, the latter does not. If both are
present, EDIFF is preferred. To force such settings, just supply
user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
pymatgen defines different values depending on what anions are
present in the structure, so these keys can be defined in one
of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
5 regardless of the input structure.
user_kpoints_settings (dict): Allow user to override kpoints setting by
supplying a dict. E.g., {"reciprocal_density": 1000}. Default is
None.
constrain_total_magmom (bool): Whether to constrain the total magmom
(NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
species. Defaults to False.
sort_structure (bool): Whether to sort the structure (using the
default sort order of electronegativity) before generating input
files. Defaults to True, the behavior you would want most of the
time. This ensures that similar atomic species are grouped
together.
potcar_functional (str): Functional to use. Default (None) is to use
the functional in Potcar.DEFAULT_FUNCTIONAL. Valid values:
"PBE", "LDA", "PW91", "LDA_US"
force_gamma (bool): Force gamma centered kpoint generation. Default
(False) is to use the Automatic Density kpoint scheme, which
will use the Gamma centered generation scheme for hexagonal
cells, and Monkhorst-Pack otherwise.
reduce_structure (None/str): Before generating the input files,
generate the reduced structure. Default (None), does not
alter the structure. Valid values: None, "niggli", "LLL".
"""
def __init__(self, structure, config_dict,
files_to_transfer=None, user_incar_settings=None,
user_kpoints_settings=None,
constrain_total_magmom=False, sort_structure=True,
potcar_functional="PBE", force_gamma=False,
reduce_structure=None):
if reduce_structure:
structure = structure.get_reduced_structure(reduce_structure)
if sort_structure:
structure = structure.get_sorted_structure()
self.structure = structure
self.config_dict = deepcopy(config_dict)
self.files_to_transfer = files_to_transfer or {}
self.constrain_total_magmom = constrain_total_magmom
self.sort_structure = sort_structure
self.potcar_functional = potcar_functional
self.force_gamma = force_gamma
self.reduce_structure = reduce_structure
self.user_incar_settings = user_incar_settings or {}
self.user_kpoints_settings = user_kpoints_settings
@property
def incar(self):
settings = dict(self.config_dict["INCAR"])
settings.update(self.user_incar_settings)
structure = self.structure
incar = Incar()
comp = structure.composition
elements = sorted([el for el in comp.elements if comp[el] > 0],
key=lambda e: e.X)
most_electroneg = elements[-1].symbol
poscar = Poscar(structure)
hubbard_u = settings.get("LDAU", False)
for k, v in settings.items():
if k == "MAGMOM":
mag = []
for site in structure:
if hasattr(site, 'magmom'):
mag.append(site.magmom)
elif hasattr(site.specie, 'spin'):
mag.append(site.specie.spin)
elif str(site.specie) in v:
mag.append(v.get(str(site.specie)))
else:
mag.append(v.get(site.specie.symbol, 0.6))
incar[k] = mag
elif k in ('LDAUU', 'LDAUJ', 'LDAUL'):
if hubbard_u:
if hasattr(structure[0], k.lower()):
m = dict([(site.specie.symbol, getattr(site, k.lower()))
for site in structure])
incar[k] = [m[sym] for sym in poscar.site_symbols]
# lookup specific LDAU if specified for most_electroneg atom
elif most_electroneg in v.keys():
if isinstance(v[most_electroneg], dict):
incar[k] = [v[most_electroneg].get(sym, 0)
for sym in poscar.site_symbols]
# else, use fallback LDAU value if it exists
else:
incar[k] = [v.get(sym, 0) for sym in poscar.site_symbols]
elif k.startswith("EDIFF") and k != "EDIFFG":
if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
incar["EDIFF"] = float(v) * structure.num_sites
else:
incar["EDIFF"] = float(settings["EDIFF"])
else:
incar[k] = v
has_u = hubbard_u and sum(incar['LDAUU']) > 0
if has_u:
# modify LMAXMIX if LSDA+U and you have d or f electrons
# note that if the user explicitly sets LMAXMIX in settings it will
# override this logic.
if 'LMAXMIX' not in settings.keys():
# contains f-electrons
if any([el.Z > 56 for el in structure.composition]):
incar['LMAXMIX'] = 6
# contains d-electrons
elif any([el.Z > 20 for el in structure.composition]):
incar['LMAXMIX'] = 4
else:
for key in list(incar.keys()):
if key.startswith('LDAU'):
del incar[key]
if self.constrain_total_magmom:
nupdown = sum([mag if abs(mag) > 0.6 else 0
for mag in incar['MAGMOM']])
incar['NUPDOWN'] = nupdown
return incar
@property
def poscar(self):
return Poscar(self.structure)
@property
def nelect(self):
"""
Gets the default number of electrons for a given structure.
"""
n = 0
for ps in self.potcar:
n += self.structure.composition[ps.element] * ps.ZVAL
return n
@property
def kpoints(self):
"""
Writes out a KPOINTS file using the fully automated grid method. Uses
        Gamma centered meshes for hexagonal cells and Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
"""
settings = self.user_kpoints_settings or self.config_dict["KPOINTS"]
# If grid_density is in the kpoints_settings use
# Kpoints.automatic_density
if settings.get('grid_density'):
return Kpoints.automatic_density(
self.structure, int(settings['grid_density']),
self.force_gamma)
# If reciprocal_density is in the kpoints_settings use
# Kpoints.automatic_density_by_vol
elif settings.get('reciprocal_density'):
return Kpoints.automatic_density_by_vol(
self.structure, int(settings['reciprocal_density']),
self.force_gamma)
# If length is in the kpoints_settings use Kpoints.automatic
elif settings.get('length'):
return Kpoints.automatic(settings['length'])
# Raise error. Unsure of which kpoint generation to use
else:
raise ValueError(
"Invalid KPoint Generation algo : Supported Keys are "
"grid_density: for Kpoints.automatic_density generation, "
"reciprocal_density: for KPoints.automatic_density_by_vol "
"generation, and length : for Kpoints.automatic generation")
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return self.__class__.__name__
def write_input(self, output_dir,
make_dir_if_not_present=True, include_cif=False):
super(DictSet, self).write_input(
output_dir=output_dir,
make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif)
for k, v in self.files_to_transfer.items():
shutil.copy(v, os.path.join(output_dir, k))
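# Illustrative note (editor's addition): the MAGMOM resolution order documented
# in DictSet.  For a hypothetical Fe-O structure whose sites carry no explicit
# magmom or spin, a config MAGMOM mapping such as {"Fe": 5} yields
# MAGMOM = [5, 0.6], since unlisted species fall back to the default of 0.6.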
class MITRelaxSet(DictSet):
"""
Standard implementation of VaspInputSet utilizing parameters in the MIT
High-throughput project.
The parameters are chosen specifically for a high-throughput project,
which means in general pseudopotentials with fewer electrons were chosen.
Please refer::
A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
K. A. Persson, G. Ceder. A high-throughput infrastructure for density
functional theory calculations. Computational Materials Science,
2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MITRelaxSet.yaml"))
def __init__(self, structure, **kwargs):
super(MITRelaxSet, self).__init__(
structure, MITRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPRelaxSet(DictSet):
"""
Implementation of VaspInputSet utilizing parameters in the public
Materials Project. Typically, the pseudopotentials chosen contain more
electrons than the MIT parameters, and the k-point grid is ~50% more dense.
The LDAUU parameters are also different due to the different psps used,
which result in different fitted values.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPRelaxSet.yaml"))
def __init__(self, structure, **kwargs):
super(MPRelaxSet, self).__init__(
structure, MPRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
"""
Same as the MPRelaxSet, but with HSE parameters.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPHSERelaxSet.yaml"))
def __init__(self, structure, **kwargs):
super(MPHSERelaxSet, self).__init__(
structure, MPHSERelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
def __init__(self, structure, prev_incar=None, prev_kpoints=None,
lepsilon=False, lcalcpol=False, reciprocal_density=100,
**kwargs):
"""
Run a static calculation.
Args:
structure (Structure): Structure from previous run.
prev_incar (Incar): Incar file from previous run.
prev_kpoints (Kpoints): Kpoints from previous run.
lepsilon (bool): Whether to add static dielectric calculation
reciprocal_density (int): For static calculations,
we usually set the reciprocal density by volume. This is a
convenience arg to change that, rather than using
user_kpoints_settings. Defaults to 100, which is ~50% more than
that of standard relaxation calculations.
\\*\\*kwargs: kwargs supported by MPRelaxSet.
"""
super(MPStaticSet, self).__init__(structure, **kwargs)
if isinstance(prev_incar, six.string_types):
prev_incar = Incar.from_file(prev_incar)
if isinstance(prev_kpoints, six.string_types):
prev_kpoints = Kpoints.from_file(prev_kpoints)
self.prev_incar = prev_incar
self.prev_kpoints = prev_kpoints
self.reciprocal_density = reciprocal_density
self.structure = structure
self.kwargs = kwargs
self.lepsilon = lepsilon
self.lcalcpol = lcalcpol
@property
def incar(self):
parent_incar = super(MPStaticSet, self).incar
incar = Incar(self.prev_incar) if self.prev_incar is not None else \
Incar(parent_incar)
incar.update(
{"IBRION": -1, "ISMEAR": -5, "LAECHG": True, "LCHARG": True,
"LORBIT": 11, "LVHAR": True, "LWAVE": False, "NSW": 0,
"ICHARG": 0, "ALGO": "Normal"})
if self.lepsilon:
incar["IBRION"] = 8
incar["LEPSILON"] = True
            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output the ionic contributions.
incar.pop("NSW", None)
incar.pop("NPAR", None)
if self.lcalcpol:
incar["LCALCPOL"] = True
for k in ["MAGMOM", "NUPDOWN"] + list(self.kwargs.get(
"user_incar_settings", {}).keys()):
# For these parameters as well as user specified settings, override
# the incar settings.
if parent_incar.get(k, None) is not None:
incar[k] = parent_incar[k]
else:
incar.pop(k, None)
# use new LDAUU when possible b/c the Poscar might have changed
# representation
if incar.get('LDAU'):
u = incar.get('LDAUU', [])
j = incar.get('LDAUJ', [])
            if sum(u[x] - j[x] for x in range(len(u))) > 0:
for tag in ('LDAUU', 'LDAUL', 'LDAUJ'):
incar.update({tag: parent_incar[tag]})
# ensure to have LMAXMIX for GGA+U static run
if "LMAXMIX" not in incar:
incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})
        # Compare EDIFF between the previous run and the static input set and
        # choose the tighter value.
incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
return incar
@property
def kpoints(self):
self.config_dict["KPOINTS"]["reciprocal_density"] = \
self.reciprocal_density
kpoints = super(MPStaticSet, self).kpoints
# Prefer to use k-point scheme from previous run
if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
if self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst:
k_div = [kp + 1 if kp % 2 == 1 else kp
for kp in kpoints.kpts[0]]
kpoints = Kpoints.monkhorst_automatic(k_div)
else:
kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])
return kpoints
@classmethod
def from_prev_calc(cls, prev_calc_dir, standardize=False, sym_prec=0.1,
international_monoclinic=True, reciprocal_density=100,
small_gap_multiply=None, **kwargs):
"""
Generate a set of Vasp input files for static calculations from a
directory of previous Vasp run.
Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous VASP run.
            standardize (bool): Whether to standardize to a primitive
                standard cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding. If not 0,
the final structure from the previous run will be symmetrized
to get a primitive standard cell. Set to 0 if you don't want
that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults to True.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
            \\*\\*kwargs: All kwargs supported by MPStaticSet, other than
                prev_incar, prev_kpoints and structure, which are determined
                from the prev_calc_dir.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
prev_incar = vasprun.incar
prev_kpoints = vasprun.kpoints
# We will make a standard structure for the given symprec.
prev_structure = get_structure_from_prev_run(
vasprun, outcar, sym_prec=standardize and sym_prec,
international_monoclinic=international_monoclinic)
# multiply the reciprocal density if needed:
if small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= small_gap_multiply[0]:
reciprocal_density = reciprocal_density * small_gap_multiply[1]
return MPStaticSet(
structure=prev_structure, prev_incar=prev_incar,
prev_kpoints=prev_kpoints,
reciprocal_density=reciprocal_density, **kwargs)
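# Illustrative usage sketch (not part of the original module). The directory
# names are hypothetical placeholders for a finished relaxation and the new
# static run.
#
#     static_set = MPStaticSet.from_prev_calc("./relax_run",
#                                             reciprocal_density=200)
#     static_set.write_input("./static_run")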
class MPHSEBSSet(MPHSERelaxSet):
def __init__(self, structure, user_incar_settings=None, added_kpoints=None,
mode="Uniform", reciprocal_density=None,
kpoints_line_density=20, **kwargs):
"""
Implementation of a VaspInputSet for HSE band structure computations.
Remember that HSE band structures must be self-consistent in VASP. A
band structure along symmetry lines for instance needs BOTH a uniform
grid with appropriate weights AND a path along the lines with weight 0.
Thus, the "Uniform" mode is just like regular static SCF but allows
adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
uniform grid that have zero weight (e.g., for better gap estimate).
The "Line" mode is just like Uniform mode, but additionally adds
k-points along symmetry lines with zero weight.
Args:
structure (Structure): Structure to compute
user_incar_settings (dict): A dict specifying additional incar
settings
added_kpoints (list): a list of kpoints (list of 3 number list)
added to the run. The k-points are in fractional coordinates
mode (str): "Line" - generate k-points along symmetry lines for
bandstructure. "Uniform" - generate uniform k-points grid
reciprocal_density (int): k-point density to use for uniform mesh
kpoints_line_density (int): k-point density for high symmetry lines
**kwargs (dict): Any other parameters to pass into DictVaspInputSet
"""
super(MPHSEBSSet, self).__init__(structure, **kwargs)
self.structure = structure
self.user_incar_settings = user_incar_settings or {}
self.config_dict["INCAR"].update(
{"NSW": 0, "ISMEAR": 0, "SIGMA": 0.05, "ISYM": 3, "LCHARG": False, "NELMIN": 5})
self.added_kpoints = added_kpoints if added_kpoints is not None else []
self.mode = mode
self.reciprocal_density = reciprocal_density or \
self.kpoints_settings['reciprocal_density']
self.kpoints_line_density = kpoints_line_density
@property
def kpoints(self):
kpts = []
weights = []
all_labels = []
# for both modes, include the Uniform mesh w/standard weights
grid = Kpoints.automatic_density_by_vol(self.structure,
self.reciprocal_density).kpts
ir_kpts = SpacegroupAnalyzer(self.structure, symprec=0.1)\
.get_ir_reciprocal_mesh(grid[0])
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
all_labels.append(None)
# for both modes, include any user-added kpoints w/zero weight
for k in self.added_kpoints:
kpts.append(k)
weights.append(0.0)
all_labels.append("user-defined")
# for line mode only, add the symmetry lines w/zero weight
if self.mode.lower() == "line":
kpath = HighSymmKpath(self.structure)
frac_k_points, labels = kpath.get_kpoints(
line_density=self.kpoints_line_density,
coords_are_cartesian=False)
for k in range(len(frac_k_points)):
kpts.append(frac_k_points[k])
weights.append(0.0)
all_labels.append(labels[k])
comment = "HSE run along symmetry lines" if self.mode.lower() == "line" \
else "HSE run on uniform grid"
return Kpoints(comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts), kpts=kpts, kpts_weights=weights,
labels=all_labels)
@classmethod
def from_prev_calc(cls, prev_calc_dir, mode="gap",
reciprocal_density=50, copy_chgcar=True, **kwargs):
"""
Generate a set of Vasp input files for HSE calculations from a
        directory of a previous VASP run. If mode=="gap", it explicitly adds
        the VBM and CBM of the previous run to the k-point list of this run.
Args:
prev_calc_dir (str): Directory containing the outputs
(vasprun.xml and OUTCAR) of previous vasp run.
mode (str): Either "uniform", "gap" or "line"
reciprocal_density (int): density of k-mesh
copy_chgcar (bool): whether to copy CHGCAR of previous run
            \\*\\*kwargs: All kwargs supported by MPHSEBSSet,
other than prev_structure which is determined from the previous
calc dir.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
# note: don't standardize the cell because we want to retain k-points
prev_structure = get_structure_from_prev_run(vasprun, outcar,
sym_prec=0)
added_kpoints = []
if mode.lower() == "gap":
bs = vasprun.get_band_structure()
vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
if vbm:
added_kpoints.append(vbm.frac_coords)
if cbm:
added_kpoints.append(cbm.frac_coords)
files_to_transfer = {}
if copy_chgcar:
chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
return MPHSEBSSet(
structure=prev_structure,
added_kpoints=added_kpoints, reciprocal_density=reciprocal_density,
mode=mode, files_to_transfer=files_to_transfer, **kwargs)
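# Illustrative usage sketch (not part of the original module). The directory
# names are hypothetical placeholders; mode may be "uniform", "gap" or "line".
#
#     hse_bs = MPHSEBSSet.from_prev_calc("./hse_scf_run", mode="gap",
#                                        reciprocal_density=50)
#     hse_bs.write_input("./hse_bs_run")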
class MPNonSCFSet(MPRelaxSet):
def __init__(self, structure, prev_incar=None,
mode="line", nedos=601, reciprocal_density=100, sym_prec=0.1,
kpoints_line_density=20, optics=False, **kwargs):
"""
Init a MPNonSCFSet. Typically, you would use the classmethod
from_prev_calc to initialize from a previous SCF run.
Args:
structure (Structure): Structure to compute
prev_incar (Incar/string): Incar file from previous run.
mode (str): Line or Uniform mode supported.
            nedos (int): nedos parameter. Defaults to 601.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
sym_prec (float): Symmetry precision (for Uniform mode).
kpoints_line_density (int): Line density for Line mode.
optics (bool): whether to add dielectric function
            \\*\\*kwargs: kwargs supported by MPRelaxSet.
"""
super(MPNonSCFSet, self).__init__(structure, **kwargs)
if isinstance(prev_incar, six.string_types):
prev_incar = Incar.from_file(prev_incar)
self.prev_incar = prev_incar
self.kwargs = kwargs
self.nedos = nedos
self.reciprocal_density = reciprocal_density
self.sym_prec = sym_prec
self.kpoints_line_density = kpoints_line_density
self.optics = optics
self.mode = mode.lower()
if self.mode.lower() not in ["line", "uniform"]:
raise ValueError("Supported modes for NonSCF runs are 'Line' and "
"'Uniform'!")
if (self.mode.lower() != "uniform" or nedos < 2000) and optics:
warnings.warn("It is recommended to use Uniform mode with a high "
"NEDOS for optics calculations.")
@property
def incar(self):
incar = super(MPNonSCFSet, self).incar
if self.prev_incar is not None:
incar.update({k: v for k, v in self.prev_incar.items()})
# Overwrite necessary INCAR parameters from previous runs
incar.update({"IBRION": -1, "ISMEAR": 0, "SIGMA": 0.001,
"LCHARG": False, "LORBIT": 11, "LWAVE": False,
"NSW": 0, "ISYM": 0, "ICHARG": 11})
incar.update(self.kwargs.get("user_incar_settings", {}))
if self.mode.lower() == "uniform":
# Set smaller steps for DOS output
incar["NEDOS"] = self.nedos
if self.optics:
incar["LOPTICS"] = True
incar.pop("MAGMOM", None)
return incar
@property
def kpoints(self):
if self.mode == "line":
kpath = HighSymmKpath(self.structure)
frac_k_points, k_points_labels = kpath.get_kpoints(
line_density=self.kpoints_line_density,
coords_are_cartesian=False)
kpoints = Kpoints(
comment="Non SCF run along symmetry lines",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(frac_k_points),
kpts=frac_k_points, labels=k_points_labels,
kpts_weights=[1] * len(frac_k_points))
else:
kpoints = Kpoints.automatic_density_by_vol(self.structure,
self.reciprocal_density)
mesh = kpoints.kpts[0]
ir_kpts = SpacegroupAnalyzer(
self.structure,
symprec=self.sym_prec).get_ir_reciprocal_mesh(mesh)
kpts = []
weights = []
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
kpoints = Kpoints(comment="Non SCF run on uniform grid",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(ir_kpts),
kpts=kpts, kpts_weights=weights)
return kpoints
@classmethod
def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True,
nbands_factor=1.2, standardize=False, sym_prec=0.1,
international_monoclinic=True, reciprocal_density=100,
kpoints_line_density=20, small_gap_multiply=None,
**kwargs):
"""
Generate a set of Vasp input files for NonSCF calculations from a
directory of previous static Vasp run.
Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous VASP run.
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
            standardize (bool): Whether to standardize to a primitive
                standard cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding. If not 0,
the final structure from the previous run will be symmetrized
to get a primitive standard cell. Set to 0 if you don't want
that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults to True.
reciprocal_density (int): density of k-mesh by reciprocal
volume in uniform mode (defaults to 100)
kpoints_line_density (int): density of k-mesh in line mode
(defaults to 20)
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
\\*\\*kwargs: All kwargs supported by MPNonSCFSet,
other than structure, prev_incar and prev_chgcar which
are determined from the prev_calc_dir.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
incar = vasprun.incar
# Get a Magmom-decorated structure
structure = get_structure_from_prev_run(
vasprun, outcar, sym_prec=standardize and sym_prec,
international_monoclinic=international_monoclinic)
# Turn off spin when magmom for every site is smaller than 0.02.
if outcar and outcar.magnetization:
site_magmom = np.array([i['tot'] for i in outcar.magnetization])
ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
elif vasprun.is_spin:
ispin = 2
else:
ispin = 1
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * nbands_factor))
incar.update({"ISPIN": ispin, "NBANDS": nbands})
files_to_transfer = {}
if copy_chgcar:
chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
# multiply the reciprocal density if needed:
if small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= small_gap_multiply[0]:
reciprocal_density = reciprocal_density * small_gap_multiply[1]
kpoints_line_density = kpoints_line_density * \
small_gap_multiply[1]
return MPNonSCFSet(structure=structure, prev_incar=incar,
reciprocal_density=reciprocal_density,
kpoints_line_density=kpoints_line_density,
files_to_transfer=files_to_transfer, **kwargs)
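# Illustrative usage sketch (not part of the original module). The directory
# names are hypothetical placeholders; extra kwargs such as mode, nedos and
# optics are forwarded to MPNonSCFSet.__init__.
#
#     nscf = MPNonSCFSet.from_prev_calc("./static_run", mode="uniform",
#                                       nedos=2001, optics=True)
#     nscf.write_input("./nscf_run")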
class MPSOCSet(MPStaticSet):
def __init__(self, structure, saxis=(0, 0, 1), prev_incar=None,
reciprocal_density=100, **kwargs):
"""
Init a MPSOCSet.
Args:
structure (Structure): the structure must have the 'magmom' site
property and each magnetic moment value must have 3
components. eg:- magmom = [[0,0,2], ...]
saxis (tuple): magnetic moment orientation
prev_incar (Incar): Incar file from previous run.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
            \\*\\*kwargs: kwargs supported by MPStaticSet.
"""
        if not hasattr(structure[0], "magmom") or \
                not isinstance(structure[0].magmom, list):
raise ValueError("The structure must have the 'magmom' site "
"property and each magnetic moment value must have 3 "
"components. eg:- magmom = [0,0,2]")
self.saxis = saxis
super(MPSOCSet, self).__init__(
structure, prev_incar=prev_incar,
reciprocal_density=reciprocal_density, **kwargs)
@property
def incar(self):
incar = super(MPSOCSet, self).incar
if self.prev_incar is not None:
incar.update({k: v for k, v in self.prev_incar.items()})
# Overwrite necessary INCAR parameters from previous runs
incar.update({"ISYM": -1, "LSORBIT": "T", "ICHARG": 11,
"SAXIS": list(self.saxis)})
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
@classmethod
def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True,
nbands_factor=1.2, standardize=False, sym_prec=0.1,
international_monoclinic=True, reciprocal_density=100,
small_gap_multiply=None, **kwargs):
"""
Generate a set of Vasp input files for SOC calculations from a
directory of previous static Vasp run. SOC calc requires all 3
components for MAGMOM for each atom in the structure.
Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous VASP run.
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
            standardize (bool): Whether to standardize to a primitive
                standard cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding. If not 0,
the final structure from the previous run will be symmetrized
to get a primitive standard cell. Set to 0 if you don't want
that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults to True.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
\\*\\*kwargs: All kwargs supported by MPSOCSet,
other than structure, prev_incar and prev_chgcar which
are determined from the prev_calc_dir.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
incar = vasprun.incar
# Get a magmom-decorated structure
structure = get_structure_from_prev_run(
vasprun, outcar, sym_prec=standardize and sym_prec,
international_monoclinic=international_monoclinic)
# override magmom if provided
if kwargs.get("magmom", None):
structure = structure.copy(
site_properties={"magmom": kwargs["magmom"]})
kwargs.pop("magmom", None)
# magmom has to be 3D for SOC calculation.
if hasattr(structure[0], "magmom"):
if not isinstance(structure[0].magmom, list):
structure = structure.copy(site_properties={
"magmom": [[0, 0, site.magmom] for site in structure]})
else:
raise ValueError("Neither the previous structure has mamgom "
"property nor magmom provided")
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * nbands_factor))
incar.update({"NBANDS": nbands})
files_to_transfer = {}
if copy_chgcar:
chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
# multiply the reciprocal density if needed:
if small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= small_gap_multiply[0]:
reciprocal_density = reciprocal_density * small_gap_multiply[1]
return MPSOCSet(structure, prev_incar=incar,
files_to_transfer=files_to_transfer,
reciprocal_density=reciprocal_density, **kwargs)
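# Illustrative usage sketch (not part of the original module). The directory
# name and n_sites are hypothetical placeholders; each site needs a
# 3-component magnetic moment.
#
#     soc = MPSOCSet.from_prev_calc("./static_run", saxis=(0, 0, 1),
#                                   magmom=[[0, 0, 2]] * n_sites)
#     soc.write_input("./soc_run")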
class MVLElasticSet(MPRelaxSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research.
This input set is used to calculate elastic constants in VASP. It is used
in the following work::
Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
“Elastic Properties of Alkali Superionic Conductor Electrolytes
from First Principles Calculations”, J. Electrochem. Soc.
2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes
    To read the elastic constants, you may use the Outcar class, which parses
    them from the OUTCAR of the run.
Args:
        potim (float): POTIM parameter. The default of 0.015 is usually fine,
but some structures may require a smaller step.
user_incar_settings (dict): A dict specifying additional incar
settings.
"""
def __init__(self, structure, potim=0.015, **kwargs):
super(MVLElasticSet, self).__init__(structure, **kwargs)
self.config_dict["INCAR"].update({"IBRION": 6, "NFREE": 2,
"POTIM": potim})
self.config_dict["INCAR"].pop("NPAR", None)
class MVLSlabSet(MPRelaxSet):
"""
Class for writing a set of slab vasp runs,
    including both slabs (along the c direction) and oriented unit cells
    (bulk), to ensure the same KPOINTS, POTCAR and INCAR criteria.
Args:
        k_product: Defaults to 50. Kpoint number * length for the a & b
            directions, also for the c direction in bulk calculations.
bulk (bool): Set to True for bulk calculation. Defaults to False.
**kwargs:
Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structure, k_product=50, bulk=False, **kwargs):
super(MVLSlabSet, self).__init__(structure, **kwargs)
self.structure = structure
self.k_product = k_product
self.bulk = bulk
self.kwargs = kwargs
slab_incar = {"EDIFF": 1e-6, "EDIFFG": -0.01, "ENCUT": 400,
"ISMEAR": 0, "SIGMA": 0.05, "ISIF": 3}
if not self.bulk:
slab_incar["ISIF"] = 2
slab_incar["AMIN"] = 0.01
slab_incar["AMIX"] = 0.2
slab_incar["BMIX"] = 0.001
slab_incar["NELMIN"] = 8
self.config_dict["INCAR"].update(slab_incar)
@property
def kpoints(self):
"""
        k_product, which defaults to 50, is kpoint number * length for the
        a & b directions, and also for the c direction in bulk calculations.
Automatic mesh & Gamma is the default setting.
"""
        # To get input sets, the input structure has to have the same number
        # of required parameters as a Structure object (i.e. 4). Slab
        # attributes aren't going to affect the VASP inputs anyway, so
        # converting the slab into a structure should not matter.
kpt = super(MVLSlabSet, self).kpoints
kpt.comment = "Automatic mesh"
kpt.style = 'Gamma'
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
abc = self.structure.lattice.abc
kpt_calc = [int(self.k_product/abc[0]+0.5),
int(self.k_product/abc[1]+0.5), 1]
self.kpt_calc = kpt_calc
# calculate kpts (c direction) for bulk. (for slab, set to 1)
if self.bulk:
kpt_calc[2] = int(self.k_product/abc[2]+0.5)
kpt.kpts[0] = kpt_calc
return kpt
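# Illustrative usage sketch (not part of the original module). ``slab`` is a
# hypothetical slab Structure (e.g. generated by a slab-building tool).
#
#     slab_set = MVLSlabSet(slab, k_product=50, bulk=False)
#     slab_set.write_input("./slab_run")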
class MITNEBSet(MITRelaxSet):
"""
Class for writing NEB inputs. Note that EDIFF is not on a per atom
basis for this input set.
Args:
unset_encut (bool): Whether to unset ENCUT.
\\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structures, unset_encut=False, **kwargs):
if len(structures) < 3:
raise ValueError("You need at least 3 structures for an NEB.")
kwargs["sort_structure"] = False
super(MITNEBSet, self).__init__(structures[0], **kwargs)
self.structures = self._process_structures(structures)
        self.unset_encut = unset_encut
if unset_encut:
self.config_dict["INCAR"].pop("ENCUT", None)
if "EDIFF" not in self.config_dict["INCAR"]:
self.config_dict["INCAR"]["EDIFF"] = self.config_dict[
"INCAR"].pop("EDIFF_PER_ATOM")
# NEB specific defaults
defaults = {'IMAGES': len(structures) - 2, 'IBRION': 1, 'ISYM': 0,
'LCHARG': False, "LDAU": False}
self.config_dict["INCAR"].update(defaults)
@property
def poscar(self):
return Poscar(self.structures[0])
@property
def poscars(self):
return [Poscar(s) for s in self.structures]
def _process_structures(self, structures):
"""
Remove any atom jumps across the cell
"""
input_structures = structures
structures = [input_structures[0]]
for s in input_structures[1:]:
prev = structures[-1]
for i in range(len(s)):
t = np.round(prev[i].frac_coords - s[i].frac_coords)
if np.any(np.abs(t)>0.5):
s.translate_sites([i], t, to_unit_cell=False)
structures.append(s)
return structures
def write_input(self, output_dir, make_dir_if_not_present=True,
write_cif=False, write_path_cif=False,
write_endpoint_inputs=False):
"""
        NEB inputs have a special directory structure, with the inputs for
        each image placed in directories 00, 01, 02, ....
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
write_cif (bool): If true, writes a cif along with each POSCAR.
            write_path_cif (bool): If true, writes a cif of the full NEB path
                (sites from all images combined) to path.cif.
write_endpoint_inputs (bool): If true, writes input files for
running endpoint calculations.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
self.incar.write_file(os.path.join(output_dir, 'INCAR'))
self.kpoints.write_file(os.path.join(output_dir, 'KPOINTS'))
self.potcar.write_file(os.path.join(output_dir, 'POTCAR'))
for i, p in enumerate(self.poscars):
d = os.path.join(output_dir, str(i).zfill(2))
if not os.path.exists(d):
os.makedirs(d)
p.write_file(os.path.join(d, 'POSCAR'))
if write_cif:
p.structure.to(filename=os.path.join(d, '{}.cif'.format(i)))
if write_endpoint_inputs:
end_point_param = MITRelaxSet(
self.structures[0],
user_incar_settings=self.user_incar_settings)
for image in ['00', str(len(self.structures) - 1).zfill(2)]:
end_point_param.incar.write_file(os.path.join(output_dir, image, 'INCAR'))
end_point_param.kpoints.write_file(os.path.join(output_dir, image, 'KPOINTS'))
end_point_param.potcar.write_file(os.path.join(output_dir, image, 'POTCAR'))
if write_path_cif:
sites = set()
            lat = self.structures[0].lattice
            for site in chain(*(s.sites for s in self.structures)):
                sites.add(PeriodicSite(site.species_and_occu,
                                       site.frac_coords, lat))
nebpath = Structure.from_sites(sorted(sites))
nebpath.to(filename=os.path.join(output_dir, 'path.cif'))
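# Illustrative usage sketch (not part of the original module). ``images`` is
# a hypothetical list of at least 3 structures (both endpoints plus the
# interpolated images, in order).
#
#     neb = MITNEBSet(images)
#     neb.write_input("./neb_run", write_endpoint_inputs=True)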
class MITMDSet(MITRelaxSet):
"""
    Class for writing a VASP MD run. This DOES NOT do multiple stage
    runs.
Args:
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps (int): Number of time steps for simulations. The NSW parameter.
        time_step (int): The time step for the simulation. The POTIM
            parameter. Defaults to 2 fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
sort_structure (bool): Whether to sort structure. Defaults to False
(different behavior from standard input sets).
\\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
"""
def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2,
spin_polarized=False, **kwargs):
# MD default settings
defaults = {'TEBEG': start_temp, 'TEEND': end_temp, 'NSW': nsteps,
'EDIFF_PER_ATOM': 0.000001, 'LSCALU': False,
'LCHARG': False,
'LPLANE': False, 'LWAVE': True, 'ISMEAR': 0,
'NELMIN': 4, 'LREAL': True, 'BMIX': 1,
'MAXMIX': 20, 'NELM': 500, 'NSIM': 4, 'ISYM': 0,
'ISIF': 0, 'IBRION': 0, 'NBLOCK': 1, 'KBLOCK': 100,
'SMASS': 0, 'POTIM': time_step, 'PREC': 'Normal',
'ISPIN': 2 if spin_polarized else 1,
"LDAU": False}
super(MITMDSet, self).__init__(structure, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.time_step = time_step
self.spin_polarized = spin_polarized
self.kwargs = kwargs
# use VASP default ENCUT
self.config_dict["INCAR"].pop('ENCUT', None)
if defaults['ISPIN'] == 1:
self.config_dict["INCAR"].pop('MAGMOM', None)
self.config_dict["INCAR"].update(defaults)
@property
def kpoints(self):
return Kpoints.gamma_automatic()
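# Illustrative usage sketch (not part of the original module). ``structure``
# is a hypothetical Structure; temperatures are in K, the time step in fs.
#
#     md = MITMDSet(structure, start_temp=300, end_temp=300, nsteps=10000,
#                   time_step=2, spin_polarized=False)
#     md.write_input("./md_run")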
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
    """
    Get Vasprun and Outcar objects from a directory containing the outputs
    of a previous VASP run. The exact filenames vasprun.xml and OUTCAR are
    preferred if present; otherwise the last matching file (sorted by name)
    is used.
    """
vruns = list(glob.glob(os.path.join(path, "vasprun.xml*")))
outcars = list(glob.glob(os.path.join(path, "OUTCAR*")))
if len(vruns) == 0 or len(outcars) == 0:
raise ValueError(
"Unable to get vasprun.xml/OUTCAR from prev calculation in %s" %
path)
vsfile_fullpath = os.path.join(path, "vasprun.xml")
outcarfile_fullpath = os.path.join(path, "OUTCAR")
vsfile = vsfile_fullpath if vsfile_fullpath in vruns else sorted(vruns)[-1]
outcarfile = outcarfile_fullpath if outcarfile_fullpath in outcars else sorted(outcars)[-1]
return Vasprun(str(vsfile), parse_dos=parse_dos, parse_eigen=parse_eigen), \
Outcar(str(outcarfile))
def get_structure_from_prev_run(vasprun, outcar=None, sym_prec=0.1,
international_monoclinic=True):
"""
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
        sym_prec (float): Tolerance for symmetry finding for standardization.
            If no standardization is desired, set to 0 or False.
        international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults to True.
Returns:
        The magmom-decorated structure, which can be passed to generate
        VASP input files, e.g. get_kpoints.
"""
structure = vasprun.final_structure
site_properties = {}
# magmom
if vasprun.is_spin:
if outcar and outcar.magnetization:
site_properties.update({"magmom": [i['tot']
for i in outcar.magnetization]})
else:
site_properties.update({"magmom": vasprun.parameters['MAGMOM']})
# ldau
if vasprun.parameters.get("LDAU", False):
for k in ("LDAUU", "LDAUJ", "LDAUL"):
vals = vasprun.incar[k]
m = {}
l = []
s = 0
for site in structure:
if site.specie.symbol not in m:
m[site.specie.symbol] = vals[s]
s += 1
l.append(m[site.specie.symbol])
if len(l) == len(structure):
site_properties.update({k.lower(): l})
else:
raise ValueError("length of list {} not the same as"
"structure".format(l))
structure = structure.copy(site_properties=site_properties)
if sym_prec:
sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
new_structure = sym_finder.get_primitive_standard_structure(
international_monoclinic=international_monoclinic)
# the primitive structure finding has had several bugs in the past
# defend through validation
vpa_old = structure.volume / structure.num_sites
vpa_new = new_structure.volume / new_structure.num_sites
if abs(vpa_old - vpa_new) / vpa_old > 0.02:
raise ValueError(
"Standardizing cell failed! VPA old: {}, VPA new: {}".format(
vpa_old, vpa_new))
sm = StructureMatcher()
if not sm.fit(structure, new_structure):
raise ValueError(
"Standardizing cell failed! Old structure doesn't match new.")
structure = new_structure
return structure
def batch_write_input(structures, vasp_input_set=MPRelaxSet, output_dir=".",
make_dir_if_not_present=True, subfolder=None,
sanitize=False, include_cif=False, **kwargs):
"""
Batch write vasp input for a sequence of structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
structures ([Structure]): Sequence of Structures.
vasp_input_set (VaspInputSet): VaspInputSet class that creates
vasp input files from structures. Note that a class should be
supplied. Defaults to MPRelaxSet.
output_dir (str): Directory to output files. Defaults to current
directory ".".
make_dir_if_not_present (bool): Create the directory if not present.
Defaults to True.
subfolder (callable): Function to create subdirectory name from
structure. Defaults to simply "formula_count".
sanitize (bool): Boolean indicating whether to sanitize the
            structure before writing the VASP input files. Sanitized outputs
            are generally easier for viewing and certain forms of analysis.
Defaults to False.
include_cif (bool): Whether to output a CIF as well. CIF files are
generally better supported in visualization programs.
\\*\\*kwargs: Additional kwargs are passed to the vasp_input_set class in
addition to structure.
"""
for i, s in enumerate(structures):
formula = re.sub(r'\s+', "", s.formula)
if subfolder is not None:
subdir = subfolder(s)
d = os.path.join(output_dir, subdir)
else:
d = os.path.join(output_dir, '{}_{}'.format(formula, i))
if sanitize:
s = s.copy(sanitize=True)
v = vasp_input_set(s, **kwargs)
v.write_input(d, make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif)
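# Illustrative usage sketch (not part of the original module). ``structures``
# is a hypothetical list of Structure objects.
#
#     batch_write_input(structures, vasp_input_set=MPRelaxSet,
#                       output_dir="./calcs", include_cif=True)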
| tallakahath/pymatgen | pymatgen/io/vasp/sets.py | Python | mit | 58,824 | 0.000357 |
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add support for UCSM VNIC Templates
Revision ID: b29f1026b281
Revises: 13bd9ebffbf5
Create Date: 2016-02-18 15:12:31.294651
"""
# revision identifiers, used by Alembic.
revision = 'b29f1026b281'
down_revision = '13bd9ebffbf5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('ml2_ucsm_vnic_templates',
sa.Column('vlan_id', sa.Integer(), nullable=False),
sa.Column('vnic_template', sa.String(length=64), nullable=False),
sa.Column('device_id', sa.String(length=64), nullable=False),
sa.Column('physnet', sa.String(length=32), nullable=False),
sa.Column('updated_on_ucs', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('vlan_id', 'vnic_template', 'device_id')
)
| Tehsmash/networking-cisco | networking_cisco/db/migration/alembic_migrations/versions/mitaka/expand/b29f1026b281_add_support_for_ucsm_vnic_templates.py | Python | apache-2.0 | 1,374 | 0.002183 |
import subprocess
import time
import sys
import re
class checkIfUp:
__shellPings = []
__shell2Nbst = []
__ipsToCheck = []
checkedIps = 0
onlineIps = 0
unreachable = 0
timedOut = 0
upIpsAddress = []
computerName = []
completeMacAddress = []
executionTime = 0
def __init__(self,fromIp,toIp):
startTime = time.time()
self.fromIp = fromIp # from 192.168.1.x
self.toIp = toIp # to 192.168.x.x
self.__checkIfIpIsValid(fromIp)
self.__checkIfIpIsValid(toIp)
self.__getRange(fromIp,toIp)
self.__shellToQueue()
#self.__checkIfUp() # run by the shellToQueue queue organizer
self.__computerInfoInQueue()
endTime = time.time()
self.executionTime = round(endTime - startTime,3)
def __checkIfIpIsValid(self,ip):
def validateRange(val):
            # valid range => 0 <-> 255
            try:
                val = int(val)
            except ValueError:
                # non-numeric input
                print "Invalid IP"
                sys.exit(0)
            if val < 0 or val > 255:
                print "Invalid IP Range ("+str(val)+")"
                sys.exit(0)
ip = ip.split(".")
firstVal = validateRange(ip[0])
secondVal = validateRange(ip[1])
thirdVal = validateRange(ip[2])
fourthVal = validateRange(ip[3])
return True
def __getRange(self,fromIp,toIp):
fromIp = fromIp.split(".")
toIp = toIp.split(".")
# toIp must be > fromIp
def ip3chars(ipBlock):
# input 1; output 001
ipBlock = str(ipBlock)
while len(ipBlock) != 3:
ipBlock = "0"+ipBlock
return ipBlock
fromIpRaw = ip3chars(fromIp[0])+ip3chars(fromIp[1])+ip3chars(fromIp[2])+ip3chars(fromIp[3])
toIpRaw = ip3chars(toIp[0])+ip3chars(toIp[1])+ip3chars(toIp[2])+ip3chars(toIp[3])
if fromIpRaw > toIpRaw:
# if from is bigger switch the order
temp = fromIp
fromIp = toIp
toIp = temp
currentIp = [0,0,0,0]
# all to integers
currentIp0 = int(fromIp[0])
currentIp1 = int(fromIp[1])
currentIp2 = int(fromIp[2])
currentIp3 = int(fromIp[3])
toIp0 = int(toIp[0])
toIp1 = int(toIp[1])
toIp2 = int(toIp[2])
toIp3 = int(toIp[3])
firstIp = str(currentIp0)+"."+str(currentIp1)+"."+str(currentIp2)+"."+str(currentIp3)
self.__ipsToCheck = [firstIp]
while currentIp3 != toIp3 or currentIp2 != toIp2 or currentIp1 != toIp1 or currentIp0 != toIp0:
currentIp3 += 1
if currentIp3 > 255:
currentIp3 = 0
currentIp2 += 1
if currentIp2 > 255:
currentIp2 = 0
currentIp1 += 1
if currentIp1 > 255:
currentIp1 = 0
currentIp0 += 1
addIp = str(currentIp0)+"."+str(currentIp1)+"."+str(currentIp2)+"."+str(currentIp3)
self.__ipsToCheck.append(addIp)
def __shellToQueue(self):
# write them in the shell queue
maxPingsAtOnce = 200
currentQueuedPings = 0
for pingIp in self.__ipsToCheck:
proc = subprocess.Popen(['ping','-n','1',pingIp],stdout=subprocess.PIPE,shell=True)
self.__shellPings.append(proc)
currentQueuedPings += 1
if currentQueuedPings >= maxPingsAtOnce:
#execute shells
self.__checkIfUp()
currentQueuedPings = 0
self.__shellPings = []
self.__checkIfUp() # execute last queue
def __checkIfUp(self):
# execute the shells & determine whether the host is up or not
for shellInQueue in self.__shellPings:
pingResult = ""
shellInQueue.wait()
while True:
line = shellInQueue.stdout.readline()
if line != "":
pingResult += line
else:
break;
self.checkedIps += 1
if 'unreachable' in pingResult:
self.unreachable += 1
elif 'timed out' in pingResult:
self.timedOut += 1
else:
self.onlineIps += 1
currentIp = self.__ipsToCheck[self.checkedIps-1]
self.upIpsAddress.append(currentIp)
def __computerInfoInQueue(self):
# shell queue for online hosts
maxShellsAtOnce = 255
currentQueuedNbst = 0
for onlineIp in self.upIpsAddress:
proc = subprocess.Popen(['\\Windows\\sysnative\\nbtstat.exe','-a',onlineIp],stdout=subprocess.PIPE,shell=True)
self.__shell2Nbst.append(proc)
currentQueuedNbst += 1
if currentQueuedNbst >= maxShellsAtOnce:
# execute shells
self.__gatherComputerInfo()
currentQueuedNbst = 0
self.__shell2Nbst = []
self.__gatherComputerInfo() # execute last queue
def __gatherComputerInfo(self):
# execute the shells and find host Name and MAC
for shellInQueue in self.__shell2Nbst:
nbstResult = ""
shellInQueue.wait()
computerNameLine = ""
macAddressLine = ""
computerName = ""
macAddress = ""
while True:
line = shellInQueue.stdout.readline()
if line != "":
if '<00>' in line and 'UNIQUE' in line:
computerNameLine = line
if 'MAC Address' in line:
macAddressLine = line
else:
break;
computerName = re.findall('([ ]+)(.*?)([ ]+)<00>', computerNameLine)
macAddress = re.findall('([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)-([A-Z0-9]+)',macAddressLine)
try:
self.computerName.append(computerName[0][1])
except:
self.computerName.append("")
completeMacAddress = ""
firstMacElement = 0
try:
for macEach in macAddress[0]:
if firstMacElement == 0:
firstMacElement += 1
else:
completeMacAddress += ":"
completeMacAddress += macEach
firstMacElement = 0
except:
completeMacAddress = ""
self.completeMacAddress.append(completeMacAddress)
def readValue(self):
# debugging use only
ips = []
for ip in self.completeMacAddress:
ips.append(ip)
return ips
print "\t\t---LANScanner v1.0---\n"
# brief tutorial
print "Sample input data:"
print "FromIP: 192.168.1.50"
print "ToIP: 192.168.1.20"
print "---"
# input
fromIp = raw_input("From: ")
toIp = raw_input("To: ")
# enter values to class
userRange = checkIfUp(fromIp,toIp)
# read class values
print ""
#print userRange.readValue() # debugging use only
print "Checked",userRange.checkedIps,"IPs"
print ""
print "Online:",str(userRange.onlineIps)+"/"+str(userRange.checkedIps)
print "Unreachable:",userRange.unreachable,"Timed out:",userRange.timedOut
print "" # newline
print "Online IPs:"
print "IP\t\tNAME\t\tMAC"
counter = 0
for onlineIp in userRange.upIpsAddress:
print onlineIp+"\t"+userRange.computerName[counter]+"\t"+userRange.completeMacAddress[counter]
counter += 1
print ""
print "Took",userRange.executionTime,"seconds" | mixedup4x4/Speedy | Contents/LanScan.py | Python | gpl-3.0 | 7,956 | 0.007793 |