repo_name | path | language | license | size | score | prefix | middle | suffix
---|---|---|---|---|---|---|---|---
stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k
endlessm/chromium-browser | tools/perf/benchmarks/speedometer2.py | Python | bsd-3-clause | 2,350 | 0.008936
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Apple's Speedometer 2 performance benchmark.
"""
import os
import re
from benchmarks import press
from core import path_util
from telemetry import benchmark
from telemetry import story
from page_sets import speedometer2_pages
_SPEEDOMETER_DIR = os.path.join(path_util.GetChromiumSrcDir(),
'third_party', 'blink', 'perf_tests', 'speedometer')
@benchmark.Info(emails=['hablich@chromium.org'],
component='Blink')
class Speedometer2(press._PressBenchmark): # pylint: disable=protected-access
"""Speedometer2 Benchmark.
Runs all the Speedometer 2 suites by default. Add --suite=<regex> to run
only the suites whose names match the regular expression provided.
"""
enable_smoke_test_mode = False
@classmethod
def Name(cls):
return 'speedometer2'
def CreateStorySet(self, options):
should_filter_suites = bool(options.suite)
filtered_suite_names = map(
speedometer2_pages.Speedometer2Story.GetFullSuiteName,
speedometer2_pages.Speedometer2Story.GetSuites(options.suite))
ps = story.StorySet(base_dir=_SPEEDOMETER_DIR)
ps.AddStory(speedometer2_pages.Speedometer2Story(ps, should_filter_suites,
filtered_suite_names, self.enable_smoke_test_mode))
return ps
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
parser.add_option('--suite', type="string",
help="Only runs suites that match regex provided")
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if args.suite:
try:
if not speedometer2_pages.Speedometer2Story.GetSuites(args.suite):
raise parser.error('--suite: No matches.')
except re.error:
raise parser.error('--suite: Invalid regex.')
@benchmark.Info(emails=['hablich@chromium.org'],
component='Blink')
class V8Speedometer2Future(Speedometer2):
"""Speedometer2 benchmark with the V8 flag --future.
Shows the performance of upcoming V8 VM features.
"""
@classmethod
def Name(cls):
return 'speedometer2-future'
def SetExtraBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-features=V8VmFuture')

rwth-ti/gr-ofdm | python/ofdm/qa_moms_ff.py | Python | gpl-3.0 | 1,256 | 0.007962
#!/usr/bin/env python
#
# Copyright 2014 Institute for Theoretical Information Technology,
# RWTH Aachen University
# www.ti.rwth-aachen.de
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import ofdm_swig as ofdm
class qa_moms_ff (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_moms_ff, "qa_moms_ff.xml")

Gaurang033/Selenium2Library | src/Selenium2Library/keywords/_browsermanagement.py | Python | apache-2.0 | 28,305 | 0.004911
import os.path
from robot.errors import DataError
from robot.utils import secs_to_timestr, timestr_to_secs
from selenium import webdriver
from selenium.common.exceptions import NoSuchWindowException
from Selenium2Library import webdrivermonkeypatches
from Selenium2Library.utils import BrowserCache
from Selenium2Library.locators import WindowManager
from .keywordgroup import KeywordGroup
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIREFOX_PROFILE_DIR = os.path.join(ROOT_DIR, 'resources', 'firefoxprofile')
BROWSER_NAMES = {'ff': "_make_ff",
'firefox': "_make_ff",
'ie': "_make_ie",
'internetexplorer': "_make_ie",
'googlechrome': "_make_chrome",
'gc': "_make_chrome",
'chrome': "_make_chrome",
'opera' : "_make_opera",
'phantomjs' : "_make_phantomjs",
'htmlunit' : "_make_htmlunit",
'htmlunitwithjs' : "_make_htmlunitwithjs",
'android': "_make_android",
'iphone': "_make_iphone",
'safari': "_make_safari",
'edge': "_make_edge"
}
class _BrowserManagementKeywords(KeywordGroup):
def __init__(self):
self._cache = BrowserCache()
self._window_manager = WindowManager()
self._speed_in_secs = float(0)
self._timeout_in_secs = float(5)
self._implicit_wait_in_secs = float(0)
# Public, open and close
def close_all_browsers(self):
"""Closes all open browsers and resets the browser cache.
After this keyword new indexes returned from `Open Browser` keyword
are reset to 1.
This keyword should be used in test or suite teardown to make sure
all browsers are closed.
"""
self._debug('Closing all browsers')
self._cache.close_all()
def close_browser(self):
"""Closes the current browser."""
if self._cache.current:
self._debug('Closing browser with session id %s'
% self._cache.current.session_id)
self._cache.close()
def open_browser(self, url, browser='firefox', alias=None, remote_url=False,
desired_capabilities=None, ff_profile_dir=None):
"""Opens a new browser instance to given URL.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
Optional alias is an alias for the browser instance and it can be used
for switching between browsers (just as index can be used). See `Switch
Browser` for more details.
Possible values for `browser` are as follows:
| firefox | FireFox |
| ff | FireFox |
| internetexplorer | Internet Explorer |
| ie | Internet Explorer |
| googlechrome | Google Chrome |
| gc | Google Chrome |
| chrome | Google Chrome |
| opera | Opera |
| phantomjs | PhantomJS |
| htmlunit | HTMLUnit |
| htmlunitwithjs | HTMLUnit with JavaScript support |
| android | Android |
| iphone | Iphone |
| safari | Safari |
| edge | Edge |
Note that you will encounter strange behavior if you open
multiple Internet Explorer browser instances. That is also why
`Switch Browser` only works with one IE browser at most.
For more information see:
http://selenium-grid.seleniumhq.org/faq.html#i_get_some_strange_errors_when_i_run_multiple_internet_explorer_instances_on_the_same_machine
Optional 'remote_url' is the url for a remote selenium server for example
http://127.0.0.1:4444/wd/hub. If you specify a value for remote you can
also specify 'desired_capabilities' which is a string in the form
key1:val1,key2:val2 that will be used to specify desired_capabilities
to the remote server. This is useful for doing things like specifying a
proxy server for Internet Explorer, or for specifying the browser and OS
if you're using saucelabs.com. 'desired_capabilities' can also be a dictionary
(created with 'Create Dictionary') to allow for more complex configurations.
Optional 'ff_profile_dir' is the path to the firefox profile dir if you
wish to overwrite the default.
"""
if remote_url:
self._info("Opening browser '%s' to base url '%s' through remote server at '%s'"
% (browser, url, remote_url))
else:
self._info("Opening browser '%s' to base url '%s'" % (browser, url))
browser_name = browser
browser = self._make_browser(browser_name,desired_capabilities,ff_profile_dir,remote_url)
try:
browser.get(url)
except:
self._cache.register(browser, alias)
self._debug("Opened browser with session id %s but failed to open url '%s'"
% (browser.session_id, url))
raise
self._debug('Opened browser with session id %s'
% browser.session_id)
return self._cache.register(browser, alias)
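# A hypothetical sketch (not part of the source shown) of how the
# 'key1:val1,key2:val2' capabilities string documented above could be
# turned into the dictionary a remote server expects:
#
#     def _parse_capabilities_string(capabilities_string):
#         capabilities = {}
#         for pair in capabilities_string.split(','):
#             key, value = pair.split(':', 1)
#             capabilities[key.strip()] = value.strip()
#         return capabilities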
def create_webdriver(self, driver_name, alias=None, kwargs={}, **init_kwargs):
"""Creates an instance of a WebDriver.
Like `Open Browser`, but allows passing arguments to a WebDriver's
__init__. _Open Browser_ is preferred over _Create Webdriver_ when
feasible.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
`driver_name` must be the exact name of a WebDriver in
_selenium.webdriver_ to use. WebDriver names include: Firefox, Chrome,
Ie, Opera, Safari, PhantomJS, and Remote.
Use keyword arguments to specify the arguments you want to pass to
the WebDriver's __init__. The values of the arguments are not
processed in any way before being passed on. For Robot Framework
< 2.8, which does not support keyword arguments, create a keyword
dictionary and pass it in as argument `kwargs`. See the
[http://selenium.googlecode.com/git/docs/api/py/api.html|Selenium API Documentation]
for information about argument names and appropriate argument values.
Examples:
| # use proxy for Firefox | | | |
| ${proxy}= | Evaluate | sys.modules['selenium.webdriver'].Proxy() | sys, selenium.webdriver |
| ${proxy.http_proxy}= | Set Variable | localhost:8888 | |
| Create Webdriver | Firefox | proxy=${proxy} | |
| # use a proxy for PhantomJS | | | |
| ${service args}= | Create List | --proxy=192.168.132.104:8888 | |
| Create Webdriver | PhantomJS | service_args=${service args} | |
Example for Robot Framework < 2.8:
| # debug IE driver | | | |
| ${kwargs}= | Create Dictionary | log_level=DEBUG | log_file=%{HOMEPATH}${/}ie.log |
| Create Webdriver | Ie | kwargs=${kwargs} | |
"""
if not isinstance(kwargs, dict):
raise RuntimeError("kwargs must be a dictionary.")
for arg_na

deepmind/enn | enn/experiments/neurips_2021/base.py | Python | apache-2.0 | 2,522 | 0.00912
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for GP testbed."""
import abc
from typing import Any, Dict, NamedTuple, Optional
import chex
import dataclasses
import typing_extensions
# Maybe this Data class needs to be a tf.Dataset
class Data(NamedTuple):
x: chex.Array
y: chex.Array
@dataclasses.dataclass
class PriorKnowledge:
input_dim: int
num_train: int
num_classes: int = 1
layers: Optional[int] = None
noise_std: Optional[float] = None
temperature: Optional[float] = None
extra: Optional[Dict[str, Any]] = None
@dataclasses.dataclass
class ENNQuality:
kl_estimate: float
extra: Optional[Dict[str, Any]] = None
class EpistemicSampler(typing_extensions.Protocol):
"""Interface for drawing posterior samples from distribution.
We are considering a model of data: y_i = f(x_i) + e_i.
In this case the sampler should only model f(x), not aleatoric y.
"""
def __call__(self, x: chex.Array, seed: int = 0) -> chex.Array:
"""Generate a random sample for epistemic f(x)."""
class TestbedAgent(typing_extensions.Protocol):
"""An interface for specifying a testbed agent."""
def __call__(self,
data: Data,
prior: Optional[PriorKnowledge] = None) -> EpistemicSampler:
"""Sets up a training procedure given ENN prior knowledge."""
class TestbedProblem(abc.ABC):
"""An interface for specifying a generative GP model of data."""
@abc.abstractproperty
def train_data(self) -> Data:
"""Access training data from the GP for ENN training."""
@abc.abstractmethod
def evaluate_quality(self, enn_sampler: EpistemicSampler) -> ENNQuality:
"""Evaluate the quality of a posterior sampler."""
@abc.abstractproperty
def prior_knowledge(self) -> PriorKnowledge:
"""Information describing the problem instance."""

morissette/devopsdays-hackathon-2016 | venv/lib/python2.7/site-packages/boto3/resources/model.py | Python | gpl-3.0 | 20,675 | 0.000048
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
The models defined in this file represent the resource JSON description
format and provide a layer of abstraction from the raw JSON. The advantages
of this are:
* Pythonic interface (e.g. ``action.request.operation``)
* Consumers need not change for minor JSON changes (e.g. renamed field)
These models are used both by the resource factory to generate resource
classes as well as by the documentation generator.
"""
import logging
from botocore import xform_name
logger = logging.getLogger(__name__)
class Identifier(object):
"""
A resource identifier, given by its name.
:type name: string
:param name: The name of the identifier
"""
def __init__(self, name, member_name=None):
#: (``string``) The name of the identifier
self.name = name
self.member_name = member_name
class Action(object):
"""
A service operation action.
:type name: string
:param name: The name of the action
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, name, definition, resource_defs):
self._definition = definition
#: (``string``) The name of the action
self.name = name
#: (:py:class:`Request`) This action's request or ``None``
self.request = None
if 'request' in definition:
self.request = Request(definition.get('request', {}))
#: (:py:class:`ResponseResource`) This action's resource or ``None``
self.resource = None
if 'resource' in definition:
self.resource = ResponseResource(definition.get('resource', {}),
resource_defs)
#: (``string``) The JMESPath search path or ``None``
self.path = definition.get('path')
class DefinitionWithParams(object):
"""
An item which has parameters exposed via the ``params`` property.
A request has an operation and parameters, while a waiter has
a name, a low-level waiter name and parameters.
:type definition: dict
:param definition: The JSON definition
"""
def __init__(self, definition):
self._definition = definition
@property
def params(self):
"""
Get a list of auto-filled parameters for this request.
:type: list(:py:class:`Parameter`)
"""
params = []
for item in self._definition.get('params', []):
params.append(Parameter(**item))
return params
class Parameter(object):
"""
An auto-filled parameter which has a source and target. For example,
the ``QueueUrl`` may be auto-filled from a resource's ``url`` identifier
when making calls to ``queue.receive_messages``.
:type target: string
:param target: The destination parameter name, e.g. ``QueueUrl``
:type source_type: string
:param source_type: Where the source is defined.
:type source: string
:param source: The source name, e.g. ``Url``
"""
def __init__(self, target, source, name=None, path=None, value=None,
**kwargs):
#: (``string``) The destination parameter name
self.target = target
#: (``string``) Where the source is defined
self.source = source
#: (``string``) The name of the source, if given
self.name = name
#: (``string``) The JMESPath query of the source
self.path = path
#: (``string|int|float|bool``) The source constant value
self.value = value
# Complain if we encounter any unknown values.
if kwargs:
logger.warning('Unknown parameter options found: %s', kwargs)
class Request(DefinitionWithParams):
"""
A service operation action request.
:type definition: dict
:param definition: The JSON definition
"""
def __init__(self, definition):
super(Request, self).__init__(definition)
#: (``string``) The name of the low-level service operation
self.operation = definition.get('operation')
class Waiter(DefinitionWithParams):
"""
An event waiter specification.
:type name: string
:param name: Name of the waiter
:type definition: dict
:param definition: The JSON definition
"""
PREFIX = 'WaitUntil'
def __init__(self, name, definition):
super(Waiter, self).__init__(definition)
#: (``string``) The name of this waiter
self.name = name
#: (``string``) The name of the underlying event waiter
self.waiter_name = definition.get('waiterName')
class ResponseResource(object):
"""
A resource response to create after performing an action.
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
#: (``string``) The name of the response resource type
self.type = definition.get('type')
#: (``string``) The JMESPath search query or ``None``
self.path = definition.get('path')
@property
def identifiers(self):
"""
A list of resource identifiers.
:type: list(:py:class:`Identifier`)
"""
identifiers = []
for item in self._definition.get('identifiers', []):
identifiers.append(
Parameter(**item))
return identifiers
@property
def model(self):
"""
Get the resource model for the response resource.
:type: :py:class:`ResourceModel`
"""
return ResourceModel(self.type, self._resource_defs[self.type],
self._resource_defs)
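# Sketch of how a (hypothetical) resource JSON snippet maps onto these
# classes -- e.g. an SQS-style action whose QueueUrl parameter is
# auto-filled from the resource's 'Url' identifier:
#
#     definition = {
#         'request': {
#             'operation': 'ReceiveMessage',
#             'params': [
#                 {'target': 'QueueUrl', 'source': 'identifier', 'name': 'Url'},
#             ],
#         },
#     }
#     action = Action('ReceiveMessages', definition, resource_defs={})
#     action.request.operation            # 'ReceiveMessage'
#     action.request.params[0].target     # 'QueueUrl'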
class Collection(Action):
"""
A group of resources. See :py:class:`Action`.
:type name: string
:param name: The name of the collection
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
@property
def batch_actions(self):
"""
Get a list of batch actions supported by the resource type
contained in this action. This is a shortcut for accessing
the same information through the resource model.
:rtype: list(:py:class:`Action`)
"""
return self.resource.model.batch_actions
class ResourceModel(object):
"""
A model representing a resource, defined via a JSON description
format. A resource has identifiers, attributes, actions,
sub-resources, references and collections. For more information
on resources, see :ref:`guide_resources`.
:type name: string
:param name: The name of this resource, e.g. ``sqs`` or ``Queue``
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, name, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
self._renamed = {}
#: (``string``) The name of this resource
self.name = name
#: (``string``) The service shape name for this resource or ``None``
self.shape = definition.get('shape')
def load_rename_map(self, shape=None):
"""
Load a name translation map given a shape. This will set
up renamed v

yddgit/hello-python | network/tcp_client.py | Python | apache-2.0 | 1,642 | 0.003584
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Create a socket based on a TCP connection
import socket
# Create a socket.
# AF_INET selects the IPv4 protocol; for IPv6 use AF_INET6.
# SOCK_STREAM selects the stream-oriented TCP protocol.
# At this point the socket object exists, but no connection is established yet.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Establish the connection; the argument is a tuple of (address, port).
s.connect(('www.baidu.com', 80))
# A TCP connection is a bidirectional channel; which side sends first is up
# to the application protocol. HTTP requires the client to send a request
# first, and only then does the server send data back.
s.send('GET / HTTP/1.1\r\nHost: www.baidu.com\r\nConnection: close\r\n\r\n')
# The request must follow the HTTP standard; if the format is correct, the
# server's response can be received next.
buffer = []
while True:
    d = s.recv(1024)  # receive at most 1 KB at a time
    if d:
        buffer.append(d)
    else:
        break
data = ''.join(buffer)
# Close the socket once all data has been received.
s.close()
# The received data contains both the HTTP header and the page itself; split
# them apart, print the header, and save the page content to a file.
header, html = data.split('\r\n\r\n', 1)
print header
# Write the received data to a file
#with open('baidu.html', 'wb') as f:
#    f.write(html)
# Client program for tcp_server.py
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Establish the connection
s.connect(('127.0.0.1', 9999))
# Receive the welcome message
print s.recv(1024)
for data in ['Michael', 'Tracy', 'Sarah']:
    s.send(data)  # send data
    print s.recv(1024)
s.send('exit')
s.close()
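# For reference, a minimal sketch (assumed; tcp_server.py itself is not in
# this dump) of the server side this client talks to:
#
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.bind(('127.0.0.1', 9999))
#     s.listen(5)
#     sock, addr = s.accept()
#     sock.send('Welcome!')
#     while True:
#         data = sock.recv(1024)
#         if not data or data == 'exit':
#             break
#         sock.send('Hello, %s!' % data)
#     sock.close()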

ypu/tp-qemu | qemu/tests/live_snapshot.py | Python | gpl-2.0 | 3,334 | 0
import time
import logging
from autotest.client.shared import error
from virttest import utils_test
from generic.tests import file_transfer
def run(test, params, env):
"""
live_snapshot test:
1). Create live snapshot during big file creating
2). Create live snapshot when guest reboot
3). Check if live snapshot is created
4). Shutdown guest
:param test: Kvm test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
@error.context_aware
def create_snapshot(vm):
"""
Create live snapshot:
1). Check which monitor is used
2). Get device info
3). Create snapshot
"""
error.context("Creating live snapshot ...", logging.info)
block_info = vm.monitor.info("block")
if vm.monitor.protocol == 'qmp':
device = block_info[0]["device"]
else:
device = "".join(block_info).split(":")[0]
snapshot_name = params.get("snapshot_name")
format = params.get("snapshot_format", "qcow2")
vm.monitor.live_snapshot(device, snapshot_name, format)
logging.info("Check snapshot is created ...")
snapshot_info = str(vm.monitor.info("block"))
if snapshot_name not in snapshot_info:
logging.error(snapshot_info)
raise error.TestFail("Snapshot doesn't exist")
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
dd_timeout = int(params.get("dd_timeout", 900))
session = vm.wait_for_login(timeout=timeout)
def runtime_test():
try:
clean_cmd = params.get("clean_cmd")
file_create = params.get("file_create")
clean_cmd += " %s" % file_create
logging.info("Clean file before creation")
session.cmd(clean_cmd)
logging.info("Creating big file...")
create_cmd = params.get("create_cmd") % file_create
args = (create_cmd, dd_timeout)
bg = utils_test.BackgroundTest(session.cmd_output, args)
bg.start()
time.sleep(5)
create_snapshot(vm)
if bg.is_alive():
try:
bg.join()
except Exception:
raise
finally:
session.close()
def reboot_test():
try:
bg = utils_test.BackgroundTest(vm.reboot, (session,))
logging.info("Rebooting guest ...")
bg.start()
sleep_time = int(params.get("sleep_time"))
time.sleep(sleep_time)
create_snapshot(vm)
finally:
bg.join()
def file_transfer_test():
try:
bg_cmd = file_transfer.run_file_transfer
args = (test, params, env)
bg = utils_test.BackgroundTest(bg_cmd, args)
bg.start()
sleep_time = int(params.get("sleep_time"))
time.sleep(sleep_time)
create_snapshot(vm)
if bg.is_alive():
try:
bg.join()
except Exception:
raise
finally:
session.close()
subcommand = params.get("subcommand")
eval("%s_test()" % subcommand)

Bachmann1234/hn-tldr | constants.py | Python | apache-2.0 | 586 | 0
import os
TOP_STORIES_KEY = b'top_30'
TITLE = 'title'
URL = 'url'
BODY = 'body'
SENTENCES = 'sentences'
HACKER_NEWS_ID = 'hn_id'
TEXT = 'text'
DATE_FOUND = 'date_found'
AYLIEN_ID = 'AYLIENID'
AYLIEN_KEY = 'AYLIENKEY'
REDIS_HOST = 'REDIS_HOST'
REDIS_PORT = 'REDIS_PORT'
REDIS_PASS = 'REDIS_PASS'
def get_environment():
    return {
        AYLIEN_ID: os.environ.get(AYLIEN_ID),
        AYLIEN_KEY: os.environ.get(AYLIEN_KEY),
        REDIS_HOST: os.environ.get(REDIS_HOST),
        # misplaced parenthesis fixed: 0 is the default value, not an int() base
        REDIS_PORT: int(os.environ.get(REDIS_PORT, 0)),
        REDIS_PASS: os.environ.get(REDIS_PASS),
    }
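# Hypothetical usage sketch: snapshot the configuration once at startup.
#
#     env = get_environment()
#     redis_host, redis_port = env[REDIS_HOST], env[REDIS_PORT]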

owainkenwayucl/utils | src/fortwrangler.py | Python | mit | 6,251 | 0.007359
#!/usr/bin/env python3
# Fortwrangler is a tool that attempts to resolve issues with fortran lines over standard length.
# Global libraries
import sys
# Global variables
# Strings inserted for continuation
CONTINUATION_ENDLINE = "&\n"
CONTINUATION_STARTLINE = " &"
# Line length settings
MIN_LENGTH = len(CONTINUATION_STARTLINE) + len(CONTINUATION_ENDLINE) + 1
FIXED_LINE_LENGTH = 80 # We don't actually do fixed format files, but I prefer 80 col anyway.
FREE_LINE_LENGTH = 132
DEFAULT_LINE_LENGTH = FREE_LINE_LENGTH
# I/O settings
STDERR = sys.stderr
STDOUT = sys.stdout
# We can't use Python's string splitter as we want to handle string literals properly.
def string_split(s, sep=" "):
inquotes=False
retlist = []
token = ""
for character in s.strip():
if character == sep and not inquotes:
if not (token == ""):
token = token + sep
retlist.append(token)
token = ""
else:
token = token + character
elif character == '"' and not inquotes:
inquotes = True
token = token + character
elif character == '"' and inquotes:
inquotes = False
token = token + character
else:
token = token + character
if not (token == ""):
retlist.append(token)
return retlist
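# Behaviour sketch of the splitter above: tokens keep a trailing separator
# and quoted literals survive intact, e.g.
#     string_split('call foo "a b" end')
#     -> ['call ', 'foo ', '"a b" ', 'end']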
# Fix a given file.
def force_fix_file(filename, maxlength=DEFAULT_LINE_LENGTH, output=STDOUT):
with open(filename) as infile:
for line in infile:
if len(line) > maxlength + 1:
tempstr=line[:(len(line) - (len(line.lstrip())-1)-1)]
tokens = string_split(line)
index = 0
for t in tokens:
if t == "!":
# Comments can be longer because the compiler just ignores them.
tempstr = tempstr + " ".join(tokens[index:len(tokens)])
break
else:
if (len(tempstr + t + " " + CONTINUATION_ENDLINE)) < maxlength + 1:
tempstr = tempstr + t + " "
else:
if (t.startswith('"') and t.endswith('"')):
tempstr = tempstr + t + " "
while (len(tempstr) > maxlength + 1):
outstr = tempstr[:(maxlength-1)] + CONTINUATION_ENDLINE
output.write(outstr)
tempstr = CONTINUATION_STARTLINE + tempstr[(maxlength-1):]
output.write(tempstr)
tempstr=""
else:
output.write(tempstr + " " + CONTINUATION_ENDLINE)
tempstr=CONTINUATION_STARTLINE + " " + t + " "
index += 1
output.write(tempstr + "\n")
else:
output.write(line)
# Only fix files if the violate the length rules!
def fix_file(filename, maxlength=DEFAULT_LINE_LENGTH, output=STDOUT):
if not check_file(filename):
force_fix_file(filename, maxlength, output)
else:
STDERR.write(filename + " not over line length, not modifying\n")
# Check to see if a file has lines longer than allowed, optionally report.
def check_file(filename, maxlength=DEFAULT_LINE_LENGTH, report=None):
overlengthlines = {}
counter = 0
with open(filename) as f:
for line in f:
counter += 1
if (len(line)) > maxlength + 1: # New lines count in Python line length.
overlengthlines[counter] = len(line)
if report != None:
report.write(filename + ": " + str(len(overlengthlines)) + "\n")
for a in sorted(overlengthlines.keys()):
report.write(str(a) + ": " + str(overlengthlines[a]) + "\n")
return len(overlengthlines) == 0
# Our main procedure.
# Arguments at the command-line:
# -o <file> - write out to file instead of stdout
# -i <extension> - do in place
# -c - check only
# -w <number> - set line length
def main():
import argparse
#check_file("example.f90", report=STDERR)
#fix_file("example.f")
maxlength = DEFAULT_LINE_LENGTH
output = STDOUT
parser = argparse.ArgumentParser(description="Fix free format Fortran files with invalid line lengths.")
parser.add_argument("-c", action="store_true", help="Check only.")
parser.add_argument("-i", metavar="ext", type=str, help="Do in place, back up copy with extension specified.")
parser.add_argument("-w", metavar="linelength", type=int, help="Custom line length.")
parser.add_argument("-o", metavar="outputfilename", type=str, help="Output to a file instead of STDOUT.")
parser.add_argument("files", metavar="file", type=str, nargs="+",help="Files to fix.")
args=parser.parse_args()
if args.w != None:
if args.w >= MIN_LENGTH:
maxlength = args.w
else:
STDERR.write("Error - you have specified a length [" + str(args.w) + "] smaller than the minimum possible ["+ str(MIN_LENGTH) + "]\n")
sys.exit(2)
if args.o and args.i:
STDERR.write("Error - you cannot both write output to a separate file and write it in place.\n")
sys.exit(1)
else:
if args.o != None:
outfile = open(args.o, 'w')
output = outfile
if args.c:
for a in args.files:
check_file(a, maxlength=maxlength, report=output)
elif args.i != None:
import os
for a in args.files:
if not check_file(a):
STDERR.write("Fixing file: " + a + "\n")
os.rename(a, a + args.i)
inplacefile = open(a, 'w')
force_fix_file(a + args.i, maxlength=maxlength, output=inplacefile)
inplacefile.close()
else:
for a in args.files:
fix_file(a, maxlength=maxlength, output=output)
if args.o != None:
outfile.close()
if __name__ == "__main__":
main()

jatchley/OSU-Online-CS | 325/Final Project/Final.py | Python | mit | 4,765 | 0.004617
#**********************************************************************
# CS 325 - Project Group 9
# Joshua Atchley
# Aalon Cole
# Patrick Kilgore
#
# Project - Solving the Travelling Salesman Problem with Approximation
#
# Algorithm - Simulated Annealing as described in:
# Hansen, Per Brinch, "Simulated Annealing" (1992). Electrical
# Engineering and Computer Science Technical Reports. Paper 170.
# http://surface.syr.edu/eecs_techreports/170
#**********************************************************************
import math
import sys
import time
import random
from timeit import default_timer as timer
class City:
def __init__(self, number, xc, yc):
self.cNum = number
self.x = xc
self.y = yc
def distanceTo(self, endpoint):
xdiff = endpoint.x - self.x
ydiff = endpoint.y - self.y
dist = math.sqrt(xdiff*xdiff + ydiff*ydiff)
return int(round(dist))
def tourLength(tourArray):
n = len(tourArray)
dSum = tourArray[n-1].distanceTo(tourArray[0])
for i in range(n-1):
dSum += tourArray[i].distanceTo(tourArray[i+1])
return dSum
def initTour(inFile):
cities = []
for line in inFile:
if line != "":
cParams = [int(n) for n in line.split()]
cities.append(City(cParams[0], cParams[1], cParams[2]))
return cities
def anneal(tour, Tmax, alpha, steps, attempts, changes, startTime):
temp = Tmax
for k in range(steps):
# changed to loop for a fixed number of steps instead of cooling to a threshold:
#while temp > 1e-6:
print("Temperature = {}, Tour Length = {}, Time Elapsed = {}".format(temp, tourLength(tour), timer() - startTime))
tour = tSearch(tour, temp, attempts, changes)
temp *= alpha
return tour
def tSearch(tour, temp, attempts, changes):
nAtt = 0
nChg = 0
while nAtt < attempts and nChg < changes:
# tSelect will return the tuple ci, cj, dE
selectionTuple = tSelect(tour)
if accept(selectionTuple[2], temp):
tour = tChange(tour, selectionTuple[0], selectionTuple[1])
nChg += 1
nAtt += 1
if nAtt >= attempts:
print("Max number of attempts reached, cooling...")
if nChg >= changes:
print("Max number of tour changes reached, cooling...")
return tour
def tSelect(tour):
# pick random cities in tour
ci = random.randint(0, len(tour) - 1)
cj = random.randint(0, len(tour) - 1)
# find the cities directly after ci and cj
cinx = (ci + 1) % len(tour)
cjnx = (cj + 1) % len(tour)
# calculate energy change , i.e. tour length change, for reversing the sequence
# between ci and cj
if ci != cj:
dE = (tour[ci].distanceTo(tour[cj]) + tour[cinx].distanceTo(tour[cjnx])
- tour[ci].distanceTo(tour[cinx]) - tour[cj].distanceTo(tour[cjnx]))
else:
dE = 0
return ci, cj, float(dE)
def accept(dE, temp):
if dE > 0:
acceptance = (math.exp(-dE / temp) > random.random())
else:
acceptance = True
return acceptance
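# This is the Metropolis criterion: a move that worsens the tour by dE > 0
# is still accepted with probability exp(-dE / temp), so uphill moves become
# rarer as the temperature falls.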
def tChange(tour, ci, cj):
n = len(tour)
# snippet does not wrap around end of list
if ci < cj:
tSnip = tour[(ci+1):(cj+1)]
rSnip = list(reversed(tSnip))
tour[(ci + 1):(cj + 1)] = rSnip[:]
else:
# the snippet wraps around the end of the list, so ninjutsu is needed...
tSnip = tour[(ci+1):] + tour[:(cj+1)]
rSnip = list(reversed(tSnip))
divider = len(tour[(ci+1):])
tour[(ci+1):] = rSnip[:divider]
tour[:(cj + 1)] = rSnip[divider:]
return tour
def main():
random.seed(time.clock())
# set up I/O files
inputFileName = str(sys.argv[1])
#inputFileName = sys.path[0] + "/tsp_example_3.txt"
inputFile = open(inputFileName, 'r')
outputFileName = inputFileName + ".tour"
outputFile = open(outputFileName, 'w')
# the cityTour list will hold the current tour sequence
cityTour = initTour(inputFile)
random.shuffle(cityTour)
# initialize simulation parameters per recommendations by Hansen
n = len(cityTour)
Tmax = round(math.sqrt(n))
alpha = 0.95
steps = 20 * int(round(math.log1p(n + 1)))
attempts = 100 * n
changes = 10 * n
# call the annealing function with the defined parameters
startTime = timer()
cityTour = anneal(cityTour, Tmax, alpha, steps, attempts, changes, startTime)
end = timer()
print("Algorithm ran in {} seconds".format(end - startTime))
# write output file
outputFile.write(str(tourLength(cityTour)) + '\n')
for k in range(n):
outstring = str(cityTour[k].cNum) + '\n'
outputFile.write(outstring)
inputFile.close()
outputFile.close()
if __name__ == '__main__':
main()

banga/powerline-shell | test/repo_stats_test.py | Python | mit | 687 | 0
import unittest
from powerline_shell.utils import RepoStats
class RepoStatsTest(unittest.TestCase):
def setUp(self):
self.repo_stats = RepoStats()
self.repo_stats.changed = 1
self.repo_stats.conflicted = 4
def test_dirty(self):
self.assertTrue(self.repo_stats.dirty)
def test_simple(self):
self.assertEqual(self.repo_stats.new, 0)
def test_n_or_empty__empty(self):
self.assertEqual(self.repo_stats.n_or_empty("changed"), u"")
def test_n_or_empty__n(self):
self.assertEqual(self.repo_stats.n_or_empty("conflicted"), u"4")
def test_index(self):
self.assertEqual(self.repo_stats["changed"], 1)

Dru89/2do | project_2do/wsgi.py | Python | mit | 399 | 0
"""
WSGI config for project_2do project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
""
|
"
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdef
|
ault("DJANGO_SETTINGS_MODULE", "project_2do.settings")
application = get_wsgi_application()

lizardsystem/lizard-security | lizard_security/admin.py | Python | gpl-3.0 | 5,909 | 0
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt
# -*- coding: utf-8 -*-
"""
Lizard-security's ``admin.py`` contains two kinds of model admins:
- Our own model admins to make editing data sets, permission mappers and user
groups easier.
- ``SecurityFilteredAdmin`` as a base class for admins of models that use
lizard-security's data set mechanism.
"""
from django.contrib import admin
from django.contrib.auth.models import Permission
from tls import request as tls_request
from django.forms import ModelForm
from lizard_security.models import DataSet
from lizard_security.models import PermissionMapper
from lizard_security.models import UserGroup
from lizard_security.middleware import USER_GROUP_IDS
class DataSetAdmin(admin.ModelAdmin):
"""Unmodified admin for data sets."""
model = DataSet
class UserGroupAdminForm(ModelForm):
"""Custom form for user groups: ensures managers are also members.
A user group's manager should also automatically be a member. Otherwise
we'd need two queries to determine user group membership, now only one.
"""
class Meta:
model = UserGroup
def clean(self):
"""Make sure all managers are also members."""
members = list(self.cleaned_data['members'])
for manager in self.cleaned_data['managers']:
if manager not in members:
members.append(manager)
self.cleaned_data['members'] = members
return self.cleaned_data
class UserGroupAdmin(admin.ModelAdmin):
"""Custom admin for user groups: show manager/membership info directly.
User groups are also filtered to only those you are a manager of.
"""
model = UserGroup
form = UserGroupAdminForm
list_display = ('name', 'manager_info', 'number_of_members')
search_fields = ('name', )
filter_horizontal = ('managers', 'members')
def queryset(self, request):
"""Limit user groups to those you manage.
The superuser can edit all user groups, of course.
"""
qs = super(UserGroupAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(id__in=request.user.managed_user_groups.all())
class PermissionMapperAdmin(admin.ModelAdmin):
"""Custom admin for permission mapper: editable in the list display.
The most important items, data set and permission group, are editable in
the list display. The list display also gives you a good view on all data,
which is needed to keep track of all the various security settings if you
have more than a handful of permission mappers.
"""
model = PermissionMapper
list_display = ('name', 'user_group', 'data_set', 'permission_group')
list_editable = ('user_group', 'data_set', 'permission_group')
list_filter = ('user_group', 'data_set', 'permission_group')
search_fields = ('name', 'data_set__name')
class SecurityFilteredAdmin(admin.ModelAdmin):
"""Custom admin base class for models that use lizard-security data sets.
Django's default admin looks at global permissions to determine if you can
even view a certain model in the admin. SecurityFilteredAdmin takes
lizard-security's permission mapper into account.
"""
def _available_permissions(self):
"""Return all permissions we have through user group membership.
This method is used by the ``has_{add|change|delete}_permission()``
methods. They have to determine whether we have rights to
add/change/delete *some* instance of the model we're the admin for. So
we don't have to look at data sets, only at which permissions are
somehow connected to the user groups we're a member of.
"""
user_group_ids = getattr(tls_request, USER_GROUP_IDS, None)
if user_group_ids:
permissions = Permission.objects.filter(
group__permissionmapper__user_group__id__in=user_group_ids)
permissions = [(perm.content_type.app_label + '.' + perm.codename)
for perm in permissions]
return permissions
return []
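# The permission strings built above have the form 'app_label.codename',
# e.g. 'lizard_security.change_dataset' (hypothetical example), matching
# what request.user.has_perm() expects.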
def has_add_permission(self, request):
"""Return True if the given request has permission to add an object.
"""
opts = self.opts
perm = opts.app_label + '.' + opts.get_add_permission()
if request.user.has_perm(perm):
return True
return perm in self._available_permissions()
def has_change_permission(self, request, obj=None):
"""Return True if we have permission to change the object.
If ``obj`` is None, we just have to check if we have global
permissions or if we have the permission through a permission mapper.
TODO: specific check for object permissions.
"""
opts = self.opts
perm = opts.app_label + '.' + opts.get_change_permission()
# TODO: object permissions
if request.user.has_perm(perm):
return True
result = perm in self._available_permissions()
print "%r in %s: %s" % (perm, self._available_permissions(), result)
return result
def has_delete_permission(self, request, obj=None):
"""Return True if we have permission to delete the object.
If ``obj`` is None, we just have to check if we have global
permissions or if we have the permission through a permission mapper.
TODO: specific check for object permissions.
"""
opts = self.opts
perm = opts.app_label + '.' + opts.get_delete_permission()
# TODO: object permissions
if request.user.has_perm(perm):
return True
return perm in self._available_permissions()
admin.site.register(DataSet, DataSetAdmin)
admin.site.register(UserGroup, UserGroupAdmin)
admin.site.register(PermissionMapper, PermissionMapperAdmin)

JohanComparat/pySU | spm/bin_SMF/create_table_snr.py | Python | cc0-1.0 | 8,566 | 0.025683
import astropy.io.fits as fits
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
from scipy.stats import scoreatpercentile as sc
from scipy.interpolate import interp1d
survey = sys.argv[1]
z_min, z_max = 0., 1.6
imfs = ["Chabrier_ELODIE_", "Chabrier_MILES_", "Chabrier_STELIB_", "Kroupa_ELODIE_", "Kroupa_MILES_", "Kroupa_STELIB_", "Salpeter_ELODIE_", "Salpeter_MILES_", "Salpeter_STELIB_" ]
z_bins = n.array([0, 0.025, 0.375, 0.7, 0.85, 1.6])
key_SNR = 'SNR_ALL'
SNR_keys = n.array([ 'SNR_32_35', 'SNR_35_39', 'SNR_39_41', 'SNR_41_55', 'SNR_55_68', 'SNR_68_74', 'SNR_74_93' ])
SNR_w_min = n.array([ 32, 35, 39, 41, 55, 68, 74 ])
SNR_w_max = n.array([ 35, 39, 41, 55, 68, 74, 93 ])
wl_40 = ((z_bins[1:]+z_bins[:-1]) * 0.5 + 1)*40.
snr_ids = n.searchsorted(SNR_w_max, wl_40)
print(SNR_keys[snr_ids])
out_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'results')
#path_2_MAG_cat = os.path.join( os.environ['HOME'], 'SDSS', "dr14_specphot_gri.fits" )
#hd = fits.open(path_2_MAG_cat)
#path_2_sdss_cat = os.path.join( os.environ['HOME'], 'SDSS', '26', 'catalogs', "FireFly.fits" )
#path_2_eboss_cat = os.path.join( os.environ['HOME'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly.fits" )
path_2_sdss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', '26', 'catalogs', "FireFly.fits" )
path_2_eboss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly.fits" )
# OPENS THE CATALOGS
print("Loads catalog")
if survey =='deep2':
deep2_dir = os.path.join(os.environ['OBS_REPO'], 'DEEP2')
path_2_deep2_cat = os.path.join( deep2_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck13.spm.fits" )
catalog = fits.open(path_2_deep2_cat)[1].data
if survey =='sdss':
catalog = fits.open(path_2_sdss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z', 'Z_ERR', 'CLASS', 'ZWARNING'
if survey =='boss':
catalog = fits.open(path_2_eboss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO'
IMF = imfs[0]
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
print(IMF, prf)
name, zflg_val, prefix = prf, 0., IMF
catalog_0 = (catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name]) & (catalog[class_name]=='GALAXY') & (catalog[zwarning]==zflg_val) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_zOk = catalog_0 & (catalog['SNR_ALL']>0)
converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.8 )
dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.4 )
#target_bits
program_names = n.array(list(set( catalog['PROGRAMNAME'] )))
program_names.sort()
sourcetypes = n.array(list(set( catalog['SOURCETYPE'] )))
sourcetypes.sort()
length = lambda selection : len(selection.nonzero()[0])
pcs_ref = list(n.arange(0., 101, 5))
g = lambda key, s1, pcs = pcs_ref : n.hstack(( length(s1), sc(catalog[key][s1], pcs) ))
sel_pg = lambda pgr : (catalog_zOk) & (catalog['PROGRAMNAME']==pgr)
sel_st = lambda pgr : (catalog_zOk) & (catalog['SOURCETYPE']==pgr)
sel0_pg = lambda pgr : (catalog_0) & (catalog['PROGRAMNAME']==pgr)
sel0_st = lambda pgr : (catalog_0) & (catalog['SOURCETYPE']==pgr)
all_galaxies = []
tpps = []
for pg in sourcetypes:
sel_all = sel_st(pg)
n_all = length( sel_all )
if n_all > 100 :
#print(pg, n_all)
all_galaxies.append(n_all)
all_out = []
for z_Min, z_Max, snr_key in zip(z_bins[:-1], z_bins[1:], SNR_keys[snr_ids]):
s_z = sel_all &(catalog[z_name] >= z_Min) & (catalog[z_name] < z_Max)
n_z = length(s_z)
#print(z_Min, z_Max, n_z)
if n_z > 0 :
#print(n.min(catalog[snr_key][s_z]), n.max(catalog[snr_key][s_z]))
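# itp inverts the percentile curve of SNR in this redshift bin: itp(v)
# approximates the percentage of spectra with SNR below v, so itp(5) and
# itp(20) below report the fractions under SNR 5 and 20.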
itp = interp1d(sc(catalog[snr_key][s_z], pcs_ref), pcs_ref, kind='linear', fill_value= 100., bounds_error=False)
#print(itp.x, itp.y)
all_out.append( [n_z, itp(5), itp(20)] )
else :
all_out.append([0., -1, -1])
all_out = n.hstack((all_out))
tpp = pg + " & " + str(int(n_all)) + " & " + " & ".join(n.array([ str(int(el)) for el in all_out]) ) + ' \\\\ \n'
print( tpp)
tpps.append(tpp)
all_galaxies = n.array(all_galaxies)
tpps = n.array(tpps)
ids = n.argsort(all_galaxies)[::-1]
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_comp_"+survey+"_snr_all_sourcetype_SNR_moments.tex")
f=open(out_file, 'w')
#f.write('source type & N & \multicolumn{c}{2}{N galaxies} && \multicolumn{c}{2}{SNR ALL$>0$} & \\multicolumn{c}{2}{frefly converged} & \multicolumn{c}{2}{$\sigma_{\log_M}<0.4$} & \multicolumn{c}{2}{$\sigma_{\log_M}<0.2$} \\\\ \n')
#f.write(' & & N & % & & N & % & N & % & N & % \\\\ \n')
for jj in ids :
f.write( tpps[jj] )
f.close()
sys.exit()
#converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
#dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.8 )
#dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.4 )
#m_catalog = n.log10(catalog[prefix+'stellar_mass'])
#w_catalog = n.ones_like(catalog[prefix+'stellar_mass'])
#print(ld(catalog_zOk))
#return name + " & $"+ sld(converged)+"$ ("+str(n.round(ld(converged)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex04)+"$ ("+str(n.round(ld(dex04)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex02)+ "$ ("+str(n.round(ld(dex02)/ld(catalog_zOk)*100.,1))+r") \\\\"
##return catalog_sel, m_catalog, w_catalog
sys.exit()
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_deep2(deep2, 'ZBEST', 'ZQUALITY', prf, 2., IMF, o2=False)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(boss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(boss, 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO', prf, 0., IMF)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(sdss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(sdss, 'Z', 'Z_E

sanctuaryaddon/sanctuary | script.module.liveresolver/lib/liveresolver/resolvers/playwire.py | Python | gpl-2.0 | 1,286 | 0.01944
# -*- coding: utf-8 -*-
import re,urlparse,json
from liveresolver.modules import client
from BeautifulSoup import BeautifulSoup as bs
import xbmcgui
def resolve(url):
    try:
        result = client.request(url)
        html = result
        result = json.loads(result)
        try:
            f4m = result['content']['media']['f4m']
        except:
            reg = re.compile('"src":"http://(.+?).f4m"')
            # fixed undefined name: the regex match is assigned to 'pom'
            # before being used to build the f4m url
            pom = re.findall(reg, html)[0]
            f4m = 'http://' + pom + '.f4m'
        result = client.request(f4m)
        soup = bs(result)
        try:
            base = soup.find('baseURL').getText() + '/'
        except:
            base = soup.find('baseurl').getText() + '/'
        linklist = soup.findAll('media')
        choices, links = [], []
        for link in linklist:
            url = base + link['url']
            bitrate = link['bitrate']
            choices.append(bitrate)
            links.append(url)
        if len(links) == 1:
            return links[0]
        if len(links) > 1:
            dialog = xbmcgui.Dialog()
            index = dialog.select('Select bitrate', choices)
            if index > -1:
                return links[index]
        return
    except:
        return

Awesomecase/Speedtest | speedtest_sendtest/__init__.py | Python | gpl-3.0 | 71 | 0
__all__ = ["speedtest_exceptions", "speedtest"]
from . import sendtest

julien6387/supervisors | supvisors/client/subscriber.py | Python | apache-2.0 | 6,689 | 0.001196
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import threading
import zmq
from supervisor import loggers
from supervisor.loggers import LevelsByName
from supvisors.supvisorszmq import EventSubscriber
from supvisors.utils import EventHeaders
def create_logger(logfile=r'subscriber.log', loglevel=LevelsByName.INFO,
fmt='%(asctime)s %(levelname)s %(message)s\n',
rotating=True, maxbytes=10 * 1024 * 1024, backups=1, stdout=True):
""" Return a Supervisor logger. """
logger = loggers.getLogger(loglevel)
if stdout:
loggers.handle_stdout(logger, fmt)
loggers.handle_file(logger, logfile, fmt, rotating, maxbytes, backups)
return logger
class SupvisorsEventInterface(threading.Thread):
""" The SupvisorsEventInterface is a python thread that connects
to **Supvisors** and receives the events published.
The subscriber attribute shall be used to define the event types of interest.
The SupvisorsEventInterface requires:
- a ZeroMQ context,
- the event port number used by **Supvisors** to publish its events,
- a logger reference to log traces.
This event port number MUST correspond to the ``event_port`` value set
in the ``[supvisors]`` section of the Supervisor configuration file.
The default behaviour is to print the messages received.
For any other behaviour, just specialize the methods `on_xxx_status`.
Attributes:
- logger: the reference to the logger,
- subscriber: the wrapper of the ZeroMQ socket connected to **Supvisors**,
- stop_event: when set, breaks the infinite loop of the thread.
Constants:
- _Poll_timeout: duration used to time out the ZeroMQ poller, defaulted to 500 milliseconds.
"""
_Poll_timeout = 500
def __init__(self, zmq_context, event_port, logger):
""" Initialization of the attributes. """
# thread attributes
threading.Thread.__init__(self)
# store the parameters
self.zmq_context = zmq_context
self.event_port = event_port
self.logger = logger
# create stop event
self.stop_event = threading.Event()
def stop(self):
""" This method stops the main loop of the thread. """
self.logger.info('request to stop main loop')
self.stop_event.set()
def run(self):
""" Main loop of the thread. """
# create event socket
self.subscriber = EventSubscriber(self.zmq_context, self.event_port, self.logger)
self.configure()
# create poller and register event subscriber
poller = zmq.Poller()
poller.register(self.subscriber.socket, zmq.POLLIN)
# poll for events, timing out every _Poll_timeout milliseconds
self.logger.info('entering main loop')
while not self.stop_event.is_set():
socks = dict(poller.poll(self._Poll_timeout))
# check if something happened on the socket
if self.subscriber.socket in socks and \
socks[self.subscriber.socket] == zmq.POLLIN:
self.logger.debug('got message on subscriber')
try:
message = self.subscriber.receive()
except Exception as e:
self.logger.error(
'failed to get data from subscriber: {}'.format(e.message))
else:
if message[0] == EventHeaders.SUPVISORS:
self.on_supvisors_status(message[1])
elif message[0] == EventHeaders.ADDRESS:
self.on_address_status(message[1])
elif message[0] == EventHeaders.APPLICATION:
self.on_application_status(message[1])
elif message[0] == EventHeaders.PROCESS_EVENT:
self.on_process_event(message[1])
elif message[0] == EventHeaders.PROCESS_STATUS:
self.on_process_status(message[1])
self.logger.warn('exiting main loop')
self.subscriber.close()
def configure(self):
""" Default is subscription to everything. """
self.logger.info('subscribe to all messages')
self.subscriber.subscribe_all()
def on_supvisors_status(self, data):
""" Just logs the contents of the Supvisors Status message. """
self.logger.info('got Supvisors Status message: {}'.format(data))
def on_address_status(self, data):
""" Just logs the contents of the Address Status message. """
self.logger.info('got Address Status message: {}'.format(data))
def on_application_status(self, data):
""" Just logs the contents of the Application Status message. """
self.logger.info('got Application Status message: {}'.format(data))
def on_process_event(self, data):
""" Just logs the contents of the Process Event message. """
self.logger.info('got Process Event message: {}'.format(data))
def on_process_status(self, data):
""" Just logs the contents of the Process Status message. """
self.logger.info('got Process Status message: {}'.format(data))
if __name__ == '__main__':
import argparse
import time
# get arguments
parser = argparse.ArgumentParser(description='Start a subscriber to Supvisors events.')
parser.add_argument('-p', '--port', type=int, default=60002,
help="the event port of Supvisors")
parser.add_argument('-s', '--sleep', type=int, metavar='SEC', default=10,
help="the duration of the subscription")
args = parser.parse_args()
# create test subscriber
loop = SupvisorsEventInterface(zmq.Context.instance(), args.port, create_logger())
loop.subscriber.subscribe_all()
# start thread and sleep for a while
loop.start()
time.sleep(args.sleep)
# stop thread and halt
loop.stop()
loop.join()
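# A minimal specialization sketch, as the class docstring suggests: keep the
# default subscribe_all() and override only the callback of interest.
#
#     class ProcessEventLogger(SupvisorsEventInterface):
#         def on_process_event(self, data):
#             self.logger.warn('process event: {}'.format(data))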

williechen/DailyApp | 12/py201501/Ch01_03/__init__.py | Python | lgpl-3.0 | 62 | 0.020833
#-*- coding: utf-8 -*-
'''
Keep the last N items
'''
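# A minimal sketch of the recipe this title describes (assumed; the module
# body is not included in the dump), using collections.deque:
from collections import deque

def tail(iterable, n=5):
    # a deque with maxlen discards the oldest items automatically
    return deque(iterable, maxlen=n)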

universalcore/unicore.comments | unicore/comments/service/tests/test_schema.py | Python | bsd-2-clause | 9,303 | 0
import uuid
from datetime import datetime
from unittest import TestCase
import pytz
import colander
from unicore.comments.service.models import (
COMMENT_MAX_LENGTH, COMMENT_CONTENT_TYPES, COMMENT_MODERATION_STATES,
COMMENT_STREAM_STATES)
from unicore.comments.service.schema import (
Comment, Flag, BannedUser, StreamMetadata)
from unicore.comments.service.tests.test_models import (
comment_data as comment_model_data,
flag_data as flag_model_data,
banneduser_data as banneduser_model_data,
streammetadata_data as streammetadata_model_data)
def simple_serialize(data):
for key in data.keys():
value = data[key]
if isinstance(value, bool):
data[key] = 'true' if value else 'false'
elif isinstance(value, int):
data[key] = str(value)
elif isinstance(value, datetime):
data[key] = value.isoformat()
elif isinstance(value, uuid.UUID):
data[key] = value.hex
elif isinstance(value, dict):
data[key] = value.copy()
else:
data[key] = unicode(value)
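# e.g. True -> 'true', 42 -> '42', datetime -> ISO 8601 string and UUID ->
# 32-character hex, mirroring how the values arrive in a serialized payload.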
comment_data = comment_model_data.copy()
flag_data = flag_model_data.copy()
banneduser_data = banneduser_model_data.copy()
streammetadata_data = streammetadata_model_data.copy()
for data in (comment_data, flag_data, banneduser_data, streammetadata_data):
simple_serialize(data)
class CommentTestCase(TestCase):
def test_deserialize(self):
schema = Comment().bind()
clean = schema.deserialize(comment_data)
# must remove flag_count so that it doesn't get updated directly
self.assertNotIn('flag_count', clean)
# check typed fields
self.assertIsInstance(clean.pop('submit_datetime'), datetime)
self.assertEqual(clean.pop('is_removed'), False)
self.assertEqual(len(clean), len(comment_model_data) - 3)
self.assertDictContainsSubset(clean, comment_model_data)
# check that missing required fields raise an exception
incomplete_data = comment_data.copy()
required_fields = (
'app_uuid', 'content_uuid', 'user_uuid', 'comment', 'user_name',
'submit_datetime', 'content_type', 'content_title', 'content_url',
'locale')
for field in required_fields:
del incomplete_data[field]
try:
schema.deserialize(incomplete_data)
self.fail('Expected colander.Invalid to be raised')
except colander.Invalid as e:
self.assertEqual(len(e.children), len(required_fields))
# check that missing fields with model defaults are dropped
missing_data = comment_data.copy()
fields_with_model_default = (
'uuid', 'flag_count', 'is_removed', 'moderation_state',
'ip_address')
for field in fields_with_model_default:
del missing_data[field]
clean = schema.deserialize(missing_data)
for field in fields_with_model_default:
self.assertNotIn(field, clean)
def test_serialize(self):
schema = Comment(include_all=True).bind()
clean = schema.serialize(comment_model_data)
self.assertEqual(clean, comment_data)
# check that flag_count got serialized
self.assertIn('flag_count', clean)
# check that missing/None fields are 'None'
missing_and_none_data = comment_model_data.copy()
del missing_and_none_data['ip_address']
clean = schema.serialize(missing_and_none_data)
self.assertEqual(clean['ip_address'], 'None')
missing_and_none_data['ip_address'] = None
clean = schema.serialize(missing_and_none_data)
self.assertEqual(clean['ip_address'], 'None')
class FlagTestCase(TestCase):
def test_deserialize(self):
schema = Flag().bind()
clean = schema.deserialize(flag_data)
self.assertEqual(
clean.pop('submit_datetime'),
flag_model_data['submit_datetime'].replace(tzinfo=pytz.UTC))
self.assertEqual(len(clean), len(flag_model_data) - 1)
self.assertDictContainsSubset(clean, flag_model_data)
# check that missing required fields raise an exception
# all flag fields are required
incomplete_data = {}
try:
schema.deserialize(incomplete_data)
self.fail('Expected colander.Invalid to be raised')
except colander.Invalid as e:
self.assertEqual(len(e.children), len(flag_data))
def test_serialize(self):
schema = Flag().bind()
clean = schema.serialize(flag_model_data)
self.assertEqual(clean, flag_data)
class BannedUserTestCase(TestCase):
def test_deserialize(self):
schema = BannedUser().bind()
clean = schema.deserialize(banneduser_data)
self.assertEqual(
clean.pop('created'),
banneduser_model_data['created'].replace(tzinfo=pytz.UTC))
self.assertEqual(len(clean), len(banneduser_model_data) - 1)
self.assertDictContainsSubset(clean, banneduser_model_data)
copy = banneduser_data.copy()
del copy['created']
clean = schema.deserialize(copy)
self.assertNotIn('created', clean)
def test_serialize(self):
schema = BannedUser().bind()
clean = schema.serialize(banneduser_model_data)
self.assertEqual(clean, banneduser_data)
class StreamMetadataTestCase(TestCase):
def test_deserialize(self):
schema = StreamMetadata().bind()
clean = schema.deserialize(streammetadata_data)
self.assertEqual(clean, streammetadata_model_data)
copy = streammetadata_data.copy()
del copy['metadata']
clean = schema.deserialize(copy)
self.assertEqual(clean.get('metadata', None), {})
# dropped because unknown and no X- prefix
copy['metadata'] = {'unknown': 'value'}
clean = schema.deserialize(copy)
self.assertEqual(clean.get('metadata', None), {})
def test_serialize(self):
schema = StreamMetadata().bind()
clean = schema.serialize(streammetadata_model_data)
self.assertEqual(clean, streammetadata_data)
class ValidatorTestCase(TestCase):
schema_flag = Flag().bind()
schema_comment = Comment().bind()
schema_streammetadata = StreamMetadata().bind()
def setUp(self):
self.data_flag = flag_data.copy()
self.data_comment = comment_data.copy()
def test_uuid_validator(self):
self.data_flag['app_uuid'] = 'notauuid'
self.assertRaisesRegexp(
colander.Invalid, "'app_uuid'",
self.schema_flag.deserialize, self.data_flag)
def test_comment_uuid_validator(self):
comment_uuid = self.data_flag['comment_uuid']
schema = Flag().bind(comment_uuid=comment_uuid)
self.assertEqual(
schema.deserialize(self.data_flag)['comment_uuid'],
uuid.UUID(comment_uuid))
other_uuid = uuid.uuid4().hex
schema = Flag().bind(comment_uuid=other_uuid)
self.assertRaisesRegexp(
colander.Invalid, "is not one of %s" % uuid.UUID(other_uuid),
schema.deserialize, self.data_flag)
def test_ip_address_validator(self):
self.data_comment['ip_address'] = 'notanipaddress'
self.assertRaisesRegexp(
colander.Invalid, "'ip_address'",
self.schema_comment.deserialize, self.data_comment)
def test_locale_validator(self):
self.data_comment['locale'] = 'notalocale'
self.assertRaisesRegexp(
colander.Invalid, "'locale'",
self.schema_comment.deserialize, self.data_comment)
def test_comment_validator(self):
for val in ('', 'a' * (COMMENT_MAX_LENGTH + 1)):
self.data_comment['comment'] = val
self.assertRaisesRegexp(
colander.Invalid, "'comment'",
self.schema_comment.deserialize, self.data_comment)
def test_content_type_validator(self):
self.data_comment['content_type'] = 'notacontenttype'
types = ', '.join(COMMENT_CONTENT_TYPES)
self.assertRaisesRegexp
|
mathemage/h2o-3
|
h2o-py/tests/testdir_parser/pyunit_parquet_parser_simple.py
|
Python
|
apache-2.0
| 999 | 0.008008 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def parquet_parse_simple():
"""
Tests Parquet parser by comparing the summary of the original csv frame with the h2o parsed Parquet frame.
Basic use case of importing files with auto-detection of column types.
    :return: None if passed. Otherwise, an exception will be thrown.
"""
csv = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
parquet = h2o.import_file(path=pyunit_utils.locate("smalldata/parser/parquet/airlines-simple.snappy.parquet"))
csv.summary()
csv_summary = h2o.frame(csv.frame_id)["frames"][0]["columns"]
parquet.summary()
parquet_summary = h2o.frame(parquet.frame_id)["frames"][0]["columns"]
    pyunit_utils.compare_frame_summary(csv_summary, parquet_summary)
if __name__ == "__main__":
pyunit_utils.standalone_test(parquet_parse_simple)
else:
parquet_parse_simple()
|
scott-w/pyne-django-tutorial
|
chatter/chatter/base/migrations/0001_initial.py
|
Python
|
mit
| 799 | 0.001252 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.CharField(max_length=200)),
('created', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
|
rawkintrevo/sharky
|
mysharky/writers.py
|
Python
|
apache-2.0
| 2,074 | 0.047734 |
from multiprocessing import Process, Pipe,Value, Queue
from time import sleep, clock
from solr import Solr
#### EVERY connection must be a class with a .commit() method.
#### Starbase and solr already have these. If you want to make
#### a csv method, you need to define it as a custom class.
####
#### commit() would either be to open the file and append everyone 20 lines or so
#### OR you would append every line as it comes in, and commit is a dummy fu
|
ntion, but i
|
t
#### needs to be there.
class SharkyWriterConnection():
def __init__(self, name):
self.name= name
def establishConnection(self):
# Expected Parameters ()
pass
def setEstablishConnectionFn(self, fn):
self.establishConnection= fn
def processMessage(self, message, target_queue):
# Expected Parameters (message, target_queue)
pass
def setProcessMessage(self, fn):
self.processMessage= fn
def writeOne(self,message,batch):
pass
def setWriteOne(self, fn):
self.writeOne= fn
def writeBatch(self):
pass
def setWriteBatch(self,fn):
self.writeBatch= fn
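# A minimal sketch (illustrative, not part of the original module) of the
# custom CSV connection described in the notes above: the object only has to
# expose a commit() method; here every row is appended as it arrives, so
# commit() is the required dummy. The path argument is an assumption.
class CsvConnection(object):
    def __init__(self, path):
        self.path = path
    def write_row(self, row):
        # append each row immediately ...
        with open(self.path, 'a') as f:
            f.write(','.join(str(v) for v in row) + '\n')
    def commit(self):
        # ... so there is nothing left to flush here
        pass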
class SharkyWriter(Process):
def __init__(self, queue, health_pipe, sharky_writer_conn, beaver_shark_q):
Process.__init__(self)
self.queue= queue ## type: Queue (multiprocessor)
self.health_pipe= health_pipe ## type: Pipe (multiprocessor)
self.sharky_writer_conn= sharky_writer_conn
self.beaver_shark_q= beaver_shark_q ## Info for the logger.
self.batch = []
self.MaxBatchSize= 20
def run(self):
self.writeOne= self.sharky_writer_conn.writeOne
self.writeBatch= self.sharky_writer_conn.writeBatch
try:
self.conn= self.sharky_writer_conn.establishConnection(self)
self.beaver_shark_q.put(['info','Write connection %s established' % self.sharky_writer_conn.name])
except Exception,e:
self.beaver_shark_q.put(['exception',e])
while True:
while not self.queue.empty():
doc= self.queue.get()
self.writeOne(self, doc)
if len(self.batch) > self.MaxBatchSize:
# try/except built into function
self.writeBatch(self)
sleep(5)
|
chromium/chromium
|
tools/perf/contrib/leak_detection/page_sets.py
|
Python
|
bsd-3-clause
| 10,066 | 0.001987 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import py_utils
from telemetry import story as story_module
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class LeakDetectionSharedState(shared_page_state.SharedDesktopPageState):
def ShouldReuseBrowserForAllStoryRuns(self):
return True
class LeakDetectionPage(page_module.Page):
def __init__(self, url, page_set, name=''):
super(LeakDetectionPage, self).__init__(
url=url, page_set=page_set, name=name,
shared_page_state_class=LeakDetectionSharedState)
def RunNavigateSteps(self, action_runner):
tabs = action_runner.tab.browser.tabs
new_tab = tabs.New()
new_tab.action_runner.Navigate('about:blank')
new_tab.action_runner.PrepareForLeakDetection()
new_tab.action_runner.MeasureMemory()
new_tab.action_runner.Navigate(self.url)
self._WaitForPageLoadToComplete(new_tab.action_runner)
new_tab.action_runner.Navigate('about:blank')
new_tab.action_runner.PrepareForLeakDetection()
new_tab.action_runner.MeasureMemory()
new_tab.Close()
def _WaitForPageLoadToComplete(self, action_runner):
py_utils.WaitFor(action_runner.tab.HasReachedQuiescence, timeout=30)
# Some websites have a script that loads resources continuously, in which case
# HasReachedQuiescence would not be reached. This class waits for document ready
# state to be complete to avoid timeout for those pages.
class ResourceLoadingLeakDetectionPage(LeakDetectionPage):
def _WaitForPageLoadToComplete(self, action_runner):
action_runner.tab.WaitForDocumentReadyStateToBeComplete()
class LeakDetectionStorySet(story_module.StorySet):
def __init__(self):
super(LeakDetectionStorySet, self).__init__(
archive_data_file='data/leak_detection.json',
cloud_storage_bucket=story_module.PARTNER_BUCKET)
urls_list = [
# Alexa top websites
'https://www.google.com',
'https://www.youtube.com',
'https://www.facebook.com',
'https://www.baidu.com',
'https://www.wikipedia.org',
'https://world.taobao.com/',
'https://www.tmall.com/',
'http://www.amazon.com',
'http://www.twitter.com',
'https://www.instagram.com/',
'http://www.jd.com/',
'https://vk.com/',
'https://outlook.live.com',
'https://www.reddit.com/',
'https://weibo.com/',
'https://www.sina.com.cn/',
'https://www.360.cn/',
'https://yandex.ru/',
'https://www.blogger.com/',
'https://www.netflix.com/',
'https://www.pornhub.com/',
'https://www.linkedin.com/',
'https://www.yahoo.co.jp/',
'https://www.csdn.net/',
'https://www.alipay.com/',
'https://www.twitch.tv/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.ebay.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.microsoft.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.xvideos.com/',
'https://mail.ru/',
'https://www.bing.com/',
'http://www.wikia.com/',
'https://www.office.com/',
'https://www.imdb.com/',
'https://www.aliexpress.com/',
'https://www.msn.com/',
'https://news.google.com/',
'https://www.theguardian.com/',
'https://www.indiatimes.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'http://www.foxnews.com/',
'https://weather.com/',
'https://www.shutterstock.com/',
'https://docs.google.com/',
'https://wordpress.com/',
# TODO(yuzus): This test crashes.
# 'https://www.apple.com/',
'https://play.google.com/store',
'https://www.dropbox.com/',
'https://soundcloud.com/',
'https://vimeo.com/',
'https://www.slideshare.net/',
'https://www.mediafire.com/',
'https://www.etsy.com/',
'https://www.ikea.com/',
'https://www.bestbuy.com/',
'https://www.homedepot.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.target.com/',
'https://www.booking.com/',
'https://www.tripadvisor.com/',
'https://9gag.com/',
'https://www.expedia.com/',
'https://www.roblox.com/',
'https://www.gamespot.com/',
'https://www.blizzard.com',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://ign.com/',
'https://www.yelp.com/',
# Times out waiting for HasReachedQuiescence - crbug.com/927427
# 'https://gizmodo.com/',
'https://www.gsmarena.com/',
      'https://www.theverge.com/',
'https://www.nlm.nih.gov/',
'https://archive.org/',
'https://www.udemy.com/',
'https://answers.yahoo.com/',
# TODO(crbug.com/985552): Memory dump fails flakily.
# 'https://www.goodreads.com/',
'https://www.cricbuzz.com/',
'http://www.goal.com/',
'http://siteadvisor.com/',
'https://www.patreon.com/',
'https://www.jw.org/',
'http://europa.eu/',
'https://translate.google.com/',
'https://www.epicgames.com/',
'http://www.reverso.net/',
'https://play.na.leagueoflegends.com/',
'https://www.thesaurus.com/',
'https://www.weebly.com/',
'https://www.deviantart.com/',
'https://www.scribd.com/',
'https://www.hulu.com/',
'https://www.xfinity.com/',
# India Alexa top websites
'https://porn555.com/',
'https://www.onlinesbi.com/',
'https://www.flipkart.com/',
'https://www.hotstar.com/',
'https://www.incometaxindiaefiling.gov.in/',
'https://stackoverflow.com/',
# TODO(crbug.com/1005035) Memory dump fails flakily.
# 'https://www.irctc.co.in/nget/',
'https://www.hdfcbank.com/',
'https://www.whatsapp.com/',
'https://uidai.gov.in/',
'https://billdesk.com/',
'https://www.icicibank.com/',
# US Alexa top websites
'https://imgur.com/',
'https://www.craigslist.org/',
'https://www.chase.com/',
# TODO(892352): tumblr started timing out due to a catapult roll. See
# https://crbug.com/892352
# 'https://www.tumblr.com/',
'https://www.paypal.com/',
# TODO(yuzus): espn.com is flaky. https://crbug.com/959796
#'http://www.espn.com/',
'https://edition.cnn.com/',
'https://www.pinterest.com/',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'https://www.nytimes.com/',
'https://github.com/',
'https://www.salesforce.com/',
# Japan Alexa top websites
'https://www.rakuten.co.jp/',
'http://www.nicovideo.jp/',
'https://fc2.com/',
'https://ameblo.jp/',
'http://kakaku.com/',
'https://www.goo.ne.jp/',
'https://www.pixiv.net/',
# websites which were found to be leaking in the past
'https://www.prezi.com',
# TODO(keishi): Memory dump fails flakily crbug.com/963273
#'http://www.time.com',
'http://www.cheapoair.com',
'http://www.onlinedown.net',
'http://www.dailypost.ng',
'http://www.aljazeera.net',
'http://www.googleapps.com',
'http://www.airbnb.ch',
'http://www.livedoor.jp',
'http://www.blu-ray.com',
# TODO(953195): Test times out.
# 'http://www.block.io',
'http://www.hockeybuzz.com',
'http://www.silverpop.com',
'http://www.ansa.it',
'http://www.gulfair.com',
'http://www.nusatrip.com',
'http://www.samsung-fun.ru',
'http://www.opentable.com',
'http://www.magnetmail.net',
'http://zzz.com.ua',
'http://a-rakumo.appspot.com',
'http://www.sakurafile.com',
'http://www.psiexams.com',
'http://www.contentful.com',
'http://www.estibot.com',
'http://www.mbs.de',
'http://www.zhengjie.com',
'http://www.sjp.pl',
'http://www.mastodon.social',
'http://www.horairetrain.net',
'http://www.torrentzeu.to',
'http://www.inbank.it',
'http://www.gradpoint.co
|
NicLew/CS360-Practive-CI-Testing
|
unittestExample.py
|
Python
|
gpl-2.0
| 1,699 | 0.030606 |
#!/usr/bin/python3
################################
# File Name: unittestExample.py
# Author: Chadd Williams
# Date: 10/20/2014
# Class: CS 360
# Assignment: Lecture Examples
# Purpose: Demonstrate unit tests
################################
# adapted from https://docs.python.org/3/library/unittest.html
# python3 -m unittest unittestExample -v
import random
import unittest
class TestListFunctions(unittest.TestCase):
listSize = 10
def setUp(self):
""" the text fixture, necessary setup for the tests to run
"""
self.theList = list(range(self.listSize))
# shuffle the list
random.shuffle(self.theList)
def tearDown(self):
""" nothing to tear down here
If your test created a database or built a network connection
you might delete the database or close the network connection
here. You might also close files you opened, close your
TK windows if this is GUI program, or kill threads if this is
a multithreaded application
"""
pass # nothing to do
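    # Hypothetical illustration (not used by these tests): if setUp had
    # opened a resource, tearDown would release it, e.g.
    #   def setUp(self):
    #       self.tmp = open('scratch.txt', 'w')
    #   def tearDown(self):
    #       self.tmp.close()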
def test_sort(self):
""" make sure sort works correctly
"""
        self.theList.sort()
self.assertEqual(self.theList, list(range(self.listSize)))
def test_append(self):
""" make sure append works correctly
"""
self.theList.append(self.listSize+1)
self.assertEqual(self.theList[-1], self.listSize+1)
def test_exceptions(self):
"""test some exceptions
"""
# theList does not contain -1. Make sure remove
# raised the correct exception
self.assertRaises(ValueError, self.theList.remove, -1)
"""def test_thistestwillfail(self):
# theList DOES contain 1.
# remove will not raise the expected exception
self.assertRaises(ValueError, self.theList.remove, 0)"""
|
rsoumyassdi/nuxeo-drive
|
nuxeo-drive-client/nxdrive/wui/conflicts.py
|
Python
|
lgpl-2.1
| 3,320 | 0.001506 |
'''
Created on 10 March 2015
@author: Remi Cattiau
'''
from nxdrive.logging_config import get_logger
from nxdrive.wui.dialog import WebDialog, WebDriveApi
from nxdrive.wui.translator import Translator
from PyQt4 import QtCore
log = get_logger(__name__)
class WebConflictsApi(WebDriveApi):
def __init__(self, application, engine, dlg=None):
super(WebConflictsApi, self).__init__(application, dlg)
self._manager = application.manager
self._application = application
self._dialog = dlg
self._engine = engine
def set_engine(self, engine):
self._engine = engine
@QtCore.pyqtSlot(result=str)
def get_errors(self):
return super(WebConflictsApi, self).get_errors(self._engine._uid)
@QtCore.pyqtSlot(result=str)
def get_conflicts(self):
return super(WebConflictsApi, self).get_conflicts(self._engine._uid)
@QtCore.pyqtSlot(int)
def resolve_with_local(self, state_id):
try:
self._engine.resolve_with_local(state_id)
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def resolve_with_remote(self, state_id):
try:
self._engine.resolve_with_remote(state_id)
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def resolve_with_duplicate(self, state_id):
try:
self._engine.resolve_with_duplicate(state_id)
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def retry_pair(self, state_id):
try:
self._engine.retry_pair(int(state_id))
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(int)
def unsynchronize_pair(self, state_id):
try:
self._engine.unsynchronize_pair(int(state_id))
except Exception as e:
log.exception(e)
@QtCore.pyqtSlot(str, result=str)
def open_local(self, path):
return super(WebConflictsApi, self).open_local(self._engine._uid, path)
@QtCore.pyqtSlot(str, str, result=str)
def open_remote(self, remote_ref, remote_name):
remote_ref = str(remote_ref)
remote_name = unicode(remote_name)
log.debug("Should open this : %s (%s)", remote_name, remote_ref)
try:
self._engine.open_edit(remote_ref, remote_name)
except Exception as e:
log.exception(e)
return ""
def _export_state(self, state):
if state is None:
return None
result = super(WebConflictsApi, self)._export_state(state)
result["last_contributor"] = " " if state.last_remote_modifier is None \
else self._engine.get_user_full_name(state.last_remote_modifier)
date_time = self.get_date_from_sqlite(state.last_remote_updated)
result["last_remote_update"] = "" if date_time == 0 else Translator.format_datetime(date_time)
date_time = self.get_date_from_sqlite(state.last_local_updated)
result["last_local_update"] = "" if date_time == 0 else Translator.format_datetime(date_time)
result["remote_can_update"] = state.remote_can_update
return result
class WebConflictsDialog(WebDialog):
def set_engine(self, engine):
self._api.set_engine(engine)
|
varunarya10/python-openstackclient
|
openstackclient/identity/v2_0/ec2creds.py
|
Python
|
apache-2.0
| 5,662 | 0 |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2 EC2 Credentials action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
from openstackclient.i18n import _ # noqa
class CreateEC2Creds(show.ShowOne):
"""Create EC2 credentials"""
log = logging.getLogger(__name__ + ".CreateEC2Creds")
def get_parser(self, prog_name):
parser = super(CreateEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--project',
metavar='<project>',
help=_('Specify a project [admin only]'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.project:
project = utils.find_resource(
identity_client.tenants,
parsed_args.project,
).id
else:
# Get the project from the current auth
project = identity_client.auth_tenant_id
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
creds = identity_client.ec2.create(user, project)
info = {}
info.update(creds._info)
return zip(*sorted(six.iteritems(info)))
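# Hedged usage sketch (project and user names are illustrative): this action
# class surfaces in the CLI as roughly
#   openstack ec2 credentials create --project demo --user alice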
class DeleteEC2Creds(command.Command):
"""Delete EC2 credentials"""
log = logging.getLogger(__name__ + '.DeleteEC2Creds')
def get_parser(self, prog_name):
parser = super(DeleteEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_key',
metavar='<access-key>',
help=_('Credentials access key'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
identity_client.ec2.delete(user, parsed_args.access_key)
class ListEC2Creds(lister.Lister):
"""List EC2 credentials"""
log = logging.getLogger(__name__ + '.ListEC2Creds')
def get_parser(self, prog_name):
parser = super(ListEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
columns = ('access', 'secret', 'tenant_id', 'user_id')
column_headers = ('Access', 'Secret', 'Project ID', 'User ID')
data = identity_client.ec2.list(user)
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class ShowEC2Creds(show.ShowOne):
"""Show EC2 credentials"""
log = logging.getLogger(__name__ + '.ShowEC2Creds')
def get_parser(self, prog_name):
parser = super(ShowEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_key',
metavar='<access-key>',
help=_('Credentials access key'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
creds = identity_client.ec2.get(user, parsed_args.access_key)
info = {}
info.update(creds._info)
return zip(*sorted(six.iteritems(info)))
|
MingfeiPan/leetcode
|
string/6.py
|
Python
|
apache-2.0
| 526 | 0.015209 |
import functools
class Solution:
    def convert(self, s: str, numRows: int) -> str:
if numRows == 1:
return s
ret = [[] for _ in range(numRows)]
pattern = numRows*2 - 2
for i in range(len(s)):
if i % pattern < numRows:
ret[i % pattern].append(s[i])
else:
ret[pattern - (i % pattern)].append(s[i])
return functools.reduce(lambda a, b : a + b ,[''.join(c) for c in ret])
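# Worked example (classic zigzag case): Solution().convert("PAYPALISHIRING", 3)
# fills the rows as "PAHN", "APLSIIG", "YIR" and returns "PAHNAPLSIIGYIR".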
|
stryder199/RyarkAssignments
|
Assignment2/ttt/archive/_old/other/echo_io.py
|
Python
|
mit
| 750 | 0.042667 |
import question_template
game_type = 'input_output'
source_language = 'C'
parameter_list = [
['$x0','string'],['$x1','string'],['$x2','string'],
['$y0','string'],['$y1','string'],['$y2','string']
]
tuple_list = [
['echo_io_forward_',['a','b','c',None,None,None]],
]
global_code_template = '''\
d #include <stdio.h>
x #include <stdio.h>
dx
'''
main_code_template = '''\
dx int i;
dx
dx for (i = 1; i < argc; i++)
dx printf("%s\\n", argv[i]);
'''
argv_template = '$x0 $x1 $x2'
stdin_template = '''\
'''
stdout_template = '''\
$y0
$y1
$y2
'''
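# Worked example (inferred from tuple_list above): with $x0..$x2 bound to
# 'a', 'b', 'c' the program is invoked as `a b c`, and since it echoes each
# argument on its own line, the expected stdout lines $y0..$y2 are presumably
# 'a', 'b' and 'c' (the framework appears to fill in the None entries).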
question = question_template.Question_template(game_type,source_language,
	parameter_list,tuple_list,global_code_template,main_code_template,
	argv_template,stdin_template,stdout_template)
|
gcobos/rft
|
config.py
|
Python
|
agpl-3.0
| 673 | 0.002972 |
# Author: Drone
import web
from app.helpers import utils
from app.helpers import formatting
projectName = 'Remote Function Trainer'
listLimit = 40
# connect to database
db = web.database(dbn='mysql', db='rft', user='root', passwd='1234')
t = db.transaction()
#t.commit()
# in development debug error messages and reloader
web.config.debug = False
# in develpment template caching is set to false
cache = False
# template global functions
globals = utils.get_all_functions(formatting)
# set global base template
view = web.template.render('app/views', cache=cache, globals=globals)
# in production the internal errors are emailed to us
web.config.email_errors = ''
|
achalddave/simple-amt
|
reject_assignments.py
|
Python
|
mit
| 970 | 0.016495 |
import argparse, json
import simpleamt
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
args = parser.parse_args()
  mtc = simpleamt.get_mturk_connection_from_args(args)
reject_ids = []
if args.hit_ids_file is None:
parser.error('Must specify --hit_ids_file.')
with open(args.hit_ids_file, 'r') as f:
hit_ids = [line.strip() for line in f]
for hit_id in hit_ids:
for a in mtc.get_assignments(hit_id):
reject_ids.append(a.AssignmentId)
print ('This will reject %d assignments with '
         'sandbox=%s' % (len(reject_ids), str(args.sandbox)))
print 'Continue?'
s = raw_input('(Y/N): ')
if s == 'Y' or s == 'y':
print 'Rejecting assignments'
for idx, assignment_id in enumerate(reject_ids):
print 'Rejecting assignment %d / %d' % (idx + 1, len(reject_ids))
mtc.reject_assignment(assignment_id, feedback='Invalid results')
else:
print 'Aborting'
|
iulian787/spack
|
var/spack/repos/builtin/packages/mariadb-c-client/package.py
|
Python
|
lgpl-2.1
| 3,917 | 0.007148 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class MariadbCClient(CMakePackage):
"""MariaDB turns data into structured information in a wide array of
applications, ranging from banking to websites. It is an enhanced,
drop-in replacement for MySQL. MariaDB is used because it is fast,
scalable and robust, with a rich ecosystem of storage engines,
plugins and many other tools make it very versatile for a wide
variety of use cases. This package comprises only the standalone 'C
Connector', which enables connections to MariaDB and MySQL servers.
"""
homepage = "http://mariadb.org/about/"
url = "https://downloads.mariadb.com/Connectors/c/connector-c-3.0.3/mariadb-connector-c-3.0.3-src.tar.gz"
list_url = "https://downloads.mariadb.com/Connectors/c/"
list_depth = 1
version('3.1.9', sha256='108d99bf2add434dcb3bd9526ba1d89a2b9a943b62dcd9d0a41fcbef8ffbf2c7')
version('3.1.6', sha256='d266bb67df83c088c4fb05392713d2504c67be620894cedaf758a9561c116720')
version('3.1.5', sha256='a9de5fedd1a7805c86e23be49b9ceb79a86b090ad560d51495d7ba5952a9d9d5')
version('3.1.4', sha256='7a1a72fee00e4c28060f96c3efbbf38aabcbbab17903e82fce85a85002565316')
version('3.0.9', sha256='7277c0caba6f50b1d07e1d682baf0b962a63e2e6af9e00e09b8dcf36a7858641')
version('3.0.8', sha256='2ca368fd79e87e80497a5c9fd18922d8316af8584d87cecb35bd5897cb1efd05')
version('3.0.7', sha256='f63883c9360675d111646fba5c97feb0d08e0def5873dd189d78bafbb75fa004')
version('3.0.6', sha256='2b2d18dc969dc385f7f740e4db112300e11bc626c9ba9aa05c284704095b9e48')
version('3.0.5', sha256='940017f13a13846153eb9d36290824c4615c8a8be4142b6bbaeb698609f02667')
version('3.0.4', sha256='6eff680cd429fdb32940f6ea4755a997dda1bb00f142f439071f752fd0b200cf')
version('3.0.3', sha256='210f0ee3414b235d3db8e98e9e5a0a98381ecf771e67ca4a688036368984eeea')
version('3.0.2', sha256='518d14b8d77838370767d73f9bf1674f46232e1a2a34d4195bd38f52a3033758')
version('2.3.7', sha256='94f9582da738809ae1d9f1813185165ec7c8caf9195bdd04e511f6bdcb883f8e')
    version('2.3.6', sha256='6b271d25dddda15f1c2328eee64f646a2e8b116ea21b04ece24b5a70712c3e96')
version('2.3.5', sha256='2f3bf4c326d74284debf7099f30cf3615f7978d1ec22b8c1083676688a76746f')
version('2.3.4', sha256='8beb0513da8a24ed2cb47836564c8b57045c3b36f933362f74b3676567c13abc')
version('2.3.3', sha256='82a5710134e7654b9cad58964d6a25ed91b3dc1804ff51e8be2def0032914089')
version('2.3.2', sha256='4063c8655dc37608d4eade981e25b76f67f5d36e8426dc7f20d59e48ebba628a')
|
version('2.3.1', sha256='6ab7e1477ae1484939675a3b499f98148980a0bd340d15d22df00a5c6656c633')
version('2.3.0', sha256='37faae901ca77bd48d2c6286f2e19e8c1abe7cac6fb1b128bd556617f4335c8a')
version('2.2.3', sha256='cd01ce2c418382f90fd0b21c3c756b89643880efe3447507bf740569b9d08eed')
version('2.2.2', sha256='93f56ad9f08bbaf0da8ef03bc96f7093c426ae40dede60575d485e1b99e6406b')
version('2.2.1', sha256='c30ba19be03a6ac8688ef7620aed0eabdf34ca9ee886c017c56b013b5f8ee06a')
version('2.2.0', sha256='3825b068d38bc19d6ad1eaecdd74bcd49d6ddd9d00559fb150e4e851a55bbbd4')
version('2.1.0', sha256='568050b89463af7610d458669fd9eee06dcc9405689aca8a526ac8c013b59167')
provides('mariadb-client')
provides('mysql-client')
depends_on('cmake@2.6:', type='build')
depends_on('curl')
depends_on('pcre')
depends_on('openssl')
depends_on('zlib')
def url_for_version(self, version):
url = "https://downloads.mariadb.com/Connectors/c/connector-c-{0}/mariadb-connector-c-{1}-src.tar.gz"
return url.format(version.up_to(3), version)
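        # e.g. version 3.1.9 resolves to
        # https://downloads.mariadb.com/Connectors/c/connector-c-3.1.9/mariadb-connector-c-3.1.9-src.tar.gz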
def cmake_args(self):
args = ['-DWITH_EXTERNAL_ZLIB=ON', '-DWITH_MYSQLCOMPAT=ON']
return args
|
usc-isi-i2/etk
|
etk/data_extractors/htiExtractors/unicode_decoder.py
|
Python
|
mit
| 22,630 | 0.068714 |
### @author Rishi Jatia
import json
import re
import string
def decode_unicode(data, replace_boo=True):
	# dictionary that directly maps unicode code points to their letters
dictionary = {'0030':'0','0031':'1','0032':'2','0033':'3','0034':'4','0035':'5','0036':'6','0037':'7','0038':'8','0039':'9','0024':'$','0040':'@','00A2':'cents','00A3':'pounds','00A5':'yen','00C7':'C','00D0':'D','00D1':'N','00DD':'Y','00E7':'c','00F1':'n','00FD':'y','00FF':'y','010E':'D','010F':'F','0110':'D','0111':'D','0130':'I','0134':'J','0135':'J','0136':'K','0137':'K','0138':'K','0160':'S','0161':'S','0191':'F','0192':'F','0193':'G','0198':'K','0199':'K',
'019D':'N','019E':'N','01A4':'P','01A5':'P','01AC':'T','01AF':'U','01B5':'Z','01CD':'A','01CE':'A','01CF':'I','01D0':'I','01D1':'O','01D2':'O','01DE':'A','01DF':'A','01E0':'A','01E1':'A','01F4':'G','01F5':'G','01F8':'N','01F9':'N','01FA':'A','01FB':'A','021E':'H',
'021F':'H','0224':'Z','2113':'L','2718':'X','0225':'Z','2134':'O','0226':'A','0227':'A','0228':'E','0229':'E','0386':'A','0388':'E','0389':'H','038A':'I','0391':'A','0392':'B','0395':'E','0396':'Z','0397':'H','0399':'I','039A':'K','039C':'M','039D':'N','039F':'O','03A1':'P','03A4':'T','03A5':'Y','03A7':'X','03AA':'I','03AB':'B','1E10':'D','1E11':'D','1E12':'D','1E13':'D','1E1E':'F','1E1F':'F','1E20':'G','1E21':'H','1E2C':'I','1E2D':'I','1E2E':'I','1E2F':'I','1E3E':'M','1E3F':'M','1E70':'T','1E71':'T','1E8E':'Y','1E8F':'Y','1EE0':'O','1EE1':'O','1EE2':'O','1EE3':'O','1EE4':'O','1EF0':'U','1EF1':'U'}
# dictionary in which patterns (prefixes and suffixes) are matched to possible letter choices
pattern_dict = {'00C':'AEI', '00D':'OU','00E':'AEI','00F':'OU','010':'AC','011':'EG','012':'GHI','013':'L','014':'LNO','015':'RS','016':'TU','017':'UWYZ', '018':'BCD','01D':'U','01E':'GKO','020':'AEIO','021':'RUST','022':'O','1E0':'ABCD','1E1':'E','1E3':'KL','1E4':'MNO','1E5':'OPR','1E6':'ST','1E7':'UV','1E8':'WX','1E9':'Z','1EB':'A','1EC':'EIO','1ED':'O','1EE':'U','1EF':'Y','216':'greeknum','217':'greeknum','246':'consecnum','247':'numfrom17'}
#dictionary which matches patterns for emoticons
hex_dict = {'A':'10','B':'11','C':'12','D':'13','E':'14','F':'15','a':'10','b':'11','c':'12','d':'13','e':'14','f':'15'}
happy_dict = ['1F600','263A','1F601','1F602','1F603','1F604','1F605','1F606','1F60A','263A','1F642','1F607','1F60C','1F643','1F62C','1F63A','1F638','1F639']
sad_dict = ['1F610','1F611','1F623','1F494','1F625','1F62B','1F613','1F614','1F615','2639','1F641','1F616','1F61E','1F61F','1F624','1F622','1F62D','1F629','1F630','1F620']
sexual_dict = ['1F609','1F6C0','2B50','1F445','1F525','1F36D','2606','1F60D','1F460','1F618','1F617','1F61A','1F917','1F60F','1F63B','1F63D','1F483','1F46F','1F48F','1F444','1F48B','1F459','1F484','1F34C','1F4AF','264B']
hearts=['1F498','2664','2764','2661','2665','1F493','1F495','1F496','1F497','1F499','1F49A','1F49B','1F49C','1F49D','1F49E','1F49F','2763']
baseball_dict=['26BE', '1F3C0', '1F3CF']
count=0
misc_code = ' *misc* '
if not replace_boo:
misc_code = ''
retval=''
# first I am filtering out all the non-unicode characters from the data
regex=re.compile(r'\\u[0-9ABCDEFabcdef]{1,4}')
regex2=re.compile(r'\\U[0-9ABCDEFabcdef]{1,8}') #this is so that both types of unicode representations are filtered
lowers = list('abcdef')
uppers = [c.upper() for c in lowers]
ndata = set()
data = data.encode('unicode-escape').decode('utf-8')
data = re.sub(r'(?:\\x(?:[0-9]|[a-f]){2})+', ' ', data, flags=re.IGNORECASE)
for val in re.finditer(regex,data):
to_append=val.group()
#converting unicode to standard representation
for c in lowers:
if c in to_append:
				to_append = to_append.replace(c, c.upper())  # normalize hex digits to uppercase so dictionary keys match
ndata.add(to_append)
for val in re.finditer(regex2,data):
to_append = '\u' + val.group()[5:]
for c in lowers:
if c in to_append:
				to_append = to_append.replace(c, c.upper())  # same normalization for the long-form escapes
ndata.add(to_append)
ndata = list(ndata)
"""
Process of parsing:
-> Convert unicode into standard form
-> Convert each character of the unicode symbol to its numerical equivalent
-> Mapping Process:
- First check in pattern dictionary to map suffix/prefix
- Check Emoticon Dictionary
- Replace value pair with Key whenever found
- Then check direct dictionary
- Append to .txt file if unicode not found in any dictionary
"""
for unicode_str in ndata:
uni=unicode_str[2:]
if unicode_str not in data:
unicode_str='\U000' + unicode_str[2:]
#converting to standard representation
for c in uppers:
if c in unicode_str:
unicode_str = unicode_str.replace(c, c.lower())
if uni in baseball_dict:
retval+=' *baseball* '
#detecting baseball emoticons and converting to '*baseball*' and similar conversions for other categories of emoticons
data=string.replace(data,unicode_str,' *baseball* ')
if uni in happy_dict:
retval+=' *happy* '
if replace_boo:
data=string.replace(data,unicode_str,' *happy* ')
else:
data=string.replace(data,unicode_str,' ')
elif uni in sad_dict:
retval+=' *sad* '
if replace_boo:
data=string.replace(data,unicode_str,' *sad* ')
else:
data=string.replace(data,unicode_str,' ')
elif uni in sexual_dict:
retval+=' *sexual* '
if replace_boo:
data=string.replace(data,unicode_str,' *sexual* ')
else:
				data=string.replace(data,unicode_str,' ')
elif uni in hearts:
retval+=' *hearts* '
if replace_boo:
data=string.replace(data,unicode_str,' *hearts* ')
else:
data=string.replace(data,unicode_str,' ')
elif uni in dictionary:
retval+=dictionary[uni]
			data=string.replace(data,unicode_str,dictionary[uni])
elif uni[0:3]=='004' or uni[0:3]=='005':
			#replacing unicode escapes for uppercase letters; the hexadecimal digits are first mapped to their numerical values
last_dig=uni[3:]
if last_dig in hex_dict:
last_dig=int(hex_dict[last_dig])
else:
last_dig=int(last_dig)
second_last_dig= int(uni[2:3])
num= (second_last_dig-4)*16 + last_dig
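			# e.g. '\u0041': last_dig=1, second_last_dig=4,
			# num=(4-4)*16+1=1 and chr(64+1) == 'A'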
retval+=chr(64+num)
data=string.replace(data,unicode_str,chr(64+num))
elif uni[0:3]=='006' or uni[0:3]=='007':
last_dig=uni[3:]
if last_dig in hex_dict:
last_dig=int(hex_dict[last_dig])
else:
last_dig=int(last_dig)
second_last_dig= int(uni[2:3])
#parsing letters
num= (second_last_dig-6)*16 + last_dig
retval+=chr(64+num)
data=string.replace(data,unicode_str,chr(64+num))
elif uni[0:3] in pattern_dict:
val = pattern_dict[uni[0:3]]
if len(val)==1:
retval+=val
data=string.replace(data,unicode_str,val)
elif uni[0:3]=='00C':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
					#parsing miscellaneous
data=string.replace(data,unicode_str,misc_code)
if last>=0 and last<=5:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last<=11:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=val[2]
data=string.replace(data,unicode_str,val[2])
elif uni[0:3]=='00D':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
pass
if last>=2 and last<=6:
retval+=val[0]
data=string.replace(data,unicode_str,val[0])
elif last>=9 and last<=12:
retval+=val[1]
data=string.replace(data,unicode_str,val[1])
else:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
elif uni[0:3]=='00E':
last=uni[3:]
if last in hex_dict:
last=hex_dict[last]
try:
last=int(last)
except:
retval+=misc_code
data=string.replace(data,unicode_str,misc_code)
if last>=0 and las
|
royveshovda/pifog
|
source/piclient/doorpi/door_runner.py
|
Python
|
apache-2.0
| 4,850 | 0.003093 |
import json
import time
import settings
from shared import common
from datetime import datetime
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import logging
handler = None
pin_door1 = 11 #BCM17
pin_door2 = 12 # BCM18
pin_door1_led = 13 # BCM27
pin_door2_led = 15 # BCM22
def init():
global handler
if settings.is_fake():
from doorpi import gpio_faker
handler = gpio_faker
else:
from doorpi import gpio
handler = gpio
return
# Custom Shadow callback
def customShadowCallback_Update(payload, responseStatus, token):
if responseStatus == "timeout":
print("Update request " + token + " time out!")
if responseStatus == "accepted":
payloadDict = json.loads(payload)
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Update request with token: " + token + " accepted!")
reported = payloadDict["state"]["reported"]
if "FemaleDoor" in reported:
print("FemaleDoor: " + str(payloadDict["state"]["reported"]["FemaleDoor"]))
if "MaleDoor" in reported:
print("MaleDoor: " + str(payloadDict["state"]["reported"]["MaleDoor"]))
if "connected" in reported:
print("connected: " + str(payloadDict["state"]["reported"]["connected"]))
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Update request " + token + " rejected!")
def handle_command(client, message):
payload = message.payload.decode('utf-8')
print("Command received:")
print(payload)
#cmd = json.loads(payload)
#command = cmd["command"]
#cmd_id = cmd["id"]
#if command == "ping":
# common.send_pong(client, cmd_id, settings.topic_doorpi_event)
def handle_notification(message):
print("Notification received: " + str(message.payload))
def on_message(client, userdata, msg):
if msg.topic == settings.topic_doorpi_command:
handle_command(client, msg)
return
if msg.topic == settings.topic_doorpi_notify:
handle_notification(msg)
return
print("Spam received: " + str(msg.payload))
def send_data(client, door1_closed, door2_closed):
if door1_closed:
door1_message = "closed"
else:
door1_message = "open"
if door2_closed:
door2_message = "closed"
else:
door2_message = "open"
# Prepare our sensor data in JSON format.
payload = json.dumps({
"state": {
"reported": {
"FemaleDoor": door1_message,
"MaleDoor": door2_message
}
}
})
client.shadowUpdate(payload, customShadowCallback_Update, 5)
def new_state(pin, old_state):
new_state_first = handler.get_state(pin)
if new_state_first != old_state:
time.sleep(0.5)
new_state_verify = handler.get_state(pin)
if new_state_verify != old_state:
return True, new_state_verify
else:
return False, old_state
else:
return False, old_state
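# In effect new_state() debounces the input: a changed reading must persist
# across two samples taken 0.5 s apart before (True, new_state) is returned.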
def set_led_state(door1_state, door2_state):
handler.set_state(pin_door1_led, door1_state)
handler.set_state(pin_door2_led, door2_state)
def start():
shadow, client = common.setup_aws_shadow_client(settings.aws_endpoint,
settings.aws_root_certificate,
settings.aws_private_key,
settings.aws_certificate,
settings.device_name)
JSONPayload = '{"state":{"reported":{"connected":"true"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
handler.setup(pin_door1, pin_door2, pin_door1_led, pin_door2_led)
handler.signal_startup(pin_door1_led, pin_door2_led)
# Get initial state
door1 = handler.get_state(pin_door1)
door2 = handler.get_state(pin_door2)
set_led_state(door1, door2)
send_data(client, door1, door2)
time.sleep(2)
states_reported = 1
try:
while True:
door1_changed, door1_state = new_state(pin_door1, door1)
door2_changed, door2_state = new_state(pin_door2, door2)
if door1_changed or door2_changed:
door1 = door1_state
door2 = door2_state
set_led_state(door1, door2)
send_data(client, door1, door2)
states_reported += 1
print('States reported: '+str(states_reported))
time.sleep(0.2)
except KeyboardInterrupt:
JSONPayload = '{"state":{"reported":{"connected":"false"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
shadow.disconnect()
handler.cleanup()
print('stopped')
def stop():
return
|
batpad/osmcha-django
|
osmchadjango/changeset/migrations/0023_userdetail_contributor_uid.py
|
Python
|
gpl-3.0
| 441 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changeset', '0022_auto_20160222_2358'),
]
operations = [
migrations.AddField(
model_name='userdetail',
name='contributor_uid',
field=models.IntegerField(db_index=True, null=True, blank=True),
),
]
|
Kwentar/Dream-Crusher
|
app/main_views.py
|
Python
|
apache-2.0
| 2,225 | 0.001348 |
from flask import Blueprint, render_template, g, request, jsonify
from time import gmtime, strftime
from flask_login import login_required, current_user
from app.models import Month, DreamDay, Dream
import datetime
main_module = Blueprint('main', __name__, template_folder='templates')
@main_module.route('/old')
@login_required
def index():
current_month = strftime("%B", gmtime())
current_n_month = datetime.datetime.today().month
current_year = datetime.datetime.today().year
exist = False
for m in g.user.months:
if m.n_month == current_n_month and m.year == current_year:
exist = True
break
if not exist:
month = Month(title=current_month, n_month=current_n_month, year=current_year)
        month.dreams.append(Dream(title="be better than yesterday"))
month.dreams.append(Dream(title="collect all pokemons"))
month.dreams.append(Dream(title="learn to fly"))
g.user.months.append(month)
g.user.save()
    return render_template('index.html', current_n_month=current_n_month)
@main_module.route('/add_half_hour', methods=['POST'])
@login_required
def add_half_hour():
dream_id = request.form['dream_id']
curr_month = g.user.get_current_month()
curr_dream = next(x for x in curr_month.dreams if str(x.id_) == dream_id)
curr_dream.current_time += 1
curr_dream_day = next((x for x in curr_month.dream_days if
x.number == datetime.datetime.today().day and x.dream_id == curr_dream.id_), None)
if curr_dream_day:
curr_dream_day.current_time += 1
else:
dream_day = DreamDay(dream_id=dream_id)
curr_month.dream_days.append(dream_day)
g.user.save()
return jsonify({'id_': dream_id, 'day_number': datetime.datetime.today().day})
@main_module.route('/change_slogan', methods=['POST'])
@login_required
def change_slogan():
new_slogan = request.form['slogan_value']
curr_month = g.user.get_current_month()
if curr_month.slogan != new_slogan:
curr_month.slogan = new_slogan
g.user.save()
return jsonify({'slogan_value': new_slogan})
@main_module.before_request
def before_request():
g.user = current_user
|
openhealthalgorithms/openhealthalgorithms
|
OHA/helpers/converters/LengthConverter.py
|
Python
|
apache-2.0
| 1,483 | 0 |
import abc
from OHA.helpers.converters.BaseConverter import BaseConverter
__author__ = 'indrajit'
__email__ = 'eendroroy@gmail.com'
class LengthConverter(BaseConverter):
    def __init__(self, _value, _from=None, _to=None):
super(LengthConverter, self).__init__(_value, _from, _to)
def _supported_units(self):
return ['ft', 'in', 'm', 'cm']
@abc.abstractmethod
    def _default_from_unit(self):
raise NotImplementedError('method not implemented')
@abc.abstractmethod
def _default_to_unit(self):
raise NotImplementedError('method not implemented')
    def _convert(self):
        # factor direction: _value is in `_from` units, the result in `_to`
        # units (e.g. 1 ft = 0.3048 m, 1 in = 2.54 cm)
        if self._from == self._to:
            return self._value
        elif self._to == 'm' and self._from == 'ft':
            return self._value * 0.3048
        elif self._to == 'm' and self._from == 'in':
            return self._value * 0.0254
        elif self._to == 'cm' and self._from == 'ft':
            return self._value * 30.48
        elif self._to == 'cm' and self._from == 'in':
            return self._value * 2.54
        elif self._to == 'ft' and self._from == 'm':
            return self._value * 3.28084
        elif self._to == 'ft' and self._from == 'cm':
            return self._value * 0.0328084
        elif self._to == 'in' and self._from == 'm':
            return self._value * 39.3701
        elif self._to == 'in' and self._from == 'cm':
            return self._value * 0.393701
        else:
            return None
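# A minimal sketch (not part of the original package) of a concrete subclass;
# the default units chosen here are illustrative assumptions.
class HeightConverter(LengthConverter):
    def _default_from_unit(self):
        return 'cm'
    def _default_to_unit(self):
        return 'in'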
|
kishori82/MetaPathways_Python.3.0
|
utilities/extract_flat_files.py
|
Python
|
mit
| 7,627 | 0.023994 |
#!/usr/bin/python
"""This script run the pathologic """
try:
import optparse, sys, re, csv, traceback
from optparse import OptionGroup
import pickle
import math
from libs.python_modules.taxonomy.LCAComputation import *
import operator
from os import path, _exit, remove, rename
import logging.handlers
from glob import glob
from libs.python_modules.utils.sysutil import pathDelim
from libs.python_modules.utils.metapathways_utils import fprintf, printf, eprintf, exit_process
from libs.python_modules.utils.sysutil import getstatusoutput
from libs.python_modules.utils.pathwaytoolsutils import *
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
print traceback.print_exc(10)
sys.exit(3)
PATHDELIM=pathDelim()
def fprintf(file, fmt, *args):
file.write(fmt % args)
def printf(fmt, *args):
sys.stdout.write(fmt % args)
def files_exist( files , errorlogger = None):
status = True
for file in files:
if not path.exists(file):
if errorlogger:
errorlogger.write( 'ERROR\tCould not find ptools input file : ' + file )
status = False
return not status
usage = sys.argv[0] + """ -s sample -p pgdb_dir --ptoolsExec pathwaytools_executable """
parser = None
def createParser():
global parser
epilog = """The flat file extraction script"""
epilog = re.sub(r'\s+', ' ', epilog)
parser = optparse.OptionParser(usage=usage, epilog = epilog)
standard_options_group = OptionGroup(parser, "Standard Ptools group" )
# Input options
standard_options_group.add_option('-s', '--sample', dest='sample_name', default=None,
help='sample name')
standard_options_group.add_option('-p', '--pgdb', dest='pgdbdir', default=None,
help='folder of the PGDB')
standard_options_group.add_option('--ptoolsExec', dest='ptoolsExec', default=None,
help='PathoLogic Executable')
standard_options_group.add_option("-o", "--output-pwy-table", dest="table_out",
help='the output table for the pathways [REQUIRED]')
import os, signal
TIME = 10
def __StopPathwayTools():
processPATT = re.compile(r'pathway-tools-runtime')
for line in os.popen("ps xa"):
fields = line.split()
pid = fields[0]
process = fields[4]
result = processPATT.search(process)
if result :
os.kill(int(pid), signal.SIGHUP)
def StopPathwayTools():
try:
__StopPathwayTools()
time.sleep(TIME)
__StopPathwayTools()
time.sleep(TIME)
if path.exists("/tmp/ptools-socket"):
remove("/tmp/ptools-socket")
except:
pass
def main(argv, errorlogger = None, runcommand = None, runstatslogger = None):
global parser
options, args = parser.parse_args(argv)
# is there a pathwaytools executable installed
if False and not path.exists(options.ptoolsExec):
eprintf("ERROR\tPathwayTools executable %s not found!\n", options.ptoolsExec)
if errorlogger:
errorlogger.printf("ERROR\tPathwayTools executable %s not found!\n", options.ptoolsExec)
exit_process("ERROR\tPathwayTools executable %s not found!\n" %(options.ptoolsExec))
# command to build the ePGDB
command = "%s " %(options.ptoolsExec)
command += " -api"
pythonCyc = startPathwayTools(options.sample_name.lower(), options.ptoolsExec, True)
#resultLines = pythonCyc.getReactionListLines()
    resultLines = pythonCyc.getFlatFiles()
StopPathwayTools()
try:
if False:
pythonCyc = startPathwayTools(options.sample_name.lower(), options.ptoolsExec, True)
pythonCyc.setDebug() # disable pathway debug statements
printf("INFO\tExtracting the reaction list from ePGDB " + options.sample_name + "\n")
resultLines = pythonCyc.getReactionListLines()
#pythonCyc.stopPathwayTools()
reaction_list_file = open(options.reactions_list + ".tmp", 'w')
for line in resultLines:
fprintf(reaction_list_file,"%s\n",line.strip())
reaction_list_file.close()
StopPathwayTools()
except:
print traceback.print_exc(10)
eprintf("ERROR\tFailed to run extract pathways for %s : \n" %(options.sample_name))
eprintf("INFO\tKill any other PathwayTools instance running on the machine and try again")
if errorlogger:
errorlogger.write("ERROR\tFailed to run extract pathways for %s : " %(options.sample_name))
errorlogger.write("INFO\tKill any other PathwayTools instance running on the machine and try again\n")
StopPathwayTools()
def startPathwayTools(organism, ptoolsExec, debug):
StopPathwayTools()
pythonCyc = PythonCyc()
pythonCyc.setDebug(debug = debug)
pythonCyc.setOrganism(organism)
pythonCyc.setPToolsExec(ptoolsExec)
pythonCyc.startPathwayTools()
return pythonCyc
def runPathologicCommand(runcommand = None):
if runcommand == None:
return False
result = getstatusoutput(runcommand)
return result[0]
# this is the portion of the code that fixes the name
def split_attributes(str, attributes):
rawattributes = re.split(';', str)
for attribStr in rawattributes:
insert_attribute(attributes, attribStr)
return attributes
def fixLine(line, id):
fields = line.split('\t')
if len(fields)==2:
return fields[0]+'\t' + id
def getID(line):
fields = line.split('\t')
if len(fields)==2:
return fields[1]
def write_new_file(lines, output_file):
print "Fixing file " + output_file
try:
outputfile = open(output_file,'w')
pass
except IOError:
print "ERROR :Cannot open output file " + output_file
for line in lines:
fprintf(outputfile, "%s\n", line)
outputfile.close()
def cleanup(string):
"""
Cleans up pathway long-names for presentation.
:param string:
:return:
"""
string = re.sub("|", "", string) # vertical bar
string = re.sub("&", "", string) # ampersand
string = re.sub(";", "", string) # semicolon
string = re.sub("<[^<]+?>", '', string) # HTML tags
string = re.sub("\'", "", string) # remove quotes
return string
def get_preferred_taxa_name(taxa_id, megan_map, id_to_name):
"""
Helper function to format NCBI IDs into preferred names. First checks for MEGAN name,
if not found moves to current taxonomy in loaded NCBI taxonomy tree, failing that
gives the taxonomy of 'Unknown', but still provides the id, e.g., 'Unknown (12345)'.
:param taxa_id: numeric taxa id to translate
:param megan_map: preferred megan mapping hash
:param id_to_name: local ncbi tree hash
:return: "perferred name (id)"
"""
taxa_id = str(taxa_id)
if taxa_id in megan_map:
taxa = megan_map[ taxa_id ] + " (" + taxa_id + ")"
elif taxa_id in id_to_name:
taxa = id_to_name[ taxa_id ] + " (" + taxa_id + ")"
else:
taxa = "Unknown" + " (" + taxa_id + ")"
return taxa
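# Illustrative call (ids and mapping contents assumed):
#   get_preferred_taxa_name('562', {'562': 'Escherichia coli'}, {})
#   returns 'Escherichia coli (562)'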
def MetaPathways_run_pathologic(argv, extra_command = None, errorlogger = None, runstatslogger =None):
if errorlogger != None:
errorlogger.write("#STEP\tBUILD_PGDB\n")
createParser()
main(argv, errorlogger = errorlogger, runcommand= extra_command, runstatslogger = runstatslogger)
return (0,'')
if __name__ == '__main__':
createParser()
main(sys.argv[1:])
|
mozaik-association/mozaik
|
mozaik_mass_mailing_access_rights/__manifest__.py
|
Python
|
agpl-3.0
| 625 | 0 |
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Mozaik Mass Mailing Access Rights",
"summary": """
New group: Mass Mailing Manager. Managers can edit
and unlink mass mailings.""",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV",
"website": "https://github.com/OCA/mozaik",
"depends": [
"mass_
|
mailing",
],
"data": [
"security/groups.xml",
"security/ir.model.access.csv",
"views/mailing_mailing.xml",
"views/mail_template.xml",
],
"demo": [],
}
|
johan--/Geotrek
|
geotrek/tourism/migrations/0022_auto__add_field_touristiceventtype_pictogram__add_field_touristicconte.py
|
Python
|
bsd-2-clause
| 14,288 | 0.006859 |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TouristicEventType.pictogram'
db.add_column('t_b_evenement_touristique_type', 'pictogram',
self.gf('django.db.models.fields.files.FileField')(max_length=512, null=True, db_column='picto'),
keep_default=False)
# Adding field 'TouristicContentType.pictogram'
db.add_column('t_b_contenu_touristique_type', 'pictogram',
self.gf('django.db.models.fields.files.FileField')(max_length=512, null=True, db_column='picto'),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TouristicEventType.pictogram'
db.delete_column('t_b_evenement_touristique_type', 'picto')
# Deleting field 'TouristicContentType.pictogram'
db.delete_column('t_b_contenu_touristique_type', 'picto')
models = {
u'authent.structure': {
'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'common.theme': {
'Meta': {'ordering': "['label']", 'object_name': 'Theme', 'db_table': "'o_b_theme'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'theme'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'tourism.datasource': {
'Meta': {'ordering': "['title', 'url']", 'object_name': 'DataSource', 'db_table': "'t_t_source_donnees'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"}),
'targets': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'titre'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_column': "'type'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '400', 'db_column': "'url'"})
},
u'tourism.informationdesk': {
'Meta': {'ordering': "['name']", 'object_name': 'InformationDesk', 'db_table': "'t_b_renseignement'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'municipality': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'commune'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nom'"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'telephone'", 'blank': 'True'}),
'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'photo'", 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'db_column': "'code'", 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'rue'", 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'desks'", 'db_column': "'type'", 'to': u"orm['tourism.InformationDeskType']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
},
u'tourism.informationdesktype': {
'Meta': {'ordering': "['label']", 'object_name': 'InformationDeskType', 'db_table': "'t_b_type_renseignement'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'tourism.touristiccontent': {
'Meta': {'object_name': 'TouristicContent', 'db_table': "'t_t_contenu_touristique'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contents'", 'db_column': "'categorie'", 'to': u"orm['tourism.TouristicContentCategory']"}),
'contact': ('django.db.models.fields.TextField', [], {'db_column': "'contact'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': str(settings.SRID)}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'practical_info': ('django.db.models.fields.TextField', [], {'db_column': "'infos_pratiques'", 'blank': 'True'}),
'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'touristiccontents'", 'to': u"orm['common.Theme']", 'db_table': "'t_r_contenu_touristique_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'type1': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'contents1'", 'blank': 'True', 'db_table': "'t_r_contenu_touristique_type1'", 'to': u"orm['tourism.TouristicContentType']"}),
'type2': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'contents2'", 'blank': 'True', 'db_table': "'t_r_contenu_touristique_type2'", 'to': u"orm['tourism.TouristicContentType']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
},
u'tourism.touristiccontentcategory': {
'Meta': {'ordering': "['label']", 'object_name': 'TouristicContentCategory', 'db_table': "'t_b_contenu_touristique_categorie'"},
'
|
tomachalek/kontext
|
lib/mailing.py
|
Python
|
gpl-2.0
| 2,086 | 0.000479 |
# Copyright (c) 2016 Czech National Corpus
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import smtplib
from email.mime.text import MIMEText
import logging
import settings
def smtp_factory():
"""
Create a new SMTP instance with some predefined stuff
:return:
"""
username = settings.get('mailing', 'auth_username')
password = settings.get('mailing', 'auth_password')
port = settings.get_int('mailing', 'smtp_port', 25)
use_tls = settings.get_bool('mailing', 'use_tls', False)
server = smtplib.SMTP(settings.get('mailing', 'smtp_server'), port=port)
if use_tls:
server.starttls()
if username and password:
server.login(username, password)
return server
def message_factory(recipients, subject, text, reply_to=None):
"""
Create message instance with some predefined properties
"""
msg = MIMEText(text, 'plain', 'utf-8')
msg['Subject'] = subject
msg['From'] = settings.get('mailing', 'sender')
msg['To'] = recipients[0]
if reply_to:
msg.add_header('Reply-To', reply_to)
return msg
def send_mail(server, msg, recipients):
sender = settings.get('mailing', 'sender')
try:
server.sendmail(sender, recipients, msg.as_string())
ans = True
except Exception as ex:
logging.getLogger(__name__).warn(
'There were errors sending e-mail: %s' % (ex,))
ans = False
finally:
server.quit()
return ans
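A minimal usage sketch of the helpers above, assuming the mailing settings are configured:
recipients = ['user@example.com']
server = smtp_factory()
msg = message_factory(recipients, subject='Hello', text='Test message',
                      reply_to='noreply@example.com')
ok = send_mail(server, msg, recipients)  # send_mail() also quits the server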
|
DataDog/gunicorn
|
tests/requests/valid/004.py
|
Python
|
mit
| 164 | 0 |
request = {
"method": "GET",
"uri": uri(
|
"/silly"),
"version": (1, 1),
"headers": [
("AAAAAAAAAAAAA", "++++++++++")
],
"body": b""
}
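The raw request this fixture presumably corresponds to (reconstructed for illustration, not part of the test data):
raw = (
    b"GET /silly HTTP/1.1\r\n"
    b"AAAAAAAAAAAAA: ++++++++++\r\n"
    b"\r\n"
)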
|
rainwoodman/fastpm-python
|
fastpm/tests/test_state.py
|
Python
|
gpl-3.0
| 600 | 0.005 |
from fastpm.state import StateVector, Matter, Baryon, CDM, NCDM
from runtests.mpi import MPITest
from nbodykit.cosmology import Planck15 as cosmo
import numpy
BoxSize = 100.
Q = numpy.zeros((100, 3))
@MPITest([1, 4])
def test_create(comm):
matter = Matter(cosmo, BoxSize, Q, comm)
cdm = CDM(cosmo, BoxSize, Q, comm)
cdm.a['S'] = 1.0
cdm.a['P'] = 1.0
baryon = Baryon(cosmo, BoxSize, Q, comm)
    baryon.a['S'] = 1.0
baryon.a['P'] = 1.0
state = StateVector(cosmo, {'0': baryon, '1' : cdm}, comm)
state.a['S'] = 1.0
state.a['P'] = 1.0
state.save("state")
|
tfroehlich82/django-propeller
|
manage.py
|
Python
|
mit
| 819 | 0.001221 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_propeller_demo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
            raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractLimostnWordpressCom.py
|
Python
|
bsd-3-clause
| 380 | 0.028947 |
def extractLimostnWordpressCom(item):
'''
Parser for 'limostn.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "The Outcast" in item['tags']:
return buildReleaseMessageWithType(item, "The Outcast", vol, chp, frag=frag, postfix=postfix)
	return False
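A hypothetical call showing the expected item shape; extractVolChapterFragmentPostfix and buildReleaseMessageWithType come from the surrounding framework and are not defined in this snippet:
item = {
    'title': 'The Outcast - Chapter 12',
    'tags': ['The Outcast'],
}
msg = extractLimostnWordpressCom(item)  # a release message if the title parses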
|
GFibrizo/TPS_7529
|
TDA Grafo/Dijkstra.py
|
Python
|
apache-2.0
| 1,960 | 0.003061 |
from grafo import Digraph
from CaminoMinimo import CaminoMinimo
import heapq
class Dijkstra(CaminoMinimo):
def __init__(self, grafo, origen, destino):
CaminoMinimo.__init__(self, grafo, origen, destino)
self.dist_heap = []
#heapq.heapify(self.dist_heap)
for i in xrange(self.grafo.V()):
self.distancias.append(self.INFINITE)
heapq.heappush(self.dist_heap, [self.distancia(i), i])
self.distancia_actual = 0
self._camino_minimo()
def _camino_minimo(self):
vertice = self.origen
self.distancias[vertice] = self.distancia_actual
self.padre[vertice] = None
anterior = None
while (len(self.visitados) < self.grafo.V()):
for vecino in self.grafo.adj(vertice):
nueva_dist = self.distancia_actual + self.grafo.obtener_arista(vertice, vecino).weight()
if (not self.visitado(vecino)) or (nueva_dist < self.distancias[vecino]):
self.distancias[vecino] = nueva_dist
self.padre[vecino] = vertice
self.visitados.add(vertice)
self.distancia_actual, vertice = self._obtener_siguiente()
def _obtener_siguiente(self):
heap = []
heapq.heapify(heap)
for i in xrange(self.grafo.V()):
if (i not in self.visitados):
heapq.heappush(heap, [self.distancia(i), i])
if len(heap) == 0:
            return self.distancia_actual, None
return heapq.heappop(heap)
graph = Digraph(8)
graph.add_edge(0, 1, 1) # 1 --
graph.add_edge(0, 2, 3) # 3
graph.add_edge(2, 3, 1) # 1
graph.add_edge(3, 4, 1) # 1
graph.add_edge(1, 4, 1) # 4
graph.add_edge(1, 2, 1) # 1
graph.add_edge(4, 5, 5) # 2
graph.add_edge(5, 6, 1) # 1
graph.add_edge(5, 7, 4) # 4
graph.add_edge(6, 7, 1) # 1
search = Dijkstra(graph, 0, 7)
print search.camino(7)
print search.distancia(7)
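For contrast, a self-contained sketch of the standard lazy-deletion Dijkstra with heapq, which avoids rebuilding a heap on every step (plain dict adjacency, independent of the Digraph class above):
import heapq

def dijkstra(adj, source):
    """adj: {u: [(v, weight), ...]}; returns shortest distances from source."""
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float('inf')):
            continue  # stale heap entry, skip
        for v, w in adj.get(u, []):
            nd = d + w
            if nd < dist.get(v, float('inf')):
                dist[v] = nd
                heapq.heappush(heap, (nd, v))
    return dist

assert dijkstra({0: [(1, 1), (2, 3)], 1: [(2, 1)], 2: []}, 0) == {0: 0, 1: 1, 2: 2}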
|
craws/OpenAtlas
|
openatlas/util/changelog.py
|
Python
|
gpl-2.0
| 26,790 | 0.000037 |
versions = {
'7.1.1': ['2022-03-04', {
'fix': {
'1649': 'Minor label error in model image',
'1650': 'Obsolete database version in install SQL'}}],
'7.1.0': ['2022-02-15', {
'feature': {
'1506': 'Update CIDOC CRM to 7.1.1',
'1285': 'Improved value types display',
'1593': 'Adding multiple aliases at once instead one at a time',
'1629': 'Improved mail function',
'1624': 'Minor improvements and refactor',
'1599': 'API: Search parameter include all subtypes',
'1600': 'API: Search for values in value types',
'1623': 'API: Show types for view name'},
'fix': {
'1626':
"API: typed_entities doesn't show types in geojson format"}}],
'7.0.4': ['2022-02-10', {
'fix': {
'1616': 'Error at inserting an administrative unit'}}],
'7.0.3': ['2022-02-02', {
'fix': {
'1634': 'Value type with subtype error'}}],
'7.0.2': ['2022-01-20', {
'fix': {
'1632': 'Multiple flag gets lost when updating a hierarchy'}}],
'7.0.1': ['2022-01-05', {
'fix': {
'1627': 'Error when creating a source from file view'}}],
'7.0.0': ['2022-01-01', {
'feature': {
'1566': 'Update OpenAtlas software to Debian/bullseye',
'1297': 'Connecting events sequentially',
'1577': 'Show actors at places from events',
'1615': 'Additional step by step examples in manual',
'1549': 'API: deprecation of node and subunit functions',
'1579': 'API: Include Swagger documentation',
'1598': 'API: Offset Pagination',
'1603': 'API: Specialized GeoJSON format for subunits',
'1622': 'API: search with dates',
'1605': 'Refactor'},
'fix': {
'1614': 'Custom folders in uploads causing errors'}}],
'6.6.4': ['2021-12-23', {
'fix': {'1621': 'Error at CSV export'}}],
'6.6.3': ['2021-12-08', {
'fix': {'1616': 'Error at inserting an administrative unit'}}],
'6.6.2': ['2021-11-23', {
'fix': {'1609': 'Problem with types'}}],
'6.6.1': ['2021-11-20', {
'fix': {'1607': 'Error at profile for readonly users'}}],
'6.6.0': ['2021-11-18', {
'feature': {
'1500': 'Production of artifacts',
'1563': 'OpenAtlas model to database',
'1597': 'Join artifacts and finds',
'1584': 'Track needed and actual database version',
'1589': 'Additional and improved system warnings',
'1546': 'API: New search parameter',
'1583': 'Refactor'}}],
'6.5.2': ['2021-11-07', {
'fix': {'#1596:': 'Sometimes unavailable add custom type button'}}],
'6.5.1': ['2021-10-28', {
'fix': {'#1592:': 'Error at import when using type ids'}}],
'6.5.0': ['2021-09-19', {
'feature': {
'1462': 'Current owner of artifacts',
'1562': 'Update manual overlay',
            '1184': 'API: add additional output format RDFS',
'1551': 'API: Relation type adaptions, adding relationDescription',
'1561': 'Refactor'},
'fix': {
'1557': 'Save buttons blocked by map',
            '1580': 'Hidden error messages for reference systems',
'1570': 'API: Wrong type signature in OpenApi'}}],
'6.4.1': ['2021-08-11', {
'fix': {
'1559': 'Installation problem because of missing upload folder'}}],
'6.4.0': ['2021-08-10', {
'feature': {
'1280': 'Picture Preview',
'1492': 'Image Processing',
'1552': 'External reference systems for sources',
'1538': 'Focus on table filter at overview pages',
'1531': 'Map overlay improved',
'1523': 'Performance issues while linking pictures',
'1558': 'Manual entry profession',
'1536': 'Refactor',
'1426': 'API: Display image smaller size',
'1495': 'API: Additional Geojson output for QGIS imports',
'1529': 'API: Increase request performance',
'1530': 'API: Geometries endpoint for frontend map',
'1535': 'API: Get all entities linked to an entity',
'1537': 'API: Type entities for actor types',
'1545': 'API: Filter entities by types'},
'fix': {
'1414': 'Enlarged Description Field Covers up Entry Buttons',
'1539': 'Pagination not shown for tables sometimes',
'1554': 'Error at value type view'}}],
'6.3.0': ['2021-06-13', {
'feature': {
'1513': 'Add reference page for multiple files',
'1520': 'Better value type display',
'1527': 'Improved tab system',
'1502': 'Show count of finds when entering additional',
'1509': 'Manual - examples for use cases',
'1512': 'Refactor',
'1478': 'API: latest with pagination',
'1516': 'API: implement Google JSON style',
'1526': 'API: Refactor'},
'fix': {
'1515': 'API: Paging count faulty'}}],
'6.2.1': ['2021-05-12', {
'fix': {
'1514': 'End dates of entities are not displayed correctly'}}],
'6.2.0': ['2021-05-08', {
'feature': {
'940': 'Multiple file upload',
'1284': 'Show image when editing a place or artifact',
'1428': 'Configuration of frontend site name',
'1476': 'Show/hide button for multiple reference systems',
'1494': 'Refactor',
'1496': 'API: Endpoints for entities of type',
'1490': 'API: Refactor'}}],
'6.1.0': ['2021-04-05', {
'feature': {
'1215': 'Time spans for types',
'1443': 'List view for untyped entities',
'1457': 'Public notes',
'963': 'API: Add type of places to export',
'1402': 'API: CSV export in API',
'1487': 'API: Endpoint for type hierarchies',
'1489': 'API: Geometry for artifacts'}}],
'6.0.1': ['2021-03-15', {
'fix': {
'1485': 'Cannot choose multiple for custom type'}}],
'6.0.0': ['2021-03-13', {
'feature': {
'1091': 'Reference systems for types',
'1109': 'Sustainable web map services',
'1456': 'Artifacts',
'1187': 'Add files for artifacts',
'1465':
'Merge legal body to group, information carrier to artifact',
'1461': 'Also search in date comments',
'1398': 'Compress SQL export files',
'1274': 'API: Automatic documentation for code',
'1390': 'API: Swagger file in OpenAtlas repository',
'1479': 'API: get by view name and system class',
'1484': 'API: Add new functions',
'1447': 'Refactor'},
'fix': {
'1477': 'Unable to select an entity with single quote in name',
'1452': 'API: "type" is empty if more entities are requested',
'1471': 'API: Url to linked places deprecated'}}],
'5.7.2': ['2021-01-27', {
'fix': {
'1455': 'Network graphic error'}}],
'5.7.1': ['2021-01-26', {
'fix': {
'1454': 'Error in install instructions'}}],
'5.7.0': ['2021-01-16', {
'feature': {
'1292': 'External reference systems',
'1440': 'Search with unaccented characters',
'1386': 'API: Flask restful framework'},
'fix': {
'1434': 'Errors with types if named like standard types',
'1427': 'API: Paging is broken'}}],
'5.6.0': ['2020-11-30', {
'feature': {
'930': 'Wikidata API',
'1409': 'Redesign forms',
'1393': 'Split profile display options',
'1395': 'Content for frontends',
'1347': 'All icons to Font Awesome icons',
'1379': 'Feature votes',
'1407': 'Extend session availability (prevent CSRF token timeout)',
'1412': 'AP
|
harikishen/addons-server
|
src/olympia/devhub/tests/test_cron.py
|
Python
|
bsd-3-clause
| 1,566 | 0 |
import datetime
import os
from django.conf import settings
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon
from olympia.devhub.cron import update_blog_posts
from olympia.devhub.tasks import convert_purified
from olympia.devhub.models import BlogPost
class TestRSS(TestCase):
def test_rss_cron(self):
url = os.path.join(
settings.ROOT, 'src', 'olympia', 'devhub', 'tests',
'rss_feeds', 'blog.xml')
settings.DEVELOPER_BLOG_URL = url
update_blog_posts()
assert BlogPost.objects.count() == 5
bp = BlogPost.objects.all()[0]
url = ("http://blog.mozilla.com/addons/2011/06/10/"
"update-in-time-for-thunderbird-5/")
        assert bp.title == 'Test!'
assert bp.date_posted == datetime.date(2011, 6, 10)
assert bp.permalink == url
class TestPurify(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestPurify, self).setUp()
self.addon = Addon.objects.get(pk=3615)
def test_no_html(self):
self.addon.the_reason = 'foo'
self.addon.save()
last = Addon.objects.get(pk=3615).modified
convert_purified([self.addon.pk])
addon = Addon.objects.get(pk=3615)
assert addon.modified == last
def test_has_html(self):
self.addon.the_reason = 'foo <script>foo</script>'
self.addon.save()
convert_purified([self.addon.pk])
addon = Addon.objects.get(pk=3615)
assert addon.the_reason.localized_string_clean
|
mfnch/pyrtist
|
pyrtist/gui/dox/dox.py
|
Python
|
lgpl-2.1
| 2,729 | 0.009894 |
# Copyright (C) 2011, 2012 by Matteo Franchin
#
# This file is part of Pyrtist.
#
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import fnmatch
from logger import log_msg, set_log_context
from tree import DoxType, DoxProc, DoxInstance, DoxTree
from context import Context
import builder
class Dox(object):
def __init__(self):
self.file = None
self.tree = DoxTree()
self.context = Context()
def log(self, msg, show_hdr=False):
hdr = ""
if show_hdr and self.file != None:
hdr = self.file.filename
if self.file.nr_line != None:
hdr += "(%d)" % (self.file.nr_line)
hdr += ": "
log_msg(hdr + msg)
def read_recursively(self, rootpath,
extensions = ["*.dox", "*.bxh", "*.box"]):
for dirpath, dirnames, filenames in os.walk(rootpath):
doxfiles = []
for extension in extensions:
doxfiles.extend(fnmatch.filter(filenames, extension))
if doxfiles:
doxfiles = list(set(doxfiles)) # Remove duplicates
for filename in doxfiles:
self.read_file(os.path.join(dirpath, filename))
def read_file(self, filename):
"""Read documentation content from the given file."""
set_log_context("File '%s'" % filename)
with open(filename, "r") as f:
text = f.read()
slices = builder.create_classified_slices_from_text(text)
blocks = builder.create_blocks_from_classified_slices(slices)
builder.associate_targets_to_blocks(slices)
context = self.context.create_context(sourcefile=filename,
section=None)
self.context = builder.associate_contexts_to_blocks(blocks, context)
builder.add_blocks_to_tree(self.tree, blocks)
if __name__ == "__main__":
import sys
dox = Dox()
dox.read_recursively(sys.argv[1])
tree = dox.tree
tree.process()
from rst import RSTWriter
docinfo = \
{"title": "Box Reference Manual",
"has_index": True,
"index_title": "Index of available object types",
"out_file": "out"}
writer = RSTWriter(dox.tree, docinfo=docinfo)
writer.save()
|
sosguns2002/interactive-mining
|
interactive-mining-3rdparty-madis/madis/src/functions/vtable/timeslidingwindow.py
|
Python
|
gpl-3.0
| 6,001 | 0.0025 |
"""
.. function:: timeslidingwindow(timewindow, timecolumn) -> query results
Returns the query input results annotated with the window id as an extra column.
The following arguments can be passed as parameters:
timewindow: It can be a numeric value that specifies the time length of
the window (in seconds).
timecolumn: It is the index of the temporal column (starting from 0) For
the moment, we assume that the data is ordered by the temporal column that
the user gives as input in ascending order.
Examples::
>>> table1('''
... "12.05.2010 00:00:00"
... "12.05.2010 00:01:00"
... "12.05.2010 00:02:00"
... "12.05.2010 00:03:00"
... "12.05.2010 00:04:00"
... ''')
>>> sql("timeslidingwindow timewindow:180 timecolumn:0 select * from table1")
wid | a
-------------------------
0 | 12.05.2010 00:00:00
0 | 12.05.2010 00:01:00
0 | 12.05.2010 00:02:00
0 | 12.05.2010 00:03:00
    1   | 12.05.2010 00:01:00
1 | 12.05.2010 00:02:00
1 | 12.05.2010 00:03:00
1 | 12.05.2010 00:04:00
>>> table1('''
... "12.05.2010 00:00:00"
... "12.05.2010 00:01:00"
... "12.05.2010 00:01:00"
... "12.05.2010 00:02:00"
... "12.05.2010 00:03:00"
... "12.05.2010 00:04:00"
... "12.05.2010 00:05:00"
... ''')
>>> sql("timeslidingwindow timewindow:120 timecolumn:0 select * from table1")
wid | a
-------------------------
0 | 12.05.2010 00:00:00
0 | 12.05.2010 00:01:00
0 | 12.05.2010 00:01:00
0 | 12.05.2010 00:02:00
1 | 12.05.2010 00:01:00
1 | 12.05.2010 00:01:00
1 | 12.05.2010 00:02:00
1 | 12.05.2010 00:03:00
2 | 12.05.2010 00:02:00
2 | 12.05.2010 00:03:00
2 | 12.05.2010 00:04:00
3 | 12.05.2010 00:03:00
3 | 12.05.2010 00:04:00
3 | 12.05.2010 00:05:00
>>> table2('''
... "12/05/2010 00:00:00"
... "12/05/2010 00:01:00"
... "12/05/2010 00:02:00"
... ''')
>>> sql("timeslidingwindow timewindow:180 timecolumn:0 select * from table2")
wid | a
-------------------------
0 | 12/05/2010 00:00:00
0 | 12/05/2010 00:01:00
0 | 12/05/2010 00:02:00
"""
import setpath
import vtbase
import functions
from collections import deque
import time
from lib.dateutil import parser
### Classic stream iterator
registered = True
class TimeSlidingWindow(vtbase.VT):
def VTiter(self, *parsedArgs, **envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No query argument ")
query = dictargs['query']
if 'timewindow' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No TimeWindow argument ")
else:
winlen = int(dictargs['timewindow'])
if 'timecolumn' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No timecolumn argument ")
else:
timecolumn = int(dictargs['timecolumn'])
cur = envars['db'].cursor()
c = cur.execute(query, parse=False)
try:
yield [('wid', 'integer')] + list(cur.getdescriptionsafe())
except StopIteration:
try:
raise
finally:
try:
c.close()
except:
pass
wid = 0
secs = 0
row = c.next()
firstTime = int(time.mktime(parser.parse(row[timecolumn], fuzzy=True).timetuple()))
head = {firstTime: [row]}
window = deque([])
while row:
prev = row
try:
row = c.next()
except StopIteration:
if wid == 0:
for k in head.keys():
for t in head[k]:
yield (wid,) + t
for rl in window:
for k in rl.keys():
for t in rl[k]:
yield (wid,) + t
break
secs = int(time.mktime(parser.parse(row[timecolumn], fuzzy=True).timetuple()))
if secs <= firstTime + winlen:
if prev[0] == row[timecolumn] and window:
old = window.pop()[secs]
old.append(row)
rowlist = {secs: old}
else:
rowlist = {secs: [row]}
window.append(rowlist)
else:
if wid == 0:
for k in head.keys():
for t in head[k]:
yield (wid,) + t
for rl in window:
for k in rl.keys():
for t in rl[k]:
yield (wid,) + t
while secs > firstTime + winlen and window:
try:
head = window.popleft()
firstTime = head.keys()[0]
except IndexError:
break
rowlist = {secs: [row]}
window.append(rowlist)
wid += 1
for k in head.keys():
for t in head[k]:
yield (wid,) + t
for rl in window:
for k in rl.keys():
for t in rl[k]:
yield (wid,) + t
def Source():
return vtbase.VTGenerator(TimeSlidingWindow)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
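The windowing rule itself — emit a new window for each distinct start time whose span still covers unseen rows — can be sketched without the VT machinery; this mirrors the docstring examples but skips the date parsing:
def sliding_windows(rows, winlen):
    """rows: list of (seconds, payload) sorted by seconds; yields (wid, row)."""
    starts = sorted(set(sec for sec, _ in rows))
    wid = 0
    last_end = None
    for start in starts:
        window = [r for r in rows if start <= r[0] <= start + winlen]
        if last_end is not None and window[-1][0] <= last_end:
            continue  # no rows beyond the previous window; nothing new to emit
        for row in window:
            yield wid, row
        last_end = window[-1][0]
        wid += 1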
|
ahuarte47/QGIS
|
tests/src/python/test_qgsvectorfilewriter.py
|
Python
|
gpl-2.0
| 49,644 | 0.002397 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import next
from builtins import str
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsField,
QgsGeometry,
QgsPointXY,
QgsCoordinateReferenceSystem,
QgsVectorFileWriter,
QgsFeatureRequest,
QgsProject,
QgsWkbTypes,
QgsRectangle,
QgsCoordinateTransform
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir, QByteArray
import os
import tempfile
import osgeo.gdal # NOQA
from osgeo import gdal, ogr
from qgis.testing import start_app, unittest
from utilities import writeShape, compareWkt, unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
start_app()
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
class TestFieldValueConverter(QgsVectorFileWriter.FieldValueConverter):
def __init__(self, layer):
QgsVectorFileWriter.FieldValueConverter.__init__(self)
self.layer = layer
def fieldDefinition(self, field):
idx = self.layer.fields().indexFromName(field.name())
if idx == 0:
return self.layer.fields()[idx]
elif idx == 2:
return QgsField('conv_attr', QVariant.String)
return QgsField('unexpected_idx')
def convert(self, idx, value):
if idx == 0:
return value
elif idx == 2:
if value == 3:
return 'converted_val'
else:
return 'unexpected_val!'
return 'unexpected_idx'
class TestQgsVectorFileWriter(unittest.TestCase):
mMemoryLayer = None
def testWrite(self):
"""Check we can write a vector file."""
self.mMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=age:integer&field=size:double&index=yes'),
'test',
'memory')
self.assertIsNotNone(self.mMemoryLayer, 'Provider not initialized')
myProvider = self.mMemoryLayer.dataProvider()
self.assertIsNotNone(myProvider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(['Johny', 20, 0.3])
myResult, myFeatures = myProvider.addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
writeShape(self.mMemoryLayer, 'writetest.shp')
def testWriteWithLongLongField(self):
ml = QgsVectorLayer('NoGeometry?crs=epsg:4326&field=fldlonglong:long',
'test2', 'memory')
provider = ml.dataProvider()
feat = QgsFeature()
feat.setAttributes([2262000000])
provider.addFeatures([feat])
filename = os.path.join(str(QDir.tempPath()), 'with_longlong_field')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
        rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(ml, filename, 'utf-8', crs, 'GPKG')
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
# test values
idx = vl.fields().indexFromName('fldlonglong')
self.assertEqual(vl.getFeature(1).attributes()[idx], 2262000000)
del vl
os.unlink(filename + '.gpkg')
def testWriteWithBoolField(self):
# init connection string
dbconn = 'service=qgis_test'
if 'QGIS_PGTEST_DB' in os.environ:
dbconn = os.environ['QGIS_PGTEST_DB']
# create a vector layer
vl = QgsVectorLayer('{} table="qgis_test"."boolean_table" sql='.format(dbconn), "testbool", "postgres")
self.assertTrue(vl.isValid())
# check that 1 of its fields is a bool
fields = vl.fields()
self.assertEqual(fields.at(fields.indexFromName('fld1')).type(), QVariant.Bool)
# write a gpkg package with a bool field
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
filename = os.path.join(str(QDir.tempPath()), 'with_bool_field')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
crs,
'GPKG')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('fld1')
self.assertEqual(fields.at(idx).type(), QVariant.Bool)
# test values
self.assertEqual(vl.getFeature(1).attributes()[idx], 1)
self.assertEqual(vl.getFeature(2).attributes()[idx], 0)
del vl
os.unlink(filename + '.gpkg')
def testDateTimeWriteShapefile(self):
"""Check writing date and time fields to an ESRI shapefile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertTrue(ml.isValid())
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
# shapefiles do not support time types, result should be string
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.String)
# shapefiles do not support datetime types, result should be string
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.String)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
# shapefiles do not support time types
self.assertIsInstance(f.attributes()[time_idx], str)
self.assertEqual(f.attributes()[time_idx], '13:45:22')
# shapefiles do not support datetime types
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], str)
self.
|
knuu/competitive-programming
|
atcoder/dp/edu_dp_o.py
|
Python
|
mit
| 818 | 0.002445 |
import sys
import ctypes
def popcount(N):
if sys.platform.startswith('linux'):
libc = ctypes.cdll.LoadLibrary('libc.so.6')
        return libc.__sched_cpucount(ctypes.sizeof(ctypes.c_long), (ctypes.c_long * 1)(N))
elif sys.platform == 'darwin':
libc = ctypes.cdll.LoadLibrary('libSystem.dylib')
return libc.__popcountdi2(N)
else:
assert(False)
def main():
N = int(input())
mod = 10 ** 9 + 7
A = [[int(x) for x in input().split()] for _ in range(N)]
dp = [0] * (1 << N)
dp[0] = 1
for state in range(1 << N):
dp[state] %= mod
i = popcount(state)
for j in range(N):
if (state >> j & 1) == 0 and A[i][j]:
dp[state | (1 << j)] += dp[state]
print(dp[-1])
if __name__ == '__main__':
main()
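The ctypes popcount above is platform-specific; a portable equivalent for non-negative ints is simply bin(n).count('1') (or int.bit_count() on Python 3.10+):
def popcount_portable(n):
    # Same result as the ctypes version, on any platform.
    return bin(n).count('1')

assert popcount_portable(0b101101) == 4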
|
alfateam123/Teca
|
tests/test_utils.py
|
Python
|
mit
| 902 | 0.001109 |
import teca.utils as tecautils
import teca.ConfigHandler as tecaconf
import unittest
class TestFileFilter(unittest.TestCase):
def setUp(self):
self.conf = tecaconf.ConfigHandler(
"tests/test_data/configuration.json",
{"starting_path": "tests/test_data/images"}
)
self.files_list = [
"foo.doc",
"yukinon.jpg",
"cuteflushadoingflushathings.webm"
]
def test_dothefiltering(self):
self.assertTrue("foo.doc" not in
tecautils.filterImages(self.files_list,
self.conf))
self.assertTrue("yukinon.jpg" in
tecautils.filterImages(self.files_list,
self.conf))
def test_nofiles(self):
self.assertEqual(0, len(tecautils.filterImages([], self.conf)))
|
l33tdaima/l33tdaima
|
local_packages/binary_tree.py
|
Python
|
mit
| 1,599 | 0.001251 |
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val: int = 0, left: "TreeNode" = None, right: "TreeNode" = None):
self.val = val
self.left = left
self.right = right
@classmethod
def serialize(cls, root: "TreeNode") -> str:
"""Encodes a tree to a single string."""
buffer = []
def _serialize(root: "TreeNode"):
if root is None:
buffer.append("#")
return
buffer.append(str(root.val))
_serialize(root.left)
_serialize(root.right)
_serialize(root)
return ",".join(buffer)
@classmethod
def deserialize(cls, data: str) -> "TreeNode":
"""Decodes your encoded data to tree."""
buffer = data.split(",")
def _deserialize(buffer: List[str]):
if len(buffer) == 0:
return None
val = buffer.pop(0)
if val == "#" or val == "":
return None
node = TreeNode(int(val))
node.left = _deserialize(buffer)
node.right = _deserialize(buffer)
return node
return _deserialize(buffer)
if __name__ == "__main__":
tests = [
"#",
"1,#,#",
"1,2,#,#,#",
"1,#,2,#,#",
"1,2,#,#,3,#,#",
"1,2,#,#,3,4,5,#,#,#,#",
]
for t in tests:
actual = TreeNode.serialize(TreeNode.deserialize(t))
print("serialize(deserialize
|
) ->", actual)
assert t == TreeNode.serialize(TreeNode.deserialize(t))
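A small round-trip sketch using the preorder-with-'#'-sentinel encoding implemented above:
root = TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4), None))
data = TreeNode.serialize(root)        # '1,2,#,#,3,4,#,#,#'
clone = TreeNode.deserialize(data)
assert TreeNode.serialize(clone) == data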
|
fireeye/flare-floss
|
floss/api_hooks.py
|
Python
|
apache-2.0
| 14,484 | 0.001105 |
# Copyright (C) 2017 FireEye, Inc. All Rights Reserved.
import contextlib
import envi
import viv_utils
class ApiMonitor(viv_utils.emulator_drivers.Monitor):
"""
The ApiMonitor observes emulation and cleans up API function returns.
"""
def __init__(self, vw, function_index):
viv_utils.emulator_drivers.Monitor.__init__(self, vw)
self.function_index = function_index
def apicall(self, emu, op, pc, api, argv):
# overridden from Monitor
self.d("apicall: %s %s %s %s %s", emu, op, pc, api, argv)
def prehook(self, emu, op, startpc):
# overridden from Monitor
pass
def posthook(self, emu, op, endpc):
# overridden from Monitor
if op.mnem == "ret":
try:
self._check_return(emu, op)
except Exception as e:
self.d(str(e))
def _check_return(self, emu, op):
"""
Ensure that the target of the return is within the allowed set of functions.
Do nothing, if return address is valid. If return address is invalid:
_fix_return modifies program counter and stack pointer if a valid return address is found
on the stack or raises an Exception if no valid return address is found.
"""
function_start = self.function_index[op.va]
return_addresses = self._get_return_vas(emu, function_start)
if op.opers:
# adjust stack in case of `ret imm16` instruction
emu.setStackCounter(emu.getStackCounter() - op.opers[0].imm)
return_address = self.getStackValue(emu, -4)
if return_address not in return_addresses:
self._logger.debug(
"Return address 0x%08X is invalid, expected one of: %s",
return_address,
", ".join(map(hex, return_addresses)),
)
self._fix_return(emu, return_address, return_addresses)
# TODO return, handle Exception
else:
self._logger.debug("Return address 0x%08X is valid, returning", return_address)
# TODO return?
def _get_return_vas(self, emu, function_start):
"""
Get the list of valid addresses to which a function should return.
"""
return_vas = []
callers = self._vw.getCallers(function_start)
for caller in callers:
call_op = emu.parseOpcode(caller)
return_va = call_op.va + call_op.size
return_vas.append(return_va)
return return_vas
def _fix_return(self, emu, return_address, return_addresses):
"""
Find a valid return address from return_addresses on the stack. Adjust the stack accordingly
or raise an Exception if no valid address is found within the search boundaries.
Modify program counter and stack pointer, so the emulator does not return to a garbage address.
"""
self.dumpStack(emu)
NUM_ADDRESSES = 4
pointer_size = emu.getPointerSize()
STACK_SEARCH_WINDOW = pointer_size * NUM_ADDRESSES
esp = emu.getStackCounter()
for offset in range(0, STACK_SEARCH_WINDOW, pointer_size):
ret_va_candidate = self.getStackValue(emu, offset)
if ret_va_candidate in return_addresses:
emu.setProgramCounter(ret_va_candidate)
emu.setStackCounter(esp + offset + pointer_size)
self._logger.debug("Returning to 0x%08X, adjusted stack:", ret_va_candidate)
self.dumpStack(emu)
return
self.dumpStack(emu)
raise Exception("No valid return address found...")
def dumpStack(self, emu):
"""
Convenience debugging routine for showing
state current state of the stack.
"""
esp = emu.getStackCounter()
stack_str = ""
for i in range(16, -16, -4):
if i == 0:
sp = "<= SP"
else:
sp = "%02x" % (-i)
stack_str = "%s\n0x%08x - 0x%08x %s" % (stack_str, (esp - i), self.getStackValue(emu, -i), sp)
self.d(stack_str)
def dumpState(self, emu):
self.i("eip: 0x%x", emu.getRegisterByName("eip"))
self.i("esp: 0x%x", emu.getRegisterByName("esp"))
self.i("eax: 0x%x", emu.getRegisterByName("eax"))
self.i("ebx: 0x%x", emu.getRegisterByName("ebx"))
self.i("ecx: 0x%x", emu.getRegisterByName("ecx"))
self.i("edx: 0x%x", emu.getRegisterByName("edx"))
self.dumpStack(emu)
def pointerSize(emu):
"""
Convenience method whose name might be more readable
than fetching emu.imem_psize.
Returns the size of a pointer in bytes for the given emulator.
:rtype: int
"""
return emu.imem_psize
def popStack(emu):
"""
Remove the element at the top of the stack.
:rtype: int
"""
v = emu.readMemoryFormat(emu.getStackCounter(), "<P")[0]
emu.setStackCounter(emu.getStackCounter() + pointerSize(emu))
return v
class GetProcessHeapHook(viv_utils.emulator_drivers.Hook):
"""
Hook and handle calls to GetProcessHeap, returning 0.
"""
def hook(self, callname, emu, callconv, api, argv):
if callname == "kernel32.GetProcessHeap":
# nop
callconv.execCallReturn(emu, 42, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
def round(i, size):
"""
Round `i` to the nearest greater-or-equal-to multiple of `size`.
:type i: int
:type size: int
:rtype: int
"""
if i % size == 0:
return i
return i + (size - (i % size))
class RtlAllocateHeapHook(viv_utils.emulator_drivers.Hook):
"""
Hook calls to RtlAllocateHeap, allocate memory in a "heap"
section, and return pointers to this memory.
The base heap address is 0x96960000.
The max allocation size is 10 MB.
"""
def __init__(self, *args, **kwargs):
        super(RtlAllocateHeapHook, self).__init__(*args, **kwargs)
self._heap_addr = 0x96960000
MAX_ALLOCATION_SIZE = 10 * 1024 * 1024
def _allocate_mem(self, emu, size):
size = round(size, 0x1000)
if size > self.MAX_ALLOCATION_SIZE:
size = self.MAX_ALLOCATION_SIZE
va = self._heap_addr
self.d("RtlAllocateHeap: mapping %s bytes at %s", hex(size), hex(va))
        emu.addMemoryMap(va, envi.memory.MM_RWX, "[heap allocation]", b"\x00" * (size + 4))
emu.writeMemory(va, b"\x00" * size)
self._heap_addr += size
return va
def hook(self, callname, driver, callconv, api, argv):
# works for kernel32.HeapAlloc
if callname == "ntdll.RtlAllocateHeap":
emu = driver
hheap, flags, size = argv
va = self._allocate_mem(emu, size)
callconv.execCallReturn(emu, va, len(argv))
return True
raise viv_utils.emulator_drivers.UnsupportedFunction()
class AllocateHeap(RtlAllocateHeapHook):
"""
Hook calls to AllocateHeap and handle them like calls to RtlAllocateHeapHook.
"""
def __init__(self, *args, **kwargs):
super(AllocateHeap, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if (
callname == "kernel32.LocalAlloc"
or callname == "kernel32.GlobalAlloc"
or callname == "kernel32.VirtualAlloc"
):
size = argv[1]
elif callname == "kernel32.VirtualAllocEx":
size = argv[2]
else:
raise viv_utils.emulator_drivers.UnsupportedFunction()
va = self._allocate_mem(driver, size)
callconv.execCallReturn(driver, va, len(argv))
return True
class MallocHeap(RtlAllocateHeapHook):
"""
Hook calls to malloc and handle them like calls to RtlAllocateHeapHook.
"""
def __init__(self, *args, **kwargs):
super(MallocHeap, self).__init__(*args, **kwargs)
def hook(self, callname, driver, callconv, api, argv):
if callname == "msvcrt.malloc" or callname == "msvcrt.calloc":
size = argv[0]
va = self._
|
lebauce/artub
|
bike/parsing/pathutils.py
|
Python
|
gpl-2.0
| 5,507 | 0.005084 |
# -*- coding: iso-8859-1 -*-
#
# Bicycle Repair Man - the Python Refactoring Browser
# Copyright (C) 2001-2006 Phil Dawes <phil@phildawes.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
# A some of this code is take from Pythius -
# Copyright (GPL) 2001 Jurgen Hermann <jh@web.de>
import os
def containsAny(str, set):
""" Check whether 'str' contains ANY of the chars in 'set'
"""
return 1 in [c in str for c in set]
def getPathOfModuleOrPackage(dotted_name, pathlist):
""" Get the filesystem path for a module or a package.
Return the file system path to a file for a module,
and to a directory for a package. Return None if
the name is not found, or is a builtin or extension module.
"""
import imp
# split off top-most name
parts = dotted_name.split('.', 1)
if len(parts) > 1:
# we have a dotted path, import top-level package
try:
file, pathname, description = imp.find_module(parts[0], pathlist)
if file: file.close()
except ImportError:
return None
# check if it's indeed a package
if description[2] == imp.PKG_DIRECTORY:
# recursively handle the remaining name parts
pathname = getPathOfModuleOrPackage(parts[1], [pathname])
else:
pathname = None
else:
# plain name
try:
file, pathname, description = imp.find_module(dotted_name, pathlist)
if file: file.close()
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
pathname = None
except ImportError:
pathname = None
return pathname
def getFilesForName(name):
""" Get a list of module files for a filename, a module or package name,
or a directory.
"""
import imp
if not os.path.exists(name):
# check for glob chars
if containsAny(name, "*?[]"):
import glob
files = glob.glob(name)
list = []
            for file in files:
list.extend(getFilesForName(file))
return list
# try to find module or package
        name = getPathOfModuleOrPackage(name, [])
        if not name:
            return []
    if os.path.isdir(name):
# find all python files in directory
list = []
os.path.walk(name, _visit_pyfiles, list)
return list
elif os.path.exists(name) and not name.startswith("."):
# a single file
return [name]
return []
def _visit_pyfiles(list, dirname, names):
""" Helper for getFilesForName().
"""
# get extension for python source files
if not globals().has_key('_py_ext'):
import imp
global _py_ext
        _py_ext = [triple[0] for triple in imp.get_suffixes() if triple[2] == imp.PY_SOURCE][0]
# don't recurse into CVS or Subversion directories
    if 'CVS' in names:
        names.remove('CVS')
    if '.svn' in names:
names.remove('.svn')
names_copy = [] + names
for n in names_copy:
        if os.path.isdir(os.path.join(dirname, n)) and \
not os.path.exists(os.path.join(dirname, n, "__init__.py")):
names.remove(n)
# add all *.py files to list
list.extend(
[os.path.join(dirname, file)
for file in names
if os.path.splitext(file)[1] == _py_ext and not file.startswith(".")])
# returns the directory which holds the first package of the package
# hierarchy under which 'filename' belongs
def getRootDirectory(filename):
if os.path.isdir(filename):
dir = filename
else:
dir = os.path.dirname(filename)
while dir != "" and \
os.path.exists(os.path.join(dir, "__init__.py")):
dir = os.path.dirname(dir)
return dir
# Returns the higher most package directoryname of the package hierarchy
# under which 'filename' belongs
# **** NOT THE SAME AS THE ROOT DIRECTORY OF THE PACKAGE ***
def getPackageBaseDirectory(filename):
if os.path.isdir(filename):
dir = filename
else:
dir = os.path.dirname(filename)
if not os.path.exists(os.path.join(dir, "__init__.py")):
# parent dir is not a package
return dir
while dir != "" and \
os.path.exists(os.path.join(os.path.dirname(dir), "__init__.py")):
dir = os.path.dirname(dir)
return dir
def filenameToModulePath(fname):
directoriesPreceedingRoot = getRootDirectory(fname)
import os
# strip off directories preceeding root package directory
if directoriesPreceedingRoot != "" and directoriesPreceedingRoot != ".":
mpath = fname.replace(directoriesPreceedingRoot, "")
else:
if fname.startswith("."+os.sep): # if starts with './', lob it off
fname = fname[len("."+os.sep):]
mpath = fname
if(mpath[0] == os.path.normpath("/")):
mpath = mpath[1:]
mpath, ext = os.path.splitext(mpath)
mpath = mpath.replace(os.path.normpath("/"), ".")
return mpath
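# NOTE: the definition below redefines filenameToModulePath, so the
# path-string version above is shadowed; only the package-walking
# variant that follows is effective at import time.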
def filenameToModulePath(filename):
filename = os.path.abspath(filename)
package = ""
dot = ""
dir,modname = os.path.split(filename)
while dir != ""and \
os.path.exists(os.path.join(dir, "__init__.py")):
dir, dirname = os.path.split(dir)
package = dirname+dot+package
dot = "."
return package + dot + modname[:-3]
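A quick illustration of the effective (second) filenameToModulePath, assuming pkg/ and pkg/sub/ both contain an __init__.py:
print(filenameToModulePath("pkg/sub/mod.py"))  # -> "pkg.sub.mod"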
|
fermat618/pida
|
pida-plugins/quickopen/quickopen.py
|
Python
|
gpl-2.0
| 7,589 | 0.003031 |
# -*- coding: utf-8 -*-
"""
:copyright: 2005-2008 by The PIDA Project
:license: GPL 2 or later (see README/COPYING/LICENSE)
"""
# stdlib
import os.path
# gtk
import gtk, gobject
# PIDA Imports
# core
from kiwi.ui.objectlist import Column
from pida.core.service import Service
from pida.core.features import FeaturesConfig
from pida.core.events import EventsConfig
from pida.core.actions import (ActionsConfig, TYPE_NORMAL)
from pida.core.options import OptionsConfig
from pida.ui.views import PidaView, WindowConfig
from pida.services.language import DOCTYPES
from pida.core.indexer import Result
from pygtkhelpers.gthreads import gcall
import time
# locale
from pida.core.locale import Locale
locale = Locale('')
_ = locale.gettext
class QItem(object):
name = ''
path = ''
class QOpenView(PidaView):
key = 'qopen.view'
gladefile = 'qopen'
label_text = _('Quick Open')
def create_ui(self):
self._history = gtk.ListStore(gobject.TYPE_STRING)
self.filter.set_model(self._history)
self.filter.set_text_column(0)
self.last_entered = 0
self.olist.set_columns(
[
Column('basename', title=_('Name')),
Column('relpath', title=_('Path')),
]
)
self.olist.set_selection_mode(gtk.SELECTION_MULTIPLE)
self.filter.child.connect("changed", self.on_filter_changed)
self.filter.child.connect("activate", self.on_filter_activate)
self.filter.child.connect("key-press-event", self.on_filter_keypress)
#self.toplevel.connect_after("map", self.on_show)
self.filter.connect_after("map", self.on_show)
def set_filter(self, text, time_check=None):
if time_check and self.last_entered > time_check:
return False
self._history.insert(0, (text,))
self.olist.clear()
tokens = text.split()
if not len(tokens):
return
ftypes = []
fnames = []
fall = []
filters = self.svc.boss.get_service('filemanager').\
features['file_hidden_check']
for tok in tokens:
if not tok:
continue
if tok[0] == "#" and len(tok) > 1:
for lang in DOCTYPES.get_fuzzy_list(tok[1:]):
ftypes.append(lang.internal)
elif tok[0] == "!" and len(tok) > 1:
fnames.append(tok[1:])
else:
fall.append(tok)
def do_filter(item):
if len(self.olist) > 200:
                return Result(abort=True)
if not len(item.basename) or not len(item.relpath):
return
if "/." in item.relpath or item.relpath[0] == ".":
return
for chk in filters:
if not chk(item.basename, item.relpath, ''):
return
if item.is_dir:
return
if all((x in item.relpath for x in fall)) and \
all((x in item.basename for x in fnames)):
if len(ftypes):
if item.doctype in ftypes:
                    return Result(accept=True)
else:
return Result(accept=True)
project = self.svc.boss.cmd('project', 'get_current_project')
if not project:
return
for result in project.indexer.query(do_filter):
self.olist.append(result)
return False
def on_show(self, *args):
gcall(self.filter.child.grab_focus)
def on_olist__key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.Escape and self.pane.get_params().detached:
self.can_be_closed()
def on_filter_keypress(self, widget, event):
if event.keyval == gtk.keysyms.Tab and len(self.olist):
gcall(self.olist.grab_focus)
if event.keyval == gtk.keysyms.Escape and self.pane.get_params().detached:
self.can_be_closed()
def on_filter_activate(self, *args):
if len(self.olist):
self.svc.open(self.olist[0])
def on_filter_changed(self, *args):
self.last_entered = time.time()
gobject.timeout_add(self.svc.opt('start_delay'),
self.set_filter, self.filter.child.props.text,
self.last_entered)
def on_olist__row_activated(self, widget, item):
self.svc.open(item)
def on_button_open__clicked(self, button):
for item in self.olist.get_selected_rows():
self.svc.open(item)
if self.pane.get_params().detached:
self.on_button_close__clicked(button)
def can_be_closed(self):
self.svc.boss.cmd('window', 'remove_view', view=self)
def on_button_close__clicked(self, button):
self.svc.boss.cmd('window', 'remove_view', view=self)
class QopenEventsConfig(EventsConfig):
def create(self):
#self.publish('something')
pass
def subscribe_all_foreign(self):
#self.subscribe_foreign('buffer', 'document-changed',
# self.on_document_changed)
pass
def on_document_changed(self, document):
pass
class QopenWindowConfig(WindowConfig):
key = QOpenView.key
label_text = QOpenView.label_text
class QopenFeaturesConfig(FeaturesConfig):
def subscribe_all_foreign(self):
self.subscribe_foreign('window', 'window-config',
QopenWindowConfig)
class QopenOptionsConfig(OptionsConfig):
def create_options(self):
self.create_option(
'start_delay',
_('Start after'),
int, # type of variable, like int, str, bool, ..
800,
_('Start search after n milliseconds'),
)
class QopenActionsConfig(ActionsConfig):
def create_actions(self):
QopenWindowConfig.action = self.create_action(
'qopen_show',
TYPE_NORMAL,
_('Open in project'),
_('Open file in project.'),
gtk.STOCK_OPEN,
self.on_qopen_show,
'' # default shortcut or '' to enable shortcut for action
)
def on_qopen_show(self, action):
self.svc.show_qopen()
class QuickOpen(Service):
#features_config = QopenFeaturesConfig
actions_config = QopenActionsConfig
options_config = QopenOptionsConfig
#events_config = QopenEventsConfig
label = "Quick Open"
def pre_start(self):
self._view = None
pass
def start(self):
pass
def stop(self):
pass
def show_qopen(self):
if not self._view:
self._view = QOpenView(self)
if not self.boss.cmd('window', 'is_added', view=self._view):
self.boss.cmd('window', 'add_detached_view',
paned='Buffer', view=self._view,
)
else:
self.boss.cmd('window', 'present_view', view=self._view)
def open(self, item):
project = self.boss.cmd('project', 'get_current_project')
if not project:
return
path = os.path.join(project.source_directory, item.relpath)
if item.is_dir:
self.boss.cmd('filemanager', 'browse', new_path=path)
self.boss.cmd('filemanager', 'present_view')
else:
self.boss.cmd('buffer', 'open_file', file_name=path)
# Required Service attribute for service loading
Service = QuickOpen
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
lfblogs/aiopy
|
aiopy/required/aiohttp/web_ws.py
|
Python
|
gpl-3.0
| 8,998 | 0 |
__all__ = ('WebSocketResponse', 'MsgType')
import asyncio
import warnings
from . import hdrs
from .errors import HttpProcessingError, ClientDisconnectedError
from .websocket import do_handshake, Message, WebSocketError
from .websocket_client import MsgType, closedMessage
from .web_exceptions import (
HTTPBadRequest, HTTPMethodNotAllowed, HTTPInternalServerError)
from aiopy.required.aiohttp.web_reqrep import StreamResponse
THRESHOLD_CONNLOST_ACCESS = 5
class WebSocketResponse(StreamResponse):
def __init__(self, *,
timeout=10.0, autoclose=True, autoping=True, protocols=()):
super().__init__(status=101)
self._protocols = protocols
self._protocol = None
self._writer = None
self._reader = None
self._closed = False
self._closing = False
self._conn_lost = 0
self._close_code = None
self._loop = None
self._waiting = False
self._exception = None
self._timeout = timeout
self._autoclose = autoclose
self._autoping = autoping
def start(self, request):
        # run the pre-check first so its failures aren't hidden by do_handshake() exceptions
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
try:
status, headers, parser, writer, protocol = do_handshake(
request.method, request.headers, request.transport,
self._protocols)
except HttpProcessingError as err:
if err.code == 405:
raise HTTPMethodNotAllowed(
request.method, [hdrs.METH_GET], body=b'')
elif err.code == 400:
raise HTTPBadRequest(text=err.message, headers=err.headers)
else: # pragma: no cover
raise HTTPInternalServerError() from err
if self.status != status:
self.set_status(status)
for k, v in headers:
self.headers[k] = v
self.force_close()
resp_impl = super().start(request)
self._reader = request._reader.set_parser(parser)
self._writer = writer
self._protocol = protocol
self._loop = request.app.loop
return resp_impl
def can_start(self, request):
if self._writer is not None:
raise RuntimeError('Already started')
try:
_, _, _, _, protocol = do_handshake(
request.method, request.headers, request.transport,
self._protocols)
except HttpProcessingError:
return False, None
else:
return True, protocol
@property
def closed(self):
return self._closed
@property
def close_code(self):
return self._close_code
@property
def protocol(self):
return self._protocol
def exception(self):
return self._exception
def ping(self, message='b'):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
self._writer.ping(message)
def pong(self, message='b'):
# unsolicited pong
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
self._writer.pong(message)
def send_str(self, data):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
if not isinstance(data, str):
raise TypeError('data argument must be str (%r)' % type(data))
self._writer.send(data, binary=False)
def send_bytes(self, data):
if self._writer is None:
raise RuntimeError('Call .start() first')
if self._closed:
raise RuntimeError('websocket connection is closing')
if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)' % type(data))
self._writer.send(data, binary=True)
@asyncio.coroutine
def wait_closed(self): # pragma: no cover
warnings.warn(
'wait_closed() coroutine is deprecated. use close() instead',
DeprecationWarning)
return (yield from self.close())
@asyncio.coroutine
    def write_eof(self):
if self._eof_sent:
return
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self.close()
self._eof_sent = True
@asyncio.coroutine
def close(self, *, code=1000, message=b''):
if self._writer is None:
raise RuntimeError('Call .start() first')
if not self._closed:
self._closed = True
try:
self._writer.close(code, message)
except (asyncio.CancelledError, asyncio.TimeoutError):
self._close_code = 1006
raise
except Exception as exc:
self._close_code = 1006
self._exception = exc
return True
if self._closing:
return True
while True:
try:
msg = yield from asyncio.wait_for(
self._reader.read(),
timeout=self._timeout, loop=self._loop)
except asyncio.CancelledError:
self._close_code = 1006
raise
except Exception as exc:
self._close_code = 1006
self._exception = exc
return True
if msg.tp == MsgType.close:
self._close_code = msg.data
return True
else:
return False
@asyncio.coroutine
def receive(self):
if self._reader is None:
raise RuntimeError('Call .start() first')
if self._waiting:
raise RuntimeError('Concurrent call to receive() is not allowed')
self._waiting = True
try:
while True:
if self._closed:
self._conn_lost += 1
if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
raise RuntimeError('WebSocket connection is closed.')
return closedMessage
try:
msg = yield from self._reader.read()
except (asyncio.CancelledError, asyncio.TimeoutError):
raise
except WebSocketError as exc:
self._close_code = exc.code
yield from self.close(code=exc.code)
return Message(MsgType.error, exc, None)
except ClientDisconnectedError:
self._closed = True
self._close_code = 1006
return Message(MsgType.close, None, None)
except Exception as exc:
self._exception = exc
self._closing = True
self._close_code = 1006
yield from self.close()
return Message(MsgType.error, exc, None)
if msg.tp == MsgType.close:
self._closing = True
self._close_code = msg.data
if not self._closed and self._autoclose:
yield from self.close()
return msg
elif not self._closed:
if msg.tp == MsgType.ping and self._autoping:
self._writer.pong(msg.data)
elif msg.tp == MsgType.pong and self._autoping:
continue
else:
return msg
finally:
self._waiting = False
@asyncio.coroutine
def receive_msg(self): # pragma: no cover
        warnings.warn(
            'receive_msg() coroutine is deprecated. use receive() instead',
            DeprecationWarning)
        return (yield from self.receive())
|
isislab/CTFd
|
tests/test_plugin_utils.py
|
Python
|
apache-2.0
| 7,989 | 0.001377 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.plugins import (
bypass_csrf_protection,
get_admin_plugin_menu_bar,
get_user_page_menu_bar,
override_template,
register_admin_plugin_menu_bar,
register_admin_plugin_script,
register_admin_plugin_stylesheet,
register_plugin_asset,
register_plugin_assets_directory,
register_plugin_script,
register_user_page_menu_bar,
)
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_challenge,
login_as_user,
setup_ctfd,
)
def test_register_plugin_asset():
"""Test that plugin asset registration works"""
app = create_ctfd(setup=False)
register_plugin_asset(app, asset_path="/plugins/__init__.py")
app = setup_ctfd(app)
with app.app_context():
with app.test_client() as client:
r = client.get("/plugins/__init__.py")
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
destroy_ctfd(app)
def test_register_plugin_assets_directory():
"""Test that plugin asset directory registration works"""
app = create_ctfd(setup=False)
register_plugin_assets_directory(app, base_path="/plugins/")
app = setup_ctfd(app)
with app.app_context():
with app.test_client() as client:
r = client.get("/plugins/__init__.py")
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
r = client.get("/plugins/challenges/__init__.py")
assert len(r.get_data(as_text=True)) > 0
assert r.status_code == 200
destroy_ctfd(app)
def test_override_template():
"""Does override_template work properly for regular themes when used from a plugin"""
app = create_ctfd()
with app.app_context():
override_template("login.html", "LOGIN OVERRIDE")
with app.test_client() as client:
r = client.get("/login")
assert r.status_code == 200
output = r.get_data(as_text=True)
assert "LOGIN OVERRIDE" in output
destroy_ctfd(app)
def test_admin_override_template():
"""Does override_template work properly for the admin panel when used from a plugin"""
app = create_ctfd()
with app.app_context():
override_template("admin/users/user.html", "ADMIN USER OVERRIDE")
client = login_as_user(app, name="admin", password="password")
r = client.get("/admin/users/1")
assert r.status_code == 200
output = r.get_data(as_text=True)
assert "ADMIN USER OVERRIDE" in output
destroy_ctfd(app)
def test_register_plugin_script():
"""Test that register_plugin_script adds script paths to the core theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_plugin_script("/fake/script/path.js")
register_plugin_script("http://examplectf.com/fake/script/path.js")
with app.test_client() as client:
r = client.get("/")
output = r.get_data(as_text=True)
assert "/fake/script/path.js" in output
assert "http://examplectf.com/fake/script/path.js" in output
destroy_ctfd(app)
def test_register_plugin_stylesheet():
"""Test that register_plugin_stylesheet adds stylesheet paths to the core theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_plugin_script("/fake/stylesheet/path.css")
register_plugin_script("http://examplectf.com/fake/stylesheet/path.css")
with app.test_client() as client:
r = client.get("/")
output = r.get_data(as_text=True)
assert "/fake/stylesheet/path.css" in output
assert "http://examplectf.com/fake/stylesheet/path.css" in output
destroy_ctfd(app)
def test_register_admin_plugin_script():
"""Test that register_admin_plugin_script adds script paths to the admin theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_admin_plugin_script("/fake/script/path.js")
register_admin_plugin_script("http://examplectf.com/fake/script/path.js")
with login_as_user(app, name="admin") as client:
r = client.get("/admin/statistics")
output = r.get_data(as_text=True)
assert "/fake/script/path.js" in output
assert "http://examplectf.com/fake/script/path.js" in output
destroy_ctfd(app)
def test_register_admin_plugin_stylesheet():
"""Test that register_admin_plugin_stylesheet adds stylesheet paths to the admin theme when used from a plugin"""
app = create_ctfd()
with app.app_context():
register_admin_plugin_stylesheet("/fake/stylesheet/path.css")
register_admin_plugin_stylesheet(
"http://examplectf.com/fake/stylesheet/path.css"
)
with login_as_user(app, name="admin") as client:
r = client.get("/admin/statistics")
output = r.get_data(as_text=True)
assert "/fake/stylesheet/path.css" in output
assert "http://examplectf.com/fake/stylesheet/path.css" in output
destroy_ctfd(app)
def test_register_admin_plugin_menu_bar():
"""
Test that register_admin_plugin_menu_bar() properly inserts into HTML and get_admin_plugin_menu_bar()
returns the proper list.
"""
app = create_ctfd()
with app.app_context():
register_admin_plugin_menu_bar(
title="test_admin_plugin_name", route="/test_plugin"
)
client = login_as_user(app, name="admin", password="password")
r = client.get("/admin/statistics")
output = r.get_data(as_text=True)
assert "/test_plugin" in output
assert "test_admin_plugin_name" in output
menu_item = get_admin_plugin_menu_bar()[0]
assert menu_item.title == "test_admin_plugin_name"
assert menu_item.route == "/test_plugin"
destroy_ctfd(app)
def test_register_user_page_menu_bar():
"""
Test that the register_user_page_menu_bar() properly inserts into HTML and get_user_page_menu_bar() returns the
proper list.
"""
app = create_ctfd()
with app.app_context():
register_user_page_menu_bar(
title="test_user_menu_link", route="/test_user_href"
)
with app.test_client() as client:
r = client.get("/")
output = r.get_data(as_text=True)
assert "/test_user_href" in output
assert "test_user_menu_link" in output
with app.test_request_context():
menu_item = get_user_page_menu_bar()[0]
assert menu_item.title == "test_user_menu_link"
assert menu_item.route == "/test_user_href"
destroy_ctfd(app)
def test_bypass_csrf_protection():
"""
Test that the bypass_csrf_protection decorator functions properly
"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.post("/login")
output = r.get_data(as_text=True)
assert r.status_code == 403
def bypass_csrf_protection_test_route():
return "Success", 200
# Hijack an existing route to avoid any kind of hacks to create a test route
app.view_functions["auth.login"] = bypass_
|
csrf_protection(
bypass_csrf_protection_test_route
)
with app.test_client() as client:
r = client.post("/login")
output = r.get_data(as_text=True)
            assert r.status_code == 200
assert output == "Success"
destroy_ctfd(app)
def test_challenges_model_access_plugin_class():
"""
Test that the Challenges model can access its plugin class
"""
app = create_ctfd()
with app.app_context():
from CTFd.plugins.challenges import get_chal_class
chal = gen_challenge(app.db)
assert chal.plugin_class == get_chal_class("standard")
destroy_ctfd(app)
|
sketchfab/io_object_mu
|
import_mu.py
|
Python
|
gpl-2.0
| 15,783 | 0.003485 |
# vim:ts=4:et
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from struct import unpack
import os.path
from math import pi, sqrt
import bpy
from bpy_extras.object_utils import object_data_add
from mathutils import Vector,Matrix,Quaternion
from bpy_extras.io_utils import ImportHelper
from bpy.props import BoolProperty, FloatProperty, StringProperty, EnumProperty
from bpy.props import FloatVectorProperty, PointerProperty
from .mu import MuEnum, Mu, MuColliderMesh, MuColliderSphere, MuColliderCapsule
from .mu import MuColliderBox, MuColliderWheel
from .shader import make_shader
from .material import make_material
from . import collider, properties
EXCLUDED_OBJECTS=['flare', 'busted', 'flag']
def create_uvs(mu, uvs, mesh, name):
uvlay = mesh.uv_textures.new(name)
uvloop = mesh.uv_layers[name]
for i, uvl in enumerate(uvloop.data):
v = mesh.loops[i].vertex_index
uvl.uv = uvs[v]
def create_mesh(mu, mumesh, name):
mesh = bpy.data.meshes.new(name)
faces = []
for sm in mumesh.submeshes:
faces.extend(sm)
mesh.from_pydata(mumesh.verts, [], faces)
if mumesh.uvs:
create_uvs(mu, mumesh.uvs, mesh, name + ".UV")
if mumesh.uv2s:
create_uvs(mu, mumesh.uv2s, mesh, name + ".UV2")
return mesh
def create_mesh_object(name, mesh, transform):
obj = bpy.data.objects.new(name, mesh)
obj.rotation_mode = 'QUATERNION'
if transform:
obj.location = Vector(transform.localPosition)
obj.rotation_quaternion = Quaternion(transform.localRotation)
obj.scale = Vector(transform.localScale)
else:
obj.location = Vector((0, 0, 0))
obj.rotation_quaternion = Quaternion((1,0,0,0))
obj.scale = Vector((1,1,1))
bpy.context.scene.objects.link(obj)
return obj
def copy_spring(dst, src):
dst.spring = src.spring
dst.damper = src.damper
dst.targetPosition = src.targetPosition
def copy_friction(dst, src):
dst.extremumSlip = src.extremumSlip
dst.extremumValue = src.extremumValue
dst.asymptoteSlip = src.asymptoteSlip
dst.extremumValue = src.extremumValue
dst.stiffness = src.stiffness
def create_light(mu, mulight, transform):
ltype = ('SPOT', 'SUN', 'POINT', 'AREA')[mulight.type]
light = bpy.data.lamps.new(transform.name, ltype)
light.color = mulight.color[:3]
light.distance = mulight.range
light.energy = mulight.intensity
if ltype == 'SPOT' and hasattr(mulight, "spotAngle"):
light.spot_size = mulight.spotAngle * pi / 180
obj = bpy.data.objects.new(transform.name, light)
obj.rotation_mode = 'QUATERNION'
obj.location = Vector(transform.localPosition)
# Blender points spotlights along local -Z, unity along local +Z
# which is Blender's +Y, so rotate 90 degrees around local X to
# go from Unity to Blender
rot = Quaternion((0.5**0.5,0.5**0.5,0,0))
obj.rotation_quaternion = rot * Quaternion(transform.localRotation)
obj.scale = Vector(transform.localScale)
properties.SetPropMask(obj.muproperties.cullingMask, mulight.cullingMask)
bpy.context.scene.objects.link(obj)
return obj
property_map = {
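    # Unity is Y-up, Blender is Z-up: positions and scales swap the Y and Z
    # components, and rotation x/y/z are negated to flip handedness.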
"m_LocalPosition.x": ("obj", "location", 0, 1),
"m_LocalPosition.y": ("obj", "location", 2, 1),
"m_LocalPosition.z": ("obj", "location", 1, 1),
"m_LocalRotation.x": ("obj", "rotation_quaternion", 1, -1),
"m_LocalRotation.y": ("obj", "rotation_quaternion", 3, -1),
"m_LocalRotation.z": ("obj", "rotation_quaternion", 2, -1),
"m_LocalRotation.w": ("obj", "rotation_quaternion", 0, 1),
"m_LocalScale.x": ("obj", "scale", 0, 1),
"m_LocalScale.y": ("obj", "scale", 2, 1),
"m_LocalScale.z": ("obj", "scale", 1, 1),
"m_Intensity": ("data", "energy", 0, 1),
}
def create_fcurve(action, curve, propmap):
dp, ind, mult = propmap
fps = bpy.context.scene.render.fps
fc = action.fcurves.new(data_path = dp, index = ind)
fc.keyframe_points.add(len(curve.keys))
for i, key in enumerate(curve.keys):
x,y = key.time * fps, key.value * mult
fc.keyframe_points[i].co = x, y
fc.keyframe_points[i].handle_left_type = 'FREE'
        fc.keyframe_points[i].handle_right_type = 'FREE'
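        # Bezier handles sit a third of the neighbouring key interval away,
        # scaled by the Unity in/out tangents (times converted to frames).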
if i > 0:
            dist = (key.time - curve.keys[i - 1].time) / 3
            dx, dy = dist * fps, key.tangent[0] * dist * mult
else:
dx, dy = 10, 0.0
fc.keyframe_points[i].handle_left = x - dx, y - dy
if i < len(curve.keys) - 1:
dist = (curve.keys[i + 1].time - key.time) / 3
dx, dy = dist * fps, key.tangent[1] * dist * mult
else:
dx, dy = 10, 0.0
fc.keyframe_points[i].handle_right = x + dx, y + dy
return True
def create_action(mu, path, clip):
#print(clip.name)
actions = {}
for curve in clip.curves:
if not curve.path:
mu_path = path
else:
mu_path = "/".join([path, curve.path])
if mu_path not in mu.objects:
print("Unknown path: %s" % (mu_path))
continue
obj = mu.objects[mu_path]
if curve.property not in property_map:
print("%s: Unknown property: %s" % (mu_path, curve.property))
continue
propmap = property_map[curve.property]
subpath, propmap = propmap[0], propmap[1:]
if subpath != "obj":
obj = getattr (obj, subpath)
name = ".".join([clip.name, curve.path, subpath])
if name not in actions:
actions[name] = bpy.data.actions.new(name), obj
act, obj = actions[name]
if not create_fcurve(act, curve, propmap):
continue
for name in actions:
act, obj = actions[name]
if not obj.animation_data:
obj.animation_data_create()
track = obj.animation_data.nla_tracks.new()
track.name = clip.name
track.strips.new(act.name, 1.0, act)
def create_collider(mu, muobj):
col = muobj.collider
name = muobj.transform.name
if type(col) == MuColliderMesh:
name = name + ".collider"
mesh = create_mesh(mu, col.mesh, name)
else:
mesh = bpy.data.meshes.new(name)
obj = create_mesh_object(name, mesh, None)
obj.muproperties.isTrigger = False
if type(col) != MuColliderWheel:
obj.muproperties.isTrigger = col.isTrigger
if type(col) == MuColliderMesh:
obj.muproperties.collider = 'MU_COL_MESH'
elif type(col) == MuColliderSphere:
obj.muproperties.radius = col.radius
obj.muproperties.center = col.center
obj.muproperties.collider = 'MU_COL_SPHERE'
elif type(col) == MuColliderCapsule:
obj.muproperties.radius = col.radius
obj.muproperties.height = col.height
obj.muproperties.direction = properties.dir_map[col.direction]
obj.muproperties.center = col.center
obj.muproperties.collider = 'MU_COL_CAPSULE'
elif type(col) == MuColliderBox:
obj.muproperties.size = col.size
obj.muproperties.center = col.center
obj.muproperties.collider = 'MU_COL_BOX'
elif type(col) == MuColliderWheel:
obj.muproperties.radius = col.radius
obj.muproperties.suspensionDistance = col.suspensionDistance
obj.muproperties.center = col.center
obj.muproperties.mass = col.mass
copy_spring(obj.muproperties.suspensionSpring, col.suspensionSpring)
        copy_friction(obj.muproperties.forwardFriction, col.forwardFriction)
|
mazelife/django-scaffold
|
docs/settings.py
|
Python
|
bsd-3-clause
| 698 | 0 |
# A very basic settings file that allows Sphinx to build
# the docs (this is becuase autodoc is used).
import os
import sys
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
SITE_ID = 303
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {"default": {
"NAME": ":memory:",
"ENGINE": "django.db.backends.sqlite3",
"USER": '',
"PASSWORD": '',
"PORT": '',
}}
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'scaffold',
)
SECRET_KEY = "NULL"
SCAFFOLD_EXTENDING_APP_NAME = "scaffold"
SCAFFOLD_EXTENDING_MODEL_PATH = "scaffold.models.BaseSection"
|
rishig/zulip
|
zerver/tests/test_docs.py
|
Python
|
apache-2.0
| 24,048 | 0.002828 |
# -*- coding: utf-8 -*-
import os
import subprocess
import ujson
from django.conf import settings
from django.test import TestCase, override_settings
from django.http import HttpResponse
from typing import Any, Dict, List
from zproject.settings import DEPLOY_ROOT
from zerver.lib.integrations import INTEGRATIONS
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.lib.test_runner import slow
from zerver.lib.utils import split_by
from zerver.models import Realm, get_realm
from zerver.views.documentation import (
add_api_uri_context,
)
class DocPageTest(ZulipTestCase):
def get_doc(self, url: str, subdomain: str) -> HttpResponse:
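        # doc-html fragments are only served to AJAX requests, so fake the
        # X-Requested-With header for those URLs.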
if url[0:23] == "/integrations/doc-html/":
return self.client_get(url, subdomain=subdomain, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
return self.client_get(url, subdomain=subdomain)
def print_msg_if_error(self, response: HttpResponse) -> None: # nocoverage
if response.status_code != 200 and response.get('Content-Type') == 'application/json':
content = ujson.loads(response.content)
print()
print("======================================================================")
print("ERROR: {}".format(content.get('msg')))
print()
def _test(self, url: str, expected_content: str, extra_strings: List[str]=[],
landing_missing_strings: List[str]=[], landing_page: bool=True,
doc_html_str: bool=False) -> None:
# Test the URL on the "zephyr" subdomain
result = self.get_doc(url, subdomain="zephyr")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
# Test the URL on the root subdomain
result = self.get_doc(url, subdomain="")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
for s in extra_strings:
self.assertIn(s, str(result.content))
if not landing_page:
return
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
# Test the URL on the root subdomain with the landing page setting
result = self.get_doc(url, subdomain="")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
for s in landing_missing_strings:
self.assertNotIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="description" content="Zulip combines'], result)
self.assert_not_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
# Test the URL on the "zephyr" subdomain with the landing page setting
result = self.get_doc(url, subdomain="zephyr")
self.print_msg_if_error(result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
@slow("Tests dozens of endpoints")
def test_api_doc_endpoints(self) -> None:
current_dir = os.path.dirname(os.path.abspath(__file__))
api_docs_dir = os.path.join(current_dir, '..', '..', 'templates/zerver/api/')
files = os.listdir(api_docs_dir)
def _filter_func(fp: str) -> bool:
ignored_files = ['sidebar_index.md', 'index.md', 'missing.md']
return fp.endswith('.md') and fp not in ignored_files
files = list(filter(_filter_func, files))
for f in files:
endpoint = '/api/{}'.format(os.path.splitext(f)[0])
self._test(endpoint, '', doc_html_str=True)
@slow("Tests dozens of endpoints, including generating lots of emails")
def test_doc_endpoints(self) -> None:
self._test('/api/', 'The Zulip API')
self._test('/api/api-keys', 'be careful with it')
self._test('/api/installation-instructions', 'No download required!')
self._test('/api/send-message', 'steal away your hearts')
self._test('/api/render-message', '**foo**')
self._test('/api/get-all-streams', 'include_public')
self._test('/api/get-stream-id', 'The name of the stream to retrieve the ID for.')
self._test('/api/get-subscribed-streams', 'Get all streams that the user is subscribed to.')
self._test('/api/get-all-users', 'client_gravatar')
self._test('/api/register-queue', 'apply_markdown')
self._test('/api/get-events-from-queue', 'dont_block')
self._test('/api/delete-queue', 'Delete a previously registered queue')
self._test('/api/update-message', 'propagate_mode')
self._test('/api/get-profile', 'takes no arguments')
self._test('/api/add-subscriptions', 'authorization_errors_fatal')
self._test('/api/create-user', 'zuliprc-admin')
self._test('/api/remove-subscriptions', 'not_subscribed')
self._test('/team/', 'industry veterans')
self._test('/history/', 'Cambridge, Massachusetts')
# Test the i18n version of one of these pages.
self._test('/en/history/', 'Cambridge, Massachusetts')
self._test('/apps/', 'Apps for every platform.')
self._test('/features/', 'Beautiful messaging')
self._test('/hello/', 'productive team chat', landing_missing_strings=["Login"])
self._test('/why-zulip/', 'Why Zulip?')
self._test('/for/open-source/', 'for open source projects')
self._test('/for/companies/', 'in a company')
self._test('/for/working-groups-and-communities/', 'standards bodies')
self._test('/for/mystery-hunt/', 'four SIPB alums')
self._test('/security/', 'TLS encryption')
self._test('/atlassian/', 'HipChat')
self._test('/devlogin/', 'Normal users', landing_page=False)
self._test('/devtools/', 'Useful development URLs')
self._test('/errors/404/', 'Page not found')
self._test('/errors/5xx/', 'Internal server error')
self._test('/emails/', 'manually generate most of the emails by clicking')
result = self.client_get('/integrations/doc-html/nonexistent_integration', follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(result.status_code, 404)
result = self.client_get('/new-user/')
self.assertEqual(result.status_code, 301)
self.assertIn('hello', result['Location'])
result = self.client_get('/static/favicon.ico')
self.assertEqual(result.status_code, 200)
@slow("Tests dozens of endpoints, including all our integrations docs")
def test_integration_doc_endpoints(self) -> None:
self._test('/integrations/',
'native integrations.',
extra_strings=[
'And hundreds more through',
'Hubot',
'Zapier',
                       'IFTTT'
                   ])
for integration in INTEGRATIONS.keys():
url = '/integrations/doc-html/{}'.format(integration)
self._test(url, '', doc_html_str=True)
def test_integration_pages_open_graph_metadata(self) -> None:
        url = '/integrations/doc/github'
        title = '<meta property="og:title" content="Connect GitHub to Zulip">'
|
ujjwal96/mitmproxy
|
mitmproxy/io/db.py
|
Python
|
mit
| 1,107 | 0.000903 |
import sqlite3
import os
from mitmproxy.io import protobuf
class DBHandler:
"""
This class is wrapping up connection to SQLITE DB.
"""
def __init__(self, db_path, mode='load'):
if mode == 'write':
if os.path.isfile(db_path):
                os.remove(db_path)
self.db_path = db_path
self._con = sqlite3.connect(self.db_path)
self._c = self._con.cursor()
self._create_db()
def _create_db(self):
with self._con:
            self._con.execute('CREATE TABLE IF NOT EXISTS FLOWS('
'id INTEGER PRIMARY KEY,'
'pbuf_blob BLOB)')
def store(self, flows):
blobs = []
for flow in flows:
blobs.append((protobuf.dumps(flow),))
with self._con:
self._con.executemany('INSERT INTO FLOWS (pbuf_blob) values (?)', blobs)
def load(self):
flows = []
self._c.execute('SELECT pbuf_blob FROM FLOWS')
for row in self._c.fetchall():
flows.append((protobuf.loads(row[0])))
return flows
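# Illustrative round trip (not part of the original module); assumes `flows`
# is a list of flow objects that protobuf.dumps()/loads() can serialize:
#
#   DBHandler('flows.db', mode='write').store(flows)
#   restored = DBHandler('flows.db').load()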
|
valesi/electrum
|
gui/kivy/uix/dialogs/wallets.py
|
Python
|
gpl-3.0
| 1,677 | 0.001193 |
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum.i18n import _
from electrum.util import base_units
import os
from label_dialog import LabelDialog
Builder.load_string('''
#:import os os
<WalletDialog@Popup>:
title: _('Wallets')
id: popup
path: ''
BoxLayout:
orientation: 'vertical'
FileChooserListView:
id: wallet_selector
dirselect: False
filter_dirs: True
            filter: '*.*'
path: os.path.dirname(app.wallet.storage.path)
size_hint_y: 0.6
Widget
size_hint_y: 0.1
GridLayout:
cols: 2
size_hint_y: 0.1
Button:
size_hint: 0.1, None
height: '48dp'
text: _('Cancel')
on_release:
popup.dismiss()
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('Open') if wallet_selector.selection else _('New Wallet')
on_release:
popup.dismiss()
root.new_wallet(app, wallet_selector.path)
''')
class WalletDialog(Factory.Popup):
def new_wallet(self, app, dirname):
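        # Open the highlighted wallet file if one is selected; otherwise ask
        # for a name and create the new wallet in the same directory.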
def cb(text):
if text:
app.load_wallet_by_name(os.path.join(dirname, text))
if self.ids.wallet_selector.selection:
app.load_wallet_by_name(self.ids.wallet_selector.selection[0])
else:
d = LabelDialog(_('Enter wallet name'), '', cb)
d.open()
|
tobiz/OGN-Flight-Logger_V3
|
flogger_email_msg.py
|
Python
|
gpl-3.0
| 1,212 | 0.012376 |
import smtplib
import base64
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import encoders
from __builtin__ import file
import flogger_settings
import os
import datetime
def email_msg(sender, receiver, msg, date, settings):
# print "Send take off msg"
if settings.FLOGGER_TAKEOFF_EMAIL != "y" and settings.FLOGGER_TAKEOFF_EMAIL != "Y":
# Don't send take off email msg
return
# body = "Msg from %s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
    body = "%s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
print body
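    # Note: 'msg' is rebound below from the incoming string to the MIME
    # container that is actually sent.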
msg = MIMEMultipart()
msg.attach(MIMEText(body, 'plain'))
fromaddr = sender
toaddr = receiver
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = body
server = smtplib.SMTP(settings.FLOGGER_SMTP_SERVER_URL, settings.FLOGGER_SMTP_SERVER_PORT)
text = msg.as_string()
# print "Msg string is: ", text
try:
server.sendmail(fromaddr, toaddr, text)
except Exception as e:
print "Send email_msg failed, reason: ", e
server.quit()
return
|
RPGOne/Skynet
|
pytorch-master/torch/legacy/nn/ParallelCriterion.py
|
Python
|
bsd-3-clause
| 1,404 | 0.001425 |
import torch
from .Criterion import Criterion
from .utils import recursiveResizeAs, recursiveFill, recursiveAdd
class ParallelCriterion(Criterion):
def __init__(self, repeatTarget=False):
super(ParallelCriterion, self).__init__()
self.criterions = []
self.weights = []
self.gradInput = []
        self.repeatTarget = repeatTarget
def add(self, criterion, weight=1):
self.criterions.append(criterion)
self.weights.append(weight)
return self
def updateOutput(self, input, target):
self.output = 0
for i, criterion in enumerate(self.criterions):
current_target = target if self.repeatTarget else target[i]
            self.output += self.weights[i] * criterion.updateOutput(input[i], current_target)
return self.output
def updateGradInput(self, input, target):
self.gradInput = recursiveResizeAs(self.gradInput, input)[0]
recursiveFill(self.gradInput, 0)
for i, criterion in enumerate(self.criterions):
current_target = target if self.repeatTarget else target[i]
recursiveAdd(self.gradInput[i], self.weights[i], criterion.updateGradInput(input[i], current_target))
return self.gradInput
def type(self, type=None, tensorCache=None):
self.gradInput = []
return super(ParallelCriterion, self).type(type, tensorCache)
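# Illustrative usage (not part of the original file), combining two weighted
# criteria over paired inputs in the torch.legacy style; MSECriterion and
# AbsCriterion are assumed to come from the same torch.legacy.nn package:
#
#   pc = ParallelCriterion().add(MSECriterion()).add(AbsCriterion(), 0.5)
#   loss = pc.updateOutput([out1, out2], [target1, target2])
#   grad = pc.updateGradInput([out1, out2], [target1, target2])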
|
fountainment/FountainEngineImproved
|
fountain/render/convert_shader.py
|
Python
|
mit
| 396 | 0.007576 |
#!/usr/bin/env python
import sys
def convert_str(infile, outfile):
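    # Wrap each shader source line in quotes with an escaped newline so the
    # output can be #include'd as a C string literal: `void main()` becomes
    # `"void main()\n"`.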
f = open(infile, 'r')
lines = f.readlines()
    f.close()
    f = open(outfile, 'w')
f.writelines(['"%s\\n"\n' % i.rstrip() for i in lines])
f.close()
def main():
convert_str('fountain.vert', 'fountain.vert.inc')
convert_str('fountain.frag', 'fountain.frag.inc')
if __name__ == '__main__':
main()
|
Vagab0nd/SiCKRAGE
|
lib3/imdb/parser/http/searchCompanyParser.py
|
Python
|
gpl-3.0
| 2,406 | 0.001247 |
# Copyright 2008-2018 Davide Alberani <da@erlug.linux.it>
# 2008-2018 H. Turgut Uyar <uyar@tekir.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the classes (and the instances) that are used to parse
the results of a search for a given company.
For example, when searching for the name "Columbia Pictures", the parsed page
would be:
http://www.imdb.com/find?q=Columbia+Pictures&s=co
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from imdb.utils import analyze_company_name
from .piculet import Path, Rule, Rules, reducers
from .searchMovieParser import DOMHTMLSearchMovieParser
from .utils import analyze_imdbid
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
"""A parser for the company search page."""
rules = [
Rule(
key='data',
extractor=Rules(
foreach='//td[@class="result_text"]',
rules=[
Rule(
key='link',
extractor=Path('./a/@href', reduce=reducers.first)
),
Rule(
key='name',
extractor=Path('./a/text()')
),
Rule(
key='notes',
extractor=Path('./text()')
)
],
transform=lambda x: (
analyze_imdbid(x.get('link')),
analyze_company_name(x.get('name') + x.get('notes', ''), stripNotes=True)
)
)
)
]
_OBJECTS = {
'search_company_parser': ((DOMHTMLSearchCompanyParser,), {'kind': 'company'})
}
|
races1986/SafeLanguage
|
CEM/families/i18n_family.py
|
Python
|
epl-1.0
| 6,800 | 0.000294 |
# -*- coding: utf-8 -*-
__version__ = '$Id: 7e07cc8b51fa2cdfb23c34c8652adf4a94003dc8 $'
import family
# The Wikimedia i18n family
class Family(family.Family):
    def __init__(self):
        family.Family.__init__(self)
self.name = 'i18n'
self.langs = {
'i18n': 'translatewiki.net',
}
self.namespaces[4] = {
'_default': [u'Project'],
}
self.namespaces[5] = {
'_default': [u'Project talk'],
}
self.namespaces[6] = {
'_default': [u'File'],
}
        self.namespaces[7] = {
            '_default': [u'File talk'],
}
self.namespaces[90] = {
'_default': [u'Thread'],
}
self.namespaces[91] = {
'_default': [u'Thread talk'],
}
self.namespaces[92] = {
'_default': [u'Summary'],
}
self.namespaces[93] = {
'_default': [u'Summary talk'],
}
self.namespaces[100] = {
'_default': [u'Portal'],
}
self.namespaces[101] = {
'_default': [u'Portal talk'],
}
self.namespaces[202] = {
'_default': [u'Property'],
}
self.namespaces[203] = {
'_default': [u'Property talk'],
}
self.namespaces[206] = {
'_default': [u'Form'],
}
self.namespaces[207] = {
'_default': [u'Form talk'],
}
self.namespaces[208] = {
'_default': [u'Concept'],
}
self.namespaces[209] = {
'_default': [u'Concept talk'],
}
self.namespaces[420] = {
'_default': [u'Layer'],
}
self.namespaces[421] = {
'_default': [u'Layer talk'],
}
self.namespaces[1102] = {
'_default': [u'Translating'],
}
self.namespaces[1103] = {
'_default': [u'Translating talk'],
}
self.namespaces[1198] = {
'_default': [u'Translations'],
}
self.namespaces[1199] = {
'_default': [u'Translations talk'],
}
self.namespaces[1200] = {
'_default': [u'Voctrain'],
}
self.namespaces[1201] = {
'_default': [u'Voctrain talk'],
}
self.namespaces[1202] = {
'_default': [u'FreeCol'],
}
self.namespaces[1203] = {
'_default': [u'FreeCol talk'],
}
self.namespaces[1204] = {
'_default': [u'Nocc'],
}
self.namespaces[1205] = {
'_default': [u'Nocc talk'],
}
self.namespaces[1206] = {
'_default': [u'Wikimedia'],
}
self.namespaces[1207] = {
'_default': [u'Wikimedia talk'],
}
self.namespaces[1208] = {
'_default': [u'StatusNet'],
}
self.namespaces[1209] = {
'_default': [u'StatusNet talk'],
}
self.namespaces[1210] = {
'_default': [u'Mantis'],
}
self.namespaces[1211] = {
'_default': [u'Mantis talk'],
}
self.namespaces[1212] = {
'_default': [u'Mwlib'],
}
self.namespaces[1213] = {
'_default': [u'Mwlib talk'],
}
self.namespaces[1214] = {
'_default': [u'Commonist'],
}
self.namespaces[1215] = {
'_default': [u'Commonist talk'],
}
self.namespaces[1216] = {
'_default': [u'OpenLayers'],
}
self.namespaces[1217] = {
'_default': [u'OpenLayers talk'],
}
self.namespaces[1218] = {
'_default': [u'FUDforum'],
}
self.namespaces[1219] = {
'_default': [u'FUDforum talk'],
}
self.namespaces[1220] = {
'_default': [u'Okawix'],
}
self.namespaces[1221] = {
'_default': [u'Okawix talk'],
}
self.namespaces[1222] = {
'_default': [u'Osm'],
}
self.namespaces[1223] = {
'_default': [u'Osm talk'],
}
self.namespaces[1224] = {
'_default': [u'WikiReader'],
}
self.namespaces[1225] = {
'_default': [u'WikiReader talk'],
}
self.namespaces[1226] = {
'_default': [u'Shapado'],
}
self.namespaces[1227] = {
'_default': [u'Shapado talk'],
}
self.namespaces[1228] = {
'_default': [u'iHRIS'],
}
self.namespaces[1229] = {
'_default': [u'iHRIS talk'],
}
self.namespaces[1230] = {
'_default': [u'Mifos'],
}
self.namespaces[1231] = {
'_default': [u'Mifos talk'],
}
self.namespaces[1232] = {
'_default': [u'Wikia'],
}
self.namespaces[1233] = {
'_default': [u'Wikia talk'],
}
self.namespaces[1234] = {
'_default': [u'OpenImages'],
}
self.namespaces[1235] = {
'_default': [u'OpenImages talk'],
}
self.namespaces[1236] = {
'_default': [u'Europeana'],
}
self.namespaces[1237] = {
'_default': [u'Europeana talk'],
}
self.namespaces[1238] = {
'_default': [u'Pywikipedia'],
}
self.namespaces[1239] = {
'_default': [u'Pywikipedia talk'],
}
self.namespaces[1240] = {
'_default': [u'Toolserver'],
}
self.namespaces[1241] = {
'_default': [u'Toolserver talk'],
}
self.namespaces[1242] = {
'_default': [u'EOL'],
}
self.namespaces[1243] = {
'_default': [u'EOL talk'],
}
self.namespaces[1244] = {
'_default': [u'Kiwix'],
}
self.namespaces[1245] = {
'_default': [u'Kiwix talk'],
}
self.namespaces[1246] = {
'_default': [u'Mozilla'],
}
self.namespaces[1247] = {
'_default': [u'Mozilla talk'],
}
self.namespaces[1248] = {
'_default': [u'FrontlineSMS'],
}
self.namespaces[1249] = {
'_default': [u'FrontlineSMS talk'],
}
self.namespaces[1250] = {
'_default': [u'EtherpadLite'],
}
self.namespaces[1251] = {
'_default': [u'EtherpadLite talk'],
}
self.namespaces[1252] = {
'_default': [u'Vicuna'],
}
self.namespaces[1253] = {
'_default': [u'Vicuna talk'],
}
def version(self, code):
return "1.21alpha"
|
fake-name/IntraArchiveDeduplicator
|
Tests/Test_db_BKTree_Compare.py
|
Python
|
bsd-3-clause
| 1,765 | 0.026062 |
import unittest
import time
import pprint
import logging
import scanner.logSetup as logSetup
import pyximport
print("Have Cython")
pyximport.install()
import dbPhashApi
class TestCompareDatabaseInterface(unittest.TestCase):
def __init__(self, *args, **kwargs):
logSetup.initLogging()
super().__init__(*args, **kwargs)
def setUp(self):
# We set up and tear down the tree a few times to validate the dropTree function
self.log = logging.getLogger("Main.TestCompareDatabaseInterface")
self.tree = dbPhashApi.PhashDbApi()
        self.tree.forceReload()
def dist_check(self, distance, dbid, phash):
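        # The SQL-side distance query and the in-memory BK-tree query must
        # agree, and both must contain the row the phash was sampled from.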
qtime1 = time.time()
        have1 = self.tree.getWithinDistance_db(phash, distance=distance)
qtime2 = time.time()
qtime3 = time.time()
have2 = self.tree.getIdsWithinDistance(phash, distance=distance)
qtime4 = time.time()
# print(dbid, have1)
if have1 != have2:
self.log.error("Mismatch!")
for line in pprint.pformat(have1).split("\n"):
self.log.error(line)
for line in pprint.pformat(have2).split("\n"):
self.log.error(line)
self.assertTrue(dbid in have1)
self.assertTrue(dbid in have2)
self.assertEqual(have1, have2)
self.log.info('Dist %s %s, %s', distance, qtime2-qtime1, qtime4-qtime3)
def test_0(self):
rand_r = self.tree.getRandomPhashRows(0.001)
self.log.info("Have %s items to test with", len(rand_r))
stepno = 0
for dbid, phash in rand_r:
self.dist_check(1, dbid, phash)
self.dist_check(2, dbid, phash)
self.dist_check(3, dbid, phash)
self.dist_check(4, dbid, phash)
self.dist_check(5, dbid, phash)
self.dist_check(6, dbid, phash)
self.dist_check(7, dbid, phash)
self.dist_check(8, dbid, phash)
stepno += 1
self.log.info("On step %s of %s", stepno, len(rand_r))
|
Lukasa/spdypy
|
test/test_api.py
|
Python
|
mit
| 368 | 0 |
# -*- coding: utf-8 -*-
"""
test/test_api
~~~~~~~~~
Tests of the top-level SPDYPy API. These will be relatively sparse for the
moment.
"""
# Nasty little path hack.
import sys
sys.path.append('.')
class TestAPI(object):
"""
Tests for the top-level spdypy API.
"""
def test_can_import_spdypy_on_py_33(self):
import spdypy
        assert True
|
SauloAislan/ironic
|
ironic/api/app.py
|
Python
|
apache-2.0
| 3,844 | 0 |
# -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystonemiddleware.audit as audit_middleware
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import pecan
from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
from ironic.api.middleware import auth_token
from ironic.common import exception
from ironic.conf import CONF
class IronicCORS(cors_middleware.CORS):
"""Ironic-specific CORS class
We're adding the Ironic-specific version headers to the list of simple
headers in order that a request bearing those headers might be accepted by
the Ironic REST API.
"""
simple_headers = cors_middleware.CORS.simple_headers + [
'X-Auth-Token',
base.Version.max_string,
base.Version.min_string,
base.Version.string
]
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
app_hooks = [hooks.ConfigHook(),
hooks.DBHook(),
hooks.ContextHook(pecan_config.app.acl_public_routes),
hooks.RPCHook(),
hooks.NoExceptionTracebackHook(),
hooks.PublicUrlHook()]
if extra_hooks:
app_hooks.extend(extra_hooks)
if not pecan_config:
pecan_config = get_pecan_config()
pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config.app.root,
debug=CONF.pecan_debug,
static_root=pecan_config.app.static_root if CONF.pecan_debug else None,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
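    # Middleware wraps inside-out: audit sits closest to the app, keystone
    # auth outside it, and CORS is applied last so it is outermost.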
if CONF.audit.enabled:
try:
app = audit_middleware.AuditMiddleware(
                app,
audit_map_file=CONF.audit.audit_map_file,
ignore_req_list=CONF.audit.ignore_req_list
)
except (EnvironmentError, OSError,
audit_middleware.PycadfAuditApiConfigError) as e:
raise exception.InputFileError(
file_name=CONF.audit.audit_map_file,
reason=e
)
    if CONF.auth_strategy == "keystone":
app = auth_token.AuthTokenMiddleware(
app, dict(cfg.CONF),
public_api_routes=pecan_config.app.acl_public_routes)
# Create a CORS wrapper, and attach ironic-specific defaults that must be
# included in all CORS responses.
app = IronicCORS(app, CONF)
cors_middleware.set_defaults(
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=[base.Version.max_string, base.Version.min_string,
base.Version.string]
)
return app
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
|
cwoebker/pen
|
pen/__init__.py
|
Python
|
bsd-3-clause
| 182 | 0 |
# -*- coding: utf-8 -*-
"""
pen: terminal notes
"""
__title__ = 'pen'
__author__ = 'cwoebker'
__version__ = '0.4.2'
__license__ = 'BSD'
__copyright__ = '© 2013-2018 Cecil Wöbker'
|
elena/dev-parse
|
posts/migrations/0002_auto__add_field_post_posts.py
|
Python
|
bsd-2-clause
| 3,048 | 0.007874 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
# Adding field 'Post.posts'
db.add_column(u'posts_post', 'posts',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Post.posts'
db.delete_column(u'posts_post', 'posts')
models = {
u'posts.author': {
'Meta': {'object_name': 'Author'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'posts.post': {
'Meta': {'object_name': 'Post'},
'author_original': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Author']"}),
'contents': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'posted_at': ('django.db.models.fields.DateTimeField', [], {}),
'posts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tickets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['posts.Ticket']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'posts.postchild': {
'Meta': {'object_name': 'PostChild'},
'author_original': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Author']"}),
'contents': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Post']"}),
'posted_at': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'posts.ticket': {
'Meta': {'object_name': 'Ticket'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '6'})
}
}
complete_apps = ['posts']
|
tima/ansible
|
lib/ansible/modules/network/aci/aci_interface_policy_leaf_policy_group.py
|
Python
|
gpl-3.0
| 15,589 | 0.003207 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_leaf_policy_group
short_description: Add Fabric Interface Policy Leaf Policy Groups on Cisco ACI fabrics.
description:
- Add Fabric Interface Policy Leaf Policy Groups on Cisco ACI fabrics.
- More information from the internal APIC class I(infra:AccBndlGrp), I(infra:AccPortGrp) at
U(https://developer.cisco.com/site/aci/docs/apis/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
notes:
- When using the module please select the appropriate link_aggregation_type (lag_type).
C(link) for Port Channel(PC), C(node) for Virtual Port Channel(VPC) and C(leaf) for Leaf Access Port Policy Group.
options:
policy_group:
description:
- Name of the leaf policy group to be added/deleted.
aliases: [ name, policy_group_name ]
description:
description:
- Description for the leaf policy group to be created.
aliases: [ descr ]
lag_type:
description:
- Selector for the type of leaf policy group we want to create.
aliases: [ lag_type_name ]
link_level_policy:
description:
- Choice of link_level_policy to be used as part of the leaf policy group to be created.
aliases: [ link_level_policy_name ]
cdp_policy:
description:
- Choice of cdp_policy to be used as part of the leaf policy group to be created.
aliases: [ cdp_policy_name ]
mcp_policy:
description:
- Choice of mcp_policy to be used as part of the leaf policy group to be created.
aliases: [ mcp_policy_name ]
lldp_policy:
description:
    - Choice of lldp_policy to be used as part of the leaf policy group to be created.
    aliases: [ lldp_policy_name ]
stp_interface_policy:
description:
- Choice of stp_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ stp_interface_policy_name ]
egress_data_plane_policing_policy:
description:
- Choice of egress_data_plane_policing_policy to be used as part of the leaf policy group to be created.
aliases: [ egress_data_plane_policing_policy_name ]
ingress_data_plane_policing_policy:
description:
- Choice of ingress_data_plane_policing_policy to be used as part of the leaf policy group to be created.
aliases: [ ingress_data_plane_policing_policy_name ]
priority_flow_control_policy:
description:
- Choice of priority_flow_control_policy to be used as part of the leaf policy group to be created.
aliases: [ priority_flow_control_policy_name ]
fibre_channel_interface_policy:
description:
- Choice of fibre_channel_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ fibre_channel_interface_policy_name ]
slow_drain_policy:
description:
- Choice of slow_drain_policy to be used as part of the leaf policy group to be created.
aliases: [ slow_drain_policy_name ]
port_channel_policy:
description:
- Choice of port_channel_policy to be used as part of the leaf policy group to be created.
aliases: [ port_channel_policy_name ]
monitoring_policy:
description:
- Choice of monitoring_policy to be used as part of the leaf policy group to be created.
aliases: [ monitoring_policy_name ]
storm_control_interface_policy:
description:
- Choice of storm_control_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ storm_control_interface_policy_name ]
l2_interface_policy:
description:
- Choice of l2_interface_policy to be used as part of the leaf policy group to be created.
aliases: [ l2_interface_policy_name ]
port_security_policy:
description:
- Choice of port_security_policy to be used as part of the leaf policy group to be created.
aliases: [ port_security_policy_name ]
aep:
description:
- Choice of attached_entity_profile (AEP) to be used as part of the leaf policy group to be created.
aliases: [ aep_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: creating a Port Channel (PC) Interface Policy Group
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
description: policygroupname description
lag_type: link
link_level_policy: whateverlinklevelpolicy
fibre_channel_interface_policy: whateverfcpolicy
state: present
- name: creating a Virtual Port Channel (VPC) Interface Policy Group (no description)
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
lag_type: node
link_level_policy: whateverlinklevelpolicy
fibre_channel_interface_policy: whateverfcpolicy
state: present
- name: creating a Leaf Access Port Policy Group (no description)
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
lag_type: leaf
link_level_policy: whateverlinklevelpolicy
fibre_channel_interface_policy: whateverfcpolicy
state: present
- name: deleting an Interface policy Leaf Policy Group
aci_interface_policy_leaf_policy_group:
hostname: apic
username: yourusername
password: yourpassword
policy_group: policygroupname
lag_type: type_name
state: absent
'''
RETURN = ''' # '''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update({
'policy_group': dict(type='str', aliases=['name', 'policy_group_name']),
'description': dict(type='str', aliases=['descr']),
        # NOTE: Since this module needs to include both infra:AccBndlGrp (for PC and VPC) and infra:AccPortGrp (for leaf access port policy group):
# NOTE: I'll allow the user to make the choice here (link(PC), node(VPC), leaf(leaf-access port policy group))
'lag_type': dict(type='str', aliases=['lag_type_name']),
'link_level_policy': dict(type='str', aliases=['link_level_policy_name']),
'cdp_policy': dict(type='str', aliases=['cdp_policy_name']),
'mcp_policy': dict(type='str', aliases=['mcp_policy_name']),
'lldp_policy': dict(type='str', aliases=['lldp_policy_name']),
'stp_interface_policy': dict(type='str', aliases=['stp_interface_policy_name']),
'egress_data_plane_policing_policy': dict(type='str', aliases=['egress_data_plane_policing_policy_name']),
'ingress_data_plane_policing_policy': dict(type='str', aliases=['ingress_data_plane_policing_policy_name']),
'priority_flow_control_policy': dict(type='str', aliases=['priority_flow_control_policy_name']),
'fibre_channel_interface_policy': dict(type='str', aliases=['fibre_channel_interface_policy_name']),
'slow_drain_policy': dict(type='str', aliases=['slow_drain_policy_name']),
'port_channel_policy': dict(type='str', aliases=['port_channel_policy_name']),
'monitoring_policy': dict(type='str', aliases=['monitoring_policy_name']),
'storm_control_interface_policy': dict(type='str', aliases=['storm_control_interface_policy_name']),
'l2_interface_policy': dict(type='str', aliases=['l2_interface_policy_name']),
'port_security_policy': dict(type='str', aliases=['port_security_policy_name']),
'aep': dict(type='str', aliases=['aep_name']),
'state': dict(type='str', default='present', choices=['absent', 'present', 'query'])
})
module
|
wdbm/media_editing
|
media_editing.py
|
Python
|
gpl-3.0
| 2,382 | 0.010915 |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# media_editing #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides media editing utilities. #
# #
# copyright (C) 2018 Will Breaden Madden, wbm@protonmail.ch #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>.
|
#
#
|
#
################################################################################
"""
|
PC-fit-Christian-Rupp/serverstatepage
|
code/testroutine/dm_server.py
|
Python
|
mit
| 1,222 | 0.047463 |
import os
import enums
class dm_server:
def __init__ (self, Name, Ip, statistics, Os = None):
self.__name = Name
self.__ip = Ip
self.__services = []
self.__statistics = statistics
if Os:
self.__os = Os
else:
self.__os = None
def addService (self, Service):
self.__services.append(Service)
def check (self):
# for i in self.__services:
# i.check()
self.__test()
def __test(self):
a = 0
for i in range(10):
a += os.system("ping -c 1" + self.__ip)
self.__pingProb = a/10
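        # os.system() returns the command's exit status, so this ratio counts
        # failed pings: 0.0 means every ping was answered.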
self.__setState()
def __setState(self):
if self.__pingProb <= 1 and self.__pingProb >= 0.8:
self.__state = 1
elif self.__pingProb <= 0.79 and self.__pingProb >= 0.41:
self.__state = 2
elif self.__pingProb <= 0.4 and self.__pingProb >= 0:
self.__state = 3
    def __checkOs(self):
pass
def __checkVersionMac(self):
pass
def __checkVersionLinux(self):
pass
def __checkVersionWin(self):
pass
def getOs(self):
return self.__os
def getVersion(self):
pass
def getList(self):
return self.__services
def getState(self):
return self.__state
def getName(self):
        return self.__name
def getIp(self):
return self.__ip
def getStatistic(self):
return self.__statistic
|
DoubleNegativeVisualEffects/gaffer
|
python/GafferUI/Slider.py
|
Python
|
bsd-3-clause
| 14,206 | 0.062509 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
## The Slider class allows a user to specify a number of positions on a scale of 0.0 at one end
# of the Widget and 1.0 at the other. Positions off the ends of the widget are mapped
# to negative numbers and numbers greater than 1.0 respectively. Derived classes may
# provide alternative interpretations for the scale and clamp values as appropriate. In
# particular see the NumericSlider which allows the specification of the values at either
# end of the scale along with hard minimum and maximum values.
class Slider( GafferUI.Widget ) :
PositionChangedReason = IECore.Enum.create( "Invalid", "SetPositions", "Click", "IndexAdded", "IndexRemoved", "DragBegin", "DragMove", "DragEnd", "Increment" )
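    # A click/drag/release sequence emits several of these reasons in a row;
    # changesShouldBeMerged() below collapses them into one undoable step.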
def __init__( self, position=None, positions=None, **kw ) :
GafferUI.Widget.__init__( self, _Widget(), **kw )
assert( ( position is None ) or ( positions is None ) )
if positions is not None :
self.__positions = positions
else :
self.__positions = [ 0.5 if position is None else position ]
self.__selectedIndex = None
self.__sizeEditable = False
self.__minimumSize = 1
self._entered = False
self.__enterConnection = self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ) )
self.__leaveConnection = self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ) )
self.__mouseMoveConnection = self.mouseMoveSignal().connect( Gaffer.WeakMethod( self.__mouseMove ) )
self.__buttonPressConnection = self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) )
self.__dragBeginConnection = self.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ) )
self.__dragEnterConnection = self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
self.__dragMoveConnection = self.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ) )
self.__dragEndConnection = self.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) )
self.__keyPressConnection = self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
## Convenience function to call setPositions( [ position ] )
def setPosition( self, p ) :
self.setPositions( [ p ] )
## Convenience function returning getPositions()[0] if there
# is only one position, and raising ValueError if not.
def getPosition( self ) :
if len( self.__positions ) != 1 :
raise ValueError
return self.__positions[0]
def setPositions( self, positions ) :
self._setPositionsInternal( positions, self.PositionChangedReason.SetPositions )
def getPositions( self ) :
return self.__positions
## A signal emitted whenever a position has been changed. Slots should
# have the signature slot( Slider, PositionChangedReason ).
def positionChangedSignal( self ) :
signal = getattr( self, "_positionChangedSignal", None )
if signal is None :
signal = Gaffer.Signal2()
self._positionChangedSignal = signal
return signal
## Returns True if a user would expect the specified sequence
# of changes to be merged into one undoable event.
@classmethod
def changesShouldBeMerged( cls, firstReason, secondReason ) :
if type( firstReason ) != type( secondReason ) :
return False
return ( firstReason, secondReason ) in (
# click and drag
( cls.PositionChangedReason.Click, cls.PositionChangedReason.DragBegin ),
( cls.PositionChangedReason.DragBegin, cls.PositionChangedReason.DragMove ),
( cls.PositionChangedReason.DragMove, cls.PositionChangedReason.DragMove ),
( cls.PositionChangedReason.DragMove, cls.PositionChangedReason.DragEnd ),
# increment
( cls.PositionChangedReason.Increment, cls.PositionChangedReason.Increment ),
)
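	# For illustration (hypothetical call sites), given the table above :
	#
	#   Slider.changesShouldBeMerged( Slider.PositionChangedReason.Click,
	#                                 Slider.PositionChangedReason.DragBegin )   # True
	#   Slider.changesShouldBeMerged( Slider.PositionChangedReason.Click,
	#                                 Slider.PositionChangedReason.Click )       # False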
def indexRemovedSignal( self ) :
signal = getattr( self, "_indexRemovedSignal", None )
if signal is None :
signal = GafferUI.WidgetEventSignal()
self._indexRemovedSignal = signal
return signal
def setSelectedIndex( self, index ) :
if self.__selectedIndex == index :
return
if index is not None :
if not len( self.__positions ) or index < 0 or index >= len( self.__positions ) :
raise IndexError
self.__selectedIndex = index
self._qtWidget().update()
signal = getattr( self, "_selectedIndexChangedSignal", None )
if signal is not None :
signal( self )
## May return None to indicate that no index is selected.
	def getSelectedIndex( self ) :
return self.__selectedIndex
def selectedIndexChangedSignal( self ) :
signal = getattr( self, "_selectedIndexChangedSignal", None )
if signal is None :
signal = GafferUI.WidgetSignal()
self._selectedIndexChangedSignal = signal
return signal
## Determines whether or not positions may be added/removed
	def setSizeEditable( self, editable ) :
		self.__sizeEditable = editable
def getSizeEditable( self ) :
return self.__sizeEditable
## Sets a size after which no more positions can
# be removed.
def setMinimumSize( self, minimumSize ) :
self.__minimumSize = minimumSize
def getMinimumSize( self ) :
return self.__minimumSize
## May be overridden by derived classes if necessary, but
# implementations must call the base class implementation
# after performing their own work, as the base class is
# responsible for emitting positionChangedSignal().
def _setPositionsInternal( self, positions, reason ) :
dragBeginOrEnd = reason in ( self.PositionChangedReason.DragBegin, self.PositionChangedReason.DragEnd )
if positions == self.__positions and not dragBeginOrEnd :
# early out if the positions haven't changed, but not if the
# reason is either end of a drag - we always signal those so
# that they will always come in matching pairs.
return
self.__positions = positions
self._qtWidget().update()
self.__emitPositionChanged( reason )
## \todo Colours should come from some unified style somewhere
def _drawBackground( self, painter ) :
size = self.size()
pen = QtGui.QPen( QtGui.QColor( 0, 0, 0 ) )
pen.setWidth( 1 )
painter.setPen( pen )
painter.drawLine( 0, size.y / 2, size.x, size.y / 2 )
def _drawPosition( self, painter, position, highlighted, opacity=1 )
|
ryfx/modrana
|
core/configs.py
|
Python
|
gpl-3.0
| 6,907 | 0.00304 |
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# ModRana config files handling
#----------------------------------------------------------------------------
# Copyright 2012, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
import os
import shutil
from configobj import ConfigObj
import logging
log = logging.getLogger("core.config")
CONFIGS = ["map_config.conf", "user_config.conf"]
class Configs(object):
def __init__(self, modrana):
self.modrana = modrana
self.paths = modrana.paths
self.userConfig = {}
self.mapConfig = {}
# check if config files exist
self.checkConfigFilesExist()
def checkConfigFilesExist(self):
"""
assure that configuration files are available in the profile folder
- provided the default configuration files exist and that the profile folder
exists and is writable
"""
profilePath = self.modrana.paths.getProfilePath()
for config in CONFIGS:
configPath = os.path.join(profilePath, config)
if not os.path.exists(configPath):
try:
source = os.path.join("data/default_configuration_files", config)
log.info(" ** copying default configuration file to profile folder")
log.info(" ** from: %s", source)
log.info(" ** to: %s", configPath)
shutil.copy(source, configPath)
log.info(" ** default config file copying DONE")
except Exception:
log.exception("copying default configuration file to profile folder failed")
def upgradeConfigFiles(self):
"""
upgrade config files, if needed
"""
upgradeCount = 0
profilePath = self.modrana.paths.getProfilePath()
log.info("upgrading modRana configuration files in %s", profilePath)
# first check the configs actually exist
self.checkConfigFilesExist()
for config in CONFIGS:
# load default config
defaultConfigPath = os.path.join("data/default_configuration_files", config)
installedConfigPath = os.path.join(profilePath, config)
try:
defaultRev = int(ConfigObj(defaultConfigPath).get("revision", 0))
installedRev = int(ConfigObj(installedConfigPath).get("revision", 0))
if defaultRev > installedRev: # is installed config is outdated ?
log.info('config file %s is outdated, upgrading', config)
# rename installed config as the user might have modified it
newName = "%s_old_revision_%d" % (config, installedRev)
newPath = os.path.join(profilePath, newName)
shutil.move(installedConfigPath, newPath)
log.info('old config file renamed to %s' % newName)
# install the (newer) default config
shutil.copy(defaultConfigPath, profilePath)
                    # update upgrade counter
                    upgradeCount += 1
except Exception:
log.exception("upgrading config file: %s failed", config)
if upgradeCount:
log.info("%d configuration files upgraded", upgradeCount)
else:
log.info("no configuration files needed upgrade")
def loadAll(self):
"""
load all configuration files
"""
        self.loadMapConfig()
self.loadUserConfig()
def getUserConfig(self):
return self.userConfig
def loadUserConfig(self):
"""load the user oriented configuration file."""
path = os.path.join(self.modrana.paths.getProfilePath(), "user_config.conf")
try:
config = ConfigObj(path)
if 'enabled' in config:
if config['enabled'] == 'True':
self.userConfig = config
except Exception:
msg = "loading user_config.conf failed, check the syntax\n" \
"and if the config file is present in the modRana profile directory"
log.exception(msg)
def getMapConfig(self):
"""
get the "raw" map config
"""
return self.mapConfig
def loadMapConfig(self):
"""
load the map configuration file
"""
configVariables = {
'label': 'label',
'url': 'tiles',
'max_zoom': 'maxZoom',
'min_zoom': 'minZoom',
'type': 'type',
'folder_prefix': 'folderPrefix',
'coordinates': 'coordinates',
}
def allNeededIn(needed, layerDict):
"""
check if all required values are filled in
"""
# TODO: optimize this ?
for key in needed:
if key in layerDict:
continue
else:
return False
return True
mapConfigPath = os.path.join(self.modrana.paths.getProfilePath(), 'map_config.conf')
# check if the map configuration file is installed
if not os.path.exists(mapConfigPath):
# nothing in profile folder -> try to use the default config
log.info("no config in profile folder, using default map layer configuration file")
mapConfigPath = os.path.join("data/default_configuration_files", 'map_config.conf')
if not os.path.exists(mapConfigPath):
# no map layer config available
log.info("map layer configuration file not available")
return False
try:
self.mapConfig = ConfigObj(mapConfigPath)
except Exception:
log.exception("loading map_config.conf failed")
return False
return True
def getUserAgent(self):
"""return the default modRana User-Agent"""
#debugging:
# return "Mozilla/5.0 (compatible; MSIE 5.5; Linux)"
#TODO: setting from configuration file, CLI & interface
return "modRana flexible GPS navigation system (compatible; Linux)"
|
archives-new-zealand/archwayimportgenerator
|
libs/unicodecsv.py
|
Python
|
gpl-3.0
| 7,077 | 0.001559 |
# -*- coding: utf-8 -*-
import sys
import csv
from itertools import izip
# https://pypi.python.org/pypi/unicodecsv
# http://semver.org/
VERSION = (0, 9, 4)
__version__ = ".".join(map(str, VERSION))
pass_throughs = [
'register_dialect',
'unregister_dialect',
'get_dialect',
'list_dialects',
'field_size_limit',
'Dialect',
'excel',
'excel_tab',
'Sniffer',
'QUOTE_ALL',
'QUOTE_MINIMAL',
'QUOTE_NONNUMERIC',
'QUOTE_NONE',
'Error'
]
__all__ = [
'reader',
'writer',
'DictReader',
'DictWriter',
] + pass_throughs
for prop in pass_throughs:
globals()[prop] = getattr(csv, prop)
def _stringify(s, encoding, errors):
if s is None:
return ''
if isinstance(s, unicode):
return s.encode(encoding, errors)
elif isinstance(s, (int, float)):
pass # let csv.QUOTE_NONNUMERIC do its thing.
elif not isinstance(s, str):
s = str(s)
return s
def _stringify_list(l, encoding, errors='strict'):
try:
return [_stringify(s, encoding, errors) for s in iter(l)]
except TypeError, e:
raise csv.Error(str(e))
def _unicodify(s, encoding):
if s is None:
return None
if isinstance(s, (unicode, int, float)):
return s
elif isinstance(s, str):
return s.decode(encoding)
return s
class UnicodeWriter(object):
"""
>>> import unicodecsv
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = unicodecsv.writer(f, encoding='utf-8')
>>> w.writerow((u'é', u'ñ'))
>>> f.seek(0)
>>> r = unicodecsv.reader(f, encoding='utf-8')
>>> row = r.next()
>>> row[0] == u'é'
True
>>> row[1] == u'ñ'
True
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
*args, **kwds):
self.encoding = encoding
self.writer = csv.writer(f, dialect, *args, **kwds)
self.encoding_errors = errors
def writerow(self, row):
self.writer.writerow(
_stringify_list(row, self.encoding, self.encoding_errors))
def writerows(self, rows):
for row in rows:
self.writerow(row)
@property
def dialect(self):
return self.writer.dialect
writer = UnicodeWriter
class UnicodeReader(object):
def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
**kwds):
format_params = ['delimiter', 'doublequote', 'escapechar',
'lineterminator', 'quotechar', 'quoting', 'skipinitialspace']
if dialect is None:
if not any([kwd_name in format_params for kwd_name in kwds.keys()]):
dialect = csv.excel
self.reader = csv.reader(f, dialect, **kwds)
self.encoding = encoding
self.encoding_errors = errors
def next(self):
row = self.reader.next()
encoding = self.encoding
encoding_errors = self.encoding_errors
float_ = float
unicode_ = unicode
try:
val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
for value in row]
except UnicodeDecodeError as e:
# attempt a different encoding...
encoding = 'ISO-8859-1'
val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
for value in row]
return val
def __iter__(self):
return self
@property
def dialect(self):
return self.reader.dialect
@property
def line_num(self):
return self.reader.line_num
reader = UnicodeReader
class DictWriter(csv.DictWriter):
"""
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = DictWriter(f, ['a', u'ñ', 'b'], restval=u'î')
>>> w.writerow({'a':'1', u'ñ':'2'})
>>> w.writerow({'a':'1', u'ñ':'2', 'b':u'ø'})
>>> w.writerow({'a':u'é', u'ñ':'2'})
>>> f.seek(0)
>>> r = DictReader(f, fieldnames=['a', u'ñ'], restkey='r')
>>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'î']}
True
>>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'\xc3\xb8']}
True
>>> r.next() == {'a': u'\xc3\xa9', u'ñ':'2', 'r': [u'\xc3\xae']}
True
"""
def __init__(self, csvfile, fieldnames, restval='', extrasaction='raise', dialect='excel', encoding='utf-8', errors='strict', *args, **kwds):
self.encoding = encoding
csv.DictWriter.__init__(
self, csvfile, fieldnames, restval, extrasaction, dialect, *args, **kwds)
self.writer = UnicodeWriter(
csvfile, dialect, encoding=encoding, errors=errors, *args, **kwds)
self.encoding_errors = errors
def writeheader(self):
fieldnames = _stringify_list(
self.fieldnames, self.encoding, self.encoding_errors)
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
class DictReader(csv.DictReader):
"""
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = DictWriter(f, fieldnames=['name', 'place'])
>>> w.writerow({'name': 'Cary Grant', 'place': 'hollywood'})
>>> w.writerow({'name': 'Nathan Brillstone', 'place': u'øLand'})
    >>> w.writerow({'name': u'Willam ø. Unicoder', 'place': u'éSpandland'})
>>> f.seek(0)
>>> r = DictReader(f, fieldnames=['name', 'place'])
>>> print r.next() == {'name': 'Cary Grant', 'place': 'hollywood'}
True
    >>> print r.next() == {'name': 'Nathan Brillstone', 'place': u'øLand'}
True
>>> print r.next() == {'name': u'Willam ø. Unicoder', 'place': u'éSpandland'}
True
"""
def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
dialect='excel', encoding='utf-8', errors='strict', *args,
**kwds):
if fieldnames is not None:
fieldnames = _stringify_list(fieldnames, encoding)
csv.DictReader.__init__(
self, csvfile, fieldnames, restkey, restval, dialect, *args, **kwds)
self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
if fieldnames is None and not hasattr(csv.DictReader, 'fieldnames'):
# Python 2.5 fieldnames workaround.
# (http://bugs.python.org/issue3436)
reader = UnicodeReader(
csvfile, dialect, encoding=encoding, *args, **kwds)
self.fieldnames = _stringify_list(reader.next(), reader.encoding)
self.unicode_fieldnames = [_unicodify(f, encoding) for f in
self.fieldnames]
self.unicode_restkey = _unicodify(restkey, encoding)
def next(self):
row = csv.DictReader.next(self)
result = dict((uni_key, row[str_key]) for (str_key, uni_key) in
izip(self.fieldnames, self.unicode_fieldnames))
rest = row.get(self.restkey)
if rest:
result[self.unicode_restkey] = rest
return result
|
uploadcare/pyuploadcare
|
tests/functional/ucare_cli/test_update_webhook.py
|
Python
|
mit
| 484 | 0 |
import pytest
from tests.functional.ucare_cli.helpers import arg_namespace
from pyuploadcare.ucare_cli.commands.update_webhook import update_webhook
@pytest.mark.vcr
def test_update_webhooks(capsys, uploadcare):
update_webhook(
arg_namespace(
"update_webhook 865715 --deactivate "
"--target_url=https://webhook.site/updated"
        ),
        uploadcare,
)
captured = capsys.readouterr()
assert '"is_active": false' in captured.out
|
plaid/plaid-python
|
plaid/model/link_token_create_request_update.py
|
Python
|
mit
| 6,717 | 0.000447 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class LinkTokenCreateRequestUpdate(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'account_selection_enabled': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account_selection_enabled': 'account_selection_enabled', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""LinkTokenCreateRequestUpdate - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
                          will be type checked and a TypeError will be
                          raised if the wrong type is input.
                          Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_selection_enabled (bool): If `true`, enables [update mode with Account Select](https://plaid.com/docs/link/update-mode/#using-update-mode-to-request-new-accounts).. [optional] if omitted the server will use the default value of False # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
gamesun/MyTerm-for-WangH
|
GUI.py
|
Python
|
bsd-3-clause
| 16,544 | 0.001572 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 (standalone edition) on Tue Jan 14 10:41:03 2014
#
import wx
import wx.grid
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.statusbar = self.CreateStatusBar(5, wx.ST_SIZEGRIP)
self.SplitterWindow = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_3D | wx.SP_BORDER)
self.window_1_pane_1 = wx.ScrolledWindow(self.SplitterWindow, wx.ID_ANY, style=wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
self.pnlSettingBar = wx.Panel(self.window_1_pane_1, wx.ID_ANY)
self.btnHideBar = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Hide")
self.btnEnumPorts = wx.Button(self.pnlSettingBar, wx.ID_ANY, "EnumPorts")
self.label_1 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Port")
self.cmbPort = wx.ComboBox(self.pnlSettingBar, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.label_2 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Baud Rate")
self.cmbBaudRate = wx.ComboBox(self.pnlSettingBar, wx.ID_ANY, choices=["300", "600", "1200", "1800", "2400", "4800", "9600", "19200", "38400", "57600", "115200", "230400", "460800", "500000", "576000", "921600", "1000000", "1152000", "1500000", "2000000", "2500000", "3000000", "3500000", "4000000"], style=wx.CB_DROPDOWN)
self.label_3 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Data Bits")
self.choiceDataBits = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["5", "6", "7", "8"])
self.label_4 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Parity")
self.choiceParity = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["None", "Even", "Odd", "Mark", "Space"])
self.label_5 = wx.StaticText(self.pnlSettingBar, wx.ID_ANY, "Stop Bits")
self.choiceStopBits = wx.Choice(self.pnlSettingBar, wx.ID_ANY, choices=["1", "1.5", "2"])
self.chkboxrtscts = wx.CheckBox(self.pnlSettingBar, wx.ID_ANY, "RTS/CTS")
self.chkboxxonxoff = wx.CheckBox(self.pnlSettingBar, wx.ID_ANY, "Xon/Xoff")
self.sizer_6_staticbox = wx.StaticBox(self.pnlSettingBar, wx.ID_ANY, "HandShake")
self.btnOpen = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Open")
self.btnClear = wx.Button(self.pnlSettingBar, wx.ID_ANY, "Clear Screen")
self.window_1_pane_2 = wx.Panel(self.SplitterWindow, wx.ID_ANY)
self.pnlGrid = wx.ScrolledWindow(self.window_1_pane_2, wx.ID_ANY, style=wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
self.grid_csv = wx.grid.Grid(self.pnlGrid, wx.ID_ANY, size=(1, 1))
self.button_1 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send1")
self.button_2 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send2")
self.button_3 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send3")
self.button_4 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send4")
self.button_5 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send5")
self.button_6 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send6")
self.button_7 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send7")
self.button_8 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send8")
self.button_9 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send9")
self.button_10 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send10")
self.button_11 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 11")
self.button_12 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 12")
self.button_13 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 13")
self.button_14 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 14")
self.button_15 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 15")
self.button_16 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 16")
self.button_17 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 17")
self.button_18 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 18")
self.button_19 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 19")
self.button_20 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 20")
self.button_21 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 21")
self.button_22 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 22")
self.button_23 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 23")
self.button_24 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 24")
self.button_25 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 25")
self.button_26 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 26")
self.button_27 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 27")
self.button_28 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 28")
self.button_29 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 29")
self.button_30 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 30")
self.button_31 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 31")
self.button_32 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 32")
self.button_33 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 33")
self.button_34 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 34")
self.button_35 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 35")
self.button_36 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 36")
self.button_37 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 37")
self.button_38 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 38")
self.button_39 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 39")
self.button_40 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 40")
self.button_41 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 41")
self.button_42 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 42")
self.button_43 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 43")
self.button_44 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 44")
self.button_45 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 45")
self.button_46 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 46")
self.button_47 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 47")
self.button_48 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 48")
self.button_49 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 49")
self.button_50 = wx.Button(self.pnlGrid, wx.ID_ANY, "Send 50")
self.txtctlMain = wx.TextCtrl(self.window_1_pane_2, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_RICH | wx.TE_RICH2 | wx.TE_AUTO_URL | wx.TE_LINEWRAP | wx.TE_WORDWRAP)
self.pnlTransmitHex = wx.Panel(self.window_1_pane_2, wx.ID_ANY)
self.label_6 = wx.StaticText(self.pnlTransmitHex, wx.ID_ANY, "Transmit Hex")
self.btnTransmitHex = wx.Button(self.pnlTransmitHex, wx.ID_ANY, "Transmit")
self.txtTransmitHex = wx.TextCtrl(self.pnlTransmitHex, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_RICH | wx.TE_RICH2 | wx.TE_AUTO_URL | wx.TE_LINEWRAP | wx.TE_WORDWRAP)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("MyTerm")
self.SetSize((834, 603))
self.statusbar.SetStatusWidths([-28, -10, -10, 55, 105])
# statusbar fields
statusbar_fields = ["", "Rx:0", "Tx:0", "Rx:Ascii", "Local echo:Off"]
for i in range(len(statusbar_fields)):
self.statusbar.SetStatusText(statusbar_fields[i], i)
self.cmbBaudRate.SetSelection(7)
self.choiceDataBits.SetSelection(3)
self.choiceParity.SetSelection(0)
self.choiceStopBits.SetSelection(0)
self.btnOpen.SetMinSize((-1, 30))
self.btnClear.SetMinSize((-1, 30))
self.pnlSettingBar.SetMinSize((158, -1))
self.window_1_pane_1.SetScrollRate(1, 1)
        self.grid_csv.CreateGrid(50, 9)
self.grid_csv.SetRowLabelSize(25)
        self.grid_csv.SetColLabelSize(21)
self.button_1.SetMinSize((-1, 20))
self.button_2.SetMinSize((-1, 20))
self.button_3.SetMinSize((-1, 20))
self.button_4.SetMinSize((-1, 20
|
harry-ops/opencloud
|
webvirtcloud/networks/views.py
|
Python
|
gpl-2.0
| 4,905 | 0.001427 |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from computes.models import Compute
from networks.forms import AddNetPool
from vrtManager.network import wvmNetwork, wvmNetworks
from vrtManager.network import network_size
from libvirt import libvirtError
def networks(request, compute_id):
"""
:param request:
:return:
"""
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse('index'))
if not request.user.is_superuser:
return HttpResponseRedirect(reverse('index'))
error_messages = []
compute = get_object_or_404(Compute, pk=compute_id)
try:
conn = wvmNetworks(compute.hostname,
compute.login,
compute.password,
compute.type)
networks = conn.get_networks_info()
if request.method == 'POST':
if 'create' in request.POST:
form = AddNetPool(request.POST)
if form.is_valid():
data = form.cleaned_data
if data['name'] in networks:
msg = _("Pool name already in use")
error_messages.append(msg)
if data['forward'] == 'bridge' and data['bridge_name'] == '':
error_messages.append('Please enter bridge name')
try:
gateway, netmask, dhcp = network_size(data['subnet'], data['dhcp'])
except:
error_msg = _("Input subnet pool error")
error_messages.append(error_msg)
if not error_messages:
conn.create_network(data['name'], data['forward'], gateway, netmask,
dhcp, data['bridge_name'], data['openvswitch'], data['fixed'])
                        return HttpResponseRedirect(reverse('network', args=[compute_id, data['name']]))
else:
for msg_err in form.errors.values():
error_messages.append(msg_err.as_text())
conn.close()
except libvirtError as lib_err:
error_messages.append(lib_err)
return render(request, 'networks.html', locals())
def network(request, compute_id, pool):
"""
:param request:
:return:
"""
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse('index'))
if not request.user.is_superuser:
return HttpResponseRedirect(reverse('index'))
error_messages = []
compute = get_object_or_404(Compute, pk=compute_id)
try:
conn = wvmNetwork(compute.hostname,
compute.login,
compute.password,
compute.type,
pool)
networks = conn.get_networks()
state = conn.is_active()
device = conn.get_bridge_device()
autostart = conn.get_autostart()
ipv4_forward = conn.get_ipv4_forward()
ipv4_dhcp_range_start = conn.get_ipv4_dhcp_range_start()
ipv4_dhcp_range_end = conn.get_ipv4_dhcp_range_end()
ipv4_network = conn.get_ipv4_network()
fixed_address = conn.get_mac_ipaddr()
except libvirtError as lib_err:
error_messages.append(lib_err)
if request.method == 'POST':
if 'start' in request.POST:
try:
conn.start()
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'stop' in request.POST:
try:
conn.stop()
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'delete' in request.POST:
try:
conn.delete()
return HttpResponseRedirect(reverse('networks', args=[compute_id]))
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'set_autostart' in request.POST:
try:
conn.set_autostart(1)
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'unset_autostart' in request.POST:
try:
conn.set_autostart(0)
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
conn.close()
return render(request, 'network.html', locals())
|
gabegaster/connectedness
|
experiment.py
|
Python
|
mit
| 748 | 0.022727 |
import numpy as np
from itertools import combinations, imap
from timer import show_progress
from connectedness import is_connected
def brute_main():
for N in show_progress(xrange(4,1000)):
print N,brute(N)
def brute(N):
return sum(is_connected(s)
for s in combinations(xrange(N),3))
def mu(N,k=3):
data = [is_connected(np.random.choice(N, k, replace=False))
            for _ in xrange(10**3)]
return np.mean(data)
def bootstrap(N,k=3):
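    # average 10 Monte-Carlo estimates of the connection probability and
    # report half their spread as a rough error bar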
data = [mu(N,k) for _ in xrange(10)]
return np.mean(data), (max(data)-min(data))/2
def boot_main():
for N in show_progress(xrange(10,10**3)):
print ",".join(map(str,[N]+list(bootstrap(N,k=int(N**.5)))))
if __name__=="__main__":
boot_main()
|
lssfau/walberla
|
tests/field/codegen/JacobiKernel.py
|
Python
|
gpl-3.0
| 1,066 | 0.00469 |
import numpy as np
import pystencils as ps
from pystencils_walberla import CodeGeneration, generate_sweep
with CodeGeneration() as ctx:
# ----- Stencil 2D - created by specifying weights in nested list --------------------------
src, dst = ps.fields("src, src_tmp: [2D]", layout='fzyx')
stencil = [[1.11, 2.22, 3.33],
[4.44, 5.55, 6.66],
[7.77, 8.88, 9.99]]
assignments = ps.assignment_from_stencil(stencil, src, dst, normalization_factor=1 / np.sum(stencil))
generate_sweep(ctx, 'JacobiKernel2D', assignments, field_swaps=[(src, dst)])
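    # Note: normalization_factor is 1 / 49.95 here, since the weights above sum
    # to 1.11 * (1 + 2 + ... + 9) = 49.95, so the generated kernel computes a
    # weighted average of the 3x3 neighbourhood.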
# ----- Stencil 3D - created by using kernel_decorator with assignments in '@=' format -----
src, dst = ps.fields("src, src_tmp: [3D]", layout='fzyx')
@ps.kernel
    def kernel_func():
dst[0, 0, 0] @= (3 * src[1, 0, 0] + 4 * src[-1, 0, 0]
                         + 5 * src[0, 1, 0] + 6 * src[0, -1, 0]
                         + 7 * src[0, 0, 1] + 8 * src[0, 0, -1]) / 33
generate_sweep(ctx, 'JacobiKernel3D', kernel_func, field_swaps=[(src, dst)])
|
pengkobe/leetcode
|
questions/Palindrome_Number.py
|
Python
|
gpl-3.0
| 1,166 | 0.014315 |
# -*- coding: utf-8 -*-
# Difficulty: ★
# Without using extra storage, determine whether an integer is a palindrome. For example:
# -22 -> false
# 1221 -> true
# 1221221 -> true
# 1234321 -> true
# 234 -> false
# Points to note:
# - negative numbers must be handled
# - extra storage is not allowed, e.g. converting the number to a string
# - reversing the number would need to account for overflow
# Reference answer: https://github.com/barretlee/daily-algorithms/blob/master/answers/7.md
import math
def Palindrome_Number(num):
if num < 0:
return False;
    # count how many digits the number has
LenOfNum = 0;
while(10**LenOfNum <= num):
LenOfNum = LenOfNum+1;
if LenOfNum == 1:
return True;
while(LenOfNum >= 2):
a = num % 10;
b = num / 10**(LenOfNum-1);
if (a == b):
num = num % (10**(LenOfNum-1))
num = num /10;
            LenOfNum -= 2;
        else:
return False;
return True;
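# Worked example: 1221221 -> compare first/last digits 1 and 1, strip both
# -> 22122 -> compare 2 and 2 -> 212 -> compare 2 and 2 -> 1; a single digit
# remains, so the number is a palindrome.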
print(Palindrome_Number(10))
print(Palindrome_Number(1))
print(Palindrome_Number(-22))
print(Palindrome_Number(1221))
print(Palindrome_Number(1221221))
print(Palindrome_Number(1234321))
print(Palindrome_Number(234))
|
spulec/moto
|
tests/test_es/test_server.py
|
Python
|
apache-2.0
| 352 | 0 |
import json
import sure  # noqa # pylint: disable=unused-import
import moto.server as server
def test_es_list():
backend = server.create_backend_app("es")
test_client = backend.test_client()
resp = test_client.get("/2015-01-01/domain")
resp.status_code.should.equal(200)
    json.loads(resp.data).should.equals({"DomainNames": []})
|
jonathanf/infosalud
|
visit/visit.py
|
Python
|
agpl-3.0
| 3,450 | 0.002319 |
#
# Copyright (C) 2014 Jonathan Finlay <jfinlay@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
The patient visit module
========================
Implements the classes:
* Visit: Main visit module
* ConsultingRoom: Consultings room module
"""
from openerp.osv import osv, fields
class Visit(osv.osv):
"""
The visit module
"""
_name = 'visit'
_description = 'The visit module'
_states = [
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('canceled', 'Canceled'),
('assisted', 'Assisted'),
]
def _default_room(self, cr, uid, id, context=None):
consulroom_obj = self.pool.get('consulting.room')
room = consulroom_obj.search(cr, uid, [('default', '=', '1')])
if room:
return room[0]
return 1
def check_duration(self, cr, uid, id, context=None):
"""
Check the consistency of the visit duration
:param cr:
:param uid:
:param id:
:param context:
:return:
"""
return {}
def onchange_consulting_room(self, cr, uid, id, consulting_room, context=None):
"""
:param cr:
:param uid:
:param id:
:param starts:
:param consulting_room:
        :param context:
:return:
"""
if consulting_room:
consulroom_obj = self.pool.get('consulting.room')
duration = consulroom_obj.browse(cr, uid, consulting_room, context=context)[0].duration
else:
duration = 0.0
vals = {
'value': {
'duration': duration,
}
}
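        # The returned mapping follows the usual OpenERP on_change convention:
        # the client applies everything under 'value' to the form's fields.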
return vals
    _columns = {
        'name': fields.char('Identifier'),
'starts': fields.datetime('Start date'),
'duration': fields.float('Duration',
help='Duration in minutes'),
'patient_id': fields.many2one('patient', 'Patient'),
'consultingroom_id': fields.many2one('consulting.room',
'Consulting room'),
'state': fields.selection(_states, 'State')
}
_defaults = {
'consultingroom_id': _default_room,
}
class ConsultingRoom(osv.osv):
""" Consulting rooms """
_name = 'consulting.room'
_description = 'Consulting rooms configuration module'
_columns = {
'name': fields.char('Name'),
'duration': fields.float('Standard duration',
help='Visit standard duration time in minutes'),
'price': fields.float('Price',
help='Standard consultation fee'),
'address': fields.text('Address'),
'default': fields.boolean('Default', help='Set as default consulting room'),
}
|
bigvat/vat
|
common.py
|
Python
|
apache-2.0
| 3,323 | 0.040024 |
#!/usr/bin/env python
#-*- coding: ascii -*-
from __future__ import print_function
import sys
import platform
def copy_to_dst(src_name, dst_dir):
print("Copy %s to %s" % (src_name, dst_dir))
import shutil
shutil.copy(src_name, dst_dir)
#cfg, address_model=32/64, version_type=debug/release;
def getEnvInfo(address_model, version_type) :
import os
env = os.environ
plat = sys.platform
if 0 == plat.find("linux"):
plat = "linux"
print("\nplatform="+plat)
# print("\nplatform="+env)
cfg = ""
arch = ""
## if "" == cfg:
if "win32" == plat:
if "VS140COMNTOOLS" in env:
cfg = "vc15"
print("platform1.0 : " + cfg)
elif "VS120COMNTOOLS" in env:
cfg = "vc12"
print("platform1.1 : " + cfg)
elif "VS110COMNTOOLS" in env:
cfg = "vc11"
print("platform1 : " + cfg)
elif "VS100COMNTOOLS" in env:
cfg = "vc10"
print("platform2 : " + cfg)
elif "VS90COMNTOOLS" in env:
cfg = "vc9"
print("platform3 : " + cfg)
elif "VS80COMNTOOLS" in env:
cfg = "vc8"
print("platform4 : " + cfg)
## elif os.path.exists("C:\MinGW\bin\gcc.exe"):
## print("platform5 : " + cfg)
## cfg = "mingw"
else:
print("Unsupported vin32 develop!\n")
elif "linux" == plat:
cfg = "gcc"
print("platform6 : " + cfg)
elif "cygwin" == plat:
cfg = "gcc"
print("platform7 : " + cfg)
else:
print("Unsupported platform!\n")
sys.exit(1)
print("platform8 : " + cfg)
if "vc15" == cfg :
generator = "Visual Studio 14 2015"
compiler_name = "vc"
compiler_version = 14
elif "vc12" == cfg :
generator = "Visual Studio 12"
compiler_name = "vc"
compiler_version = 12
elif "vc11" == cfg :
generator = "Visual Studio 11"
compiler_name = "vc"
compiler_version = 11
elif "vc10" == cfg:
generator = "Visual Studio 10"
        compiler_name = "vc"
compiler_version = 10
elif "vc9" == cfg:
generator = "Visual Studio 9 2008"
compiler_name = "vc"
compiler_version = 9
elif "vc8" == cfg:
generator = "Visual Studio 8 2005"
compiler_name = "vc"
compiler_version = 8
elif "mingw" == cfg:
|
generator = "MinGW Makefiles"
compiler_name = "gcc"
compiler_version = 0
elif "gcc" == cfg:
generator = "Unix Makefiles"
compiler_name = "gcc"
compiler_version = 0
else:
print("Wrong compiler configuration\n")
sys.exit(1)
# prepare file suffix
if "win32" == plat:
bat_suffix = "bat"
dll_suffix = "dll"
exe_suffix = "exe"
elif "linux" == plat:
bat_suffix = "sh"
dll_suffix = "so"
exe_suffix = ""
elif "cygwin" == plat:
bat_suffix = "sh"
dll_suffix = "dll"
exe_suffix = "exe"
    # set env for boost
## try:
## boost_root = os.environ["BOOST_ROOT"]
## os.environ["BOOST_LIBRARYDIR"] = "%s/lib_%s%d_m%s_%s/lib" % (boost_root, compiler_name, compiler_version, address_model, version_type) # not update?????
## print("boost_lib_path="+os.environ["BOOST_LIBRARYDIR"])
## except KeyError:
## print("Please set the environment variable BOOST_ROOT")
## sys.exit(1)
if "win32" == plat:
machine = platform.machine()
print("arch="+machine)
if(machine == "x86_64" or machine == "AMD64"):
generator = generator + " Win64"
# if "win32" == plat:
# if (CMAKE_CL_64 or CMAKE_GENERATOR MATCHES Win64)
# generator = generator + " Win64"
return (plat, compiler_name, compiler_version, generator, bat_suffix, dll_suffix, exe_suffix)
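# For illustration (hypothetical machine): on 64-bit Windows with only
# Visual Studio 2013 installed, getEnvInfo(64, "release") would return
# ('win32', 'vc', 12, 'Visual Studio 12 Win64', 'bat', 'dll', 'exe').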
|
erinspace/osf.io
|
website/project/views/drafts.py
|
Python
|
apache-2.0
| 13,819 | 0.003329 |
import functools
import httplib as http
import itertools
from operator import itemgetter
from dateutil.parser import parse as parse_date
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils import timezone
from flask import request, redirect
import pytz
from framework.database import get_or_http_error, autoload
from framework.exceptions import HTTPError
from framework.status import push_status_message
from osf.utils.sanitize import strip_html
from osf.utils.permissions import ADMIN
from osf.utils.functional import rapply
from osf.models import NodeLog, RegistrationSchema, DraftRegistration, Sanction
from website.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project,
must_have_permission,
http_error_if_disk_saving_mode
)
from website import language, settings
from website.ember_osf_web.decorators import ember_flag_is_active
from website.prereg import utils as prereg_utils
from website.project import utils as project_utils
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION, METASCHEMA_ORDERING
from website.project.metadata.utils import serialize_meta_schema, serialize_draft_registration
from website.project.utils import serialize_node
get_schema_or_fail = lambda query: get_or_http_error(RegistrationSchema, query)
autoload_draft = functools.partial(autoload, DraftRegistration, 'draft_id', 'draft')
def must_be_branched_from_node(func):
@autoload_draft
@must_be_valid_project
@functools.wraps(func)
def wrapper(*args, **kwargs):
node = kwargs['node']
draft = kwargs['draft']
if draft.deleted:
            raise HTTPError(http.GONE)
if not draft.branched_from._id == node._id:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Not a draft of this node',
'message_long': 'This draft registration is not created from the given node.'
}
)
return func(*args, **kwargs)
return wrapper
def validate_embargo_end_date(end_date_string, node):
"""
Our reviewers have a window of time in which to review a draft reg. submission.
    An embargo end_date within that window is at risk of causing
    validation errors down the line if the draft is approved and registered.
The draft registration approval window is always greater than the time span
for disallowed embargo end dates.
:raises: HTTPError if end_date is less than the approval window or greater than the
max embargo end date
"""
end_date = parse_date(end_date_string, ignoretz=True).replace(tzinfo=pytz.utc)
today = timezone.now()
if (end_date - today) <= settings.DRAFT_REGISTRATION_APPROVAL_PERIOD:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid embargo end date',
'message_long': 'Embargo end date for this submission must be at least {0} days in the future.'.format(settings.DRAFT_REGISTRATION_APPROVAL_PERIOD)
})
elif not node._is_embargo_date_valid(end_date):
max_end_date = today + settings.DRAFT_REGISTRATION_APPROVAL_PERIOD
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid embargo end date',
'message_long': 'Embargo end date must on or before {0}.'.format(max_end_date.isoformat())
})
def validate_registration_choice(registration_choice):
if registration_choice not in ('embargo', 'immediate'):
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': "Invalid 'registrationChoice'",
'message_long': "Values for 'registrationChoice' must be either 'embargo' or 'immediate'."
}
)
def check_draft_state(draft):
registered_and_deleted = draft.registered_node and draft.registered_node.is_deleted
if draft.registered_node and not registered_and_deleted:
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft has already been registered',
'message_long': 'This draft has already been registered and cannot be modified.'
})
if draft.is_pending_review:
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft is pending review',
'message_long': 'This draft is pending review and cannot be modified.'
})
if draft.requires_approval and draft.is_approved and (not registered_and_deleted):
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft has already been approved',
'message_long': 'This draft has already been approved and cannot be modified.'
})
@must_have_permission(ADMIN)
@must_be_branched_from_node
def submit_draft_for_review(auth, node, draft, *args, **kwargs):
"""Submit for approvals and/or notifications
:return: serialized registration
:rtype: dict
:raises: HTTPError if embargo end date is invalid
"""
data = request.get_json()
meta = {}
registration_choice = data.get('registrationChoice', 'immediate')
validate_registration_choice(registration_choice)
if registration_choice == 'embargo':
# Initiate embargo
end_date_string = data['embargoEndDate']
validate_embargo_end_date(end_date_string, node)
meta['embargo_end_date'] = end_date_string
meta['registration_choice'] = registration_choice
if draft.registered_node and not draft.registered_node.is_deleted:
raise HTTPError(http.BAD_REQUEST, data=dict(message_long='This draft has already been registered, if you wish to '
'register it again or submit it for review please create '
'a new draft.'))
# Don't allow resubmission unless submission was rejected
if draft.approval and draft.approval.state != Sanction.REJECTED:
raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
draft.submit_for_review(
initiated_by=auth.user,
meta=meta,
save=True
)
if prereg_utils.get_prereg_schema() == draft.registration_schema:
node.add_log(
action=NodeLog.PREREG_REGISTRATION_INITIATED,
params={'node': node._primary_key},
auth=auth,
save=False
)
node.save()
push_status_message(language.AFTER_SUBMIT_FOR_REVIEW,
kind='info',
trust=False)
return {
'status': 'initiated',
'urls': {
'registrations': node.web_url_for('node_registrations')
}
}, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_branched_from_node
def draft_before_register_page(auth, node, draft, *args, **kwargs):
"""Allow the user to select an embargo period and confirm registration
:return: serialized Node + DraftRegistration
:rtype: dict
"""
ret = serialize_node(node, auth, primary=True)
ret['draft'] = serialize_draft_registration(draft, auth)
return ret
@must_have_permission(ADMIN)
@must_be_branched_from_node
@http_error_if_disk_saving_mode
def register_draft_registration(auth, node, draft, *args, **kwargs):
"""Initiate a registration from a draft registration
:return: success message; url to registrations page
:rtype: dict
"""
data = request.get_json()
registration_choice = data.get('registrationChoice', 'immediate')
validate_registration_choice(registration_choice)
# Don't allow resubmission unless submission was rejected
if draft.approval and draft.approval.state != Sanction.REJECTED:
raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
register = draft.register(auth)
draft.save()
if registration_choice == 'embargo':
# Initiate embargo
embargo_end_date = parse_date(data['embargoEndDate'], ignoretz=True).replace(tzinfo=pytz.utc)
try:
regis
|
deafhhs/adapt
|
clients/migrations/0018_merge.py
|
Python
|
mit
| 297 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0017_auto_20151025_1240'),
('clients', '0015_auto_20151025_1209'),
]
operations = [
]
|
rackerlabs/horizon
|
openstack_dashboard/dashboards/admin/projects/urls.py
|
Python
|
apache-2.0
| 1,755 | 0.00057 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns
from django.conf.urls.defaults import url
from openstack_dashboard.dashboards.admin.projects.views \
import CreateProjectView
from openstack_dashboard.dashboards.admin.projects.views import CreateUserView
from openstack_dashboard.dashboards.admin.projects.views import IndexView
from openstack_dashboard.dashboards.admin.projects.views \
import ProjectUsageView
from openstack_dashboard.dashboards.admin.projects.views \
import UpdateProjectView
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^create$', CreateProjectView.as_view(), name='create'),
url(r'^(?P<tenant_id>[^/]+)/update/$',
UpdateProjectView.as_view(), name='update'),
    url(r'^(?P<tenant_id>[^/]+)/usage/$',
ProjectUsageView.as_view(), name='usage'),
url(r'^(?P<tenant_id>[^/]+)/create_user/$',
CreateUserView.as_view(), name='create_user'),
)
|
hanlind/nova
|
nova/conf/cells.py
|
Python
|
apache-2.0
| 15,948 | 0.003386 |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cells_group = cfg.OptGroup('cells',
title='Cells Options',
help="""
Cells options allow you to use cells functionality in openstack
deployment.
""")
cells_opts = [
cfg.StrOpt('topic',
default='cells',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
Configurable RPC topics provide little value and can result in a wide variety
of errors. They should not be used.
""",
help="""
Topic.
This is the message queue topic that cells nodes listen on. It is
used when the cells service is started up to configure the queue,
and whenever an RPC call to the scheduler is made.
Possible values:
* cells: This is the recommended and the default value.
"""),
cfg.BoolOpt('enable',
default=False,
help="""
Enable cell functionality.
When this functionality is enabled, it lets you scale an OpenStack
Compute cloud in a more distributed fashion without having to use
complicated technologies like database and message queue clustering.
Cells are configured as a tree. The top-level cell should have a host
that runs a nova-api service, but no nova-compute services. Each
child cell should run all of the typical nova-* services in a regular
Compute cloud except for nova-api. You can think of cells as a normal
Compute deployment in that each cell has its own database server and
message queue broker.
Related options:
* name: A unique cell name must be given when this functionality
is enabled.
* cell_type: Cell type should be defined for all cells.
"""),
cfg.StrOpt('name',
default='nova',
help="""
Name of the current cell.
This value must be unique for each cell. Name of a cell is used as
its id, leaving this option unset or setting the same name for
two or more cells may cause unexpected behaviour.
Related options:
* enabled: This option is meaningful only when cells service
is enabled
"""),
cfg.ListOpt('capabilities',
default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
help="""
Cell capabilities.
List of arbitrary key=value pairs defining capabilities of the
current cell to be sent to the parent cells. These capabilities
are intended to be used in cells scheduler filters/weighers.
Possible values:
* key=value pairs list for example;
``hypervisor=xenserver;kvm,os=linux;windows``
"""),
cfg.IntOpt('call_timeout',
default=60,
min=0,
help="""
Call timeout.
Cell messaging module waits for response(s) to be put into the
eventlet queue. This option defines the seconds waited for
response from a call to a cell.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('reserve_percent',
default=10.0,
help="""
Reserve percentage
Percentage of cell capacity to hold in reserve, so the minimum
amount of free resource is considered to be;
min_free = total * (reserve_percent / 100.0)
This option affects both memory and disk utilization.
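For example, with the default reserve_percent of 10.0, a cell totalling
64 GB of RAM holds 6.4 GB in reserve and reports at most 57.6 GB as free.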
The primary purpose of this reserve is to ensure some space is
available for users who want to resize their instance to be larger.
Note that currently once the capacity expands into this reserve
space this option is ignored.
Possible values:
* An integer or float, corresponding to the percentage of cell capacity to
be held in reserve.
"""),
cfg.StrOpt('cell_type',
default='compute',
choices=('api', 'compute'),
help="""
Type of cell.
When cells feature is enabled the hosts in the OpenStack Compute
cloud are partitioned into groups. Cells are configured as a tree.
The top-level cell's cell_type must be set to ``api``. All other
cells are defined as a ``compute cell`` by default.
Related option:
* quota_driver: Disable quota checking for the child cells.
(nova.quota.NoopQuotaDriver)
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('mute_child_interval',
default=300,
help="""
Mute child interval.
Number of seconds without a capability and capacity update after which
the child cell is to be treated as a mute cell. Muted child cells are
then weighed in a way that highly recommends skipping them.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('bandwidth_update_interval',
default=600,
help="""
Bandwidth update interval.
Seconds between bandwidth usage cache updates for cells.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('instance_update_sync_database_limit',
default=100,
help="""
Instance update sync database limit.
Number of instances to pull from the database at one time for
a sync. If there are more instances to update the results will
be paged through.
Possible values:
* An integer, corresponding to a number of instances.
"""),
]
mute_weigher_opts = [
# TODO(sfinucan): Add max parameter
cfg.FloatOpt('mute_weight_multiplier',
default=-10000.0,
help="""
Mute weight multiplier.
Multiplier used to weigh mute children. Muted child cells are
recommended to be skipped, so their weight is multiplied by this
negative value.
Possible values:
* Negative numeric number
"""),
]
ram_weigher_opts = [
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('ram_weight_multiplier',
default=10.0,
help="""
Ram weight multiplier.
Multiplier used for weighing ram. Negative numbers indicate that
Compute should stack VMs on one host instead of spreading out new
VMs to more hosts in the cell.
Possible values:
* Numeric multiplier
"""),
]
weigher_opts = [
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('offset_weight_multiplier',
default=1.0,
help="""
Offset weight multiplier.
Multiplier applied by the offset weigher. Cells with higher
weight_offsets in the DB will be preferred. The weight_offset
is a property of a cell stored in the database. It can be used
by a deployer to have scheduling decisions favor or disfavor
cells based on the setting.
Possible values:
* Numeric multiplier
"""),
]
cell_manager_opts = [
# TODO(sfinucan): Add min parameter
cfg.IntOpt('instance_updated_at_threshold',
default=3600,
help="""
Instance updated at threshold.
Number of seconds after an instance was updated or deleted during
which cells continue to be updated. This option lets the cells
manager only attempt to sync instances that have been updated
recently, i.e. a threshold of 3600 means to only update instances
that have been modified in the last hour.
Possible values:
* Threshold in seconds
Related options:
* This value is used with the ``instance_update_num_instances``
  value in a periodic task run.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt("instance_
|
update_num_instances",
default=1,
help="""
Instance update num instances.
On every run of the periodic task, the nova cells manager will attempt
to sync ``instance_update_num_instances`` instances. When the
manager gets the list of instances, it shuffles them so that multiple
nova-cells services do not attempt to sync the same instances in
lockstep.
Possible values:
* Positive integer number
Related options:
* This value is used with the ``instance_updated_at_threshold``
value in a periodic task run.
""")
]
cell_messaging_op
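The capacity and weigher arithmetic described in the help strings above is easy to sanity-check in isolation. A minimal standalone sketch (the numbers are assumptions for illustration, not nova code):

# Free-capacity floor from reserve_percent: min_free = total * (reserve_percent / 100.0)
reserve_percent = 10.0
total_ram_mb = 32768
min_free_ram_mb = total_ram_mb * (reserve_percent / 100.0)  # 3276.8 MB held in reserve

# A mute child cell's weight is multiplied by the negative mute multiplier,
# which pushes it to the bottom of the scheduling order.
mute_weight_multiplier = -10000.0
child_weight = 1.0
print(min_free_ram_mb, child_weight * mute_weight_multiplier)  # 3276.8 -10000.0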
kvar/ansible | lib/ansible/modules/cloud/amazon/aws_codecommit.py | Python | gpl-3.0 | 6,565 | 0.00198 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_codecommit
version_added: "2.8"
short_description: Manage repositories in AWS CodeCommit
description:
- Supports creation and deletion of CodeCommit repositories.
- See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
author: Shuang Wang (@ptux)
requirements:
- botocore
- boto3
- python >= 2.6
options:
name:
description:
    - Name of the repository.
required: true
comment:
description:
    - Description or comment of the repository.
required: false
state:
description:
- Specifies the state of repository.
required: true
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''
RETURN = '''
repository_metadata:
description: "Information about the repository."
returned: always
type: complex
contains:
account_id:
description: "The ID of the AWS account asso
|
ciated with the repository."
returned: when state is present
type: str
sample: "268342293637"
arn:
description: "The Amazon Resource Name (ARN) of the repository."
returned: when state is present
type: str
sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username"
clone_url_http:
description: "The URL to use for cloning the repository over HTTPS."
returned: when state is present
type: str
sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
clone_url_ssh:
description: "The URL to use for cloning the repository over SSH."
returned: when state is present
type: str
sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
creation_date:
description: "The date and time the repository was created, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
last_modified_date:
description: "The date and time the repository was last modified, in timestamp format."
returned: when state is present
type: str
sample: "2018-10-16T13:21:41.261000+09:00"
repository_description:
description: "A comment or description about the repository."
returned: when state is present
type: str
sample: "test from ptux"
repository_id:
description: "The ID of the repository that was created or deleted"
returned: always
type: str
sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e"
repository_name:
description: "The repository's name."
returned: when state is present
type: str
sample: "reponame"
response_metadata:
description: "Information about the response."
returned: always
type: complex
contains:
http_headers:
description: "http headers of http response"
returned: always
type: dict
http_status_code:
description: "http status code of http response"
returned: always
type: str
sample: "200"
request_id:
description: "http request id"
returned: always
type: str
sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef"
retry_attempts:
description: "numbers of retry attempts"
returned: always
type: str
sample: "0"
'''
EXAMPLES = '''
# Create a new repository
- aws_codecommit:
name: repo
state: present
# Delete a repository
- aws_codecommit:
name: repo
state: absent
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class CodeCommit(object):
def __init__(self, module=None):
self._module = module
self._client = self._module.client('codecommit')
self._check_mode = self._module.check_mode
def process(self):
result = dict(changed=False)
if self._module.params['state'] == 'present' and not self._repository_exists():
if not self._module.check_mode:
result = self._create_repository()
result['changed'] = True
if self._module.params['state'] == 'absent' and self._repository_exists():
if not self._module.check_mode:
result = self._delete_repository()
result['changed'] = True
return result
def _repository_exists(self):
try:
paginator = self._client.get_paginator('list_repositories')
for page in paginator.paginate():
repositories = page['repositories']
for item in repositories:
if self._module.params['name'] in item.values():
return True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't get repository")
return False
def _create_repository(self):
try:
result = self._client.create_repository(
repositoryName=self._module.params['name'],
repositoryDescription=self._module.params['comment']
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't create repository")
return result
def _delete_repository(self):
try:
result = self._client.delete_repository(
repositoryName=self._module.params['name']
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self._module.fail_json_aws(e, msg="couldn't delete repository")
return result
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(choices=['present', 'absent'], required=True),
comment=dict(default='')
)
ansible_aws_module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True
)
aws_codecommit = CodeCommit(module=ansible_aws_module)
result = aws_codecommit.process()
ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result))
if __name__ == '__main__':
main()
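A note on the snake_case keys documented in RETURN above: the module passes the raw boto3 response through camel_dict_to_snake_dict on exit. A minimal sketch of that conversion (the sample response dict is an assumption for illustration):

from ansible.module_utils.ec2 import camel_dict_to_snake_dict

sample = {
    'repositoryMetadata': {'repositoryName': 'repo', 'cloneUrlHttp': 'https://example'},
    'ResponseMetadata': {'HTTPStatusCode': 200},
}
print(camel_dict_to_snake_dict(sample))
# {'repository_metadata': {'repository_name': 'repo', 'clone_url_http': 'https://example'},
#  'response_metadata': {'http_status_code': 200}}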
mattmillr/utaka | src/exceptions/BadRequestException.py | Python | apache-2.0 | 11,951 | 0.038407 |
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Created July, 2009
BadRequestException and subclasses, all subclass UtakaException with an httpStatus of 400
@author: Andrew
'''
from utaka.src.exceptions.UtakaException import UtakaException
import utaka.src.Config as Config
#400
class BadRequestException(UtakaException):
def __init__(self, args):
UtakaException.__init__(self, args, 400)
class AmbiguousGrantByEmailAddress(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The e-mail address you provided is associated with more than one account.',
'Code' : 'BadRequest'})
class BadDigestException(BadRequestException):
def __init__(self, expectedDigest, calculatedDigest):
BadRequestException.__init__(self,
{'Message' : 'The Content-MD5 you specified did not match what we received',
'ExpectedDigest' : expectedDigest,
'CalculatedDigest' : calculatedDigest,
'Code' : 'BadDigest'})
class CredentialsNotSupported(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'This request does not support credentials',
'Code' : 'CredentialsNotSupported'})
class EntityTooSmallException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your proposed upload is smaller than the minimum allowed object size',
'Code' : 'EntityTooSmall'})
class EntityTooLargeException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your proposed upload exceeds the maximum allowed object size',
'Code' : 'EntityTooLarge'})
class ExpiredTokenException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The provided token has expired.',
'Code' : 'ExpiredToken'})
class IncompleteBodyException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'You did not provide the number of bytes specified by the Content-Length HTTP Header',
'Code' : 'IncompleteBody'})
class IncorrectNumberOfFilesInPostRequestException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'POST requires exactly one file upload per request',
'Code' : 'IncorrectNumberOfFilesInPostRequest'})
class InlineDataTooLargeException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Inline data exceeds the maximum allowed size',
'Code' : 'InlineDataTooLarge'})
class InvalidArgumentException(BadRequestException):
def __init__(self, argValue, argName, msg='Invalid Argument'):
BadRequestException.__init__(self,
{'Message' : msg,
   'Code' : 'InvalidArgument',
'ArgumentValue' : argValue,
'ArgumentName' : argName})
class InvalidArgumentAuthorizationException(InvalidArgumentException):
def __init__(self, argValue):
headerPrefix = str(Config.get('authentication', 'prefix'))
  InvalidArgumentException.__init__(self, argValue, 'Authorization', ("Authorization header is invalid. Expected " + headerPrefix + " AccessKeyId:signature"))
class InvalidArgumentAuthorizationSpacingException(InvalidArgumentException):
def __init__(self, argValue):
InvalidArgumentException.__init__(self, argValue, 'Authorization', "Authorization header is invalid -- one and only one ' '(space) required")
class InvalidArgumentMetadataDirectiveException(InvalidArgumentException):
def __init__(self, argValue):
InvalidArgumentException.__init__(self, argValue, 'MetadataDirective', 'A specified metadata directive value must be either REPLACE or COPY.')
class InvalidArgumentQueryStringConflictException(InvalidArgumentException):
def __init__(self, conflictParamA, conflictParamB):
InvalidArgumentException.__init__(self, conflictParamA, 'ResourceType', "Conflicting query string parameters: %s and %s" % (str(conflictParamA), str(conflictParamB)))
class InvalidBucketNameException(BadRequestException):
def __init__(self, bucketName):
BadRequestException.__init__(self,
{'Message' : 'The specified bucket is not valid',
'Code' : 'InvalidBucketName',
'BucketName' : bucketName})
class InvalidDigestException(BadRequestException):
def __init__(self, contentMD5):
BadRequestException.__init__(self,
{'Message' : 'The Content-MD5 you specified is not valid',
'Code' : 'InvalidDigest',
'Content-MD5' : contentMD5})
class InvalidLocationConstraintException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The specified location constraint is not valid',
'Code' : 'InvalidLocationConstraint'})
class InvalidPolicyDocumentException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The content of the form does not meet the conditions specified in the policy document',
'Code' : 'InvalidPolicyDocument'})
class InvalidSOAPRequestException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The SOAP request body is invalid',
'Code' : 'InvalidSOAPRequest'})
class InvalidStorageClassException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The storage class you specified is not valid',
'Code' : 'InvalidStorageClass'})
class InvalidTargetBucketForLoggingException(BadRequestException):
def __init__(self, targetBucket):
BadRequestException.__init__(self,
{'Message' : 'The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.',
'Code' : 'InvalidTargetBucketForLogging',
'TargetBucket' : targetBucket})
class InvalidTokenException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The provided token is malformed or otherwise invalid',
'Code' : 'InvalidTokenException'})
class InvalidURIException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : "Couldn't parse the specified URI.",
'Code' : 'InvalidURI'})
class KeyTooLongException(BadRequestException):
def __init__(self, args):
BadRequestException.__init__(self,
{'Message' : 'Your key is too long',
'Code' : 'KeyTooLong'})
class MalformedACLErrorException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' :'The XML you provided was not well-formed or did not validate against our published schema',
'Code' : 'MalformedACL'})
class MalformedPOSTRequestException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The body of your POST request is not well-formed multipart/form-data.',
'Code' : 'MalformedPOSTRequest'})
class MalformedXMLException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'The XML you provided was not well-formed or did not validate against our published schema',
'Code' : 'MalformedXML'})
class MaxMessageLengthExceededException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your request was too big',
'Code' : 'MaxMessageLengthExceeded'})
class MaxPostPreDataLengthExceededErrorException(BadRequestException):
def __init__(self):
BadRequestException.__init__(self,
{'Message' : 'Your POST request fields preceding the upload file were too large.',
'Code' : 'MaxPostPreDataLengthExceededError'})
class MetadataTooLargeException(BadRequestException):
def __init__(self):
BadRequestException.__init__(se
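A hypothetical usage sketch for the hierarchy above (only the constructor signatures come from this file; the responder below is an assumption for illustration):

def respond_with_error(exc, status=400):
    # hypothetical responder: every subclass above carries an S3-style dict
    # with a machine-readable 'Code' and a human-readable 'Message'
    print(status, exc)

try:
    raise InvalidBucketNameException('My Bucket!')
except BadRequestException as err:
    respond_with_error(err)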
niespodd/flyer_generator | main.py | Python | mit | 2,191 | 0.006846 |
# Script Name : main.py
# Author : Shy Ruparel
# Created : September 8 2015
# Pulls in data from "data.csv" which is 2 columns wide
# Uses a base image as the background
# Uses the data - school name, and venue address -
# and prints onto the base image
# and saves every image as a .PNG
from PIL import Image, ImageDraw,ImageFont
import csv
# Main image from base.jpg
im = Image.open('base.jpg').convert('RGBA')
W, H = im.size
MaxSize = 200
maxFontW = W * .90
# Text writing onto image
with open('data.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
im = Image.open('base.jpg').convert('RGBA')
venueSize = MaxSize
addressSize = MaxSize/2
# Grab name and address
venueName = row[0].decode('utf-8')
addressDetails = row[1].decode('utf-8')
# Set font and size
venue = ImageFont.truetype('fonts/Outage.ttf', venueSize)
address = ImageFont.truetype('fonts/Lato.ttf', addressSize)
draw = ImageDraw.Draw(im)
# Find size of text
wVenue, hVenue = draw.textsize(venueName,font=venue)
# Make size smaller until width is less than size of maxFontW
while (wVenue > maxFontW):
venueSize = venueSize - 10
venue = ImageFont.truetype('fonts/Outage.ttf', venueSize)
wVenue, hVenue = draw.textsize(venueName,font=venue)
wAddress, hAddress = draw.textsize(addressDetails,font=address)
while (wAddress > maxFontW):
addressSize = addressSize - 10
            address = ImageFont.truetype('fonts/Lato.ttf', addressSize)  # re-measure with the same family assigned above
wAddress, hAddress = draw.textsize(addressDetails,font=address)
# Put text onto the image
        draw.text(((W-wVenue)/2,(H-hVenue)/2 + 100), venueName,font=venue, fill="white")
draw.text(((W-wAddress)/2,((H-hAddress)/2)+hVenue+125), addressDetails,font=address, fill="white")
# Save out the image
        filename = 'output/' + venueName.strip() + '.png'
        filename = filename.replace(" ", "_")
print filename
im.save(filename,'PNG')
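The two shrink-to-fit loops above generalize naturally into a helper. A Python 3 / Pillow sketch of the same idea (the function name and step size are my own, not part of this script):

from PIL import ImageFont

def fit_font(draw, text, font_path, start_size, max_width, step=10):
    # Shrink the point size until the rendered text fits within max_width.
    size = start_size
    font = ImageFont.truetype(font_path, size)
    while draw.textsize(text, font=font)[0] > max_width and size > step:
        size -= step
        font = ImageFont.truetype(font_path, size)
    return font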
SciLifeLab/scilifelab | scripts/xls2ped.py | Python | mit | 751 | 0 |
""" Convert Excel document (.xls) delivered from MAF to a ped.txt file.
"""
import argparse
import csv
import xlrd
def main(xls_file, out_file, sheet_name):
with xlrd.open_workbook(xls_file) as workbook:
worksheet = workbook.sheet_by_name(sheet_name)
with open(out_file, 'w') as fh:
            c = csv.writer(fh, delimiter='\t')
for row in range(worksheet.nrows):
c.writerow(worksheet.row_values(row))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('xls_file')
parser.add_argument('out_file')
parser.add_argument('--sheet_name', default='HaploView_ped_0')
args = parser.parse_args()
main(args.xls_file, args.out_file, args.sheet_name)
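For reference, a hypothetical invocation of the converter above (the file names are assumptions):

# python xls2ped.py maf_delivery.xls ped.txt --sheet_name HaploView_ped_0
# or, calling the function directly:
main('maf_delivery.xls', 'ped.txt', 'HaploView_ped_0')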
leppa/home-assistant | homeassistant/components/pilight/binary_sensor.py | Python | apache-2.0 | 6,144 | 0.000488 |
"""Support for Pilight binary sensors."""
import datetime
import logging
import voluptuous as vol
from homeassistant.components import pilight
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import (
CONF_DISARM_AFTER_TRIGGER,
CONF_NAME,
CONF_PAYLOAD,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import track_point_in_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_VARIABLE = "variable"
CONF_RESET_DELAY_SEC = "reset_delay_sec"
DEFAULT_NAME = "Pilight Binary Sensor"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_VARIABLE): cv.string,
vol.Required(CONF_PAYLOAD): vol.Schema(dict),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default="on"): vol.Any(
cv.positive_int, cv.small_float, cv.string
),
vol.Optional(CONF_PAYLOAD_OFF, default="off"): vol.Any(
cv.positive_int, cv.small_float, cv.string
),
vol.Optional(CONF_DISARM_AFTER_TRIGGER, default=False): cv.boolean,
vol.Optional(CONF_RESET_DELAY_SEC, default=30): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Pilight Binary Sensor."""
disarm = config.get(CONF_DISARM_AFTER_TRIGGER)
if disarm:
add_entities(
[
PilightTriggerSensor(
hass=hass,
name=config.get(CONF_NAME),
variable=config.get(CONF_VARIABLE),
payload=config.get(CONF_PAYLOAD),
on_value=config.get(CONF_PAYLOAD_ON),
off_value=config.get(CONF_PAYLOAD_OFF),
rst_dly_sec=config.get(CONF_RESET_DELAY_SEC),
)
]
)
else:
add_entities(
[
PilightBinarySensor(
hass=hass,
name=config.get(CONF_NAME),
variable=config.get(CONF_VARIABLE),
payload=config.get(CONF_PAYLOAD),
on_value=config.get(CONF_PAYLOAD_ON),
off_value=config.get(CONF_PAYLOAD_OFF),
)
]
)
class PilightBinarySensor(BinarySensorDevice):
"""Representation of a binary sensor that can be updated using Pilight."""
def __init__(self, hass, name, variable, payload, on_value, off_value):
"""Initialize the sensor."""
self._state = False
self._hass = hass
self._name = name
self._variable = variable
self._payload = payload
self._on_value = on_value
self._off_value = off_value
hass.bus.listen(pilight.EVENT, self._handle_code)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._state
def _handle_code(self, call):
"""Handle received code by the pilight-daemon.
If the code matches the defined payload
of this sensor the sensor state is changed accordingly.
"""
        # Check if received code matches defined payload
# True if payload is contained in received code dict
payload_ok = True
for key in self._payload:
if key not in call.data:
payload_ok = False
continue
if self._payload[key] != call.data[key]:
payload_ok = False
# Read out variable if payload ok
if payload_ok:
if self._variable not in call.data:
return
value = call.data[self._variable]
self._state = value == self._on_value
self.schedule_update_ha_state()
class PilightTriggerSensor(BinarySensorDevice):
"""Representation of a binary sensor that can be updated using Pilight."""
def __init__(
self, hass, name, variable, payload, on_value, off_value, rst_dly_sec=30
):
"""Initialize the sensor."""
self._state = False
self._hass = hass
self._name = name
self._variable = variable
self._payload = payload
self._on_value = on_value
self._off_value = off_value
self._reset_delay_sec = rst_dly_sec
self._delay_after = None
self._hass = hass
hass.bus.listen(pilight.EVENT, self._handle_code)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._state
def _reset_state(self, call):
self._state = False
self._delay_after = None
self.schedule_update_ha_state()
def _handle_code(self, call):
"""Handle received code by the pilight-daemon.
If the code matches the defined payload
of this sensor the sensor state is changed accordingly.
"""
# Check if received code matches defined payload
# True if payload is contained in received code dict
payload_ok = True
for key in self._payload:
if key not in call.data:
payload_ok = False
continue
if self._payload[key] != call.data[key]:
payload_ok = False
# Read out variable if payload ok
if payload_ok:
if self._variable not in call.data:
return
value = call.data[self._variable]
self._state = value == self._on_value
if self._delay_after is None:
self._delay_after = dt_util.utcnow() + datetime.timedelta(
seconds=self._reset_delay_sec
)
track_point_in_time(self._hass, self._reset_state, self._delay_after)
self.schedule_update_ha_state()
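The payload matching in _handle_code reduces to a containment check. A standalone sketch with assumed event data (not Home Assistant code):

payload = {'protocol': 'kaku_switch', 'id': 12}
event_data = {'protocol': 'kaku_switch', 'id': 12, 'state': 'on'}

payload_ok = all(k in event_data and event_data[k] == v for k, v in payload.items())
if payload_ok:
    value = event_data.get('state')   # the configured CONF_VARIABLE
    is_on = value == 'on'             # compared against CONF_PAYLOAD_ON
    print(is_on)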
insomnia-lab/calibre | setup/extensions.py | Python | gpl-3.0 | 24,319 | 0.005222 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import textwrap, os, shlex, subprocess, glob, shutil
from distutils import sysconfig
from multiprocessing import cpu_count
from PyQt4.pyqtconfig import QtGuiModuleMakefile
from setup import Command, islinux, isbsd, isosx, SRC, iswindows
from setup.build_environment import (chmlib_inc_dirs,
podofo_inc, podofo_lib, podofo_error, pyqt, OSX_SDK, NMAKE, QMAKE,
msvc, MT, win_inc, win_lib, win_ddk, magick_inc_dirs, magick_lib_dirs,
magick_libs, chmlib_lib_dirs, sqlite_inc_dirs, icu_inc_dirs,
icu_lib_dirs, win_ddk_lib_dirs, ft_libs, ft_lib_dirs, ft_inc_dirs,
zlib_libs, zlib_lib_dirs, zlib_inc_dirs, is64bit, qt_private_inc)
MT
isunix = islinux or isosx or isbsd
make = 'make' if isunix else NMAKE
class Extension(object):
def absolutize(self, paths):
return list(set([x if os.path.isabs(x) else os.path.join(SRC, x.replace('/',
os.sep)) for x in paths]))
def __init__(self, name, sources, **kwargs):
self.name = name
self.needs_cxx = bool([1 for x in sources if os.path.splitext(x)[1] in
('.cpp', '.c++', '.cxx')])
self.sources = self.absolutize(sources)
self.headers = self.absolutize(kwargs.get('headers', []))
self.sip_files = self.absolutize(kwargs.get('sip_files', []))
self.inc_dirs = self.absolutize(kwargs.get('inc_dirs', []))
self.lib_dirs = self.absolutize(kwargs.get('lib_dirs', []))
self.extra_objs = self.absolutize(kwargs.get('extra_objs', []))
self.error = kwargs.get('error', None)
self.libraries = kwargs.get('libraries', [])
self.cflags = kwargs.get('cflags', [])
self.ldflags = kwargs.get('ldflags', [])
self.optional = kwargs.get('optional', False)
self.needs_ddk = kwargs.get('needs_ddk', False)
of = kwargs.get('optimize_level', None)
if of is None:
of = '/Ox' if iswindows else '-O3'
else:
flag = '/O%d' if iswindows else '-O%d'
of = flag % of
self.cflags.insert(0, of)
def preflight(self, obj_dir, compiler, linker, builder, cflags, ldflags):
pass
reflow_sources = glob.glob(os.path.join(SRC, 'calibre', 'ebooks', 'pdf', '*.cpp'))
reflow_headers = glob.glob(os.path.join(SRC, 'calibre', 'ebooks', 'pdf', '*.h'))
icu_libs = ['icudata', 'icui18n', 'icuuc', 'icuio']
icu_cflags = []
if iswindows:
icu_libs = ['icudt', 'icuin', 'icuuc', 'icuio']
if isosx:
icu_libs = ['icucore']
icu_cflags = ['-DU_DISABLE_RENAMING'] # Needed to use system libicucore.dylib
extensions = [
Extension('hunspell',
['hunspell/'+x for x in
'affentry.cxx affixmgr.cxx csutil.cxx dictmgr.cxx filemgr.cxx hashmgr.cxx hunspell.cxx phonet.cxx replist.cxx suggestmgr.cxx'.split()
] + ['calibre/utils/spell/hunspell_wrapper.cpp',],
inc_dirs=['hunspell'],
cflags='/DHUNSPELL_STATIC /D_CRT_SECURE_NO_WARNINGS /DUNICODE /D_UNICODE'.split() if iswindows else ['-DHUNSPELL_STATIC'],
optimize_level=2,
),
Extension('_regex',
['regex/_regex.c', 'regex/_regex_unicode.c'],
headers=['regex/_regex.h'],
optimize_level=2,
),
Extension('speedup',
['calibre/utils/speedup.c'],
),
Extension('_patiencediff_c',
['calibre/gui2/tweak_book/diff/_patiencediff_c.c'],
),
Extension('icu',
['calibre/utils/icu.c'],
headers=['calibre/utils/icu_calibre_utils.h'],
libraries=icu_libs,
lib_dirs=icu_lib_dirs,
inc_dirs=icu_inc_dirs,
cflags=icu_cflags
),
Extension('sqlite_custom',
['calibre/library/sqlite_custom.c'],
inc_dirs=sqlite_inc_dirs
),
Extension('chmlib',
['calibre/utils/chm/swig_chm.c'],
libraries=['ChmLib' if iswindows else 'chm'],
inc_dirs=chmlib_inc_dirs,
lib_dirs=chmlib_lib_dirs,
cflags=["-DSWIG_COBJECT_TYPES"]),
Extension('chm_extra',
['calibre/utils/chm/extra.c'],
libraries=['ChmLib' if iswindows else 'chm'],
inc_dirs=chmlib_inc_dirs,
lib_dirs=chmlib_lib_dirs,
cflags=["-D__PYTHON__"]),
Extension('magick',
['calibre/utils/magick/magick.c'],
headers=['calibre/utils/magick/magick_constants.h'],
libraries=magick_libs,
lib_dirs=magick_lib_dirs,
inc_dirs=magick_inc_dirs,
cflags=['-DMAGICKCORE_QUANTUM_DEPTH=16', '-DMAGICKCORE_HDRI_ENABLE=0']
),
Extension('lzx',
['calibre/utils/lzx/lzxmodule.c',
'calibre/utils/lzx/compressor.c',
'calibre/utils/lzx/lzxd.c',
'calibre/utils/lzx/lzc.c',
'calibre/utils/lzx/lzxc.c'],
headers=['calibre/utils/lzx/msstdint.h',
'calibre/utils/lzx/lzc.h',
'calibre/utils/lzx/lzxmodule.h',
'calibre/utils/lzx/system.h',
'calibre/utils/lzx/lzxc.h',
'calibre/utils/lzx/lzxd.h',
'calibre/utils/lzx/mspack.h'],
inc_dirs=['calibre/utils/lzx']),
Extension('freetype',
['calibre/utils/fonts/freetype.cpp'],
inc_dirs=ft_inc_dirs,
libraries=ft_libs,
lib_dirs=ft_lib_dirs),
Extension('woff',
['calibre/utils/fonts/woff/main.c',
'calibre/utils/fonts/woff/woff.c'],
headers=[
'calibre/utils/fonts/woff/woff.h',
'calibre/utils/fonts/woff/woff-private.h'],
libraries=zlib_libs,
lib_dirs=zlib_lib_dirs,
inc_dirs=zlib_inc_dirs,
),
    Extension('msdes',
['calibre/utils/msdes/msdesmodule.c',
'calibre/utils/msdes/des.c'],
headers=['calibre/utils/msdes/spr.h',
'calibre/utils/msdes/d3des.h'],
inc_dirs=['calibre/utils/msdes']),
Extension('cPalmdoc',
['calibre/ebooks/compression/palmdoc.c']),
Extension('bzzdec',
              ['calibre/ebooks/djvu/bzzdecoder.c'],
inc_dirs=(['calibre/utils/chm'] if iswindows else []) # For stdint.h
),
Extension('matcher',
['calibre/utils/matcher.c'],
headers=['calibre/utils/icu_calibre_utils.h'],
libraries=icu_libs,
lib_dirs=icu_lib_dirs,
cflags=icu_cflags,
inc_dirs=icu_inc_dirs
),
Extension('podofo',
[
'calibre/utils/podofo/utils.cpp',
'calibre/utils/podofo/output.cpp',
'calibre/utils/podofo/doc.cpp',
'calibre/utils/podofo/outline.cpp',
'calibre/utils/podofo/podofo.cpp',
],
headers=[
'calibre/utils/podofo/global.h',
],
libraries=['podofo'],
lib_dirs=[podofo_lib],
inc_dirs=[podofo_inc, os.path.dirname(podofo_inc)],
error=podofo_error),
Extension('pictureflow',
['calibre/gui2/pictureflow/pictureflow.cpp'],
inc_dirs=['calibre/gui2/pictureflow'],
headers=['calibre/gui2/pictureflow/pictureflow.h'],
sip_files=['calibre/gui2/pictureflow/pictureflow.sip']
),
Extension('progress_indicator',
['calibre/gui2/progress_indicator/QProgressIndicator.cpp'],
inc_dirs=['calibre/gui2/progress_indicator'],
headers=['calibre/gui2/progress_indicator/QProgressIndicator.h'],
sip_files=['calibre/gui2/progress_indicator/QProgressIndicator.sip']
),
Extension('qt_hack',
['calibre/ebooks/pdf/render/qt_
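To illustrate what Extension.__init__ above does with its arguments, a hypothetical declaration and its effect (the values are assumptions, not one of calibre's real extensions):

demo = Extension('demo',
    ['calibre/utils/demo.c'],
    libraries=['z'],
    optimize_level=2)
# On a unix build demo.cflags now starts with '-O2' ('/O2' on windows), and
# demo.sources holds the absolute path os.path.join(SRC, 'calibre/utils/demo.c').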
guorendong/iridium-browser-ubuntu | tools/metrics/rappor/PRESUBMIT.py | Python | bsd-3-clause | 1,156 | 0.007785 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for rappor.xml.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks that rappor.xml is pretty-printed and well-formatted."""
for f in input_api.AffectedTextFiles():
    p = f.AbsoluteLocalPath()
if (input_api.basename(p) == 'rappor.xml'
and input_api.os_path.dirname(p) == input_api.PresubmitLocalPath()):
cwd = input_api.os_path.dirname(p)
exit_code = input_api.subprocess.call(
['python', 'pretty_print.py', '--presubmit'], cwd=cwd)
if exit_code != 0:
return [output_api.PresubmitError(
'rappor.xml is not formatted correctly; run pretty_print.py '
'to fix')]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
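A hedged sketch of driving CheckChange by hand with stub objects (the stub shapes below are assumptions for illustration; the real input_api/output_api come from depot_tools):

import os, subprocess

class FakeFile(object):
    def __init__(self, path):
        self._path = path
    def AbsoluteLocalPath(self):
        return self._path

class FakeInputAPI(object):
    basename = staticmethod(os.path.basename)
    os_path = os.path
    subprocess = subprocess
    def __init__(self, files, local_path):
        self._files = files
        self._local_path = local_path
    def AffectedTextFiles(self):
        return self._files
    def PresubmitLocalPath(self):
        return self._local_path

class FakeOutputAPI(object):
    @staticmethod
    def PresubmitError(message):
        return message

api = FakeInputAPI([FakeFile('/src/tools/metrics/rappor/rappor.xml')],
                   '/src/tools/metrics/rappor')
errors = CheckChange(api, FakeOutputAPI())  # invokes pretty_print.py in that directory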