| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 | stringlengths 0–8.16k | stringlengths 3–512 | stringlengths 0–8.17k |
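Each row in this dump is one Python source file split across the prefix, middle, and suffix columns (a fill-in-the-middle layout); the full file is simply the concatenation of the three. Below is a minimal sketch, assuming a row has already been loaded into a plain Python dict keyed by the column names above (the loading step is not part of this dump, and the example row is hypothetical).

def reassemble(row):
    """Rebuild the original source text from a fill-in-the-middle row."""
    # prefix + middle + suffix is the whole file, in order.
    return row["prefix"] + row["middle"] + row["suffix"]

# Tiny hypothetical row, only to illustrate the column layout.
row = {"prefix": "def add(a, b):\n    ret", "middle": "urn a ", "suffix": "+ b\n"}
assert reassemble(row) == "def add(a, b):\n    return a + b\n"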
| our-city-app/oca-backend | src/shop/cron.py | Python | apache-2.0 | 827 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import webapp2
from shop.jobs import clean_unverified_signups
class CleanupUnverifiedSignupRequests(webapp2.RequestHandler):
def get(self):
clean_unverified_signups.job()
| device42/servicenow_to_device42_sync | starter.py | Python | mit | 2,360 | 0.013136 |
#!/usr/bin/python
""" README
This script is deprecated. Please use https://github.com/device42/servicenow_device42_mapping
This script reads CIs from Servicenow and uploads them to Device42.
It has 2 modes:
1. Full migration - when the TIMEFRAME is set to 0
2. Synchronization - when the TIMEFRAME is set to anything else but 0
GOTCHAS
* In order for hardware to be migrated, each device must have a unique name.
* When there are multiple devices with the same name, device name is constructed as: "device_name" + "_" + "servicenow_sys_id"
i.e. " MacBook Air 13" " will become " MacBook Air 13"_01a9280d3790200044e0bfc8bcbe5d79 "
*
"""
import sys
from srvnow2d42 import ServiceNow
__version__ = "2.0.2"
__status__ = "Production"
# ===== Device42 ===== #
D42_USER = 'admin'
D42_PWD = 'adm!nd42'
D42_URL = 'https://192.168.3.30'
# ===== ServiceNow ===== #
USERNAME = 'admin'
PASSWORD = 'admin123'
BASE_URL = 'https://dev13852.service-now.com/api/now/table/'
LIMIT = 1000000 # number of CIs to retrieve from ServiceNow
HEADERS = {"Content-Type":"application/json",
           "Accept":"application/json"}
TABLES = ['cmdb_ci_server' , 'cmdb_ci_computer', 'cmdb_ci_app_server', 'cmdb_ci_database', 'cmdb_ci_email_server',
'cmdb_ci_ftp_server', 'cmdb_ci_directory_server', 'cmdb_ci_ip_server']
# ===== Other ===== #
DEBUG = True # print to STDOUT
DRY_RUN = False # Upload to Device42 or not
ZONE_AS_ROOM = True # for the explanation take a look at get_zones() docstring
TIMEFRAME = 0  # Value represents hours. If set to 0, the script does a full migration; if set to any other value,
               # the script syncs changes made in the last TIMEFRAME hours up till now(). now() refers to current localtime.
if __name__ == '__main__':
snow = ServiceNow(D42_URL, D42_USER, D42_PWD, USERNAME, PASSWORD, BASE_URL, LIMIT,
HEADERS, DEBUG, DRY_RUN, ZONE_AS_ROOM, TIMEFRAME)
snow.create_db()
snow.get_relationships()
snow.get_manufacturers()
snow.get_hardware()
snow.get_locations()
snow.get_buildings()
snow.get_rooms()
if ZONE_AS_ROOM:
snow.get_zones()
snow.get_racks()
for table in TABLES:
snow.get_computers(table)
snow.get_adapters()
snow.get_ips()
snow.upload_adapters()
sys.exit()
| JeffPaine/subaru_search | subaru/spiders/subaru_spider.py | Python | mit | 3,059 | 0.002942 |
import urlparse
from scrapy.spider import BaseSpider
from scrapy.selector import Selector
from scrapy.http import Request
from ..items import Car
# Add the model(s) below you want to search for
# e.g. MODELS = ['Outback']
MODELS = []
# Enter the domain name(s) here you have permission to retrieve data from
# e.g. DOMAINS = ['http://www.example.com', 'http://www.example.com']
DOMAINS = []
class SubaruSpider(BaseSpider):
name = 'subaru'
def start_requests(self):
for domain in DOMAINS:
for model in MODELS:
url = urlparse.urljoin(domain, 'used-inventory/index.htm?listingConfigId=auto-used&make=Subaru&model=%s' % model)
yield Request(url)
def parse(self, response):
sel = Selector(response)
# Extract any cars found
cars = sel.xpath('//*[contains(@class, "inv-type-used")]')
for c in cars:
car = Car()
# Title and year
car['title'] = c.xpath('.//div/div/h1/a/text()').extract()[0].strip()
car['year'] = car['title'][0:4]
# Price, but remove non-number characters.
# Examples: '$12,000', 'Please Call', etc.
price = c.xpath('.//*[contains(@class, "value")]/text()').extract()[0]
car['price'] = ''.join(d for d in price if d.isdigit())
# url
path = c.xpath('.//div/div/h1/a/@href').extract()[0]
url = urlparse.urlparse(response.url)
car['url'] = urlparse.urlunsplit([url.scheme, url.netloc, path, None, None])
# Certain specs are frequently missing, so we need to handle
# them with try / except
specs = [
{
'name': 'vin',
'xpath': './/*/dt[text()="VIN:"]/following-sibling::dd/text()'
},
{
'name': 'color',
'xpath': './/*/dt[text()="Exterior Color:"]/following-sibling::dd/text()'
},
{
'name': 'miles',
'xpath': './/*/dt[text()="Mileage:"]/following-sibling::dd/text()'
},
{
'name': 'transmission',
'xpath': './/*/dt[text()="Transmission:"]/following-sibling::dd/text()'
}
]
for s in specs:
try:
car[s['name']] = c.xpath(s['xpath']).extract()[0]
except IndexError:
car[s['name']] = None
            yield car
        # If there's a next page link, parse it for cars as well
next_links = sel.xpath('//*[@rel="next"]/@href').extract()
if len(next_links) > 0:
query = next_links[0]
url = urlparse.urlparse(response.url)
base = urlparse.urlunsplit([url.scheme, url.netloc, url.path, None, None])
next_url = urlparse.urljoin(base, query)
# Construct url
yield Request(next_url, callback=self.parse)
| pkdevbox/trac | trac/dist.py | Python | bsd-3-clause | 22,240 | 0.000899 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Extra commands for setup.py.
In addition to providing a few extra command classes in `l10n_cmdclass`,
we also modify the standard `distutils.command.build` and
`setuptools.command.install_lib` classes so that the relevant l10n commands
for compiling catalogs are issued upon install.
"""
from StringIO import StringIO
from itertools import izip
import os
import re
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from distutils import log
from distutils.cmd import Command
from distutils.command.build import build as _build
from distutils.errors import DistutilsOptionError
from setuptools.command.install_lib import install_lib as _install_lib
try:
from babel.messages.catalog import TranslationError
from babel.messages.extract import extract_javascript
from babel.messages.frontend import extract_messages, init_catalog, \
compile_catalog, update_catalog
from babel.messages.pofile import read_po
from babel.support import Translations
from babel.util import parse_encoding
_GENSHI_MARKUP_SEARCH = re.compile(r'\[[0-9]+:').search
_DEFAULT_KWARGS_MAPS = {
        'Option': {'doc': 4},
'BoolOption': {'doc': 4},
'IntOption': {'doc': 4},
'FloatOption': {'doc': 4},
'ListOption': {'doc': 6},
'ChoiceOption': {'doc': 4},
'PathOption': {'doc': 4},
'ExtensionOption': {'doc': 5},
        'OrderedExtensionsOption': {'doc': 6},
}
_DEFAULT_CLEANDOC_KEYWORDS = (
'ConfigSection', 'Option', 'BoolOption', 'IntOption', 'FloatOption',
'ListOption', 'ChoiceOption', 'PathOption', 'ExtensionOption',
'OrderedExtensionsOption', 'cleandoc_',
)
def extract_python(fileobj, keywords, comment_tags, options):
"""Extract messages from Python source code, This is patched
extract_python from Babel to support keyword argument mapping.
`kwargs_maps` option: names of keyword arguments will be mapping to
index of messages array.
`cleandoc_keywords` option: a list of keywords to clean up the
extracted messages with `cleandoc`.
"""
from trac.util.text import cleandoc
funcname = lineno = message_lineno = None
kwargs_maps = func_kwargs_map = None
call_stack = -1
buf = []
messages = []
messages_kwargs = {}
translator_comments = []
in_def = in_translator_comments = False
comment_tag = None
encoding = str(parse_encoding(fileobj) or
options.get('encoding', 'iso-8859-1'))
kwargs_maps = _DEFAULT_KWARGS_MAPS.copy()
if 'kwargs_maps' in options:
kwargs_maps.update(options['kwargs_maps'])
cleandoc_keywords = set(_DEFAULT_CLEANDOC_KEYWORDS)
if 'cleandoc_keywords' in options:
cleandoc_keywords.update(options['cleandoc_keywords'])
tokens = generate_tokens(fileobj.readline)
tok = value = None
for _ in tokens:
prev_tok, prev_value = tok, value
tok, value, (lineno, _), _, _ = _
if call_stack == -1 and tok == NAME and value in ('def', 'class'):
in_def = True
elif tok == OP and value == '(':
if in_def:
# Avoid false positives for declarations such as:
# def gettext(arg='message'):
in_def = False
continue
if funcname:
message_lineno = lineno
call_stack += 1
kwarg_name = None
elif in_def and tok == OP and value == ':':
# End of a class definition without parens
in_def = False
continue
elif call_stack == -1 and tok == COMMENT:
# Strip the comment token from the line
value = value.decode(encoding)[1:].strip()
if in_translator_comments and \
translator_comments[-1][0] == lineno - 1:
# We're already inside a translator comment, continue
# appending
translator_comments.append((lineno, value))
continue
# If execution reaches this point, let's see if comment line
# starts with one of the comment tags
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.append((lineno, value))
break
elif funcname and call_stack == 0:
if tok == OP and value == ')':
if buf:
message = ''.join(buf)
if kwarg_name in func_kwargs_map:
messages_kwargs[kwarg_name] = message
else:
messages.append(message)
del buf[:]
else:
messages.append(None)
for name, message in messages_kwargs.iteritems():
if name not in func_kwargs_map:
continue
index = func_kwargs_map[name]
while index >= len(messages):
messages.append(None)
messages[index - 1] = message
if funcname in cleandoc_keywords:
messages = [m and cleandoc(m) for m in messages]
if len(messages) > 1:
messages = tuple(messages)
else:
messages = messages[0]
# Comments don't apply unless they immediately preceed the
# message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = lineno = message_lineno = None
kwarg_name = func_kwargs_map = None
call_stack = -1
messages = []
messages_kwargs = {}
translator_comments = []
in_translator_comments = False
elif tok == STRING:
# Unwrap quotes in a safe manner, maintaining the string's
# encoding
# https://sourceforge.net/tracker/?func=detail&atid=355470&
# aid=617979&group_id=5470
value = eval('# coding=%s\n%s' % (encoding, value),
{'__builtins__':{}}, {})
if isinstance(value, str):
value = value.decode(encoding)
buf.append(value)
elif tok == OP and value == '=' and prev_tok == NAME:
kwarg_name = prev_value
elif tok == OP and value == ',':
if buf:
message = ''.join(buf)
if kwarg_name in func_kwargs_map:
messages_kwargs[kwarg_name] = message
else:
messages.append(message)
del buf[:]
else:
messages.append(None)
| ColdMatter/EDMSuite | EDMScripts/OldScripts/MonitorRFDischargesScanSynthAmp.py | Python | mit | 1,182 | 0.032995 |
# This loop monitors the rf Discharges for a particular amplitude, then repeats for other amplitudes
# n
from DAQ.Environment import *
def scanRF(LowestAmp, HighestAmp, step, numScans):
# setup
AmpList = []
fileSystem = Environs.FileSystem
file = \
fileSystem.GetDataDirectory(\
fileSystem.Paths["scanMasterDataPath"])\
	+ fileSystem.GenerateNextDataFileName()
print("Saving as " + file + "_" + "MeasuredRF1Amp" + "*.zip")
print("")
# start looping
	r = range(int(10*LowestAmp), int(10*HighestAmp), int(10*step))
for i in range(len(r)):
print "hc:rf1 Amplitude -> " + str(float(r[i])/10)
hc.SetGreenSynthAmp(float(r[i])/10)
# hc.GreenSynthOnAmplitude = double(r[i]/10)
hc.EnableGreenSynth( False )
hc.EnableGreenSynth( True )
hc.UpdateRFPowerMonitor()
rfAmpMeasured = hc.RF1PowerCentre
hc.StepTarget(2)
System.Threading.Thread.Sleep(500)
sm.AcquireAndWait(numScans)
scanPath = file + "_" + str(i) + "_" + str(rfAmpMeasured) + ".zip"
sm.SaveData(scanPath)
AmpList.append(str(rfAmpMeasured))
print "List of Measured Amplitudes =" + str(AmpList).strip('[]')
def run_script():
print "Use scanRF(LowestAmp, HighestAmp, step, numScans)"
| jnnk/pyethereum | pyethereum/blocks.py | Python | mit | 32,057 | 0.000655 |
import time
import rlp
import trie
import db
import utils
import processblock
import transactions
import logging
import copy
import sys
from repoze.lru import lru_cache
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
INITIAL_DIFFICULTY = 2 ** 17
GENESIS_PREVHASH = '\00' * 32
GENESIS_COINBASE = "0" * 40
GENESIS_NONCE = utils.sha3(chr(42))
GENESIS_GAS_LIMIT = 10 ** 6
MIN_GAS_LIMIT = 125000
GASLIMIT_EMA_FACTOR = 1024
BLOCK_REWARD = 1500 * utils.denoms.finney
UNCLE_REWARD = 15 * BLOCK_REWARD / 16
NEPHEW_REWARD = BLOCK_REWARD / 32
BLOCK_DIFF_FACTOR = 1024
GENESIS_MIN_GAS_PRICE = 0
BLKLIM_FACTOR_NOM = 6
BLKLIM_FACTOR_DEN = 5
DIFF_ADJUSTMENT_CUTOFF = 5
RECORDING = 1
NONE = 0
VERIFYING = -1
GENESIS_INITIAL_ALLOC = \
{"51ba59315b3a95761d0863b05ccc7a7f54703d99": 2 ** 200, # (G)
"e6716f9544a56c530d868e4bfbacb172315bdead": 2 ** 200, # (J)
"b9c015918bdaba24b4ff057a92a3873d6eb201be": 2 ** 200, # (V)
"1a26338f0d905e295fccb71fa9ea849ffa12aaf4": 2 ** 200, # (A)
"2ef47100e0787b915105fd5e3f4ff6752079d5cb": 2 ** 200, # (M)
"cd2a3d9f938e13cd947ec05abc7fe734df8dd826": 2 ** 200, # (R)
"6c386a4b26f73c802f34673f7248bb118f97424a": 2 ** 200, # (HH)
"e4157b34ea9615cfbde6b4fda419828124b70c78": 2 ** 200, # (CH)
}
block_structure = [
["prevhash", "bin", "\00" * 32],
["uncles_hash", "bin", utils.sha3(rlp.encode([]))],
["coinbase", "addr", GENESIS_COINBASE],
["state_root", "trie_root", trie.BLANK_ROOT],
["tx_list_root", "trie_root", trie.BLANK_ROOT],
["difficulty", "int", INITIAL_DIFFICULTY],
["number", "int", 0],
["min_gas_price", "int", GENESIS_MIN_GAS_PRICE],
["gas_limit", "int", GENESIS_GAS_LIMIT],
["gas_used", "int", 0],
["timestamp", "int", 0],
["extra_data", "bin", ""],
["nonce", "bin", ""],
]
block_structure_rev = {}
for i, (name, typ, default) in enumerate(block_structure):
block_structure_rev[name] = [i, typ, default]
acct_structure = [
["nonce", "int", 0],
["balance", "int", 0],
["storage", "trie_root", trie.BLANK_ROOT],
["code", "hash", ""],
]
acct_structure_rev = {}
for i, (name, typ, default) in enumerate(acct_structure):
acct_structure_rev[name] = [i, typ, default]
def calc_difficulty(parent, timestamp):
offset = parent.difficulty / BLOCK_DIFF_FACTOR
sign = 1 if timestamp - parent.timestamp < DIFF_ADJUSTMENT_CUTOFF else -1
return parent.difficulty + offset * sign
def calc_gaslimit(parent):
prior_contribution = parent.gas_limit * (GASLIMIT_EMA_FACTOR - 1)
new_contribution = parent.gas_used * BLKLIM_FACTOR_NOM / BLKLIM_FACTOR_DEN
gl = (prior_contribution + new_contribution) / GASLIMIT_EMA_FACTOR
return max(gl, MIN_GAS_LIMIT)
class UnknownParentException(Exception):
pass
class TransientBlock(object):
"""
Read only, non persisted, not validated representation of a block
"""
def __init__(self, rlpdata):
self.rlpdata = rlpdata
self.header_args, transaction_list, uncles = rlp.decode(rlpdata)
self.hash = utils.sha3(rlp.encode(self.header_args))
self.transaction_list = transaction_list # rlp encoded transactions
self.uncles = uncles
for i, (name, typ, default) in enumerate(block_structure):
setattr(self, name, utils.decoders[typ](self.header_args[i]))
def __repr__(self):
return '<TransientBlock(#%d %s %s)>' %\
(self.number, self.hash.encode('hex')[
:4], self.prevhash.encode('hex')[:4])
def check_header_pow(header):
assert len(header[-1]) == 32
rlp_Hn = rlp.encode(header[:-1])
nonce = header[-1]
diff = utils.decoders['int'](header[block_structure_rev['difficulty'][0]])
h = utils.sha3(utils.sha3(rlp_Hn) + nonce)
return utils.big_endian_to_int(h) < 2 ** 256 / diff
class Block(object):
def __init__(self,
prevhash='\00' * 32,
uncles_hash=block_structure_rev['uncles_hash'][2],
coinbase=block_structure_rev['coinbase'][2],
state_root=trie.BLANK_ROOT,
tx_list_root=trie.BLANK_ROOT,
difficulty=block_structure_rev['difficulty'][2],
number=0,
min_gas_price=block_structure_rev['min_gas_price'][2],
gas_limit=block_structure_rev['gas_limit'][2],
                 gas_used=0, timestamp=0, extra_data='', nonce='',
                 transaction_list=[],
uncles=[],
header=None):
self.prevhash = prevhash
self.uncles_hash = uncles_hash
self.coinbase = coinbase
self.difficulty = difficulty
self.number = number
self.min_gas_price = min_gas_price
self.gas_limit = gas_limit
self.gas_used = gas_used
self.timestamp = timestamp
self.extra_data = extra_data
self.nonce = nonce
self.uncles = uncles
self.suicides = []
self.postqueue = []
self.caches = {
'balance': {},
'nonce': {},
'code': {},
'all': {}
}
self.journal = []
self.transactions = trie.Trie(utils.get_db_path(), tx_list_root)
self.transaction_count = 0
self.state = trie.Trie(utils.get_db_path(), state_root)
self.proof_mode = None
self.proof_nodes = []
# If transaction_list is None, then it's a block header imported for
# SPV purposes
if transaction_list is not None:
# support init with transactions only if state is known
assert self.state.root_hash_valid()
for tx_lst_serialized, state_root, gas_used_encoded \
in transaction_list:
self._add_transaction_to_list(
tx_lst_serialized, state_root, gas_used_encoded)
if tx_list_root != self.transactions.root_hash:
raise Exception("Transaction list root hash does not match!")
if not self.is_genesis() and self.nonce and\
not check_header_pow(header or self.list_header()):
raise Exception("PoW check failed")
# make sure we are all on the same db
assert self.state.db.db == self.transactions.db.db
# use de/encoders to check type and validity
for name, typ, d in block_structure:
v = getattr(self, name)
assert utils.decoders[typ](utils.encoders[typ](v)) == v
# Basic consistency verifications
if not self.state.root_hash_valid():
raise Exception(
"State Merkle root not found in database! %r" % self)
if not self.transactions.root_hash_valid():
raise Exception(
"Transactions root not found in database! %r" % self)
if len(self.extra_data) > 1024:
raise Exception("Extra data cannot exceed 1024 bytes")
if self.coinbase == '':
raise Exception("Coinbase cannot be empty address")
def validate_uncles(self):
if utils.sha3(rlp.encode(self.uncles)) != self.uncles_hash:
return False
# Check uncle validity
ancestor_chain = [self]
# Uncle can have a block from 2-7 blocks ago as its parent
for i in [1, 2, 3, 4, 5, 6, 7]:
if ancestor_chain[-1].number > 0:
ancestor_chain.append(ancestor_chain[-1].get_parent())
ineligible = []
# Uncles of this block cannot be direct ancestors and cannot also
# be uncles included 1-6 blocks ago
for ancestor in ancestor_chain[1:]:
ineligible.extend(ancestor.uncles)
ineligible.extend([b.list_header() for b in ancestor_chain])
eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]]
for uncle in self.uncles:
if not check_header_pow(uncle):
sys.stderr.write('1\n\n')
return False
# uncle's parent cannot be the block's own parent
prevhash = uncle[block_structure_rev['prevhash'][0]]
if prevhash not in eligible_ancestor_hashes:
| nwiizo/workspace_2017 | pipng/imagescale-c.py | Python | mit | 4,916 | 0.002035 |
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import collections
import math
import multiprocessing
import os
import sys
import Image
import Qtrac
Result = collections.namedtuple("Result", "todo copied scaled name")
def main():
size, smooth, source, target, concurrency = handle_commandline()
Qtrac.report("starting...")
canceled = False
try:
scale(size, smooth, source, target, concurrency)
except KeyboardInterrupt:
Qtrac.report("canceling...")
canceled = True
summarize(concurrency, canceled)
def handle_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--concurrency", type=int,
default=multiprocessing.cpu_count(),
help="specify the concurrency (for debugging and "
"timing) [default: %(default)d]")
parser.add_argument("-s", "--size", default=400, type=int,
help="make a scaled image that fits the given dimension "
"[default: %(default)d]")
parser.add_argument("-S", "--smooth", action="store_true",
help="use smooth scaling (slow but good for text)")
parser.add_argument("source",
help="the directory containing the original .xpm images")
parser.add_argument("target",
help="the directory for the scaled .xpm images")
args = parser.parse_args()
source = os.path.abspath(args.source)
target = os.path.abspath(args.target)
if source == target:
        parser.error("source and target must be different")
if not os.path.exists(args.target):
os.makedirs(target)
return args.size, args.smooth, source, target, args.concurrency
def scale(size, smooth, source, target, concurrency):
pipeline = create_pipeline(size, smooth, concurrency)
for i, (sourceImage, targetImage) in enumerate(
get_jobs(source, target)):
pipeline.send((sourceImage, targetImage, i % concurrency))
def create_pipeline(size, smooth, concurrency):
pipeline = None
sink = results()
for who in range(concurrency):
pipeline = scaler(pipeline, sink, size, smooth, who)
return pipeline
def get_jobs(source, target):
for name in os.listdir(source):
yield os.path.join(source, name), os.path.join(target, name)
@Qtrac.coroutine
def scaler(receiver, sink, size, smooth, me):
while True:
sourceImage, targetImage, who = (yield)
if who == me:
try:
result = scale_one(size, smooth, sourceImage, targetImage)
sink.send(result)
except Image.Error as err:
Qtrac.report(str(err), True)
elif receiver is not None:
receiver.send((sourceImage, targetImage, who))
@Qtrac.coroutine
def results():
while True:
result = (yield)
results.todo += result.todo
results.copied += result.copied
results.scaled += result.scaled
Qtrac.report("{} {}".format("copied" if result.copied else "scaled",
os.path.basename(result.name)))
results.todo = results.copied = results.scaled = 0
def scale_one(size, smooth, sourceImage, targetImage):
oldImage = Image.from_file(sourceImage)
if oldImage.width <= size and oldImage.height <= size:
oldImage.save(targetImage)
return Result(1, 1, 0, targetImage)
else:
if smooth:
scale = min(size / oldImage.width, size / oldImage.height)
newImage = oldImage.scale(scale)
else:
stride = int(math.ceil(max(oldImage.width / size,
oldImage.height / size)))
newImage = oldImage.subsample(stride)
newImage.save(targetImage)
return Result(1, 0, 1, targetImage)
def summarize(concurrency, canceled):
message = "copied {} scaled {} ".format(results
|
.copied, re
|
sults.scaled)
difference = results.todo - (results.copied + results.scaled)
if difference:
message += "skipped {} ".format(difference)
message += "using {} coroutines".format(concurrency)
if canceled:
message += " [canceled]"
Qtrac.report(message)
print()
if __name__ == "__main__":
main()
| aptivate/django-spreadsheetresponsemixin | setup.py | Python | gpl-3.0 | 1,365 | 0.001465 |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
LICENSE = open(os.path.join(os.path.dirname(__file__), 'LICENSE.txt')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-spreadsheetresponsemixin',
version='0.1.10',
packages=['spreadsheetresponsemixin'],
include_package_data=True,
license=LICENSE,
description='A mixin for views with a queryset that provides a CSV/Excel export.',
long_description=README,
url='https://github.com/birdsarah/django-spreadsheetresponsemixin',
author='Sarah Bird',
author_email='sarah@bonvaya.com',
install_requires=['django>=1.5', 'openpyxl>=2.0.3'],
classifiers=[
        'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'],
)
| HBEE/odoo-addons | sale_restrict_partners/__openerp__.py | Python | agpl-3.0 | 1,762 | 0.001135 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Restrict Partners',
'version': '8.0.1.0.0',
'category': 'Sales Management',
'sequence': 14,
    'summary': 'Sales, Product, Category, Classification',
'description': """
Sale Restrict Partners
======================
    Users with group "Sale - Own Leads" can only see partners that are assigned to them or assigned to no one.
    It also adds the current user as the default salesman for new partners.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'sale',
],
'data': [
'security/security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Eveler/libs | __Python__/ufms_blanks/appy3/gen/mixins/ToolMixin.py | Python | gpl-3.0 | 65,134 | 0.00281 |
# ------------------------------------------------------------------------------
import os, os.path, sys, re, time, random, types, base64
from appy import Object
import appy.gen
from appy.gen import Search, UiSearch, String, Page
from appy.gen.layout import ColumnLayout
from appy.gen import utils as gutils
from appy.gen.mixins import BaseMixin
from appy.gen.wrappers import AbstractWrapper
from appy.gen.descriptors import ClassDescriptor
from appy.gen.mail import sendMail
from appy.shared import mimeTypes
from appy.shared import utils as sutils
from appy.shared.data import languages
from appy.shared.ldap_connector import LdapConnector
import collections
try:
from AccessControl.ZopeSecurityPolicy import _noroles
except ImportError:
_noroles = []
# Global JS internationalized messages that will be computed in every page -----
jsMessages = ('no_elem_selected', 'action_confirm', 'save_confirm',
'warn_leave_form')
# ------------------------------------------------------------------------------
class ToolMixin(BaseMixin):
_appy_meta_type = 'Tool'
xhtmlEncoding = 'text/html;charset=UTF-8'
def getPortalType(self, metaTypeOrAppyClass):
'''Returns the name of the portal_type that is based on
p_metaTypeOrAppyType.'''
appName = self.getProductConfig().PROJECTNAME
res = metaTypeOrAppyClass
        if not isinstance(metaTypeOrAppyClass, str):
res = gutils.getClassName(metaTypeOrAppyClass, appName)
if res.find('_wrappers') != -1:
elems = res.split('_')
res = '%s%s' % (elems[1], elems[4])
if res in ('User', 'Group', 'Translation'): res = appName + res
return res
def home(self):
'''Returns the content of px ToolWrapper.pxHome.'''
tool = self.appy()
return tool.pxHome({'obj': None, 'tool': tool})
def query(self):
'''Returns the content of px ToolWrapper.pxQuery.'''
tool = self.appy()
return tool.pxQuery({'obj': None, 'tool': tool})
def search(self):
'''Returns the content of px ToolWrapper.pxSearch.'''
tool = self.appy()
return tool.pxSearch({'obj': None, 'tool': tool})
def getHomePage(self):
'''Return the home page when a user hits the app.'''
# If the app defines a method "getHomePage", call it.
tool = self.appy()
url = None
try:
url = tool.getHomePage()
except AttributeError:
pass
if not url:
# Bring Managers to the config, lead others to pxHome.
user = self.getUser()
if user.has_role('Manager'):
url = self.goto(self.absolute_url())
else:
url = self.goto('%s/home' % self.absolute_url())
return url
def getHomeObject(self):
'''The concept of "home object" is the object where the user must "be",
even if he is "nowhere". For example, if the user is on a search
screen, there is no contextual object. In this case, if we have a
home object for him, we will use it as contextual object, and its
portlet menu will nevertheless appear: the user will not have the
feeling of being lost.'''
# If the app defines a method "getHomeObject", call it.
try:
return self.appy().getHomeObject()
except AttributeError:
# For managers, the home object is the config. For others, there is
# no default home object.
if self.getUser().has_role('Manager'): return self.appy()
def getCatalog(self):
'''Returns the catalog object.'''
return self.getParentNode().catalog
def getApp(self):
'''Returns the root Zope object.'''
return self.getPhysicalRoot()
def getSiteUrl(self):
'''Returns the absolute URL of this site.'''
return self.getApp().absolute_url()
def getIncludeUrl(self, name, bg=False):
'''Gets the full URL of an external resource, like an image, a
Javascript or a CSS file, named p_name. If p_bg is True, p_name is
an image that is meant to be used in a "style" attribute for defining
the background image of some XHTML tag.'''
# If no extension is found in p_name, we suppose it is a png image.
if '.' not in name: name += '.png'
url = '%s/ui/%s' % (self.getPhysicalRoot().absolute_url(), name)
if not bg: return url
return 'background-image: url(%s)' % url
def doPod(self):
'''Performs an action linked to a pod field: generate, freeze,
unfreeze... a document from a pod field.'''
rq = self.REQUEST
# Get the object that is the target of this action.
obj = self.getObject(rq.get('objectUid'), appy=True)
return obj.getField(rq.get('fieldName')).onUiRequest(obj, rq)
def getAppName(self):
'''Returns the name of the application.'''
return self.getProductConfig().PROJECTNAME
def getPath(self, path):
'''Returns the folder or object whose absolute path p_path.'''
res = self.getPhysicalRoot()
if path == '/': return res
path = path[1:]
if '/' not in path: return res._getOb(path) # For performance
for elem in path.split('/'): res = res._getOb(elem)
return res
def showLanguageSelector(self):
        '''We must show the language selector if the app config requires it and
           there are at least two supported languages. Moreover, on some pages,
           switching the language is not allowed.'''
cfg = self.getProductConfig(True)
if not cfg.languageSelector: return
if len(cfg.languages) < 2: return
page = self.REQUEST.get('ACTUAL_URL').split('/')[-1]
return page not in ('edit', 'query', 'search', 'do')
def showForgotPassword(self):
'''We must show link "forgot password?" when the app requires it.'''
return self.getProductConfig(True).activateForgotPassword
def getLanguages(self):
'''Returns the supported languages. First one is the default.'''
return self.getProductConfig(True).languages
def getLanguageName(self, code):
'''Gets the language name (in this language) from a 2-chars language
p_code.'''
return languages.get(code)[2]
def changeLanguage(self):
'''Sets the language cookie with the new desired language code that is
in request["language"].'''
rq = self.REQUEST
rq.RESPONSE.setCookie('_ZopeLg', rq['language'], path='/')
return self.goto(rq['HTTP_REFERER'])
def flipLanguageDirection(self, align, dir):
'''According to language direction p_dir ('ltr' or 'rtl'), this method
turns p_align from 'left' to 'right' (or the inverse) when
required.'''
if dir == 'ltr': return align
if align == 'left': return 'right'
if align == 'right': return 'left'
return align
def getGlobalCssJs(self, dir):
'''Returns the list of CSS and JS files to include in the main template.
The method ensures that appy.css and appy.js come first. If p_dir
(=language *dir*rection) is "rtl" (=right-to-left), the stylesheet
for rtl languages is also included.'''
names = self.getPhysicalRoot().ui.objectIds('File')
# The single Appy Javascript file
names.remove('appy.js'); names.insert(0, 'appy.js')
        # CSS changes for right-to-left languages
names.remove('appyrtl.css')
if dir == 'rtl': names.insert(0, 'appyrtl.css')
names.remove('appy.css'); names.insert(0, 'appy.css')
return names
def consumeMessages(self):
'''Returns the list of messages to show to a web page and clean it in
the session.'''
rq = self.REQUEST
res = rq.SESSION.get('messages', '')
if res:
del rq.SESSION['messages']
res = ' '.join([m[1] for m in res])
return res
def getRootClasses(self):
'''Returns the list of root classes for th
| Daarknes/Gadakeco | src/util/directions.py | Python | gpl-3.0 | 328 | 0 |
from enum import Enum
class Direction(Enum):
    invalid = (0.0, 0.0)
up = (0.0, -1.0)
down = (0.0, 1.0)
left = (-1.0, 0.0)
right = (1.0, 0.0)
def x(self):
return self.value[0]
def y(self):
return self.value[1]
def __str__(self):
return str(self.value)
| EmreAtes/spack | var/spack/repos/builtin/packages/py-readme-renderer/package.py | Python | lgpl-2.1 | 1,901 | 0.001052 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyReadmeRenderer(PythonPackage):
"""readme_renderer is a library for rendering "readme" descriptions
for Warehouse."""
homepage = "
|
https://github.com/pypa/readme_renderer"
url = "https://pypi.io/packages/source/r/readme_renderer/readme_renderer-16.0.tar.gz"
version('16.0', '70321cea986956bcf2deef9981569f39')
depends_on('python@2.6:2.8,3.2:3.3')
depends_on('py-setuptools', type='build')
depends_on('py-bleach', type=('build', 'run'))
depends_on('py-docutils@0.13.1:', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
| environmentalomics/iso-to-image | uploader/test/test_sanitise_machine_name.py | Python | mit | 1,066 | 0.020657 |
#!/usr/bin/env python3
# encoding: UTF-8
import unittest
from vCloudOVFMunger import sanitise_machine_name
# Test my basic string sanitiser. This needs no setup, files, or network stuff.
# python -m unittest test.test_sanitise_machine_name
class XMLTests(unittest.TestCase):
def setUp(self):
self.alltests = [
'same-string' ,'same-string',
'lOWER cASE' ,'lower-case',
'L0@d$@ jµnk' ,'l0-d-j-nk',
' trim my e\nds \n\n' ,'trim-my-e-ds'
]
def tearDown(self):
pass
#Before Python 3.4, each test needs a separate function, so I need
#to do this long-hand.
def test_santise_0(self):
self._t(0)
def test_santise_1(self):
self._t(1)
def test_santise_2(self):
self._t(2)
def test_santise_3(self):
self._t(3)
def _t(self, idx):
fromstr = self.alltests[idx * 2]
tostr = self.alltests[idx * 2 + 1]
self.assertEqual(sanitise_machine_name(fromstr), tostr)
if __name__ == '__main__':
unittest.main()
| thegoonies/tools | sopel-modules/grep-logs.py | Python | mit | 2,449 | 0.003675 |
import re, os, datetime
from sopel import module
from sopel.config.types import StaticSection, ValidatedAttribute, FilenameAttribute
DEFAULT_CHANLOGS_DIR = os.getenv("HOME") + "/chanlogs"
DEFAULT_LINE_PATTERN = re.compile(r"^([^\s]*) <([^>]*)> (.*)$")
class GrepLogsSection(StaticSection):
dir = FilenameAttribute('dir', directory=True, default=DEFAULT_CHANLOGS_DIR)
def configure(config):
config.define_section('greplogs', GrepLogsSection, validate=False)
config.greplogs.configure_setting('dir','Path to channel log storage directory')
return
def setup(bot):
bot.config.define_section('greplogs', GrepLogsSection)
return
def get_log_files_for_channel(dpath, name):
for fname in os.listdir(dpath):
if not fname.startswith(name):
continue
fpath = "{}/{}".format(dpath, fname)
if not os.access(fpath, os.R_OK):
continue
yield fpath
return
def parse_logline(bot, line):
# in log file, pattern always is
# date <nick> msg
date, nick, msg = [x.strip() for x in re.split(DEFAULT_LINE_PATTERN, line) if len(x.strip()) ]
date = date.replace("+00:00", "+0000")
date_obj = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S%z")
return (date_obj, nick, msg)
@module.commands("grep-logs")
@module.example(".grep-logs http(s)?://")
def grep_logs(bot, trigger):
pattern_str = trigger.group(2)
if not pattern_str:
bot.reply("Missing pattern")
return
pattern = re.compile(pattern_str, re.IGNORECASE)
dpath = bot.config.greplogs.dir
channel_name = trigger.sender
found = 0
for log_file in get_log_files_for_channel(dpath, channel_name):
with open(log_file, "r") as f:
for i, line in enumerate(f.readlines()):
try:
date, nick, msg = parse_logline(bot, line)
if pattern.search(msg):
bot.say("On {}, {} said: {}".format(date.strftime("%c"), nick, msg))
found += 1
except Exception as e:
continue
if found == 0:
bot.reply("No entries found matching '{}'".format(pattern_str))
else:
bot.reply("Found {} entr{} matching '{}'".format(found,
"ies" if found > 1 else "y",
pattern_str))
return
| smistad/FAST | source/FAST/Examples/Python/stream_from_webcamera.py | Python | bsd-2-clause | 428 | 0.004673 |
## @example stream_from_webcamera.py
# This example will stream images from your webcamera and run it through a simple edge detection filter (LaplacianOfGaussian)
# and display it in real-time.
import fast
streamer = fast.CameraStreamer.create()
filter = fast.LaplacianOfGaussian.create().connect(streamer)
renderer = fast.ImageRenderer.create().connect(filter)
window = fast.SimpleWindow2D.create().connect(renderer).run()
| Atoku/siku | samples/beaufort.py | Python | gpl-2.0 | 14,677 | 0.025346 |
'''Siku scenario
Sea ice element simple free drift example
Creates a (large) polygon in polar region and sets some basic winds
in still water. No interaction with any boundaries, just a free
float of the polygon in the area and output in KML format of its
locations.
Be sure that siku module is in your PYTHONPATH.
Use python3 for checking. It is not compatible with python2.x
(c)2014 UAF, written by Anton Kulchitsky
GPLv3 or later license (same as siku)
'''
import subprocess
import os
import math
import sys
import datetime
import mathutils
import numpy
import siku
from siku import polygon
from siku import element
from siku import material
from siku import geocoords
from siku import regrid
from siku import gmt_Plotter
GMT_Plotter = gmt_Plotter.GMT_Plotter
from siku import poly_voronoi
PolyVor = poly_voronoi.PolyVor
from siku import h5load
hload = h5load.Loader
from siku import wnd
def main():
# ---------------------------------------------------------------------
# Define material
# ---------------------------------------------------------------------
ice = material.Material() # default ice values, 10 thicknesses
ice.name = 'ice' # prefer to use our own name instead
# of default
siku.materials.append( ice ) # list of all materials
# table of material names for convenience
matnames = {
'ice': 0,
}
# ---------------------------------------------------------------------
# Wind initializations (NMC grid example)
# ---------------------------------------------------------------------
siku.uw = wnd.NMCVar( 'u1994.nc', 'uwnd' )
siku.vw = wnd.NMCVar( 'v1994.nc', 'vwnd' )
start = datetime.datetime ( 1994, 2, 16, 00, 00, 00 )
for i in range(len( siku.uw.times )):
if siku.uw.times[i] >= start:
break
st_t_ind = i
siku.time.update_index = i - 1
print( 'start time: ' + str( start ) + ' at position: ' + str( i ) + \
' of ' + str( len( siku.uw.times ) ) + '\n\n' )
siku.wind = wnd.NMCSurfaceVField( siku.uw, siku.vw, st_t_ind )
siku.settings.wind_source_type = siku.WIND_SOURCES['NMC']
siku.settings.wind_source_names = [ 'u1994.nc', 'v1994.nc' ]
## w = wnd.NMCSurfaceVField( siku.uw, siku.vw, st_t_ind )
## w.make_test_field( 0.,0. )
## siku.wind = w
# ---------------------------------------------------------------------
# date/time settings
# ---------------------------------------------------------------------
#siku.time.start = datetime.datetime ( 2012, 3, 12, 00, 00, 00 )
#siku.time.finish = datetime.datetime ( 2012, 3, 13 )
#siku.time.finish = datetime.datetime ( 2012, 3, 12, 00, 00, 10 )
#siku.time.dt = datetime.timedelta ( seconds = 1 )
siku.time.dts = datetime.timedelta ( seconds = 600 )
#siku.time.last = siku.time.start
hour = datetime.timedelta ( minutes = 60 )
## time inits by NMC grid times
siku.time.start = siku.uw.times[st_t_ind]
siku.time.last = siku.uw.times[st_t_ind]
siku.time.last_update = siku.time.last
siku.time.finish = siku.uw.times[st_t_ind] + hour * 90
#siku.time.dt = datetime.timedelta ( milliseconds = 1 )
siku.time.dt = ( siku.time.finish - siku.time.start ) / 3600
# ---------------------------------------------------------------------
# elements
# ---------------------------------------------------------------------
coords = []
siku.elements = []
# ---------------------- voronoi initialization ------------------------
print('\nLoading polygons')
## North cap
PV = PolyVor( 'alaska.voronoi.xyz', 'alaska.voronoi.xyzf' )
## Channel (handmade)
## PC = PolyVor( 'alaska.voronoi.xyz', 'alaska.voronoi.xyzf' )
PV.filter_( 0, 360, 60, 90 )
## PC.filter_( 179, 187, 54, 60 )
##TESTING!
#### PV.filter_( 190, 230, 62, 82 )
## PC.filter_( 190, 230, 62, 82 )
##/TESTING
print('Deleting land polygons')
PV.clear_the_land()
coords = PV.coords
## coords = coords + PC.coords
siku.tempc = coords # for debug
### Initializing elements with polygon vertices
for c in coords:
siku.P.update( c )
# Element declaration
E = element.Element( polygon = siku.P, imat = matnames['ice'] )
E.monitor = "drift_monitor"
gh = [ 0.2, 0.2, 0.4, 0.2, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0 ]
E.set_gh( gh, ice )
# all elements in the list
siku.elements.append( E )
    ## Core will mark polygons that contain at least one point from the next
## file as 'static'
siku.settings.border_mark = 1
siku.settings.borders = 'contours.ll'
print('Marking borders with GMT')
bor = PV.get_border_by_gmt()
for b in bor:
siku.elements[ b ].flag_state = element.Element.f_static
print('Done\n\n')
# ---------------------- loading from file ----------------------------
## print('file start atempt\n')
##
## hl = hload('save_test.h5')
#### #hl = hload('siku-2014-01-01-12:50:46.h5')
####
#### #hl.load()
## hl.load_fnames()
## hl.load_mats()
## hl.load_els()
## print('\n')
##
## siku.elements = hl.extract_els()
## siku.materials = hl.extract_mats()
##
## hl = None
# ---------------------------------------------------------------------
# Monitor function for the polygon
# ---------------------------------------------------------------------
## Plotter initialization
siku.plotter = GMT_Plotter( 'beaufort94_plot.py' )
### period of picturing
siku.diagnostics.monitor_period = 30
siku.drift_monitor = drift_monitor
siku.diagnostics.step_count = 0
siku.settings.contact_method = siku.CONTACT_METHODS['sweep']
siku.settings.force_model = \
siku.CONTACT_FORCE_MODEL['distributed_spring']
# name of file to load from
#siku.settings.loadfile = 'siku-2014-01-01-12:00:00.h5'
siku.settings.loadfile = 'save_test.h5'
## siku.settings.phys_consts = [ 5000 , 10000000 , 0.75, -0.00003, 1, \
## 1, 1, 1, 1, 1 ]
siku.settings.phys_consts = { 'rigidity' : 10.0,#10,
                                  'viscosity' : 1.0,#1.0,#1
                                  'rotatability' : 0.750,#0.75
                                  'tangency' : -0.00003,#-0.00003
'elasticity' :-50000000.0,#-5000000.0,
'bendability' : 1.0,#1.0,
'solidity' : 0.05,#0.05,
'tensility' : 0.30,#0.615,
'anchority' : 0.0005,
'windage': 0.05, #0.05
'fastency' : 0.50, #0.5
'sigma' : 1.0, # -//- rigidity
'etha' : 1.0 # -//- viscosity
}
## siku.settings.contact_freq_met = siku.CONTACT_DET_FREQ_MET['speed']
## siku.settings.contact_value = 1000
# ---------------------------------------------------------------------
# Diagnostics function for the winds
# ------------------------------abs2( e.V )---------------------------------------
## # We create a grid and append it to monitor grids
## siku.diagnostics.wind_counter = 0
## rg = regrid.Regrid()
## mesh_01 = rg.globe_coverage( 5.0 )
## siku.diagnostics.meshes.append( mesh_01 )
## siku.diagnostics.wind.append(
## ( winds_diag, 0, siku.time.start, 2*siku.time.dt ) )
# ---------------------------------------------------------------------
# Settings
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Callback flag-mask generator
# -------------------------------------------------------------------
| gunan/tensorflow | tensorflow/python/util/nest_test.py | Python | apache-2.0 | 54,367 | 0.003532 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections_abc.Mapping):
def __init__(self, *args, **kwargs):
self._wrapped = dict(*args, **kwargs)
def __getitem__(self, key):
return self._wrapped[key]
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
class _CustomSequenceThatRaisesException(collections.Sequence):
def __len__(self):
return 1
def __getitem__(self, item):
raise ValueError("Cannot get item: %s" % item)
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
unsafe_map_pattern = ("nest cannot guarantee that it is safe to map one to "
"the other.")
bad_pack_pattern = ("Attempted to pack value:\n .+\ninto a sequence, but "
"found incompatible type `<(type|class) 'str'>` instead.")
if attr:
class BadAttr(object):
"""Class that has a non-iterable __attrs_attrs__."""
__attrs_attrs__ = None
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
@attr.s
class UnsortedSampleAttr(object):
field3 = attr.ib()
field1 = attr.ib()
field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegexp(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@parameterized.parameters(
{"values": [1, 2, 3]},
{"values": [{"B": 10, "A": 20}, [1, 2], 3]},
{"values": [(1, 2), [3, 4], 5]},
{"values": [PointXY(1, 2), 3, 4]},
)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsMapStructure(self, values):
if attr is None:
self.skipTest("attr module is unavailable.")
structure = NestTest.UnsortedSampleAttr(*values)
new_structure = nest.map_structure(lambda x: x, structure)
self.assertEqual(structure, new_structure)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("
|
f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
    self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(
ValueError, self.unsafe_map_pattern):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, self.bad_pack_pattern):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPackMappingViews(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
# test flattening
ordered_keys_flat = nest.flatten(ordered.keys())
ordered_values_flat = nest.flatten(ordered.values())
ordered_items_flat = nest.flatten(ordered.items())
self.assertEqual([3, 1, 0, 2], ordered_values_flat)
self.assertEqual(["d", "b", "a", "c"], ordered_keys_flat)
self.assertEqual(["d", 3, "b", 1, "a", 0, "c", 2], ordered_items_flat)
# test packing
self.assertEqual([("d", 3), ("b", 1), ("a", 0), ("c", 2)],
nest.pack_sequence_as(ordered.items(), ordered_items_flat))
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack_withDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
NestTest.Abc(3, 4), {
"d": _CustomMapping({
| citrix-openstack-build/python-keystoneclient | keystoneclient/tests/v3/test_endpoints.py | Python | apache-2.0 | 3,490 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import endpoints
class EndpointTests(utils.TestCase, utils.CrudTests):
def setUp(self):
super(EndpointTests, self).setUp()
self.key = 'endpoint'
self.collection_key = 'endpoints'
self.model = endpoints.Endpoint
self.manager = self.client.endpoints
def new_ref(self, **kwargs):
kwargs = super(EndpointTests, self).new_ref(**kwargs)
kwargs.setdefault('interface', 'public')
kwargs.setdefault('region', uuid.uuid4().hex)
kwargs.setdefault('service_id', uuid.uuid4().hex)
kwargs.setdefault('url', uuid.uuid4().hex)
kwargs.setdefault('enabled', True)
return kwargs
def test_create_public_interface(self):
ref = self.new_ref(interface='public')
self.test_create(ref)
def test_create_admin_interface(self):
ref = self.new_ref(interface='admin')
self.test_create(ref)
def test_create_internal_interface(self):
ref = self.new_ref(interface='internal')
self.test_create(ref)
def test_create_invalid_interface(self):
ref = self.new_ref(interface=uuid.uuid4().hex)
self.assertRaises(Exception, self.manager.create,
**utils.parameterize(ref))
def test_update_public_interface(self):
ref = self.new_ref(interface='public')
self.test_update(ref)
def test_update_admin_interface(self):
ref = self.new_ref(interface='admin')
self.test_update(ref)
def test_update_internal_interface(self):
ref = self.new_ref(interface='internal')
self.test_update(ref)
def test_update_invalid_interface(self):
ref = self.new_ref(interface=uuid.uuid4().hex)
self.assertRaises(Exception, self.manager.update,
**utils.parameterize(ref))
def test_list_public_interface(self):
interface = 'public'
        expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_admin_interface(self):
interface = 'admin'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_internal_interface(self):
interface = 'admin'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_invalid_interface(self):
interface = uuid.uuid4().hex
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.assertRaises(Exception, self.manager.list,
expected_path=expected_path, interface=interface)
|
mpkato/mobileclick
|
tests/summary_test.py
|
Python
|
mit
| 3,680 | 0.003261 |
# -*- coding:utf-8 -*-
import unittest
import nose
from mobileclick import Iunit, Intent, Summary, SummaryError
class SummaryTestCase(unittest.TestCase):
def setUp(self):
self.qid = 'MC2-E-0001'
        self.i1 = Intent(self.qid, '%s-INTENT0001' % self.qid, 'Link')
self.i2 = Intent(self.qid, '%s-INTENT0002' % self.qid, 'Link')
self.u1 = Iunit(self.qid, '%s-0001' % self.qid, 'A')
self.u2 = Iunit(self.qid, '%s-0002' % self.qid, 'A')
self.u3 = Iunit(self.qid, '%s-0003' % self.qid, 'A')
self.u4 = Iunit(self.qid, '%s-0004' % self.qid, 'A')
self.first = [self.u1, self.i1, self.i2]
self.seconds = {
self.i1.iid: [self.u2],
self.i2.iid: [self.u3, self.u4]
}
self.summary = Summary(self.qid, self.first, self.seconds)
def test_summary_init(self):
'''
Summary.__init__ (validation)
'''
self.assertRaises(SummaryError, Summary, self.qid, self.first, {})
self.assertRaises(SummaryError, Summary, self.qid, [], self.seconds)
self.assertRaises(SummaryError, Summary, self.qid, [1], {})
self.assertRaises(SummaryError, Summary, self.qid,
[self.i1], {self.i1.iid: [self.i2]})
self.assertRaises(SummaryError, Summary, self.qid,
[self.i1, self.i1], {self.i1.iid: [self.u2]})
self.assertRaises(SummaryError, Summary, self.qid,
[Iunit('MC2-E-0002', '0001', 'A')])
def test_summary_property(self):
'''
Summary.first and Summary.second(iid)
'''
self.assertEqual(self.summary.qid, self.qid)
self.assertEqual(len(self.summary.first), 3)
self.assertIsInstance(self.summary.first, tuple)
self.assertEqual(self.summary.first[0].uid, 'MC2-E-0001-0001')
iid = 'MC2-E-0001-INTENT0001'
self.assertIsInstance(self.summary.second(iid), tuple)
self.assertEqual(self.summary.second(iid)[0].uid, 'MC2-E-0001-0002')
iid = 'MC2-E-0001-INTENT0002'
self.assertEqual(self.summary.second(iid)[0].uid, 'MC2-E-0001-0003')
def test_summary_add(self):
'''
Summary.add
'''
s = Summary(self.qid)
s.add(self.i1)
self.assertRaises(SummaryError, s.add, self.i1)
s.add(self.u1)
s.add(self.u2, self.i1.iid)
self.assertRaises(SummaryError, s.add, self.u3, self.i2.iid)
self.assertRaises(SummaryError, s.add, self.i2, self.i2.iid)
s.add(self.i2)
s.add(self.u3, self.i2.iid)
s.add(self.u4, self.i2.iid)
self.assertRaises(SummaryError, s.add, self.i2)
self.assertEqual(s.first[0].iid, self.i1.iid)
self.assertEqual(s.first[1].uid, self.u1.uid)
self.assertEqual(s.second(self.i1.iid)[0].uid, self.u2.uid)
self.assertEqual(s.first[2].iid, self.i2.iid)
self.assertEqual(s.second(self.i2.iid)[0].uid, self.u3.uid)
self.assertEqual(s.second(self.i2.iid)[1].uid, self.u4.uid)
def test_summary_to_xml(self):
'''
Summary.to_xml
'''
from xml.etree.ElementTree import tostring
xml = self.summary.to_xml()
xmlstr = tostring(xml, 'utf-8')
self.assertEqual(xmlstr,
'''<result qid="MC2-E-0001"><first><iunit uid="MC2-E-0001-0001" /><link iid="MC2-E-0001-INTENT0001" /><link iid="MC2-E-0001-INTENT0002" /></first><second iid="MC2-E-0001-INTENT0001"><iunit uid="MC2-E-0001-0002" /></second><second iid="MC2-E-0001-INTENT0002"><iunit uid="MC2-E-0001-0003" /><iunit uid="MC2-E-0001-0004" /></second></result>''')
if __name__ == '__main__':
nose.main(argv=['nose', '-v'])
|
arnaudsj/titanium_mobile
|
support/module/builder.py
|
Python
|
apache-2.0
| 7,110 | 0.03488 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# module builder script
#
import os, sys, shutil, tempfile, subprocess, platform
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
support_dir = os.path.join(template_dir, 'support')
sdk_dir = os.path.dirname(template_dir)
android_support_dir = os.path.join(sdk_dir, 'android')
sys.path.extend([sdk_dir, support_dir, android_support_dir])
from androidsdk import AndroidSDK
from manifest import Manifest
import traceback, uuid, time, thread, string, markdown
from os.path import join, splitext, split, exists
def run_pipe(args, cwd=None):
return subprocess.Popen(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, cwd=cwd)
def print_emulator_line(line):
if line:
s = line.strip()
if s!='':
if s.startswith("["):
print s
else:
print "[DEBUG] %s" % s
sys.stdout.flush()
def run_python(args, cwd=None):
args.insert(0, sys.executable)
return run(args, cwd=cwd)
def run(args, cwd=None):
proc = run_pipe(args, cwd)
rc = None
while True:
print_emulator_line(proc.stdout.readline())
rc = proc.poll()
if rc!=None: break
return rc
def run_ant(project_dir):
build_xml = os.path.join(project_dir, 'build.xml')
ant = 'ant'
if 'ANT_HOME' in os.environ:
ant = os.path.join(os.environ['ANT_HOME'], 'bin', 'ant')
if platform.system() == 'Windows':
ant += '.bat'
ant_args = [ant, '-f', build_xml]
if platform.system() == 'Windows':
ant_args = ['cmd.exe', '/C'] + ant_args
else:
# wrap with /bin/sh in Unix, in some cases the script itself isn't executable
ant_args = ['/bin/sh'] + ant_args
run(ant_args, cwd=project_dir)
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn','CVS'];
android_sdk = None
def copy_resources(source, target):
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
for root, dirs, files in os.walk(source):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
from_ = os.path.join(root, file)
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(split(to_)[0])
if not exists(to_directory):
os.makedirs(to_directory)
shutil.copyfile(from_, to_)
def is_ios(platform):
return platform == 'iphone' or platform == 'ipad' or platform == 'ios'
def is_android(platform):
return platform == 'android'
def stage(platform, project_dir, manifest, callback):
dont_delete = True
dir = tempfile.mkdtemp('ti','m')
print '[DEBUG] Staging module project at %s' % dir
try:
name = manifest.name
moduleid = manifest.moduleid
version = manifest.version
script = os.path.join(template_dir,'..','project.py')
# create a temporary proj
		create_project_args = [script, name, moduleid, dir, platform]
if is_android(platform):
create_project_args.append(android_sdk.get_android_sdk())
run_python(create_project_args)
gen_project_dir = os.path.join(dir, name)
gen_resources_dir = os.path.join(gen_project_dir, 'Resources')
# copy in our example source
copy_resources(os.path.join(project_dir,'example'), gen_resources_dir)
# patch in our tiapp.xml
tiapp = os.path.join(gen_project_dir, 'tiapp.xml')
xml = open(tiapp).read()
tiappf = open(tiapp,'w')
xml = xml.replace('<guid/>','<guid></guid>')
xml = xml.replace('</guid>','</guid>\n<modules>\n<module version="%s">%s</module>\n</modules>\n' % (version,moduleid))
# generate a guid since this is currently done by developer
guid = str(uuid.uuid4())
xml = xml.replace('<guid></guid>','<guid>%s</guid>' % guid)
tiappf.write(xml)
tiappf.close()
module_dir = os.path.join(gen_project_dir,'modules',platform)
if not os.path.exists(module_dir):
os.makedirs(module_dir)
module_zip_name = '%s-%s-%s.zip' % (moduleid.lower(), platform, version)
module_zip = os.path.join(project_dir, 'dist', module_zip_name)
if is_ios(platform):
module_zip = os.path.join(project_dir, module_zip_name)
script = os.path.join(project_dir,'build.py')
run_python([script])
elif is_android(platform):
run_ant(project_dir)
shutil.copy(module_zip, gen_project_dir)
callback(gen_project_dir)
except:
dont_delete = True
traceback.print_exc(file=sys.stderr)
sys.exit(1)
finally:
if not dont_delete: shutil.rmtree(dir)
def docgen(module_dir, dest_dir):
if not os.path.exists(dest_dir):
print "Creating dir: %s" % dest_dir
os.makedirs(dest_dir)
doc_dir = os.path.join(module_dir, 'documentation')
if not os.path.exists(doc_dir):
print "Couldn't find documentation file at: %s" % doc_dir
return
for file in os.listdir(doc_dir):
if file in ignoreFiles or os.path.isdir(os.path.join(doc_dir, file)):
continue
md = open(os.path.join(doc_dir, file), 'r').read()
html = markdown.markdown(md)
filename = string.replace(file, '.md', '.html')
filepath = os.path.join(dest_dir, filename)
print 'Generating %s' % filepath
open(filepath, 'w+').write(html)
# a simplified .properties file parser
def read_properties(file):
properties = {}
for line in file.read().splitlines():
line = line.strip()
if len(line) > 0 and line[0] == '#': continue
if len(line) == 0 or '=' not in line: continue
key, value = line.split('=', 1)
properties[key.strip()] = value.strip().replace('\\\\', '\\')
return properties
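# Illustrative example (not part of the original builder.py): given a
# hypothetical build.properties containing
#
#   # comment lines and blank lines are skipped
#   android.platform=/opt/android-sdk/platforms/android-8
#
# read_properties() would return
#   {'android.platform': '/opt/android-sdk/platforms/android-8'}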
def main(args):
global android_sdk
# command platform project_dir
command = args[1]
platform = args[2]
project_dir = os.path.expanduser(args[3])
manifest = Manifest(os.path.join(project_dir, 'manifest'))
error = False
if is_android(platform):
build_properties = read_properties(open(os.path.join(project_dir, 'build.properties')))
android_sdk_path = os.path.dirname(os.path.dirname(build_properties['android.platform']))
android_sdk = AndroidSDK(android_sdk_path)
if command == 'run':
def run_callback(gen_project_dir):
script = os.path.abspath(os.path.join(template_dir,'..',platform,'builder.py'))
script_args = [script, 'run', gen_project_dir]
if is_android(platform):
script_args.append(android_sdk.get_android_sdk())
rc = run_python(script_args)
# run the project
if rc==1:
if is_ios(platform):
error = os.path.join(gen_project_dir,'build','iphone','build','build.log')
print "[ERROR] Build Failed. See: %s" % os.path.abspath(error)
else:
print "[ERROR] Build Failed."
stage(platform, project_dir, manifest, run_callback)
elif command == 'run-emulator':
if is_android(platform):
def run_emulator_callback(gen_project_dir):
script = os.path.abspath(os.path.join(template_dir, '..', platform, 'builder.py'))
run_python([script, 'run-emulator', gen_project_dir, android_sdk.get_android_sdk()])
stage(platform, project_dir, manifest, run_emulator_callback)
elif command == 'docgen':
if is_android(platform):
dest_dir = args[4]
docgen(project_dir, dest_dir)
if error:
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
|
marvinlenk/subsystem_entropy_epsplots
|
pyplot_eps/ent_eps.py
|
Python
|
bsd-2-clause
| 6,836 | 0.00395 |
import numpy as np
import os
from mpEntropy import mpSystem
import matplotlib as mpl
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
from scipy.signal import savgol_filter
from scipy.optimize import curve_fit
# This is a workaround until scipy fixes the issue
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Load sysVar
sysVar = mpSystem("../interact_0.ini", plotOnly=True)
# Create plot folder
pltfolder = "./epsplots/"
if not os.path.exists(pltfolder):
os.mkdir(pltfolder)
print("Plotting", end='')
mpl.use('Agg')
# minimum and maximum times to plot
min_time = 0
max_time = 3
inlay_min_time = 10
inlay_max_time = 100
inlay_log_min_time = 0
inlay_log_max_time = 3
# styles and stuff
avgstyle = 'dashed'
avgsize = 0.6
expectstyle = 'solid'
expectsize = 1
legend_size = 10
font_size = 10
# https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/LaTeX_Examples.html
fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inches
golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width * golden_mean # height in inches
fig_size = [fig_width, fig_height]
# padding in units of fontsize
padding = 0.32
params = {
'axes.labelsize': 10,
'font.size': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'lines.linewidth': 1,
'figure.figsize': fig_size,
'legend.frameon': False,
'legend.loc': 'best',
'mathtext.default': 'rm' # see http://matplotlib.org/users/customizing.html
}
plt.rcParams['agg.path.chunksize'] = 0
plt.rcParams.update(params)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Arial']})
loavgpercent = sysVar.plotLoAvgPerc # percentage of time evolution to start averaging
loavgind = int(loavgpercent * sysVar.dataPoints) # index to start at when calculating average and stddev
loavgtime = np.round(loavgpercent * (sysVar.deltaT * sysVar.steps * sysVar.plotTimeScale), 2)
# stuff for averaging
if sysVar.boolPlotAverages:
print(' with averaging from Jt=%.2f' % loavgtime, end='')
fwidth = sysVar.plotSavgolFrame
ford = sysVar.plotSavgolOrder
ent_array = np.loadtxt('../data/entropy.txt')
# multiply step array with time scale
step_array = ent_array[:, 0] * sysVar.plotTimeScale
min_index = int(min_time / step_array[-1] * len(step_array))
max_index = int(max_time / step_array[-1] * len(step_array))
inlay_min_index = int(inlay_min_time / step_array[-1] * len(step_array))
inlay_max_index = int(inlay_max_time / step_array[-1] * len(step_array))
inlay_log_min_index = int(inlay_log_min_time / step_array[-1] * len(step_array))
inlay_log_max_index = int(inlay_log_max_time / step_array[-1] * len(step_array))
#### Complete system Entropy
if os.path.isfile('../data/total_entropy.txt'):
totent_array = np.loadtxt('../data/total_entropy.txt')
plt.plot(totent_array[min_index:max_index, 0] * sysVar.plotTimeScale, totent_array[min_index:max_index, 1] * 1e13,
linewidth=0.6, color='r')
plt.grid()
plt.xlabel(r'$J\,t$')
plt.ylabel(r'Total system entropy $/ 10^{-13}$')
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'entropy_total.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
### Subsystem Entropy
fldat = open(pltfolder + 'ent_fluctuation_N' + str(sysVar.N) + '.txt', 'w')
fldat.write('N_tot: %i\n' % sysVar.N)
avg = np.mean(ent_array[loavgind:, 1], dtype=np.float64)
stddev = np.std(ent_array[loavgind:, 1], dtype=np.float64)
fldat.write('ssent_average: %.16e\n' % avg)
fldat.write('ssent_stddev: %.16e\n' % stddev)
fldat.write('ssent_rel._fluctuation: %.16e\n' % (stddev / avg))
fldat.close()
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'entropy_subsystem.eps', format='eps', dpi=1000)
plt.clf()
# Subsystem entropy with logarithmic inlay
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
a = plt.axes([.5, .35, .4, .4])
plt.semilogy(step_array[inlay_log_min_index:inlay_log_max_index],
np.abs(avg - ent_array[inlay_log_min_index:inlay_log_max_index, 1]), color='r')
plt.ylabel(r'$|\,\overline{S}\textsubscript{sys} - S\textsubscript{sys}(t)|$')
plt.yticks([])
plt.savefig(pltfolder + 'entropy_subsystem_inlay_log.eps', format='eps', dpi=1000)
plt.clf()
# Subsystem entropy with inlay
plt.plot(step_array[min_index:max_index], ent_array[min_index:max_index, 1], color='r')
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:, 1], fwidth, ford)
plt.plot(step_array[loavgind:], tavg[loavgind:], linewidth=avgsize, linestyle=avgstyle, color='black')
plt.xlabel(r'$J\,t$')
plt.ylabel(r'$S\textsubscript{sys}$')
a = plt.axes([.45, .35, .4, .4])
plt.plot(step_array[inlay_min_index:inlay_max_index], avg - ent_array[inlay_min_index:inlay_max_index, 1],
linewidth=0.2, color='r')
plt.ylabel(r'$\overline{S}\textsubscript{sys} - S\textsubscript{sys}(t)$')
a.yaxis.tick_right()
tmp_ticks = list(a.get_xticks())
tmp_ticks.pop(0)
if tmp_ticks[-1] >= inlay_max_time:
tmp_ticks.pop(-1)
a.set_xticks(tmp_ticks + [inlay_min_time])
plt.savefig(pltfolder + 'entropy_subsystem_inlay.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
# histogram of fluctuations
n, bins, patches = plt.hist(ent_array[loavgind:, 1] - avg, 51, normed=1, rwidth=0.8, align='mid')
(mu, sigma) = norm.fit(ent_array[loavgind:, 1] - avg)
y = mlab.normpdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--')
mu_magnitude = np.floor(np.log10(np.abs(mu)))
mu /= np.power(10, mu_magnitude)
sigma_magnitude = np.floor(np.log10(sigma))
sigma /= np.power(10, sigma_magnitude)
plt.figtext(0.965, 0.80,
'$\mu = %.2f \cdot 10^{%i}$\n$\sigma = %.2f \cdot 10^{%i}$' % (mu, mu_magnitude, sigma, sigma_magnitude),
ha='right', va='bottom', multialignment="left")
plt.xlabel(r'$\Delta S_{sub}$')
plt.ylabel(r'PD')
plt.tight_layout(padding)
plt.savefig(pltfolder + 'entropy_subsystem_fluctuations.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
print(" done!")
|
mitchcapper/mythbox
|
resources/lib/mysql-connector-python/python2/examples/unicode.py
|
Python
|
gpl-2.0
| 2,921 | 0.003766 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
import mysql.connector
"""
Example using MySQL Connector/Python showing:
* the usefulness of unicode, if it works correctly..
* dropping and creating a table
* inserting and selecting a row
"""
info = """
For this to work you need to make sure your terminal can output
unicode characters correctly. Check if the encoding of your terminal
is set to UTF-8.
"""
def main(config):
output = []
db = mysql.connector.Connect(**config)
cursor = db.cursor()
# Show the unicode string we're going to use
unistr = u"\u00bfHabla espa\u00f1ol?"
output.append("Unicode string: %s" % unistr.encode('utf8'))
# Drop table if exists, and create it new
stmt_drop = "DROP TABLE IF EXISTS unicode"
cursor.execute(stmt_drop)
stmt_create = """
CREATE TABLE unicode (
id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
str VARCHAR(50) DEFAULT '' NOT NULL,
PRIMARY KEY (id)
) CHARACTER SET 'utf8'"""
cursor.execute(stmt_create)
# Insert a row
stmt_insert = "INSERT INTO unicode (str) VALUES (%s)"
cursor.execute(stmt_insert, (unistr,))
# Select it again and show it
stmt_select = "SELECT str FROM unicode WHERE id = %s"
cursor.execute(stmt_select, (1,))
row = cursor.fetchone()
output.append("Unicode string coming from db: %s" % row[0].encode('utf8'))
# Cleaning up, dropping the table again
cursor.execute(stmt_drop)
cursor.close()
db.close()
return output
if __name__ == '__main__':
#
# Configure MySQL login and database to use in config.py
#
from config import Config
config = Config.dbinfo().copy()
print info
out = main(config)
print '\n'.join(out)
|
disqus/Diamond
|
src/collectors/snmpraw/snmpraw.py
|
Python
|
mit
| 6,089 | 0.000493 |
# coding=utf-8
"""
The SNMPRawCollector is designed for collecting data from SNMP-enabled devices,
using a set of specified OIDs
#### Configuration
Below is an example configuration for the SNMPRawCollector. The collector
can collect data from any number of devices by adding configuration sections
under the *devices* header. By default the collector will collect every 60
seconds. This might be a bit excessive and put unnecessary load on the
devices being polled. You may wish to change this to every 300 seconds. However
you need to modify your graphite data retentions to handle this properly.
```
# Options for SNMPRawCollector
enabled = True
interval = 60
[devices]
# Start the device configuration
# Note: this name will be used in the metric path.
[[my-identification-for-this-host]]
host = localhost
port = 161
community = public
# Start the OID list for this device
# Note: the value part will be used in the metric path.
[[[oids]]]
1.3.6.1.4.1.2021.10.1.3.1 = cpu.load.1min
1.3.6.1.4.1.2021.10.1.3.2 = cpu.load.5min
1.3.6.1.4.1.2021.10.1.3.3 = cpu.load.15min
# If you want another host, you can. But you probably won't need it.
[[another-identification]]
host = router1.example.com
port = 161
community = public
[[[oids]]]
oid = metric.path
oid = metric.path
```
Note: If you modify the SNMPRawCollector configuration, you will need to
restart diamond.
#### Dependencies
 * pysnmp (which depends on pyasn1 0.1.7 and pycrypto)
"""
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'snmp'))
from snmp import SNMPCollector as parent_SNMPCollector
from diamond.metric import Metric
class SNMPRawCollector(parent_SNMPCollector):
def __init__(self, *args, **kwargs):
super(SNMPRawCollector, self).__init__(*args, **kwargs)
# list to save non-existing oid's per device, to avoid repetition of
# errors in logging. restart diamond/collector to flush this
self.skip_list = []
def get_default_config(self):
"""
Override SNMPCollector.get_default_config method to provide
default_config for the SNMPInterfaceCollector
"""
default_config = super(SNMPRawCollector,
self).get_default_config()
        default_config.update({
'oids': {},
'path_prefix': 'servers',
'path_suffix': 'snmp',
})
return default_config
def _precision(self, value):
"""
Return the precision of the number
"""
value = str(value)
decimal = value.rfind('.')
if decimal == -1:
return 0
return len(value) - decimal - 1
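    # Illustrative values (not in the original source): _precision('10.25') -> 2,
    # _precision(10) -> 0, _precision('3.5') -> 1, i.e. the number of digits after
    # the decimal point, which collect_snmp() later passes to Metric(precision=...).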
def _skip(self, device, oid, reason=None):
        self.skip_list.append((device, oid))
if reason is not None:
self.log.warn('Muted \'{0}\' on \'{1}\', because: {2}'.format(
oid, device, reason))
def _get_value_walk(self, device, oid, host, port, community):
data = self.walk(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#2)')
return
self.log.debug('Data received from WALK \'{0}\': [{1}]'.format(
device, data))
if len(data) != 1:
self._skip(
device,
oid,
'unexpected response, data has {0} entries'.format(
len(data)))
return
# because we only allow 1-key dicts, we can pick with absolute index
value = data.items()[0][1]
return value
def _get_value(self, device, oid, host, port, community):
data = self.get(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#1)')
return
self.log.debug('Data received from GET \'{0}\': [{1}]'.format(
device, data))
if len(data) == 0:
self._skip(device, oid, 'empty response, device down?')
return
if oid not in data:
# oid is not even in hierarchy, happens when using 9.9.9.9
# but not when using 1.9.9.9
self._skip(device, oid, 'no object at OID (#1)')
return
value = data[oid]
if value == 'No Such Object currently exists at this OID':
self._skip(device, oid, 'no object at OID (#2)')
return
if value == 'No Such Instance currently exists at this OID':
return self._get_value_walk(device, oid, host, port, community)
return value
def collect_snmp(self, device, host, port, community):
"""
Collect SNMP interface data from device
"""
self.log.debug(
'Collecting raw SNMP statistics from device \'{0}\''.format(device))
dev_config = self.config['devices'][device]
if 'oids' in dev_config:
for oid, metricName in dev_config['oids'].items():
if (device, oid) in self.skip_list:
self.log.debug(
'Skipping OID \'{0}\' ({1}) on device \'{2}\''.format(
oid, metricName, device))
continue
timestamp = time.time()
value = self._get_value(device, oid, host, port, community)
if value is None:
continue
self.log.debug(
'\'{0}\' ({1}) on device \'{2}\' - value=[{3}]'.format(
oid, metricName, device, value))
path = '.'.join([self.config['path_prefix'], device,
self.config['path_suffix'], metricName])
metric = Metric(path=path, value=value, timestamp=timestamp,
precision=self._precision(value),
metric_type='GAUGE')
self.publish_metric(metric)
|
kanishka-linux/AnimeWatch
|
AnimeWatch-Debian-PyQt5/create_deb.py
|
Python
|
gpl-3.0
| 1,480 | 0.032432 |
import os
import sys
import shutil
import subprocess
BASEDIR,BASEFILE = os.path.split(os.path.abspath(__file__))
print(BASEDIR,BASEFILE,os.getcwd())
par_dir,cur_dir = os.path.split(BASEDIR)
src_dir = os.path.join(par_dir,'AnimeWatch-PyQt5')
deb_config_dir = os.path.join(BASEDIR,'DEBIAN')
control_file = os.path.join(deb_config_dir,'control')
lines = open(control_file,'r').readlines()
dest_dir = None
exec_file = os.path.join(BASEDIR,'anime-watch')
desk_file = os.path.join(BASEDIR,'AnimeWatch.desktop')
for i in lines:
i = i.strip()
if i.startswith('Version:'):
version_num = i.replace('Version:','',1).strip()
dest_dir = os.path.join(BASEDIR,'AnimeWatch-'+version_num)
break
usr_share = os.path.join(dest_dir,'usr','share','applications')
usr_bin = os.path.join(dest_dir,'usr','bin')
usr_share_animewatch = os.path.join(dest_dir,'usr','share','AnimeWatch')
if dest_dir:
if os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
os.makedirs(dest_dir)
os.makedirs(usr_share)
os.makedirs(usr_bin)
shutil.copytree(deb_config_dir,os.path.join(dest_dir,'DEBIAN'))
shutil.copy(exec_file,usr_bin)
shutil.copy(desk_file,usr_share)
shutil.copytree(src_dir,usr_share_animewatch)
subprocess.call(['dpkg-deb','--build',dest_dir])
deb_pkg = os.path.basename(dest_dir)+'.deb'
print('deb package created successfully in current directory. Now install the package using command: \n\nsudo gdebi {0}\n\n'.format(deb_pkg))
else:
print('no version number in control file')
|
tequa/ammisoft
|
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/jedi/evaluate/iterable.py
|
Python
|
bsd-3-clause
| 31,484 | 0.00127 |
"""
Contains all classes and functions to deal with lists, dicts, generators and
iterators in general.
Array modifications
*******************
If the content of an array (``set``/``list``) is requested somewhere, the
current module will be checked for appearances of ``arr.append``,
``arr.insert``, etc. If the ``arr`` name points to an actual array, the
content will be added.
This can be really CPU intensive, as you can imagine, because |jedi| has to
follow **every** ``append`` and check whether it's the right array. However this
works pretty well, because in *slow* cases, the recursion detector and other
settings will stop this process.
It is important to note that:
1. Array modifications work only in the current module.
2. Jedi only checks array additions; ``list.pop``, etc. are ignored.
"""
from jedi import debug
from jedi import settings
from jedi import common
from jedi.common import unite, safe_property
from jedi._compatibility import unicode, zip_longest, is_py3
from jedi.evaluate import compiled
from jedi.evaluate import helpers
from jedi.evaluate import analysis
from jedi.evaluate import pep0484
from jedi.evaluate import context
from jedi.evaluate import precedence
from jedi.evaluate import recursion
from jedi.evaluate.cache import memoize_default
from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, \
ParserTreeFilter
class AbstractSequence(context.Context):
builtin_methods = {}
api_type = 'instance'
def __init__(self, evaluator):
super(AbstractSequence, self).__init__(evaluator, evaluator.BUILTINS)
def get_filters(self, search_global, until_position=None, origin_scope=None):
raise NotImplementedError
@property
def name(self):
return compiled.CompiledContextName(self, self.array_type)
class BuiltinMethod(object):
"""``Generator.__next__`` ``dict.values`` methods and so on."""
    def __init__(self, builtin_context, method, builtin_func):
self._builtin_context = builtin_context
self._method = method
self._builtin_func = builtin_func
def py__call__(self, params):
return self._method(self._builtin_context)
def __getattr__(self, name):
return getattr(self._builtin_func, name)
class SpecialMethodFilter(DictFilter):
"""
A filter for methods that are defined in this module on the corresponding
classes like Generator (for __next__, etc).
"""
class SpecialMethodName(AbstractNameDefinition):
api_type = 'function'
def __init__(self, parent_context, string_name, callable_, builtin_context):
self.parent_context = parent_context
self.string_name = string_name
self._callable = callable_
self._builtin_context = builtin_context
def infer(self):
filter = next(self._builtin_context.get_filters())
# We can take the first index, because on builtin methods there's
# always only going to be one name. The same is true for the
# inferred values.
builtin_func = next(iter(filter.get(self.string_name)[0].infer()))
return set([BuiltinMethod(self.parent_context, self._callable, builtin_func)])
def __init__(self, context, dct, builtin_context):
super(SpecialMethodFilter, self).__init__(dct)
self.context = context
self._builtin_context = builtin_context
"""
        This context is what will be used to introspect the name, whereas the
other context will be used to execute the function.
We distinguish, because we have to.
"""
def _convert(self, name, value):
return self.SpecialMethodName(self.context, name, value, self._builtin_context)
def has_builtin_methods(cls):
base_dct = {}
# Need to care properly about inheritance. Builtin Methods should not get
# lost, just because they are not mentioned in a class.
for base_cls in reversed(cls.__bases__):
try:
base_dct.update(base_cls.builtin_methods)
except AttributeError:
pass
cls.builtin_methods = base_dct
for func in cls.__dict__.values():
try:
cls.builtin_methods.update(func.registered_builtin_methods)
except AttributeError:
pass
return cls
def register_builtin_method(method_name, python_version_match=None):
def wrapper(func):
if python_version_match and python_version_match != 2 + int(is_py3):
# Some functions do only apply to certain versions.
return func
dct = func.__dict__.setdefault('registered_builtin_methods', {})
dct[method_name] = func
return func
return wrapper
@has_builtin_methods
class GeneratorMixin(object):
array_type = None
@register_builtin_method('send')
@register_builtin_method('next', python_version_match=2)
@register_builtin_method('__next__', python_version_match=3)
def py__next__(self):
# TODO add TypeError if params are given.
return unite(lazy_context.infer() for lazy_context in self.py__iter__())
def get_filters(self, search_global, until_position=None, origin_scope=None):
gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
yield SpecialMethodFilter(self, self.builtin_methods, gen_obj)
for filter in gen_obj.get_filters(search_global):
yield filter
def py__bool__(self):
return True
def py__class__(self):
gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
return gen_obj.py__class__()
@property
def name(self):
return compiled.CompiledContextName(self, 'generator')
class Generator(GeneratorMixin, context.Context):
"""Handling of `yield` functions."""
def __init__(self, evaluator, func_execution_context):
super(Generator, self).__init__(evaluator, parent_context=evaluator.BUILTINS)
self._func_execution_context = func_execution_context
def py__iter__(self):
return self._func_execution_context.get_yield_values()
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
class CompForContext(context.TreeContext):
@classmethod
def from_comp_for(cls, parent_context, comp_for):
return cls(parent_context.evaluator, parent_context, comp_for)
def __init__(self, evaluator, parent_context, comp_for):
super(CompForContext, self).__init__(evaluator, parent_context)
self.tree_node = comp_for
def get_node(self):
return self.tree_node
def get_filters(self, search_global, until_position=None, origin_scope=None):
yield ParserTreeFilter(self.evaluator, self)
class Comprehension(AbstractSequence):
@staticmethod
def from_atom(evaluator, context, atom):
bracket = atom.children[0]
if bracket == '{':
if atom.children[1].children[1] == ':':
cls = DictComprehension
else:
cls = SetComprehension
elif bracket == '(':
cls = GeneratorComprehension
elif bracket == '[':
cls = ListComprehension
return cls(evaluator, context, atom)
def __init__(self, evaluator, defining_context, atom):
super(Comprehension, self).__init__(evaluator)
self._defining_context = defining_context
self._atom = atom
def _get_comprehension(self):
# The atom contains a testlist_comp
return self._atom.children[1]
def _get_comp_for(self):
# The atom contains a testlist_comp
return self._get_comprehension().children[1]
def _eval_node(self, index=0):
"""
The first part `x + 1` of the list comprehension:
[x + 1 for x in foo]
"""
return self._get_comprehension().children[index]
@memoize_default()
def _get_comp_for_context(self, parent_context, comp_for):
# TODO shouldn't this be part of create_context?
return CompForContext.from_comp_for(parent_context, comp_for)
def _nested(self, comp_fors, parent_context=None
|
alex/flanker
|
flanker/mime/message/errors.py
|
Python
|
apache-2.0
| 223 | 0 |
class MimeError(Exception):
pass
class DecodingError(MimeError):
"""Thrown when there is
|
an encoding error."""
pass
class EncodingError(MimeError):
    """Thrown when there is an encoding error."""
pass
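# Hypothetical usage (not part of this file): calling code would typically do
#     raise DecodingError("could not parse message part")
# and catch MimeError further up, since both error classes derive from it and
# can therefore be handled together.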
|
ylow/SFrame
|
oss_src/unity/python/sframe/util/metric_mock.py
|
Python
|
bsd-3-clause
| 1,457 | 0.019218 |
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import logging
import pprint
class ConsumerMock:
def __init__(self):
self._num_flushes = 0
def flush(self):
self._num_flushes += 1
class MetricMock:
def __init__(self, event_limit=-1):
self._consumer = ConsumerMock()
        self._calls = []
self._pp = pprint.PrettyPrinter(indent=2)
self._event_limit = event_limit
self.logger = logging.getLogger(__name__)
def track(self, distinct_id, event_name, properties={}, meta={}):
        if self._event_limit < 0 or len(self._calls) < self._event_limit:
self._calls.append( {'method':'track',
'distinct_id':distinct_id,
'event_name':event_name,
'properties':properties,
'meta':meta,
})
def submit(self, name, value, type, source, attributes):
self._calls.append({'method':'submit',
'name':name,
'value':value,
'type':type,
'source':source,
'attributes':attributes})
def dump_calls(self):
#self._pp.pprint(self._calls)
self.logger.info(self._calls)
def dump(self):
self.logger.info("Number of flushes: %g" % self._consumer._num_flushes)
self.dump_calls()
|
kvs6rj/cs3240-labdemo
|
helper.py
|
Python
|
mit
| 61 | 0.032787 |
__author__ = 'k-sthan(II)'
def greeting(msg):
print(msg)
|
nephthys/insidetags
|
functions.py
|
Python
|
agpl-3.0
| 5,173 | 0.010632 |
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2010 Camille "nephthys" Bouiller <aftercem@gmail.com>
InsideTags is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db.models import Q
from django.core.cache import cache
from django.http import HttpRequest
from django.utils.cache import get_cache_key
from django.utils.safestring import mark_safe
from django.conf import settings
import re, urllib
def expire_page(path):
request = HttpRequest()
request.path = path
key = get_cache_key(request)
if cache.has_key(key):
cache.delete(key)
def not_combining(char):
return unicodedata.category(char) != 'Mn'
def strip_accents(value):
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii','ignore')
return value
def normalize_query(query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
'''
    Splits the query string into individual keywords, getting rid of unnecessary spaces
and grouping quoted words together.
Example:
>>> normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
Source : http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
def get_query(query_string, search_fields):
'''
Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
Source : http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
'''
query = None # Query to search for every search term
terms = normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
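# Hypothetical usage (not part of this module): the two helpers above can be
# combined in a Django view to filter a model by a free-text search box, e.g.
#
#     entry_query = get_query(request.GET['q'], ['title', 'body'])
#     found_entries = Entry.objects.filter(entry_query)
#
# where ``Entry`` and its field names are placeholder assumptions.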
def url_encode(url):
dict = urllib.urlencode({'key': url})
return dict[4:]
def url_decode(url):
return urllib.unquote_plus(url)
BITLY_LOGIN = getattr(settings, 'BITLY_LOGIN', None)
BITLY_APIKEY = getattr(settings, 'BITLY_APIKEY', None)
TWITTER_LOGIN = getattr(settings, 'TWITTER_LOGIN', None)
TWITTER_PASS = getattr(settings, 'TWITTER_PASSWORD', None)
def shorten_url(long_url, login_user, api_key):
values = {
'version': '2.0.1',
'longUrl': long_url,
'login': BITLY_LOGIN,
'apiKey': BITLY_APIKEY
}
params = urllib.urlencode(values)
request = urllib.urlopen('http://api.bit.ly/shorten?%s' % params)
responde = request.read()
request.close()
responde_dict = eval(responde)
try:
short_url = responde_dict['results'][long_url]['shortUrl']
except:
print responde_dict
pass
return short_url
def post_to_twitter(url, title, tags):
if not BITLY_LOGIN or not BITLY_APIKEY or not TWITTER_LOGIN or not TWITTER_PASS:
return
import twitter
url = shorten_url(url, BITLY_LOGIN, BITLY_APIKEY)
tweet = '%s %s' % (title, url)
hashtags = ''
if tags:
tags = tags.replace(',', '')
new_tags = list()
for tag in tags.split():
new_tags.append('#%s' % tag)
hashtags = ' '.join(new_tags)
if len(tweet) > 140:
title = truncate_chars(title, 140-4-len(url))
tweet = '%s %s' % (title, url)
for tag in hashtags.split():
if (len(tweet) + len(tag) + 1) <= 140:
tweet += ' %s' % tag
    api = twitter.Api(username=TWITTER_LOGIN, password=TWITTER_PASS)
api.PostUpdates(tweet)
return url
def twitterfy(text):
'''
Parse links, @replies and #hashtags
Source : http://teebes.com/blog/17/simple-python-twitter-rss-feed-parser
'''
text = re.sub(r'(http://(\w|\.|/|\?|=|%|&)+)', \
lambda x: '<a href="%s">%s</a>' % (x.group().strip(), x.group().strip()), text)
text = re.sub(r'@(\w+)', lambda x: '<a href="http://twitter.com/%s">%s</a>' \
% (x.group()[1:], x.group()), text)
text = re.sub(r'#(\w+)', lambda x: '<a href="http://twitter.com/search?q=%%23%s">%s</a>' \
% (x.group()[1:], x.group()), text)
return mark_safe(text)
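# Illustrative example (assumed input/output, not taken from the project):
#     twitterfy('reading http://example.com with @alice #django')
# returns the same text with the URL, the @alice mention and the #django hashtag
# each wrapped in an <a> link (twitter.com profile and search URLs respectively),
# marked safe for template rendering.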
|
grensjo/borg-summon
|
test/test_config_parser.py
|
Python
|
mit
| 1,894 | 0.000528 |
import pytest
from unittest.mock import patch
from borg_summon import config_parser
from .util import mock_globbing, mock_multiple_opens
def test_merge():
d1 = {
'a': 'a',
'b': {
'c': 'c',
'd': [1, 2, 3],
'e': [1, 2, 3],
},
'c': {
'd': 3,
},
'd': 3,
}
d2 = {
'b': {
'c': 'C',
'd': [3, 4, 5],
'e': 0,
},
'c': 0,
'd': 'd',
'g': 'g',
}
res = {
        'a': 'a',
'b': {
'c': 'C',
'd': [1, 2, 3, 3, 4, 5],
'e': 0,
},
'c': 0,
'd': 'd',
'g': 'g',
}
config_parser.merge(d1, d2)
assert str(d1) == str(res)
def test_cyclic_include():
mock_globbing()
m_open = mock_multiple_opens([
'include = ["b.toml"]',
'include = ["c.toml"]',
'include = ["a.toml"]',
'include = ["b.toml"]',
])
with patch('borg_summon.config_parser.open', m_open, create=True):
with pytest.raises(config_parser.CyclicIncludeError) as excinfo:
config_parser.get_from_file('a.toml')
assert 'includes itself' in str(excinfo.value)
@patch('logging.warning')
def test_multiple_include(warning):
mock_globbing()
m_open = mock_multiple_opens([
'include = ["b.toml", "c.toml"]',
'include = ["d.toml"]',
'log_level = "info"',
'include = ["d.toml"]',
'log_level = "info"',
])
with patch('borg_summon.config_parser.open', m_open, create=True):
config_parser.get_from_file('a.toml')
assert warning.call_count == 1
assert 'included multiple times' in str(warning.mock_calls[0])
|
stdweird/aquilon
|
tests/broker/test_del_disk.py
|
Python
|
apache-2.0
| 2,362 | 0.00127 |
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del disk command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelDisk(TestBrokerCommand):
def testdelut3c1n3sda(self):
self.noouttest(["del", "disk", "--machine", "ut3c1n3",
"--controller", "scsi", "--size", "68"])
def testdelut3c1n3sdb(self):
self.noouttest(["del", "disk", "--machine", "ut3c1n3",
"--disk", "c0d0"])
def testverifydelut3c1n3sda(self):
command = "show machine --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchclean(out, "Disk: sda 68 GB scsi", command)
def testverifydelut3c1n3sdb(self):
command = "show machine --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchclean(out, "Disk: c0d0", command)
# This should now list the 34 GB disk that was added previously...
def testverifycatut3c1n3disk(self):
command = "cat --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchclean(out, "harddisks", command)
def testfaildelunknowntype(self):
command = ["del", "disk", "--machine", "ut3c1n3",
"-
|
-type", "type-does-not-exist"]
out = self.badrequesttest(command)
self.matchoutput(out,
"type-does-not-exist is not a valid controller type",
command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelDisk)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
paulopinda/simplemooc
|
simplemooc/courses/migrations/0002_auto_20160625_1845.py
|
Python
|
gpl-2.0
| 454 | 0.002203 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-25 18:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='course',
            options={'ordering': ['name'], 'verbose_name': 'Curso', 'verbose_name_plural': 'Cursos'},
),
]
|
mileswwatkins/billy
|
billy/models/feeds.py
|
Python
|
bsd-3-clause
| 5,523 | 0.000362 |
import re
import urlparse
import datetime
from django.template.defaultfilters import truncatewords
from billy.core import mdb as db, feeds_db, settings
from .base import Document
from .metadata import Metadata
class FeedEntry(Document):
collection = feeds_db.entries
def __init__(self, *args, **kw):
super(FeedEntry, self).__init__(*args, **kw)
def build(self, billy_db=db):
'''Mutate the feed entry with hyperlinked entities. Add tagging
data and other template context values, including source.
'''
self_legislator = self.legislator
entity_types = {'L': 'legislator',
'C': 'committee',
'B': 'bill'}
entry = self
summary = truncatewords(entry['summary'], 50)
entity_strings = entry['entity_strings']
entity_ids = entry['entity_ids']
_entity_strings = []
_entity_ids = []
_entity_urls = []
_done = []
if entity_strings:
data = zip(entity_strings, entity_ids)
data = sorted(data, key=lambda t: len(t[0]), reverse=True)
hyperlinked_spans = []
for entity_string, _id in data:
if entity_string in _done:
continue
else:
_done.append(entity_string)
_entity_strings.append(entity_string)
_entity_ids.append(_id)
# Get this entity's url.
collection_name = entity_types[_id[2]] + 's'
collection = getattr(billy_db, collection_name)
if collection_name == 'legislators':
cursor = collection.find({'_all_ids': _id})
assert cursor.count() == 1
instance = cursor.next()
else:
instance = collection.find_one(_id)
url = instance.get_absolute_url()
_entity_urls.append(url)
# This is tricky. Need to hyperlink the entity without mangling
# other previously hyperlinked strings, like Fiona Ma and
# Mark Leno.
matches = re.finditer(entity_string, summary)
if _id != self_legislator.id:
# For other entities, add a hyperlink.
replacer = lambda m: '<a href="%s">%s</a>' % (
url, entity_string)
else:
# If this id refers to the related legislator, bold it.
replacer = lambda m: '<strong>%s</strong>' % entity_string
for match in matches:
# Only hyperlink if no previous hyperlink has been added
# in the same span.
if any((start <= n < stop) for n in match.span()
for (start, stop) in hyperlinked_spans):
continue
summary = re.sub(entity_string, replacer, summary)
hyperlinked_spans.append(match.span())
        # For entity_strings, use modelinstance.display_name strings.
_entity_display_names = []
for _id in _entity_ids:
collection_name = entity_types[_id[2]] + 's'
collection = getattr(billy_db, collection_name)
if collection_name == 'legislators':
cursor = collection.find({'_all_ids': _id})
assert cursor.count() == 1
instance = cursor.next()
else:
instance = collection.find_one(_id)
string = instance.display_name()
_entity_display_names.append(string)
entity_data = zip(_entity_strings, _entity_display_names,
_entity_ids, _entity_urls)
_entity_data = []
seen_display_names = []
for string, display_name, _id, url in entity_data:
if display_name not in seen_display_names:
_entity_data.append((string, display_name, _id, url))
seen_display_names.append(display_name)
entry['summary'] = summary
entry['entity_data'] = _entity_data
entry['id'] = entry['_id']
urldata = urlparse.urlparse(entry['link'])
entry['source'] = urldata.scheme + urldata.netloc
entry['host'] = urldata.netloc
# Prevent obfuscation of `published` method in template rendering.
if 'published' in entry:
del entry['published']
return ''
def display(self):
return self['summary']
def published(self):
if 'published_parsed' in self:
published_parsed = self['published_parsed']
if published_parsed is not None:
return datetime.datetime.fromtimestamp(
self['published_parsed'])
# Try alternative format.
published = self['published']
try:
datetime.datetime.strptime(published, '%b %d %H:%M:%S %Y')
except ValueError:
pass
elif 'updated_parsed' in self:
# Fall back to `updated` date.
return datetime.datetime.fromtimestamp(self['updated_parsed'])
else:
# Let this field be blank.
return
@property
def metadata(self):
return Metadata.get_object(self[settings.LEVEL_FIELD])
|
molliewebb/aston
|
aston/tracefile/AgilentMS.py
|
Python
|
gpl-3.0
| 11,532 | 0.002168 |
# -*- coding: utf-8 -*-
import os.path as op
import gzip
import io
import struct
from datetime import datetime
from xml.etree import ElementTree
import numpy as np
import scipy.sparse
from aston.resources import cache
from aston.trace.Trace import AstonSeries, AstonFrame
from aston.tracefile.TraceFile import TraceFile, ScanListFile
from aston.spectra.Scan import Scan
class AgilentMS(TraceFile):
ext = 'MS'
mgc = '0132'
traces = ['#ms']
def total_trace(self, twin=None):
#TODO: use twin?
f = open(self.filename, 'rb')
# get number of scans to read in
f.seek(0x5)
if f.read(4) == 'GC':
f.seek(0x142)
else:
f.seek(0x118)
nscans = struct.unpack('>H', f.read(2))[0]
# find the starting location of the data
f.seek(0x10A)
f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2)
tme = np.zeros(nscans)
tic = np.zeros(nscans)
for i in range(nscans):
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
tme[i] = struct.unpack('>I', f.read(4))[0] / 60000.
f.seek(npos - 4)
tic[i] = struct.unpack('>I', f.read(4))[0]
f.seek(npos)
f.close()
return AstonSeries(tic, tme, name='TIC')
@property
@cache(maxsize=1)
def data(self):
f = open(self.filename, 'rb')
# get number of scans to read in
# note that GC and LC chemstation store this in slightly different
# places
f.seek(0x5)
if f.read(4) == 'GC':
f.seek(0x142)
else:
f.seek(0x118)
nscans = struct.unpack('>H', f.read(2))[0]
f.seek(0x10A)
f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2)
dstart = f.tell()
# determine total number of measurements in file
tot_pts = 0
rowst = np.empty(nscans + 1, dtype=int)
rowst[0] = 0
for scn in range(nscans):
# get the position of the next scan
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
# keep a running total of how many measurements
tot_pts += (npos - f.tell() - 26) / 4
rowst[scn + 1] = tot_pts
# move forward
f.seek(npos)
# go back to the beginning and load all the other data
f.seek(dstart)
ions = []
i_lkup = {}
cols = np.empty(tot_pts, dtype=int)
vals = np.empty(tot_pts, dtype=np.int32)
times = np.empty(nscans)
for scn in range(nscans):
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
            # the sampling rate is evidently 60 kHz on all Agilent's MS's
times[scn] = struct.unpack('>I', f.read(4))[0] / 60000.
f.seek(f.tell() + 12)
npts = rowst[scn + 1] - rowst[scn]
mzs = struct.unpack('>' + npts * 'HH', f.read(npts * 4))
# there's some bug in the numpy implementation that makes this fail
# after the first time
#mzs = np.fromfile(f, dtype='>H', count=npts * 2)
nions = set(mzs[0::2]).difference(i_lkup)
i_lkup.update({ion: i + len(ions) for i, ion in enumerate(nions)})
ions += nions
cols[rowst[scn]:rowst[scn + 1]] = \
[i_lkup[i] for i in mzs[0::2]]
vals[rowst[scn]:rowst[scn + 1]] = mzs[1::2]
f.seek(npos)
f.close()
vals = ((vals & 16383) * 8 ** (vals >> 14)).astype(float)
data = scipy.sparse.csr_matrix((vals, cols, rowst), \
shape=(nscans, len(ions)), dtype=float)
ions = np.array(ions) / 20.
return AstonFrame(data, times, ions)
@property
@cache(maxsize=1)
def old_data(self):
f = open(self.filename, 'rb')
# get number of scans to read in
# note that GC and LC chemstation store this in slightly different
# places
f.seek(0x5)
if f.read(4) == 'GC':
f.seek(0x142)
else:
f.seek(0x118)
nscans = struct.unpack('>H', f.read(2))[0]
# find the starting location of the data
f.seek(0x10A)
f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2)
# make a list of all of the ions and also read in times
ions = set()
times = np.empty(nscans)
scan_locs = np.empty(nscans, dtype=int)
scan_pts = np.empty(nscans, dtype=int)
for scn in range(nscans):
npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0]
            # the sampling rate is evidently 60 kHz on all Agilent's MS's
times[scn] = struct.unpack('>I', f.read(4))[0] / 60000.
f.seek(f.tell() + 6)
npts = struct.unpack('>H', f.read(2))[0]
# jump to the data and save relevant parameters for later
f.seek(f.tell() + 4)
scan_locs[scn] = f.tell()
scan_pts[scn] = npts
#TODO: use numpy.fromfile?
nions = np.fromfile(f, dtype='>H', count=npts * 2)[0::2]
if scn < 2:
print(npts)
print(nions)
#nions = struct.unpack('>' + npts * 'HH', f.read(npts * 4))[0::2]
ions.update(nions)
f.seek(npos)
ions = np.array(sorted(list(ions)))
data = np.empty((len(times), len(ions)), dtype=float)
for scn in range(nscans):
f.seek(scan_locs[scn])
#TODO: use numpy.fromfile?
mzs = np.fromfile(f, dtype='>H', count=scan_pts[scn] * 2)
#mzs = np.array(struct.unpack('>' + npts * 'HH', f.read(npts * 4)))
if len(mzs) == 0:
continue
ilocs = np.searchsorted(ions, mzs[0::2])
abn = (mzs[1::2] & 16383) * 8 ** (mzs[1::2] >> 14)
data[scn][ilocs] = abn
f.close()
ions /= 20.
return AstonFrame(data, times, ions)
@property
def info(self):
d = super(AgilentMS, self).info
f = open(self.filename, 'rb')
f.seek(0x18)
d['name'] = f.read(struct.unpack('>B', f.read(1))[0]).decode().strip()
f.seek(0x94)
d['operator'] = f.read(struct.unpack('>B', f.read(1))[0]).decode()
f.seek(0xE4)
d['method'] = \
f.read(struct.unpack('>B', f.read(1))[0]).decode().strip()
f.seek(0xB2)
rawdate = f.read(struct.unpack('>B', f.read(1))[0]).decode()
try:
d['date'] = datetime.strptime(rawdate, \
"%d %b %y %H:%M %p").isoformat(' ')
except ValueError:
pass # date is not in correct format to parse?
#TODO: vial number in here too?
f.close()
#TODO: fill this out
## read info from the acqmeth.txt file
#fname = op.join(op.dirname(self.filename), 'acqmeth.txt')
return d
class AgilentMSMSScan(ScanListFile):
ext = 'BIN'
mgc = '0101'
traces = ['#ms']
# TODO: __init__ method that adds mrm trace names to traces
def _scan_iter(self, keylist):
f = open(self.filename, 'rb')
r = ElementTree.parse(op.splitext(self.filename)[0] + '.xsd').getroot()
xml_to_struct = {'xs:int': 'i', 'xs:long': 'q', 'xs:short': 'h', \
'xs:byte': 'b', 'xs:double': 'd', 'xs:float': 'f'}
rfrmt = {}
for n in r.getchildren():
name = n.get('name')
for sn in n.getchildren()[0].getchildren():
if rfrmt.get(name, None) is None:
rfrmt[name] = []
sname = sn.get('name')
stype = sn.get('type')
rfrmt[name].append((sname, xml_to_struct.get(stype, stype)))
def resolve(lookup, recname):
names = [i[0] for i in lookup[recname]]
frmts = [i[1] for i in lookup[recname]]
flatnames = []
flatfrmts = ''
for n, f in zip(names, frmts):
if len(f) != 1:
n, f = resolve(lookup, f)
flatnames += n
else:
flatnames.append(n)
|
r3tard/BartusBot
|
lib/parsedatetime/tests/TestComplexDateTimes.py
|
Python
|
apache-2.0
| 3,487 | 0.009464 |
"""
Test parsing of complex date and times
"""
import unittest, time, datetime
import parsedatetime as pdt
class test(unittest.TestCase):
@pdt.tests.assertEqualWithComparator
def assertExpectedResult(self, result, check, **kwargs):
return pdt.tests.compareResultByTimeTuplesAndFlags(result, check, **kwargs)
def setUp(self):
self.cal = pdt.Calendar()
self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()
def testDates(self):
start = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec).timetuple()
target = datetime.datetime(2006, 8, 25, 17, 0, 0).timetuple()
        self.assertExpectedResult(self.cal.parse('08/25/2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm on 08.25.2006', start), (target, 3))
        self.assertExpectedResult(self.cal.parse('5pm August 25, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm August 25th, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 25 August, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 25th August, 2006', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 25, 2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 25th, 2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('25 Aug, 2006 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('25th Aug 2006, 5pm', start), (target, 3))
if self.mth > 8 or (self.mth == 8 and self.dy > 5):
target = datetime.datetime(self.yr + 1, 8, 5, 17, 0, 0).timetuple()
else:
target = datetime.datetime(self.yr, 8, 5, 17, 0, 0).timetuple()
self.assertExpectedResult(self.cal.parse('8/5 at 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 8.5', start), (target, 3))
self.assertExpectedResult(self.cal.parse('08/05 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('August 5 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm Aug 05', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 05 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('Aug 05th 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5 August 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5th August 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('5pm 05 Aug', start), (target, 3))
self.assertExpectedResult(self.cal.parse('05 Aug 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('05th Aug 5pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('August 5th 5pm', start), (target, 3))
if self.mth > 8 or (self.mth == 8 and self.dy > 5):
target = datetime.datetime(self.yr + 1, 8, 5, 12, 0, 0).timetuple()
else:
target = datetime.datetime(self.yr, 8, 5, 12, 0, 0).timetuple()
self.assertExpectedResult(self.cal.parse('August 5th 12pm', start), (target, 3))
self.assertExpectedResult(self.cal.parse('August 5th 12:00', start), (target, 3))
if __name__ == "__main__":
unittest.main()
|
hanzz/spectrum
|
spectrumctl/spectrum/__init__.py
|
Python
|
gpl-2.0
| 883 | 0.00453 |
# -*- coding: utf
|
-8 -*-
#
# This file is
|
part of spectrumctl. See spectrumctl.py for a description.
#
# Copyright (C) 2009, 2010 Mathias Ertl
#
# Spectrumctl is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
__all__ = [ 'spectrum', 'spectrumconfigparser', 'env', 'ExistsError', 'config_interface', 'spectrum_group' ]
|
bverdu/onDemand
|
onDemand/plugins/zigbee/light.py
|
Python
|
agpl-3.0
| 95 | 0.021053 |
'''
|
Created on 19 nov. 2015
@author: Bertrand Verdu
'''
if __name__ == '__main__':
pass
| |
eamoncaddigan/Leverage
|
setup.py
|
Python
|
agpl-3.0
| 1,490 | 0.000671 |
import os
from setuptools import find_packages,
|
setup
from pip.req import parse_requirements
with open(os.path.join(os.path.dirname(__file__), 'README.md')
|
) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements('requirements.txt', session=False)
# reqs is a list of requirements
reqs = [str(ir.req) for ir in install_reqs]
setup(
name='leverage',
version='0.0.1',
packages=find_packages(),
include_package_data=True,
install_requires=reqs,
    license='AGPLv3 License', # example license
description='',
long_description=README,
url='',
author='',
author_email='',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
commaai/openpilot
|
selfdrive/hardware/tici/test_agnos_updater.py
|
Python
|
mit
| 595 | 0.011765 |
#!/usr/bin/env python3
import json
import os
import unittest
impo
|
rt requests
AGNOS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
MANIFEST = os.path.join(AGNOS_DIR, "agnos.json")
class TestAgnosUpdater(unittest.TestCase):
def test_manifest(self):
with open(MANIFEST) as f:
m = json.load(f)
for img in m:
r = requests.head(img['url'])
r.raise_for_status()
self.assertEqual(r.headers['Cont
|
ent-Type'], "application/x-xz")
if not img['sparse']:
assert img['hash'] == img['hash_raw']
if __name__ == "__main__":
unittest.main()
|
brandonmbare/csbuild
|
csbuild/_gui.py
|
Python
|
mit
| 88,329 | 0.0338 |
# coding=utf-8
import functools
import re
import stat
import sys
if sys.version_info >= (3,0):
import io
StringIO = io.StringIO
else:
import cStringIO
StringIO = cStringIO.StringIO
import csbuild
from . import log
try:
from PyQt5 import QtCore, QtGui, QtWidgets
QMainWindow = QtWidgets.QMainWindow
QApplication = QtWidgets.QApplication
QtGui.QAbstractItemView = QtWidgets.QAbstractItemView
QtGui.QAction = QtWidgets.QAction
QtGui.QApplication = QtWidgets.QApplication
QtGui.QHBoxLayout = QtWidgets.QHBoxLayout
QtGui.QHeaderView = QtWidgets.QHeaderView
QtGui.QLabel = QtWidgets.QLabel
QtGui.QLineEdit = QtWidgets.QLineEdit
QtGui.QMainWindow = QtWidgets.QMainWindow
QtGui.QMenu = QtWidgets.QMenu
QtGui.QMessageBox = QtWidgets.QMessageBox
QtGui.QPlainTextEdit = QtWidgets.QPlainTextEdit
QtGui.QProgressBar = QtWidgets.QProgressBar
QtGui.QPushButton = QtWidgets.QPushButton
QtGui.QSpacerItem = QtWidgets.QSpacerItem
QtGui.QSizePolicy = QtWidgets.QSizePolicy
QtGui.QSlider = QtWidgets.QSlider
QtGui.QSplitter = QtWidgets.QSplitter
QtGui.QStatusBar = QtWidgets.QStatusBar
QtGui.QStyledItemDelegate = QtWidgets.QStyledItemDelegate
QtGui.QTextEdit = QtWidgets.QTextEdit
QtGui.QTreeWidget = QtWidgets.QTreeWidget
QtGui.QTreeWidgetItem = QtWidgets.QTreeWidgetItem
QtGui.QTabWidget = QtWidgets.QTabWidget
QtGui.QToolTip = QtWidgets.QToolTip
QtGui.QVBoxLayout = QtWidgets.QVBoxLayout
QtGui.QWidget = QtWidgets.QWidget
log.LOG_INFO("Using Qt5")
USING_PYQT5 = True
except:
try:
from PyQt4 import QtCore, QtGui
QMainWindow = QtGui.QMainWindow
QApplication = QtGui.QApplication
log.LOG_INFO("Using Qt4")
USING_PYQT5 = False
except:
log.LOG_ERROR("Either PyQt4 or PyQt5 must be installed on your system to load the CSBuild GUI")
csbuild.Exit( 1 )
import os
import threading
import time
import math
import signal
from . import _shared_globals
class TreeWidgetItem(QtGui.QTreeWidgetItem):
def __init__(self, *args, **kwargs):
QtGui.QTreeWidgetItem.__init__(self, *args, **kwargs)
self.numericColumns = set()
def setColumnNumeric(self, col):
self.numericColumns.add(col)
def __lt__(self, other):
if self.parent():
return False
sortCol = self.treeWidget().sortColumn()
numericColumns = self.treeWidget().headerItem().numericColumns
try:
if sortCol in numericColumns:
myNumber = float(self.text(sortCol))
otherNumber = float(other.text(sortCol))
return myNumber > otherNumber
except:
pass
myText = str(self.text(sortCol))
otherText = str(other.text(sortCol))
return myText > otherText
class TreeWidgetWithBarGraph(QtGui.QTreeWidgetItem):
def __init__(self, parent, renderParent, isFile):
QtGui.QTreeWidgetItem.__init__(self, parent)
self.numericColumns = set()
self.startTime = -1
self.buildEnd = -1
self.linkQueueStart = -1
self.linkStart = -1
self.endTime = -1
self.isFile = isFile
self.m_childrenShowing
|
= False
self.renderParent = renderParent
self.lastUpdate = 0
def setChildrenShowing(self, showing):
self.m_childrenShowing = showing
def childrenShowing(self):
return self.m_childrenSh
|
owing
def setStartTime(self, startTime):
self.startTime = startTime
self.lastUpdate = time.time()
def setBuildEnd(self, buildEnd):
self.buildEnd = buildEnd
def setLinkStart(self, linkStart):
self.linkStart = linkStart
def setLinkQueueStart(self, linkQueueStart):
self.linkQueueStart = linkQueueStart
def setEndTime(self, endTime):
self.endTime = endTime
def draw(self, painter):
rect = self.renderParent.visualItemRect(self)
def drawBar(color, startTime, endTime):
if startTime != -1:
if endTime == -1:
endTime = self.lastUpdate
topLeft = rect.topLeft()
if topLeft.y() < 0:
return
bottomRight = rect.bottomRight()
xoffset = 24
if self.isFile:
xoffset += 20
topLeft.setX(topLeft.x() + (250-xoffset) + math.floor((startTime - _shared_globals.starttime) * 30))
topLeft.setY(topLeft.y())
bottomRight.setX(topLeft.x() + math.ceil((endTime - startTime) * 30))
bottomRight.setY(topLeft.y() + rect.height() - 2)
drawRect = QtCore.QRect(topLeft, bottomRight)
brush = painter.brush()
painter.setBrush(QtGui.QColor(color))
painter.drawRect(drawRect)
painter.setBrush(brush)
if self.isFile:
drawBar("#FF4000", self.startTime, self.buildEnd)
else:
drawBar("#0040FF", self.startTime, self.buildEnd)
drawBar("#008080", self.buildEnd, self.linkQueueStart)
drawBar("#00C0C0", self.linkQueueStart, self.linkStart)
drawBar("#00E080", self.linkStart, self.endTime)
class SyntaxHighlighter( QtGui.QSyntaxHighlighter ):
class HighlightRule( object ):
def __init__(self, pattern, argument):
self.pattern = pattern
self.format = argument
def __init__(self, *args):
QtGui.QSyntaxHighlighter.__init__(self, *args)
self.highlightRules = []
self.commentStart = re.compile("/\\*")
self.commentEnd = re.compile("\\*/")
self.keywordFormat = QtGui.QTextCharFormat()
self.commentFormat = QtGui.QTextCharFormat()
self.stringFormat = QtGui.QTextCharFormat()
self.functionFormat = QtGui.QTextCharFormat()
self.keywordFormat.setForeground(QtGui.QColor("#800000"))
self.keywordFormat.setFontWeight(QtGui.QFont.Bold)
for pattern in [
"\\b__alignof\\b",
"\\b__asm\\b",
"\\b__assume\\b",
"\\b__based\\b",
"\\b__box\\b",
"\\b__cdecl\\b",
"\\b__declspec\\b",
"\\b__delegate\\b",
"\\b__event\\b",
"\\b__except\\b",
"\\b__fastcall\\b",
"\\b__finally\\b",
"\\b__forceinline\\b",
"\\b__gc\\b",
"\\b__hook\\b",
"\\b__identifier\\b",
"\\b__if_exists\\b",
"\\b__if_not_exists\\b",
"\\b__inline\\b",
"\\b__int16\\b",
"\\b__int32\\b",
"\\b__int64\\b",
"\\b__int8\\b",
"\\b__interface\\b",
"\\b__leave\\b",
"\\b__m128\\b",
"\\b__m128d\\b",
"\\b__m128i\\b",
"\\b__m64\\b",
"\\b__multiple_inheritance\\b",
"\\b__nogc\\b",
"\\b__noop\\b",
"\\b__pin\\b",
"\\b__property\\b",
"\\b__raise\\b",
"\\b__restrict\\b",
"\\b__single_inheritance\\b",
"\\b__stdcall\\b",
"\\b__super\\b",
"\\b__thiscall\\b",
"\\b__try\\b",
"\\b__try_cast\\b",
"\\b__unaligned\\b",
"\\b__uuidof\\b",
"\\b__value\\b",
"\\b__virtual_inheritance\\b",
"\\b__w64\\b",
"\\b__wchar_t\\b",
"\\babstract\\b",
"\\barray\\b",
"\\balignas\\b",
"\\balignof\\b",
"\\band\\b",
"\\band_eq\\b",
"\\basm\\b",
"\\bauto\\b",
"\\bbitand\\b",
"\\bbitor\\b",
"\\bbool\\b",
"\\bbreak\\b",
"\\bcase\\b",
"\\bcatch\\b",
"\\bchar\\b",
"\\bchar16_t\\b",
"\\bchar32_t\\b",
"\\bclass\\b",
"\\bcompl\\b",
"\\bconst\\b",
"\\bconst_cast\\b",
"\\bconstexpr\\b",
"\\bcontinue\\b",
"\\bdecltype\\b",
"\\bdefault\\b",
"\\bdelegate\\b",
"\\bdelete\\b",
"\\bdeprecated\\b",
"\\bdllexport\\b",
"\\bdllimport\\b",
"\\bdo\\b",
"\\bdouble\\b",
"\\bdynamic_cast\\b",
"\\belse\\b",
"\\benum\\b",
"\\bevent\\b",
"\\bexplicit\\b",
"\\bexport\\b",
"\\bextern\\b",
"\\bfalse\\b",
"\\bfinal\\b",
"\\bfinally\\b",
"\\bfloat\\b",
"\\bfor\\b",
"\\bfor each\\b",
"\\bfriend\\b",
"\\bfriend_as\\b",
"\\bgcnew\\b",
"\\bgeneric\\b",
"\\bgoto\\b",
"\\bif\\b",
"\\bin\\b",
"\\binitonly\\b",
"\\binline\\b",
"\\bint\\b",
"\\bint16_t\\b",
"\\bint32_t\\b",
"\\bint64_t\\b",
"\\bint8_t\\b",
"\\binterface\\b",
"\\binterior_ptr\\b",
"\\bliteral\\b",
"\\blong\\b",
"\\bmutable\\b",
"\\bnaked\\b",
"\\bnamespace\\b",
"\\bnew\\b",
"\\bnoexcept\\b",
"\\bnoinline\\b",
"\\bnoreturn\\b",
"\\bnot\\b",
"\\bnot_eq\\b",
"\\bnothrow\\b",
"\\bnovtable\\b",
"\\bNULL\\b",
"\\bnullptr\\b",
"\\bnullptr_t\\b",
"\\boperator\\b",
"\\bor\\b",
"\\bor_eq\\b",
"\\boverride\\b",
"\\bproperty\\b",
"\\bprivate\\b",
"\\bprotected\\b",
"\\bpublic\\b",
"\\braise\\b",
"\\bref\\b",
"\\bregister\\b",
"\\breinterpret_cast\\b",
"\\brestrict\\b",
"\\breturn\\b",
"\\bsafecast\\b",
"\\bsealed\\b",
"\\bselectany\\b",
"\\bshort\\b",
"\\bsignals\\b",
|
niavok/perroquet
|
utils/reindent.py
|
Python
|
gpl-3.0
| 8,897 | 0.001236 |
#! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
"""reindent [-d][-r][-v] path ...
-d Dry run. Analyze, but don't make any changes to, files.
-r Recurse. Search for all .py files in subdirectories too.
-v Verbose. Print informative msgs; else no output.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess whitespace from ends of lines, and empty lines at the ends
of files. Ensure the last line ends with a newline.
Pass one or more file and/or directory paths. When a directory path, all
.py files within the directory will be examined, and, if the -r option is
given, likewise recursively for subdirectories.
Overwrites files in place, renaming the originals with a .bak extension.
If reindent finds nothing to change, the file is left alone. If reindent
does change a file, the changed file is a fixed-point for reindent (i.e.,
running reindent on the resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
"""
__version__ = "1"
import tokenize
import os
import sys
verbose = 0
recurse = 0
dryrun = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
import getopt
global verbose, recurse, dryrun
try:
opts, args = getopt.getopt(sys.argv[1:], "drv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-d':
dryrun += 1
elif o == '-r':
recurse += 1
elif o == '-v':
verbose += 1
if not args:
errprint("Usage:", __doc__)
return
for arg in args:
check(arg)
def check(file):
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "listing directory", file
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(ful
|
lname) and
not os.path.islink(fullname))
or name.lower().endswith(".py")):
check(fullname)
return
if verbose:
print "checking", file, "...",
try:
f = open(file)
except IOError, msg:
errprint("%s: I/O Error: %s" % (file, str(msg)))
return
r = Reindenter(f)
f.close()
if r.run():
if verbose:
print "changed."
if dryrun:
|
print "But this is a dry run, so leaving it alone."
if not dryrun:
bak = file + ".bak"
if os.path.exists(bak):
os.remove(bak)
os.rename(file, bak)
if verbose:
print "renamed", file, "to", bak
f = open(file, "w")
r.write(f)
f.close()
if verbose:
print "wrote new", file
else:
if verbose:
print "unchanged."
class Reindenter:
def __init__(self, f, eol="\n"):
self.find_stmt = 1 # next token begins a fresh stmt?
self.level = 0 # current indent level
self.eol = eol
# Raw file lines.
self.raw = f.readlines()
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
self.lines = [line.rstrip().expandtabs() + self.eol
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
# List of (lineno, indentlevel) pairs, one for each stmt and
# comment line. indentlevel is -1 for comment lines, as a
# signal that tokenize doesn't know what to do about them;
# indeed, they're our headache!
self.stats = []
def run(self):
tokenize.tokenize(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == self.eol:
lines.pop()
# Sentinel.
stats = self.stats
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = self.after = []
for i in range(len(stats)-1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i+1][0]
have = getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in xrange(i+1, len(stats)-1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == getlspace(lines[jline]):
want = jlevel * 4
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in xrange(i-1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = have + getlspace(after[jline-1]) - \
getlspace(lines[jline])
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line in lines[thisstmt:nextstmt]:
if diff > 0:
if line == self.eol:
after.append(line)
else:
after.append(" " * diff + line)
else:
remove = min(getlspace(line), -diff)
after.append(line[remove:])
return self.raw != self.after
def write(self, f):
f.writelines(self.after)
# Line-getter for tokenize.
def getline(self):
if self.index >= len(self.lines):
line = ""
else:
line = self.lines[self.index]
self.index += 1
return line
# Line-eater for tokenize.
def tokeneater(self, type, token, (sline, scol), end, line,
INDENT=tokenize.INDENT,
DEDENT=tokenize.DEDENT,
NEWLINE=tokenize.NEWLINE,
COMMENT=tokenize.COMMENT,
NL=tokenize.NL):
if type == NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
self.find_stmt = 1
elif type == INDENT:
self.find_stmt = 1
self.level += 1
elif type == DEDENT:
self.find_stmt = 1
self.level -= 1
elif type == COMMENT:
if self.find_stmt:
|
chrisbarr/bilious-rutabaga
|
bucket_lister/__init__.py
|
Python
|
mit
| 204 | 0.009804 |
from boto.s3.connection import S3Connection
def main():
conn = S3Co
|
nnection()
buc
|
kets = conn.get_all_buckets()
for b in buckets:
print b.name
if __name__ == "__main__":
main()
|
AustereCuriosity/astropy
|
astropy/io/misc/pickle_helpers.py
|
Python
|
bsd-3-clause
| 3,779 | 0.000265 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple input/output related functionality that is not
part of a larger framework or standard.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
from ...extern.six.moves import range
__all__ = ['fnpickle', 'fnunpickle']
def fnunpickle(fileorname, number=0, usecPickle=True):
""" Unpickle pickled objects from a specified file and return the contents.
Parameters
----------
fileorname : str or file-like
The file name or file from which to unpickle objects. If a file object,
it should have been opened in binary mode.
number : int
If 0, a single object will be returned (the first in the file). If >0,
this specifies the number of objects to be unpickled, and a list will
be returned with exactly that many objects. If <0, all objects in the
file will be unpickled and returned as a list.
usecPickle : bool
If True, the :mod:`cPickle` module is to be used in place of
:mod:`pickle` (cPickle is faster). This only applies for python 2.x.
Raises
------
EOFError
If ``number`` is >0 and there are fewer than ``number`` objects in the
pickled file.
Returns
-------
contents : obj or list
        If ``number`` is 0, this is an individual object - the first one unpickled
from the file. Otherwise, it is a list of objects unpickled from the
file.
"""
if usecPickle and six.PY2:
import cPickle as pickle
else:
import pickle
if isinstance(fileorname, six.string_types):
f = open(fileorname, 'rb')
close = True
else:
f = fileorname
close = False
try:
if number > 0: # get that number
res = []
for i in range(number):
res.append(pickle.load(f))
elif number < 0: # get all objects
res = []
eof = False
while not eof:
try:
res.append(pickle.load(f))
except EOFError:
eof = True
else: # number==0
res = pickle.load(f)
finally:
if close:
f.close()
return res
def fnpickle(object, fileorname, usecPickle=True, protocol=None, append=False):
"""Pickle an object to a specified file.
Parameters
----------
object
The python object to pickle.
fileorname : str or file-like
|
The filename
|
or file into which the `object` should be pickled. If a
file object, it should have been opened in binary mode.
usecPickle : bool
If True (default), the :mod:`cPickle` module is to be used in place of
:mod:`pickle` (cPickle is faster). This only applies for python 2.x.
protocol : int or None
Pickle protocol to use - see the :mod:`pickle` module for details on
these options. If None, the most recent protocol will be used.
append : bool
If True, the object is appended to the end of the file, otherwise the
file will be overwritten (if a file object is given instead of a
file name, this has no effect).
"""
if usecPickle and six.PY2:
import cPickle as pickle
else:
import pickle
if protocol is None:
protocol = pickle.HIGHEST_PROTOCOL
if isinstance(fileorname, six.string_types):
f = open(fileorname, 'ab' if append else 'wb')
close = True
else:
f = fileorname
close = False
try:
pickle.dump(object, f, protocol=protocol)
finally:
if close:
f.close()
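# Illustrative sketch (added for this edit, not part of the original module)
# showing the two helpers used together; the file name is a placeholder.
def _example_pickle_roundtrip(path='example.pkl'):
    fnpickle({'a': 1}, path)                 # write a first object
    fnpickle([2, 3], path, append=True)      # append a second object
    return fnunpickle(path, number=-1)       # -> [{'a': 1}, [2, 3]]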
|
wvangeit/AllenSDK
|
doc_template/examples/data_api_client_ex2.py
|
Python
|
gpl-3.0
| 197 | 0 |
from gene_acronym_query
|
import GeneAcronymQuery
query = GeneAcronymQuery()
gene_info = query.get_data('ABAT')
for gene in gene_info:
print "%s (%s)" % (gene['name'], gene['organism']['
|
name'])
|
galbramc/gpkit
|
gpkit/tests/t_model.py
|
Python
|
mit
| 14,190 | 0.001339 |
"""Tests for GP and SP classes"""
import math
import unittest
import numpy as np
from gpkit import (Model, Monomial, settings, VectorVariable, Variable,
SignomialsEnabled, ArrayVariable)
from gpkit.geometric_program import GeometricProgram
from gpkit.small_classes import CootMatrix
from gpkit.feasibility import feasibility_model
NDIGS = {"cvxopt": 5, "mosek": 7, "mosek_cli": 5}
# name: decimal places of accuracy
class TestGP(unittest.TestCase):
"""
Test GeometricPrograms.
This TestCase gets run once for each installed solver.
"""
name = "TestGP_"
# solver and ndig get set in loop at bottom this file, a bit hacky
solver = None
ndig = None
def test_trivial_gp(self):
"""
Create and solve a trivial GP:
minimize x + 2y
subject to xy >= 1
The global optimum is (x, y) = (sqrt(2), 1/sqrt(2)).
"""
x = Monomial('x')
y = Monomial('y')
prob = Model(cost=(x + 2*y),
constraints=[x*y >= 1])
sol = prob.solve(solver=self.solver, verbosity=0)
self.assertEqual(type(prob.latex()), str)
self.assertEqual(type(prob._repr_latex_()), str)
self.assertAlmostEqual(sol("x"), math.sqrt(2.), self.ndig)
self.assertAlmostEqual(sol("y"), 1/math.sqrt(2.), self.ndig)
self.assertAlmostEqual(sol("x") + 2*sol("y"),
2*math.sqrt(2),
self.ndig)
self.assertAlmostEqual(sol["cost"], 2*math.sqrt(2), self.ndig)
def test_simple_united_gp(self):
R = Variable('R', units="nautical_miles")
a0 = Variable('a0', 340.29, 'm/s')
theta = Variable(r'\theta', 0.7598)
t = Variable('t', 10, 'hr')
T_loiter = Variable('T_{loiter}', 1, 'hr')
T_reserve = Variable('T_{reserve}', 45, 'min')
M = VectorVariable(2, 'M')
if R.units:
prob = Model(1/R,
[t >= sum(R/a0/M/theta**0.5) + T_loiter + T_reserve,
M <= 0.76])
sol = prob.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 0.0005532, self.ndig)
def test_trivial_vector_gp(self):
"""
Create and solve a trivial GP with VectorVariables
"""
x = VectorVariable(2, 'x')
y = VectorVariable(2, 'y')
prob = Model(cost=(sum(x) + 2*sum(y)),
constraints=[x*y >= 1])
sol = prob.solve(solver=self.solver, verbosity=0)
self.assertEqual(sol('x').shape, (2,))
self.assertEqual(sol('y').shape, (2,))
for x, y in zip(sol('x'), sol('y')):
self.assertAlmostEqual(x, math.sqrt(2.), self.ndig)
self.assertAlmostEqual(y, 1/math.sqrt(2.), self.ndig)
self.assertAlmostEqual(sol["cost"]/(4*math.sqrt(2)), 1., self.ndig)
def test_zero_lower_unbounded(self):
x = Variable('x', value=4)
y = Variable('y', value=0)
z = Variable('z')
t1 = Variable('t1')
t2 = Variable('t2')
prob = Model(z, [z >= x + t1,
t1 >= t2,
t2 >= y])
sol = prob.solve(verbosity=0)
def test_mdd_example(self):
Cl = Variable("Cl", 0.5, "-", "Lift Coefficient")
Mdd = Variable("Mdd", "-", "Drag Divergence Mach Number")
m1 = Model(1/Mdd, [1 >= 5*Mdd + 0.5, Mdd >= 0.00001])
m2 = Model(1/Mdd, [1 >= 5*Mdd + 0.5])
m3 = Model(1/Mdd, [1 >= 5*Mdd + Cl, Mdd >= 0.00001])
sol1 = m1.solve(solver=self.solver, verbosity=0)
sol2 = m2.solve(solver=self.solver, verbosity=0)
sol3 = m3.solve(solver=self.solver, verbosity=0)
gp1, gp2, gp3 = [m.program for m in [m1, m2, m3]]
self.assertEqual(gp1.A, CootMatrix(row=[0, 1, 2],
col=[0, 0, 0],
data=[-1, 1, -1]))
self.assertEqual(gp2.A, CootMatrix(row=[0, 1],
col=[0, 0],
data=[-1, 1]))
# order of variables within a posynomial is not stable
# (though monomial order is)
equiv1 = gp3.A == CootMatrix(row=[0, 2, 3, 2],
col=[0, 0, 0, 0],
data=[-1, 1, -1, 0])
equiv2 = gp3.A == CootMatrix(row=[0, 1, 3, 2],
col=[0, 0, 0, 0],
data=[-1, 1, -1, 0])
self.assertTrue(equiv1 or equiv2)
self.assertAlmostEqual(sol1(Mdd), sol2(Mdd))
self.assertAlmostEqual(sol1(Mdd), sol3(Mdd))
self.assertAlmostEqual(sol2(Mdd), sol3(Mdd))
def test_additive_constants(self):
x = Variable('x')
m = Model(1/x, [1 >= 5*x + 0.5, 1 >= 10*x])
m.solve(verbosity=0)
gp = m.program
self.assertEqual(gp.cs[1], gp.cs[2])
self.assertEqual(gp.A.data[1], gp.A.data[2])
def test_zeroing(self):
L = Variable("L")
k = Variable("k", 0)
with SignomialsEnabled():
constr = [L-5*k <= 10]
sol = Model(1/L, constr).solve(verbosity=0, solver=self.solver)
self.assertAlmostEqual(sol(L), 10, self.ndig)
self.assertAlmostEqual(sol["cost"], 0.1, self.ndig)
def test_singular(self):
"""
Create and solve GP with a singular A matrix
"""
if self.solver == 'cvxopt':
# cvxopt can't solve this problem
# (see https://github.com/cvxopt/cvxopt/issues/36)
return
x = Variable('x')
y = Variable('y')
m = Model(y*x, [y*x >= 12])
sol = m.solve(solver=self.solver, verbosity=0)
self.assertAlmostEqual(sol["cost"], 12, self.ndig)
def test_constants_in_objective_1(self):
'''Issue 296'''
x1 = Variable('x1')
x2 = Variable('x2')
m = Model(1.+ x1 + x2, [x1 >= 1., x2 >= 1.])
sol = m.solve(solver=self.solver, verbosity=0)
self.assertAlmostEqual(sol["cost"], 3, self.ndig)
def test_constants_in_objective_2(self):
'''Issue 296'''
x1 = Variable('x1')
x2 = Variable('x2')
m = Model(x1**2 + 10
|
0 + 3*x2, [x1 >= 10., x2 >= 15.])
sol = m.solve(solver=self.solver, verbosity=0)
self.assertAlmostEqual(sol["cost"]/245., 1, self.ndig)
def test_feasibility_gp_(self):
x = Variable('x')
m = Model(x, [x**2 >= 1, x <= 0.5])
self.assertRaises(RuntimeWarning, m.solve, verbosity=0)
|
fm = feasibility_model(m, "max")
sol1 = fm.solve(verbosity=0)
fm = feasibility_model(m, "product")
sol2 = fm.solve(verbosity=0)
self.assertTrue(sol1["cost"] >= 1)
self.assertTrue(sol2["cost"] >= 1)
def test_terminating_constant_(self):
x = Variable('x')
y = Variable('y', value=0.5)
prob = Model(1/x, [x + y <= 4])
sol = prob.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 1/3.5, self.ndig)
def test_check_result(self):
"""issue 361"""
N = 5
L = 5.
dx = L/(N-1)
EI = Variable("EI",10)
p = VectorVariable(N, "p")
p = p.sub(p, 100*np.ones(N))
V = VectorVariable(N, "V")
M = VectorVariable(N, "M")
th = VectorVariable(N, "th")
w = VectorVariable(N, "w")
eps = 1E-6
substitutions = {var: eps for var in [V[-1], M[-1], th[0], w[0]]}
objective = w[-1]
constraints = [EI*V.left[1:N] >= EI*V[1:N] + 0.5*dx*p.left[1:N] + 0.5*dx*p[1:N],
EI*M.left[1:N] >= EI*M[1:N] + 0.5*dx*V.left[1:N] + 0.5*dx*V[1:N],
EI*th.right[0:N-1] >= EI*th[0:N-1] + 0.5*dx*M.right[0:N-1] + 0.5*dx*M[0:N-1],
EI*w.right[0:N-1] >= EI*w[0:N-1] + 0.5*dx*th.right[0:N-1] + 0.5*dx*th[0:N-1]]
m = Model(objective, constraints, substitutions)
sol = m.solve(verbosity=0)
def test_exps_is_tuple(self):
"""issue 407"""
x = Variable('x')
m = Mo
|
freedomboxtwh/Plinth
|
plinth/__init__.py
|
Python
|
agpl-3.0
| 750 | 0 |
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warr
|
anty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.
|
If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth package init file
"""
__version__ = '0.13.1'
|
bioxfu/circRNAFinder
|
src/SeqAnnoDownloadBuild.py
|
Python
|
gpl-3.0
| 7,557 | 0.025275 |
#! /usr/bin/env python
import argparse, re, os
parser = argparse.ArgumentParser(description = 'Download genome/transcript sequences and gene annotations')
parser.add_argument('species', choices=['hg19','mm10','TAIR10'], help='choose a species (Human, Mouse, Arabidopsis)')
parser.add_argument('-d', '--download', action='store_true', help='download sequences or annotations')
parser.add_argument('-b', '--build', action='store_true', help='build sequences or annotations')
parser.add_argument('-g', '--genome', action='store_true', help='download or build genome sequences')
parser.add_argument('-t', '--transcriptome', action='store_true', help='download or build transcriptome sequences')
parser.add_argument('-a', '--annotation', action='store_true', help='download or build gene annotations')
args = parser.parse_args()
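# Hypothetical example invocations (added for illustration, not part of the
# original script); the script name is assumed from the file path:
#   python SeqAnnoDownloadBuild.py hg19 -d -g     # download the hg19 genome
#   python SeqAnnoDownloadBuild.py hg19 -b -g     # bowtie-build the hg19 genome
#   python SeqAnnoDownloadBuild.py TAIR10 -d -a   # download the TAIR10 GTF annotation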
genome_dict = {'hg19': 'ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/chromFa.tar.gz',
'mm10': 'ftp://hgdownload.cse.ucsc.edu/goldenPath/mm10/bigZips/chromFa.tar.gz',
'TAIR10': 'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/dna/Arabidopsis_thaliana.TAIR10.*.dna.toplevel.fa.gz'}
trans_dict = {'hg19': ['ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/cdna/Homo_sapiens.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/cdna/Homo_sapiens.*.cdna.all.fa.gz',
'ftp://ftp.ensembl.org/pub/release-75/fasta/homo_sapiens/ncrna/Homo_sapiens.*.ncrna.fa.gz'],
'mm10': ['ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/cdna/Mus_musculus.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/cdna/Mus_musculus.*.cdna.all.fa.gz',
'ftp://ftp.ensembl.org/pub/current_fasta/mus_musculus/ncrna/Mus_musculus.*.ncrna.fa.gz'],
'TAIR10': ['ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/cdna/Arabidopsis_thaliana.*.cdna.abinitio.fa.gz',
'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/cdna/Arabidopsis_thaliana.*.cdna.all.fa.gz',
'ftp://ftp.ensemblgenomes.org/pub/current/plants/fasta/arabidopsis_thaliana/ncrna/Arabidopsis_thaliana.*.ncrna.fa.gz']}
anno_dict = {'hg19': 'ftp://ftp.ensembl.org/pub/release-75/gtf/homo_sapiens/*.gtf.gz',
'mm10': 'ftp://ftp.ensembl.org/pub/current_gtf/mus_musculus/*.gtf.gz',
'TAIR10': 'ftp://ftp.ensemblgenomes.org/pub/current/plants//gtf/arabidopsis_thaliana/*.gtf.gz'}
def gtf_build(gtf, build):
input_file = open(gtf,'r')
output_file = open(build,'w')
tx2gene = {}
tx2exon_starts = {}
tx2exon_ends = {}
tx2cds_starts = {}
tx2cds_ends = {}
for line in input_file:
if line.startswith('#'):
continue
line_list = line.strip().split('\t')
chrom, biotype, feature, start, end, strand, ID = (line_list[0],line_list[1],line_list[2],line_list[3],line_list[4],line_list[6],line_list[8])
if gtf == 'hg19.gtf' or gtf == 'mm10.gtf':
chrom = 'chr' + chrom
start = str(int(start) - 1) ## 0-based
if re.search('gene_id \"(.+?)\".+transcript_id \"(.+?)\"', ID) is not None:
gene_id, tx_id = re.search('gene_id \"(.+?)\".+transcript_id \"(.+?)\"', ID).groups()
tx2gene[tx_id] = '%s|%s|%s|%s' % (chrom, strand, gene_id, biotype)
if feature == 'exon':
tx2exon_starts[tx_id] = start + ',' + tx2exon_starts.get(tx_id, '')
tx2exon_ends[tx_id] = end + ',' + tx2exon_ends.get(tx_id, '')
if feature == 'CDS':
tx2cds_starts[tx_id] = start + ',' + tx2cds_starts.get(tx_id, '')
tx2cds_ends[tx_id] = end + ',' + tx2cds_ends.get(tx_id, '')
gene2repretx = {} ## representative transcript (repretx) is the longest transcript for each gene
trans2len = {}
for tx_id in tx2gene:
chrom, strand, gene_id, biotype = tx2gene[tx_id].split('|')
exon_starts = sorted([int(i) for i in tx2exon_starts[tx_id].strip(',').split(',')])
exon_ends = sorted([int(i) for i in tx2exon_ends[tx_id].strip(',').split(',')])
tx_len = 0
for i in range(len(exon_starts)):
tx_len += (exon_ends[i] - exon_starts[i])
trans2len[tx_id] = tx_len
if gene_id in gene2repretx:
if tx_len > trans2len[gene2repretx[gene_id]]:
gene2repretx[gene_id] = tx_id
else:
gene2repretx[gene_id] = tx_id
for tx_id in sorted(tx2gene):
chrom, strand, gene_id, biotype = tx2gene[tx_id].split('|')
if tx_id == gene2repretx[gene_id]:
exon_starts = [str(j) for j in sorted([int(i) for i in tx2exon_starts[tx_id].strip(',').split(',')])]
exon_ends = [str(j) for j in sorted([int(i) for i in tx2exon_ends[tx_id].strip(',').split(',')])]
tx_start = exon_starts[0]
tx_end = exon_ends[-1]
cds_start = '.'
cds_end = '.'
if tx_id in tx2cds_starts:
cds_starts = [str(j) for j in sorted([int(i) for i in tx2cds_starts[tx_id].strip(',').split(',')])]
cds_ends = [str(j) for j in sorted([int(i) for i in tx2cds_ends[tx_id].strip(',').split(',')])]
cds_start = cds_starts[0]
cds_end = cds_ends[-1]
output_file.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (chrom, tx_start, tx_end, cds_start, cds_end, strand, ','.join(exon_starts), ','.join(exon_ends), tx_id, gene_id, biotype))
if args.download:
if args.genome:
print '[download %s genome]' % args.species
if args.species == 'hg19' or args.species == 'mm10':
print 'wget -q %s -O %s.tar.gz' % (genome_dict[args.species], args.species)
os.system('wget -q %s -O %s.tar.gz' % (genome_dict[args.species], args.species))
print 'tar -zxf %s.tar.gz' % args.species
os.system('tar -zxf %s.tar.gz' % args.species)
print 'cat chr*.fa > %s_dna.fa' % args.species
os.system('cat chr*.fa > %s_dna.fa' % args.species)
print 'rm chr*.fa'
os.system('rm chr*.fa')
else:
print 'wget -q %s -O %s.fa.gz' % (genome_dict[args.species], args.species)
os.system('wget -q %s -O %s.fa.gz' % (genome_dict[args.species], args.species))
print 'zcat %s.fa.gz > %s_dna.fa' % (args.species, args.species)
os.system('zcat %s.fa.gz > %s_dna.fa' % (args.species, args.species))
print 'rm %s.fa.gz' % args.species
os.system('rm %s.fa.gz' % args.species)
elif args.transcriptome:
print '[download %s transcriptome]' % args.species
for i in trans_dict[args.species]:
print 'wget -q %s' % i
os.system('wget -q %s' % i)
print 'zcat *.fa.gz > %s_trans.fa' % args.species
os.system('zcat *.fa.gz > %s_trans.fa' % args.species)
print 'rm *.fa.gz'
os.system('rm *.fa.gz')
elif args.annotation:
print '[download %s gene annotation]' % args.species
print 'wget -q %s -O %s.gtf.gz' % (anno_dict[args.species], args.species)
os.system('wget -q %s -O %s.gtf.gz' % (anno_dict[args.species], args.species))
print 'gzip -d %s.gtf.gz' % args.species
os.system('gzip -d %s.gtf.gz' % args.species)
else:
print 'please specify -g/--genome or -t/--transcriptome or -a/--annotation'
elif args.build:
if args.genome:
print '[build %s genome]' % args.species
print 'bowtie-build %s_dna.fa %s_dna' % (args.species, args.species)
os.system('bowtie-build %s_dna.fa %s_dna' % (args.species, args.species))
elif args.transcriptome:
print '[build %s transcriptome]' % args.species
print 'bowtie-build %s_trans.fa %s_trans' % (args.species, args.species)
os.system('bowtie-build %s_trans.fa %s_trans' % (args.species, args.species))
elif args.annotation:
print '[build %s gene annotation]' % args.species
print 'gtf_build(%s.gtf, %s.g
|
tf.build)' % (args.species, args.species)
gtf_build(args.species+'.gtf', args.species+'.gtf.buil
|
d')
else:
print 'please specify -g/--genome or -t/--transcriptome or -a/--annotation'
else:
print 'please specify -d/--download or -b/--build'
|
nfedera/rg3-youtube-dl
|
youtube_dl/compat.py
|
Python
|
unlicense
| 21,183 | 0.001275 |
from __future__ import unicode_literals
import binascii
import collections
import email
import getpass
import io
import optparse
import os
import re
import shlex
import shutil
imp
|
ort socket
import subprocess
import sys
import itertools
import xml.etree.ElementTree
try:
import urllib.reque
|
st as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from html.parser import HTMLParser as compat_HTMLParser
except ImportError: # Python 2
from HTMLParser import HTMLParser as compat_HTMLParser
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError: # Python 2
_asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
else re.compile('([\x00-\x7f]+)'))
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
# implementations from cpython 3.4.3's stdlib. Python 2's version
# is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)
def compat_urllib_parse_unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(compat_urllib_parse._hextochr[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return compat_urllib_parse_unquote(string, encoding, errors)
try:
from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError: # Python 2
# Python 2 will choke in urlencode on mixture of byte and unicode strings.
# Possible solutions are to either port it from python 3 with all
# the friends or manually ensure input query contains only byte strings.
# We will stick with latter thus recursively encoding the whole query.
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
def encode_elem(e):
if isinstance(e, dict):
e = encode_dict(e)
elif isinstance(e, (list, tuple,)):
list_e = encode_list(e)
e = tuple(list_e) if isinstance(e, tuple) else list_e
elif isinstance(e, compat_str):
e = e.encode(encoding)
return e
def encode_dict(d):
return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
def encode_list(l):
return [encode_elem(e) for e in l]
return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
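    # Added note (not in the original module): with this fallback a mixed
    # query such as {u'q': u'\u00e9', 'page': '1'} is recursively encoded to
    # UTF-8 byte strings before being passed to urllib's urlencode, which
    # would otherwise raise UnicodeEncodeError on Python 2.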
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
def data_open(self, req):
# data URLs as specified in RFC 2397.
#
# ignores POSTed data
#
# syntax:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
url = req.get_full_url()
scheme, data = url.split(':', 1)
mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
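    # Added example (not in the original module): opening the data URL
    # "data:text/plain;base64,SGVsbG8=" through an opener with this handler
    # installed yields a response whose read() returns b'Hello'.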
try:
compat_basestring = basestring # Python 2
except NameError:
compat_basestring = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xm
|
MDSLab/s4t-iotronic
|
iotronic/common/keystone.py
|
Python
|
apache-2.0
| 5,025 | 0 |
# coding=utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as ksexception
from oslo_config import cfg
from six.moves.urllib import parse
from iotronic.common import exception
from iotronic.common.i18n import _
CONF = cfg.CONF
keystone_opts = [
cfg.StrOpt('region_name',
               help='The region used for getting endpoints of OpenStack '
                    'services.'),
]
CONF.register_opts(keystone_opts, group='keystone')
CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
def _is_apiv3(auth_url, auth_version):
"""Checks if V3 version of API is being used or not.
    This method inspects auth_url and auth_version, and checks whether the V3
    version of the API is being used.
:param auth_url: a http or https url to be inspected (like
'http://127.0.0.1:9898/').
:param auth_version: a string containing the version (like 'v2', 'v3.0')
:returns: True if V3 of the API is being used.
"""
return auth_version == 'v3.0' or '/v3' in parse.urlparse(auth_url).path
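# Illustrative calls (added comment, not in the original module):
#   _is_apiv3('http://127.0.0.1:9898/v3', 'v2')  -> True  (matched by path)
#   _is_apiv3('http://127.0.0.1:9898/', 'v3.0')  -> True  (matched by version)
#   _is_apiv3('http://127.0.0.1:9898/', 'v2.0')  -> False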
def _get_ksclient(token=None):
auth_url = CONF.keystone_authtoken.auth_uri
if not auth_url:
raise exception.KeystoneFailure(_('Keystone API endpoint is missing'))
auth_version = CONF.keystone_authtoken.auth_version
api_v3 = _is_apiv3(auth_url, auth_version)
if api_v3:
from keystoneclient.v3 import client
else:
from keystoneclient.v2_0 import client
auth_url = get_keystone_url(auth_url, auth_version)
try:
if token:
return client.Client(token=token, auth_url=auth_url)
else:
return client.Client(
username=CONF.keystone_authtoken.admin_user,
password=CONF.keystone_authtoken.admin_password,
tenant_name=CONF.keystone_authtoken.admin_tenant_name,
|
region_name=CONF.keystone.region_name,
auth_url=auth_url)
except ksexception.Unauthorized:
raise exception.KeystoneUnauthorized()
except ksexception.AuthorizationFailure as err:
raise exception.KeystoneFailure(_('Could not authorize in Keystone:'
' %s') % err
|
)
def get_keystone_url(auth_url, auth_version):
"""Gives an http/https url to contact keystone.
Given an auth_url and auth_version, this method generates the url in
which keystone can be reached.
:param auth_url: a http or https url to be inspected (like
'http://127.0.0.1:9898/').
:param auth_version: a string containing the version (like v2, v3.0, etc)
:returns: a string containing the keystone url
"""
api_v3 = _is_apiv3(auth_url, auth_version)
api_version = 'v3' if api_v3 else 'v2.0'
# NOTE(lucasagomes): Get rid of the trailing '/' otherwise urljoin()
# fails to override the version in the URL
return parse.urljoin(auth_url.rstrip('/'), api_version)
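# Illustrative calls (added comment, not in the original module):
#   get_keystone_url('http://127.0.0.1:9898/', 'v2.0')      -> 'http://127.0.0.1:9898/v2.0'
#   get_keystone_url('http://127.0.0.1:9898/v2.0', 'v3.0')  -> 'http://127.0.0.1:9898/v3'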
def get_service_url(service_type='iot', endpoint_type='internal'):
"""Wrapper for get service url from keystone service catalog.
Given a service_type and an endpoint_type, this method queries keystone
service catalog and provides the url for the desired endpoint.
:param service_type: the keystone service for which url is required.
:param endpoint_type: the type of endpoint for the service.
:returns: an http/https url for the desired endpoint.
"""
ksclient = _get_ksclient()
if not ksclient.has_service_catalog():
raise exception.KeystoneFailure(_('No Keystone service catalog '
'loaded'))
try:
endpoint = ksclient.service_catalog.url_for(
service_type=service_type,
endpoint_type=endpoint_type,
region_name=CONF.keystone.region_name)
except ksexception.EndpointNotFound:
raise exception.CatalogNotFound(service_type=service_type,
endpoint_type=endpoint_type)
return endpoint
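# Illustrative call (added comment, not in the original module):
#   get_service_url('iot', 'internal')  -> the 'iot' internal endpoint URL
#                                          registered in the service catalog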
def get_admin_auth_token():
"""Get an admin auth_token from the Keystone."""
ksclient = _get_ksclient()
return ksclient.auth_token
def token_expires_soon(token, duration=None):
"""Determines if token expiration is about to occur.
:param duration: time interval in seconds
:returns: boolean : true if expiration is within the given duration
"""
ksclient = _get_ksclient(token=token)
return ksclient.auth_ref.will_expire_soon(stale_duration=duration)
|
LeastAuthority/txkube
|
src/txkube/_memory.py
|
Python
|
mit
| 25,882 | 0.001468 |
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
An in-memory implementation of the Kubernetes client interface.
"""
from json import loads
import attr
from pyrsistent import InvariantException, PClass, field, pset
from zope.interface import Interface, implementer
from twisted.python.url import URL
from twisted.web.http import CREATED, NOT_FOUND, OK
from eliot import start_action
from klein import Klein
from werkzeug.exceptions import NotFound
from treq.testing import RequestTraversalAgent
from . import (
IKubernetes, KubernetesError, network_kubernetes,
v1_5_model,
)
from ._compat import dumps_bytes
def memory_kubernetes():
"""
Create an in-memory Kubernetes-alike service.
This serves as a places to hold state for stateful Kubernetes interactions
allowed by ``IKubernetesClient``. Only clients created against the same
instance will all share state.
:return IKubernetes: The new Kubernetes-alike service.
"""
return _MemoryKubernetes()
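# A minimal usage sketch (added for illustration, not part of the original
# module); client construction arguments are elided:
#
#     kubernetes = memory_kubernetes()
#     client = kubernetes.versioned_client(...)
#     # The client speaks the usual IKubernetesClient interface, but every
#     # request is served from this process's in-memory state.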
@implementer(IKubernetes)
class _MemoryKubernetes(object):
"""
``_MemoryKubernetes`` maintains state in-memory which approximates
the state of a real Kubernetes deployment sufficiently to expose a
subset of the external Kubernetes API.
:ivar model: All of the Kubernetes model objects as understood by this
service.
"""
def __init__(self):
base_url = URL.fromText(u"https://kubernetes.example.invalid./")
self.model = v1_5_model
self._state = _KubernetesState.for_model(self.model)
self._resource = _kubernetes_resource(self, self.model)
self._kubernetes = network_kubernetes(
base_url=base_url,
agent=RequestTraversalAgent(self._resource),
)
def _state_changed(self, state):
"""
The Kubernetes state has been changed. Record the new version.
The state is immutable so any changes must be represented as a brand
new object.
:param _KubernetesState state: The new state.
"""
self._state = state
def versioned_client(self, *args, **kwargs):
"""
:return IKubernetesClient: A new client which interacts with this
object rather than a real Kubernetes deployment.
"""
return self._kubernetes.versioned_client(*args, **kwargs)
def client(self, *args, **kwargs):
"""
:return IKubernetesClient: A new client which interacts with this
object rather than a real Kubernetes deployment.
"""
return self._kubernetes.client(*args, **kwargs)
def _kubernetes_resource(memory_service, model):
return _Kubernetes(memory_service, model).app.resource()
def _incrementResourceVersion(version):
"""
Pyrsistent transformation function which can increment a
``v1.ObjectMeta.resourceVersion`` value (even if it was missing).
:param version: The old version as a ``unicode`` string or ``None`` if
there wasn't one.
:return unicode: The new version, guaranteed to be greater than the old
one.
"""
if version is None:
version = 0
return u"{}".format(int(version) + 1)
def _transform_object(obj, *transformation):
"""
Apply a pyrsistent transformation to an ``IObject``.
In addition to the given transformation, the object's resourceVersion will
be updated.
:param IObject: obj: The object to transform.
:param *transformation: Arguments like those to ``PClass.transform``.
:return: The transformed object.
"""
return obj.transform(
[u"metadata", u"resourceVersion"],
_incrementResourceVersion,
*transformation
)
def _api_group_for_type(cls):
"""
Determine which Kubernetes API group a particular PClass is likely to
belong with.
This is basically nonsense. The question being asked is wrong. An
abstraction has failed somewhere. Fixing that will get rid of the need
for this.
"""
_groups = {
(u"v1beta1", u"Deployment"): u"extensions",
(u"v1beta1", u"DeploymentList"): u"extensions",
(u"v1beta1", u"ReplicaSet"): u"extensions",
(u"v1beta1", u"ReplicaSetList"): u"extensions",
}
key = (
cls.apiVersion,
cls.__name__.rsplit(u".")[-1],
)
group = _groups.get(key, None)
return group
class IAgency(Interface):
"""
An ``IAgency`` implementation can impress certain additional behaviors
upon a ``_KubernetesState``. The latter shall use methods of the former
during state changes to give the former an opportunity to influence the
outcome of the state change.
"""
def before_create(state, obj):
"""
This is called before an object is created.
:param _KubernetesState state: The state in which the object is being
created.
:param IObject obj: A description of the object to be created.
:return IObject: The object to really create. Typically this is some
transformation of ``obj`` (for example, with default values
populated).
"""
def after_create(state, obj):
"""
This is called after an object has been created.
:param _KubernetesState state: The state in which the object is being
created.
:param IObject obj: A description of the object created. Regardless
of the implementation of this method, this is the description
which will be returned in the response to the create operation.
:return IObject: The object to store in the state. Typically this is
some transformation of ``obj`` (for example, with an observed
            status attached).
"""
def before_replace(state, old, new):
"""
This is called before an existing object is replaced by a new one.
:param _KubernetesState state: The state in which the object is being
replaced.
:param IObject old: A description of the object being replaced.
:param IObject new: A description of the object to replace ``old``.
:raise: Some exception to prevent the replacement from taking place.
:return: ``None``
"""
@implementer(IAgency)
class NullAgency(object):
"""
``NullAgency`` does nothing.
"""
def before_create(self, state, obj):
return obj
def after_create(self, state, obj):
return obj
def before_replace(self, state, old, new):
pass
@implementer(IAgency)
@attr.s(frozen=True
|
)
class AdHocAgency(object):
"""
``AdHocAgency`` implements some object changes which I observed to happen
on a real Kubernetes server while I was working on various parts of
txkube. No attempt at completeness attempted. The system for selecting
changes to implement is to run into im
|
portant inconsistencies between this
and a real Kubernetes while developing other features and then fix those
inconsistencies.
Perhaps in the future this will be replaced by something with less of an
ad hoc nature.
"""
model = attr.ib()
def before_create(self, state, obj):
return obj.fill_defaults()
def after_create(self, state, obj):
if isinstance(obj, self.model.v1beta1.Deployment):
obj = _transform_object(
obj,
[u"metadata", u"annotations", u"deployment.kubernetes.io/revision"],
u"1",
[u"status"],
{},
[u"status", u"observedGeneration"],
1,
[u"status", u"unavailableReplicas"],
1,
)
return obj
def before_replace(self, state, old, new):
if old.metadata.resourceVersion != new.metadata.resourceVersion:
group = _api_group_for_type(type(old))
details = {
u"group": group,
u"kind": old.kind,
u"name": old.metadata.name,
}
raise KubernetesError.object_modified(details)
class _KubernetesState(PClass):
"""
|
dhodhala88/Bosch1
|
weblate/trans/tests/test_models.py
|
Python
|
gpl-3.0
| 23,236 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for translation models.
"""
from django.test import TestCase
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import Permission, User
from django.core.exceptions import ValidationError
import shutil
import os
from weblate.trans.models import (
Project, SubProject, Unit, WhiteboardMessage, Check, get_related_units,
)
from weblate.trans.models.source import Source
from weblate import appsettings
from weblate.trans.tests.utils import get_test_file
from weblate.trans.vcs import GitRepository, HgRepository
REPOWEB_URL = \
'https://github.com/nijel/weblate-test/blob/master/%(file)s#L%(line)s'
GIT_URL = 'git://github.com/nijel/weblate-test.git'
HG_URL = 'https://nijel@bitbucket.org/nijel/weblate-test'
class RepoTestCase(TestCase):
"""
Generic class for tests working with repositories.
"""
def setUp(self):
# Path where to clone remote repo for tests
self.git_base_repo_path = os.path.join(
settings.DATA_DIR,
'test-base-repo.git'
)
# Repository on which tests will be performed
self.git_repo_path = os.path.join(
settings.DATA_DIR,
'test-repo.git'
)
# Path where to clone remote repo for tests
self.hg_base_repo_path = os.path.join(
settings.DATA_DIR,
'test-base-repo.hg'
)
# Repository on which tests will be performed
self.hg_repo_path = os.path.join(
settings.DATA_DIR,
'test-repo.hg'
)
# Clone repo for testing
if not os.path.exists(self.git_base_repo_path):
print(
'Cloning test repository to {0}...'.format(
self.git_base_repo_path
)
)
GitRepository.clone(
GIT_URL,
self.git_base_repo_path,
bare=True
)
# Remove possibly existing directory
if os.path.exists(self.git_repo_path):
shutil.rmtree(self.git_repo_path)
# Create repository copy for the test
shutil.copytree(self.git_base_repo_path, self.git_repo_path)
# Clone repo for testing
if not os.path.exists(self.hg_base_repo_path):
HgRepository.clone(
HG_URL,
self.hg_base_repo_path,
bare=True
)
# Remove possibly existing directory
if os.path.exists(self.hg_repo_path):
shutil.rmtree(self.hg_repo_path)
# Create repository copy for the test
shutil.copytree(self.hg_base_repo_path, self.hg_repo_path)
# Remove possibly existing project directory
test_repo_path = os.path.join(settings.DATA_DIR, 'vcs', 'test')
if os.path.exists(test_repo_path):
shutil.rmtree(test_repo_path)
def create_project(self):
"""
Creates test project.
"""
project = Project.objects.create(
name='Test',
slug='test',
web='http://weblate.org/'
)
self.addCleanup(shutil.rmtree, project.get_path(), True)
return project
def _create_subproject(self, file_format, mask, template='',
new_base='', vcs='git'):
"""
Creates real test subproject.
"""
project = self.create_project()
if vcs == 'mercurial':
branch = 'default'
repo = self.hg_repo_path
push = self.hg_repo_path
else:
branch = 'master'
repo = self.git_repo_path
push = self.git_repo_path
return SubProject.objects.create(
name='Test',
slug='test',
project=project,
repo=repo,
push=push,
branch=branch,
filemask=mask,
template=template,
file_format=file_format,
repoweb=REPOWEB_URL,
save_history=True,
new_base=new_base,
vcs=vcs
)
def create_subproject(self):
"""
Wrapper method for providing test subproject.
"""
return self._create_subproject(
'auto',
'po/*.po',
)
def create_po(self):
return self._create_subproject(
'po',
'po/*.po',
)
def create_po_mercurial(self):
return self._create_subproject(
'po',
'po/*.po',
vcs='mercurial'
)
def create_po_new_base(self):
return self._create_subproject(
'po',
'po/*.po',
new_base='po/hello.pot'
)
def create_po_link(self):
return self._create_subproject(
'po',
'po-link/*.po',
)
def create_po_mono(self):
return self._create_subproject(
'po-mono',
'po-mono/*.po',
'po-mono/en.po',
)
def create_ts(self, suffix=''):
return self._create_subproject(
'ts',
'ts{0}/*.ts'.format(suffix),
)
def create_iphone(self):
return self._create_subproject(
'strings',
'iphone/*.lproj/Localizable.strings',
)
def create_android(self):
return self._create_subproject(
'aresource',
'android/values-*/strings.xml',
'android/values/strings.xml',
)
def create_json(self):
return self._create_subproject(
'json',
'json/*.json',
)
def create_json_mono(self):
return self._create_subproject(
'json',
'json-mono/*.json',
'json-mono/en.json',
)
def create_java(self):
return self._create_subproject(
'properties',
'java/swing_messages_*.properties',
'java/swing_messages.properties',
)
def create_xliff(self, name='default'):
return self._create_subproject(
'xliff',
'xliff/*/%s.xlf' % name,
)
def create_link(self):
parent = self.create_iphone()
return SubProject.objects.create(
name='Test2',
slug='test2',
project=parent.project,
repo='weblate://test/test',
file_format='po',
filemask='po/*.po',
)
class ProjectTest(RepoTestCase):
"""
Project object testing.
"""
def test_create(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.get_path()))
self.assertTrue(project.slug in project.get_path())
def test_rename(self):
project = self.create_project()
old_path = project.get_path()
self.assertTrue(os.path.exists(old_path))
project.slug = 'changed'
project.save()
new_path = project.get_path()
self.addCleanup(shutil.rmtree, new_path, True)
self.assertFalse(os.path.exists(old_path))
self.assertTrue(os.path.exists(new_path))
def test_delete(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.get_path()))
project.delete()
self.assertFalse(os.path.exists(project.get_path()))
def test_delete_all(self):
|
eduNEXT/edx-platform
|
lms/djangoapps/instructor_task/migrations/0003_alter_task_input_field.py
|
Python
|
agpl-3.0
| 396 | 0 |
# Generated by Django 1.11.21 on 2019-07-01 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instructor_task', '0002_gradereportsetting'),
]
operations = [
migrations.AlterField(
model_name='instructortask',
name='task_input',
field=models.TextField(),
),
]
|
collinstocks/eventlet
|
tests/greenpool_test.py
|
Python
|
mit
| 15,085 | 0.000133 |
import gc
import os
import random
import eventlet
from eventlet import hubs, greenpool, event, pools
from eventlet.support import greenlets as greenlet, six
import tests
def passthru(a):
eventlet.sleep(0.01)
return a
def passthru2(a, b):
eventlet.sleep(0.01)
return a, b
def raiser(exc):
raise exc
class GreenPool(tests.LimitedTestCase):
def test_spawn(self):
p = greenpool.GreenPool(4)
waiters = []
for i in range(10):
waiters.append(p.spawn(passthru, i))
results = [waiter.wait() for waiter in waiters]
self.assertEqual(results, list(range(10)))
def test_spawn_n(self):
p = greenpool.GreenPool(4)
results_closure = []
def do_something(a):
eventlet.sleep(0.01)
results_closure.append(a)
for i in range(10):
p.spawn(do_something, i)
p.waitall()
self.assertEqual(results_closure, list(range(10)))
def test_waiting(self):
pool = greenpool.GreenPool(1)
done = event.Event()
def consume():
done.wait()
def waiter(pool):
gt = pool.spawn(consume)
gt.wait()
waiters = []
self.assertEqual(pool.running(), 0)
waiters.append(eventlet.spawn(waiter, pool))
eventlet.sleep(0)
self.assertEqual(pool.waiting(), 0)
waiters.append(eventlet.spawn(waiter, pool))
eventlet.sleep(0)
self.assertEqual(pool.waiting(), 1)
waiters.append(eventlet.spawn(waiter, pool))
eventlet.sleep(0)
self.assertEqual(pool.waiting(), 2)
self.assertEqual(pool.running(), 1)
done.send(None)
for w in waiters:
w.wait()
self.assertEqual(pool.waiting(), 0)
self.assertEqual(pool.running(), 0)
def test_multiple_coros(self):
evt = event.Event()
results = []
def producer():
results.append('prod')
evt.send()
def consumer():
results.append('cons1')
evt.wait()
results.append('cons2')
pool = greenpool.GreenPool(2)
done = pool.spawn(consumer)
pool.spawn_n(producer)
done.wait()
self.assertEqual(['cons1', 'prod', 'cons2'], results)
def test_timer_cancel(self):
# this test verifies that local timers are not fired
# outside of the context of the spawn
timer_fired = []
def fire_timer():
timer_fired.append(True)
def some_work():
hubs.get_hub().schedule_call_local(0, fire_timer)
pool = greenpool.GreenPool(2)
worker = pool.spawn(some_work)
worker.wait()
eventlet.sleep(0)
eventlet.sleep(0)
self.assertEqual(timer_fired, [])
def test_reentrant(self):
pool = greenpool.GreenPool(1)
def reenter():
waiter = pool.spawn(lambda a: a, 'reenter')
self.assertEqual('reenter', waiter.wait())
outer_waiter = pool.spawn(reenter)
outer_waiter.wait()
evt = event.Event()
def reenter_async():
pool.spawn_n(lambda a: a, 'reenter')
evt.send('done')
pool.spawn_n(reenter_async)
self.assertEqual('done', evt.wait())
def assert_pool_has_free(self, pool, num_free):
self.assertEqual(pool.free(), num_free)
def wait_long_time(e):
e.wait()
timer = eventlet.Timeout(1)
try:
evt = event.Event()
for x in six.moves.range(num_free):
pool.spawn(wait_long_time, evt)
# if the pool has fewer free than we expect,
# then we'll hit the timeout error
finally:
timer.cancel()
# if the runtime error is not raised it means the pool had
# some unexpected free items
timer = eventlet.Timeout(0, RuntimeError)
try:
self.assertRaises(RuntimeError, pool.spawn, wait_long_time, evt)
finally:
timer.cancel()
# clean up by causing all the wait_long_time functions to return
evt.send(None)
eventlet.sleep(0)
eventlet.sleep(0)
def test_resize(self):
pool = greenpool.GreenPool(2)
evt = event.Event()
def wait_long_time(e):
e.wait()
pool.spawn(wait_long_time, evt)
pool.spawn(wait_long_time, evt)
self.assertEqual(pool.free(), 0)
self.assertEqual(pool.running(), 2)
self.assert_pool_has_free(pool, 0)
# verify that the pool discards excess items put into it
pool.resize(1)
# cause the wait_long_time functions to return, which will
# trigger puts to the pool
evt.send(None)
eventlet.sleep(0)
eventlet.sleep(0)
self.assertEqual(pool.free(), 1)
self.assertEqual(pool.running(), 0)
self.assert_pool_has_free(pool, 1)
# resize larger and assert that there are more free items
pool.resize(2)
self.assertEqual(pool.free(), 2)
self.assertEqual(pool.running(), 0)
self.assert_pool_has_free(pool, 2)
def test_pool_smash(self):
# The premise is that a coroutine in a Pool tries to get a token out
# of a token pool but times out before getting the token. We verify
# that neither pool is adversely affected by this situation.
pool = greenpool.GreenPool(1)
tp = pools.TokenPool(max_size=1)
tp.get() # empty out the pool
def do_receive(tp):
timer = eventlet.Timeout(0, RuntimeError())
try:
tp.get()
self.fail("Shouldn't have received anything from the pool")
except RuntimeError:
return 'timed out'
else:
timer.cancel()
# the spawn makes the token pool expect that coroutine, but then
# immediately cuts bait
e1 = pool.spawn(do_receive, tp)
self.assertEqual(e1.wait(), 'timed out')
# the pool can get some random item back
def send_wakeup(tp):
tp.put('wakeup')
gt = eventlet.spawn(send_wakeup, tp)
# now we ask the pool to run something else, which should not
# be affected by the previous send at all
def resume():
return 'resumed'
e2 = pool.spawn(resume)
self.assertEqual(e2.wait(), 'resumed')
# we should be able to get out the thing we put in there, too
self.assertEqual(tp.get(), 'wakeup')
gt.wait()
def test_spawn_n_2(self):
p = greenpool.GreenPool(2)
self.assertEqual(p.free(), 2)
r = []
def foo(a):
r.append(a)
gt = p.spawn(foo, 1)
self.assertEqual(p.free(), 1)
gt.wait()
self.assertEqual(r, [1])
eventlet.sleep(0)
self.assertEqual(p.free(), 2)
# Once the pool is exhausted, spawning forces a yield.
p.spawn_n(foo, 2)
self.assertEqual(1, p.free())
self.assertEqual(r, [1])
p.spawn_n(foo, 3)
self.assertEqual(0, p.free())
self.assertEqual(r, [1])
p.spawn_n(foo, 4)
self.assertEqual(set(r), set([1, 2, 3]))
eventlet.sleep(0)
self.assertEqual(set(r), set([1, 2, 3, 4]))
def test_exceptions(self):
p = greenpool.GreenPool(2)
for m in (p.spawn, p.spawn_n):
self.assert_pool_has_free(p, 2)
m(raiser, RuntimeError())
self.assert_pool_has_free(p, 1)
p.waitall()
self.assert_pool_has_free(p, 2)
m(raiser, greenlet.GreenletExit)
self.assert_pool_has_free(p, 1)
p.waitall()
self.assert_pool_has_free(p, 2)
def test_imap(self):
p = greenpool.GreenPool(4)
result_list = list(p.imap(passthru, range(10)))
self.assertEqual(result_list, list(range(10)))
def test_empty_imap(self):
p = greenpool.GreenPool(4)
result_iter = p.imap(passthru, [])
self.assertRaises(StopIterat
|
JockeTF/fimfarchive
|
tests/tasks/conftest.py
|
Python
|
gpl-3.0
| 2,907 | 0 |
"""
Common task fixtures.
"""
#
# Fimfarchive, preserves stories from Fimfiction.
# Copyright (C) 2020 Joakim Soderlund
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from copy import deepcopy
from typing import Dict
from fimfarchive.exceptions import InvalidStoryError
from fimfarchive.converters import Converter
from fimfarchive.fetchers import Fetcher
from fimfarchive.stories import Story
from fimfarchive.utils import Empty
class DummyConverer(Converter):
"""
Converter that increments a counter.
"""
def __call__(self, story: Story) -> Story:
meta = deepcopy(story.meta)
meta['conversions'] += 1
return story.merge(meta=meta)
class DummyFetcher(Fetcher):
"""
Fetcher with local instance storage.
"""
def __init__(self):
"""
Constructor.
"""
self.stories: Dict[int, Story] = dict()
def add(self, key, date, flavors=(), data=Empty):
"""
Adds a story to the fetcher.
"""
meta = {
'id': key,
'title': f't{key}',
'date_modified': date,
'conversions': 0,
'author': {
'id': key,
'name': f'n{key}'
},
'chapters': [
{'id': key},
],
}
if data is Empty:
text = f'd{key}'
data = text.encode()
story = Story(
key=key,
fetcher=self,
meta=meta,
data=data,
flavors=flavors,
)
self.stories[key] = story
return story
def fetch(self, key, prefetch_meta=None, prefetch_data=None):
"""
Returns a previously stored story.
"""
try:
return self.stories[key]
except KeyError:
raise InvalidStoryError()
def fetch_data(self, key):
"""
Raises exception for missing data.
"""
raise InvalidStoryError()
def fetch_meta(self, key):
"""
Raises exception for missing meta.
"""
raise InvalidStoryError()
def __iter__(self):
"""
Yields all previously stored stories.
"""
for key in sorted(self.stories.keys()):
yield self.stories[key]
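# Editor's note: an illustrative usage sketch (not part of the original file)
# showing how these dummy fixtures combine in a test; the key and date values
# below are made-up placeholders.
def _example_usage():
    fetcher = DummyFetcher()
    story = fetcher.add(key=1, date='2020-01-01')
    converter = DummyConverer()
    converted = converter(story)
    assert converted.meta['conversions'] == 1
    assert fetcher.fetch(1) is story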
|
nutszebra/ddp
|
illust2vecNC.py
|
Python
|
mit
| 4,789 | 0.019661 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os, sys, re
import random
from time import time
import cv2
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
import overwrite
from chainer.functions import caffe
"""
official page: http://illustration2vec.net/
paper: http://illustration2vec.net/papers/illustration2vec-main.pdf
caffe model: http://illustration2vec.net/models/illust2vec_ver200.caffemodel
image_mean: http://illustration2vec.net/models/image_mean.npy
layer structure is like this:
[(u'conv1_1', [u'data'], [u'conv1_1']),
(u'relu1_1', [u'conv1_1'], [u'conv1_1']),
(u'pool1', [u'conv1_1'], [u'pool1']),
(u'conv2_1', [u'pool1'], [u'conv2_1']),
(u'relu2_1', [u'conv2_1'], [u'conv2_1']),
(u'pool2', [u'conv2_1'], [u'pool2']),
(u'conv3_1', [u'pool2'], [u'conv3_1']),
(u'relu3_1', [u'conv3_1'], [u'conv3_1']),
(u'conv3_2', [u'conv3_1'], [u'conv3_2']),
(u'relu3_2', [u'conv3_2'], [u'conv3_2']),
(u'pool3', [u'conv3_2'], [u'pool3']),
(u'conv4_1', [u'pool3'], [u'conv4_1']),
(u'relu4_1', [u'conv4_1'], [u'conv4_1']),
(u'conv4_2', [u'conv4_1'], [u'conv4_2']),
(u'relu4_2', [u'conv4_2'], [u'conv4_2']),
(u'pool4', [u'conv4_2'], [u'pool4']),
(u'conv5_1', [u'pool4'], [u'conv5_1']),
(u'relu5_1', [u'conv5_1'], [u'conv5_1']),
(u'conv5_2', [u'conv5_1'], [u'conv5_2']),
(u'relu5_2', [u'conv5_2'], [u'conv5_2']),
(u'pool5', [u'conv5_2'], [u'pool5']),
(u'conv6_1', [u'pool5'], [u'conv6_1']),
(u'relu6_1', [u'conv6_1'], [u'conv6_1']),
(u'conv6_2', [u'conv6_1'], [u'conv6_2']),
(u'relu6_2', [u'conv6_2'], [u'conv6_2']),
(u'conv6_3', [u'conv6_2'], [u'conv6_3']),
(u'relu6_3', [u'conv6_3'], [u'conv6_3']),
(u'drop6_3', [u'conv6_3'], [u'conv6_3']),
(u'encode1', [u'conv6_3'], [u'encode1']),
(u'encode2', [u'encode1neuron'], [u'encode2'])]
"""
def getNeuralCode(directory, layer="conv5_1", gpu=-1):
model = "illust2vec_ver200.caffemodel"
#use illust2vec_ver200
print('illust2vec_ver200 is being loaded!')
#calculate load time
timeMemory = time()
func = caffe.CaffeFunction(model)
print('illust2vec_ver200 was loaded!')
print('It took ' + str(int(time() - timeMemory)) + " seconds")
#gpu mode
if gpu >= 0:
cuda.init(gpu)
func.to_gpu()
in_size = 224
# Constant mean over spatial pixels
mean_image = np.load("illust2vec_image_mean.npy")
print("neural code is extraced from layer " + layer)
def neuralCode(x):  # inference function
y, = func(inputs={'data': x}, outputs=[layer],
train=False)
return y.data[0]
cropwidth = 256 - in_size
start = cropwidth // 2
stop = start + in_size
mean_image = mean_image[:, start:stop, start:stop].copy()
target_shape = (256, 256)
output_side_length=256
numPic = 0
#count pictures
for folderPath in directory:
#search pictures
picturePath = [picture for picture in os.listdir(folderPath)
if re.findall(r"\.png$|\.jpg$|\.JPG$|\.PNG$|\.JPEG$",picture)]
print("you have " + str(len(picturePath)) + " pictures in " + folderPath)
numPic = numPic + len(picturePath)
print("you have totally " + str(numPic) + " pictures")
count = 0
answer = {}
for folderPath in directory:
#search pictures
picturePath = [picture for picture in os.listdir(folderPath)
if re.findall(r"\.png$|\.jpg$|\.JPG$|\.PNG$|\.JPEG$",picture)]
for picture in picturePath:
timeMemory = time()
count = count + 1
#load image file
image = cv2.imread(folderPath + "/" + picture)
#resize and crop
height, width, depth = image.shape
new_height = output_side_length
new_width = output_side_length
if height > width:
new_height = output_side_length * height / width
else:
new_width = output_side_length * width / height
resized_img = cv2.resize(image, (new_width, new_height))
height_offset = (new_height - output_side_length) / 2
width_offset = (new_width - output_side_length) / 2
image= resized_img[height_offset:height_offset + output_side_length,
width_offset:width_offset + output_side_length]
#subtract mean image
image = image.transpose(2, 0, 1)
image = image[:, start:stop, start:stop].astype(np.float32)
image -= mean_image
x_batch = np.ndarray(
(1, 3, in_size,in_size), dtype=np.float32)
x_batch[0]=image
if gpu >= 0:
x_batch=cuda.to_gpu(x_batch)
#get neural code
x = chainer.Variable(x_batch, volatile=True)
answer[folderPath + "/" + picture] = neuralCode(x)
sen = overwrite.bar(count,numPic)
overwrite.overwrite(sen)
return answer
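# Editor's note: a minimal usage sketch (not part of the original file); the
# image directory below is a placeholder and the caffemodel / mean files named
# above are assumed to sit in the working directory.
if __name__ == '__main__':
    codes = getNeuralCode(["./images"], layer="conv5_1", gpu=-1)
    for picture_path, code in codes.items():
        print(picture_path, code.shape)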
|
UdK-VPT/Open_eQuarter
|
mole/extensions/prop_buildings/oeq_HHRS.py
|
Python
|
gpl-2.0
| 1,148 | 0.008711 |
# -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_contemporary_base_uvalue_by_building_age_lookup
def calculation(self=None, parameters={},feature = None):
from math import floor, ceil
from PyQt4.QtCore import QVariant
hhrs = float(oeq_global.OeQ_project_info['heating_degree_days']) * 24
return{'HHRS':{'type': QVariant.Double, 'value': hhrs}}
extension = OeQExtension(
extension_id=__name__,
category='Evaluation',
subcategory='General',
extension_name='Average Heating Hours',
layer_name= 'Average Heating Hours',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
field_id='HHRS',
source_type='none',
par_in=[],
sourcelayer_name=config.data_layer_name,
targetlayer_name=config.data_layer_name,
active=True,
show_results=['HHRS'],
description=u"Calculate Average Heating Hou
|
rs",
evaluation_method=calculation)
extension.registerExtension(default=True)
|
nens/model-databank
|
model_databank/conf.py
|
Python
|
gpl-3.0
| 1,241 | 0 |
from django.conf import settings
from appconf import AppConf
class ModelDatabankAppConf(AppConf):
"""App specific settings. Overridable in global settings.
DATA_PATH: path to the real mercurial repositories; these should never
be manipulated directly. Active repositories are symlinked in the
SYMLINK_PATH directory by their slug name.
SYMLINK_PATH: path with symlinks to active model reference repositories.
UPLOAD_PATH: uploaded zip files end up in this directory.
ZIP_EXTRACT_PATH: the uploaded zip files are extracted in this directory.
DOWNLOAD_PATH: repositories that are zipped for download are put in here.
REPOSITORY_URL_ROOT: root url for cloning repositories.
MAX_REVISIONS: maximum total number of revisions shown for a model
MAX_REVISIONS_PER_PAGE: maximum number of revisions per page.
"""
DATA_PATH = "/tmp/model_databank_repositories"
SYMLINK_PATH = "/tmp/model_databank"
UPLOAD_PATH = "/tmp/uploads"
ZIP_EXTRACT_PATH = "/tmp/extracted_zip_files/"
DOWNLOAD_PATH = "/tmp/downloads"
REPOSITORY_URL_ROOT = 'http://127.0.0.1:8012'
MAX_REVISIONS = 500
MAX_REVISIONS_PER_PAGE = 100
class Meta:
prefix = 'model_databank'
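# Editor's note: an illustrative sketch (not part of the original file). With
# django-appconf, the Meta prefix above is normally reflected as upper-cased,
# prefixed Django settings once this module is imported, e.g. (assumed names):
#
#     from django.conf import settings
#     repo_root = settings.MODEL_DATABANK_DATA_PATH
#     page_size = settings.MODEL_DATABANK_MAX_REVISIONS_PER_PAGE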
|
yanheven/nova
|
nova/tests/functional/v3/test_deferred_delete.py
|
Python
|
apache-2.0
| 1,610 | 0 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.v3 import test_servers
class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
extension_name = "os-deferred-delete"
def setUp(self):
super(DeferredDeleteSampleJsonTests, self).setUp()
self.flags(reclaim_instance_interval=1)
def test_restore(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'restore-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_force_delete(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'force-delete-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
|
cloud-io/CloudUp
|
src/admin_views.py
|
Python
|
mit
| 2,370 | 0.007173 |
from db_utils import deleteLinksByHost
from db_utils import deleteHost
from db_utils import addNewHost
from db_utils import getAllHosts
from error_message import showErrorPage
from error_message import ErrorMessages
import utils
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = utils.getJinjaEnvironment()
class AddHost(webapp2.RequestHandler):
def get(self):
"""
description:
adds a new host to the database, and redirects to '/admin'
params:
name - host name
interval - pinging interval for all the links belonging to the host.
response:
redirect to '/admin'
"""
name = self.request.get('name')
if name is None or len(name) == 0:
showErrorPage(self, ErrorMessages.invalidHostName())
return
if ndb.Key('Host', name).get() is not None:
showErrorPage(self, ErrorMessages.duplicatingHostName())
return
try:
interval = int(self.request.get('interval'))
except ValueError:
showErrorPage(self, ErrorMessages.invalidHostInterval())
return
if interval == 0:
showErrorPage(self, ErrorMessages.invalidHostInterval())
return
addNewHost(name, interval)
self.redirect('/admin')
class DeleteHost(webapp2.RequestHandler):
def get(self):
"""
description:
deletes an existing host, and redirects to '/'. All the links belonging
to the host will also be deleted.
params:
name - host name
response:
redirect to '/'
"""
name = self.request.get('name')
if name is None or len(name) == 0:
showErrorPage(self, ErrorMessages.invalidHostName())
return
hostKey = ndb.Key('Host', name)
if hostKey.get() is None:
showErrorPage(self, ErrorMessages.hostDoesNotExist())
return
deleteLinksByHost(name)
deleteHost(name)
self.redirect('/')
class AdminPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
hosts = getAllHosts()
template_values = {
'hosts': hosts,
'user': user,
}
template = JINJA_ENVIRONMENT.get_template('admin.html')
self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/admin/host/add', AddHost),
('/admin/host/delete', DeleteHost),
], debug=True)
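# Editor's note: illustrative request sketches (not part of the original file);
# the host name and interval values are made up. The handlers above are driven
# by query parameters, e.g.:
#
#     GET /admin/host/add?name=example-host&interval=5   -> adds a host, redirects to /admin
#     GET /admin/host/delete?name=example-host           -> deletes the host and its links, redirects to /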
|
classner/barrista
|
tests.py
|
Python
|
mit
| 108,780 | 0.000276 |
"""Unittests for the barrista project."""
# pylint: disable=F0401, C0330, C0302, C0103, R0201, R0914, R0915, W0212
# pylint: disable=no-name-in-module, no-member
import unittest
import logging
logging.basicConfig(level=logging.WARN)
try:
import cv2 # pylint: disable=W0611
CV2_AVAILABLE = True
except ImportError:
CV2_AVAILABLE = False
class NetSpecificationTestCase(unittest.TestCase):
"""Tests the :py:class:`barrista.design.NetSpecification` class."""
def test_initialization(self):
"""Test initialization and checks."""
import barrista.design as design
# Basic init works.
_ = design.NetSpecification([[2, 2]])
_ = design.NetSpecification([[2, 2, 2, 2]])
# Checks work.
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2, 2, 2], [2, 2]])
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2]],
predict_inputs=['test'])
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2]],
predict_input_shapes=[[2, 2]])
with self.assertRaises(AssertionError):
_ = design.NetSpecification([[2, 2]],
predict_inputs=['test'],
predict_input_shapes=[[]])
_ = design.NetSpecification([[10, 3, 51, 51], [10]], # noqa
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3]])
def test_get_predict_net_specification(self):
"""Test the method ``get_predict_net_specification``."""
import barrista.design as design
with self.assertRaises(AssertionError):
netspec = design.NetSpecification([[2, 2]])
netspec.get_predict_net_specification()
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3]])
pred_netspec = netspec.get_predict_net_specification()
self.assertEqual(pred_netspec.name, netspec.name)
self.assertEqual(pred_netspec.debug_info, netspec.debug_info)
self.assertEqual(pred_netspec.stages, ['predict'])
self.assertEqual(pred_netspec.level, netspec.level)
self.assertEqual(pred_netspec.phase, design.Phase.TEST)
self.assertEqual(pred_netspec.force_backward, False)
self.assertEqual(pred_netspec.layers, netspec.layers)
self.assertEqual(pred_netspec.inputs, netspec.predict_inputs)
self.assertEqual(pred_netspec.input_shape,
netspec.predict_input_shapes)
def test_to_pbuf_message(self):
"""Test the method ``to_pbuf_message``."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
netspec_msg = netspec.to_pbuf_message()
self.assertEqual(netspec_msg.IsInitialized(), True)
self.assertEqual(netspec_msg.input, netspec.inputs)
if hasattr(netspec_msg, 'input_shape'):
for msgshape, specshape in zip(netspec_msg.input_shape,
netspec.input_shape):
self.assertEqual(list(msgshape.dim), specshape)
self.assertEqual(len(netspec_msg.layer), len(netspec.layers))
self.assertEqual(netspec_msg.state.phase, netspec.phase)
self.assertEqual(netspec_msg.state.level, netspec.level)
self.assertEqual(netspec_msg.state.stage, netspec.stages)
self.assertEqual(netspec_msg.name, netspec.name)
self.assertEqual(netspec_msg.debug_info, netspec.debug_info)
def test_prototxt_conversion(self):
"""Test the prototxt conversion methods."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer
import tempfile
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[10, 3, 3, 3]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
_ = netspec.instantiate()
netspec_rl = design.NetSpecification.from_prototxt(
netspec.to_prototxt())
# Since we have the test for `to_pbuf_message`, we can assume the
# conversion to prototxt works correctly.
self.assertEqual(netspec_rl.to_prototxt(), netspec.to_prototxt())
# Test file io.
with tempfile.NamedTemporaryFile(mode='r',
suffix=".prototxt") as tmpfile:
netspec.to_prototxt(output_filename=tmpfile.name)
tmpfile.file.flush()
netspec_rl = design.NetSpecification.from_prototxt(
filename=tmpfile.name)
# Test instantiation of a loaded net.
_ = netspec_rl.instantiate() # noqa
def test_copy(self):
"""Test the method ``copy``."""
import barrista.design as design
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[2, 3, 2, 2]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
_ = netspec.instantiate()
netptext = netspec.to_prototxt()
netspec_copy = netspec.copy()
_ = netspec_copy.instantiate() # noqa
netcptext = netspec_copy.to_prototxt()
self.assertEqual(netptext, netcptext)
def test_visualize(self):
"""Test the ``visualize`` function."""
import barrista.design as design
# pylint: disable=W0212
if design._draw is None:
return
from barrista.design import ConvolutionLayer, ReLULayer
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
inputs=['data', 'annotations'],
predict_inputs=['data'],
predict_input_shapes=[[2, 3, 2, 2]])
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 1}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
netspec.layers.extend(layers)
viz = netspec.visualize()
self.assertEqual(viz.ndim, 3)
def test_instantiate(self):
"""Test the method ``instatiate``."""
import barrista.design as design
|
uggla/alexandria
|
alexandria/drivers.py
|
Python
|
apache-2.0
| 4,848 | 0.010111 |
# coding=utf-8
import pprint
import config
import json
import urllib
import requests
class Driver(object):
def __init__(self):
self.driver_type = self.__class__.__name__
# Get credentials from conf files for CMDB
pass
def get_driver_type(self):
return self.driver_type
def get_ci(self, ci):
pass
def set_ci(self, ci):
pass
class Itop(Driver):
def get_ci(self, ci):
print("Get from itop")
return True
def set_ci(self, ci):
username = config.alexandria.conf_file.get_driver_parameters("itop", "loginItop")
password = config.alexandria.conf_file.get_driver_parameters("itop", "passwordItop")
config.logger.debug("login : {}, password : {}".format(
username,
password
)
)
# Craft request body and header
urlbase = config.alexandria.conf_file.get_driver_parameters("itop", "endpoint")
request = '{"operation":"core/create","comment":"Synchronization from Alexandria","class":"Server","output_fields":"id,name,ram", "fields":{"org_id": "3","name":"' + ci.data["Name"] + '","ram":"' +
|
format((ci.data["MemorySummary"])["TotalSystemMemoryGiB"]) + '","serialnumber":"' + ci.data["SerialNumber"] + '"}}'
urlparam = {'version' : '1.0',
'auth_user' : username,
'auth_pwd' : password,
'json_data' : request
}
#header = {'Content-type': 'application/json'}
url = urlbase + '?' + urllib.urlencode(urlparam)
config.logger.debug(url)
#=======================================================================
# answer = requests.post(url,
# headers=header,
# verify="False"
# )
#=======================================================================
answer = requests.post(url,
auth=(username,password)
)
config.logger.debug(answer.status_code)
config.logger.debug(answer.text)
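# Editor's note: for reference only (not part of the original file), the string
# built above expands to roughly this JSON payload for iTop's REST endpoint;
# field values come from ci.data and org_id "3" is hard-coded:
#
#     {"operation": "core/create",
#      "comment": "Synchronization from Alexandria",
#      "class": "Server",
#      "output_fields": "id,name,ram",
#      "fields": {"org_id": "3", "name": "<ci name>", "ram": "<total GiB>", "serialnumber": "<serial>"}}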
class Redfish(Driver):
def get_ci(self,ci):
print("Get from redfish")
import redfish
print(ci.ip_mgmt + " - " + ci.login + " - " + ci.password)
#remote_mgmt = redfish.connect(ci.ip_mgmt, ci.login, ci.password, verify_cert=False)
remote_mgmt = redfish.connect(ci.ip_mgmt, ci.login, ci.password, simulator=True, enforceSSL=False)
ci.ci_type = remote_mgmt.Systems.systems_list[0].get_parameter("@odata.type")
ci.data = remote_mgmt.Systems.systems_list[0].get_parameters()
#print("Redfish API version : {} \n".format(remote_mgmt.get_api_version()))
return True
def set_ci(self, ci):
print "Push to Redfish"
return True
class Ironic(Driver):
pass
class Mondorescue(Driver):
pass
class Fakecmdb(Driver):
def set_ci(self, ci):
# Determine ci type so we can do the proper action.
pp = pprint.PrettyPrinter(indent=4)
if ci.ci_type == "Manager":
print("We are in Fakecmdb driver !")
pp.pprint(ci.data)
# Simply write a json file with ci.data content.
with open("Fakecmdb.json", "w") as jsonfile:
json.dump(ci.data, jsonfile, indent=4)
jsonfile.close()
#
#=======================================================================
class Fakeprovider(Driver):
def get_ci(self, ci):
# Simulate a driver that will provide Manager data.
# TODO a connect method must be implemented
# Assuming the connection is ok.
# Now create a copy of manager model from reference model.
#ci.ci_type = "Manager"
#ci.data = config.alexandria.model.get_model("Manager")
# Update the structure with data
# TODO : think to encapsulate to not edit ci.data directly.
# This could be also a way to check source of truth.
# If data provided by our driver is not the source of truth
# then discard it.
#ci.data["ManagerType"] = "BMC"
#ci.data["Model"] = "Néné Manager"
#ci.data["FirmwareVersion"] = "1.00"
#if ci.data is config.alexandria.model.Manager:
# print "identical"
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(ci.ci_type)
class DriverCollection(list):
pass
|
warmerdam/plcompositor
|
oev_tools/ev_mosaic_viewer.py
|
Python
|
apache-2.0
| 9,675 | 0.007649 |
from gtk import *
import sys
import gview
import string
import gvutils
import GtkExtra
import time
import os
import gviewapp
import gvplot
import gdal
sys.path.append('.')
import ev_profile
from quality_hist_tool import QualityHistogramROITool
LYR_GENERIC = 0
LYR_LANDSAT8 = 1
LYR_SOURCE_TRACE = 2
LYR_QUALITY = 3
LYR_NOT_RASTER = 4
def layer_class(layer):
try:
dataset = layer.get_parent().get_dataset()
except:
return LYR_NOT_RASTER
if (dataset.GetRasterBand(1).DataType == gdal.GDT_UInt16 or dataset.GetRasterBand(1).DataType == gdal.GDT_Int16) and dataset.RasterCount == 4:
return LYR_LANDSAT8
if dataset.GetDescription().find('st-') != -1:
return LYR_SOURCE_TRACE
if dataset.RasterCount > 1 and dataset.GetRasterBand(1).DataType == gdal.GDT_Float32:
return LYR_QUALITY
return LYR_GENERIC
class MosaicViewerTool(gviewapp.Tool_GViewApp):
def __init__(self,app=None):
gviewapp.Tool_GViewApp.__init__(self,app)
self.init_menu()
self.hist_tool = QualityHistogramROITool(app)
self.graphing = False
def launch_dialog(self,*args):
self.win = MosaicDialog(app=gview.app, tool=self)
self.win.show()
self.win.rescale_landsat_cb()
self.win.gui_refresh()
self.track_view_activity()
def key_down_cb(self, viewarea, event):
try:
print 'down %s/%d' % (chr(event.keyval), event.keyval)
except:
print 'down <undefined>/%d' % event.keyval
if event.keyval == ord('g'):
if not self.graphing:
print 'enable graphing'
self.graphing = True
else:
print 'disable graphing'
self.graphing = False
def key_up_cb(self, viewarea, event):
try:
print 'up %s/%d' % (chr(event.keyval), event.keyval)
except:
print 'up <undefined>/%d' % event.keyval
def mouse_cb(self, viewarea, event):
#print 'mouse event:', event.type
if event.type == 4:
print event.type, event.button, event.state, event.x, event.y
if self.graphing and event.button == 1:
ev_profile.graph(viewarea.map_pointer((event.x, event.y)))
elif event.type == 3:
#print event.x, event.y
#print viewarea.map_pointer((event.x, event.y))
#if self.graphing:
# ev_profile.graph(viewarea.map_pointer((event.x, event.y)))
pass
def track_view_activity(self):
view = gview.app.view_manager.get_active_view_window()
view.viewarea.connect('key-press-event', self.key_down_cb)
view.viewarea.connect('key-release-event', self.key_up_cb)
view.viewarea.connect('motion-notify-event', self.mouse_cb)
view.viewarea.connect('button-press-event', self.mouse_cb)
def init_menu(self):
self.menu_entries.set_entry("Tools/Mosaic Viewer",2,
self.launch_dialog)
class MosaicDialog(GtkWindow):
def __init__(self,app=None, tool=None):
self.tool = tool
self.updating = False
GtkWindow.__init__(self)
self.quality_layer = None
self.set_title('Mosaic Viewer')
self.create_gui()
self.show()
self.gui_refresh()
def show(self):
GtkWindow.show_all(self)
def close(self, *args):
self.hide()
self.visibility_flag = 0
return TRUE
def set_quality_band_cb(self,*args):
if self.updating or self.quality_layer is None:
return
try:
scale_min = float(self.min_entry.get_text())
except:
scale_min = 0.0
try:
scale_max = float(self.max_entry.get_text())
except:
scale_max = 1.0;
dataset = self.quality_layer.get_parent().get_dataset()
new_select = None
new_text = self.band_combo.entry.get_text()
for i in range(len(self.quality_band_names)):
if new_text == self.quality_band_names[i]:
new_select = i+1
raster = gview.manager.get_dataset_raster( dataset, new_select)
for isrc in range(3):
self.quality_layer.set_source(isrc, raster, scale_min, scale_max)
self.tool.hist_tool.analyze_cb()
def quality_refresh(self):
assert self.quality_layer is not None
dataset = self.quality_layer.get_parent().get_dataset()
self.quality_band_names = []
for band_num in range(1,dataset.RasterCount+1):
self.quality_band_names.append(
dataset.GetRasterBand(band_num).GetMetadata()['DESCRIPTION'])
self.band_combo.set_popdown_strings( self.quality_band_names)
def gui_refresh(self):
if self.quality_layer is not None:
self.quality_refresh()
def adjustment_cb(self,adjustment,*args):
if self.updating or self.quality_layer is None:
return
value = adjustment.value
if adjustment == self.min_adjustment:
self.min_entry.set_text(str(value))
else:
self.max_entry.set_text(str(value))
self.set_quality_band_cb()
def entry_cb(self,entry,*args):
if self.updating:
return
self.set_quality_band_cb()
def find_tool(self, tool_name):
for (name, tool_inst) in gview.app.Tool_List:
if name == tool_name:
return tool_inst
return None
def create_gui(self):
vbox = GtkVBox(spacing=5)
vbox.set_border_width(10)
self.add(vbox)
# Add the Quality Band Selection Combo
hbox = GtkHBox(spacing=5)
vbox.pack_start(hbox,expand=FALSE)
hbox.pack_start(GtkLabel('Quality:'), expand=FALSE)
self.band_combo = GtkCombo()
hbox.pack_start(self.band_combo)
self.band_combo.entry.connect('changed', self.set_quality_band_cb)
self.band_combo.set_popdown_strings(
['XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'])
band_list = ['inactive']
self.band_combo.set_popdown_strings( band_list )
# ------ Quality Scale Min -------
hbox = GtkHBox(spacing=5)
vbox.pack_start(hbox)
hbox.pack_start(GtkLabel('Scale Min:'),expand=FALSE)
self.min_adjustment = GtkAdjustment(0.0, 0.0, 1.0, 0.05, 0.05, 0.05)
self.min_adjustment.connect('value-changed',self.adjustment_cb)
self.min_slider = GtkHScale(self.min_adjustment)
self.min_slider.set_digits(3)
hbox.pack_start(self.min_slider)
self.min_entry = GtkEntry(maxlen=8)
self.min_entry.connect('activate',self.entry_cb)
self.min_entry.connect('leave-notify-event',self.entry_cb)
self.min_entry.set_text('0.0')
hbox.pack_start(self.min_entry,expand=FALSE)
# ------ Quality Scale Max -------
hbox = GtkHBox(spacing=5)
vbox.pack_start(hbox)
hbox.pack_start(GtkLabel('Scale Max:'),expand=FALSE)
self.max_adjustment = GtkAdjustment(1.0, 0.0, 1.0, 0.05, 0.05, 0.05)
self.max_adjustment.connect('value-changed',self.adjustment_cb)
self.max_slider = GtkHScale(self.max_adjustment)
self.max_slider.set_digits(3)
hbox.pack_start(self.max_slider)
self.max_entry = GtkEntry(maxlen=8)
self.max_entry.connect('activate',self.entry_cb)
self.max_entry.connect('leave-notify-event',self.entry_cb)
self.max_entry.set_text('1.0')
hbox.pack_start(self.max_entry,expand=FALSE)
# Add the Rescale and Close action buttons.
box2 = GtkHBox(spacing=10)
vbox.add(box2)
box2.show()
execute_btn = GtkButton("Histogram")
execute_btn.connect("clicked", self.tool.hist_tool.roipoitool_cb)
box2.pack_start(execute_btn)
execute_btn = GtkButton("Rescale")
execute_btn.connect("clicked", self.rescale_landsat_cb)
box2.pack_start(execute_btn)
execute_btn = GtkButton("Reload")
execute_btn.connect("cli
|
ncos/lisa
|
src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/cachecontrol/compat.py
|
Python
|
mit
| 724 | 0 |
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
try:
import cPickle as pickle
except ImportError:
import pickle
# Handle the case where the requests module has been patched to not have
# urllib3 bundled as part of its source.
try:
from pip._vendor.requests.packages.urllib3.response import HTTPResponse
except ImportError:
from pip._vendor.urllib3.response import HTTPResponse
try:
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
except ImportError:
from pip._vendor.urllib3.util import is_fp_closed
# Replicate some six behaviour
try:
text_type = unicode
except NameError:
text_type = str
|
jgresula/jagpdf
|
code/test/apitest/py/defaultfont2.py
|
Python
|
mit
| 952 | 0.003151 |
#!/usr/bin/env python
# Copyright (c) 2005-2009 Jaroslav Gresula
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
import jagpdf
import jag.testlib as testlib
def test_main(argv=None):
doc = testlib.create_test_doc(argv, 'defaultfont2.pdf')
doc.page_start(200, 36)
doc.page().canvas().text(10, 10, 'written in the default font')
doc.page_end()
doc.page_start(200, 48)
canvas = doc.page().canvas()
canvas.state_save()
courier = doc.font_load('standard;name=Courier;size=10')
canvas.text_font(courier)
canvas.text(10, 10, 'written in Courier')
canvas.state_restore()
doc.page().canvas().text(10, 30, 'written in the default font')
doc.page_end()
doc.page_start(200, 36)
doc.page().canvas().text(10, 10, 'written in the default font')
doc.page_end()
doc.finalize()
if __name__ == "__main__":
test_main()
|
minlexx/xnovacmd
|
ui/xnova/xn_page_cache.py
|
Python
|
gpl-2.0
| 4,643 | 0.002585 |
import os
import pathlib
import locale
import time
from . import xn_logger
logger = xn_logger.get(__name__, debug=False)
# Encapsulates downloaded-page storage: keeps all downloaded files in ./cache.
# A page is first requested from this cache, and only if get() returns None
# is it downloaded over the network.
class XNovaPageCache:
def __init__(self):
self._pages = {}
self._mtimes = {}
self._page_cache_dir = './cache/page'
self._img_cache_dir = './cache/img'
self.save_load_encoding = locale.getpreferredencoding()
logger.info('Locale preferred encoding: {0}'.format(self.save_load_encoding))
# scan ./cache/page directory and load all files into memory
def load_from_disk_cache(self, clean=True):
if clean:
self._pages = {}
self._mtimes = {}
cache_dir = pathlib.Path(self._page_cache_dir)
if not cache_dir.exists():
try:
cache_dir.mkdir(parents=True)
logger.info('Created pages cache dir')
except OSError as ose:
logger.error('Cannot create page cache dir: {0}'.format(str(ose)))
num_loaded = 0
for subitem in cache_dir.iterdir():
if subitem.is_file():
try:
# get file last modification time
stt = subitem.stat()
mtime = int(stt.st_mtime)
with subitem.open(mode='rt', encoding=self.save_load_encoding) as f:
fname = subitem.name
contents = f.read()
self._pages[fname] = contents # save file contents
self._mtimes[fname] = mtime # save also modification time
num_loaded += 1
except IOError as ioe:
pass
except UnicodeDecodeError as ude:
logger.error('Encoding error in [{0}], skipped: {1}'.format(subitem.name, str(ude)))
logger.info('Loaded {0} cached pages.'.format(num_loaded))
# ensure that image cache dir also exists
cache_dir = pathlib.Path(self._img_cache_dir)
if not cache_dir.exists():
try:
cache_dir.mkdir(parents=True)
logger.info('Created images cache dir')
except OSError as ose:
logger.error('Cannot create img cahe dir: {0}'.format(str(ose)))
# save page into cache
def set_page(self, page_name, contents):
if page_name is None:
return
self._pages[page_name] = contents
self._mtimes[page_name] = int(time.time()) # also update modified time!
try:
fn = os.path.join(self._page_cache_dir, page_name)
f = open(fn, mode='wt', encoding=self.save_load_encoding)
# f = open(fn, mode='wt')
f.write(contents)
f.close()
except IOError as ioe:
logger.error('set_page("{0}", ...): IOError: {1}'.format(page_name, str(ioe)))
except UnicodeEncodeError as uee:
logger.critical('set_page("{0}", ...): UnicodeEncodeError: {1}'.format(page_name, str(uee)))
logger.critical(' self.save_load_encoding is "{0}"'.format(self.save_load_encoding))
def save_image(self, img_path: str, img_bytes: bytes):
img_path_plain = img_path.replace('/', '_')
filename = os.path.join(self._img_cache_dir, img_path_plain)
try:
with open(filename, mode='wb') as f:
f.write(img_bytes)
except IOError as ioe:
logger.error('image [{0}] save failed: [{1}]'.format(filename, str(ioe)))
# get page from cache
# the file first requested from this cache,
# and only if get() returns None, it will be
# downloaded over network
def get_page(self, page_name, max_cache_secs=None):
if page_name is None:
return None
if len(page_name) < 1:
return None
if page_name in self._pages:
# should we check file cache time?
if max_cache_secs is None:
# do not check cache time, just return
return self._pages[page_name]
# get current time
tm_now = int(time.time())
tm_cache = self._mtimes[page_name]
tm_diff = tm_now - tm_cache
if tm_diff <= max_cache_secs:
return self._pages[page_name]
logger.info('cache considered invalid for [{0}]: {1}s > {2}s'.format(page_name, tm_diff, max_cache_secs))
return None
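# Editor's note: a minimal usage sketch (not part of the original file); the page
# name and the download_page callable are placeholders illustrating the
# cache-then-network flow described above.
def _example_fetch(cache, download_page):
    cache.load_from_disk_cache()
    content = cache.get_page('overview.html', max_cache_secs=300)
    if content is None:
        content = download_page('overview.html')  # network fetch, supplied by caller
        cache.set_page('overview.html', content)
    return content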
|
spectrumone/online-shop-template
|
myshop/cart/cart.py
|
Python
|
mit
| 3,017 | 0.001326 |
from decimal import Decimal
from django.conf import settings
from coupons.models import Coupon
from shop.models import Product
class Cart(object):
def __init__(self, request):
"""
initialize the cart.
"""
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
# save an empty cart in the session
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
#store current applied coupon
self.coupon_id = self.session.get('coupon_id')
def add(self, product, quantity=1, update_quantity=False):
"""
Add a product to the cart or update it quantity
"""
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {'quantity': 0,
'price': str(product.price)}
if update_quantity:
print('quantity', quantity)
self.cart[product_id]['quantity'] = quantity
else:
self.cart[product_id]['quantity'] += quantity
def save(self):
# Update the session cart
self.session[settings.CART_SESSION_ID] = self.cart
#mark the session as "modified" to make sure it is saved
self.session.modified = True
def remove(self, product):
"""
Remove a product from the cart.
"""
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
def __iter__(self):
"""
Iterate over the items in the cart and get the products
from the database.
"""
product_ids = self.cart.keys()
# get the product objects and add them to the cart
products = Product.objects.filter(id__in=product_ids)
for product in products:
self.cart[str(product.id)]['product'] = product
for item in self.cart.values():
item['price'] = Decimal(item['price'])
item['total_price'] = item['price'] * item['quantity']
yield item
def __len__(self):
"""
Count all items in the cart.
"""
return sum(item['quantity'] for item in self.cart.values())
def get_total_price(self):
return sum(Decimal(item['price']) * item['quantity'] for item in
self.cart.values())
def clear(self):
# remove cart from session
del self.session[settings.CART_SESSION_ID]
self.session.modified = True
@property
def coupon(self):
if self.coupon_id:
return Coupon.objects.get(id=self.coupon_id)
return None
def get_discount(self):
if self.coupon:
return (self.coupon.discount / Decimal('100')) \
* self.get_total_price()
return Decimal('0')
def get_total_price_after_discount(self):
return self.get_total_price() - self.get_discount()
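# Editor's note: an illustrative sketch (not part of the original file) of how a
# Django view might drive this cart; `request` and `product` come from the view.
def _example_view_usage(request, product):
    cart = Cart(request)
    cart.add(product, quantity=2, update_quantity=True)
    cart.save()
    return cart.get_total_price_after_discount()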
|
scott-maddox/openbandparams
|
src/openbandparams/examples/advanced/GaInAsSb_on_GaSb/Plot_Bandgap_vs_Lattice_Constant_of_Quaternary3.py
|
Python
|
agpl-3.0
| 3,726 | 0.001879 |
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of openbandparams.
#
# openbandparams is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# openbandparams is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with openbandparams. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local openbandparams version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..')))
from openbandparams import *
import matplotlib.pyplot as plt
import numpy
quaternary = GaInAsSb
T = 300
# initialize the plot
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel('Lattice Parameter at %g K ($\AA$)' % T)
plt.ylabel('Bandgap at %g K (eV)' % T)
# plot the binaries
xs = []
y_Gamma = []
y_X = []
y_L = []
labels = []
for b in quaternary.binaries:
xs.append(b.a(T=T))
y_Gamma.append(b.Eg_Gamma(T=T))
y_X.append(b.Eg_X(T=T))
y_L.append(b.Eg_L(T=T))
labels.append(b.name)
ax.plot(xs, y_Gamma, 'r.')
ax.plot(xs, y_X, 'b.')
ax.plot(xs, y_L, 'g.')
# label the binaries
for x, y, label in zip(xs, y_Gamma, labels):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
for x, y, label in zip(xs, y_X, labels):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
for x, y, label in zip(xs, y_L, labels):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
# plot the quaternary
indices = numpy.arange(100)
fractions = numpy.linspace(0, 1, 100)
x = numpy.empty(100, dtype=numpy.float)
y_Gamma = numpy.empty(100, dtype=numpy.float)
y_X = numpy.empty(100, dtype=numpy.float)
y_L = numpy.empty(100, dtype=numpy.float)
first = True
for xfrac in numpy.linspace(0, 1, 10):
for i, yfrac in zip(indices, fractions):
instance = quaternary(x=xfrac, y=yfrac)
x[i] = instance.a(T=T)
y_Gamma[i] = instance.Eg_Gamma(T=T)
y_X[i] = instance.Eg_X(T=T)
y_L[i] = instance.Eg_L(T=T)
if first:
ax.plot(x, y_Gamma, 'r-', label='$\Gamma$')
ax.plot(x, y_X, 'b-', label='$X$')
ax.plot(x, y_L, 'g-', label='$L$')
first = False
else:
ax.plot(x, y_Gamma, 'r-')
ax.plot(x, y_X, 'b-')
ax.plot(x, y_L, 'g-')
for yfrac in numpy.linspace(0, 1, 10):
for i, xfrac in zip(indices, fractions):
instance = quaternary(x=xfrac, y=yfrac)
x[i] = instance.a(T=T)
y_Gamma[i] = instance.Eg_Gamma(T=T)
y_X[i] = instance.Eg_X(T=T)
y_L[i] = instance.Eg_L(T=T)
ax.plot(x, y_Gamma, 'r--')
ax.plot(x, y_X, 'b--')
ax.plot(x, y_L, 'g--')
plt.xlim(6, 6.5)
plt.ylim(0, 0.8)
plt.legend(loc='best')
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
output_filename = sys.argv[1]
plt.savefig(output_filename)
else:
plt.show()
|
shankari/e-mission-server
|
emission/incomplete_tests/TestCarbon.py
|
Python
|
bsd-3-clause
| 13,159 | 0.012387 |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
from past.utils import old_div
import unittest
import json
import logging
import re
from datetime import datetime, timedelta
# Our imports
from emission.analysis.result import carbon
import emission.core.get_database as edb
from emission.core.get_database import get_mode_db, get_section_db
import emission.tests.common as etc
from emission.core import common
class TestCarbon(unittest.TestCase):
def setUp(self):
from copy import copy
self.testUsers = ["test@example.com", "best@example.com", "fest@example.com",
"rest@example.com", "nest@example.com"]
self.serverName = 'localhost'
# Sometimes, we may have entries left behind in the database if one of the tests failed
# or threw an exception, so let us start by cleaning up all entries
etc.dropAllCollections(edb._get_current_db())
self.ModesColl = get_mode_db()
self.assertEquals(self.ModesColl.estimated_document_count(), 0)
etc.loadTable(self.serverName, "Stage_Modes", "emission/tests/data/modes.json")
etc.loadTable(self.serverName, "Stage_Sections", "emission/tests/data/testCarbonFile")
self.SectionsColl = get_section_db()
self.walkExpect = 1057.2524056424411
self.busExpect = 2162.668467546699
self.busCarbon = old_div(267.0,1609)
self.airCarbon = old_div(217.0,1609)
self.driveCarbon = old_div(278.0,1609)
self.busOptimalCarbon = old_div(92.0,1609)
self.now = datetime.now()
self.dayago = self.now - timedelta(days=1)
self.weekago = self.now - timedelta(weeks = 1)
for section in self.SectionsColl.find():
section['section_start_datetime'] = self.dayago
section['section_end_datetime'] = self.dayago + timedelta(hours = 1)
if section['confirmed_mode'] == 5:
airSection = copy(section)
airSection['confirmed_mode'] = 9
airSection['_id'] = section['_id'] + "_air"
self.SectionsColl.insert(airSection)
# print("Section start = %s, section end = %s" %
# (section['section_start_datetime'], section['section_end_datetime']))
self.SectionsColl.save(section)
def tearDown(self):
for testUser in self.testUsers:
etc.purgeSectionData(self.SectionsColl, testUser)
self.ModesColl.remove()
self.assertEquals(self.ModesColl.estimated_document_count(), 0)
def getMyQuerySpec(self, user, modeId):
return common.getQuerySpec(user, modeId, self.weekago, self.now)
def testGetModes(self):
modes = carbon.getAllModes()
for mode in modes:
print(mode['mode_id'], mode['mode_name'])
self.assertEquals(len(modes), 9)
def testGetDisplayModes(self):
modes = carbon.getDisplayModes()
for mode in modes:
print(mode['mode_id'], mode['mode_name'])
# skipping transport, underground and not a trip
self.assertEquals(len(modes), 8)
def testGetTripCountForMode(self):
modes = carbon.getDisplayModes()
# try different modes
self.assertEqual(carbon.getTripCountForMode("test@example.com", 1, self.weekago, self.now), 1) # walk
self.assertEqual(carbon.getTripCountForMode("test@example.com", 5, self.weekago, self.now), 1) # bus
self.assertEqual(carbon.getTripCountForMode("test@example.com", 9, self.weekago, self.now), 1) # bus
# try different users
self.assertEqual(carbon.getTripCountForMode("best@example.com", 1, self.weekago, self.now), 1) # walk
self.assertEqual(carbon.getTripCountForMode("rest@example.com", 5, self.weekago, self.now), 1) # bus
# try to sum across users
# We have 5 users - best, fest, rest, nest and test
self.assertEqual(carbon.getTripCountForMode(None, 1, self.weekago, self.now), 5) # walk
self.assertEqual(carbon.getTripCountForMode(None, 5, self.weekago, self.now), 5) # bus
def testTotalModeShare(self):
modeshare = carbon.getModeShare(None, self.weekago, self.now)
self.assertEqual(modeshare['walking'], 5)
self.assertEqual(modeshare['bus'], 5)
self.assertEqual(modeshare['cycling'], 0)
self.assertEqual(modeshare['car'], 0)
self.assertEqual(modeshare['train'], 0)
# self.assertFalse(modeshare.keys() contains 'not a trip')
# self.assertFalse(modeshare.keys() contains 'transport')
def testMyModeShare(self):
modeshare = carbon.getModeShare('fest@example.com', self.weekago, self.now)
print(modeshare)
self.assertEqual(modeshare['walking'], 1)
self.assertEqual(modeshare['bus'], 1)
self.assertEqual(modeshare['cycling'], 0)
self.assertEqual(modeshare['car'], 0)
self.assertEqual(modeshare['train'], 0)
# self.assertFalse(modeshare.keys() contains 'not a trip')
# self.assertFalse(modeshare.keys() contains 'transport')
def testDistanceForMode(self):
# try different modes
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("test@example.com", 1)),
self.walkExpect) # walk
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("test@example.com", 5)),
self.busExpect) # bus
# try different users
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("best@example.com", 1)), self.walkExpect) # walk
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec("rest@example.com", 5)), self.busExpect) # bus
# try to sum across users
# We have 5 users - best, fest, rest, nest and test
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec(None, 1)), len(self.testUsers) * self.walkExpect) # walk
self.assertEqual(carbon.getDistanceForMode(self.getMyQuerySpec(None, 5)), len(self.testUsers) * self.busExpect) # bus
def testMyModeDistance(self):
myModeDistance = carbon.getModeShareDistance('fest@example.com', self.weekago, self.now)
self.assertEqual(myModeDistance['walking'], self.walkExpect)
self.assertEqual(myModeDistance['cycling'], 0)
self.assertEqual(myModeDistance['bus'], self.busExpect)
self.assertEqual(myModeDistance['train'], 0)
def testTotalModeDistance(self):
totalModeDistance = carbon.getModeShareDistance(None, self.weekago, self.now)
self.assertEqual(totalModeDistance['walking'], len(self.testUsers) * self.walkExpect)
self.assertEqual(totalModeDistance['cycling'], 0)
self.assertEqual(totalModeDistance['bus'], len(self.testUsers) * self.busExpect)
self.assertEqual(totalModeDistance['train'], 0)
def testMyCarbonFootprint(self):
myModeDistance = carbon.getModeCarbonFootprint('fest@example.com', carbon.carbonFootprintForMode, self.weekago, self.now)
self.assertEqual(myModeDistance['walking'], 0)
self.assertEqual(myModeDistance['cycling'], 0)
self.assertEqual(myModeDistance['bus_short'], (self.busCarbon * self.busExpect/1000))
self.assertEqual(myModeDistance['train_short'], 0)
# We duplicate the bus trips to get air trips, so the distance should be the same
self.assertEqual(myModeDistance['air_short'], (self.airCarbon * self.busExpect/1000))
def testTotalCarbonFootprint(self):
totalModeDistance = carbon.getModeCarbonFootprint(None, carbon.carbonFootprintForMode, self.weekago, self.now)
self.assertEqual(totalModeDistance['walking'], 0)
self.assertEqual(totalModeDistance['cycling'], 0)
# We divide by 1000 to make it comprehensible in getModeCarbonFootprint
self.assertEqual(totalModeDistance['bus_short'], old_div((self.busCarbon * len(self.testUsers) * self.busExpect),1000))
self.assertEqual(totalModeDistance['air_short'], old_div((self.airCarbon * len(self.testUsers) * self.busExpect),1000))
self.assertEqual(totalModeDistance['train_short'], 0)
def testMySummary(self):
(myModeShareCount, avgModeShareCount,
myModeShareDistance, avgModeShareDistance,
myModeCarbonFootprint, avgModeCarbonFootprint,
myModeCarbonFootprintNoLongMotorized, avgModeCarbonFootprintNoLongMotorized,
myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
myOptimalCarbonFootprintNoLongMotorized
|
praetorian-inc/pentestly
|
modules/reporting/list.py
|
Python
|
gpl-3.0
| 1,569 | 0.003824 |
from recon.core.module import BaseModule
import codecs
import os
class Module(BaseModule):
meta = {
'name': 'List Creator',
'author': 'Tim Tomes (@LaNMaSteR53)',
'description': 'Creates a file containing a list of records from the database.',
'options': (
('table', 'hosts', True, 'source table of data for the list'),
('column', 'ip_address', True, 'source column of data for the list'),
('unique', True, True, 'only return unique items from the dataset'),
('nulls', False, True, 'include nulls in the dataset'),
('filename', os.path.join(BaseModule.workspace, 'list.txt'), True, 'path and filename for output'),
),
}
def module_run(self):
filename = self.options['filename']
with codecs.open(filename, 'wb', encoding='utf-8') as outfile:
# handle the source of information for the report
column = self.options['column']
table = self.options['table']
            nulls = ' WHERE "%s" IS NOT NULL' % (column) if not self.options['nulls'] else ''
unique = 'DISTINCT ' if self.options['unique'] else ''
values = (unique, column, table, nulls)
query = 'SELECT %s"%s" FROM "%s"%s ORDER BY 1' % values
rows = self.query(query)
for row in [x[0] for x in rows]:
row = row if row else ''
outfile.write('%s\n' % (row))
print(row)
self.output('%d items added to \'%s\'.' % (len(rows), filename))
|
mfittere/SixDeskDB
|
sixdeskdb/davsturns.py
|
Python
|
lgpl-2.1
| 28,687 | 0.049395 |
# da vs turns module
import numpy as np
from scipy import optimize
import matplotlib.pyplot as pl
import glob, sys, os, time
from deskdb import SixDeskDB,tune_dir,mk_dir
import matplotlib
# ------------- basic functions -----------
def get_divisors(n):
"""finds the divisors of an integer number"""
large_divisors = []
for i in xrange(1, int(np.sqrt(n) + 1)):
    if n % i == 0:
      yield i
      if i != n / i:
        large_divisors.insert(0, n / i)
for divisor in large_divisors:
yield divisor
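# example: list(get_divisors(12)) -> [1, 2, 3, 4, 6, 12]; small divisors are yielded
# as they are found, the matching large divisors are collected and yielded afterwards.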
def linear_fit(datx,daty,daterr):
'''Linear model fit with f(x)=p0+p1*x
(datx,daty): data, daterr: measurement error
return values (res,p0,p0err,p1,p1err):
  - res: sum of residuals^2 normalized with the measurement error
  - p0,p1: fit parameters
  - p0err, p1err: error of fit parameters'''
fitfunc = lambda p,x: p[0]+p[1]*x#p[0]=Dinf, p[1]=b0
errfunc = lambda p,x,y,err: (y-fitfunc(p,x))/err
pinit = [0.1, 0.1]
#minimize
outfit=optimize.leastsq(errfunc, pinit,args=(datx,daty,daterr),full_output=1)
(p0,p1)=outfit[0]#(p[0],p[1])
var =outfit[1]#variance matrix
p0err =np.sqrt(var[0,0])#err p[0]
p1err =np.sqrt(var[1,1])#err p[1]
# res=sum((daty-fitfunc((p0,p1),datx))**2)/len(datx-2) #not weighted with error
res=sum((errfunc((p0,p1),datx,daty,daterr))**2)/len(datx)#weighted with error
return (res,p0,p0err,p1,p1err)
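# usage sketch (assumes datx, daty and daterr are 1D numpy arrays of equal length):
#   res, p0, p0err, p1, p1err = linear_fit(datx, daty, daterr)
# where p0 plays the role of Dinf and p1 of b0 in the model f(x) = p0 + p1*x.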
# ----------- functions necessary for the analysis -----------
#@profile
def get_min_turn_ang(s,t,a,it):
"""returns array with (angle,minimum sigma,sturn) of particles with lost turn number < it.
check if there is a particle with angle ang with lost turn number <it
if true: lost turn number and amplitude of the last stable particle is saved = particle "before" the particle with the smallest amplitude with nturns<it
if false: the smallest lost turn number and the largest amplitude is saved
"""
# s,t,a are ordered by angle,amplitude
angles,sigmas=t.shape# angles = number of angles, sigmas = number of amplitudes
ftype=[('angle',float),('sigma',float),('sturn',float)]
mta=np.zeros(angles,dtype=ftype)
# enumerate(a[:,0]) returns (0, a[0]), (1, a[1]), (2, a[2]), ... = iang, ang where iang = index of the array (0,1,2,...) for ang = angle (e.g. [1.5, ... , 1.5] , [3.0, ... ,3.0])
for iang,ang in enumerate(a[:,0]):
tang = t[iang]
sang = s[iang]
iturn = tang<it # select lost turn number < it
if(any(tang[iturn])):
sangit=sang[iturn].min()
argminit=sang.searchsorted(sangit) # get index of smallest amplitude with sturn<it - amplitudes are ordered ascending
mta[iang]=(ang,sang[argminit-1],tang[argminit-1])#last stable amplitude -> index argminit-1
else:
mta[iang]=(ang,sang.max(),tang.min())
return mta
def select_ang_surv(data,seed,nang):
"""returns data reduced to ((angmax+1)/nang)-1 angles -> nang being the divisor of angmax"""
angmax=len(data['angle'][:,0])#number of angles
print nang
if((nang not in list(get_divisors(angmax+1))) or ((angmax+1)/nang-1<3)):
    print('%s is not a divisor of %s or too large (((angmax+1)/nang)-1<3)')%(nang,angmax+1)
sys.exit(0)
#define variables for only selection of angles
s,a,t=data['sigma'][nang::nang+1],data['angle'][nang::nang+1],data['sturn'][nang::nang+1]
ftype=[('angle',float),('sigma',float),('sturn',float)]
dataang=np.ndarray(np.shape(a),dtype=ftype)
dataang['sigma'],dataang['angle'],dataang['sturn']=s,a,t
return dataang
#@profile
def mk_da_vst(data,seed,tune,turnsl,turnstep):
"""returns 'seed','tunex','tuney','dawtrap','dastrap','dawsimp','dassimp',
'dawtraperr','dastraperr','dastraperrep','dastraperrepang',
'dastraperrepamp','dawsimperr','dassimperr','nturn','tlossmin',
'mtime'
the da is in steps of turnstep
das: integral over radius
das = 2/pi*int_0^(2pi)[r(theta)]dtheta=<r(theta)>
= 2/pi*dtheta*sum(a_i*r(theta_i))
daw: integral over phase space
daw = (int_0^(2pi)[(r(theta))^4*sin(2*theta)]dtheta)^1/4
= (dtheta*sum(a_i*r(theta_i)^4*sin(2*theta_i)))^1/4
trapezoidal rule (trap): a_i=(3/2,1, ... ,1,3/2)
simpson rule (simp): a_i=(55/24.,-1/6.,11/8.,1, ... 1,11/8.,-1/6.,55/24.)
numerical recipes open formulas 4.1.15 and 4.1.18
"""
mtime=time.time()
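  # e.g. with 5 surviving angles the trapezoidal weights are [3/2, 1, 1, 1, 3/2], so
  # das reduces to (2/pi)*angstep*(1.5*r_0 + r_1 + r_2 + r_3 + 1.5*r_4)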
(tunex,tuney)=tune
s,a,t=data['sigma'],data['angle'],data['sturn']
tmax=np.max(t[s>0])#maximum number of turns
#set the 0 in t to tmax*100 in order to check if turnnumber<it (any(tang[tang<it])<it in get_min_turn_ang)
t[s==0]=tmax*100
angmax=len(a[:,0])#number of angles
  angstep=np.pi/(2*(angmax+1))#step in angle in rad
  ampstep=np.abs((s[s>0][1])-(s[s>0][0]))
  ftype=[('seed',int),('tunex',float),('tuney',float),('turn_max',int),('dawtrap',float),('dastrap',float),('dawsimp',float),('dassimp',float),('dawtraperr',float),('dastraperr',float),('dastraperrep',float),('dastraperrepang',float),('dastraperrepamp',float),('dawsimperr',float),('dassimperr',float),('nturn',float),('tlossmin',float),('mtime',float)]
l_turnstep=len(np.arange(turnstep,tmax,turnstep))
daout=np.ndarray(l_turnstep,dtype=ftype)
for nm in daout.dtype.names:
daout[nm]=np.zeros(l_turnstep)
dacount=0
currentdawtrap=0
currenttlossmin=0
#define integration coefficients at beginning and end which are unequal to 1
  ajtrap_s=np.array([3/2.])#trapezoidal rule
ajtrap_e=np.array([3/2.])
ajsimp_s=np.array([55/24.,-1/6.,11/8.])#Simpson rule
ajsimp_e=np.array([11/8.,-1/6.,55/24.])
warnsimp=True
for it in np.arange(turnstep,tmax,turnstep):
mta=get_min_turn_ang(s,t,a,it)
mta_angle=mta['angle']*np.pi/180#convert to rad
l_mta_angle=len(mta_angle)
mta_sigma=mta['sigma']
if(l_mta_angle>2):
      # define coefficients for trapezoidal rule (trap)
# ajtrap = [3/2.,1,....1,3/2.]
ajtrap=np.concatenate((ajtrap_s,np.ones(l_mta_angle-2),ajtrap_e))
else:
print('WARNING! mk_da_vst - You need at least 3 angles to calculate the da vs turns! Aborting!!!')
sys.exit(0)
if(l_mta_angle>6):
# define coefficients for simpson rule (simp)
# ajsimp = [55/24.,-1/6.,11/8.,1,....1,11/8.,-1/6.,55/24. ]
ajsimp=np.concatenate((ajsimp_s,np.ones(l_mta_angle-6),ajsimp_e))
calcsimp=True
else:
if(warnsimp):
print('WARNING! mk_da_vst - You need at least 7 angles to calculate the da vs turns with the simpson rule! da*simp* will be set to 0.')
warnsimp=False
calcsimp=False
# ---- trapezoidal rule (trap)
# integral
dawtrapint = ((ajtrap*(mta_sigma**4*np.sin(2*mta_angle))).sum())*angstep
dawtrap = (dawtrapint)**(1/4.)
dastrap = (2./np.pi)*(ajtrap*(mta_sigma)).sum()*angstep
# error
dawtraperrint = np.abs(((ajtrap*(2*(mta_sigma**3)*np.sin(2*mta_angle))).sum())*angstep*ampstep)
dawtraperr = np.abs(1/4.*dawtrapint**(-3/4.))*dawtraperrint
dastraperr = ampstep/2
dastraperrepang = ((np.abs(np.diff(mta_sigma))).sum())/(2*(angmax+1))
dastraperrepamp = ampstep/2
dastraperrep = np.sqrt(dastraperrepang**2+dastraperrepamp**2)
# ---- simpson rule (simp)
if(calcsimp):
# int
dawsimpint = (ajsimp*((mta_sigma**4)*np.sin(2*mta_angle))).sum()*angstep
dawsimp = (dawsimpint)**(1/4.)
dassimpint = (ajsimp*mta_sigma).sum()*angstep
dassimp = (2./np.pi)*dassimpint
# error
dawsimperrint = (ajsimp*(2*(mta_sigma**3)*np.sin(2*mta_angle))).sum()*angstep*ampstep
dawsimperr = np.abs(1/4.*dawsimpint**(-3/4.))*dawsimperrint
dassimperr = ampstep/2#simplified
else:
(dawsimp,dassimp,dawsimperr,dassimperr)=np.zeros(4)
tlossmin=np.min(mta['sturn'])
if(dawtrap!=currentdawtrap and it-turnstep >= 0 and tlossmin!=currenttlossmin):
daout[dacount]=(seed,tunex,tuney,turnsl,dawtrap,dastrap,dawsimp,dassimp,dawtraperr,dastraperr,dastraperrep,dastraperrepang,dastraperrepamp,dawsimperr,dassimperr,it-turnstep,tlossmin,mtime)
dacount=dacount+1
currentdawtrap =dawtrap
currenttlossmin=tlossmin
return daout[daout['dawtrap']>0]#delete 0 from errors
# ----------- function
|
karlin13/LzCoinJupJup
|
JupJup/Present/__init__.py
|
Python
|
mit
| 37 | 0 |
from JupJup.Present import collector
|
stesie/PyPC-NC
|
Control/MainWindow.py
|
Python
|
gpl-3.0
| 11,785 | 0.02919 |
from PySide import QtGui, QtCore
import os, struct, time
class ControlMainWindow(QtGui.QMainWindow):
_storeButtonUsed = False
_gv = None
_parser = None
_inter = None
_workpiecePos = [ 5, 5, 5 ]
_originOffset = [ 0, 0 ]
_polarCorrection = [ 1, 0 ]
_debounce = None
def __init__(self, chatBackend):
super(ControlMainWindow, self).__init__(None)
self._machine = MachineController(chatBackend);
self._machine.machineStatus().statusUpdated.connect(self.statusUpdated)
self._ui = Ui_MainWindow()
self._ui.setupUi(self)
self._ui.stop.clicked.connect(self._machine.stop)
self._ui.refMovement.clicked.connect(self.refMovement)
self._ui.importGCode.clicked.connect(self.importGCode)
self._ui.run.clicked.connect(self.run)
self._ui.resume.clicked.connect(self.resume)
self._ui.showGraphicsView.clicked.connect(self.showGraphicsView)
self._ui.gotoOther.setMenu(self._ui.menuGoto)
self._ui.storeOther.setMenu(self._ui.menuStore)
self._ui.menuBar.hide()
self._ui.storeXY.triggered.connect(self.storeXY)
self._ui.storeXYZ.triggered.connect(self.storeXYZ)
self._ui.storeX.triggered.connect(self.storeX)
self._ui.storeY.triggered.connect(self.storeY)
self._ui.storeZ.triggered.connect(self.storeZ)
self._ui.gotoXY.triggered.connect(self.gotoWorkpieceXY)
self._ui.gotoXYZ.triggered.connect(self.gotoWorkpieceXYZ)
self._ui.gotoX.triggered.connect(self.gotoWorkpieceX)
self._ui.gotoY.triggered.connect(self.gotoWorkpieceY)
self._ui.gotoZ.triggered.connect(self.gotoWorkpieceZ)
self._ui.driveXUp.clicked.connect(self.driveXUp)
self._ui.driveYUp.clicked.connect(self.driveYUp)
self._ui.driveZUp.clicked.connect(self.driveZUp)
self._ui.driveUUp.clicked.connect(self.driveUUp)
self._ui.driveXDown.clicked.connect(self.driveXDown)
self._ui.driveYDown.clicked.connect(self.driveYDown)
self._ui.driveZDown.clicked.connect(self.driveZDown)
self._ui.driveUDown.clicked.connect(self.driveUDown)
self._ui.feedRateOverride.valueChanged.connect(self.feedRateOverrideChanged)
self._machine.machineStatus().updateStatus()
@QtCore.Slot()
def refMovement(self):
# @fixme assert machine is not moving
self._machine.setAction(ReferenceMotionController(self._machine))
@QtCore.Slot()
def showGraphicsView(self):
if self._parser == None:
QtGui.QMessageBox.information(
self, 'PyPC-NC Graphics View',
'You need to import G-Code before visualizing it.')
return
if self._gv == None:
self._gv = ControlGraphicsView(self, self._machine)
self._gv.render(self._parser)
self._gv.show()
self._gv.closed.connect(self.graphicsViewClosed)
@QtCore.Slot()
def graphicsViewClosed(self):
self._gv = None
@QtCore.Slot()
def statusUpdated(self):
infos = []
if self._machine.machineStatus().status() & 0x10: infos.append('moving')
if self._machine.machineStatus().status() & 0x04: infos.append("ref'd")
if self._machine.machineStatus().status() & 0x08: infos.append("ref'ing")
status = hex(self._machine.machineStatus().status())
if infos:
status += ' (' + ', '.join(infos) + ')'
self._ui.statusX.setText(status)
self._ui.statusPx.setText("%.3f" % (self._machine.machineStatus().x() / 1000))
self._ui.statusPy.setText("%.3f" % (self._machine.machineStatus().y() / 1000))
self._ui.statusPz.setText("%.3f" % (self._machine.machineStatus().z() / 1000))
self._ui.statusPu.setText("%.3f" % (self._machine.machineStatus().u() / 1000))
self._ui.relX.setText("%.3f" % ((self._workpiecePos[0] - self._machine.machineStatus().x()) / 1000))
self._ui.relY.setText("%.3f" % ((self._workpiecePos[1] - self._machine.machineStatus().y()) / 1000))
self._ui.relZ.setText("%.3f" % ((self._workpiecePos[2] - self._machine.machineStatus().z()) / 1000))
if isinstance(self._machine.action(), ProgrammedMotionController):
self._ui.progress.setMaximum(self._machine.action().totalSteps())
self._ui.progress.setValue(self._machine.action().completedSteps())
elif self._inter and self._inter.pause:
if self._ui.progress.maximum():
QtGui.QMessageBox.information(
self, 'Tool Change',
'Insert tool %d now.' % self._inter.nextTool)
self._ui.progress.setMaximum(0)
else:
self._ui.progress.setMaximum(1)
self._ui.progress.setValue(0)
@QtCore.Slot()
def importGCode(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Import G-Code', '.')
if filename[0] == '': return
self.importGCodeFromFile(filename[0])
def importGCodeFromFile(self, filename):
parser = GCode.GCodeParser()
parser.readFile(filename)
parser.removeTapeMarkers()
        parser.removeComments()
parser.removeInlineComments()
parser.removeBlockSkipLines()
parser.normalizeAddressWhitespace()
parser.normalizeLeadingZeros()
parser.readSequenceNumbers()
self._parser = parser
@QtCore.Slot()
def run(self):
if not self._machine.machineStatus().status() & 0x04:
reply = QtGui.QMessageBox.question(self, 'G-Code Import',
'Are you sure to import G-Code without reference movement?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return
if not self._storeButtonUsed:
reply = QtGui.QMessageBox.question(self, 'G-Code Import',
'Are you sure to import G-Code without setting workpiece location?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return
filters = [
Filters.OffsetFilter([ -self._originOffset[0], -self._originOffset[1] ]),
Filters.PolarFixer(self._polarCorrection[0], self._polarCorrection[1]),
Filters.OffsetFilter(self._workpiecePos)
]
fc = Filters.FilterChain(filters, CNCCon.CNCConWriter())
self._inter = GCode.GCodeInterpreter(fc)
self._inter.position = [
self._machine.machineStatus().x() - self._workpiecePos[0] + self._originOffset[0],
self._machine.machineStatus().y() - self._workpiecePos[1] + self._originOffset[1],
self._machine.machineStatus().z() - self._workpiecePos[2]
]
self._inter.invertZ = self._ui.invertZ.isChecked()
self._inter.run(self._parser)
self._machine.setAction(ProgrammedMotionController(self._machine))
self._machine.action().setFeedRateOverride(self._ui.feedRateOverride.value())
self._machine.action().setCommands(self._inter.target.buffer)
@QtCore.Slot()
def resume(self):
if not self._inter:
QtGui.QMessageBox.information(
self, 'PyPC-NC',
'Interpreter not initialized. You need to open & "Run" first.')
return
if not self._inter.pause:
QtGui.QMessageBox.information(
self, 'PyPC-NC',
'Interpreter not currently paused. You may want to start over by clicking "Run".')
return
self._inter.target.buffer = [ ]
self._inter.position = [
self._machine.machineStatus().x() - self._workpiecePos[0] + self._originOffset[0],
self._machine.machineStatus().y() - self._workpiecePos[1] + self._originOffset[1],
self._machine.machineStatus().z() - self._workpiecePos[2]
]
self._inter.target.filters()[2].setOffsets(self._workpiecePos)
self._inter.resume(self._parser)
self._machine.setAction(ProgrammedMotionController(self._machine))
self._machine.action().setFeedRateOverride(self._ui.feedRateOverride.value())
self._machine.action().setCommands(self._inter.target.buffer)
@QtCore.Slot(int)
def feedRateOverrideChanged(self, value):
if isinstance(self._machine.action(), ProgrammedMotionController):
self._machine.action().setFeedRateOverride(self._ui.feedRateOverride.value())
@QtCore.Slot()
def storeXY(self):
self.storeX()
self.storeY()
@QtCore.Slot()
def storeXYZ(self):
self.storeXY()
self.storeZ()
@QtCore.Slot()
def storeX(self):
self._storeButtonUsed = True
self._workpiecePos[0] = self._machine.machineStatus().x()
@QtCore.Slot()
def storeY(self):
self._storeButtonUsed = True
self._workpiecePos[1] = self._machine.machineStatus().y()
@QtCore.Slot()
def storeZ(self):
self._storeButtonUsed = True
self._workpiecePos[2] = self._machine.machineStatus().z()
def gotoWorkpiece(self, x, y, z):
if isinstance(self._machine.action(), ProgrammedMotionContro
|
mejedi/tarantool
|
test/replication-py/cluster.test.py
|
Python
|
bsd-2-clause
| 13,538 | 0.001847 |
import os
import sys
import re
import yaml
import uuid
import glob
from lib.tarantool_server import TarantoolServer
## Get cluster uuid
cluster_uuid = ''
try:
cluster_uuid = yaml.load(server.admin("box.space._schema:get('cluster')",
silent = True))[0][1]
uuid.UUID('{' + cluster_uuid + '}')
print 'ok - cluster uuid'
except Exception as e:
print 'not ok - invalid cluster uuid', e
server.iproto.reconnect() # re-connect with new permissions
print '-------------------------------------------------------------'
print ' gh-696: Check global READ permissions for replication'
print '-------------------------------------------------------------'
# Generate replica cluster UUID
replica_uuid = str(uuid.uuid4())
## Universal read permission is required to perform JOIN/SUBSCRIBE
rows = list(server.iproto.py_con.join(replica_uuid))
print len(rows) == 1 and rows[0].return_message.find('Read access') >= 0 and \
'ok' or 'not ok', '-', 'join without read permissions on universe'
rows = list(server.iproto.py_con.subscribe(cluster_uuid, replica_uuid))
print len(rows) == 1 and rows[0].return_message.find('Read access') >= 0 and \
'ok' or 'not ok', '-', 'subscribe without read permissions on universe'
## Write permission to space `_cluster` is required to perform JOIN
server.admin("box.schema.user.grant('guest', 'read', 'universe')")
server.iproto.reconnect() # re-connect with new permissions
rows = list(server.iproto.py_con.join(replica_uuid))
print len(rows) == 1 and rows[0].return_message.find('Write access') >= 0 and \
'ok' or 'not ok', '-', 'join without write permissions to _cluster'
def check_join(msg):
ok = True
for resp in server.iproto.py_con.join(replica_uuid):
if resp.completion_status != 0:
print 'not ok', '-', msg, resp.return_message
ok = False
server.iproto.reconnect() # the only way to stop JOIN
if not ok:
return
tuples = server.iproto.py_con.space('_cluster').select(replica_uuid, index = 1)
if len(tuples) == 0:
print 'not ok', '-', msg, 'missing entry in _cluster'
return
server_id = tuples[0][0]
print 'ok', '-', msg
return server_id
## JOIN with permissions
server.admin("box.schema.user.grant('guest', 'write', 'space', '_cluster')")
server.iproto.reconnect() # re-connect with new permissions
server_id = check_join('join with granted permissions')
server.iproto.py_con.space('_cluster').delete(server_id)
# JOIN with granted role
server.admin("box.schema.user.revoke('guest', 'read', 'universe')")
server.admin("box.schema.user.revoke('guest', 'write', 'space', '_cluster')")
server.admin("box.schema.user.grant('guest', 'replication')")
server.iproto.reconnect() # re-connect with new permissions
server_id = check_join('join with granted role')
server.iproto.py_con.space('_cluster').delete(server_id)
print '-------------------------------------------------------------'
print 'gh-707: Master crashes on JOIN if it does not have snapshot files'
print 'gh-480: If socket is closed while JOIN, replica wont reconnect'
print '-------------------------------------------------------------'
data_dir = os.path.join(server.vardir, server.name)
for k in glob.glob(os.path.join(data_dir, '*.snap')):
os.unlink(k)
# remember the number of servers in _cluster table
server_count = len(server.iproto.py_con.space('_cluster').select(()))
rows = list(server.iproto.py_con.join(replica_uuid))
print len(rows) > 0 and rows[-1].return_message.find('.snap') >= 0 and \
'ok' or 'not ok', '-', 'join without snapshots'
res = server.iproto.py_con.space('_cluster').select(())
if server_count <= len(res):
print 'ok - _cluster did not change after unsuccessful JOIN'
else:
print 'not ok - _cluster did change after unsuccessful JOIN'
print res
server.admin("box.schema.user.revoke('guest', 'replication')")
server.admin('box.snapshot()')
print '-------------------------------------------------------------'
print 'gh-434: Assertion if replace _cluster tuple for local server'
print '-------------------------------------------------------------'
master_uuid = server.get_param('server')['uuid']
sys.stdout.push_filter(master_uuid, '<master uuid>')
# Invalid UUID
server.admin("box.space._cluster:replace{1, require('uuid').NULL:str()}")
# Update of UUID is not OK
server.admin("box.space._cluster:replace{1, require('uuid').str()}")
# Update of tail is OK
server.admin("box.space._cluster:update(1, {{'=', 3, 'test'}})")
print '-------------------------------------------------------------'
print 'gh-1140: Assertion if replace _cluster tuple for remote server'
print '-------------------------------------------------------------'
# Test that insert is OK
new_uuid = '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95'
server.admin("box.space._cluster:insert{{5, '{0}'}}".format(new_uuid))
server.admin("box.info.vclock[5] == nil")
# Replace with the same UUID is OK
server.admin("box.space._cluster:replace{{5, '{0}'}}".format(new_uuid))
# Replace with a new UUID is not OK
new_uuid = 'a48a19a3-26c0-4f8c-a5b5-77377bab389b'
server.admin("box.space._cluster:replace{{5, '{0}'}}".format(new_uuid))
# Update of tail is OK
server.admin("box.space._cluster:update(5, {{'=', 3, 'test'}})")
# Delete is OK
server.admin("box.space._cluster:delete(5)")
# gh-1219: LSN must not be removed from vclock on unregister
server.admin("box.info.vclock[5] == nil")
# Cleanup
server.stop()
server.deploy()
print '-------------------------------------------------------------'
print 'Start a new replica and check box.info on the start'
print '-------------------------------------------------------------'
# master server
master = server
master_id = master.get_param('server')['id']
master.admin("box.schema.user.grant('guest', 'replication')")
replica = TarantoolServer(server.ini)
replica.script = 'replication-py/replica.lua'
replica.vardir = server.vardir
replica.rpl_master = master
replica.deploy()
replica_id = replica.get_param('server')['id']
replica_uuid = replica.get_param('server')['uuid']
sys.stdout.push_filter(replica_uuid, '<replica uuid>')
replica.admin('box.info.server.id == %d' % replica_id)
replica.admin('not box.info.server.ro')
replica.admin('box.info.server.lsn == 0')
replica.admin('box.info.vclock[%d] == nil' % replica_id)
print '-------------------------------------------------------------'
print 'Modify data to change LSN and check box.info'
print '-------------------------------------------------------------'
replica.admin('box.space._schema:insert{"test", 48}')
replica.admin('box.info.server.lsn == 1')
replica.admin('box.info.vclock[%d] == 1' % replica_id)
print '-------------------------------------------------------------'
print 'Unregister replica and check box.info'
print '-------------------------------------------------------------'
# gh-527: update vclock on delete from box.space._cluster'
master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id)
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica.admin('box.info.server.id ~= %d' % replica_id)
replica.admin('box.info.server.lsn == -1')
# gh-1219: LSN must not be removed from vclock on unregister
replica.admin('box.info.vclock[%d] == 1' % replica_id)
# gh-246: box.info.server.ro is controlled by box.cfg { read_only = xx }
# unregistration doesn't change box.info.server.ro
replica.admin('not box.info.server.ro')
# actually box is read-only if id is not registered
replica.admin('box.space._schema:replace{"test", 48}')
replica.admin('box.cfg { read_only = true }')
replica.admin('box.space._schema:replace{"test", 48}')
replica.admin('box.cfg { read_only = false }')
replica.admin('box.space._schema:replace{"test", 48}')
print '-------------------------------------------------------------'
print 'Re-register replica with the same server_id'
print '-------------------------------------------------------------'
replica.admin('box.cfg { read_only = true }')
master.admin('box.space._cluster:insert{%d, "%s"} ~= nil' %
(replica_id, replica_uuid))
replica.wait_lsn(master_id, master.get_lsn(master_id))
replica.admin('box.info.server.id == %d' % replica_id)
# gh-1219: LSN must not
|
YixuanLi/geo-tweet
|
twitter-timeline/get_non_locator_timeline.py
|
Python
|
gpl-2.0
| 5,282 | 0.029156 |
# -*- coding: utf-8 -*-
# __author__: Yixuan LI
# __email__: yl2363@cornell.edu
import os
import json
import re
from optparse import OptionParser
import tweepy
import time
class UserTimeline:
def __init__(self,inputDir,outputDir):
self
|
.inputDir = inputDir
self.outputDir = ou
|
tputDir
os.system("mkdir -p %s"%(outputDir))
# Get the names of the files under the input directory and save them in a list
self.fileList = os.listdir(inputDir)
print self.fileList
self.userHash = {} # [key,value] pair to record the unique users in the tweets
self.uniqueUserCount = 0 # count unique users in the dataset
self.tweetCount = 0 # total tweets processed
self.api = None
def authentication(self):
consumer_key="z86C8djY3bYOPD1WkYV73nVP6"
consumer_secret="BT8oKrcj955MKjv0qS8Kra2Iw91E3uSMTqEVurfTmKjXfG0hNm"
access_token="746349096-Bz1n8T6vNEFBAMG2YqVdJFOtrM321d5HeupxMlxM"
access_token_secret="ZZQZsjvJXnIlyl04Mg2vCxS8g122b3AljpiytiKCKRFPL"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(auth)
print "authentication finished"
def get_user_id(self):
written = 0
if os.path.exists(self.outputDir + "/" + "uniqueUserID.txt"):
pass
else:
for tweetFile in self.fileList[1:]:
with open(self.inputDir+"/"+tweetFile,'r') as fin:
for line in fin:
try:
lineContents = json.loads(line) # load a line
self.tweetCount += 1
print self.tweetCount # for debugging
except:
continue
try:
if lineContents["coordinates"] is not None:
continue
else:
# extract user's id
userID = lineContents["user"]["id"]
# extract tweet text and convert the string to lower case (http://stackoverflow.com/questions/6797984/how-to-convert-string-to-lowercase-in-python)
#tweet = lineContents["text"].lower()
if not self.userHash.has_key(userID): # if the user has not been counted
self.uniqueUserCount += 1 # count the number of unique users
self.userHash[userID] = True
fileNum = int(self.uniqueUserCount/7250 + 1)
with open(self.outputDir + "/" + "uniqueUserID_"+str(fileNum)+".txt","a") as fileout:
written += 1
fileout.write(str(userID))
fileout.write("\n")
print written," written"
except:
continue
print "There are ", self.uniqueUserCount, "unique users"
print self.tweetCount, " tweets processed"
def get_user_timeline(self):
with open(self.outputDir + "/" + "uniqueUserID_6.txt",'r') as fin:
for userID in fin:
# store the tweets of each user in a single file named by the {userID}.json
filePath = self.outputDir + "/" + str(userID[:-1])+".json"
print userID
if os.path.exists(filePath):
with open(filePath,'r') as myfile:
count = sum(1 for line in myfile)
if count > 900:
continue
else:
# http://stackoverflow.com/questions/6996603/how-do-i-delete-a-file-or-folder-in-python
os.remove(filePath)
pageCount = 1
trialTime = 0
# get user timeline tweets
while pageCount < 6:
print "Collecting", pageCount, " -th page"
# open the output file in append mode
self.fout = open(filePath,"a")
try:
tweets = self.api.user_timeline(id=userID,count=200,page=pageCount)
pageCount += 1
except:
time.sleep(70)
trialTime += 1
if trialTime == 2:
pageCount = 8
continue
# write to file
# Note that data returned by api.user_timeline is status object
for tweet in tweets:
print tweet.text
# convert tweepy status object to json format
# http://stackoverflow.com/questions/27900451/convert-tweepy-status-object-into-json
self.fout.write(json.dumps(tweet._json))
self.fout.write('\n')
time.sleep(70) # rate limit (15 requests per 15 minutes window)
if __name__=='__main__':
#########################################################################################
# Parse the arguments
class MyParser(OptionParser):
def format_epilog(self, formatter):
return self.epilog
usage = "usage: python plot_stats.py [options]"
description = """
"""
epilog = """
"""
parser = MyParser(usage, description=description,epilog=epilog)
parser.add_option("--inputDir", "--input file of twitter data", dest="input_path", default=None,
help="input directory of twitter streaming data in JSON format [default: None]")
parser.add_option("--outputDir", "--output directory of twitter user timeline data", dest="output_path", default=None,
help="output directory of twitter user timeline data [default: None]")
(options, args) = parser.parse_args()
# input directory
inputDir = options.input_path
# output directory
outputDir = options.output_path
########################################################################
getter = UserTimeline(inputDir,outputDir)
getter.authentication()
#getter.get_user_id()
getter.get_user_timeline()
|
capoe/espressopp.soap
|
src/analysis/TotalVelocity.py
|
Python
|
gpl-3.0
| 2,733 | 0.011343 |
# Copyright (C) 2014 Pierre de Buyl
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*************************************
**espressopp.analysis.TotalVelocity**
*************************************
.. function:: espressopp.analysis.TotalVelocity(system)
:param system: The system object.
:type system: espressopp.System
.. function:: espressopp.analysis.TotalVelocity.compute()
Compute the total velocity of the system.
:rtype: float
.. function:: espressopp.analysis.TotalVelocity.reset()
Subtract the total velocity of the system from every particle.
Examples
---------
Reset the velocity
++++++++++++++++++++
>>> total_velocity = espressopp.analysis.TotalVelocity(system)
>>> total_velocity.reset()
Extension to integrator
++++++++++++++++++++++++++++++++++++++++++++
This extension can also be attached to integrator and run `reset()` every `n-th` steps.
>>> total_velocity = espressopp.analysis.TotalVelocity(system)
>>> ext_remove_com = espressopp.analysis.ExtAnalyze(total_velocity, 10)
>>> integrator.addExtension(ext_remove_com)
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_TotalVelocity
class TotalVelocityLocal(ObservableLocal, analysis_TotalVelocity):
def __init__(self, system):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_TotalVelocity, system)
def compute(self):
return self.cxxclass.compute(self)
    def reset(self):
return self.cxxclass.reset(self)
if pmi.isController :
class TotalVelocity(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.TotalVelocityLocal',
pmicall = [ "compute", "reset" ],
pmiproperty = ["v"]
)
|
DeveloperMal/wger
|
wger/manager/api/resources.py
|
Python
|
agpl-3.0
| 6,864 | 0.000874 |
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from tastypie import fields
from tastypie.authentication import (
ApiKeyAuthentication,
MultiAuthentication,
SessionAuthentication
)
from tastypie.resources import ModelResource
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from wger.core.api.resources import DaysOfWeekResource
from wger.exercises.api.resources import ExerciseResource
from wger.utils.resources import UserObjectsOnlyAuthorization
from wger.manager.models import (
WorkoutSession,
Workout,
Schedule,
ScheduleStep,
Day,
Set,
Setting,
WorkoutLog
)
class WorkoutResource(ModelResource):
'''
Resource for workouts
'''
days = fields.ToManyField('wger.manager.api.resources.DayResource', 'day_set')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = Workout.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
"comment": ALL,
"creation_date": ALL}
class WorkoutSessionResource(ModelResource):
'''
Resource for workout sessions
'''
workout = fields.ToOneField('wger.manager.api.resources.WorkoutResource', 'workout')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = WorkoutSession.objects.all()
authentication = MultiAuthentication(SessionAuthentication(), ApiKeyAuthentication())
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
"date": ALL,
"time_start": ALL,
"time_end": ALL}
class ScheduleStepResource(ModelResource):
'''
Resource for schedule steps
'''
workout = fields.ToOneField(WorkoutResource, 'workout')
schedule = fields.ToOneField('wger.manager.api.resources.ScheduleResource', 'schedule')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(schedule__user=bundle.request.user)
class Meta:
queryset = ScheduleStep.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'schedule': ALL_WITH_RELATIONS,
'workout': ALL_WITH_RELATIONS}
class ScheduleResource(ModelResource):
'''
Resource for schedules
'''
steps = fields.ToManyField(ScheduleStepResource, 'schedulestep_set')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = Schedule.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'is_active': ALL,
'is_loop': ALL,
'name': ALL}
class DayResource(ModelResource):
'''
Resource for training days
'''
workout = fields.ToOneField(WorkoutResource, 'training')
days_of_week = fields.ToManyField(DaysOfWeekResource, 'day')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(training__user=bundle.request.user)
class Meta:
queryset = Day.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'description': ALL,
'workout': ALL_WITH_RELATIONS}
class SetResource(ModelResource):
'''
Resource for training sets
'''
day = fields.ToOneField(DayResource, 'exerciseday')
exercises = fields.ToManyField(ExerciseResource, 'exercises')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(exerciseday__training__user=bundle.request.user)
class Meta:
queryset = Set.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'day': ALL_WITH_RELATIONS,
'order': ALL,
'sets': ALL}
class SettingResource(ModelResource):
'''
Resource for training settings
'''
set = fields.ToOneField(SetResource, 'set')
exercise = fields.ToOneField(ExerciseResource, 'exercise')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(set__exerciseday__training__user=bundle.request.user)
class Meta:
queryset = Setting.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'exercise': ALL_WITH_RELATIONS,
'order': ALL,
'reps': ALL,
'set': ALL_WITH_RELATIONS}
class WorkoutLogResource(ModelResource):
'''
Resource for a workout log
'''
exercise = fields.ToOneField(ExerciseResource, 'exercise')
workout = fields.ToOneField(WorkoutResource, 'workout')
def authorized_read_list(self, object_list, bundle):
'''
Filter to own objects
'''
return object_list.filter(user=bundle.request.user)
class Meta:
queryset = WorkoutLog.objects.all()
authentication = ApiKeyAuthentication()
authorization = UserObjectsOnlyAuthorization()
filtering = {'id': ALL,
'date': ALL,
'exercise': ALL_WITH_RELATIONS,
'reps': ALL,
'weight': ALL,
'workout': ALL_WITH_RELATIONS}
|
balazssimon/ml-playground
|
udemy/Machine Learning A-Z/Part 2 - Regression/Section 5 - Multiple Linear Regression/backward_elimination_manual.py
|
Python
|
apache-2.0
| 1,681 | 0.030934 |
# Multiple Linear Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
# One hot encoding
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
# Avoiding the Dummy Variable Trap
X = X[:, 1:]
# The Linear Regression library includes the constant, but the statsmodels does not
# so we have to add it to our model:
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis=1)
# Building the optimal model using Backwards Elimination
import statsmodels.formula.api as sm
# Step 1
SL = 0.05
# Step 2, using Ordinary Least Squares from statsmodels (instead of Linear Regression from linear_model)
X_opt = X[:,[0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Step 4
X_opt = X[:,[0,1,3,4,5]]
# Step 5
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Step 4
X_opt = X[:,[0,3,4,5]]
# Step 5
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Step 4
X_opt = X[:,[0,3,5]]
# Step 5
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Step 4
X_opt = X[:,[0,3]]
# Step 5
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3
regressor_OLS.summary()
# Finished
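# A possible automation of the manual procedure above (illustrative sketch only):
# refit repeatedly and drop the predictor with the highest p-value until every
# remaining p-value is below the significance level SL. Assumes X (with the
# intercept column prepended) and y as prepared earlier in this script.
def backward_elimination(X, y, SL=0.05):
    cols = list(range(X.shape[1]))
    while True:
        regressor = sm.OLS(endog=y, exog=X[:, cols]).fit()
        worst = int(regressor.pvalues.argmax())
        if regressor.pvalues[worst] > SL:
            del cols[worst]   # drop the least significant predictor and refit
        else:
            return X[:, cols], regressor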
|
v4nz666/7drl2017
|
RoguePy/UI/__init__.py
|
Python
|
gpl-3.0
| 70 | 0 |
from UI import UI
from View import View
import Elements
import Colors
|
mosquito/aio-pika
|
docs/source/rabbitmq-tutorial/examples/3-publish-subscribe/receive_logs.py
|
Python
|
apache-2.0
| 1,094 | 0 |
import asyncio
from aio_pika import connect, IncomingMessage, ExchangeType
loop = asyncio.get_event_loop()
async def on_message(message: IncomingMessage):
async with message.process():
        print("[x] %r" % message.body)
async def main():
# Perform connection
connection = await connect(
"amqp://guest:guest@localhost/", loop=loop
)
# Creating a channel
channel = awa
|
it connection.channel()
await channel.set_qos(prefetch_count=1)
logs_exchange = await channel.declare_exchange(
"logs", ExchangeType.FANOUT
)
# Declaring queue
queue = await channel.declare_queue(exclusive=True)
# Binding the queue to the exchange
await queue.bind(logs_exchange)
# Start listening the queue with name 'task_queue'
await queue.consume(on_message)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.create_task(main())
# we enter a never-ending loop that waits for data
# and runs callbacks whenever necessary.
print(" [*] Waiting for logs. To exit press CTRL+C")
loop.run_forever()
|
halbbob/dff
|
modules/builtins/info.py
|
Python
|
gpl-2.0
| 5,653 | 0.012383 |
# DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2011 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Solal Jacob <sja@digital-forensic.org>
#
__dff_module_info_version__ = "1.0.0"
from api.vfs import *
from api.module.script import *
from api.loader import *
from api.module.module import *
from api.taskmanager.taskmanager import *
from api.types.libtypes import Parameter, Variant, Argument, typeId, ConfigManager
from datetime import timedelta, datetime
from ui.console.utils import VariantTreePrinter
class INFO(Script, VariantTreePrinter):
def __init__(self):
Script.__init__(self, "info")
VariantTreePrinter.__init__(self)
self.loader = loader.loader()
self.tm = TaskManager()
self.cm = ConfigManager.Get()
def show_config(self, modname):
conf = self.cm.configByName(modname)
res = "\n\tConfig:"
arguments = conf.arguments()
for argument in arguments:
      res += "\n\t\tname: " + str(argument.name())
      res += "\n\t\tdescription: " + str(argument.description())
if argument.inputType() == Argument.Empty:
res += "\n\t\tno input parameters"
else:
res += "\n\t\ttype: " + str(typeId.Get().typeToName(argument.type()))
res += "\n\t\trequirement: "
if argument.requirementType() == Argument.Optional:
res += "optional"
else:
res += "mandatory"
res += "\n\t\tinput parameters: "
if argument.parametersType() == Parameter.NotEditable:
res += "not editable "
else:
res += "editable "
if argument.inputType() == Argument.List:
res += "list"
else:
res += "single"
pcount = argument.parametersCount()
if pcount != 0:
parameters = argument.parameters()
res += "\n\t\tpredefined parameters: "
for parameter in parameters:
if argument.type() == typeId.Node:
res += str(parameter.value().absolute())
else:
res += parameter.toString()
pcount -= 1
if pcount != 0:
res += ", "
res += "\n"
constants = conf.constants()
if len(constants) > 0:
res += "\n\tConstant: \t"
for constant in constants:
res += "\n\t\tname: " + str(constant.name())
res += "\n\t\tdescription: " + str(constant.description())
res += "\n\t\ttype: " + str(typeId.Get().typeToName(constant.type()))
cvalues = constant.values()
cvallen = len(cvalues)
if cvallen > 0:
res += "\n\t\tvalues: "
for cvalue in cvalues:
if cvalue.type() == typeId.Node:
res += str(cvalue.value().absolute())
else:
res += cvalue.toString()
cvallen -= 1
if cvallen != 0:
res += ", "
res += "\n"
return res
def show_arg(self, args):
res = ""
if len(args):
res += "\n\n\t\tArguments: \t"
for argname in args.keys():
res += "\n\t\t\tname: " + argname
res += "\n\t\t\tparameters: "
val = args[argname]
if val.type() == typeId.List:
vlist = val.value()
vlen = len(vlist)
for item in vlist:
if item.type == typeId.Node:
res += str(val.value().absolute())
else:
res += item.toString()
vlen -= 1
if vlen != 0:
res += ", "
elif val.type() == typeId.Node:
res += str(val.value().absolute())
return res
def show_res(self, results):
res = self.fillMap(3, results, "\n\n\t\tResults:")
return res
def c_display(self):
print self.info
def getmodinfo(self, modname):
conf = self.cm.configByName(modname)
if conf == None:
return
self.lproc = self.tm.lprocessus
self.info += "\n" + modname + self.show_config(modname)
for proc in self.lproc:
if proc.mod.name == modname:
self.info += "\n\tProcessus " + str(proc.pid)
stime = datetime.fromtimestamp(proc.timestart)
self.info += "\n\t\texecution started at : " + str(stime)
if proc.timeend:
etime = datetime.fromtimestamp(proc.timeend)
self.info += "\n\t\texecution finished at : " + str(etime)
else:
etime = datetime.fromtimestamp(time.time())
delta = etime - stime
self.info += "\n\t\texecution time: " + str(delta)
self.info += self.show_arg(proc.args)
self.info += self.show_res(proc.res)
def start(self, args):
self.info = ""
if args.has_key("modules"):
modnames = args['modules'].value()
for modname in modnames:
self.getmodinfo(modname.value())
else:
self.modules = self.loader.modules
for modname in self.modules:
self.getmodinfo(modname)
class info(Module):
"""Show info on loaded drivers: configuration, arguments, results
"""
def __init__(self):
Module.__init__(self, "info", INFO)
self.tags = "builtins"
self.conf.addArgument({"name": "modules",
"description": "Display information concerning provided modules",
"input": Argument.Optional|Argument.List|typeId.String})
|
bennybauer/pinax-hello
|
runtests.py
|
Python
|
mit
| 1,274 | 0.000785 |
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.pinax_hello",
"pinax.pinax_hello.tests"
],
MIDDLEWARE_CLASSES=[],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
    ROOT_URLCONF="pinax.pinax_hello.tests.urls",
SECRET_KEY="notasecret",
)
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
test_args = ["pinax.pinax_hello.tests"]
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
test_args = ["tests"]
failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == "__main__":
runtests(*sys.argv[1:])
|
mathieudesro/toad
|
tasks/11-tensormrtrix.py
|
Python
|
gpl-2.0
| 4,911 | 0.007127 |
# -*- coding: utf-8 -*-
from core.toad.generictask import GenericTask
from lib.images import Images
__author__ = "Mathieu Desrosiers, Arnaud Bore"
__copyright__ = "Copyright (C) 2016, TOAD"
__credits__ = ["Mathieu Desrosiers", "Arnaud Bore"]
class TensorMrtrix(GenericTask):
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'masking', 'qa')
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bFile = self.getUpsamplingImage('grad', None, 'b')
mask = self.getRegistrationImage('mask', 'resample')
iterWLS = self.get('iter') # Number of iteration for tensor estimations
tensorsMrtrix = self.__produceTensors(dwi, bFile, iterWLS, mask)
        self.__produceMetrics(tensorsMrtrix, mask, dwi)
    # convert diffusion-weighted images to tensor images.
def __produceTensors(self, source, encodingFile, iterWLS, mask=None):
self.info("Starting DWI2Tensor from mrtrix using weighted linear least squares estimator.")
tmp = self.buildName(source, "tmp")
target = self.buildName(source, "tensor")
cmd = "dwi2tensor {} {} -iter {} -grad {} -nthreads {} -quiet ".format(source, tmp, iterWLS, encodingFile, self.getNTreadsMrtrix())
if mask is not None:
cmd += "-mask {}".format(mask)
self.launchCommand(cmd)
return self.rename(tmp, target)
def __produceMetrics(self, source, mask, target):
self.info("Launch tensor2metric from mrtrix.\n")
adc = self.buildName(target, "adc")
fa = self.buildName(target, "fa")
vector = self.buildName(target, "vector")
adImage = self.buildName(target, "ad")
rdImage = self.buildName(target, "rd")
mdImage = self.buildName(target, "md")
value2 = self.buildName(target, "l2")
value3 = self.buildName(target, "l3")
modulate = self.get('modulate')
cmd1 = "tensor2metric {} -adc {} -fa {} -num 1 -vector {} -value {} -modulate {} -nthreads {} -quiet "\
.format(source, adc, fa, vector, adImage , modulate, self.getNTreadsMrtrix())
cmd2 = "tensor2metric {} -num 2 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value2, modulate, self.getNTreadsMrtrix())
cmd3 = "tensor2metric {} -num 3 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value3, modulate, self.getNTreadsMrtrix())
for cmd in [cmd1, cmd2, cmd3]:
if mask is not None:
cmd += "-mask {} ".format(mask)
self.launchCommand(cmd)
cmd = "mrmath {} {} mean {} -nthreads {} -quiet ".format(value2, value3, rdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
cmd = "mrmath {} {} {} mean {} -nthreads {} -quiet ".format(adImage, value2, value3, mdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
def isIgnore(self):
return self.get("ignore")
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'b'), "gradient encoding b file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "mrtrix tensor"),
(self.getImage('dwi', 'adc'), "mean apparent diffusion coefficient (ADC)"),
(self.getImage('dwi', 'vector'), "selected eigenvector(s)"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD" ),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"),
(self.getImage('dwi', 'md'), "mean diffusivity"))
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'mrtrix'
#Set information
information = "Estimation using WLS with {} iteration(s)".format(self.get('iter'))
qaImages.setInformation(information)
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 0.7, 'Fractional anisotropy'),
('ad', 0.005, 'Axial Diffusivity'),
('md', 0.005, 'Mean Diffusivity'),
('rd', 0.005, 'Radial Diffusivity'),
)
for postfix, vmax, description in tags:
image = self.getImage('dwi', postfix)
if image:
imageQa = self.plot3dVolume(
image, fov=mask, vmax=vmax,
colorbar=True, postfix=softwareName)
qaImages.append((imageQa, description))
return qaImages
|
imphil/fusesoc
|
fusesoc/provider/coregen.py
|
Python
|
gpl-3.0
| 1,221 | 0.002457 |
import logging
import os
import shutil
from fusesoc.provider.provider import Provider
from fusesoc.utils import Launcher
logger = logging.getLogger(__name__)
class Coregen(Provider):
def _checkout(self, local_dir):
script_file = self.config.get('script_file')
project_file = self.config.get('project_file')
extra_files = self.config.get('extra_files')
logger.info("Using Coregen to generate project " + project_file)
        if not os.path.isdir(local_dir):
os.makedirs(local_dir)
src_files = [script_file, project_file]
if extra_files:
src_files += extra_files.split()
        for f in src_files:
f_src = os.path.join(self.core_root, f)
f_dst = os.path.join(local_dir, f)
if os.path.exists(f_src):
d_dst = os.path.dirname(f_dst)
if not os.path.exists(d_dst):
os.makedirs(d_dst)
shutil.copyfile(f_src, f_dst)
else:
logger.error('Cannot find file %s' % f_src)
args = ['-r',
'-b', script_file,
'-p', project_file]
Launcher('coregen', args, cwd=local_dir).run()
|
maciekswat/dolfin_1.3.0
|
site-packages/dolfin/common/plotting.py
|
Python
|
gpl-3.0
| 4,997 | 0.003602 |
# Copyright (C) 2008-2012 Joachim B. Haga and Fredrik Valdmanis
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the
|
terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Martin Sandve Alnaes, 2008.
# Modified by Anders Logg, 2008-2010.
#
# First added: 2008-03-05
# Last changed: 2013-05-29
import os
import dolfin.cpp as cpp
import ufl
__all__ = ['plot']
# Compatibility with book
def _VTKPlotter_write_ps(self, *args, **kwargs) :
print "*** Warning: VTKPlotter::write_ps() is not implemented -- use write_pdf instead"
def plot(object, *args, **kwargs):
"""
Plot given object.
*Arguments*
object
a :py:class:`Mesh <dolfin.cpp.Mesh>`, a :py:class:`MeshFunction
<dolfin.cpp.MeshFunction>`, a :py:class:`Function
<dolfin.functions.function.Function>`, a :py:class:`Expression`
<dolfin.cpp.Expression>, a :py:class:`DirichletBC`
<dolfin.cpp.DirichletBC> or a :py:class:`FiniteElement
<ufl.FiniteElement>`.
*Examples of usage*
In the simplest case, to plot only e.g. a mesh, simply use
.. code-block:: python
mesh = UnitSquare(4,4)
plot(mesh)
Use the ``title`` argument to specify title of the plot
.. code-block:: python
        plot(mesh, title="Finite element mesh")
It is also possible to plot an element
.. code-block:: python
element = FiniteElement("BDM", tetrahedron, 3)
plot(element)
Vector valued functions can be visualized with an alternative mode
.. code-block:: python
plot(u, mode = "glyphs")
A more advanced example
.. code-block:: python
plot(u,
wireframe = True, # use wireframe rendering
interactive = False, # do not hold plot on screen
scalarbar = False, # hide the color mapping bar
hardcopy_prefix = "myplot", # default plotfile name
scale = 2.0 # scale the warping/glyphs
title = "Fancy plot" # Set your own title
)
"""
mesh = kwargs.get('mesh')
p = cpp.Parameters()
for key in kwargs:
# If there is a "mesh" kwarg it should not be added to the parameters
if key != "mesh":
try:
p.add(key, kwargs[key])
except TypeError:
cpp.warning("Incompatible type for keyword argument \"%s\". Ignoring." % key)
# Plot element
if isinstance(object, ufl.FiniteElementBase):
if os.environ.get("DOLFIN_NOPLOT", "0") != "0": return
import ffc
return ffc.plot(object, *args, **kwargs)
if mesh is None and len(args) == 1 and isinstance(args[0], cpp.Mesh):
mesh = args[0]
# Plot expression
if isinstance(object, cpp.Expression):
if mesh is None:
raise TypeError, "expected a mesh when plotting an expression."
return cpp.plot(object, mesh, p)
# Try to project if object is not a standard plottable type
if not isinstance(object, (cpp.Function, cpp.Expression, cpp.Mesh,
cpp.DirichletBC, cpp.MeshFunction, cpp.MeshFunctionBool,
cpp.MeshFunctionInt, cpp.MeshFunctionDouble,
cpp.MeshFunctionSizet, cpp.DirichletBC, cpp.CSGGeometry)):
from dolfin.fem.projection import project
try:
cpp.info("Object cannot be plotted directly, projecting to"\
" piecewise linears.")
object = project(object, mesh=mesh)
except Exception as e:
raise RuntimeError(("Don't know how to plot given object:\n %s\n"\
"and projection failed:\n %s") % (str(object), str(e)))
plot_object = cpp.plot(object, p)
plot_object.write_ps = _VTKPlotter_write_ps
# Avoid premature deletion of plotted objects if they go out of scope
# before the plot window is closed. The plotter itself is safe, since it's
# created in the plot() C++ function, not directly from Python. But the
# Python plotter proxy may disappear, so we can't store the references
# there.
global _objects_referenced_from_plot_windows
_objects_referenced_from_plot_windows[plot_object.key()] = (object, mesh, p)
return plot_object
_objects_referenced_from_plot_windows = {}
|
zmathe/WebAppDIRAC
|
Lib/WebHandler.py
|
Python
|
gpl-3.0
| 10,812 | 0.045135 |
from DIRAC import gLogger
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.DISET.ThreadConfig import ThreadConfig
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.DISET.AuthManager import AuthManager
from WebAppDIRAC.Lib.SessionData import SessionData
from WebAppDIRAC.Lib import Conf
import ssl
import functools
import sys
import types
import json
import traceback
import tornado.web
import tornado.ioloop
import tornado.gen
import tornado.stack_context
import tornado.websocket
from concurrent.futures import ThreadPoolExecutor
global gThreadPool
gThreadPool = ThreadPoolExecutor( 100 )
class WErr( tornado.web.HTTPError ):
def __init__( self, code, msg = "", **kwargs ):
super( WErr, self ).__init__( code, str( msg ) or None )
for k in kwargs:
setattr( self, k, kwargs[ k ] )
self.ok = False
self.msg = msg
self.kwargs = kwargs
def __str__( self ):
return super( tornado.web.HTTPError, self ).__str__()
@classmethod
def fromSERROR( cls, result ):
#Prevent major fuckups with % in the message
return cls( 500, result[ 'Message' ].replace( "%", "" ) )
class WOK( object ):
def __init__( self, data = False, **kwargs ):
|
for k in kwargs:
setattr( self, k, kwargs[ k ] )
self.ok = True
self.data = data
def asyncWithCallback( method ):
return tornado.web.asynchronous( method )
def as
|
yncGen( method ):
return tornado.gen.coroutine( method )
class WebHandler( tornado.web.RequestHandler ):
__disetConfig = ThreadConfig()
__log = False
#Auth requirements
AUTH_PROPS = None
#Location of the handler in the URL
LOCATION = ""
#URL Schema with holders to generate handler urls
URLSCHEMA = ""
#RE to extract group and setup
PATH_RE = ""
#Helper function to create threaded gen.Tasks with automatic callback and execption handling
def threadTask( self, method, *args, **kwargs ):
"""
Helper method to generate a gen.Task and automatically call the callback when the real
method ends. THIS IS SPARTAAAAAAAAAA. SPARTA has improved using futures ;)
"""
#Save the task to access the runner
genTask = False
#This runs in the separate thread, calls the callback on finish and takes into account exceptions
def cbMethod( *cargs, **ckwargs ):
cb = ckwargs.pop( 'callback' )
method = cargs[0]
disetConf = cargs[1]
cargs = cargs[2]
self.__disetConfig.reset()
self.__disetConfig.load( disetConf )
ioloop = tornado.ioloop.IOLoop.instance()
try:
result = method( *cargs, **ckwargs )
ioloop.add_callback( functools.partial( cb, result ) )
except Exception as excp:
gLogger.error( "Following exception occured %s" % excp )
exc_info = sys.exc_info()
genTask.set_exc_info( exc_info )
ioloop.add_callback( lambda : genTask.exception() )
#Put the task in the thread :)
def threadJob( tmethod, *targs, **tkwargs ):
tkwargs[ 'callback' ] = tornado.stack_context.wrap( tkwargs[ 'callback' ] )
targs = ( tmethod, self.__disetDump, targs )
gThreadPool.submit( cbMethod, *targs, **tkwargs )
#Return a YieldPoint
genTask = tornado.gen.Task( threadJob, method, *args, **kwargs )
return genTask
def __disetBlockDecor( self, func ):
def wrapper( *args, **kwargs ):
raise RuntimeError( "All DISET calls must be made from inside a Threaded Task! Bad boy!" )
return wrapper
def __init__( self, *args, **kwargs ):
"""
Initialize the handler
"""
super( WebHandler, self ).__init__( *args, **kwargs )
if not WebHandler.__log:
WebHandler.__log = gLogger.getSubLogger( self.__class__.__name__ )
self.__credDict = {}
self.__setup = Conf.setup()
self.__processCredentials()
self.__disetConfig.reset()
self.__disetConfig.setDecorator( self.__disetBlockDecor )
self.__disetDump = self.__disetConfig.dump()
match = self.PATH_RE.match( self.request.path )
self._pathResult = self.__checkPath( *match.groups() )
self.__sessionData = SessionData( self.__credDict, self.__setup )
def __processCredentials( self ):
"""
Extract the user credentials based on the certificate or what comes from the balancer
"""
#NGINX
if Conf.balancer() == "nginx":
headers = self.request.headers
if headers[ 'X-Scheme' ] == "https" and headers[ 'X-Ssl_client_verify' ] == 'SUCCESS':
DN = headers[ 'X-Ssl_client_s_dn' ]
self.__credDict[ 'DN' ] = DN
self.__credDict[ 'issuer' ] = headers[ 'X-Ssl_client_i_dn' ]
result = Registry.getUsernameForDN( DN )
if not result[ 'OK' ]:
self.__credDict[ 'validDN' ] = False
else:
self.__credDict[ 'validDN' ] = True
self.__credDict[ 'username' ] = result[ 'Value' ]
return
#TORNADO
if not self.request.protocol == "https":
return
derCert = self.request.get_ssl_certificate( binary_form = True )
if not derCert:
return
pemCert = ssl.DER_cert_to_PEM_cert( derCert )
chain = X509Chain()
chain.loadChainFromString( pemCert )
result = chain.getCredentials()
if not result[ 'OK' ]:
self.log.error( "Could not get client credentials %s" % result[ 'Message' ] )
return
self.__credDict = result[ 'Value' ]
#Hack. Data coming from OSSL directly and DISET difer in DN/subject
try:
self.__credDict[ 'DN' ] = self.__credDict[ 'subject' ]
except KeyError:
pass
def _request_summary( self ):
"""
Return a string returning the summary of the request
"""
summ = super( WebHandler, self )._request_summary()
cl = []
if self.__credDict.get( 'validDN', False ):
cl.append( self.__credDict[ 'username' ] )
if self.__credDict.get( 'validGroup', False ):
cl.append( "@%s" % self.__credDict[ 'group' ] )
cl.append( " (%s)" % self.__credDict[ 'DN' ] )
summ = "%s %s" % ( summ, "".join( cl ) )
return summ
@property
def log( self ):
return self.__log
@classmethod
def getLog( cls ):
return cls.__log
def getUserDN( self ):
return self.__credDict.get( 'DN', '' )
def getUserName( self ):
return self.__credDict.get( 'username', '' )
def getUserGroup( self ):
return self.__credDict.get( 'group', '' )
def getUserSetup( self ):
return self.__setup
def getUserProperties( self ):
return self.__sessionData.getData().properties
def isRegisteredUser( self ):
return self.__credDict.get( 'validDN', "" ) and self.__credDict.get( 'validGroup', "" )
def getSessionData( self ):
return self.__sessionData.getData()
def actionURL( self, action = "" ):
"""
Given an action name for the handler, return the URL
"""
if action == "index":
action = ""
group = self.getUserGroup()
if group:
group = "/g:%s" % group
setup = self.getUserSetup()
if setup:
setup = "/s:%s" % setup
location = self.LOCATION
if location:
location = "/%s" % location
ats = dict( action = action, group = group, setup = setup, location = location )
return self.URLSCHEMA % ats
def __auth( self, handlerRoute, group ):
"""
Authenticate request
"""
userDN = self.getUserDN()
if group:
self.__credDict[ 'group' ] = group
else:
if userDN:
result = Registry.findDefaultGroupForDN( userDN )
if result[ 'OK' ]:
self.__credDict[ 'group' ] = result[ 'Value' ]
self.__credDict[ 'validGroup' ] = False
if type( self.AUTH_PROPS ) not in ( types.ListType, types.TupleType ):
self.AUTH_PROPS = [ p.strip() for p in self.AUTH_PROPS.split( "," ) if p.strip() ]
allAllowed = False
for p in self.AUTH_PROPS:
if p.lower() in ( 'all', 'any' ):
allAllowed = True
auth = AuthManager( Conf.getAuthSectionForHandler( handlerRoute ) )
ok = auth.authQuery( "", self.__credDict, self.AUTH_PROPS )
if ok:
if userDN:
self.__credDict[ 'validGroup' ] = True
self.log.info( "AUTH OK: %s by %s@%s (%s)" % ( handlerRoute, self.__credDict[ 'us
|
rehandalal/standup
|
standup/apps/api2/decorators.py
|
Python
|
bsd-3-clause
| 480 | 0 |
from functools import wraps
from flask import current_app
|
, request
from standup.errors import api_error
def api_key_required(view):
@wraps(view)
def wrapper(*args, **kwargs):
data = request.args if request.method == 'GET' else request.form
api_key = data.get
|
('api_key', '')
if api_key != current_app.config.get('API_KEY'):
return api_error(403, 'Forbidden: Invalid API key.')
return view(*args, **kwargs)
return wrapper
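# Illustrative usage sketch (editor addition, not part of the original module):
# a minimal Flask app protecting one view with api_key_required. The route,
# app object and API_KEY value below are assumptions for demonstration only.
#
#     from flask import Flask, jsonify
#
#     app = Flask(__name__)
#     app.config['API_KEY'] = 'secret-key'
#
#     @app.route('/api/v2/ping')
#     @api_key_required
#     def ping():
#         return jsonify({'ok': True})
#
# A GET to /api/v2/ping?api_key=secret-key returns the JSON body; any other
# key yields the 403 api_error above.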
|
hankcs/HanLP
|
hanlp/components/mtl/multi_task_learning.py
|
Python
|
apache-2.0
| 38,042 | 0.00276 |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-06-20 19:55
import functools
import itertools
import logging
import os
from collections import defaultdict
from copy import copy
from itertools import chain
from typing import Union, List, Callable, Dict, Optional, Any, Iterable, Tuple
import numpy as np
import torch
from hanlp_common.constant import IDX, BOS, EOS
from hanlp_common.document import Document
from hanlp_common.util import merge_locals_kwargs, topological_sort, reorder, prefix_match
from hanlp_common.visualization import markdown_table
from toposort import toposort
from torch.utils.data import DataLoader
import hanlp.utils.torch_util
from hanlp.common.dataset import PadSequenceDataLoader, PrefetchDataLoader, CachedDataLoader
from hanlp.common.structure import History
from hanlp.common.torch_component import TorchComponent
from hanlp.common.transform import FieldLength, TransformList
from hanlp.components.mtl.tasks import Task
from hanlp.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding, ContextualWordEmbeddingModule
from hanlp.layers.embeddings.embedding import Embedding
from hanlp.layers.transformers.pt_imports import optimization
from hanlp.layers.transformers.utils import pick_tensor_for_each_token
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp.transform.transformer_tokenizer import TransformerSequenceTokenizer
from hanlp.utils.time_util import CountdownTimer
from hanlp.utils.torch_util import clip_grad_norm
class MultiTaskModel(torch.nn.Module):
def __init__(self,
encoder: torch.nn.Module,
scalar_mixes: torch.nn.ModuleDict,
decoders: torch.nn.ModuleDict,
use_raw_hidden_states: dict) -> None:
super().__init__()
self.use_raw_hidden_states = use_raw_hidden_states
self.encoder: ContextualWordEmbeddingModule = encoder
self.scalar_mixes = scalar_mixes
self.decoders = decoders
class MultiTaskDataLoader(DataLoader):
def __init__(self, training=True, tau: float = 0.8, **dataloaders) -> None:
# noinspection PyTypeChecker
super().__init__(None)
self.tau = tau
self.training = training
self.dataloaders: Dict[str, DataLoader] = dataloaders if dataloaders else {}
# self.iterators = dict((k, iter(v)) for k, v in dataloaders.items())
def __len__(self) -> int:
if self.dataloaders:
return sum(len(x) for x in self.dataloaders.values())
return 0
def __iter__(self):
if self.training:
sampling_weights, total_size = self.sampling_weights
task_names = list(self.dataloaders.keys())
iterators = dict((k, itertools.cycle(v)) for k, v in self.dataloaders.items())
for i in range(total_size):
task_name = np.random.choice(task_names, p=sampling_weights)
yield task_name, next(iterators[task_name])
else:
for task_name, dataloader in self.dataloaders.items():
for batch in dataloader:
yield task_name, batch
@property
def sampling_weights(self):
sampling_weights = self.sizes
total_size = sum(sampling_weights)
Z = sum(pow(v, self.tau) for v in sampling_weights)
sampling_weights = [pow(v, self.tau) / Z for v in sampling_weights]
return sampling_weights, total_size
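# Illustrative note (editor addition): with the default tau = 0.8 and two task
# loaders of 100 and 900 batches, the raw sizes [100, 900] become roughly
# [0.15, 0.85] after the power scaling above, so the smaller task is sampled
# more often than its plain share (0.1) would suggest.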
@property
def sizes(self):
return [len(v) for v in self.dataloaders.values()]
class MultiTaskLearning(TorchComponent):
def __init__(self, **kwargs) -> None:
""" A multi-task learning (MTL) framework. It shares the same encoder across multiple decoders. These decoders
can have dependencies on each other which will be properly handled during decoding. To integrate a component
into this MTL framework, a component needs to implement the :class:`~hanlp.components.mtl.tasks.Task` interface.
This framework mostly follows the architecture of :cite:`clark-etal-2019-bam` and :cite:`he-choi-2021-stem`, with additional scalar mix
tricks (:cite:`kondratyuk-straka-2019-75`) allowing each task to attend to any subset of layers. We also
experimented with knowledge distillation on single tasks, the performance gain was nonsignificant on a large
dataset. In the near future, we have no plan to invest more efforts in distillation, since most datasets HanLP
uses are relatively large, and our hardware is relatively powerful.
Args:
**kwargs: Arguments passed to config.
"""
super().__init__(**kwargs)
self.model: Optional[MultiTaskModel] = None
self.tasks: Dict[str, Task] = None
self.vocabs = None
def build_dataloader(self,
data,
batch_size,
shuffle=False,
device=None,
logger: logging.Logger = None,
gradient_accumulation=1,
tau: float = 0.8,
prune=None,
prefetch=None,
tasks_need_custom_eval=None,
cache=False,
debug=False,
**kwargs) -> DataLoader:
# This method is only called during training or evaluation but not prediction
dataloader = MultiTaskDataLoader(training=shuffle, tau=tau)
for i, (task_name, task) in enumerate(self.tasks.items()):
encoder_transform, transform = self.build_transform(task)
training = None
if data == 'trn':
if debug:
_data = task.dev
else:
_data = task.trn
training = True
elif data == 'dev':
_data = task.dev
training = False
elif data == 'tst':
_data = task.tst
training = False
else:
_data = data
if isinstance(data, str):
logger.info(f'[yellow]{i + 1} / {len(self.tasks)}[/yellow] Building [blue]{data}[/blue] dataset for '
f'[cyan]{task_name}[/cyan] ...')
# Adjust Tokenizer according to task config
config = copy(task.config)
config.pop('transform', None)
task_dataloader: DataLoader = task.build_dataloader(_data, transform, training, device, logger,
|
tokenizer=encoder_transform.tokenizer,
gradient_accumulation=gradient_accumulation,
cache=isinstance(data, str), **config)
# if prune:
# # noinspection PyTypeChecker
|
# task_dataset: TransformDataset = task_dataloader.dataset
# size_before = len(task_dataset)
# task_dataset.prune(prune)
# size_after = len(task_dataset)
# num_pruned = size_before - size_after
# logger.info(f'Pruned [yellow]{num_pruned} ({num_pruned / size_before:.1%})[/yellow] '
# f'samples out of {size_before}.')
if cache and data in ('trn', 'dev'):
task_dataloader: CachedDataLoader = CachedDataLoader(
task_dataloader,
f'{cache}/{os.getpid()}-{data}-{task_name.replace("/", "-")}-cache.pt' if isinstance(cache,
str) else None
)
dataloader.dataloaders[task_name] = task_dataloader
if data == 'trn':
sampling_weights, total_size = dataloader.sampling_weights
headings = ['task', '#batches', '%batches', '#scaled', '%scaled', '#epoch']
matrix = []
min_epochs = []
for (task_name, dataset), we
|
michaelStettler/HISI
|
HISI/boundaries.py
|
Python
|
mit
| 16,880 | 0.005746 |
import numpy as np
from scipy import signal
import math
def norm_matrix(matrix):
for i in range(np.shape(matrix)[0]):
if np.max(matrix[i]) <= 0:
matrix[i] = matrix[i]
else:
matrix[i] /= np.max(matrix[i])
return matrix
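# Illustrative note (editor addition): norm_matrix divides each row of the input
# by its own maximum so that the row peaks at 1; rows whose maximum is <= 0 are
# left unchanged.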
def pool_boundaries(boundaries, filter_size, coeff):
"""
Parameters
----------
boundaries : matrix containing the weights of the boundaries
filter_size : define the size of the pooling
coeff : define the strength coefficient of the pooling
Returns : new matrix of boundaries
-------
"""
pool = np.zeros(np.shape(boundaries))
size_filters = np.arange(filter_size) + 1
weight_pooling = [0.1, 0.08, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05] * coeff
for i,size_filter in enumerate(size_filters):
#vertical pooling
pool[:, :-size_filter,:] += weight_pooling[i] * boundaries[:, size_filter:, :]
pool[:, size_filter:, :] += weight_pooling[i] * boundaries[:, :-size_filter,:]
#horizontal pooling
pool[:, :, :-size_filter] += weight_pooling[i] * boundaries[:, :, size_filter:]
pool[:, :, size_filter:] += weight_pooling[i] * boundaries[:, :, :-size_filter]
pool[pool < 0] = 0
return boundaries + pool
def pool_shade_boundaries(boundaries):
pool = np.zeros(np.shape(boundaries))
size_filters = [1, 2, 3]
weight_pooling = [1.5, 1, 1]
# weight_pooling = [.5, .7, .3]
# size_filters = [1]
# weight_pooling = [1]
for k, size_filter in enumerate(size_filters):
for i in range(size_filter, np.shape(boundaries)[1] - size_filter):
for j in range(size_filter, np.shape(boundaries)[2] - size_filter):
pool[0, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[2, i - size_filter + 1, j - 1], boundaries[2, i + size_filter, j]]) -
np.mean([boundaries[2, i - size_filter + 1, j], boundaries[2, i + size_filter, j - 1]])
))
pool[0, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[3, i - size_filter + 1, j], boundaries[3, i + size_filter, j + 1]]) -
np.mean([boundaries[3, i - size_filter + 1, j + 1], boundaries[3, i + size_filter, j]])
))
pool[1, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[2, i - size_filter, j - 1], boundaries[2, i + size_filter - 1, j]]) -
np.mean([boundaries[2, i - size_filter, j], boundaries[2, i + size_filter - 1, j - 1]])
))
pool[1, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[3, i - size_filter, j], boundaries[3, i + size_filter - 1, j + 1]]) -
np.mean([boundaries[3, i - size_filter, j + 1], boundaries[3, i + size_filter - 1, j]])
))
pool[2, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[0, i - 1, j - size_filter + 1], boundaries[0, i, j + size_filter]]) -
np.mean([boundaries[0, i - 1, j + size_filter], boundaries[0, i, j - size_filter + 1]])
))
pool[2, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[1, i, j - size_filter + 1], boundaries[1, i + 1, j + size_filter]]) -
np.mean([boundaries[1, i, j + size_filter], boundaries[1, i + 1, j - size_filter + 1]])
))
pool[3, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[0, i - 1, j - size_filter + 1], boundaries[0, i, j + size_filter]]) -
np.mean([boundaries[0, i - 1, j + size_filter], boundaries[0, i, j - size_filter + 1]])
))
pool[3, i, j] += weight_pooling[k] * (np.abs(
np.mean([boundaries[1, i, j - size_filter], boundaries[1, i + 1, j + size_filter - 1]]) -
np.mean([boundaries[1, i, j + size_filter - 1], boundaries[1, i + 1, j - size_filter]])
))
return boundaries + pool
def rem_iner_bound(input, boundaries, tresh):
"""
Parameters
----------
input : image input
boundaries : matrix containing the weights of the boundaries
tresh : threshold used as a sensitivity coefficient; a small threshold means high sensitivity
Returns the matrix of boundaries
-------
"""
pool = np.copy(boundaries)
for i in range(np.shape(input)[0] - 1):
for j in range(np.shape(input)[1] - 1):
patch = input[i:i + 2, j:j + 2]
min = np.min(patch)
max = np.max(patch)
diff = max-min
mean = np.mean(patch)
if 0 <= min:
#remove boundaries of background and very similar colors
if diff < 0.05:
boundaries[0, i, j:j + 2] -= pool[0, i, j:j + 2]
boundaries[1, i + 1, j:j + 2] -= pool[1, i + 1, j:j + 2]
boundaries[2, i:i + 2, j] -= pool[2, i:i + 2, j]
boundaries[3, i:i + 2, j + 1] -= pool[3, i:i + 2, j + 1]
else:
if mean > 0.5 and diff < tresh:
boundaries[0, i, j:j + 2] -= pool[0, i, j:j + 2]
boundaries[1, i +
|
1, j:j + 2] -= pool[1, i + 1, j:j + 2]
boundaries[2, i:i + 2, j] -= pool[2, i:i + 2, j]
boundaries[3, i:i + 2, j + 1] -= pool[3, i:i + 2, j + 1]
boundaries[boundaries < 0] = 0
return boundaries
def rem_inner_seg_bound(input, boundaries):
for i in range(np.shape(input)[0] - 1):
for j in range(np.shape(input)[1] - 1):
patch = input[i:i + 2, j:j + 2]
neg = patch[patch < 0]
if np.shape(neg)[
|
0] == 4:
boundaries[0, i, j:j + 2] = 0
boundaries[1, i + 1, j:j + 2] = 0
boundaries[2, i:i + 2, j] = 0
boundaries[3, i:i + 2, j + 1] = 0
def choose_loc(x, y, dir):
"""
Return the position of the next pixel as a function of the direction one wants to visit
Parameters
----------
x
y
dir
Returns
-------
"""
if dir == 0:
return [x-1,y]
elif dir == 1:
return [x+1,y]
elif dir == 2:
return [x, y-1]
elif dir == 3:
return [x, y+1]
def calculate_pixel(input, seg_img, boundaries, loc, thresh_bound):
direction = []
for dir in range(4):
pos = choose_loc(loc[0], loc[1], dir)
if 0 <= pos[0] < np.shape(input)[0] and 0 <= pos[1] < np.shape(input)[1]:
if boundaries[dir, pos[0], pos[1]] < thresh_bound:
if input[pos[0], pos[1]] > 0:
direction.append(dir)
elif seg_img[pos[0], pos[1]] > 0:
direction.append(dir)
for dir in direction:
pos = choose_loc(loc[0], loc[1], dir)
if input[pos[0],pos[1]] > 0:
seg_img[loc[0], loc[1]] += (1 / np.shape(direction)[0]) * input[pos[0], pos[1]]
else:
seg_img[loc[0], loc[1]] += (1 / np.shape(direction)[0]) * seg_img[pos[0], pos[1]]
def fill_pixel(visited_pixel, input, bound, seg_img, thresh_bound):
#fill pixels with the real pixel values from images
for i in range(np.shape(input)[0]):
for j in range(np.shape(input)[1]):
if visited_pixel[i, j] == 1 and input[i, j] > 0:
seg_img[i, j] = input[i, j]
#fill pixels of segmented images
#todo find a better way than this double loop
#todo perhaps need to do this in the four direction for gradients colors?
#top to down and left to right
for i in range(np.shape(input)[0]):
for j in range(np.shape(input)[1]):
if visited_pixel[i, j] == 1 and input[i, j] < 0:
calculate_pixel(input, seg_img, bound, [i, j], thresh_bound)
#bottom -> top right -> left filling for remaining
|
marrow/schema
|
marrow/schema/validate/geo.py
|
Python
|
mit
| 1,013 | 0.0385 |
from collections.abc import Sequence
from numbers import Number
from . import Validator, Length, Range, Instance
from .compound import
|
All
class Latitude(All):
"""Validate the given value as a number between -90 and +90 in decimal degrees, representing latitude."""
validators = [
Instance(Number),
Range(-90, 90)
]
latitude = Latitude()
class
|
Longitude(All):
"""Validate the given value as a number between -180 and +180 in decimal degrees, representing longitude."""
validators = [
Instance(Number),
Range(-180, 180)
]
longitude = Longitude()
class Position(All):
"""Validate the given value as any sequence of exactly two elements representing latitude and longitude."""
validators = [
Instance(Sequence),
Length(slice(2, 3)) # exactly two elements long
]
def validate(self, value, context=None):
value = super().validate(value, context)
_lat, _long = value
latitude.validate(_lat)
longitude.validate(_long)
return value
position = Position()
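# Illustrative usage sketch (editor addition): a quick exercise of the validators
# above. The coordinates are arbitrary, and the exception type is whatever
# marrow.schema raises for a failed Range check (an assumption here).
if __name__ == '__main__':
    print(position.validate((59.33, 18.07)))   # a valid latitude/longitude pair
    try:
        position.validate((123.4, 18.07))      # latitude outside [-90, 90]
    except Exception as exc:
        print('rejected:', exc)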
|
softcert/vsroom
|
vsroom/common/customoverview.py
|
Python
|
mit
| 1,942 | 0.003605 |
import time
import collections
from overviewbot import OverviewBot, Window
def format_time(timestamp):
return time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime(timestamp))
class CustomWindow(Window):
def __init__(self, input_key, *args, **keys):
Window.__init__(self, *args, **keys)
self.input_key = input_key
self.values = dict()
def added(self, event):
for value in event.values(self.input_key):
self.values[value] = self.values.get(value, 0) + 1
def discarded(self, event):
for value in event.values(self.input_key):
self.values[value] = self.values.get(value, 0) - 1
if self.values[value] <= 0:
del self.values[value]
def value(self):
if not self.values:
return None
if len(self.values) == 1:
return self.values.keys()[0]
return "%d unique values" % len(self.values)
class _Seen(Window):
def __init__(self, *args, **keys):
Window.__init__(self, *args, **keys)
self._times = collections.deque()
def added(self, _):
self._times.append(time.time())
def disca
|
rded(self, _):
self._times.popleft()
def _firstseen(self):
if self._times:
return format_time(self._times[0])
return None
def _lastseen(self):
if self._time
|
s:
return format_time(self._times[-1])
return None
class FirstSeen(_Seen):
def value(self):
return self._firstseen()
class LastSeen(_Seen):
def value(self):
return self._lastseen()
class CustomOverviewBot(OverviewBot):
def aggregates(self):
result = dict(OverviewBot.aggregates(self))
result["custom"] = CustomWindow
result["firstseen"] = FirstSeen
result["lastseen"] = LastSeen
return result
if __name__ == "__main__":
CustomOverviewBot.from_command_line().execute()
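# Illustrative note (editor addition): the three extra aggregates registered above
# report, per overview key, either the single shared value or an "<N> unique
# values" summary (custom), and the format_time()-formatted arrival time of the
# oldest/newest event still inside the window (firstseen/lastseen).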
|
arthurSena/processors
|
tests/test_hra.py
|
Python
|
mit
| 992 | 0.004036 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from processors.hra.extractors import _clean_identifie
|
r
from processors.hra.extractors import _url_from_title
# Tests
def test_clean_identifier():
assert _clean_identifier('NCT12345678', prefix='NCT') == 'NCT12345678'
assert _clean_identifier('12345678', prefix='NCT') == 'NCT12345678'
assert _clean_identifier('ISRCTN12345678', prefix='ISRCTN') == 'ISRCTN12345678'
assert _clean_identifier('12345678', prefix='ISRCTN') == 'ISRCTN12345678'
|
assert _clean_identifier('n/a', prefix='NCT') == None
def test_url_from_title():
title = 'Longterm F/U study of BOTOX® in Idiopathic Overactive Bladder patients'
expected_url = 'http://www.hra.nhs.uk/news/research-summaries/longterm-fu-study-of-botox-in-idiopathic-overactive-bladder-patients'
assert _url_from_title(title) == expected_url
|
noironetworks/heat
|
heat/tests/openstack/senlin/test_receiver.py
|
Python
|
apache-2.0
| 4,398 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from openstack import exceptions
from heat.common import template_format
from heat.engine.clients.os import senlin
from heat.engine.resources.openstack.senlin import receiver as sr
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
receiver_stack_template = """
heat_template_version: 2016-04-08
description: Senlin Receiver Template
resources:
senlin-receiver:
type: OS::Senlin::Receiver
properties:
name: SenlinReceiver
cluster: fake_cluster
action: CLUSTER_SCALE_OUT
type: webhook
params:
foo: bar
"""
class FakeReceiver(o
|
bject):
def __init__(self, id='s
|
ome_id'):
self.id = id
self.name = "SenlinReceiver"
self.cluster_id = "fake_cluster"
self.action = "CLUSTER_SCALE_OUT"
self.channel = {'alarm_url': "http://foo.bar/webhooks/fake_url"}
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'cluster_id': self.cluster_id,
'action': self.action,
'channel': self.channel,
'actor': {'trust_id': ['fake_trust_id']}
}
class SenlinReceiverTest(common.HeatTestCase):
def setUp(self):
super(SenlinReceiverTest, self).setUp()
self.senlin_mock = mock.MagicMock()
self.patchobject(sr.Receiver, 'client',
return_value=self.senlin_mock)
self.patchobject(senlin.ClusterConstraint, 'validate',
return_value=True)
self.fake_r = FakeReceiver()
self.t = template_format.parse(receiver_stack_template)
def _init_recv(self, template):
self.stack = utils.parse_stack(template)
recv = self.stack['senlin-receiver']
return recv
def _create_recv(self, template):
recv = self._init_recv(template)
self.senlin_mock.create_receiver.return_value = self.fake_r
self.senlin_mock.get_receiver.return_value = self.fake_r
scheduler.TaskRunner(recv.create)()
self.assertEqual((recv.CREATE, recv.COMPLETE),
recv.state)
self.assertEqual(self.fake_r.id, recv.resource_id)
return recv
def test_recv_create_success(self):
self._create_recv(self.t)
expect_kwargs = {
'name': 'SenlinReceiver',
'cluster_id': 'fake_cluster',
'action': 'CLUSTER_SCALE_OUT',
'type': 'webhook',
'params': {'foo': 'bar'},
}
self.senlin_mock.create_receiver.assert_called_once_with(
**expect_kwargs)
def test_recv_delete_success(self):
self.senlin_mock.delete_receiver.return_value = None
recv = self._create_recv(self.t)
scheduler.TaskRunner(recv.delete)()
self.senlin_mock.delete_receiver.assert_called_once_with(
recv.resource_id)
def test_recv_delete_not_found(self):
self.senlin_mock.delete_receiver.side_effect = [
exceptions.ResourceNotFound(http_status=404)
]
recv = self._create_recv(self.t)
scheduler.TaskRunner(recv.delete)()
self.senlin_mock.delete_receiver.assert_called_once_with(
recv.resource_id)
def test_cluster_resolve_attribute(self):
excepted_show = {
'id': 'some_id',
'name': 'SenlinReceiver',
'cluster_id': 'fake_cluster',
'action': 'CLUSTER_SCALE_OUT',
'channel': {'alarm_url': "http://foo.bar/webhooks/fake_url"},
'actor': {'trust_id': ['fake_trust_id']}
}
recv = self._create_recv(self.t)
self.assertEqual(self.fake_r.channel,
recv._resolve_attribute('channel'))
self.assertEqual(excepted_show,
recv._show_resource())
|
ZenifiedFromI2P/antisocial
|
bootstraps/forms.py
|
Python
|
gpl-3.0
| 426 | 0.011737 |
from django import forms
attr = {'class': 'form-control'}
class GroupSeedForm(forms.Form):
|
seed = forms.CharField(label='Seed', max_length=1337, initial='none', widget=forms.TextInput(attrs=attr))
class UserSeedForm(forms.Form):
pseudonym = forms.CharField(label='Pseudonym', min_length=3, widget=forms.TextInput(attrs=attr))
password = forms.CharField(label='Password', widget=forms.PasswordInput(attrs=attr))
| |
BillBillBillBill/WishTalk-server
|
WishTalk/top/api/base.py
|
Python
|
mit
| 10,416 | 0.009482 |
# -*- coding: utf-8 -*-
'''
Crea
|
ted on 2012-
|
7-3
@author: lihao
'''
try: import httplib
except ImportError:
import http.client as httplib
import urllib
import time
import hashlib
import json
import top
import itertools
import mimetypes
'''
定义一些系统变量
'''
SYSTEM_GENERATE_VERSION = "taobao-sdk-python-20151214"
P_APPKEY = "app_key"
P_API = "method"
P_SESSION = "session"
P_ACCESS_TOKEN = "access_token"
P_VERSION = "v"
P_FORMAT = "format"
P_TIMESTAMP = "timestamp"
P_SIGN = "sign"
P_SIGN_METHOD = "sign_method"
P_PARTNER_ID = "partner_id"
P_CODE = 'code'
P_SUB_CODE = 'sub_code'
P_MSG = 'msg'
P_SUB_MSG = 'sub_msg'
N_REST = '/router/rest'
def sign(secret, parameters):
#===========================================================================
# '''签名方法
# @param secret: 签名需要的密钥
# @param parameters: 支持字典和string两种
# '''
#===========================================================================
# 如果parameters 是字典类的话
if hasattr(parameters, "items"):
keys = parameters.keys()
keys.sort()
parameters = "%s%s%s" % (secret,
str().join('%s%s' % (key, parameters[key]) for key in keys),
secret)
sign = hashlib.md5(parameters).hexdigest().upper()
return sign
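# Illustrative example (editor addition, values made up): for a dict argument the
# key/value pairs are concatenated in sorted key order and wrapped by the secret
# before hashing, e.g.
#     sign('secret', {'v': '2.0', 'method': 'taobao.user.get'})
# md5-hashes the string 'secretmethodtaobao.user.getv2.0secret' and returns the
# uppercased hex digest.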
def mixStr(pstr):
if(isinstance(pstr, str)):
return pstr
elif(isinstance(pstr, unicode)):
return pstr.encode('utf-8')
else:
return str(pstr)
class FileItem(object):
def __init__(self,filename=None,content=None):
self.filename = filename
self.content = content
class MultiPartForm(object):
"""Accumulate the data to be used when posting a form."""
def __init__(self):
self.form_fields = []
self.files = []
self.boundary = "PYTHON_SDK_BOUNDARY"
return
def get_content_type(self):
return 'multipart/form-data; boundary=%s' % self.boundary
def add_field(self, name, value):
"""Add a simple field to the form data."""
self.form_fields.append((name, str(value)))
return
def add_file(self, fieldname, filename, fileHandle, mimetype=None):
"""Add a file to be uploaded."""
body = fileHandle.read()
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((mixStr(fieldname), mixStr(filename), mixStr(mimetype), mixStr(body)))
return
def __str__(self):
"""Return a string representing the form data, including attached files."""
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
parts = []
part_boundary = '--' + self.boundary
# Add the form fields
parts.extend(
[ part_boundary,
'Content-Disposition: form-data; name="%s"' % name,
'Content-Type: text/plain; charset=UTF-8',
'',
value,
]
for name, value in self.form_fields
)
# Add the files to upload
parts.extend(
[ part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % \
(field_name, filename),
'Content-Type: %s' % content_type,
'Content-Transfer-Encoding: binary',
'',
body,
]
for field_name, filename, content_type, body in self.files
)
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
flattened.append('')
return '\r\n'.join(flattened)
class TopException(Exception):
#===========================================================================
# 业务异常类
#===========================================================================
def __init__(self):
self.errorcode = None
self.message = None
self.subcode = None
self.submsg = None
self.application_host = None
self.service_host = None
def __str__(self, *args, **kwargs):
sb = "errorcode=" + mixStr(self.errorcode) +\
" message=" + mixStr(self.message) +\
" subcode=" + mixStr(self.subcode) +\
" submsg=" + mixStr(self.submsg) +\
" application_host=" + mixStr(self.application_host) +\
" service_host=" + mixStr(self.service_host)
return sb
class RequestException(Exception):
#===========================================================================
# 请求连接异常类
#===========================================================================
pass
class RestApi(object):
#===========================================================================
# Rest api的基类
#===========================================================================
def __init__(self, domain='gw.api.taobao.com', port = 80):
#=======================================================================
# 初始化基类
# Args @param domain: 请求的域名或者ip
# @param port: 请求的端口
#=======================================================================
self.__domain = domain
self.__port = port
self.__httpmethod = "POST"
if(top.getDefaultAppInfo()):
self.__app_key = top.getDefaultAppInfo().appkey
self.__secret = top.getDefaultAppInfo().secret
def get_request_header(self):
return {
'Content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
"Cache-Control": "no-cache",
"Connection": "Keep-Alive",
}
def set_app_info(self, appinfo):
#=======================================================================
# 设置请求的app信息
# @param appinfo: import top
# appinfo top.appinfo(appkey,secret)
#=======================================================================
self.__app_key = appinfo.appkey
self.__secret = appinfo.secret
def getapiname(self):
return ""
def getMultipartParas(self):
return [];
def getTranslateParas(self):
return {};
def _check_requst(self):
pass
def getResponse(self, authrize=None, timeout=30):
#=======================================================================
# 获取response结果
#=======================================================================
connection = httplib.HTTPConnection(self.__domain, self.__port, False, timeout)
sys_parameters = {
P_FORMAT: 'json',
P_APPKEY: self.__app_key,
P_SIGN_METHOD: "md5",
P_VERSION: '2.0',
P_TIMESTAMP: str(long(time.time() * 1000)),
P_PARTNER_ID: SYSTEM_GENERATE_VERSION,
P_API: self.getapiname(),
}
if authrize is not None:
sys_parameters[P_SESSION] = authrize
application_parameter = self.getApplicationParameters()
sign_parameter = sys_parameters.copy()
sign_parameter.update(application_parameter)
sys_parameters[P_SIGN] = sign(self.__secret, sign_parameter)
connection.connect()
header = self.get_request_header();
if(self.getMultipartParas()):
form = MultiPartForm()
for key, value in application_parameter.items():
form.add_field(key, value)
for key in self.getMultipartParas():
fileitem = getattr(self,key)
if(fileitem and isinstance(fileitem,FileItem)):
form.add_file(key,fileitem.filename,fileitem.content)
body = str(form)
header['Content-type'] = form.get_content_type()
else:
body = urllib.urlencode(applicatio
|
syberkitten/Imap4SearchQueryParser
|
SearchParser.py
|
Python
|
gpl-3.0
| 16,261 | 0.011746 |
__author__ = 'Liam'
import types
def flag(func):
func.is_flag = True
return func
class BadSearchOp(Exception):
def __init__(self, value = "bad search operation"):
self.value = value
def __str__(self):
return "BadSearchOp: %s" % self.value
class ImapSearchQueryParser(object):
"""
Receives a list of commands for the IMAP V4 search
and returns a dictionary of the commands, that can be used in various mail API's
including walla API for mail
based on RFC3501:
https://tools.ietf.org/html/rfc3501#section-6.4.4
example of commands:
C: A282 SEARCH FLAGGED SINCE 1-Feb-1994 NOT FROM "Smith"
S: * SEARCH 2 84 882
S: A282 OK SEARCH completed
C: A283 SEARCH TEXT "string not in mailbox"
S: * SEARCH
S: A283 OK SEARCH completed
C: A284 SEARCH CHARSET UTF-8 TEXT {6}
C: XXXXXX
S: * SEARCH 43
S: A284 OK SEARCH completed
"""
def __init__(self):
"""
:param query:
:return:
"""
#self.log("{} constructor ".format(self.__class__.__name__))
self.opFunctionList = [x for x,y in self.__class__.__dict__.items() if type(y) == types.FunctionType]
self.query = None
self.commands = {}
self.commands_list = []
#self.__validate()
#########################################################################
#
def __repr__(self):
return self.__class__.__name__+", commands: %s" % self.commands
def log(self,msg):
print msg
#self.logger.log(logging.DEBUG,msg)
def __str__(self):
return str(self.commands)
def _update_command_list(self, command, idx1, idx2=None):
"""
Updates both the command list and commands as to prepare for OR parsing
:param command: a single dictionary object with one key:value (command:argument)
:param idx1: first index
:param idx2: second index
:return:
"""
command_wrapper = {
'data': command,
'pos': [idx1]
}
# update second position
if idx2:
command_wrapper['pos'].append(idx2)
# adding to command list with positions of current command and argument
self.commands_list.append(command_wrapper)
# update the command
self.commands.update(command)
@flag
def OP__ALL(self,currentIndex=None):
self._update_command_list({'all': True}, currentIndex)
@flag
def OP__ANSWERED(self,currentIndex=None):
self._update_command_list({'answered': True}, currentIndex)
def OP__BCC(self,currentIndex=None):
"""
BCC <string>
Messages that contain the specified string in the envelope
structure's BCC field.
:param currentIndex:
:return:
"""
if currentIndex+1 < len(self.query):
#todo check bcc validation
self._update_command_list({'bcc': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BCC" provided but with no argument in query list')
def OP__BEFORE(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'before': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BEFORE" provided but with no argument in query list')
def OP__BODY(self,currentIndex=None):
argument = self._get_command_argument(c
|
urrentIndex)
if argument:
self._update_command_list({'body': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BODY" provided but with no argument in query list')
def OP__CC(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._up
|
date_command_list({'cc': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "CC" provided but with no argument in query list')
@flag
def OP__DELETED(self,currentIndex=None):
self._update_command_list({'deleted': True}, currentIndex)
@flag
def OP__DRAFT(self,currentIndex=None):
self._update_command_list({'draft': True}, currentIndex)
@flag
def OP__FLAGGED(self,currentIndex=None):
self._update_command_list({'flagged': True}, currentIndex)
def OP__FROM(self,currentIndex=None):
"""
FROM <string>
Messages that contain the specified string in the envelope
structure's FROM field.
:return:
"""
# assuming that next item is the value, such as: FROM 'man@mayman.com'
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'from': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "FROM" provided but with no argument in query list')
def OP__HEADER(self,currentIndex=None):
# todo work on this one
pass
def OP__KEYWORD(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'keyword': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "KEYWORD" provided but with no argument in query list')
def OP__LARGER(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'larger': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "LARGER" provided but with no argument in query list')
@flag
def OP__NEW(self,currentIndex=None):
self._update_command_list({'new': True}, currentIndex)
@flag
def OP__OLD(self,currentIndex=None):
self._update_command_list({'old': True}, currentIndex)
@flag
def OP__RECENT(self,currentIndex=None):
self._update_command_list({'recent': True}, currentIndex)
@flag
def OP__SEEN(self,currentIndex=None):
self._update_command_list({'seen': True}, currentIndex)
@flag
def OP__UNANSWERED(self,currentIndex=None):
self._update_command_list({'unanswered': True}, currentIndex)
@flag
def OP_UNDRAFT(self,currentIndex=None):
self._update_command_list({'undraft': True}, currentIndex)
@flag
def OP__UNFLAGGED(self,currentIndex=None):
self._update_command_list({'unflagged': True}, currentIndex)
@flag
def OP__UNKEYWORD(self,currentIndex=None):
"""
UNKEYWORD <flag>
Messages that do not have the specified keyword flag set.
"""
# todo make it proper somehow
#self.commands.update({'seen': True})
@flag
def OP__UNSEEN(self,currentIndex=None):
self._update_command_list({'unseen': True}, currentIndex)
def OP__SENTBEFORE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'sentbefore': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SENTBEFORE" provided but with no argument in query list')
def OP__SENTON(self, currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'senton': self.query[currentIndex+1]}, currentIndex)
else:
raise BadSearchOp('Operator "SENTON" provided but with no argument in query list')
def OP__SENTSINCE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'sentsince': self.query[currentIndex+1]},currentIndex)
else:
raise BadSearchOp('Operator "SENTSINCE" provided but with no argument in query list')
def OP__SINCE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'since': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SINCE" provide
|
Binnette/StreetComplete
|
app/copyShopDescriptions.py
|
Python
|
gpl-3.0
| 1,324 | 0.036254 |
import os
import re
source_dir = "src/main/res/"
target_dir = "../fastlane/metadata/android/"
def copy_key_from_strings_xml_to_file(xml, key, filename):
match = re.search("<string name=\"" + key + "\">\"?(.*?)\"?</string>", xml, re.DOTALL)
if match:
with open(filename, "w", encoding='utf8') as file:
file.write(match.group(1))
def get_locale_from(dirname):
if not dirname.startswith("values"):
return None
components = dirname.split("-")
if len(components) == 1:
return "en"
elif re.search('[0
|
-9]',components[1]):
|
return None
elif len(components) == 2:
return components[1]
elif len(components) == 3:
return components[1] + "-" + components[2][1:]
return None
for dirname in sorted(os.listdir(source_dir)):
locale = get_locale_from(dirname)
if not locale:
continue
stringsfile = source_dir + dirname + "/strings.xml"
if not os.path.exists(stringsfile):
continue;
print(locale)
locale_dir = target_dir + locale
if not os.path.exists(locale_dir):
os.makedirs(locale_dir)
with open(stringsfile, 'r', encoding='utf8') as file:
xml = file.read()
copy_key_from_strings_xml_to_file(xml, "store_listing_short_description", locale_dir + "/short_description.txt")
copy_key_from_strings_xml_to_file(xml, "store_listing_full_description", locale_dir + "/full_description.txt")
|
suutari/gitexpy
|
gitexpy/pack.py
|
Python
|
gpl-2.0
| 8,148 | 0.003436 |
import binascii
import hashlib
import mmap
import struct
import zlib
from . import delta
from .sixx import byte2int
class Error(Exception):
"""Pack Error"""
OBJ_TYPE_COMMIT = 1
OBJ_TYPE_TREE = 2
OBJ_TYPE_BLOB = 3
OBJ_TYPE_TAG = 4
OBJ_TYPE_OFS_DELTA = 6
OBJ_TYPE_REF_DELTA = 7
object_types = {
1: 'commit',
2: 'tree',
3: 'blob',
4: 'tag',
6: 'ofs_delta',
7: 'ref_delta',
}
DELTA_OBJECT_TYPES = [OBJ_TYPE_OFS_DELTA, OBJ_TYPE_REF_DELTA]
class Packfile(object):
def __init__(self, filename):
self.__file = open(filename, 'rb')
if self.__file.read
|
(4) != b'PACK':
raise Error('Not a packfile: %s' % filename)
self.version = struct.unpack('>L', self.__file.read(4))[0]
if self.version != 2:
raise Error(
'Version %d packfile is not supported: %s' %
(self.version, filename))
self.__objectcount = struct.unpack('>L', self.__file.
|
read(4))[0]
self.header_length = self.__file.tell()
self.data = mmap.mmap(
self.__file.fileno(), length=0, access=mmap.ACCESS_READ)
self.object_offset_map = {}
self.offset_id_map = {}
self.offsets = [self.header_length]
@property
def filename(self):
return self.__file.name
def __iter__(self):
for i in range(len(self)):
yield self[i]
def first_object(self):
return self.object_at(self.header_length)
def object_at(self, offset):
try:
return self.object_offset_map[offset]
except KeyError:
obj = PackfileObject(self, offset)
self.object_offset_map[offset] = obj
return obj
def object_by_id(self, object_id):
try:
return self.object_at(self.offset_id_map[object_id])
except KeyError:
for obj in self:
self.offset_id_map[obj.id] = obj.offset
if obj.id == object_id:
return obj
raise Error(
'Object with id=%s not found' %
binascii.hexlify(object_id).decode('ascii'))
def __len__(self):
return self.__objectcount
def __getitem__(self, i):
if i < 0 or i >= len(self):
raise IndexError(
'Object index %d is not in [0,%d]' % (i, len(self)-1))
if len(self.offsets) <= i:
offset = self.offsets[-1]
n = len(self.offsets) - 1
while n <= i:
offset = self.object_at(offset).end
n += 1
assert n == len(self.offsets)
self.offsets.append(offset)
assert len(self.offsets) > i
return self.object_at(self.offsets[i])
def is_checksum_ok(self):
sha = hashlib.sha1()
sha.update(self.data[:-20])
return self.data[-20:] == sha.digest()
def verify(self):
last_object_end = self[len(self)-1].end
assert last_object_end == len(self.data) - 20
assert self.is_checksum_ok()
for obj in self:
assert obj.size == len(obj.decompressed_data)
if obj.type in DELTA_OBJECT_TYPES:
assert obj.delta_base
class PackfileObject(object):
def __init__(self, packfile, offset):
self.packfile = packfile
self.pack = packfile.data
self.offset = offset
self.__init_from_header()
self.__end = None
self.__delta_base = None
self.__delta_depth = None
self.__real_type = None
self.__decompressed_data = None
self.__data = None
self.__id = None
def __init_from_header(self):
pos = self.offset
self.type = (byte2int(self.pack[pos]) & 0b01110000) >> 4
sz = byte2int(self.pack[pos]) & 0b00001111
shift = 4
while byte2int(self.pack[pos]) & 0b10000000:
pos += 1
sz |= (byte2int(self.pack[pos]) & 0b01111111) << shift
shift += 7
self.size = sz
if self.type == OBJ_TYPE_OFS_DELTA:
pos += 1
dplus = 0
dplusadd = 1
doff = byte2int(self.pack[pos]) & 0b01111111
while byte2int(self.pack[pos]) & 0b10000000:
pos += 1
dplusadd <<= 7
dplus |= dplusadd
doff <<= 7
doff |= (byte2int(self.pack[pos]) & 0b01111111)
self.delta_offset = doff + dplus
self.__delta_base_id = None
elif self.type == OBJ_TYPE_REF_DELTA:
self.delta_offset = None
self.__delta_base_id = self.pack[pos+1:pos+21]
pos += 20
else:
self.delta_offset = None
self.__delta_base_id = None
self.start = pos + 1
@property
def end(self):
if self.__end is None:
self.__decompress()
return self.__end
@property
def delta_base(self):
if self.__delta_base is None:
if self.delta_offset is not None:
self.__delta_base = self.packfile.object_at(
self.offset - self.delta_offset)
elif self.__delta_base_id is not None:
self.__delta_base = self.packfile.object_by_id(
self.__delta_base_id)
return self.__delta_base
@property
def delta_base_id(self):
if self.__delta_base_id is None:
if self.delta_base is not None:
self.__delta_base_id = self.delta_base.id
return self.__delta_base_id
@property
def delta_depth(self):
if self.__delta_depth is None:
if self.delta_base is not None:
self.__delta_depth = self.delta_base.delta_depth + 1
else:
self.__delta_depth = 0
return self.__delta_depth
@property
def real_type(self):
if self.__real_type is None:
if self.delta_base is not None:
self.__real_type = self.delta_base.real_type
else:
self.__real_type = self.type
return self.__real_type
@property
def raw_data(self):
return self.pack[self.start:self.end]
@property
def decompressed_data(self):
if self.__decompressed_data is None:
self.__decompress()
return self.__decompressed_data
@property
def data(self):
if self.__data is None:
if self.type in DELTA_OBJECT_TYPES:
self.__data = delta.decode_delta(
self.decompressed_data, self.delta_base.data)
else:
self.__data = self.decompressed_data
return self.__data
@property
def id(self):
if self.__id is None:
hdr = '%s %d\0' % (object_types[self.real_type], len(self.data))
sha = hashlib.sha1()
sha.update(hdr.encode('ascii') + self.data)
self.__id = sha.digest()
return self.__id
def __decompress(self):
block_len = 4096
decompressor = zlib.decompressobj()
pos = self.start
data = b''
while True:
in_block_len = min(block_len, len(self.pack) - pos)
in_block = self.pack[pos:pos+in_block_len]
assert len(in_block) == in_block_len, '%d != %d' % (len(in_block), in_block_len)
decompressed = decompressor.decompress(in_block)
pos += in_block_len
data += decompressed
if decompressor.unused_data:
break
if pos >= len(self.pack):
assert pos == len(self.pack)
assert not decompressor.unconsumed_tail
break
self.__decompressed_data = data
self.__end = pos - len(decompressor.unused_data)
def __repr__(self):
typestr = (
object_types[self.type] if self.type in object_types
else 'type=%d' % self.type)
return '<%s %s offset=%d>' % (
self.__class__.__name__, typestr, self.offset)
def main(sys):
Packfile(sys.argv[1]).verify()
if __name__ == '__main__':
import sys
main(sys)
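# Illustrative usage (editor addition): assuming the gitexpy package is importable,
#     python -m gitexpy.pack path/to/pack-XXXX.pack
# runs Packfile(...).verify(), which walks every object, compares each object's
# declared size with its decompressed data and resolves delta chains back to
# their base objects.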
|
terentjew-alexey/market-analysis-system
|
mas_tools/classes.py
|
Python
|
mit
| 5,267 | 0.001899 |
# -*- coding: utf-8 -*-
import numpy as np
def signal_to_class(data, n=2, normalize=True):
"""
Converts a list of signals to an n-dimensional list of classes [buy, .., sell].
Arguments
n (int): Number of classes.
normalize (bool): If True, return hard 0/1 class vectors; if False, only the sign of the signal picks the class side while its magnitude is spread across the classes.
Returns
Array of classes.
"""
result = np.array([])
data = np.array(data)
if len(data.shape) > 1:
raise ValueError("The array must be one-dimensional.")
if n == 2:
if normalize:
for item in data:
if item > 0: # buy
result = np.append(result, [1.0, 0.0])
if item <= 0: # sell
result = np.append(result, [0.0, 1.0])
else:
for item in data:
result = np.append(result, [0.5+item/2.0, 0.5-item/2.0])
elif n == 3:
if normalize:
for item in data:
if item > 0: # buy
result = np.append(result, [1.0, 0.0, 0.0])
if item < 0: # sell
result = np.append(result, [0.0, 0.0, 1.0])
if item == 0: # pass
result = np.append(result, [0.0, 1.0, 0.0])
else:
for item in data:
if item > 0: # buy
result = np.append(result, [abs(item), (1.0-abs(item)), 0.0])
if item < 0: # sell
result = np.append(result, [0.0, (1.0-abs(item)), abs(item)])
if item == 0: # pass
result = np.append(result, [0.0, 1.0, 0.0])
elif n == 6:
for item in data:
if item >= 0.8 and item <= 1.0:
result = np.append(result, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
elif item >= 0.4 and item < 0.8:
result = np.append(result, [0.0, 1.0, 0.0, 0.0, 0.0, 0.0])
elif item >= 0.0 and item < 0.4:
result = np.append(result, [0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
elif item > -0.4 and item < 0.0:
result = np.append(result, [0.0, 0.0, 0.0, 1.0, 0.0, 0.0])
elif item > -0.8 and item <= 0.4:
result = np.append(result, [0.0, 0.0, 0.0, 0.0, 1.0, 0.0])
elif item >= -1.0 and item <= 0.8:
result = np.append(result, [0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
return result.reshape((data.shape[0], n))
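# Illustrative example (editor addition): signal_to_class([0.5, -1.0], n=2)
# returns [[1.0, 0.0], [0.0, 1.0]] with normalize=True and
# [[0.75, 0.25], [0.0, 1.0]] with normalize=False.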
def class_to_signal(data, n=2, normalized=True):
"""
Converts an n-dimensional list of classes to a list of signals.
"""
result = np.array([])
if n == 2:
if normalized:
for item in data:
result = np.append(result, 1 if item[0] > item[1] else -1)
else:
for item in data:
result = np.append(result, item[0] * 2 - 1.0)
elif n == 3:
if normalized:
for item in data:
_class = np.argmax(item)
if _class == 0:
result = np.append(result, 1.0)
elif _class == 1:
result = np.append(result, 0.0)
elif _class == 2:
result = np.append(result, -1.0)
else:
for item in data:
_class = np.argmax(item)
if _cl
|
ass == 0:
result = np.append(result, item[0])
elif _class == 1:
result = np.append(result, 0.0)
elif _class == 2:
result = np.append(result, -item[2])
elif n == 6:
for item in data:
_clas
|
s = np.argmax(item)
if _class == 0:
result = np.append(result, 1.0)
elif _class == 1:
result = np.append(result, 0.66)
elif _class == 2:
result = np.append(result, 0.33)
elif _class == 3:
result = np.append(result, -0.33)
elif _class == 4:
result = np.append(result, -0.66)
elif _class == 5:
result = np.append(result, -1.0)
return result
def prepare_target(data, close_index=3, classes=6):
"""
Hello (=
uniform classes
"""
# TODO
# while const
classes = 6
data = np.array(data)
new_target = data[1:, close_index] / data[:-1, close_index]
new_target = np.insert(new_target, obj=0, values=[1.0])
n, bins = np.histogram(new_target, bins=200, range=(0.99, 1.01))
sixth = sum(n) / classes
points = [0., 0., 1., 0., 0.]
_sum = n[100]/2
p_idx = 1
for idx in range(99, -1):
_sum += n[idx]
if _sum >= sixth:
points[p_idx] = (idx - 100) / 10**4 + 1
p_idx -= 1
if p_idx < 0:
break
_sum = n[100]/2
p_idx = 3
for idx in range(101, 201):
_sum += n[idx]
if _sum >= sixth:
points[p_idx] = (idx - 100) / 10**4 + 1
p_idx += 1
if p_idx > 4:
break
# TODO
def select(a):
a > points[2]
return 1
new_target = [select(x) for x in new_target]
return new_target
|
TheClimateCorporation/conda-recipes
|
basemap/run_test.py
|
Python
|
apache-2.0
| 1,026 | 0.019493 |
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
# from basemap/examples/daynight.py
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# example showing how to compute the day/night terminator and shade nightime
# areas on a map.
# miller projection
map = Basemap(projection='mill',lon_0=180)
# plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
map.drawmeridians(np.arange(map.lonmin,map.lonmax+30,60),labels=[0,0,0,1])
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
# shade the night areas, with alpha transparency so the
|
# map shows through
|
. Use current time in UTC.
date = datetime.utcnow()
CS=map.nightshade(date)
plt.title('Day/Night Map for %s (UTC)' % date.strftime("%d %b %Y %H:%M:%S"))
print('test passed!')
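# Illustrative note (editor addition): because the Agg backend is forced at the
# top, nothing is displayed; to keep the result the figure could be written to
# disk instead, e.g. plt.savefig('daynight.png').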
|
joshishungry/artificial_intel
|
assignments/lab5/neural_net_tester.py
|
Python
|
apache-2.0
| 2,227 | 0.00449 |
#!/usr/bin/env python2.5
#
# Unit tester for neural_net.py
#
import sys
from neural_net import train, test,\
make_neural_net_basic,\
make_neural_net_two_layer,\
make_neural_net_challenging,\
make_neural_net_with_weights
from neural_net_data import simple_data_sets,\
harder_data_sets,\
challenging_data_sets,\
manual_weight_data_sets,\
all_data_sets
def main(neural_net_func, data_sets, max_iterations=10000):
verbose = True
for name, training_data, test_data in data_sets:
print "-"*40
print "Training on %s data" %(name)
nn = neural_net_func()
train(nn, training_data, max_iterations=max_iterations,
verbose=verbose)
p
|
rint "Trained weights:"
for w in nn.weights:
print "Weight '%s': %f"%(w.get_name(),w.get_value())
print "Testing on %s test-data" %(name)
result = test(nn, test_data, verbose=verbose)
print "Accuracy: %f"%(result)
if __name__=="__main__":
t
|
est_names = ["simple"]
if len(sys.argv) > 1:
test_names = sys.argv[1:]
for test_name in test_names:
if test_name == "simple":
# these test simple logical configurations
main(make_neural_net_basic,
simple_data_sets)
elif test_name == "two_layer":
# these test cases are slightly harder
main(make_neural_net_two_layer,
simple_data_sets + harder_data_sets)
elif test_name == "challenging":
# these tests require a more complex architecture.
main(make_neural_net_challenging, challenging_data_sets)
elif test_name == "patchy":
# patchy problem is slightly tricky
# unless your network gets the right weights.
# it can quickly get stuck in local maxima.
main(make_neural_net_challenging, manual_weight_data_sets)
elif test_name == "weights":
# if you set the 'right' weights for
# the patchy problem it can converge very quickly.
main(make_neural_net_with_weights, manual_weight_data_sets,100)
else:
print "unrecognized test name %s" %(test_name)
|