text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
"""Process command line arguments."""
import sys
import os
from optparse import OptionParser, BadOptionError
usage_str = """python metrics [ options ] pgm1.ex1 [ pgm2.ex2 ... ]
Metrics are computed for the source code files
pgm1.ex1, pgm2.ex2, etc. At least one file name is required,
else this message appears.
Three types of output can be produced:
* Standard output for a quick summary of the main metrics.
Capitalized options negate the default option.
"""
class MyOptionParser(OptionParser):
"""Subclass OptionParser so I can override default error handler."""
def __init__( self, *args, **kwds ):
"""Just call super class's __init__ since we aren't making changes here."""
OptionParser.__init__( self, *args, **kwds )
def error( self, msg ):
"""Explicitly raise BadOptionError so calling program can handle it."""
raise BadOptionError( msg )
class ProcessArgsError( Exception ): pass
class ProcessArgs( object ):
"""Process command line arguments."""
def __init__( self,
*pArgs,
**pKwds
):
"""Initial processing of arguments."""
# default values for possible parameters
lib_name = ''
in_file_list = None
recurse_dir_list = None
self.include_metrics_str = 'sloc:SLOCMetric,mccabe:McCabeMetric'
exclude_metrics_str = None
quiet = False
verbose = 0
output_format = None
self.__dict__.update( locals() )
del( self.__dict__['self'] ) # remove recursive self from self.__dict__
self.__dict__.update( pKwds )
del( self.__dict__['pKwds'] ) # remove redundant pKwds in self.__dict__
# set up option parser
parser = MyOptionParser( '', version="%prog 0.8.1" )
parser.add_option("-f", "--files",
dest="in_file_list",
default=self.in_file_list,
help="File containing list of path names to modules for analysis." )
parser.add_option("-r", "--recurse-dir",
dest="recurse_dir",
default= None,
help="Name of a directory to recurse into. (Default is '.')" )
parser.add_option("-i", "--include",
dest="include_metrics_str",
default=self.include_metrics_str,
help="list of metrics to include in run. This is a comma separated list of metric module names with no whitespace. Optionally, you can specify the class name of the metric by following the module name with a colon (:) and the metric class name. (Default metrics are 'mccabe:McCabeMetric,sloc:SLOCMetric'. Default metric class name for metric module 'wxYz' is 'WxYzMetric' when only module name given -- note capitalized metric class name.)" )
parser.add_option("-l", "--library",
dest="lib_name",
default=self.lib_name,
help="user-defined name applied to collection of modules (Default is '')" )
parser.add_option("-q", "--quiet",
action="store_true",
dest="quiet",
default=self.quiet,
help="suppress normal summary output to stdout. (Default is %s)" % (self.quiet) )
parser.add_option("-v", "--verbose",
action="count",
dest="verbose",
default=self.verbose,
help="Produce verbose output - more -v's produce more output. (Default is no verbose output to stdout)")
parser.add_option("--format",
dest="output_format_str",
default = self.output_format,
choices = ["xml", "csv"],
help="Choose an output format for a parser to read. Valid choices: xml, csv")
# parse the command line/arguments for this instance
try:
(options, args) = parser.parse_args()
except BadOptionError, e:
sys.stderr.writelines( "\nBadOptionError: %s\n" % str( e ) )
sys.stderr.writelines( "\nThe valid options are:\n\n" )
sys.stderr.writelines(parser.format_help())
sys.exit( 1 )
print 'options: %s' % options
print 'args: %s' % args
# Augment parameter values from instantiation with
# command-line values.
# The command-line parameter values take precedence
# over values set in the program.
args.extend( pArgs )
# convert command line arguments into instance values
self.__dict__.update( options.__dict__ )
if self.in_file_list:
try:
inf = open( self.in_file_list )
files = [line.strip() for line in inf]
inf.close()
args.extend( files )
except IOError, e:
raise ProcessArgsError( e )
exclude = ['.svn', '.hg', '.CVS', '.git']
if self.recurse_dir:
start = self.recurse_dir
print "Recurse %s" % (start)
for (root, dirs, files) in os.walk(start):
newfiles = []
for excl in exclude:
if excl in dirs:
dirs.remove(excl)
newfiles.extend([os.path.join(root, fn) for fn in files])
#print root, len(newfiles), 'Files found!'
args.extend(newfiles)
self.in_file_names = args
self.include_metrics = self.process_include_metrics(self.include_metrics_str)
# standardize
if self.output_format_str is not None:
self.output_format_str = self.output_format_str.upper()
if len( args ) < 1:
print usage_str
print parser.format_help()
e = "No souce filenames given.\n"
# because of what I believe to be a bug in the doctest module,
# which makes it mishandle exceptions, I have 'faked' the handling
# of raising an exception and just return
# if doctestSw:
# print e
# return
# else:
raise ProcessArgsError( e )
def conflict_handler(self, *args, **kwds):
print "args=%s" % args
print "kwds=%s" % kwds
def process_include_metrics(self, include_metrics_str):
include_metrics = []
try:
metric_list = include_metrics_str.split( ',' )
for a in metric_list:
s = a.split( ':' )
if len( s ) == 2: # both metric class and module name given
include_metrics.append( s )
elif len( s ) == 1:
# only the module name given. Generate default metric
# class name by capitalizing first letter of module
# name and appending "Metric" so the default metric
# class name for module wxYz is WxYzMetric.
if s[0]:
defName = s[0][0].upper() + s[0][1:] + 'Metric'
include_metrics.append( (s[0], defName) )
else:
raise ProcessArgsError("Missing metric module name")
else:
raise ProcessArgsError("Malformed items in includeMetric string")
except AttributeError, e:
e = ( "Invalid list of metric names: %s" %
include_metrics_str )
raise ProcessArgsError( e )
return include_metrics
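# Illustrative example (added comment, not in the original source): the
# include string is split into (module, class) pairs, e.g.
#   pa.process_include_metrics('wxYz,mccabe:McCabeMetric')
# yields the pairs ('wxYz', 'WxYzMetric') and ('mccabe', 'McCabeMetric');
# a bare module name gets the capitalized default class name, while an
# explicit 'module:Class' entry is kept as given.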
def testpa( pa ):
"""Test of ProcessArgs.
Usage:
>>> pa=ProcessArgs('inFile.py')
>>> testpa(pa) #doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Arguments processed:
Include Metric Modules=sloc:SLOCMetric,mccabe:McCabeMetric
quiet=False
verbose=0
Metrics to be used are:
Module sloc contains metric class SLOCMetric
Module mccabe contains metric class McCabeMetric
Input files:
inFile.py
>>>
"""
print """Arguments processed:
\tInclude Metric Modules=%s
\tquiet=%s
\tverbose=%s""" % (
pa.include_metrics_str,
pa.quiet,
pa.verbose)
print "Metrics to be used are:"
for m,n in pa.include_metrics:
print "\tModule %s contains metric class %s" % (m,n)
if pa.in_file_names:
print "Input files:"
for f in pa.in_file_names:
print "\t%s" % f
| GadgetSteve/metrics | metrics/processargs.py | Python | mit | 8,553 | 0.012043 |
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import json
import copy
from netaddr import IPNetwork
from pprint import pformat
from vnc_api.vnc_api import *
from vnc_admin_api import VncApiAdmin
def get_ip(ip_w_pfx):
return str(IPNetwork(ip_w_pfx).ip)
# end get_ip
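# Illustrative usage (added comment): get_ip('10.84.14.38/24') -> '10.84.14.38';
# netaddr's IPNetwork keeps the host address, so the prefix length is dropped.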
class BgpProvisioner(object):
def __init__(self, user, password, tenant, api_server_ip, api_server_port,
api_server_use_ssl=False, use_admin_api=False):
self._admin_user = user
self._admin_password = password
self._admin_tenant_name = tenant
self._api_server_ip = api_server_ip
self._api_server_port = api_server_port
self._api_server_use_ssl = api_server_use_ssl
self._vnc_lib = VncApiAdmin(
use_admin_api, self._admin_user, self._admin_password,
self._admin_tenant_name,
self._api_server_ip,
self._api_server_port, '/',
api_server_use_ssl=self._api_server_use_ssl)
# end __init__
def _get_rt_inst_obj(self):
vnc_lib = self._vnc_lib
# TODO pick fqname hardcode from common
rt_inst_obj = vnc_lib.routing_instance_read(
fq_name=['default-domain', 'default-project',
'ip-fabric', '__default__'])
return rt_inst_obj
# end _get_rt_inst_obj
def add_bgp_router(self, router_type, router_name, router_ip,
router_asn, address_families=[], md5=None):
if not address_families:
address_families = ['route-target', 'inet-vpn', 'e-vpn', 'erm-vpn',
'inet6-vpn']
if router_type != 'control-node':
address_families.remove('erm-vpn')
if router_type != 'control-node':
if 'erm-vpn' in address_families:
raise RuntimeError("Only contrail bgp routers can support "
"family 'erm-vpn'")
bgp_addr_fams = AddressFamilies(address_families)
bgp_sess_attrs = [
BgpSessionAttributes(address_families=bgp_addr_fams)]
bgp_sessions = [BgpSession(attributes=bgp_sess_attrs)]
bgp_peering_attrs = BgpPeeringAttributes(session=bgp_sessions)
rt_inst_obj = self._get_rt_inst_obj()
vnc_lib = self._vnc_lib
if router_type == 'control-node':
vendor = 'contrail'
elif router_type == 'router':
vendor = 'mx'
else:
vendor = 'unknown'
router_params = BgpRouterParams(router_type=router_type,
vendor=vendor, autonomous_system=int(router_asn),
identifier=get_ip(router_ip),
address=get_ip(router_ip),
port=179, address_families=bgp_addr_fams)
bgp_router_obj = BgpRouter(router_name, rt_inst_obj,
bgp_router_parameters=router_params)
# Return early with a log if it already exists
try:
fq_name = bgp_router_obj.get_fq_name()
existing_obj = vnc_lib.bgp_router_read(fq_name=fq_name)
if md5:
bgp_params = existing_obj.get_bgp_router_parameters()
# set md5
print "Setting md5 on the existing uuid"
md5 = {'key_items': [ { 'key': md5 ,"key_id":0 } ], "key_type":"md5"}
bgp_params.set_auth_data(md5)
existing_obj.set_bgp_router_parameters(bgp_params)
vnc_lib.bgp_router_update(existing_obj)
print ("BGP Router " + pformat(fq_name) +
" already exists with uuid " + existing_obj.uuid)
return
except NoIdError:
pass
cur_id = vnc_lib.bgp_router_create(bgp_router_obj)
cur_obj = vnc_lib.bgp_router_read(id=cur_id)
# full-mesh with existing bgp routers
fq_name = rt_inst_obj.get_fq_name()
bgp_router_list = vnc_lib.bgp_routers_list(parent_fq_name=fq_name)
bgp_router_ids = [bgp_dict['uuid']
for bgp_dict in bgp_router_list['bgp-routers']]
bgp_router_objs = []
for id in bgp_router_ids:
bgp_router_objs.append(vnc_lib.bgp_router_read(id=id))
for other_obj in bgp_router_objs:
if other_obj.uuid == cur_id:
continue
cur_obj.add_bgp_router(other_obj, bgp_peering_attrs)
if md5:
md5 = {'key_items': [ { 'key': md5 ,"key_id":0 } ], "key_type":"md5"}
rparams = cur_obj.bgp_router_parameters
rparams.set_auth_data(md5)
cur_obj.set_bgp_router_parameters(rparams)
vnc_lib.bgp_router_update(cur_obj)
# end add_bgp_router
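# Hedged usage sketch (added; host names, credentials and ASN are made up):
#   provisioner = BgpProvisioner('admin', 'secret', 'admin-tenant',
#                                '10.0.0.10', '8082')
#   provisioner.add_bgp_router('control-node', 'ctrl1', '10.0.0.11', 64512)
# The new router is created under the default ip-fabric routing instance and
# peered with every other BgpRouter already present there (full mesh).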
def del_bgp_router(self, router_name):
vnc_lib = self._vnc_lib
rt_inst_obj = self._get_rt_inst_obj()
fq_name = rt_inst_obj.get_fq_name() + [router_name]
cur_obj = vnc_lib.bgp_router_read(fq_name=fq_name)
# remove full-mesh with existing bgp routers
fq_name = rt_inst_obj.get_fq_name()
bgp_router_list = vnc_lib.bgp_routers_list(parent_fq_name=fq_name)
bgp_router_ids = [bgp_dict['uuid']
for bgp_dict in bgp_router_list['bgp-routers']]
bgp_router_objs = []
for id in bgp_router_ids:
bgp_router_objs.append(vnc_lib.bgp_router_read(id=id))
for other_obj in bgp_router_objs:
if other_obj.uuid == cur_obj.uuid:
# our refs will be dropped on delete further down
continue
other_obj.del_bgp_router(cur_obj)
vnc_lib.bgp_router_delete(id=cur_obj.uuid)
# end del_bgp_router
def add_route_target(self, rt_inst_fq_name, router_asn,
route_target_number):
vnc_lib = self._vnc_lib
rtgt_val = "target:%s:%s" % (router_asn, route_target_number)
net_obj = vnc_lib.virtual_network_read(fq_name=rt_inst_fq_name[:-1])
route_targets = net_obj.get_route_target_list()
if route_targets:
route_targets.add_route_target(rtgt_val)
else:
route_targets = RouteTargetList([rtgt_val])
net_obj.set_route_target_list(route_targets)
vnc_lib.virtual_network_update(net_obj)
# end add_route_target
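# Note (added for clarity): the route-target string built above follows the
# "target:<asn>:<number>" convention, e.g. add_route_target(fq_name, 64512, 10000)
# attaches "target:64512:10000" to the parent virtual network.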
def del_route_target(self, rt_inst_fq_name, router_asn,
route_target_number):
vnc_lib = self._vnc_lib
rtgt_val = "target:%s:%s" % (router_asn, route_target_number)
net_obj = vnc_lib.virtual_network_read(fq_name=rt_inst_fq_name[:-1])
if rtgt_val not in net_obj.get_route_target_list().get_route_target():
print "%s not configured for VN %s" % (rtgt_val,
rt_inst_fq_name[:-1])
return
route_targets = net_obj.get_route_target_list()
route_targets.delete_route_target(rtgt_val)
if route_targets.get_route_target():
net_obj.set_route_target_list(route_targets)
else:
net_obj.set_route_target_list(None)
vnc_lib.virtual_network_update(net_obj)
# end del_route_target
# end class BgpProvisioner
| nischalsheth/contrail-controller | src/config/utils/provision_bgp.py | Python | apache-2.0 | 7,230 | 0.002905 |
from __future__ import annotations
import procrunner
def test_run(dials_data, tmp_path):
procrunner.run(
(
"dials.plot_reflections",
dials_data("centroid_test_data") / "experiments.json",
dials_data("centroid_test_data") / "integrated.refl",
"scan_range=0,5",
),
working_directory=tmp_path,
).check_returncode()
assert (tmp_path / "centroids.png").is_file()
| dials/dials | tests/test_plot_reflections.py | Python | bsd-3-clause | 445 | 0 |
from django.test.utils import override_settings
from hc.test import BaseTestCase
@override_settings(DISCORD_CLIENT_ID="t1", DISCORD_CLIENT_SECRET="s1")
class AddDiscordTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.url = "/projects/%s/add_discord/" % self.project.code
def test_instructions_work(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertContains(r, "Connect Discord", status_code=200)
self.assertContains(r, "discordapp.com/api/oauth2/authorize")
# There should now be a key in session
self.assertTrue("add_discord" in self.client.session)
@override_settings(DISCORD_CLIENT_ID=None)
def test_it_requires_client_id(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 404)
def test_it_requires_rw_access(self):
self.bobs_membership.role = "r"
self.bobs_membership.save()
self.client.login(username="bob@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 403)
| healthchecks/healthchecks | hc/front/tests/test_add_discord.py | Python | bsd-3-clause | 1,235 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vi: ts=4 sw=4
################################################################################
# Code for defining a 'Sample' object, which keeps track of its state, and
# simplifies the task of aligning, measuring, etc.
################################################################################
# Known Bugs:
# N/A
################################################################################
# TODO:
# - Search for "TODO" below.
# - Ability to have a collection of simultaneous motions? (E.g. build up a set
# of deferred motions?)
# - Use internal naming scheme to control whether 'saxs'/'waxs' is put in the
# filename
################################################################################
import time
import re
import os
import shutil
class CoordinateSystem(object):
"""
A generic class defining a coordinate system. Several coordinate systems
can be layered on top of one another (with a reference to the underlying
coordinate system given by the 'base_stage' pointer). When motion of a given
CoordinateSystem is requested, the motion is passed (with coordinate
conversion) to the underlying stage.
"""
hint_replacements = { 'positive': 'negative',
'up': 'down',
'left': 'right',
'towards': 'away from',
'downstream': 'upstream',
'inboard': 'outboard',
'clockwise': 'counterclockwise',
'CW': 'CCW',
}
# Core methods
########################################
def __init__(self, name='<unnamed>', base=None, **kwargs):
'''Create a new CoordinateSystem (e.g. a stage or a sample).
Parameters
----------
name : str
Name for this stage/sample.
base : Stage
The stage on which this stage/sample sits.
'''
self.name = name
self.base_stage = base
self.enabled = True
self.md = {}
self._marks = {}
self._set_axes_definitions()
self._init_axes(self._axes_definitions)
def _set_axes_definitions(self):
'''Internal function which defines the axes for this stage. This is kept
as a separate function so that it can be over-ridden easily.'''
# The _axes_definitions array holds a list of dicts, each defining an axis
self._axes_definitions = []
def _init_axes(self, axes):
'''Internal method that generates method names to control the various axes.'''
# Note: Instead of defining CoordinateSystem() having methods '.x', '.xr',
# '.y', '.yr', etc., we programmatically generate these methods when the
# class (and subclasses) are instantiated.
# Thus, the Axis() class has generic versions of these methods, which are
# appropriately renamed (bound, actually) when a class is instantiated.
self._axes = {}
for axis in axes:
axis_object = Axis(axis['name'], axis['motor'], axis['enabled'], axis['scaling'], axis['units'], axis['hint'], self.base_stage, stage=self)
self._axes[axis['name']] = axis_object
# Bind the methods of axis_object to appropriately-named methods of
# the CoordinateSystem() class.
setattr(self, axis['name'], axis_object.get_position )
setattr(self, axis['name']+'abs', axis_object.move_absolute )
setattr(self, axis['name']+'r', axis_object.move_relative )
setattr(self, axis['name']+'pos', axis_object.get_position )
setattr(self, axis['name']+'posMotor', axis_object.get_motor_position )
setattr(self, axis['name']+'units', axis_object.get_units )
setattr(self, axis['name']+'hint', axis_object.get_hint )
setattr(self, axis['name']+'info', axis_object.get_info )
setattr(self, axis['name']+'set', axis_object.set_current_position )
setattr(self, axis['name']+'o', axis_object.goto_origin )
setattr(self, axis['name']+'setOrigin', axis_object.set_origin )
setattr(self, axis['name']+'mark', axis_object.mark )
setattr(self, axis['name']+'search', axis_object.search )
setattr(self, axis['name']+'scan', axis_object.scan )
setattr(self, axis['name']+'c', axis_object.center )
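# Added note (illustrative, not in the original source): for an axis named
# 'x' the loop above binds, e.g., stage.x() / stage.xpos() (read position),
# stage.xabs(10.0) (absolute move), stage.xr(-0.5) (relative move),
# stage.xsetOrigin() and stage.xo() (define / go to the origin), each
# wrapping the corresponding Axis() method.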
def comment(self, text, logbooks=None, tags=None, append_md=True, **md):
'''Add a comment related to this CoordinateSystem.'''
text += '\n\n[comment for CoordinateSystem: {} ({})].'.format(self.name, self.__class__.__name__)
if append_md:
md_current = { k : v for k, v in RE.md.items() } # Global md
md_current.update(get_beamline().get_md()) # Beamline md
# Self md
#md_current.update(self.get_md())
# Specified md
md_current.update(md)
text += '\n\n\nMetadata\n----------------------------------------'
for key, value in sorted(md_current.items()):
text += '\n{}: {}'.format(key, value)
logbook.log(text, logbooks=logbooks, tags=tags)
def set_base_stage(self, base):
self.base_stage = base
self._init_axes(self._axes_definitions)
# Convenience/helper methods
########################################
def multiple_string_replacements(self, text, replacements, word_boundaries=False):
'''Perform multiple string replacements simultaneously. Matching is case-insensitive.
Parameters
----------
text : str
Text to return modified
replacements : dictionary
Replacement pairs
word_boundaries : bool, optional
Decides whether replacements only occur for words.
'''
# Code inspired from:
# http://stackoverflow.com/questions/6116978/python-replace-multiple-strings
# Note inclusion of r'\b' sequences forces the regex-match to occur at word-boundaries.
if word_boundaries:
replacements = dict((r'\b'+re.escape(k.lower())+r'\b', v) for k, v in replacements.items())
pattern = re.compile("|".join(replacements.keys()), re.IGNORECASE)
text = pattern.sub(lambda m: replacements[r'\b'+re.escape(m.group(0).lower())+r'\b'], text)
else:
replacements = dict((re.escape(k.lower()), v) for k, v in replacements.items())
pattern = re.compile("|".join(replacements.keys()), re.IGNORECASE)
text = pattern.sub(lambda m: replacements[re.escape(m.group(0).lower())], text)
return text
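# Illustrative example (added comment; the input text is made up): on a
# CoordinateSystem instance cs,
#   cs.multiple_string_replacements('Move UP and towards the beam',
#                                   {'up': 'down', 'towards': 'away from'},
#                                   word_boundaries=True)
#   -> 'Move down and away from the beam'
# Matching is case-insensitive and all replacements happen in one regex pass.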
def _hint_replacements(self, text):
'''Convert a motor-hint into its logical inverse.'''
# Generates all the inverse replacements
replacements = dict((v, k) for k, v in self.hint_replacements.items())
replacements.update(self.hint_replacements)
return self.multiple_string_replacements(text, replacements, word_boundaries=True)
# Control methods
########################################
def setTemperature(self, temperature, verbosity=3):
if verbosity>=1:
print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
def temperature(self, verbosity=3):
if verbosity>=1:
print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
return 0.0
# Motion methods
########################################
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def is_enabled(self):
return self.enabled
def pos(self, verbosity=3):
'''Return (and print) the positions of all axes associated with this
stage/sample.'''
out = {}
for axis_name, axis_object in sorted(self._axes.items()):
out[axis_name] = axis_object.get_position(verbosity=verbosity)
#if verbosity>=2: print('') # \n
return out
def hints(self, verbosity=3):
'''Return (and print) the hints of all axes associated with this
stage/sample.'''
out = {}
for axis_name, axis_object in sorted(self._axes.items()):
if verbosity>=2: print('{:s}'.format(axis_name))
out[axis_name] = axis_object.get_hint(verbosity=verbosity)
if verbosity>=2: print('') # \n
return out
def origin(self, verbosity=3):
'''Returns the origin for axes.'''
out = {}
for axis_name, axis_object in sorted(self._axes.items()):
origin = axis_object.get_origin()
if verbosity>=2: print('{:s} origin = {:.3f} {:s}'.format(axis_name, origin, axis_object.get_units()))
out[axis_name] = origin
return out
def gotoOrigin(self, axes=None):
'''Go to the origin (zero-point) for this stage. All axes are zeroed,
unless one specifies the axes to move.'''
# TODO: Guard against possibly buggy behavior if 'axes' is a string instead of a list.
# (Python will happily iterate over the characters in a string.)
if axes is None:
axes_to_move = self._axes.values()
else:
axes_to_move = [self._axes[axis_name] for axis_name in axes]
for axis in axes_to_move:
axis.goto_origin()
def setOrigin(self, axes, positions=None):
'''Define the current position as the zero-point (origin) for this stage/
sample. The axes to be considered in this redefinition must be supplied
as a list.
If the optional positions parameter is passed, then those positions are
used to define the origins for the axes.'''
if positions is None:
for axis in axes:
getattr(self, axis+'setOrigin')()
else:
for axis, pos in zip(axes, positions):
getattr(self, axis+'setOrigin')(pos)
def gotoAlignedPosition(self):
'''Goes to the currently-defined 'aligned' position for this stage. If
no specific aligned position is defined, then the zero-point for the stage
is used instead.'''
# TODO: Optional offsets? (Like goto mark?)
if 'aligned_position' in self.md and self.md['aligned_position'] is not None:
for axis_name, position in self.md['aligned_position'].items():
self._axes[axis_name].move_absolute(position)
else:
self.gotoOrigin()
# Motion logging
########################################
def setAlignedPosition(self, axes):
'''Saves the current position as the 'aligned' position for this stage/
sample. This allows one to return to this position later. One must
specify the axes to be considered.
WARNING: Currently this position data is not saved persistently. E.g. it will
be lost if you close and reopen the console.
'''
positions = {}
for axis_name in axes:
positions[axis_name] = self._axes[axis_name].get_position(verbosity=0)
self.md['aligned_position'] = positions
def mark(self, label, *axes, **axes_positions):
'''Set a mark for the stage/sample/etc.
'Marks' are locations that have been labelled, which is useful for
later going to a labelled position (using goto), or just to keep track
of sample information (metadata).
By default, the mark is set at the current position. If no 'axes' are
specified, all motors are logged. Alternately, axes (as strings) can
be specified. If axes_positions are given as keyword arguments, then
positions other than the current position can be specified.
'''
positions = {}
if len(axes)==0 and len(axes_positions)==0:
for axis_name in self._axes:
positions[axis_name] = self._axes[axis_name].get_position(verbosity=0)
else:
for axis_name in axes:
positions[axis_name] = self._axes[axis_name].get_position(verbosity=0)
for axis_name, position in axes_positions.items():
positions[axis_name] = position
self._marks[label] = positions
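# Illustrative usage (added comment; sample name is hypothetical):
#   sam.mark('center')           # record every axis at its current position
#   sam.mark('left edge', 'x')   # record only the x axis
#   sam.mark('spot1', x=1.5)     # record an explicit position for x
# Marked positions can be listed with .marks() and revisited with .goto().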
def marks(self, verbosity=3):
'''Get a list of the current marks on the stage/sample/etc. 'Marks'
are locations that have been labelled, which is useful for later
going to a labelled position (using goto), or just to keep track
of sample information (metadata).'''
if verbosity>=3:
print('Marks for {:s} (class {:s}):'.format(self.name, self.__class__.__name__))
if verbosity>=2:
for label, positions in self._marks.items():
print(label)
for axis_name, position in sorted(positions.items()):
print(' {:s} = {:.4f} {:s}'.format(axis_name, position, self._axes[axis_name].get_units()))
return self._marks
def goto(self, label, verbosity=3, **additional):
'''Move the stage/sample to the location given by the label. For this
to work, the specified label must have been 'marked' at some point.
Additional keyword arguments can be provided. For instance, to move
3 mm from the left edge:
sam.goto('left edge', xr=+3.0)
'''
if label not in self._marks:
if verbosity>=1:
print("Label '{:s}' not recognized. Use '.marks()' for the list of marked positions.".format(label))
return
for axis_name, position in sorted(self._marks[label].items()):
if axis_name+'abs' in additional:
# Override the marked value for this position
position = additional[axis_name+'abs']
del(additional[axis_name+'abs'])
#relative = 0.0 if axis_name+'r' not in additional else additional[axis_name+'r']
if axis_name+'r' in additional:
relative = additional[axis_name+'r']
del(additional[axis_name+'r'])
else:
relative = 0.0
self._axes[axis_name].move_absolute(position+relative, verbosity=verbosity)
# Handle any optional motions not already covered
for command, amount in additional.items():
if command[-1]=='r':
getattr(self, command)(amount, verbosity=verbosity)
elif command[-3:]=='abs':
getattr(self, command)(amount, verbosity=verbosity)
else:
print("Keyword argument '{}' not understood (should be 'r' or 'abs').".format(command))
# State methods
########################################
def save_state(self):
'''Return a state dictionary that can be used (via restore_state) to
return this object to its current state.'''
#TODO: Save to databroker?
state = { 'origin': {} }
for axis_name, axis in self._axes.items():
state['origin'][axis_name] = axis.origin
return state
def restore_state(self, state):
'''Restore this object to a previously-saved state (as returned by
save_state).'''
for axis_name, axis in self._axes.items():
axis.origin = state['origin'][axis_name]
# End class CoordinateSystem(object)
########################################
class Axis(object):
'''Generic motor axis.
Meant to be used within a CoordinateSystem() or Stage() object.
'''
def __init__(self, name, motor, enabled, scaling, units, hint, base, stage=None, origin=0.0):
self.name = name
self.motor = motor
self.enabled = enabled
self.scaling = scaling
self.units = units
self.hint = hint
self.base_stage = base
self.stage = stage
self.origin = 0.0
self._move_settle_max_time = 10.0
self._move_settle_period = 0.05
self._move_settle_tolerance = 0.01
# Coordinate transformations
########################################
def cur_to_base(self, position):
'''Convert from this coordinate system to the coordinate in the (immediate) base.'''
base_position = self.get_origin() + self.scaling*position
return base_position
def base_to_cur(self, base_position):
'''Convert from this base position to the coordinate in the current system.'''
position = (base_position - self.get_origin())/self.scaling
return position
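# Worked example (added comment): with origin = 5.0 and scaling = -1.0,
#   cur_to_base(2.0) = 5.0 + (-1.0)*2.0 = 3.0
#   base_to_cur(3.0) = (3.0 - 5.0)/(-1.0) = 2.0
# so the two transforms are exact inverses of one another.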
def cur_to_motor(self, position):
'''Convert from this coordinate system to the underlying motor.'''
if self.motor is not None:
return self.cur_to_base(position)
else:
base_position = self.cur_to_base(position)
return self.base_stage._axes[self.name].cur_to_motor(base_position)
def motor_to_cur(self, motor_position):
'''Convert a motor position into the current coordinate system.'''
if self.motor is not None:
return self.base_to_cur(motor_position)
else:
base_position = self.base_stage._axes[self.name].motor_to_cur(motor_position)
return self.base_to_cur(base_position)
# Programmatically-defined methods
########################################
# Note: Instead of defining CoordinateSystem() having methods '.x', '.xr',
# '.xp', etc., we programmatically generate these methods when the class
# (and subclasses) are instantiated.
# Thus, the Axis() class has generic versions of these methods, which are
# appropriately renamed (bound, actually) when a class is instantiated.
def get_position(self, verbosity=3):
'''Return the current position of this axis (in its coordinate system).
By default, this also prints out the current position.'''
if self.motor is not None:
base_position = self.motor.position
else:
verbosity_c = verbosity if verbosity>=4 else 0
base_position = getattr(self.base_stage, self.name+'pos')(verbosity=verbosity_c)
position = self.base_to_cur(base_position)
if verbosity>=2:
if self.stage:
stg = self.stage.name
else:
stg = '?'
if verbosity>=5 and self.motor is not None:
print( '{:s} = {:.3f} {:s}'.format(self.motor.name, base_position, self.get_units()) )
print( '{:s}.{:s} = {:.3f} {:s} (origin = {:.3f})'.format(stg, self.name, position, self.get_units(), self.get_origin()) )
return position
def get_motor_position(self, verbosity=3):
'''Returns the position of this axis, traced back to the underlying
motor.'''
if self.motor is not None:
return self.motor.position
else:
return getattr(self.base_stage, self.name+'posMotor')(verbosity=verbosity)
#return self.base_stage._axes[self.name].get_motor_position(verbosity=verbosity)
def move_absolute(self, position=None, wait=True, verbosity=3):
'''Move axis to the specified absolute position. The position is given
in terms of this axis' current coordinate system. If no position is
given, the current position is simply reported.'''
if position is None:
# If called without any argument, just print the current position
return self.get_position(verbosity=verbosity)
# Account for coordinate transformation
base_position = self.cur_to_base(position)
if self.is_enabled():
if self.motor:
#mov( self.motor, base_position )
self.motor.user_setpoint.value = base_position
else:
# Call self.base_stage.xabs(base_position)
getattr(self.base_stage, self.name+'abs')(base_position, verbosity=0)
if self.stage:
stg = self.stage.name
else:
stg = '?'
if verbosity>=2:
# Show a realtime output of position
start_time = time.time()
current_position = self.get_position(verbosity=0)
while abs(current_position-position)>self._move_settle_tolerance and (time.time()-start_time)<self._move_settle_max_time:
current_position = self.get_position(verbosity=0)
print( '{:s}.{:s} = {:5.3f} {:s} \r'.format(stg, self.name, current_position, self.get_units()), end='')
time.sleep(self._move_settle_period)
#if verbosity>=1:
#current_position = self.get_position(verbosity=0)
#print( '{:s}.{:s} = {:5.3f} {:s} '.format(stg, self.name, current_position, self.get_units()))
elif verbosity>=1:
print( 'Axis %s disabled (stage %s).' % (self.name, self.stage.name) )
def move_relative(self, move_amount=None, verbosity=3):
'''Move axis relative to the current position.'''
if move_amount is None:
# If called without any argument, just print the current position
return self.get_position(verbosity=verbosity)
target_position = self.get_position(verbosity=0) + move_amount
return self.move_absolute(target_position, verbosity=verbosity)
def goto_origin(self):
'''Move axis to the currently-defined origin (zero-point).'''
self.move_absolute(0)
def set_origin(self, origin=None):
'''Sets the origin (zero-point) for this axis. If no origin is supplied,
the current position is redefined as zero. Alternatively, you can supply
a position (in the current coordinate system of the axis) that should
henceforth be considered zero.'''
if origin is None:
# Use current position
if self.motor is not None:
self.origin = self.motor.position
else:
if self.base_stage is None:
print("Error: %s %s has 'base_stage' and 'motor' set to 'None'." % (self.__class__.__name__, self.name))
else:
self.origin = getattr(self.base_stage, self.name+'pos')(verbosity=0)
else:
# Use supplied value (in the current coordinate system)
base_position = self.cur_to_base(origin)
self.origin = base_position
def set_current_position(self, new_position):
'''Redefines the position value of the current position.'''
current_position = self.get_position(verbosity=0)
self.origin = self.get_origin() + (current_position - new_position)*self.scaling
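# Worked example (added comment): with scaling +1.0 and origin 0.0, if the
# axis currently reads 10.0 and set_current_position(2.0) is called, the
# origin becomes 0.0 + (10.0 - 2.0)*1.0 = 8.0, so the same physical point
# now reads 2.0.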
def search(self, step_size=1.0, min_step=0.05, intensity=None, target=0.5, detector=None, detector_suffix=None, polarity=+1, verbosity=3):
'''Moves this axis, searching for a target value.
Parameters
----------
step_size : float
The initial step size when moving the axis
min_step : float
The final (minimum) step size to try
intensity : float
The expected full-beam intensity readout
target : 0.0 to 1.0
The target ratio of full-beam intensity; 0.5 searches for half-max.
The target can also be 'max' to find a local maximum.
detector, detector_suffix
The beamline detector (and suffix, such as '_stats4_total') to trigger to measure intensity
polarity : +1 or -1
Positive motion assumes, e.g. a step-height 'up' (as the axis goes more positive)
'''
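# Added descriptive summary (not part of the original source): the loop below
# repeatedly steps the axis by 'step_size' in the current direction; whenever
# the measured value ends up on the wrong side of the target (or stops
# improving, for the 'max'/'min' modes), the direction is reversed and the
# step size is halved, until step_size drops below min_step. In effect this
# is a step-halving (bisection-style) search on the detector readout.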
if not get_beamline().beam.is_on():
print('WARNING: Experimental shutter is not open.')
if intensity is None:
intensity = RE.md['beam_intensity_expected']
if detector is None:
#detector = gs.DETS[0]
detector = get_beamline().detector[0]
if detector_suffix is None:
#value_name = gs.TABLE_COLS[0]
value_name = get_beamline().TABLE_COLS[0]
else:
value_name = detector.name + detector_suffix
bec.disable_table()
# Check current value
RE(count([detector]))
value = detector.read()[value_name]['value']
if target == 'max':
if verbosity>=5:
print("Performing search on axis '{}' target is 'max'".format(self.name))
max_value = value
max_position = self.get_position(verbosity=0)
direction = +1*polarity
while step_size>=min_step:
if verbosity>=4:
print(" move {} by {} × {}".format(self.name, direction, step_size))
self.move_relative(move_amount=direction*step_size, verbosity=verbosity-2)
prev_value = value
RE(count([detector]))
value = detector.read()[value_name]['value']
if verbosity>=3:
print(" {} = {:.3f} {}; value : {}".format(self.name, self.get_position(verbosity=0), self.units, value))
if value>max_value:
max_value = value
max_position = self.get_position(verbosity=0)
if value>prev_value:
# Keep going in this direction...
pass
else:
# Switch directions!
direction *= -1
step_size *= 0.5
elif target == 'min':
if verbosity>=5:
print("Performing search on axis '{}' target is 'min'".format(self.name))
direction = +1*polarity
while step_size>=min_step:
if verbosity>=4:
print(" move {} by {} × {}".format(self.name, direction, step_size))
self.move_relative(move_amount=direction*step_size, verbosity=verbosity-2)
prev_value = value
RE(count([detector]))
value = detector.read()[value_name]['value']
if verbosity>=3:
print(" {} = {:.3f} {}; value : {}".format(self.name, self.get_position(verbosity=0), self.units, value))
if value<prev_value:
# Keep going in this direction...
pass
else:
# Switch directions!
direction *= -1
step_size *= 0.5
else:
target_rel = target
target = target_rel*intensity
if verbosity>=5:
print("Performing search on axis '{}' target {} × {} = {}".format(self.name, target_rel, intensity, target))
if verbosity>=4:
print(" value : {} ({:.1f}%)".format(value, 100.0*value/intensity))
# Determine initial motion direction
if value>target:
direction = -1*polarity
else:
direction = +1*polarity
while step_size>=min_step:
if verbosity>=4:
print(" move {} by {} × {}".format(self.name, direction, step_size))
self.move_relative(move_amount=direction*step_size, verbosity=verbosity-2)
RE(count([detector]))
value = detector.read()[value_name]['value']
if verbosity>=3:
print(" {} = {:.3f} {}; value : {} ({:.1f}%)".format(self.name, self.get_position(verbosity=0), self.units, value, 100.0*value/intensity))
# Determine direction
if value>target:
new_direction = -1.0*polarity
else:
new_direction = +1.0*polarity
if abs(direction-new_direction)<1e-4:
# Same direction as we've been going...
# ...keep moving this way
pass
else:
# Switch directions!
direction *= -1
step_size *= 0.5
bec.enable_table()
def scan(self):
print('todo')
def center(self):
print('todo')
def mark(self, label, position=None, verbosity=3):
'''Set a mark for this axis. (By default, the current position is
used.)'''
if position is None:
position = self.get_position(verbosity=0)
axes_positions = { self.name : position }
self.stage.mark(label, **axes_positions)
# Book-keeping
########################################
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def is_enabled(self):
return self.enabled and self.stage.is_enabled()
def get_origin(self):
return self.origin
def get_units(self):
if self.units is not None:
return self.units
else:
return getattr(self.base_stage, self.name+'units')()
def get_hint(self, verbosity=3):
'''Return (and print) the "motion hint" associated with this axis. This
hint gives information about the expected directionality of the motion.'''
if self.hint is not None:
s = '%s\n%s' % (self.hint, self.stage._hint_replacements(self.hint))
if verbosity>=2:
print(s)
return s
else:
return getattr(self.base_stage, self.name+'hint')(verbosity=verbosity)
def get_info(self, verbosity=3):
'''Returns information about this axis.'''
self.get_position(verbosity=verbosity)
self.get_hint(verbosity=verbosity)
def check_base(self):
if self.base_stage is None:
print("Error: %s %s has 'base_stage' set to 'None'." % (self.__class__.__name__, self.name))
class Sample_Generic(CoordinateSystem):
"""
The Sample() classes are used to define a single, individual sample. Each
sample is created with a particular name, which is recorded during measurements.
Logging of comments also includes the sample name. Different Sample() classes
can define different defaults for alignment, measurement, etc.
"""
# Core methods
########################################
def __init__(self, name, base=None, **md):
'''Create a new Sample object.
Parameters
----------
name : str
Name for this sample.
base : Stage
The stage/holder on which this sample sits.
'''
if base is None:
base = get_default_stage()
#print("Note: No base/stage/holder specified for sample '{:s}'. Assuming '{:s}' (class {:s})".format(name, base.name, base.__class__.__name__))
super().__init__(name=name, base=base)
self.name = name
self.md = {
'exposure_time' : 1.0 ,
'measurement_ID' : 1 ,
}
self.md.update(md)
self.naming_scheme = ['name', 'extra', 'exposure_time','id']
self.naming_delimeter = '_'
# TODO
#if base is not None:
#base.addSample(self)
self.reset_clock()
def _set_axes_definitions(self):
'''Internal function which defines the axes for this stage. This is kept
as a separate function so that it can be over-ridden easily.'''
# The _axes_definitions array holds a list of dicts, each defining an axis
self._axes_definitions = [ {'name': 'x',
'motor': None,
'enabled': True,
'scaling': +1.0,
'units': None,
'hint': None,
},
{'name': 'y',
'motor': None,
'enabled': True,
'scaling': +1.0,
'units': 'mm',
'hint': None,
},
#{'name': 'z',
#'motor': None,
#'enabled': False,
#'scaling': +1.0,
#'units': 'mm',
#'hint': None,
#},
{'name': 'th',
'motor': None,
'enabled': True,
'scaling': +1.0,
'units': 'deg',
'hint': None,
},
#{'name': 'chi',
#'motor': None,
#'enabled': True,
#'scaling': +1.0,
#'units': 'deg',
#'hint': None,
#},
#{'name': 'phi',
#'motor': None,
#'enabled': True,
#'scaling': +1.0,
#'units': 'deg',
#'hint': None,
#},
]
# Metadata methods
########################################
# These involve setting or getting values associated with this sample.
def clock(self):
'''Return the current value of the "clock" variable. This provides a
way to set a clock/timer for a sample. For instance, you can call
"reset_clock" when you initiate some change to the sample. Thereafter,
the "clock" method lets you check how long it has been since that
event.'''
clock_delta = time.time() - self.clock_zero
return clock_delta
def reset_clock(self):
'''Resets the sample's internal clock/timer to zero.'''
self.clock_zero = time.time()
return self.clock()
def get_attribute(self, attribute):
'''Return the value of the requested md.'''
if attribute in self._axes:
return self._axes[attribute].get_position(verbosity=0)
if attribute=='name':
return self.name
if attribute=='clock':
return self.clock()
if attribute=='temperature':
return self.temperature(verbosity=0)
if attribute in self.md:
return self.md[attribute]
replacements = {
'id' : 'measurement_ID' ,
'ID' : 'measurement_ID' ,
'extra' : 'savename_extra' ,
}
if attribute in replacements:
return self.md[replacements[attribute]]
return None
def set_attribute(self, attribute, value):
'''Arbitrary attributes can be set and retrieved. You can use this to
store additional meta-data about the sample.
WARNING: Currently this meta-data is not saved anywhere. You can opt
to store the information in the sample filename (using "naming").
'''
self.md[attribute] = value
def set_md(self, **md):
self.md.update(md)
def get_md(self, prefix='sample_', include_marks=True, **md):
'''Returns a dictionary of the current metadata.
The 'prefix' argument is prepended to all the md keys, which allows the
metadata to be grouped with other metadata in a clear way. (Especially,
to make it explicit that this metadata came from the sample.)'''
# Update internal md
#self.md['key'] = value
md_return = self.md.copy()
md_return['name'] = self.name
if include_marks:
for label, positions in self._marks.items():
md_return['mark_'+label] = positions
# Add md that varies over time
md_return['clock'] = self.clock()
md_return['temperature'] = self.temperature(verbosity=0)
for axis_name, axis in self._axes.items():
md_return[axis_name] = axis.get_position(verbosity=0)
md_return['motor_'+axis_name] = axis.get_motor_position(verbosity=0)
md_return['savename'] = self.get_savename() # This should be over-ridden by 'measure'
# Include the user-specified metadata
md_return.update(md)
# Add an optional prefix
if prefix is not None:
md_return = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_return.items() }
return md_return
# Naming scheme methods
########################################
# These allow the user to control how data is named.
def naming(self, scheme=['name', 'extra', 'exposure_time','id'], delimeter='_'):
'''This method allows one to define the naming convention that will be
used when storing data for this sample. The "scheme" variable is an array
that lists the various elements one wants to store in the filename.
Each entry in "scheme" is a string referring to a particular element/
value. For instance, motor names can be stored ("x", "y", etc.), the
measurement time can be stored, etc.'''
self.naming_scheme = scheme
self.naming_delimeter = delimeter
def get_naming_string(self, attribute):
# Handle special cases of formatting the text
if attribute in self._axes:
return '{:s}{:.3f}'.format(attribute, self._axes[attribute].get_position(verbosity=0))
if attribute=='clock':
return '{:.1f}s'.format(self.get_attribute(attribute))
if attribute=='exposure_time':
return '{:.2f}s'.format(self.get_attribute(attribute))
if attribute=='temperature':
return 'T{:.3f}C'.format(self.get_attribute(attribute))
if attribute=='extra':
# Note: Don't eliminate this check; it will not be properly handled
# by the generic call below. When 'extra' is None, we should
# return None, so that it gets skipped entirely.
return self.get_attribute('savename_extra')
if attribute=='spot_number':
return 'spot{:d}'.format(self.get_attribute(attribute))
# Generically: lookup the attribute and convert to string
att = self.get_attribute(attribute)
if att is None:
# If the attribute is not found, simply return the text.
# This allows the user to insert arbitrary text info into the
# naming scheme.
return attribute
else:
return str(att)
def get_savename(self, savename_extra=None):
'''Return the filename that will be used to store data for the upcoming
measurement. The method "naming" lets one control what gets stored in
the filename.'''
if savename_extra is not None:
self.set_attribute('savename_extra', savename_extra)
attribute_strings = []
for attribute in self.naming_scheme:
s = self.get_naming_string(attribute)
if s is not None:
attribute_strings.append(s)
self.set_attribute('savename_extra', None)
savename = self.naming_delimeter.join(attribute_strings)
# Avoid 'dangerous' characters
savename = savename.replace(' ', '_')
#savename = savename.replace('.', 'p')
savename = savename.replace('/', '-slash-')
return savename
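# Illustrative example (added comment; sample values are made up): with the
# default naming_scheme ['name', 'extra', 'exposure_time', 'id'], a sample
# named 'film1' with savename_extra='anneal', exposure_time=10.0 and
# measurement_ID=3 yields roughly 'film1_anneal_10.00s_3' (spaces and '/'
# are sanitized afterwards).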
# Logging methods
########################################
def comment(self, text, logbooks=None, tags=None, append_md=True, **md):
'''Add a comment related to this sample.'''
text += '\n\n[comment for sample: {} ({})].'.format(self.name, self.__class__.__name__)
if append_md:
md_current = { k : v for k, v in RE.md.items() } # Global md
md_current.update(get_beamline().get_md()) # Beamline md
# Sample md
md_current.update(self.get_md())
# Specified md
md_current.update(md)
text += '\n\n\nMetadata\n----------------------------------------'
for key, value in sorted(md_current.items()):
text += '\n{}: {}'.format(key, value)
logbook.log(text, logbooks=logbooks, tags=tags)
def log(self, text, logbooks=None, tags=None, append_md=True, **md):
if append_md:
text += '\n\n\nMetadata\n----------------------------------------'
for key, value in sorted(md.items()):
text += '\n{}: {}'.format(key, value)
logbook.log(text, logbooks=logbooks, tags=tags)
# Control methods
########################################
def setTemperature(self, temperature, verbosity=3):
return self.base_stage.setTemperature(temperature, verbosity=verbosity)
def temperature(self, verbosity=3):
return self.base_stage.temperature(verbosity=verbosity)
# Measurement methods
########################################
def get_measurement_md(self, prefix=None, **md):
#md_current = {}
md_current = { k : v for k, v in RE.md.items() } # Global md
#md_current['detector_sequence_ID'] = caget('XF:11BMB-ES{Det:SAXS}:cam1:FileNumber_RBV')
#md_current['detector_sequence_ID'] = caget('XF:11BMB-ES{}:cam1:FileNumber_RBV'.format(pilatus_Epicsname))
if get_beamline().detector[0].name == 'pilatus300':
md_current['detector_sequence_ID'] = caget('XF:11BMB-ES{Det:SAXS}:cam1:FileNumber_RBV')
elif get_beamline().detector[0].name == 'pilatus2M':
md_current['detector_sequence_ID'] = caget('XF:11BMB-ES{Det:PIL2M}:cam1:FileNumber_RBV')
md_current.update(get_beamline().get_md())
md_current.update(md)
# Add an optional prefix
if prefix is not None:
md_current = { '{:s}{:s}'.format(prefix, key) : value for key, value in md_current.items() }
return md_current
def _expose_manual(self, exposure_time=None, verbosity=3, poling_period=0.1, **md):
'''Internal function that is called to actually trigger a measurement.'''
# TODO: Improve this (switch to Bluesky methods)
# TODO: Store metadata
if 'measure_type' not in md:
md['measure_type'] = 'expose'
self.log('{} for {}.'.format(md['measure_type'], self.name), **md)
if exposure_time is not None:
# Prep detector
#caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime', exposure_time)
#caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquirePeriod', exposure_time+0.1)
#caput('XF:11BMB-ES{}:cam1:AcquireTime'.format(pilatus_Epicsname), exposure_time)
#caput('XF:11BMB-ES{}:cam1:AcquirePeriod'.format(pilatus_Epicsname), exposure_time+0.1)
if get_beamline().detector[0].name == 'pilatus300':
caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime', exposure_time)
caput('XF:11BMB-ES{Det:SAXS}:cam1:AcquirePeriod', exposure_time+0.1)
elif get_beamline().detector[0].name == 'pilatus2M':
caput('XF:11BMB-ES{Det:PIL2M}:cam1:AcquireTime', exposure_time)
caput('XF:11BMB-ES{Det:PIL2M}:cam1:AcquirePeriod', exposure_time+0.1)
get_beamline().beam.on()
# Trigger acquisition manually
caput('XF:11BMB-ES{}:cam1:Acquire'.format(pilatus_Epicsname), 1)
if verbosity>=2:
start_time = time.time()
while caget('XF:11BMB-ES{}:cam1:Acquire'.format(pilatus_Epicsname))==1 and (time.time()-start_time)<(exposure_time+20):
percentage = 100*(time.time()-start_time)/exposure_time
print( 'Exposing {:6.2f} s ({:3.0f}%) \r'.format((time.time()-start_time), percentage), end='')
time.sleep(poling_period)
else:
time.sleep(exposure_time)
if verbosity>=3 and caget('XF:11BMB-ES{}:cam1:Acquire'.format(pilatus_Epicsname))==1:
print('Warning: Detector still not done acquiring.')
get_beamline().beam.off()
def expose(self, exposure_time=None, extra=None, verbosity=3, poling_period=0.1, **md):
'''Internal function that is called to actually trigger a measurement.'''
'''TODO: **md does not work in RE(count).'''
if 'measure_type' not in md:
md['measure_type'] = 'expose'
#self.log('{} for {}.'.format(md['measure_type'], self.name), **md)
# Set exposure time
if exposure_time is not None:
#for detector in gs.DETS:
for detector in get_beamline().detector:
if exposure_time != caget('XF:11BMB-ES{Det:PIL2M}:cam1:AcquireTime'):
detector.setExposureTime(exposure_time, verbosity=verbosity)
#extra wait time when changing the exposure time.
time.sleep(2)
#extra wait time for adjusting pilatus2M
#time.sleep(2)
# Do acquisition
get_beamline().beam.on()
md['plan_header_override'] = md['measure_type']
start_time = time.time()
#md_current = self.get_md()
md['beam_int_bim3'] = beam.bim3.flux(verbosity=0)
md['beam_int_bim4'] = beam.bim4.flux(verbosity=0)
md['beam_int_bim5'] = beam.bim5.flux(verbosity=0)
#md.update(md_current)
#uids = RE(count(get_beamline().detector, 1), **md)
uids = RE(count(get_beamline().detector), **md)
#get_beamline().beam.off()
#print('shutter is off')
# Wait for detectors to be ready
max_exposure_time = 0
for detector in get_beamline().detector:
if detector.name == 'pilatus300':
current_exposure_time = caget('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime')
max_exposure_time = max(max_exposure_time, current_exposure_time)
elif detector.name == 'pilatus2M':
current_exposure_time = caget('XF:11BMB-ES{Det:PIL2M}:cam1:AcquireTime')
max_exposure_time = max(max_exposure_time, current_exposure_time)
elif detector.name == 'PhotonicSciences_CMS':
current_exposure_time = detector.exposure_time
max_exposure_time = max(max_exposure_time, current_exposure_time)
else:
if verbosity>=1:
print("WARNING: Didn't recognize detector '{}'.".format(detector.name))
if verbosity>=2:
status = 0
while (status==0) and (time.time()-start_time)<(max_exposure_time+20):
percentage = 100*(time.time()-start_time)/max_exposure_time
print( 'Exposing {:6.2f} s ({:3.0f}%) \r'.format((time.time()-start_time), percentage), end='')
time.sleep(poling_period)
status = 1
for detector in get_beamline().detector:
if detector.name == 'pilatus300':
if caget('XF:11BMB-ES{Det:SAXS}:cam1:Acquire')==1:
status *= 0
elif detector.name == 'pilatus2M':
if caget('XF:11BMB-ES{Det:PIL2M}:cam1:Acquire')==1:
status *= 0
elif detector.name == 'PhotonicSciences_CMS':
if not detector.detector_is_ready(verbosity=0):
status *= 0
print('')
else:
time.sleep(max_exposure_time)
if verbosity>=3 and caget('XF:11BMB-ES{Det:SAXS}:cam1:Acquire')==1:
print('Warning: Detector pilatus300 still not done acquiring.')
if verbosity>=3 and caget('XF:11BMB-ES{Det:PIL2M}:cam1:Acquire')==1:
print('Warning: Detector pilatus2M still not done acquiring.')
get_beamline().beam.off()
for detector in get_beamline().detector:
#self.handle_file(detector, extra=extra, verbosity=verbosity, **md)
self.handle_file(detector, extra=extra, verbosity=verbosity)
def handle_file(self, detector, extra=None, verbosity=3, subdirs=True, **md):
subdir = ''
if detector.name == 'pilatus300':
chars = caget('XF:11BMB-ES{Det:SAXS}:TIFF1:FullFileName_RBV')
filename = ''.join(chr(char) for char in chars)[:-1]
# Alternate method to get the last filename
#filename = '{:s}/{:s}.tiff'.format( detector.tiff.file_path.get(), detector.tiff.file_name.get() )
if verbosity>=3:
print(' Data saved to: {}'.format(filename))
if subdirs:
subdir = '/saxs/'
#if md['measure_type'] is not 'snap':
if True:
self.set_attribute('exposure_time', caget('XF:11BMB-ES{Det:SAXS}:cam1:AcquireTime'))
# Create symlink
#link_name = '{}/{}{}'.format(RE.md['experiment_alias_directory'], subdir, md['filename'])
#savename = md['filename'][:-5]
savename = self.get_savename(savename_extra=extra)
link_name = '{}/{}{}_{:04d}_saxs.tiff'.format(RE.md['experiment_alias_directory'], subdir, savename, RE.md['scan_id'])
if os.path.isfile(link_name):
i = 1
while os.path.isfile('{}.{:d}'.format(link_name,i)):
i += 1
os.rename(link_name, '{}.{:d}'.format(link_name,i))
os.symlink(filename, link_name)
if verbosity>=3:
print(' Data linked as: {}'.format(link_name))
elif detector.name == 'pilatus2M':
chars = caget('XF:11BMB-ES{Det:PIL2M}:TIFF1:FullFileName_RBV')
filename = ''.join(chr(char) for char in chars)[:-1]
# Alternate method to get the last filename
#filename = '{:s}/{:s}.tiff'.format( detector.tiff.file_path.get(), detector.tiff.file_name.get() )
if verbosity>=3:
print(' Data saved to: {}'.format(filename))
if subdirs:
subdir = '/saxs/'
#if md['measure_type'] is not 'snap':
if True:
self.set_attribute('exposure_time', caget('XF:11BMB-ES{Det:PIL2M}:cam1:AcquireTime'))
# Create symlink
#link_name = '{}/{}{}'.format(RE.md['experiment_alias_directory'], subdir, md['filename'])
#savename = md['filename'][:-5]
savename = self.get_savename(savename_extra=extra)
link_name = '{}/{}{}_{:04d}_saxs.tiff'.format(RE.md['experiment_alias_directory'], subdir, savename, RE.md['scan_id'])
#link_name = '{}/{}{}_{:04d}_saxs.cbf'.format(RE.md['experiment_alias_directory'], subdir, savename, RE.md['scan_id']-1)
if os.path.isfile(link_name):
i = 1
while os.path.isfile('{}.{:d}'.format(link_name,i)):
i += 1
os.rename(link_name, '{}.{:d}'.format(link_name,i))
os.symlink(filename, link_name)
if verbosity>=3:
print(' Data linked as: {}'.format(link_name))
elif detector.name == 'PhotonicSciences_CMS':
self.set_attribute('exposure_time', detector.exposure_time)
filename = '{:s}/{:s}.tif'.format( detector.file_path, detector.file_name )
if subdirs:
subdir = '/waxs/'
#savename = md['filename'][:-5]
savename = self.get_savename(savename_extra=extra)
savename = '{}/{}{}_{:04d}_waxs.tiff'.format(RE.md['experiment_alias_directory'], subdir, savename, RE.md['scan_id'])
shutil.copy(filename, savename)
if verbosity>=3:
print(' Data saved to: {}'.format(savename))
else:
if verbosity>=1:
print("WARNING: Can't do file handling for detector '{}'.".format(detector.name))
return
def snap(self, exposure_time=None, extra=None, measure_type='snap', verbosity=3, **md):
'''Take a quick exposure (without saving data).'''
self.measure(exposure_time=exposure_time, extra=extra, measure_type=measure_type, verbosity=verbosity, **md)
def measure(self, exposure_time=None, extra=None, measure_type='measure', verbosity=3, tiling=False, **md):
'''Measure data by triggering the area detectors.
Parameters
----------
exposure_time : float
How long to collect data
extra : string, optional
Extra information about this particular measurement (which is typically
included in the savename/filename).
tiling : string
Controls the detector tiling mode.
None : regular measurement (single detector position)
'ygaps' : try to cover the vertical gaps in the Pilatus300k
'''
if tiling == 'ygaps':
extra_current = 'pos1' if extra is None else '{}_pos1'.format(extra)
md['detector_position'] = 'lower'
self.measure_single(exposure_time=exposure_time, extra=extra_current, measure_type=measure_type, verbosity=verbosity, **md)
#movr(SAXSy, 5.16) # move detector up by 30 pixels; 30*0.172 = 5.16
SAXSy.move(SAXSy.user_readback.value + 5.16)
extra_current = 'pos2' if extra is None else '{}_pos2'.format(extra)
md['detector_position'] = 'upper'
self.measure_single(exposure_time=exposure_time, extra=extra_current, measure_type=measure_type, verbosity=verbosity, **md)
#movr(SAXSy, -5.16)
SAXSy.move(SAXSy.user_readback.value - 5.16)
#if tiling is 'big':
# TODO: Use multiple images to fill the entire detector motion range
else:
# Just do a normal measurement
self.measure_single(exposure_time=exposure_time, extra=extra, measure_type=measure_type, verbosity=verbosity, **md)
def measure_single(self, exposure_time=None, extra=None, measure_type='measure', verbosity=3, **md):
'''Measure data by triggering the area detectors.
Parameters
----------
exposure_time : float
How long to collect data
extra : string, optional
Extra information about this particular measurement (which is typically
included in the savename/filename).
'''
if exposure_time is not None:
self.set_attribute('exposure_time', exposure_time)
#else:
#exposure_time = self.get_attribute('exposure_time')
savename = self.get_savename(savename_extra=extra)
#caput('XF:11BMB-ES{Det:SAXS}:cam1:FileName', savename)
if verbosity>=2 and (get_beamline().current_mode != 'measurement'):
print("WARNING: Beamline is not in measurement mode (mode is '{}')".format(get_beamline().current_mode))
if verbosity>=1 and len(get_beamline().detector)<1:
print("ERROR: No detectors defined in cms.detector")
return
md_current = self.get_md()
md_current.update(self.get_measurement_md())
md_current['sample_savename'] = savename
md_current['measure_type'] = measure_type
#md_current['filename'] = '{:s}_{:04d}.tiff'.format(savename, md_current['detector_sequence_ID'])
md_current['filename'] = '{:s}_{:04d}.tiff'.format(savename, RE.md['scan_id'])
#md_current.update(md)
self.expose(exposure_time, extra=extra, verbosity=verbosity, **md_current)
#self.expose(exposure_time, extra=extra, verbosity=verbosity, **md)
self.md['measurement_ID'] += 1
def _test_time(self):
print(time.time())
time.time()
def _test_measure_single(self, exposure_time=None, extra=None, shutteronoff=True, measure_type='measure', verbosity=3, **md):
'''Measure data by triggering the area detectors.
Parameters
----------
exposure_time : float
How long to collect data
extra : string, optional
Extra information about this particular measurement (which is typically
included in the savename/filename).
'''
#print('1') #0s
#print(time.time())
if exposure_time is not None:
self.set_attribute('exposure_time', exposure_time)
#else:
#exposure_time = self.get_attribute('exposure_time')
savename = self.get_savename(savename_extra=extra)
#caput('XF:11BMB-ES{Det:SAXS}:cam1:FileName', savename)
if verbosity>=2 and (get_beamline().current_mode != 'measurement'):
print("WARNING: Beamline is not in measurement mode (mode is '{}')".format(get_beamline().current_mode))
if verbosity>=1 and len(get_beamline().detector)<1:
print("ERROR: No detectors defined in cms.detector")
return
#print('2') #0.0004s
#print(time.time())
md_current = self.get_md()
md_current['sample_savename'] = savename
md_current['measure_type'] = measure_type
md_current.update(self.get_measurement_md())
#md_current['filename'] = '{:s}_{:04d}.tiff'.format(savename, md_current['detector_sequence_ID'])
md_current['filename'] = '{:s}_{:04d}.tiff'.format(savename, RE.md['scan_id'])
md_current.update(md)
#print('3') #0.032s
#print(time.time())
self._test_expose(exposure_time, shutteronoff=shutteronoff, extra=extra, verbosity=verbosity, **md_current)
#print('4') #5.04s
#print(time.time())
self.md['measurement_ID'] += 1
#print('5') #5.0401
#print(time.time())
def _test_expose(self, exposure_time=None, extra=None, verbosity=3, poling_period=0.1, shutteronoff=True, **md):
'''Internal function that is called to actually trigger a measurement.'''
if 'measure_type' not in md:
md['measure_type'] = 'expose'
#self.log('{} for {}.'.format(md['measure_type'], self.name), **md)
# Set exposure time
if exposure_time is not None:
for detector in get_beamline().detector:
detector.setExposureTime(exposure_time, verbosity=verbosity)
#print('1') #5e-5
#print(self.clock())
# Do acquisition
# check shutteronoff, if
if shutteronoff == True:
get_beamline().beam.on()
else:
print('shutter is disabled')
#print('2') #3.0
#print(self.clock())
md['plan_header_override'] = md['measure_type']
start_time = time.time()
print('2') #3.0
print(self.clock())
#uids = RE(count(get_beamline().detector, 1), **md)
#uids = RE(count(get_beamline().detector), **md)
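        # NOTE: the 'yield from' below makes _test_expose a generator; the count() plan
        # only runs if the caller iterates this generator (e.g. by passing it to RE(...)).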
yield from (count(get_beamline().detector))
print('3') #4.3172
print(self.clock())
#get_beamline().beam.off()
#print('shutter is off')
# Wait for detectors to be ready
max_exposure_time = 0
for detector in get_beamline().detector:
            if detector.name in ('pilatus300', 'pilatus2M'):
current_exposure_time = caget('XF:11BMB-ES{}:cam1:AcquireTime'.format(pilatus_Epicsname))
max_exposure_time = max(max_exposure_time, current_exposure_time)
            elif detector.name == 'PhotonicSciences_CMS':
current_exposure_time = detector.exposure_time
max_exposure_time = max(max_exposure_time, current_exposure_time)
else:
if verbosity>=1:
print("WARNING: Didn't recognize detector '{}'.".format(detector.name))
print('4') #4.3193
print(self.clock())
if verbosity>=2:
status = 0
print('status1 = ', status)
while (status==0) and (time.time()-start_time)<(max_exposure_time+20):
percentage = 100*(time.time()-start_time)/max_exposure_time
print( 'Exposing {:6.2f} s ({:3.0f}%) \r'.format((time.time()-start_time), percentage), end='')
print('status2 = ', status)
time.sleep(poling_period)
status = 1
for detector in get_beamline().detector:
                    if detector.name in ('pilatus300', 'pilatus2M'):
print('status2.5 = ', status)
if caget('XF:11BMB-ES{}:cam1:Acquire'.format(pilatus_Epicsname))==1:
status = 0
print('status3 = ', status)
print('status3.5 = ', status)
                    elif detector.name == 'PhotonicSciences_CMS':
if not detector.detector_is_ready(verbosity=0):
status = 0
print('5') #3.0
print(self.clock())
print('6') #3.0
print(self.clock())
else:
time.sleep(max_exposure_time)
#print('5') #4.4193
#print(self.clock())
if verbosity>=3 and caget('XF:11BMB-ES{}:cam1:Acquire'.format(pilatus_Epicsname))==1:
print('Warning: Detector still not done acquiring.')
if shutteronoff == True:
get_beamline().beam.off()
else:
print('shutter is disabled')
#print('6') #4.9564
#print(self.clock())
for detector in get_beamline().detector:
self.handle_file(detector, extra=extra, verbosity=verbosity, **md)
#print('7') #4.9589
#print(self.clock())
def _test_measureSpots(self, num_spots=4, translation_amount=0.2, axis='y', exposure_time=None, extra=None, shutteronoff=True, measure_type='measureSpots', tiling=False, **md):
'''Measure multiple spots on the sample.'''
if 'spot_number' not in self.md:
self.md['spot_number'] = 1
start_time = time.time()
for spot_num in range(num_spots):
self._test_measure_single(exposure_time=exposure_time, extra=extra, measure_type=measure_type, shutteronoff=shutteronoff, tiling=tiling, **md)
print(spot_num+1)
print(time.time()-start_time)
getattr(self, axis+'r')(translation_amount)
self.md['spot_number'] += 1
print('{:d} of {:d} is done'.format(spot_num+1,num_spots))
print(time.time()-start_time)
def measureSpots(self, num_spots=4, translation_amount=0.2, axis='y', exposure_time=None, extra=None, measure_type='measureSpots', tiling=False, **md):
'''Measure multiple spots on the sample.'''
if 'spot_number' not in self.md:
self.md['spot_number'] = 1
for spot_num in range(num_spots):
self.measure(exposure_time=exposure_time, extra=extra, measure_type=measure_type, tiling=tiling, **md)
getattr(self, axis+'r')(translation_amount)
self.md['spot_number'] += 1
print('{:d} of {:d} is done'.format(spot_num+1,num_spots))
def measureTimeSeries(self, exposure_time=None, num_frames=10, wait_time=None, extra=None, measure_type='measureTimeSeries', verbosity=3, tiling=False, fix_name=True, **md):
if fix_name and ('clock' not in self.naming_scheme):
self.naming_scheme_hold = self.naming_scheme
self.naming_scheme = self.naming_scheme_hold.copy()
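            # Insert a 'clock' field into the filename scheme so successive frames in the time series get distinct names.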
self.naming_scheme.insert(-1, 'clock')
md['measure_series_num_frames'] = num_frames
for i in range(num_frames):
if verbosity>=3:
print('Measuring frame {:d}/{:d} ({:.1f}% complete).'.format(i+1, num_frames, 100.0*i/num_frames))
md['measure_series_current_frame'] = i+1
self.measure(exposure_time=exposure_time, extra=extra, measure_type=measure_type, verbosity=verbosity, tiling=tiling, **md)
if wait_time is not None:
time.sleep(wait_time)
def measureTimeSeriesAngles(self, exposure_time=None, num_frames=10, wait_time=None, extra=None, measure_type='measureTimeSeries', verbosity=3, tiling=False, fix_name=True, **md):
if fix_name and ('clock' not in self.naming_scheme):
self.naming_scheme_hold = self.naming_scheme
self.naming_scheme = self.naming_scheme_hold.copy()
self.naming_scheme.insert(-1, 'clock')
md['measure_series_num_frames'] = num_frames
for i in range(num_frames):
if verbosity>=3:
print('Measuring frame {:d}/{:d} ({:.1f}% complete).'.format(i+1, num_frames, 100.0*i/num_frames))
md['measure_series_current_frame'] = i+1
print('Angles in measure include: {}'.format(sam.incident_angles_default))
self.measureIncidentAngles(exposure_time=exposure_time, extra=extra, **md)
if wait_time is not None:
time.sleep(wait_time)
#if (i % 2 ==0):
# self.xr(-1)
#else:
# self.xr(1)
#self.pos()
def measureTemperature(self, temperature, exposure_time=None, wait_time=None, temperature_tolerance=0.4, extra=None, measure_type='measureTemperature', verbosity=3, tiling=False, poling_period=1.0, fix_name=True, **md):
# Set new temperature
self.setTemperature(temperature, verbosity=verbosity)
# Wait until we reach the temperature
while abs(self.temperature(verbosity=0) - temperature)>temperature_tolerance:
if verbosity>=3:
print(' setpoint = {:.3f}°C, Temperature = {:.3f}°C \r'.format(caget('XF:11BM-ES{Env:01-Out:1}T-SP')-273.15, self.temperature(verbosity=0)), end='')
time.sleep(poling_period)
# Allow for additional equilibration at this temperature
if wait_time is not None:
time.sleep(wait_time)
# Measure
#if fix_name and ('temperature' not in self.naming_scheme):
# self.naming_scheme_hold = self.naming_scheme
# self.naming_scheme = self.naming_scheme_hold.copy()
# self.naming_scheme.insert(-1, 'temperature')
self.measure(exposure_time=exposure_time, extra=extra, measure_type=measure_type, verbosity=verbosity, tiling=tiling, **md)
#self.naming_scheme = self.naming_scheme_hold
def measureTemperatures(self, temperatures, exposure_time=None, wait_time=None, temperature_tolerance=0.4, extra=None, measure_type='measureTemperature', verbosity=3, tiling=False, poling_period=1.0, fix_name=True, **md):
for temperature in temperatures:
self.measureTemperature(temperature, exposure_time=exposure_time, wait_time=wait_time, temperature_tolerance=temperature_tolerance, measure_type=measure_type, verbosity=verbosity, tiling=tiling, poling_period=poling_period, fix_name=fix_name, **md)
def do(self, step=0, verbosity=3, **md):
'''Performs the "default action" for this sample. This usually means
aligning the sample, and taking data.
The 'step' argument can optionally be given to jump to a particular
step in the sequence.'''
if verbosity>=4:
print(' doing sample {}'.format(self.name))
if step<=1:
if verbosity>=5:
print(' step 1: goto origin')
self.xo() # goto origin
self.yo()
#self.gotoAlignedPosition()
#if step<=5:
#self.align()
if step<=10:
if verbosity>=5:
print(' step 10: measuring')
self.measure(**md)
# Control methods
########################################
def setTemperature(self, temperature, verbosity=3):
#if verbosity>=1:
#print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
if verbosity>=2:
print(' Changing temperature setpoint from {:.3f}°C to {:.3f}°C'.format(caget('XF:11BM-ES{Env:01-Out:1}T-SP')-273.15, temperature))
caput('XF:11BM-ES{Env:01-Out:1}T-SP', temperature+273.15)
def temperature(self, verbosity=3):
#if verbosity>=1:
#print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
current_temperature = caget('XF:11BM-ES{Env:01-Chan:A}T:C-I')
if verbosity>=3:
print(' Temperature = {:.3f}°C (setpoint = {:.3f}°C)'.format( current_temperature, caget('XF:11BM-ES{Env:01-Out:1}T-SP')-273.15 ) )
return current_temperature
class SampleTSAXS_Generic(Sample_Generic):
pass
class SampleGISAXS_Generic(Sample_Generic):
def __init__(self, name, base=None, **md):
super().__init__(name=name, base=base, **md)
self.naming_scheme = ['name', 'extra', 'th', 'exposure_time']
self.incident_angles_default = [0.08, 0.10, 0.12, 0.15, 0.20]
def measureSpots(self, num_spots=2, translation_amount=0.1, axis='x', exposure_time=None, extra=None, measure_type='measureSpots', **md):
super().measureSpots(num_spots=num_spots, translation_amount=translation_amount, axis=axis, exposure_time=exposure_time, extra=extra, measure_type=measure_type, **md)
def measureIncidentAngle(self, angle, exposure_time=None, extra=None, **md):
self.thabs(angle)
self.measure(exposure_time=exposure_time, extra=extra, **md)
def measureIncidentAngles(self, angles=None, exposure_time=None, extra=None, **md):
if angles is None:
angles = self.incident_angles_default
for angle in angles:
self.measureIncidentAngle(angle, exposure_time=exposure_time, extra=extra, **md)
def _alignOld(self, step=0):
'''Align the sample with respect to the beam. GISAXS alignment involves
vertical translation to the beam center, and rocking theta to get the
        sample plane parallel to the beam.
The 'step' argument can optionally be given to jump to a particular
step in the sequence.'''
# TODO: Deprecate and delete
if step<=0:
# TODO: Check what mode we are in, change if necessary...
# get_beamline().modeAlignment()
beam.on()
# TODO: Improve implementation
if step<=2:
#fit_scan(smy, 2.6, 35, fit='HM')
fit_scan(smy, 2.6, 35, fit='sigmoid_r')
if step<=4:
#fit_scan(smy, 0.6, 17, fit='HM')
fit_scan(smy, 0.6, 17, fit='sigmoid_r')
fit_scan(sth, 1.2, 21, fit='max')
#if step<=6:
# fit_scan(smy, 0.3, 17, fit='sigmoid_r')
# fit_scan(sth, 1.2, 21, fit='COM')
if step<=8:
fit_scan(smy, 0.2, 17, fit='sigmoid_r')
fit_scan(sth, 0.8, 21, fit='gauss')
if step<=9:
#self._testing_refl_pos()
#movr(sth,.1)
#fit_scan(sth, 0.2, 41, fit='gauss')
#fit_scan(smy, 0.2, 21, fit='gauss')
#movr(sth,-.1)
beam.off()
def align(self, step=0, reflection_angle=0.08, verbosity=3):
'''Align the sample with respect to the beam. GISAXS alignment involves
vertical translation to the beam center, and rocking theta to get the
        sample plane parallel to the beam. Finally, the angle is re-optimized
in reflection mode.
The 'step' argument can optionally be given to jump to a particular
step in the sequence.'''
if verbosity>=4:
print(' Aligning {}'.format(self.name))
if step<=0:
# Prepare for alignment
if RE.state!='idle':
RE.abort()
if get_beamline().current_mode!='alignment':
if verbosity>=2:
print("WARNING: Beamline is not in alignment mode (mode is '{}')".format(get_beamline().current_mode))
#get_beamline().modeAlignment()
get_beamline().setDirectBeamROI()
beam.on()
if step<=2:
if verbosity>=4:
print(' align: searching')
# Estimate full-beam intensity
value = None
if True:
# You can eliminate this, in which case RE.md['beam_intensity_expected'] is used by default
self.yr(-2)
#detector = gs.DETS[0]
detector = get_beamline().detector[0]
value_name = get_beamline().TABLE_COLS[0]
RE(count([detector]))
value = detector.read()[value_name]['value']
self.yr(+2)
if 'beam_intensity_expected' in RE.md and value<RE.md['beam_intensity_expected']*0.75:
print('WARNING: Direct beam intensity ({}) lower than it should be ({})'.format(value, RE.md['beam_intensity_expected']))
# Find the step-edge
self.ysearch(step_size=0.5, min_step=0.005, intensity=value, target=0.5, verbosity=verbosity, polarity=-1)
# Find the peak
self.thsearch(step_size=0.4, min_step=0.01, target='max', verbosity=verbosity)
if step<=4:
if verbosity>=4:
print(' align: fitting')
fit_scan(smy, 1.2, 21, fit='HMi')
#time.sleep(2)
fit_scan(sth, 1.5, 21, fit='max')
#time.sleep(2)
#if step<=5:
# #fit_scan(smy, 0.6, 17, fit='sigmoid_r')
# fit_edge(smy, 0.6, 17)
# fit_scan(sth, 1.2, 21, fit='max')
if step<=8:
#fit_scan(smy, 0.3, 21, fit='sigmoid_r')
fit_edge(smy, 0.6, 21)
#time.sleep(2)
#fit_edge(smy, 0.4, 21)
fit_scan(sth, 0.8, 21, fit='COM')
#time.sleep(2)
self.setOrigin(['y', 'th'])
if step<=9 and reflection_angle is not None:
# Final alignment using reflected beam
if verbosity>=4:
print(' align: reflected beam')
get_beamline().setReflectedBeamROI(total_angle=reflection_angle*2.0)
#get_beamline().setReflectedBeamROI(total_angle=reflection_angle*2.0, size=[12,2])
self.thabs(reflection_angle)
result = fit_scan(sth, 0.2, 41, fit='max')
#result = fit_scan(sth, 0.2, 81, fit='max') #it's useful for alignment of SmarAct stage
sth_target = result.values['x_max']-reflection_angle
if result.values['y_max']>10:
th_target = self._axes['th'].motor_to_cur(sth_target)
self.thsetOrigin(th_target)
#fit_scan(smy, 0.2, 21, fit='max')
self.setOrigin(['y'])
if step<=10:
self.thabs(0.0)
beam.off()
def alignQuick(self, align_step=8, reflection_angle=0.08, verbosity=3):
get_beamline().modeAlignment()
#self.yo()
self.tho()
beam.on()
self.align(step=align_step, reflection_angle=reflection_angle, verbosity=verbosity)
def _testing_level(self, step=0,pos_x_left=-5, pos_x_right=5):
#TODO: Move this code. (This should be a property of the GIBar object.)
#level GIBar by checking bar height at pos_left and pos_right
print('checking the level of GIBar')
#if step<=1:
# cms.modeAlignment()
#sam.xabs(pos_x_left)
#fit_scan(smy, .6, 17, fit='sigmooid_r') #it's better not to move smy after scan but only the center position
#pos_y_left=smy.user_readback.value
#
#sam.xabs(pos_x_right)
#fit_scan(smy, .6, 17, fit='sigmooid_r')
#pos_y_right=smy.user_readback.value
#offset_schi=(pos_y_right-pos_y_left)/(pos_x_right-pos_x_left)
#movr(sch, offset_schi)
#double-check the chi offset
#sam.xabs(pos_x_left)
#fit_scan(smy, .6, 17, fit='sigmooid_r') #it's better not to move smy after scan but only the center position
#pos_y_left=smy.user_readback.value
#sam.xabs(pos_x_right)
#fit_scan(smy, .6, 17, fit='sigmooid_r')
#pos_y_right=smy.user_readback.value
#offset_schi=(pos_y_right-pos_y_left)/(pos_x_right-pos_x_left)
#if offset_schi<=0.1:
#print('schi offset is aligned successfully!')
#else:
#print('schi offset is WRONG. Please redo the level command')
pass
def do(self, step=0, align_step=0, **md):
if step<=1:
get_beamline().modeAlignment()
if step<=2:
self.xo() # goto origin
if step<=4:
self.yo()
self.tho()
if step<=5:
self.align(step=align_step)
#self.setOrigin(['y','th']) # This is done within align
#if step<=7:
#self.xr(0.2)
if step<=8:
get_beamline().modeMeasurement()
if step<=10:
#detselect([pilatus300, psccd])
#detselect(psccd)
#detselect(pilatus300)
detselect(pilatus2M)
for detector in get_beamline().detector:
detector.setExposureTime(self.md['exposure_time'])
self.measureIncidentAngles(self.incident_angles_default, **md)
self.thabs(0.0)
class SampleCDSAXS_Generic(Sample_Generic):
def __init__(self, name, base=None, **md):
super().__init__(name=name, base=base, **md)
self.naming_scheme = ['name', 'extra', 'phi', 'exposure_time']
self.rot_angles_default = np.arange(-45, +45+1, +1)
#self.rot_angles_default = np.linspace(-45, +45, num=90, endpoint=True)
def _set_axes_definitions(self):
'''Internal function which defines the axes for this stage. This is kept
as a separate function so that it can be over-ridden easily.'''
super()._set_axes_definitions()
self._axes_definitions.append( {'name': 'phi',
'motor': srot,
'enabled': True,
'scaling': +1.0,
'units': 'deg',
'hint': None,
} )
def measureAngle(self, angle, exposure_time=None, extra=None, measure_type='measure', **md):
self.phiabs(angle)
self.measure(exposure_time=exposure_time, extra=extra, measure_type=measure_type, **md)
def measureAngles(self, angles=None, exposure_time=None, extra=None, measure_type='measureAngles', **md):
if angles is None:
angles = self.rot_angles_default
for angle in angles:
self.measureAngle(angle, exposure_time=exposure_time, extra=extra, measure_type=measure_type, **md)
class Stage(CoordinateSystem):
pass
class SampleStage(Stage):
def __init__(self, name='SampleStage', base=None, **kwargs):
super().__init__(name=name, base=base, **kwargs)
def _set_axes_definitions(self):
'''Internal function which defines the axes for this stage. This is kept
as a separate function so that it can be over-ridden easily.'''
# The _axes_definitions array holds a list of dicts, each defining an axis
self._axes_definitions = [ {'name': 'x',
'motor': smx,
'enabled': True,
'scaling': +1.0,
'units': 'mm',
'hint': 'positive moves stage left/outboard (beam moves right on sample)',
},
{'name': 'y',
'motor': smy,
'enabled': True,
'scaling': +1.0,
'units': 'mm',
'hint': 'positive moves stage up (beam moves down on sample)',
},
{'name': 'th',
'motor': sth,
'enabled': True,
'scaling': +1.0,
'units': 'deg',
'hint': 'positive tilts clockwise (positive incident angle)',
},
]
class Holder(Stage):
'''The Holder() classes are used to define bars/stages that hold one or more
samples. This class can thus help to keep track of coordinate conversions,
to store the positions of multiple samples, and to automate the measurement
of multiple samples.'''
# Core methods
########################################
def __init__(self, name='Holder', base=None, **kwargs):
if base is None:
base = get_default_stage()
super().__init__(name=name, base=base, **kwargs)
self._samples = {}
def _set_axes_definitions(self):
'''Internal function which defines the axes for this stage. This is kept
as a separate function so that it can be over-ridden easily.'''
# The _axes_definitions array holds a list of dicts, each defining an axis
self._axes_definitions = [ {'name': 'x',
'motor': None,
'enabled': True,
'scaling': +1.0,
'units': 'mm',
'hint': 'positive moves stage left/outboard (beam moves right on sample)',
},
{'name': 'y',
'motor': None,
'enabled': True,
'scaling': +1.0,
'units': 'mm',
'hint': 'positive moves stage up (beam moves down on sample)',
},
{'name': 'th',
'motor': None,
'enabled': True,
'scaling': +1.0,
'units': 'deg',
'hint': 'positive tilts clockwise (positive incident angle)',
},
]
# Sample management
########################################
def addSample(self, sample, sample_number=None):
'''Add a sample to this holder/bar.'''
if sample_number is None:
if len(self._samples)==0:
sample_number = 1
else:
ki = [ int(key) for key in self._samples.keys() ]
sample_number = np.max(ki) + 1
if sample_number in self._samples.keys():
print('Warning: Sample number {} is already defined on holder "{:s}". Use "replaceSample" if you are sure you want to eliminate the existing sample from the holder.'.format(sample_number, self.name) )
else:
            self._samples[sample_number] = sample
sample.set_base_stage(self)
sample.md['holder_sample_number'] = sample_number
def removeSample(self, sample_number):
'''Remove a particular sample from this holder/bar.'''
del self._samples[sample_number]
def removeSamplesAll(self):
self._samples = {}
def replaceSample(self, sample, sample_number):
'''Replace a given sample on this holder/bar with a different sample.'''
self.removeSample(sample_number)
self.addSample(sample, sample_number)
def getSample(self, sample_number, verbosity=3):
'''Return the requested sample object from this holder/bar.
One can provide an integer, in which case the corresponding sample
(from the holder's inventory) is returned. If a string is provided,
the closest-matching sample (by name) is returned.'''
if type(sample_number) is int:
if sample_number not in self._samples:
if verbosity>=1:
print('Error: Sample {} not defined.'.format(sample_number))
return None
sample_match = self._samples[sample_number]
if verbosity>=3:
print('{}: {:s}'.format(sample_number, sample_match.name))
return sample_match
elif type(sample_number) is str:
# First search for an exact name match
matches = 0
sample_match = None
sample_i_match = None
for sample_i, sample in sorted(self._samples.items()):
if sample.name==sample_number:
matches += 1
if sample_match is None:
sample_match = sample
sample_i_match = sample_i
if matches==1:
if verbosity>=3:
print('{}: {:s}'.format(sample_i_match, sample_match.name))
return sample_match
elif matches>1:
if verbosity>=2:
print('{:d} exact matches for "{:s}", returning sample {}: {:s}'.format(matches, sample_number, sample_i_match, sample_match.name))
return sample_match
# Try to find a 'start of name' match
for sample_i, sample in sorted(self._samples.items()):
if sample.name.startswith(sample_number):
matches += 1
if sample_match is None:
sample_match = sample
sample_i_match = sample_i
if matches==1:
if verbosity>=3:
print('Beginning-name match: {}: {:s}'.format(sample_i_match, sample_match.name))
return sample_match
elif matches>1:
if verbosity>=2:
print('{:d} beginning-name matches for "{:s}", returning sample {}: {:s}'.format(matches, sample_number, sample_i_match, sample_match.name))
return sample_match
# Try to find a substring match
for sample_i, sample in sorted(self._samples.items()):
if sample_number in sample.name:
matches += 1
if sample_match is None:
sample_match = sample
sample_i_match = sample_i
if matches==1:
if verbosity>=3:
print('Substring match: {}: {:s}'.format(sample_i_match, sample_match.name))
return sample_match
elif matches>1:
if verbosity>=2:
print('{:d} substring matches for "{:s}", returning sample {}: {:s}'.format(matches, sample_number, sample_i_match, sample_match.name))
return sample_match
if verbosity>=1:
print('No sample has a name matching "{:s}"'.format(sample_number))
return None
else:
print('Error: Sample designation "{}" not understood.'.format(sample_number))
return None
def getSamples(self, range=None, verbosity=3):
'''Get the list of samples associated with this holder.
If the optional range argument is provided (2-tuple), then only sample
numbers within that range (inclusive) are run. If range is instead a
string, then all samples with names that match are returned.'''
samples = []
if range is None:
for sample_number in sorted(self._samples):
samples.append(self._samples[sample_number])
        elif isinstance(range, (list, tuple)):
start, stop = range
for sample_number in sorted(self._samples):
if sample_number>=start and sample_number<=stop:
samples.append(self._samples[sample_number])
elif type(range) is str:
for sample_number, sample in sorted(self._samples.items()):
if range in sample.name:
samples.append(sample)
elif type(range) is int:
samples.append(self._samples[range])
else:
if verbosity>=1:
print('Range argument "{}" not understood.'.format(range))
return samples
def listSamples(self):
'''Print a list of the current samples associated with this holder/
bar.'''
for sample_number, sample in sorted(self._samples.items()):
print( '{}: {:s}'.format(sample_number, sample.name) )
def gotoSample(self, sample_number):
sample = self.getSample(sample_number, verbosity=0)
sample.gotoAlignedPosition()
return sample
# Control methods
########################################
def setTemperature(self, temperature, verbosity=3):
#if verbosity>=1:
#print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
if verbosity>=2:
print(' Changing temperature setpoint from {:.3f}°C to {:.3f}°C'.format(caget('XF:11BM-ES{Env:01-Out:1}T-SP')-273.15, temperature))
caput('XF:11BM-ES{Env:01-Out:1}T-SP', temperature+273.15)
def temperature(self, verbosity=3):
#if verbosity>=1:
#print('Temperature functions not implemented in {}'.format(self.__class__.__name__))
current_temperature = caget('XF:11BM-ES{Env:01-Chan:A}T:C-I')
if verbosity>=3:
print(' Temperature = {:.3f}°C (setpoint = {:.3f}°C)'.format( current_temperature, caget('XF:11BM-ES{Env:01-Out:1}T-SP')-273.15 ) )
return current_temperature
# Action (measurement) methods
########################################
def doSamples(self, range=None, verbosity=3, **md):
'''Activate the default action (typically measurement) for all the samples.
If the optional range argument is provided (2-tuple), then only sample
numbers within that range (inclusive) are run. If range is instead a
string, then all samples with names that match are returned.'''
for sample in self.getSamples(range=range):
if verbosity>=3:
print('Doing sample {}...'.format(sample.name))
sample.do(verbosity=verbosity, **md)
def doTemperature(self, temperature, wait_time=None, temperature_tolerance=0.4, range=None, verbosity=3, poling_period=2.0, **md):
# Set new temperature
self.setTemperature(temperature, verbosity=verbosity)
# Wait until we reach the temperature
while abs(self.temperature(verbosity=0) - temperature)>temperature_tolerance:
if verbosity>=3:
print(' setpoint = {:.3f}°C, Temperature = {:.3f}°C \r'.format(caget('XF:11BM-ES{Env:01-Out:1}T-SP')-273.15, self.temperature(verbosity=0)), end='')
time.sleep(poling_period)
# Allow for additional equilibration at this temperature
if wait_time is not None:
time.sleep(wait_time)
self.doSamples(range=range, verbosity=verbosity, **md)
def doTemperatures(self, temperatures, wait_time=None, temperature_tolerance=0.4, range=None, verbosity=3, **md):
for temperature in temperatures:
self.doTemperature(temperature, wait_time=wait_time, temperature_tolerance=temperature_tolerance, range=range, verbosity=verbosity, **md)
class PositionalHolder(Holder):
'''This class is a sample holder that is one-dimensional. E.g. a bar with a
set of samples lined up, or a holder with a set number of slots for holding
samples. This class thus helps to associate each sample with its position
on the bar.'''
# Core methods
########################################
def __init__(self, name='PositionalHolder', base=None, **kwargs):
super().__init__(name=name, base=base, **kwargs)
self._positional_axis = 'x'
self.GaragePosition=[]
# Sample management
########################################
def slot(self, sample_number):
'''Moves to the selected slot in the holder.'''
getattr(self, self._positional_axis+'abs')( self.get_slot_position(sample_number) )
def get_slot_position(self, slot):
'''Return the motor position for the requested slot number.'''
# This method should be over-ridden in sub-classes, so as to properly
# implement the positioning appropriate for that holder.
position = 0.0 + slot*1.0
return position
def addSampleSlot(self, sample, slot, detector_opt='SAXS'):
'''Adds a sample to the specified "slot" (defined/numbered sample
holding spot on this holder).'''
self.addSample(sample, sample_number=slot)
sample.setOrigin( [self._positional_axis], [self.get_slot_position(slot)] )
sample.detector=detector_opt
def addSampleSlotPosition(self, sample, slot, position, detector_opt='SAXS'):
'''Adds a sample to the specified "slot" (defined/numbered sample
holding spot on this holder).'''
self.addSample(sample, sample_number=slot)
sample.setOrigin( [self._positional_axis], [position] )
sample.detector=detector_opt
def listSamplesPositions(self):
'''Print a list of the current samples associated with this holder/
bar.'''
for sample_number, sample in self._samples.items():
#pos = getattr(sample, self._positional_axis+'pos')(verbosity=0)
pos = sample.origin(verbosity=0)[self._positional_axis]
print( '%s: %s (%s = %.3f)' % (str(sample_number), sample.name, self._positional_axis, pos) )
def listSamplesDetails(self):
'''Print a list of the current samples associated with this holder/
bar.'''
for sample_number, sample in self._samples.items():
#pos = getattr(sample, self._positional_axis+'pos')(verbosity=0)
pos = sample.origin(verbosity=0)[self._positional_axis]
print( '%s: %s (%s = %.3f) %s' % (str(sample_number), sample.name, self._positional_axis, pos, sample.detector) )
def addGaragePosition(self, shelf_num, spot_num):
'''the position in garage'''
if shelf_num not in range(1, 5) or spot_num not in range(1, 4):
print('Out of the range in Garage (4 x 3)')
self.GaragePosition=[shelf_num, spot_num]
class GIBar(PositionalHolder):
'''This class is a sample bar for grazing-incidence (GI) experiments.'''
# Core methods
########################################
def __init__(self, name='GIBar', base=None, **kwargs):
super().__init__(name=name, base=base, **kwargs)
self._positional_axis = 'x'
# Set the x and y origin to be the center of slot 8
self.xsetOrigin(-71.89405)
self.ysetOrigin(10.37925)
self.mark('right edge', x=+108.2)
self.mark('left edge', x=0)
self.mark('center', x=54.1, y=0)
def addSampleSlotPosition(self, sample, slot, position, detector_opt='SAXS', account_substrate=True):
'''Adds a sample to the specified "slot" (defined/numbered sample
holding spot on this holder).'''
super().addSampleSlotPosition(sample, slot, position)
# Adjust y-origin to account for substrate thickness
if account_substrate and 'substrate_thickness' in sample.md:
sample.ysetOrigin( -1.0*sample.md['substrate_thickness'] )
sample.detector=detector_opt
class CapillaryHolder(PositionalHolder):
'''This class is a sample holder that has 15 slots for capillaries.'''
# Core methods
########################################
def __init__(self, name='CapillaryHolder', base=None, **kwargs):
super().__init__(name=name, base=base, **kwargs)
self._positional_axis = 'x'
self.x_spacing = 6.342 # 3.5 inches / 14 spaces
# slot 1; smx = +26.60
# slot 8; smx = -17.80
# slot 15; smx = -61.94
# Set the x and y origin to be the center of slot 8
self.xsetOrigin(-17.49410+0.35)
self.ysetOrigin(-2.36985)
self.mark('right edge', x=+54.4)
self.mark('left edge', x=-54.4)
self.mark('bottom edge', y=-12.71)
self.mark('center', x=0, y=0)
def get_slot_position(self, slot):
'''Return the motor position for the requested slot number.'''
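        # Slots are evenly spaced by self.x_spacing along x, with slot 8 defined as the origin
        # (compare the reference smx values noted in __init__ above).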
return +1*self.x_spacing*(slot-8)
class CapillaryHolderHeated(CapillaryHolder):
def update_sample_names(self):
for sample in self.getSamples():
if 'temperature' not in sample.naming_scheme:
sample.naming_scheme.insert(-1, 'temperature')
def doHeatCool(self, heat_temps, cool_temps, exposure_time=None, stabilization_time=120, temp_tolerance=0.5, step=1):
if step<=1:
for temperature in heat_temps:
try:
self.setTemperature(temperature)
while self.temperature(verbosity=0) < temperature-temp_tolerance:
time.sleep(5)
time.sleep(stabilization_time)
for sample in self.getSamples():
sample.gotoOrigin()
sample.xr(-0.05)
sample.measure(exposure_time)
except HTTPError:
pass
if step<=5:
            for temperature in cool_temps:
                try:
                    self.setTemperature(temperature)
while self.temperature(verbosity=0) > temperature+temp_tolerance:
time.sleep(5)
time.sleep(stabilization_time)
for sample in self.getSamples():
sample.gotoOrigin()
sample.xr(0.1)
sample.measure(exposure_time)
except HTTPError:
pass
stg = SampleStage()
def get_default_stage():
return stg
if False:
# For testing:
# %run -i /opt/ipython_profiles/profile_collection/startup/94-sample.py
sam = SampleGISAXS_Generic('testing_of_code')
sam.mark('here')
#sam.mark('XY_field', 'x', 'y')
#sam.mark('specified', x=1, th=0.1)
#sam.naming(['name', 'extra', 'clock', 'th', 'exposure_time', 'id'])
#sam.thsetOrigin(0.5)
#sam.marks()
hol = CapillaryHolder(base=stg)
hol.addSampleSlot( SampleGISAXS_Generic('test_sample_01'), 1.0 )
hol.addSampleSlot( SampleGISAXS_Generic('test_sample_02'), 3.0 )
hol.addSampleSlot( SampleGISAXS_Generic('test_sample_03'), 5.0 )
sam = hol.getSample(1)
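    # Hypothetical continuation of this demo, for illustration only (never executed, since it sits under `if False:`):
    #sam.gotoOrigin()
    #sam.measureIncidentAngles([0.08, 0.12], exposure_time=1.0)
    #hol.doSamples()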
| yugangzhang/GitTest | CMS_Profile/94-sample.py | Python | bsd-3-clause | 105,895 | 0.016292 |
#!/usr/bin/python
# Python 2/3 compatibility boilerplate
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
# begin our implementation
from raffle import *
print(Fore.RED + 'Starting raffle.py.....')
run(['john', 'mary', 'rodrigo', 'jane', 'julie', 'michelle', 'goose', 'dan'])
| conradstorz/raffle | run_raffle.py | Python | apache-2.0 | 364 | 0.013736 |
# -*- coding: UTF-8 -*-
from datetime import date
import re
import pytest
from pyopenmensa.feed import LazyBuilder
@pytest.fixture
def canteen():
return LazyBuilder()
def test_date_converting(canteen):
day = date(2013, 3, 7)
assert canteen.dayCount() == 0
canteen.setDayClosed('2013-03-07')
assert canteen.dayCount() == 1
canteen.setDayClosed(day)
assert canteen.dayCount() == 1
canteen.setDayClosed('07.03.2013')
assert canteen.dayCount() == 1
def test_has_meals_for(canteen):
day = date(2013, 3, 7)
assert canteen.hasMealsFor(day) is False
canteen._days[day] = {'Hausgericht': ('Gulash', [], {})}
assert canteen.hasMealsFor(day) is True
canteen.setDayClosed(day)
assert canteen.hasMealsFor(day) is False
def test_add_meal(canteen):
day = date(2013, 3, 7)
canteen.addMeal(day, 'Hauptgericht', 'Gulasch')
assert canteen.hasMealsFor(day)
def test_to_long_meal_name(canteen):
day = date(2013, 3, 7)
canteen.addMeal(day, 'Hauptgericht', 'Y'*251)
canteen.hasMealsFor(day)
def test_caseinsensitive_notes(canteen):
day = date(2013, 3, 7)
canteen.legendKeyFunc = lambda v: v.lower()
canteen.setLegendData(legend={'f': 'Note'})
canteen.addMeal(day, 'Test', 'Essen(F)')
assert canteen._days[day]['Test'][0] == ('Essen', ['Note'], {})
def test_notes_regex(canteen):
day = date(2013, 3, 7)
canteen.extra_regex = re.compile('_([0-9]{1,3})_(?:: +)?', re.UNICODE)
canteen.setLegendData(legend={'2': 'Found Note'})
canteen.addMeal(day, 'Test', '_2_: Essen _a_, _2,2_, (2)')
assert canteen._days[day]['Test'][0] == ('Essen _a_, _2,2_, (2)', ['Found Note'], {})
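# A possible further test, sketched for illustration only and left commented out
# (it assumes addMeal accepts a prices dict, per pyopenmensa's documented API):
#def test_add_meal_with_prices(canteen):
#    day = date(2013, 3, 7)
#    canteen.addMeal(day, 'Hauptgericht', 'Gulasch', prices={'student': 205})
#    assert canteen.hasMealsFor(day)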
| mswart/pyopenmensa | tests/feed/test_lazy_canteen.py | Python | lgpl-3.0 | 1,685 | 0.000593 |
__recurse__ = False
__toc__ = [r'projexui.resources.rc.pyqt4_projexui_apps_rc',
r'projexui.resources.rc.pyqt4_projexui_default_rc',
r'projexui.resources.rc.pyqt4_projexui_styles_rc',
r'projexui.resources.rc.pyside_projexui_apps_rc',
r'projexui.resources.rc.pyside_projexui_default_rc',
r'projexui.resources.rc.pyside_projexui_styles_rc']
| bitesofcode/projexui | projexui/resources/rc/__plugins__.py | Python | lgpl-3.0 | 343 | 0.017493 |
a = float(input())
b = float(input())
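# Weighted average: grade a carries weight 3.5 and grade b carries weight 7.5, so the divisor is 3.5 + 7.5 = 11.0.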
print('MEDIA = {:.5f}'.format((a * 3.5 + b * 7.5) / 11.0))
| deniscostadsc/playground | solutions/beecrowd/1005/1005.py | Python | mit | 98 | 0 |
# coding: utf-8
# pylint: disable = invalid-name, C0111
import json
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
# load or create your dataset
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'auc'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
early_stopping_rounds=5)
print('Save model...')
# save model to file
gbm.save_model('model.txt')
print('Start predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
print('Dump model to JSON...')
# dump model to json (and save to file)
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
print('Calculate feature importances...')
# feature importances
print('Feature importances:', list(gbm.feature_importance()))
# print('Feature importances:', list(gbm.feature_importance("gain")))
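# Sketch (not part of the original example): the saved model can be reloaded later for prediction.
# bst = lgb.Booster(model_file='model.txt')
# y_pred_reloaded = bst.predict(X_test)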
| cbecker/LightGBM | examples/python-guide/simple_example.py | Python | mit | 1,762 | 0 |
# -*- coding: utf-8 -*-
import copy
from ruamel.yaml import YAML
from six import iteritems
_required = ['server']
class Config(object):
def __init__(self, configFile):
self.configFile = configFile
self._configData = {}
self.yaml = YAML()
self._inBaseConfig = []
def loadConfig(self):
configData = self._readConfig(self.configFile)
self._validate(configData)
self._configData = configData
def _readConfig(self, fileName):
try:
with open(fileName, mode='r') as config:
configData = self.yaml.load(config)
if not configData:
configData = {}
# if this is the base server config, store what keys we loaded
if fileName == self.configFile:
self._inBaseConfig = list(configData.keys())
except Exception as e:
raise ConfigError(fileName, e)
if 'import' not in configData:
return configData
for fname in configData['import']:
includeConfig = self._readConfig('configs/{}.yaml'.format(fname))
for key, val in iteritems(includeConfig):
# not present in base config, just assign it
if key not in configData:
configData[key] = val
continue
# skip non-collection types that are already set
if isinstance(configData[key], (str, int)):
continue
if isinstance(val, str):
raise ConfigError(fname, 'The included config file tried '
'to merge a non-string with a '
'string')
try:
iter(configData[key])
iter(val)
except TypeError:
# not a collection, so just don't merge them
pass
else:
try:
# merge with + operator
configData[key] += val
except TypeError:
# dicts can't merge with +
try:
for subKey, subVal in iteritems(val):
if subKey not in configData[key]:
configData[key][subKey] = subVal
except (AttributeError, TypeError):
# if either of these, they weren't both dicts.
raise ConfigError(fname, 'The variable {!r} could '
'not be successfully '
'merged'.format(key))
return configData
def writeConfig(self):
# filter the configData to only those keys
# that were present in the base server config,
# or have been modified at runtime
configData = copy.deepcopy(self._configData)
to_delete = set(configData.keys()).difference(self._inBaseConfig)
for key in to_delete:
del configData[key]
# write the filtered configData
try:
with open(self.configFile, mode='w') as config:
self.yaml.dump(configData, config)
except Exception as e:
raise ConfigError(self.configFile, e)
def getWithDefault(self, key, default=None):
if key in self._configData:
return self._configData[key]
return default
def _validate(self, configData):
for key in _required:
if key not in configData:
raise ConfigError(self.configFile, 'Required item {!r} was not found in the config.'.format(key))
def __len__(self):
return len(self._configData)
def __iter__(self):
return iter(self._configData)
def __getitem__(self, key):
return self._configData[key]
def __setitem__(self, key, value):
# mark this key to be saved in the server config
if key not in self._inBaseConfig:
self._inBaseConfig.append(key)
self._configData[key] = value
def __contains__(self, key):
return key in self._configData
class ConfigError(Exception):
def __init__(self, configFile, message):
self.configFile = configFile
self.message = message
def __str__(self):
return 'An error occurred while reading config file {}: {}'.format(self.configFile,
self.message)
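# Usage sketch (hypothetical file name, for illustration only; not part of the original module):
# config = Config('configs/server.yaml')
# config.loadConfig()  # raises ConfigError if the YAML cannot be read or 'server' is missing
# address = config['server']
# port = config.getWithDefault('port', 6667)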
| MatthewCox/PyMoronBot | pymoronbot/config.py | Python | mit | 4,668 | 0.000643 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import core
from ... import default_main_program
from ... import default_startup_program
from ... import framework
from ... import layers
from ... import program_guard
from ... import unique_name
from . import fp16_utils
from .fp16_utils import rewrite_program
from .fp16_utils import cast_model_to_fp16
from .fp16_utils import cast_parameters_to_fp16
from .fp16_utils import update_role_var_grad
from .fp16_lists import AutoMixedPrecisionLists
from .amp_nn import check_finite_and_unscale
from .amp_nn import update_loss_scaling
import types
import warnings
import paddle
__all__ = ["decorate"]
class OptimizerWithMixedPrecision(object):
"""
Optimizer with mixed-precision (MP) training. This is a wrapper of a common
optimizer, plus the support of mixed-precision pre-training. The object
of this class almost has the same behavior as the common optimizer, with the
methods `minimize()`, `backward()`, `apply_gradients()` implemented.
Additionally, it enables the MP training automatically, i.e, the creation
and maintenance of master parameters, scaling of loss, etc.
Args:
optimizer (Optimizer): A common Optimizer object.
amp_lists (CustomOpLists): An CustomOpLists object.
init_loss_scaling (float): The initial loss scaling factor.
use_dynamic_loss_scaling (bool): Whether to use dynamic loss scaling.
incr_every_n_steps(int): Increases loss scaling every n consecutive
steps with finite gradients.
decr_every_n_nan_or_inf(int): Decreases loss scaling every n
accumulated steps with nan or
inf gradients.
incr_ratio(float): The multiplier to use when increasing the loss
scaling.
decr_ratio(float): The less-than-one-multiplier to use when decreasing
the loss scaling.
use_pure_fp16(bool): Whether to use the pure fp16 training. Default False.
use_fp16_guard(bool): Whether to use `fp16_guard` when constructing the program.
Default None, which means that its value is equal to `use_pure_fp16`.
"""
def __init__(self, optimizer, amp_lists, init_loss_scaling,
use_dynamic_loss_scaling, incr_every_n_steps,
decr_every_n_nan_or_inf, incr_ratio, decr_ratio, use_pure_fp16,
use_fp16_guard):
self._optimizer = optimizer
self._amp_lists = amp_lists
self._param_grads = None
self._train_program = None
self._is_distributed = False
self._scaled_loss = None
self._loss_scaling = None
self._init_loss_scaling = init_loss_scaling
self._use_dynamic_loss_scaling = use_dynamic_loss_scaling
self._learning_rate = optimizer._learning_rate
self._learning_rate_map = optimizer._learning_rate_map
self._use_pure_fp16 = use_pure_fp16
self._use_fp16_guard = use_fp16_guard
self._to_fp16_var_names = None
if self._use_dynamic_loss_scaling:
self._incr_every_n_steps = incr_every_n_steps
self._decr_every_n_nan_or_inf = decr_every_n_nan_or_inf
self._incr_ratio = incr_ratio
self._decr_ratio = decr_ratio
self._num_good_steps = None
self._num_bad_steps = None
def _set_distributed(self, flag):
# if distributed, all cards will communication with each other,
# overlap communication and computation by split the
# check_finite_and_unscale op.
self._is_distributed = flag
def get_loss_scaling(self):
"""Return the real-time loss scaling factor.
"""
assert self._loss_scaling is not None, 'Please call minimize() before calling get_loss_scaling().'
return self._loss_scaling
def get_scaled_loss(self):
"""Return the scaled loss.
It's useful when you feed customed loss into executor.
"""
return self._scaled_loss
def _supports_check_nan_inf(self):
return getattr(self._optimizer, "_supports_check_nan_inf", False)
def _init_amp_var(self):
self._loss_scaling = layers.create_global_var(
name=unique_name.generate("loss_scaling"),
shape=[1],
value=self._init_loss_scaling,
dtype='float32',
persistable=True)
if self._use_dynamic_loss_scaling:
self._num_good_steps = layers.create_global_var(
name=unique_name.generate("num_good_steps"),
shape=[1],
value=0,
dtype='int32',
persistable=True)
self._num_bad_steps = layers.create_global_var(
name=unique_name.generate("num_bad_steps"),
shape=[1],
value=0,
dtype='int32',
persistable=True)
# Ensure the data type of learning rate vars is float32 (same as the
# master parameter dtype)
if isinstance(self._optimizer._learning_rate, float):
self._optimizer._learning_rate_map[default_main_program()] = \
layers.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[1],
value=float(self._optimizer._learning_rate),
dtype='float32',
persistable=True)
def backward(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None):
"""
Backward propagation or auto differentiation for gradients' computation.
Args:
loss (Variable): The loss Variable to minimize.
startup_program (Program|None): The startup Program for initializing
parameters in `parameter_list`.
parameter_list (list|None): A list of Variables to update.
no_grad_set (set|None): A set of Variables should be ignored.
callbacks (list|None): A list of callable objects to run when appending
backward operator for one parameter.
Returns:
A list of (param, grad), which is a tuple of a parameter and its
gradient respectively, and the scaled loss.
"""
train_program = loss.block.program
self._train_program = train_program
# NOTE(zhiqiu): _float_status is only used for NPU.
if core.is_compiled_with_npu():
float_status = paddle.static.data(
name="float_status", shape=[8], dtype='float32')
self._train_program.global_block().append_op(
type="alloc_float_status",
outputs={"FloatStatus": float_status}, )
self._train_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status}, )
self._float_status = float_status
else:
self._float_status = None
with program_guard(self._train_program, startup_program):
self._init_amp_var()
if self._use_pure_fp16:
self._to_fp16_var_names = cast_model_to_fp16(
self._train_program, self._amp_lists, self._use_fp16_guard)
else:
rewrite_program(self._train_program, self._amp_lists)
if loss.dtype != core.VarDesc.VarType.FP32:
loss = loss.astype('float32')
# When not using dynamic loss scaling and the init loss scaling value is equal to 1.0,
# the model can be optimized.
if self._use_dynamic_loss_scaling or self._init_loss_scaling != 1.0:
self._scaled_loss = loss * self._loss_scaling
else:
self._scaled_loss = loss
params_grads = self._optimizer.backward(
self._scaled_loss, startup_program, parameter_list, no_grad_set,
callbacks)
if self._supports_check_nan_inf():
self._add_cast_ops_to_startup_program(startup_program)
return params_grads
def _add_cast_ops_to_startup_program(self, startup_program):
names = list(self._to_fp16_var_names) if self._to_fp16_var_names else []
names.sort()
startup_program = default_startup_program(
) if startup_program is None else startup_program
block = startup_program.global_block()
param_names = [p.name for p in block.all_parameters()]
for name in names:
if name not in param_names:
continue
tmp = block.create_var(dtype=core.VarDesc.VarType.FP32)
block.append_op(
type='assign', inputs={'X': [name]}, outputs={'Out': [tmp]})
block.append_op(
type='cast',
inputs={'X': [tmp]},
outputs={'Out': [name]},
attrs={
'in_dtype': core.VarDesc.VarType.FP32,
'out_dtype': core.VarDesc.VarType.FP16,
})
self._to_fp16_var_names = None
def amp_init(self,
place,
scope=None,
test_program=None,
use_fp16_test=False):
"""
Init the amp training, such as cast fp32 parameters to fp16 type.
Args:
place(CUDAPlace): place is used to initialize
fp16 parameters with fp32 values.
scope(Scope): The scope is used to find fp32 parameters.
test_program(Program): The program is used for testing.
use_fp16_test(bool): Whether to use fp16 testing.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn.functional as F
paddle.enable_static()
def run_example_code():
place = paddle.CUDAPlace(0)
exe = paddle.static.Executor(place)
data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
# 1) Use fp16_guard to control the range of fp16 kernels used.
with paddle.static.amp.fp16_guard():
bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
pool = F.max_pool2d(bn, kernel_size=2, stride=2)
hidden = paddle.static.nn.fc(pool, size=10)
loss = paddle.mean(hidden)
# 2) Create the optimizer and set `multi_precision` to True.
# Setting `multi_precision` to True can avoid the poor accuracy
# or the slow convergence in a way.
optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
# 3) These ops in `custom_black_list` will keep in the float32 computation type.
amp_list = paddle.static.amp.CustomOpLists(
custom_black_list=['pool2d'])
# 4) The entry of Paddle AMP.
# Enable pure fp16 training by setting `use_pure_fp16` to True.
optimizer = paddle.static.amp.decorate(
optimizer,
amp_list,
init_loss_scaling=128.0,
use_dynamic_loss_scaling=True,
use_pure_fp16=True)
                # If you don't use the default_startup_program(), you should pass
# your defined `startup_program` into `minimize`.
optimizer.minimize(loss)
exe.run(paddle.static.default_startup_program())
# 5) Use `amp_init` after FP32 parameters initialization(such as `exe.run(startup_program)`).
# If you want to perform the testing process, you should pass `test_program` into `amp_init`.
optimizer.amp_init(place, scope=paddle.static.global_scope())
if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
run_example_code()
"""
assert self._train_program is not None, \
"Please call the minimize method first."
if self._use_pure_fp16:
cast_parameters_to_fp16(place, self._train_program, scope,
self._to_fp16_var_names)
if test_program is not None:
if self._use_pure_fp16:
cast_model_to_fp16(test_program, self._amp_lists,
self._use_fp16_guard)
elif use_fp16_test:
rewrite_program(test_program, self._amp_lists)
def apply_gradients(self, params_grads):
"""
Check scaled gradients to determine whether to update loss scaling and update
parameters by their scaled gradients.
Args:
params_grads (list): A list of params and scaled grads.
Returns:
A list of optimize operators.
"""
# Change the op_role_var attr for some ops, so that gradients
# transferred across GPUs can be FP16.
update_role_var_grad(self._train_program, params_grads)
# When not using dynamic loss scaling and the init loss scaling value is equal to 1.0,
# the model can be optimized.
if not self._use_dynamic_loss_scaling and self._init_loss_scaling == 1.0:
return self._optimizer.apply_gradients(params_grads)
if self._supports_check_nan_inf():
self._optimizer._set_scale(self._loss_scaling)
optimize_ops = self._optimizer.apply_gradients(params_grads)
found_inf = self._optimizer._found_inf
self._add_dynamic_loss_scaling(params_grads, found_inf)
return optimize_ops
found_inf = self._check_finite_and_unscale(params_grads)
if self._use_dynamic_loss_scaling:
self._add_dynamic_loss_scaling(params_grads, found_inf)
# Pass found_inf to adam, to skip update for not only param, but also momentum and beta_pow
# With fleet, optimizers are nested and the real optimizer set by user is the inner most one.
real_optimizer = self._optimizer
while hasattr(real_optimizer, "inner_opt"):
real_optimizer = real_optimizer.inner_opt
if isinstance(real_optimizer, (paddle.fluid.optimizer.Adam,
paddle.optimizer.AdamW)):
# NOTE(zhiqiu): Since found_inf needs to be on cpu in adam op, we
# copy it in advance to avoid multiple time copies.
with self._train_program._optimized_guard([]):
found_inf = paddle.tensor.creation._memcpy(found_inf,
paddle.CPUPlace())
real_optimizer._set_auxiliary_var('found_inf', found_inf)
elif hasattr(real_optimizer, "_set_auxiliary_var"):
real_optimizer._set_auxiliary_var('found_inf', found_inf)
optimize_ops = self._optimizer.apply_gradients(params_grads)
return optimize_ops
def _split_grads(self, params_grads):
grads = [g for _, g in params_grads]
fp32_grads = [g for g in grads if g.dtype == core.VarDesc.VarType.FP32]
fp16_grads = [g for g in grads if g.dtype == core.VarDesc.VarType.FP16]
assert len(fp32_grads) + len(fp16_grads) == len(grads), \
"Data types of all grads must be either fp16 or fp32."
return grads, fp32_grads, fp16_grads
def _check_finite_and_unscale(self, params_grads):
grads, fp32_grads, fp16_grads = self._split_grads(params_grads)
found_infs = []
if self._is_distributed:
# if distributed, split check_finite_and_unscale to overlap
# unscale with communication
if core.is_compiled_with_npu():
with self._train_program._optimized_guard(grads):
_, found_inf = check_finite_and_unscale(
grads,
self._loss_scaling,
name="find_infinite_scale",
float_status=self._float_status)
found_infs.append(found_inf)
else:
for p, g in params_grads:
with self._train_program._optimized_guard([p, g]):
_, found_inf = check_finite_and_unscale(
[g, ],
self._loss_scaling,
name="find_infinite_scale",
float_status=self._float_status)
found_infs.append(found_inf)
elif self._use_pure_fp16:
if fp32_grads:
with self._train_program._optimized_guard(fp32_grads):
_, fp32_found_inf = check_finite_and_unscale(
fp32_grads,
self._loss_scaling,
name="find_infinite_scale_fp32",
float_status=self._float_status)
found_infs.append(fp32_found_inf)
if fp16_grads:
with self._train_program._optimized_guard(fp16_grads):
_, fp16_found_inf = check_finite_and_unscale(
fp16_grads,
self._loss_scaling,
name="find_infinite_scale_fp16",
float_status=self._float_status)
found_infs.append(fp16_found_inf)
else:
with self._train_program._optimized_guard(grads):
_, found_inf = check_finite_and_unscale(
grads,
self._loss_scaling,
name="find_infinite_scale",
float_status=self._float_status)
if self._is_distributed or self._use_pure_fp16:
with self._train_program._optimized_guard([]):
all_infs = layers.concat(found_infs)
found_inf = layers.reduce_any(all_infs)
return found_inf
def _add_dynamic_loss_scaling(self, params_grads, found_inf):
if self._supports_check_nan_inf():
with self._train_program._optimized_guard([]):
update_loss_scaling(
[],
found_inf,
self._loss_scaling,
self._num_good_steps,
self._num_bad_steps,
self._incr_every_n_steps,
self._decr_every_n_nan_or_inf,
self._incr_ratio,
self._decr_ratio,
stop_update=False,
name="update_loss_scaling")
return
grads, fp32_grads, fp16_grads = self._split_grads(params_grads)
if self._use_pure_fp16:
stop_update = False
with self._train_program._optimized_guard([]):
if fp32_grads:
update_loss_scaling(
fp32_grads,
found_inf,
self._loss_scaling,
self._num_good_steps,
self._num_bad_steps,
self._incr_every_n_steps,
self._decr_every_n_nan_or_inf,
self._incr_ratio,
self._decr_ratio,
stop_update=stop_update,
name="update_loss_scaling_fp32")
stop_update = True
if fp16_grads:
update_loss_scaling(
fp16_grads,
found_inf,
self._loss_scaling,
self._num_good_steps,
self._num_bad_steps,
self._incr_every_n_steps,
self._decr_every_n_nan_or_inf,
self._incr_ratio,
self._decr_ratio,
stop_update=stop_update,
name="update_loss_scaling_fp16")
else:
with self._train_program._optimized_guard([]):
update_loss_scaling(
grads,
found_inf,
self._loss_scaling,
self._num_good_steps,
self._num_bad_steps,
self._incr_every_n_steps,
self._decr_every_n_nan_or_inf,
self._incr_ratio,
self._decr_ratio,
name="update_loss_scaling")
def apply_optimize(self, loss, startup_program, params_grads):
program = loss.block.program
with program_guard(program, startup_program):
optimize_ops = self.apply_gradients(params_grads)
return optimize_ops
def minimize(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
"""
Perform optimization by minimizing the given loss.
Args:
loss (Variable): The loss Variable.
startup_program (Program): startup_program for initializing parameters
in `parameter_list`.
parameter_list (list): list of Variables to update.
no_grad_set (set|None): set of Variables should be ignored.
Returns:
The list of optimize ops and a list of scaled parameters and
gradients; the scaled loss itself is available via ``get_scaled_loss()``.
"""
opt_dict = self._optimizer.__class__.__dict__
if 'minimize' in opt_dict and isinstance(opt_dict['minimize'],
types.FunctionType):
warnings.warn(
"The decorated optimizer has its own `minimize` method, but it will not be executed."
)
scaled_params_grads = self.backward(
loss,
startup_program=startup_program,
parameter_list=parameter_list,
no_grad_set=no_grad_set)
optimize_ops = self.apply_optimize(loss, startup_program,
scaled_params_grads)
return optimize_ops, scaled_params_grads
def decorate(optimizer,
amp_lists=None,
init_loss_scaling=2**15,
incr_every_n_steps=1000,
decr_every_n_nan_or_inf=2,
incr_ratio=2.0,
decr_ratio=0.8,
use_dynamic_loss_scaling=True,
use_pure_fp16=False,
use_fp16_guard=None):
"""
Decorate the given optimizer to adapt to the mixed-precision training.
Args:
optimizer(Optimizer): A common Optimizer.
amp_lists (CustomOpLists): A CustomOpLists object.
init_loss_scaling(float): The initial loss scaling factor.
incr_every_n_steps(int): Increases loss scaling every n consecutive
steps with finite gradients.
decr_every_n_nan_or_inf(int): Decreases loss scaling every n
accumulated steps with nan or
inf gradients.
incr_ratio(float): The multiplier to use when increasing the loss
scaling.
decr_ratio(float): The less-than-one-multiplier to use when decreasing
the loss scaling.
use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling.
use_pure_fp16(bool): Whether to use the pure fp16 training. Default False.
use_fp16_guard(bool): Whether to use `fp16_guard` when constructing the program.
Default None, which means it takes the same value as `use_pure_fp16`.
Returns:
An optimizer acting like a normal one but with mixed-precision training
enabled.
Examples 1:
.. code-block:: python
# black&white list based strategy example
import paddle
import paddle.static as static
paddle.enable_static()
data = static.data(name='X', shape=[None, 1], dtype='float32')
hidden = static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
optimizer = paddle.optimizer.Adam(learning_rate=0.001)
mp_optimizer = static.amp.decorate(
optimizer=optimizer, init_loss_scaling=8.0)
ops, param_grads = mp_optimizer.minimize(loss)
scaled_loss = mp_optimizer.get_scaled_loss()
Examples 2:
.. code-block:: python
# pure fp16 training example
import numpy as np
import paddle
import paddle.nn.functional as F
def run_example_code():
place = paddle.CUDAPlace(0)
exe = paddle.static.Executor(place)
data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
# 1) Use fp16_guard to control the range of fp16 kernels used.
with paddle.static.amp.fp16_guard():
bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
pool = F.max_pool2d(bn, kernel_size=2, stride=2)
hidden = paddle.static.nn.fc(pool, size=10)
loss = paddle.mean(hidden)
# 2) Create the optimizer and set `multi_precision` to True.
# Setting `multi_precision` to True helps avoid poor accuracy or
# slow convergence when training with pure fp16.
optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
# 3) The ops in `custom_black_list` will be kept in the float32 computation type.
amp_list = paddle.static.amp.CustomOpLists(
custom_black_list=['pool2d'])
# 4) The entry of Paddle AMP.
# Enable pure fp16 training by setting `use_pure_fp16` to True.
optimizer = paddle.static.amp.decorate(
optimizer,
amp_list,
init_loss_scaling=128.0,
use_dynamic_loss_scaling=True,
use_pure_fp16=True)
# If you don't use the default_startup_program(), you should pass
# your defined `startup_program` into `minimize`.
optimizer.minimize(loss)
exe.run(paddle.static.default_startup_program())
# 5) Use `amp_init` after the FP32 parameters have been initialized (such as by `exe.run(startup_program)`).
# If you want to perform the testing process, you should pass `test_program` into `amp_init`.
optimizer.amp_init(place, scope=paddle.static.global_scope())
if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
run_example_code()
"""
if amp_lists is None:
amp_lists = AutoMixedPrecisionLists()
if use_fp16_guard is None:
use_fp16_guard = use_pure_fp16
mp_optimizer = OptimizerWithMixedPrecision(
optimizer, amp_lists, init_loss_scaling, use_dynamic_loss_scaling,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
use_pure_fp16, use_fp16_guard)
return mp_optimizer
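# --- Illustrative sketch (not part of Paddle) ---------------------------------
# The dynamic loss-scaling policy configured above through `incr_every_n_steps`,
# `decr_every_n_nan_or_inf`, `incr_ratio` and `decr_ratio` behaves roughly like
# the pure-Python sketch below. It only mirrors the documented behaviour; it is
# not the actual `update_loss_scaling` op, and the absence of any clamp on the
# scale is an assumption made here for brevity.
def _loss_scaling_policy_sketch(found_inf, state):
    """Update a dict holding loss_scaling, good_steps and bad_steps."""
    if found_inf:
        state['good_steps'] = 0
        state['bad_steps'] += 1
        if state['bad_steps'] >= state['decr_every_n_nan_or_inf']:
            state['loss_scaling'] *= state['decr_ratio']  # shrink on repeated inf/nan
            state['bad_steps'] = 0
    else:
        state['bad_steps'] = 0
        state['good_steps'] += 1
        if state['good_steps'] >= state['incr_every_n_steps']:
            state['loss_scaling'] *= state['incr_ratio']  # grow after enough finite steps
            state['good_steps'] = 0
    return state

# Example state matching the defaults of decorate():
# {'loss_scaling': 2.0 ** 15, 'good_steps': 0, 'bad_steps': 0,
#  'incr_every_n_steps': 1000, 'decr_every_n_nan_or_inf': 2,
#  'incr_ratio': 2.0, 'decr_ratio': 0.8}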
|
PaddlePaddle/Paddle
|
python/paddle/fluid/contrib/mixed_precision/decorator.py
|
Python
|
apache-2.0
| 28,628 | 0.002655 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "resumeparser.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
jaffyadhav/django-resume-parser
|
manage.py
|
Python
|
unlicense
| 810 | 0 |
__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
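# --- Illustrative usage sketch (not part of the plugin) -----------------------
# configure() expects JBoxCfg.get('cloud_host') to return a mapping with the
# keys read above; the values below are hypothetical placeholders, not real
# JuliaBox settings.
CLOUD_HOST_EXAMPLE = {
    'install_id': 'my-gcp-project',     # Cloud DNS project id (placeholder)
    'region': 'juliabox-managed-zone',  # managed zone name (placeholder)
    'domain': 'juliabox.example.org',   # served domain (placeholder)
}

# With that configuration loaded and application-default Google credentials
# available, a per-session A record could be created and later removed like
# this (host name and address are illustrative only):
# JBoxGCD.add_cname('session-42.juliabox.example.org.', '203.0.113.10')
# JBoxGCD.delete_cname('session-42.juliabox.example.org.')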
|
JuliaLang/JuliaBox
|
engine/src/juliabox/plugins/dns_gcd/impl_gcd.py
|
Python
|
mit
| 2,592 | 0.002315 |
from sympy.core.evalf import PrecisionExhausted, complex_accuracy
from sympy import pi, I, Symbol, Add, Rational, exp, sqrt, sin, cos, \
fibonacci, Integral, oo, E, atan, log, integrate, floor, ceiling, \
factorial, binomial, Sum, zeta, Catalan, Pow, GoldenRatio, sympify, \
sstr, Function, Eq, Mul, Pow, Derivative
from sympy.mpmath.libmp.libmpf import from_float
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n')
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_evalf_helpers():
assert complex_accuracy((from_float(2.0),None,35,None)) == 35
assert complex_accuracy((from_float(2.0),from_float(10.0),35,100)) == 37
assert complex_accuracy((from_float(2.0),from_float(1000.0),35,100)) == 43
assert complex_accuracy((from_float(2.0),from_float(10.0),100,35)) == 35
assert complex_accuracy((from_float(2.0),from_float(1000.0),100,35)) == 35
def test_evalf_basic():
assert NS('pi',15) == '3.14159265358979'
assert NS('2/3',10) == '0.6666666667'
assert NS('355/113-pi',6) == '2.66764e-7'
assert NS('16*atan(1/5)-4*atan(1/239)', 15) == '3.14159265358979'
def test_cancellation():
assert NS(Add(pi,Rational(1,10**1000),-pi,evaluate=False),15,maxn=1200) == '1.00000000000000e-1000'
def test_evalf_powers():
assert NS('pi**(10**20)',10) == '1.339148777e+49714987269413385435'
assert NS(pi**(10**100),10) == ('4.946362032e+4971498726941338543512682882'
'9089887365167832438044244613405349992494711208'
'95526746555473864642912223')
assert NS('2**(1/10**50)',15) == '1.00000000000000'
assert NS('2**(1/10**50)-1',15) == '6.93147180559945e-51'
# Evaluation of Rump's ill-conditioned polynomial
def test_evalf_rump():
a = 1335*y**6/4+x**2*(11*x**2*y**2-y**6-121*y**4-2)+11*y**8/2+x/(2*y)
assert NS(a, 15, subs={x:77617, y:33096}) == '-0.827396059946821'
def test_evalf_complex():
assert NS('2*sqrt(pi)*I',10) == '3.544907702*I'
assert NS('3+3*I',15) == '3.00000000000000 + 3.00000000000000*I'
assert NS('E+pi*I',15) == '2.71828182845905 + 3.14159265358979*I'
assert NS('pi * (3+4*I)',15) == '9.42477796076938 + 12.5663706143592*I'
assert NS('I*(2+I)',15) == '-1.00000000000000 + 2.00000000000000*I'
#assert NS('(pi+E*I)*(E+pi*I)',15) in ('.0e-15 + 17.25866050002*I', '.0e-17 + 17.25866050002*I', '-.0e-17 + 17.25866050002*I')
assert NS('(pi+E*I)*(E+pi*I)',15,chop=True) == '17.2586605000200*I'
def test_evalf_complex_powers():
assert NS('(E+pi*I)**100000000000000000') == \
'-3.58896782867793e+61850354284995199 + 4.58581754997159e+61850354284995199*I'
# XXX: rewrite if a+a*I simplification introduced in sympy
#assert NS('(pi + pi*I)**2') in ('.0e-15 + 19.7392088021787*I', '.0e-16 + 19.7392088021787*I')
assert NS('(pi + pi*I)**2', chop=True) == '19.7392088021787*I'
assert NS('(pi + 1/10**8 + pi*I)**2') == '6.2831853e-8 + 19.7392088650106*I'
assert NS('(pi + 1/10**12 + pi*I)**2') == '6.283e-12 + 19.7392088021850*I'
#assert NS('(pi + pi*I)**4') == '-389.63636413601 + .0e-14*I'
assert NS('(pi + pi*I)**4', chop=True) == '-389.636364136010'
assert NS('(pi + 1/10**8 + pi*I)**4') == '-389.636366616512 + 2.4805021e-6*I'
assert NS('(pi + 1/10**12 + pi*I)**4') == '-389.636364136258 + 2.481e-10*I'
assert NS('(10000*pi + 10000*pi*I)**4', chop=True) == '-3.89636364136010e+18'
def test_evalf_exponentiation():
assert NS(sqrt(-pi)) == '1.77245385090552*I'
assert NS(Pow(pi*I, Rational(1,2), evaluate=False)) == '1.25331413731550 + 1.25331413731550*I'
assert NS(pi**I) == '0.413292116101594 + 0.910598499212615*I'
assert NS(pi**(E+I/3)) == '20.8438653991931 + 8.36343473930031*I'
assert NS((pi+I/3)**(E+I/3)) == '17.2442906093590 + 13.6839376767037*I'
assert NS(exp(pi)) == '23.1406926327793'
assert NS(exp(pi+E*I)) == '-21.0981542849657 + 9.50576358282422*I'
assert NS(pi**pi) == '36.4621596072079'
assert NS((-pi)**pi) == '-32.9138577418939 - 15.6897116534332*I'
assert NS((-pi)**(-pi)) == '-0.0247567717232697 + 0.0118013091280262*I'
# An example from Smith, "Multiple Precision Complex Arithmetic and Functions"
def test_evalf_complex_cancellation():
A = Rational('63287/100000')
B = Rational('52498/100000')
C = Rational('69301/100000')
D = Rational('83542/100000')
F = Rational('2231321613/2500000000')
# XXX: the number of returned mantissa digits in the real part could
# change with the implementation. What matters is that the returned digits are
# correct.
assert NS((A+B*I)*(C+D*I),6) == '6.44862e-6 + 0.892529*I'
assert NS((A+B*I)*(C+D*I),10) == '6.447099821e-6 + 0.8925286452*I'
assert NS((A+B*I)*(C+D*I) - F*I, 5) in ('6.4471e-6 - .0e-15*I', '6.4471e-6 + .0e-15*I')
def test_evalf_logs():
assert NS("log(3+pi*I)", 15) == '1.46877619736226 + 0.808448792630022*I'
assert NS("log(pi*I)", 15) == '1.14472988584940 + 1.57079632679490*I'
def test_evalf_trig():
assert NS('sin(1)',15) == '0.841470984807897'
assert NS('cos(1)',15) == '0.540302305868140'
assert NS('sin(10**-6)',15) == '9.99999999999833e-7'
assert NS('cos(10**-6)',15) == '0.999999999999500'
assert NS('sin(E*10**100)',15) == '0.409160531722613'
# Some input near roots
assert NS(sin(exp(pi*sqrt(163))*pi), 15) == '-2.35596641936785e-12'
assert NS(sin(pi*10**100 + Rational(7,10**5), evaluate=False), 15, maxn=120) == \
'6.99999999428333e-5'
assert NS(sin(Rational(7,10**5), evaluate=False), 15) == \
'6.99999999428333e-5'
# Check detection of various false identities
def test_evalf_near_integers():
# Binet's formula
f = lambda n: ((1+sqrt(5))**n)/(2**n * sqrt(5))
assert NS(f(5000) - fibonacci(5000), 10, maxn=1500) == '5.156009964e-1046'
# Some near-integer identities from
# http://mathworld.wolfram.com/AlmostInteger.html
assert NS('sin(2017*2**(1/5))',15) == '-1.00000000000000'
assert NS('sin(2017*2**(1/5))',20) == '-0.99999999999999997857'
assert NS('1+sin(2017*2**(1/5))',15) == '2.14322287389390e-17'
assert NS('45 - 613*E/37 + 35/991', 15) == '6.03764498766326e-11'
def test_evalf_ramanujan():
assert NS(exp(pi*sqrt(163)) - 640320**3 - 744, 10) == '-7.499274028e-13'
# A related identity
A = 262537412640768744*exp(-pi*sqrt(163))
B = 196884*exp(-2*pi*sqrt(163))
C = 103378831900730205293632*exp(-3*pi*sqrt(163))
assert NS(1-A-B+C,10) == '1.613679005e-59'
# Input that for various reasons have failed at some point
def test_evalf_bugs():
assert NS(sin(1)+exp(-10**10),10) == NS(sin(1),10)
assert NS(exp(10**10)+sin(1),10) == NS(exp(10**10),10)
assert NS('log(1+1/10**50)',20) == '1.0000000000000000000e-50'
assert NS('log(10**100,10)',10) == '100.0000000'
assert NS('log(2)',10) == '0.6931471806'
assert NS('(sin(x)-x)/x**3', 15, subs={x:'1/10**50'}) == '-0.166666666666667'
assert NS(sin(1)+Rational(1,10**100)*I,15) == '0.841470984807897 + 1.00000000000000e-100*I'
assert x.evalf() == x
assert NS((1+I)**2*I,6) == '-2.00000 + 2.32831e-10*I'
d={n: (-1)**Rational(6,7), y: (-1)**Rational(4,7), x: (-1)**Rational(2,7)}
assert NS((x*(1+y*(1 + n))).subs(d).evalf(),6) == '0.346011 + 0.433884*I'
assert NS(((-I-sqrt(2)*I)**2).evalf()) == '-5.82842712474619'
assert NS((1+I)**2*I,15) == '-2.00000000000000 + 2.16840434497101e-19*I'
#1659 (1/2):
assert NS(pi.evalf(69) - pi) == '-4.43863937855894e-71'
#1659 (2/2): With the bug present, this still only fails if the
# terms are in the order given here. This is not generally the case,
# because the order depends on the hashes of the terms.
assert NS(20 - 5008329267844*n**25 - 477638700*n**37 - 19*n,
subs={n:.01}) == '19.8100000000000'
def test_evalf_integer_parts():
a = floor(log(8)/log(2) - exp(-1000), evaluate=False)
b = floor(log(8)/log(2), evaluate=False)
raises(PrecisionExhausted, "a.evalf()")
assert a.evalf(chop=True) == 3
assert a.evalf(maxn=500) == 2
raises(PrecisionExhausted, "b.evalf()")
raises(PrecisionExhausted, "b.evalf(maxn=500)")
assert b.evalf(chop=True) == 3
assert int(floor(factorial(50)/E,evaluate=False).evalf()) == \
11188719610782480504630258070757734324011354208865721592720336800L
assert int(ceiling(factorial(50)/E,evaluate=False).evalf()) == \
11188719610782480504630258070757734324011354208865721592720336801L
assert int(floor((GoldenRatio**999 / sqrt(5) + Rational(1,2))).evalf(1000)) == fibonacci(999)
assert int(floor((GoldenRatio**1000 / sqrt(5) + Rational(1,2))).evalf(1000)) == fibonacci(1000)
def test_evalf_trig_zero_detection():
a = sin(160*pi, evaluate=False)
t = a.evalf(maxn=100)
assert abs(t) < 1e-100
assert t._prec < 2
assert a.evalf(chop=True) == 0
raises(PrecisionExhausted, "a.evalf(strict=True)")
def test_evalf_divergent_series():
n = Symbol('n', integer=True)
raises(ValueError, 'Sum(1/n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(n/(n**2+1), (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-1)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-1)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(n**2, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(2**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-2)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((2*n+3)/(3*n**2+4), (n,0, oo)).evalf()')
raises(ValueError, 'Sum((0.5*n**3)/(n**4+1),(n,0,oo)).evalf()')
def test_evalf_py_methods():
assert abs(float(pi+1) - 4.1415926535897932) < 1e-10
assert abs(complex(pi+1) - 4.1415926535897932) < 1e-10
assert abs(complex(pi+E*I) - (3.1415926535897931+2.7182818284590451j)) < 1e-10
raises(ValueError, "float(pi+x)")
raises(ValueError, "complex(pi+x)")
def test_evalf_power_subs_bugs():
assert (x**2).evalf(subs={x:0}) == 0
assert sqrt(x).evalf(subs={x:0}) == 0
assert (x**Rational(2,3)).evalf(subs={x:0}) == 0
assert (x**x).evalf(subs={x:0}) == 1
assert (3**x).evalf(subs={x:0}) == 1
assert exp(x).evalf(subs={x:0}) == 1
assert ((2+I)**x).evalf(subs={x:0}) == 1
assert (0**x).evalf(subs={x:0}) == 1
def test_evalf_arguments():
raises(TypeError, 'pi.evalf(method="garbage")')
def test_implemented_function_evalf():
from sympy.utilities.lambdify import implemented_function
f = Function('f')
x = Symbol('x')
f = implemented_function(f, lambda x: x + 1)
assert str(f(x)) == "f(x)"
assert str(f(2)) == "f(2)"
assert f(2).evalf() == 3
assert f(x).evalf() == f(x)
del f._imp_ # XXX: due to caching _imp_ would influence all other tests
def test_evaluate_false():
for no in [[], 0, False, None]:
assert Add(3, 2, evaluate=no).is_Add
assert Mul(3, 2, evaluate=no).is_Mul
assert Pow(3, 2, evaluate=no).is_Pow
assert Pow(y, 2, evaluate=True) - Pow(y, 2, evaluate=True) == 0
def test_evalf_relational():
assert Eq(x/5, y/10).evalf() == Eq(0.2*x, 0.1*y)
def test_issue_2387():
assert not cos(sqrt(0.5 + I)).n().is_Function
def test_issue_2387_bug():
from sympy import I, Expr
assert abs(Expr._from_mpmath(I._to_mpmath(15), 15) - I) < 1.0e-15
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/core/tests/test_evalf.py
|
Python
|
agpl-3.0
| 11,270 | 0.011713 |
#!/usr/bin/python3
"""
Script to check recently uploaded files.
This script checks if a file description is present and if there are other
problems in the image's description.
This script will have to be configured for each language. Please submit
translations as addition to the Pywikibot framework.
Everything that needs customisation is indicated by comments.
This script understands the following command-line arguments:
-limit The number of images to check (default: 80)
-commons The bot will check if an image on Commons has the same name
and, if so, it reports the image.
-duplicates[:#] Check if the image has duplicates (if an argument is given,
set how many rollbacks to wait before reporting the image in
the report instead of tagging it) default: 1 rollback.
-duplicatesreport Report the duplicates in a log *AND* put the template in
the images.
-maxusernotify Maximum notifications added to a user talk page in a single
check, to avoid email spamming.
-sendemail Send an email after tagging.
-break To break the bot after the first check (default: recursive)
-sleep[:#] Time in seconds between repeat runs (default: 30)
-wait[:#] Wait x second before check the images (default: 0)
-skip[:#] The bot skips the first [:#] images (default: 0)
-start[:#] Use allimages() as generator
(it starts already from File:[:#])
-cat[:#] Use a category as generator
-regex[:#] Use regex, must be used with -url or -page
-page[:#] Define the name of the wikipage where the images are
-url[:#] Define the url where the images are
-nologerror If given, this option will disable the error that is raised
when the log is full.
Instructions for the real-time settings.
For every new block you have to add:
<------- ------->
In this way the bot can understand where the block starts in order to take the
right parameter.
* Name= Set the name of the block
* Find= search this text in the image's description
* Findonly= search for exactly this text in the image's description
* Summary= That's the summary that the bot will use when it notifies the
problem.
* Head= That's the incipit that the bot will use for the message.
* Text= This is the template that the bot will use when it will report the
image's problem.
Todo
----
* Clean the code, some passages are pretty difficult to understand.
* Add the "catch the language" function for commons.
* Fix and reorganise the new documentation
* Add a report for the image tagged.
"""
#
# (C) Pywikibot team, 2006-2022
#
# Distributed under the terms of the MIT license.
#
import collections
import re
import time
from typing import Generator
import pywikibot
from pywikibot import config, i18n
from pywikibot import pagegenerators as pg
from pywikibot.backports import List, Tuple
from pywikibot.bot import suggest_help
from pywikibot.exceptions import (
EditConflictError,
Error,
IsRedirectPageError,
LockedPageError,
NoPageError,
NotEmailableError,
PageRelatedError,
PageSaveRelatedError,
ServerError,
TranslationError,
)
from pywikibot.family import Family
from pywikibot.site import Namespace
###############################################################################
# <--------------------------- Change only below! --------------------------->#
###############################################################################
# NOTE: in the messages used by the bot, if you put __botnick__ in the text it
# will automatically be replaced with the bot's nickname.
# This is what will be added (i.e. the {{no source}} template with the
# right day/month/year).
N_TXT = {
'commons': '{{subst:nld}}',
'meta': '{{No license}}',
'test': '{{No license}}',
'ar': '{{subst:ملم}}',
'arz': '{{subst:ملم}}',
'de': '{{Dateiüberprüfung}}',
'en': '{{subst:nld}}',
'fa': '{{subst:حق تکثیر تصویر نامعلوم}}',
'fr': '{{subst:lid}}',
'ga': '{{subst:Ceadúnas de dhíth}}',
'hr': '{{Bez licence}}',
'hu': '{{nincslicenc|~~~~~}}',
'it': '{{subst:unverdata}}',
'ja': '{{subst:Nld}}',
'ko': '{{subst:nld}}',
'ru': '{{subst:nld}}',
'sd': '{{subst:اجازت نامعلوم}}',
'sr': '{{subst:датотека без лиценце}}',
'ta': '{{subst:nld}}',
'ur': '{{subst:حقوق نسخہ تصویر نامعلوم}}',
'zh': '{{subst:No license/auto}}',
}
# Text that the bot will look for to see whether a tag is already present.
# If an entry starts with a {{ a regex is used to make a better check
# (see the sketch after this dict for one possible implementation).
# This works like so:
# '{{no license' --> '\{\{(?:template:)?no[ _]license ?(?:\||\n|\}|/) ?' (case
# insensitive).
# If an entry doesn't start with a {{ it is used as a plain substring test (if x in Text)
TXT_FIND = {
'commons': ['{{no license', '{{no license/en',
'{{nld', '{{no permission', '{{no permission since'],
'meta': ['{{no license', '{{nolicense', '{{nld'],
'test': ['{{no license'],
'ar': ['{{لت', '{{لا ترخيص'],
'arz': ['{{nld', '{{no license'],
'de': ['{{DÜP', '{{Düp', '{{Dateiüberprüfung'],
'en': ['{{nld', '{{no license'],
'fa': ['{{حق تکثیر تصویر نامعلوم۲'],
'ga': ['{{Ceadúnas de dhíth', '{{Ceadúnas de dhíth'],
'hr': ['{{bez licence'],
'hu': ['{{nincsforrás', '{{nincslicenc'],
'it': ['{{unverdata', '{{unverified'],
'ja': ['{{no source', '{{unknown',
'{{non free', '<!--削除についての議論が終了するまで'],
'ko': ['{{출처 없음', '{{라이선스 없음', '{{Unknown'],
'ru': ['{{no license'],
'sd': ['{{ناحوالا', '{{ااجازت نامعلوم', '{{Di-no'],
'sr': ['{{датотека без лиценце', '{{датотека без извора'],
'ta': ['{{no source', '{{nld', '{{no license'],
'ur': ['{{ناحوالہ', '{{اجازہ نامعلوم', '{{Di-no'],
'zh': ['{{no source', '{{unknown', '{{No license'],
}
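# --- Illustrative sketch (not the bot's actual helper) ------------------------
# One way the conversion described above TXT_FIND could be implemented; it
# assumes the entries contain no regex metacharacters besides the leading
# braces.
def _find_matcher_sketch(entry):
    """Return a predicate implementing the '{{...' -> regex rule above."""
    if entry.startswith('{{'):
        body = entry[2:].replace(' ', '[ _]')
        pattern = r'\{\{(?:template:)?' + body + r' ?(?:\||\n|\}|/) ?'
        return lambda text: re.search(pattern, text, re.IGNORECASE) is not None
    return lambda text: entry in text

# _find_matcher_sketch('{{no license')('... {{No license}} ...')  -> True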
# When the bot finds that the user talk page is empty, it is not nice to put
# only the no-source notice without a welcome message, is it?
EMPTY = {
'commons': '{{subst:welcome}}\n~~~~\n',
'meta': '{{subst:Welcome}}\n~~~~\n',
'ar': '{{subst:أهلا ومرحبا}}\n~~~~\n',
'arz': '{{subst:اهلا و سهلا}}\n~~~~\n',
'de': '{{subst:willkommen}} ~~~~',
'en': '{{subst:welcome}}\n~~~~\n',
'fa': '{{subst:خوشامدید|%s}}',
'fr': '{{Bienvenue nouveau\n~~~~\n',
'ga': '{{subst:Fáilte}} - ~~~~\n',
'hr': '{{subst:dd}}--~~~~\n',
'hu': '{{subst:Üdvözlet|~~~~}}\n',
'it': '<!-- inizio template di benvenuto -->\n{{subst:Benvebot}}\n~~~~\n'
'<!-- fine template di benvenuto -->',
'ja': '{{subst:Welcome/intro}}\n{{subst:welcome|--~~~~}}\n',
'ko': '{{환영}}--~~~~\n',
'ru': '{{subst:Приветствие}}\n~~~~\n',
'sd': '{{ڀليڪار}}\n~~~~\n',
'sr': '{{dd}}--~~~~\n',
'ta': '{{welcome}}\n~~~~\n',
'ur': '{{خوش آمدید}}\n~~~~\n',
'zh': '{{subst:welcome|sign=~~~~}}',
}
# If the file has an unknown extension it will be tagged with this template.
# In reality there are no truly unknown extensions, only extensions that are not allowed...
DELETE_IMMEDIATELY = {
'commons': '{{speedy|The file has .%s as extension. '
'Is it ok? Please check.}}',
'meta': '{{Delete|The file has .%s as extension.}}',
'ar': '{{شطب|الملف له .%s كامتداد.}}',
'arz': '{{مسح|الملف له .%s كامتداد.}}',
'en': '{{db-meta|The file has .%s as extension.}}',
'fa': '{{حذف سریع|تصویر %s اضافی است.}}',
'ga': '{{scrios|Tá iarmhír .%s ar an comhad seo.}}',
'hu': '{{azonnali|A fájlnak .%s a kiterjesztése}}',
'it': '{{cancella subito|motivo=Il file ha come estensione ".%s"}}',
'ja': '{{db|知らないファイルフォーマット %s}}',
'ko': '{{delete|잘못된 파일 형식 (.%s)}}',
'ru': '{{db-badimage}}',
'sr': '{{speedy|Ова датотека садржи екстензију %s. '
'Молим вас да проверите да ли је у складу са правилима.}}',
'ta': '{{delete|'
'இந்தக் கோப்பு .%s என்றக் கோப்பு நீட்சியைக் கொண்டுள்ளது.}}',
'ur': '{{سریع حذف شدگی|اس ملف میں .%s بطور توسیع موجود ہے۔ }}',
'zh': '{{delete|未知檔案格式%s}}',
}
# That's the text that the bot will add if it doesn't find the license.
# Note: every __botnick__ will be replaced with your bot's nickname
# (feel free not to use it if you don't need it)
NOTHING_NOTIFICATION = {
'commons': "\n{{subst:User:Filnik/untagged|File:%s}}\n\n''This message "
"was '''added automatically by ~~~''', if you need "
'some help about it, please read the text above again and '
'follow the links in it, if you still need help ask at the '
'[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]] '
"'''[[Commons:Help desk|->]][[Commons:Help desk]]''' in any "
"language you like to use.'' --~~~~",
'meta': '{{subst:No license notice|File:%s}}',
'ar': '{{subst:مصدر الملف|File:%s}} --~~~~',
'arz': '{{subst:file source|File:%s}} --~~~~',
'en': '{{subst:file source|File:%s}} --~~~~',
'fa': '{{subst:اخطار نگاره|%s}}',
'ga': '{{subst:Foinse na híomhá|File:%s}} --~~~~',
'hu': '{{subst:adjforrást|Kép:%s}}\n Ezt az üzenetet ~~~ automatikusan '
'helyezte el a vitalapodon, kérdéseddel fordulj a gazdájához, vagy '
'a [[WP:KF|Kocsmafalhoz]]. --~~~~',
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza licenza|'
'%s|~~~}} --~~~~',
'ja': '\n{{subst:Image copyright|File:%s}}--~~~~',
'ko': '\n{{subst:User:Kwjbot IV/untagged|%s}} --~~~~',
'ru': '{{subst:Запрос о статусе файла|Файл:%s}} --~~~~',
'sr': '\n{{subst:Обавештење о датотеци без лиценце|%s}} --~~~~',
'sd': '{{subst:تصوير جو ذريعو|File:%s}}--~~~~',
'ta': '\n{{subst:Di-no license-notice|படிமம்:%s}} ~~~~',
'ur': '{{subst:ماخذ تصویر|File:%s}}--~~~~',
'zh': '\n{{subst:Uploadvionotice|File:%s}} ~~~~',
}
# This is a list of the bots that have used this script on your project.
# NOTE: YOUR bot username will be automatically added.
BOT_LIST = {
'commons': ['Siebot', 'CommonsDelinker', 'Filbot', 'Sz-iwbot',
'ABFbot'],
'meta': ['MABot'],
'ar': ['MenoBot'],
'arz': ['MenoBot'],
'de': ['Xqbot'],
'en': ['OrphanBot'],
'fa': ['Amirobot'],
'ga': ['AllieBot'],
'it': ['Filbot', 'Nikbot', '.snoopybot.'],
'ja': ['Alexbot'],
'ko': ['Kwjbot IV'],
'ru': ['Rubinbot'],
'sr': ['KizuleBot'],
'ta': ['TrengarasuBOT'],
'ur': ['Shuaib-bot', 'Tahir-bot', 'SAMI.Bot'],
'zh': ['Alexbot'],
}
# The message that the bot will add the second time it finds another license
# problem.
SECOND_MESSAGE_WITHOUT_LICENSE = {
'hu': '\nSzia! Úgy tűnik a [[:Kép:%s]] képpel is hasonló a probléma, '
'mint az előbbivel. Kérlek olvasd el a [[WP:KÉPLIC|feltölthető '
'képek]]ről szóló oldalunk, és segítségért fordulj a [[WP:KF-JO|'
'Jogi kocsmafalhoz]]. Köszönöm --~~~~',
'it': ':{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza'
'licenza2|%s|~~~}} --~~~~',
}
# You can add some settings to a wiki page. In this way, you can change them
# without touching the code. That's useful if you are running the bot on
# Toolserver.
PAGE_WITH_SETTINGS = {
'commons': 'User:Filbot/Settings',
'it': 'Progetto:Coordinamento/Immagini/Bot/Settings#Settings',
'sr': 'User:KizuleBot/checkimages.py/подешавања',
'zh': 'User:Alexbot/cisettings#Settings',
}
# The bot can report some images (like the images that have the same name as an
# image on Commons). This is the page where the bot will store them.
REPORT_PAGE = {
'commons': 'User:Filbot/Report',
'meta': 'User:MABot/Report',
'test': 'User:Pywikibot-test/Report',
'ar': 'User:MenoBot/Report',
'arz': 'User:MenoBot/Report',
'de': 'Benutzer:Xqbot/Report',
'en': 'User:Filnik/Report',
'fa': 'کاربر:Amirobot/گزارش تصویر',
'ga': 'User:AllieBot/ReportImages',
'hu': 'User:Bdamokos/Report',
'it': 'Progetto:Coordinamento/Immagini/Bot/Report',
'ja': 'User:Alexbot/report',
'ko': 'User:Kwjbot IV/Report',
'ru': 'User:Rubinbot/Report',
'sd': 'واپرائيندڙ:Kaleem Bhatti/درخواست تصوير',
'sr': 'User:KizuleBot/checkimages.py/дневник',
'ta': 'User:Trengarasu/commonsimages',
'ur': 'صارف:محمد شعیب/درخواست تصویر',
'zh': 'User:Alexsh/checkimagereport',
}
# If a template isn't a license but is included on a lot of images, it can
# be skipped so that the image is analyzed without taking it into account
# (the template must be in a list).
# Warning: Don't add templates like "en, de, it" because they are already in
# (added in the code, below).
# Warning 2: The bot will use regex, so please make the names compatible (don't
# add "Template:" or "{{" because they are already put in the regex).
# Warning 3: the part that uses this regex is case-insensitive (just to let you
# know..)
HIDDEN_TEMPLATE = {
# Put the other in the page on the project defined below
'commons': ['Template:Information'],
'meta': ['Template:Information'],
'test': ['Template:Information'],
'ar': ['Template:معلومات'],
'arz': ['Template:معلومات'],
'de': ['Template:Information'],
'en': ['Template:Information'],
'fa': ['الگو:اطلاعات'],
'fr': ['Template:Information'],
'ga': ['Template:Information'],
'hr': ['Template:Infoslika'],
'hu': ['Template:Információ', 'Template:Enwiki', 'Template:Azonnali'],
'it': ['Template:EDP', 'Template:Informazioni file',
'Template:Information', 'Template:Trademark',
'Template:Permissionotrs'],
'ja': ['Template:Information'],
'ko': ['Template:그림 정보'],
'ru': ['Template:Изображение',
'Template:Обоснование добросовестного использования'],
'sd': ['Template:معلومات'],
'sr': ['Шаблон:Информација', 'Шаблон:Non-free use rationale 2'],
'ta': ['Template:Information'],
'ur': ['Template:معلومات'],
'zh': ['Template:Information'],
}
# A page where there's a list of templates to skip.
PAGE_WITH_HIDDEN_TEMPLATES = {
'commons': 'User:Filbot/White_templates#White_templates',
'it': 'Progetto:Coordinamento/Immagini/Bot/WhiteTemplates',
'ko': 'User:Kwjbot_IV/whitetemplates/list',
'sr': 'User:KizuleBot/checkimages.py/дозвољенишаблони',
}
# A page where there's a list of templates to consider as licenses.
PAGE_WITH_ALOWED_TEMPLATES = {
'commons': 'User:Filbot/Allowed templates',
'de': 'Benutzer:Xqbot/Lizenzvorlagen',
'it': 'Progetto:Coordinamento/Immagini/Bot/AllowedTemplates',
'ko': 'User:Kwjbot_IV/AllowedTemplates',
'sr': 'User:KizuleBot/checkimages.py/дозвољенишаблони',
}
# Template added when the bot finds only a hidden template and nothing else.
# Note: every __botnick__ will be replaced with your bot's nickname
# (feel free not to use it if you don't need it)
HIDDEN_TEMPALTE_NOTIFICATION = {
'commons': ("\n{{subst:User:Filnik/whitetemplate|File:%s}}\n\n''This "
'message was added automatically by ~~~, if you need '
'some help about it please read the text above again and '
'follow the links in it, if you still need help ask at the '
'[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]]'
" '''[[Commons:Help desk|→]] [[Commons:Help desk]]''' in any "
"language you like to use.'' --~~~~"),
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/'
'Template_insufficiente|%s|~~~}} --~~~~',
'ko': '\n{{subst:User:Kwj2772/whitetemplates|%s}} --~~~~',
}
# In this part there are the parameters for the dupe images.
# Put here the template that you want to add to the image to warn that it's a
# dupe. Use __image__ if you want only one image, __images__ if you want the
# whole list (see the small illustration after this dict).
DUPLICATES_TEXT = {
'commons': '\n{{Dupe|__image__}}',
'de': '{{NowCommons}}',
'it': '\n{{Progetto:Coordinamento/Immagini/Bot/Template duplicati|'
'__images__}}',
'ru': '{{NCT|__image__}}',
'sr': '{{NowCommons|__image__}}',
}
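# --- Illustrative note (made-up title) -----------------------------------------
# __image__ is replaced with a single file title, __images__ with a
# pre-formatted list of titles, for example:
DUPLICATES_EXAMPLE = DUPLICATES_TEXT['commons'].replace(
    '__image__', 'File:Example.jpg')   # -> '\n{{Dupe|File:Example.jpg}}'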
# Message to put in the talk
DUPLICATES_USER_TALK_TEXT = {
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Duplicati|'
'%s|%s|~~~}} --~~~~',
}
# Regex to detect the template put in the image's description to find the dupe
DUPLICATES_REGEX = {
'commons': r'\{\{(?:[Tt]emplate:|)(?:[Dd]up(?:licat|)e|[Bb]ad[ _][Nn]ame)'
r'[|}]',
'de': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}',
'it': r'\{\{(?:[Tt]emplate:|)[Pp]rogetto:[Cc]oordinamento/Immagini/Bot/'
r'Template duplicati[|}]',
'sr': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}',
}
# Category with the licenses and / or with subcategories with the other
# licenses.
CATEGORY_WITH_LICENSES = {
'commons': 'Category:License tags',
'meta': 'Category:License templates',
'test': 'Category:CC license tags',
'ar': 'تصنيف:قوالب حقوق الصور',
'arz': 'تصنيف:Wikipedia image copyright templates',
'de': 'Kategorie:Vorlage:Lizenz für Bilder',
'en': 'Category:Wikipedia file copyright templates',
'fa': 'رده:الگو:حق تکثیر پرونده',
'ga': "Catagóir:Clibeanna cóipchirt d'íomhánna",
'it': 'Categoria:Template Licenze copyright',
'ja': 'Category:画像の著作権表示テンプレート',
'ko': '분류:위키백과 그림 저작권 틀',
'ru': 'Category:Шаблоны:Лицензии файлов',
'sd': 'زمرو:وڪيپيڊيا فائل ڪاپي رائيٽ سانچا',
'sr': 'Категорија:Шаблони за слике',
'ta': 'Category:காப்புரிமை வார்ப்புருக்கள்',
'ur': 'زمرہ:ویکیپیڈیا سانچہ جات حقوق تصاویر',
'zh': 'Category:版權申告模板',
}
# Page where the message to send to the users as email is stored
EMAIL_PAGE_WITH_TEXT = {
# 'de': 'Benutzer:ABF/D3',
}
# Title of the email
EMAIL_SUBJECT = {
# 'de': 'Problemen mit Deinem Bild auf der Deutschen Wikipedia',
}
# It seems that uploader bots aren't interested in getting messages regarding
# the files that they upload.. strange, uh?
# Format: [[user, regex], [user, regex]...]; the regex is needed to find the user
# to whom the warning message should be sent
UPLOAD_BOTS = {
'commons': [['File Upload Bot (Magnus Manske)',
r'\|[Ss]ource=Transferred from .*?; '
r'transferred to Commons by \[\[User:(.*?)\]\]']],
}
# Service images that don't have to be deleted and/or reported have a template
# inside them (you can leave this param as None)
SERVICE_TEMPLATES = {
'it': ['Template:Immagine di servizio'],
}
# Add your project (in alphabetical order) if you want the bot to run on it
PROJECT_INSERTED = ['ar', 'arz', 'commons', 'de', 'en', 'fa', 'ga', 'hu', 'it',
'ja', 'ko', 'ru', 'meta', 'sd', 'sr', 'ta', 'test', 'ur',
'zh']
# END OF CONFIGURATION.
SETTINGS_REGEX = re.compile(r"""
<-------\ ------->\n
\*[Nn]ame\ ?=\ ?['"](.*?)['"]\n
\*([Ff]ind|[Ff]indonly)\ ?=\ ?(.*?)\n
\*[Ii]magechanges\ ?=\ ?(.*?)\n
\*[Ss]ummary\ ?=\ ?['"](.*?)['"]\n
\*[Hh]ead\ ?=\ ?['"](.*?)['"]\n
\*[Tt]ext\ ?=\ ?['"](.*?)['"]\n
\*[Mm]ex\ ?=\ ?['"]?([^\n]*?)['"]?\n
""", re.DOTALL | re.VERBOSE)
class LogIsFull(Error):
"""Log is full and the bot cannot add other data to prevent Errors."""
def print_with_time_zone(message) -> None:
"""Print the messages followed by the TimeZone encoded correctly."""
time_zone = time.strftime('%d %b %Y %H:%M:%S (UTC)', time.gmtime())
pywikibot.output('{} {}'.format(message.rstrip(), time_zone))
class CheckImagesBot:
"""A robot to check recently uploaded files."""
ignore_save_related_errors = True
ignore_server_errors = False
def __init__(
self,
site,
log_full_number: int = 25000,
sendemail_active: bool = False,
duplicates_report: bool = False,
log_full_error: bool = True,
max_user_notify=None
) -> None:
"""Initializer, define some instance variables."""
self.site = site
self.log_full_error = log_full_error
self.log_full_number = log_full_number
self.rep_page = i18n.translate(self.site, REPORT_PAGE)
if not self.rep_page:
raise TranslationError(
'No report page provided in "REPORT_PAGE" dict '
'for your project!')
self.image_namespace = site.namespaces.FILE.custom_name + ':'
self.list_entry = '\n* [[:{}%s]] '.format(self.image_namespace)
# The summary of the report
self.com = i18n.twtranslate(self.site, 'checkimages-log-comment')
hiddentemplates_raw = i18n.translate(self.site, HIDDEN_TEMPLATE)
if not hiddentemplates_raw:
raise TranslationError(
'No non-license templates provided in "HIDDEN_TEMPLATE" dict '
'for your project!')
self.hiddentemplates = {
pywikibot.Page(self.site, tmp, ns=self.site.namespaces.TEMPLATE)
for tmp in hiddentemplates_raw}
self.page_hidden = i18n.translate(self.site,
PAGE_WITH_HIDDEN_TEMPLATES)
self.page_allowed = i18n.translate(self.site,
PAGE_WITH_ALOWED_TEMPLATES)
self.comment = i18n.twtranslate(self.site.lang,
'checkimages-source-tag-comment')
# Adding the bot's nickname at the notification text if needed.
self.bots = i18n.translate(self.site, BOT_LIST)
if self.bots:
self.bots.append(site.username())
else:
self.bots = [site.username()]
self.sendemail_active = sendemail_active
self.skip_list = []
self.duplicates_report = duplicates_report
if max_user_notify:
self.num_notify = collections.defaultdict(lambda: max_user_notify)
else:
self.num_notify = None
# Load the licenses only once, so do it once
self.list_licenses = self.load_licenses()
def set_parameters(self, image) -> None:
"""Set parameters."""
# ensure we have a FilePage
self.image = pywikibot.FilePage(image)
self.image_name = image.title(with_ns=False)
self.timestamp = None
self.uploader = None
def report(
self,
newtext,
image_to_report,
notification=None,
head=None,
notification2=None,
unver: bool = True,
comm_talk=None,
comm_image=None
) -> None:
"""Function to make the reports easier."""
self.image_to_report = image_to_report
self.newtext = newtext
if not newtext:
raise TranslationError(
'No no-license template provided in "N_TXT" dict '
'for your project!')
self.head = head or ''
self.notification = notification
self.notification2 = notification2
if self.notification:
self.notification = re.sub(r'__botnick__', self.site.username(),
notification)
if self.notification2:
self.notification2 = re.sub(r'__botnick__', self.site.username(),
notification2)
self.comm_talk = comm_talk
self.comm_image = comm_image or self.comment
image_tagged = False
try:
image_tagged = self.tag_image(unver)
except NoPageError:
pywikibot.output('The page has been deleted! Skip!')
except EditConflictError:
pywikibot.output('Edit conflict! Skip!')
if image_tagged and self.notification:
try:
self.put_mex_in_talk()
except EditConflictError:
pywikibot.output('Edit Conflict! Retrying...')
try:
self.put_mex_in_talk()
except Exception:
pywikibot.exception()
pywikibot.output(
'Another error... skipping the user...')
def upload_bot_change_function(
self,
report_page_text,
upload_bot_array
) -> str:
"""Detect the user that has uploaded the file through upload bot."""
regex = upload_bot_array[1]
results = re.findall(regex, report_page_text)
if results:
luser = results[0]
return luser
# we can't find the user, report the problem to the bot
return upload_bot_array[0]
def tag_image(self, put: bool = True) -> bool:
"""Add template to the Image page and find out the uploader."""
# Get the image's description
report_page_object = pywikibot.FilePage(self.site,
self.image_to_report)
try:
report_page_text = report_page_object.get()
except NoPageError:
pywikibot.output(self.image_name + ' has been deleted...')
return False
        # You can also use this function just to find the user that
        # has uploaded the image (FixME: Rewrite a bit this part)
if put:
pywikibot.showDiff(report_page_text,
self.newtext + '\n' + report_page_text)
pywikibot.output(self.comm_image)
try:
report_page_object.put(self.newtext + '\n' + report_page_text,
summary=self.comm_image)
except LockedPageError:
pywikibot.output('File is locked. Skipping.')
return False
        # report_page_object is the image page object.
try:
if report_page_object == self.image and self.uploader:
nick = self.uploader
else:
nick = report_page_object.latest_file_info.user
except PageRelatedError:
pywikibot.output(
'Seems that {} has only the description and not the file...'
.format(self.image_to_report))
repme = self.list_entry + "problems '''with the APIs'''"
self.report_image(self.image_to_report, self.rep_page, self.com,
repme)
return False
upload_bots = i18n.translate(self.site, UPLOAD_BOTS)
user = pywikibot.User(self.site, nick)
luser = user.title(as_url=True)
if upload_bots:
for upload_bot in upload_bots:
if upload_bot[0] == luser:
luser = self.upload_bot_change_function(report_page_text,
upload_bot)
user = pywikibot.User(self.site, luser)
self.talk_page = user.getUserTalkPage()
self.luser = luser
return True
def put_mex_in_talk(self) -> None:
"""Function to put the warning in talk page of the uploader."""
commento2 = i18n.twtranslate(self.site.lang,
'checkimages-source-notice-comment')
email_page_name = i18n.translate(self.site, EMAIL_PAGE_WITH_TEXT)
email_subj = i18n.translate(self.site, EMAIL_SUBJECT)
if self.notification2:
self.notification2 %= self.image_to_report
else:
self.notification2 = self.notification
second_text = False
        # Getting the talk page's history, to check if there is another
        # notice already...
try:
testoattuale = self.talk_page.get()
history = list(self.talk_page.revisions(total=10))
latest_user = history[0]['user']
pywikibot.output(
'The latest user that has written something is: '
+ latest_user)
# A block to prevent the second message if the bot also
# welcomed users...
if latest_user in self.bots and len(history) > 1:
second_text = True
except IsRedirectPageError:
pywikibot.output(
'The user talk is a redirect, trying to get the right talk...')
try:
self.talk_page = self.talk_page.getRedirectTarget()
testoattuale = self.talk_page.get()
except NoPageError:
testoattuale = i18n.translate(self.site, EMPTY)
except NoPageError:
pywikibot.output('The user page is blank')
testoattuale = i18n.translate(self.site, EMPTY)
if self.comm_talk:
commentox = self.comm_talk
else:
commentox = commento2
if second_text:
new_text = '{}\n\n{}'.format(testoattuale, self.notification2)
else:
new_text = '{}\n\n== {} ==\n{}'.format(testoattuale, self.head,
self.notification)
# Check maximum number of notifications for this talk page
if (self.num_notify is not None
and self.num_notify[self.talk_page.title()] == 0):
pywikibot.output('Maximum notifications reached, skip.')
return
try:
self.talk_page.put(new_text, summary=commentox, minor=False)
except PageSaveRelatedError as e:
if not self.ignore_save_related_errors:
raise
err = e
except ServerError as e:
if not self.ignore_server_errors:
raise
err = e
else:
if self.num_notify is not None:
self.num_notify[self.talk_page.title()] -= 1
err = None
if err:
pywikibot.exception(err)
pywikibot.output('Skipping saving talk page {}'
.format(self.talk_page))
if email_page_name and email_subj:
email_page = pywikibot.Page(self.site, email_page_name)
try:
email_text = email_page.get()
except (NoPageError, IsRedirectPageError):
return
if self.sendemail_active:
text_to_send = re.sub(r'__user-nickname__', r'{}'
.format(self.luser), email_text)
email_class = pywikibot.User(self.site, self.luser)
try:
email_class.send_email(email_subj, text_to_send)
except NotEmailableError:
pywikibot.output('User is not mailable, aborted')
def regex_generator(self, regexp, textrun) -> Generator[pywikibot.FilePage,
None, None]:
"""Find page to yield using regex to parse text."""
regex = re.compile(r'{}'.format(regexp), re.DOTALL)
results = regex.findall(textrun)
for image in results:
yield pywikibot.FilePage(self.site, image)
def load_hidden_templates(self) -> None:
"""Function to load the white templates."""
        # A template like {{en}} is not a license! Adding these as well to the
        # template whitelist...
for key in Family.load('wikipedia').langs.keys():
self.hiddentemplates.add(pywikibot.Page(
self.site, 'Template:{}'.format(key)))
# Hidden template loading
if self.page_hidden:
try:
page_hidden_text = pywikibot.Page(self.site,
self.page_hidden).get()
except (NoPageError, IsRedirectPageError):
page_hidden_text = ''
for element in self.load(page_hidden_text):
self.hiddentemplates.add(pywikibot.Page(self.site, element))
def important_image(self, list_given) -> pywikibot.FilePage:
"""
Get tuples of image and time, return the most used or oldest image.
:param list_given: a list of tuples which hold seconds and FilePage
:type list_given: list
:return: the most used or oldest image
"""
# find the most used image
inx_found = None # index of found image
max_usage = 0 # hold max amount of using pages
for num, element in enumerate(list_given):
image = element[1]
image_used = len(list(image.usingPages()))
if image_used > max_usage:
max_usage = image_used
inx_found = num
if inx_found is not None:
return list_given[inx_found][1]
# find the oldest image
sec, image = max(list_given, key=lambda element: element[0])
return image
def check_image_on_commons(self) -> bool:
"""Checking if the file is on commons."""
pywikibot.output('Checking if [[{}]] is on commons...'
.format(self.image_name))
try:
hash_found = self.image.latest_file_info.sha1
except NoPageError:
return False # Image deleted, no hash found. Skip the image.
site = pywikibot.Site('commons', 'commons')
commons_image_with_this_hash = next(
iter(site.allimages(sha1=hash_found, total=1)), None)
if commons_image_with_this_hash:
service_template = pywikibot.translate(self.site,
SERVICE_TEMPLATES)
templates_in_the_image = self.image.templates()
if service_template is not None:
for template in service_template:
if pywikibot.Page(self.site,
template) in templates_in_the_image:
pywikibot.output(
"{} is on commons but it's a service image."
.format(self.image_name))
return True # continue with the check-part
pywikibot.output(self.image_name + ' is on commons!')
if self.image.file_is_shared():
pywikibot.output(
"But, the file doesn't exist on your project! Skip...")
# We have to skip the check part for that image because
# it's on commons but someone has added something on your
# project.
return False
if re.findall(r'\bstemma\b', self.image_name.lower()) and \
self.site.code == 'it':
pywikibot.output(
"{} has 'stemma' inside, means that it's ok."
.format(self.image_name))
return True
            # It's not only on Commons; the image still needs a check.
            # The second title is usually a url or something like that,
            # so compare the two in the same way, both as urls.
repme = ((self.list_entry
+ "is also on '''Commons''': [[commons:File:%s]]")
% (self.image_name,
commons_image_with_this_hash.title(
with_ns=False)))
if (self.image.title(as_url=True)
== commons_image_with_this_hash.title(as_url=True)):
repme += ' (same name)'
self.report_image(self.image_name, self.rep_page, self.com, repme,
addings=False)
return True
def check_image_duplicated(self, duplicates_rollback) -> bool:
"""Function to check the duplicated files."""
dup_text = i18n.translate(self.site, DUPLICATES_TEXT)
dup_regex = i18n.translate(self.site, DUPLICATES_REGEX)
dup_talk_text = i18n.translate(self.site, DUPLICATES_USER_TALK_TEXT)
# Head of the message given to the author
dup_talk_head = i18n.twtranslate(
self.site, 'checkimages-doubles-head')
# Comment while bot reports the problem in the uploader's talk
dup_comment_talk = i18n.twtranslate(
self.site, 'checkimages-doubles-talk-comment')
# Comment used by the bot while it reports the problem in the image
dup_comment_image = i18n.twtranslate(
self.site, 'checkimages-doubles-file-comment')
image_page = pywikibot.FilePage(self.site, self.image_name)
hash_found = image_page.latest_file_info.sha1
duplicates = list(self.site.allimages(sha1=hash_found))
if not duplicates:
return False # Image deleted, no hash found. Skip the image.
if len(duplicates) > 1:
xdict = {'en':
'%(name)s has {{PLURAL:count'
'|a duplicate! Reporting it'
'|%(count)s duplicates! Reporting them}}...'}
pywikibot.output(i18n.translate('en', xdict,
{'name': self.image_name,
'count': len(duplicates) - 1}))
if dup_text and dup_regex:
time_image_list = []
for dup_page in duplicates:
if (dup_page.title(as_url=True) != self.image.title(
as_url=True)
or self.timestamp is None):
try:
self.timestamp = (
dup_page.latest_file_info.timestamp)
except PageRelatedError:
continue
data = self.timestamp.timetuple()
data_seconds = time.mktime(data)
time_image_list.append([data_seconds, dup_page])
older_image_page = self.important_image(time_image_list)
older_page_text = older_image_page.text
# And if the images are more than two?
string = ''
images_to_tag_list = []
for dup_page in duplicates:
if dup_page == older_image_page:
# the most used or oldest image
# not report also this as duplicate
continue
try:
dup_page_text = dup_page.text
except NoPageError:
continue
if not (re.findall(dup_regex, dup_page_text)
or re.findall(dup_regex, older_page_text)):
pywikibot.output(
'{} is a duplicate and has to be tagged...'
.format(dup_page))
images_to_tag_list.append(dup_page.title())
string += '* {}\n'.format(
dup_page.title(as_link=True, textlink=True))
else:
pywikibot.output(
"Already put the dupe-template in the files's page"
" or in the dupe's page. Skip.")
return False # Ok - Let's continue the checking phase
            # True if the images are not to be tagged as dupes
only_report = False
# put only one image or the whole list according to the request
if '__images__' in dup_text:
text_for_the_report = dup_text.replace(
'__images__',
'\n{}* {}\n'.format(
string,
older_image_page.title(
as_link=True, textlink=True)))
else:
text_for_the_report = dup_text.replace(
'__image__',
older_image_page.title(as_link=True, textlink=True))
            # Two iterations: report the "problem" to the user only once
            # (for the last image)
if len(images_to_tag_list) > 1:
for image_to_tag in images_to_tag_list[:-1]:
fp = pywikibot.FilePage(self.site, image_to_tag)
already_reported_in_past = fp.revision_count(self.bots)
# if you want only one edit, the edit found should be
# more than 0 -> num - 1
if already_reported_in_past > duplicates_rollback - 1:
only_report = True
break
                    # Remove this image from the report text we are building
image = self.image_namespace + image_to_tag
text_for_the_report = re.sub(
r'\n\*\[\[:{}\]\]'.format(re.escape(image)),
'', text_for_the_report)
self.report(text_for_the_report, image_to_tag,
comm_image=dup_comment_image, unver=True)
if images_to_tag_list and not only_report:
fp = pywikibot.FilePage(self.site, images_to_tag_list[-1])
already_reported_in_past = fp.revision_count(self.bots)
image_title = re.escape(self.image.title(as_url=True))
from_regex = (r'\n\*\[\[:{}{}\]\]'
.format(self.image_namespace, image_title))
                # Remove this image from the report text we are building
text_for_the_report = re.sub(from_regex, '',
text_for_the_report)
# if you want only one edit, the edit found should be more
# than 0 -> num - 1
if already_reported_in_past > duplicates_rollback - 1 or \
not dup_talk_text:
only_report = True
else:
self.report(
text_for_the_report, images_to_tag_list[-1],
dup_talk_text
% (older_image_page.title(with_ns=True),
string),
dup_talk_head, comm_talk=dup_comment_talk,
comm_image=dup_comment_image, unver=True)
if self.duplicates_report or only_report:
if only_report:
repme = ((self.list_entry + 'has the following duplicates '
"('''forced mode'''):")
% self.image.title(as_url=True))
else:
repme = (
(self.list_entry + 'has the following duplicates:')
% self.image.title(as_url=True))
for dup_page in duplicates:
if (dup_page.title(as_url=True)
== self.image.title(as_url=True)):
                    # the image itself; don't report it as its own duplicate
continue
repme += '\n** [[:{}{}]]'.format(
self.image_namespace, dup_page.title(as_url=True))
result = self.report_image(self.image_name, self.rep_page,
self.com, repme, addings=False)
if not result:
return True # If Errors, exit (but continue the check)
if older_image_page.title() != self.image_name:
# The image is a duplicate, it will be deleted. So skip the
# check-part, useless
return False
return True # Ok - No problem. Let's continue the checking phase
def report_image(self, image_to_report, rep_page=None, com=None,
rep_text=None, addings: bool = True) -> bool:
"""Report the files to the report page when needed."""
rep_page = rep_page or self.rep_page
com = com or self.com
rep_text = rep_text or self.list_entry + '~~~~~'
if addings:
# Adding the name of the image in the report if not done already
rep_text = rep_text % image_to_report
another_page = pywikibot.Page(self.site, rep_page)
try:
text_get = another_page.get()
except NoPageError:
text_get = ''
except IsRedirectPageError:
text_get = another_page.getRedirectTarget().get()
# Don't care for differences inside brackets.
end = rep_text.find('(', max(0, rep_text.find(']]')))
if end < 0:
end = None
short_text = rep_text[rep_text.find('[['):end].strip()
reported = True
# Skip if the message is already there.
if short_text in text_get:
pywikibot.output('{} is already in the report page.'
.format(image_to_report))
reported = False
elif len(text_get) >= self.log_full_number:
if self.log_full_error:
raise LogIsFull(
'The log page ({}) is full! Please delete the old files '
'reported.'.format(another_page.title()))
pywikibot.output(
'The log page ({}) is full! Please delete the old files '
                'reported. Skip!'.format(another_page.title()))
# Don't report, but continue with the check
# (we don't know if this is the first time we check this file
# or not)
else:
# Adding the log
another_page.put(text_get + rep_text, summary=com, force=True,
minor=False)
pywikibot.output('...Reported...')
return reported
def takesettings(self) -> None:
"""Function to take the settings from the wiki."""
settings_page = i18n.translate(self.site, PAGE_WITH_SETTINGS)
try:
if not settings_page:
self.settings_data = None
else:
page = pywikibot.Page(self.site, settings_page)
self.settings_data = []
try:
testo = page.get()
number = 1
for m in SETTINGS_REGEX.finditer(testo):
name = str(m.group(1))
find_tipe = str(m.group(2))
find = str(m.group(3))
imagechanges = str(m.group(4))
summary = str(m.group(5))
head = str(m.group(6))
text = str(m.group(7))
mexcatched = str(m.group(8))
tupla = [number, name, find_tipe, find, imagechanges,
summary, head, text, mexcatched]
self.settings_data += [tupla]
number += 1
if not self.settings_data:
pywikibot.output(
"You've set wrongly your settings, please take a "
'look to the relative page. (run without them)')
self.settings_data = None
except NoPageError:
pywikibot.output("The settings' page doesn't exist!")
self.settings_data = None
except Error:
pywikibot.output(
                'Problems with loading the settings, running without them.')
self.settings_data = None
self.some_problem = False
if not self.settings_data:
self.settings_data = None
# Real-Time page loaded
if self.settings_data:
pywikibot.output('>> Loaded the real-time page... <<')
else:
pywikibot.output('>> No additional settings found! <<')
def load_licenses(self) -> List[pywikibot.Page]:
"""Load the list of the licenses."""
cat_name = i18n.translate(self.site, CATEGORY_WITH_LICENSES)
if not cat_name:
raise TranslationError(
'No allowed licenses category provided in '
'"CATEGORY_WITH_LICENSES" dict for your project!')
pywikibot.output('\nLoading the allowed licenses...\n')
cat = pywikibot.Category(self.site, cat_name)
list_licenses = list(cat.articles())
if self.site.code == 'commons':
no_licenses_to_skip = pywikibot.Category(self.site,
'License-related tags')
for license_given in no_licenses_to_skip.articles():
if license_given in list_licenses:
list_licenses.remove(license_given)
pywikibot.output('')
# Add the licenses set in the default page as licenses to check
if self.page_allowed:
try:
page_allowed_text = pywikibot.Page(self.site,
self.page_allowed).get()
except (NoPageError, IsRedirectPageError):
page_allowed_text = ''
for name_license in self.load(page_allowed_text):
page_license = pywikibot.Page(self.site, name_license)
if page_license not in list_licenses:
# the list has wiki-pages
list_licenses.append(page_license)
return list_licenses
def mini_template_check(self, template) -> bool:
"""Check if template is in allowed licenses or in licenses to skip."""
        # list_licenses is loaded in __init__
        # (so the licenses are not loaded multiple times)
if template in self.list_licenses:
self.license_selected = template.title(with_ns=False)
self.seems_ok = True
            # let the last "fake" license be detected normally
self.license_found = self.license_selected
return True
if template in self.hiddentemplates:
            # if the white template is not in the image's description, we
            # don't care
try:
self.all_licenses.remove(template)
except ValueError:
return False
else:
self.white_templates_found = True
return False
def template_in_list(self) -> None:
"""
Check if template is in list.
        Calls to the MediaWiki API can be pretty slow, while searching a
        list of objects is fast. So first see whether something can be
        found in the information we already have, then make a deeper
        check.
"""
for template in self.licenses_found:
if self.mini_template_check(template):
break
if not self.license_found:
for template in self.licenses_found:
if template.isRedirectPage():
template = template.getRedirectTarget()
if self.mini_template_check(template):
break
def smart_detection(self) -> Tuple[str, bool]:
"""
Detect templates.
        Instead of merely checking whether a template is present in the
        image's description, the bot also checks whether that template is
        a license or something else. In this sense the check is smart.
"""
self.seems_ok = False
self.license_found = None
self.white_templates_found = False
regex_find_licenses = re.compile(
r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)[|\n<}]', re.DOTALL)
regex_are_licenses = re.compile(
r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)\}\}', re.DOTALL)
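        # Illustrative example (not from the original source): in the text
        # '{{self|GFDL}}' the first regex captures 'self' (it stops at the
        # '|'), while the second one captures 'self|GFDL' because it
        # requires the closing '}}'.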
while True:
self.load_hidden_templates()
self.licenses_found = self.image.templates()
templates_in_the_image_raw = regex_find_licenses.findall(
self.image_check_text)
if not self.licenses_found and templates_in_the_image_raw:
                # {{nameTemplate|something <- this is not a template; make
                # sure we haven't caught something like that.
licenses_test = regex_are_licenses.findall(
self.image_check_text)
if not self.licenses_found and licenses_test:
raise Error(
"Invalid or broken templates found in the image's "
'page {}!'.format(self.image))
self.all_licenses = []
if not self.list_licenses:
raise TranslationError(
'No allowed licenses found in "CATEGORY_WITH_LICENSES" '
'category for your project!')
# Found the templates ONLY in the image's description
for template_selected in templates_in_the_image_raw:
tp = pywikibot.Page(self.site, template_selected)
for template_real in self.licenses_found:
if (tp.title(as_url=True, with_ns=False).lower()
== template_real.title(as_url=True,
with_ns=False).lower()):
if template_real not in self.all_licenses:
self.all_licenses.append(template_real)
break
if self.licenses_found:
self.template_in_list()
if not self.license_found and self.all_licenses:
self.all_licenses = [
template.getRedirectTarget()
if template.isRedirectPage() else template
for template in self.all_licenses if template.exists()]
if self.all_licenses:
self.license_found = self.all_licenses[0].title()
# If it has "some_problem" it must check the additional settings.
self.some_problem = False
if self.settings_data:
# use additional settings
self.find_additional_problems()
if self.some_problem:
if self.mex_used in self.image_check_text:
pywikibot.output('File already fixed. Skipping.')
else:
pywikibot.output(
"The file's description for {} contains {}..."
.format(self.image_name, self.name_used))
if self.mex_used.lower() == 'default':
self.mex_used = self.unvertext
if self.imagestatus_used:
reported = True
else:
reported = self.report_image(self.image_name)
if reported:
self.report(self.mex_used, self.image_name, self.text_used,
self.head_used, None,
self.imagestatus_used, self.summary_used)
else:
pywikibot.output('Skipping the file...')
self.some_problem = False
else:
if not self.seems_ok and self.license_found:
rep_text_license_fake = ((self.list_entry
+ "seems to have a ''fake license'',"
' license detected:'
' <nowiki>%s</nowiki>') %
(self.image_name, self.license_found))
print_with_time_zone(
'{} seems to have a fake license: {}, reporting...'
.format(self.image_name, self.license_found))
self.report_image(self.image_name,
rep_text=rep_text_license_fake,
addings=False)
elif self.license_found:
pywikibot.output('[[%s]] seems ok, license found: {{%s}}...'
% (self.image_name, self.license_found))
return (self.license_found, self.white_templates_found)
def load(self, raw) -> List[str]:
"""Load a list of objects from a string using regex."""
list_loaded = []
        # Extract every quoted entry with a regex and collect the unique
        # ones in a list (easier and safer than evaluating the string)
regl = r"(\"|\')(.*?)\1(?:,|\])"
pl = re.compile(regl)
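        # Illustrative example (not from the original source):
        # load('["Template:Foo", "Template:Bar"]') returns
        # ['Template:Foo', 'Template:Bar'].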
for xl in pl.finditer(raw):
word = xl.group(2).replace('\\\\', '\\')
if word not in list_loaded:
list_loaded.append(word)
return list_loaded
def skip_images(self, skip_number, limit) -> bool:
"""Given a number of files, skip the first -number- files."""
        # If there are more images to skip than images to check, cap the
        # skip count at the limit
if skip_number == 0:
pywikibot.output('\t\t>> No files to skip...<<')
return False
skip_number = min(skip_number, limit)
        # Print a starting message only if no images have been skipped yet
if not self.skip_list:
pywikibot.output(
i18n.translate(
'en',
'Skipping the first {{PLURAL:num|file|%(num)s files}}:\n',
{'num': skip_number}))
# If we still have pages to skip:
if len(self.skip_list) < skip_number:
pywikibot.output('Skipping {}...'.format(self.image_name))
self.skip_list.append(self.image_name)
if skip_number == 1:
pywikibot.output('')
return True
pywikibot.output('')
return False
@staticmethod
def wait(generator, wait_time) -> Generator[pywikibot.FilePage, None,
None]:
"""
        Skip the images uploaded less than wait_time seconds ago.
        Give the uploaders a chance to fix the image's problems on their
        own during those first seconds.
"""
print_with_time_zone(
'Skipping the files uploaded less than {} seconds ago..'
.format(wait_time))
for page in generator:
image = pywikibot.FilePage(page)
try:
timestamp = image.latest_file_info.timestamp
except PageRelatedError:
continue
now = pywikibot.Timestamp.utcnow()
delta = now - timestamp
if delta.total_seconds() > wait_time:
yield image
else:
                pywikibot.warning(
                    'Skipping {}, uploaded {} {} ago..'
                    .format(image.title(),
                            *((delta.days, 'days') if delta.days > 0
                              else (delta.seconds, 'seconds'))))
def is_tagged(self) -> bool:
"""Understand if a file is already tagged or not."""
# TODO: enhance and use textlib.MultiTemplateMatchBuilder
# Is the image already tagged? If yes, no need to double-check, skip
no_license = i18n.translate(self.site, TXT_FIND)
if not no_license:
raise TranslationError(
'No no-license templates provided in "TXT_FIND" dict '
'for your project!')
for i in no_license:
            # If the entry contains '{{', use a regex; otherwise use a plain
            # substring search (without '{{' it may not be a template and
            # the regex would be wrong)
if '{{' in i:
regex_pattern = re.compile(
r'\{\{(?:template)?%s ?(?:\||\r?\n|\}|<|/) ?'
% i.split('{{')[1].replace(' ', '[ _]'), re.I)
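                # Illustrative example (not from the original source): for
                # an entry like '{{no license' this builds the pattern
                # r'\{\{(?:template)?no[ _]license ?(?:\||\r?\n|\}|<|/) ?',
                # which matches '{{No license}}' case-insensitively.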
result = regex_pattern.findall(self.image_check_text)
if result:
return True
elif i.lower() in self.image_check_text:
return True
return False
def find_additional_problems(self) -> None:
"""Extract additional settings from configuration page."""
# In every tuple there's a setting configuration
for tupla in self.settings_data:
name = tupla[1]
find_tipe = tupla[2]
find = tupla[3]
find_list = self.load(find)
imagechanges = tupla[4]
if imagechanges.lower() == 'false':
imagestatus = False
elif imagechanges.lower() == 'true':
imagestatus = True
else:
pywikibot.error('Imagechanges set wrongly!')
self.settings_data = None
break
summary = tupla[5]
head_2 = tupla[6]
if head_2.count('==') == 2:
head_2 = re.findall(r'\s*== *(.+?) *==\s*', head_2)[0]
text = tupla[7] % self.image_name
mex_catched = tupla[8]
for k in find_list:
if find_tipe.lower() == 'findonly':
search_results = re.findall(r'{}'.format(k.lower()),
self.image_check_text.lower())
if search_results:
if search_results[0] == self.image_check_text.lower():
self.some_problem = True
self.text_used = text
self.head_used = head_2
self.imagestatus_used = imagestatus
self.name_used = name
self.summary_used = summary
self.mex_used = mex_catched
break
elif find_tipe.lower() == 'find':
if re.findall(r'{}'.format(k.lower()),
self.image_check_text.lower()):
self.some_problem = True
self.text_used = text
self.head_used = head_2
self.imagestatus_used = imagestatus
self.name_used = name
self.summary_used = summary
self.mex_used = mex_catched
continue
def check_step(self) -> None:
"""Check a single file page."""
# something = Minimal requirements for an image description.
# If this fits, no tagging will take place
# (if there aren't other issues)
        # The MIT license is OK on the Italian Wikipedia, so keep this here.
        # Don't put "}}" here, please. It's useless and can cause problems.
something = ['{{']
# Allowed extensions
try:
allowed_formats = self.site.siteinfo.get(
'fileextensions', get_default=False)
except KeyError:
allowed_formats = []
else:
allowed_formats = [item['ext'].lower() for item in allowed_formats]
brackets = False
delete = False
notification = None
# get the extension from the image's name
extension = self.image_name.split('.')[-1]
# Load the notification messages
hidden_template_notification = i18n.translate(
self.site, HIDDEN_TEMPALTE_NOTIFICATION)
self.unvertext = i18n.translate(self.site, N_TXT)
di = i18n.translate(self.site, DELETE_IMMEDIATELY)
# The header of the Unknown extension's message.
dih = i18n.twtranslate(self.site, 'checkimages-unknown-extension-head')
        # Text that will be added if the bot finds an unknown extension.
din = i18n.twtranslate(self.site,
'checkimages-unknown-extension-msg') + ' ~~~~'
        # Header that the bot will add if the image doesn't have a license.
nh = i18n.twtranslate(self.site, 'checkimages-no-license-head')
# Summary of the delete immediately.
dels = i18n.twtranslate(self.site, 'checkimages-deletion-comment')
nn = i18n.translate(self.site, NOTHING_NOTIFICATION)
smwl = i18n.translate(self.site, SECOND_MESSAGE_WITHOUT_LICENSE)
try:
self.image_check_text = self.image.get()
except NoPageError:
pywikibot.output('Skipping {} because it has been deleted.'
.format(self.image_name))
return
except IsRedirectPageError:
pywikibot.output("Skipping {} because it's a redirect."
.format(self.image_name))
return
# Delete the fields where the templates cannot be loaded
regex_nowiki = re.compile(r'<nowiki>(.*?)</nowiki>', re.DOTALL)
regex_pre = re.compile(r'<pre>(.*?)</pre>', re.DOTALL)
self.image_check_text = regex_nowiki.sub('', self.image_check_text)
self.image_check_text = regex_pre.sub('', self.image_check_text)
        # Delete the useless templates from the description (the original
        # text will be reloaded before anything is added to the image,
        # don't worry).
if self.is_tagged():
print_with_time_zone('{} is already tagged.'
.format(self.image_name))
return
# something is the array with {{, MIT License and so on.
for a_word in something:
if a_word in self.image_check_text:
# There's a template, probably a license
brackets = True
        # Is the extension allowed? (is it an image or e.g. a .xls file?)
if allowed_formats and extension.lower() not in allowed_formats:
delete = True
(license_found, hidden_template_found) = self.smart_detection()
# Here begins the check block.
if brackets and license_found:
return
if delete:
pywikibot.output('{} is not a file!'.format(self.image_name))
if not di:
pywikibot.output('No localized message given for '
"'DELETE_IMMEDIATELY'. Skipping.")
return
# Some formatting for delete immediately template
dels = dels % {'adding': di}
di = '\n' + di
# Modify summary text
config.default_edit_summary = dels
canctext = di % extension
notification = din % {'file': self.image.title(as_link=True,
textlink=True)}
head = dih
self.report(canctext, self.image_name, notification, head)
return
if not self.image_check_text.strip(): # empty image description
pywikibot.output(
"The file's description for {} does not contain a license "
                'template!'.format(self.image_name))
if hidden_template_found and hidden_template_notification:
notification = hidden_template_notification % self.image_name
elif nn:
notification = nn % self.image_name
head = nh
self.report(self.unvertext, self.image_name, notification, head,
smwl)
return
pywikibot.output('{} has only text and not the specific '
'license...'.format(self.image_name))
if hidden_template_found and hidden_template_notification:
notification = hidden_template_notification % self.image_name
elif nn:
notification = nn % self.image_name
head = nh
self.report(self.unvertext, self.image_name, notification, head, smwl)
def main(*args: str) -> bool:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
# Command line configurable parameters
    repeat = True  # Restart after having checked all the images?
    limit = 80  # How many images to check?
    time_sleep = 30  # How long to sleep after the check?
    skip_number = 0  # How many images to skip before checking?
    wait_time = 0  # How long to wait before the check?
    commons_active = False  # Check for an image with the same name on Commons?
normal = False # Check the new images or use another generator?
url_used = False # Use the url-related function instead of the new-pages
regex_gen = False # Use the regex generator
duplicates_active = False # Use the duplicate option
duplicates_report = False # Use the duplicate-report option
max_user_notify = None
sendemail_active = False # Use the send-email
log_full_error = True # Raise an error when the log is full
generator = None
unknown = [] # unknown parameters
local_args = pywikibot.handle_args(args)
site = pywikibot.Site()
# Here below there are the local parameters.
for arg in local_args:
option, _, value = arg.partition(':')
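        # e.g. '-limit:50' yields option '-limit' and value '50', while a
        # bare '-break' yields option '-break' and an empty value
        # (illustrative note, not from the original source).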
if option == '-limit':
limit = int(value or pywikibot.input(
'How many files do you want to check?'))
elif option == '-sleep':
time_sleep = int(value or pywikibot.input(
'How many seconds do you want runs to be apart?'))
elif option == '-break':
repeat = False
elif option == '-nologerror':
log_full_error = False
elif option == '-commons':
commons_active = True
elif option == '-duplicatesreport':
duplicates_report = True
elif option == '-duplicates':
duplicates_active = True
duplicates_rollback = int(value or 1)
elif option == '-maxusernotify':
max_user_notify = int(value or pywikibot.input(
'What should be the maximum number of notifications per user '
'per check?'))
elif option == '-sendemail':
sendemail_active = True
elif option == '-skip':
skip_number = int(value or pywikibot.input(
'How many files do you want to skip?'))
elif option == '-wait':
wait_time = int(value or pywikibot.input(
                'How long do you want to wait before checking the '
'files?'))
elif option == '-start':
first_page_title = value or pywikibot.input(
'From which page do you want to start?')
namespaces = tuple(
ns + ':' for ns in site.namespace(Namespace.FILE, True))
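            # Illustrative example (assuming 'File:' is a local alias of
            # the file namespace): '-start:File:Foo.jpg' is reduced to
            # 'Foo.jpg' below so that site.allimages() can start from it.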
if first_page_title.startswith(namespaces):
first_page_title = first_page_title.split(':', 1)[1]
generator = site.allimages(start=first_page_title)
repeat = False
elif option == '-page':
regex_page_name = value or pywikibot.input(
'Which page do you want to use for the regex?')
repeat = False
regex_gen = True
elif option == '-url':
regex_page_url = value or pywikibot.input(
'Which url do you want to use for the regex?')
url_used = True
repeat = False
regex_gen = True
elif option == '-regex':
regexp_to_use = value or pywikibot.input(
'Which regex do you want to use?')
generator = 'regex'
repeat = False
elif option == '-cat':
cat_name = value or pywikibot.input('In which category do I work?')
cat = pywikibot.Category(site, 'Category:' + cat_name)
generator = cat.articles(namespaces=[6])
repeat = False
elif option == '-ref':
ref_name = value or pywikibot.input(
'The references of what page should I parse?')
ref = pywikibot.Page(site, ref_name)
generator = ref.getReferences(namespaces=[6])
repeat = False
else:
unknown.append(arg)
if not generator:
normal = True
# Ensure that the bot is localized and right command args are given
if site.code not in PROJECT_INSERTED:
additional_text = ('Your project is not supported by this script.\n'
'To allow your project in the script you have to '
'add a localization into the script and add your '
'project to the "PROJECT_INSERTED" list!')
else:
additional_text = ''
if suggest_help(unknown_parameters=unknown,
additional_text=additional_text):
return False
# Reading the log of the new images if another generator is not given.
if normal:
if limit == 1:
pywikibot.output('Retrieving the latest file for checking...')
else:
pywikibot.output('Retrieving the latest {} files for checking...'
.format(limit))
while True:
        # Defining the main class.
bot = CheckImagesBot(site, sendemail_active=sendemail_active,
duplicates_report=duplicates_report,
log_full_error=log_full_error,
max_user_notify=max_user_notify)
if normal:
generator = pg.NewimagesPageGenerator(total=limit, site=site)
# if url_used and regex_gen, get the source for the generator
if url_used and regex_gen:
text_regex = site.getUrl(regex_page_url, no_hostname=True)
        # Not a URL but a wiki page as "source" for the regex
elif regex_gen:
page = pywikibot.Page(site, regex_page_name)
try:
text_regex = page.get()
except NoPageError:
pywikibot.output("{} doesn't exist!".format(page.title()))
text_regex = '' # No source, so the bot will quit later.
        # If the generator is the regex one, use our own generator based on
        # a URL or page and a regex.
if generator == 'regex' and regex_gen:
generator = bot.regex_generator(regexp_to_use, text_regex)
bot.takesettings()
if wait_time > 0:
generator = bot.wait(generator, wait_time)
for image in generator:
# Setting the image for the main class
bot.set_parameters(image)
if skip_number and bot.skip_images(skip_number, limit):
continue
# Check on commons if there's already an image with the same name
if commons_active and site.family.name != 'commons':
if not bot.check_image_on_commons():
continue
# Check if there are duplicates of the image on the project
if duplicates_active:
if not bot.check_image_duplicated(duplicates_rollback):
continue
bot.check_step()
if repeat:
            pywikibot.output('Waiting for {} seconds.'.format(time_sleep))
pywikibot.sleep(time_sleep)
else:
break
return True
if __name__ == '__main__':
start = time.time()
ret = False
try:
ret = main()
except KeyboardInterrupt:
ret = True
finally:
if ret is not False:
final = time.time()
delta = int(final - start)
pywikibot.output('Execution time: {} seconds\n'.format(delta))
|
wikimedia/pywikibot-core
|
scripts/checkimages.py
|
Python
|
mit
| 76,106 | 0 |
# Copyright 2014 Rackspace Hosting
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from mock import Mock, patch
from trove.backup import models as backup_models
from trove.common import cfg
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.datastore import models as datastore_models
from trove.instance import models
from trove.instance.models import DBInstance
from trove.instance.models import filter_ips
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import SimpleInstance
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import api as task_api
from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
CONF = cfg.CONF
class SimpleInstanceTest(trove_testtools.TestCase):
def setUp(self):
super(SimpleInstanceTest, self).setUp()
db_info = DBInstance(
InstanceTasks.BUILDING, name="TestInstance")
self.instance = SimpleInstance(
None, db_info, InstanceServiceStatus(
ServiceStatuses.BUILDING), ds_version=Mock(), ds=Mock())
db_info.addresses = {"private": [{"addr": "123.123.123.123"}],
"internal": [{"addr": "10.123.123.123"}],
"public": [{"addr": "15.123.123.123"}]}
self.orig_conf = CONF.network_label_regex
self.orig_ip_regex = CONF.ip_regex
self.orig_black_list_regex = CONF.black_list_regex
def tearDown(self):
super(SimpleInstanceTest, self).tearDown()
CONF.network_label_regex = self.orig_conf
CONF.ip_start = None
def test_get_root_on_create(self):
root_on_create_val = Instance.get_root_on_create(
'redis')
self.assertFalse(root_on_create_val)
def test_filter_ips_white_list(self):
CONF.network_label_regex = '.*'
CONF.ip_regex = '^(15.|123.)'
CONF.black_list_regex = '^10.123.123.*'
ip = self.instance.get_visible_ip_addresses()
ip = filter_ips(
ip, CONF.ip_regex, CONF.black_list_regex)
self.assertEqual(2, len(ip))
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
def test_filter_ips_black_list(self):
CONF.network_label_regex = '.*'
CONF.ip_regex = '.*'
CONF.black_list_regex = '^10.123.123.*'
ip = self.instance.get_visible_ip_addresses()
ip = filter_ips(
ip, CONF.ip_regex, CONF.black_list_regex)
self.assertEqual(2, len(ip))
self.assertTrue('10.123.123.123' not in ip)
def test_one_network_label(self):
CONF.network_label_regex = 'public'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(['15.123.123.123'], ip)
def test_two_network_labels(self):
CONF.network_label_regex = '^(private|public)$'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(2, len(ip))
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
def test_all_network_labels(self):
CONF.network_label_regex = '.*'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(3, len(ip))
self.assertTrue('10.123.123.123' in ip)
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
class CreateInstanceTest(trove_testtools.TestCase):
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
def setUp(self):
util.init_db()
self.context = trove_testtools.TroveTestContext(self, is_admin=True)
self.name = "name"
self.flavor_id = 5
self.image_id = "UUID"
self.databases = []
self.users = []
self.datastore = datastore_models.DBDatastore.create(
id=str(uuid.uuid4()),
name='mysql' + str(uuid.uuid4()),
)
self.datastore_version = (
datastore_models.DBDatastoreVersion.create(
id=str(uuid.uuid4()),
datastore_id=self.datastore.id,
name="5.5" + str(uuid.uuid4()),
manager="mysql",
image_id="image_id",
packages="",
active=True))
self.volume_size = 1
self.az = "az"
self.nics = None
self.configuration = None
self.tenant_id = "UUID"
self.datastore_version_id = str(uuid.uuid4())
self.db_info = DBInstance.create(
name=self.name, flavor_id=self.flavor_id,
tenant_id=self.tenant_id,
volume_size=self.volume_size,
datastore_version_id=self.datastore_version.id,
task_status=InstanceTasks.BUILDING,
configuration_id=self.configuration
)
self.backup_name = "name"
self.descr = None
self.backup_state = backup_models.BackupState.COMPLETED
self.instance_id = self.db_info.id
self.parent_id = None
self.deleted = False
self.backup = backup_models.DBBackup.create(
name=self.backup_name,
description=self.descr,
tenant_id=self.tenant_id,
state=self.backup_state,
instance_id=self.instance_id,
parent_id=self.parent_id,
datastore_version_id=self.datastore_version.id,
deleted=False
)
self.backup.size = 1.1
self.backup.save()
self.backup_id = self.backup.id
self.orig_client = models.create_nova_client
models.create_nova_client = nova.fake_create_nova_client
self.orig_api = task_api.API(self.context).create_instance
task_api.API(self.context).create_instance = Mock()
self.run_with_quotas = models.run_with_quotas
models.run_with_quotas = Mock()
self.check = backup_models.DBBackup.check_swift_object_exist
backup_models.DBBackup.check_swift_object_exist = Mock(
return_value=True)
super(CreateInstanceTest, self).setUp()
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
def tearDown(self):
self.db_info.delete()
self.backup.delete()
self.datastore.delete()
self.datastore_version.delete()
models.create_nova_client = self.orig_client
task_api.API(self.context).create_instance = self.orig_api
models.run_with_quotas = self.run_with_quotas
backup_models.DBBackup.check_swift_object_exist = self.check
self.backup.delete()
self.db_info.delete()
super(CreateInstanceTest, self).tearDown()
def test_exception_on_invalid_backup_size(self):
self.assertEqual(self.backup.id, self.backup_id)
exc = self.assertRaises(
exception.BackupTooLarge, models.Instance.create,
self.context, self.name, self.flavor_id,
self.image_id, self.databases, self.users,
self.datastore, self.datastore_version,
self.volume_size, self.backup_id,
self.az, self.nics, self.configuration
)
self.assertIn("Backup is too large for "
"given flavor or volume.", str(exc))
def test_can_restore_from_backup_with_almost_equal_size(self):
        # target size equals "1 GB"
self.backup.size = 0.99
self.backup.save()
instance = models.Instance.create(
self.context, self.name, self.flavor_id,
self.image_id, self.databases, self.users,
self.datastore, self.datastore_version,
self.volume_size, self.backup_id,
self.az, self.nics, self.configuration)
self.assertIsNotNone(instance)
class TestReplication(trove_testtools.TestCase):
def setUp(self):
util.init_db()
self.datastore = datastore_models.DBDatastore.create(
id=str(uuid.uuid4()),
name='name' + str(uuid.uuid4()),
default_version_id=str(uuid.uuid4()))
self.datastore_version = datastore_models.DBDatastoreVersion.create(
id=self.datastore.default_version_id,
name='name' + str(uuid.uuid4()),
image_id=str(uuid.uuid4()),
packages=str(uuid.uuid4()),
datastore_id=self.datastore.id,
manager='mysql',
active=1)
self.master = DBInstance(
InstanceTasks.NONE,
id=str(uuid.uuid4()),
name="TestMasterInstance",
datastore_version_id=self.datastore_version.id)
self.master.set_task_status(InstanceTasks.NONE)
self.master.save()
self.master_status = InstanceServiceStatus(
ServiceStatuses.RUNNING,
id=str(uuid.uuid4()),
instance_id=self.master.id)
self.master_status.save()
self.safe_nova_client = models.create_nova_client
models.create_nova_client = nova.fake_create_nova_client
super(TestReplication, self).setUp()
def tearDown(self):
self.master.delete()
self.master_status.delete()
self.datastore.delete()
self.datastore_version.delete()
models.create_nova_client = self.safe_nova_client
super(TestReplication, self).tearDown()
@patch('trove.instance.models.LOG')
def test_replica_of_not_active_master(self, mock_logging):
self.master.set_task_status(InstanceTasks.BUILDING)
self.master.save()
self.master_status.set_status(ServiceStatuses.BUILDING)
self.master_status.save()
self.assertRaises(exception.UnprocessableEntity,
Instance.create,
None, 'name', 1, "UUID", [], [], None,
self.datastore_version, 1,
None, slave_of_id=self.master.id)
@patch('trove.instance.models.LOG')
def test_replica_with_invalid_slave_of_id(self, mock_logging):
self.assertRaises(exception.NotFound,
Instance.create,
None, 'name', 1, "UUID", [], [], None,
self.datastore_version, 1,
None, slave_of_id=str(uuid.uuid4()))
def test_create_replica_from_replica(self):
self.replica_datastore_version = Mock(
spec=datastore_models.DBDatastoreVersion)
self.replica_datastore_version.id = "UUID"
self.replica_datastore_version.manager = 'mysql'
self.replica_info = DBInstance(
InstanceTasks.NONE,
id="UUID",
name="TestInstance",
datastore_version_id=self.replica_datastore_version.id,
slave_of_id=self.master.id)
self.replica_info.save()
self.assertRaises(exception.Forbidden, Instance.create,
None, 'name', 2, "UUID", [], [], None,
self.datastore_version, 1,
None, slave_of_id=self.replica_info.id)
|
redhat-openstack/trove
|
trove/tests/unittests/instance/test_instance_models.py
|
Python
|
apache-2.0
| 11,729 | 0 |
from django import forms
from djwed.wedding.models import *
from djwed.wedding.admin_actions import *
from django.contrib import admin
class RequireOneFormSet(forms.models.BaseInlineFormSet):
"""Require at least one form in the formset to be completed."""
def clean(self):
"""Check that at least one form has been completed."""
super(RequireOneFormSet, self).clean()
if not self.is_valid():
return
for cleaned_data in self.cleaned_data:
# form has data and we aren't deleting it.
if cleaned_data and not cleaned_data.get('DELETE', False):
# we can break out after the first complete form
return
raise forms.ValidationError("At least one %s is required." %
(self.model._meta.verbose_name,))
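# Illustrative usage note (not from the original source): assigning
# "formset = RequireOneFormSet" on an inline admin class (as
# GiftThankYouInline does below) makes the admin reject a save when no
# inline form has been completed.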
class InviteeNotesInline(admin.TabularInline):
model = InviteeNotes
extra = 0
verbose_name_plural = "invitee notes"
class RSVPInline(admin.TabularInline):
model = RSVP
extra = 2
class GuestInline(admin.StackedInline):
model = Guest
extra = 1
class FoodOptionInline(admin.StackedInline):
model = FoodOption
extra = 3
class CommentInline(admin.StackedInline):
model = Comment
extra = 0
exclude = ('rsvp',)
readonly_fields = ('text',)
verbose_name_plural = "comments from invitees"
class GiftThankYouInline(admin.TabularInline):
model = ThankYou
extra = 0
verbose_name = "Source"
verbose_name_plural = "Sources"
formset = RequireOneFormSet
class InviteeThankYouInline(admin.TabularInline):
model = ThankYou
extra = 0
class InviteeAdmin(admin.ModelAdmin):
#fieldsets = [
# (None, {'fields': ['question']}),
# ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
#]
inlines = [GuestInline,InviteeNotesInline,CommentInline,InviteeThankYouInline]
list_display = ('full_name', 'tags', 'full_address', 'state','country')
list_editable = ('tags',)
list_filter = ['side', 'association','country','state']
search_fields = ['full_name_override','invite_code','guest__first_name', 'guest__last_name', 'guest__nickname']
actions = [
export_as_csv_action("Export addresses as CSV",
fields=['full_name', 'full_address']),
]
#date_hierarchy = 'pub_date'
class LongFoodChoiceField(forms.ModelChoiceField):
#widget = forms.widgets.RadioSelect()
def label_from_instance(self, obj):
return obj.long_desc
class GuestAdmin(admin.ModelAdmin):
inlines = [RSVPInline,]
list_display = ('full_name', 'email', 'tags')
list_filter = ['rsvp__status', 'role', 'invitee__side', 'invitee__association']
search_fields = ['first_name', 'last_name']
list_editable = ('email', 'tags')
class RSVPAdminForm(forms.ModelForm):
    class Meta:
        model = RSVP
def clean(self, *args, **kwargs):
sret = super(RSVPAdminForm, self).clean(*args,**kwargs)
if self.cleaned_data['food_selection'] and self.cleaned_data['food_selection'].venue != self.cleaned_data['venue']:
raise ValidationError('Food selected from another venue')
if self.cleaned_data['venue'].site != u'MA' and self.cleaned_data['bus_selection']:
raise ValidationError('Bus selection for a site with no bus')
rsvp_filter = RSVP.objects.filter(venue = self.cleaned_data['venue'],
guest = self.cleaned_data['guest'])
if rsvp_filter.count()>1 or (rsvp_filter.count() == 1
and rsvp_filter.all()[0] != self.instance):
raise ValidationError('Only one RSVP allowed per person')
return sret
class RSVPAdmin(admin.ModelAdmin):
#inlines = [GuestInline,]
#food_selection = LongFoodChoiceField([], required=False, empty_label = "--- Please choose from a dinner selection below ---")
list_display = (
'guest_site',
'venue',
'status',
'food_selection',
'bus_selection',
'last_updated',
'prelim',
'guest_invitee',
'last_update_source',
'guest',
'table_assign',
)
search_fields = [
'guest__first_name',
'guest__last_name',
'guest__invitee__guest__last_name',
'guest__invitee__invite_code',
]
list_editable = (
'status',
'food_selection',
'bus_selection',
'prelim',
'last_update_source',
'table_assign',
)
form = RSVPAdminForm
list_filter = ('venue','status', 'guest__invitee__side',
'guest__invitee__association', 'guest__invitee__country',
'guest__invitee__state',
)
def guest_site(self,rsvp):
return u"%s (%s)"%(rsvp.guest.full_name(), unicode(rsvp.venue.site))
guest_site.short_description = "Guest (Site)"
def guest_invitee(self,rsvp):
return rsvp.guest.invitee
guest_invitee.short_description = "Invitee"
def guest_invitee_association(self,rsvp):
return rsvp.guest.invitee.association
guest_invitee_association.short_description = "Association"
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "guest":
kwargs["queryset"] = Guest.objects.all().order_by('last_name','first_name')
return db_field.formfield(**kwargs)
return super(RSVPAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class InviteeNotesAdmin(admin.ModelAdmin):
search_fields = ['invitee__guest__first_name',
'invitee__guest__last_name','invitee__guest__nickname']
list_display = [ 'invitee',
'likely_site',
'ma_likelihood',
'ca_likelihood',
'or_likelihood',
'savedate',
'batch',
'invitee_rsvp_count',
'adults',
'children',
'invitee_country',
]
list_editable = ['ma_likelihood',
'ca_likelihood',
'savedate',
'batch',
]
def invitee_rsvp_count(self,inote):
counts = inote.invitee.rsvp_yes_counts()
return ', '.join('%s: %d' % (venue, counts[venue])
for venue in sorted(counts.keys()))
invitee_rsvp_count.short_description = "RSVP Yes"
def invitee_country(self,inote):
return str(inote.invitee.country)
invitee_country.short_description = "Country"
class CommentAdmin(admin.ModelAdmin):
list_filter = ['type']
search_fields = ['invitee__guest__first_name','text',
'invitee__guest__last_name','invitee__guest__nickname']
list_display = ['id','invitee','type','last_updated','text']
class VenueAdmin(admin.ModelAdmin):
inlines = [FoodOptionInline,]
class PageSnippetAdmin(admin.ModelAdmin):
list_display = ['key','title','last_updated']
class GiftAdmin(admin.ModelAdmin):
search_fields = [
'sources__guest__first_name',
'sources__guest__nickname',
'sources__guest__last_name',
'notes',
'description',
]
list_filter = ['status','registry','assignment']
list_display = ['source_names','received','description','notes',
'assignment','registry','status']
list_editable = ('status', 'assignment')
inlines = [GiftThankYouInline,]
radio_fields = {
'assignment': admin.HORIZONTAL,
'registry': admin.VERTICAL,
'status': admin.HORIZONTAL,
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "source" and request.META['REQUEST_METHOD'] != 'POST':
kwargs["queryset"] = Invitee.objects.all().order_by('guest__last_name','guest__first_name')
return db_field.formfield(**kwargs)
return super(GiftAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def source_names(self, gift):
return u"; ".join(unicode(inv) for inv in gift.sources.all())
source_names.short_description = "Sources"
class ThankYouAdmin(admin.ModelAdmin):
list_display = [
'invitee',
'status',
'sent',
]
list_editable = ['status', 'sent']
list_filter = [
'status',
'sent',
'gift__assignment',
'gift__received',
'invitee__side',
]
search_fields = [
'invitee__guest__first_name',
'invitee__guest__last_name',
'invitee__guest__nickname',
'gift__description',
'gift__notes',
]
class TableAdmin(admin.ModelAdmin):
search_fields = ['rsvp__guest__first_name','name','number','notes',
'rsvp__guest__last_name','invitee__guest__nickname']
list_display = ['number','name','venue','table_count','table_guests','notes','position']
list_editable = ('name','notes')
list_filter = ['venue',]
def table_count(self,table):
return str(table.rsvp_set.count())
table_count.short_description = "# people"
def table_guests(self,table):
guests = []
for r in table.rsvp_set.all():
guests.append(unicode(r.guest))
guests.sort()
return u" , \n".join(guests)
table_guests.short_description = "guests"
class RSVPOptionAdmin(admin.ModelAdmin):
list_display = ['short_desc', 'likelihood', 'rsvp_count', 'long_desc']
def rsvp_count(self, option):
return str(option.rsvp_set.count())
rsvp_count.short_description = "# people"
admin.site.register(Invitee, InviteeAdmin)
admin.site.register(InviteeNotes, InviteeNotesAdmin)
admin.site.register(Guest, GuestAdmin)
admin.site.register(Venue, VenueAdmin)
admin.site.register(PageSnippet, PageSnippetAdmin)
admin.site.register(RSVP, RSVPAdmin)
admin.site.register(RSVPOption, RSVPOptionAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Gift, GiftAdmin)
admin.site.register(ThankYou, ThankYouAdmin)
admin.site.register(Table, TableAdmin)
|
garyp/djwed
|
wedding/admin.py
|
Python
|
mit
| 10,509 | 0.008945 |
import yaml
import time
import random
import threading
import subprocess
from rackattack.physical import pikapatch
from rackattack import clientfactory
from rackattack.physical import config
from rackattack.api import Requirement, AllocationInfo
from rackattack.physical.tests.integration.main import useFakeGeneralConfiguration
import pika
assert "egg" in pika.__file__
class RackattackTestClients(threading.Thread):
SCENARIOS = dict(few=(1, 4), moreThanFew=(5, 9), many=(10, 30))
SCENARIOS_PROBABILITIES = dict(few=0.7, moreThanFew=0.2, many=0.1)
def __init__(self, nodeBaseName="node"):
assert(sum(self.SCENARIOS_PROBABILITIES.values()) == 1)
super(RackattackTestClients, self).__init__()
self._nodeBaseName = nodeBaseName
self._client = clientfactory.factory()
with open(config.CONFIGURATION_FILE) as f:
conf = yaml.load(f.read())
self._osmosisServerIP = conf["OSMOSIS_SERVER_IP"]
self._label = self._generateLabelName()
self._nrHosts = self._getNrHosts()
self._nrAllocatedHosts = 0
self._profiledAllocation = None
self._allocations = set()
self._stop = False
def run(self):
while True:
if self._stop:
while self._allocations:
allocation = self._allocations.pop()
allocation.free()
return
self._updateNrAllocatedHosts()
if self._nrAllocatedHosts == self._nrHosts:
self._free()
elif not self._allocations:
self._allocateForBackground()
elif self._nrAllocatedHosts <= self._nrHosts:
self._performRandomLoadAction()
else:
assert(False)
interval = 0.5 + random.random() * 1.2
time.sleep(interval)
def stop(self):
self._stop = True
def _updateNrAllocatedHosts(self):
stillAlive = set()
self._nrAllocatedHosts = 0
for allocation in self._allocations:
if allocation.dead() is None:
self._nrAllocatedHosts += len(allocation._requirements)
stillAlive.add(allocation)
self._allocations = stillAlive
def _generateLabelName(self):
cmd = "osmosis listlabels --objectStores=%(osmosisServerIP)s:1010 star | head -n 1" % \
dict(osmosisServerIP=self._osmosisServerIP)
print "Running %(cmd)s" % dict(cmd=cmd)
labelName = subprocess.check_output(cmd, shell=True)
labelName = labelName.strip()
return labelName
def _performRandomLoadAction(self):
wantedAllocationRatio = 0.65
allocationRatio = self._nrAllocatedHosts / float(self._nrHosts)
print "allocationRatio: {}, nrAllocated: {}, nrHosts: {}".format(allocationRatio,
self._nrAllocatedHosts,
self._nrHosts)
if allocationRatio < wantedAllocationRatio:
            print "Will most likely allocate now..."
majorityAction = self._allocateForBackground
minorityAction = self._free
else:
print "Reached the wanted ratio..."
time.sleep(0.5)
            print "Will most likely free now..."
majorityAction = self._free
minorityAction = self._allocateForBackground
withinWhatRange = random.random()
if withinWhatRange < 0.9:
majorityAction()
else:
minorityAction()
def _generateRequirements(self, nrHosts, pool):
requirements = dict([("{}{}".format(self._nodeBaseName, nodeIdx),
Requirement(imageLabel=self._label,
imageHint=self._label,
hardwareConstraints=None,
pool=pool))
for nodeIdx in xrange(nrHosts)])
return requirements
def _generateAllocationInfo(self):
allocationInfo = AllocationInfo(user="johabab", purpose="loadTests")
return allocationInfo
def allocate(self, nrHosts, pool="default"):
self._updateNrAllocatedHosts()
self._allocate(nrHosts, pool)
def _allocateForBackground(self):
nrHosts = self._getRandomNrHosts()
self._allocate(nrHosts)
def _allocate(self, nrHostsToAllocate, pool="default"):
requirements = self._generateRequirements(nrHostsToAllocate, pool=pool)
allocationInfo = self._generateAllocationInfo()
print "Trying to allocate %(nrHosts)s hosts from %(pool)s" % dict(nrHosts=len(requirements),
pool=pool)
allocation = None
try:
allocation = self._client.allocate(requirements, allocationInfo)
self._allocations.add(allocation)
print "Allocation succeeded"
except Exception as e:
if 'not enough machines' in str(e):
print "Allocation failed: not enough machines"
else:
print str(e)
return allocation
def _getRandomNrHosts(self):
scenarioNames = self.SCENARIOS.keys()
scenarioNames.sort()
withinWhichRange = random.random()
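        # Illustrative walk-through (using the default SCENARIOS above):
        # the sorted names are ['few', 'many', 'moreThanFew'], so the
        # cumulative bounds below are 0.7, 0.8 and 1.0; a draw of e.g.
        # 0.75 would therefore select 'many'.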
rangeBound = 0
chosenScenarioName = None
for scenarioName in scenarioNames:
rangeBound += self.SCENARIOS_PROBABILITIES[scenarioName]
if withinWhichRange <= rangeBound:
chosenScenarioName = scenarioName
break
assert chosenScenarioName is not None
nrHosts = random.randint(*self.SCENARIOS[chosenScenarioName])
return nrHosts
def free(self):
self._updateNrAllocatedHosts()
self._free()
def _free(self):
allocation = self._allocations.pop()
print "Trying to free an allocation..."
try:
allocation.free()
except Exception as e:
print "Failed freeing allocation: {}".format(str(e))
print "Allocation freed."
def _getNrHosts(self):
status = self._client.call("admin__queryStatus")
return len(status["hosts"])
backgroundStressTestClient = None
profilingTestClient = None
def bgStress(mode):
if mode == "on":
print "Starting test clients..."
backgroundStressTestClient.start()
elif mode == "off":
print "Stopping test clients..."
backgroundStressTestClient.stop()
def allocate(nrHosts, pool="default"):
nrHosts = int(nrHosts)
profilingTestClient.allocate(nrHosts, pool=pool)
profilingAllocation = True
def free():
profilingTestClient.free()
def main():
print """Available commands:
bgstress on/off
\tRuns allocations (and frees them) in the background.
allocate nrHosts [pool=default]
\tAllocates the given number of hosts from the given pool.
free
\tFrees the current allocation (which was created with the 'allocate' command, if such allocation
exists)."""
useFakeGeneralConfiguration()
import pdb
pdb.set_trace()
global backgroundStressTestClient, profilingTestClient, profilingAllocation
backgroundStressTestClient = RackattackTestClients("background-stress")
profilingTestClient = RackattackTestClients("profiling")
client = clientfactory.factory()
profilingAllocation = False
commands = dict(bgstress=bgStress, allocate=allocate, free=free)
while True:
cmdline = raw_input()
cmdline = cmdline.strip()
if not cmdline:
continue
cmdline = cmdline.split(" ")
cmdline = [item.strip() for item in cmdline]
commandName = cmdline[0]
args = cmdline[1:]
if commandName not in commands:
print "Invalid command: %(commandName)s" % dict(commandName=commandName)
continue
command = commands[commandName]
try:
command(*args)
except Exception as e:
print "An error has occurred while executing command: %(message)s" % dict(message=e.message)
continue
if __name__ == '__main__':
main()
|
eliran-stratoscale/rackattack-physical
|
rackattack/physical/tests/integration/main_faketestclients.py
|
Python
|
apache-2.0
| 8,351 | 0.001317 |
# coding: utf-8
#
# Esri start of added imports
import sys, os, arcpy
# Esri end of added imports
# Esri start of added variables
g_ESRI_variable_1 = 'lyrFC'
g_ESRI_variable_2 = 'lyrTmp'
g_ESRI_variable_3 = 'ID'
g_ESRI_variable_4 = 'lyrOut'
g_ESRI_variable_5 = ';'
# Esri end of added variables
#------------------------------------------------------------------------------
# Copyright 2014 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# ==================================================
# PointTargetGRG.py
# --------------------------------------------------
# Built on ArcGIS
# ==================================================
#
# Creates a Gridded Reference Graphic
#
#
# ==================================================
# HISTORY:
#
# 8/25/2015 - mf - Needed to update script for non-ArcMap/Pro testing environment
#
# ==================================================
import os, sys, math, traceback
import arcpy
from arcpy import env
import Utilities
# Read in the parameters
targetPointOrigin = arcpy.GetParameterAsText(0)
numberCellsHo = arcpy.GetParameterAsText(1)
numberCellsVert = arcpy.GetParameterAsText(2)
cellWidth = arcpy.GetParameterAsText(3)
cellHeight = arcpy.GetParameterAsText(4)
cellUnits = arcpy.GetParameterAsText(5)
gridSize = arcpy.GetParameterAsText(6)
labelStartPos = arcpy.GetParameterAsText(7)
labelStyle = arcpy.GetParameterAsText(8)
outputFeatureClass = arcpy.GetParameterAsText(9)
tempOutput = os.path.join("in_memory", "tempFishnetGrid")
sysPath = sys.path[0]
appEnvironment = None
DEBUG = True
mxd = None
mapList = None
df, aprx = None, None
def labelFeatures(layer, field):
''' set up labeling for layer '''
if appEnvironment == "ARCGIS_PRO":
if layer.supports("SHOWLABELS"):
for lblclass in layer.listLabelClasses():
lblclass.visible = True
lblclass.expression = " [" + str(field) + "]"
layer.showLabels = True
elif appEnvironment == "ARCMAP":
if layer.supports("LABELCLASSES"):
for lblclass in layer.labelClasses:
lblclass.showClassLabels = True
lblclass.expression = " [" + str(field) + "]"
layer.showLabels = True
arcpy.RefreshActiveView()
else:
pass # if returns "OTHER"
def findLayerByName(layerName):
''' find layer in app '''
global mapList
global mxd
#UPDATE
# if isPro:
if appEnvironment == "ARCGIS_PRO":
for layer in mapList.listLayers():
if layer.name == layerName:
arcpy.AddMessage("Found matching layer [" + layer.name + "]")
return layer
else:
arcpy.AddMessage("Incorrect layer: [" + layer.name + "]")
# else:
elif appEnvironment == "ARCMAP":
for layer in arcpy.mapping.ListLayers(mxd):
if layer.name == layerName:
arcpy.AddMessage("Found matching layer [" + layer.name + "]")
return layer
else:
arcpy.AddMessage("Incorrect layer: [" + layer.name + "]")
else:
        arcpy.AddMessage("Non-map application (ArcCatalog, stand-alone test, etc.)")
def RotateFeatureClass(inputFC, outputFC,
angle=0, pivot_point=None):
"""Rotate Feature Class
inputFC Input features
outputFC Output feature class
angle Angle to rotate, in degrees
pivot_point X,Y coordinates (as space-separated string)
Default is lower-left of inputFC
    Because the output feature class no longer has "real" xy locations
    after rotation, it has no coordinate system defined.
"""
    def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"):
        """Rotate an xy coordinate about a specified origin
x,y xy coordinates
xc,yc center of rotation
angle angle
units "DEGREES" (default) or "RADIANS"
"""
import math
x = x - xc
y = y - yc
# make angle clockwise (like Rotate_management)
angle = angle * -1
if units == "DEGREES":
angle = math.radians(angle)
xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc
yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc
return xr, yr
# temp names for cleanup
env_file = None
lyrFC, lyrTmp, lyrOut = [None] * 3 # layers
tmpFC = None # temp dataset
Row, Rows, oRow, oRows = [None] * 4 # cursors
try:
# process parameters
try:
xcen, ycen = [float(xy) for xy in pivot_point.split()]
pivot_point = xcen, ycen
except:
# if pivot point was not specified, get it from
# the lower-left corner of the feature class
ext = arcpy.Describe(inputFC).extent
xcen, ycen = ext.XMin, ext.YMin
pivot_point = xcen, ycen
angle = float(angle)
# set up environment
env_file = arcpy.CreateScratchName("xxenv",".xml","file",
os.environ["TEMP"])
arcpy.SaveSettings(env_file)
# Disable any GP environment clips or project on the fly
arcpy.ClearEnvironment("extent")
arcpy.ClearEnvironment("outputCoordinateSystem")
WKS = env.workspace
if not WKS:
if os.path.dirname(outputFC):
WKS = os.path.dirname(outputFC)
else:
WKS = os.path.dirname(
arcpy.Describe(inputFC).catalogPath)
env.workspace = env.scratchWorkspace = WKS
# Disable GP environment clips or project on the fly
arcpy.ClearEnvironment("extent")
arcpy.ClearEnvironment("outputCoordinateSystem")
# get feature class properties
lyrFC = g_ESRI_variable_1
arcpy.MakeFeatureLayer_management(inputFC, lyrFC)
dFC = arcpy.Describe(lyrFC)
shpField = dFC.shapeFieldName
shpType = dFC.shapeType
FID = dFC.OIDFieldName
# create temp feature class
tmpFC = arcpy.CreateScratchName("xxfc", "", "featureclass")
arcpy.CreateFeatureclass_management(os.path.dirname(tmpFC),
os.path.basename(tmpFC),
shpType)
lyrTmp = g_ESRI_variable_2
arcpy.MakeFeatureLayer_management(tmpFC, lyrTmp)
# set up id field (used to join later)
TFID = "XXXX_FID"
arcpy.AddField_management(lyrTmp, TFID, "LONG")
arcpy.DeleteField_management(lyrTmp, g_ESRI_variable_3)
# rotate the feature class coordinates
# only points, polylines, and polygons are supported
# open read and write cursors
Rows = arcpy.SearchCursor(lyrFC, "", "",
"%s;%s" % (shpField,FID))
oRows = arcpy.InsertCursor(lyrTmp)
arcpy.AddMessage("Opened search cursor")
if shpType == "Point":
for Row in Rows:
shp = Row.getValue(shpField)
pnt = shp.getPart()
pnt.X, pnt.Y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
oRow = oRows.newRow()
oRow.setValue(shpField, pnt)
oRow.setValue(TFID, Row. getValue(FID))
oRows.insertRow(oRow)
elif shpType in ["Polyline", "Polygon"]:
parts = arcpy.Array()
rings = arcpy.Array()
ring = arcpy.Array()
for Row in Rows:
shp = Row.getValue(shpField)
p = 0
for part in shp:
for pnt in part:
if pnt:
x, y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
ring.add(arcpy.Point(x, y, pnt.ID))
else:
# if we have a ring, save it
if len(ring) > 0:
rings.add(ring)
ring.removeAll()
# we have our last ring, add it
rings.add(ring)
ring.removeAll()
# if only one, remove nesting
if len(rings) == 1: rings = rings.getObject(0)
parts.add(rings)
rings.removeAll()
p += 1
# if only one, remove nesting
if len(parts) == 1: parts = parts.getObject(0)
if dFC.shapeType == "Polyline":
shp = arcpy.Polyline(parts)
else:
shp = arcpy.Polygon(parts)
parts.removeAll()
oRow = oRows.newRow()
oRow.setValue(shpField, shp)
oRow.setValue(TFID,Row.getValue(FID))
oRows.insertRow(oRow)
else:
#raise Exception, "Shape type {0} is not supported".format(shpType) #UPDATE
raise Exception("Shape type {0} is not supported".format(shpType))
del oRow, oRows # close write cursor (ensure buffer written)
oRow, oRows = None, None # restore variables for cleanup
# join attributes, and copy to output
arcpy.AddJoin_management(lyrTmp, TFID, lyrFC, FID)
env.qualifiedFieldNames = False
arcpy.Merge_management(lyrTmp, outputFC)
lyrOut = g_ESRI_variable_4
arcpy.MakeFeatureLayer_management(outputFC, lyrOut)
# drop temp fields 2,3 (TFID, FID)
fnames = [f.name for f in arcpy.ListFields(lyrOut)]
dropList = g_ESRI_variable_5.join(fnames[2:4])
arcpy.DeleteField_management(lyrOut, dropList)
#except MsgError, xmsg: #UPDATE
except MsgError as xmsg:
arcpy.AddError(str(xmsg))
except arcpy.ExecuteError:
tbinfo = traceback.format_tb(sys.exc_info()[2])[0]
arcpy.AddError(tbinfo.strip())
arcpy.AddError(arcpy.GetMessages())
numMsg = arcpy.GetMessageCount()
for i in range(0, numMsg):
arcpy.AddReturnMessage(i)
#except Exception, xmsg: #UPDATE
except Exception as xmsg:
tbinfo = traceback.format_tb(sys.exc_info()[2])[0]
arcpy.AddError(tbinfo + str(xmsg))
finally:
# reset environment
if env_file: arcpy.LoadSettings(env_file)
# Clean up temp files
for f in [lyrFC, lyrTmp, lyrOut, tmpFC, env_file]:
try:
if f: arcpy.Delete_management(f)
except:
pass
# delete cursors
try:
for c in [Row, Rows, oRow, oRows]: del c
except:
pass
# return pivot point
try:
pivot_point = "{0} {1}".format(*pivot_point)
except:
pivot_point = None
return pivot_point
def ColIdxToXlName(index):
''' Converts an index into a letter, labeled like excel columns, A to Z, AA to ZZ, etc. '''
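    # Illustrative mapping (not from the original source):
    # 1 -> 'A', 26 -> 'Z', 27 -> 'AA', 52 -> 'AZ', 53 -> 'BA'.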
if index < 1:
raise ValueError("Index is too small")
result = ""
while True:
if index > 26:
index, r = divmod(index - 1, 26)
result = chr(r + ord('A')) + result
else:
return chr(index + ord('A') - 1) + result
def main():
''' main method '''
try:
#UPDATE
gisVersion = arcpy.GetInstallInfo()["Version"]
global appEnvironment
appEnvironment = Utilities.GetApplication()
if DEBUG == True: arcpy.AddMessage("App environment: " + appEnvironment)
global aprx
global mapList
global mxd
global df
isPro = False
#if gisVersion == "1.0": #Pro:
if appEnvironment == "ARCGIS_PRO":
from arcpy import mp
aprx = arcpy.mp.ArcGISProject("CURRENT")
mapList = aprx.listMaps()[0]
isPro = True
#else:
elif appEnvironment == "ARCMAP":
from arcpy import mapping
mxd = arcpy.mapping.MapDocument('CURRENT')
df = arcpy.mapping.ListDataFrames(mxd)[0]
isPro = False
else:
if DEBUG == True: arcpy.AddMessage("Non-map application...")
# If grid size is drawn on the map, use this instead of cell width and cell height
inputExtentDrawnFromMap = False
angleDrawn = 0
workspace = arcpy.env.workspace
topLeftDrawn = 0
global cellWidth
global cellHeight
if float(cellWidth) == 0 and float(cellHeight) == 0:
inputExtentDrawnFromMap = True
tempGridFC = os.path.join(arcpy.env.scratchWorkspace, "GridSize")
arcpy.CopyFeatures_management(gridSize, tempGridFC)
pts = None
with arcpy.da.SearchCursor(tempGridFC, 'SHAPE@XY', explode_to_points=True) as cursor:
pts = [r[0] for r in cursor][0:4]
arcpy.Delete_management(tempGridFC)
# Find the highest points in the drawn rectangle, to calculate the top left and top right coordinates.
highestPoint = None
nextHighestPoint = None
for pt in pts:
if highestPoint is None or pt[1] > highestPoint[1]:
nextHighestPoint = highestPoint
highestPoint = pt
elif nextHighestPoint is None or pt[1] > nextHighestPoint[1]:
nextHighestPoint = pt
topLeft = highestPoint if highestPoint[0] < nextHighestPoint[0] else nextHighestPoint
topRight = highestPoint if highestPoint[0] > nextHighestPoint[0] else nextHighestPoint
topLeftDrawn = topLeft
# Calculate the cell height and cell width
cellWidth= math.sqrt((pts[0][0] - pts[1][0]) ** 2 + (pts[0][1] - pts[1][1]) ** 2)
cellHeight = math.sqrt((pts[1][0] - pts[2][0]) ** 2 + (pts[1][1] - pts[2][1]) ** 2)
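            # Illustrative note (not from the original source): these are
            # the Euclidean distances between adjacent corners of the drawn
            # rectangle, i.e. the lengths of its two edges.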
# Calculate angle
hypotenuse = math.sqrt(math.pow(topLeft[0] - topRight[0], 2) + math.pow(topLeft[1] - topRight[1], 2))
adjacent = topRight[0] - topLeft[0]
numberToCos = float(adjacent)/float(hypotenuse)
angleInRadians = math.acos(numberToCos)
angleDrawn = math.degrees(angleInRadians)
if (topRight[1] > topLeft[1]):
angleDrawn = 360 - angleDrawn
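            # Illustrative example (not from the original source): a top
            # edge from (0, 0) to (3, 4) gives hypotenuse 5, adjacent 3 and
            # an angle of about 53.13 degrees; because the right corner is
            # higher, angleDrawn becomes roughly 306.87.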
else:
if (cellUnits == "Feet"):
cellWidth = float(cellWidth) * 0.3048
cellHeight = float(cellHeight) * 0.3048
        # Get the coordinates of the target point (the grid origin).
rows = arcpy.SearchCursor(targetPointOrigin)
extent = None
for row in rows:
shape = row.getValue("SHAPE")
extent = shape.extent
pointExtents = str.split(str(extent))
''' This seemed to be shifting the grid when it was not required so commented out
# Shift the grid center point if the rows and/or columns are even.
if (float(numberCellsHo)%2 == 0.0):
hoShiftAmt = float(cellHeight) / 2.0
# Determines shift up/down based on where box was inputExtentDrawnFromMap
if inputExtentDrawnFromMap == False:
pointExtents[1] = str(float(pointExtents[1]) - hoShiftAmt)
elif (float(topLeftDrawn[1]) > float(pointExtents[1])):
pointExtents[1] = str(float(pointExtents[1]) - hoShiftAmt)
else:
pointExtents[1] = str(float(pointExtents[1]) + hoShiftAmt)
if (float(numberCellsVert)%2 == 0.0):
vertShiftAmt = float(cellWidth) / 2.0
# Determines shift left/right based on where box was inputExtentDrawnFromMap
if inputExtentDrawnFromMap == False:
pointExtents[0] = str(float(pointExtents[0]) - vertShiftAmt)
elif (float(topLeftDrawn[0]) > float(pointExtents[0])):
pointExtents[0] = str(float(pointExtents[0]) - vertShiftAmt)
else:
pointExtents[0] = str(float(pointExtents[0]) + vertShiftAmt)
'''
# From the template extent, get the origin, y axis, and opposite corner coordinates
rightCorner = float(pointExtents[0]) + ((float(cellWidth) * float(numberCellsVert)) /2.0)
leftCorner = float(pointExtents[0]) - ((float(cellWidth) * float(numberCellsVert)) /2.0)
topCorner = float(pointExtents[1]) + ((float(cellHeight) * float(numberCellsHo)) /2.0)
bottomCorner = float(pointExtents[1]) - ((float(cellHeight) * float(numberCellsHo)) /2.0)
originCoordinate = str(leftCorner) + " " + str(bottomCorner)
yAxisCoordinate = str(leftCorner) + " " + str(bottomCorner + 10)
oppCornerCoordinate = str(rightCorner) + " " + str(topCorner)
fullExtent = str(leftCorner) + " " + str(bottomCorner) + " " + str(rightCorner) + " " + str(topCorner)
# If grid size is drawn on the map, then calculate the rotation of the grid
if inputExtentDrawnFromMap:
# Find the highest two points in the inputExtentDrawnFromMap shape
highestPoint = None
nextHighestPoint = None
for pt in pts:
if highestPoint is None or pt[1] > highestPoint[1]:
nextHighestPoint = highestPoint
highestPoint = pt
elif nextHighestPoint is None or pt[1] > nextHighestPoint[1]:
nextHighestPoint = pt
topLeft = highestPoint if highestPoint[0] < nextHighestPoint[0] else nextHighestPoint
topRight = highestPoint if highestPoint[0] > nextHighestPoint[0] else nextHighestPoint
yDiff = topRight[1] - topLeft[1]
xDiff = topRight[0] - topLeft[0]
# Set the Y-Axis Coordinate so that the grid rotates properly
extentHeight = float(topCorner) - float(bottomCorner)
# Set the start position for labeling
startPos = None
if (labelStartPos == "Upper-Right"):
startPos = "UR"
elif (labelStartPos == "Upper-Left"):
startPos = "UL"
elif (labelStartPos == "Lower-Left"):
startPos = "LL"
elif (labelStartPos == "Lower-Right"):
startPos = "LR"
arcpy.AddMessage("Creating Fishnet Grid")
arcpy.CreateFishnet_management(tempOutput, originCoordinate, yAxisCoordinate, 0, 0, str(numberCellsHo), str(numberCellsVert), oppCornerCoordinate, "NO_LABELS", fullExtent, "POLYGON")
# Sort the grid upper left to lower right, and delete the in memory one
arcpy.AddMessage("Sorting the grid for labeling")
tempSort = os.path.join("in_memory", "tempSort")
arcpy.Sort_management(tempOutput, tempSort, [["Shape", "ASCENDING"]], startPos)
# arcpy.Delete_management("in_memory") #Not sure why we are trying to delete in_memory
# Add a field which will be used to add the grid labels
arcpy.AddMessage("Adding field for labeling the grid")
gridField = "Grid"
arcpy.AddField_management(tempSort, gridField, "TEXT")
# Number the fields
arcpy.AddMessage("Numbering the grids")
letterIndex = 1
secondLetterIndex = 1
letter = 'A'
secondLetter = 'A'
number = 1
lastY = -9999
cursor = arcpy.UpdateCursor(tempSort)
for row in cursor:
yPoint = row.getValue("SHAPE").firstPoint.Y
if (lastY != yPoint) and (lastY != -9999):
letterIndex += 1
letter = ColIdxToXlName(letterIndex)
if (labelStyle != "Numeric"):
number = 1
secondLetter = 'A'
secondLetterIndex = 1
lastY = yPoint
if (labelStyle == "Alpha-Numeric"):
row.setValue(gridField, str(letter) + str(number))
elif (labelStyle == "Alpha-Alpha"):
row.setValue(gridField, str(letter) + str(secondLetter))
elif (labelStyle == "Numeric"):
row.setValue(gridField, str(number))
cursor.updateRow(row)
number += 1
secondLetterIndex += 1
secondLetter = ColIdxToXlName(secondLetterIndex)
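# ColIdxToXlName is defined elsewhere in this script; for readers of this excerpt, a typical
# 1-based index -> spreadsheet-style column name conversion looks like the hypothetical
# sketch below (shown only to document the labeling scheme used in the loop above):
#
#   def col_idx_to_xl_name(idx):
#       name = ''
#       while idx > 0:
#           idx, rem = divmod(idx - 1, 26)
#           name = chr(ord('A') + rem) + name
#       return name   # 1 -> 'A', 26 -> 'Z', 27 -> 'AA'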
# Rotate the shape, if needed.
if (inputExtentDrawnFromMap):
arcpy.AddMessage("Rotating the grid")
RotateFeatureClass(tempSort, outputFeatureClass, angleDrawn, pointExtents[0] + " " + pointExtents[1])
else:
arcpy.CopyFeatures_management(tempSort, outputFeatureClass)
arcpy.Delete_management(tempSort)
# Get and label the output feature
#UPDATE
targetLayerName = os.path.basename(outputFeatureClass)
if appEnvironment == "ARCGIS_PRO":
#params = arcpy.GetParameterInfo()
## get the symbology from the GRG.lyr
#scriptPath = sys.path[0]
#layerFilePath = os.path.join(scriptPath,r"commondata\userdata\GRG.lyrx")
#arcpy.AddMessage("Applying Symbology from {0}".format(layerFilePath))
#params[8].symbology = layerFilePath
arcpy.AddMessage("Do not apply symbology it will be done in the next task step")
elif appEnvironment == "ARCMAP":
#arcpy.AddMessage("Adding features to map (" + str(targetLayerName) + ")...")
#arcpy.MakeFeatureLayer_management(outputFeatureClass, targetLayerName)
# create a layer object
#layer = arcpy.mapping.Layer(targetLayerName)
# get the symbology from the NumberedStructures.lyr
#layerFilePath = os.path.join(os.getcwd(),"data\Layers\GRG.lyr")
#layerFilePath = os.path.join(os.path.dirname(os.path.dirname(__file__)),"layers\GRG.lyr")
# apply the symbology to the layer
#arcpy.ApplySymbologyFromLayer_management(layer, layerFilePath)
# add layer to map
#arcpy.mapping.AddLayer(df, layer, "AUTO_ARRANGE")
# find the target layer in the map
#mapLyr = arcpy.mapping.ListLayers(mxd, targetLayerName)[0]
#arcpy.AddMessage("Labeling output features (" + str(targetLayerName) + ")...")
# Work around needed as ApplySymbologyFromLayer_management does not honour labels
#labelLyr = arcpy.mapping.Layer(layerFilePath)
# copy the label info from the source to the map layer
#mapLyr.labelClasses = labelLyr.labelClasses
# turn labels on
#mapLyr.showLabels = True
arcpy.AddMessage("Non-map environment, skipping labeling based on best practices")
else:
arcpy.AddMessage("Non-map environment, skipping labeling...")
# Apply symbology to the GRG layer
#UPDATE
#symbologyPath = os.path.dirname(workspace) + "\\Layers\GRG.lyr"
#arcpy.ApplySymbologyFromLayer_management(layer, symbologyPath)
# Set tool output
arcpy.SetParameter(8, outputFeatureClass)
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
print(msgs)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + \
"\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
print(pymsg + "\n")
print(msgs)
# MAIN =============================================
if __name__ == "__main__":
main()
|
pshowalter/solutions-geoprocessing-toolbox
|
clearing_operations/scripts/PointTargetGRG.py
|
Python
|
apache-2.0
| 24,658 | 0.005272 |
# SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <dlu-ch@users.noreply.github.com>
import testenv # also sets up module search paths
import dlb.di
import dlb.fs
import sys
import re
import logging
import time
import io
import collections
import unittest
class LoggingCompatibilityTest(unittest.TestCase):
def test_levels_are_equals(self):
for level_name in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
self.assertEqual(getattr(logging, level_name), getattr(dlb.di, level_name))
class GetLevelMarkerTest(unittest.TestCase):
def test_exact_levels_are_correct(self):
self.assertEqual('D', dlb.di.get_level_indicator(dlb.di.DEBUG))
self.assertEqual('I', dlb.di.get_level_indicator(dlb.di.INFO))
self.assertEqual('W', dlb.di.get_level_indicator(dlb.di.WARNING))
self.assertEqual('E', dlb.di.get_level_indicator(dlb.di.ERROR))
self.assertEqual('C', dlb.di.get_level_indicator(dlb.di.CRITICAL))
def test_fails_for_nonpositive(self):
msg = "'level' must be positive"
with self.assertRaises(ValueError) as cm:
dlb.di.get_level_indicator(logging.NOTSET)
self.assertEqual(msg, str(cm.exception))
with self.assertRaises(ValueError) as cm:
dlb.di.get_level_indicator(-123)
self.assertEqual(msg, str(cm.exception))
def test_exact_greater_than_critical_are_critical(self):
self.assertEqual('C', dlb.di.get_level_indicator(dlb.di.CRITICAL + 123))
def test_between_is_next_smaller(self):
self.assertEqual('I', dlb.di.get_level_indicator(dlb.di.INFO + 1))
self.assertEqual('I', dlb.di.get_level_indicator(dlb.di.WARNING - 1))
class FormatMessageTest(unittest.TestCase):
def format_info_message(self, message):
return dlb.di.format_message(message, dlb.di.INFO)
def test_fails_on_empty(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message('')
msg = "'message' must contain at least one non-empty line"
self.assertEqual(msg, str(cm.exception))
def test_single_line_returns_stripped(self):
self.assertEqual('I äüä schoo\U0001f609', self.format_info_message(' äüä schoo\U0001f609 '))
def test_fails_for_none(self):
with self.assertRaises(TypeError) as cm:
# noinspection PyTypeChecker
self.format_info_message(None)
msg = "'message' must be a str"
self.assertEqual(msg, str(cm.exception))
def test_fails_for_bytes(self):
with self.assertRaises(TypeError) as cm:
# noinspection PyTypeChecker
self.format_info_message(b'abc')
msg = "'message' must be a str"
self.assertEqual(msg, str(cm.exception))
def test_fails_for_nonprintable(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message('abc\n a\0')
msg = (
"'message' must not contain ASCII control characters except "
"'\\t' and '\\b', unlike '\\x00' in line 2"
)
self.assertEqual(msg, str(cm.exception))
def test_removed_empty_lines_before_and_after(self):
m = self.format_info_message(' \n \n\n \na \n b\n\n \n')
self.assertEqual("I a \n | b", m)
m = self.format_info_message(' \r\n \r\n\r\n \r\na \r\n b\r\n\r\n \r\n')
self.assertEqual("I a \n | b", m)
m = self.format_info_message(' \r \r\r \ra \r b\r\r \r')
self.assertEqual("I a \n | b", m)
def test_removed_empty_lines_between(self):
m = self.format_info_message('a\n\n\n b\n c')
self.assertEqual("I a \n | b \n | c", m)
def test_unindents(self):
m = self.format_info_message(
"""
bla
a
b
""")
self.assertEqual("I bla \n | a \n | b", m)
def test_fails_for_underindented(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message(
"""
bla
x
y
""")
msg = (
"each continuation line in 'message' must be indented at least 4 spaces more than "
"the first non-empty line, unlike line 4"
)
self.assertEqual(msg, str(cm.exception))
with self.assertRaises(ValueError) as cm:
self.format_info_message(
"""
bla
x
y
""")
self.assertEqual(msg, str(cm.exception))
def test_fails_for_reserved_start(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message("'hehe'")
msg = "first non-empty line in 'message' must not start with reserved character \"'\""
self.assertEqual(msg, str(cm.exception))
def test_field_are_justified(self):
m = self.format_info_message(
"""
a\tb33\t100\b
a2\tb2\t10\b
a33\tb\t1\b
""")
self.assertEqual('I a b33100 \n | a2 b2 10 \n | a33b 1', m)
m = self.format_info_message(
"""
table:
a:\t A =\b 1\b
b2:\t B =\b 23\b
""")
self.assertEqual('I table: \n | a: A = 1 \n | b2: B = 23', m)
def test_fails_for_dot_at_end_of_first_line(self):
with self.assertRaises(ValueError) as cm:
self.format_info_message("start...")
msg = "first non-empty line in 'message' must not end with '.'"
self.assertEqual(msg, str(cm.exception))
with self.assertRaises(ValueError) as cm:
self.format_info_message("done.")
msg = "first non-empty line in 'message' must not end with '.'"
self.assertEqual(msg, str(cm.exception))
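# Taken together, the format tests above pin down the rendered message shape: a one-letter
# level indicator (D/I/W/E/C), the stripped first line, and continuation lines prefixed
# with ' | ', for example (spacing approximate):
#
#   I table:
#    | a:  A =  1
#    | b2: B = 23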
class MessageThresholdTest(unittest.TestCase):
def test_default_is_info(self):
dlb.di.set_threshold_level(dlb.di.WARNING + 1)
self.assertTrue(dlb.di.is_unsuppressed_level(dlb.di.WARNING + 1))
self.assertFalse(dlb.di.is_unsuppressed_level(dlb.di.WARNING))
dlb.di.set_threshold_level(dlb.di.CRITICAL + 100)
self.assertTrue(dlb.di.is_unsuppressed_level(dlb.di.CRITICAL + 100))
self.assertFalse(dlb.di.is_unsuppressed_level(dlb.di.CRITICAL + 99))
def test_fails_on_nonpositive(self):
with self.assertRaises(ValueError) as cm:
dlb.di.set_threshold_level(0)
msg = "'level' must be positive"
self.assertEqual(msg, str(cm.exception))
def test_fails_on_none(self):
with self.assertRaises(TypeError) as cm:
dlb.di.set_threshold_level(None)
msg = "'level' must be something convertible to an int"
self.assertEqual(msg, str(cm.exception))
class SetOutputFileTest(unittest.TestCase):
class File:
def write(self, text: str):
pass
def test_fails_for_none(self):
with self.assertRaises(TypeError) as cm:
dlb.di.set_output_file(None)
msg = "'file' does not have a 'write' method: None"
self.assertEqual(msg, str(cm.exception))
def test_successful_for_stdout_and_stderr(self):
dlb.di.set_output_file(sys.stdout)
r = dlb.di.set_output_file(sys.stderr)
self.assertEqual(sys.stdout, r)
r = dlb.di.set_output_file(sys.stderr)
self.assertEqual(sys.stderr, r)
def test_successful_for_custom_class_with_only_write(self):
f = SetOutputFileTest.File()
r = dlb.di.set_output_file(f)
r = dlb.di.set_output_file(r)
self.assertEqual(f, r)
class ClusterTest(unittest.TestCase):
def setUp(self):
dlb.di.set_threshold_level(dlb.di.INFO)
_ = dlb.di._first_monotonic_ns # make sure attribute exists
dlb.di._first_monotonic_ns = None
def test_works_as_context_manager(self):
output = io.StringIO()
dlb.di.set_output_file(output)
c = dlb.di.Cluster('A\n a')
self.assertEqual('', output.getvalue()) # does not output anything
with c as cr:
self.assertEqual('I A \n | a\n', output.getvalue()) # does not output anything
self.assertIsNone(cr)
def test_cluster_do_nest(self):
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A'):
with dlb.di.Cluster('B'):
with dlb.di.Cluster('C'):
pass
with dlb.di.Cluster('D'):
pass
self.assertEqual('I A\n I B\n I C\n I D\n', output.getvalue()) # does not output anything
def test_level_threshold_is_observed_when_nested(self):
dlb.di.set_threshold_level(dlb.di.WARNING)
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A', level=dlb.di.ERROR):
self.assertEqual('E A\n', output.getvalue())
with dlb.di.Cluster('B'):
self.assertEqual('E A\n', output.getvalue())
with dlb.di.Cluster('C', level=dlb.di.WARNING):
self.assertEqual('E A\n I B\n W C\n', output.getvalue())
with dlb.di.Cluster('D'):
self.assertEqual('E A\n I B\n W C\n', output.getvalue())
def test_progress_only_if_not_suppress_at_enter(self):
dlb.di.set_threshold_level(dlb.di.WARNING)
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A', is_progress=True):
self.assertEqual('', output.getvalue())
self.assertEqual('', output.getvalue())
output = io.StringIO()
dlb.di.set_output_file(output)
with self.assertRaises(AssertionError):
with dlb.di.Cluster('A', is_progress=True):
assert False
self.assertEqual('', output.getvalue())
def test_progress_success_is_at_most_info(self):
dlb.di.set_threshold_level(dlb.di.DEBUG)
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A', level=dlb.di.DEBUG, is_progress=True):
self.assertEqual('D A...\n', output.getvalue())
self.assertEqual('D A...\n D done.\n', output.getvalue())
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A', level=dlb.di.CRITICAL, is_progress=True):
self.assertEqual('C A...\n', output.getvalue())
self.assertEqual('C A...\n I done.\n', output.getvalue()) # at most dlb.di.INFO
def test_progress_failure_is_at_least_error(self):
dlb.di.set_threshold_level(dlb.di.DEBUG)
output = io.StringIO()
dlb.di.set_output_file(output)
with self.assertRaises(AssertionError):
with dlb.di.Cluster('A', level=dlb.di.DEBUG, is_progress=True):
assert False
self.assertEqual('D A...\n E failed with AssertionError.\n', output.getvalue()) # at least dlb.di.ERROR
output = io.StringIO()
dlb.di.set_output_file(output)
with self.assertRaises(AssertionError):
with dlb.di.Cluster('A', level=dlb.di.CRITICAL, is_progress=True):
assert False
self.assertEqual('C A...\n C failed with AssertionError.\n', output.getvalue())
def test_timing_information_is_correct_for_delayed_output_of_title(self):
dlb.di.set_threshold_level(dlb.di.WARNING)
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A', with_time=True):
self.assertEqual('', output.getvalue())
time.sleep(0.1)
with dlb.di.Cluster('B'):
self.assertEqual('', output.getvalue())
with dlb.di.Cluster('C', level=dlb.di.WARNING):
self.assertRegex(output.getvalue(), r'\A()I A \[\+0\.0+s\]\n I B\n W C\n\Z')
def test_timing_information_is_correct_for_progress(self):
output = io.StringIO()
dlb.di.set_output_file(output)
regex = re.compile(r"(?m)(.|\n)* \[\+(?P<time>[0-9.]+)s\]\n\Z")
with dlb.di.Cluster('A', with_time=True, is_progress=True):
s = output.getvalue()
m = regex.match(s)
t0 = m.group('time')
self.assertRegex(t0, r'\A()0\.0{1,9}\Z')
time.sleep(0.1)
s = output.getvalue()
m = regex.match(s)
t = m.group('time')
self.assertNotEqual(t, t0, s)
class InformTest(unittest.TestCase):
def setUp(self):
dlb.di.set_threshold_level(dlb.di.INFO)
_ = dlb.di._first_monotonic_ns # make sure attribute exists
dlb.di._first_monotonic_ns = None
def test_output_without_cluster_is_not_indented(self):
output = io.StringIO()
dlb.di.set_output_file(output)
self.assertTrue(dlb.di.inform('M\n m'))
self.assertEqual('I M \n | m\n', output.getvalue())
def test_output_in_cluster_is_indented(self):
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A'):
self.assertTrue(dlb.di.inform('M\n m'))
self.assertEqual('I A\n I M \n | m\n', output.getvalue())
dlb.di.set_threshold_level(dlb.di.WARNING)
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('A'):
with dlb.di.Cluster('B'):
self.assertTrue(dlb.di.inform('M\n m', level=dlb.di.WARNING))
self.assertEqual('I A\n I B\n W M \n | m\n', output.getvalue())
def test_suppresses_below_threshold(self):
output = io.StringIO()
dlb.di.set_output_file(output)
self.assertFalse(dlb.di.inform('M\n m', level=dlb.di.DEBUG))
self.assertEqual('', output.getvalue())
def test_timing_information_is_correct(self):
output = io.StringIO()
dlb.di.set_output_file(output)
self.assertTrue(dlb.di.inform('M\n m', with_time=True))
self.assertRegex(output.getvalue(), r'\A()I M \[\+0\.0{1,9}s\] \n \| m\n\Z')
class UsageExampleTest(unittest.TestCase):
def setUp(self):
dlb.di.set_threshold_level(dlb.di.INFO)
_ = dlb.di._first_monotonic_ns # make sure attribute exists
dlb.di._first_monotonic_ns = None
def test_example1(self):
output = io.StringIO()
dlb.di.set_output_file(output)
with dlb.di.Cluster('title', level=dlb.di.DEBUG):
dlb.di.inform(
"""
summary
first\t 1\t
second\t 200\t
""")
self.assertEqual('D title\n I summary \n | first 1 \n | second 200\n', output.getvalue())
def test_example2(self):
output = io.StringIO()
dlb.di.set_output_file(output)
rom_max = 128
logfile = dlb.fs.Path('out/linker.log')
with dlb.di.Cluster(f"analyze memory usage\n see {logfile.as_string()!r} for details",
is_progress=True):
ram, rom, emmc = (12, 108, 512)
dlb.di.inform(
f"""
in use:
RAM:\t {ram}\b kB
ROM (NOR flash):\t {rom}\b kB
eMMC:\t {emmc}\b kB
""")
if rom > 0.8 * rom_max:
dlb.di.inform("more than 80% of ROM used", level=dlb.di.WARNING)
o = (
"I analyze memory usage... \n"
" | see 'out/linker.log' for details\n"
" I in use: \n"
" | RAM: 12 kB \n"
" | ROM (NOR flash): 108 kB \n"
" | eMMC: 512 kB\n"
" W more than 80% of ROM used\n"
" I done.\n"
)
self.assertEqual(o, output.getvalue())
def test_example3(self):
# https://en.wikipedia.org/wiki/Halstead_complexity_measures
metrics = [
('volume', 'V', 1.7, ''),
('programming required', 'T', 127.3, ' s'),
('difficulty', 'D', 12.8, '')
]
m = ''.join(f"\n {n}:\t {s} =\b {v}\b{u}" for n, s, v, u in metrics)
s = dlb.di.format_message('Halstead complexity measures:' + m, dlb.di.INFO)
o = (
"I Halstead complexity measures: \n"
" | volume: V = 1.7 \n"
" | programming required: T = 127.3 s \n"
" | difficulty: D = 12.8"
)
self.assertEqual(o, s)
|
dlu-ch/dlb
|
test/dlb/0/test_di.py
|
Python
|
lgpl-3.0
| 16,694 | 0.001498 |
from glob import glob
import numpy
import pickle
import os
# Run iterations of "count" to count the number of terms in each folder of zipped up pubmed articles
home = os.environ["HOME"]
scripts = "%s/SCRIPT/repofish/analysis/methods" %(home)
base = "%s/data/pubmed" %os.environ["LAB"]
outfolder = "%s/repos" %(base)
articles_folder = "%s/articles" %(base)
if not os.path.exists(outfolder):
os.mkdir(outfolder)
folders = [x for x in glob("%s/*" %articles_folder) if os.path.isdir(x)]
batch_size = 1000.0
iters = int(numpy.ceil(len(folders)/batch_size))
# Prepare and submit a job for each
for i in range(iters):
start = i*int(batch_size)
if i != iters - 1:  # every batch except the last gets a full batch_size slice
end = start + int(batch_size)
else:
end = len(folders)
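# Worked example (illustrative): with 2500 folders and batch_size = 1000.0, iters = 3 and
# the slices taken below are folders[0:1000], folders[1000:2000], folders[2000:2500].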
subset = folders[start:end]
script_file = "%s/findgithub_%s.job" %(scripts,i)
filey = open(script_file,'w')
filey.writelines("#!/bin/bash\n")
filey.writelines("#SBATCH --job-name=%s\n" %i)
filey.writelines("#SBATCH --output=.out/%s.out\n" %i)
filey.writelines("#SBATCH --error=.out/%s.err\n" %i)
filey.writelines("#SBATCH --time=2:00:00\n")
for folder in subset:
filey.writelines('python %s/1.find_repos.py "%s" %s\n' % (scripts,folder,outfolder))
filey.close()
os.system("sbatch -A Analysis_Lonestar -p normal -n 24 findgithub_%s.job" %i)
|
vsoch/repofish
|
analysis/methods/1.run_find_repos.py
|
Python
|
mit
| 1,341 | 0.014169 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Gaspar/.qgis/python/plugins/delPropiedad/forms_ui/frmSelec.ui'
#
# Created: Wed Jul 18 12:50:20 2012
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_frmSelec(object):
def setupUi(self, frmSelec):
frmSelec.setObjectName(_fromUtf8("frmSelec"))
frmSelec.resize(972, 310)
frmSelec.setWindowTitle(QtGui.QApplication.translate("frmSelec", "Seleccionar trabajo", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget = QtGui.QTableWidget(frmSelec)
self.tableWidget.setGeometry(QtCore.QRect(10, 30, 951, 231))
self.tableWidget.setToolTip(QtGui.QApplication.translate("frmSelec", "Seleccione una fila y pulse aceptar", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.bttAceptar = QtGui.QPushButton(frmSelec)
self.bttAceptar.setGeometry(QtCore.QRect(440, 270, 111, 31))
self.bttAceptar.setText(QtGui.QApplication.translate("frmSelec", "Aceptar", None, QtGui.QApplication.UnicodeUTF8))
self.bttAceptar.setObjectName(_fromUtf8("bttAceptar"))
self.bttCancelar = QtGui.QPushButton(frmSelec)
self.bttCancelar.setGeometry(QtCore.QRect(570, 270, 91, 31))
self.bttCancelar.setText(QtGui.QApplication.translate("frmSelec", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
self.bttCancelar.setObjectName(_fromUtf8("bttCancelar"))
self.label = QtGui.QLabel(frmSelec)
self.label.setGeometry(QtCore.QRect(20, 10, 331, 16))
self.label.setText(QtGui.QApplication.translate("frmSelec", "Selecciones el trabajo que desea consultar:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(frmSelec)
QtCore.QMetaObject.connectSlotsByName(frmSelec)
def retranslateUi(self, frmSelec):
pass
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
frmSelec = QtGui.QDialog()
ui = Ui_frmSelec()
ui.setupUi(frmSelec)
frmSelec.show()
sys.exit(app.exec_())
|
gasparmoranavarro/TopoDelProp
|
forms/frmSelec.py
|
Python
|
gpl-2.0
| 2,643 | 0.004162 |
__author__ = 'Carlos'
from time import sleep, time
import minimalmodbus as mb
import csv
class Torch200:
def __init__(self, com_port):
mb.BAUDRATE = 9600
mb.TIMEOUT = 3
self.profile = mb.Instrument(com_port, 1)
self.control = mb.Instrument(com_port, 2)
self.start_time = None
self.exceptions_count = 0
def start(self):
pass
def stop(self):
pass
def get_data(self):
try:
(prof_temp,) = self.profile.read_registers(0x1000, 1)
(ctrl_temp, set_point) = self.control.read_registers(0x1000, 2)
except (IOError, ValueError):
self.control.serial.flushInput()
self.exceptions_count += 1
raise
meas_time = time()
return (meas_time, set_point, ctrl_temp, prof_temp)
def set_program(self, id):
if type(id) is not int:
raise TypeError
if id not in range(1, 6):
raise ValueError
self.control.write_registers(0x1004, id)
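# Example use (illustrative): program ids outside 1..5 raise ValueError, non-int ids TypeError.
#
#   oven = Torch200('COM15')
#   oven.set_program(3)   # select stored profile 3 on the controller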
if __name__=='__main__':
oven = Torch200('COM15')
start_time = None
while True:
try:
data = oven.get_data()
except (IOError, ValueError):
sleep(0.1)
continue
(meas_time, set_point, ctrl_temp, prof_temp) = data
if set_point > 0:
if start_time is None:
start_time = meas_time
filename = r'C:\Documents and Settings\Carlos\My Documents\Dropbox\torch\T200C+ ' + str(start_time) + r'.csv'
csv_fp = open(filename, 'wb')
csv_out = csv.writer(csv_fp)
csv_out.writerow(['time', 'set_point', 'ctrl_temp', 'prof_temp'])
data = (meas_time - start_time, set_point, ctrl_temp, prof_temp)
csv_out.writerow(data)
else:
if start_time is not None:
csv_fp.close()
start_time = None
print "(%6.2f, %3d, %3d, %3d)" % data
sleep(0.5)
|
nanodude/Torch200
|
Torch200.py
|
Python
|
gpl-3.0
| 2,015 | 0.002978 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.model.ocil_2_0.QuestionResultType import QuestionResultType
import logging
logger = logging.getLogger(__name__)
class ChoiceQuestionResultType(QuestionResultType):
MODEL_MAP = {
'elements': [
{'tag_name': 'answer', 'class': 'ChoiceAnswerType', 'max': 1},
],
}
|
cjaymes/pyscap
|
src/scap/model/ocil_2_0/ChoiceQuestionResultType.py
|
Python
|
gpl-3.0
| 988 | 0.001012 |
# cls_plan_BDI.py
import datetime
class Plan_BDI(object):
"""
class for handling various plans for AIKIF using
Belief | Desires | Intentions
"""
def __init__(self, name, dependency):
self.name = name
self.id = 1
self.dependency = dependency
self.plan_version = "v0.10"
self.success = False
self.start_date = datetime.datetime.now().strftime("%I:%M%p %d-%B-%Y")
self.resources = []
self.constraint = []
self.beliefs = Beliefs(self)
self.desires = Desires(self)
self.intentions = Intentions(self)
def __str__(self):
res = "---== Plan ==---- \n"
res += "name : " + self.name + "\n"
res += "version : " + self.plan_version + "\n"
for i in self.beliefs.list():
res += "belief : " + i + "\n"
for i in self.desires.list():
res += "desire : " + i + "\n"
for i in self.intentions.list():
res += "intention : " + i + "\n"
return res
def get_name(self):
return self.name
def generate_plan(self):
"""
Main logic in class which generates a plan
"""
print("generating plan... TODO")
def load_plan(self, fname):
""" read the list of thoughts from a text file """
with open(fname, "r") as f:
for line in f:
if line != '':
tpe, txt = self.parse_plan_from_string(line)
#print('tpe= "' + tpe + '"', txt)
if tpe == 'name':
self.name = txt
elif tpe == 'version':
self.plan_version = txt
elif tpe == 'belief':
self.beliefs.add(txt)
elif tpe == 'desire':
self.desires.add(txt)
elif tpe == 'intention':
self.intentions.add(txt)
def save_plan(self, fname):
with open(fname, "w") as f:
f.write("# AIKIF Plan specification \n")
f.write("name :" + self.name + "\n")
f.write("version :" + self.plan_version + "\n")
for txt in self.beliefs.list():
f.write("belief :" + txt + "\n")
for txt in self.desires.list():
f.write("desire :" + txt + "\n")
for txt in self.intentions.list():
f.write("intention :" + txt + "\n")
def parse_plan_from_string(self, line):
tpe = ''
txt = ''
if line != '':
if line[0:1] != '#':
parts = line.split(":")
tpe = parts[0].strip()
txt = parts[1].strip()
return tpe, txt
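# For reference, a plan file as written by save_plan (and parsed back by load_plan above)
# looks like the following hypothetical example:
#
#   # AIKIF Plan specification
#   name :new plan
#   version :v0.10
#   belief :belief0
#   desire :desire0
#   intention :intention0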
def add_resource(self, name, tpe):
"""
add a resource available for the plan. These are text strings
of real world objects mapped to an ontology key or programs
from the toolbox section (can also be external programs)
"""
self.resources.append([name, tpe])
def add_constraint(self, name, tpe, val):
"""
adds a constraint for the plan
"""
self.constraint.append([name, tpe, val])
class Thoughts(object):
""" base class for beliefs, desires, intentions simply
to make it easier to manage similar groups of objects """
def __init__(self, thought_type):
#print("Thoughts - init: thought_type = " + thought_type + "\n")
self._thoughts = []
self._type = thought_type
def __str__(self):
res = ' -- Thoughts --\n'
for i in self._thoughts:
res += i + '\n'
return res
def add(self, name):
self._thoughts.append(name)
def list(self, print_console=False):
lst = []
for i, thought in enumerate(self._thoughts):
if print_console is True:
print(self._type + str(i) + ' = ' + thought)
lst.append(thought)
return lst
class Beliefs(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Beliefs, self).__init__('belief')
class Desires(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Desires, self).__init__('desire')
class Intentions(Thoughts):
def __init__(self, parent_plan):
self.parent_plan = parent_plan
super(Intentions, self).__init__('intention')
def TEST():
myplan = Plan_BDI('new plan', '')
myplan.beliefs.add('belief0')
myplan.beliefs.add('belief1')
myplan.beliefs.add('belief2')
myplan.desires.add('desire0')
myplan.desires.add('desire1')
myplan.intentions.add('intention0')
myplan.beliefs.list()
myplan.desires.list()
myplan.intentions.list()
#myplan.save_plan("test_plan.txt")
#myplan.load_plan("test_plan.txt")
print(str(myplan))
if __name__ == '__main__':
TEST()
|
acutesoftware/AIKIF
|
aikif/lib/cls_plan_BDI.py
|
Python
|
gpl-3.0
| 5,209 | 0.009023 |
#!/usr/bin/python
#===============================================================================
#
# conversion script to create an mbstestlib-readable file containing test specifications
# out of a testset file in XML format
#
#===============================================================================
# Input can be given via optional command line parameters.
#
#
# TODO: add check for joint count
# TODO: add model description to output (as comment)
import sys # for io
import xml.dom.minidom # for xml parsing
from glob import glob # for expanding wildcards in cmd line arguements
class _config:
default_input_file = 'testset-example.xml'
output_file_ext = '.txt'
empty_vecn = ""
zero_vec = "0 0 0"
unity_mat = "1 0 0 0 1 0 0 0 1"
case_defaults = { 'delta': "0.001",
'base_r': zero_vec,
'base_R': unity_mat,
'base_v': zero_vec,
'base_omega': zero_vec,
'base_vdot': zero_vec,
'base_omegadot': zero_vec,
'gravitiy': zero_vec,
'joints_q': empty_vecn,
'joints_qdot': empty_vecn,
'joints_qdotdot': empty_vecn,
'joints_tau': empty_vecn,
'tcp_r': zero_vec,
'tcp_R': unity_mat,
'tcp_v': zero_vec,
'tcp_omega': zero_vec,
'tcp_vdot': zero_vec,
'tcp_omegadot': zero_vec,
'f_ext': zero_vec,
'n_ext': zero_vec
}
case_output_order = [
'delta',
'base_r',
'base_R',
'base_v',
'base_omega',
'base_vdot',
'base_omegadot',
'gravitiy',
'joints_q',
'joints_qdot',
'joints_qdotdot',
'joints_tau',
'tcp_r',
'tcp_R',
'tcp_v',
'tcp_omega',
'tcp_vdot',
'tcp_omegadot',
'f_ext',
'n_ext'
]
class _state:
error_occured_while_processing_xml = False
input_file = ''
def getText(nodelist):
# str(method.childNodes[0].nodeValue) # TODO: remove
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
# inspired by http://code.activestate.com/recipes/52306-to-sort-a-dictionary/
def sortedDict(adict):
return [ adict[k] for k in sorted(adict.keys()) ]
# parses a specific node and either stores it's value in a dict or the default value
# may set the error bit
def parse_opt(nodename, valuetype, current_case, current_case_value_dict):
# if the node does not exist use the default value
nodelist = current_case.getElementsByTagName(nodename)
if nodelist.length == 0:
current_case_value_dict.update({nodename : _config.case_defaults.get(nodename)})
elif nodelist.length > 1:
_state.error_occured_while_processing_xml = True
print("'" + nodename + "' defined more than once.")
return
else:
# we have one single node to parse
node = nodelist[0]
value = node.getAttribute(valuetype)
if value == None:
# TODO: more advanced checks with regexp
_state.error_occured_while_processing_xml = True
print("'" + nodename + "' has an empty value or wrong type ('"+ valuetype +"').")
return
else :
current_case_value_dict.update({nodename : value})
return
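# A hypothetical <case> element accepted by parse_opt: each child tag carries its value in
# an attribute named after the expected type (scalar, vector3, matrix3x3 or vector_n), and
# anything omitted falls back to _config.case_defaults:
#
#   <case nr="1" desc="single revolute joint, gravity off">
#       <delta scalar="0.001"/>
#       <base_r vector3="0 0 0"/>
#       <joints_q vector_n="0.5"/>
#   </case>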
def convert_xml_testset_2_raw_testset(mbs_test_set):
raw_testsets = dict([]) # filename:content dict
for mbs in mbs_test_set.getElementsByTagName('mbs'): # for every file
file = mbs.getAttribute('file')
raw_testset = []
if mbs.getElementsByTagName('model').length != 1:
_state.error_occured_while_processing_xml = True
print("Only one model allowed per file!")
return dict([])
# extract model
raw_testset.append("% " + mbs.getElementsByTagName('model')[0].getAttribute('desc'))
raw_testset.append(getText(mbs.getElementsByTagName('model')[0].childNodes))
# insert separation marker
raw_testset.append("\nendmodel")
# now process the cases
if mbs.getElementsByTagName('case').length == 0:
_state.error_occured_while_processing_xml = True
print("No cases defined!")
return dict([])
cases = dict([])
for case in mbs.getElementsByTagName('case'):
# TODO: sanity check -> number collisions
# parse case
case_nr = case.getAttribute('nr')
case_desc = case.getAttribute('desc')
case_value_dict = dict([])
# everything but joints does not have to be defined explicitly
# TODO: unify these calls in a generic way (e.g. add type to case_output_order and iterate over it)
parse_opt('delta', 'scalar', case, case_value_dict)
parse_opt('base_r', 'vector3', case, case_value_dict)
parse_opt('base_R', 'matrix3x3', case, case_value_dict)
parse_opt('base_v', 'vector3', case, case_value_dict)
parse_opt('base_omega', 'vector3', case, case_value_dict)
parse_opt('base_vdot', 'vector3', case, case_value_dict)
parse_opt('base_omegadot', 'vector3', case, case_value_dict)
parse_opt('gravitiy', 'vector3', case, case_value_dict)
# TODO: checks with n (the number of joints)
parse_opt('joints_q', 'vector_n', case, case_value_dict)
parse_opt('joints_qdot', 'vector_n', case, case_value_dict)
parse_opt('joints_qdotdot', 'vector_n', case, case_value_dict)
parse_opt('joints_tau', 'vector_n', case, case_value_dict)
parse_opt('tcp_r', 'vector3', case, case_value_dict)
parse_opt('tcp_R', 'matrix3x3', case, case_value_dict)
parse_opt('tcp_v', 'vector3', case, case_value_dict)
parse_opt('tcp_omega', 'vector3', case, case_value_dict)
parse_opt('tcp_vdot', 'vector3', case, case_value_dict)
parse_opt('tcp_omegadot', 'vector3', case, case_value_dict)
parse_opt('f_ext', 'vector3', case, case_value_dict)
parse_opt('n_ext', 'vector3', case, case_value_dict)
if _state.error_occured_while_processing_xml: return dict([])
# compile raw case output
case_content = ["\n" + case_desc]
for value_name in _config.case_output_order:
if case_value_dict.get(value_name) is None :
_state.error_occured_while_processing_xml = True
print("Not all values defined in one testcase!")
return dict([])
case_content.append(case_value_dict.get(value_name))
cases.update({case_nr : "\n".join(case_content)})
# flatten cases (and sort)
raw_testset.append("\n".join(sortedDict(cases)))
# update file:testset dict
raw_testsets.update({file : "\n".join(raw_testset)})
# return the dict of files:testsets
return raw_testsets
#===============================================================================
# process command line arguments (i.e. file i/o)
#===============================================================================
script_name = sys.argv[0][sys.argv[0].rfind("\\")+1:]
if len(sys.argv) == 1:
_state.input_file = [_config.default_input_file]  # keep a list so the loop below iterates over file names, not characters
print("No command line arguments were given. Defaulting to:")
print("Input '" + _config.default_input_file + "'")
print("Usage hint: " + script_name + " [INPUTFILE(s)]\n")
elif len(sys.argv) == 2:
if sys.argv[1] == "--help":
print("Usage: " + script_name + " [INPUTFILE(s)]")
sys.exit()
else:
_state.input_file = glob(sys.argv[1])
#===============================================================================
# run the conversion
#===============================================================================
for inputfile in _state.input_file :
xmldom = xml.dom.minidom.parse(inputfile)
raw_testsets = convert_xml_testset_2_raw_testset(xmldom.firstChild)
if not _state.error_occured_while_processing_xml :
for k in raw_testsets.keys():
with open(k, 'w') as raw_testset_file:
raw_testset_file.write(raw_testsets.get(k))
print("File '" + k + "' written.")
#===============================================================================
# concluding housekeeping
#===============================================================================
if not _state.error_occured_while_processing_xml:
print("Conversion successful.")
else:
print("The xml file could not be processed properly. It most likely contains errors.")
sys.exit(_state.error_occured_while_processing_xml)
|
SIM-TU-Darmstadt/mbslib
|
dependencies/mbstestlib/src/testsetXML2intermediateConverter.py
|
Python
|
lgpl-3.0
| 9,479 | 0.007068 |
class MoveOperations:
"""Specifies criteria for how to move files."""
none = 0
overwrite = 1
|
vgrem/Office365-REST-Python-Client
|
office365/sharepoint/files/move_operations.py
|
Python
|
mit
| 105 | 0 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Member.area_of_residence'
db.add_column('mks_member', 'area_of_residence', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True), keep_default=False)
# Adding field 'Member.place_of_residence'
db.add_column('mks_member', 'place_of_residence', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Member.area_of_residence'
db.delete_column('mks_member', 'area_of_residence')
# Deleting field 'Member.place_of_residence'
db.delete_column('mks_member', 'place_of_residence')
models = {
'mks.correlation': {
'Meta': {'object_name': 'Correlation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'm1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m1'", 'to': "orm['mks.Member']"}),
'm2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m2'", 'to': "orm['mks.Member']"}),
'normalized_score': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_same_party': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'blank': 'True', 'null': 'True', 'to': "orm['mks.Party']"}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.weeklypresence': {
'Meta': {'object_name': 'WeeklyPresence'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"})
}
}
complete_apps = ['mks']
|
livni/old-OK
|
src/knesset/mks/migrations/0004_add_members_residence.py
|
Python
|
bsd-3-clause
| 6,016 | 0.009142 |
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
support multiple levels if nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
While all the methods of a list object are available, only the three
defined below need be used.
"""
def set(self, state):
""" Set a new state. """
self.append(state)
def reset(self):
""" Step back one step in nested state. """
self.pop()
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
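# Minimal usage sketch (illustrative only):
#
#   state = State()
#   state.set('list')           # entering a list block
#   state.set('looselist')      # entering a nested block
#   state.isstate('looselist')  # -> True (top of the stack)
#   state.reset()               # back out of the nested level
#   state.isstate('list')       # -> True again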
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
def __init__(self, zmarkdown):
self.blockprocessors = odict.OrderedDict()
self.state = State()
self.zmarkdown = zmarkdown
def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent
Element) is created and the root element is passed to the parser
as the parent. The ElementTree object is returned.
This should only be called on an entire document, not pieces.
"""
# Create a ElementTree from the lines
self.root = util.etree.Element(self.zmarkdown.doc_tag)
self.parseChunk(self.root, '\n'.join(lines))
return util.etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
if self.zmarkdown.inline:
self.blockprocessors["paragraph"].run(parent, [text])
else:
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used
internally.
This is a public method as an extension may need to add/alter
additional BlockProcessors which call this method to recursively
parse a nested block.
"""
while blocks:
for processor in self.blockprocessors.values():
if processor.test(parent, blocks[0]):
if processor.run(parent, blocks) is not False:
# run returns True or None
break
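# The loop in parseBlocks implies the following processor contract (illustrative sketch,
# not the real BlockProcessor base class): each registered processor needs
#
#   class SomeProcessor(object):
#       def test(self, parent, block):
#           return True            # can this processor handle the next block?
#       def run(self, parent, blocks):
#           blocks.pop(0)          # consume at least blocks[0]; returning False
#                                  # lets the remaining processors try this block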
|
zestedesavoir/Python-ZMarkdown
|
zmarkdown/blockparser.py
|
Python
|
bsd-3-clause
| 3,639 | 0 |
import unittest
from source_document import SourceDocument
from test_tagged_document import create_test_repo
from tagged_document import TaggedDocument
class SourceDocumentTests(unittest.TestCase):
"""Unit tests for the Document class"""
def test_cleaning(self):
# Tests removing snippets
input_path = "tests/sample-expanded.txt"
reference_path = "tests/sample.txt"
reference_text = open(reference_path, "r").read()
document = SourceDocument(input_path)
self.assertEqual(document.cleaned_contents, reference_text)
def test_finding_documents(self):
found_documents = SourceDocument.find("tests", ["txt"])
self.assertTrue(len(found_documents) == 7)
def test_processing(self):
# Tests rendering a snippet using tagged documents.
repo = create_test_repo()
tagged_documents = TaggedDocument.find(repo, ["txt"])
self.assertTrue(tagged_documents)
input_path = "tests/sample.txt"
reference_path = "tests/sample-expanded.txt"
reference_text = open(reference_path, "r").read()
source = SourceDocument(input_path)
rendered_output = source.render(tagged_documents, language="swift",show_query=False)
self.assertEqual(rendered_output, (reference_text, True))
|
thesecretlab/snippet-expander
|
tests/test_source_document.py
|
Python
|
mit
| 1,336 | 0.004491 |
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from alapage.models import Page
class Command(BaseCommand):
help = 'Creates a homepage'
def handle(self, *args, **options):
content = ""
#~ check if home exists
home_exists = Page.objects.filter(url='/').exists()
#~ create page
if not home_exists:
Page.objects.create(url='/', title='Home', content=content)
print("Homepage created")
else:
print("The homepage already exists with root url")
return
|
synw/django-alapage
|
alapage/management/commands/create_homepage.py
|
Python
|
mit
| 603 | 0.006633 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# GNU General Public License v2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# ----------------------------------------------------------------------------
import time
import sys
from players.atack_agent import AtackAgent
# sys.path.append("../")
PORT = 6000
HOST = "localhost"
"""
Run an agent
"""
if __name__ == "__main__":
# enforce current number of arguments, print help otherwise
if len(sys.argv) == 2:
# Get team name from arguments
team_name = sys.argv[1]
else:
print "usage: ./run_agent.py <team_name>; defaulting to team 'default'"
team_name = "default"
AtackAgent().connect(HOST, PORT, team_name).play()
# wait until killed to terminate agent processes
try:
while 1:
time.sleep(0.05)
except KeyboardInterrupt:
print "Exiting."
sys.exit()
|
dsaldana/phantoms_soccer2d
|
phantom_team/run_agent.py
|
Python
|
gpl-2.0
| 1,609 | 0.001243 |
from elastic_boogaloo import classifiers, distributions, scorers
from elasticsearch import Elasticsearch
es_client = Elasticsearch('localhost:9200')
scorer = scorers.ElasticsearchIndexTopScorer(es_client, 'megacorp')
positive_distribution = distributions.ExponentialDistribution()
negative_distribution = distributions.ExponentialDistribution()
classifier = classifiers.UnopinionatedBinaryClassifier(scorer, positive_distribution, negative_distribution)
print('Training douglas as positive...')
classifier.train_positive('douglas')
print('Done')
print('Probability of douglas being positive:', classifier.classify('douglas'))
print('Probability of rock being positive:', classifier.classify('rock'))
|
nkashy1/elastic-boogaloo
|
example.py
|
Python
|
mit
| 706 | 0.001416 |
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure("scikit-learn parallel %s benchmark results" % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel("n_samples")
plt.ylabel("Time (s)")
plt.title("Parallel %s" % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
|
manhhomienbienthuy/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
Python
|
bsd-3-clause
| 1,272 | 0 |
# -*- coding: utf-8 -*-
"""This testing module tests the behaviour of the search box in the Provider section
It does not check for filtering results so far."""
import fauxfactory
import pytest
from selenium.common.exceptions import NoSuchElementException
from cfme.infrastructure import host
from cfme.infrastructure.provider import InfraProvider
# TODO: we should not call out to utils here, but maybe rather have an infra setup provider fixture
from fixtures.pytest_store import store
from utils.providers import setup_a_provider_by_class
from utils.appliance.implementations.ui import navigate_to
from utils.log import logger
from cfme.web_ui import search
from cfme.web_ui.search import DisabledButtonException
from cfme.web_ui.cfme_exception import (assert_no_cfme_exception,
is_cfme_exception, cfme_exception_text)
pytestmark = [pytest.mark.usefixtures("setup_cleanup_search"), pytest.mark.tier(3)]
@pytest.fixture(scope="module")
def single_provider():
"""Ensure the infra provider is setup"""
try:
return setup_a_provider_by_class(InfraProvider)
except Exception as ex:
pytest.skip("Exception while setting up providers, therefore skipping: {}".format(ex))
@pytest.fixture(scope="module")
def hosts_with_vm_count(hosts):
"""Returns a list of tuples (hostname, vm_count)"""
hosts_with_vm_count = []
for host_name in hosts:
hosts_with_vm_count.append((host_name, int(host.find_quadicon(host_name, True).no_vm)))
return sorted(hosts_with_vm_count, key=lambda tup: tup[1])
@pytest.yield_fixture(scope="function")
def setup_cleanup_search():
"""Navigate to InfraProvider, clear search on setup and teardown"""
navigate_to(InfraProvider, 'All')
search.ensure_no_filter_applied()
yield
# cleanup after test
search.ensure_no_filter_applied()
search.ensure_advanced_search_closed()
@pytest.yield_fixture(scope="function")
def rails_delete_filter(request):
"""Introspect a function bound filter_name and use ssh_client and rails to delete it"""
# No pre-test, just cleanup after yield
yield
filter_name = getattr(request.function, "filter_name", None)
logger.debug('rails_delete_filter: calling rails to delete filter: {}'.format(filter_name))
if filter_name:
try:
store.current_appliance.ssh_client.run_rails_command(
'"MiqSearch.where(:description => {}).first.delete"'.format(repr(filter_name)))
except Exception as ex:
logger.warning('rails_delete_filter: exception during delete. {}'.format(ex))
pass
else:
logger.warning('rails_delete_filter: failed to get filter_name')
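# Tests that use this fixture publish the name of the filter they create on the test
# function itself, e.g. test_filter_save_cancel.filter_name = fauxfactory.gen_alphanumeric(),
# which the teardown above reads back via getattr(request.function, "filter_name", None).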
def test_can_do_advanced_search(single_provider):
navigate_to(InfraProvider, 'All')
assert search.is_advanced_search_possible(), "Cannot do advanced search here!"
@pytest.mark.requires("test_can_do_advanced_search")
def test_can_open_advanced_search(single_provider):
navigate_to(InfraProvider, 'All')
search.ensure_advanced_search_open()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_without_user_input(single_provider):
# Set up the filter
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=, 0)")
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_with_user_input(single_provider):
# Set up the filter
logger.debug('DEBUG: test_with_user_input: fill and apply')
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=)",
fill_callback={"COUNT": 0})
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_with_user_input_and_cancellation(single_provider):
# Set up the filter
search.fill_and_apply_filter(
"fill_count(Infrastructure Provider.VMs, >=)", fill_callback={"COUNT": 0},
cancel_on_user_filling=True
)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_cancel(single_provider, rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_cancel.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_cancel.filter_name))
# Try save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >)",
test_filter_save_cancel.filter_name, cancel=True)
assert_no_cfme_exception()
assert search.reset_filter()
# Exception depends on system state - Load button will be disabled if there are no saved filters
with pytest.raises((DisabledButtonException, NoSuchElementException)):
search.load_filter(saved_filter=test_filter_save_cancel.filter_name)
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_load(single_provider, rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_load.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_and_load.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_filter_save_and_load.filter_name)
assert_no_cfme_exception()
# Reset filter
assert search.reset_filter()
# Load filter
assert search.load_filter(test_filter_save_and_load.filter_name)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_cancel_load(single_provider, rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_cancel_load.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(test_filter_save_and_cancel_load.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_filter_save_and_cancel_load.filter_name)
assert_no_cfme_exception()
# Reset Filter
assert search.reset_filter()
# Load and cancel
assert search.load_filter(test_filter_save_and_cancel_load.filter_name, cancel=True)
assert_no_cfme_exception()
@pytest.mark.requires("test_can_open_advanced_search")
def test_filter_save_and_cancel_load_with_user_input(single_provider, rails_delete_filter):
# bind filter_name to the function for fixture cleanup
test_filter_save_and_cancel_load_with_user_input.filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(
test_filter_save_and_cancel_load_with_user_input.filter_name))
# Save filter
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >)",
test_filter_save_and_cancel_load_with_user_input.filter_name)
assert_no_cfme_exception()
# Reset Filter
assert search.reset_filter()
search.load_and_apply_filter(
test_filter_save_and_cancel_load_with_user_input.filter_name,
fill_callback={"COUNT": 0},
cancel_on_user_filling=True
)
assert_no_cfme_exception()
def test_quick_search_without_filter(request, single_provider):
assert_no_cfme_exception()
# Make sure that we empty the regular search field after the test
request.addfinalizer(search.ensure_normal_search_empty)
# Filter this host only
search.normal_search(fauxfactory.gen_alphanumeric())
assert_no_cfme_exception()
def test_quick_search_with_filter(request, single_provider):
search.fill_and_apply_filter("fill_count(Infrastructure Provider.VMs, >=, 0)")
assert_no_cfme_exception()
# Make sure that we empty the regular search field after the test
request.addfinalizer(search.ensure_normal_search_empty)
# Filter this host only
search.normal_search(fauxfactory.gen_alphanumeric())
assert_no_cfme_exception()
def test_can_delete_filter(single_provider):
filter_name = fauxfactory.gen_alphanumeric()
logger.debug('Set filter_name to: {}'.format(filter_name))
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)", filter_name)
assert_no_cfme_exception()
search.reset_filter()
assert_no_cfme_exception()
search.load_filter(filter_name)
assert_no_cfme_exception()
if not search.delete_filter():
        pytest.fail("Cannot delete filter! Probably the delete button is not present!")
assert_no_cfme_exception()
@pytest.mark.meta(blockers=[1097150, 1320244])
def test_delete_button_should_appear_after_save(single_provider, rails_delete_filter):
"""Delete button appears only after load, not after save"""
# bind filter_name to the function for fixture cleanup
test_delete_button_should_appear_after_save.filter_name = fauxfactory.gen_alphanumeric()
search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
test_delete_button_should_appear_after_save.filter_name)
if not search.delete_filter(): # Returns False if the button is not present
pytest.fail("Could not delete filter right after saving!")
@pytest.mark.meta(blockers=[1097150, 1320244])
def test_cannot_delete_more_than_once(single_provider):
"""When Delete button appars, it does not want to go away"""
filter_name = fauxfactory.gen_alphanumeric()
assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)", filter_name)
assert search.load_filter(filter_name) # circumvent the thing happening in previous test
# Delete once
if not search.delete_filter():
pytest.fail("Could not delete the filter even first time!")
assert_no_cfme_exception()
# Try it second time
if search.delete_filter(): # If the button is there, it says True
# This should not happen
msg = "Delete twice accepted!"
if is_cfme_exception():
msg += " CFME Exception text: `{}`".format(cfme_exception_text())
pytest.fail(msg)
|
rananda/cfme_tests
|
cfme/tests/infrastructure/test_advanced_search_providers.py
|
Python
|
gpl-2.0
| 10,078 | 0.002977 |
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from djangoautoconf.local_key_manager import get_default_admin_username, \
get_default_admin_password
from djangoautoconf.management.commands.web_manage_tools.user_creator import create_admin
def create_default_admin():
super_username = get_default_admin_username()
super_password = get_default_admin_password()
if not User.objects.filter(username=super_username).exists():
create_admin(super_username, super_password, "r@j.cn")
print("default admin created")
else:
print("default admin already created")
class Command(BaseCommand):
args = ''
    help = 'Create the default admin user if it does not already exist'
def handle(self, *args, **options):
create_default_admin()
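# Usage sketch: assuming "djangoautoconf" is listed in INSTALLED_APPS so Django
# discovers this management command, it can be run as:
#
#   python manage.py create_default_super_user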
|
weijia/djangoautoconf
|
djangoautoconf/management/commands/create_default_super_user.py
|
Python
|
bsd-3-clause
| 844 | 0.003555 |
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import hashlib
import os
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import bulk_index
from warehouse.utils import AttributeDict
class Index(object):
_index = "warehouse"
def __init__(self, models, config):
self.models = models
self.config = config
self.es = Elasticsearch(
hosts=self.config.hosts,
**self.config.get("client_options", {})
)
self.types = AttributeDict()
def register(self, type_):
obj = type_(self)
self.types[obj._type] = obj
def reindex(self, index=None, alias=True, keep_old=False):
# Generate an Index Name for Warehouse
index = "".join([
index if index is not None else self._index,
hashlib.md5(os.urandom(16)).hexdigest()[:8],
])
# Create this index
self.es.indices.create(index, {
"mappings": {
doc_type._type: doc_type.get_mapping()
for doc_type in self.types.values()
},
})
# Index everything into the new index
for doc_type in self.types.values():
doc_type.index_all(index=index)
# Update the alias unless we've been told not to
if alias:
self.update_alias(self._index, index, keep_old=keep_old)
def update_alias(self, alias, index, keep_old=False):
# Get the old index from ElasticSearch
try:
old_index = self.es.indices.get_alias(self._index).keys()[0]
except TransportError as exc:
if not exc.status_code == 404:
raise
old_index = None
# Remove the alias to the old index if it exists
if old_index is not None:
actions = [{"remove": {"index": old_index, "alias": alias}}]
else:
actions = []
# Add the alias to the new index
actions += [{"add": {"index": index, "alias": alias}}]
# Update To the New Index
self.es.indices.update_aliases({"actions": actions})
# Delete the old index if it exists and unless we're keeping it
if not keep_old and old_index is not None:
self.es.indices.delete(old_index)
class BaseMapping(object):
SEARCH_LIMIT = 25
def __init__(self, index):
self.index = index
def get_mapping(self):
raise NotImplementedError
def get_indexable(self):
raise NotImplementedError
def extract_id(self, item):
raise NotImplementedError
def extract_document(self, item):
raise NotImplementedError
def index_all(self, index=None):
# Determine which index we are indexing into
_index = index if index is not None else self.index._index
# Bulk Index our documents
bulk_index(
self.index.es,
[
{
"_index": _index,
"_type": self._type,
"_id": self.extract_id(item),
"_source": self.extract_document(item),
}
for item in self.get_indexable()
],
)
def search(self, query):
raise NotImplementedError
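# Minimal wiring sketch (illustrative; "ProjectMapping" and the host settings
# are placeholders, not names defined in this module):
#
#   index = Index(models, AttributeDict({
#       "hosts": [{"host": "localhost", "port": 9200}],
#   }))
#   index.register(ProjectMapping)  # a BaseMapping subclass defining _type, etc.
#   index.reindex()                 # build a fresh index, then swap the alias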
|
mattrobenolt/warehouse
|
warehouse/search/indexes.py
|
Python
|
apache-2.0
| 3,926 | 0 |
import ujson
def json_load(file_name):
with open(file_name, 'r') as f:
data = ujson.loads(f.read())
return data
def json_dump(file_name, data):
with open(file_name, 'w') as f:
f.write(ujson.dumps(data))
test_dict = {
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
}
json_dump('test.json', test_dict)
test_load = json_load('test.json')
print(test_load)
|
ShaunKarran/homesense
|
esp8266/micropython/main.py
|
Python
|
gpl-3.0
| 403 | 0 |
import sys
import socket
import os
import os.path
from optparse import OptionParser
#import scipy as scp
import numpy as np
import matplotlib.pyplot as plt
import pylab
import genome_management.kg_file_handling as kgf
import math
def file_exists(ls,file):
for f in ls:
if(f==file):
return 1
return 0
def mkdir(dir,file):
ls_dir = os.listdir(dir)
if(not(file_exists(ls_dir,file))):
command = "mkdir %s/%s"%(dir,file)
os.system(command)
return "%s/%s"%(dir,file)
class region_info:
def __init__(self,name,chr,start,end,TID):
self.name = name
self.chr = chr
self.start = start
self.end = end
self.frequencies_by_pop = {}
self.cps_by_genome = {}
self.transcript_id = TID
self.TID = TID
self.cps_all = []
self.pop_by_genome = {}
def add_info_from_genome(self,cp,genome):
if(not(genome.pop in self.frequencies_by_pop)):
self.frequencies_by_pop[genome.pop] = []
self.frequencies_by_pop[genome.pop].append(cp)
self.cps_by_genome[genome.genome_name] = cp
self.pop_by_genome[genome.genome_name] = genome.pop
self.cps_all.append(cp)
#def get_var(self):
# self.vars = {}
#self.cps_all = np.array(self.cps_all)
# varT = self.cps_all.var()
# self.vars["all"]=varT
# self.means = {}
# meanT = self.cps_all.mean(1)
# self.means["all"] = meanT
# for pop,copies_by_pop in self.frequencies_by_pop.iteritems():
# copies_by_pop = np.array(copies_by_pop)
# self.vars[pop] = self.summary[:,pop_index].var(1)
# self.means[pop] = self.summary[:,pop_index].mean(1)
# self.vsts = {}
# self.fsts = {}
# for pop,pop_index in self.indivs_by_pop.iteritems():
# for pop_2,pop_index_2 in self.indivs_by_pop.iteritems():
# n_pop = float(pop_index.shape[0])
# n_pop_2 = float(pop_index_2.shape[0])
# both_pops = np.r_[self.indivs_by_pop[pop],self.indivs_by_pop[pop_2]]
# var_both = self.summary[:,both_pops].var(1)
# N = n_pop+n_pop_2
# self.vsts["_".join([pop,pop_2])] = (var_both - ((self.vars[pop]*n_pop+self.vars[pop_2]*n_pop_2)/N)) / var_both
def make_output_file(region,region_info,outdir,cell_line_info,genome_info):
outfile_name = "%s/%s_pop_summary.csv"%(outdir,region_info.name)
FOUT = open(outfile_name,'w')
FOUT.write("indiv,cp,pop,cell lines fixed, cell lines in Nitrogen,coverage\n")
for indiv,cp in region_info.cps_by_genome.iteritems():
pop = region_info.pop_by_genome[indiv]
output = indiv in cell_line_info and cell_line_info[indiv] or ""
output = "%s,%d,%s,%s,%f\n"%(indiv,cp,pop,output,genome_info.genomes[indiv].coverage)
FOUT.write(output)
print output
def make_simple_plot(region,region_info,outdir,cell_line_info,genome_info):
plt.rc('grid',color='0.75',linestyle='l',linewidth='0.1')
f=plt.figure()
f.set_figwidth(6)
f.set_figheight(6)
axescolor = '#f6f6f6'
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.8] #left, bottom, width, height
ax = f.add_axes(rect1)
colors = {'Yoruba':'r','European':'b','Asian':'g'}
for indiv,cp in region_info.cps_by_genome.iteritems():
cvg = genome_info.genomes[indiv].coverage
fixed_cell_line = cell_line_info[indiv].split(",")[0].rstrip() == "yes"
liquid_nitrogen_cell_line = cell_line_info[indiv].split(",")[1].rstrip() == "yes"
color = colors[genome_info.genomes[indiv].pop]
ax.plot(np.array([cvg]),np.array([cp]),'%so'%(color))
ax.set_xlabel("cvg",size=20)
ax.set_ylabel("copy",size=20)
ax.set_title("%s"%(region_info.name),size=20)
f.savefig("%s/%s_copy_vs_cvg.pdf"%(outdir,region_info.name),format='pdf')
plt.close(1)
def make_histogram(region,region_info,outdir,great_ape_gene_hashes):
print region_info.name
plt.rc('grid',color='0.75',linestyle='l',linewidth='0.1')
f=plt.figure()
f.set_figwidth(10)
f.set_figheight(10)
nbins=0
mx=0
mn=100
do_apes=True
great_ape_cps = {}
if do_apes:
for ape,gene_hash in great_ape_gene_hashes.iteritems():
if not region_info.TID in gene_hash:
do_apes=False
print "ID does not exist for APE"
print region_info.TID
break
great_ape_cps[ape] = gene_hash[region_info.TID]
mx=int(max(great_ape_cps[ape],mx))
mn=int(min(great_ape_cps[ape],mn))
axescolor = '#f6f6f6'
left, width = 0.1, 0.8
rect1 = [left, 0.1, width, 0.8] #left, bottom, width, height
for pop,freq_info in region_info.frequencies_by_pop.iteritems():
#nbins = int(round(max(nbins,max(freq_info))))
mx=int(max(max(freq_info),mx))
mn=int(min(min(freq_info),mn))
#nbins+=1
nbins = mx-mn+1
labels = []
pop_to_hists = {}
for pop,freq_info in region_info.frequencies_by_pop.iteritems():
print pop,freq_info
pop_to_hists[pop] = np.histogram(np.array(freq_info),bins=nbins,range=[mn,mx],normed=True,new=True)[0]
print np.histogram(np.array(freq_info),bins=nbins,range=[mn,mx],normed=True,new=True)
print pop_to_hists[pop]
x = np.arange(mn,mx+1)
width=.25
print x
for i in range(x.shape[0]):
labels.append(str(x[i]))
ax = f.add_axes(rect1)
bars = {}
leg = []
leg_colors = []
lines = []
k=0
colors = ['r','g','b','o']
starty = .9
sub=.03
i=0
for pop,freqs in region_info.frequencies_by_pop.iteritems():
med = np.median(np.array(freqs))
sig2 = np.array(freqs).var()
leg.append("%s med: %d var: %.1f"%(pop,int(med),sig2))
i+=1
for pop,hist in pop_to_hists.iteritems():
bars[pop] = ax.bar(x+k*width,hist,width,color=colors[k],alpha=0.5)
leg_colors.append(colors[k])
#ax.legend(bars[pop][0],pop)
lines.append(bars[pop][0])
k+=1
ape_colors = ['orange','purple','yellow','brown']
k=0
if do_apes:
for ape,cp in great_ape_cps.iteritems():
bars_ape = ax.bar(np.array([cp]),np.array([.1]),width/2,color=ape_colors[k],alpha=.8)
leg.append("%s %f"%(ape,cp))
lines.append(bars_ape[0])
k+=1
ax.set_xticks(x+width*k/2)
ax.set_xticklabels(labels,size=20)
ax.grid(color='k',linestyle='--',linewidth=1,alpha=.3)
yticklabels = [str(x) for x in np.arange(0,1,.1)]
ax.set_yticklabels(yticklabels,size=20)
ax.set_ylabel("%",size=20)
ax.set_xlabel("cp number",size=20)
ax.legend(lines,leg)
ax.set_title("%s"%(region_info.name),size=20)
f.savefig("%s/%s_pop_hist.pdf"%(outdir,region_info.name),format='pdf')
plt.close(1)
return
    # NOTE: everything below is unreachable legacy code (it follows the return
    # above and relies on pylab-style globals such as percent_hists, vlines and
    # title that are not defined in this scope).
    k=0
for pop,ihist in percent_hists.iteritems():
percent_hists[pop] = ihist/ihist.sum()
#jhplot(x,hist,"|%s"%(colors[k]))
#hist(x)
vlines(x+float(k)/3,zeros,percent_hists[pop],color=colors[k],linewidth=7)
k+=1
leg.append(pop)
#legend(leg)
title("percent")
print leg
legend(leg)
f.get_axes()[0].xaxis.set_ticks(range(21))
#f.add_axes([0,40,0,1],xticks=[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20],label='axis2',axisbg='g')
#[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20])
f=figure(2)
k=0
for pop,ihist in mode_hists.iteritems():
mode_hists[pop] = ihist/ihist.sum()
#plot(x,hist,"|%s"%(colors[k]))
#hist(x)
vlines(x+float(k)/5,zeros,mode_hists[pop],color=colors[k],linewidth=7)
k+=1
legend(leg)
title("Predicted copy number %s"%(name))
xlabel("predicted copy number")
ylabel("percentage of population")
f.get_axes()[0].xaxis.set_ticks(range(21))
savefig("%smode_hist.png"%(name),format='png')
print percent_hists
print mode_hists
def load_plot_regions(fn_regions):
if fn_regions == None: return []
plot_regions = []
for line in open(fn_regions,'r').readlines():
if line[0] == "#": continue
print line
sline = line.split()
uID = "%s:"%(sline[1])
uID += ":".join(sline[2:5])
plot_regions.append(uID)
print uID
return plot_regions
def get_transcript_ids(fn_transcript_id):
print fn_transcript_id
gene_id_list = open(fn_transcript_id,'r').readlines()
transcript_ids = {}
for gene_info in gene_id_list:
(TID,name,chr,start,end,unmasked_len,GCp) = gene_info.split()
transcript_ids["%s:%s:%s"%(chr,start,end)] = {"tid":TID,"chr":chr,"start":start,"end":end,"unmasked":unmasked_len,"GC":GCp}
return transcript_ids
def get_cp_by_gene(gene_file):
cps_by_TID = {}
for line in open(gene_file,'r').readlines():
if len(line.split()) == 0: continue
(chr,start,end,TID,cp) = line.split()
cps_by_TID[TID] = float(cp)
return cps_by_TID
def get_calkan_cp_calls(fn_great_ape_cps_files):
calkan_cp_calls = {}
if(fn_great_ape_cps_files!=None):
for line in open(fn_great_ape_cps_files,'r').readlines():
(genome,gene_file) = line.split()
calkan_cp_calls[genome] = get_cp_by_gene(gene_file)
return calkan_cp_calls
if __name__=='__main__':
opts = OptionParser()
opts.add_option('','--input_file_name',dest='input_file_name')
opts.add_option('','--input_genomes',dest='fn_input_genomes')
opts.add_option('','--outdir',dest='outdir')
opts.add_option('','--sex_pop_index',dest='fn_sex_pop_index')
#opts.add_option('','--analysis_dir',dest='fn_analysis_dir')
opts.add_option('','--input_regions',dest='input_regions',default=None)
opts.add_option('','--out_file',dest='outfile',default=None)
opts.add_option('','--regress',dest='regress',action='store_true',default=False)
opts.add_option('','--plot_regions',dest='plot_regions',default=None)
opts.add_option('','--do_plotting',action="store_true",dest='do_plotting',default=False)
opts.add_option('','--great_ape_cps_files',dest='fn_great_ape_cps_files',default=None)
opts.add_option('','--cell_line_information',dest='fn_cell_line_info',default=None)
opts.add_option('','--output_coverage',dest='output_cvg',action='store_true',default=False)
opts.add_option('','--simple_plot',dest='simple_plot',action='store_true',default=False)
opts.add_option('','--input_dir',dest='input_dir',default=None)
#opts.add_option('','--transcript_id_file',dest='fn_transcript_id')
#opts.add_option('','--call_metric',dest='outfile',default="summary")
#opts.add_option('','--out_genomes',dest='fn_out_genomes')
(o, args) = opts.parse_args()
great_ape_cps = get_calkan_cp_calls(o.fn_great_ape_cps_files)
cell_line_info = {}
if o.fn_cell_line_info != None:
read_cell_line_info = open(o.fn_cell_line_info,'r').readlines()
for cell_line_line in read_cell_line_info:
(name,cells_fixed,in_nitrogen) = cell_line_line.split(",")
cell_line_info[name] = "%s,%s"%(cells_fixed,in_nitrogen.rstrip())
print cell_line_info[name]
mkdir("./",o.outdir)
print "loading genome information"
genome_info = kgf.genome_info(o.fn_input_genomes,o.fn_sex_pop_index,QC_check=o.output_cvg)
print "done"
regions_by_uID = {}
#print o.input_regions
expected_len = 0
if o.input_regions != None:
for l in open(o.input_regions,'r').readlines():
expected_len+= (l[0]!="#") and 1
input_genomes = open(o.fn_input_genomes,'r').readlines()
plot_regions = load_plot_regions(o.plot_regions)
outstr = "\t".join(["name", "chr", "start", "end", "TID"])
for input_genomes_line in input_genomes:
(genome_id,fn_wssd_dir,fn_bac_dir,chunk_dir,primary_analysis_dir) = input_genomes_line.split()
if genome_id[0] == "#": continue
genome_ob = genome_info.genomes[genome_id]
if o.input_dir is None:
input_file = "%s/%s/ml_region_analysis/%s"%(primary_analysis_dir,genome_id,o.input_file_name)
else:
input_file = "%s/%s_%s"%(o.input_dir,o.input_file_name,genome_id)
print input_file
##########check the output file exists
#if(not(os.path.exists("%s/%s/ml_region_analysis/%s"%(primary_analysis_dir,genome_id,o.input_file_name)))):
if not os.path.exists(input_file):
print "%s does not appear to exist" % (input_file)
print
            print '%s may have failed previous QC or may still be running' % (genome_id)
continue
##############check the output file is of the correct length
#################here we coudl also put "take the first n"
#analyzed_by_ml_lines = open("%s/%s/ml_region_analysis/%s"%(primary_analysis_dir,genome_id,o.input_file_name)).readlines()
analyzed_by_ml_lines = open(input_file, "r").readlines()
if(len(analyzed_by_ml_lines) != expected_len):
print "expected:%d encountered:%d" % (expected_len, len(analyzed_by_ml_lines))
print "expected number of lines in %s does not match that in %s" % (analyzed_by_ml_lines, o.input_regions)
#continue
print "\t getting information %s" %(genome_id)
outstr += "\t%s" % genome_id
for analysis_line in analyzed_by_ml_lines:
(name,TID,chr,start,end,cp,bywnd_cp,median,ll,regressed_cp,regressed_cp_by_wnd,regressed_cp_median) = analysis_line.split()
if o.regress:
cp = float(regressed_cp_median)
else:
cp = float(median)
uID = "%s:%s:%s:%s"%(TID,chr,start,end)
if(not(uID in regions_by_uID)):
regions_by_uID[uID] = region_info(name,chr,start,end,TID)
regions_by_uID[uID].add_info_from_genome(cp,genome_ob)
outstr+="\n"
for region_uID, region_inf in regions_by_uID.iteritems():
outstr+="\t".join([region_inf.name,region_inf.chr,region_inf.start,region_inf.end,region_inf.transcript_id])
#for genome_id,genome in genome_info.genomes.iteritems():
for input_genomes_line in input_genomes:
(genome_id,fn_wssd_dir,fn_bac_dir,chunk_dir,primary_analysis_dir) = input_genomes_line.split()
if genome_id[0] =="#": continue
if genome_id in region_inf.cps_by_genome:
#print genome_id
outstr+="\t%f"%(region_inf.cps_by_genome[genome_id])
else:
print "ERROR genome_id not in region_info"
print genome_id
print region_inf.cps_by_genome
sys.exit(1)
outstr+="\n"
# print outstr
if o.outfile != None:
open("%s/%s"%(o.outdir,o.outfile),'w').write(outstr)
#print percent_hists[pop]
#print hist
# percent_hists[pop]=ihist + percent_hists[pop]
# mode_hists[pop][np.where(ihist==np.amax(ihist))[0]]+=1
|
EichlerLab/read_depth_genotyper
|
scripts/make_ml_output_summary.py
|
Python
|
mit
| 15,332 | 0.028111 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import tokenize
from pants.contrib.python.checks.checker.common import CheckstylePlugin
# TODO(wickman) Update this to sanitize line continuation styling as we have
# disabled it from pycodestyle.py due to mismatched indentation styles.
class Indentation(CheckstylePlugin):
"""Enforce proper indentation."""
@classmethod
def name(cls):
return 'indentation'
INDENT_LEVEL = 2 # the one true way
def nits(self):
indents = []
for token in self.python_file.tokens:
token_type, token_text, token_start = token[0:3]
if token_type is tokenize.INDENT:
last_indent = len(indents[-1]) if indents else 0
current_indent = len(token_text)
if current_indent - last_indent != self.INDENT_LEVEL:
yield self.error('T100',
'Indentation of {} instead of {}'.format(
current_indent - last_indent, self.INDENT_LEVEL),
token_start[0])
indents.append(token_text)
elif token_type is tokenize.DEDENT:
indents.pop()
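# Illustrative example of what this check flags: with INDENT_LEVEL = 2, a body
# indented by four spaces, e.g.
#
#   def foo():
#       return 1
#
# yields a T100 nit reading "Indentation of 4 instead of 2".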
|
twitter/pants
|
contrib/python/src/python/pants/contrib/python/checks/checker/indentation.py
|
Python
|
apache-2.0
| 1,262 | 0.008716 |
from django.contrib.auth import get_user_model, login, logout
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.db.models import Q # import for AJAX / dynamic searching
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.template import RequestContext, loader
from django.shortcuts import render, render_to_response
from django.views import View
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.urls import reverse_lazy, reverse
from .forms import AuthForm, SearchForm, AddGreenRoofForm
from warsaw.models import GreenRoof, District, City
class WarsawView(View):
def get(self, request):
return render(request, 'warsaw/index.html')
        # NOTE: the code below is unreachable (it follows the return above) and
        # references render_to_string and waypoints, which are not imported or
        # defined in this module.
        greenroofs = GreenRoof.objects.order_by('roof_address')
template = loader.get_template('warsaw/index.html')
context = RequestContext(request, {
'greenroofs': greenroofs, 'content': render_to_string('warsaw/index.html', {'waypoints': waypoints})
})
return HttpResponse(template.render(context))
class LoginView(View):
def get(self, request):
form = AuthForm()
ctx = {'form' : form}
return render(request, 'warsaw/login.html', ctx)
def post(self, request):
form = AuthForm(data=request.POST)
ctx = {'form' : form}
if form.is_valid():
user = form.cleaned_data['user']
login(request, user)
return HttpResponseRedirect(reverse('index'))
else:
return render(request, 'warsaw/login.html', ctx)
class LogoutView(View):
def get(self, request):
logout(request)
return HttpResponseRedirect(reverse('index'))
# NOTE: leftover tutorial-style view; Poll and get_object_or_404 are not
# imported in this module.
def detail(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
return render_to_response('polls/detail.html', {'poll': p}, context_instance=RequestContext(request))
#Needs expansion to show field for MultiPolygonField (now there is text Mpoly in the form, but no input place)
class AddGreenRoofView(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
permission_required = ['warsaw.add_greenroof']
raise_exception = True
model = GreenRoof #remember to import class
form_class = AddGreenRoofForm
def handle_no_permission(self):
if not self.request.user.is_authenticated:
return HttpResponseRedirect(reverse('login'))
else:
return super().handle_no_permission()
class DeleteGreenRoofView(DeleteView):
model = GreenRoof
success_url = reverse_lazy('index')
class GreenRoofSearchView(View):
def get(self, request):
ctx = {'form' : SearchForm()}
return render(request, 'warsaw/gr_search_form.html', ctx)
def post(self, request):
form = SearchForm(data=request.POST)
ctx = {'form' : form}
print('Form is valid', form.is_valid())
if form.is_valid():
address = form.cleaned_data['address']
greenroofs = GreenRoof.objects.filter(roof_address__icontains=address)
print(greenroofs)
ctx['results'] = greenroofs
return render(request, 'warsaw/gr_results_form.html', ctx)
class GreenRoofView(DetailView):
model = GreenRoof
fields = '__all__'
class UpdateGreenRoofView(UpdateView):
model = GreenRoof
fields = '__all__'
template_name_suffix = '_update_form'
class DeleteGreenRoofView(DeleteView):
model = GreenRoof
success_url = reverse_lazy('index')
|
martazaryn/green-roofs-mappping
|
Green_Roof_MapPy/warsaw/views.py
|
Python
|
mit
| 3,276 | 0.025946 |
import logging
from datetime import datetime
from collections import defaultdict
from servicelayer.jobs import Job
from aleph.core import db, cache
from aleph.authz import Authz
from aleph.queues import cancel_queue, ingest_entity, get_status
from aleph.model import Collection, Entity, Document, Mapping
from aleph.model import Permission, Events, EntitySet
from aleph.index import collections as index
from aleph.index import xref as xref_index
from aleph.index import entities as entities_index
from aleph.logic.notifications import publish, flush_notifications
from aleph.logic.documents import ingest_flush, MODEL_ORIGIN
from aleph.logic.aggregator import get_aggregator
log = logging.getLogger(__name__)
def create_collection(data, authz, sync=False):
now = datetime.utcnow()
collection = Collection.create(data, authz, created_at=now)
if collection.created_at == now:
publish(
Events.CREATE_COLLECTION,
params={"collection": collection},
channels=[collection, authz.role],
actor_id=authz.id,
)
db.session.commit()
return update_collection(collection, sync=sync)
def update_collection(collection, sync=False):
"""Update a collection and re-index."""
Authz.flush()
refresh_collection(collection.id)
return index.index_collection(collection, sync=sync)
def refresh_collection(collection_id):
"""Operations to execute after updating a collection-related
domain object. This will refresh stats and flush cache."""
cache.kv.delete(
cache.object_key(Collection, collection_id),
cache.object_key(Collection, collection_id, "stats"),
)
def get_deep_collection(collection):
mappings = Mapping.by_collection(collection.id).count()
entitysets = EntitySet.type_counts(collection_id=collection.id)
return {
"statistics": index.get_collection_stats(collection.id),
"counts": {"mappings": mappings, "entitysets": entitysets},
"status": get_status(collection),
"shallow": False,
}
def compute_collections():
"""Update collection caches, including the global stats cache."""
authz = Authz.from_role(None)
schemata = defaultdict(int)
countries = defaultdict(int)
categories = defaultdict(int)
for collection in Collection.all():
compute_collection(collection)
if authz.can(collection.id, authz.READ):
categories[collection.category] += 1
things = index.get_collection_things(collection.id)
for schema, count in things.items():
schemata[schema] += count
for country in collection.countries:
countries[country] += 1
log.info("Updating global statistics cache...")
data = {
"collections": sum(categories.values()),
"schemata": dict(schemata),
"countries": dict(countries),
"categories": dict(categories),
"things": sum(schemata.values()),
}
key = cache.key(cache.STATISTICS)
cache.set_complex(key, data, expires=cache.EXPIRE)
def compute_collection(collection, force=False, sync=False):
key = cache.object_key(Collection, collection.id, "stats")
if cache.get(key) is not None and not force:
return
refresh_collection(collection.id)
log.info("[%s] Computing statistics...", collection)
index.update_collection_stats(collection.id)
cache.set(key, datetime.utcnow().isoformat())
index.index_collection(collection, sync=sync)
def aggregate_model(collection, aggregator):
"""Sync up the aggregator from the Aleph domain model."""
log.debug("[%s] Aggregating model...", collection)
aggregator.delete(origin=MODEL_ORIGIN)
writer = aggregator.bulk()
for document in Document.by_collection(collection.id):
proxy = document.to_proxy(ns=collection.ns)
writer.put(proxy, fragment="db", origin=MODEL_ORIGIN)
for entity in Entity.by_collection(collection.id):
proxy = entity.to_proxy()
aggregator.delete(entity_id=proxy.id)
writer.put(proxy, fragment="db", origin=MODEL_ORIGIN)
writer.flush()
def index_aggregator(
collection, aggregator, entity_ids=None, skip_errors=False, sync=False
):
def _generate():
idx = 0
entities = aggregator.iterate(entity_id=entity_ids, skip_errors=skip_errors)
for idx, proxy in enumerate(entities, 1):
if idx > 0 and idx % 1000 == 0:
log.debug("[%s] Index: %s...", collection, idx)
yield proxy
log.debug("[%s] Indexed %s entities", collection, idx)
entities_index.index_bulk(collection, _generate(), sync=sync)
def reingest_collection(collection, job_id=None, index=False, flush=True):
"""Trigger a re-ingest for all documents in the collection."""
job_id = job_id or Job.random_id()
if flush:
ingest_flush(collection)
for document in Document.by_collection(collection.id):
proxy = document.to_proxy(ns=collection.ns)
ingest_entity(collection, proxy, job_id=job_id, index=index)
def reindex_collection(collection, skip_errors=True, sync=False, flush=False):
"""Re-index all entities from the model, mappings and aggregator cache."""
from aleph.logic.mapping import map_to_aggregator
from aleph.logic.profiles import profile_fragments
aggregator = get_aggregator(collection)
for mapping in collection.mappings:
if mapping.disabled:
log.debug("[%s] Skip mapping: %r", collection, mapping)
continue
try:
map_to_aggregator(collection, mapping, aggregator)
except Exception:
# More or less ignore broken models.
log.exception("Failed mapping: %r", mapping)
aggregate_model(collection, aggregator)
profile_fragments(collection, aggregator)
if flush:
log.debug("[%s] Flushing...", collection)
index.delete_entities(collection.id, sync=True)
index_aggregator(collection, aggregator, skip_errors=skip_errors, sync=sync)
compute_collection(collection, force=True)
def delete_collection(collection, keep_metadata=False, sync=False):
deleted_at = collection.deleted_at or datetime.utcnow()
cancel_queue(collection)
aggregator = get_aggregator(collection)
aggregator.delete()
flush_notifications(collection, sync=sync)
index.delete_entities(collection.id, sync=sync)
xref_index.delete_xref(collection, sync=sync)
Mapping.delete_by_collection(collection.id)
EntitySet.delete_by_collection(collection.id, deleted_at)
Entity.delete_by_collection(collection.id)
Document.delete_by_collection(collection.id)
if not keep_metadata:
Permission.delete_by_collection(collection.id)
collection.delete(deleted_at=deleted_at)
db.session.commit()
if not keep_metadata:
index.delete_collection(collection.id, sync=True)
aggregator.drop()
refresh_collection(collection.id)
Authz.flush()
def upgrade_collections():
for collection in Collection.all(deleted=True):
if collection.deleted_at is not None:
delete_collection(collection, keep_metadata=True, sync=True)
else:
compute_collection(collection, force=True)
# update global cache:
compute_collections()
|
pudo/aleph
|
aleph/logic/collections.py
|
Python
|
mit
| 7,335 | 0.000273 |
#-*- encoding:utf-8 -*-
'''
Created on Dec 1, 2014
@author: letian
'''
import networkx as nx
from Segmentation import Segmentation
import numpy as np
import math
class TextRank4Sentence(object):
def __init__(self, stop_words_file = None, delimiters='?!;?!。;…\n'):
'''
        `stop_words_file`: defaults to None, in which case the internal stop-word list is empty; it may be set to a file path (string) from which stop words are loaded.
        `delimiters`: defaults to `'?!;?!。;…\n'`; used to split the text into sentences.
        self.sentences: list of sentences.
        self.words_no_filter: two-level list obtained by segmenting each sentence in sentences into words.
        self.words_no_stop_words: two-level list obtained by removing stop words from words_no_filter.
        self.words_all_filters: two-level list obtained by keeping only words of the specified parts of speech from words_no_stop_words.
'''
self.seg = Segmentation(stop_words_file=stop_words_file, delimiters=delimiters)
self.sentences = None
        self.words_no_filter = None # 2-D list (list of word lists)
self.words_no_stop_words = None
self.words_all_filters = None
self.graph = None
self.key_sentences = None
def train(self, text, lower = False, speech_tag_filter=True,
source = 'no_stop_words', sim_func = 'standard'):
'''
        `text`: the text content, a string.
        `lower`: whether to convert the text to lower case. Defaults to False.
        `speech_tag_filter`: if True, the internal part-of-speech list is used to filter words when building words_all_filters.
        If False, words_all_filters is identical to words_no_stop_words.
        `source`: which of words_no_filter, words_no_stop_words, words_all_filters to use when computing sentence similarity.
        Defaults to `'no_stop_words'`; valid values are `'no_filter', 'no_stop_words', 'all_filters'`.
        `sim_func`: the function used to compute sentence similarity. Currently there is only one, matching the default value `standard`.
'''
self.key_sentences = []
(self.sentences, self.words_no_filter, self.words_no_stop_words, self.words_all_filters) = self.seg.segment(text=text,
lower=lower,
speech_tag_filter=speech_tag_filter);
# -
# print self.sentences
if source == 'no_filter':
source = self.words_no_filter
elif source == 'all_filters':
source = self.words_all_filters
else:
source = self.words_no_stop_words
sim_func = self._get_similarity_standard
sentences_num = len(source)
self.graph = np.zeros((sentences_num, sentences_num))
for x in xrange(sentences_num):
for y in xrange(x, sentences_num):
similarity = sim_func(source[x], source[y])
self.graph[x, y] = similarity
self.graph[y, x] = similarity
# for x in xrange(sentences_num):
# row_sum = np.sum(self.graph[x, :])
# if row_sum > 0:
# self.graph[x, :] = self.graph[x, :] / row_sum
# print self.graph
nx_graph = nx.from_numpy_matrix(self.graph)
scores = nx.pagerank(nx_graph) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
# print sorted_scores
for index, _ in sorted_scores:
self.key_sentences.append(self.sentences[index])
# print '\n'.join(self.key_sentences)
def _get_similarity_standard(self, word_list1, word_list2):
'''
        Default function for computing the similarity of two sentences.
        word_list1, word_list2: the two sentences, each represented as a list of words
'''
vector1, vector2 =self._gen_vectors(word_list1, word_list2)
# print vector1, vector2
vector3 = [vector1[x]*vector2[x] for x in xrange(len(vector1))]
vector4 = [1 for num in vector3 if num > 0.]
co_occur_num = sum(vector4)
# print co_occur_num
if co_occur_num == 0.:
return 0.
        denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2))) # denominator
if denominator == 0.:
return 0.
return co_occur_num / denominator
def _gen_vectors(self, word_list1, word_list2):
'''
        Convert the two sentences into two vectors of the same size; their similarity can then be computed from these vectors.
        word_list1, word_list2: the two sentences, each represented as a list of words
'''
words = list(set(word_list1 + word_list2))
vector1 = [float(word_list1.count(word)) for word in words]
vector2 = [float(word_list2.count(word)) for word in words]
return vector1, vector2
def get_key_sentences(self, num = 6, sentence_min_len = 6):
'''
        Get the num most important sentences of length at least sentence_min_len, for building a summary.
        Returns a list.
'''
result = []
count = 0
for sentence in self.key_sentences:
if count >= num:
break
if len(sentence) >= sentence_min_len:
result.append(sentence)
count += 1
return result
if __name__ == '__main__':
import codecs
# text = codecs.open('../text/03.txt', 'r', 'utf-8').read()
text = "这间酒店位于北京东三环,里面摆放很多雕塑,文艺气息十足。答谢宴于晚上8点开始。"
tr4s = TextRank4Sentence(stop_words_file='../stopword.data')
tr4s.train(text=text, speech_tag_filter=True, lower=True, source = 'all_filters')
print '\n'.join(tr4s.get_key_sentences(num=1))
print '\n'.join(tr4s.sentences)
for wl in tr4s.words_no_filter:
print '[', ', \''.join(wl), ']'
print
for wl in tr4s.words_no_stop_words:
print '[', ', \''.join(wl), ']'
print
for wl in tr4s.words_all_filters:
print '[', ', \''.join(wl), ']'
|
MSC19950601/TextRank4ZH
|
textrank4zh/TextRank4Sentence.py
|
Python
|
mit
| 6,656 | 0.012715 |
# coding=utf-8
"""
Django settings for procult project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
from django.conf import settings
import raven
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv(
'SECRET_KEY',
'ru@3uj@@mm#(#s8_=$%h$=f+v75&8@s$dzz8-7$07-r85l0b+6'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG', False)
ALLOWED_HOSTS = os.getenv('ALLOWED_DOMAIN', 'localhost').split(',')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'raven.contrib.django.raven_compat',
'rest_framework',
'rest_localflavor',
'import_export',
'procult.authentication',
'procult.core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'procult.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'procult.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# In addition, dj-database-url is used here: it configures the database from
# the DATABASE_URL environment variable and falls back to a default value if
# none is found.
# https://pypi.python.org/pypi/dj-database-url
DATABASES = {
'default': dj_database_url.config(
default='postgres://procult:123456@localhost/procult'
)
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Authentication
AUTH_USER_MODEL = 'authentication.User'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
ALLOWED_FILES = [
'application/pdf',
'application/msword',
'application/excel',
'application/x-excel',
'application/vnd.ms-excel',
'application/x-msexcel',
'application/powerpoint',
'application/mspowerpoint',
'application/x-mspowerpoint',
'application/vnd.ms-powerpoint',
'application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.sun.xml.writer',
'application/vnd.sun.xml.writer.global',
'application/vnd.sun.xml.impress',
'application/vnd.sun.xml.draw',
'application/vnd.sun.xml.calc',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.openxmlformats-officedocument.presentationml.slide',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/x-7z-compressed',
'application/zip',
'application/x-rar-compressed',
'image/png',
'image/gif',
'image/jpg',
'image/jpeg',
'image/pjpeg',
'image/tiff',
'image/x-tiff',
'image/bmp',
'image/x-windows-bmp',
'audio/ogg',
'audio/mpeg',
'audio/mpeg3',
'audio/mp3',
    'audio/mp4',
'audio/x-mpeg-3',
'audio/voc',
'audio/wav',
'audio/x-wav',
'audio/aiff',
'audio/x-aiff',
'audio/midi',
'audio/x-mid',
'audio/x-midi',
'audio/webm',
'application/mp4',
'application/x-troff-msvideo',
'application/vnd.rn-realmedia',
'application/ogg',
'video/mp4',
'video/mpeg',
'video/ogg',
'video/x-mpeg',
'video/avi',
'video/msvideo',
'video/x-msvideo',
'video/x-dv',
    'video/quicktime',
'video/webm',
'video/H261',
'video/H263',
'video/H263-1998',
'video/H263-2000',
'video/H264',
'video/H264-RCDO',
    'video/H264-SVC'
]
# Django Rest Framework
REST_FRAMEWORK = {
'DATE_FORMAT': "%d/%m/%Y",
'DATE_INPUT_FORMATS': ["%d/%m/%Y", "%d/%m/%y"],
'PAGE_SIZE': 100,
'EXCEPTION_HANDLER': 'procult.core.exceptions.custom_exception_handler',
'UNICODE_JSON': False
}
# Disable the Django Rest Framework browsable API (the "friendly" browser view)
if not settings.DEBUG:
REST_FRAMEWORK.update({
'DEFAULT_RENDERER_CLASSES': (
'procult.core.renderers.UnicodeJSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
)
})
# Local configuration
# TODO: Separate in multiple settings
if settings.DEBUG:
INSTALLED_APPS += (
'corsheaders',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
CORS_ORIGIN_ALLOW_ALL = os.getenv('DEBUG', False)
# Define CORS to allow client in development mode
CORS_ORIGIN_WHITELIST = (
'localhost:5000',
'procult.local:5000',
'0.0.0.0:5000',
)
RAVEN_CONFIG = {
'dsn': os.getenv('RAVEN_DSN_URL'),
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(BASE_DIR),
}
|
hackultura/procult
|
procult/settings.py
|
Python
|
gpl-2.0
| 7,162 | 0.00014 |
import multiprocessing
######################################################################
# Controllers for parallel execution, one per worker.
# Return when a 'None' job (poison pill) is reached.
######################################################################
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue, block=True, timeout=None):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
self.block = block
self.timeout = timeout
def run(self):
proc_name = self.name
while True:
next_task = self.task_queue.get(self.block, self.timeout)
if next_task is None:
# Poison pill means we should exit
break
self.result_queue.put(next_task())
return
class Task(object):
def __init__(self, func, args):
self.func = func
self.args = args
def __call__(self):
return self.func(*self.args)
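# Self-contained usage sketch (illustrative demo, not part of the original
# module): two Consumer workers drain a queue of Task objects and exit when
# they receive their poison pill (None).
if __name__ == "__main__":
    tasks = multiprocessing.Queue()
    results = multiprocessing.Queue()
    workers = [Consumer(tasks, results) for _ in range(2)]
    for w in workers:
        w.start()
    for i in range(10):
        tasks.put(Task(pow, (i, 2)))  # each Task wraps a callable and its args
    for _ in workers:
        tasks.put(None)  # one poison pill per worker
    print(sorted(results.get() for _ in range(10)))
    for w in workers:
        w.join()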
|
t-brandt/acorns-adi
|
parallel/multiproc_utils.py
|
Python
|
bsd-2-clause
| 1,069 | 0.006548 |
"""repos_mean table
Revision ID: 4a7b02b0a63
Revises: b75a76936b
Create Date: 2015-11-05 11:25:32.920590
"""
revision = '4a7b02b0a63'
down_revision = 'b75a76936b'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy.sql import func
import sqlalchemy as sa
def upgrade():
op.create_table(
'repos_mean',
sa.Column('repo_id', sa.BigInteger(), nullable=False),
sa.Column('created_at', sa.Date(), nullable=False),
sa.Column('value', sa.Float(), nullable=False),
sa.ForeignKeyConstraint(
['repo_id'], ['repos.id'],
name='fk_repos_mean_repo_id', ondelete='CASCADE'
),
sa.PrimaryKeyConstraint('repo_id', 'created_at')
)
def downgrade():
op.drop_table('repos_mean')
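# Usage sketch (standard Alembic CLI; the exact invocation depends on the
# project's alembic.ini):
#
#   alembic upgrade head            # applies this revision (creates repos_mean)
#   alembic downgrade b75a76936b    # reverts it (drops repos_mean)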
|
kkamkou/gitmostwanted.com
|
migration/versions/4a7b02b0a63_repos_mean_table.py
|
Python
|
mit
| 782 | 0.003836 |
import pytest
import queue
from iotile_transport_awsiot.mqtt_client import OrderedAWSIOTClient
import time
pytestmark = pytest.mark.skip("This distribution needs to be updated to work with asyncio gateway")
def test_gateway(gateway, local_broker, args):
"""Make sure we can connect to the gateway by sending packets over the mqtt message broker."""
client = OrderedAWSIOTClient(args)
client.connect('hello')
local_broker.expect(5)
client.publish('devices/d--0000-0000-0000-0002/control/probe', {'type': 'command', 'operation': 'probe', 'client': 'hello'})
local_broker.wait()
# There should be 1 command message, 1 response and 1 advertisement notification per device
assert len(local_broker.messages) == 5
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0001/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0003/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/devices/d--0000-0000-0000-0004/data/advertisement' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/data/status' in local_broker.messages
assert 'devices/d--0000-0000-0000-0002/control/probe' in local_broker.messages
def test_probe(gateway, hw_man, local_broker):
"""Make sure we can probe for devices."""
local_broker.expect(3)
results = hw_man.scan(wait=0.1)
assert len(results) == 3
assert results[0]['uuid'] == 1
assert results[0]['connection_string'] == 'd--0000-0000-0000-0001'
assert results[1]['uuid'] == 3
assert results[1]['connection_string'] == 'd--0000-0000-0000-0003'
assert results[2]['uuid'] == 4
assert results[2]['connection_string'] == 'd--0000-0000-0000-0004'
def test_connect(gateway, hw_man, local_broker):
"""Make sure we can connect to a device."""
hw_man.scan(wait=0.1)
hw_man.connect(1)
hw_man.disconnect()
def test_streaming(gateway, hw_man, local_broker):
"""Make sure we can receive streamed data."""
hw_man.connect(3, wait=0.1)
hw_man.enable_streaming()
reps = hw_man.wait_reports(100, timeout=1.0)
assert len(reps) == 100
def test_tracing(gateway, hw_man, local_broker):
"""Make sure we can receive tracing data."""
hw_man.connect(4, wait=0.1)
hw_man.enable_tracing()
time.sleep(0.1)
data = hw_man.dump_trace('raw')
assert data == b'Hello world, this is tracing data!'
def test_rpcs(gateway, hw_man, local_broker):
"""Make sure we can send rpcs."""
hw_man.connect(3, wait=0.1)
hw_man.controller()
def test_script(gateway, hw_man, local_broker):
"""Make sure we can send scripts."""
script = bytearray(('ab'*10000).encode('utf-8'))
progs = queue.Queue()
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 0.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.put((x,y)))
last_done = -1
last_total = None
prog_count = 0
while not progs.empty():
done, total = progs.get(block=False)
assert done <= total
assert done >= last_done
if last_total is not None:
assert total == last_total
last_done = done
last_total = total
prog_count += 1
assert prog_count > 0
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
def test_script_chunking(gateway, hw_man, local_broker):
"""Make sure we can send scripts."""
script = bytearray(('a'*1024*80).encode('utf-8'))
progs = queue.Queue()
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 0.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.put((x, y)))
last_done = -1
last_total = None
prog_count = 0
while not progs.empty():
done, total = progs.get(block=False)
assert done <= total
assert done >= last_done
if last_total is not None:
assert total == last_total
last_done = done
last_total = total
prog_count += 1
assert prog_count > 0
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
def test_script_progress_throttling(gateway, hw_man, local_broker):
"""Make sure progress updates are properly throttled."""
script = bytearray(('a'*1024*80).encode('utf-8'))
progs = []
hw_man.connect(3, wait=0.1)
gateway.agents[0].throttle_progress = 10.0
hw_man.stream._send_highspeed(script, lambda x, y: progs.append((x, y)))
dev = gateway.device_manager.adapters[0]._adapter.devices[3]
assert dev.script == script
# This should happen faster than our throttling period so we should
# get exactly 2 progress updates, on start and on finish
assert len(progs) == 2
x, y = progs[0]
assert x == 0
x, y = progs[1]
assert x == y
def test_autodisconnect(gateway, hw_man, local_broker):
"""Make sure we autodisconnect clients."""
gateway.agents[0].client_timeout = 0.1
hw_man.connect(3, wait=0.1)
assert len(gateway.agents[0]._connections) == 1
time.sleep(1.5)
assert len(gateway.agents[0]._connections) == 0
assert hw_man.stream.connection_interrupted is True
# Make sure we can reconnect automatically
hw_man.controller()
assert len(gateway.agents[0]._connections) == 1
# Let us lapse again
time.sleep(1.5)
assert len(gateway.agents[0]._connections) == 0
# Return to our disconnected state
hw_man.disconnect()
# Make sure we can connect normally again
hw_man.connect(3, wait=0.1)
|
iotile/coretools
|
transport_plugins/awsiot/test/test_agent.py
|
Python
|
gpl-3.0
| 5,635 | 0.00213 |
# -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SSL transport. Requires PyOpenSSL (http://pyopenssl.sf.net).
SSL connections require a ContextFactory so they can create SSL contexts.
End users should only use the ContextFactory classes directly - for SSL
connections use the reactor.connectSSL/listenSSL and so on, as documented
in IReactorSSL.
All server context factories should inherit from ContextFactory, and all
client context factories should inherit from ClientContextFactory. At the
moment this is not enforced, but in the future it might be.
Future Plans:
- split module so reactor-specific classes are in a separate module
- support for switching TCP into SSL
- more options
Maintainer: Itamar Shtull-Trauring
"""
# If something goes wrong, most notably an OpenSSL import failure,
# sys.modules['twisted.internet.ssl'] will be bound to a partially
# initialized module object. This is wacko, but we will take advantage
# of it to publish whether or not SSL is available.
# See the end of this module for the other half of this solution.
# The correct idiom to import this module is thus:
# try:
# from twisted.internet import ssl
# except ImportError:
# # happens the first time the interpreter tries to import it
# ssl = None
# if ssl and not ssl.supported:
# # happens second and later times
# ssl = None
supported = False
# System imports
from OpenSSL import SSL
from zope.interface import implements, implementsOnly, implementedBy
# Twisted imports
from twisted.internet import tcp, interfaces, base, address
class ContextFactory:
"""A factory for SSL context objects, for server SSL connections."""
isClient = 0
def getContext(self):
"""Return a SSL.Context object. override in subclasses."""
raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
"""
L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
objects. These objects define certain parameters related to SSL
handshakes and the subsequent connection.
@ivar _contextFactory: A callable which will be used to create new
context objects. This is typically L{SSL.Context}.
"""
_context = None
def __init__(self, privateKeyFileName, certificateFileName,
sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
"""
@param privateKeyFileName: Name of a file containing a private key
@param certificateFileName: Name of a file containing a certificate
@param sslmethod: The SSL method to use
"""
self.privateKeyFileName = privateKeyFileName
self.certificateFileName = certificateFileName
self.sslmethod = sslmethod
self._contextFactory = _contextFactory
# Create a context object right now. This is to force validation of
# the given parameters so that errors are detected earlier rather
# than later.
self.cacheContext()
def cacheContext(self):
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
# Disallow SSLv2! It's insecure! SSLv3 has been around since
# 1996. It's time to move on.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.use_certificate_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
self._context = ctx
def __getstate__(self):
d = self.__dict__.copy()
del d['_context']
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
"""
Return an SSL context.
"""
return self._context
class ClientContextFactory:
"""A context factory for SSL clients."""
isClient = 1
# SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1. We disable SSLv2 below,
# though.
method = SSL.SSLv23_METHOD
_contextFactory = SSL.Context
def getContext(self):
ctx = self._contextFactory(self.method)
# See comment in DefaultOpenSSLContextFactory about SSLv2.
ctx.set_options(SSL.OP_NO_SSLv2)
return ctx
class Client(tcp.Client):
"""I am an SSL client."""
implementsOnly(interfaces.ISSLTransport,
*[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport])
def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
# tcp.Client.__init__ depends on self.ctxFactory being set
self.ctxFactory = ctxFactory
tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
def getHost(self):
"""Returns the address from which I am connecting."""
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
"""Returns the address that I am connected."""
return address.IPv4Address('TCP', self.addr[0], self.addr[1], 'SSL')
def _connectDone(self):
self.startTLS(self.ctxFactory)
self.startWriting()
tcp.Client._connectDone(self)
class Server(tcp.Server):
"""I am an SSL server.
"""
implements(interfaces.ISSLTransport)
def getHost(self):
"""Return server's address."""
h, p = self.socket.getsockname()
return address.IPv4Address('TCP', h, p, 'SSL')
def getPeer(self):
"""Return address of peer."""
h, p = self.client
return address.IPv4Address('TCP', h, p, 'SSL')
class Port(tcp.Port):
"""I am an SSL port."""
_socketShutdownMethod = 'sock_shutdown'
transport = Server
def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.ctxFactory = ctxFactory
def createInternetSocket(self):
"""(internal) create an SSL socket
"""
sock = tcp.Port.createInternetSocket(self)
return SSL.Connection(self.ctxFactory.getContext(), sock)
def _preMakeConnection(self, transport):
# *Don't* call startTLS here
# The transport already has the SSL.Connection object from above
transport._startTLS()
return tcp.Port._preMakeConnection(self, transport)
class Connector(base.BaseConnector):
def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
self.host = host
self.port = port
self.bindAddress = bindAddress
self.contextFactory = contextFactory
base.BaseConnector.__init__(self, factory, timeout, reactor)
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor)
def getDestination(self):
return address.IPv4Address('TCP', self.host, self.port, 'SSL')
from twisted.internet._sslverify import DistinguishedName, DN, Certificate
from twisted.internet._sslverify import CertificateRequest, PrivateCertificate
from twisted.internet._sslverify import KeyPair
from twisted.internet._sslverify import OpenSSLCertificateOptions as CertificateOptions
__all__ = [
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
'DistinguishedName', 'DN',
'Certificate', 'CertificateRequest', 'PrivateCertificate',
'KeyPair',
'CertificateOptions',
]
supported = True
|
sorenh/cc
|
vendor/Twisted-10.0.0/twisted/internet/ssl.py
|
Python
|
apache-2.0
| 7,496 | 0.002935 |
from .resource import Resource
from collections import Iterator
import copy
try:
# python 2
from urllib import quote
except ImportError:
# python 3
from urllib.parse import quote
class Pages(Iterator):
def __init__(self, opts, url, path, params):
if isinstance(path, list):
pages_url = '/'.join([url] + [quote(elem) for elem in path])
else:
pages_url = '/'.join([url, quote(path)])
self.resource = Resource(pages_url, **opts)
self.params = params
self._root_resource = Resource(url[:url.find('/v0')], **opts)
self.response = None
def _handle_page(self, querydict={}, val='next', **headers):
"""
Executes the request getting the next (or previous) page,
incrementing (or decrementing) the current page.
"""
params = copy.copy(self.params)
params.update(querydict)
# update uri based on next page
if self.response:
self.response.raise_for_status()
_next = self.response.links.get(val, {}).get('url')
if _next:
response = self._root_resource._make_request(
'GET', _next, params, **headers)
self._handle_res(None, response)
return response
else:
raise StopIteration
else:
response = self.resource._make_request(
'GET', '', params, **headers)
self._handle_res(None, response)
return response
def _handle_res(self, session, response):
"""
Stores the response, which we use for determining
next and prev pages.
"""
self.response = response
def reset(self):
"""
Clear the page's current place.
page_1 = page.next().result()
page_2 = page.next().result()
page.reset()
page_x = page.next().result()
assert page_x.url == page_1.url
"""
self.response = None
def next(self, querydict={}, **headers):
"""
Gets the next page of results.
Raises `StopIteration` when there are no more results.
"""
return self._handle_page(querydict, **headers)
def __next__(self):
return self.next()
def prev(self, querydict={}, **headers):
"""
Gets the previous page of results.
Raises `StopIteration` when there are no more results.
Note: Only collection searches provide a `prev` value.
        For all others, `prev` will always raise `StopIteration`.
"""
return self._handle_page(querydict, 'prev', **headers)
def all(self):
results = []
for response in self:
response.raise_for_status()
results.extend(response['results'])
return results
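# --- Hedged usage sketch (not part of the original module) ---
# Driving Pages directly; the URL, collection name and Resource options below
# are placeholders, not values taken from this repository.
#
#     opts = {}   # whatever keyword options Resource accepts (auth, session, ...)
#     pages = Pages(opts, 'https://api.example.com/v0', 'events', {'limit': 10})
#     for response in pages:          # next()/__next__ raise StopIteration at the end
#         print(response['results'])
#
#     pages.reset()                   # start again from the first page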
| helloworldC2/VirtualRobot | porc/pages.py | Python | mit | 2,872 | 0.000696 |
#!/usr/bin/env python
"""
crate_anon/crateweb/core/context_processors.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**A common context dictionary for all Django requests.**
"""
from typing import Any, Dict
from django.conf import settings
from django.http.request import HttpRequest
from crate_anon.common.constants import CRATE_DOCS_URL, HelpUrl
# noinspection PyUnusedLocal
def common_context(request: HttpRequest) -> Dict[str, Any]:
"""
Returns a context used across the site.
Args:
request: the :class:`django.http.request.HttpRequest`
Returns:
dict: a context dictionary
"""
return {
'CRATE_DOCS_URL': CRATE_DOCS_URL,
'HelpUrl': HelpUrl,
'nav_on_main_menu': False,
'RESEARCH_DB_TITLE': settings.RESEARCH_DB_TITLE,
}
# Try to minimize SQL here (ideally none!), as these calls will be used for
# EVERY request.
# This problem can partially be circumvented with a per-request cache; see
# http://stackoverflow.com/questions/3151469/per-request-cache-in-django
# But good practice is: keep queries to a minimum.
|
RudolfCardinal/crate
|
crate_anon/crateweb/core/context_processors.py
|
Python
|
gpl-3.0
| 1,968 | 0 |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
class MocaException(Exception):
"""Base class for MoCA Exceptions"""
def __init__(self, msg):
self.value = msg
def __str__(self):
"""string representation of MoCA Exception
Returns
-------
mocastr: string representation
"""
mocastr = repr(self.value)
return mocastr
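# --- Hedged usage sketch (not part of the original module) ---
# MocaException is meant to be subclassed; the subclass name below is a placeholder.
#
#     class MotifNotFoundError(MocaException):
#         pass
#
#     try:
#         raise MotifNotFoundError('motif file missing')
#     except MocaException as exc:
#         print(str(exc))    # -> "'motif file missing'" (repr of the stored message)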
| saketkc/moca | moca/helpers/exceptions.py | Python | isc | 455 | 0.002198 |
class TestStatic(object):
@staticmethod
def static1(self):
pass
@staticmethod
def static2(self):
pass
| siddhika1889/Pydev-Editor | tests/pysrc/extendable/static.py | Python | epl-1.0 | 143 | 0.020979 |
from ptypes import *
v = 0 # FIXME: this file format is busted
class seq_parameter_set_rbsp(pbinary.struct):
class __pic_order_type_1(pbinary.struct):
_fields_ = [
(1, 'delta_pic_order_always_zero_flag'),
(v, 'offset_for_non_ref_pic'),
(v, 'offset_for_top_to_bottom_field'),
(v, 'num_ref_frames_in_pic_order_cnt_cycle'),
(lambda s: dyn.array( dyn.clone(pbinary.struct,_fields_=[(v,'offset_for_ref_frame')]), s['num_ref_frames_in_pic_order_cnt_cycle']), 'ref_frames')
]
def __pic_order(self):
type = self['pic_order_cnt_type']
if type == 0:
return dyn.clone(pbinary.struct, _fields_=[(v, 'log2_max_pic_order_cnt_lsb')])
elif type == 1:
return __pic_order_type_1
raise NotImplementedError(type)
class __frame_crop_offset(pbinary.struct):
_fields_ = [
(v, 'frame_crop_left_offset'),
(v, 'frame_crop_right_offset'),
(v, 'frame_crop_top_offset'),
(v, 'frame_crop_bottom_offset'),
]
def __frame_crop(self):
if self['frame_cropping_flag']:
return __frame_crop_offset
return dyn.clone(pbinary.struct,_fields_=[])
def __rbsp_trailing_bits(self):
return 0
_fields_ = [
(8, 'profile_idc'),
(1, 'constraint_set0_flag'),
(1, 'constraint_set1_flag'),
(1, 'constraint_set2_flag'),
(5, 'reserved_zero_5bits'),
(8, 'level_idc'),
(v, 'seq_parameter_set_id'),
(v, 'pic_order_cnt_type'),
(__pic_order, 'pic_order'),
(v, 'num_ref_frames'),
(1, 'gaps_in_frame_num_value_allowed_flag'),
(v, 'pic_width_in_mbs_minus1'),
(v, 'pic_height_in_map_units_minus1'),
(1, 'frame_mbs_only_flag'),
(lambda s: [0,1][s['frame_mbs_only_flag']], 'mb_adaptive_frame_field_flag'),
(1, 'direct_8x8_inference_flag'),
(1, 'frame_cropping_flag'),
(__frame_crop, 'frame_crop'),
(1, 'vul_parameters_present_flag'),
(lambda s: [dyn.clone(pbinary.struct,_fields_=[]),__vul_parameters][s['vul_parameters_present_flag']], 'vul_parameters'),
(__rbsp_trailing_bits, 'rbsp_trailing_bits'),
]
| arizvisa/syringe | template/video/h264.py | Python | bsd-2-clause | 2,279 | 0.005265 |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import base64
import getpass
import os.path
from vistrails.core import get_vistrails_application
from vistrails.core.configuration import get_vistrails_configuration
from vistrails.core.system import vistrails_default_file_type, get_elementtree_library, \
default_connections_file, vistrails_examples_directory
from vistrails.core.external_connection import ExtConnectionList, DBConnection
from vistrails.core.thumbnails import ThumbnailCache
from vistrails.core import debug
from vistrails.db.services.locator import XMLFileLocator as _XMLFileLocator, \
DBLocator as _DBLocator, ZIPFileLocator as _ZIPFileLocator, \
BaseLocator as _BaseLocator, UntitledLocator as _UntitledLocator
from vistrails.db.services.io import SaveBundle, test_db_connection
from vistrails.db import VistrailsDBException
from vistrails.db.domain import DBWorkflow
ElementTree = get_elementtree_library()
class BaseLocator(_BaseLocator):
@staticmethod
def convert_locator(locator):
if locator.__class__ == _XMLFileLocator:
locator.__class__ = XMLFileLocator
elif locator.__class__ == _ZIPFileLocator:
locator.__class__ = ZIPFileLocator
elif locator.__class__ == _DBLocator:
DBLocator.convert(locator)
elif locator.__class__ == _UntitledLocator:
locator.__class__ = UntitledLocator
@staticmethod
def from_url(url):
locator = _BaseLocator.from_url(url)
BaseLocator.convert_locator(locator)
return locator
class CoreLocator(object):
@staticmethod
def prompt_autosave(parent_widget):
pass # Opens a dialog that prompts the user if they want to
# use temporaries
@staticmethod
def load_from_gui(parent_widget, obj_type):
pass # Opens a dialog that the user will be able to use to
# show the right values, and returns a locator suitable
# for loading a file
@staticmethod
def save_from_gui(parent_widget, obj_type, locator):
pass # Opens a dialog that the user will be able to use to
# show the right values, and returns a locator suitable
# for saving a file
def update_from_gui(self, klass=None):
pass
# FIXME Need to do some more intelligent conversions anywhere this
# function gets called
@staticmethod
def get_convert_klass(vt_type):
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.core.vistrail.pipeline import Pipeline
from vistrails.core.log.log import Log
from vistrails.core.modules.module_registry import ModuleRegistry
from vistrails.core.log.opm_graph import OpmGraph
klass_map = {Vistrail.vtType: Vistrail,
Pipeline.vtType: Pipeline,
Log.vtType: Log,
ModuleRegistry.vtType: ModuleRegistry,
OpmGraph.vtType: OpmGraph}
return klass_map[vt_type]
class UntitledLocator(_UntitledLocator, CoreLocator):
def load(self, klass=None):
from vistrails.core.vistrail.vistrail import Vistrail
if klass is None:
klass = Vistrail
obj = _UntitledLocator.load(self, klass.vtType)
klass.convert(obj)
obj.locator = self
return obj
class XMLFileLocator(_XMLFileLocator, CoreLocator):
def __init__(self, filename, **kwargs):
_XMLFileLocator.__init__(self, filename, **kwargs)
def load(self, klass=None):
from vistrails.core.vistrail.vistrail import Vistrail
if klass is None:
klass = Vistrail
obj = _XMLFileLocator.load(self, klass.vtType)
klass.convert(obj)
obj.locator = self
return obj
def save(self, obj):
is_bundle = False
if type(obj) == type(SaveBundle(None)):
is_bundle = True
save_bundle = obj
obj = save_bundle.get_primary_obj()
klass = obj.__class__
obj = _XMLFileLocator.save(self, obj, False)
klass.convert(obj)
obj.locator = self
if is_bundle:
return SaveBundle(save_bundle.bundle_type, obj)
return obj
def save_as(self, obj, version=None):
is_bundle = False
if type(obj) == type(SaveBundle(None)):
is_bundle = True
save_bundle = obj
obj = save_bundle.get_primary_obj()
klass = obj.__class__
obj = _XMLFileLocator.save(self, obj, True, version)
klass.convert(obj)
obj.locator = self
if is_bundle:
return SaveBundle(save_bundle.bundle_type, obj)
return obj
##########################################################################
def __eq__(self, other):
if not isinstance(other, XMLFileLocator):
return False
return self._name == other._name
##########################################################################
@staticmethod
def prompt_autosave(parent_widget):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_autosave_prompt(parent_widget)
@staticmethod
def load_from_gui(parent_widget, obj_type):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_load_file_locator_from_gui(parent_widget, obj_type)
@staticmethod
def save_from_gui(parent_widget, obj_type, locator=None):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_save_file_locator_from_gui(parent_widget, obj_type,
locator)
# def update_from_gui(self, parent_widget, klass=None):
# from core.vistrail.vistrail import Vistrail
# if klass is None:
# klass = Vistrail
# import gui.extras.core.db.locator as db_gui
# return db_gui.get_load_file_locator_from_gui(parent_widget, klass.vtType)
class DBLocator(_DBLocator, CoreLocator):
class getKeyChain(object):
def set_key(self, key, passwd):
get_vistrails_application().keyChain.set_key(key,passwd)
def get_key(self, key):
return get_vistrails_application().keyChain.get_key(key)
keyChain = getKeyChain()
def __init__(self, host, port, database, user, passwd, name=None,
**kwargs):
_DBLocator.__init__(self, host, port, database, user, passwd, name,
**kwargs)
self.__list = ExtConnectionList.getInstance(default_connections_file())
self.ext_connection_id = -1
def load(self, klass=None):
from vistrails.core.vistrail.vistrail import Vistrail
if klass is None:
klass = Vistrail
save_bundle = _DBLocator.load(self, klass.vtType, ThumbnailCache.getInstance().get_directory())
if klass.vtType == DBWorkflow.vtType:
wf = save_bundle
klass = self.get_convert_klass(wf.vtType)
klass.convert(wf)
wf.locator = self
return wf
for obj in save_bundle.get_db_objs():
klass = self.get_convert_klass(obj.vtType)
klass.convert(obj)
obj.locator = self
return save_bundle
def save(self, save_bundle):
save_bundle = _DBLocator.save(self, save_bundle, False)
for obj in save_bundle.get_db_objs():
klass = self.get_convert_klass(obj.vtType)
klass.convert(obj)
obj.locator = self
return save_bundle
def save_as(self, save_bundle, version=None):
save_bundle = _DBLocator.save(self, save_bundle, True, version)
for obj in save_bundle.get_db_objs():
klass = self.get_convert_klass(obj.vtType)
klass.convert(obj)
obj.locator = self
# Need to copy images into thumbnail cache directory so references
# won't become invalid if they are in a temp dir that gets destroyed
# when the previous locator is closed
import shutil
thumb_cache = ThumbnailCache.getInstance()
thumb_cache_dir = thumb_cache.get_directory()
new_thumbnails = []
for thumbnail in save_bundle.thumbnails:
if os.path.dirname(thumbnail) == thumb_cache_dir:
new_thumbnails.append(thumbnail)
else:
cachedir_thumbnail = os.path.join(thumb_cache_dir, os.path.basename(thumbnail))
try:
shutil.copyfile(thumbnail, cachedir_thumbnail)
new_thumbnails.append(cachedir_thumbnail)
except Exception, e:
debug.critical("copying %s -> %s failed" % (
thumbnail, cachedir_thumbnail),
e)
save_bundle.thumbnails = new_thumbnails
# Need to update thumbnail cache in case some references have changed
thumb_cache.add_entries_from_files(save_bundle.thumbnails)
return save_bundle
def update_from_gui(self, parent_widget, klass=None):
from vistrails.core.vistrail.vistrail import Vistrail
import vistrails.gui.extras.core.db.locator as db_gui
if klass is None:
klass = Vistrail
config = self.find_connection_info(self._host, self._port, self._db)
if config is None or config['succeeded']==False:
config = db_gui.get_db_connection_from_gui(parent_widget,
-1,
"",
self._host,
self._port,
self._user,
self._passwd,
self._db)
if config is not None and config['succeeded'] == True:
self._host = config['host']
self._port = config['port']
self._db = config['db']
self._user = config['user']
self._passwd = config['passwd']
self.ext_connection_id = self.set_connection_info(**config)
return True
return False
def update_from_console(self):
config = self.find_connection_info(self._host, self._port, self._db)
if config is None:
# the problem here is if VisTrails is being run through command
            # line from LaTeX, stdout is being redirected to a log file, so
            # the user does not see the prompt in raw_input. getpass uses the
            # controlling terminal so it works fine. Just to make sure the user
            # sees the first message prompt, we write directly to the controlling terminal
try:
f= open('/dev/tty', 'w')
f.write("\nConnect to db with username [%s]: "%self._user)
f.close()
user = raw_input()
except IOError:
debug.warning("Couldn't write to terminal. Will try stdout")
user = raw_input("Connecting to db with username[%s]: "%self._user)
try:
if user != '':
self._user = user
passwd = getpass.getpass("password:")
self._passwd = passwd
config = {'host': self._host,
'port': int(self._port),
'user': self._user,
'passwd': self._passwd,
'db': self._db
}
test_db_connection(config)
config['succeeded'] = True
config['name'] = '%s@%s'%(self._user,self._host)
config['id'] = -1
except VistrailsDBException, e:
debug.critical('VisTrails DB Exception', e)
config['succeeded'] = False
except Exception, e2:
debug.critical('VisTrails Exception', e2)
config['succeeded'] = False
if config is not None:
if config['succeeded'] == False:
passwd = getpass.getpass("\nVisTrails DB password for user %s:"%config['user'])
self._user = config['user']
self._passwd = passwd
dbconfig = {'host': self._host,
'port': int(self._port),
'user': self._user,
'passwd': self._passwd,
'db': self._db
}
try:
test_db_connection(dbconfig)
config['succeeded'] = True
config['passwd'] = self._passwd
except VistrailsDBException, e:
debug.critical('VisTrails DB Exception', e)
config['succeeded'] = False
if config['succeeded'] == True:
self._host = config['host']
self._port = config['port']
self._db = config['db']
self._user = config['user']
self._passwd = config['passwd']
self.ext_connection_id = self.set_connection_info(**config)
return True
return False
return False
def find_connection_info(self, host, port, db):
"""find_connection_info(host:str, port: int, db: str) -> dict
Returns complete info of a connection with the given parameters
"""
id = self.__list.find_db_connection(host,port,db)
if id != -1:
return self.get_connection_info(id)
else:
return None
def get_connection_info(self, id):
"""get_connection_info(id: int) -> dict
Returns info of ExtConnection """
conn = self.__list.get_connection(id)
if conn != None:
succeeded = False
key = str(conn.id) + "." + conn.name + "." + conn.host
passwd = DBLocator.keyChain.get_key(key)
config = {'host': conn.host,
'port': conn.port,
'user': conn.user,
'passwd': passwd}
try:
test_db_connection(config)
succeeded = True
except VistrailsDBException:
succeeded = False
config['id'] = conn.id
config['name'] = conn.name
config['db'] = conn.database
config['succeeded'] = succeeded
else:
config = None
return config
def set_connection_info(self, *args, **kwargs):
"""set_connection_info(id: int, name: str, host: str, port:int,
user:str, passwd:str, db:str) -> None
If the connection exists it will update it, else it will add it
"""
id = kwargs["id"]
name = kwargs["name"]
host = kwargs["host"]
port = kwargs["port"]
user = kwargs["user"]
passwd = kwargs["passwd"]
db = kwargs["db"]
conn = DBConnection(id=id,
name=name,
host=host,
port=port,
user=user,
passwd='',
database=db,
dbtype='MySQL')
if self.__list.has_connection(id):
self.__list.set_connection(id,conn)
else:
if conn.id == -1:
conn.id = self.__list.get_fresh_id()
self.__list.add_connection(conn)
key = str(conn.id) + "." + conn.name + "." + conn.host
DBLocator.keyChain.set_key(key,passwd)
return conn.id
##########################################################################
def __eq__(self, other):
if type(other) != type(self):
return False
return (self._host == other._host and
self._port == other._port and
self._db == other._db and
self._user == other._user and
#self._name == other._name and
long(self._obj_id) == long(other._obj_id) and
self._obj_type == other._obj_type)
##########################################################################
@staticmethod
def prompt_autosave(parent_widget):
return True
@staticmethod
def load_from_gui(parent_widget, obj_type):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_load_db_locator_from_gui(parent_widget, obj_type)
@staticmethod
def save_from_gui(parent_widget, obj_type, locator=None):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_save_db_locator_from_gui(parent_widget, obj_type,
locator)
@staticmethod
def from_xml(node, include_name=False):
locator = _DBLocator.from_xml(node, include_name)
locator.__class__ = DBLocator
return locator
@staticmethod
def convert(locator):
locator.__class__ = DBLocator
locator.__list = ExtConnectionList.getInstance(
default_connections_file())
class ZIPFileLocator(_ZIPFileLocator, CoreLocator):
def __init__(self, filename, **kwargs):
_ZIPFileLocator.__init__(self, filename, **kwargs)
def load(self, klass=None):
from vistrails.core.vistrail.vistrail import Vistrail
if klass is None:
klass = Vistrail
save_bundle = _ZIPFileLocator.load(self, klass.vtType)
for obj in save_bundle.get_db_objs():
klass = self.get_convert_klass(obj.vtType)
klass.convert(obj)
obj.locator = self
return save_bundle
def save(self, save_bundle):
save_bundle = _ZIPFileLocator.save(self, save_bundle, False)
for obj in save_bundle.get_db_objs():
klass = self.get_convert_klass(obj.vtType)
klass.convert(obj)
obj.locator = self
return save_bundle
def save_as(self, save_bundle, version=None):
save_bundle = _ZIPFileLocator.save(self, save_bundle, True, version)
for obj in save_bundle.get_db_objs():
klass = self.get_convert_klass(obj.vtType)
klass.convert(obj)
obj.locator = self
# Need to update thumbnail cache since files have moved
ThumbnailCache.getInstance().add_entries_from_files(save_bundle.thumbnails)
return save_bundle
##########################################################################
def __eq__(self, other):
if not isinstance(other, ZIPFileLocator):
return False
return self._name == other._name
##########################################################################
@staticmethod
def prompt_autosave(parent_widget):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_autosave_prompt(parent_widget)
@staticmethod
def load_from_gui(parent_widget, obj_type):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_load_file_locator_from_gui(parent_widget, obj_type)
@staticmethod
def save_from_gui(parent_widget, obj_type, locator=None):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_save_file_locator_from_gui(parent_widget, obj_type,
locator)
class FileLocator(CoreLocator):
def __new__(self, filename=None, **kwargs):
if filename:
if filename.endswith('.vt'):
return ZIPFileLocator(filename, **kwargs)
elif filename.endswith('.vtl'):
return FileLocator.from_link_file(filename)
else:
return XMLFileLocator(filename, **kwargs)
else:
#return class based on default file type
if vistrails_default_file_type() == '.vt':
return ZIPFileLocator
else:
return XMLFileLocator
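    # --- Hedged usage sketch (not part of the original module) ---
    # FileLocator dispatches on the file extension; the paths below are placeholders.
    #
    #     FileLocator('/tmp/example.vt')    # -> ZIPFileLocator instance
    #     FileLocator('/tmp/example.xml')   # -> XMLFileLocator instance
    #     FileLocator('/tmp/example.vtl')   # -> locator parsed from the link file
    #     FileLocator()                     # -> a locator *class* chosen from the
    #                                       #    configured default file type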
@staticmethod
def prompt_autosave(parent_widget):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_autosave_prompt(parent_widget)
@staticmethod
def load_from_gui(parent_widget, obj_type):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_load_file_locator_from_gui(parent_widget, obj_type)
@staticmethod
def save_from_gui(parent_widget, obj_type, locator=None):
import vistrails.gui.extras.core.db.locator as db_gui
return db_gui.get_save_file_locator_from_gui(parent_widget, obj_type,
locator)
@staticmethod
def parse(element):
""" parse(element) -> XMLFileLocator
Parse an XML object representing a locator and returns an
XMLFileLocator or a ZIPFileLocator object.
"""
if str(element.getAttribute('type')) == 'file':
for n in element.childNodes:
if n.localName == "name":
filename = str(n.firstChild.nodeValue).strip(" \n\t")
return FileLocator(filename)
return None
else:
return None
#ElementTree port
@staticmethod
def from_xml(node):
"""from_xml(node:ElementTree.Element) -> XMLFileLocator or None
Parse an XML object representing a locator and returns a
XMLFileLocator or a ZIPFileLocator object."""
if node.tag != 'locator':
return None
type_ = node.get('type', '')
if str(type_) == 'file':
for child in node.getchildren():
if child.tag == 'name':
filename = child.text.encode('latin-1').strip()
return FileLocator(filename)
return None
@staticmethod
def from_link_file(filename):
"""from_link_file(filename: str) -> DBLocator
This will parse a '.vtl' file and will create a DBLocator. .vtl files
are vistrail link files and they are used to point vistrails to open
vistrails from the database on the web. """
def convert_from_str(value,type):
def bool_conv(x):
s = str(x).upper()
if s == 'TRUE':
return True
if s == 'FALSE':
return False
if value is not None:
if type == 'str':
return str(value)
elif value.strip() != '':
if type == 'long':
return long(value)
elif type == 'float':
return float(value)
elif type == 'int':
return int(value)
elif type == 'bool':
return bool_conv(value)
elif type == 'base64':
return base64.b64decode(value)
return None
def guess_extension_from_contents(contents):
if contents.startswith("<vistrail"):
return ".xml"
else:
return ".vt"
tree = ElementTree.parse(filename)
node = tree.getroot()
if node.tag != 'vtlink':
return None
#read attributes
data = node.get('host', None)
host = convert_from_str(data, 'str')
data = node.get('port', None)
port = convert_from_str(data,'int')
data = node.get('database', None)
database = convert_from_str(data,'str')
data = node.get('vtid')
vt_id = convert_from_str(data, 'int')
data = node.get('version')
version = convert_from_str(data, 'str')
data = node.get('tag')
tag = convert_from_str(data, 'str')
data = node.get('execute')
execute = convert_from_str(data, 'bool')
data = node.get('showSpreadsheetOnly')
showSpreadsheetOnly = convert_from_str(data, 'bool')
data = node.get('url', None)
url = convert_from_str(data,'str')
data = node.get('vtcontent', None)
vtcontent = convert_from_str(data,'base64')
data = node.get('filename', None)
vtname = convert_from_str(data, 'str')
data = node.get('forceDB',None)
forceDB = convert_from_str(data,'bool')
data = node.get('mashuptrail', None)
mashuptrail = convert_from_str(data, 'str')
data = node.get('mashupVersion', None)
mashupVersion = convert_from_str(data, 'int')
data = node.get('parameterExploration', None)
parameterExploration = convert_from_str(data, 'int')
#if execute is False, we will show the builder too
if showSpreadsheetOnly and not execute:
showSpreadsheetOnly = False
try:
version = int(version)
except (ValueError, TypeError):
pass
if tag is None:
tag = ''
## execute and showSpreadsheetOnly should be written to the current
## configuration
config = get_vistrails_configuration()
config.execute = execute
config.showWindow = not showSpreadsheetOnly
if not forceDB:
if vtcontent is not None:
if url is not None:
basename = url.split('/')[-1]
base,ext = os.path.splitext(basename)
dirname = os.path.dirname(filename)
fname = os.path.join(dirname,basename)
else:
basename = os.path.basename(filename)
base,ext = os.path.splitext(basename)
ext = guess_extension_from_contents(vtcontent)
dirname = os.path.dirname(filename)
fname = os.path.join(dirname,"%s%s"%(base,ext))
create_file = True
if os.path.exists(fname): #file was extracted before
create_file = False
oldf = open(fname)
oldcontents = oldf.read()
if oldcontents != vtcontent:
import vistrails.gui.extras.core.db.locator as db_gui
(overwrite, newname) = \
db_gui.ask_to_overwrite_file(None, 'vistrail')
create_file = True
if newname:
fname = newname
elif overwrite == False:
i=1
while os.path.exists(fname):
newbase = "%s_%s%s" % (base, i, ext)
fname = os.path.join(dirname,newbase)
i+=1
if create_file:
f = open(fname,'wb')
f.write(vtcontent)
f.close()
return FileLocator(fname, version_node=version, version_tag=tag,
mashuptrail=mashuptrail,
mashupVersion=mashupVersion,
parameterExploration=parameterExploration)
if host is not None:
user = ""
passwd = ""
return DBLocator(host, port, database,
user, passwd, None, obj_id=vt_id,
obj_type='vistrail',connection_id=None,
version_node=version, version_tag=tag,
mashuptrail=mashuptrail,
mashupVersion=mashupVersion,
parameterExploration=parameterExploration)
elif vtname is not None:
if os.path.dirname(vtname) == '':
#check if file exists in the same directory as the .vtl file
dirname = os.path.dirname(filename)
newvtname = os.path.join(dirname,vtname)
if os.path.exists(newvtname):
vtname = newvtname
#check for magic strings
if "@examples" in vtname:
vtname=vtname.replace("@examples", vistrails_examples_directory())
return FileLocator(vtname, version_node=version, version_tag=tag,
mashuptrail=mashuptrail,
mashupVersion=mashupVersion,
parameterExploration=parameterExploration)
| Nikea/VisTrails | vistrails/core/db/locator.py | Python | bsd-3-clause | 30,882 | 0.005375 |
import mallet.hmm as h_mm
import mallet.state as state
# emissions
def emissions():
return [
{'A': 0.25, 'B': 0.25, 'C': 0.5},
{'A': 0.55, 'B': 0.15, 'C': 0.3},
{'A': 0.675, 'B': 0.20, 'C': 0.125},
{'B': 0.5, 'C': 0.5},
{'A': 0.0, 'B': 0.5, 'C': 0.5}
]
def invalid_emissions():
return [
{'A': 0.5, 'B': 0.25, 'C': 0.10}
]
# states
def state_params():
emissions_list = emissions()
return [
(1, 'Begin', 'BEGIN', {}),
(2, 'State1', 'S', emissions_list[0]),
(3, 'State2', 'T', emissions_list[1]),
(4, 'State3', 'U', emissions_list[2]),
(5, 'End', 'END', {}),
]
def states():
state_param_list = state_params()
return dict((params[0], state.State(*params)) for params in state_param_list)
# transitions
def transitions(state_list = None):
if state_list is None: state_list = states()
return {
1: {
state_list[2]: 1.0
},
2: {
state_list[2]: 0.5,
state_list[3]: 0.5
},
3: {
state_list[3]: 0.75,
state_list[4]: 0.25
},
4: {
state_list[4]: 0.15,
state_list[5]: 0.85
},
5: {}
}
def fake_transitions(state_list = None):
if state_list is None: state_list = states()
return {
1: {
state_list[2]: 1.0,
state_list[3]: 0.0
}
}
def states_with_transitions():
states_with_transitions = states()
transition_list = transitions(states_with_transitions)
for name, state in states_with_transitions.iteritems():
state.transitions = transition_list[state.id_num]
return states_with_transitions
def hmm():
return h_mm.HMM(states_with_transitions())
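# --- Hedged usage sketch (not part of the original module) ---
# Composing the fixtures into a model; only names defined above are used.
#
#     model = hmm()                          # h_mm.HMM built from the wired-up states
#     begin = states_with_transitions()[1]   # the 'Begin' state (id_num 1)
#     begin.transitions                      # -> {<State1>: 1.0}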
| undeadpixel/mallet | test/fixtures/hmm_fixtures.py | Python | mit | 1,846 | 0.008126 |
# devicetree.py
# Device management for anaconda's storage configuration module.
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
import os
import re
from gi.repository import BlockDev as blockdev
from .actionlist import ActionList
from .errors import DeviceError, DeviceTreeError, StorageError
from .deviceaction import ActionDestroyDevice, ActionDestroyFormat
from .devices import BTRFSDevice, DASDDevice, NoDevice, PartitionDevice
from .devices import LVMLogicalVolumeDevice, LVMVolumeGroupDevice
from . import formats
from .devicelibs import lvm
from .devicelibs import edd
from . import udev
from . import util
from .flags import flags
from .populator import Populator
from .storage_log import log_method_call, log_method_return
import logging
log = logging.getLogger("blivet")
_LVM_DEVICE_CLASSES = (LVMLogicalVolumeDevice, LVMVolumeGroupDevice)
class DeviceTree(object):
""" A quasi-tree that represents the devices in the system.
The tree contains a list of :class:`~.devices.StorageDevice` instances,
which does not necessarily reflect the actual state of the system's
devices. :class:`~.deviceaction.DeviceAction` is used to perform
modifications to the tree, except when initially populating the tree.
:class:`~.deviceaction.DeviceAction` instances are registered, possibly
causing the addition or removal of :class:`~.devices.StorageDevice`
instances to/from the tree. A :class:`~.deviceaction.DeviceAction`
is reversible up to the time its 'execute' method is called.
Only one action of any given type/object pair should exist for
any given device at any given time.
:class:`~.deviceaction.DeviceAction` instances can only be registered
for leaf devices, except for resize actions.
"""
def __init__(self, conf=None, passphrase=None, luksDict=None,
iscsi=None, dasd=None):
"""
:keyword conf: storage discovery configuration
:type conf: :class:`~.StorageDiscoveryConfig`
:keyword passphrase: default LUKS passphrase
:keyword luksDict: a dict with UUID keys and passphrase values
:type luksDict: dict
:keyword iscsi: ISCSI control object
:type iscsi: :class:`~.iscsi.iscsi`
:keyword dasd: DASD control object
:type dasd: :class:`~.dasd.DASD`
"""
self.reset(conf, passphrase, luksDict, iscsi, dasd)
def reset(self, conf=None, passphrase=None, luksDict=None,
iscsi=None, dasd=None):
""" Reset the instance to its initial state. """
# internal data members
self._devices = []
self._actions = ActionList()
# a list of all device names we encounter
self.names = []
self._hidden = []
# initialize attributes that may later hold cached lvm info
self.dropLVMCache()
lvm.lvm_cc_resetFilter()
self._populator = Populator(self,
conf=conf,
passphrase=passphrase,
luksDict=luksDict,
iscsi=iscsi,
dasd=dasd)
@property
def actions(self):
return self._actions
def setDiskImages(self, images):
""" Set the disk images and reflect them in exclusiveDisks.
:param images: dict with image name keys and filename values
:type images: dict
.. note::
Disk images are automatically exclusive. That means that, in the
presence of disk images, any local storage not associated with
the disk images is ignored.
"""
self._populator.setDiskImages(images)
@property
def exclusiveDisks(self):
return self._populator.exclusiveDisks
@property
def ignoredDisks(self):
return self._populator.ignoredDisks
@property
def dasd(self):
return self._populator.dasd
@dasd.setter
def dasd(self, dasd):
self._populator.dasd = dasd
@property
def protectedDevNames(self):
return self._populator.protectedDevNames
@property
def diskImages(self):
return self._populator.diskImages
@property
def pvInfo(self):
if self._pvs_cache is None:
pvs = blockdev.lvm.pvs()
self._pvs_cache = dict((pv.pv_name, pv) for pv in pvs) # pylint: disable=attribute-defined-outside-init
return self._pvs_cache
@property
def lvInfo(self):
if self._lvs_cache is None:
lvs = blockdev.lvm.lvs()
self._lvs_cache = dict(("%s-%s" % (lv.vg_name, lv.lv_name), lv) for lv in lvs) # pylint: disable=attribute-defined-outside-init
return self._lvs_cache
def dropLVMCache(self):
""" Drop cached lvm information. """
self._pvs_cache = None # pylint: disable=attribute-defined-outside-init
self._lvs_cache = None # pylint: disable=attribute-defined-outside-init
def _addDevice(self, newdev, new=True):
""" Add a device to the tree.
:param newdev: the device to add
:type newdev: a subclass of :class:`~.devices.StorageDevice`
Raise ValueError if the device's identifier is already
in the list.
"""
if newdev.uuid and newdev.uuid in [d.uuid for d in self._devices] and \
not isinstance(newdev, NoDevice):
raise ValueError("device is already in tree")
# make sure this device's parent devices are in the tree already
for parent in newdev.parents:
if parent not in self._devices:
raise DeviceTreeError("parent device not in tree")
newdev.addHook(new=new)
self._devices.append(newdev)
# don't include "req%d" partition names
if ((newdev.type != "partition" or
not newdev.name.startswith("req")) and
newdev.type != "btrfs volume" and
newdev.name not in self.names):
self.names.append(newdev.name)
log.info("added %s %s (id %d) to device tree", newdev.type,
newdev.name,
newdev.id)
def _removeDevice(self, dev, force=None, modparent=True):
""" Remove a device from the tree.
:param dev: the device to remove
:type dev: a subclass of :class:`~.devices.StorageDevice`
:keyword force: whether to force removal of a non-leaf device
:type force: bool
:keyword modparent: update parent device to account for removal
:type modparent: bool
.. note::
Only leaves may be removed.
"""
if dev not in self._devices:
raise ValueError("Device '%s' not in tree" % dev.name)
if not dev.isleaf and not force:
log.debug("%s has %d kids", dev.name, dev.kids)
raise ValueError("Cannot remove non-leaf device '%s'" % dev.name)
dev.removeHook(modparent=modparent)
if modparent:
# if this is a partition we need to remove it from the parted.Disk
if isinstance(dev, PartitionDevice) and dev.disk is not None:
# adjust all other PartitionDevice instances belonging to the
# same disk so the device name matches the potentially altered
# name of the parted.Partition
for device in self._devices:
if isinstance(device, PartitionDevice) and \
device.disk == dev.disk:
device.updateName()
self._devices.remove(dev)
if dev.name in self.names and getattr(dev, "complete", True):
self.names.remove(dev.name)
log.info("removed %s %s (id %d) from device tree", dev.type,
dev.name,
dev.id)
def recursiveRemove(self, device, actions=True):
""" Remove a device after removing its dependent devices.
:param :class:`~.devices.StorageDevice` device: the device to remove
:keyword bool actions: whether to schedule actions for the removal
If the device is not a leaf, all of its dependents are removed
recursively until it is a leaf device. At that point the device is
removed, unless it is a disk. If the device is a disk, its
formatting is removed but no attempt is made to actually remove the
disk device.
"""
log.debug("removing %s", device.name)
devices = self.getDependentDevices(device)
# this isn't strictly necessary, but it makes the action list easier to
# read when removing logical partitions because of the automatic
# renumbering that happens if you remove them in ascending numerical
# order
devices.reverse()
while devices:
log.debug("devices to remove: %s", [d.name for d in devices])
leaves = [d for d in devices if d.isleaf]
log.debug("leaves to remove: %s", [d.name for d in leaves])
for leaf in leaves:
if actions:
if leaf.format.exists and not leaf.protected and \
not leaf.formatImmutable:
self.registerAction(ActionDestroyFormat(leaf))
self.registerAction(ActionDestroyDevice(leaf))
else:
if not leaf.formatImmutable:
leaf.format = None
self._removeDevice(leaf)
devices.remove(leaf)
if not device.formatImmutable:
if actions:
self.registerAction(ActionDestroyFormat(device))
else:
device.format = None
if not device.isDisk:
if actions:
self.registerAction(ActionDestroyDevice(device))
else:
self._removeDevice(device)
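    # --- Hedged usage sketch (not part of the original module) ---
    # Tearing down a container and everything stacked on it; 'tree' and the
    # device name are placeholders.
    #
    #     vg = tree.getDeviceByName("fedora")
    #     tree.recursiveRemove(vg)     # schedules destroy actions, leaves first
    #     tree.processActions()        # later, commit the scheduled actions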
def registerAction(self, action):
""" Register an action to be performed at a later time.
:param action: the action
:type action: :class:`~.deviceaction.DeviceAction`
Modifications to the Device instance are handled before we
get here.
"""
if not (action.isCreate and action.isDevice) and \
action.device not in self._devices:
raise DeviceTreeError("device is not in the tree")
elif (action.isCreate and action.isDevice):
if action.device in self._devices:
raise DeviceTreeError("device is already in the tree")
if action.isCreate and action.isDevice:
self._addDevice(action.device)
elif action.isDestroy and action.isDevice:
self._removeDevice(action.device)
elif action.isCreate and action.isFormat:
if isinstance(action.device.format, formats.fs.FS) and \
action.device.format.mountpoint in self.filesystems:
raise DeviceTreeError("mountpoint already in use")
# apply the action before adding it in case apply raises an exception
action.apply()
log.info("registered action: %s", action)
self._actions.append(action)
def cancelAction(self, action):
""" Cancel a registered action.
:param action: the action
:type action: :class:`~.deviceaction.DeviceAction`
This will unregister the action and do any required
modifications to the device list.
Actions all operate on a Device, so we can use the devices
to determine dependencies.
"""
if action.isCreate and action.isDevice:
# remove the device from the tree
self._removeDevice(action.device)
elif action.isDestroy and action.isDevice:
# add the device back into the tree
self._addDevice(action.device, new=False)
action.cancel()
self._actions.remove(action)
log.info("canceled action %s", action)
def findActions(self, device=None, action_type=None, object_type=None,
path=None, devid=None):
""" Find all actions that match all specified parameters.
A value of None for any of the keyword arguments indicates that any
value is acceptable for that field.
:keyword device: device to match
:type device: :class:`~.devices.StorageDevice` or None
:keyword action_type: action type to match (eg: "create", "destroy")
:type action_type: str or None
:keyword object_type: operand type to match (eg: "device" or "format")
:type object_type: str or None
:keyword path: device path to match
:type path: str or None
:keyword devid: device id to match
:type devid: int or None
:returns: a list of matching actions
:rtype: list of :class:`~.deviceaction.DeviceAction`
"""
return self._actions.find(device=device,
action_type=action_type,
object_type=object_type,
path=path,
devid=devid)
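    # --- Hedged usage sketch (not part of the original module) ---
    # Typical queries against the registered actions; 'tree' and 'dev' are placeholders.
    #
    #     destroys = tree.findActions(action_type="destroy", object_type="device")
    #     fmt_creates = tree.findActions(device=dev, action_type="create",
    #                                    object_type="format")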
def processActions(self, callbacks=None, dryRun=False):
self.actions.process(devices=self.devices,
dryRun=dryRun,
callbacks=callbacks)
def getDependentDevices(self, dep, hidden=False):
""" Return a list of devices that depend on dep.
The list includes both direct and indirect dependents.
:param dep: the device whose dependents we are looking for
:type dep: :class:`~.devices.StorageDevice`
:keyword bool hidden: include hidden devices in search
"""
dependents = []
log_method_call(self, dep=dep, hidden=hidden)
# don't bother looping looking for dependents if this is a leaf device
# XXX all hidden devices are leaves
if dep.isleaf and not hidden:
log.debug("dep is a leaf")
return dependents
devices = self._devices[:]
if hidden:
devices.extend(self._hidden)
for device in devices:
log.debug("checking if %s depends on %s", device.name, dep.name)
if device.dependsOn(dep):
dependents.append(device)
return dependents
def getRelatedDisks(self, disk):
""" Return disks related to disk by container membership.
:param :class:`~.devices.StorageDevice` disk: the disk
:returns: related disks
:rtype: set of :class:`~.devices.StorageDevice`
.. note::
The disk may be hidden.
"""
return set(d for dep in self.getDependentDevices(disk, hidden=True)
for d in dep.disks)
def hide(self, device):
""" Hide the specified device.
:param device: the device to hide
:type device: :class:`~.devices.StorageDevice`
Hiding a device will cancel all actions that involve the device and
will remove the device from the device list.
If the device is not a leaf device, all devices that depend on it
will be hidden leaves-first until the device is a leaf device.
If a device exists, performs some special actions and places
it on a list of hidden devices.
Mixes recursion and side effects, most significantly in the code
that removes all the actions. However, this code is a null op
in every case except the first base case that is reached,
where all actions are removed. This means that when a device
is removed explicitly in this function by means of a direct call to
_removeDevices it is guaranteed that all actions have already
been canceled.
If a device does not exist then it must have been removed by the
cancelation of all the actions, so it does not need to be removed
explicitly.
Most devices are considered leaf devices if they have no children,
however, some devices must satisfy more stringent requirements.
_removeDevice() will raise an exception if the device it is
removing is not a leaf device. hide() guarantees that any
device that it removes will have no children, but it does not
guarantee that the more stringent requirements will be enforced.
Therefore, _removeDevice() is invoked with the force parameter
set to True, to skip the isleaf check.
"""
if device in self._hidden:
return
# cancel actions first thing so that we hide the correct set of devices
if device.isDisk:
# Cancel all actions on this disk and any disk related by way of an
# aggregate/container device (eg: lvm volume group).
disks = [device]
related_actions = [a for a in self._actions
if a.device.dependsOn(device)]
for related_device in (a.device for a in related_actions):
disks.extend(related_device.disks)
disks = set(disks)
cancel = [a for a in self._actions
if set(a.device.disks).intersection(disks)]
for action in reversed(cancel):
self.cancelAction(action)
for d in self.getChildren(device):
self.hide(d)
log.info("hiding device %s", device)
if not device.exists:
return
self._removeDevice(device, force=True, modparent=False)
self._hidden.append(device)
lvm.lvm_cc_addFilterRejectRegexp(device.name)
if isinstance(device, DASDDevice):
self.dasd.remove(device)
if device.name not in self.names:
self.names.append(device.name)
def unhide(self, device):
""" Restore a device's visibility.
:param device: the device to restore/unhide
:type device: :class:`~.devices.StorageDevice`
.. note::
Actions canceled while hiding the device are not rescheduled
automatically.
"""
# the hidden list should be in leaves-first order
for hidden in reversed(self._hidden):
if hidden == device or hidden.dependsOn(device) and \
not any(parent in self._hidden for parent in hidden.parents):
log.info("unhiding device %s %s (id %d)", hidden.type,
hidden.name,
hidden.id)
self._hidden.remove(hidden)
self._devices.append(hidden)
hidden.addHook(new=False)
lvm.lvm_cc_removeFilterRejectRegexp(hidden.name)
if isinstance(device, DASDDevice):
self.dasd.append(device)
def setupDiskImages(self):
""" Set up devices to represent the disk image files. """
self._populator.setupDiskImages()
def updateDeviceFormat(self, device):
return self._populator.updateDeviceFormat(device)
def pruneActions(self):
return self._actions.prune()
def sortActions(self):
return self._actions.sort()
def populate(self, cleanupOnly=False):
""" Locate all storage devices.
Everything should already be active. We just go through and gather
details as needed and set up the relations between various devices.
Devices excluded via disk filtering (or because of disk images) are
            scanned just like the rest, but then they are hidden at the end of this
process.
"""
udev.settle()
self.dropLVMCache()
try:
self._populator.populate(cleanupOnly=cleanupOnly)
except Exception:
raise
finally:
self._hideIgnoredDisks()
if flags.installer_mode:
self.teardownAll()
def _isIgnoredDisk(self, disk):
return ((self.ignoredDisks and disk.name in self.ignoredDisks) or
(self.exclusiveDisks and
disk.name not in self.exclusiveDisks))
def _hideIgnoredDisks(self):
# hide any subtrees that begin with an ignored disk
for disk in [d for d in self._devices if d.isDisk]:
if self._isIgnoredDisk(disk):
ignored = True
# If the filter allows all members of a fwraid or mpath, the
# fwraid or mpath itself is implicitly allowed as well. I don't
# like this very much but we have supported this usage in the
# past, so I guess we will support it forever.
if disk.parents and all(p.format.hidden for p in disk.parents):
ignored = any(self._isIgnoredDisk(d) for d in disk.parents)
if ignored:
self.hide(disk)
def teardownAll(self):
""" Run teardown methods on all devices. """
for device in self.leaves:
if device.protected:
continue
try:
device.teardown(recursive=True)
except (StorageError, blockdev.BlockDevError) as e:
log.info("teardown of %s failed: %s", device.name, e)
def teardownDiskImages(self):
""" Tear down any disk image stacks. """
self._populator.teardownDiskImages()
def setupAll(self):
""" Run setup methods on all devices. """
for device in self.leaves:
try:
device.setup()
except DeviceError as e:
log.error("setup of %s failed: %s", device.name, e)
def _filterDevices(self, incomplete=False, hidden=False):
""" Return list of devices modified according to parameters.
:param bool incomplete: include incomplete devices in result
:param bool hidden: include hidden devices in result
:returns: a generator of devices
:rtype: generator of :class:`~.devices.Device`
"""
if hidden:
devices = (d for d in self._devices[:] + self._hidden[:])
else:
devices = (d for d in self._devices[:])
if not incomplete:
devices = (d for d in devices if getattr(d, "complete", True))
return devices
def getDeviceBySysfsPath(self, path, incomplete=False, hidden=False):
""" Return a list of devices with a matching sysfs path.
:param str path: the sysfs path to match
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: the first matching device found
:rtype: :class:`~.devices.Device`
"""
log_method_call(self, path=path, incomplete=incomplete, hidden=hidden)
result = None
if path:
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
result = next((d for d in devices if d.sysfsPath == path), None)
log_method_return(self, result)
return result
def getDeviceByUuid(self, uuid, incomplete=False, hidden=False):
""" Return a list of devices with a matching UUID.
:param str uuid: the UUID to match
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: the first matching device found
:rtype: :class:`~.devices.Device`
"""
log_method_call(self, uuid=uuid, incomplete=incomplete, hidden=hidden)
result = None
if uuid:
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
result = next((d for d in devices if d.uuid == uuid or d.format.uuid == uuid), None)
log_method_return(self, result)
return result
def getDevicesBySerial(self, serial, incomplete=False, hidden=False):
""" Return a list of devices with a matching serial.
:param str serial: the serial to match
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: all matching devices found
:rtype: list of :class:`~.devices.Device`
"""
log_method_call(self, serial=serial, incomplete=incomplete, hidden=hidden)
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
retval = []
for device in devices:
if not hasattr(device, "serial"):
log.warning("device %s has no serial attr", device.name)
continue
if device.serial == serial:
retval.append(device)
log_method_return(self, [r.name for r in retval])
return retval
def getDeviceByLabel(self, label, incomplete=False, hidden=False):
""" Return a device with a matching filesystem label.
:param str label: the filesystem label to match
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: the first matching device found
:rtype: :class:`~.devices.Device`
"""
log_method_call(self, label=label, incomplete=incomplete, hidden=hidden)
result = None
if label:
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
result = next((d for d in devices if getattr(d.format, "label", None) == label), None)
log_method_return(self, result)
return result
def getDeviceByName(self, name, incomplete=False, hidden=False):
""" Return a device with a matching name.
:param str name: the name to look for
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: the first matching device found
:rtype: :class:`~.devices.Device`
"""
log_method_call(self, name=name, incomplete=incomplete, hidden=hidden)
result = None
if name:
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
result = next((d for d in devices if d.name == name or \
(isinstance(d, _LVM_DEVICE_CLASSES) and d.name == name.replace("--","-"))),
None)
log_method_return(self, result)
return result
def getDeviceByPath(self, path, incomplete=False, hidden=False):
""" Return a device with a matching path.
If there is more than one device with a matching path,
prefer a leaf device to a non-leaf device.
:param str path: the path to match
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: the first matching device found
:rtype: :class:`~.devices.Device`
"""
log_method_call(self, path=path, incomplete=incomplete, hidden=hidden)
result = None
if path:
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
# The usual order of the devices list is one where leaves are at
# the end. So that the search can prefer leaves to interior nodes
# the list that is searched is the reverse of the devices list.
result = next((d for d in reversed(list(devices)) if d.path == path or \
(isinstance(d, _LVM_DEVICE_CLASSES) and d.path == path.replace("--","-"))),
None)
log_method_return(self, result)
return result
def getDevicesByType(self, device_type, incomplete=False, hidden=False):
""" Return a list of devices with a matching device type.
:param str device_type: the type to match
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: all matching device found
:rtype: list of :class:`~.devices.Device`
"""
log_method_call(self, device_type=device_type, incomplete=incomplete, hidden=hidden)
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
result = [d for d in devices if d.type == device_type]
log_method_return(self, [r.name for r in result])
return result
def getDevicesByInstance(self, device_class, incomplete=False, hidden=False):
""" Return a list of devices with a matching device class.
:param class device_class: the device class to match
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: all matching device found
:rtype: list of :class:`~.devices.Device`
"""
log_method_call(self, device_class=device_class, incomplete=incomplete, hidden=hidden)
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
result = [d for d in devices if isinstance(d, device_class)]
log_method_return(self, [r.name for r in result])
return result
def getDeviceByID(self, id_num, incomplete=False, hidden=False):
""" Return a device with specified device id.
:param int id_num: the id to look for
:param bool incomplete: include incomplete devices in search
:param bool hidden: include hidden devices in search
:returns: the first matching device found
:rtype: :class:`~.devices.Device`
"""
log_method_call(self, id_num=id_num, incomplete=incomplete, hidden=hidden)
devices = self._filterDevices(incomplete=incomplete, hidden=hidden)
result = next((d for d in devices if d.id == id_num), None)
log_method_return(self, result)
return result
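    # --- Hedged usage sketch (not part of the original module) ---
    # The lookup helpers above share the incomplete/hidden keywords; 'tree' and the
    # values below are placeholders.
    #
    #     root = tree.getDeviceByLabel("rootfs")
    #     sda1 = tree.getDeviceByName("sda1", hidden=True)
    #     parts = tree.getDevicesByType("partition", incomplete=True)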
@property
def devices(self):
""" List of devices currently in the tree """
devices = []
for device in self._devices:
if not getattr(device, "complete", True):
continue
if device.uuid and device.uuid in [d.uuid for d in devices] and \
not isinstance(device, NoDevice):
raise DeviceTreeError("duplicate uuids in device tree")
devices.append(device)
return devices
@property
def filesystems(self):
""" List of filesystems. """
#""" Dict with mountpoint keys and filesystem values. """
filesystems = []
for dev in self.leaves:
if dev.format and getattr(dev.format, 'mountpoint', None):
filesystems.append(dev.format)
return filesystems
@property
def uuids(self):
""" Dict with uuid keys and :class:`~.devices.Device` values. """
uuids = {}
for dev in self._devices:
try:
uuid = dev.uuid
except AttributeError:
uuid = None
if uuid:
uuids[uuid] = dev
try:
uuid = dev.format.uuid
except AttributeError:
uuid = None
if uuid:
uuids[uuid] = dev
return uuids
@property
def labels(self):
""" Dict with label keys and Device values.
FIXME: duplicate labels are a possibility
"""
labels = {}
for dev in self._devices:
# don't include btrfs member devices
if getattr(dev.format, "label", None) and \
(dev.format.type != "btrfs" or isinstance(dev, BTRFSDevice)):
labels[dev.format.label] = dev
return labels
@property
def leaves(self):
""" List of all devices upon which no other devices exist. """
leaves = [d for d in self._devices if d.isleaf]
return leaves
def getChildren(self, device):
""" Return a list of a device's children. """
return [c for c in self._devices if device in c.parents]
def resolveDevice(self, devspec, blkidTab=None, cryptTab=None, options=None):
""" Return the device matching the provided device specification.
The spec can be anything from a device name (eg: 'sda3') to a device
node path (eg: '/dev/mapper/fedora-root' or '/dev/dm-2') to
something like 'UUID=xyz-tuv-qrs' or 'LABEL=rootfs'.
:param devspec: a string describing a block device
:type devspec: str
:keyword blkidTab: blkid info
:type blkidTab: :class:`~.BlkidTab`
:keyword cryptTab: crypto info
:type cryptTab: :class:`~.CryptTab`
:keyword options: mount options
:type options: str
:returns: the device
:rtype: :class:`~.devices.StorageDevice` or None
"""
# find device in the tree
device = None
if devspec.startswith("UUID="):
# device-by-uuid
uuid = devspec.partition("=")[2]
if ((uuid.startswith('"') and uuid.endswith('"')) or
(uuid.startswith("'") and uuid.endswith("'"))):
uuid = uuid[1:-1]
device = self.uuids.get(uuid)
elif devspec.startswith("LABEL="):
# device-by-label
label = devspec.partition("=")[2]
if ((label.startswith('"') and label.endswith('"')) or
(label.startswith("'") and label.endswith("'"))):
label = label[1:-1]
device = self.labels.get(label)
elif re.match(r'(0x)?[A-Za-z0-9]{2}(p\d+)?$', devspec):
# BIOS drive number
spec = int(devspec, 16)
for (edd_name, edd_number) in edd.edd_dict.items():
if edd_number == spec:
device = self.getDeviceByName(edd_name)
break
elif options and "nodev" in options.split(","):
device = self.getDeviceByName(devspec)
if not device:
device = self.getDeviceByPath(devspec)
else:
if not devspec.startswith("/dev/"):
device = self.getDeviceByName(devspec)
if not device:
devspec = "/dev/" + devspec
if not device:
if devspec.startswith("/dev/disk/"):
devspec = os.path.realpath(devspec)
if devspec.startswith("/dev/dm-"):
try:
dm_name = blockdev.dm.name_from_node(devspec[5:])
except blockdev.DMError as e:
log.info("failed to resolve %s: %s", devspec, e)
dm_name = None
if dm_name:
devspec = "/dev/mapper/" + dm_name
if re.match(r'/dev/md\d+(p\d+)?$', devspec):
try:
md_name = blockdev.md.name_from_node(devspec[5:])
except blockdev.MDRaidError as e:
log.info("failed to resolve %s: %s", devspec, e)
md_name = None
if md_name:
devspec = "/dev/md/" + md_name
# device path
device = self.getDeviceByPath(devspec)
if device is None:
if blkidTab:
# try to use the blkid.tab to correlate the device
# path with a UUID
blkidTabEnt = blkidTab.get(devspec)
if blkidTabEnt:
log.debug("found blkid.tab entry for '%s'", devspec)
uuid = blkidTabEnt.get("UUID")
if uuid:
device = self.getDeviceByUuid(uuid)
if device:
devstr = device.name
else:
devstr = "None"
log.debug("found device '%s' in tree", devstr)
if device and device.format and \
device.format.type == "luks":
map_name = device.format.mapName
log.debug("luks device; map name is '%s'", map_name)
mapped_dev = self.getDeviceByName(map_name)
if mapped_dev:
device = mapped_dev
if device is None and cryptTab and \
devspec.startswith("/dev/mapper/"):
# try to use a dm-crypt mapping name to
# obtain the underlying device, possibly
# using blkid.tab
cryptTabEnt = cryptTab.get(devspec.split("/")[-1])
if cryptTabEnt:
luks_dev = cryptTabEnt['device']
try:
device = self.getChildren(luks_dev)[0]
except IndexError as e:
pass
elif device is None:
# dear lvm: can we please have a few more device nodes
# for each logical volume?
# three just doesn't seem like enough.
name = devspec[5:] # strip off leading "/dev/"
(vg_name, _slash, lv_name) = name.partition("/")
if lv_name and not "/" in lv_name:
# looks like we may have one
lv = "%s-%s" % (vg_name, lv_name)
device = self.getDeviceByName(lv)
# check mount options for btrfs volumes in case it's a subvol
if device and device.type.startswith("btrfs") and options:
# start with the volume -- not a subvolume
device = getattr(device, "volume", device)
attr = None
if "subvol=" in options:
attr = "name"
val = util.get_option_value("subvol", options)
elif "subvolid=" in options:
attr = "vol_id"
val = util.get_option_value("subvolid", options)
elif device.defaultSubVolume:
# default subvolume
device = device.defaultSubVolume
if attr and val:
for subvol in device.subvolumes:
if getattr(subvol, attr, None) == val:
device = subvol
break
if device:
log.debug("resolved '%s' to '%s' (%s)", devspec, device.name, device.type)
else:
log.debug("failed to resolve '%s'", devspec)
return device
def handleNodevFilesystems(self):
for line in open("/proc/mounts").readlines():
try:
(_devspec, mountpoint, fstype, _options, _rest) = line.split(None, 4)
except ValueError:
log.error("failed to parse /proc/mounts line: %s", line)
continue
if fstype in formats.fs.nodev_filesystems:
if not flags.include_nodev:
continue
log.info("found nodev %s filesystem mounted at %s",
fstype, mountpoint)
# nodev filesystems require some special handling.
# For now, a lot of this is based on the idea that it's a losing
# battle to require the presence of an FS class for every type
# of nodev filesystem. Based on that idea, we just instantiate
# NoDevFS directly and then hack in the fstype as the device
# attribute.
fmt = formats.getFormat("nodev")
fmt.device = fstype
# NoDevice also needs some special work since such devices don't have
# per-instance names in the kernel.
device = NoDevice(fmt=fmt)
n = len([d for d in self.devices if d.format.type == fstype])
device._name += ".%d" % n
self._addDevice(device)
def saveLUKSpassphrase(self, device):
""" Save a device's LUKS passphrase in case of reset. """
self._populator.saveLUKSpassphrase(device)
def __str__(self):
done = []
def show_subtree(root, depth):
abbreviate_subtree = root in done
s = "%s%s\n" % (" " * depth, root)
done.append(root)
if abbreviate_subtree:
s += "%s...\n" % (" " * (depth+1),)
else:
for child in self.getChildren(root):
s += show_subtree(child, depth + 1)
return s
roots = [d for d in self._devices if not d.parents]
tree = ""
for root in roots:
tree += show_subtree(root, 0)
return tree
|
dwlehman/blivet
|
blivet/devicetree.py
|
Python
|
lgpl-2.1
| 42,702 | 0.001171 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
],
),
]
|
PeterHo/mysite
|
lists/migrations/0001_initial.py
|
Python
|
apache-2.0
| 420 | 0.002381 |
input = """
% No auxiliary atoms at all.
ouch :- #max{V:a(V)} = 0.
"""
output = """
{}
"""
|
Yarrick13/hwasp
|
tests/wasp1/AllAnswerSets/aggregates_max_bug_1.test.py
|
Python
|
apache-2.0
| 92 | 0 |
"""
Convenience functions built on top of boto that are useful
when we deploy using asgard.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import logging
import time
from datetime import datetime, timedelta
import backoff
import boto
from boto.exception import EC2ResponseError, BotoServerError
from boto.ec2.autoscale.tag import Tag
from tubular.utils import EDP, WAIT_SLEEP_TIME
from tubular.exception import (
ImageNotFoundException,
MultipleImagesFoundException,
MissingTagException,
TimeoutException,
)
LOG = logging.getLogger(__name__)
ISO_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
ASG_DELETE_TAG_KEY = 'delete_on_ts'
MAX_ATTEMPTS = int(os.environ.get('RETRY_MAX_ATTEMPTS', 5))
RETRY_FACTOR = float(os.environ.get('RETRY_FACTOR', 1.5))
def giveup_if_not_throttling(ex):
"""
Checks whether a BotoServerError exception's message indicates throttling.
Args:
ex (boto.exception.BotoServerError):
Returns:
False if the exception is a throttling error (so backoff keeps retrying); True otherwise (give up).
"""
return not (str(ex.status) == "400" and ex.body and '<Code>Throttling</Code>' in ex.body)
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def get_all_autoscale_groups(names=None):
"""
Get all the autoscale groups
Arguments:
names (list) - A list of ASG names as strings
Returns:
List of :class:`boto.ec2.autoscale.group.AutoScalingGroup` instances.
"""
autoscale_conn = boto.connect_autoscale()
fetched_asgs = autoscale_conn.get_all_groups(names=names)
total_asgs = []
while True:
total_asgs.extend([asg for asg in fetched_asgs])
if fetched_asgs.next_token:
fetched_asgs = autoscale_conn.get_all_groups(names=names, next_token=fetched_asgs.next_token)
else:
break
return total_asgs
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def get_all_load_balancers(names=None):
"""
Get all the ELBs
Arguments:
names (list): A list of ELB names as strings
Returns:
a list of :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
elb_conn = boto.connect_elb()
fetched_elbs = elb_conn.get_all_load_balancers(names)
total_elbs = []
while True:
total_elbs.extend([elb for elb in fetched_elbs])
if fetched_elbs.next_token:
fetched_elbs = elb_conn.get_all_load_balancers(names, fetched_elbs.next_token)
else:
break
return total_elbs
def _instance_elbs(instance_id, elbs):
"""
Given an EC2 instance and ELBs, return the ELB(s) in which it is active.
Arguments:
instance_id (:obj:`boto.ec2.instance.Reservation`): Instance used to find out which ELB it is active in.
elbs (:obj:`list` of :obj:`boto.ec2.elb.loadbalancer.LoadBalancer`): List of ELBs to use in the check.
Returns:
:obj:`list` of :obj:`boto.ec2.elb.loadbalancer.LoadBalancer`:
The ELBs in which the passed-in instance is registered (possibly an empty list).
"""
instance_elbs = []
for elb in elbs:
elb_instance_ids = [inst.id for inst in elb.instances]
if instance_id in elb_instance_ids:
instance_elbs.append(elb)
return instance_elbs
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def active_ami_for_edp(env, dep, play):
"""
Given an environment, deployment, and play, find the base AMI id used for the active deployment.
Arguments:
env (str): Environment to check (stage, prod, loadtest, etc.)
dep (str): Deployment to check (edx, edge, mckinsey, etc.)
play (str): Play to check (edxapp, discovery, ecommerce, etc.)
Returns:
str: Base AMI id of current active deployment for the EDP.
Raises:
MultipleImagesFoundException: If multiple AMI IDs are found within the EDP's ELB.
ImageNotFoundException: If no AMI IDs are found for the EDP.
"""
LOG.info("Looking up AMI for {}-{}-{}...".format(env, dep, play))
ec2_conn = boto.connect_ec2()
all_elbs = get_all_load_balancers()
LOG.info("Found {} load balancers.".format(len(all_elbs)))
edp_filter = {
"tag:environment": env,
"tag:deployment": dep,
"tag:play": play,
}
reservations = ec2_conn.get_all_reservations(filters=edp_filter)
LOG.info("{} reservations found for EDP {}-{}-{}".format(len(reservations), env, dep, play))
amis = set()
for reservation in reservations:
for instance in reservation.instances:
elbs = _instance_elbs(instance.id, all_elbs)
if instance.state == 'running' and len(elbs) > 0:
amis.add(instance.image_id)
LOG.info("AMI found for {}-{}-{}: {}".format(env, dep, play, instance.image_id))
else:
LOG.info("Instance {} state: {} - elbs in: {}".format(instance.id, instance.state, len(elbs)))
if len(amis) > 1:
msg = "Multiple AMIs found for {}-{}-{}, should have only one.".format(env, dep, play)
raise MultipleImagesFoundException(msg)
if len(amis) == 0:
msg = "No AMIs found for {}-{}-{}.".format(env, dep, play)
raise ImageNotFoundException(msg)
return amis.pop()
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def tags_for_ami(ami_id):
"""
Look up the tags for an AMI.
Arguments:
ami_id (str): An AMI Id.
Returns:
dict: The tags for this AMI.
Raises:
ImageNotFoundException: No image found with this ami ID.
MissingTagException: AMI is missing one or more of the expected tags.
"""
LOG.debug("Looking up edp for {}".format(ami_id))
ec2 = boto.connect_ec2()
try:
ami = ec2.get_all_images(ami_id)[0]
except IndexError:
raise ImageNotFoundException("ami: {} not found".format(ami_id))
except EC2ResponseError as error:
raise ImageNotFoundException(str(error))
return ami.tags
def edp_for_ami(ami_id):
"""
Look up the EDP tags for an AMI.
Arguments:
ami_id (str): An AMI Id.
Returns:
EDP Named Tuple: The EDP tags for this AMI.
Raises:
ImageNotFoundException: No image found with this ami ID.
MissingTagException: AMI is missing one or more of the expected tags.
"""
tags = tags_for_ami(ami_id)
try:
edp = EDP(tags['environment'], tags['deployment'], tags['play'])
except KeyError as key_err:
missing_key = key_err.args[0]
msg = "{} is missing the {} tag.".format(ami_id, missing_key)
raise MissingTagException(msg)
LOG.debug("Got EDP for {}: {}".format(ami_id, edp))
return edp
def validate_edp(ami_id, environment, deployment, play):
"""
Validate that an AMI is tagged for a specific EDP (environment, deployment, play).
Arguments:
ami_id (str): An AMI Id.
environment (str): Environment for AMI, e.g. prod, stage
deployment (str): Deployment for AMI, e.g. edx, edge
play (str): Play for AMI, e.g. edxapp, insights, discovery
Returns:
True if AMI EDP matches specified EDP, otherwise False.
"""
edp = edp_for_ami(ami_id)
edp_matched = (
edp.environment == environment and
edp.deployment == deployment and
edp.play == play
)
if not edp_matched:
LOG.info("AMI {0} EDP did not match specified: {1} != ({2}, {3}, {4})".format(
ami_id, edp, environment, deployment, play
))
return edp_matched
def is_stage_ami(ami_id):
"""
Check if an AMI is intended for stage deployment.
Arguments:
ami_id (str): An AMI Id.
Returns:
True if AMI environment is "stage", otherwise False.
"""
edp = edp_for_ami(ami_id)
ami_for_stage = edp.environment == "stage"
if not ami_for_stage:
LOG.info("AMI {0} is not intended for stage! - {1}".format(ami_id, edp))
return ami_for_stage
def asgs_for_edp(edp, filter_asgs_pending_delete=True):
"""
All AutoScalingGroups that have the tags of this play.
A play is made up of many auto_scaling groups.
Arguments:
EDP Named Tuple: The edp tags for the ASGs you want.
Returns:
list: list of ASG names that match the EDP.
eg.
[
u'edxapp-v018',
u'sandbox-edx-hacking-ASG',
u'sandbox-edx-insights-ASG',
u'test-edx-ecomapp',
u'test-edx-edxapp-v007',
u'test2-edx-certificates',
]
"""
all_groups = get_all_autoscale_groups()
matching_groups = []
LOG.info("Found {} ASGs".format(len(all_groups)))
for group in all_groups:
LOG.debug("Checking group {}".format(group))
tags = {tag.key: tag.value for tag in group.tags}
LOG.debug("Tags for asg {}: {}".format(group.name, tags))
if filter_asgs_pending_delete and ASG_DELETE_TAG_KEY in tags.keys():
LOG.info("filtering ASG: {0} because it is tagged for deletion on: {1}"
.format(group.name, tags[ASG_DELETE_TAG_KEY]))
continue
edp_keys = ['environment', 'deployment', 'play']
if all([tag in tags for tag in edp_keys]):
group_env = tags['environment']
group_deployment = tags['deployment']
group_play = tags['play']
group_edp = EDP(group_env, group_deployment, group_play)
if group_edp == edp:
matching_groups.append(group.name)
LOG.info(
"Returning %s ASGs for EDP %s-%s-%s.",
len(matching_groups),
edp.environment,
edp.deployment,
edp.play
)
return matching_groups
def create_tag_for_asg_deletion(asg_name, seconds_until_delete_delta=None):
"""
Create a tag that will be used to mark an ASG for deletion.
"""
if seconds_until_delete_delta is None:
tag_value = None
else:
tag_value = (datetime.utcnow() + timedelta(seconds=seconds_until_delete_delta)).isoformat()
return Tag(key=ASG_DELETE_TAG_KEY,
value=tag_value,
propagate_at_launch=False,
resource_id=asg_name)
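# Usage sketch (hypothetical ASG name): create_tag_for_asg_deletion("my-asg", 1800)
# returns a Tag keyed on ASG_DELETE_TAG_KEY whose value is an ISO-8601 timestamp
# 30 minutes in the future, e.g. "2024-01-01T12:30:00.000000"; with no delta the
# value is None.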
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def tag_asg_for_deletion(asg_name, seconds_until_delete_delta=1800):
"""
Tag an ASG with a tag named ASG_DELETE_TAG_KEY whose value is the UTC ISO timestamp
(now + seconds_until_delete_delta) after which the ASG may be deleted.
Arguments:
asg_name (str): the name of the autoscale group to tag
Returns:
None
"""
tag = create_tag_for_asg_deletion(asg_name, seconds_until_delete_delta)
autoscale = boto.connect_autoscale()
if len(get_all_autoscale_groups([asg_name])) < 1:
LOG.info("ASG {} no longer exists, will not tag".format(asg_name))
else:
autoscale.create_or_update_tags([tag])
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def remove_asg_deletion_tag(asg_name):
"""
Remove deletion tag from an asg.
Arguments:
asg_name (str): the name of the autoscale group from which to remove the deletion tag
Returns:
None
"""
asgs = get_all_autoscale_groups([asg_name])
if len(asgs) < 1:
LOG.info("ASG {} no longer exists, will not remove deletion tag.".format(asg_name))
else:
for asg in asgs:
for tag in asg.tags:
if tag.key == ASG_DELETE_TAG_KEY:
tag.delete()
def get_asgs_pending_delete():
"""
Get a list of all the autoscale groups marked with the ASG_DELETE_TAG_KEY.
Return only those groups whose ASG_DELETE_TAG_KEY is past the current time.
It's intended for this method to be robust and to return as many ASGs that
are pending delete as possible even if an error occurs during the process.
Returns:
List(<boto.ec2.autoscale.group.AutoScalingGroup>)
"""
current_datetime = datetime.utcnow()
asgs_pending_delete = []
asgs = get_all_autoscale_groups()
LOG.debug("Found {0} autoscale groups".format(len(asgs)))
for asg in asgs:
LOG.debug("Checking for {0} on asg: {1}".format(ASG_DELETE_TAG_KEY, asg.name))
for tag in asg.tags:
try:
if tag.key == ASG_DELETE_TAG_KEY:
LOG.debug("Found {0} tag, deletion time: {1}".format(ASG_DELETE_TAG_KEY, tag.value))
if datetime.strptime(tag.value, ISO_DATE_FORMAT) - current_datetime < timedelta(0, 0, 0):
LOG.debug("Adding ASG: {0} to the list of ASGs to delete.".format(asg.name))
asgs_pending_delete.append(asg)
break
except ValueError:
LOG.warning(
"ASG {0} has an improperly formatted datetime string for the key {1}. Value: {2} . "
"Format must match {3}".format(
asg.name, tag.key, tag.value, ISO_DATE_FORMAT
)
)
continue
except Exception as err: # pylint: disable=broad-except
LOG.warning("Error occured while building a list of ASGs to delete, continuing: {0}".format(err))
continue
LOG.info("Number of ASGs pending delete: {0}".format(len(asgs_pending_delete)))
return asgs_pending_delete
def terminate_instances(region, tags, max_run_hours, skip_if_tag):
"""
Terminates instances based on tag and the number of hours an instance has been running.
Args:
region (str): the ec2 region to search for instances.
tags (dict): tag names/values to search for instances (e.g. {'tag:Name':'*string*'} ).
max_run_hours (int): number of hours the instance should be left running before termination.
skip_if_tag (str): Instance will not be terminated if it is tagged with this value.
Returns:
list: of the instance IDs terminated.
"""
conn = boto.ec2.connect_to_region(region)
instances_to_terminate = []
reservations = conn.get_all_instances(filters=tags)
for reservation in reservations:
for instance in reservation.instances:
total_run_time = datetime.utcnow() - datetime.strptime(instance.launch_time[:-1], ISO_DATE_FORMAT)
if total_run_time > timedelta(hours=max_run_hours) and skip_if_tag not in instance.tags:
instances_to_terminate.append(instance.id)
if len(instances_to_terminate) > 0:
conn.terminate_instances(instance_ids=instances_to_terminate)
return instances_to_terminate
def wait_for_in_service(all_asgs, timeout):
"""
Wait for the ASG and all instances in them to be healthy
according to AWS metrics.
Arguments:
all_asgs(list<str>): A list of ASG names we want to be healthy, e.g.
[
u'test-edx-edxapp-v008',
u'test-edx-worker-v005',
]
timeout: The amount of time in seconds to wait for healthy state.
Returns: Nothing if healthy, raises a timeout exception if un-healthy.
"""
if len(all_asgs) == 0:
LOG.info("No ASGs to monitor - skipping health check.")
return
asgs_left_to_check = list(all_asgs)
LOG.info("Waiting for ASGs to be healthy: {}".format(asgs_left_to_check))
end_time = datetime.utcnow() + timedelta(seconds=timeout)
while end_time > datetime.utcnow():
asgs = get_all_autoscale_groups(asgs_left_to_check)
for asg in asgs:
all_healthy = True
for instance in asg.instances:
if instance.health_status.lower() != 'healthy' or instance.lifecycle_state.lower() != 'inservice':
# Instance is not ready.
all_healthy = False
break
if all_healthy:
# All instances are healthy, so we can stop checking this ASG.
LOG.debug("All instances healthy in ASG: {}".format(asg.name))
LOG.debug(asgs_left_to_check)
asgs_left_to_check.remove(asg.name)
if len(asgs_left_to_check) == 0:
return
time.sleep(1)
raise TimeoutException("Some instances in the following ASGs never became healthy: {}".format(asgs_left_to_check))
def wait_for_healthy_elbs(elbs_to_monitor, timeout):
"""
Wait for all instances in all ELBs listed to be healthy. Raise a
timeout exception if they don't become healthy.
Arguments:
elbs_to_monitor(list<str>): Names of ELBs that we are monitoring.
timeout: Timeout in seconds of how long to wait.
Returns:
None: When all ELBs have only healthy instances in them.
Raises:
TimeoutException: When we have run out of time.
"""
@backoff.on_exception(backoff.expo,
BotoServerError,
max_tries=MAX_ATTEMPTS,
giveup=giveup_if_not_throttling,
factor=RETRY_FACTOR)
def _get_elb_health(selected_elb):
"""
Get the health of an ELB
Args:
selected_elb (boto.ec2.elb.loadbalancer.LoadBalancer):
Returns:
list of InstanceState <boto.ec2.elb.instancestate.InstanceState>
"""
return selected_elb.get_instance_health()
if len(elbs_to_monitor) == 0:
LOG.info("No ELBs to monitor - skipping health check.")
return
elbs_left = set(elbs_to_monitor)
end_time = datetime.utcnow() + timedelta(seconds=timeout)
while end_time > datetime.utcnow():
elbs = get_all_load_balancers(elbs_left)
for elb in elbs:
LOG.info("Checking health for ELB: {}".format(elb.name))
all_healthy = True
for instance in _get_elb_health(elb):
if instance.state != 'InService':
all_healthy = False
break
if all_healthy:
LOG.info("All instances are healthy, remove {} from list of load balancers {}.".format(
elb.name, elbs_left
))
elbs_left.remove(elb.name)
LOG.info("Number of load balancers remaining with unhealthy instances: {}".format(len(elbs_left)))
if len(elbs_left) == 0:
LOG.info("All instances in all ELBs are healthy, returning.")
return
time.sleep(WAIT_SLEEP_TIME)
raise TimeoutException("The following ELBs never became healthy: {}".format(elbs_left))
|
eltoncarr/tubular
|
tubular/ec2.py
|
Python
|
agpl-3.0
| 19,451 | 0.001954 |
import pytest
import serf
from _base import FakeClient, FakeConnection
def test_request_join () :
_body = dict(
Existing=('127.0.0.1:7901', ),
Replay=True,
)
_request = serf.get_request_class('join')(**_body)
_request.check(FakeClient(), )
assert _request.is_checked
_body = dict( # missing value
What='is it',
)
_request = serf.get_request_class('join')(**_body)
with pytest.raises(serf.InvalidRequest, ) :
_request.check(FakeClient(), )
assert not _request.is_checked
_body = dict(
Existing=('127.0.0.1:7901', ),
Replay=1, # invalid value, it must be bool
)
_request = serf.get_request_class('join')(**_body)
with pytest.raises(serf.InvalidRequest, ) :
_request.check(FakeClient(), )
assert not _request.is_checked
class JoinFakeConnection (FakeConnection, ) :
socket_data = (
'\x82\xa5Error\xa0\xa3Seq\x00',
'\x82\xa5Error\xa0\xa3Seq\x01\x81\xa3Num\x01',
)
def test_response_join () :
_client = serf.Client(connection_class=JoinFakeConnection, )
def _callback (response, ) :
assert response.request.command == 'join'
assert not response.error
assert response.is_success
assert response.body is not None
assert response.seq == 1
_body = response.body
assert isinstance(_body, dict, )
assert 'Num' in _body
assert _body.get('Num') == 1
_body = dict(
Existing=('127.0.0.1:7901', ),
Replay=True,
)
_client.join(**_body).add_callback(_callback, ).request()
|
spikeekips/serf-python
|
test/test_command_join.py
|
Python
|
mpl-2.0
| 1,679 | 0.008338 |
# -*- coding: utf-8 -*-
# Test dataset script
# Author: Sébastien Combéfis
# Date: December 23, 2012
# Problem: Final Assessment Question: Mission 1
from lib.pythia import *
import random
class TestDataSetQ1(TestDataSet):
def __init__(self):
TestDataSet.__init__(self, 'q1', 5)
def genTestData(self):
A = random.randint(1, 100)
return [A]
TestDataSetQ1().generate()
|
UCL-INGI/Informatique-1
|
old_pythia/18_java/test/gendataset.py
|
Python
|
agpl-3.0
| 406 | 0.009901 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
class BaseException(Exception):
"""An error occurred."""
def __init__(self, message=None):
self.message = message
def __str__(self):
return self.message or self.__class__.__doc__
class CommandError(BaseException):
"""Invalid usage of CLI."""
class InvalidEndpoint(BaseException):
"""The provided endpoint is invalid."""
class CommunicationError(BaseException):
"""Unable to communicate with server."""
class HTTPException(BaseException):
"""Base exception for all HTTP-derived exceptions."""
code = 'N/A'
def __init__(self, details=None):
self.details = details
def __str__(self):
try:
data = json.loads(self.details)
message = data.get("error_message", {}).get("faultstring")
if message:
return "%s (HTTP %s) ERROR %s" % (
self.__class__.__name__, self.code, message)
except (ValueError, TypeError, AttributeError):
pass
return "%s (HTTP %s)" % (self.__class__.__name__, self.code)
class HTTPMultipleChoices(HTTPException):
code = 300
def __str__(self):
self.details = ("Requested version of OpenStack Images API is not"
"available.")
return "%s (HTTP %s) %s" % (self.__class__.__name__, self.code,
self.details)
class HTTPBadRequest(HTTPException):
code = 400
class HTTPUnauthorized(HTTPException):
code = 401
class HTTPForbidden(HTTPException):
code = 403
class HTTPNotFound(HTTPException):
code = 404
class HTTPMethodNotAllowed(HTTPException):
code = 405
class HTTPConflict(HTTPException):
code = 409
class HTTPOverLimit(HTTPException):
code = 413
class HTTPInternalServerError(HTTPException):
code = 500
class HTTPNotImplemented(HTTPException):
code = 501
class HTTPBadGateway(HTTPException):
code = 502
class HTTPServiceUnavailable(HTTPException):
code = 503
#NOTE(bcwaldon): Build a mapping of HTTP codes to corresponding exception
# classes
_code_map = {}
for obj_name in dir(sys.modules[__name__]):
if obj_name.startswith('HTTP'):
obj = getattr(sys.modules[__name__], obj_name)
_code_map[obj.code] = obj
def from_response(response, details=None):
"""Return an instance of an HTTPException based on httplib response."""
cls = _code_map.get(response.status, HTTPException)
return cls(details)
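# Usage sketch (assumes an httplib-style response object with a ``status``
# attribute; names are illustrative):
#   exc = from_response(resp, details=resp_body)
#   # resp.status == 404 -> HTTPNotFound; an unmapped status -> HTTPException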
|
JioCloud/python-ceilometerclient
|
ceilometerclient/exc.py
|
Python
|
apache-2.0
| 3,070 | 0.000326 |
"""Cement testing utilities."""
import unittest
from tempfile import mkstemp, mkdtemp
from ..core import backend, foundation
from ..utils.misc import rando
# shortcuts
from nose import SkipTest
from nose.tools import ok_ as ok
from nose.tools import eq_ as eq
from nose.tools import raises
from nose.plugins.attrib import attr
class TestApp(foundation.CementApp):
"""
Basic CementApp for generic testing.
"""
class Meta:
label = "app-%s" % rando()[:12]
config_files = []
argv = []
base_controller = None
arguments = []
exit_on_close = False
class CementTestCase(unittest.TestCase):
"""
A sub-class of unittest.TestCase.
"""
app_class = TestApp
"""The test class that is used by self.make_app to create an app."""
def __init__(self, *args, **kw):
super(CementTestCase, self).__init__(*args, **kw)
def setUp(self):
"""
Sets up self.app with a generic TestApp(). Also resets the backend
hooks and handlers so that every time an app is created it is set up
clean.
"""
self.app = self.make_app()
_, self.tmp_file = mkstemp()
self.tmp_dir = mkdtemp()
def make_app(self, *args, **kw):
"""
Create a generic app using TestApp. Arguments and Keyword Arguments
are passed to the app.
"""
self.reset_backend()
return self.app_class(*args, **kw)
def reset_backend(self):
"""
Remove all registered hooks and handlers from the backend.
"""
for _handler in backend.__handlers__.copy():
del backend.__handlers__[_handler]
for _hook in backend.__hooks__.copy():
del backend.__hooks__[_hook]
def ok(self, expr, msg=None):
"""Shorthand for assert."""
return ok(expr, msg)
def eq(self, a, b, msg=None):
"""Shorthand for 'assert a == b, "%r != %r" % (a, b)'. """
return eq(a, b, msg)
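# A minimal usage sketch (hypothetical test module, not shipped with cement):
#
#   class MyTestCase(CementTestCase):
#       app_class = TestApp
#
#       def test_app_created(self):
#           self.ok(self.app is not None)
#           self.eq(self.app.__class__, TestApp)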
# The following are for internal, Cement unit testing only
@attr('core')
class CementCoreTestCase(CementTestCase):
pass
@attr('ext')
class CementExtTestCase(CementTestCase):
pass
|
rjdp/cement
|
cement/utils/test.py
|
Python
|
bsd-3-clause
| 2,208 | 0 |
import os
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
import Screens.InfoBar
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, description, eit)-tuple.
# begin and end will be corrected
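# For example (assumed config values): with margin_before=5 and margin_after=10
# an EPG event running 20:00-21:00 yields begin=19:55 and end=21:10.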
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
def findSafeRecordPath(dirname):
if not dirname:
return None
from Components import Harddisk
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if mountpoint in ('/', '/media'):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
def checkForRecordings():
if NavigationInstance.instance.getRecordings():
return True
rec_time = NavigationInstance.instance.RecordTimer.getNextTimerTime(isWakeup=True)
return rec_time > 0 and (rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
RecordTimerEntry.wasInStandby = False
RecordTimerEntry.wasInDeepStandby = False
eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
@staticmethod
def setWasInDeepStandby():
RecordTimerEntry.wasInDeepStandby = True
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
if not RecordTimerEntry.wasInStandby:
if not RecordTimerEntry.wasInDeepStandby:
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
RecordTimerEntry.wasInDeepStandby = False
RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
if not checkForRecordings():
print "No recordings busy of sceduled within 6 minutes so shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
# send a fake event to check whether other recordings are running or
# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, always_zap = False, zap_wakeup = "always", rename_repeat = True):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref and serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.zap_wakeup = zap_wakeup
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.descramble = descramble
self.record_ecm = record_ecm
self.rename_repeat = rename_repeat
self.needChangePriorityFrontend = config.usage.recording_frontend_priority.value != "-2" and config.usage.recording_frontend_priority.value != config.usage.frontend_priority.value
self.change_frontend = False
self.InfoBarInstance = Screens.InfoBar.InfoBar.instance
self.ts_dialog = None
self.log_entries = []
self.resetState()
def __repr__(self):
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay)
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self, name=None):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
name = name or self.name
filename = begin_date + " - " + service_name
if name:
if config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + name
elif config.recording.filename_composition.value == "long":
filename += " - " + name + " - " + self.description
else:
filename += " - " + name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return None
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
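# Illustrative results for the composition settings above (assumed service and
# event names, before any uniquifying done by getRecordingFilename):
#   standard: "20240101 2000 - Some Channel - Some Event"
#   short:    "20240101 - Some Event"
#   long:     "20240101 2000 - Some Channel - Some Event - Some Description"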
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.setRecordingPreferredTuner()
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
self.setRecordingPreferredTuner(setdefault=True)
return False
name = self.name
description = self.description
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
if self.rename_repeat:
event_description = evt.getShortDescription()
if not event_description:
event_description = evt.getExtendedDescription()
if event_description and event_description != description:
description = event_description
event_name = evt.getEventName()
if event_name and event_name != name:
name = event_name
if not self.calculateFilename(event_name):
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + ".ts", self.begin, self.end, event_id, name.replace("\n", ""), description.replace("\n", ""), ' '.join(self.tags), bool(self.descramble), bool(self.record_ecm))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
# we must calculate our new start time before the stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
# the next start time in evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
self.setRecordingPreferredTuner(setdefault=True)
return False
return True
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == 1:
if self.always_zap:
if Screens.Standby.inStandby:
self.log(5, "wakeup and zap to recording service")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
cur_zap_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_zap_ref and not cur_zap_ref.getPath():# we do not zap away if it is no live service
if self.checkingTimeshiftRunning():
if self.ts_dialog is None:
self.openChoiceActionBeforeZap()
else:
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
self.log(5, "zap to recording service")
if next_state == self.StatePrepared:
if self.tryPrepare():
self.log(6, "prepare ok, waiting for begin")
# create file to "reserve" the filename
# because another recording at the same time on another service can try to record the same event
# i.e. cable / sat.. then the second recording needs its own extension... since we create the file
# here, calculateFilename is happy
if not self.justplay:
open(self.Filename + ".ts", "w").close()
# Give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle(self.Filename)
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if self.first_try_prepare or (self.ts_dialog is not None and not self.checkingTimeshiftRunning()):
self.first_try_prepare = False
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if self.always_zap:
return False
if Screens.Standby.inStandby:
self.setRecordingPreferredTuner()
self.failureCB(True)
elif self.checkingTimeshiftRunning():
if self.ts_dialog is None:
self.openChoiceActionBeforeZap()
elif not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20, default=True)
else: # zap without asking
self.log(9, "zap without asking")
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
elif cur_ref:
self.log(8, "currently running service is not a live service.. so stop it makes no sense")
else:
self.log(8, "currently no service running... so we dont need to stop it")
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.justplay:
if Screens.Standby.inStandby:
if RecordTimerEntry.wasInDeepStandby and self.zap_wakeup in ("always", "from_deep_standby") or self.zap_wakeup in ("always", "from_standby"):
self.log(11, "wakeup and zap")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
if self.checkingTimeshiftRunning():
if self.ts_dialog is None:
self.openChoiceActionBeforeZap()
else:
self.log(11, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, "start recording")
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.keypress()
if Screens.Standby.inStandby: #In case some plugin did put the receiver already in standby
config.misc.standbyCounter.value = 0
else:
Notifications.AddNotification(Screens.Standby.Standby, StandbyCounterIncrease=False)
record_res = self.record_service.start()
self.setRecordingPreferredTuner(setdefault=True)
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
# Tell the trashcan we started recording. The trashcan gets events,
# but cannot tell what the associated path is.
Trashcan.instance.markDirty(self.Filename)
return True
elif next_state == self.StateEnded:
old_end = self.end
self.ts_dialog = None
if self.setAutoincreaseEnd():
self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
self.log(12, "stop recording")
if not self.justplay:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
if not checkForRecordings():
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or self.afterEvent == AFTEREVENT.AUTO and (Screens.Standby.inStandby or RecordTimerEntry.wasInStandby) and not config.misc.standbyCounter.value:
if not Screens.Standby.inTryQuitMainloop:
if Screens.Standby.inStandby:
RecordTimerEntry.TryQuitMainloop()
else:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour receiver. Shutdown now?"), timeout=20, default=True)
elif self.afterEvent == AFTEREVENT.STANDBY or self.afterEvent == AFTEREVENT.AUTO and RecordTimerEntry.wasInStandby:
if not Screens.Standby.inStandby:
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nreceiver to standby. Do that now?"), timeout=20, default=True)
else:
RecordTimerEntry.keypress()
return True
def setAutoincreaseEnd(self, entry = None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
new_end = entry.begin - 30
dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None and len(simulTimerList) > 1:
new_end = simulTimerList[1].begin
new_end -= 30 # leave 30 seconds of prepare time
if new_end <= time():
return False
self.end = new_end
return True
def setRecordingPreferredTuner(self, setdefault=False):
if self.needChangePriorityFrontend:
elem = None
if not self.change_frontend and not setdefault:
elem = config.usage.recording_frontend_priority.value
self.change_frontend = True
elif self.change_frontend and setdefault:
elem = config.usage.frontend_priority.value
self.change_frontend = False
if elem is not None:
setPreferredTuner(int(elem))
def checkingTimeshiftRunning(self):
return config.usage.check_timeshift.value and self.InfoBarInstance and self.InfoBarInstance.timeshiftEnabled() and self.InfoBarInstance.timeshift_was_activated
def openChoiceActionBeforeZap(self):
if self.ts_dialog is None:
type = _("record")
if self.justplay:
type = _("zap")
elif self.always_zap:
type = _("zap and record")
message = _("You must switch to the service %s (%s - '%s')!\n") % (type, self.service_ref.getServiceName(), self.name)
if self.repeated:
message += _("Attention, this is repeated timer!\n")
message += _("Timeshift is running. Select an action.\n")
choice = [(_("Zap"), "zap"), (_("Don't zap and disable timer"), "disable"), (_("Don't zap and remove timer"), "remove")]
if not self.InfoBarInstance.save_timeshift_file:
choice.insert(1, (_("Save timeshift in movie dir and zap"), "save_movie"))
if self.InfoBarInstance.timeshiftActivated():
choice.insert(0, (_("Save timeshift and zap"), "save"))
else:
choice.insert(1, (_("Save timeshift and zap"), "save"))
else:
message += _("Reminder, you have chosen to save timeshift file.")
#if self.justplay or self.always_zap:
# choice.insert(2, (_("Don't zap"), "continue"))
choice.insert(2, (_("Don't zap"), "continue"))
def zapAction(choice):
start_zap = True
if choice:
if choice in ("zap", "save", "save_movie"):
self.log(8, "zap to recording service")
if choice in ("save", "save_movie"):
ts = self.InfoBarInstance.getTimeshift()
if ts and ts.isTimeshiftEnabled():
if choice =="save_movie":
self.InfoBarInstance.save_timeshift_in_movie_dir = True
self.InfoBarInstance.save_timeshift_file = True
ts.saveTimeshiftFile()
del ts
self.InfoBarInstance.saveTimeshiftFiles()
elif choice == "disable":
self.disable()
NavigationInstance.instance.RecordTimer.timeChanged(self)
start_zap = False
self.log(8, "zap canceled by the user, timer disabled")
elif choice == "remove":
start_zap = False
self.afterEvent = AFTEREVENT.NONE
NavigationInstance.instance.RecordTimer.removeEntry(self)
self.log(8, "zap canceled by the user, timer removed")
elif choice == "continue":
if self.justplay:
self.end = self.begin
start_zap = False
self.log(8, "zap canceled by the user")
if start_zap:
if not self.justplay:
self.setRecordingPreferredTuner()
self.failureCB(True)
else:
self.log(8, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
self.ts_dialog = self.InfoBarInstance.session.openWithCallback(zapAction, MessageBox, message, simple=True, list=choice, timeout=20)
def sendStandbyNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
if self.state == self.StateEnded:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end }[next_state]
def failureCB(self, answer):
self.ts_dialog = None
if answer == True:
self.log(13, "ok, zapped away")
#NavigationInstance.instance.stopUserServices()
NavigationInstance.instance.playService(self.service_ref.ref)
else:
self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) != int(self.start_prepare):
self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
def gotRecordEvent(self, record, event):
# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
if self.__record_service.__deref__() != record.__deref__():
return
self.log(16, "record event %d" % event)
if event == iRecordableService.evRecordWriteError:
print "WRITE ERROR on recording, disk full?"
# show notification. the 'id' will make sure that it will be
# displayed only once, even if more timers are failing at the
# same time. (which is very likely in case of disk fullness)
Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
# ok, the recording has been stopped. we need to properly note
# that in our state, while also keeping the possibility to re-try.
# TODO: this has to be done.
elif event == iRecordableService.evStart:
text = _("A record has been started:\n%s") % self.name
notify = config.usage.show_message_when_recording_starts.value and not Screens.Standby.inStandby and self.InfoBarInstance and self.InfoBarInstance.execing
if self.dirnameHadToFallback:
text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
notify = True
if notify:
Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 3)
elif event == iRecordableService.evRecordAborted:
NavigationInstance.instance.RecordTimer.removeEntry(self)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
if self.__record_service is not None:
print "[remove callback]"
NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
self.__record_service = service
if self.__record_service is not None:
print "[add callback]"
NavigationInstance.instance.record_event.append(self.gotRecordEvent)
record_service = property(lambda self: self.__record_service, setRecordService)
def createTimer(xml):
begin = int(xml.get("begin"))
end = int(xml.get("end"))
serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
description = xml.get("description").encode("utf-8")
repeated = xml.get("repeated").encode("utf-8")
rename_repeat = long(xml.get("rename_repeat") or "1")
disabled = long(xml.get("disabled") or "0")
justplay = long(xml.get("justplay") or "0")
always_zap = long(xml.get("always_zap") or "0")
zap_wakeup = str(xml.get("zap_wakeup") or "always")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"auto": AFTEREVENT.AUTO
}[afterevent]
eit = xml.get("eit")
if eit and eit != "None":
eit = long(eit)
else:
eit = None
location = xml.get("location")
if location and location != "None":
location = location.encode("utf-8")
else:
location = None
tags = xml.get("tags")
if tags and tags != "None":
tags = tags.encode("utf-8").split(' ')
else:
tags = None
descramble = int(xml.get("descramble") or "1")
record_ecm = int(xml.get("record_ecm") or "0")
name = xml.get("name").encode("utf-8")
#filename = xml.get("filename").encode("utf-8")
entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags, descramble = descramble, record_ecm = record_ecm, always_zap = always_zap, zap_wakeup = zap_wakeup, rename_repeat = rename_repeat)
entry.repeated = int(repeated)
for l in xml.findall("log"):
time = int(l.get("time"))
code = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((time, code, msg))
return entry
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
self.timer_list.remove(w)
# did this timer reach its last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
w.first_try_prepare = True
self.addTimerEntry(w)
else:
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
insort(self.processed_timers, w)
self.stateChanged(w)
def isRecording(self):
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
return True
return False
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
doc = xml.etree.cElementTree.parse(self.Filename)
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
# put out a message when at least one timer overlaps
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
checkit = False # at moment it is enough when the message is displayed one time
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
list = []
list.append('<?xml version="1.0" ?>\n')
list.append('<timers>\n')
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname is not None:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags is not None:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append(' always_zap="' + str(int(timer.always_zap)) + '"')
list.append(' zap_wakeup="' + str(timer.zap_wakeup) + '"')
list.append(' rename_repeat="' + str(int(timer.rename_repeat)) + '"')
list.append(' descramble="' + str(int(timer.descramble)) + '"')
list.append(' record_ecm="' + str(int(timer.record_ecm)) + '"')
list.append('>\n')
if config.recording.debug.value:
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
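		# write to a temporary ".writing" file first and fsync it, then atomically
		# rename it over the real timer file so a crash cannot leave a half-written file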
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
import os
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now or isWakeup and timer.zap_wakeup in ("from_standby", "never"):
continue
return timer.begin
return -1
def getNextRecordingTime(self):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act < now:
continue
return next_act
return -1
def getNextTimerTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if next_act < now or isWakeup and timer.justplay and timer.zap_wakeup in ("from_standby", "never"):
continue
return next_act
return -1
def isNextRecordAfterEventActionAuto(self):
now = time()
t = None
for timer in self.timer_list:
if timer.justplay or timer.begin < now:
continue
if t is None or t.begin == timer.begin:
t = timer
if t.afterEvent == AFTEREVENT.AUTO:
return True
return False
	def record(self, entry, ignoreTSC=False, dosave=True): # called by loadTimer with dosave=False
timersanitycheck = TimerSanityCheck(self.timer_list,entry)
if not timersanitycheck.check():
if ignoreTSC != True:
print "timer conflict detected!"
print timersanitycheck.getSimulTimerList()
return timersanitycheck.getSimulTimerList()
else:
print "ignore timer conflict"
elif timersanitycheck.doubleCheck():
print "ignore double timer"
return None
entry.timeChanged()
print "[Timer] Record " + str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def isInRepeatTimer(self, timer, event):
time_match = 0
is_editable = False
begin = event.getBeginTime()
duration = event.getDuration()
end = begin + duration
timer_end = timer.end
if timer.disabled and timer.isRunning():
if begin < timer.begin <= end or timer.begin <= begin <= timer_end:
return True
else:
return False
if timer.justplay and (timer_end - timer.begin) <= 1:
timer_end += 60
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
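		# begin2/end2 are the event's start/end in minutes of day, offset by 1440 so a
		# repeat timer spanning midnight can be compared by shifting its window back
		# one day (see the offset_day handling below)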
xbt = localtime(timer.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = timer.begin < begin or begin <= timer.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = timer.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - timer.begin) / 60)
if xend < xbegin:
xend += 1440
if timer.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
return time_match and is_editable
def isInTimer(self, eventid, begin, duration, service):
returnValue = None
type = 0
time_match = 0
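		# 'type' encodes how a timer overlaps the event: base values are
		# 1 = last part, 2 = whole event, 3 = within event, 4 = first part,
		# shifted by an offset (+5 justplay/zap, +10 always_zap, +15 repeat timer
		# or a disabled repeat timer that is currently running), so the values
		# 2, 7, 12, 17, 22, 27 all mean "whole event recorded".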
bt = None
check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
end = begin + duration
refstr = ':'.join(service.split(':')[:11])
for x in self.timer_list:
check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
if not check:
sref = x.service_ref.ref
parent_sid = sref.getUnsignedData(5)
parent_tsid = sref.getUnsignedData(6)
if parent_sid and parent_tsid:
# check for subservice
sid = sref.getUnsignedData(1)
tsid = sref.getUnsignedData(2)
sref.setUnsignedData(1, parent_sid)
sref.setUnsignedData(2, parent_tsid)
sref.setUnsignedData(5, 0)
sref.setUnsignedData(6, 0)
check = sref.toCompareString() == refstr
num = 0
if check:
check = False
event = eEPGCache.getInstance().lookupEventId(sref, eventid)
num = event and event.getNumOfLinkageServices() or 0
sref.setUnsignedData(1, sid)
sref.setUnsignedData(2, tsid)
sref.setUnsignedData(5, parent_sid)
sref.setUnsignedData(6, parent_tsid)
for cnt in range(num):
subservice = event.getLinkageService(sref, cnt)
if sref.toCompareString() == subservice.toCompareString():
check = True
break
if check:
timer_end = x.end
timer_begin = x.begin
type_offset = 0
if not x.repeated and check_offset_time:
if 0 < end - timer_end <= 59:
timer_end = end
elif 0 < timer_begin - begin <= 59:
timer_begin = begin
if x.justplay:
type_offset = 5
if (timer_end - x.begin) <= 1:
timer_end += 60
if x.always_zap:
type_offset = 10
timer_repeat = x.repeated
				# repeat timer set to 'don't stop current event but disable coming events'
running_only_curevent = x.disabled and x.isRunning() and timer_repeat
if running_only_curevent:
timer_repeat = 0
type_offset += 15
if timer_repeat != 0:
type_offset += 15
if bt is None:
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(x.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = x.begin < begin or begin <= x.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = x.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - x.begin) / 60)
if xend < xbegin:
xend += 1440
if x.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
else:
if begin < timer_begin <= end:
if timer_end < end:
# recording within event
time_match = timer_end - timer_begin
type = type_offset + 3
else:
# recording last part of event
time_match = end - timer_begin
type = type_offset + 1
elif timer_begin <= begin <= timer_end:
if timer_end < end:
# recording first part of event
time_match = timer_end - begin
type = type_offset + 4
else:
# recording whole event
time_match = end - begin
type = type_offset + 2
if time_match:
if type in (2,7,12,17,22,27):
					# when the whole event is recorded, do not look further
returnValue = (time_match, [type])
break
elif returnValue:
if type not in returnValue[1]:
returnValue[1].append(type)
else:
returnValue = (time_match, [type])
return returnValue
def removeEntry(self, entry):
print "[Timer] Remove " + str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
print "state: ", entry.state
print "in processed: ", entry in self.processed_timers
print "in running: ", entry in self.timer_list
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
|
popazerty/blackhole-vuplus
|
RecordTimer.py
|
Python
|
gpl-2.0
| 43,007 | 0.030321 |
from math import pi
def fractal_area(r,n):
first = r**2
if n == 1:
return first * pi
second = 4 * (r/2)**2
if n == 2:
return (first + second) * pi
rest = sum((r/2**i)**2 * 4*3**(i-1) for i in range(2,n))
return (first + second + rest) * pi
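# Worked example (illustrative): for r = 1, n = 3 the pieces are
#   first  = 1^2           = 1      (the original circle)
#   second = 4 * (1/2)^2   = 1      (four circles of half the radius)
#   rest   = (1/4)^2 * 4*3 = 0.75   (twelve circles of radius 1/4, i = 2)
# so fractal_area(1, 3) = 2.75 * pi ~= 8.639380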
def main():
for _ in range(int(input())):
r,n = map(int, input().split())
print('%.6f' % fractal_area(r,n))
if __name__ == "__main__":
main()
|
JonSteinn/Kattis-Solutions
|
src/Fractal Area/Python 3/main.py
|
Python
|
gpl-3.0
| 448 | 0.017857 |
import os
import logging
import argparse
from gii.core import Project, app
from gii.core.tools import Build
cli = argparse.ArgumentParser(
prog = 'gii build',
description = 'Build GII Host(s) for current project'
)
cli.add_argument( 'targets',
type = str,
nargs = '*',
default = 'native'
)
cli.add_argument( '-c', '--configure',
dest = 'configure',
help = 'Configure waf buildtool',
action = 'store_true',
default = False
)
cli.add_argument( '-p', '--profile',
dest = 'profile',
help = 'release/debug ',
default = 'debug'
)
cli.add_argument( '--clean-bin',
dest = 'clean-bin',
help = 'Clean built binary files',
action = 'store_true',
default = False
)
cli.add_argument( '--clean',
dest = 'clean',
help = 'Clean build files',
action = 'store_true',
default = False
)
cli.add_argument( '-v','--verbose',
dest = 'verbose',
	help = 'Verbose build log',
action = 'store_true',
default = False
)
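# Usage sketch (illustrative invocations, flags as defined above):
#   gii build                     -> build the default 'native' target
#   gii build native -v --clean   -> verbose build after cleaning build files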
def main( argv ):
app.openProject()
args = cli.parse_args( argv[1:] )
code = Build.run(
**vars( args )
)
exit( code )
|
tommo/gii
|
tools/build/__init__.py
|
Python
|
mit
| 1,086 | 0.138122 |
# author : Johann-Mattis List
# email : mattis.list@uni-marburg.de
# created : 2014-09-10 19:49
# modified : 2014-09-10 21:17
"""
Wordlist plugin for burmish data.
"""
__author__="Johann-Mattis List"
__date__="2014-09-10"
import unicodedata as ucd
import re
import sqlite3
import lingpyd
from .unicode import *
def clean_entry(entry, **keywords):
"""
Normalize (NFC) entry and remove bad chars.
"""
kw = dict(
brackets = rcParams['brackets'],
exact_bracket_matching = True,
)
kw.update(keywords)
# normalize first
new_entry = ucd.normalize("NFC", entry)
# normalize linguistically
entries = list(new_entry)
for i,char in enumerate(entries):
try:
entries[i] = rcParams['normalizations'][char]
except KeyError:
pass
new_entry = ''.join(entries)
if kw['exact_bracket_matching']:
# delete stuff in brackets
for b1 in kw['brackets']:
b2 = get_pendant(b1)
# get possible index
idxA = new_entry.find(b1)
idxB = new_entry.find(b2)
# check for existing indices
if idxA != -1 and idxA < idxB:
new_entry = new_entry[:idxA]+new_entry[idxB+1:]
else:
b1s = []
b2s = []
for b1 in kw['brackets']:
idxA = new_entry.find(b1)
if idxA != -1:
b1s.append(idxA)
idxB = new_entry.find(get_pendant(b1))
if idxB != -1:
b2s.append(idxB)
new_entry = new_entry[:min(b1s)]+new_entry[max(b2s)+1:]
# go for spaces and replace by '_'
new_entry = new_entry.replace(' ','_')
return new_entry
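# Rough behaviour sketch (assumes '(' is listed in rcParams['brackets']):
#   clean_entry('ba (bad) la')  ->  'ba__la'   # bracketed span dropped, spaces -> '_'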
def ipa2tokens(
istring,
**keywords
):
"""
Tokenize IPA-encoded strings.
Parameters
----------
seq : str
The input sequence that shall be tokenized.
diacritics : {str, None} (default=None)
A string containing all diacritics which shall be considered in the
respective analysis. When set to *None*, the default diacritic string
will be used.
vowels : {str, None} (default=None)
A string containing all vowel symbols which shall be considered in the
respective analysis. When set to *None*, the default vowel string will
be used.
tones : {str, None} (default=None)
A string indicating all tone letter symbals which shall be considered
in the respective analysis. When set to *None*, the default tone string
will be used.
combiners : str (default="\u0361\u035c")
A string with characters that are used to combine two separate
characters (compare affricates such as t͡s).
breaks : str (default="-.")
A string containing the characters that indicate that a new token
starts right after them. These can be used to indicate that two
        consecutive vowels should not be treated as diphthongs or for diacritics
that are put before the following letter.
merge_vowels : bool
Indicate, whether vowels should be merged into diphtongs
(default=True), or whether each vowel symbol should be considered
separately.
merge_identical_symbols : bool
Indicate, whether identical symbols should be merged into one token, or
rather be kept separate.
semi_diacritics: str (default="ʃhsʑɕʂʐñ")
Indicate which symbols shall be treated as "semi-diacritics", that is,
as symbols which can occur on their own, but which eventually, when
preceded by a consonant, will form clusters with it. If you want to
disable this features, just set the keyword to an empty string.
Returns
-------
tokens : list
A list of IPA tokens.
Examples
--------
>>> from lingpyd import *
>>> myseq = 't͡sɔyɡə'
>>> ipa2tokens(myseq)
['t͡s', 'ɔy', 'ɡ', 'ə']
See also
--------
tokens2class
class2tokens
"""
# go for defaults
kw = dict(
vowels = lingpyd.settings.rcParams['vowels'],
diacritics = lingpyd.settings.rcParams['diacritics'],
expand_nasals = True, # addon
tones = lingpyd.settings.rcParams['tones'],
combiners = lingpyd.settings.rcParams['combiners'],
breaks = lingpyd.settings.rcParams['breaks'],
stress = lingpyd.settings.rcParams['stress'],
merge_vowels = lingpyd.settings.rcParams['merge_vowels'],
merge_identical_symbols = True,
semi_diacritics = 'ʃhsʑɕʂʐñ'
)
kw.update(keywords)
# clean the entry first
istring = clean_entry(istring)
# check for pre-tokenized strings
if ' ' in istring:
out = istring.split(' ')
if istring.startswith('#'):
return out[1:-1]
else:
return out
# create the list for the output
out = []
nasals = "ãũẽĩõ"
nasal_char = "\u0303"
semi_diacritics = kw['semi_diacritics'] #"ʃhsʑɕʂʐñ"
nogos = '_'
# set basic characteristics
vowel = False # no vowel
tone = False # no tone
merge = False # no merge command
start = True # start of unit
nasal = False # start of nasal vowel environment
for char in istring:
# check for nasal stack and vowel environment
if nasal:
if char not in kw['vowels'] and char not in kw['diacritics'] :
out += [rcParams['nasal_placeholder']]
nasal = False
# check for breaks first, since they force us to start anew
if char in kw['breaks']:
start = True
vowel = False
tone = False
merge = False
# check for combiners next
elif char in kw['combiners']:
out[-1] += char
merge = True
# check for stress
elif char in kw['stress']:
out += [char]
            # XXX be careful about the removal of the start-flag here, but it
# XXX seems to make sense so far!
merge = True
tone = False
vowel = False
start = False
# check for merge command
elif merge:
out[-1] += char
if char in kw['vowels']:
vowel = True
merge = False
# check for nasals
elif kw['expand_nasals'] and char == nasal_char and vowel:
out[-1] += char
start = False
nasal = True
# check for weak diacritics
elif char in semi_diacritics and not start and not vowel and not tone and out[-1] not in nogos:
out[-1] += char
# check for diacritics
elif char in kw['diacritics']:
if not start:
out[-1] += char
else:
out += [char]
start = False
merge = True
# check for vowels
elif char in kw['vowels']:
if vowel and kw['merge_vowels']:
out[-1] += char
else:
out += [char]
vowel = True
start = False
tone = False
# check for tones
elif char in kw['tones']:
vowel = False
if tone:
out[-1] += char
else:
out += [char]
tone = True
start = False
# consonants
else:
vowel = False
tone = False
out += [char]
start = False
tone = False
if nasal:
out += [rcParams['nasal_placeholder']]
if kw['merge_identical_symbols']:
new_out = [out[0]]
for i in range(len(out) -1):
outA = out[i]
outB = out[i+1]
if outA == outB:
new_out[-1] += outB
else:
new_out += [outB]
return new_out
return out
def secondary_structures(tokens):
"""
Function handles the tokenization of strings into secondary structures.
"""
segment = rcParams['morpheme_separator']
pstring = lingpyd.prosodic_string(tokens)
# check for more than one vowel in the set
vlen = pstring.count('X')+pstring.count('Z')+pstring.count('Y')
if vlen == 1: return tokens
elif vlen == 2 and rcParams['nasal_placeholder'] in tokens:
return tokens
out = []
tmp_tokens = list(zip(tokens,
lingpyd.prosodic_string(tokens)
))
new_syllable = True
len_so_far = 0
while tmp_tokens:
token,prochar = tmp_tokens.pop(0)
# check for tonal pro-chars
if prochar == 'T' and len(tmp_tokens) != 0:
out += [token, segment]
new_syllable = True
len_so_far = 0
elif prochar == '_' and tmp_tokens and not new_syllable:
out += [segment]
new_syllable = True
len_so_far = 0
elif prochar == '_' and len(out) > 1 and out[-1] == segment:
new_syllable = True
len_so_far = 0
# check for markers if no tone is given
elif prochar == 'B' and not new_syllable and tmp_tokens:
if out[-1] == 'A':
out += [token]
len_so_far += 1
else:
if len_so_far > 1:
out += [segment,token]
len_so_far = 1
else:
out += [token]
len_so_far += 1
# if nothing else is given, just append the string
else:
out += [token]
new_syllable = False
len_so_far += 1
return out
class Wordlist(lingpyd.basic.wordlist.Wordlist):
def __init__(self, infile, **keywords):
if type(infile) == dict:
lingpyd.basic.wordlist.Wordlist.__init__(self, infile, **keywords)
elif infile.endswith('.triples'):
D = lingpyd.basic.ops.triple2tsv(infile, output='dict', **keywords)
lingpyd.basic.wordlist.Wordlist.__init__(self, D)
else:
lingpyd.basic.wordlist.Wordlist.__init__(self,infile, **keywords)
def tokenize(self, override=True, preprocessing=False):
if not preprocessing:
preprocessing = lambda x: x
self.add_entries('tokens', 'ipa', lambda x:
ipa2tokens(preprocessing(x)),override=override)
self.add_entries('prostring','tokens', lambda x: lingpyd.prosodic_string(x,
_output='CcV'), override)
self.add_entries('tokens', 'tokens', lambda x: secondary_structures(x),
override = override)
def update(self, dbase, table, ignore=False):
"""
Upload triple-data to sqlite3-db.
"""
if not ignore: ignore=[]
# get the triples
triples = lingpyd.basic.ops.tsv2triple(self,False)
        # connect to database
db = sqlite3.connect(dbase)
cursor = db.cursor();
try:
cursor.execute('drop table '+table+';')
except sqlite3.OperationalError:
pass
cursor.execute('create table '+table+' (ID int, COL text, VAL text);')
cursor.execute('vacuum')
for a,b,c in triples:
if b.lower() not in ignore:
if type(c) == list:
c = ' '.join([str(x) for x in c])
else:
c = str(c)
cursor.execute('insert into '+table+' values (?, ?, ?);', (a, b, c))
db.commit()
|
lingpy/plugins
|
burmish/basics.py
|
Python
|
gpl-2.0
| 11,825 | 0.00882 |
""" Module to correct pulsar and FRB DMs for the MW ISM """
from ne2001 import ne_io, density #ne2001 ism model
import pygedm #ymw ism model
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord, Galactic
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
ne = density.ElectronDensity()
def find_delta_dm(transient_type,transient_data,ism_model,b_val,mc_deg=5,save_df=True):
"""
    Find pulsar/FRB DMs corrected for the MW ISM DM and remove observations in complex DM regions.
    Returns array of corrected (delta) DMs
    FRB data is available as a csv in the FRBs/FRB/frb/data/FRBs repo (FRB catalogue [Petroff et al. 2017])
    Pulsar data is available as a csv in the FRBs/pulsars/pulsars/data/atnf_cat repo (v1.61 ATNF pulsar catalogue [Manchester et al. 2005])
    Arguments:
    transient_type (str):
        Accepts 'frb' or 'pulsar'.
    transient_data (str):
        Path to data (in .csv format).
    ism_model (str):
        Model used to calculate the MW ISM DM.
        Accepts 'ymw16' [Yao et al. 2017] or 'ne2001' [Cordes & Lazio 2003].
    b_val (int):
        Galactic latitude considered (b>b_val, b<-b_val).
    mc_deg (int):
        Number of degrees from Magellanic clouds within which transients are removed.
    save_df (bool, optional):
        Save transient DMs and coords to csv.
    Outputs:
        Array of transient DMs with the MW ISM contribution subtracted (deltaDM).
    """
# Sort data and get coords
if transient_type=='frb':
transcat_df = pd.read_csv(transient_data, skiprows=1, usecols= [0,5,6,7], names=['Name','l','b','dm'])
transcat_df['dm'] = transcat_df['dm'].str.split('&').str[0].astype(float).values
coords = SkyCoord(l=transcat_df['l'], b=transcat_df['b'], unit=(u.degree),frame=Galactic)
elif transient_type=='pulsar':
transcat_df = pd.read_csv(transient_data, skiprows=2, usecols = [1,2,3,9,10], names=['Name','Pref','dm','RAJD','DECJD'])
transcat_df = transcat_df[~transcat_df['dm'].str.contains('*', regex=False)].reset_index(drop=True)
transcat_df['dm'] = transcat_df['dm'].astype(float)
c_icrs = SkyCoord(ra=transcat_df['RAJD'], dec=transcat_df['DECJD'], unit=(u.degree), frame='icrs')
transcat_df['l'] = pd.DataFrame(c_icrs.galactic.l.value)
transcat_df['b'] = pd.DataFrame(c_icrs.galactic.b.value)
coords = SkyCoord(l=transcat_df['l'], b=transcat_df['b'], unit=(u.degree),frame=Galactic)
# Find transients in line of sight of MCs
logging.info('Removing transients near Magellanic clouds...')
# LMC
lmc_distance = 50*u.kpc
lmc_coord = SkyCoord('J052334.6-694522',unit=(u.hourangle, u.deg),distance=lmc_distance)
close_to_lmc = lmc_coord.separation(coords) < mc_deg*u.deg
lmc_trans = list(transcat_df[close_to_lmc]['Name'])
# SMC
smc_distance = 61*u.kpc
smc_coord = SkyCoord('J005238.0-724801',unit=(u.hourangle, u.deg),distance=smc_distance)
close_to_smc = smc_coord.separation(coords) < mc_deg*u.deg
smc_trans = list(transcat_df[close_to_smc]['Name'])
transcat_df = transcat_df[~transcat_df['Name'].isin(lmc_trans)].reset_index(drop=True)
transcat_df = transcat_df[~transcat_df['Name'].isin(smc_trans)].reset_index(drop=True)
if transient_type=='pulsar':
transcat_df = transcat_df[~transcat_df['Pref'].str.contains('mfl+06', regex=False)].reset_index(drop=True)
elif transient_type=='frb':
pass
    # Remove transients with low Galactic latitudes
    logging.info('Removing transients with low Galactic latitudes...')
transcat_df = pd.concat([transcat_df[transcat_df.b > b_val], transcat_df[transcat_df.b < -b_val]], ignore_index=True)
# ISM model
logging.info('Correcting transient DMs for ISM...')
trans_ism = []
if ism_model=='ymw16':
for i in range(len(transcat_df['dm'])):
trans_ism_ = pygedm.dist_to_dm(transcat_df['l'].iloc[i], transcat_df['b'].iloc[i], 100000)[0].value
trans_ism = np.append(trans_ism,trans_ism_)
elif ism_model=='ne2001':
for i in range(len(transcat_df['dm'])):
trans_ism_ = ne.DM(transcat_df['l'].iloc[i], transcat_df['b'].iloc[i], 100.).value
trans_ism = np.append(trans_ism,trans_ism_)
transcat_df['trans_ism'] = pd.DataFrame(trans_ism)
transcat_df['deltaDM'] = pd.DataFrame(transcat_df['dm']-transcat_df['trans_ism'])
if save_df==True:
transcat_df.to_csv('transient_data/'+transient_type+'cat_df_'+ism_model+'_'+str(int(b_val))+'.csv')
logging.info('Transient data saved to csv.')
else:
pass
return np.array(transcat_df['deltaDM'])
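# Usage sketch (the file path and b_val below are illustrative, not from the original source):
#   frb_delta_dms = find_delta_dm('frb', 'frbcat.csv', 'ne2001', b_val=20, save_df=False)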
|
FRBs/DM
|
frb/dm_kde/sort_transient_data.py
|
Python
|
bsd-3-clause
| 4,742 | 0.015183 |
#!/usr/bin/env python
"""Conditional import for Chipsec. Only Linux is supported at this stage."""
|
google/grr
|
grr/client/grr_response_client/components/chipsec_support/actions/__init__.py
|
Python
|
apache-2.0
| 99 | 0 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common client library functions and classes used by all products."""
__author__ = 'Joseph DiLallo'
import os
import sys
import warnings
import httplib2
import socks
import suds
import yaml
import googleads.errors
import googleads.oauth2
VERSION = '3.0.1'
_COMMON_LIB_SIG = 'googleads/%s' % VERSION
_PROXY_YAML_KEY = 'proxy_info'
_PYTHON_VERSION = 'Python/%d.%d' % (sys.version_info[0], sys.version_info[1])
# The keys in the authentication dictionary that are used to construct OAuth 2.0
# credentials.
_OAUTH_2_AUTH_KEYS = ('client_id', 'client_secret', 'refresh_token')
# The keys in the proxy dictionary that are used to construct a ProxyInfo
# instance.
_PROXY_KEYS = ('host', 'port')
def GenerateLibSig(short_name):
"""Generates a library signature suitable for a user agent field.
Args:
short_name: The short, product-specific string name for the library.
Returns:
A library signature string to append to user-supplied user-agent value.
"""
return ' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION)
def LoadFromStorage(path, product_yaml_key, required_client_values,
optional_product_values):
"""Loads the data necessary for instantiating a client from file storage.
In addition to the required_client_values argument, the yaml file must supply
the keys used to create OAuth 2.0 credentials. It may also optionally provide
proxy_info in order to configure a proxy.
Args:
path: A path string to the yaml document whose keys should be used.
product_yaml_key: The key to read in the yaml as a string.
required_client_values: A tuple of strings representing values which must
be in the yaml file for a supported API. If one of these keys is not in
the yaml file, an error will be raised.
optional_product_values: A tuple of strings representing optional values
which may be in the yaml file.
Returns:
A dictionary map of the keys in the yaml file to their values. This will not
contain the keys used for OAuth 2.0 client creation and instead will have a
GoogleOAuth2Client object stored in the 'oauth2_client' field.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required_client_values key was missing or an OAuth 2.0 key was missing.
"""
if not os.path.isabs(path):
path = os.path.expanduser(path)
try:
with open(path, 'r') as handle:
data = yaml.safe_load(handle.read())
product_data = data.get(product_yaml_key) or {}
proxy_data = data.get(_PROXY_YAML_KEY) or {}
except IOError:
raise googleads.errors.GoogleAdsValueError(
'Given yaml file, %s, could not be opened.' % path)
original_keys = list(product_data.keys())
original_proxy_keys = list(proxy_data.keys())
client_kwargs = {}
try:
for key in required_client_values:
client_kwargs[key] = product_data[key]
del product_data[key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file, %s, is missing some of the required values. Required '
'values are: %s, actual values are %s'
% (path, required_client_values, original_keys))
try:
proxy_info = (httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, proxy_data['host'],
proxy_data['port'])
if proxy_data else None)
client_kwargs['https_proxy'] = ('%s:%s' % (proxy_info.proxy_host,
proxy_info.proxy_port)
if proxy_info else None)
except KeyError:
raise googleads.errors.GoogleAdsValueError(
        'Your yaml file, %s, is missing some of the required proxy values. '
'Required values are: %s, actual values are %s'
% (path, _PROXY_KEYS, original_proxy_keys))
ca_certs = proxy_data.get('ca_certs', None)
disable_ssl_certificate_validation = proxy_data.get(
'disable_ssl_certificate_validation', True)
try:
client_kwargs['oauth2_client'] = (
googleads.oauth2.GoogleRefreshTokenClient(
product_data['client_id'], product_data['client_secret'],
product_data['refresh_token'], proxy_info,
disable_ssl_certificate_validation, ca_certs))
for auth_key in _OAUTH_2_AUTH_KEYS:
del product_data[auth_key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file, %s, is missing some of the required OAuth 2.0 '
'values. Required values are: %s, actual values are %s'
% (path, _OAUTH_2_AUTH_KEYS, original_keys))
for value in optional_product_values:
if value in product_data:
client_kwargs[value] = product_data[value]
del product_data[value]
if product_data:
warnings.warn(
'Your yaml file, %s, contains the following unrecognized '
'keys: %s. They were ignored.' % (path, product_data), stacklevel=3)
return client_kwargs
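# Sketch of a yaml file this function can read. The OAuth 2.0 and proxy keys are
# required by this module; the product key ('adwords') and 'developer_token' are
# only illustrative here and depend on the calling product library:
#
#   adwords:
#     developer_token: ...
#     client_id: ...
#     client_secret: ...
#     refresh_token: ...
#   proxy_info:
#     host: proxy.example.com
#     port: 8080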
def _PackForSuds(obj, factory):
"""Packs SOAP input into the format we want for suds.
The main goal here is to pack dictionaries with an 'xsi_type' key into
objects. This allows dictionary syntax to be used even with complex types
extending other complex types. The contents of dictionaries and lists/tuples
are recursively packed. Mutable types are copied - we don't mutate the input.
Args:
obj: A parameter for a SOAP request which will be packed. If this is
a dictionary or list, the contents will recursively be packed. If this
is not a dictionary or list, the contents will be recursively searched
for instances of unpacked dictionaries or lists.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
Returns:
If the given obj was a dictionary that contained the 'xsi_type' key, this
will be an instance of a class generated from the WSDL. Otherwise, this will
be the same data type as the input obj was.
"""
if obj in ({}, None):
# Force suds to serialize empty objects. There are legitimate use cases for
# this, for example passing in an empty SearchCriteria object to a DFA
# search method in order to select everything.
return suds.null()
elif isinstance(obj, dict):
if 'xsi_type' in obj:
try:
new_obj = factory.create(obj['xsi_type'])
except suds.TypeNotFound:
new_obj = factory.create(':'.join(['ns0', obj['xsi_type']]))
# Suds sends an empty XML element for enum types which are not set. None
# of Google's Ads APIs will accept this. Initializing all of the fields in
# a suds object to None will ensure that they don't get serialized at all
# unless the user sets a value. User values explicitly set to None will be
# packed into a suds.null() object.
for param, _ in new_obj:
# Another problem is that the suds.mx.appender.ObjectAppender won't
# serialize object types with no fields set, but both AdWords and DFP
# rely on sending objects with just the xsi:type set. The below "if"
# statement is an ugly hack that gets this to work in all(?) situations
# by taking advantage of the fact that these classes generally all have
# a type field. The only other option is to monkey patch ObjectAppender.
if param.endswith('.Type'):
setattr(new_obj, param, obj['xsi_type'])
else:
setattr(new_obj, param, None)
for key in obj:
if key == 'xsi_type': continue
setattr(new_obj, key, _PackForSuds(obj[key], factory))
else:
new_obj = {}
for key in obj:
new_obj[key] = _PackForSuds(obj[key], factory)
return new_obj
elif isinstance(obj, (list, tuple)):
return [_PackForSuds(item, factory) for item in obj]
else:
_RecurseOverObject(obj, factory)
return obj
def _RecurseOverObject(obj, factory, parent=None):
"""Recurses over a nested structure to look for changes in Suds objects.
Args:
obj: A parameter for a SOAP request field which is to be inspected and
will be packed for Suds if an xsi_type is specified, otherwise will be
left unaltered.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
parent: The parent object that contains the obj parameter to be inspected.
"""
if _IsSudsIterable(obj):
# Since in-place modification of the Suds object is taking place, the
# iterator should be done over a frozen copy of the unpacked fields.
copy_of_obj = tuple(obj)
for item in copy_of_obj:
if _IsSudsIterable(item):
if 'xsi_type' in item:
if isinstance(obj, tuple):
parent[obj[0]] = _PackForSuds(obj[1], factory)
else:
obj.remove(item)
obj.append(_PackForSuds(item, factory))
_RecurseOverObject(item, factory, obj)
def _IsSudsIterable(obj):
"""A short helper method to determine if a field is iterable for Suds."""
return (obj and not isinstance(obj, basestring) and hasattr(obj, '__iter__'))
class SudsServiceProxy(object):
"""Wraps a suds service object, allowing custom logic to be injected.
This class is responsible for refreshing the HTTP and SOAP headers, so changes
to the client object will be reflected in future SOAP calls, and for
transforming SOAP call input parameters, allowing dictionary syntax to be used
with all SOAP complex types.
Attributes:
suds_client: The suds.client.Client this service belongs to. If you are
familiar with suds and want to use autogenerated classes, you can access
the client and its factory,
"""
def __init__(self, suds_client, header_handler):
"""Initializes a suds service proxy.
Args:
suds_client: The suds.client.Client whose service will be wrapped. Note
that this is the client itself, not the client's embedded service
object.
header_handler: A HeaderHandler responsible for setting the SOAP and HTTP
headers on the service client.
"""
self.suds_client = suds_client
self._header_handler = header_handler
self._method_proxies = {}
def __getattr__(self, attr):
if attr in self.suds_client.wsdl.services[0].ports[0].methods:
if attr not in self._method_proxies:
self._method_proxies[attr] = self._CreateMethod(attr)
return self._method_proxies[attr]
else:
return getattr(self.suds_client.service, attr)
def _CreateMethod(self, method_name):
"""Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
"""
soap_service_method = getattr(self.suds_client.service, method_name)
def MakeSoapRequest(*args):
"""Perform a SOAP call."""
self._header_handler.SetHeaders(self.suds_client)
return soap_service_method(*[_PackForSuds(arg, self.suds_client.factory)
for arg in args])
return MakeSoapRequest
class HeaderHandler(object):
"""A generic header handler interface that must be subclassed by each API."""
def SetHeaders(self, client):
"""Sets the SOAP and HTTP headers on the given suds client."""
raise NotImplementedError('You must subclass HeaderHandler.')
|
dietrichc/streamline-ppc-reports
|
googleads/common.py
|
Python
|
apache-2.0
| 12,102 | 0.006363 |
import os
import sys
import string
import random
import math
#################################################
# State
balance = 0
def deposit(amount):
global balance
balance += amount
return balance
def withdraw(amount):
global balance
balance -= amount
return balance
#################################################
# Dict like
def make_account():
return {'balance': 0}
def deposit(account, amount):
account['balance'] += amount
return account['balance']
def withdraw(account, amount):
account['balance'] -= amount
return account['balance']
# >>> a = make_account()
# >>> b = make_account()
# >>> deposit(a, 100)
# 100
# >>> deposit(b, 50)
# 50
# >>> withdraw(b, 10)
# 40
# >>> withdraw(a, 10)
# 90
#################################################
# Class
class BankAccount:
def __init__(self, balance=0):
self.balance = balance
def withdraw(self, amount):
self.balance -= amount
return self.balance
def deposit(self, amount):
self.balance += amount
return self.balance
# >>> a = BankAccount()
# >>> b = BankAccount()
# >>> a.deposit(100)
# 100
# >>> b.deposit(50)
# 50
# >>> b.withdraw(10)
# 40
# >>> a.withdraw(10)
# 90
#################################################
# Inheritance
class MinimumBalanceAccount(BankAccount):
def __init__(self, minimum_balance):
BankAccount.__init__(self)
self.minimum_balance = minimum_balance
def withdraw(self, amount):
if self.balance - amount < self.minimum_balance:
print('Sorry, minimum balance must be maintained.')
else:
BankAccount.withdraw(self, amount)
# >>> a = MinimumBalanceAccount(0)
# >>> a.deposit(100)
# 100
# >>> a.withdraw(101)
# Sorry, minimum balance must be maintained.
########################################
# Mangling, Exceptions
def generate_id(n=16):
alphabet = string.ascii_letters + string.digits
return ''.join(random.choice(alphabet) for _ in range(n))
class WithdrawError(Exception):
"""Not enough money"""
def __init__(self, amount):
super().__init__()
self.amount = amount
class AdvancedBankAccount:
MAX_BALANCE = 2 ** 64
def __init__(self):
self._balance = 0
self.__id = generate_id()
def withdraw(self, amount):
if not isinstance(amount, int):
raise ValueError
if self._balance < amount:
raise WithdrawError(amount)
self._balance -= amount
return self._balance
def deposit(self, amount):
self._balance += amount
return self._balance
def get_max_balance():
return AdvancedBankAccount.MAX_BALANCE
if __name__ == '__main__':
a = AdvancedBankAccount()
b = a
c = AdvancedBankAccount()
a.deposit(10)
# AdvancedBankAccount.deposit(a, 10) # the same
print('UNACCEPTABLE! b balance:', b._balance)
# print(b.__id) # error, name mangling
a.get_id = lambda self: self.__id
# print(a.get_id()) # TypeError
# print(a.get_id(a)) # AttributeError
################################################
# UNACCEPTABLE!
print("UNACCEPTABLE! b id:", b._AdvancedBankAccount__id) # name unmangling
# static
AdvancedBankAccount.MAX_BALANCE = 2 ** 32
print('max balance:', AdvancedBankAccount.get_max_balance())
a.MAX_BALANCE = 2 ** 64
print('a max: {}, c max: {}'.format(a.MAX_BALANCE,
c.MAX_BALANCE))
################################################
# Exceptions
# in module import
try:
a.withdraw("100")
except:
pass
    # UNACCEPTABLE!
try:
a.withdraw(100)
except WithdrawError as e:
pass
try:
a.withdraw(100)
except (ValueError, WithdrawError) as e:
print('exception raised')
else:
print('no exception')
finally:
print('Finally')
def tricky():
try:
print('Tricky called')
return 1
finally:
print('Tricky finally called')
return 42
return 0
print(tricky())
# how about with statement?
# module is object -> import
class Shape:
def area(self):
raise NotImplementedError
class Circle(Shape):
def __init__(self, radius):
self.radius = radius
def area(self):
return math.pi * self.radius ** 2
class Square(Shape):
def __init__(self, side):
self.side = side
def area(self):
return self.side ** 2
if __name__ == "__main__":
a = [Square(10), Circle(2)]
s = sum(s.area() for s in a)
print(s)
|
SPbAU-ProgrammingParadigms/materials
|
python_2/common_objects.py
|
Python
|
unlicense
| 4,690 | 0.001066 |
import os
import re
import setuptools
def get_version(package: str) -> str:
"""Return package version as listed in __version__ variable at __init__.py"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search(r"__version__\s*=\s*['\"]([^'\"]+)['\"]", init_py).group(1)
with open("README.rst", "r", encoding='utf-8') as fh:
long_description = fh.read()
setuptools.setup(
name='imap-tools',
version=get_version('imap_tools'),
packages=setuptools.find_packages(exclude=['tests']),
url='https://github.com/ikvk/imap_tools',
license='Apache-2.0',
long_description=long_description,
long_description_content_type="text/x-rst",
author='Vladimir Kaukin',
author_email='KaukinVK@ya.ru',
description='Work with email by IMAP',
keywords=['imap', 'imap-client', 'python3', 'python', 'email'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
# install_requires=['typing>=3.6.2'],
)
|
ikvk/imap_tools
|
setup.py
|
Python
|
apache-2.0
| 1,092 | 0.001832 |
# Copyright (C) 2000,2005 Bruce Guenter <bruce@untroubled.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import functions
import context
import syntax
import lex
import parser
path = syntax.path
if __name__ == '__main__':
body = sys.stdin.read()
tree = parser.parse(body)
tree.showtree(0)
ctxt = context.Context(sys.stdout,
{'username':'nobody', 'domain':'testdomain.org'})
tree.execute(ctxt)
print ctxt.dict
|
bruceg/bglibs
|
python/template/__init__.py
|
Python
|
lgpl-2.1
| 1,139 | 0.005268 |
import time
import sys
import RPi.GPIO as GPIO
'''ON and OFF binary code strings for sockets 1,2,3,4 mapped as a,b,c,d'''
a_on = ''
a_off = ''
b_on = ''
b_off = ''
c_on = ''
c_off = ''
d_on = '0011100011000111011010001'
d_off = '0011100011000111011010011'
'''short/long intervals within the signal and between repetitions (in seconds)'''
intervallo_breve = 0.00030
intervallo_lungo = 0.00096
intervallo_tra_tentativi = 0.0113
NUMERO_DI_TENTATIVI = 15
'''PIN used to send data to the transmitter chip'''
PIN_DATA_DI_INVIO = 16
def transmit_code(code):
    '''Use the BCM numbering standard to specify which PIN to use'''
GPIO.setmode(GPIO.BCM)
    '''Set the PIN given in BCM numbering as the data output PIN'''
GPIO.setup(PIN_DATA_DI_INVIO, GPIO.OUT)
    '''Repeat the transmission for the specified number of attempts'''
for t in range(NUMERO_DI_TENTATIVI):
for i in code:
if i == '1':
                '''Bit = 1: switch the PIN on briefly, then off for a long interval'''
GPIO.output(PIN_DATA_DI_INVIO, 1)
time.sleep(intervallo_breve)
GPIO.output(PIN_DATA_DI_INVIO, 0)
time.sleep(intervallo_lungo)
elif i == '0':
                '''Bit = 0: switch the PIN on for a long interval, then off briefly'''
GPIO.output(PIN_DATA_DI_INVIO, 1)
time.sleep(intervallo_lungo)
GPIO.output(PIN_DATA_DI_INVIO, 0)
time.sleep(intervallo_breve)
else:
continue
        '''Switch the PIN off and wait until the next attempt'''
GPIO.output(PIN_DATA_DI_INVIO, 0)
time.sleep(intervallo_tra_tentativi)
    '''Transmission finished, clean up the GPIO'''
GPIO.cleanup()
if __name__ == '__main__':
    '''Capture the signal(s) to send: a_on, a_off, b_on - etc...'''
for argument in sys.argv[1:]:
exec('transmit_code(' + str(argument) + ')')
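# Usage sketch: pass the name(s) of the code strings defined above, e.g.
#   sudo python transmit.py d_on
# which ends up calling transmit_code(d_on). (RPi.GPIO typically requires root.)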
|
geekonerd/smartplugs
|
data/transmit.py
|
Python
|
gpl-3.0
| 2,037 | 0.000491 |
# Copyright (C) 2011 Sam Rushing
# Copyright (C) 2012-2014 The python-bitcoinlib developers
# Copyright (C) 2015 The python-altcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import ctypes
import ctypes.util
from bitcoin.core.key import CECKey, _ssl
# CECKey with added support for key generation
class CAltcoinECKey(CECKey):
def __init__(self):
CECKey.__init__(self)
def __del__(self):
CECKey.__del__(self)
def get_secret_bytes(self):
global _ssl
secret = _ssl.EC_KEY_get0_private_key(self.k)
mb = ctypes.create_string_buffer(32)
size = _ssl.BN_bn2bin(secret, mb)
if size == 32:
return mb.raw
else:
# Move the data into a zero-padded buffer of 32 bytes
padding = 32 - size
new_buffer = ctypes.create_string_buffer(32)
for idx in range(0, padding):
new_buffer[idx] = "\x00"
for idx in range(padding, 32):
new_buffer[idx] = mb[idx - padding]
return new_buffer.raw
def generate(self):
global _ssl
_ssl.EC_KEY_generate_key(self.k)
return self.k
__all__ = (
'CAltcoinECKey',
)
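# Minimal usage sketch (only uses methods defined in this module):
#   k = CAltcoinECKey()
#   k.generate()                   # create a fresh keypair in-place
#   secret = k.get_secret_bytes()  # 32-byte big-endian private key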
|
coinwarp/python-altcoinlib
|
altcoin/core/key.py
|
Python
|
lgpl-3.0
| 1,510 | 0.000662 |
"""OO interfaces to encodings for ND arrays which caching."""
import numpy as np
import abc
from ..util import ABC
from . import runlength as rl
from .. import caching
try:
from scipy import sparse as sp
except BaseException as E:
from ..exceptions import ExceptionModule
sp = ExceptionModule(E)
def _empty_stripped(shape):
num_dims = len(shape)
encoding = DenseEncoding(
np.empty(shape=(0,) * num_dims, dtype=bool))
padding = np.zeros(shape=(num_dims, 2), dtype=int)
padding[:, 1] = shape
return encoding, padding
class Encoding(ABC):
"""
    Base class for objects that implement a specific subset of ndarray ops.
    This presents a unified interface to various ways of encoding conceptually
    dense arrays and allows interoperation between them.
Example implementations are ND sparse arrays, run length encoded arrays
and dense encodings (wrappers around np.ndarrays).
"""
def __init__(self, data):
self._data = data
self._cache = caching.Cache(id_function=data.crc)
@abc.abstractproperty
def dtype(self):
pass
@abc.abstractproperty
def shape(self):
pass
@abc.abstractproperty
def sum(self):
pass
@abc.abstractproperty
def size(self):
pass
@abc.abstractproperty
def sparse_indices(self):
pass
@abc.abstractproperty
def sparse_values(self):
pass
@abc.abstractproperty
def dense(self):
pass
@abc.abstractmethod
def gather_nd(self, indices):
pass
@abc.abstractmethod
def mask(self, mask):
pass
@abc.abstractmethod
def get_value(self, index):
pass
@abc.abstractmethod
def copy(self):
pass
@property
def is_empty(self):
return self.sparse_indices[self.sparse_values != 0].size == 0
@caching.cache_decorator
def stripped(self):
"""
Get encoding with all zeros stripped from the start and end
of each axis.
Returns
------------
        encoding : Encoding
          encoding with the leading/trailing zeros stripped along each axis
padding : (n, 2) int
Padding at the start and end that was stripped
"""
if self.is_empty:
return _empty_stripped(self.shape)
dense = self.dense
shape = dense.shape
ndims = len(shape)
padding = []
slices = []
for dim, size in enumerate(shape):
axis = tuple(range(dim)) + tuple(range(dim + 1, ndims))
filled = np.any(dense, axis=axis)
indices, = np.nonzero(filled)
lower = indices.min()
upper = indices.max() + 1
padding.append([lower, size - upper])
slices.append(slice(lower, upper))
return DenseEncoding(dense[tuple(slices)]), np.array(padding, int)
def _flip(self, axes):
return FlippedEncoding(self, axes)
def md5(self):
return self._data.md5()
def crc(self):
return self._data.crc()
@property
def ndims(self):
return len(self.shape)
def reshape(self, shape):
return self.flat if len(shape) == 1 else ShapedEncoding(self, shape)
@property
def flat(self):
return FlattenedEncoding(self)
def flip(self, axis=0):
return _flipped(self, axis)
@property
def sparse_components(self):
return self.sparse_indices, self.sparse_values
@property
def data(self):
return self._data
def run_length_data(self, dtype=np.int64):
if self.ndims != 1:
raise ValueError(
'`run_length_data` only valid for flat encodings')
return rl.dense_to_rle(self.dense, dtype=dtype)
def binary_run_length_data(self, dtype=np.int64):
if self.ndims != 1:
raise ValueError(
                '`binary_run_length_data` only valid for flat encodings')
return rl.dense_to_brle(self.dense, dtype=dtype)
def transpose(self, perm):
return _transposed(self, perm)
def _transpose(self, perm):
return TransposedEncoding(self, perm)
@property
def mutable(self):
return self._data.mutable
@mutable.setter
def mutable(self, value):
self._data.mutable = value
class DenseEncoding(Encoding):
"""Simple `Encoding` implementation based on a numpy ndarray."""
def __init__(self, data):
if not isinstance(data, caching.TrackedArray):
if not isinstance(data, np.ndarray):
raise ValueError('DenseEncoding data must be a numpy array')
data = caching.tracked_array(data)
super(DenseEncoding, self).__init__(data=data)
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@caching.cache_decorator
def sum(self):
return self._data.sum()
@caching.cache_decorator
def is_empty(self):
return not np.any(self._data)
@property
def size(self):
return self._data.size
@property
def sparse_components(self):
indices = self.sparse_indices
values = self.gather(indices)
return indices, values
@caching.cache_decorator
def sparse_indices(self):
return np.column_stack(np.where(self._data))
@caching.cache_decorator
def sparse_values(self):
return self.sparse_components[1]
def _flip(self, axes):
dense = self.dense
for a in axes:
dense = np.flip(dense, a)
return DenseEncoding(dense)
@property
def dense(self):
return self._data
def gather(self, indices):
return self._data[indices]
def gather_nd(self, indices):
return self._data[tuple(indices.T)]
def mask(self, mask):
return self._data[mask if isinstance(mask, np.ndarray) else mask.dense]
def get_value(self, index):
return self._data[tuple(index)]
def reshape(self, shape):
return DenseEncoding(self._data.reshape(shape))
def _transpose(self, perm):
return DenseEncoding(self._data.transpose(perm))
@property
def flat(self):
return DenseEncoding(self._data.reshape((-1,)))
def copy(self):
return DenseEncoding(self._data.copy())
class SparseEncoding(Encoding):
"""
`Encoding` implementation based on an ND sparse implementation.
Since the scipy.sparse implementations are for 2D arrays only, this
implementation uses a single-column CSC matrix with index
raveling/unraveling.
"""
def __init__(self, indices, values, shape=None):
"""
Parameters
------------
indices: (m, n)-sized int array of indices
        values: (m,)-sized dtype array of values at the specified indices
shape: (n,) iterable of integers. If None, the maximum value of indices
+ 1 is used.
"""
data = caching.DataStore()
super(SparseEncoding, self).__init__(data)
data['indices'] = indices
data['values'] = values
indices = data['indices']
if len(indices.shape) != 2:
raise ValueError(
'indices must be 2D, got shaped %s' % str(indices.shape))
if data['values'].shape != (indices.shape[0],):
raise ValueError(
'values and indices shapes inconsistent: %s and %s'
% (data['values'], data['indices']))
if shape is None:
self._shape = tuple(data['indices'].max(axis=0) + 1)
else:
self._shape = tuple(shape)
if not np.all(indices < self._shape):
raise ValueError('all indices must be less than shape')
if not np.all(indices >= 0):
raise ValueError('all indices must be non-negative')
@staticmethod
def from_dense(dense_data):
sparse_indices = np.where(dense_data)
values = dense_data[sparse_indices]
return SparseEncoding(
np.stack(sparse_indices, axis=-1), values, shape=dense_data.shape)
def copy(self):
return SparseEncoding(
indices=self.sparse_indices.copy(),
values=self.sparse_values.copy(),
shape=self.shape)
@property
def sparse_indices(self):
return self._data['indices']
@property
def sparse_values(self):
return self._data['values']
@property
def dtype(self):
return self.sparse_values.dtype
@caching.cache_decorator
def sum(self):
return self.sparse_values.sum()
@property
def ndims(self):
return self.sparse_indices.shape[-1]
@property
def shape(self):
return self._shape
@property
def size(self):
return np.prod(self.shape)
@property
def sparse_components(self):
return self.sparse_indices, self.sparse_values
@caching.cache_decorator
def dense(self):
sparse = self._csc
# sparse.todense gives an `np.matrix` which cannot be reshaped
dense = np.empty(shape=sparse.shape, dtype=sparse.dtype)
sparse.todense(out=dense)
return np.reshape(dense, self.shape)
@caching.cache_decorator
def _csc(self):
values = self.sparse_values
indices = self._flat_indices(self.sparse_indices)
indptr = [0, len(indices)]
return sp.csc_matrix((values, indices, indptr), shape=(self.size, 1))
def _flat_indices(self, indices):
assert(indices.shape[1] == 3 and len(indices.shape) == 2)
return np.ravel_multi_index(indices.T, self.shape)
def _shaped_indices(self, flat_indices):
return np.column_stack(np.unravel_index(flat_indices, self.shape))
def gather_nd(self, indices):
mat = self._csc[self._flat_indices(indices)].todense()
# mat is a np matrix, which stays rank 2 after squeeze
# np.asarray changes this to a standard rank 2 array.
return np.asarray(mat).squeeze(axis=-1)
def mask(self, mask):
i, _ = np.where(self._csc[mask.reshape((-1,))])
return self._shaped_indices(i)
def get_value(self, index):
return self._gather_nd(np.expand_dims(index, axis=0))[0]
@caching.cache_decorator
def stripped(self):
"""
Get encoding with all zeros stripped from the start/end of each axis.
Returns:
encoding: SparseEncoding with same values but indices shifted down
by padding[:, 0]
padding: (n, 2) array of ints denoting padding at the start/end
that was stripped
"""
if self.is_empty:
return _empty_stripped(self.shape)
indices = self.sparse_indices
pad_left = np.min(indices, axis=0)
pad_right = np.max(indices, axis=0)
pad_right *= -1
pad_right += self.shape
padding = np.column_stack((pad_left, pad_right))
return SparseEncoding(indices - pad_left, self.sparse_values), padding
def SparseBinaryEncoding(indices, shape=None):
"""
Convenient factory constructor for SparseEncodings with values all ones.
Parameters
------------
indices: (m, n) sparse indices into conceptual rank-n array
shape: length n iterable or None. If None, maximum of indices along first
axis + 1 is used
Returns
------------
rank n bool `SparseEncoding` with True values at each index.
"""
return SparseEncoding(
indices, np.ones(shape=(indices.shape[0],), dtype=bool), shape)
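# Usage sketch: three filled voxels in a conceptual (4, 4, 4) grid.
#   enc = SparseBinaryEncoding(np.array([[0, 0, 0], [1, 2, 3], [3, 3, 3]]), shape=(4, 4, 4))
#   enc.dense.shape  -> (4, 4, 4)
#   enc.dense.sum()  -> 3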
class RunLengthEncoding(Encoding):
"""1D run length encoding.
See `trimesh.voxel.runlength` documentation for implementation details.
"""
def __init__(self, data, dtype=None):
"""
Parameters
------------
data: run length encoded data.
        dtype: dtype of encoded data. Each second value of data will be cast
        to this dtype if provided.
"""
super(RunLengthEncoding, self).__init__(
data=caching.tracked_array(data))
if dtype is None:
dtype = self._data.dtype
if len(self._data.shape) != 1:
raise ValueError('data must be 1D numpy array')
self._dtype = dtype
@caching.cache_decorator
def is_empty(self):
return not np.any(
np.logical_and(self._data[::2], self._data[1::2]))
@property
def ndims(self):
return 1
@property
def shape(self):
return (self.size,)
@property
def dtype(self):
return self._dtype
def md5(self):
return self._data.md5()
def crc(self):
return self._data.crc()
@staticmethod
def from_dense(dense_data, dtype=np.int64, encoding_dtype=np.int64):
return RunLengthEncoding(
rl.dense_to_rle(dense_data, dtype=encoding_dtype), dtype=dtype)
@staticmethod
def from_rle(rle_data, dtype=None):
if dtype != rle_data.dtype:
rle_data = rl.rle_to_rle(rle_data, dtype=dtype)
return RunLengthEncoding(rle_data)
@staticmethod
def from_brle(brle_data, dtype=None):
return RunLengthEncoding(rl.brle_to_rle(brle_data, dtype=dtype))
@caching.cache_decorator
def stripped(self):
if self.is_empty:
return _empty_stripped(self.shape)
data, padding = rl.rle_strip(self._data)
if padding == (0, 0):
encoding = self
else:
encoding = RunLengthEncoding(data, dtype=self._dtype)
padding = np.expand_dims(padding, axis=0)
return encoding, padding
@caching.cache_decorator
def sum(self):
return (self._data[::2] * self._data[1::2]).sum()
@caching.cache_decorator
def size(self):
return rl.rle_length(self._data)
def _flip(self, axes):
if axes != (0,):
raise ValueError(
'encoding is 1D - cannot flip on axis %s' % str(axes))
return RunLengthEncoding(rl.rle_reverse(self._data))
@caching.cache_decorator
def sparse_components(self):
return rl.rle_to_sparse(self._data)
@caching.cache_decorator
def sparse_indices(self):
return self.sparse_components[0]
@caching.cache_decorator
def sparse_values(self):
return self.sparse_components[1]
@caching.cache_decorator
def dense(self):
return rl.rle_to_dense(self._data, dtype=self._dtype)
def gather(self, indices):
return rl.rle_gather_1d(self._data, indices, dtype=self._dtype)
def gather_nd(self, indices):
indices = np.squeeze(indices, axis=-1)
return self.gather(indices)
def sorted_gather(self, ordered_indices):
return np.array(
tuple(rl.sorted_rle_gather_1d(self._data, ordered_indices)),
dtype=self._dtype)
def mask(self, mask):
return np.array(
tuple(rl.rle_mask(self._data, mask)), dtype=self._dtype)
def get_value(self, index):
for value in self.sorted_gather((index,)):
return np.asanyarray(value, dtype=self._dtype)
def copy(self):
return RunLengthEncoding(self._data.copy(), dtype=self.dtype)
def run_length_data(self, dtype=np.int64):
return rl.rle_to_rle(self._data, dtype=dtype)
def binary_run_length_data(self, dtype=np.int64):
return rl.rle_to_brle(self._data, dtype=dtype)
class BinaryRunLengthEncoding(RunLengthEncoding):
"""1D binary run length encoding.
See `trimesh.voxel.runlength` documentation for implementation details.
"""
def __init__(self, data):
"""
Parameters
------------
data: binary run length encoded data.
"""
super(BinaryRunLengthEncoding, self).__init__(data=data, dtype=bool)
@caching.cache_decorator
def is_empty(self):
return not np.any(self._data[1::2])
@staticmethod
def from_dense(dense_data, encoding_dtype=np.int64):
return BinaryRunLengthEncoding(
rl.dense_to_brle(dense_data, dtype=encoding_dtype))
@staticmethod
def from_rle(rle_data, dtype=None):
return BinaryRunLengthEncoding(
rl.rle_to_brle(rle_data, dtype=dtype))
@staticmethod
def from_brle(brle_data, dtype=None):
if dtype != brle_data.dtype:
brle_data = rl.brle_to_brle(brle_data, dtype=dtype)
return BinaryRunLengthEncoding(brle_data)
@caching.cache_decorator
def stripped(self):
if self.is_empty:
return _empty_stripped(self.shape)
data, padding = rl.rle_strip(self._data)
if padding == (0, 0):
encoding = self
else:
encoding = BinaryRunLengthEncoding(data)
padding = np.expand_dims(padding, axis=0)
return encoding, padding
@caching.cache_decorator
def sum(self):
return self._data[1::2].sum()
@caching.cache_decorator
def size(self):
return rl.brle_length(self._data)
def _flip(self, axes):
if axes != (0,):
raise ValueError(
'encoding is 1D - cannot flip on axis %s' % str(axes))
return BinaryRunLengthEncoding(rl.brle_reverse(self._data))
@property
def sparse_components(self):
return self.sparse_indices, self.sparse_values
@caching.cache_decorator
def sparse_values(self):
return np.ones(shape=(self.sum,), dtype=bool)
@caching.cache_decorator
def sparse_indices(self):
return rl.brle_to_sparse(self._data)
@caching.cache_decorator
def dense(self):
return rl.brle_to_dense(self._data)
def gather(self, indices):
return rl.brle_gather_1d(self._data, indices)
def gather_nd(self, indices):
indices = np.squeeze(indices)
return self.gather(indices)
def sorted_gather(self, ordered_indices):
gen = rl.sorted_brle_gather_1d(self._data, ordered_indices)
return np.array(tuple(gen), dtype=bool)
def mask(self, mask):
gen = rl.brle_mask(self._data, mask)
return np.array(tuple(gen), dtype=bool)
def copy(self):
return BinaryRunLengthEncoding(self._data.copy())
def run_length_data(self, dtype=np.int64):
return rl.brle_to_rle(self._data, dtype=dtype)
def binary_run_length_data(self, dtype=np.int64):
return rl.brle_to_brle(self._data, dtype=dtype)
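# Illustrative usage sketch: round-tripping small 1D arrays through the two
# run-length encodings above, assuming the `rl` helpers used by `from_dense`
# and `dense` invert each other. The sample data is arbitrary.
def _example_run_length_round_trip():
    import numpy as np  # also imported at module level
    dense = np.array([0, 0, 3, 3, 3, 0, 1], dtype=np.int64)
    rle = RunLengthEncoding.from_dense(dense)
    brle = BinaryRunLengthEncoding.from_dense(dense != 0)
    # both encodings should reproduce their inputs
    return (np.array_equal(rle.dense, dense),
            np.array_equal(brle.dense, dense != 0))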
class LazyIndexMap(Encoding):
"""
Abstract class for implementing lazy index mapping operations.
Implementations include transpose, flatten/reshaping and flipping
Derived classes must implement:
* _to_base_indices(indices)
* _from_base_indices(base_indices)
* shape
* dense
* mask(mask)
"""
@abc.abstractmethod
def _to_base_indices(self, indices):
pass
@abc.abstractmethod
def _from_base_indices(self, base_indices):
pass
@property
def is_empty(self):
return self._data.is_empty
@property
def dtype(self):
return self._data.dtype
@property
def sum(self):
return self._data.sum
@property
def size(self):
return self._data.size
@property
def sparse_indices(self):
return self._from_base_indices(self._data.sparse_indices)
@property
def sparse_values(self):
return self._data.sparse_values
def gather_nd(self, indices):
return self._data.gather_nd(self._to_base_indices(indices))
def get_value(self, index):
return self._data[tuple(self._to_base_indices(index))]
class FlattenedEncoding(LazyIndexMap):
"""
Lazily flattened encoding.
Dense equivalent is np.reshape(data, (-1,)) (np.flatten creates a copy).
"""
def _to_base_indices(self, indices):
return np.column_stack(np.unravel_index(indices, self._data.shape))
def _from_base_indices(self, base_indices):
return np.expand_dims(
np.ravel_multi_index(base_indices.T, self._data.shape), axis=-1)
@property
def shape(self):
return self.size,
@property
def dense(self):
return self._data.dense.reshape((-1,))
def mask(self, mask):
return self._data.mask(mask.reshape(self._data.shape))
@property
def flat(self):
return self
def copy(self):
return FlattenedEncoding(self._data.copy())
class ShapedEncoding(LazyIndexMap):
"""
Lazily reshaped encoding.
Numpy equivalent is `np.reshape`
"""
def __init__(self, encoding, shape):
if isinstance(encoding, Encoding):
if encoding.ndims != 1:
encoding = encoding.flat
else:
raise ValueError('encoding must be an Encoding')
super(ShapedEncoding, self).__init__(data=encoding)
self._shape = tuple(shape)
nn = self._shape.count(-1)
size = np.prod(self._shape)
if nn == 1:
size = np.abs(size)
if self._data.size % size != 0:
raise ValueError(
'cannot reshape encoding of size %d into shape %s' %
(self._data.size, str(self._shape)))
rem = self._data.size // size
self._shape = tuple(rem if s == -1 else s for s in self._shape)
        elif nn > 1:
raise ValueError('shape cannot have more than one -1 value')
elif np.prod(self._shape) != self._data.size:
raise ValueError(
'cannot reshape encoding of size %d into shape %s' %
(self._data.size, str(self._shape)))
def _from_base_indices(self, base_indices):
return np.column_stack(np.unravel_index(base_indices, self.shape))
def _to_base_indices(self, indices):
return np.expand_dims(
np.ravel_multi_index(indices.T, self.shape), axis=-1)
@property
def flat(self):
return self._data
@property
def shape(self):
return self._shape
@property
def dense(self):
return self._data.dense.reshape(self.shape)
def mask(self, mask):
return self._data.mask(mask.flat)
def copy(self):
return ShapedEncoding(encoding=self._data.copy(), shape=self.shape)
class TransposedEncoding(LazyIndexMap):
"""
Lazily transposed encoding
Dense equivalent is `np.transpose`
"""
def __init__(self, base_encoding, perm):
if not isinstance(base_encoding, Encoding):
raise ValueError(
'base_encoding must be an Encoding, got %s'
% str(base_encoding))
if len(base_encoding.shape) != len(perm):
raise ValueError(
'base_encoding has %d ndims - cannot transpose with perm %s'
% (base_encoding.ndims, str(perm)))
super(TransposedEncoding, self).__init__(base_encoding)
perm = np.array(perm, dtype=np.int64)
if not all(i in perm for i in range(base_encoding.ndims)):
raise ValueError('perm %s is not a valid permutation' % str(perm))
inv_perm = np.empty_like(perm)
inv_perm[perm] = np.arange(base_encoding.ndims)
self._perm = perm
self._inv_perm = inv_perm
def transpose(self, perm):
return _transposed(self._data, [self._perm[p] for p in perm])
def _transpose(self, perm):
raise RuntimeError('Should not be here')
@property
def perm(self):
return self._perm
@property
def shape(self):
shape = self._data.shape
return tuple(shape[p] for p in self._perm)
def _to_base_indices(self, indices):
return np.take(indices, self._perm, axis=-1)
def _from_base_indices(self, base_indices):
try:
return np.take(base_indices, self._inv_perm, axis=-1)
except TypeError:
# windows sometimes tries to use wrong dtypes
return np.take(base_indices.astype(np.int64),
self._inv_perm.astype(np.int64),
axis=-1)
@property
def dense(self):
return self._data.dense.transpose(self._perm)
def gather(self, indices):
        return self._data.gather(self._to_base_indices(indices))
def mask(self, mask):
return self._data.mask(
mask.transpose(self._inv_perm)).transpose(self._perm)
def get_value(self, index):
        return self._data[tuple(self._to_base_indices(index))]
@property
def data(self):
return self._data
def copy(self):
return TransposedEncoding(
base_encoding=self._data.copy(), perm=self._perm)
class FlippedEncoding(LazyIndexMap):
"""
Encoding with entries flipped along one or more axes.
Dense equivalent is `np.flip`
"""
def __init__(self, encoding, axes):
ndims = encoding.ndims
if isinstance(axes, np.ndarray) and axes.size == 1:
axes = axes.item(),
elif isinstance(axes, int):
axes = axes,
axes = tuple(a + ndims if a < 0 else a for a in axes)
self._axes = tuple(sorted(axes))
if len(set(self._axes)) != len(self._axes):
raise ValueError(
"Axes cannot contain duplicates, got %s" % str(self._axes))
super(FlippedEncoding, self).__init__(encoding)
if not all(0 <= a < self._data.ndims for a in axes):
raise ValueError(
'Invalid axes %s for %d-d encoding'
% (str(axes), self._data.ndims))
    def _to_base_indices(self, indices):
        indices = indices.copy()
        shape = self.shape
        for a in self._axes:
            # flipped index i along axis a maps back to shape[a] - 1 - i
            indices[:, a] *= -1
            indices[:, a] += shape[a] - 1
        return indices
def _from_base_indices(self, base_indices):
return self._to_base_indices(base_indices)
@property
def shape(self):
return self._data.shape
@property
def dense(self):
dense = self._data.dense
for a in self._axes:
dense = np.flip(dense, a)
return dense
def mask(self, mask):
if not isinstance(mask, Encoding):
mask = DenseEncoding(mask)
mask = mask.flip(self._axes)
return self._data.mask(mask).flip(self._axes)
def copy(self):
return FlippedEncoding(self._data.copy(), self._axes)
def flip(self, axis=0):
        if isinstance(axis, np.ndarray):
            if axis.size == 1:
                axes = axis.item(),
            else:
                axes = tuple(axis)
        elif isinstance(axis, int):
            axes = axis,
        else:
            axes = tuple(axis)
return _flipped(self, self._axes + axes)
def _flip(self, axes):
raise RuntimeError('Should not be here')
def _flipped(encoding, axes):
if not hasattr(axes, '__iter__'):
axes = axes,
unique_ax = set()
ndims = encoding.ndims
axes = tuple(a + ndims if a < 0 else a for a in axes)
for a in axes:
if a in unique_ax:
unique_ax.remove(a)
else:
unique_ax.add(a)
if len(unique_ax) == 0:
return encoding
else:
return encoding._flip(tuple(sorted(unique_ax)))
def _transposed(encoding, perm):
ndims = encoding.ndims
perm = tuple(p + ndims if p < 0 else p for p in perm)
if np.all(np.arange(ndims) == perm):
return encoding
else:
return encoding._transpose(perm)
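# Illustrative usage sketch: the helpers above return the input encoding
# unchanged when a flip or transpose is a no-op, and otherwise defer to the
# encoding's `_flip`/`_transpose` hooks (assumed here to return a lazy view,
# as for `DenseEncoding` defined earlier in this module).
def _example_lazy_index_maps():
    import numpy as np  # also imported at module level
    encoding = DenseEncoding(np.arange(6).reshape(2, 3))
    identity = _transposed(encoding, (0, 1))    # identity permutation -> same object
    transposed = _transposed(encoding, (1, 0))  # lazily transposed view, shape (3, 2)
    cancelled = _flipped(encoding, (0, 0))      # flipping an axis twice cancels out
    return identity is encoding, transposed.shape, cancelled is encoding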
|
mikedh/trimesh
|
trimesh/voxel/encoding.py
|
Python
|
mit
| 27,436 | 0 |
import os
import pickle
import random
import re
from datetime import datetime
from data_processing import (InteractiveAnswer, _in_list, colorit, space_fill,
split_wrd)
BOARDER_LENGTH = 40
class Quest():
    def __init__(self, q, sel=None, ta=None, args=None):
'''
Class representing a Question.
Parameters
----------
basic arguments:
q : question. necessary. list.
sel : selections. list.
ta : true answer. list.
extensable arguments:
args : dict with sets of {'name': 'value'}.
'''
self.q = q
self.sel = sel
self.ta = ta
        self.args = {} if args is None else args
def __str__(self):
'''Visualize the `Quest`.'''
return '{\n\tq: %s,\n\tsel: %s,\n\tta: %s,\n\targs: %s\n}' % \
(self.q, self.sel, self.ta, self.args)
def __eq__(self, value):
        '''Evaluate whether two `Quest`s are equal.'''
if type(value) != type(self): return False
for i in ['q', 'sel', 'ta', 'args']:
if self.__getattribute__(i) != value.__getattribute__(i):
return False
return True
def __hash__(self):
return (hash('\n'.join(self.q)) + hash('\n'.join(self.sel)) + \
hash('\n'.join(self.ta)) + hash('\n'.join(self.args))) % int(1e+16)
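# Illustrative sketch: a single multiple-choice Quest. The field values are
# invented for demonstration; `args` can carry arbitrary per-question data.
def _example_quest():
    return Quest(
        q=['Which planet is closest to the sun?'],
        sel=['A. Venus', 'B. Mercury', 'C. Mars'],
        ta=['B'],
        args={'chapter': '1'})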
class QuestForm(list):
def __init__(self, *args, **kwargs):
super(QuestForm, self).__init__(*args, **kwargs)
def __getitem__(self, ind):
if type(ind) == int:
return super(QuestForm, self).__getitem__(ind)
if type(ind) == slice:
return QuestForm(super(QuestForm, self).__getitem__(ind))
else:
returns = QuestForm()
for i in ind:
returns.append(self[i])
return returns
def append(self, *args, **kwargs):
super(QuestForm, self).append(*args, **kwargs)
return self
class QuestFormTextLoader():
'''QuestForm Loader for text files.'''
def __init__(self,
questpattern,
qpattern,
selpattern=None,
tapattern=None,
argpattern={}):
'''
Parameters
----------
questpattern : regex pattern for a question. necessary.
qpattern : regex pattern for question text in a question. necessary.
selpattern : regex pattern for selections.
a question can have several matching selections.
tapattern : regex pattern for true answer.
argpattern : dict with {'arg_name' : 'arg_regex'} sets.
'''
self.questpattern = questpattern
self.qpattern = qpattern
self.selpattern = selpattern
self.tapattern = tapattern
self.argpattern = dict(argpattern)
self.is_cached = False
def get_cached_qf(self, togo='Curdata.data'):
'''Load cached QuestForm.'''
if togo in os.listdir():
if InteractiveAnswer(
                    'Cached data found. Continue?', yes_or_no=True).get():
with open(togo, 'rb') as f:
return pickle.load(f)
else:
datas = ["Create a new data"] + [
i for i in os.listdir() if re.findall(r'.*\.data$', i)
]
if not datas: return
print("Cached data not found, listing other datas")
for i in range(len(datas)):
print('\t%3s: \t%s' % (i, datas[i]))
no = InteractiveAnswer(
'Which one to choose?',
verify=range(len(datas)),
serializer=
lambda x: [int(i) for i in re.findall(r'[0-9]+', x)]).get()[0]
if no == 0:
return
else:
with open(datas[no], 'rb') as f:
return pickle.load(f)
def _load(self, queststr):
questform = QuestForm()
for quest in re.findall(self.questpattern, queststr):
qitem = re.findall(self.qpattern, quest)
selitem = re.findall(self.selpattern,
quest) if self.selpattern else None
taitem = re.findall(self.tapattern,
quest) if self.tapattern else None
            argitem = {patnam: re.findall(self.argpattern[patnam], quest)
                       for patnam in self.argpattern} if self.argpattern else {}
questform = questform.append(
Quest(q=qitem, sel=selitem, ta=taitem, args=argitem))
return questform
def load(self, queststr):
'''Search queststr, match arguments and returns a QuestForm.'''
qf = self.get_cached_qf()
if qf is not None:
self.is_cached = True
return qf
if 'MainData.data' in os.listdir():
with open('MainData.data', 'rb') as f:
qf = pickle.load(f)
else:
qf = self._load(queststr)
with open('MainData.data', 'wb') as f:
pickle.dump(qf, f)
return qf
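# Illustrative sketch: parsing two plain-text questions with made-up regex
# patterns. `_load` is called directly to keep the example free of the
# caching and prompts that `load` adds on top of it.
def _example_text_loader():
    text = ('Q: 1+1=? A: 2\n'
            'Q: capital of France? A: Paris\n')
    loader = QuestFormTextLoader(
        questpattern=r'Q:.*',
        qpattern=r'Q: (.*?) A:',
        tapattern=r'A: (.*)')
    return loader._load(text)  # QuestForm holding two Quest objects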
class QuestFormExcelLoader(QuestFormTextLoader):
'''QuestForm Loader for excel files. Requires `pandas` module.'''
def __init__(self, qcol, selcol=None, tacol=None, argcol={}):
'''
Parameters
----------
        qcol : column name holding the question text. necessary.
        selcol : column name(s) holding the selections.
        tacol : column name holding the true answer.
        argcol : dict with {'arg_name' : 'column_name'} sets.
'''
super(QuestFormExcelLoader, self).__init__(None, qcol, selcol, tacol,
argcol)
def _load(self, questdf):
import pandas as pd
if type(questdf) == str: questdf = pd.read_excel(questdf)
questform = QuestForm()
for q in range(len(questdf)):
            quest = questdf.iloc[q]
qitem = quest[self.qpattern]
selitem = quest[self.selpattern] if self.selpattern else None
taitem = quest[self.tapattern] if self.tapattern else None
argitem = {
pat: quest[self.argpattern[pat]]
for pat in self.argpattern
} if self.argpattern else {}
qitem = None if qitem is None else ([qitem] if isinstance(
qitem, str) else list(qitem))
selitem = None if selitem is None else ([selitem] if isinstance(
selitem, str) else list(selitem))
taitem = None if taitem is None else ([taitem] if isinstance(
taitem, str) else list(taitem))
questform = questform.append(
Quest(q=qitem, sel=selitem, ta=taitem, args=argitem))
return questform
class BeginQuestForm():
'''Class for rendering the exam.'''
def __init__(self,
qf,
arrange='qast',
no_score=False,
input_manner=None,
no_filter=False,
storage='l|w',
filenames=['Curdata.data', 'Wrongdata.data']):
'''
Parameters
----------
qf : QuestForm. The QuestForm that test on.
storage : str with several units separated by `|`.
each unit contains one or more of `twol`.
`t` indicates Quests that marked as true.
`w` indicates Quests that marked as false.
`o` indicates Quests that marked as others.
`l` indicates Quests that isn't marked.
filenames : list with each element indicates the filename of
the output of `storage` option.
arrange : iterable. each element should be one argument in a `Quest` object.
            `question` indicates the question text.
            `args` indicates all args.
            `selection` indicates the selection text.
            `true_answer` indicates the true answer text.
            any other string matches keywords in the `args` dict of a `Quest`
            (e.g. `label` would print the `label` entry of `args`).
            If not ambiguous, a prefix is enough: `q` or `que` for `question`,
            `s` for `selection`, `t` for `true_answer`.
        no_score : determines whether to record the True/False/others score.
input_manner : a class with a .get() method returns input text.
designed for `InteractiveAnswer` class.
no_filter : determines whether to filter the qf by `self.sel_chap`.
'''
self.qf = qf
self.starttime = datetime.now()
self.correct = []
self.wrong = []
self.other = []
self.arrange = arrange
self.storage = storage
self.store_filenames = filenames
self.no_score = no_score
self.input_manner = input_manner
self.status = []
self.no_filter = no_filter
def selchap(self, qf):
'''
Dummy function to select chapters (or filtering the QuestForm).
Override this funtion to make it work.
'''
return qf
def oninit(self):
'''Things done on initialize'''
if InteractiveAnswer('Randomize?', yes_or_no=True).get():
random.shuffle(self.arranged_index)
print('\n', '=' * BOARDER_LENGTH, '\n')
print(
space_fill(
self.starttime.strftime('%Y-%m-%d %H:%M:%S'), BOARDER_LENGTH))
print(space_fill('Find %d questions.' % (self.length), BOARDER_LENGTH))
print(space_fill('start test.', BOARDER_LENGTH))
print('\n', '=' * BOARDER_LENGTH, '\n')
def _report(self):
''' Report prints.'''
print('\n\n', '=' * BOARDER_LENGTH, '\n')
usedtime = (datetime.now() - self.starttime).seconds
(usedtime, s) = divmod(usedtime, 60)
(h, m) = divmod(usedtime, 60)
print(space_fill('Total Time: %d hours, %d minutes, %d seconds'\
%(h, m, s) ,BOARDER_LENGTH))
if self.no_score: pass
elif len(self.correct) + len(self.wrong) != 0:
c = len(self.correct)
w = len(self.wrong)
print('Correct: ', c)
print('Wrong: ', w)
print('Score: %.2f' % (c / (c + w) * 100))
print('\n', '-' * BOARDER_LENGTH, '\n')
self.show_status(h)
print('\n', '=' * BOARDER_LENGTH, '\n')
def onkill(self):
''' Things done on kill/interrupt.'''
print('\n\n', '=' * BOARDER_LENGTH, '\n')
print(space_fill('Interrupted', BOARDER_LENGTH))
self._report()
self.store_data(level=self.storage, filenames=self.store_filenames)
return
def onfinish(self):
''' Things done on finishing exam.'''
print('\n\n', '=' * BOARDER_LENGTH, '\n')
print(space_fill('Finished', BOARDER_LENGTH))
self._report()
self.store_data(level=self.storage, filenames=self.store_filenames)
return
def store_data(self,
filenames=['Curdata.data', 'Wrongdata.data'],
level='l|w'):
''' Stores data.'''
# get left quests
l = [
i for i in range(len(self.qf))
if not (_in_list(i, self.correct) | _in_list(i, self.wrong)
| _in_list(i, self.other))
]
_level = level.split('|')
for fn, lv in zip(filenames, range(len(_level))):
index = []
# add required quests to index
for i, j in zip('cwol', [self.correct, self.wrong, self.other, l]):
if i in _level[lv]: index += j
index.sort()
qf = self.qf[index]
# TODO: duplicated. add append/write method as an option
if fn == 'Curdata.data':
if len(qf) != 0:
with open(fn, 'wb') as f:
pickle.dump(qf, f)
else:
try:
os.remove(fn)
except:
pass
else:
if fn not in os.listdir():
with open(fn, 'wb') as f:
pickle.dump(qf, f)
else:
with open(fn, 'rb') as f:
data = pickle.load(f)
data = QuestForm(data + qf)
with open(fn, 'wb') as f:
pickle.dump(data, f)
def raise_quest(self, quest, **kwargs):
'''Loop to raise a `Quest` according to `self.arrange`.'''
ans = None
for a in self.arrange:
if re.findall('^' + a, 'quest'):
self.raise_q(quest, **kwargs)
elif re.findall('^' + a, 'args'):
if not quest.args: continue
for k in quest.args:
print(k + ':', quest.args[k])
elif re.findall('^' + a, 'selection'):
self.raise_sel(quest, **kwargs)
elif re.findall('^' + a, 'true_answer'):
ans = self.get_input(self.input_manner)
ans = self.check_ans(ans, quest, **kwargs)
if ans is not True or self.no_score:
self.raise_ta(quest, **kwargs)
else:
for k in quest.args:
if re.findall('^' + a, k):
print(k + ':', quest.args[k])
print('\n', '-' * BOARDER_LENGTH, '\n')
return ans
def get_input(self, input_manner=None):
'''Get user input if input_manner is not given.'''
if input_manner is None:
return input('Your Answer: ')
else:
try:
return input_manner.get()
except AttributeError:
raise TypeError('`input_manner` should have a `get()` method')
def start(self):
'''Starting point.'''
try:
if not self.no_filter: self.qf = self.selchap(self.qf)
self.length = len(self.qf)
self.arranged_index = list(range(self.length))
self.oninit()
for quest in self.arranged_index:
tof = self.raise_quest(self.qf[quest], qid=quest)
if tof is True:
self.correct.append(quest)
self.status.append(
((datetime.now() - self.starttime).seconds, 1))
elif tof is False:
self.wrong.append(quest)
self.status.append(
((datetime.now() - self.starttime).seconds, 0))
else:
self.other.append(quest)
self.status.append(
((datetime.now() - self.starttime).seconds, 2))
self.onfinish()
except (KeyboardInterrupt, EOFError):
self.onkill()
def raise_q(self, quest, **kwargs):
'''Raises question in a `Quest`. You may want to overwrite it'''
print(
'Question %d/%d: ' %
(len(self.other) + len(self.correct) + len(self.wrong) + 1,
self.length),
end='')
print('\n'.join(quest.q))
return
def raise_sel(self, quest, **kwargs):
'''Raises selections in a `Quest`. You may want to overwrite it'''
if quest.sel: print('\n'.join(quest.sel))
def raise_ta(self, quest, **kwargs):
'''Raises true answer in a `Quest`. You may want to overwrite it'''
if quest.ta: print('True Answer:', ' '.join(quest.ta))
def check_ans(self, ans, quest, **kwargs):
'''
Check answer. returns True or False or other to your convenience.
You may want to overwrite it.
'''
if self.no_score: return True
if ans == ''.join(quest.ta):
print(colorit('Correct!', 'green'))
return True
else:
print(colorit('WRONG!', 'red'))
return False
def show_status(self, hduration):
''' Show statistics before exit. '''
result = []
tempres = [0, 0, 0]
status = self.status
        if hduration == 0:
            interval = 3 * 60
        if hduration > 0:
            interval = 5 * hduration * 60
        cursec = interval
        for i in status:
            while cursec - i[0] <= 0:
                result.append(tempres)
                tempres = [0, 0, 0]
                cursec += interval
            tempres[i[1]] += 1
        result.append(tempres)
        total = interval
        for i in result:
            print('%3dm:' % (total / 60),
                  colorit('+' * i[1], 'green') + colorit('-' * i[0], 'red'))
            total += interval
return result
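# Illustrative sketch: running an exam over a hand-built QuestForm.
# `start()` drives an interactive loop on stdin, so it is only shown in a
# comment; `no_score=True` makes every answer count as correct.
def _example_exam_session():
    qf = QuestForm([
        Quest(q=['1+1=?'], sel=['A. 1', 'B. 2'], ta=['B']),
        Quest(q=['2+2=?'], sel=['A. 4', 'B. 5'], ta=['A']),
    ])
    exam = BeginQuestForm(qf, arrange='qst', no_score=True)
    # exam.start()  # prompts on stdin, then stores results via store_data()
    return exam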
|
heyrict/exam
|
exam.py
|
Python
|
apache-2.0
| 16,883 | 0.001599 |
discs = []
# Create data
discs.append((13, 11))
discs.append((5, 0))
discs.append((17, 11))
discs.append((3, 0))
discs.append((7, 2))
discs.append((19, 17))
discs.append((11, 0))
done = False
t = 0
while done is False:
done = True
    for i, disc in enumerate(discs):
        # The capsule reaches disc i at time t + i + 1; it only passes if the
        # disc (positions, start_position) is at slot 0 at that moment.
        if (t + i + 1 + disc[1]) % disc[0] != 0:
            done = False
            break
if done:
print(str(t))
break
t += 1
|
snyderks/advent-solutions
|
Day15.py
|
Python
|
mit
| 430 | 0 |
import json
import datetime
import threading
from base_plugin import *
import base_plugin
#=============================================Messaging===================================
def send_message(recipient, message, mtype='chat'):
'''
Send a message to recipient.
:param recipient: The To field of your message.
:param message: the message string to send.
:para mtype: The message type to send, supports public/private and xmpp style chat/groupchat.
'''
if mtype == 'private':
mtype = 'chat'
if mtype == 'public':
mtype = 'groupchat'
base_plugin.PluginContext.client.send_message(mto=recipient, mbody=message, mtype=mtype)
#=============================================FILTERS=====================================
#FIXME: this seems broken.
def self_message(event, plugin):
'''
filter for self generated events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if not self generated event, false otherwise.
'''
    if event.From_Nick != plugin.client.nick and plugin.client.nick in event.Body:
return True
return False
def on_message(event, plugin):
'''
filter for group chat events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a group chat event, false otherwise.
'''
if event.Type in ["groupchat"]:
return True
return False
def on_private_message(event, plugin):
'''
filter for private message events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a private message event, false otherwise.
'''
if not event.Room:
return True
return False
def on_presence(event, plugin):
'''
filter for join/part type events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a presence event, false otherwise.
'''
if event.Type in ["available", "unavailable"]:
return True
return False
#=============================================FILE OPERATORS=====================================
def put_object_to_file(item, path):
'''
Syntactic sugar, write jsonified object to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(json.dumps(item))
def get_object_from_file(path):
'''
Syntactic sugar, read jsonified object from file.
:param path: path to log file where item is stored.
Returns - json expanded item from log file.
'''
with open(path, 'r') as f:
item_str = f.read()
return json.loads(item_str)
def append_to_file(string, path):
'''
Syntactic sugar, append string to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'a') as f:
f.write(string)
def write_to_file(string, path):
'''
Syntactic sugar, write string to file.
    :param string: the string to write.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(string)
def read_from_file(path):
'''
Syntactic sugar, read from file.
:param path: path to log file where item is stored.
Returns - string contents of log file.
'''
with open(path, 'r') as f:
return f.read()
def read_lines_from_file(path):
'''
Read lines from file, as seperated by newline/enter.
:param path: path to log file
Returns - list of lines
'''
return read_from_file(path).split('\n')
#===========================================TIMED EVENTS=====================================
def schedule_event_by_delay(delay, event, args=[]):
'''
Schedule an event by a delay in seconds.
:param delay: number of seconds until event triggers.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
threading.Timer(delay, call_function_with_variable_arguments, [event, args]).start()
def schedule_event(time, event, args=[]):
'''
Schedule an event by an absolute time
:param time: the datetime object representing the trigger time.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
delta = time - datetime.datetime.now()
threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
def schedule_event(year, month, day, hour, minute, second, event, args=[]):
'''
Schedule an event by an absolute time
:param year: year of the event
:param month: month of the event
:param day: day of the event
:param hour: hour of the event
:param minute: minute of the event
:param second: second of the event
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
time = datetime.datetime(year, month, day, hour, minute, second)
delta = time - datetime.datetime.now()
threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
#==========================================HERE THERE BE DRAGONS=================================================
def call_function_with_variable_arguments(function, arguments):
'''
Takes functions, takes arguments, makes it fit.
:param function: The function to call
:param arguments: The argument list to make fit.
'''
    iterator = len(arguments)
    real_exception = None
    while True:
try:
function(*(arguments[:iterator]))
return
except Exception as e:
if not real_exception or "takes exactly" not in str(e) or "arguments" not in str(e):
real_exception = e
iterator -= 1
if iterator < 0:
raise real_exception
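# Illustrative sketch: the helper above retries a call with progressively
# fewer positional arguments until the callee's arity is satisfied.
def _example_variable_arguments():
    def greet(name, greeting):
        return '%s, %s!' % (greeting, name)
    # greet() takes two arguments, so the extra third one is dropped on retry.
    call_function_with_variable_arguments(greet, ['world', 'Hello', 'extra'])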
|
Gehn/JustAChatBot
|
plugin_utils.py
|
Python
|
mit
| 5,651 | 0.028491 |
import sys
import os
import time
try:
import mflow
except ImportError:
sys.path.append(os.environ["PWD"] + "/../")
import mflow
import logging
import numpy as np
logger = logging.getLogger("mflow.mflow")
logger.setLevel(logging.ERROR)
address = "tcp://127.0.0.1:40000"
stream = mflow.connect(address, conn_type=mflow.BIND, mode=mflow.PUSH, receive_timeout=1, queue_size=1)
for i in range(16):
try:
header = '{"htype": "array-1.0", "type": "int32", "shape": [10], "frame": %d}' % i
data = np.zeros(10, dtype=np.int32) + i
stream.send(header.encode(), send_more=True, block=True)
stream.send(data.tobytes(), block=False)
print("Sending message %d" % i)
# Send out every 10ms
time.sleep(0.01)
except KeyboardInterrupt:
break
stream.disconnect()
|
datastreaming/mflow
|
examples/sender.py
|
Python
|
gpl-3.0
| 825 | 0.003636 |
# encoding: utf-8
# Copyright (C) 2008-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Author: Aric Hagberg (hagberg@lanl.gov)
"""
Read graphs in GML format.
"GML, the G>raph Modelling Language, is our proposal for a portable
file format for graphs. GML's key features are portability, simple
syntax, extensibility and flexibility. A GML file consists of a
hierarchical key-value lists. Graphs can be annotated with arbitrary
data structures. The idea for a common file format was born at the
GD'95; this proposal is the outcome of many discussions. GML is the
standard file format in the Graphlet graph editor system. It has been
overtaken and adapted by several other systems for drawing graphs."
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Format
------
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
for format specification.
Example graphs in GML format
http://www-personal.umich.edu/~mejn/netdata/
"""
try:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
from io import StringIO
from ast import literal_eval
from collections import defaultdict
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
import re
try:
import htmlentitydefs
except ImportError:
# Python 3.x
import html.entities as htmlentitydefs
__all__ = ['read_gml', 'parse_gml', 'generate_gml', 'write_gml']
try:
long
except NameError:
long = int
try:
unicode
except NameError:
unicode = str
try:
unichr
except NameError:
unichr = chr
try:
literal_eval(r"u'\u4444'")
except SyntaxError:
# Remove 'u' prefixes in unicode literals in Python 3
rtp_fix_unicode = lambda s: s[1:]
else:
rtp_fix_unicode = None
def escape(text):
"""Use XML character references to escape characters.
Use XML character references for unprintable or non-ASCII
characters, double quotes and ampersands in a string
"""
def fixup(m):
ch = m.group(0)
return '&#' + str(ord(ch)) + ';'
text = re.sub('[^ -~]|[&"]', fixup, text)
return text if isinstance(text, str) else str(text)
def unescape(text):
"""Replace XML character references with the referenced characters"""
def fixup(m):
text = m.group(0)
if text[1] == '#':
# Character reference
if text[2] == 'x':
code = int(text[3:-1], 16)
else:
code = int(text[2:-1])
else:
# Named entity
try:
code = htmlentitydefs.name2codepoint[text[1:-1]]
except KeyError:
return text # leave unchanged
try:
return chr(code) if code < 256 else unichr(code)
except (ValueError, OverflowError):
return text # leave unchanged
return re.sub("&(?:[0-9A-Za-z]+|#(?:[0-9]+|x[0-9A-Fa-f]+));", fixup, text)
def literal_destringizer(rep):
"""Convert a Python literal to the value it represents.
Parameters
----------
rep : string
A Python literal.
Returns
-------
value : object
The value of the Python literal.
Raises
------
ValueError
If `rep` is not a Python literal.
"""
if isinstance(rep, (str, unicode)):
orig_rep = rep
if rtp_fix_unicode is not None:
rep = rtp_fix_unicode(rep)
try:
return literal_eval(rep)
except SyntaxError:
raise ValueError('%r is not a valid Python literal' % (orig_rep,))
else:
raise ValueError('%r is not a string' % (rep,))
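# Illustrative sketch: `literal_destringizer` evaluates strings that hold
# Python literals and refuses anything else, which is what makes it suitable
# as the `destringizer` argument of `read_gml`/`parse_gml`.
def _example_literal_destringizer():
    assert literal_destringizer("[1, 'a']") == [1, 'a']
    try:
        literal_destringizer("__import__('os')")
    except ValueError:
        pass  # non-literal input is rejected rather than evaluated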
@open_file(0, mode='rb')
def read_gml(path, label='label', destringizer=None):
"""Read graph in GML format from path.
Parameters
----------
path : filename or filehandle
The filename or filehandle to read from.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by `label`. Default value: 'label'.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
cannot convert a string to a value, a `ValueError` is raised. Default
value : None.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, parse_gml
Notes
-----
The GML specification says that files should be ASCII encoded, with any
extended ASCII characters (iso8859-1) appearing as HTML character entities.
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gml(G, 'test.gml')
>>> H = nx.read_gml('test.gml')
"""
def filter_lines(lines):
for line in lines:
try:
line = line.decode('ascii')
except UnicodeDecodeError:
raise NetworkXError('input is not ASCII-encoded')
if not isinstance(line, str):
                line = str(line)
if line and line[-1] == '\n':
line = line[:-1]
yield line
G = parse_gml_lines(filter_lines(path), label, destringizer)
return G
def parse_gml(lines, label='label', destringizer=None):
"""Parse GML graph from a string or iterable.
Parameters
----------
lines : string or iterable of strings
Data in GML format.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by `label`. Default value: 'label'.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
cannot convert a string to a value, a `ValueError` is raised. Default
value : None.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, read_gml
Notes
-----
This stores nested GML attributes as dictionaries in the
NetworkX graph, node, and edge attribute structures.
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
"""
def decode_line(line):
if isinstance(line, bytes):
try:
line.decode('ascii')
except UnicodeDecodeError:
raise NetworkXError('input is not ASCII-encoded')
if not isinstance(line, str):
line = str(line)
return line
def filter_lines(lines):
if isinstance(lines, (str, unicode)):
lines = decode_line(lines)
lines = lines.splitlines()
for line in lines:
yield line
else:
for line in lines:
line = decode_line(line)
if line and line[-1] == '\n':
line = line[:-1]
if line.find('\n') != -1:
raise NetworkXError('input line contains newline')
yield line
G = parse_gml_lines(filter_lines(lines), label, destringizer)
return G
def parse_gml_lines(lines, label, destringizer):
"""Parse GML into a graph.
"""
def tokenize():
patterns = [
r'[A-Za-z][0-9A-Za-z_]*\b', # keys
r'[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(?:[Ee][+-]?[0-9]+)?', # reals
r'[+-]?[0-9]+', # ints
r'".*?"', # strings
r'\[', # dict start
r'\]', # dict end
r'#.*$|\s+' # comments and whitespaces
]
tokens = re.compile(
'|'.join('(' + pattern + ')' for pattern in patterns))
lineno = 0
for line in lines:
length = len(line)
pos = 0
while pos < length:
match = tokens.match(line, pos)
if match is not None:
for i in range(len(patterns)):
group = match.group(i + 1)
if group is not None:
if i == 0: # keys
value = group.rstrip()
elif i == 1: # reals
value = float(group)
elif i == 2: # ints
value = int(group)
else:
value = group
if i != 6: # comments and whitespaces
yield (i, value, lineno + 1, pos + 1)
pos += len(group)
break
else:
raise NetworkXError('cannot tokenize %r at (%d, %d)' %
(line[pos:], lineno + 1, pos + 1))
lineno += 1
yield (None, None, lineno + 1, 1) # EOF
def unexpected(curr_token, expected):
category, value, lineno, pos = curr_token
raise NetworkXError(
'expected %s, found %s at (%d, %d)' %
(expected, repr(value) if value is not None else 'EOF', lineno,
pos))
def consume(curr_token, category, expected):
if curr_token[0] == category:
return next(tokens)
unexpected(curr_token, expected)
def parse_kv(curr_token):
dct = defaultdict(list)
while curr_token[0] == 0: # keys
key = curr_token[1]
curr_token = next(tokens)
category = curr_token[0]
if category == 1 or category == 2: # reals or ints
value = curr_token[1]
curr_token = next(tokens)
elif category == 3: # strings
value = unescape(curr_token[1][1:-1])
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
curr_token = next(tokens)
elif category == 4: # dict start
curr_token, value = parse_dict(curr_token)
else:
unexpected(curr_token, "an int, float, string or '['")
dct[key].append(value)
dct = {key: (value if not isinstance(value, list) or len(value) != 1
else value[0]) for key, value in dct.items()}
return curr_token, dct
def parse_dict(curr_token):
curr_token = consume(curr_token, 4, "'['") # dict start
curr_token, dct = parse_kv(curr_token)
curr_token = consume(curr_token, 5, "']'") # dict end
return curr_token, dct
def parse_graph():
curr_token, dct = parse_kv(next(tokens))
if curr_token[0] is not None: # EOF
unexpected(curr_token, 'EOF')
if 'graph' not in dct:
raise NetworkXError('input contains no graph')
graph = dct['graph']
if isinstance(graph, list):
raise NetworkXError('input contains more than one graph')
return graph
tokens = tokenize()
graph = parse_graph()
directed = graph.pop('directed', False)
multigraph = graph.pop('multigraph', False)
if not multigraph:
G = nx.DiGraph() if directed else nx.Graph()
else:
G = nx.MultiDiGraph() if directed else nx.MultiGraph()
G.graph.update((key, value) for key, value in graph.items()
if key != 'node' and key != 'edge')
def pop_attr(dct, category, attr, i):
try:
return dct.pop(attr)
except KeyError:
raise NetworkXError(
"%s #%d has no '%s' attribute" % (category, i, attr))
nodes = graph.get('node', [])
mapping = {}
labels = set()
for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]):
id = pop_attr(node, 'node', 'id', i)
if id in G:
raise NetworkXError('node id %r is duplicated' % (id,))
if label != 'id':
label = pop_attr(node, 'node', 'label', i)
if label in labels:
raise NetworkXError('node label %r is duplicated' % (label,))
labels.add(label)
mapping[id] = label
G.add_node(id, **node)
edges = graph.get('edge', [])
for i, edge in enumerate(edges if isinstance(edges, list) else [edges]):
source = pop_attr(edge, 'edge', 'source', i)
target = pop_attr(edge, 'edge', 'target', i)
if source not in G:
raise NetworkXError(
'edge #%d has an undefined source %r' % (i, source))
if target not in G:
raise NetworkXError(
'edge #%d has an undefined target %r' % (i, target))
if not multigraph:
if not G.has_edge(source, target):
G.add_edge(source, target, **edge)
else:
raise nx.NetworkXError(
'edge #%d (%r%s%r) is duplicated' %
(i, source, '->' if directed else '--', target))
else:
key = edge.pop('key', None)
if key is not None and G.has_edge(source, target, key):
raise nx.NetworkXError(
'edge #%d (%r%s%r, %r) is duplicated' %
(i, source, '->' if directed else '--', target, key))
G.add_edge(source, target, key, **edge)
if label != 'id':
G = nx.relabel_nodes(G, mapping)
return G
def literal_stringizer(value):
"""Convert a value to a Python literal in GML representation.
Parameters
----------
value : object
The value to be converted to GML representation.
Returns
-------
rep : string
A double-quoted Python literal representing value. Unprintable
characters are replaced by XML character references.
Raises
------
ValueError
If `value` cannot be converted to GML.
Notes
-----
`literal_stringizer` is largely the same as `repr` in terms of
functionality but attempts prefix `unicode` and `bytes` literals with
`u` and `b` to provide better interoperability of data generated by
Python 2 and Python 3.
The original value can be recovered using the
:func:`networkx.readwrite.gml.literal_destringizer` function.
"""
def stringize(value):
if isinstance(value, (int, long, bool)) or value is None:
buf.write(str(value))
elif isinstance(value, unicode):
text = repr(value)
if text[0] != 'u':
try:
value.encode('latin1')
except UnicodeEncodeError:
text = 'u' + text
buf.write(text)
elif isinstance(value, (float, complex, str, bytes)):
buf.write(repr(value))
elif isinstance(value, list):
buf.write('[')
first = True
for item in value:
if not first:
buf.write(',')
else:
first = False
stringize(item)
buf.write(']')
elif isinstance(value, tuple):
if len(value) > 1:
buf.write('(')
first = True
for item in value:
if not first:
buf.write(',')
else:
first = False
stringize(item)
buf.write(')')
elif value:
buf.write('(')
stringize(value[0])
buf.write(',)')
else:
buf.write('()')
elif isinstance(value, dict):
buf.write('{')
first = True
for key, value in value.items():
if not first:
buf.write(',')
else:
first = False
stringize(key)
buf.write(':')
stringize(value)
buf.write('}')
elif isinstance(value, set):
buf.write('{')
first = True
for item in value:
if not first:
buf.write(',')
else:
first = False
stringize(item)
buf.write('}')
else:
raise ValueError(
'%r cannot be converted into a Python literal' % (value,))
buf = StringIO()
stringize(value)
return buf.getvalue()
def generate_gml(G, stringizer=None):
"""Generate a single entry of the graph G in GML format.
Parameters
----------
G : NetworkX graph
The graph to be converted to GML.
stringizer : callable, optional
A stringizer which converts non-int/float/dict values into strings. If
it cannot convert a value into a string, it should raise a
`ValueError` raised to indicate that. Default value: None.
Returns
-------
lines: generator of strings
Lines of GML data. Newlines are not appended.
Raises
------
NetworkXError
If `stringizer` cannot convert a value into a string, or the value to
convert is not a string while `stringizer` is None.
Notes
-----
Graph attributes named 'directed', 'multigraph', 'node' or
'edge',node attributes named 'id' or 'label', edge attributes
named 'source' or 'target' (or 'key' if `G` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
"""
valid_keys = re.compile('^[A-Za-z][0-9A-Za-z]*$')
def stringize(key, value, ignored_keys, indent, in_list=False):
if not isinstance(key, (str, unicode)):
raise NetworkXError('%r is not a string' % (key,))
if not valid_keys.match(key):
raise NetworkXError('%r is not a valid key' % (key,))
if not isinstance(key, str):
key = str(key)
if key not in ignored_keys:
if isinstance(value, (int, long)):
yield indent + key + ' ' + str(value)
elif isinstance(value, float):
text = repr(value).upper()
# GML requires that a real literal contain a decimal point, but
# repr may not output a decimal point when the mantissa is
# integral and hence needs fixing.
epos = text.rfind('E')
if epos != -1 and text.find('.', 0, epos) == -1:
text = text[:epos] + '.' + text[epos:]
yield indent + key + ' ' + text
elif isinstance(value, dict):
yield indent + key + ' ['
next_indent = indent + ' '
for key, value in value.items():
for line in stringize(key, value, (), next_indent):
yield line
yield indent + ']'
elif isinstance(value, list) and value and not in_list:
next_indent = indent + ' '
for value in value:
for line in stringize(key, value, (), next_indent, True):
yield line
else:
if stringizer:
try:
value = stringizer(value)
except ValueError:
raise NetworkXError(
'%r cannot be converted into a string' % (value,))
if not isinstance(value, (str, unicode)):
raise NetworkXError('%r is not a string' % (value,))
yield indent + key + ' "' + escape(value) + '"'
multigraph = G.is_multigraph()
yield 'graph ['
# Output graph attributes
if G.is_directed():
yield ' directed 1'
if multigraph:
yield ' multigraph 1'
ignored_keys = {'directed', 'multigraph', 'node', 'edge'}
for attr, value in G.graph.items():
for line in stringize(attr, value, ignored_keys, ' '):
yield line
# Output node data
node_id = dict(zip(G, range(len(G))))
ignored_keys = {'id', 'label'}
for node, attrs in G.node.items():
yield ' node ['
yield ' id ' + str(node_id[node])
for line in stringize('label', node, (), ' '):
yield line
for attr, value in attrs.items():
for line in stringize(attr, value, ignored_keys, ' '):
yield line
yield ' ]'
# Output edge data
ignored_keys = {'source', 'target'}
kwargs = {'data': True}
if multigraph:
ignored_keys.add('key')
kwargs['keys'] = True
for e in G.edges(**kwargs):
yield ' edge ['
yield ' source ' + str(node_id[e[0]])
yield ' target ' + str(node_id[e[1]])
if multigraph:
for line in stringize('key', e[2], (), ' '):
yield line
for attr, value in e[-1].items():
for line in stringize(attr, value, ignored_keys, ' '):
yield line
yield ' ]'
yield ']'
@open_file(1, mode='wb')
def write_gml(G, path, stringizer=None):
"""Write a graph `G` in GML format to the file or file handle `path`.
Parameters
----------
G : NetworkX graph
The graph to be converted to GML.
path : filename or filehandle
The filename or filehandle to write. Files whose names end with .gz or
.bz2 will be compressed.
stringizer : callable, optional
A stringizer which converts non-int/non-float/non-dict values into
strings. If it cannot convert a value into a string, it should raise a
`ValueError` to indicate that. Default value: None.
Raises
------
NetworkXError
If `stringizer` cannot convert a value into a string, or the value to
convert is not a string while `stringizer` is None.
See Also
--------
read_gml, generate_gml
Notes
-----
Graph attributes named 'directed', 'multigraph', 'node' or
'edge',node attributes named 'id' or 'label', edge attributes
named 'source' or 'target' (or 'key' if `G` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gml(G, "test.gml")
Filenames ending in .gz or .bz2 will be compressed.
>>> nx.write_gml(G, "test.gml.gz")
"""
for line in generate_gml(G, stringizer):
path.write((line + '\n').encode('ascii'))
# fixture for nose
def teardown_module(module):
import os
for fname in ['test.gml', 'test.gml.gz']:
if os.path.isfile(fname):
os.unlink(fname)
|
JamesClough/networkx
|
networkx/readwrite/gml.py
|
Python
|
bsd-3-clause
| 23,163 | 0.000086 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_make_userprofile_user_a_onetoonefield'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='banned',
field=models.BooleanField(default=False, verbose_name='Banned'),
),
]
|
rtfd/readthedocs.org
|
readthedocs/core/migrations/0003_add_banned_status.py
|
Python
|
mit
| 406 | 0 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_get_operations.py
DESCRIPTION:
This sample demonstrates how to list/get all document model operations (succeeded, in-progress, failed)
associated with the Form Recognizer resource. Kinds of operations returned are "documentModelBuild",
"documentModelCompose", and "documentModelCopyTo". Note that operation information only persists for
24 hours. If the operation was successful, the document model can be accessed using get_model or list_models APIs.
USAGE:
python sample_get_operations.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
def sample_get_operations():
# [START list_operations]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentModelAdministrationClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_model_admin_client = DocumentModelAdministrationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
operations = list(document_model_admin_client.list_operations())
print("The following document model operations exist under my resource:")
for operation in operations:
print("\nOperation ID: {}".format(operation.operation_id))
print("Operation kind: {}".format(operation.kind))
print("Operation status: {}".format(operation.status))
print("Operation percent completed: {}".format(operation.percent_completed))
print("Operation created on: {}".format(operation.created_on))
print("Operation last updated on: {}".format(operation.last_updated_on))
print("Resource location of successful operation: {}".format(operation.resource_location))
# [END list_operations]
# [START get_operation]
# Get an operation by ID
if operations:
print("\nGetting operation info by ID: {}".format(operations[0].operation_id))
operation_info = document_model_admin_client.get_operation(operations[0].operation_id)
if operation_info.status == "succeeded":
print("My {} operation is completed.".format(operation_info.kind))
result = operation_info.result
print("Model ID: {}".format(result.model_id))
elif operation_info.status == "failed":
print("My {} operation failed.".format(operation_info.kind))
error = operation_info.error
print("{}: {}".format(error.code, error.message))
else:
print("My operation status is {}".format(operation_info.status))
else:
print("No operations found.")
# [END get_operation]
if __name__ == '__main__':
sample_get_operations()
|
Azure/azure-sdk-for-python
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_get_operations.py
|
Python
|
mit
| 3,206 | 0.004055 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pystachio import Empty, Struct
from pystachio.composite import Structural
__all__ = ('Cluster',)
# TODO(wickman) It seems like some of this Trait/Mixin stuff should be a
# first-class construct in Pystachio. It could be a solution for extensible
# Job/Task definitions.
class Cluster(dict):
"""Cluster encapsulates a set of K/V attributes describing cluster configurations.
Given a cluster, attributes may be accessed directly on them, e.g.
cluster.name
cluster.scheduler_zk_path
In order to enforce particular "traits" of Cluster, use Cluster.Trait to construct
enforceable schemas, e.g.
class ResolverTrait(Cluster.Trait):
scheduler_zk_ensemble = Required(String)
scheduler_zk_path = Default(String, '/twitter/service/mesos/prod/scheduler')
cluster = Cluster(name = 'west', scheduler_zk_ensemble = 'zookeeper.west.twttr.net')
# Ensures that scheduler_zk_ensemble is defined in the cluster or it will raise a TypeError
cluster.with_trait(ResolverTrait).scheduler_zk_ensemble
# Will use the default if none is provided on Cluster.
cluster.with_trait(ResolverTrait).scheduler_zk_path
"""
Trait = Struct # noqa
def __init__(self, **kwargs):
self._traits = ()
super(Cluster, self).__init__(**kwargs)
def get_trait(self, trait):
"""Given a Cluster.Trait, extract that trait."""
if not issubclass(trait, Structural):
raise TypeError('provided trait must be a Cluster.Trait subclass, got %s' % type(trait))
# TODO(wickman) Expose this in pystachio as a non-private or add a load method with strict=
return trait(trait._filter_against_schema(self))
def check_trait(self, trait):
"""Given a Cluster.Trait, typecheck that trait."""
trait_check = self.get_trait(trait).check()
if not trait_check.ok():
raise TypeError(trait_check.message())
def with_traits(self, *traits):
"""Return a cluster annotated with a set of traits."""
new_cluster = self.__class__(**self)
for trait in traits:
new_cluster.check_trait(trait)
new_cluster._traits = traits
return new_cluster
def with_trait(self, trait):
"""Return a cluster annotated with a single trait (helper for self.with_traits)."""
return self.with_traits(trait)
def __setitem__(self, key, value):
raise TypeError('Clusters are immutable.')
def __getattr__(self, attribute):
for trait in self._traits:
expressed_trait = self.get_trait(trait)
if hasattr(expressed_trait, attribute):
value = getattr(expressed_trait, attribute)()
return None if value is Empty else value.get()
try:
return self[attribute]
except KeyError:
return self.__getattribute__(attribute)
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
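# Illustrative sketch: declaring a trait and reading attributes through it,
# mirroring the usage described in the Cluster docstring. Names and values
# are invented for demonstration.
def _example_cluster_trait():
  from pystachio import Default, Required, String

  class ResolverTrait(Cluster.Trait):
    scheduler_zk_ensemble = Required(String)
    scheduler_zk_path = Default(String, '/example/scheduler')

  cluster = Cluster(name='west', scheduler_zk_ensemble='zookeeper.example.com')
  annotated = cluster.with_trait(ResolverTrait)
  # scheduler_zk_path is absent from the cluster, so the trait default is used.
  return annotated.scheduler_zk_ensemble, annotated.scheduler_zk_path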
|
rosmo/aurora
|
src/main/python/apache/aurora/common/cluster.py
|
Python
|
apache-2.0
| 3,365 | 0.007727 |
#!/usr/bin/env python
import sys
import os
import numpy
from osgeo import gdal, gdal_array
TAIL_TRIM = 0.01
def get_band(filename, target_percent):
ds = gdal.Open(filename)
xsize = int(ds.RasterXSize * target_percent / 100.0)
ysize = int(ds.RasterYSize * target_percent / 100.0)
image = ds.GetRasterBand(1).ReadAsArray(resample_alg = gdal.GRIORA_Average,
buf_xsize = xsize,
buf_ysize = ysize)
return image
def get_scale(image):
'''
Return the values at which to clip an image.
'''
histogram = numpy.histogram(image, 65536, (-0.5, 65535.5))[0]
# Clear the nodata:
histogram[:1] = 0
count = numpy.sum(histogram)
# Walk up the near-black side of the histogram until
# we reach the end of the first percentile:
counter = 0
scale_min = None
for i in range(len(histogram)):
counter += histogram[i]
if counter > count * TAIL_TRIM:
scale_min = i
break
# Same, but moving left from the white end:
counter = 0
scale_max = None
for i in range(len(histogram)-1, 0, -1):
counter += histogram[i]
if counter > count * TAIL_TRIM:
scale_max = i
break
return scale_min, scale_max
def scale_image(image, scale_min, scale_max):
'''
Take a (presumptively uint16) image and return it scaled into
a uint8 image stretched linearly so that scale_min is mapped
to 0 and scale_max is mapped to 255.
'''
image = image.astype('float32')
image = (255 * (image - scale_min) / (scale_max - scale_min))
image = numpy.maximum(0, numpy.minimum(255, image))
image = image.astype('uint8')
return image
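# Illustrative sketch: stretching a synthetic 16-bit ramp into an 8-bit
# image with the two helpers above. The ramp values are arbitrary.
def _example_scale():
    ramp = (numpy.arange(256) * 257).astype('uint16').reshape(1, 256)
    scale_min, scale_max = get_scale(ramp)
    return scale_image(ramp, scale_min, scale_max)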
def thumbnail(root_scene, scene_dir, verbose=False):
red_file = '%s/%s_B4.TIF' % (scene_dir, root_scene)
grn_file = '%s/%s_B3.TIF' % (scene_dir, root_scene)
blu_file = '%s/%s_B2.TIF' % (scene_dir, root_scene)
if not os.path.exists(red_file) or not os.path.exists(grn_file) \
or not os.path.exists(blu_file):
print 'Missing one or more of %s, %s and %s, skip thumbnailing.' % (
red_file, grn_file, blu_file)
return
large_thumbnail = numpy.array([
get_band(red_file, 15),
get_band(grn_file, 15),
get_band(blu_file, 15)])
small_thumbnail = numpy.array([
get_band(red_file, 3),
get_band(grn_file, 3),
get_band(blu_file, 3)])
# Set the scale values for both images from the larger one:
scale_min, scale_max = get_scale(large_thumbnail)
large_thumbnail = scale_image(large_thumbnail, scale_min, scale_max)
small_thumbnail = scale_image(small_thumbnail, scale_min, scale_max)
# TODO: Georeference these jpegs
gdal_array.SaveArray(
large_thumbnail,
'%s/%s_thumb_large.jpg' % (scene_dir, root_scene),
format = 'JPEG')
gdal_array.SaveArray(
small_thumbnail,
'%s/%s_thumb_small.jpg' % (scene_dir, root_scene),
format = 'JPEG')
for filename in os.listdir(scene_dir):
if filename.endswith('.aux.xml'):
os.unlink(os.path.join(scene_dir,filename))
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Usage: thumbnailer.py <root_scene> <scene_dir_path>'
sys.exit(1)
thumbnail(sys.argv[1], sys.argv[2])
|
landsat-pds/landsat_ingestor
|
ingestor/thumbnailer.py
|
Python
|
apache-2.0
| 3,430 | 0.005831 |
import psutil
def is_low_mem():
v = psutil.virtual_memory()
threshold = v.total / 4
    # If we have less than 25% RAM free, we should stop feeding the job system.
if v.available < threshold:
return True
return False
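# Illustrative caller sketch (assumed usage, not part of the original module):
# a feeder loop could poll is_low_mem() and back off while memory pressure is high.
#   while is_low_mem():
#       time.sleep(30)            # hypothetical back-off; would require `import time`
#   dispatch_next_job()           # hypothetical dispatch function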
|
fake-name/ReadableWebProxy
|
common/memory.py
|
Python
|
bsd-3-clause
| 226 | 0.030973 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import sys
from apiclient.http import MediaFileUpload
import apiclient.errors
import urllib
import requests
import json
import pprint
import logging
logger = logging.getLogger("gdput")
#logger.setLevel(logging.ERROR)
import random
import os
import json
from gdcmdtools.base import GDBase
from gdcmdtools.perm import GDPerm
from gdcmdtools.auth import GDAuth
DICT_OF_CONVERTIBLE_FILE_TYPE = { \
'raw':[
"Raw file",
[]],
'ss':[
"Spreadsheet",
['xls', 'xlsx', 'ods', 'csv', 'tsv', 'tab']],
'ft':[
"Fusion Table",
['csv']],
'pt':[
"Presentation",
['ppt', 'pps', 'pptx']],
'dr':[
"Drawing",
['wmf']],
'ocr':[
"OCR",
        ['jpg', 'gif', 'png', 'pdf']],
'doc':[
"Document",
['doc', 'docx', 'html', 'htm', 'txt', 'rtf']],
'gas':[
"GAS project",
['json']],
}
# FIXME: naming
class GDPut:
def __init__(
self,
source_file,
mime_type,
target_type,
folder_id,
title,
description,
location_column,
latlng_column,
permission,
csv_column_define):
logger.debug("source_file=%s, mime_type=%s, target_type=%s" %
(source_file, mime_type, target_type))
self.source_file = source_file
self.mime_type = mime_type
self.target_type = target_type
self.folder_id = folder_id
self.title = title
self.description = description
self.location_column = location_column
self.latlng_column = latlng_column
self.permission = permission
self.csv_column_define = csv_column_define
self.file_id = None
self.ft_headers = None
self.csv_latlng_suffix = "_latlng_%04x.csv" % random.getrandbits(16)
# base
auth = GDAuth()
creds = auth.get_credentials()
        if creds == None:
            raise Exception("Failed to retrieve credentials")
        self.auth_user = creds.id_token.get("email",None)
self.http = auth.get_authorized_http()
base = GDBase()
self.service = base.get_drive_service(self.http)
self.root = base.get_root()
# ft service
if target_type == "ft":
self.ft_service = base.get_ft_service(self.http)
def if_folder_writable(self):
try:
permissions = self.service.permissions().list(fileId=self.folder_id).execute()
valid_roles = ["writer", "owner"]
logger.debug(pprint.pformat(permissions))
for p in permissions["items"]:
email = p.get("emailAddress",None).lower()
role = p.get("role",None).lower()
logger.debug("email: %s, role: %s" % (email, role))
if( email == self.auth_user ) and (role in valid_roles):
return True
except:
return False
return False
def run(self):
# check folder_id
if self.folder_id:
if self.if_folder_writable() == False:
raise Exception("folder_id doesn't exist or insufficient permission: %s" % self.folder_id)
try:
result = getattr(self, self.target_type+"_put")()
except AttributeError as e:
logger.error(e)
raise
except Exception, e:
logger.error(e)
raise
return result
def get_current_user(self):
pass
def raw_put(self):
return self.generic_put(False)
def check_gas(self):
# have "id",
# have at least one file
# the file should have type, id, name items.
with open(self.source_file, "rb") as f:
jsons = json.loads(f.read())
if_ok = False
if type(jsons) != dict:
return False
self.file_id = jsons["id"]
if jsons["id"] and (len(jsons["files"]) > 0):
for j in jsons["files"]:
if j["type"] and j["id"] and j["name"]:
if_ok = True
else:
return False
return if_ok
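    # Illustrative shape of the GAS project json that check_gas() above accepts
    # (hypothetical values, shown only for documentation):
    #   {
    #     "id": "1a2b3c",
    #     "files": [
    #       {"type": "server_js", "id": "f1", "name": "Code"},
    #       {"type": "html", "id": "f2", "name": "index"}
    #     ]
    #   }
    # gas_pack() below later fills each file's "source" from the matching local file.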
def gas_pack(self):
map_type_ext = {"server_js":"js", "html":"html"}
json_packed = {}
try:
with open(self.source_file, "rb") as fr1:
jsons = json.loads(fr1.read())
path = os.path.split(self.source_file)[0]
for j in jsons["files"]:
file_name = os.path.join(path, "%s.%s" % (j["name"], map_type_ext[j["type"]]))
with open(file_name, "rb") as fr2:
file_content = fr2.read()
j["source"] = file_content
new_json = "%s.packed" % self.source_file
with open(new_json, "wb+") as fw:
fw.write(json.dumps(jsons, indent=4))
except:
return False
else:
return True
def gas_put(self):
if not self.check_gas():
raise Exception("The target file is not a GAS project json, if you like to raw-upload a json, try '-t raw'")
if not self.gas_pack():
raise Exception("Failed to pack the GAS project files")
return self.generic_put(True, file_name = "%s.packed" % self.source_file)
def check_csv(self):
self.csv_delimiter = ','
with open(self.source_file, 'rb') as csv_file:
try:
dialect = csv.Sniffer().sniff(csv_file.readline())
if dialect.delimiter == self.csv_delimiter:
return True
except:
logger.error("Failed at calling csv.Sniffer().sniff)")
return False
def csv_save_latlng(self):
rows = []
# read csv header
with open(self.source_file, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
self.ft_headers = csv_reader.next()
if self.location_column and self.latlng_column:
self.ft_headers.append(self.latlng_column)
rows.append(self.ft_headers)
# TODO: check if location in the list
index_latlng = self.ft_headers.index(self.latlng_column)
index_location = self.ft_headers.index(self.location_column)
for row in csv_reader:
latlng = self.ft_geocoding(row[index_location])
row.insert(index_latlng, latlng)
rows.append(row)
# logger.debug(rows)
# save new file
csv_file_dir = os.path.dirname(self.source_file)
csv_file_basename = os.path.basename(self.source_file)
csv_file_noextension = os.path.splitext(csv_file_basename)[0]
latlng_file = os.path.join(csv_file_dir, csv_file_noextension + self.csv_latlng_suffix)
# write csv header with latlng
with open(latlng_file, 'wb+') as csv_file:
csv_writer = csv.writer(csv_file, lineterminator='\n')
csv_writer.writerows(rows)
return latlng_file
def ss_put(self):
if not self.check_csv():
raise Exception("The delimiter of the source csv file is not '%s'" % self.csv_delimiter)
return self.generic_put(True)
def user_define_column(self, cols, csv_column_define):
return_cols = []
for (col,col_type) in zip(cols, self.csv_column_define):
d = {"type":col_type, "name":col}
return_cols.append(d)
return return_cols
# read csv and convert to the fusion table
def create_ft(self, target_file):
table = {
"name":self.title,
"description":self.description,
"isExportable":True, # FIXME
"columns":[]
}
with open(target_file, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
cols = csv_reader.next()
self.ft_headers = cols
# FIXME:
if self.location_column and self.latlng_column:
if self.location_column not in cols:
raise Exception("Column %s not found in the csv file" % self.location_column)
if self.csv_column_define == None:
for c in cols:
if c == self.latlng_column:
d = {"type":"LOCATION"}
else:
d = {"type":"STRING"}
d["name"] = c
table["columns"].append(d)
else:
table["columns"] = self.user_define_column(cols, self.csv_column_define)
elif self.location_column and not self.latlng_column:
if self.location_column not in cols:
raise Exception("Column %s not found in the csv file" % self.location_column)
if self.csv_column_define == None:
for c in cols:
if c == self.location_column:
d = {"type":"LOCATION"}
else:
d = {"type":"STRING"}
d["name"] = c
table["columns"].append(d)
else:
table["columns"] = self.user_define_column(cols, self.csv_column_define)
else:
if self.csv_column_define == None:
for c in cols:
d = {"type":"STRING", "name":c}
table["columns"].append(d)
else:
table["columns"] = self.user_define_column(cols, self.csv_column_define)
return table
def ft_put(self):
if not self.check_csv():
raise Exception("The delimiter of the source csv file is not '%s'" % self.csv_delimiter)
# save new csv file with latlng data
if self.location_column and self.latlng_column:
target_file = self.csv_save_latlng()
table = self.create_ft(target_file)
else:
table = self.create_ft(self.source_file)
#logger.debug('body=%s' % body)
# table columns are created, get tableId
service_response = self.ft_service.table().insert(body=table).execute()
#logger.debug("service_response=%s" % service_response)
table_id = service_response["tableId"]
# move to target folder
if self.folder_id != None:
new_parent = {'id': self.folder_id}
try:
self.service.parents().insert(fileId=table_id, body=new_parent).execute()
except apiclient.errors.HttpError, error:
raise Exception('An error occurred: %s' % error)
# remove from root folder
try:
self.service.parents().delete(fileId=table_id, parentId=self.root).execute()
except apiclient.errors.HttpError, error:
            raise Exception('An error occurred: %s' % error)
if self.location_column and self.latlng_column:
url = self.ft_put_body(table_id, target_file)
else:
url = self.ft_put_body(table_id, self.source_file)
if self.permission != None:
GDPerm.insert(self.service, service_response['tableId'], self.permission)
ft_url = "https://www.google.com/fusiontables/data?docid=%s" % table_id
return ft_url
def ft_put_body(self, table_id, target_file):
params = urllib.urlencode({'isStrict': "false"})
URI = "https://www.googleapis.com/upload/fusiontables/v1/tables/%s/import?%s" % (table_id, params)
METHOD = "POST"
with open(target_file) as ft_file:
# get the rows
#ft_file.next()
rows = ft_file.read()
i_newline = rows.index('\n')+1
rows = rows[i_newline:]
# weird issue here: the URI should be encoded with UTF-8 if body is UTF-8 too.
utf8_body = rows.decode('utf-8').encode('utf-8')
#logger.debug(utf8_body)
try:
response, content = self.http.request(URI.encode('utf-8'), METHOD, body=utf8_body)
except:
raise Exception('Failed at calling http.request(%s, %s, %s)'
% (URI.encode('utf-8'), METHOD, utf8_body))
content = json.loads(content)
#logger.debug(content)
@staticmethod
def ft_geocoding(address):
GEOCODING_URL = "http://maps.googleapis.com/maps/api/geocode/json"
params = {'address':address, 'sensor':'false'}
response = requests.get(GEOCODING_URL, params=params)
response_json = (response.json())
# FIXME
lat = response_json["results"][0]["geometry"]["location"]["lat"]
lng = response_json["results"][0]["geometry"]["location"]["lng"]
latlng = str(lat)+","+str(lng)
return latlng
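    # Example of the helper above (illustrative address and coordinates only):
    #   GDPut.ft_geocoding("1600 Amphitheatre Parkway, Mountain View, CA")
    #   -> "37.42,-122.08"   # a "lat,lng" string suitable for a LOCATION column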
def generic_put(self, if_convert, file_name=None):
if( file_name ):
self.source_file = file_name
media_body = MediaFileUpload(
self.source_file,
mimetype=self.mime_type,
resumable=True)
if self.folder_id == None:
parents = []
else:
parents = [{
"kind":"drive#fileLink",
"id":self.folder_id}]
body = {
'title':self.title,
'description':self.description,
'mimeType':self.mime_type,
'parents':parents}
        # FIXME: should implement both update and insert for gas and non-gas files
if self.target_type == "gas":
request = self.service.files().update(body=body, fileId=self.file_id, media_body=media_body, convert=if_convert)
else:
request = self.service.files().insert(body=body, media_body=media_body, convert=if_convert)
service_response = None
print "Uploading file: %s" % self.source_file
while service_response is None:
status, service_response = request.next_chunk(num_retries=10)
if status:
sys.stdout.write("\rCompleted: %.2f%%" % (status.progress() * 100))
sys.stdout.flush()
else:
sys.stdout.write("\rCompleted!%s\n" % (" "*10))
sys.stdout.flush()
if self.permission != None:
GDPerm.insert(self.service, service_response['id'], self.permission)
return service_response
def pt_put(self):
return self.generic_put(True)
def dr_put(self):
return self.generic_put(True)
def ocr_put(self):
return self.generic_put(True)
def doc_put(self):
return self.generic_put(True)
#raise Exception("this function is not supported yet")
|
commonssibi/gdcmdtools
|
gdcmdtools/put.py
|
Python
|
bsd-2-clause
| 15,440 | 0.009456 |
import sys
import os
import argparse
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from keras.models import load_model
from vis.visualization.saliency import visualize_cam
from train import DataGenerator
from visualize import plot_row_item
def get_model_predictions_for_npz(model, data_generator, character_name, npz_name):
npz_file_path = os.path.join(data_generator.data_path, character_name, npz_name)
pixels = np.load(npz_file_path)['pixels']
predicted_labels = model.predict(np.array([pixels]), batch_size=1)
return data_generator.encoder.one_hot_decode(predicted_labels[0].astype(np.float64))
def cam_weighted_image(model, image_path, character_idx):
pixels = np.load(image_path)['pixels']
cam = visualize_cam(model, layer_idx=-1,
filter_indices=[character_idx],
seed_input=pixels)
return np.uint8(pixels*np.dstack([cam]*3))
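# Illustrative call of cam_weighted_image (hypothetical path and index, assuming the
# .npz holds an HxWx3 'pixels' array): the returned array has the same shape as the
# input but is dimmed wherever the class-activation map is low.
#   weighted = cam_weighted_image(model, 'data/homer/frame_0001.npz', character_idx=3)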
def make_cam_plot(model, weight, image_path, cam_path, data_generator):
path_head, npz_name = os.path.split(image_path)
_, character_name = os.path.split(path_head)
model_name = os.path.basename(os.path.dirname(weight))
character_idx = data_generator.encoder.one_hot_index(character_name)
cam = cam_weighted_image(model, image_path, character_idx)
fig = plt.figure()
inner = gridspec.GridSpec(2, 1, wspace=0.05, hspace=0, height_ratios=[5, 1.2])
image_ax = plt.Subplot(fig, inner[0])
labels_ax = plt.Subplot(fig, inner[1])
character_name_to_probability = get_model_predictions_for_npz(model,
data_generator,
character_name,
npz_name)
top_character_probability = sorted(character_name_to_probability.items(),
key=lambda item_tup: item_tup[1],
reverse=True)[:3]
top_character_names, top_character_probabilities = zip(*top_character_probability)
plot_row_item(image_ax, labels_ax, cam, top_character_names, top_character_probabilities)
weight_idx = os.path.basename(weight).split('.')[1]
labels_ax.set_xlabel(npz_name)
image_ax.set_title(model_name + ', epoch ' + weight_idx)
fig.add_subplot(image_ax)
fig.add_subplot(labels_ax)
plt.savefig(os.path.join(cam_path, 'cam_{}.png'.format(weight_idx)))
plt.close(fig)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate an animation of class-activation maps")
parser.add_argument('--weight-file', required=True,
help="Model weight file")
parser.add_argument('--data-directory', required=True,
help="Directory containing the input *.npz images")
parser.add_argument('--cam-path', required=True,
help="Directory for storing CAM plots.")
parser.add_argument('--images', required=True, nargs="+",
help="Images to plot CAM for.")
args = parser.parse_args(sys.argv[1:])
data_generator = DataGenerator(args.data_directory)
model = load_model(args.weight_file)
for image in tqdm.tqdm(args.images, unit="image"):
try:
image_cam_path = os.path.join(args.cam_path, os.path.basename(image))
os.makedirs(image_cam_path)
except OSError as err:
if err.errno != os.errno.EEXIST:
raise err
make_cam_plot(model, args.weight_file, image, image_cam_path, data_generator)
|
yujanshrestha/pre-trained-keras-example
|
cam_animation.py
|
Python
|
mit
| 3,666 | 0.004364 |
from . import BasicTask
from . import SPARQLAdapter
class EntityAnnotationTask(BasicTask):
"""
EntityAnnotationTask: Annotate a list of entities with information from a SPARQL Endpoint
"""
__kernel = None # Kernel for this loader
__inputKey = 'entities' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationTask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
assert(isinstance(input, dict))
super(EntityAnnotationTask, self).execute(input)
data = input.get(self.__inputKey, None)
if data is None or not isinstance(data, list):
raise Exception("Impossible to parse these entities. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = [{"entity": word, "metadata": self.__kernel.entityExtraction(word, advancedSearch=False)} for word in data]
self.finish(data=output, failed=False, error=None)
except:
output = "Error annotating the entities"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
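# Illustrative usage sketch for the task above (assumed input shape, not part of the
# original pipeline code):
#   task = EntityAnnotationTask(name="annotate", initial_task=True)
#   result = task.execute({"entities": ["Oslo", "Norway"]})
#   # -> [{"entity": "Oslo", "metadata": {...}}, {"entity": "Norway", "metadata": {...}}]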
class EntityAnnotationURITask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationURITask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationURITask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the URI of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = self.__kernel.getUniqueURI(data)
self.finish(data=output, failed=False, error=None)
except:
output = "Error retrieving the URI of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
class EntityAnnotationTypesTask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationTypesTask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationTypesTask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the types of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = self.__kernel.getEntityType(data)
self.finish(data=output, failed=False, error=None)
except:
output = "Error retrieving the types of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
class EntityAnnotationPropertiesTask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
__withPropertyValues = True
__requestedProperties = []
def __init__(self, name, initial_task=False, withPropertyValues=True, properties=[]):
super(EntityAnnotationPropertiesTask, self).__init__(name, initial_task)
self.__withPropertyValues = withPropertyValues
self.__requestedProperties = properties
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationPropertiesTask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the properties of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = None
if not self.__requestedProperties is None and len(self.__requestedProperties) > 0:
output = {
# Property[0] => Property name
# Property[1] => Language
'properties': [self.__kernel.getProperty(data, property[0], property[1]) for property in self.__requestedProperties]
}
else:
output = self.__kernel.getProperties(data, fetchValues=self.__withPropertyValues)
self.finish(data=output, failed=False, error=None)
except:
output = "Error retrieving the properties of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
class EntityAnnotationThumbnailTask(BasicTask):
__kernel = None # Kernel for this loader
__inputKey = 'entity_name' # Expected input key
def __init__(self, name, initial_task=False):
super(EntityAnnotationThumbnailTask, self).__init__(name, initial_task)
def execute(self, input):
"""
Execute a task
"""
output = None
try:
super(EntityAnnotationThumbnailTask, self).execute(input)
data = input
if data is None:
raise Exception("Impossible to retrieve the thumbnail of a given entity. Please that input of this task! ")
self.__kernel = SPARQLAdapter()
output = self.__kernel.getThumbnail(data)
self.finish(data=output, failed=False, error=None)
except:
output = "Error retrieving the thumbnail of a given entity"
self.finish(data=None, failed=True, error=output)
return self.getOutput()
|
domenicosolazzo/jroc
|
jroc/tasks/sparql/dbpedia/EntityAnnotationTask.py
|
Python
|
gpl-3.0
| 5,822 | 0.00687 |
#!/usr/bin/env python
import os
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:
README = fp.read()
with open(os.path.join(here, 'VERSION')) as version_file:
VERSION = version_file.read().strip()
excluded_packages = ["docs", "tests", "tests.*"]
if not os.environ.get('READTHEDOCS', False):
excluded_packages += ["faker.sphinx", "faker.sphinx.*"]
# this module can be zip-safe if the zipimporter implements iter_modules or if
# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
try:
import pkgutil
import zipimport
zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
except AttributeError:
zip_safe = False
setup(
name='Faker',
version=VERSION,
description="Faker is a Python package that generates fake data for you.",
long_description=README,
entry_points={
'console_scripts': ['faker=faker.cli:execute_from_command_line'],
'pytest11': ['faker = faker.contrib.pytest.plugin'],
},
classifiers=[
# See https://pypi.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
],
keywords='faker fixtures data test mock generator',
author='joke2k',
author_email='joke2k@gmail.com',
url='https://github.com/joke2k/faker',
license='MIT License',
packages=find_packages(exclude=excluded_packages),
platforms=["any"],
zip_safe=zip_safe,
python_requires=">=3.4",
install_requires=[
"python-dateutil>=2.4",
"text-unidecode==1.3",
],
)
|
danhuss/faker
|
setup.py
|
Python
|
mit
| 2,500 | 0 |
import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest
class TestMNISTTutorialCW(CleverHansTest):
def test_mnist_tutorial_cw(self):
import tensorflow as tf
from cleverhans_tutorials import mnist_tutorial_cw
# Run the MNIST tutorial on a dataset of reduced size
# and disable visualization.
cw_tutorial_args = {'train_start': 0,
'train_end': 10000,
'test_start': 0,
'test_end': 1666,
'viz_enabled': False}
g = tf.Graph()
with g.as_default():
np.random.seed(42)
report = mnist_tutorial_cw.mnist_tutorial_cw(**cw_tutorial_args)
# Check accuracy values contained in the AccuracyReport object
self.assertTrue(report.clean_train_clean_eval > 0.85)
self.assertTrue(report.clean_train_adv_eval == 0.00)
# There is no adversarial training in the CW tutorial
self.assertTrue(report.adv_train_clean_eval == 0.)
self.assertTrue(report.adv_train_adv_eval == 0.)
g = tf.Graph()
with g.as_default():
np.random.seed(42)
report_2 = mnist_tutorial_cw.mnist_tutorial_cw(**cw_tutorial_args)
atol_fac = 1e-6
self.assertClose(report.train_clean_train_clean_eval,
report_2.train_clean_train_clean_eval,
atol=atol_fac * 1)
self.assertClose(report.train_clean_train_adv_eval,
report_2.train_clean_train_adv_eval,
atol=atol_fac * 1)
self.assertClose(report.train_adv_train_clean_eval,
report_2.train_adv_train_clean_eval,
atol=atol_fac * 1)
self.assertClose(report.train_adv_train_adv_eval,
report_2.train_adv_train_adv_eval,
atol=atol_fac * 1)
if __name__ == '__main__':
unittest.main()
|
cihangxie/cleverhans
|
tests_tf/test_mnist_tutorial_cw.py
|
Python
|
mit
| 2,033 | 0 |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Feathercoin:0.9.6.2/|/Feathercoin:0.13.(0|1|2|99)/|/Feathercoin:0.16.(0|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP.'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
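# Example of the Team Cymru origin lookup used above (illustrative IP and answer):
# 192.0.2.1 is queried as the TXT record "1.2.0.192.origin.asn.cymru.com"; the answer
# looks like "64496 | 192.0.2.0/24 | US | arin | 2010-01-01" and only the leading AS
# number (64496 here) is kept for the per-ASN cap.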
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
wellenreiter01/Feathercoin
|
contrib/seeds/makeseeds.py
|
Python
|
mit
| 5,457 | 0.003848 |
from openerp import fields, models,osv
from base_olims_model import BaseOLiMSModel
from openerp.tools.translate import _
from fields.string_field import StringField
from fields.text_field import TextField
from fields.widget.widget import TextAreaWidget
schema = (StringField('Title',
required=1,
),
TextField('Description',
widget=TextAreaWidget(
label=_('Description'),
description=_('Used in item listings and search results.')),
),
fields.One2many('olims.instrument',
'Type',
string='Type')
)
class InstrumentType(models.Model, BaseOLiMSModel):#(BaseContent):
_name = 'olims.instrument_type'
_rec_name = 'Title'
InstrumentType.initialze(schema)
|
sciCloud/OLiMS
|
models/instrumenttype.py
|
Python
|
agpl-3.0
| 794 | 0.013854 |
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import stat
import warnings
import click
from click.testing import CliRunner
from tower_cli.commands.config import config, echo_setting
from tower_cli.conf import settings, Parser
from tests.compat import unittest, mock
class ConfigTests(unittest.TestCase):
"""Establish that the `tower-cli config` command works in the way
that we expect.
"""
def setUp(self):
self.runner = CliRunner()
def test_no_arguments(self):
"""Establish that if `tower-cli config` is called with no arguments,
that we print out the current configuration.
"""
# Invoke the command.
with settings.runtime_values(username='meagan', verbose=False,
password='This is the best wine.'):
result = self.runner.invoke(config)
# Ensure that we got a 0 exit status
self.assertEqual(result.exit_code, 0)
# Ensure that the output looks correct.
self.assertIn('username: meagan', result.output)
self.assertIn('password: This is the best wine.', result.output)
self.assertIn('verbose: False', result.output)
def test_key_and_no_value(self):
"""Establish that if we are given a key and no value, that the
setting's value is printed.
"""
with settings.runtime_values(password='This is the best wine.'):
result = self.runner.invoke(config, ['password'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'password: This is the best wine.')
def test_write_setting(self):
"""Establish that if we attempt to write a valid setting, that
the parser's write method is run.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
mock_open = mock.mock_open()
filename = os.path.expanduser('~/.tower_cli.cfg')
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod') as chmod:
result = self.runner.invoke(config, ['username', 'luke'])
chmod.assert_called_once_with(filename, int('0600', 8))
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertIn(mock.call(os.path.expanduser('~/.tower_cli.cfg'), 'w'),
mock_open.mock_calls)
self.assertIn(mock.call().write('username = luke\n'),
mock_open.mock_calls)
def test_permissions_warning(self):
"""Warn user if configuration file permissions can not be set
"""
# Try to set permissions on file that does not exist, expecting warning
mock_open = mock.mock_open()
filename = '.tower_cli.cfg'
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod') as chmod:
chmod.side_effect = OSError
with mock.patch.object(warnings, 'warn') as warn:
result = self.runner.invoke(
config, ['username', 'luke', '--scope=local'])
warn.assert_called_once_with(mock.ANY, UserWarning)
chmod.assert_called_once_with(
filename, stat.S_IRUSR | stat.S_IWUSR)
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
def test_write_global_setting(self):
"""Establish that if we attempt to write a valid setting, that
the parser's write method is run.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
filename = '/etc/tower/tower_cli.cfg'
mock_open = mock.mock_open()
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os.path, 'isdir') as isdir:
with mock.patch.object(os, 'chmod') as chmod:
isdir.return_value = True
result = self.runner.invoke(
config, ['username', 'luke', '--scope=global'],
)
isdir.assert_called_once_with('/etc/tower/')
chmod.assert_called_once_with(filename, int('0600', 8))
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertIn(mock.call('/etc/tower/tower_cli.cfg', 'w'),
mock_open.mock_calls)
self.assertIn(mock.call().write('username = luke\n'),
mock_open.mock_calls)
def test_write_local_setting(self):
"""Establish that if we attempt to write a valid setting locally, that
the correct parser's write method is run.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
mock_open = mock.mock_open()
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod') as chmod:
result = self.runner.invoke(
config, ['username', 'meagan', '--scope=local'],
)
filename = ".tower_cli.cfg"
chmod.assert_called_once_with(filename, int('0600', 8))
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertIn(mock.call('.tower_cli.cfg', 'w'),
mock_open.mock_calls)
self.assertIn(mock.call().write('username = meagan\n'),
mock_open.mock_calls)
def test_unset(self):
"""Establish that calling `tower-cli config --unset` works in the
way that we expect.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
mock_open = mock.mock_open()
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os, 'chmod'):
result = self.runner.invoke(config, ['username', '--unset'])
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(),
'Configuration updated successfully.')
# Ensure that the output seems to be correct.
self.assertNotIn(mock.call().write('username = luke\n'),
mock_open.mock_calls)
def test_error_invalid_key(self):
"""Establish that if `tower-cli config` is sent an invalid key,
that we raise an exception.
"""
result = self.runner.invoke(config, ['bogus'])
self.assertEqual(result.exit_code, 1)
self.assertEqual(result.output.strip(),
'Error: Invalid configuration option "bogus".')
def test_error_value_and_unset(self):
"""Establish that if `tower-cli config` is called with both a value
and the --unset flag, that we raise an exception.
"""
result = self.runner.invoke(config, ['host', '127.0.0.1', '--unset'])
self.assertEqual(result.exit_code, 2)
self.assertEqual(result.output.strip(),
'Error: Cannot provide both a value and --unset.')
def test_error_no_global_config_file(self):
"""Establish that if no global config file exists, that tower-cli
does not attempt to create it.
"""
with mock.patch.object(os.path, 'isdir') as isdir:
isdir.return_value = False
result = self.runner.invoke(config,
['host', 'foo', '--scope=global'])
isdir.assert_called_once_with('/etc/tower/')
self.assertEqual(result.exit_code, 1)
self.assertEqual(result.output.strip(),
'Error: /etc/tower/ does not exist, and this '
'command cowardly declines to create it.')
class SupportTests(unittest.TestCase):
"""Establish that support functions in this module work in the way
that we expect.
"""
def test_echo_setting(self):
"""Establish that the `echo_setting` method works in the way
that we expect.
"""
with settings.runtime_values(host='20.12.4.21'):
with mock.patch.object(click, 'secho') as secho:
echo_setting('host')
self.assertEqual(secho.mock_calls, [
mock.call('host: ', fg='magenta', bold=True, nl=False),
mock.call('20.12.4.21', fg='white', bold=True),
])
class DeprecationTests(unittest.TestCase):
"""Establish any deprecation notices are sent with a command if they
are expected.
"""
def setUp(self):
self.runner = CliRunner()
def test_write_global_setting_deprecated(self):
"""Establish that if we attempt to write a valid setting, that
the parser's write method is run.
"""
# Invoke the command, but trap the file-write at the end
# so we don't plow over real things.
mock_open = mock.mock_open()
warning_text = 'The `--global` option is deprecated and will be '\
'removed. Use `--scope=global` to get the same effect.'
with mock.patch('tower_cli.commands.config.open', mock_open,
create=True):
with mock.patch.object(os.path, 'isdir') as isdir:
with mock.patch.object(os, 'chmod'):
with mock.patch.object(warnings, 'warn') as warn:
isdir.return_value = True
result = self.runner.invoke(
config, ['username', 'meagan', '--global'],
)
warn.assert_called_once_with(warning_text,
DeprecationWarning)
self.assertEqual(warn.mock_calls[0][1][1],
DeprecationWarning)
isdir.assert_called_once_with('/etc/tower/')
# Ensure that the command completed successfully.
self.assertEqual(result.exit_code, 0)
self.assertEqual('Configuration updated successfully.',
result.output.strip())
# Ensure that the output seems to be correct.
self.assertIn(mock.call('/etc/tower/tower_cli.cfg', 'w'),
mock_open.mock_calls)
self.assertIn(mock.call().write('username = meagan\n'),
mock_open.mock_calls)
|
tomfotherby/tower-cli
|
tests/test_commands_config.py
|
Python
|
apache-2.0
| 12,164 | 0 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
This is essentially a copy from nova.virt.images.py.
Some slight modifications, but at some point
we should look at maybe pushing this up to Oslo.
"""
import contextlib
import errno
import math
import os
import re
import tempfile
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import imageutils
from oslo_utils import timeutils
from oslo_utils import units
import psutil
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
image_helper_opts = [cfg.StrOpt('image_conversion_dir',
default='$state_path/conversion',
help='Directory used for temporary storage '
'during image conversion'), ]
CONF = cfg.CONF
CONF.register_opts(image_helper_opts)
QEMU_IMG_LIMITS = processutils.ProcessLimits(
cpu_time=8,
address_space=1 * units.Gi)
VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2',
'vhd', 'vhdx', 'parallels')
QEMU_IMG_FORMAT_MAP = {
# Convert formats of Glance images to how they are processed with qemu-img.
'iso': 'raw',
'vhd': 'vpc',
}
def validate_disk_format(disk_format):
return disk_format in VALID_DISK_FORMATS
def fixup_disk_format(disk_format):
"""Return the format to be provided to qemu-img convert."""
return QEMU_IMG_FORMAT_MAP.get(disk_format, disk_format)
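# Examples of the mapping above, taken from QEMU_IMG_FORMAT_MAP:
#   fixup_disk_format('vhd')   -> 'vpc'    # qemu-img's name for the VHD format
#   fixup_disk_format('iso')   -> 'raw'
#   fixup_disk_format('qcow2') -> 'qcow2'  # unmapped formats pass through unchanged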
def qemu_img_info(path, run_as_root=True):
"""Return an object containing the parsed output from qemu-img info."""
cmd = ['env', 'LC_ALL=C', 'qemu-img', 'info', path]
if os.name == 'nt':
cmd = cmd[2:]
out, _err = utils.execute(*cmd, run_as_root=run_as_root,
prlimit=QEMU_IMG_LIMITS)
info = imageutils.QemuImgInfo(out)
# From Cinder's point of view, any 'luks' formatted images
# should be treated as 'raw'.
if info.file_format == 'luks':
info.file_format = 'raw'
return info
def get_qemu_img_version():
info = utils.execute('qemu-img', '--version', check_exit_code=False)[0]
pattern = r"qemu-img version ([0-9\.]*)"
version = re.match(pattern, info)
if not version:
LOG.warning("qemu-img is not installed.")
return None
return _get_version_from_string(version.groups()[0])
def _get_version_from_string(version_string):
return [int(x) for x in version_string.split('.')]
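# Example: version strings are compared as lists of integers, so '2.10.0' correctly
# ranks above '2.9.0' (a plain string comparison would get this wrong):
#   _get_version_from_string('2.10.0') -> [2, 10, 0]
#   [2, 10, 0] > [2, 9, 0]             -> True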
def check_qemu_img_version(minimum_version):
qemu_version = get_qemu_img_version()
if (qemu_version is None
or qemu_version < _get_version_from_string(minimum_version)):
if qemu_version:
current_version = '.'.join((str(element)
for element in qemu_version))
else:
current_version = None
_msg = _('qemu-img %(minimum_version)s or later is required by '
'this volume driver. Current qemu-img version: '
'%(current_version)s') % {'minimum_version': minimum_version,
'current_version': current_version}
raise exception.VolumeBackendAPIException(data=_msg)
def _convert_image(prefix, source, dest, out_format,
src_format=None, run_as_root=True):
"""Convert image to other format."""
cmd = prefix + ('qemu-img', 'convert',
'-O', out_format, source, dest)
# Check whether O_DIRECT is supported and set '-t none' if it is
# This is needed to ensure that all data hit the device before
# it gets unmapped remotely from the host for some backends
# Reference Bug: #1363016
# NOTE(jdg): In the case of file devices qemu does the
# flush properly and more efficiently than would be done
# setting O_DIRECT, so check for that and skip the
# setting for non BLK devs
if (utils.is_blk_device(dest) and
volume_utils.check_for_odirect_support(source,
dest,
'oflag=direct')):
cmd = prefix + ('qemu-img', 'convert',
'-t', 'none')
        # AMI images can be raw or qcow2 but qemu-img doesn't accept "ami" as
        # an image format, so we use automatic detection.
        # TODO(geguileo): This fixes unencrypted AMI image case, but we need to
        # fix the encrypted case.
        if (src_format or '').lower() not in ('', 'ami'):
            cmd += ('-f', src_format)  # prevent detection of format
        cmd += ('-O', out_format, source, dest)
start_time = timeutils.utcnow()
utils.execute(*cmd, run_as_root=run_as_root)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
try:
image_size = qemu_img_info(source,
run_as_root=run_as_root).virtual_size
except ValueError as e:
msg = ("The image was successfully converted, but image size "
"is unavailable. src %(src)s, dest %(dest)s. %(error)s")
LOG.info(msg, {"src": source,
"dest": dest,
"error": e})
return
fsz_mb = image_size / units.Mi
mbps = (fsz_mb / duration)
msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
"duration %(duration).2f sec, destination %(dest)s")
LOG.debug(msg, {"src": source,
"sz": fsz_mb,
"duration": duration,
"dest": dest})
msg = "Converted %(sz).2f MB image at %(mbps).2f MB/s"
LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def convert_image(source, dest, out_format, src_format=None,
run_as_root=True, throttle=None):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(source, dest) as throttle_cmd:
_convert_image(tuple(throttle_cmd['prefix']),
source, dest,
out_format,
src_format=src_format,
run_as_root=run_as_root)
def resize_image(source, size, run_as_root=False):
"""Changes the virtual size of the image."""
cmd = ('qemu-img', 'resize', source, '%sG' % size)
utils.execute(*cmd, run_as_root=run_as_root)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
start_time = timeutils.utcnow()
with fileutils.remove_path_on_error(path):
with open(path, "wb") as image_file:
try:
image_service.download(context, image_id, image_file)
except IOError as e:
if e.errno == errno.ENOSPC:
params = {'path': os.path.dirname(path),
'image': image_id}
reason = _("No space left in image_conversion_dir "
"path (%(path)s) while fetching "
"image %(image)s.") % params
LOG.exception(reason)
raise exception.ImageTooBig(image_id=image_id,
reason=reason)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
fsz_mb = os.stat(image_file.name).st_size / units.Mi
mbps = (fsz_mb / duration)
msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
"duration %(duration).2f sec")
LOG.debug(msg, {"dest": image_file.name,
"sz": fsz_mb,
"duration": duration})
msg = "Image download %(sz).2f MB at %(mbps).2f MB/s"
LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def get_qemu_data(image_id, has_meta, disk_format_raw, dest, run_as_root):
# We may be on a system that doesn't have qemu-img installed. That
# is ok if we are working with a RAW image. This logic checks to see
# if qemu-img is installed. If not we make sure the image is RAW and
# throw an exception if not. Otherwise we stop before needing
# qemu-img. Systems with qemu-img will always progress through the
# whole function.
try:
# Use the empty tmp file to make sure qemu_img_info works.
data = qemu_img_info(dest, run_as_root=run_as_root)
# There are a lot of cases that can cause a process execution
# error, but until we do more work to separate out the various
# cases we'll keep the general catch here
except processutils.ProcessExecutionError:
data = None
if has_meta:
if not disk_format_raw:
raise exception.ImageUnacceptable(
reason=_("qemu-img is not installed and image is of "
"type %s. Only RAW images can be used if "
"qemu-img is not installed.") %
disk_format_raw,
image_id=image_id)
else:
raise exception.ImageUnacceptable(
reason=_("qemu-img is not installed and the disk "
"format is not specified. Only RAW images "
"can be used if qemu-img is not installed."),
image_id=image_id)
return data
def fetch_verify_image(context, image_service, image_id, dest,
user_id=None, project_id=None, size=None,
run_as_root=True):
fetch(context, image_service, image_id, dest,
None, None)
image_meta = image_service.show(context, image_id)
with fileutils.remove_path_on_error(dest):
has_meta = False if not image_meta else True
try:
format_raw = True if image_meta['disk_format'] == 'raw' else False
except TypeError:
format_raw = False
data = get_qemu_data(image_id, has_meta, format_raw,
dest, run_as_root)
# We can only really do verification of the image if we have
# qemu data to use
if data is not None:
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will
# generate an unusable image.
if size is not None and data.virtual_size > size:
params = {'image_size': data.virtual_size, 'volume_size': size}
reason = _("Size is %(image_size)dGB and doesn't fit in a "
"volume of size %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(image_id=image_id,
reason=reason)
def fetch_to_vhd(context, image_service,
image_id, dest, blocksize,
user_id=None, project_id=None, run_as_root=True):
fetch_to_volume_format(context, image_service, image_id, dest, 'vpc',
blocksize, user_id, project_id,
run_as_root=run_as_root)
def fetch_to_raw(context, image_service,
image_id, dest, blocksize,
user_id=None, project_id=None, size=None, run_as_root=True):
fetch_to_volume_format(context, image_service, image_id, dest, 'raw',
blocksize, user_id, project_id, size,
run_as_root=run_as_root)
def fetch_to_volume_format(context, image_service,
image_id, dest, volume_format, blocksize,
user_id=None, project_id=None, size=None,
run_as_root=True):
qemu_img = True
image_meta = image_service.show(context, image_id)
# NOTE(avishay): I'm not crazy about creating temp files which may be
# large and cause disk full errors which would confuse users.
# Unfortunately it seems that you can't pipe to 'qemu-img convert' because
# it seeks. Maybe we can think of something for a future version.
with temporary_file() as tmp:
has_meta = False if not image_meta else True
try:
format_raw = True if image_meta['disk_format'] == 'raw' else False
except TypeError:
format_raw = False
data = get_qemu_data(image_id, has_meta, format_raw,
tmp, run_as_root)
if data is None:
qemu_img = False
tmp_images = TemporaryImages.for_image_service(image_service)
tmp_image = tmp_images.get(context, image_id)
if tmp_image:
tmp = tmp_image
else:
fetch(context, image_service, image_id, tmp, user_id, project_id)
if is_xenserver_format(image_meta):
replace_xenserver_image_with_coalesced_vhd(tmp)
if not qemu_img:
# qemu-img is not installed but we do have a RAW image. As a
# result we only need to copy the image to the destination and then
# return.
LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
'size: %(size)s', {'tmp': tmp, 'dest': dest,
'size': image_meta['size']})
image_size_m = math.ceil(float(image_meta['size']) / units.Mi)
volume_utils.copy_volume(tmp, dest, image_size_m, blocksize)
return
data = qemu_img_info(tmp, run_as_root=run_as_root)
virt_size = int(math.ceil(float(data.virtual_size) / units.Gi))
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will
# generate an unusable image.
if size is not None and virt_size > size:
params = {'image_size': virt_size, 'volume_size': size}
reason = _("Size is %(image_size)dGB and doesn't fit in a "
"volume of size %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file, })
# NOTE(e0ne): check for free space in destination directory before
# image conversion.
check_available_space(dest, virt_size, image_id)
# NOTE(jdg): I'm using qemu-img convert to write
# to the volume regardless if it *needs* conversion or not
# TODO(avishay): We can speed this up by checking if the image is raw
# and if so, writing directly to the device. However, we need to keep
# check via 'qemu-img info' that what we copied was in fact a raw
# image and not a different format with a backing file, which may be
# malicious.
LOG.debug("%s was %s, converting to %s ", image_id, fmt, volume_format)
disk_format = fixup_disk_format(image_meta['disk_format'])
convert_image(tmp, dest, volume_format,
src_format=disk_format,
run_as_root=run_as_root)
def _validate_file_format(image_data, expected_format):
if image_data.file_format == expected_format:
return True
elif image_data.file_format == 'vpc' and expected_format == 'vhd':
# qemu-img still uses the legacy 'vpc' name for the vhd format.
return True
return False
def upload_volume(context, image_service, image_meta, volume_path,
volume_format='raw', run_as_root=True):
image_id = image_meta['id']
if (image_meta['disk_format'] == volume_format):
LOG.debug("%s was %s, no need to convert to %s",
image_id, volume_format, image_meta['disk_format'])
if os.name == 'nt' or os.access(volume_path, os.R_OK):
with open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
else:
with utils.temporary_chown(volume_path):
with open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
return
with temporary_file() as tmp:
LOG.debug("%s was %s, converting to %s",
image_id, volume_format, image_meta['disk_format'])
data = qemu_img_info(volume_path, run_as_root=run_as_root)
backing_file = data.backing_file
fmt = data.file_format
if backing_file is not None:
# Disallow backing files as a security measure.
# This prevents a user from writing an image header into a raw
# volume with a backing file pointing to data they wish to
# access.
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file})
out_format = image_meta['disk_format']
# qemu-img accepts 'vpc' as argument for 'vhd 'format and 'parallels'
# as argument for 'ploop'.
if out_format == 'vhd':
out_format = 'vpc'
if out_format == 'ploop':
out_format = 'parallels'
convert_image(volume_path, tmp, out_format,
run_as_root=run_as_root)
data = qemu_img_info(tmp, run_as_root=run_as_root)
if data.file_format != out_format:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("Converted to %(f1)s, but format is now %(f2)s") %
{'f1': out_format, 'f2': data.file_format})
with open(tmp, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
def check_virtual_size(virtual_size, volume_size, image_id):
virtual_size = int(math.ceil(float(virtual_size) / units.Gi))
if virtual_size > volume_size:
params = {'image_size': virtual_size,
'volume_size': volume_size}
reason = _("Image virtual size is %(image_size)dGB"
" and doesn't fit in a volume of size"
" %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(image_id=image_id,
reason=reason)
return virtual_size
def check_available_space(dest, image_size, image_id):
# TODO(e0ne): replace psutil with shutil.disk_usage when we drop
# Python 2.7 support.
if not os.path.isdir(dest):
dest = os.path.dirname(dest)
free_space = psutil.disk_usage(dest).free
if free_space <= image_size:
msg = ('There is no space to convert image. '
'Requested: %(image_size)s, available: %(free_space)s'
) % {'image_size': image_size, 'free_space': free_space}
raise exception.ImageTooBig(image_id=image_id, reason=msg)
def is_xenserver_format(image_meta):
return (
image_meta['disk_format'] == 'vhd'
and image_meta['container_format'] == 'ovf'
)
def set_vhd_parent(vhd_path, parentpath):
utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath)
def extract_targz(archive_name, target):
utils.execute('tar', '-xzf', archive_name, '-C', target)
def fix_vhd_chain(vhd_chain):
for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
set_vhd_parent(child, parent)
def get_vhd_size(vhd_path):
out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
return int(out)
def resize_vhd(vhd_path, size, journal):
utils.execute(
'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal)
def coalesce_vhd(vhd_path):
utils.execute(
'vhd-util', 'coalesce', '-n', vhd_path)
def create_temporary_file(*args, **kwargs):
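    """Create an empty temporary file in CONF.image_conversion_dir.

    The directory is created if it does not exist yet; the path of the
    new file is returned.
    """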
if (CONF.image_conversion_dir and not
os.path.exists(CONF.image_conversion_dir)):
os.makedirs(CONF.image_conversion_dir)
fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs)
os.close(fd)
return tmp
def cleanup_temporary_file(backend_name):
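    """Remove leftover image conversion temp files for this backend."""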
temp_dir = CONF.image_conversion_dir
if (not temp_dir or not os.path.exists(temp_dir)):
LOG.debug("Configuration image_conversion_dir is None or the path "
"doesn't exist.")
return
try:
# TODO(wanghao): Consider using os.scandir for better performance in
# future when cinder only supports Python version 3.5+.
files = os.listdir(CONF.image_conversion_dir)
# NOTE(wanghao): For multi-backend case, if one backend was slow
# starting but another backend is up and doing an image conversion,
# init_host should only clean the tmp files which belongs to its
# backend.
for tmp_file in files:
if tmp_file.endswith(backend_name):
path = os.path.join(temp_dir, tmp_file)
os.remove(path)
except OSError as e:
LOG.warning("Exception caught while clearing temporary image "
"files: %s", e)
@contextlib.contextmanager
def temporary_file(*args, **kwargs):
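    """Context manager yielding a temporary file path, removed on exit."""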
tmp = None
try:
tmp = create_temporary_file(*args, **kwargs)
yield tmp
finally:
if tmp:
fileutils.delete_if_exists(tmp)
def temporary_dir():
if (CONF.image_conversion_dir and not
os.path.exists(CONF.image_conversion_dir)):
os.makedirs(CONF.image_conversion_dir)
return utils.tempdir(dir=CONF.image_conversion_dir)
def coalesce_chain(vhd_chain):
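    """Coalesce every child VHD into its parent and return the final VHD.

    Each parent is resized to the child's size (using a journal file in a
    temporary directory) before the child is coalesced into it.
    """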
for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
with temporary_dir() as directory_for_journal:
size = get_vhd_size(child)
journal_file = os.path.join(
directory_for_journal, 'vhd-util-resize-journal')
resize_vhd(parent, size, journal_file)
coalesce_vhd(child)
return vhd_chain[-1]
def discover_vhd_chain(directory):
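    """Collect consecutively numbered '<n>.vhd' files from a directory."""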
counter = 0
chain = []
while True:
fpath = os.path.join(directory, '%d.vhd' % counter)
if os.path.exists(fpath):
chain.append(fpath)
else:
break
counter += 1
return chain
def replace_xenserver_image_with_coalesced_vhd(image_file):
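    """Collapse a XenServer image tarball into a single coalesced VHD.

    The tar.gz archive is extracted, its VHD chain repaired and coalesced,
    and the resulting VHD replaces the original archive file.
    """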
with temporary_dir() as tempdir:
extract_targz(image_file, tempdir)
chain = discover_vhd_chain(tempdir)
fix_vhd_chain(chain)
coalesced = coalesce_chain(chain)
fileutils.delete_if_exists(image_file)
os.rename(coalesced, image_file)
class TemporaryImages(object):
"""Manage temporarily downloaded images to avoid downloading it twice.
In the 'with TemporaryImages.fetch(image_service, ctx, image_id) as tmp'
clause, 'tmp' can be used as the downloaded image path. In addition,
image_utils.fetch() will use the pre-fetched image by the TemporaryImages.
This is useful to inspect image contents before conversion.
"""
def __init__(self, image_service):
self.temporary_images = {}
self.image_service = image_service
image_service.temp_images = self
@staticmethod
def for_image_service(image_service):
instance = image_service.temp_images
if instance:
return instance
return TemporaryImages(image_service)
@classmethod
@contextlib.contextmanager
def fetch(cls, image_service, context, image_id, suffix=''):
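        """Fetch an image into a temporary file and register it for reuse.

        The image is verified after download and recorded per user and
        image id so that subsequent fetches can reuse the local copy while
        the context is active.
        """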
tmp_images = cls.for_image_service(image_service).temporary_images
with temporary_file(suffix=suffix) as tmp:
fetch_verify_image(context, image_service, image_id, tmp)
user = context.user_id
if not tmp_images.get(user):
tmp_images[user] = {}
tmp_images[user][image_id] = tmp
LOG.debug("Temporary image %(id)s is fetched for user %(user)s.",
{'id': image_id, 'user': user})
yield tmp
del tmp_images[user][image_id]
LOG.debug("Temporary image %(id)s for user %(user)s is deleted.",
{'id': image_id, 'user': user})
def get(self, context, image_id):
user = context.user_id
if not self.temporary_images.get(user):
return None
return self.temporary_images[user].get(image_id)
|
eharney/cinder
|
cinder/image/image_utils.py
|
Python
|
apache-2.0
| 26,645 | 0 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import base64
import errno
import json
import os
import taskcluster
def write_secret_to_file(path, data, key, base64decode=False, json_secret=False, append=False, prefix=''):
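    """Write a single key of a Taskcluster secret payload to a file.

    The path is interpreted relative to the repository root (two directories
    above this script); the value can optionally be base64-decoded,
    JSON-serialized, prefixed, or appended to an existing file.
    """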
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../' + path))
try:
os.makedirs(os.path.dirname(path))
except OSError as error:
if error.errno != errno.EEXIST:
raise
print("Outputting secret to: {}".format(path))
with open(path, 'a' if append else 'w') as f:
value = data['secret'][key]
if base64decode:
value = base64.b64decode(value)
if json_secret:
value = json.dumps(value)
f.write(prefix + value)
def fetch_secret_from_taskcluster(name):
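    """Fetch a secret from the Taskcluster secrets service.

    Older taskcluster clients are configured via 'baseUrl' (through the
    in-task proxy); clients >=5 reject 'baseUrl', so 'rootUrl' is used
    instead.
    """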
try:
secrets = taskcluster.Secrets({
# BaseUrl is still needed for tasks that haven't migrated to taskgraph yet.
'baseUrl': 'http://taskcluster/secrets/v1',
})
except taskcluster.exceptions.TaskclusterFailure:
# taskcluster library >=5 errors out when `baseUrl` is used
secrets = taskcluster.Secrets({
'rootUrl': os.environ.get('TASKCLUSTER_PROXY_URL', 'https://taskcluster.net'),
})
return secrets.get(name)
def main():
parser = argparse.ArgumentParser(
description='Fetch a taskcluster secret value and save it to a file.')
parser.add_argument('-s', dest="secret", action="store", help="name of the secret")
parser.add_argument('-k', dest='key', action="store", help='key of the secret')
parser.add_argument('-f', dest="path", action="store", help='file to save secret to')
parser.add_argument('--decode', dest="decode", action="store_true", default=False, help='base64 decode secret before saving to file')
parser.add_argument('--json', dest="json", action="store_true", default=False, help='serializes the secret to JSON format')
parser.add_argument('--append', dest="append", action="store_true", default=False, help='append secret to existing file')
parser.add_argument('--prefix', dest="prefix", action="store", default="", help='add prefix when writing secret to file')
result = parser.parse_args()
secret = fetch_secret_from_taskcluster(result.secret)
write_secret_to_file(result.path, secret, result.key, result.decode, result.json, result.append, result.prefix)
if __name__ == "__main__":
main()
|
StYaphet/firefox-ios
|
taskcluster/scripts/get-secret.py
|
Python
|
mpl-2.0
| 2,717 | 0.004417 |
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
print(keras.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
# pixel values are 0-255
# images are 28 x 28 pixels
print(X_train_full.shape)
print(X_train_full.dtype)
# create validation set
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
class_names = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu"))
model.add(keras.layers.Dense(100, activation="relu"))
# because we have 10 exclusive classes
model.add(keras.layers.Dense(10, activation="softmax"))
print(model.summary())
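# sparse_categorical_crossentropy is appropriate here because the labels are
# integer class indices (0-9) rather than one-hot encoded vectors.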
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"],
)
history = model.fit(
X_train, y_train, epochs=30, validation_data=(X_valid, y_valid)
)
print(history)
|
1995parham/Learning
|
ml/fasion-mnist/main.py
|
Python
|
gpl-2.0
| 1,163 | 0 |
# -*- coding: utf-8 -*-
from easy_karabiner.factory import *
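# The tests below feed compact keymap/definition/filter specifications to the
# factory creators and compare the generated Karabiner XML elements
# (<autogen>, <appdef>, <replacementdef>, ...) against the expected snippets.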
def test_create_keymap():
raw_keymap = [
'KeyToKey',
['ctrl'],
['f12'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::CONTROL_L,
KeyCode::F12
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyToKey',
['ctrl', 'U'],
['end', 'shift_r', 'home', 'del', 'del', 'norepeat'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::U, ModifierFlag::CONTROL_L, ModifierFlag::NONE,
KeyCode::END, KeyCode::HOME, ModifierFlag::SHIFT_R,
KeyCode::DELETE, KeyCode::DELETE, Option::NOREPEAT
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyToKey',
['alt', 'shift', ','],
['fn', 'left'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::COMMA, ModifierFlag::OPTION_L, ModifierFlag::SHIFT_L, ModifierFlag::NONE,
KeyCode::CURSOR_LEFT, ModifierFlag::FN
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DropAllKeys',
['ModifierFlag::MY_VI_MODE'],
['DROPALLKEYS_DROP_KEY', 'DROPALLKEYS_DROP_CONSUMERKEY', 'DROPALLKEYS_DROP_POINTINGBUTTON'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DropAllKeys__
ModifierFlag::MY_VI_MODE,
Option::DROPALLKEYS_DROP_KEY,
Option::DROPALLKEYS_DROP_CONSUMERKEY,
Option::DROPALLKEYS_DROP_POINTINGBUTTON
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'SimultaneousKeyPresses',
['9', '0', '9', 'shift'],
['shift', '0', 'left']
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __SimultaneousKeyPresses__
@begin
KeyCode::KEY_9, KeyCode::KEY_0, KeyCode::KEY_9, ModifierFlag::SHIFT_L
@end
@begin
KeyCode::KEY_0, ModifierFlag::SHIFT_L, KeyCode::CURSOR_LEFT
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DoublePressModifier',
['fn'],
['cmd', 'alt', 'I'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::FN,
@begin
KeyCode::FN
@end
@begin
KeyCode::I, ModifierFlag::COMMAND_L, ModifierFlag::OPTION_L
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DoublePressModifier',
['fn'],
['F11'],
['F12'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::FN,
@begin
KeyCode::F11
@end
@begin
KeyCode::F12
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'HoldingKeyToKey',
['esc'],
['cmd_r', 'ctrl_r', 'alt_r', 'shift_r'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __HoldingKeyToKey__
KeyCode::ESCAPE,
@begin
KeyCode::ESCAPE
@end
@begin
KeyCode::COMMAND_R, ModifierFlag::CONTROL_R, ModifierFlag::OPTION_R, ModifierFlag::SHIFT_R
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyOverlaidModifier',
['caps'],
['ctrl'],
['esc'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyOverlaidModifier__
KeyCode::CAPSLOCK,
@begin
KeyCode::CONTROL_L
@end
@begin
KeyCode::ESCAPE
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyDownUpToKey',
['cmd', ','],
['cmd', 'shift', 'left'],
['cmd', 'left'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyDownUpToKey__
KeyCode::COMMA, ModifierFlag::COMMAND_L, ModifierFlag::NONE,
@begin
KeyCode::CURSOR_LEFT, ModifierFlag::COMMAND_L, ModifierFlag::SHIFT_L
@end
@begin
KeyCode::CURSOR_LEFT, ModifierFlag::COMMAND_L
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'BlockUntilKeyUp',
['sp']
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __BlockUntilKeyUp__
KeyCode::SPACE
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DropKeyAfterRemap',
['mission_control', 'MODIFIERFLAG_EITHER_LEFT_OR_RIGHT_SHIFT']
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DropKeyAfterRemap__
KeyCode::MISSION_CONTROL,
MODIFIERFLAG_EITHER_LEFT_OR_RIGHT_SHIFT
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'PassThrough',
]
k = KeymapCreater.create(raw_keymap)
s = '<autogen> __PassThrough__ </autogen>'
util.assert_xml_equal(k, s)
raw_keymap = [
'double',
['cmd', 'K'],
['up'] * 6,
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::K, ModifierFlag::COMMAND_L, ModifierFlag::NONE,
@begin
KeyCode::K, ModifierFlag::COMMAND_L
@end
@begin
KeyCode::CURSOR_UP, KeyCode::CURSOR_UP, KeyCode::CURSOR_UP,
KeyCode::CURSOR_UP, KeyCode::CURSOR_UP, KeyCode::CURSOR_UP
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'DoublePressModifier',
['cmd', 'J'],
['down'] * 6,
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __DoublePressModifier__
KeyCode::J, ModifierFlag::COMMAND_L, ModifierFlag::NONE,
@begin
KeyCode::J, ModifierFlag::COMMAND_L
@end
@begin
KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN,
KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN, KeyCode::CURSOR_DOWN
@end
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'KeyToKey',
['alt', 'E'],
['KeyCode::VK_OPEN_URL_FINDER'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __KeyToKey__
KeyCode::E, ModifierFlag::OPTION_L, ModifierFlag::NONE,
KeyCode::VK_OPEN_URL_FINDER
</autogen>'''
util.assert_xml_equal(k, s)
raw_keymap = [
'FlipScrollWheel',
['FLIPSCROLLWHEEL_HORIZONTAL', 'FLIPSCROLLWHEEL_VERTICAL'],
]
k = KeymapCreater.create(raw_keymap)
s = '''
<autogen> __FlipScrollWheel__
Option::FLIPSCROLLWHEEL_HORIZONTAL,
Option::FLIPSCROLLWHEEL_VERTICAL
</autogen>'''
util.assert_xml_equal(k, s)
def test_create_definition():
d = DefinitionCreater.create('KINDLE', ['com.amazon.Kindle'])
s = '''
<appdef>
<appname>KINDLE</appname>
<equal>com.amazon.Kindle</equal>
</appdef>'''
util.assert_xml_equal(d[0], s)
d = DefinitionCreater.create('EMACS_IGNORE_APP', [
'ECLIPSE', 'EMACS', 'TERMINAL',
'REMOTEDESKTOPCONNECTION', 'VI', 'X11',
'VIRTUALMACHINE', 'TERMINAL', 'SUBLIMETEXT',
])
s = '''
<replacementdef>
<replacementname>EMACS_IGNORE_APP</replacementname>
<replacementvalue>
ECLIPSE, EMACS, TERMINAL,
REMOTEDESKTOPCONNECTION, VI, X11,
VIRTUALMACHINE, TERMINAL, SUBLIMETEXT
</replacementvalue>
</replacementdef>'''
util.assert_xml_equal(d[0], s)
d1, d2 = DefinitionCreater.create('CHERRY_3494', ['0x046a', '0x0011'])
s1 = '''
<devicevendordef>
<vendorname>CHERRY_3494_VENDOR</vendorname>
<vendorid>0x046a</vendorid>
</devicevendordef>
'''
s2 = '''
<deviceproductdef>
<productname>CHERRY_3494_PRODUCT</productname>
<productid>0x0011</productid>
</deviceproductdef>
'''
util.assert_xml_equal(d1, s1)
util.assert_xml_equal(d2, s2)
d = DefinitionCreater.create('Open::FINDER', ['/Applications/Finder.app'])
s = '''
<vkopenurldef>
<name>KeyCode::VK_OPEN_URL_FINDER</name>
<url type="file">/Applications/Finder.app</url>
</vkopenurldef>'''
util.assert_xml_equal(d[0], s)
def test_create_filter():
f = FilterCreater.create('LOGITECH')
s = '''<device_only> DeviceVendor::LOGITECH </device_only>'''
util.assert_xml_equal(f[0], s)
f = FilterCreater.create('!EMACS_MODE_IGNORE_APPS')
s = '''<not> {{EMACS_MODE_IGNORE_APPS}} </not>'''
util.assert_xml_equal(f[0], s)
def test_create_keymaps():
raw_keymaps = [
['KeyToKey', ['Cmd'], ['Alt']],
['double', ['fn'], ['f12']],
['holding', ['ctrl'], ['esc'], ['cmd', 'alt', 'ctrl']],
]
outputs = [
'''
<autogen> __KeyToKey__
KeyCode::COMMAND_L,
KeyCode::OPTION_L
</autogen>
''',
'''
<autogen> __DoublePressModifier__
KeyCode::FN,
@begin KeyCode::FN @end
@begin KeyCode::F12 @end
</autogen>
''',
'''
<autogen> __HoldingKeyToKey__
KeyCode::CONTROL_L,
@begin KeyCode::ESCAPE @end
@begin KeyCode::COMMAND_L, ModifierFlag::OPTION_L, ModifierFlag::CONTROL_L @end
</autogen>
''',
]
keymap_objs = create_keymaps(raw_keymaps)
assert(len(keymap_objs) == len(outputs))
for i in range(len(outputs)):
util.assert_xml_equal(keymap_objs[i], outputs[i])
def test_create_definitions():
definitions = {
'BILIBILI': 'com.typcn.Bilibili',
'CHERRY_3494': ['0x046a', '0x0011'],
'UIElementRole::custom_ui': 'used as a filter',
'replacement_example': ['for', 'example', 'Xee'],
}
outputs = [
'''
<appdef>
<appname>BILIBILI</appname>
<equal>com.typcn.Bilibili</equal>
</appdef>
''',
'''
<devicevendordef>
<vendorname>CHERRY_3494_VENDOR</vendorname>
<vendorid>0x046a</vendorid>
</devicevendordef>
''',
'''
<deviceproductdef>
<productname>CHERRY_3494_PRODUCT</productname>
<productid>0x0011</productid>
</deviceproductdef>
''',
'''
<uielementroledef>custom_ui</uielementroledef>
''',
'''
<replacementdef>
<replacementname>replacement_example</replacementname>
<replacementvalue>for, example, Xee</replacementvalue>
</replacementdef>
''',
]
definition_objs = create_definitions(definitions)
assert(len(definition_objs) == len(outputs))
for i in range(len(outputs)):
util.assert_xml_equal(definition_objs[i], outputs[i])
def test_create_filters():
f = create_filters(['LOGITECH', 'LOGITECH_USB_RECEIVER'])
s = '''
<device_only>
DeviceVendor::LOGITECH,
DeviceProduct::LOGITECH_USB_RECEIVER
</device_only>'''
util.assert_xml_equal(f[0], s)
f = create_filters(['!EMACS_MODE_IGNORE_APPS', '!FINDER', '!SKIM'])
s = '''
<not>
{{EMACS_MODE_IGNORE_APPS}}, FINDER, SKIM
</not>'''
util.assert_xml_equal(f[0], s)
|
loggerhead/Easy-Karabiner
|
tests/test_factory.py
|
Python
|
mit
| 11,690 | 0.000342 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from kmip.core import enums
from kmip.demos import utils
from kmip.pie import client
if __name__ == '__main__':
logger = utils.build_console_logger(logging.INFO)
# Build and parse arguments
parser = utils.build_cli_parser(enums.Operation.CREATE)
opts, args = parser.parse_args(sys.argv[1:])
config = opts.config
algorithm = opts.algorithm
length = opts.length
# Exit early if the arguments are not specified
if algorithm is None:
logger.error('No algorithm provided, exiting early from demo')
sys.exit()
if length is None:
logger.error("No key length provided, exiting early from demo")
sys.exit()
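    # Map the algorithm name onto the CryptographicAlgorithm enum; getattr
    # returns None when the name is not a valid member.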
algorithm = getattr(enums.CryptographicAlgorithm, algorithm, None)
# Build the client and connect to the server
with client.ProxyKmipClient(config=config) as client:
try:
uid = client.create(algorithm, length)
logger.info("Successfully created symmetric key with ID: "
"{0}".format(uid))
except Exception as e:
logger.error(e)
|
viktorTarasov/PyKMIP
|
kmip/demos/pie/create.py
|
Python
|
apache-2.0
| 1,766 | 0 |