| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
from aiohttp import ClientSession
from aiowing import settings
async def test_unauthenticated_records(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.get(test_app.router['admin_records'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == test_app.router['admin_login'].url()
await resp.release()
async def test_unauthenticated_login(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
assert resp.headers.get('Location') == \
test_app.router['admin_records'].url()
await resp.release()
async def test_unauthenticated_logout(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.get(test_app.router['admin_logout'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == test_app.router['admin_login'].url()
await resp.release()
async def test_authenticated_records(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
resp = await cli.get(test_app.router['admin_records'].url(),
allow_redirects=False)
assert resp.status == 200
await resp.release()
async def test_authenticated_login(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
resp = await cli.get(test_app.router['admin_login'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == \
test_app.router['admin_records'].url()
await resp.release()
async def test_authenticated_logout(test_app, test_client):
cli = await test_client(test_app)
resp = await cli.post(test_app.router['admin_login'].url(),
data={'email': settings.SUPERUSER_EMAIL,
'password': settings.SUPERUSER_PASSWORD},
allow_redirects=False)
resp = await cli.get(test_app.router['admin_logout'].url(),
allow_redirects=False)
assert resp.headers.get('Location') == test_app.router['admin_login'].url()
await resp.release()
| embali/aiowing | aiowing/apps/admin/tests/test_admin.py | Python | mit | 2,851 | 0 |
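The test functions above rely on `test_app` and `test_client` fixtures. A minimal sketch of a conftest.py that could provide them, assuming aiohttp's pytest plugin and a hypothetical create_app() application factory (neither is taken from the aiowing sources):

import pytest
from aiowing import create_app   # hypothetical application factory, not from the original repo

pytest_plugins = 'aiohttp.pytest_plugin'   # supplies the `loop` and `test_client` fixtures

@pytest.fixture
def test_app(loop):
    # Build the aiohttp application under test on the plugin-provided event loop.
    return create_app(loop=loop)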
#!/usr/bin/env python3
# Convert MMGen 'v2' transaction file (amounts as BTCAmt())
# to MMGen 'v3' (amounts as strings)
# v3 tx files were introduced with MMGen version 0.9.7
import sys,os
repo_root = os.path.split(os.path.abspath(os.path.dirname(sys.argv[0])))[0]
sys.path = [repo_root] + sys.path
from mmgen.common import *
opts_data = {
'text': {
'desc': "Convert MMGen transaction file from v2 format to v3 format",
'usage': "<tx file>",
'options': """
-h, --help Print this help message
-d, --outdir=d Output files to directory 'd' instead of working dir
-q, --quiet Write (and overwrite) files without prompting
-S, --stdout Write data to STDOUT instead of file
"""
}
}
cmd_args = opts.init(opts_data)
import asyncio
from mmgen.tx import CompletedTX
if len(cmd_args) != 1:
opts.usage()
tx = asyncio.run(CompletedTX(cmd_args[0],quiet_open=True))
tx.file.write(ask_tty=False,ask_overwrite=not opt.quiet,ask_write=not opt.quiet)
| mmgen/mmgen | scripts/tx-v2-to-v3.py | Python | gpl-3.0 | 962 | 0.015593 |
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
import visa
from itertools import cycle, starmap, compress
import pandas as pd
import numpy as np
from collections import OrderedDict
from .force import *
from .force import (
DCForce,
StaircaseSweep,
PulsedSpot,
SPGU)
from .enums import *
from .measurement import *
from .measurement import (
MeasureSpot,
MeasureStaircaseSweep,
MeasurePulsedSpot,
)
from .setup import *
from .helpers import *
from .SMUs import *
from .dummy import DummyTester
from .loggers import exception_logger,write_logger, query_logger
class B1500():
def __init__(self, tester, auto_init=True, default_check_err=True):
self.__test_addr = tester
self._device=None
self.tests = OrderedDict()
self.__last_channel_setups={}
self.__last_channel_measurements={}
self.slots_installed={}
self._DIO_control_mode={}
self.sub_channels = []
self.__filter_all = None
self.__ADC={}
self.__TSC = None
self.__channels={}
self._recording = False
self.__HIGHSPEED_ADC={"number":None,"mode":None}
self.default_check_err=default_check_err
self.programs={}
self.__format = None
self.__outputMode = None
self.last_program=None
self.__no_store=("*RST","DIAG?","*TST?","CA","AB","RCV","WZ?","ST","END",
"SCR","VAR","LST?","CORRSER?","SER?","SIM?","SPM?",
"SPPER?","ERMOD?","ERSSP?","ERRX?","ERR?","EMG?",
"*LRN?","*OPC?","UNT?","WNU?","*SRE?","*STB?",)
if auto_init:
self.init()
def close(self):
if self._device:
self._device.close()
self._device=None
self.__keep_open = False
if self.__rm:
self.__rm.close()
self.__rm=None
def __del__(self):
self.close()
def init(self):
""" Resets the connected tester, then checks all installed modules,
querying their types and the available subchannels. It also
stores the available input and measure_ranges in the slots_installed dict,
with the slot number as key. sub channels is a list containing
all available channels"""
self.open()
self._reset()
self.slots_installed = self.__discover_slots()
self.sub_channels = []
for s,mod in self.slots_installed.items():
self.sub_channels.extend(mod.channels)
self.__channels = {i:self.slots_installed[self.__channel_to_slot(i)] for i in self.sub_channels}
self.enable_SMUSPGU()
self._check_err()
self.close()
def open(self, keep_open=False):
if not self._device:
try:
self.__rm = visa.ResourceManager()
self._device = self.__rm.open_resource(self.__test_addr)
self.__keep_open =True
except OSError as e:
exception_logger.warn("Could not find VISA driver, setting _device to std_out")
self.__rm.close()
self._device.close()
self.__rm = None
self._device = DummyTester()
def diagnostics(self, item):
""" from the manual:
- before using DiagnosticItem.trigger_IO , connect a BNC cable between the Ext Trig In and
Out connectors.
- After executing DiagnosticItem.high_voltage_LED confirm the status of LED. Then enter the AB
command
If the LED does not blink, the B1500 must be repaired.
- Before executing DiagnosticItem.digital_IO, disconnect any cable from the digital I/O port.
- Before executing interlock_open or interlock_closed , open and close the
interlock circuit respectively
"""
return self.query(format_command("DIAG?", item))
def query(self, msg, delay=None,check_error=False):
""" Writes the msg to the Tester, reads output buffer after delay and
logs both to the query logger.Optionally checks for errors afterwards"""
query_logger.info(msg)
retval=[]
if self._recording and any([x in msg for x in self.__no_store]):
self.programs[self.last_program]["config_nostore"].append(msg)
exception_logger.warn("Skipped query '{}' since not allowed while recording".format(msg))
else:
self.open()
try:
retval = self._device.query(msg, delay=delay)
query_logger.info(str(retval)+"\n")
err =self._check_err()
if err[:2]!="+0":
exception_logger.warn(err)
exception_logger.warn(msg)
finally:
if not self.__keep_open:
self.close()
return retval
def write(self, msg, check_error=False):
""" Writes the msg to the Tester and logs it in the write
logger.Optionally checks for errors afterwards"""
write_logger.info(msg)
retval = None
try:
if self._recording and any([x in msg for x in self.__no_store]):
self.programs[self.last_program]["config_nostore"].append(msg)
exception_logger.warn("Skipped query '{}' since not allowed while recording".format(msg))
else:
self.open()
retval = self._device.write(msg)
write_logger.info(str(retval)+"\n")
if check_error or self.default_check_err:
err =self._check_err()
if err[:2]!="+0":
exception_logger.warn(err)
exception_logger.warn(msg)
finally:
if not self.__keep_open:
self.close()
return retval
def read(self, check_error=False, timeout="default" ):
""" Reads out the current output buffer and logs it to the query logger
optionally checking for errors"""
retval=None
self.open()
old_timeout = self._device.timeout
if timeout != "default":
self._device.timeout = timeout
try:
if "ascii" in repr(self.__format):
retval = self._device.read()
elif "binary4" in reps(self.__format):
retval = self._device.read_raw()
elif "binary8" in reps(self.__format):
retval = self._device.read_raw()
else:
raise ValueError("Unkown format {0}".format(self.__format))
if check_error:
exception_logger.info(self._check_err())
finally:
self._device.timeout = old_timeout
if not self.__keep_open:
self.close()
return retval
def measure(self, test_tuple, force_wait=False, autoread=False):
""" Checks the channels defined in the test tuple and performs the
measurement the Setup represents. Only one type of measurement is possible,
otherwise it raises an exception."""
channels = test_tuple.channels
exc = None
data = None
num_meas = len([c for c in channels if c.measurement])
XE_measurement=any([c.measurement and c.measurement.mode in(
MeasureModes.spot,
MeasureModes.staircase_sweep,
MeasureModes.sampling,
MeasureModes.multi_channel_sweep,
MeasureModes.CV_sweep_dc_bias,
MeasureModes.multichannel_pulsed_spot,
MeasureModes.multichannel_pulsed_sweep,
MeasureModes.pulsed_spot,
MeasureModes.pulsed_sweep,
MeasureModes.staircase_sweep_pulsed_bias,
MeasureModes.quasi_pulsed_spot,
) for c in channels])
spgu_channels = [x.number for x in channels if x.spgu]
SPGU =any(spgu_channels)
search = any([x.binarysearch or x.linearsearch for x in channels])
if [XE_measurement, SPGU,search].count(True)>1:
raise ValueError("Only one type of Measurement can be defined, please check your channel setups")
# Measurements triggered by XE, read via NUB
if XE_measurement:
exc = self.write("XE")
if force_wait:
ready = 0
while ready == 0:
ready = self._operations_completed()
if autoread:
if isSweep(channels):
data = self._read_sweep(channels)
elif isSpot(channels):
data = self._read_spot()
# SPGU measurements
elif SPGU:
for x in spgu_channels:
self.__channels[x].start_pulses()
if force_wait:
self.__channels[x].wait_spgu()
elif search:
self.write("XE")
parsed_data = self.__parse_output(test_tuple.format, data, num_meas, self.__TSC) if data else data
return (exc,parsed_data)
def check_settings(self, parameter):
""" Queries the tester for the specified parameter
(see enums.py or tabcomplete for available parameters)"""
ret = self.query("*LRN? {}".format(parameter))
return ret
def SPGU(self, input_channel, pulse_base, pulse_peak, pulse_width):
""" Performs a simple SPGU pulse """
pulse_channel=Channel(number=input_channel, spgu=SPGU(pulse_base, pulse_peak,pulse_width))
test = TestSetup(channels=[pulse_channel])
self.run_test(test)
def DC_Sweep_V(self, input_channel, ground_channel, start,
stop, step, compliance, input_range=None, sweepmode=SweepMode.linear_up_down,
power_comp=None, measure_range=MeasureRanges_I.full_auto):
""" Performs a quick Voltage staircase sweep measurement on the specified channels """
return self._DC_Sweep(Targets.V, input_channel, ground_channel, start,
stop, step, compliance, input_range, sweepmode,
power_comp, measure_range)
def DC_Sweep_I(self, input_channel, ground_channel, start,
stop, step, compliance, input_range=None, sweepmode=SweepMode.linear_up_down,
power_comp=None, measure_range=MeasureRanges_I.full_auto):
""" Performs a quick Voltage staircase sweep measurement on the specified channels """
return self._DC_Sweep(Targets.I, input_channel, ground_channel, start,
stop, step, compliance, input_range, sweepmode,
power_comp, measure_range)
""" Performs a quick Current staircase sweep measurement on the specified channels """
return self._DC_Sweep(Targets.I, input_channel, ground_channel, start,
stop, step, compliance, input_range, sweepmode,
power_comp, measure_range)
def Pulsed_Spot_I(self, input_channel, ground_channel, base, pulse, width,compliance,measure_range=MeasureRanges_V.full_auto, hold=0 ):
""" Performs a quick PulsedSpot Current measurement the specified channels """
return self._Pulsed_Spot(Targets.I, input_channel, ground_channel, base, pulse, width,compliance,measure_range, hold )
def Pulsed_Spot_V(self, input_channel, ground_channel, base, pulse, width,compliance,measure_range=MeasureRanges_V.full_auto, hold=0 ):
""" Performs a quick PulsedSpot Voltage measurement the specified channels """
return self._Pulsed_Spot(Targets.V, input_channel, ground_channel, base, pulse, width,compliance,measure_range, hold )
def DC_Spot_I(self, input_channel, ground_channel, input_value,
compliance, input_range=InputRanges_I.full_auto, power_comp=None,
measure_range=MeasureRanges_V.full_auto):
""" Performs a quick Spot Current measurement the specified channels """
return self._DC_spot(Targets.I, input_channel, ground_channel, input_value,
compliance, input_range, power_comp,
measure_range)
def DC_Spot_V(self, input_channel, ground_channel, input_value,
compliance, input_range = InputRanges_V.full_auto, power_comp=None,
measure_range=MeasureRanges_I.full_auto):
""" Performs a quick Spot Voltage measurement the specified channels """
return self._DC_spot(Targets.V, input_channel, ground_channel, input_value,
compliance, input_range, power_comp,
measure_range)
def run_test(self, test_tuple, force_wait=False, auto_read=False, default_errcheck=None, force_new_setup=False):
""" Takes in a test tuple specifying channel setups and global parameters,
setups up parameters, channels and measurements accordingly and then performs the specified test,
returning gathered data if auto_read was specified. Allways
cleans up any opened channels after being run (forces zero and disconnect)"""
self.open(keep_open=True)
old_default=self.default_check_err
if default_errcheck is not None:
self.default_check_err=default_errcheck
self.set_format(test_tuple.format, test_tuple.output_mode, force_new_setup)
self.set_filter_all(test_tuple.filter, force_new_setup)
self._enable_timestamp(True, force_new_setup)
self._set_adc_global(
adc_modes=test_tuple.adc_modes,
highspeed_adc_number=test_tuple.highspeed_adc_number,
highspeed_adc_mode=test_tuple.highspeed_adc_mode, force_new_setup=force_new_setup)
try:
measurechannels = [c for c in test_tuple.channels if c.measurement]
measurements = [c.measurement for c in measurechannels]
if len(set([m.mode for m in measurements]))>1:
raise ValueError("Only 1 type of measurements allowed per setup, have {}".format(set(measurements)))
if len(measurements)>1:
if all([m.mode in (MeasureModes.spot, MeasureModes.staircase_sweep, MeasureModes.CV_sweep_dc_bias,MeasureModes.sampling) for m in measurements]):
self.set_parallel_measurements(True)
self.set_measure_mode(measurements[0].mode,*[c.number for c in measurechannels])
else:
raise ValueError("Parallel measurement only supported with spot,staircasesweep,sampling and CV-DC Bias sweep. For others, use the dedicated multichannel measurements")
elif len(measurements)==1 and measurements[0].mode not in (MeasureModes.binary_search, MeasureModes.linear_search):
self.set_measure_mode(measurements[0].mode, measurechannels[0].number)
if any([x.spgu for x in test_tuple.channels]):
if not test_tuple.spgu_selector_setup:
raise ValueError("If you want to use the spgu, you need to configure the SMUSPGU selector. seth the Testsetup.selector_setup with a list of (port,state) tuples")
self.enable_SMUSPGU()
for p,s in test_tuple.spgu_selector_setup:
self.set_SMUSPGU_selector(p, s)
for channel in test_tuple.channels:
self.setup_channel(channel, force_new_setup)
if channel.measurement:
self._setup_measurement(channel.number, channel.measurement, force_new_setup)
# resets timestamp, executes and optionally waits for answer,
# returns data with elapsed
ret = self.measure(test_tuple, force_wait,auto_read)
finally:
if len(measurements)>1:
self.set_parallel_measurements(False)
for channel in test_tuple.channels:
self._teardown_channel(channel)
if test_tuple.spgu_selector_setup:
for p,s in test_tuple.spgu_selector_setup:
self.set_SMUSPGU_selector(p, SMU_SPGU_state.open_relay)
self.default_check_err=old_default
self.close()
return ret
def _Pulsed_Spot(self, target, input_channel, ground_channel, base, pulse, width,compliance,input_range=None,measure_range=MeasureRanges_V.full_auto, hold=0 ):
if target == Targets.V:
input = Inputs.I
else:
input = Inputs.V
if input_range is None:
if input == Inputs.I:
input_range = self.__channels[input_channel].get_mincover_I(base, pulse)
else:
input_range = self.__channels[input_channel].get_mincover_V(base, pulse)
if measure_range is None:
if target == Inputs.I:
measure_range = self.__channels[input_channel].get_mincover_I(compliance)
else:
measure_range = self.__channels[input_channel].get_mincover_V(compliance)
measure_setup = MeasurePulsedSpot(target=target,
side=MeasureSides.voltage_side,
range=measure_range)
measure_channel = Channel(number=input_channel,
pulsed_spot=PulsedSpot(input=input,
input_range=input_range,
base=base,
pulse=pulse,
width=width,
compliance=compliance,
hold=hold),
measurement=measure_setup
)
ground_setup = DCForce(
input=Inputs.V,
value=0,
compliance=compliance)
ground = Channel(
number=ground_channel,
dcforce=ground_setup)
test = TestSetup(channels=[measure_channel, ground],)
return self.run_test(test)
def _DC_Sweep(self,target,input_channel,ground_channel,
start,stop,step,compliance,input_range=None,sweepmode=SweepMode.linear_up_down,
power_comp=None,measure_range=MeasureRanges_I.full_auto):
if target == Targets.V:
input = Inputs.I
else:
input = Inputs.V
if input_range is None:
if input == Inputs.I:
input_range = self.__channels[input_channel].get_mincover_I(start,stop)
else:
input_range = self.__channels[input_channel].get_mincover_V(start,stop)
if measure_range is None:
if target == Inputs.I:
measure_range = self.__channels[input_channel].get_mincover_I(compliance)
else:
measure_range = self.__channels[input_channel].get_mincover_V(compliance)
measure_setup = MeasureStaircaseSweep(target=target,
range=measure_range)
sweep_setup = StaircaseSweep(
input=Inputs.I,
start=start,
stop=stop,
step=step,
sweepmode=sweepmode,
compliance=compliance,
auto_abort=AutoAbort.enabled,
input_range=input_range,
power_comp=power_comp)
in_channel = Channel(
number=input_channel,
staircase_sweep=sweep_setup,
measurement=measure_setup)
ground_setup = DCForce(
input=Inputs.V,
value=0,
compliance=compliance)
ground = Channel(
number=ground_channel,
dcforce=ground_setup)
test = TestSetup(channels=[in_channel, ground],)
return self.run_test(test)
def _DC_spot(self,target, input_channel, ground_channel, input_value,
compliance, input_range=InputRanges_I.full_auto, power_comp=None,
measure_range=MeasureRanges_V.full_auto):
if target == Targets.V:
input = Inputs.I
else:
input = Inputs.V
if input_range is None:
if input == Inputs.I:
input_range = self.__channels[input_channel].get_mincover_I(input_value)
else:
input_range = self.__channels[input_channel].get_mincover_V(input_value)
if measure_range is None:
if target == Inputs.I:
measure_range = self.__channels[input_channel].get_mincover_I(compliance)
else:
measure_range = self.__channels[input_channel].get_mincover_V(compliance)
measure = MeasureSpot(target=target, range=measure_range)
measure_channel = Channel(number=input_channel,
dcforce=DCForce(input=input,
value=input_value,
compliance=compliance,
input_range=input_range,
),
measurement=measure)
ground_setup = DCForce(
input=Inputs.V,
value=0,
compliance=compliance,
input_range=InputRanges_V.full_auto)
ground = Channel(
number=ground_channel,
dcforce=ground_setup)
test = TestSetup(channels=[measure_channel, ground],)
return self.run_test(test)
# methods ideally used indirectly, but might want to be used for fine-grained
# control or for convenience
def _operations_completed(self):
""" Queries tester for pending operations. Timeout needs to be set to
infinite, since tester will only respond after finishing current
operation"""
old_timeout = self._device.timeout
self._device.timeout = None
ready = int(self.query("*OPC?").strip())
self._device.timeout = old_timeout
return ready
def _enable_timestamp(self, state, force_new_setup=False):
""" Enable Timestamp during measurements"""
if self.__TSC==state and not force_new_setup:
exception_logger.info("No change for timestamp, not sending")
else:
self.__TSC=state
if state:
return self.write("TSC {}".format(1))
else:
return self.write("TSC {}".format(0))
def _reset_timestamp(self):
""" Clears Timestamp counter, if 100us resolution call this at least
every 100s for FMT 1,2 or 5, every 1000 for FMT 11,12,15,21,22,25"""
self.write("TSR")
def _restore_channel(self, channel_number):
""" Restores the channel parameters before the last force_zero command"""
self.__channels[channel_number].restore(channel_number)
def check_module_operation(self, explain=False):
ret = self.query("LOP?")
if explain:
raise NotImplementedError("Explanation functionality\
to annotate error codes will come in a future release")
return ret
def set_parallel_measurements(self, state):
if state:
self.write("PAD 1")
else:
self.write("PAD 0")
def set_SMUSPGU_selector(self, port, status):
""" After being enabled as explained in __set_DIO_control_mode,
applies SMU_SPGU_state to the SMU_SPGU_port (see enums.py)"""
if not self._DIO_control_mode.get(DIO_ControlModes.SMU_PGU_Selector_16440A)==DIO_ControlState.enabled:
raise ValueError("First execute self.enable_SMUSPGU")
return self.write("ERSSP {},{}".format(port, status))
def check_SMUSPGU_selector(self, port):
""" After being enabled as explained in __set_DIO_control_mode,
queries the specified SMU_SPGU_port for its state"""
if not self._DIO_control_mode.get(DIO_ControlModes.SMU_PGU_Selector_16440A):
raise ValueError("First execute self.enable_SMUSPGU")
return self.query("ERSSP? {}".format(port))
def enable_SMUSPGU(self):
""" Shorthand for activating spgu control"""
self.__set_DIO_control_mode(DIO_ControlModes.SMU_PGU_Selector_16440A)
self._DIO_control_mode[DIO_ControlModes.SMU_PGU_Selector_16440A]=True
def __set_DIO_control_mode(self, mode, state=DIO_ControlState.enabled):
""" Sets the control mode of the tester. In order to control the SMU/PGU
Controller via ERSSP or the set_SMU_SPGU_selector first this function needs to
be executed with mode=DIO_ControlModes.SMU_PGU_Selector_16440A, state=DIO_ControlState.enabled
There is no need stated in the documentation to ever deactivate control modes, so the default is
"enable"
"""
if state==DIO_ControlState.enabled:
state=None
# HACK: the tester complains about "incorrect terminator position"
# with the mode argument; the default is enabling, so use format_command
ret = self.write(format_command("ERMOD",mode, state))
if ret == mode:
self._DIO_control_mode[mode] = state
return ret
def _check_DIO_control_mode(self):
""" Returns the state of control modes, as a some of the activated value.
the values are :
0 General Purpose control mode(always active)
1 16440A SMUSPGU
2 N1258A/N1259A
4 N1265A
8 N1266A
16 N1268A
e.g. 16440A and N1268A active=> output is 17
"""
ret = int(self.query("ERMOD?").strip())
for k,v in DIO_ControlModes.__members__.items():
if ret==v:
return v
return ret
def setup_channel(self, channel, force_new_setup=False):
""" Configures channel with any parameters which can be set before
the actual measurement or without any measurement at all"""
unit = self.__channels[channel.number]
unit.connect(channel.number)
if self.__last_channel_setups.get(unit)==channel and not force_new_setup:
# restore voltage settings to channel
unit.restore(channel.number)
exception_logger.warn("Channel configuration for channel {} has not changed, using old setup".format(channel.number))
else:
self.__last_channel_setups[unit]=channel
if not channel.spgu:
unit.set_series_resistance(channel.series_resistance,channel.number)
unit.set_selected_ADC(channel.number, channel.channel_adc)
if channel.dcforce is not None:
unit.setup_dc_force(channel.number, channel.dcforce)
elif channel.staircase_sweep is not None:
unit.setup_staircase_sweep(channel.number, channel.staircase_sweep)
elif channel.pulsed_sweep is not None:
unit.setup_pulsed_sweep(channel.number, channel.pulsed_sweep)
elif channel.pulsed_spot is not None:
unit.setup_pulsed_spot(channel.number, channel.pulsed_spot)
elif channel.quasipulse is not None:
raise NotImplementedError("Quasipulse measurements not yet implemented")
elif channel.highspeed_spot is not None:
raise NotImplementedError("HighSpeedSpot measurements not yet implemented")
elif channel.spgu is not None:
unit.setup_spgu(channel.number, channel.spgu)
elif channel.binarysearch is not None:
unit.setup_binarysearch_force(channel.binarysearch,channel=channel.number)
elif channel.linearsearch is not None:
unit.setup_linearsearch_force(channel.linearsearch,channel=channel.number)
else:
raise ValueError(
"At least one setup should be in the channel, maybe you forgot to force ground to 0?")
errors = []
ret = self._check_err()
if ret[:2]=='+0':
return ret
else:
while ret[:2]!='+0':
errors.append(ret)
ret=self._check_err()
return errors
def set_measure_mode(self,mode,*channels):
""" Defines which measurement to perform on the channel. Not used for all measurements,
check enums.py or MeasureModes for a full list of measurements. Not in SMUs because for parallel measurements, need to set all channels at once"""
self.write(format_command("MM",mode,*channels))
def _setup_measurement(self,channel_number, measurement, force_new_setup=False):
""" Sets up all parameters containing to the measurement. This is a
dispatcher function, since a lot fo measurements have overlapping setup.
Parameters Concerning the channel setup are handled in the respective
setup_X functions, this function and its callees are only concerned with
the measurements themselves."""
if measurement.mode in [
MeasureModes.spot,
MeasureModes.staircase_sweep,
MeasureModes.sampling,
MeasureModes.multi_channel_sweep,
MeasureModes.CV_sweep_dc_bias,
MeasureModes.multichannel_pulsed_spot,
MeasureModes.multichannel_pulsed_sweep,
MeasureModes.pulsed_spot,
MeasureModes.pulsed_sweep,
MeasureModes.staircase_sweep_pulsed_bias,
MeasureModes.quasi_pulsed_spot,
]:
self.__channels[channel_number]._setup_xe_measure(measurement,channel=channel_number )
elif measurement.mode in [
MeasureModes.spot_C,
MeasureModes.pulsed_spot_C,
MeasureModes.pulsed_sweep_CV,
MeasureModes.sweep_Cf,
MeasureModes.sweep_CV_ac_level,
MeasureModes.sampling_Ct,
]:
raise NotImplemented("CapacitanceMeasurement not yet implemented")
elif measurement.mode == MeasureModes.quasi_static_cv:
raise NotImplemented("QuasistatiCV measurement not yet implemented")
elif measurement.mode ==MeasureModes.binary_search:
self.__channels[channel_number].setup_binarysearch_measure(measurement,channel=channel_number )
elif measurement.mode==MeasureModes.linear_search:
self.__channels[channel_number].setup_linearsearch_measure(measurement,channel=channel_number )
else:
raise ValueError("Unknown Measuremode")
def clear_buffer(self):
return self.write("BC")
def set_filter_all(self, filter_state, force_new_setup=False):
""" Sets the spike and overshoot filter on the SMU output."""
if self.__filter_all==filter_state and not force_new_setup:
exception_logger.info("No change in filter, not sending command")
else:
self.__filter_all=filter_state
return self.write("FL {}".format(filter_state))
# individual commands (grouped only for input/target types, i.e. V/I
def _set_adc_global(
self,
adc_modes=[], # list of (adctype,admode) tuples, maximum 3, see enums.py
highspeed_adc_number=None,
highspeed_adc_mode=None, force_new_setup=False):
""" Set the configration for the different ADC types, switching between
manual and auto modes for all ADCs and specifying samples/integration time
for the highspeed ADC"""
if adc_modes:
return [self.set_adc(adctype, adcmode,force_new_setup) for adctype,adcmode in adc_modes]
else:
if highspeed_adc_number is None or highspeed_adc_mode is None:
raise ValueError(
"Either give complete adc mapping or specify highspeed ADC")
self.set_highspeed_ADC(highspeed_adc_number, highspeed_adc_mode,force_new_setup)
def set_adc(self, adc, mode, force_new_setup=False):
""" Set the configration for the different ADC types, switching between
manual and auto modes for all ADC
"""
if not self.__ADC.get(adc)==mode or force_new_setup:
self.__ADC[adc]=mode
self.write(format_command("AIT",adc,mode))
else:
exception_logger.info("No change for adc {}, not sending AIT".format(adc))
def set_highspeed_ADC(self, number, mode, force_new_setup=False):
if (not (self.__HIGHSPEED_ADC["number"]== number and self.__HIGHSPEED_ADC["mode"]==mode)) or force_new_setup:
return self.write(
"AV {}, {}".format(
number,
mode))
else:
exception_logger.info("AV parameters not changed, no write")
def set_format(self, format, output_mode, force_new_setup=False):
""" Specifies output mode and format to use for testing. Check
Formats enum for more details"""
if not (self.__format == format and self.__outputMode == output_mode) or force_new_setup :
self.__format = format
self.__outputMode = output_mode
self.write("FMT {},{}".format(format, output_mode))
else:
exception_logger.info("FMT parameters not changed, no write")
def _check_modules(self, mainframe=False):
""" Queries for installed modules and optionally mainframes connected"""
if mainframe:
return self.query("UNT? 1")
else:
return self.query("UNT? 0")
def record_program(self,program_name):
if self._recording:
raise ValueError("Already recording")
id = self.programs[self.last_program]["index"] + 1 if self.last_program else 1
self.write("ST {}".format(id))
self._recording = True
self.programs[program_name]={}
self.programs[program_name]["index"]= id
self.programs[program_name]["steps"]=[]
self.programs[program_name]["config_nostore"]=[]
self.last_program=program_name
def stop_recording(self):
self._recording = False
exception_logger.info("Recorded program {} with index {} and the following steps".format(self.last_program,self.programs[self.last_program]["index"]))
exception_logger.info("\n".join(self.programs[self.last_program]["steps"]))
exception_logger.info("as well as the following captured steps(check these manually before exeting the program, or execute the self.nostore_execute if you are sure all them are idempotent")
exception_logger.info("\n".join(self.programs[self.last_program]["config_nostore"]))
def run_prog(self, program_name):
if self._recording:
raise ValueError("still recording")
self.write("DO {}".format(self.programs[program_name]["index"]))
def run_progs_by_ids(self, *ids):
""" Runs the specified programs, in order of the ids given"""
if self._recording:
raise ValueError("still recording")
if any(i not in [p["index"] for p in self.programs.values()] for i in ids):
raise ValueError("One of your specified ids not in the buffer")
if len(ids)>8:
raise ValueError("You can only specify 8 programs at once")
self.write(format_command("DO",*ids))
def _reset(self):
""" Reset Tester"""
query = "*RST"
return self.write(query)
def _check_err(self, all=False, timeout=20000):
""" check for single error, or all errors in stack"""
query = "ERRX?"
if not self._recording:
old_timeout = self._device.timeout
self._device.timeout=timeout
ret = self._device.query(query)
self._device.timeout=old_timeout
if all:
results = []
while ret[:2]!='+0':
exception_logger.warn(ret)
results.append(ret)
ret = self._device.query(query)
return results
if ret[:2]!='+0':
exception_logger.warn(ret)
return ret
else:
exception.warn("Skipped query \"{}\" since it is not allowed while recording".format(query))
def _zero_channel(self, channel):
""" Force Channel voltage to zero, saving previous parameters"""
return self.write("DZ {}".format(channel))
def _idle_channel(self, channel):
return self.write(format_command("IN",channel))
def _close_channel(self, channel):
""" Disconnect channel"""
self.write("CL {}".format(channel))
def __channel_to_slot(self, channel):
""" Converts a subchannel to its slot, in order to access the stored
available Ranges"""
if not channel in self.sub_channels:
raise ValueError("Invalid Channel value")
return self.slots_installed[int(str(channel)[0])].slot
def _teardown_channel(self, channel):
""" Force Channel to zero and then disconnect """
if channel.number not in self.sub_channels:
exception_logger.warn("No channel {} installed, only have \n{}\n, proceeding with teardown but call check_err and verify your setup".format(channel, self.sub_channels))
self._zero_channel(channel.number)
self._close_channel(channel.number)
# methods only used in discovery,intended to be used only by via public calls,
# not directly
def __discover_slots(self):
""" Queries installed modules, then checks their type and their available ranges"""
slots = {}
ret = self._check_modules()
for i,x in enumerate(ret.strip().split(";")):
if x!="0,0":
slots[i+1]=self.__getModule(x.split(",")[0], i+1)
return slots
def _read_spot(self):
for i in range(10): # retry 10 times when failing
try:
ret = self.read()
except Exception:
continue
return ret
def _read_sweep(self, channels):
self.query("NUB?")
return self.read()
def __parse_output(self, test_format, output, num_measurements, timestamp):
try:
if test_format in (Format.binary4, Format.binary4_crl):
return parse_binary4(output)
elif test_format in (Format.binary8, Format.binary8_crl):
return parse_binary8(output)
else:
frame,series_dict= parse_ascii_default_dict(test_format, output)
return (frame,series_dict,output)
#return parse_ascii(test_format, output ,num_measurements, timestamp, self.__outputMode)
except Exception as e:
exception_logger.warn(e)
return output
def __getModule(self,model, slot):
""" Returns tuples of the available OutputRanging used for input_range settings, based on the model. Based on Pages 4-22 and 4-16 of B1500 manual"""
if model =="B1510A": # HPSMU High power source/monitor unit
return HPSMU(self, slot)
if model in ("B1511A","B1511B"): # MPSMU Medium power source/monitor unit
return MPSMU(self, slot)
if model =="B1512A": # HCSMU High current source/monitor unit
return HCSMU(self, slot)
if model in ("B1513A", "B1513B"): # HVSMU High voltage source/monitor unit
return HVSMU(self, slot)
if model =="B1514A": # MCSMU Medium current source/monitor unit
return MCSMU(self, slot)
if model =="B1517A": # HRSMU High resolution source/monitor unit
return HRSMU(self, slot)
if model =="B1520A": # MFCMU CMU Multi frequency capacitance measurement unit
return MFCFMU(self, slot)
elif model =="B1525A": # HVSPGU SPGU High voltage semiconductor pulse generator unit
return HVSPGU(self, slot)
else:
exception_logger.warn("We don't know this model {0} in slot {1}, thus we don't support it".format(model, slot))
| wojcech/agilentpyvisa | agilentpyvisa/B1500/tester.py | Python | agpl-3.0 | 39,893 | 0.008623 |
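A brief usage sketch for the B1500 driver above (illustrative only: the VISA address and the SMU subchannel numbers are assumptions, and the import path is inferred from the repository layout rather than taken from its documentation):

from agilentpyvisa.B1500.tester import B1500

b1500 = B1500("GPIB0::17::INSTR")   # resets the tester and discovers the installed modules
# Force 1.0 V on subchannel 101 against grounded subchannel 301 with 100 mA compliance
# and read back the resulting current as a spot measurement.
result = b1500.DC_Spot_I(input_channel=101, ground_channel=301,
                         input_value=1.0, compliance=0.1)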
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
import os
import datetime
import logging
from py_modelica.exception_classes import ModelicaInstantiationError
from abc import ABCMeta, abstractmethod
class ToolBase:
__metaclass__ = ABCMeta
tool_name = ''
tool_version = ''
tool_version_nbr = ''
model_config = None
date_time = ''
## instance variables
tool_path = '' # path to the bin folder of the tool
model_file_name = '' # file that needs to be loaded
model_name = '' # name of the model in the loaded packages
msl_version = '' # version of Modelica Standard Library
mos_file_name = '' # modelica script files for compiling the model
result_mat = '' # contains the latest simulation results
base_result_mat = '' # contains the expected simulation results
working_dir = '' # contains the temporary files and executables
root_dir = ''
mo_dir = '' # contains the modelica file, (package or model)
output_dir = '' # relative or absolute
variable_filter = [] # list of names of variables to save/load to/from mat-file
experiment = {} # dictionary with StartTime, StopTime, Tolerance,
# NumberOfIntervals, Interval and Algorithm.
model_is_compiled = False # flag for telling if the model was compiled
model_did_simulate = False # flag for telling if the model has been simulated
lib_package_paths = [] # paths to additional packages
lib_package_names = [] # names of additional packages
max_simulation_time = 43200 # (=12h) time threshold before simulation is aborted
## Variables with execution statistics
compilation_time = -1
translation_time = -1
make_time = -1
simulation_time = -1
total_time = -1
def _initialize(self,
model_config):
"""
Creates a new instance of a modelica simulation.
dictionary : model_config
Mandatory Keys : 'model_name' (str), 'model_file_name' (str)
Optional Keys : 'MSL_version' (str), 'variable_filter' ([str]),
'result_file' (str), 'experiment' ({str})
"""
print ' --- ===== See debug.log for error/debug messages ===== --- \n'
print ' in {0}'.format(os.getcwd())
# create a logger, (will only be written to if no other logger defined 'higher' up)
logging.basicConfig(filename="debug.log",
format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
log = logging.getLogger()
# always use highest level of debugging
log.setLevel(logging.DEBUG)
log.debug(" --- ==== ******************************* ==== ---")
log.info(" --- ==== ******* New Run Started ******* ==== ---")
self.date_time = '{0}'.format(datetime.datetime.today())
log.debug(" --- ==== * {0} ** ==== ---".format(self.date_time))
log.debug(" --- ==== ******************************* ==== ---")
log.debug("Entered _initialize")
log.info("tool_name : {0}".format(self.tool_name))
log.info("tool_path : {0}".format(self.tool_path))
self.root_dir = os.getcwd()
self.model_config = model_config
# Mandatory keys in dictionary
try:
model_file_name = self.model_config['model_file_name']
if model_file_name == "":
self.model_file_name = ""
log.info("No model_file name given, assumes model is in Modelica Standard Library")
else:
self.model_file_name = os.path.normpath(os.path.join(os.getcwd(), model_file_name))
self.mo_dir = os.path.dirname(self.model_file_name)
log.info("mo_dir : {}".format(self.mo_dir))
log.info("model_file_name : {0}".format(self.model_file_name))
model_name = self.model_config['model_name']
if model_name == "":
base_name = os.path.basename(model_file_name)
self.model_name = os.path.splitext(base_name)[0]
log.info("No model_name given, uses model_file_name without .mo")
else:
self.model_name = model_name
log.info("model_name : {0}".format(self.model_name))
except KeyError as err:
raise ModelicaInstantiationError("Mandatory key missing in model_config : {0}".format(err.message))
# optional keys in dictionary
if 'MSL_version' in model_config:
self.msl_version = self.model_config['MSL_version']
else:
self.msl_version = "3.2"
log.info("msl_version : {0}".format(self.msl_version))
if 'experiment' in model_config:
self.experiment = dict(
StartTime=model_config['experiment']['StartTime'],
StopTime=model_config['experiment']['StopTime'],
NumberOfIntervals=model_config['experiment']['NumberOfIntervals'],
Tolerance=model_config['experiment']['Tolerance'])
# Algorithm
if 'Algorithm' in model_config['experiment']:
if self.tool_name.startswith('Dymola'):
self.experiment.update({'Algorithm':
self.model_config['experiment']['Algorithm']['Dymola']})
elif self.tool_name == 'OpenModelica':
self.experiment.update({'Algorithm':
self.model_config['experiment']['Algorithm']['OpenModelica']})
elif self.tool_name == 'JModelica':
self.experiment.update({'Algorithm':
self.model_config['experiment']['Algorithm']['JModelica']})
else: # py_modelica 12.09
self.experiment.update({'Algorithm': 'dassl'})
# Interval
if 'IntervalMethod' in model_config['experiment']:
if model_config['experiment']['IntervalMethod'] == 'Interval':
self.experiment.update({"NumberOfIntervals": "0"})
self.experiment.update({"Interval": model_config['experiment']['Interval']})
else:
self.experiment.update({"NumberOfIntervals":
model_config['experiment']['NumberOfIntervals']})
self.experiment.update({"Interval": "0"})
else: # py_modelica 12.09
self.experiment.update({"NumberOfIntervals":
model_config['experiment']['NumberOfIntervals']})
self.experiment.update({"Interval": "0"})
else:
self.experiment = dict(StartTime='0',
StopTime='1',
NumberOfIntervals='500',
Interval='0',
Tolerance='1e-5',
Algorithm='dassl')
log.info("No experiment data given, default values will be used...")
log.info("Experiment settings : {0}".format(self.experiment))
if 'lib_package_paths' in model_config:
for lib_path in self.model_config['lib_package_paths']:
if lib_path:
self.lib_package_paths.append(str(lib_path))
if 'lib_package_names' in model_config:
for lib_name in self.model_config['lib_package_names']:
if lib_name:
self.lib_package_names.append(lib_name)
if os.name == 'nt':
try:
import _winreg as wr
key = wr.OpenKey(wr.HKEY_LOCAL_MACHINE, r'software\meta', 0, wr.KEY_READ)
try:
self.max_simulation_time = wr.QueryValueEx(key, 'MAX_SIMULATION_TIME')[0]
print 'Found MAX_SIMULATION_TIME in registry, value was {0} (={1:.1f} h).'\
.format(self.max_simulation_time, float(self.max_simulation_time)/3600)
except WindowsError:
print 'MAX_SIMULATION_TIME not set in registry, using default {0} (={1:.1f} h).'\
.format(self.max_simulation_time, float(self.max_simulation_time)/3600)
except WindowsError:
print 'META-Tools not installed, using default max_simulation_time at {0} (={1:.1f} h).'\
.format(self.max_simulation_time, float(self.max_simulation_time)/3600)
# end of __initialize__
@abstractmethod
def compile_model(self):
return bool
@abstractmethod
def simulate_model(self):
return bool
@abstractmethod
def change_experiment(self,
start_time='0',
stop_time='1',
increment='',
n_interval='500',
tolerance='1e-5',
max_fixed_step='',
solver='dassl',
output_format='',
variable_filter=''):
return bool
@abstractmethod
def change_parameter(self, change_dict):
return bool | pombredanne/metamorphosys-desktop | metamorphosys/META/src/Python27Packages/py_modelica/py_modelica/modelica_simulation_tools/tool_base.py | Python | mit | 12,494 | 0.002401 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
************************************************
espressopp.interaction.StillingerWeberTripleTerm
************************************************
This class provides methods to compute forces and energies of
the Stillinger Weber Triple Term potential.
if :math:`d_{12} >= r_{c_1}` or :math:`d_{32} >= r_{c_2}`
.. math::
U = 0.0
else
.. math::
U = \varepsilon \lambda \exp\left(\frac{\sigma \gamma_1}{|r_{12}| - \sigma r_{c_1}}
+ \frac{\sigma \gamma_2}{|r_{32}| - \sigma r_{c_2}}\right)
\left(\frac{r_{12} \cdot r_{32}}{|r_{12}|\cdot |r_{32}|}
- \cos(\theta_0) \right)^2
.. function:: espressopp.interaction.StillingerWeberTripleTerm(gamma, theta0, lmbd, epsilon, sigma, cutoff)
:param gamma: (default: 0.0)
:param theta0: (default: 0.0)
:param lmbd: (default: 0.0)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param cutoff: (default: infinity)
:type gamma: real
:type theta0: real
:type lmbd: real
:type epsilon: real
:type sigma: real
:type cutoff:
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm(system, vl3)
:param system:
:param vl3:
:type system:
:type vl3:
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm.getPotential(type1, type2, type3)
:param type1:
:param type2:
:param type3:
:type type1:
:type type2:
:type type3:
:rtype:
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm.getVerletListTriple()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.VerletListStillingerWeberTripleTerm.setPotential(type1, type2, type3, potential)
:param type1:
:param type2:
:param type3:
:param potential:
:type type1:
:type type2:
:type type3:
:type potential:
.. function:: espressopp.interaction.FixedTripleListStillingerWeberTripleTerm(system, ftl, potential)
:param system:
:param ftl:
:param potential:
:type system:
:type ftl:
:type potential:
.. function:: espressopp.interaction.FixedTripleListStillingerWeberTripleTerm.getFixedTripleList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedTripleListStillingerWeberTripleTerm.setPotential(type1, type2, type3, potential)
:param type1:
:param type2:
:param type3:
:param potential:
:type type1:
:type type2:
:type type3:
:type potential:
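
A usage sketch (illustrative only: ``system`` and the triple verlet list ``vl3`` are assumed
to be set up elsewhere, and the numeric parameter values below are placeholders rather than
recommended Stillinger-Weber constants)::

    >>> pot = espressopp.interaction.StillingerWeberTripleTerm(
    ...           gamma=1.2, theta0=1.91, lmbd=21.0, epsilon=2.17, sigma=2.10, cutoff=3.77)
    >>> interaction = espressopp.interaction.VerletListStillingerWeberTripleTerm(system, vl3)
    >>> interaction.setPotential(type1=0, type2=0, type3=0, potential=pot)
    >>> system.addInteraction(interaction)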
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.AngularPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_StillingerWeberTripleTerm, \
interaction_VerletListStillingerWeberTripleTerm, \
interaction_FixedTripleListStillingerWeberTripleTerm
class StillingerWeberTripleTermLocal(AngularPotentialLocal, interaction_StillingerWeberTripleTerm):
def __init__(self, gamma1=0.0, gamma2=0.0, theta0=0.0, lmbd=0.0,
epsilon=1.0, sigma1=1.0, sigma2=1.0, cutoff1=infinity, cutoff2=infinity):
"""Initialize the local StillingerWeberTripleTerm object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_StillingerWeberTripleTerm, gamma1, gamma2,
theta0, lmbd, epsilon, sigma1, sigma2, cutoff1, cutoff2)
def __init__(self, gamma=0.0, theta0=0.0, lmbd=0.0, epsilon=1.0, sigma=1.0, cutoff=infinity):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_StillingerWeberTripleTerm, gamma, gamma,
theta0, lmbd, epsilon, sigma, sigma, cutoff, cutoff)
class VerletListStillingerWeberTripleTermLocal(InteractionLocal, interaction_VerletListStillingerWeberTripleTerm):
def __init__(self, system, vl3):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListStillingerWeberTripleTerm, system, vl3)
def setPotential(self, type1, type2, type3, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, type3, potential)
def getPotential(self, type1, type2, type3):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2, type3)
def getVerletListTriple(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletListTriple(self)
class FixedTripleListStillingerWeberTripleTermLocal(InteractionLocal, interaction_FixedTripleListStillingerWeberTripleTerm):
def __init__(self, system, ftl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedTripleListStillingerWeberTripleTerm, system, ftl, potential)
def setPotential(self, type1, type2, type3, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, type3, potential)
def getFixedTripleList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedTripleList(self)
if pmi.isController:
class StillingerWeberTripleTerm(AngularPotential):
'The StillingerWeberTripleTerm potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.StillingerWeberTripleTermLocal',
pmiproperty = [ 'gamma1', 'gamma2', 'theta0',
'lambda', 'epsilon', 'sigma1',
'sigma2', 'cutoff1', 'cutoff2']
)
class VerletListStillingerWeberTripleTerm(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListStillingerWeberTripleTermLocal',
pmicall = ['setPotential', 'getPotential','getVerletListTriple']
)
class FixedTripleListStillingerWeberTripleTerm(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleListStillingerWeberTripleTermLocal',
pmicall = ['setPotential','getFixedTripleList']
)
| espressopp/espressopp | src/interaction/StillingerWeberTripleTerm.py | Python | gpl-3.0 | 8,110 | 0.006289 |
import asyncio
import logging
import sys
import re
from abc import ABC, abstractmethod
from typing import List, Optional, Union
from discord.utils import find
from errbot.backends.base import (
Person,
Message,
Room,
RoomOccupant,
Presence,
ONLINE,
OFFLINE,
AWAY,
DND,
RoomError,
)
from errbot.core import ErrBot
log = logging.getLogger(__name__)
try:
import discord
except ImportError:
log.exception("Could not start the Discord back-end")
log.fatal(
"You need to install the Discord API in order to use the Discord backend.\n"
"You can do `pip install -r requirements.txt` to install it"
)
sys.exit(1)
# Discord message size limit.
DISCORD_MESSAGE_SIZE_LIMIT = 2000
COLOURS = {
"red": 0xFF0000,
"green": 0x008000,
"yellow": 0xFFA500,
"blue": 0x0000FF,
"white": 0xFFFFFF,
"cyan": 0x00FFFF,
} # Discord doesn't know its colours
class DiscordSender(ABC, discord.abc.Snowflake):
@abstractmethod
async def send(self, content: str = None, embed: discord.Embed = None):
raise NotImplementedError
@abstractmethod
def get_discord_object(self) -> discord.abc.Messageable:
raise NotImplementedError
class DiscordPerson(Person, DiscordSender):
@classmethod
def username_and_discriminator_to_userid(cls, username: str, discriminator: str) -> str:
return find(
lambda m: m.name == username and m.discriminator == discriminator,
DiscordBackend.client.get_all_members(),
)
def __init__(self, user_id: str):
"""
@user_id: _must_ be a string representation of a Discord Snowflake (an integer).
"""
if not re.match(r"[0-9]+", str(user_id)):
raise ValueError(f"Invalid Discord user id {type(user_id)} {user_id}.")
self._user_id = user_id
def get_discord_object(self) -> discord.abc.Messageable:
return self.discord_user()
@property
def created_at(self):
return discord.utils.snowflake_time(self.id)
@property
def person(self) -> str:
return str(self)
@property
def id(self) -> str:
return self._user_id
def discord_user(self) -> discord.User:
return DiscordBackend.client.get_user(self._user_id)
@property
def username(self) -> str:
"""Convert a Discord user ID to their user name"""
user = self.discord_user()
if user is None:
log.error(f"Cannot find user with ID {self._user_id}")
return f"<{self._user_id}>"
return user.name
nick = username
@property
def client(self) -> None:
return None
@property
def fullname(self) -> Optional[str]:
usr = self.discord_user()
if usr is None:
raise ValueError("Discord user is not defined.")
return f"{usr.name}#{usr.discriminator}"
@property
def aclattr(self) -> str:
return self.fullname
async def send(
self,
content: str = None,
tts: bool = False,
embed: discord.Embed = None,
file: discord.File = None,
files: List[discord.File] = None,
delete_after: float = None,
nonce: int = None,
allowed_mentions: discord.AllowedMentions = None,
reference: Union[discord.Message, discord.MessageReference] = None,
mention_author: Optional[bool] = None,
):
await self.discord_user().send(
content=content,
tts=tts,
embed=embed,
file=file,
files=files,
delete_after=delete_after,
nonce=nonce,
allowed_mentions=allowed_mentions,
reference=reference,
mention_author=mention_author,
)
def __eq__(self, other):
return isinstance(other, DiscordPerson) and other.aclattr == self.aclattr
def __str__(self):
return f"{self.fullname}"
class DiscordRoom(Room, DiscordSender):
"""
DiscordRoom objects can be in two states:
1. They exist and we have a channel_id of that room
2. They don't currently exist and we have a channel name and guild
"""
@classmethod
def from_id(cls, channel_id):
channel = DiscordBackend.client.get_channel(channel_id)
if channel is None:
raise ValueError(f"Channel id:{channel_id} doesn't exist!")
return cls(channel.name, channel.guild.id)
def __init__(self, channel_name: str, guild_id: str):
"""
Allows specifying an existing room (via name + guild, or via DiscordRoom.from_id) or the
creation of a future room by specifying a name and the guild to create the channel in.
:param channel_name:
:param guild_id:
"""
if DiscordBackend.client.get_guild(guild_id) is None:
raise ValueError(f"Can't find guild id {guild_id} to init DiscordRoom")
self._guild_id = guild_id
self._channel_name = channel_name
self._channel_id = self.channel_name_to_id() # Can be None if channel doesn't exist
def get_discord_object(self):
return self.discord_channel()
def channel_name_to_id(self):
"""
Channel names are non-unique across Discord. Hence we require a guild id to uniquely
identify a room
:return: ID of the room
"""
matching = [
channel
for channel in DiscordBackend.client.get_all_channels()
if self._channel_name == channel.name
and channel.guild.id == self._guild_id
and isinstance(channel, discord.TextChannel)
]
if len(matching) == 0:
return None
if len(matching) > 1:
log.warning(
"Multiple matching channels for channel"
f"name {self._channel_name} in guild id {self._guild_id}"
)
return matching[0].id
@property
def created_at(self):
return discord.utils.snowflake_time(self.id)
def invite(self, *args) -> None:
if not self.exists:
raise RuntimeError("Can't invite to a non-existent channel")
for identifier in args:
if not isinstance(identifier, DiscordPerson):
raise RuntimeError("Can't invite non Discord Users")
asyncio.run_coroutine_threadsafe(
self.discord_channel().set_permissions(
identifier.discord_user(), read_messages=True
),
loop=DiscordBackend.client.loop,
)
@property
def joined(self) -> bool:
log.error("Not implemented")
return True
def leave(self, reason: str = None) -> None:
"""
Can't just leave a room
:param reason:
:return:
"""
log.error("Not implemented")
async def create_room(self):
guild = DiscordBackend.client.get_guild(self._guild_id)
channel = await guild.create_text_channel(self._channel_name)
log.info(f"Created channel {self._channel_name} in guild {guild.name}")
self._channel_id = channel.id
def create(self) -> None:
if self.exists:
log.warning(f"Tried to create {self._channel_name} which already exists.")
raise RoomError("Room exists")
asyncio.run_coroutine_threadsafe(
self.create_room(), loop=DiscordBackend.client.loop
).result(timeout=5)
def destroy(self) -> None:
if not self.exists:
log.warning(f"Tried to destory {self._channel_name} which doesn't exist.")
raise RoomError("Room doesn't exist")
asyncio.run_coroutine_threadsafe(
self.discord_channel().delete(reason="Bot deletion command"),
loop=DiscordBackend.client.loop,
).result(timeout=5)
def join(self, username: str = None, password: str = None) -> None:
"""
All public channels are already joined. Only private channels can be joined and we
need an invite for that.
:param username:
:param password:
:return:
"""
log.warning(
"Can't join channels. Public channels are automatically joined"
" and private channels are invite only."
)
@property
def topic(self) -> str:
if not self.exists:
return ""
topic = self.discord_channel().topic
topic = "" if topic is None else topic
return topic
@property
def occupants(self) -> List[RoomOccupant]:
if not self.exists:
return []
occupants = []
for member in self.discord_channel().members:
occupants.append(DiscordRoomOccupant(member.id, self._channel_id))
return occupants
@property
def exists(self) -> bool:
return None not in [self._channel_id, DiscordBackend.client.get_channel(self._channel_id)]
@property
def guild(self):
"""
Gets the guild_id this channel belongs to. None if it doesn't exist
:return: Guild id or None
"""
return self._guild_id
@property
def name(self) -> str:
"""
Gets the channels' name
:return: channels' name
"""
if self._channel_id is None:
return self._channel_name
else:
self._channel_name = DiscordBackend.client.get_channel(self._channel_id).name
return self._channel_name
@property
def id(self):
"""
Can return none if not created
:return: Channel ID or None
"""
return self._channel_id
def discord_channel(
self,
) -> Optional[Union[discord.abc.GuildChannel, discord.abc.PrivateChannel]]:
return DiscordBackend.client.get_channel(self._channel_id)
async def send(self, content: str = None, embed: discord.Embed = None):
if not self.exists:
raise RuntimeError("Can't send a message on a non-existent channel")
if not isinstance(self.discord_channel(), discord.abc.Messageable):
raise RuntimeError(
f"Channel {self.name}[id:{self._channel_id}] doesn't support sending text messages"
)
await self.discord_channel().send(content=content, embed=embed)
def __str__(self):
return f"<#{self.id}>"
def __eq__(self, other: "DiscordRoom"):
if not isinstance(other, DiscordRoom):
return False
return None not in [other.id, self.id] and other.id == self.id
class DiscordCategory(DiscordRoom):
def channel_name_to_id(self):
"""
        Channel names are non-unique across Discord. Hence we require a guild id to
uniquely identify a room id.
:return: ID of the room
"""
matching = [
channel
for channel in DiscordBackend.client.get_all_channels()
if self._channel_name == channel.name
and channel.guild.id == self._guild_id
and isinstance(channel, discord.CategoryChannel)
]
if len(matching) == 0:
return None
if len(matching) > 1:
log.warning(
"Multiple matching channels for channel name"
f" {self._channel_name} in guild id {self._guild_id}"
)
return matching[0].id
def create_subchannel(self, name: str) -> DiscordRoom:
category = self.get_discord_object()
if not isinstance(category, discord.CategoryChannel):
raise RuntimeError("Category is not a discord category object")
text_channel = asyncio.run_coroutine_threadsafe(
category.create_text_channel(name), loop=DiscordBackend.client.loop
).result(timeout=5)
return DiscordRoom.from_id(text_channel.id)
async def create_room(self):
guild = DiscordBackend.client.get_guild(self._guild_id)
channel = await guild.create_category(self._channel_name)
log.info(f"Created category {self._channel_name} in guild {guild.name}")
self._channel_id = channel.id
def join(self, username: str = None, password: str = None) -> None:
raise RuntimeError("Can't join categories")
def leave(self, reason: str = None) -> None:
raise RuntimeError("Can't leave categories")
@property
def joined(self) -> bool:
raise RuntimeError("Can't join categories")
@property
def topic(self) -> str:
raise RuntimeError("Can't set category topic")
@property
def occupants(self) -> List[RoomOccupant]:
raise NotImplementedError("Not implemented yet")
def invite(self, *args) -> None:
raise RuntimeError("Can't invite to categories")
class DiscordRoomOccupant(DiscordPerson, RoomOccupant):
def __init__(self, user_id: str, channel_id: str):
super().__init__(user_id)
self._channel = DiscordRoom.from_id(channel_id)
@property
def room(self) -> DiscordRoom:
return self._channel
async def send(self, content: str = None, embed: discord.Embed = None):
await self.room.send(content=content, embed=embed)
def __eq__(self, other):
return (
isinstance(other, DiscordRoomOccupant)
and other.id == self.id
and other.room.id == self.room.id
)
def __str__(self):
return f"{super().__str__()}@{self._channel.name}"
class DiscordBackend(ErrBot):
"""
This is the Discord backend for Errbot.
"""
client = None
def __init__(self, config):
super().__init__(config)
identity = config.BOT_IDENTITY
self.token = identity.get("token", None)
if not self.token:
log.fatal(
"You need to set a token entry in the BOT_IDENTITY"
" setting of your configuration."
)
sys.exit(1)
self.bot_identifier = None
intents = discord.Intents.default()
intents.members = True
DiscordBackend.client = discord.Client(intents=intents)
# Register discord event coroutines.
for fn in [
self.on_ready,
self.on_message,
            self.on_member_update,
            self.on_message_edit,
]:
DiscordBackend.client.event(fn)
@property
def message_limit(self):
"""
        Return the discord maximum message size. If a message size is set in the configuration it
        will be used, otherwise the default backend size will be used.
"""
try:
limit = min(int(self.bot_config.MESSAGE_SIZE_LIMIT), DISCORD_MESSAGE_SIZE_LIMIT)
except (AttributeError, ValueError) as e:
limit = DISCORD_MESSAGE_SIZE_LIMIT
return limit
async def on_error(self, event, *args, **kwargs):
super().on_error(event, *args, **kwargs)
# A stub entry in case special error handling is required.
pass
async def on_ready(self):
# Call connect only after successfully connected and ready to service Discord events.
self.connect_callback()
log.debug(
f"Logged in as {DiscordBackend.client.user.name}, {DiscordBackend.client.user.id}"
)
if self.bot_identifier is None:
self.bot_identifier = DiscordPerson(DiscordBackend.client.user.id)
for channel in DiscordBackend.client.get_all_channels():
log.debug(f"Found channel: {channel}")
async def on_message_edit(self, before, after):
log.warning("Message editing not supported.")
async def on_message(self, msg: discord.Message):
err_msg = Message(msg.content, extras=msg.embeds)
if isinstance(msg.channel, discord.abc.PrivateChannel):
err_msg.frm = DiscordPerson(msg.author.id)
err_msg.to = self.bot_identifier
else:
err_msg.to = DiscordRoom.from_id(msg.channel.id)
err_msg.frm = DiscordRoomOccupant(msg.author.id, msg.channel.id)
if self.process_message(err_msg):
# Message contains a command
recipient = err_msg.frm
if not isinstance(recipient, DiscordSender):
raise ValueError("Message object from is not a DiscordSender")
async with recipient.get_discord_object().typing():
self._dispatch_to_plugins("callback_message", err_msg)
if msg.mentions:
self.callback_mention(
err_msg,
[DiscordRoomOccupant(mention.id, msg.channel.id) for mention in msg.mentions],
)
def is_from_self(self, msg: Message) -> bool:
other = msg.frm
if not isinstance(other, DiscordPerson):
return False
return other.id == self.bot_identifier.id
async def on_member_update(self, before, after):
if before.status != after.status:
person = DiscordPerson(after.id)
log.debug(f"Person {person} changed status to {after.status} from {before.status}")
if after.status == discord.Status.online:
self.callback_presence(Presence(person, ONLINE))
elif after.status == discord.Status.offline:
self.callback_presence(Presence(person, OFFLINE))
elif after.status == discord.Status.idle:
self.callback_presence(Presence(person, AWAY))
elif after.status == discord.Status.dnd:
self.callback_presence(Presence(person, DND))
else:
log.debug("Unrecognised member update, ignoring...")
def query_room(self, room):
"""
        Major hacky function. We implicitly assume we're in just one guild server.
##category -> a category
#room -> Creates a room
:param room:
:return:
"""
if len(DiscordBackend.client.guilds) == 0:
log.error(f"Unable to join room '{room}' because no guilds were found!")
return None
guild = DiscordBackend.client.guilds[0]
room_name = room
if room_name.startswith("##"):
return DiscordCategory(room_name[2:], guild.id)
elif room_name.startswith("#"):
return DiscordRoom(room_name[1:], guild.id)
else:
return DiscordRoom(room_name, guild.id)
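    # Illustrative sketch (not part of the original backend): how the prefix
    # convention documented above maps to room objects, assuming the bot is
    # connected to a single guild.
    #   backend.query_room('##admin')   -> DiscordCategory('admin', guild.id)
    #   backend.query_room('#general')  -> DiscordRoom('general', guild.id)
    #   backend.query_room('general')   -> DiscordRoom('general', guild.id)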
def send_message(self, msg: Message):
super().send_message(msg)
if not isinstance(msg.to, DiscordSender):
raise RuntimeError(
f"{msg.to} doesn't support sending messages."
f" Expected DiscordSender object but got {type(msg.to)}."
)
log.debug(
f"Message to:{msg.to}({type(msg.to)}) from:{msg.frm}({type(msg.frm)}),"
f" is_direct:{msg.is_direct} extras: {msg.extras} size: {len(msg.body)}"
)
for message in [
msg.body[i : i + self.message_limit]
for i in range(0, len(msg.body), self.message_limit)
]:
asyncio.run_coroutine_threadsafe(
msg.to.send(content=message), loop=DiscordBackend.client.loop
)
def send_card(self, card):
recipient = card.to
if not isinstance(recipient, DiscordSender):
raise RuntimeError(
f"{recipient} doesn't support sending messages."
f" Expected {DiscordSender} but got {type(recipient)}"
)
if card.color:
color = COLOURS.get(card.color, int(card.color.replace("#", "0x"), 16))
else:
color = None
# Create Embed object
em = discord.Embed(title=card.title, description=card.body, color=color)
if card.image:
em.set_image(url=card.image)
if card.thumbnail:
em.set_thumbnail(url=card.thumbnail)
if card.fields:
for key, value in card.fields:
em.add_field(name=key, value=value, inline=True)
asyncio.run_coroutine_threadsafe(
recipient.send(embed=em), loop=DiscordBackend.client.loop
).result(5)
def build_reply(self, mess, text=None, private=False, threaded=False):
response = self.build_message(text)
if mess.is_direct:
response.frm = self.bot_identifier
response.to = mess.frm
else:
if not isinstance(mess.frm, DiscordRoomOccupant):
raise RuntimeError("Non-Direct messages must come from a room occupant")
response.frm = DiscordRoomOccupant(self.bot_identifier.id, mess.frm.room.id)
response.to = DiscordPerson(mess.frm.id) if private else mess.to
return response
def serve_once(self):
try:
DiscordBackend.client.loop.run_until_complete(DiscordBackend.client.start(self.token))
except KeyboardInterrupt:
DiscordBackend.client.loop.run_until_complete(DiscordBackend.client.logout())
pending = asyncio.Task.all_tasks()
gathered = asyncio.gather(*pending)
# noinspection PyBroadException
try:
gathered.cancel()
DiscordBackend.client.loop.run_until_complete(gathered)
# we want to retrieve any exceptions to make sure that
# they don't nag us about it being un-retrieved.
gathered.exception()
except Exception as e:
pass
self.disconnect_callback()
return True
def change_presence(self, status: str = ONLINE, message: str = ""):
log.debug(f'Presence changed to {status} and activity "{message}".')
activity = discord.Activity(name=message)
        # client.change_presence is a coroutine; schedule it on the client's event loop.
        asyncio.run_coroutine_threadsafe(
            DiscordBackend.client.change_presence(status=status, activity=activity),
            loop=DiscordBackend.client.loop,
        )
def prefix_groupchat_reply(self, message, identifier: Person):
message.body = f"@{identifier.nick} {message.body}"
def rooms(self):
return [
DiscordRoom.from_id(channel.id) for channel in DiscordBackend.client.get_all_channels()
]
@property
def mode(self):
return "discord"
def build_identifier(self, string_representation: str):
"""
This needs a major rethink/rework since discord bots can be in different
Guilds so room name clashes are certainly possible. For now we are only
uniquely identifying users
Valid forms of strreps:
user#discriminator -> Person
#channel@guild_id -> Room
:param string_representation:
:return: Identifier
Room Example: #general@12345678901234567 -> Sends a message to the #general channel of the guild with id 12345678901234567
"""
if not string_representation:
raise ValueError("Empty strrep")
if string_representation.startswith("#"):
strrep_split = string_representation.split("@")
return DiscordRoom(strrep_split[0][1:], int(strrep_split[1]))
if "#" in str(string_representation):
user, discriminator = str(string_representation).split("#")
else:
raise ValueError("No Discriminator")
log.debug(f"Build_identifier {string_representation}")
member = DiscordPerson.username_and_discriminator_to_userid(user, discriminator)
return DiscordPerson(user_id=member.id)
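    # Illustrative sketch (not part of the original backend): the two accepted
    # string representations, assuming the user and channel actually exist.
    #   backend.build_identifier('someuser#1234')             -> DiscordPerson
    #   backend.build_identifier('#general@123456789012345')  -> DiscordRoom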
def upload_file(self, msg, filename):
with open(filename, "r") as f:
dest = None
if msg.is_direct:
dest = DiscordPerson(msg.frm.id).get_discord_object()
else:
dest = msg.to.get_discord_object()
log.info(f"Sending file {filename} to user {msg.frm}")
asyncio.run_coroutine_threadsafe(
dest.send(file=discord.File(f, filename=filename)),
loop=self.client.loop,
)
def history(self, channelname, before=None):
mychannel = discord.utils.get(self.client.get_all_channels(), name=channelname)
async def gethist(mychannel, before=None):
return [i async for i in self.client.logs_from(mychannel, limit=10, before=before)]
future = asyncio.run_coroutine_threadsafe(gethist(mychannel, before), loop=self.client.loop)
return future.result(timeout=None)
| gbin/err-backend-discord | discordb.py | Python | gpl-3.0 | 24,430 | 0.00176 |
#!/usr/bin/env python
import logging
import unittest
import os
import environment
import utils
import tablet
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
shard_0_replica2 = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_0_backup = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica1 = tablet.Tablet()
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
# shard_2 tablets are not used by all tests and not included by default.
tablets = [shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly, shard_0_backup,
shard_1_master, shard_1_replica1]
tablets_shard2 = [shard_2_master, shard_2_replica1]
test_keyspace = 'test_keyspace'
db_name = 'vt_' + test_keyspace
def setUpModule():
try:
environment.topo_server().setup()
_init_mysql(tablets)
utils.run_vtctl(['CreateKeyspace', test_keyspace])
shard_0_master.init_tablet( 'master', test_keyspace, '0')
shard_0_replica1.init_tablet('replica', test_keyspace, '0')
shard_0_replica2.init_tablet('replica', test_keyspace, '0')
shard_0_rdonly.init_tablet( 'rdonly', test_keyspace, '0')
shard_0_backup.init_tablet( 'backup', test_keyspace, '0')
shard_1_master.init_tablet( 'master', test_keyspace, '1')
shard_1_replica1.init_tablet('replica', test_keyspace, '1')
utils.run_vtctl(['RebuildKeyspaceGraph', test_keyspace], auto_log=True)
# run checks now before we start the tablets
utils.validate_topology()
utils.Vtctld().start()
# create databases, start the tablets
for t in tablets:
t.create_db(db_name)
t.start_vttablet(wait_for_state=None)
# wait for the tablets to start
shard_0_master.wait_for_vttablet_state('SERVING')
shard_0_replica1.wait_for_vttablet_state('SERVING')
shard_0_replica2.wait_for_vttablet_state('SERVING')
shard_0_rdonly.wait_for_vttablet_state('SERVING')
shard_0_backup.wait_for_vttablet_state('NOT_SERVING')
shard_1_master.wait_for_vttablet_state('SERVING')
shard_1_replica1.wait_for_vttablet_state('SERVING')
# make sure all replication is good
for t in tablets:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', test_keyspace+'/0',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', test_keyspace+'/1',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', test_keyspace])
# check after all tablets are here and replication is fixed
utils.validate_topology(ping_tablets=True)
except Exception as setup_exception:
try:
tearDownModule()
except Exception as e:
logging.exception("Tearing down a failed setUpModule() failed: %s", e)
raise setup_exception
def _init_mysql(tablets):
setup_procs = []
for t in tablets:
setup_procs.append(t.init_mysql())
utils.wait_procs(setup_procs)
def tearDownModule():
if utils.options.skip_teardown:
return
tablet.kill_tablets(tablets)
teardown_procs = []
for t in tablets:
teardown_procs.append(t.teardown_mysql())
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in tablets:
t.remove_tree()
class TestSchema(unittest.TestCase):
def setUp(self):
for t in tablets:
t.create_db(db_name)
def tearDown(self):
# This test assumes that it can reset the tablets by simply cleaning their
# databases without restarting the tablets.
for t in tablets:
t.clean_dbs()
# Tablets from shard 2 are always started during the test. Shut them down now.
if shard_2_master in tablets:
for t in tablets_shard2:
t.scrap(force=True, skip_rebuild=True)
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
t.kill_vttablet()
tablets.remove(t)
utils.run_vtctl(['DeleteShard', 'test_keyspace/2'], auto_log=True)
def _check_tables(self, tablet, expectedCount):
tables = tablet.mquery(db_name, 'show tables')
self.assertEqual(len(tables), expectedCount,
'Unexpected table count on %s (not %u): got tables: %s' %
(tablet.tablet_alias, expectedCount, str(tables)))
def _check_db_not_created(self, tablet):
# Broadly catch all exceptions, since the exception being raised is internal to MySQL.
# We're strictly checking the error message though, so should be fine.
with self.assertRaisesRegexp(Exception, '(1049, "Unknown database \'%s\'")' % db_name):
tablet.mquery(db_name, 'show tables')
def _apply_schema(self, keyspace, sql):
out, _ = utils.run_vtctl(['ApplySchema',
'-sql='+sql,
keyspace],
trap_output=True,
raise_on_error=True)
return out
def _get_schema(self, tablet_alias):
out, _ = utils.run_vtctl(['GetSchema',
tablet_alias],
trap_output=True,
raise_on_error=True)
return out
def _create_test_table_sql(self, table):
return 'CREATE TABLE %s ( \
`id` BIGINT(20) not NULL, \
`msg` varchar(64), \
PRIMARY KEY (`id`) \
) ENGINE=InnoDB' % table
def _alter_test_table_sql(self, table, index_column_name):
return 'ALTER TABLE %s \
ADD COLUMN new_id bigint(20) NOT NULL AUTO_INCREMENT FIRST, \
DROP PRIMARY KEY, \
ADD PRIMARY KEY (new_id), \
ADD INDEX idx_column(%s) \
' % (table, index_column_name)
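  # Illustrative note (not part of the original test): for
  # ('vt_select_test03', 'msg') the helper above produces, modulo whitespace,
  #   ALTER TABLE vt_select_test03 ADD COLUMN new_id bigint(20) NOT NULL
  #   AUTO_INCREMENT FIRST, DROP PRIMARY KEY, ADD PRIMARY KEY (new_id),
  #   ADD INDEX idx_column(msg)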
def _apply_initial_schema(self):
schema_changes = ';'.join([
self._create_test_table_sql('vt_select_test01'),
self._create_test_table_sql('vt_select_test02'),
self._create_test_table_sql('vt_select_test03'),
self._create_test_table_sql('vt_select_test04')])
# apply schema changes to the test keyspace
self._apply_schema(test_keyspace, schema_changes)
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
# get schema for each shard
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
def test_schema_changes(self):
self._apply_initial_schema()
self._apply_schema(test_keyspace, self._alter_test_table_sql('vt_select_test03', 'msg'))
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
# test schema changes
os.makedirs(os.path.join(utils.vtctld.schema_change_dir, test_keyspace))
input_path = os.path.join(utils.vtctld.schema_change_dir, test_keyspace, "input")
os.makedirs(input_path)
sql_path = os.path.join(input_path, "create_test_table_x.sql")
with open(sql_path, 'w') as handler:
handler.write("create table test_table_x (id int)")
timeout = 10
# wait until this sql file being consumed by autoschema
while os.path.isfile(sql_path):
timeout = utils.wait_step('waiting for vtctld to pick up schema changes',
timeout,
sleep_time=0.2)
# check number of tables
self._check_tables(shard_0_master, 5)
self._check_tables(shard_1_master, 5)
def _setUp_tablets_shard_2(self):
try:
_init_mysql(tablets_shard2)
finally:
# Include shard2 tablets for tearDown.
tablets.extend(tablets_shard2)
shard_2_master.init_tablet( 'master', 'test_keyspace', '2')
shard_2_replica1.init_tablet('replica', 'test_keyspace', '2')
# We intentionally don't want to create a db on these tablets.
shard_2_master.start_vttablet(wait_for_state=None)
shard_2_replica1.start_vttablet(wait_for_state=None)
shard_2_master.wait_for_vttablet_state('NOT_SERVING')
shard_2_replica1.wait_for_vttablet_state('NOT_SERVING')
for t in tablets_shard2:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', test_keyspace+'/2',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', test_keyspace])
def test_vtctl_copyschemashard_use_tablet_as_source(self):
self._test_vtctl_copyschemashard(shard_0_master.tablet_alias)
def test_vtctl_copyschemashard_use_shard_as_source(self):
self._test_vtctl_copyschemashard('test_keyspace/0')
def _test_vtctl_copyschemashard(self, source):
self._apply_initial_schema()
self._setUp_tablets_shard_2()
# CopySchemaShard is responsible for creating the db; one shouldn't exist before
# the command is run.
self._check_db_not_created(shard_2_master)
self._check_db_not_created(shard_2_replica1)
# Run the command twice to make sure it's idempotent.
for _ in range(2):
utils.run_vtctl(['CopySchemaShard',
source,
'test_keyspace/2'],
auto_log=True)
# shard_2_master should look the same as the replica we copied from
self._check_tables(shard_2_master, 4)
utils.wait_for_replication_pos(shard_2_master, shard_2_replica1)
self._check_tables(shard_2_replica1, 4)
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_2_schema = self._get_schema(shard_2_master.tablet_alias)
self.assertEqual(shard_0_schema, shard_2_schema)
if __name__ == '__main__':
utils.main()
| SDHM/vitess | test/schema.py | Python | bsd-3-clause | 9,880 | 0.008502 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_slooni_jong_q1_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_tals_n","slooni_jong_q1_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/tangible/mission/quest_item/shared_slooni_jong_q1_needed.py | Python | mit | 477 | 0.046122 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 11:34:46 2017
@author: maryam
"""
import nltk
import numpy as np
import sys
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
np.seterr(divide='ignore', invalid='ignore')
#reload(sys)
#sys.setdefaultencoding("utf-8")
stop = set(stopwords.words('english'))
to_filter = [',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', "'s",'``', '"', "'", '.' , "''"]
def parse_files(trainlist):
corpus= ''
for trainl in trainlist:
text = trainl.lower().replace('\n', ' ')
#text = unicode(text, errors='ignore')
corpus += text.replace('\n', ' ') +'\n'
vocabDic = nltk.FreqDist(w.lower() for w in nltk.tokenize.word_tokenize(corpus))
vocabDic1 = [(w,v) for (w,v) in vocabDic.items() if (w not in to_filter and not w.isdigit())]
vocabulary = [w for (w,v) in vocabDic1]
vocabFreq = [v for (w,v) in vocabDic1]
return corpus, vocabulary, vocabFreq
def index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha):
# alpha= 0.001
summ = sum(vocabFreq)
lines1 = [line.strip().replace('_',' ') for line in trainTextList]
X_index= []
weight= []
for line in lines1:
if line == '':
continue
word1 = nltk.tokenize.word_tokenize(line)
word = [w for w in word1 if (w not in to_filter and not w.isdigit())]
x = [0] * len(word)
w = [1] * len(word)
for i in range(len(word)):
try:
x[i] = vocabulary.index(word[i].lower())
except Exception as excep:
print (excep)
continue
try:
w[i] = alpha / (alpha + 1.0* vocabFreq[x[i]] / summ) #main formula
except Exception as excep:
print (excep)
continue
X_index.append(x)
weight.append(w)
return X_index , weight
def word2vec(word2vec_Dictionary, vocabulary, lang):
word2vec2= []
for word in vocabulary:
try:
#print (word)
word2vec = word2vec_Dictionary[word.encode('utf-8')]
except Exception:
#print 'error'
word2vec = [0.0000001] * 300
word2vec2.append(word2vec)
return word2vec2
def get_weighted_average(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
WeArr=np.asarray(We)
n_samples = len(x)
emb = np.zeros((n_samples, 300))
for i in xrange(n_samples):
emb[i,:] = np.asarray(w[i]).dot(WeArr[[np.asarray(x[i])],:]) / np.count_nonzero(np.asarray(w[i]))
return emb
def compute_pc(X,npc):
"""
Compute the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==2:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def SIF_embedding(We, x, w, npc):
"""
    Compute sentence embeddings as the weighted average of word vectors, removing the projection on the first principal component(s) when npc > 0
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in the i-th sentence
:param w: w[i, :] are the weights for the words in the i-th sentence
:param params.rmpc: if >0, remove the projections of the sentence embeddings to their first principal component
:return: emb, emb[i, :] is the embedding for sentence i
"""
emb = get_weighted_average(We, x, w)
if npc > 0:
emb = remove_pc(emb, npc)
return emb
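# Illustrative usage sketch (not part of the original script), assuming `We` holds
# one 300-d vector per vocabulary word and `x`/`w` come from index_vector():
#   emb = SIF_embedding(We, x, w, npc=1)
# emb[i, :] is then the SIF embedding of sentence i: the alpha/(alpha + p(w))
# weighted average of its word vectors with the first principal component removed.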
def makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We):
x , w= index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha)
emb = get_weighted_average(We, x, w)
embList = emb.tolist()
newemb= []
x, y = emb.shape
for i in range (x):
if (not np.isnan(emb[i,0]) and not np.isinf(emb[i,0]) ):
newemb.append(embList[i])
emb = np.asarray(newemb)
emb = remove_pc(emb, npc=1)
return emb
def main(alpha, lang, trainTextList, word2vec_Dictionary):
corpus , vocabulary, vocabFreq = parse_files(trainTextList)
We= word2vec(word2vec_Dictionary, vocabulary, lang)
emb = makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We)
return emb
if __name__ == '__main__':
if len(sys.argv) <3:
sys.exit()
else:
alpha = float(sys.argv[1])
lang= sys.argv[2]
SentenceListTest= sys.argv[3]
emb= main(alpha, lang, SentenceListTest)
# SentenceListTest= ['''A member of the Somali Federal Parliament has been shot dead by unknown gunmen on Thursday morning in Mogadishu, officials said. Ahmed Mohamud Hayd was killed in a drive-by shooting after he left his hotel in a heavily policed area, witnesses said.''',''' His bodyguard was also killed and a parliamentary secretary wounded in the shooting.''']
# emb = main(0.01, 'en', SentenceListTest)
# print emb
| openeventdata/Focus_Locality_Extraction | Focus_Locality/Sentence_Embedding_Approach/Testing/SIFpreprocessing_test.py | Python | mit | 5,830 | 0.014923 |
__author__ = 'moskupols'
__all__ = ['Statistics', 'BlackList', 'console_statistic']
from .statistics import *
from . import console_statistic
| hatbot-team/hatbot | statistics/__init__.py | Python | mit | 144 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import errno
import inspect
import logging
import os
import platform
import random
import subprocess
import socket
import sys
import uuid
import iso8601
from heat.common import exception
logger = logging.getLogger(__name__)
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
def image_meta_to_http_headers(image_meta):
"""
    Converts a mapping of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = unicode(pv)
else:
headers["x-image-meta-%s" % k.lower()] = unicode(v)
return headers
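# Illustrative sketch (not part of the original module): a metadata mapping such as
#   {'name': 'fedora', 'size': 100, 'properties': {'arch': 'x86_64'}}
# becomes
#   {'x-image-meta-name': u'fedora', 'x-image-meta-size': u'100',
#    'x-image-meta-property-arch': u'x86_64'}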
def add_features_to_http_headers(features, headers):
"""
Adds additional headers representing heat features to be enabled.
:param headers: Base set of headers
:param features: Map of enabled features
"""
if features:
for k, v in features.items():
if v is not None:
headers[k.lower()] = unicode(v)
def get_image_meta_from_headers(response):
"""
Processes HTTP headers from a supplied response that
match the x-image-meta and x-image-meta-property and
returns a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
result[field_name] = value or None
result['properties'] = properties
if 'size' in result:
try:
result['size'] = int(result['size'])
except ValueError:
raise exception.Invalid
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = bool_from_header_value(result[key])
return result
def bool_from_header_value(value):
"""
Returns True if value is a boolean True or the
string 'true', case-insensitive, False otherwise
"""
if isinstance(value, bool):
return value
elif isinstance(value, (basestring, unicode)):
if str(value).lower() == 'true':
return True
return False
def bool_from_string(subject):
"""
Interpret a string as a boolean.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
if isinstance(subject, bool):
return subject
elif isinstance(subject, int):
return subject == 1
if hasattr(subject, 'startswith'): # str or unicode...
if subject.strip().lower() in ('true', 'on', '1'):
return True
return False
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), e:
raise exception.ImportFailure(import_str=import_str,
reason=e)
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def generate_uuid():
return str(uuid.uuid4())
def is_uuid_like(value):
try:
uuid.UUID(value)
return True
except Exception:
return False
def isotime(at=None):
"""Stringify time in ISO 8601 format"""
if not at:
at = datetime.datetime.utcnow()
str = at.strftime(TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
str += ('Z' if tz == 'UTC' else tz)
return str
def parse_isotime(timestr):
"""Parse time from ISO 8601 format"""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC"""
offset = timestamp.utcoffset()
return timestamp.replace(tzinfo=None) - offset if offset else timestamp
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def safe_remove(path):
try:
os.remove(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/heat
Example:
        ID  Name              Size         Hits
        --- ----------------- ------------ -----
        122 image             22           0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
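# Illustrative usage sketch (not part of the original module), mirroring the
# example in the PrettyTable docstring above:
#   table = PrettyTable()
#   table.add_column(3, label='ID')
#   table.add_column(17, label='Name')
#   print table.make_header()
#   print table.make_row(122, 'image')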
def get_terminal_size():
def _get_terminal_size_posix():
import fcntl
import struct
import termios
height_width = None
try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HH', 0, 0)))
except:
pass
if not height_width:
try:
p = subprocess.Popen(['stty', 'size'],
                                     shell=False,
stdout=subprocess.PIPE)
return tuple(int(x) for x in p.communicate()[0].split())
except:
pass
return height_width
def _get_terminal_size_win32():
try:
from ctypes import windll, create_string_buffer
handle = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
except:
return None
if res:
import struct
unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = unpack_tmp
height = bottom - top + 1
width = right - left + 1
return (height, width)
else:
return None
def _get_terminal_size_unknownOS():
raise NotImplementedError
func = {'posix': _get_terminal_size_posix,
'win32': _get_terminal_size_win32}
height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
    if height_width is None:
raise exception.Invalid()
for i in height_width:
if not isinstance(i, int) or i <= 0:
raise exception.Invalid()
return height_width[0], height_width[1]
| blomquisg/heat | heat/common/utils.py | Python | apache-2.0 | 10,575 | 0.000851 |
from sys import stdout
import argparse
import json
import logging
from .phuey import Bridge, Light
logger = logging.getLogger()
def command_interpreter(command):
python_dict = {}
commands = command.split(',')
for c in commands:
k, v = c.split('=')
if v.lower() == "true":
v = True
elif v.lower() == "false":
v = False
elif v.isdigit() is True:
v = int(v)
python_dict[k] = v
return json.dumps(python_dict)
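# Illustrative sketch (not part of the original CLI): a command string such as
#   "on=true,bri=128"
# is parsed into the JSON payload '{"on": true, "bri": 128}' (key order may vary),
# which __main__ below loads back into a dict before assigning it to Light.state.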
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--bridge', '-b', metavar="BRIDGEIPADDRESS")
arg_parser.add_argument('--user', '-u', metavar="USERNAME")
arg_parser.add_argument('--light', '-l', metavar="LIGHTID")
arg_parser.add_argument('--command', '-c', metavar="COMMAND")
args = arg_parser.parse_args()
bridge_ip = args.bridge
user = args.user
lid = args.light
command = command_interpreter(args.command)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stdout)
ch.setLevel(logging.DEBUG)
fmt = '%(name)s - %(asctime)s - %(module)s-%(funcName)s/%(lineno)d - %(message)s'
formatter = logging.Formatter(fmt)
ch.setFormatter(formatter)
logger.addHandler(ch)
light = Light(bridge_ip, user, lid, 'my light')
logger.debug(command)
light.state = json.loads(command)
| pancho-villa/Phuey | phuey/light_cli.py | Python | mit | 1,447 | 0.002764 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :meth:`iris.experimental.ugrid.mesh.Mesh.from_coords`.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import numpy as np
from iris.coords import AuxCoord, DimCoord
from iris.experimental.ugrid import logger
from iris.experimental.ugrid.mesh import Connectivity, Mesh
from iris.tests.stock import simple_2d_w_multidim_coords
class Test1Dim(tests.IrisTest):
def setUp(self):
self.lon = DimCoord(
points=[0.5, 1.5, 2.5],
bounds=[[0, 1], [1, 2], [2, 3]],
standard_name="longitude",
long_name="edge longitudes",
var_name="lon",
units="degrees",
attributes={"test": 1},
)
# Should be fine with either a DimCoord or an AuxCoord.
self.lat = AuxCoord(
points=[0.5, 2.5, 1.5],
bounds=[[0, 1], [2, 3], [1, 2]],
standard_name="latitude",
long_name="edge_latitudes",
var_name="lat",
units="degrees",
attributes={"test": 1},
)
def create(self):
return Mesh.from_coords(self.lon, self.lat)
def test_dimensionality(self):
mesh = self.create()
self.assertEqual(1, mesh.topology_dimension)
self.assertArrayEqual(
[0, 1, 1, 2, 2, 3], mesh.node_coords.node_x.points
)
self.assertArrayEqual(
[0, 1, 2, 3, 1, 2], mesh.node_coords.node_y.points
)
self.assertArrayEqual([0.5, 1.5, 2.5], mesh.edge_coords.edge_x.points)
self.assertArrayEqual([0.5, 2.5, 1.5], mesh.edge_coords.edge_y.points)
self.assertIsNone(getattr(mesh, "face_coords", None))
for conn_name in Connectivity.UGRID_CF_ROLES:
conn = getattr(mesh, conn_name, None)
if conn_name == "edge_node_connectivity":
self.assertArrayEqual([[0, 1], [2, 3], [4, 5]], conn.indices)
else:
self.assertIsNone(conn)
def test_node_metadata(self):
mesh = self.create()
pairs = [
(self.lon, mesh.node_coords.node_x),
(self.lat, mesh.node_coords.node_y),
]
for expected_coord, actual_coord in pairs:
for attr in ("standard_name", "long_name", "units", "attributes"):
expected = getattr(expected_coord, attr)
actual = getattr(actual_coord, attr)
self.assertEqual(expected, actual)
self.assertIsNone(actual_coord.var_name)
def test_centre_metadata(self):
mesh = self.create()
pairs = [
(self.lon, mesh.edge_coords.edge_x),
(self.lat, mesh.edge_coords.edge_y),
]
for expected_coord, actual_coord in pairs:
for attr in ("standard_name", "long_name", "units", "attributes"):
expected = getattr(expected_coord, attr)
actual = getattr(actual_coord, attr)
self.assertEqual(expected, actual)
self.assertIsNone(actual_coord.var_name)
def test_mesh_metadata(self):
# Inappropriate to guess these values from the input coords.
mesh = self.create()
for attr in (
"standard_name",
"long_name",
"var_name",
):
self.assertIsNone(getattr(mesh, attr))
self.assertTrue(mesh.units.is_unknown())
self.assertDictEqual({}, mesh.attributes)
def test_lazy(self):
self.lon = AuxCoord.from_coord(self.lon)
self.lon = self.lon.copy(
self.lon.lazy_points(), self.lon.lazy_bounds()
)
self.lat = self.lat.copy(
self.lat.lazy_points(), self.lat.lazy_bounds()
)
mesh = self.create()
for coord in list(mesh.all_coords):
if coord is not None:
self.assertTrue(coord.has_lazy_points())
for conn in list(mesh.all_connectivities):
if conn is not None:
self.assertTrue(conn.has_lazy_indices())
def test_coord_shape_mismatch(self):
lat_orig = self.lat.copy(self.lat.points, self.lat.bounds)
self.lat = lat_orig.copy(
points=lat_orig.points, bounds=np.tile(lat_orig.bounds, 2)
)
with self.assertRaisesRegex(
ValueError, "bounds shapes are not identical"
):
_ = self.create()
self.lat = lat_orig.copy(
points=lat_orig.points[-1], bounds=lat_orig.bounds[-1]
)
with self.assertRaisesRegex(
ValueError, "points shapes are not identical"
):
_ = self.create()
def test_reorder(self):
# Swap the coords.
self.lat, self.lon = self.lon, self.lat
mesh = self.create()
# Confirm that the coords have been swapped back to the 'correct' order.
self.assertEqual("longitude", mesh.node_coords.node_x.standard_name)
self.assertEqual("latitude", mesh.node_coords.node_y.standard_name)
def test_non_xy(self):
for coord in self.lon, self.lat:
coord.standard_name = None
lon_name, lat_name = [
coord.long_name for coord in (self.lon, self.lat)
]
# Swap the coords.
self.lat, self.lon = self.lon, self.lat
with self.assertLogs(logger, "INFO", "Unable to find 'X' and 'Y'"):
mesh = self.create()
# Confirm that the coords have not been swapped back.
self.assertEqual(lat_name, mesh.node_coords.node_x.long_name)
self.assertEqual(lon_name, mesh.node_coords.node_y.long_name)
class Test2Dim(Test1Dim):
def setUp(self):
super().setUp()
self.lon.bounds = [[0, 0.5, 1], [1, 1.5, 2], [2, 2.5, 3]]
self.lon.long_name = "triangle longitudes"
self.lat.bounds = [[0, 1, 0], [2, 3, 2], [1, 2, 1]]
self.lat.long_name = "triangle latitudes"
def test_dimensionality(self):
mesh = self.create()
self.assertEqual(2, mesh.topology_dimension)
self.assertArrayEqual(
[0, 0.5, 1, 1, 1.5, 2, 2, 2.5, 3], mesh.node_coords.node_x.points
)
self.assertArrayEqual(
[0, 1, 0, 2, 3, 2, 1, 2, 1], mesh.node_coords.node_y.points
)
self.assertIsNone(mesh.edge_coords.edge_x)
self.assertIsNone(mesh.edge_coords.edge_y)
self.assertArrayEqual([0.5, 1.5, 2.5], mesh.face_coords.face_x.points)
self.assertArrayEqual([0.5, 2.5, 1.5], mesh.face_coords.face_y.points)
for conn_name in Connectivity.UGRID_CF_ROLES:
conn = getattr(mesh, conn_name, None)
if conn_name == "face_node_connectivity":
self.assertArrayEqual(
[[0, 1, 2], [3, 4, 5], [6, 7, 8]], conn.indices
)
else:
self.assertIsNone(conn)
def test_centre_metadata(self):
mesh = self.create()
pairs = [
(self.lon, mesh.face_coords.face_x),
(self.lat, mesh.face_coords.face_y),
]
for expected_coord, actual_coord in pairs:
for attr in ("standard_name", "long_name", "units", "attributes"):
expected = getattr(expected_coord, attr)
actual = getattr(actual_coord, attr)
self.assertEqual(expected, actual)
self.assertIsNone(actual_coord.var_name)
def test_mixed_shapes(self):
self.lon = AuxCoord.from_coord(self.lon)
lon_bounds = np.array([[0, 0, 1, 1], [1, 1, 2, 2], [2, 3, 2.5, 999]])
self.lon.bounds = np.ma.masked_equal(lon_bounds, 999)
lat_bounds = np.array([[0, 1, 1, 0], [1, 2, 2, 1], [2, 2, 3, 999]])
self.lat.bounds = np.ma.masked_equal(lat_bounds, 999)
mesh = self.create()
self.assertArrayEqual(
mesh.face_node_connectivity.location_lengths(), [4, 4, 3]
)
self.assertEqual(mesh.node_coords.node_x.points[-1], 0.0)
self.assertEqual(mesh.node_coords.node_y.points[-1], 0.0)
class TestInvalidBounds(tests.IrisTest):
"""Invalid bounds not supported."""
def test_no_bounds(self):
lon = AuxCoord(points=[0.5, 1.5, 2.5])
lat = AuxCoord(points=[0, 1, 2])
with self.assertRaisesRegex(ValueError, "bounds missing from"):
_ = Mesh.from_coords(lon, lat)
def test_1_bound(self):
lon = AuxCoord(points=[0.5, 1.5, 2.5], bounds=[[0], [1], [2]])
lat = AuxCoord(points=[0, 1, 2], bounds=[[0.5], [1.5], [2.5]])
with self.assertRaisesRegex(
ValueError, r"Expected coordinate bounds.shape \(n, >=2\)"
):
_ = Mesh.from_coords(lon, lat)
class TestInvalidPoints(tests.IrisTest):
"""Only 1D coords supported."""
def test_2d_coord(self):
cube = simple_2d_w_multidim_coords()[:3, :3]
coord_1, coord_2 = cube.coords()
with self.assertRaisesRegex(
ValueError, "Expected coordinate ndim == 1"
):
_ = Mesh.from_coords(coord_1, coord_2)
| SciTools/iris | lib/iris/tests/unit/experimental/ugrid/mesh/test_Mesh__from_coords.py | Python | lgpl-3.0 | 9,316 | 0.000107 |
import win32security,win32file,win32api,ntsecuritycon,win32con
policy_handle = win32security.GetPolicyHandle('rupole',win32security.POLICY_ALL_ACCESS)
## mod_nbr, mod_time = win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyModificationInformation)
## print mod_nbr, mod_time
domain_name,dns_domain_name, dns_forest_name, domain_guid, domain_sid = \
win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyDnsDomainInformation)
print domain_name, dns_domain_name, dns_forest_name, domain_guid, domain_sid
event_audit_info=win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyAuditEventsInformation)
print event_audit_info
domain_name,sid =win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyPrimaryDomainInformation)
print domain_name, sid
domain_name,sid =win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyAccountDomainInformation)
print domain_name, sid
server_role = win32security.LsaQueryInformationPolicy(policy_handle,win32security.PolicyLsaServerRoleInformation)
print 'server role: ',server_role
win32security.LsaClose(policy_handle)
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/Demos/security/get_policy_info.py | Python | gpl-2.0 | 1,166 | 0.023156 |
# -*- mode: Python; coding: utf-8 -*-
"""For the purposes of classification, a corpus is defined as a collection
of labeled documents. Such documents might actually represent words, images,
etc.; to the classifier they are merely instances with features."""
from abc import ABCMeta, abstractmethod
from csv import reader as csv_reader
from glob import glob
from os.path import basename, dirname, split, splitext
from document import Document
class Corpus(object):
"""An abstract collection of documents."""
__metaclass__ = ABCMeta
def __init__(self, datafiles, document_class=Document):
self.documents = []
self.datafiles = glob(datafiles)
for datafile in self.datafiles:
self.load(datafile, document_class)
# Act as a mutable container for documents.
def __len__(self): return len(self.documents)
def __iter__(self): return iter(self.documents)
def __getitem__(self, key): return self.documents[key]
def __setitem__(self, key, value): self.documents[key] = value
def __delitem__(self, key): del self.documents[key]
@abstractmethod
def load(self, datafile, document_class):
"""Make labeled document instances for the data in a file."""
pass
class PlainTextFiles(Corpus):
"""A corpus contained in a collection of plain-text files."""
def load(self, datafile, document_class):
"""Make a document from a plain-text datafile. The document is labeled
using the last component of the datafile's directory."""
label = split(dirname(datafile))[-1]
with open(datafile, "r") as file:
data = file.read()
self.documents.append(document_class(data, label, datafile))
class PlainTextLines(Corpus):
"""A corpus in which each document is a line in a datafile."""
def load(self, datafile, document_class):
"""Make a document from each line of a plain text datafile.
The document is labeled using the datafile name, sans directory
and extension."""
label = splitext(basename(datafile))[0]
with open(datafile, "r") as file:
for line in file:
data = line.strip()
self.documents.append(document_class(data, label, datafile))
class NamesCorpus(PlainTextLines):
"""A collection of names, labeled by gender. See names/README for
copyright and license."""
def __init__(self, datafiles="names/*.txt", document_class=Document):
super(NamesCorpus, self).__init__(datafiles, document_class)
class CSVCorpus(Corpus):
"""A corpus encoded as a comma-separated-value (CSV) file."""
def load(self, datafile, document_class, encoding="utf-8"):
"""Make a document from each row of a CSV datafile.
Assumes data, label ordering and UTF-8 encoding."""
def unicode_csv_reader(csvfile, *args, **kwargs):
for row in csv_reader(csvfile, *args, **kwargs):
yield [unicode(cell, encoding) for cell in row]
with open(datafile, "r") as file:
for data, label in unicode_csv_reader(file):
label = label.strip().upper() # canonicalize label
self.documents.append(document_class(data, label, datafile))
class BlogsCorpus(CSVCorpus):
"""A collection of blog posts, labeled by author gender. See the paper
"Improving Gender Classification of Blog Authors" by Mukherjee and Liu
<http://www.cs.uic.edu/~liub/publications/EMNLP-2010-blog-gender.pdf>
for details and some impressive results."""
def __init__(self, datafiles="blog-gender-dataset.csv",
document_class=Document):
super(BlogsCorpus, self).__init__(datafiles, document_class)
| kahliloppenheimer/Naive-Bayes-Classifier | corpus.py | Python | mit | 3,727 | 0.00161 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Summary(models.Model):
_inherit = 'myo.summary'
annotation_ids = fields.Many2many(
'myo.annotation',
'myo_summary_annotation_rel',
'summary_id',
'annotation_id',
'Annotations'
)
class Annotation(models.Model):
_inherit = 'myo.annotation'
summary_ids = fields.Many2many(
'myo.summary',
'myo_summary_annotation_rel',
'annotation_id',
'summary_id',
'Summaries'
)
| MostlyOpen/odoo_addons | myo_summary/models/annotation.py | Python | agpl-3.0 | 1,419 | 0 |
"""Implementation of treadmill admin ldap CLI cell plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import click
from ldap3.core import exceptions as ldap_exceptions
from treadmill import admin
from treadmill import cli
from treadmill import context
from treadmill import yamlwrapper as yaml
def init():
"""Configures cell CLI group"""
# Disable too many branches warning.
#
# pylint: disable=R0912
formatter = cli.make_formatter('cell')
@click.group()
@cli.admin.ON_EXCEPTIONS
def cell():
"""Manage cell configuration"""
pass
@cell.command()
@click.option('-v', '--version', help='Version.')
@click.option('-r', '--root', help='Distro root.')
@click.option('-l', '--location', help='Cell location.')
@click.option('-u', '--username', help='Cell proid account.')
@click.option('--archive-server', help='Archive server.')
@click.option('--archive-username', help='Archive username.')
@click.option('--ssq-namespace', help='SSQ namespace.')
@click.option('-d', '--data', help='Cell specific data in YAML',
type=click.Path(exists=True, readable=True))
@click.option('--status', help='Cell status')
@click.option('-m', '--manifest', help='Load cell from manifest file.',
type=click.Path(exists=True, readable=True))
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def configure(cell, version, root, location, username, archive_server,
archive_username, ssq_namespace, data, status, manifest):
"""Create, get or modify cell configuration"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
attrs = {}
if manifest:
with io.open(manifest, 'rb') as fd:
attrs = yaml.load(stream=fd)
if version:
attrs['version'] = version
if root:
if root == '-':
root = None
attrs['root'] = root
if location:
attrs['location'] = location
if username:
attrs['username'] = username
if archive_server:
attrs['archive-server'] = archive_server
        if archive_username:
attrs['archive-username'] = archive_username
if ssq_namespace:
attrs['ssq-namespace'] = ssq_namespace
if status:
attrs['status'] = status
if data:
with io.open(data, 'rb') as fd:
attrs['data'] = yaml.load(stream=fd)
if attrs:
try:
admin_cell.create(cell, attrs)
except ldap_exceptions.LDAPEntryAlreadyExistsResult:
admin_cell.update(cell, attrs)
try:
cli.out(formatter(admin_cell.get(cell)))
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
@cell.command()
@click.option('--idx', help='Master index.',
type=click.Choice(['1', '2', '3', '4', '5']),
required=True)
@click.option('--hostname', help='Master hostname.',
required=True)
@click.option('--client-port', help='Zookeeper client port.',
type=int,
required=True)
@click.option('--kafka-client-port', help='Kafka client port.',
type=int,
required=False)
@click.option('--jmx-port', help='Zookeeper jmx port.',
type=int,
required=True)
@click.option('--followers-port', help='Zookeeper followers port.',
type=int,
required=True)
@click.option('--election-port', help='Zookeeper election port.',
type=int,
required=True)
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def insert(cell, idx, hostname, client_port, jmx_port, followers_port,
election_port, kafka_client_port):
"""Add master server to a cell"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
data = {
'idx': int(idx),
'hostname': hostname,
'zk-client-port': client_port,
'zk-jmx-port': jmx_port,
'zk-followers-port': followers_port,
'zk-election-port': election_port,
}
if kafka_client_port is not None:
data['kafka-client-port'] = kafka_client_port
attrs = {
'masters': [data]
}
try:
admin_cell.update(cell, attrs)
cli.out(formatter(admin_cell.get(cell)))
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
@cell.command()
@click.option('--idx', help='Master index.',
type=click.Choice(['1', '2', '3']),
required=True)
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def remove(cell, idx):
"""Remove master server from a cell"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
attrs = {
'masters': [{
'idx': int(idx),
'hostname': None,
'zk-client-port': None,
'zk-jmx-port': None,
'zk-followers-port': None,
'zk-election-port': None,
}]
}
try:
admin_cell.remove(cell, attrs)
cli.out(formatter(admin_cell.get(cell)))
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
@cell.command(name='list')
@cli.admin.ON_EXCEPTIONS
def _list():
"""Displays master servers"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
cells = admin_cell.list({})
cli.out(formatter(cells))
@cell.command()
@click.argument('cell')
@cli.admin.ON_EXCEPTIONS
def delete(cell):
"""Delete a cell"""
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
try:
admin_cell.delete(cell)
except ldap_exceptions.LDAPNoSuchObjectResult:
click.echo('Cell does not exist: %s' % cell, err=True)
del delete
del _list
del configure
del insert
del remove
return cell
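# Hedged usage note (added for illustration; not part of the original module).
# Based on the click options defined above, the group is likely invoked roughly
# as follows (the exact top-level command name is an assumption):
#
#   treadmill admin ldap cell configure mycell --version 1.2.3 --location us-east
#   treadmill admin ldap cell insert mycell --idx 1 --hostname zk1.example.com \
#       --client-port 2181 --jmx-port 8989 --followers-port 2888 --election-port 3888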
| bretttegart/treadmill | lib/python/treadmill/cli/admin/ldap/cell.py | Python | apache-2.0 | 6,420 | 0 |
#!/usr/bin/env python
#coding: utf-8
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return a list of integers
def postorderTraversal(self, root):
        # Iterative postorder: visit root-right-left with a stack, then reverse
        # the result to obtain left-right-root order.
        result, stack = [], []
        if root:
            stack.append(root)
        while stack:
            node = stack.pop()
            result.append(node.val)
            if node.left:
                stack.append(node.left)
            if node.right:
                stack.append(node.right)
        return result[::-1]
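if __name__ == "__main__":
    # Hedged demo (added for illustration; not part of the original stub):
    # the tree 1 -> right 2 -> left 3 should yield the postorder [3, 2, 1].
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(Solution().postorderTraversal(root))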
| wh-acmer/minixalpha-acm | LeetCode/Python/binary_tree_postorder_traversal_iter.py | Python | mit | 324 | 0.006173 |
'''
VERIDIC - Towards a centralized access control system
Copyright (C) 2011 Mikael Ates
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from authentic2.attribute_aggregator.core import get_profile_field_name_from_definition, \
get_definition_from_profile_field_name
logger = logging.getLogger(__name__)
SOURCE_NAME = 'USER_PROFILE'
def get_attributes(user, definitions=None, source=None, auth_source=False, **kwargs):
'''
Return attributes dictionnary
Dictionnary format:
attributes = dict()
data_from_source = list()
a1 = dict()
a1['oid'] = definition_name
Or
a1['definition'] = definition_name
definition may be the definition name like 'gn'
or an alias like 'givenName'
Or
a1['name'] = attribute_name_in_ns
a1['namespace'] = ns_name
a1['values'] = list_of_values
data_from_source.append(a1)
...
data_from_source.append(a2)
attributes[source_name] = data_from_source
First attempt on 'definition' key.
Else, definition is searched by 'name' and 'namespece' keys.
'''
from models import AttributeSource
try:
AttributeSource.objects.get(name=SOURCE_NAME)
except:
logger.debug('get_attributes: \
Profile source not configured')
return None
if source and source.name != SOURCE_NAME:
logger.debug('get_attributes: '
'The required source %s is not user profile' % source)
return None
attributes = dict()
data = []
try:
field_names = set()
user_profile_fields = getattr(user, 'USER_PROFILE', [])
if not user_profile_fields:
user_profile_fields = user._meta.get_all_field_names()
for field in user_profile_fields:
if isinstance(field, (tuple, list)):
field_names.add(field[0])
else:
field_names.add(field)
fields = []
if definitions:
for definition in definitions:
logger.debug('get_attributes: looking for %s' % definition)
field_name = get_profile_field_name_from_definition(definition)
if not field_name:
'''
Profile model may be extended without modifying the
mapping file if the attribute name is the same as the
definition
'''
logger.debug('get_attributes: '
'Field name will be the definition')
field_name = definition
if field_name in field_names:
fields.append((field_name, definition))
else:
logger.debug('get_attributes: Field not found in profile')
else:
fields = [(field_name,
get_definition_from_profile_field_name(field_name)) \
for field_name \
in field_names \
if get_definition_from_profile_field_name(field_name)]
for field_name, definition in fields:
logger.debug('get_attributes: found field %s' % (field_name,))
value = getattr(user, field_name, None)
if value:
if callable(value):
value = value()
logger.debug('get_attributes: found value %s' % value)
attr = {}
attr['definition'] = definition
if not isinstance(value, basestring) and hasattr(value,
'__iter__'):
attr['values'] = map(unicode, value)
else:
attr['values'] = [unicode(value)]
data.append(attr)
else:
logger.debug('get_attributes: no value found')
except (SiteProfileNotAvailable, ObjectDoesNotExist):
logger.debug('get_attributes: No user profile')
return None
attributes[SOURCE_NAME] = data
return attributes
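# Hedged illustration (added; not part of the original module): for a user whose
# profile exposes a 'first_name' field, the returned mapping would look roughly
# like
#
#   {'USER_PROFILE': [{'definition': 'first_name', 'values': [u'Alice']}]}
#
# i.e. one entry per resolvable field, keyed by the SOURCE_NAME defined above.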
| BryceLohr/authentic | authentic2/attribute_aggregator/user_profile.py | Python | agpl-3.0 | 4,952 | 0.002827 |
#!/usr/bin/env python
import warnings as _warnings
_warnings.resetwarnings()
_warnings.filterwarnings('error')
# BEGIN INCLUDE
import tempfile
from tdi import html
file_1 = tempfile.NamedTemporaryFile()
try:
file_2 = tempfile.NamedTemporaryFile()
try:
file_1.write("""<html lang="en"><body tdi:overlay="huh">yay.</body></html>""")
file_1.flush()
file_2.write("""<html><body tdi:overlay="huh">file 2!</body></html>""")
file_2.flush()
template = html.from_files([file_1.name, file_2.name])
finally:
file_2.close()
finally:
file_1.close()
template.render()
| ndparker/tdi | docs/examples/loading2.py | Python | apache-2.0 | 623 | 0.004815 |
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import division
from __future__ import print_function
import numpy as np
from yaff import *
from yaff.sampling.test.common import get_ff_water32, get_ff_water, get_ff_bks
def test_hessian_partial_water32():
ff = get_ff_water32()
select = [1, 2, 3, 14, 15, 16]
hessian = estimate_cart_hessian(ff, select=select)
assert hessian.shape == (18, 18)
def test_hessian_full_water():
ff = get_ff_water()
hessian = estimate_cart_hessian(ff)
assert hessian.shape == (9, 9)
evals = np.linalg.eigvalsh(hessian)
print(evals)
assert sum(abs(evals) < 1e-10) == 3
def test_hessian_full_x2():
K, d = np.random.uniform(1.0, 2.0, 2)
system = System(
numbers=np.array([1, 1]),
pos=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, d]]),
ffatypes=['H', 'H'],
bonds=np.array([[0, 1]]),
)
part = ForcePartValence(system)
part.add_term(Harmonic(K, d, Bond(0, 1)))
ff = ForceField(system, [part])
hessian = estimate_cart_hessian(ff)
evals = np.linalg.eigvalsh(hessian)
assert abs(evals[:-1]).max() < 1e-5
assert abs(evals[-1] - 2*K) < 1e-5
def test_elastic_water32():
ff = get_ff_water32()
elastic = estimate_elastic(ff, do_frozen=True)
assert elastic.shape == (6, 6)
def test_bulk_elastic_bks():
ff = get_ff_bks(smooth_ei=True, reci_ei='ignore')
system = ff.system
lcs = np.array([
[1, 1, 0],
[0, 0, 1],
])
system.align_cell(lcs)
ff.update_rvecs(system.cell.rvecs)
opt = QNOptimizer(FullCellDOF(ff, gpos_rms=1e-6, grvecs_rms=1e-6))
opt.run()
rvecs0 = system.cell.rvecs.copy()
vol0 = system.cell.volume
pos0 = system.pos.copy()
e0 = ff.compute()
elastic = estimate_elastic(ff)
assert abs(pos0 - system.pos).max() < 1e-10
assert abs(rvecs0 - system.cell.rvecs).max() < 1e-10
assert abs(vol0 - system.cell.volume) < 1e-10
assert elastic.shape == (6, 6)
# Make estimates of the same matrix elements with a simplistic approach
eps = 1e-3
from nose.plugins.skip import SkipTest
raise SkipTest('Double check elastic constant implementation')
# A) stretch in the Z direction
deform = np.array([1, 1, 1-eps])
rvecs1 = rvecs0*deform
pos1 = pos0*deform
ff.update_rvecs(rvecs1)
ff.update_pos(pos1)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e1 = ff.compute()
deform = np.array([1, 1, 1+eps])
rvecs2 = rvecs0*deform
pos2 = pos0*deform
ff.update_rvecs(rvecs2)
ff.update_pos(pos2)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e2 = ff.compute()
C = (e1 + e2 - 2*e0)/(eps**2)/vol0
assert abs(C - elastic[2,2]) < C*0.02
# B) stretch in the X direction
deform = np.array([1-eps, 1, 1])
rvecs1 = rvecs0*deform
pos1 = pos0*deform
ff.update_rvecs(rvecs1)
ff.update_pos(pos1)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e1 = ff.compute()
deform = np.array([1+eps, 1, 1])
rvecs2 = rvecs0*deform
pos2 = pos0*deform
ff.update_rvecs(rvecs2)
ff.update_pos(pos2)
opt = QNOptimizer(CartesianDOF(ff, gpos_rms=1e-6))
opt.run()
e2 = ff.compute()
C = (e1 + e2 - 2*e0)/(eps**2)/vol0
assert abs(C - elastic[0,0]) < C*0.02
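# Note (added for clarity): the finite-difference checks above estimate each
# diagonal elastic-matrix element as a central second difference of the energy,
#   C ~= (E(+eps) + E(-eps) - 2*E(0)) / (eps**2 * vol0),
# i.e. the curvature of the energy per unit volume along the corresponding
# uniaxial strain, which is what estimate_elastic should reproduce.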
| molmod/yaff | yaff/sampling/test/test_harmonic.py | Python | gpl-3.0 | 4,266 | 0.000469 |
# Copyright (C) 2013 Matthew C. Zwier and Lillian T. Chong
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division; __metaclass__=type
import logging, warnings
log = logging.getLogger(__name__)
import itertools, re
from itertools import imap
import numpy, h5py
import west, westpa
from oldtools.aframe import AnalysisMixin
from west import Segment
from oldtools.miscfn import parse_int_list
class WESTDataReaderMixin(AnalysisMixin):
'''A mixin for analysis requiring access to the HDF5 files generated during a WEST run.'''
def __init__(self):
super(WESTDataReaderMixin,self).__init__()
self.data_manager = None
self.west_h5name = None
# Whether pcoord caching is active
self.__cache_pcoords = False
# Cached items
self.__c_summary = None
self.__c_iter_groups = dict()
self.__c_seg_id_ranges = dict()
self.__c_seg_indices = dict()
self.__c_wtg_parent_arrays = dict()
self.__c_parent_arrays = dict()
self.__c_pcoord_arrays = dict()
self.__c_pcoord_datasets = dict()
def add_args(self, parser, upcall = True):
if upcall:
try:
upcall = super(WESTDataReaderMixin,self).add_args
except AttributeError:
pass
else:
upcall(parser)
group = parser.add_argument_group('WEST input data options')
group.add_argument('-W', '--west-data', dest='west_h5name', metavar='WEST_H5FILE',
help='''Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in west.cfg).''')
def process_args(self, args, upcall = True):
if args.west_h5name:
self.west_h5name = args.west_h5name
else:
westpa.rc.config.require(['west','data','west_data_file'])
self.west_h5name = westpa.rc.config.get_path(['west','data','west_data_file'])
westpa.rc.pstatus("Using WEST data from '{}'".format(self.west_h5name))
self.data_manager = westpa.rc.get_data_manager()
self.data_manager.backing_file = self.west_h5name
self.data_manager.open_backing(mode='r')
if upcall:
try:
upfunc = super(WESTDataReaderMixin,self).process_args
except AttributeError:
pass
else:
upfunc(args)
def clear_run_cache(self):
del self.__c_summary
        del self.__c_iter_groups, self.__c_seg_id_ranges, self.__c_seg_indices, self.__c_parent_arrays, self.__c_wtg_parent_arrays
del self.__c_pcoord_arrays, self.__c_pcoord_datasets
self.__c_summary = None
self.__c_iter_groups = dict()
self.__c_seg_id_ranges = dict()
self.__c_seg_indices = dict()
self.__c_parent_arrays = dict()
self.__c_wtg_parent_arrays = dict()
self.__c_pcoord_arrays = dict()
self.__c_pcoord_datasets = dict()
@property
def cache_pcoords(self):
'''Whether or not to cache progress coordinate data. While caching this data
can significantly speed up some analysis operations, this requires
copious RAM.
Setting this to False when it was formerly True will release any cached data.
'''
return self.__cache_pcoords
@cache_pcoords.setter
def cache_pcoords(self, cache):
self.__cache_pcoords = cache
if not cache:
del self.__c_pcoord_arrays
self.__c_pcoord_arrays = dict()
def get_summary_table(self):
if self.__c_summary is None:
self.__c_summary = self.data_manager.we_h5file['/summary'][...]
return self.__c_summary
def get_iter_group(self, n_iter):
'''Return the HDF5 group corresponding to ``n_iter``'''
try:
return self.__c_iter_groups[n_iter]
except KeyError:
            iter_group = self.__c_iter_groups[n_iter] = self.data_manager.get_iter_group(n_iter)
return iter_group
def get_segments(self, n_iter, include_pcoords = True):
'''Return all segments present in iteration n_iter'''
return self.get_segments_by_id(n_iter, self.get_seg_ids(n_iter, None), include_pcoords)
def get_segments_by_id(self, n_iter, seg_ids, include_pcoords = True):
'''Get segments from the data manager, employing caching where possible'''
if len(seg_ids) == 0: return []
seg_index = self.get_seg_index(n_iter)
all_wtg_parent_ids = self.get_wtg_parent_array(n_iter)
segments = []
if include_pcoords:
pcoords = self.get_pcoords(n_iter, seg_ids)
for (isegid, seg_id) in enumerate(seg_ids):
row = seg_index[seg_id]
parents_offset = row['wtg_offset']
n_parents = row['wtg_n_parents']
segment = Segment(seg_id = seg_id,
n_iter = n_iter,
status = row['status'],
endpoint_type = row['endpoint_type'],
walltime = row['walltime'],
cputime = row['cputime'],
weight = row['weight'],
)
if include_pcoords:
segment.pcoord = pcoords[isegid]
parent_ids = all_wtg_parent_ids[parents_offset:parents_offset+n_parents]
segment.wtg_parent_ids = {long(parent_id) for parent_id in parent_ids}
segment.parent_id = long(parent_ids[0])
segments.append(segment)
return segments
def get_children(self, segment, include_pcoords=True):
parents = self.get_parent_array(segment.n_iter+1)
seg_ids = self.get_seg_ids(segment.n_iter+1, parents == segment.seg_id)
return self.get_segments_by_id(segment.n_iter+1, seg_ids, include_pcoords)
def get_seg_index(self, n_iter):
try:
return self.__c_seg_indices[n_iter]
except KeyError:
seg_index = self.__c_seg_indices[n_iter] = self.get_iter_group(n_iter)['seg_index'][...]
return seg_index
def get_wtg_parent_array(self, n_iter):
try:
return self.__c_wtg_parent_arrays[n_iter]
except KeyError:
parent_array = self.__c_wtg_parent_arrays[n_iter] = self.get_iter_group(n_iter)['wtgraph'][...]
return parent_array
def get_parent_array(self, n_iter):
try:
return self.__c_parent_arrays[n_iter]
except KeyError:
parent_array = self.get_seg_index(n_iter)['parent_id']
self.__c_parent_arrays[n_iter] = parent_array
return parent_array
def get_pcoord_array(self, n_iter):
try:
return self.__c_pcoord_arrays[n_iter]
except KeyError:
pcoords = self.__c_pcoord_arrays[n_iter] = self.get_iter_group(n_iter)['pcoord'][...]
return pcoords
def get_pcoord_dataset(self, n_iter):
try:
return self.__c_pcoord_datasets[n_iter]
except KeyError:
pcoord_ds = self.__c_pcoord_datasets[n_iter] = self.get_iter_group(n_iter)['pcoord']
return pcoord_ds
def get_pcoords(self, n_iter, seg_ids):
if self.__cache_pcoords:
pcarray = self.get_pcoord_array(n_iter)
return [pcarray[seg_id,...] for seg_id in seg_ids]
else:
return self.get_pcoord_dataset(n_iter)[list(seg_ids),...]
def get_seg_ids(self, n_iter, bool_array = None):
try:
all_ids = self.__c_seg_id_ranges[n_iter]
except KeyError:
all_ids = self.__c_seg_id_ranges[n_iter] = numpy.arange(0,len(self.get_seg_index(n_iter)), dtype=numpy.uint32)
if bool_array is None:
return all_ids
else:
seg_ids = all_ids[bool_array]
try:
if len(seg_ids) == 0: return []
except TypeError:
# Not iterable, for some bizarre reason
return [seg_ids]
else:
return seg_ids
def get_created_seg_ids(self, n_iter):
'''Return a list of seg_ids corresponding to segments which were created for the given iteration (are not
continuations).'''
# Created segments have parent_id < 0
parent_ids = self.get_parent_array(n_iter)
return self.get_seg_ids(n_iter, parent_ids < 0)
def max_iter_segs_in_range(self, first_iter, last_iter):
'''Return the maximum number of segments present in any iteration in the range selected'''
n_particles = self.get_summary_table()['n_particles']
return n_particles[first_iter-1:last_iter].max()
def total_segs_in_range(self, first_iter, last_iter):
'''Return the total number of segments present in all iterations in the range selected'''
n_particles = self.get_summary_table()['n_particles']
return n_particles[first_iter-1:last_iter].sum()
def get_pcoord_len(self, n_iter):
'''Get the length of the progress coordinate array for the given iteration.'''
pcoord_ds = self.get_pcoord_dataset(n_iter)
return pcoord_ds.shape[1]
def get_total_time(self, first_iter = None, last_iter = None, dt=None):
'''Return the total amount of simulation time spanned between first_iter and last_iter (inclusive).'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
dt = dt or getattr(self, 'dt', 1.0)
total_len = 0
for n_iter in xrange(first_iter, last_iter+1):
total_len += self.get_pcoord_len(n_iter) - 1
return total_len * dt
class ExtDataReaderMixin(AnalysisMixin):
'''An external data reader, primarily designed for reading brute force data, but also suitable
for any auxiliary datasets required for analysis.
'''
default_chunksize = 8192
def __init__(self):
super(ExtDataReaderMixin,self).__init__()
self.ext_input_nargs = '+'
self.ext_input_filenames = []
self.ext_input_chunksize = self.default_chunksize
self.ext_input_usecols = None
self.ext_input_comment_regexp = None
self.ext_input_sep_regexp = None
def add_args(self, parser, upcall = True):
if upcall:
try:
upcall = super(ExtDataReaderMixin,self).add_args
except AttributeError:
pass
else:
upcall(parser)
input_options = parser.add_argument_group('external data input options')
input_options.add_argument('datafiles', nargs=self.ext_input_nargs, metavar='DATAFILE',
help='''Data file(s) to analyze, either text or Numpy (.npy or .npz) format.
Uncompressed numpy files will be memory-mapped, allowing analysis of data larger than
available RAM (though not larger than the available address space).''')
input_options.add_argument('--usecols', dest='usecols', metavar='COLUMNS', type=parse_int_list,
help='''Use only the given COLUMNS from the input file(s), e.g. "0", "0,1",
"0:5,7,9:10".''')
input_options.add_argument('--chunksize', dest='chunksize', type=long, default=self.default_chunksize,
help='''Process input data in blocks of size CHUNKSIZE. This will only reduce memory
requirements when using uncompressed Numpy (.npy) format input. (Default: %(default)d.)''')
def process_args(self, args, upcall = True):
if args.usecols:
westpa.rc.pstatus('Using only the following columns from external input: {!s}'.format(args.usecols))
self.ext_input_usecols = args.usecols
else:
self.ext_input_usecols = None
self.ext_input_filenames = args.datafiles
self.ext_input_chunksize = args.chunksize or self.default_chunksize
if upcall:
try:
upfunc = super(ExtDataReaderMixin,self).process_args
except AttributeError:
pass
else:
upfunc(args)
def is_npy(self, filename):
with file(filename, 'rb') as fileobj:
first_bytes = fileobj.read(len(numpy.lib.format.MAGIC_PREFIX))
if first_bytes == numpy.lib.format.MAGIC_PREFIX:
return True
else:
return False
def load_npy_or_text(self, filename):
'''Load an array from an existing .npy file, or read a text file and
convert to a NumPy array. In either case, return a NumPy array. If a
pickled NumPy dataset is found, memory-map it read-only. If the specified
file does not contain a pickled NumPy array, attempt to read the file using
numpy.loadtxt(filename).'''
if self.is_npy(filename):
return numpy.load(filename, 'r')
else:
return numpy.loadtxt(filename)
def text_to_h5dataset(self, fileobj, group, dsname, dtype=numpy.float64,
skiprows=0, usecols=None,
chunksize=None):
'''Read text-format data from the given filename or file-like object ``fileobj`` and write to a newly-created dataset
called ``dsname`` in the HDF5 group ``group``. The data is stored as type ``dtype``. By default, the shape is
taken as (number of lines, number of columns); columns can be omitted by specifying a list for ``usecols``,
and lines can be skipped by using ``skiprows``. Data is read in chunks of ``chunksize`` rows.'''
try:
fileobj.readline
except AttributeError:
fileobj = file(fileobj, 'rt')
        usecols = usecols or self.ext_input_usecols
chunksize = chunksize or self.ext_input_chunksize
linenumber = 0
for iskip in xrange(skiprows or 0):
fileobj.readline()
linenumber += 1
nrows = 0
irow = 0
ncols_input = None # number of columns in input
ncols_store = None # number of columns to store
databuffer = None
dataset = None
re_split_comments = self.ext_input_comment_regexp
re_split_fields = self.ext_input_sep_regexp
for line in fileobj:
linenumber += 1
# Discard comments and extraneous whitespace
if re_split_comments is not None:
record_text = re_split_comments.split(line, 1)[0].strip()
else:
record_text = line.split('#', 1)[0].strip()
if not record_text:
continue
if re_split_fields is not None:
fields = re_split_fields.split(record_text)
else:
fields = record_text.split()
            # Check that the input size hasn't changed (blank lines excluded)
if not ncols_input:
ncols_input = len(fields)
elif len(fields) != ncols_input:
raise ValueError('expected {:d} columns at line {:d}, but found {:d}'
.format(ncols_input, linenumber, len(fields)))
# If this is the first time through the loop, allocate temporary storage
if not ncols_store:
                ncols_store = len(usecols) if usecols else ncols_input
databuffer = numpy.empty((chunksize, ncols_store), dtype)
dataset = group.create_dataset(dsname,
shape=(0,ncols_store), maxshape=(None,ncols_store), chunks=(chunksize,ncols_store),
dtype=dtype)
if usecols:
for (ifield,iifield) in enumerate(usecols):
databuffer[irow,ifield] = dtype(fields[iifield])
else:
for (ifield, field) in enumerate(fields):
databuffer[irow,ifield] = dtype(field)
nrows+=1
irow+=1
# Flush to HDF5 if necessary
if irow == chunksize:
westpa.rc.pstatus('\r Read {:d} rows'.format(nrows), end='')
westpa.rc.pflush()
dataset.resize((nrows, ncols_store))
dataset[-irow:] = databuffer
irow = 0
# Flush last bit
if irow > 0:
dataset.resize((nrows, ncols_store))
dataset[-irow:] = databuffer[:irow]
westpa.rc.pstatus('\r Read {:d} rows'.format(nrows))
westpa.rc.pflush()
def npy_to_h5dataset(self, array, group, dsname, usecols=None, chunksize=None):
'''Store the given array into a newly-created dataset named ``dsname`` in the HDF5 group
``group``, optionally only storing a subset of columns. Data is written ``chunksize`` rows at a time,
allowing very large memory-mapped arrays to be copied.'''
usecols = usecols or self.ext_input_usecols
chunksize = chunksize or self.ext_input_chunksize
if usecols:
shape = (len(array),) + array[0][usecols].shape[1:]
else:
shape = array.shape
if len(shape) == 1:
shape = shape + (1,)
maxlen = len(array)
mw = len(str(maxlen))
dataset = group.create_dataset(dsname, shape=shape, dtype=array.dtype)
if usecols:
for istart in xrange(0,maxlen,chunksize):
iend = min(istart+chunksize,maxlen)
dataset[istart:iend] = array[istart:iend, usecols]
westpa.rc.pstatus('\r Read {:{mw}d}/{:>{mw}d} rows'.format(iend,maxlen, mw=mw), end='')
westpa.rc.pflush()
else:
            for istart in xrange(0,maxlen,chunksize):
                iend = min(istart+chunksize,maxlen)
                dataset[istart:iend] = array[istart:iend]
westpa.rc.pstatus('\r Read {:{mw}d}/{:>{mw}d} rows'.format(iend,maxlen, mw=mw), end='')
westpa.rc.pflush()
westpa.rc.pstatus()
class BFDataManager(AnalysisMixin):
'''A class to manage brute force trajectory data. The primary purpose is to read in and
manage brute force progress coordinate data for one or more trajectories. The trajectories need not
be the same length, but they do need to have the same time spacing for progress coordinate values.'''
traj_index_dtype = numpy.dtype( [ ('pcoord_len', numpy.uint64),
('source_data', h5py.new_vlen(str)) ] )
def __init__(self):
super(BFDataManager,self).__init__()
self.bf_h5name = None
self.bf_h5file = None
def add_args(self, parser, upcall = True):
if upcall:
try:
upcall = super(BFDataManager,self).add_args
except AttributeError:
pass
else:
upcall(parser)
group = parser.add_argument_group('brute force input data options')
group.add_argument('-B', '--bfdata', '--brute-force-data', dest='bf_h5name', metavar='BF_H5FILE', default='bf_system.h5',
help='''Brute force data is/will be stored in BF_H5FILE (default: %(default)s).''')
def process_args(self, args, upcall = True):
self.bf_h5name = args.bf_h5name
westpa.rc.pstatus("Using brute force data from '{}'".format(self.bf_h5name))
if upcall:
try:
upfunc = super(BFDataManager,self).process_args
except AttributeError:
pass
else:
upfunc(args)
def _get_traj_group_name(self, traj_id):
return 'traj_{:09d}'.format(traj_id)
def update_traj_index(self, traj_id, pcoord_len, source_data):
self.bf_h5file['traj_index'][traj_id] = (pcoord_len, source_data)
def get_traj_group(self, traj_id):
return self.bf_h5file[self._get_traj_group_name(traj_id)]
def create_traj_group(self):
new_traj_id = self.get_n_trajs()
group = self.bf_h5file.create_group(self._get_traj_group_name(new_traj_id))
self.bf_h5file['traj_index'].resize((new_traj_id+1,))
return (new_traj_id, group)
def get_n_trajs(self):
return self.bf_h5file['traj_index'].shape[0]
def get_traj_len(self,traj_id):
return self.bf_h5file['traj_index'][traj_id]['pcoord_len']
def get_max_traj_len(self):
return self.bf_h5file['traj_index']['pcoord_len'].max()
def get_pcoord_array(self, traj_id):
return self.get_traj_group(traj_id)['pcoord'][...]
def get_pcoord_dataset(self, traj_id):
return self.get_traj_group(traj_id)['pcoord']
def require_bf_h5file(self):
if self.bf_h5file is None:
assert self.bf_h5name
self.bf_h5file = h5py.File(self.bf_h5name)
try:
self.bf_h5file['traj_index']
except KeyError:
# A new file; create the trajectory index
self.bf_h5file.create_dataset('traj_index', shape=(0,), maxshape=(None,), dtype=self.traj_index_dtype)
return self.bf_h5file
def close_bf_h5file(self):
if self.bf_h5file is not None:
self.bf_h5file.close()
self.bf_h5file = None
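# Hedged usage sketch (added; not part of the original module): these mixins are
# designed to be combined into a concrete analysis tool, e.g.
#
#   class MyAnalysis(WESTDataReaderMixin, SomeOtherMixin, BaseTool):
#       ...
#
# After add_args()/process_args() have run (opening the WEST HDF5 file),
# self.get_segments(n_iter), self.get_pcoords(n_iter, seg_ids), etc. become
# available to the tool. The class and base names above are placeholders.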
| nrego/westpa | src/oldtools/aframe/data_reader.py | Python | gpl-3.0 | 22,795 | 0.011099 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weibull bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Weibull",
]
class Weibull(bijector.Bijector):
"""Compute `Y = g(X) = 1 - exp((-X / scale) ** concentration), X >= 0`.
This bijector maps inputs from `[0, inf]` to [0, 1]`. The inverse of the
bijector applied to a uniform random variable `X ~ U(0, 1) gives back a
random variable with the
[Weibull distribution](https://en.wikipedia.org/wiki/Weibull_distribution):
```none
Y ~ Weibull(scale, concentration)
  pdf(y; scale, concentration, y >= 0) = (concentration / scale) * (
    y / scale) ** (concentration - 1) * exp(
      -(y / scale) ** concentration)
```
"""
def __init__(self,
scale=1.,
concentration=1.,
event_ndims=0,
validate_args=False,
name="weibull"):
"""Instantiates the `Weibull` bijector.
Args:
scale: Positive Float-type `Tensor` that is the same dtype and is
broadcastable with `concentration`.
This is `l` in `Y = g(X) = 1 - exp((-x / l) ** k)`.
concentration: Positive Float-type `Tensor` that is the same dtype and is
broadcastable with `scale`.
This is `k` in `Y = g(X) = 1 - exp((-x / l) ** k)`.
event_ndims: Python scalar indicating the number of dimensions associated
with a particular draw from the distribution.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[scale, concentration]):
self._scale = ops.convert_to_tensor(scale, name="scale")
self._concentration = ops.convert_to_tensor(
concentration, name="concentration")
check_ops.assert_same_float_dtype([self._scale, self._concentration])
if validate_args:
self._scale = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._scale,
message="Argument scale was not positive")
], self._scale)
self._concentration = control_flow_ops.with_dependencies([
check_ops.assert_positive(
self._concentration,
message="Argument concentration was not positive")
], self._concentration)
super(Weibull, self).__init__(
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
@property
def scale(self):
"""The `l` in `Y = g(X) = 1 - exp((-x / l) ** k)`."""
return self._scale
@property
def concentration(self):
"""The `k` in `Y = g(X) = 1 - exp((-x / l) ** k)`."""
return self._concentration
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
return -math_ops.expm1(-((x / self.scale) ** self.concentration))
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
return self.scale * (-math_ops.log1p(-y)) ** (1 / self.concentration)
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
event_dims = self._event_dims_tensor(y)
return math_ops.reduce_sum(
-math_ops.log1p(-y) +
(1 / self.concentration - 1) * math_ops.log(-math_ops.log1p(-y)) +
math_ops.log(self.scale / self.concentration),
axis=event_dims)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
event_dims = self._event_dims_tensor(x)
return math_ops.reduce_sum(
-(x / self.scale) ** self.concentration +
(self.concentration - 1) * math_ops.log(x) +
math_ops.log(self.concentration) +
-self.concentration * math_ops.log(self.scale),
axis=event_dims)
def _maybe_assert_valid_x(self, x):
if not self.validate_args:
return x
is_valid = check_ops.assert_non_negative(
x,
message="Forward transformation input must be at least {}.".format(0))
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_positive = check_ops.assert_non_negative(
y, message="Inverse transformation input must be greater than 0.")
less_than_one = check_ops.assert_less_equal(
y, constant_op.constant(1., y.dtype),
message="Inverse transformation input must be less than or equal to 1.")
return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
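# Hedged usage sketch (added; not part of the original module). Evaluating the
# resulting tensors requires a TF session; the scale/concentration values below
# are illustrative only:
#
#   bijector = Weibull(scale=2., concentration=1.5)
#   y = bijector.forward([0.5, 1.0, 2.0])   # values in [0, 1)
#   x = bijector.inverse(y)                  # recovers the original inputs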
| alistairlow/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/weibull_impl.py | Python | apache-2.0 | 5,615 | 0.003384 |
from test_HMM import *
| becxer/pytrain | test_pytrain/test_HMM/__init__.py | Python | mit | 23 | 0 |
from django.conf.urls.defaults import patterns, url, include
from . import views
urlpatterns = patterns(
'',
url(r'^browserid/mozilla/$', views.mozilla_browserid_verify,
name='mozilla_browserid_verify'),
    url(r'^browserid/', include('django_browserid.urls')),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'},
name='logout'),
)
| AdrianGaudebert/socorro-crashstats | crashstats/auth/urls.py | Python | mpl-2.0 | 385 | 0 |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
class UIException(Exception):
pass
class ServiceError(Exception):
pass
class NoRequestedProtocols(UIException):
"""
    This exception is thrown when the service provides streams,
but not using any accepted protocol (as decided by
options.stream_prio).
"""
def __init__(self, requested, found):
"""
The constructor takes two mandatory parameters, requested
and found. Both should be lists. requested is the protocols
we want and found is the protocols that can be used to
access the stream.
"""
self.requested = requested
self.found = found
super(NoRequestedProtocols, self).__init__(
"None of the provided protocols (%s) are in "
"the current list of accepted protocols (%s)" % (
self.found, self.requested
)
)
def __repr__(self):
return "NoRequestedProtocols(requested=%s, found=%s)" % (
self.requested, self.found)
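if __name__ == "__main__":
    # Hedged demo (added for illustration; not part of the original module):
    # the protocol names below are placeholders.
    try:
        raise NoRequestedProtocols(requested=["hls"], found=["hds", "http"])
    except NoRequestedProtocols as exc:
        print(exc)  # "None of the provided protocols (...) are in the current list ..."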
| qnorsten/svtplay-dl | lib/svtplay_dl/error.py | Python | mit | 1,137 | 0.002639 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shuup import configuration
from shuup.api.admin_module.views.permissions import APIPermissionView
from shuup.api.permissions import make_permission_config_key, PermissionLevel
from shuup.core import cache
from shuup.core.api.users import UserViewSet
from shuup.testing.factories import get_default_shop
from shuup.testing.utils import apply_request_middleware
def setup_function(fn):
cache.clear()
@pytest.mark.django_db
def test_consolidate_objects(rf):
get_default_shop()
# just visit to make sure GET is ok
request = apply_request_middleware(rf.get("/"))
response = APIPermissionView.as_view()(request)
assert response.status_code == 200
perm_key = make_permission_config_key(UserViewSet())
assert configuration.get(None, perm_key) is None
# now post the form to see what happens
request = apply_request_middleware(rf.post("/", {perm_key: PermissionLevel.ADMIN}))
response = APIPermissionView.as_view()(request)
assert response.status_code == 302 # good
assert int(configuration.get(None, perm_key)) == PermissionLevel.ADMIN
| suutari-ai/shoop | shuup_tests/api/test_admin.py | Python | agpl-3.0 | 1,366 | 0.000732 |
zhangyu | RobbieRain/he | test.py | Python | apache-2.0 | 7 | 0.142857 |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from pants.option.option_value_container import OptionValueContainerBuilder
from pants.option.ranked_value import Rank, RankedValue
class OptionValueContainerTest(unittest.TestCase):
def test_unknown_values(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.HARDCODED, 1)
o = ob.build()
self.assertEqual(1, o.foo)
with self.assertRaises(AttributeError):
o.bar
def test_value_ranking(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 11)
o = ob.build()
self.assertEqual(11, o.foo)
self.assertEqual(Rank.CONFIG, o.get_rank("foo"))
ob.foo = RankedValue(Rank.HARDCODED, 22)
o = ob.build()
self.assertEqual(11, o.foo)
self.assertEqual(Rank.CONFIG, o.get_rank("foo"))
ob.foo = RankedValue(Rank.ENVIRONMENT, 33)
o = ob.build()
self.assertEqual(33, o.foo)
self.assertEqual(Rank.ENVIRONMENT, o.get_rank("foo"))
ob.foo = RankedValue(Rank.FLAG, 44)
o = ob.build()
self.assertEqual(44, o.foo)
self.assertEqual(Rank.FLAG, o.get_rank("foo"))
def test_is_flagged(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.NONE, 11)
self.assertFalse(ob.build().is_flagged("foo"))
ob.foo = RankedValue(Rank.CONFIG, 11)
self.assertFalse(ob.build().is_flagged("foo"))
ob.foo = RankedValue(Rank.ENVIRONMENT, 11)
self.assertFalse(ob.build().is_flagged("foo"))
ob.foo = RankedValue(Rank.FLAG, 11)
self.assertTrue(ob.build().is_flagged("foo"))
def test_indexing(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 1)
o = ob.build()
self.assertEqual(1, o["foo"])
self.assertEqual(1, o.get("foo"))
self.assertEqual(1, o.get("foo", 2))
self.assertIsNone(o.get("unknown"))
self.assertEqual(2, o.get("unknown", 2))
with self.assertRaises(AttributeError):
o["bar"]
def test_iterator(self) -> None:
ob = OptionValueContainerBuilder()
ob.a = RankedValue(Rank.FLAG, 3)
ob.b = RankedValue(Rank.FLAG, 2)
ob.c = RankedValue(Rank.FLAG, 1)
o = ob.build()
names = list(iter(o))
self.assertListEqual(["a", "b", "c"], names)
def test_copy(self) -> None:
# copy semantics can get hairy when overriding __setattr__/__getattr__, so we test them.
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.FLAG, 1)
ob.bar = RankedValue(Rank.FLAG, {"a": 111})
p = ob.build()
z = ob.build()
# Verify that the result is in fact a copy.
self.assertEqual(1, p.foo) # Has original attribute.
ob.baz = RankedValue(Rank.FLAG, 42)
self.assertFalse(hasattr(p, "baz")) # Does not have attribute added after the copy.
        # Verify that it's a shallow copy by modifying a referent in p and reading it in z.
p.bar["b"] = 222
self.assertEqual({"a": 111, "b": 222}, z.bar)
| benjyw/pants | src/python/pants/option/option_value_container_test.py | Python | apache-2.0 | 3,316 | 0.000905 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from libmozdata import utils as lmdutils
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
from auto_nag.escalation import Escalation, NoActivityDays
from auto_nag.nag_me import Nag
from auto_nag.round_robin import RoundRobin
class P1NoAssignee(BzCleaner, Nag):
def __init__(self):
super(P1NoAssignee, self).__init__()
self.escalation = Escalation(
self.people,
data=utils.get_config(self.name(), "escalation"),
skiplist=utils.get_config("workflow", "supervisor_skiplist", []),
)
self.round_robin = RoundRobin.get_instance()
self.components_skiplist = utils.get_config("workflow", "components_skiplist")
def description(self):
return "P1 Bugs, no assignee and no activity for few days"
def nag_template(self):
return self.template()
def get_extra_for_template(self):
return {"ndays": self.ndays}
def get_extra_for_nag_template(self):
return self.get_extra_for_template()
def get_extra_for_needinfo_template(self):
return self.get_extra_for_template()
def ignore_meta(self):
return True
def has_last_comment_time(self):
return True
def has_product_component(self):
return True
def columns(self):
return ["component", "id", "summary", "last_comment"]
def handle_bug(self, bug, data):
# check if the product::component is in the list
if utils.check_product_component(self.components_skiplist, bug):
return None
return bug
def get_mail_to_auto_ni(self, bug):
# For now, disable the needinfo
return None
# Avoid to ni everyday...
if self.has_bot_set_ni(bug):
return None
mail, nick = self.round_robin.get(bug, self.date)
if mail and nick:
return {"mail": mail, "nickname": nick}
return None
def set_people_to_nag(self, bug, buginfo):
priority = "high"
if not self.filter_bug(priority):
return None
owners = self.round_robin.get(bug, self.date, only_one=False, has_nick=False)
real_owner = bug["triage_owner"]
self.add_triage_owner(owners, real_owner=real_owner)
if not self.add(owners, buginfo, priority=priority):
self.add_no_manager(buginfo["id"])
return bug
def get_bz_params(self, date):
self.ndays = NoActivityDays(self.name()).get(
(utils.get_next_release_date() - self.nag_date).days
)
self.date = lmdutils.get_date_ymd(date)
fields = ["triage_owner", "flags"]
params = {
"bug_type": "defect",
"include_fields": fields,
"resolution": "---",
"f1": "priority",
"o1": "equals",
"v1": "P1",
"f2": "days_elapsed",
"o2": "greaterthaneq",
"v2": self.ndays,
}
utils.get_empty_assignees(params)
return params
if __name__ == "__main__":
P1NoAssignee().run()
| mozilla/relman-auto-nag | auto_nag/scripts/workflow/p1_no_assignee.py | Python | bsd-3-clause | 3,281 | 0.00061 |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internal client library for making calls directly to the servers rather than
through the proxy.
"""
import socket
from httplib import HTTPException
from time import time
from urllib import quote as _quote
from eventlet import sleep, Timeout
from swift.common.bufferedhttp import http_connect
from swiftclient import ClientException, json_loads
from swift.common.utils import normalize_timestamp
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
is_success, is_server_error
def quote(value, safe='/'):
if isinstance(value, unicode):
value = value.encode('utf8')
return _quote(value, safe)
def direct_get_account(node, part, account, marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15):
"""
Get listings directly from the account server.
:param node: node dictionary from the ring
:param part: partition the account is on
:param account: account name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
    :param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of containers) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/' + account
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
'Account server %s:%s direct GET %s gave status %s' % (node['ip'],
node['port'], repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def direct_head_container(node, part, account, container, conn_timeout=5,
response_timeout=15):
"""
Request container information directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers (all header names will
be lowercase)
"""
path = '/%s/%s' % (account, container)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'HEAD', path)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct HEAD %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def direct_get_container(node, part, account, container, marker=None,
limit=None, prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15):
"""
Get container listings directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
    :param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of objects) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/%s/%s' % (account, container)
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
            'Container server %s:%s direct GET %s gave status %s' % (node['ip'],
node['port'], repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def direct_delete_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers={}):
path = '/%s/%s' % (account, container)
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path, headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct DELETE %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def direct_head_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15):
"""
Request object information directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers (all header names will
be lowercase)
"""
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'HEAD', path)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct HEAD %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def direct_get_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15, resp_chunk_size=None, headers={}):
"""
Get object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param resp_chunk_size: if defined, chunk size of data to read.
:param headers: dict to be passed into HTTPConnection headers
:returns: a tuple of (response headers, the object's contents) The response
headers will be a dict and all header names will be lowercase.
"""
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, headers=headers)
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
'Object server %s:%s direct GET %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
if resp_chunk_size:
def _object_body():
buf = resp.read(resp_chunk_size)
while buf:
yield buf
buf = resp.read(resp_chunk_size)
object_body = _object_body()
else:
object_body = resp.read()
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers, object_body
def direct_put_object(node, part, account, container, name, contents,
content_length=None, etag=None, content_type=None,
headers=None, conn_timeout=5, response_timeout=15,
resp_chunk_size=None):
"""
Put object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param contents: a string to read object data from
:param content_length: value to send as content-length header
:param etag: etag of contents
:param content_type: value to send as content-type header
:param headers: additional headers to include in the request
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param chunk_size: if defined, chunk size of data to send.
:returns: etag from the server response
"""
# TODO: Add chunked puts
path = '/%s/%s/%s' % (account, container, name)
if headers is None:
headers = {}
if etag:
headers['ETag'] = etag.strip('"')
if content_length is not None:
headers['Content-Length'] = str(content_length)
if content_type is not None:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
if not contents:
headers['Content-Length'] = '0'
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'PUT', path, headers=headers)
conn.send(contents)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct PUT %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
return resp.getheader('etag').strip('"')
def direct_post_object(node, part, account, container, name, headers,
conn_timeout=5, response_timeout=15):
"""
Direct update to object metadata on object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param headers: headers to store as metadata
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP POST request failed
"""
path = '/%s/%s/%s' % (account, container, name)
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'POST', path, headers=headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct POST %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def direct_delete_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15, headers={}):
"""
Delete object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: response from server
"""
path = '/%s/%s/%s' % (account, container, obj)
headers['X-Timestamp'] = normalize_timestamp(time())
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path, headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct DELETE %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def retry(func, *args, **kwargs):
"""
Helper function to retry a given function a number of times.
:param func: callable to be called
:param retries: number of retries
:param error_log: logger for errors
:param args: arguments to send to func
    :param kwargs: keyword arguments to send to func (if retries or
                   error_log are sent, they will be deleted from kwargs
                   before sending on to func)
    :returns: result of func
"""
retries = 5
if 'retries' in kwargs:
retries = kwargs['retries']
del kwargs['retries']
error_log = None
if 'error_log' in kwargs:
error_log = kwargs['error_log']
del kwargs['error_log']
attempts = 0
backoff = 1
while attempts <= retries:
attempts += 1
try:
return attempts, func(*args, **kwargs)
except (socket.error, HTTPException, Timeout), err:
if error_log:
error_log(err)
if attempts > retries:
raise
except ClientException, err:
if error_log:
error_log(err)
if attempts > retries or not is_server_error(err.http_status) or \
err.http_status == HTTP_INSUFFICIENT_STORAGE:
raise
sleep(backoff)
backoff *= 2
# Shouldn't actually get down here, but just in case.
if args and 'ip' in args[0]:
raise ClientException('Raise too many retries',
http_host=args[0]['ip'], http_port=args[0]['port'],
http_device=args[0]['device'])
else:
raise ClientException('Raise too many retries')
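# Hedged usage sketch (added; not part of the original module). The ring node,
# partition, and names below are placeholders, and a reachable object server is
# required for the call to succeed:
#
#   attempts, headers = retry(direct_head_object, node, part,
#                             'AUTH_test', 'container', 'object',
#                             retries=3, error_log=logger.error)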
| VictorLowther/swift | swift/common/direct_client.py | Python | apache-2.0 | 18,186 | 0.001045 |
#local urls.py file
from django.conf.urls import url, include
from . import views
urlpatterns = [
#url(r'^', views.appView.postLocation, name = 'postLocation'),
url(r'^volunteer/', views.appView.member, name = 'member'),
#url(r'^(?P<member_id>[0-9]+)/$', views.appView.detail, name = 'detail'),
#url(r'^(?P<>))
]
| Fazer56/Assignment3 | charitysite/volunteer/urls.py | Python | mit | 352 | 0.028409 |
import agents as ag
def HW2Agent() -> object:
"An agent that keeps track of what locations are clean or dirty."
oldPercepts = [('None', 'Clean')]
oldActions = ['NoOp']
actionScores = [{
'Right': 0,
'Left': 0,
'Up': -1,
'Down': -1,
'NoOp': -100,
}]
level = 0
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
level = len(actionScores) - 1
bump, status = percept
lastBump, lastStatus = oldPercepts[-1]
lastAction = oldActions[-1]
if status == 'Dirty':
action = 'Suck'
actionScores[level][lastAction] += 2
else:
if bump == 'Bump':
actionScores[level][lastAction] -= 10
else:
if lastAction == 'Up' or lastAction == 'Down':
actionScores.append({
'Right': 0,
'Left': 0,
'Up': -1,
'Down': -1,
})
highest = -80
for actionType, score in actionScores[level].items():
if score > highest:
highest = score
action = actionType
print(actionScores)
oldPercepts.append(percept)
oldActions.append(action)
return action
return ag.Agent(program) | WhittKinley/aima-python | submissions/Sery/vacuum2.py | Python | mit | 1,432 | 0.002095 |
import qhsm
from qhsm import QSignals, QEvent
# generated by PythonGenerator version 0.1
class TestSample1(qhsm.QHsm):
def initialiseStateMachine(self):
self.initialiseState(self.s_StateX)
def s_StateX(self, ev):
if ev.QSignal == QSignals.Entry:
self.enterStateX()
elif ev.QSignal == QSignals.Exit:
self.exitStateX()
elif ev.QSignal == QSignals.Init:
self.initialiseState(self.s_State0)
else:
return self._TopState
return None
def s_State0(self, ev):
if ev.QSignal == "Bye":
pass
self.transitionTo(self.s_State1)
elif ev.QSignal == "Hello":
if self.Ok(ev):
self.sayHello3()
self.transitionTo(self.s_State0)
else:
self.sayHello1()
self.transitionTo(self.s_State1)
elif ev.QSignal == QSignals.Entry:
self.enterState0()
elif ev.QSignal == QSignals.Exit:
self.exitState0()
else:
return self.s_StateX
return None
def s_State1(self, ev):
if ev.QSignal == "Hello":
self.sayHello2()
self.transitionTo(self.s_State0)
elif ev.QSignal == QSignals.Entry:
self.enterState1()
elif ev.QSignal == QSignals.Exit:
self.exitState1()
else:
return self._TopState
return None
#end of TestSample1
pass
| poobalan-arumugam/stateproto | src/extensions/lang/python/qhsm/testsamplehsm1.py | Python | bsd-2-clause | 1,506 | 0.003984 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
"""Basic rdfvalue tests."""
import time
from grr.lib import rdfvalue
from grr.lib.rdfvalues import test_base
class RDFBytesTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFBytes
def GenerateSample(self, number=0):
return rdfvalue.RDFBytes("\x00hello%s\x01" % number)
class RDFStringTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFString
def GenerateSample(self, number=0):
return rdfvalue.RDFString(u"Grüezi %s" % number)
class RDFIntegerTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFInteger
def GenerateSample(self, number=0):
return rdfvalue.RDFInteger(number)
class DurationTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.Duration
def GenerateSample(self, number=5):
return rdfvalue.Duration("%ds" % number)
def testStringRepresentationIsTransitive(self):
t = rdfvalue.Duration("5m")
self.assertEqual(t.seconds, 300)
self.assertEqual(t, rdfvalue.Duration(300))
self.assertEqual(str(t), "5m")
class ByteSizeTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.ByteSize
def GenerateSample(self, number=5):
return rdfvalue.ByteSize("%sKib" % number)
def testParsing(self):
for string, expected in [("100gb", 100 * 1000**3),
("10kib", 10*1024),
("2.5kb", 2500)]:
self.assertEqual(expected, rdfvalue.ByteSize(string))
class RDFURNTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFURN
def GenerateSample(self, number=0):
return rdfvalue.RDFURN("aff4:/C.12342%s/fs/os/" % number)
def testRDFURN(self):
"""Test RDFURN handling."""
# Make a url object
str_url = "aff4:/hunts/W:AAAAAAAA/Results"
url = rdfvalue.RDFURN(str_url, age=1)
self.assertEqual(url.age, 1)
self.assertEqual(url.Path(), "/hunts/W:AAAAAAAA/Results")
self.assertEqual(url._urn.netloc, "")
self.assertEqual(url._urn.scheme, "aff4")
# Test the Add() function
url = url.Add("some", age=2).Add("path", age=3)
self.assertEqual(url.age, 3)
self.assertEqual(url.Path(), "/hunts/W:AAAAAAAA/Results/some/path")
self.assertEqual(url._urn.netloc, "")
self.assertEqual(url._urn.scheme, "aff4")
# Test that we can handle urns with a '?' and do not interpret them as
# a delimiter between url and parameter list.
str_url = "aff4:/C.0000000000000000/fs/os/c/regex.*?]&[+{}--"
url = rdfvalue.RDFURN(str_url, age=1)
self.assertEqual(url.Path(), str_url[5:])
def testInitialization(self):
"""Check that we can initialize from common initializers."""
# Empty Initializer not allowed.
self.assertRaises(ValueError, self.rdfvalue_class)
# Initialize from another instance.
sample = self.GenerateSample("aff4:/")
self.CheckRDFValue(self.rdfvalue_class(sample), sample)
def testSerialization(self, sample=None):
sample = self.GenerateSample("aff4:/")
super(RDFURNTest, self).testSerialization(sample=sample)
class RDFDatetimeTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.RDFDatetime
def GenerateSample(self, number=0):
result = self.rdfvalue_class()
result.ParseFromHumanReadable("2011/11/%02d" % (number+1))
return result
def testTimeZoneConversions(self):
time_string = "2011-11-01 10:23:00"
# Human readable strings are assumed to always be in UTC
# timezone. Initialize from the human readable string.
date1 = rdfvalue.RDFDatetime().ParseFromHumanReadable(time_string)
self.assertEqual(int(date1), 1320142980000000)
self.assertEqual(
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(int(date1) / 1e6)),
time_string)
# We always stringify the date in UTC timezone.
self.assertEqual(str(date1), time_string)
def testInitFromEmptyString(self):
orig_time = time.time
time.time = lambda: 1000
try:
# Init from an empty string should generate a DateTime object with a zero
# time.
date = rdfvalue.RDFDatetime("")
self.assertEqual(int(date), 0)
self.assertEqual(int(date.Now()), int(1000 * 1e6))
finally:
time.time = orig_time
def testAddNumber(self):
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date + 60), 1e9 + 60e6)
self.assertEqual(int(date + 1000.23), 1e9 + 1000230e3)
self.assertEqual(int(date + (-10)), 1e9 - 10e6)
def testSubNumber(self):
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date - 60), 1e9 - 60e6)
self.assertEqual(int(date - (-1000.23)), 1e9 + 1000230e3)
self.assertEqual(int(date - 1e12), 1e9 - 1e18)
def testAddDuration(self):
duration = rdfvalue.Duration("12h")
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date + duration), 1e9 + 12 * 3600e6)
duration = rdfvalue.Duration("-60s")
self.assertEqual(int(date + duration), 1e9 - 60e6)
def testSubDuration(self):
duration = rdfvalue.Duration("5m")
date = rdfvalue.RDFDatetime(1e9)
self.assertEqual(int(date - duration), 1e9 - 5 * 60e6)
duration = rdfvalue.Duration("-60s")
self.assertEqual(int(date - duration), 1e9 + 60e6)
duration = rdfvalue.Duration("1w")
self.assertEqual(int(date - duration), 1e9 - 7 * 24 * 3600e6)
class RDFDatetimeSecondsTest(RDFDatetimeTest):
rdfvalue_class = rdfvalue.RDFDatetimeSeconds
class HashDigestTest(test_base.RDFValueTestCase):
rdfvalue_class = rdfvalue.HashDigest
def GenerateSample(self, number=0):
return rdfvalue.HashDigest("\xca\x97\x81\x12\xca\x1b\xbd\xca\xfa\xc21\xb3"
"\x9a#\xdcM\xa7\x86\xef\xf8\x14|Nr\xb9\x80w\x85"
"\xaf\xeeH\xbb%s" % number)
def testEqNeq(self):
binary_digest = ("\xca\x97\x81\x12\xca\x1b\xbd\xca\xfa\xc21\xb3"
"\x9a#\xdcM\xa7\x86\xef\xf8\x14|Nr\xb9\x80w\x85"
"\xaf\xeeH\xbb")
sample = rdfvalue.HashDigest(binary_digest)
hex_digest = ("ca978112ca1bbdcafac231b39a23dc4da786eff81"
"47c4e72b9807785afee48bb")
self.assertEqual(sample, hex_digest)
self.assertEqual(sample, binary_digest)
self.assertNotEqual(sample, "\xaa\xbb")
self.assertNotEqual(sample, "deadbeef")
| spnow/grr | lib/rdfvalues/basic_test.py | Python | apache-2.0 | 6,320 | 0.005697 |
# Copyright 2019 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_versionedobjects import base as object_base
from cyborg.db import api as dbapi
from cyborg.objects import base
from cyborg.objects import fields as object_fields
LOG = logging.getLogger(__name__)
ATTACH_TYPE = ["PCI", "MDEV"]
@base.CyborgObjectRegistry.register
class AttachHandle(base.CyborgObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
dbapi = dbapi.get_instance()
fields = {
'id': object_fields.IntegerField(nullable=False),
'uuid': object_fields.UUIDField(nullable=False),
'deployable_id': object_fields.IntegerField(nullable=False),
'cpid_id': object_fields.IntegerField(nullable=False),
'attach_type': object_fields.EnumField(valid_values=ATTACH_TYPE,
nullable=False),
# attach_info should be JSON here.
'attach_info': object_fields.StringField(nullable=False),
'in_use': object_fields.BooleanField(nullable=False)
}
def create(self, context):
"""Create a AttachHandle record in the DB."""
self.in_use = False
values = self.obj_get_changes()
db_ah = self.dbapi.attach_handle_create(context, values)
self._from_db_object(self, db_ah)
@classmethod
def get(cls, context, uuid):
"""Find a DB AttachHandle and return an Obj AttachHandle."""
db_ah = cls.dbapi.attach_handle_get_by_uuid(context, uuid)
obj_ah = cls._from_db_object(cls(context), db_ah)
return obj_ah
@classmethod
def get_by_id(cls, context, id):
"""Find a DB AttachHandle by ID and return an Obj AttachHandle."""
db_ah = cls.dbapi.attach_handle_get_by_id(context, id)
obj_ah = cls._from_db_object(cls(context), db_ah)
return obj_ah
@classmethod
def list(cls, context, filters={}):
"""Return a list of AttachHandle objects."""
if filters:
sort_dir = filters.pop('sort_dir', 'desc')
sort_key = filters.pop('sort_key', 'create_at')
limit = filters.pop('limit', None)
marker = filters.pop('marker_obj', None)
db_ahs = cls.dbapi.attach_handle_get_by_filters(context, filters,
sort_dir=sort_dir,
sort_key=sort_key,
limit=limit,
marker=marker)
else:
db_ahs = cls.dbapi.attach_handle_list(context)
obj_ah_list = cls._from_db_object_list(db_ahs, context)
return obj_ah_list
def save(self, context):
"""Update an AttachHandle record in the DB"""
updates = self.obj_get_changes()
db_ahs = self.dbapi.attach_handle_update(context, self.uuid, updates)
self._from_db_object(self, db_ahs)
def destroy(self, context):
"""Delete a AttachHandle from the DB."""
self.dbapi.attach_handle_delete(context, self.uuid)
self.obj_reset_changes()
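# Illustrative usage sketch (added for clarity, not part of the original
# module); the request context and field values below are placeholders:
#
#   ah = AttachHandle(context, deployable_id=1, cpid_id=1, attach_type='PCI',
#                     attach_info='{"domain": "0000", "bus": "5e"}')
#   ah.create(context)
#   fetched = AttachHandle.get(context, ah.uuid)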
| openstack/nomad | cyborg/objects/attach_handle.py | Python | apache-2.0 | 3,799 | 0 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.util.contextutil import temporary_file
def atomic_copy(src, dst):
"""Copy the file src to dst, overwriting dst atomically."""
with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
shutil.copyfile(src, tmp_dst.name)
os.rename(tmp_dst.name, dst)
def create_size_estimators():
def line_count(filename):
with open(filename, 'rb') as fh:
return sum(1 for line in fh)
return {
'linecount': lambda srcs: sum(line_count(src) for src in srcs),
'filecount': lambda srcs: len(srcs),
'filesize': lambda srcs: sum(os.path.getsize(src) for src in srcs),
'nosize': lambda srcs: 0,
}
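# Illustrative usage sketch (added for clarity, not part of the original
# module); the source paths are placeholders:
#
#   estimators = create_size_estimators()
#   size = estimators['filesize'](['src/python/foo.py', 'src/python/bar.py'])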
| sameerparekh/pants | src/python/pants/util/fileutil.py | Python | apache-2.0 | 961 | 0.006243 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations:
"""DdosProtectionPlansOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs: Any
) -> "_models.DdosProtectionPlan":
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> "_models.DdosProtectionPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosProtectionPlan"]:
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.DdosProtectionPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosProtectionPlan"]:
"""Update a DDoS protection plan tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the update DDoS protection plan resource tags.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
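    # Illustrative usage sketch (added for clarity, not part of the generated
    # client); `network_client`, the resource group and plan names are
    # placeholders:
    #
    #   poller = await network_client.ddos_protection_plans.begin_create_or_update(
    #       'my-rg', 'my-plan', {'location': 'westus'})
    #   plan = await poller.result()
    #   async for p in network_client.ddos_protection_plans.list():
    #       print(p.name)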
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_ddos_protection_plans_operations.py | Python | mit | 30,431 | 0.005159 |
"""empty message
Revision ID: 74af9cceeeaf
Revises: 6e7b88dc4544
Create Date: 2017-07-30 20:47:07.982489
"""
# revision identifiers, used by Alembic.
revision = '74af9cceeeaf'
down_revision = '6e7b88dc4544'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('customer', sa.Column('vat_number', sa.String(length=100), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('customer', 'vat_number')
# ### end Alembic commands ###
| Tethik/faktura | migrations/versions/74af9cceeeaf_.py | Python | mit | 634 | 0.004732 |
__author__ = "Christo Robison"
import numpy as np
from scipy import signal
from scipy import misc
import h5py
from PIL import Image
import os
import collections
import matplotlib.pyplot as plt
import convertBsqMulti as bsq
import png
'''This program reads in BSQ datacubes into an HDF file'''
def loadBSQ(path = '/home/crob/HyperSpec_Data/WBC v ALL/WBC25', debug=False):
d31 = []
d31_norm = []
d25 = []
d25_norm = []
l25 = []
l = []
l3 = []
lam = []
for root, dirs, files in os.walk(path):
print(dirs)
for name in sorted(files): #os walk iterates arbitrarily, sort fixes it
print(name)
if name.endswith(".png"):
# Import label image
im = np.array(Image.open(os.path.join(root,name)),'f')
                print(np.shape(im))
im = im[:,:,0:3] # > 250
# generate a mask for 3x3 conv layer (probably not needed)
#conv3bw = signal.convolve2d(bw, np.ones([22,22],dtype=np.int), mode='valid') >= 464
print(np.shape(im))
#p = open(name+'_22sqMask.png','wb')
#w = png.Writer(255)
#bw = np.flipud(bw)
im = np.flipud(im)
#l3.append(np.reshape(conv3bw, ))
#l.append(np.reshape(bw, 138659))
l.append(im)
print(np.shape(im))
print("Name = " + name)
if name.endswith(".bsq"):
bs = bsq.readbsq(os.path.join(root,name))
print(np.shape(bs[0]))
print(len(bs[1]))
#separate bsq files by prism
if len(bs[1]) == 31:
print('BSQ is size 31')
print(len(bs[1]))
lam = bs[1]
#d31.append(np.reshape(np.transpose(bs[0], (1, 2, 0)), 4298429))
d31.append(bs[0].astype(np.float32))
d31_norm.append(bs[0].astype(np.float32)/np.amax(bs[0]))
if len(bs[1]) == 25:
print('BSQ is size 25')
print(len(bs[1]))
lam = bs[1]
d25.append(bs[0].astype(np.float32))
d25_norm.append(bs[0].astype(np.float32)/np.amax(bs[0]))
#d25.append(np.reshape(bs[0],[138659,25]).astype(np.float32))
# old don't use #d25.append(np.reshape(np.transpose(bs[0], (1, 2, 0)), 3466475))
out = collections.namedtuple('examples', ['data31', 'data31_norm', 'data25', 'data25_norm', 'labels', 'lambdas'])
o = out(data31=np.dstack(d31),data31_norm=np.dstack(d31_norm), data25=d25, data25_norm=d25_norm, labels=np.dstack(l), lambdas=lam) #np.vstack(d25), labels=np.hstack(l)
return o
def convLabels(labelImg, numBands):
'''
takes a MxNx3 numpy array and creates binary labels based on predefined classes
background = 0
red = 1 WBC
green = 2 RBC
pink = 3 nuclear material
yellow = 4 ignore
'''
#b = np.uint8(numBands / 31)
# print(b / 31)
tempRed = labelImg[:,:,0] == 255
tempGreen = labelImg[:,:,1] == 255
tempBlue = labelImg[:,:,2] == 255
tempYellow = np.logical_and(tempRed, tempGreen)
tempPink = np.logical_and(tempRed, tempBlue)
temp = np.zeros(np.shape(tempRed))
temp[tempRed] = 1
temp[tempGreen] = 2
temp[tempPink] = 3
temp[tempYellow] = 4
print(temp)
print(tempRed, tempGreen, tempBlue, tempYellow, tempPink)
return temp
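# Illustrative mapping (added for clarity, not part of the original script): a
# pure red pixel [255, 0, 0] maps to 1 (WBC), green [0, 255, 0] to 2 (RBC),
# pink [255, 0, 255] to 3 (nuclear material), yellow [255, 255, 0] to 4
# (ignore), and all other pixels stay 0 (background).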
def convert_labels(labels,n_classes, debug = False):
for j in range(n_classes):
temp = labels == j
temp = temp.astype(int)
if j > 0:
conv_labels = np.append(conv_labels, temp)
print(temp[:])
else:
conv_labels = temp
print(np.shape(conv_labels))
conv_labels = np.reshape(conv_labels, [len(labels), n_classes], order='F')
if debug: print(np.shape(conv_labels))
if debug:
f = h5py.File("/home/crob/HyperSpec/Python/BSQ_whole.h5", "w")
f.create_dataset('bin_labels', data=conv_labels)
f.close()
return conv_labels
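# Illustrative example (added for clarity, not part of the original script):
#
#   convert_labels(np.array([0, 1, 2]), 3)
#   # -> one-hot rows [[1, 0, 0],
#   #                  [0, 1, 0],
#   #                  [0, 0, 1]]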
def getClassMean(data, classNum):
kee = np.equal(data['label'],classNum)
out = np.mean(data['data']*kee,axis=0)
return out
def getAverages(data, numClasses):
out = []
for i in range(numClasses):
a = getClassMean(data, i)
out.append(a)
return out
if __name__ == '__main__':
#A = loadBSQ()
path = '/home/crob/-_PreSortedData_Train_-' #oldpath=/HyperSpec_Data/WBC v ALL/WBC25
s = loadBSQ(path)
print(np.shape(s.data25))
f = h5py.File("HYPER_SPEC_TRAIN_RED.h5", "w")
f.create_dataset('data', data=s.data31, chunks=(443, 313, 1))
f.create_dataset('norm_data', data=s.data31_norm, chunks=(443,313,1))
f.create_dataset('labels', data=s.labels)
f.create_dataset('bands', data=s.lambdas)
g = np.shape(s.data31)
b = np.uint16(g[2] / 31) #issue with overflow if more than 256 samples. derp.
lab = np.reshape(s.labels, [443, 313, 3, b], 'f')
numExamples = np.shape(lab)
a = []
for j in range(np.uint16(numExamples[3])):
a.append(convLabels(lab[:, :, :, j], None))
f.create_dataset('classLabels', data=np.dstack(a))
#p = convert_labels(s.labels,2)
#f.create_dataset('bin_labels', data=p)
f.close() | Crobisaur/HyperSpec | Python/loadData2Hdf.py | Python | gpl-3.0 | 5,400 | 0.010741 |
import pygame
import sys
WINDOW_TITLE = "Game Of Life"
# Define some colors
BLACK = ( 0, 0, 0)
PURPLE = ( 22, 20, 48)
WHITE = (255, 255, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
BLUE = ( 67, 66, 88)
# This sets the width and height of each grid location
width = 20
height = 20
# This sets the margin between each cell
margin = 5
NR_ROWS = 20
NR_COLS = 20
# Make the window large enough to show the full grid of cells plus margins.
SCREEN_WIDTH = NR_COLS * (width + margin) + margin
SCREEN_HEIGHT = NR_ROWS * (height + margin) + margin
def add_padding(nr_rows, nr_cols, grid):
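    # Copy the grid into a larger one with a one-cell border of dead cells, so
    # neighbour lookups at the edges never index outside the populated area.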
new_grid=create_grid(nr_rows+2, nr_cols+2)
for row in range(nr_rows):
for column in range(nr_cols):
new_grid[row][column]=grid[row][column]
return new_grid
def get_number_neighbours_cell(nr_rows, nr_cols, grid, row, column):
nr_neighbours = 0
if (grid[row][column-1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row][column+1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row-1][column-1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row-1][column+1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row+1][column-1] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row+1][column+1] != 0):
nr_neighbours = nr_neighbours + 1
if(grid[row-1][column] != 0):
nr_neighbours = nr_neighbours + 1
if (grid[row+1][column] != 0):
nr_neighbours = nr_neighbours + 1
return nr_neighbours
def next_generation_value(nr_rows, nr_cols, grid, row, column):
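    # Conway's rules: a live cell survives with 2 or 3 live neighbours, a dead
    # cell becomes alive with exactly 3 live neighbours, everything else dies.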
nr_neighbours = get_number_neighbours_cell(nr_rows, nr_cols, grid, row, column)
if (nr_neighbours < 2):
return 0
if (grid[row][column] == 1 and (nr_neighbours == 2 or nr_neighbours == 3)):
return 1
if (grid[row][column] == 0 and (nr_neighbours == 3)):
return 1
if (nr_neighbours > 3):
return 0
return 0
def next_generation(nr_rows, nr_cols, grid):
next_grid = create_grid(nr_rows, nr_cols)
for row in range(nr_rows):
for column in range(nr_cols):
value = next_generation_value(nr_rows, nr_cols, grid, row, column)
next_grid[row][column] = value
return next_grid
def reset(nr_rows, nr_cols, grid):
for row in range(nr_rows):
for column in range(nr_cols):
grid[row][column] = 0
return grid
def select_cell():
# User clicks the mouse. Get the position
pos = pygame.mouse.get_pos()
# Change the x/y screen coordinates to grid coordinates
column = pos[0] // (width + margin)
row = pos[1] // (height + margin)
    # Mark that location as alive
grid[row][column] = 1
print("Click ", pos, "Grid coordinates: ", row, column)
return grid
def random_configuration():
pass
def process_events(nr_rows, nr_cols, grid, done):
next_grid = None
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
elif event.type == pygame.MOUSEBUTTONDOWN:
grid = select_cell()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
print("Reset")
grid = reset(nr_rows, nr_cols, grid)
            elif event.key == pygame.K_n:
                print("Next generation")
                grid = add_padding(nr_rows, nr_cols, grid)
                next_grid = next_generation(nr_rows, nr_cols, grid)
            elif event.key == pygame.K_c:
                print("Random configuration")
                random_configuration()
            elif event.key == pygame.K_ESCAPE:
                print("Exit")
sys.exit(0)
return (grid, next_grid, done)
def draw_grid(nr_rows, nr_cols, grid, screen, width, height, margin):
# Draw the grid
for row in range(nr_rows):
for column in range(nr_cols):
color = BLACK
if grid[row][column] == 1:
color = BLUE
pygame.draw.rect(screen,
color,
[(margin+width)*column+margin,
(margin+height)*row+margin,
width,
height])
def create_grid(nr_rows, nr_cols):
# Create a 2 dimensional array. A two dimensional
# array is simply a list of lists.
grid = []
for row in range(nr_rows):
# Add an empty array that will hold each cell in this row
grid.append([])
for col in range(nr_cols):
grid[row].append(0) # Append a cell
return grid
if __name__ == '__main__':
grid = create_grid(NR_ROWS, NR_COLS)
# Initialize pygame
pygame.init()
# Set the height and width of the screen
size = [SCREEN_WIDTH, SCREEN_HEIGHT]
screen = pygame.display.set_mode(size)
# Set title of screen
pygame.display.set_caption(WINDOW_TITLE)
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while done == False:
(grid, next_grid, done) = process_events(NR_ROWS, NR_COLS, grid, done)
# Set the screen background
screen.fill(PURPLE)
if next_grid is not None:
grid = next_grid
draw_grid(NR_ROWS, NR_COLS, grid, screen, width, height, margin)
# Limit to 60 frames per second
clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Be IDLE friendly. If you forget this line, the program will 'hang' on exit.
pygame.quit() | adelinastanciu/game-of-life | game.py | Python | gpl-2.0 | 5,982 | 0.010532 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayProbe(SubResource):
"""Probe of the application gateway.
:param id: Resource ID.
:type id: str
:param protocol: Protocol. Possible values are: 'Http' and 'Https'.
Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2016_12_01.models.ApplicationGatewayProtocol
:param host: Host name to send the probe to.
:type host: str
:param path: Relative path of probe. Valid path starts from '/'. Probe is
sent to <Protocol>://<host>:<port><path>
:type path: str
:param interval: The probing interval in seconds. This is the time
interval between two consecutive probes. Acceptable values are from 1
second to 86400 seconds.
:type interval: int
    :param timeout: The probe timeout in seconds. Probe marked as failed if
     valid response is not received within this timeout period. Acceptable values
are from 1 second to 86400 seconds.
:type timeout: int
:param unhealthy_threshold: The probe retry count. Backend server is
marked down after consecutive probe failure count reaches
UnhealthyThreshold. Acceptable values are from 1 second to 20.
:type unhealthy_threshold: int
    :param provisioning_state: Provisioning state of the probe resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host': {'key': 'properties.host', 'type': 'str'},
'path': {'key': 'properties.path', 'type': 'str'},
'interval': {'key': 'properties.interval', 'type': 'int'},
'timeout': {'key': 'properties.timeout', 'type': 'int'},
'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayProbe, self).__init__(**kwargs)
self.protocol = kwargs.get('protocol', None)
self.host = kwargs.get('host', None)
self.path = kwargs.get('path', None)
self.interval = kwargs.get('interval', None)
self.timeout = kwargs.get('timeout', None)
self.unhealthy_threshold = kwargs.get('unhealthy_threshold', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
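# Illustrative sketch (added for clarity, not part of the generated model); the
# host and path values are placeholders:
#
#   probe = ApplicationGatewayProbe(protocol='Http', host='app.contoso.com',
#                                   path='/health', interval=30, timeout=30,
#                                   unhealthy_threshold=3)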
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/application_gateway_probe.py | Python | mit | 3,438 | 0.000582 |
#!/usr/bin/env python
"""DB for Plant Detection.
For Plant Detection.
"""
import os
import json
import base64
import requests
import numpy as np
from plant_detection import CeleryPy
from plant_detection import ENV
class DB(object):
"""Known and detected plant data for Plant Detection."""
def __init__(self):
"""Set initial attributes."""
self.plants = {'known': [], 'save': [],
'remove': [], 'safe_remove': []}
self.object_count = None
self.pixel_locations = []
self.coordinate_locations = []
self.calibration_pixel_locations = []
self.dir = os.path.dirname(os.path.realpath(__file__)) + os.sep
self.plants_file = "plant-detection_plants.json"
self.tmp_dir = None
self.weeder_destrut_r = 50
self.test_coordinates = [600, 400, 0]
self.coordinates = None
self.app = False
self.errors = {}
@staticmethod
def _api_info(api):
"""API requests setup."""
api_info = {}
if api == 'app':
try:
api_info['token'] = os.environ['API_TOKEN']
except KeyError:
api_info['token'] = 'x.{}.x'.format(
'eyJpc3MiOiAiLy9zdGFnaW5nLmZhcm1ib3QuaW86NDQzIn0')
try:
encoded_payload = api_info['token'].split('.')[1]
encoded_payload += '=' * (4 - len(encoded_payload) % 4)
json_payload = base64.b64decode(
encoded_payload).decode('utf-8')
server = json.loads(json_payload)['iss']
except: # noqa pylint:disable=W0702
server = '//my.farmbot.io:443'
api_info['url'] = 'http{}:{}/api/'.format(
's' if ':443' in server else '', server)
elif api == 'farmware':
try:
api_info['token'] = os.environ['FARMWARE_TOKEN']
except KeyError:
api_info['token'] = 'NA'
try:
os.environ['FARMWARE_URL']
except KeyError:
api_info['url'] = 'NA'
else:
api_info['url'] = CeleryPy.farmware_api_url()
api_info['headers'] = {
'Authorization': 'Bearer {}'.format(api_info['token']),
'content-type': "application/json"}
return api_info
def api_get(self, endpoint):
"""GET from an API endpoint."""
api = self._api_info('app')
response = requests.get(api['url'] + endpoint, headers=api['headers'])
self.api_response_error_collector(response)
self.api_response_error_printer()
return response
def api_response_error_collector(self, response):
"""Catch and log errors from API requests."""
self.errors = {} # reset
if response.status_code != 200:
try:
self.errors[str(response.status_code)] += 1
except KeyError:
self.errors[str(response.status_code)] = 1
def api_response_error_printer(self):
"""Print API response error output."""
error_string = ''
for key, value in self.errors.items():
error_string += '{} {} errors '.format(value, key)
print(error_string)
def _download_image_from_url(self, img_filename, url):
response = requests.get(url, stream=True)
self.api_response_error_collector(response)
self.api_response_error_printer()
if response.status_code == 200:
with open(img_filename, 'wb') as img_file:
for chunk in response:
img_file.write(chunk)
def _get_bot_state(self):
api = self._api_info('farmware')
response = requests.get(api['url'] + 'bot/state',
headers=api['headers'])
self.api_response_error_collector(response)
self.api_response_error_printer()
if response.status_code == 200:
return response.json()
def get_image(self, image_id):
"""Download an image from the FarmBot Web App API."""
response = self.api_get('images/' + str(image_id))
if response.status_code == 200:
image_json = response.json()
image_url = image_json['attachment_url']
try:
testfilename = self.dir + 'test_write.try_to_write'
testfile = open(testfilename, "w")
testfile.close()
os.remove(testfilename)
except IOError:
directory = '/tmp/'
else:
directory = self.dir
image_filename = directory + str(image_id) + '.jpg'
self._download_image_from_url(image_filename, image_url)
self.coordinates = list([int(image_json['meta']['x']),
int(image_json['meta']['y']),
int(image_json['meta']['z'])])
return image_filename
else:
return None
def _get_raw_coordinate_values(self, redis=None):
temp = []
legacy = int(os.getenv('FARMBOT_OS_VERSION', '0.0.0')[0]) < 6
if legacy:
for axis in ['x', 'y', 'z']:
temp.append(ENV.redis_load('location_data.position.' + axis,
other_redis=redis))
else:
state = self._get_bot_state()
for axis in ['x', 'y', 'z']:
try:
value = state['location_data']['position'][str(axis)]
except KeyError:
value = None
temp.append(value)
return temp
def getcoordinates(self, test_coordinates=False, redis=None):
"""Get machine coordinates from bot."""
location = None
raw_values = self._get_raw_coordinate_values(redis)
if all(axis_value is not None for axis_value in raw_values):
try:
location = [int(coordinate) for coordinate in raw_values]
except ValueError:
pass
if test_coordinates:
self.coordinates = self.test_coordinates # testing coordinates
elif location is None and not self.app:
self.coordinates = self.test_coordinates # testing coordinates
else:
self.coordinates = location # current bot coordinates
def save_plants(self):
"""Save plant detection plants to file.
'known', 'remove', 'safe_remove', and 'save'
"""
if self.tmp_dir is None:
json_dir = self.dir
else:
json_dir = self.tmp_dir
try:
with open(json_dir + self.plants_file, 'w') as plant_file:
json.dump(self.plants, plant_file)
except IOError:
self.tmp_dir = "/tmp/"
self.save_plants()
def load_plants_from_file(self):
"""Load plants from file."""
try:
with open(self.dir + self.plants_file, 'r') as plant_file:
self.plants = json.load(plant_file)
except IOError:
pass
def load_plants_from_web_app(self):
"""Download known plants from the FarmBot Web App API."""
response = self.api_get('points')
app_points = response.json()
if response.status_code == 200:
plants = []
for point in app_points:
if point['pointer_type'] == 'Plant':
plants.append({
'x': point['x'],
'y': point['y'],
'radius': point['radius']})
self.plants['known'] = plants
def identify_plant(self, plant_x, plant_y, known):
"""Identify a provided plant based on its location.
Args:
known: [x, y, r] array of known plants
plant_x, plant_y: x and y coordinates of plant to identify
Coordinate is:
within a known plant area: a plant to 'save' (it's the known plant)
within a known plant safe zone: a 'safe_remove' weed
outside a known plant area or safe zone: a 'remove' weed
"""
cxs, cys, crs = known[:, 0], known[:, 1], known[:, 2]
if all((plant_x - cx)**2 + (plant_y - cy)**2
> (cr + self.weeder_destrut_r)**2
for cx, cy, cr in zip(cxs, cys, crs)):
# Plant is outside of known plant safe zone
return 'remove'
elif all((plant_x - cx)**2 + (plant_y - cy)**2 > cr**2
for cx, cy, cr in zip(cxs, cys, crs)):
# Plant is inside known plant safe zone
return 'safe_remove'
else: # Plant is within known plant area
return 'save'
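    # Worked example (added for clarity, not part of the original module): with
    # one known plant at (100, 100) of radius 20 and weeder_destrut_r = 50, a
    # detection at (100, 110) is 'save', one at (100, 150) is 'safe_remove'
    # (inside the radius-70 safe zone), and one at (100, 200) is 'remove'.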
def identify(self, second_pass=False):
"""Compare detected plants to known to separate plants from weeds."""
def _round(number, places):
"""Round number to given number of decimal places."""
factor = 10 ** places
return int(number * factor) / float(factor)
if not second_pass:
self.plants['remove'] = []
self.plants['save'] = []
self.plants['safe_remove'] = []
if self.plants['known'] is None or self.plants['known'] == []:
self.plants['known'] = [{'x': 0, 'y': 0, 'radius': 0}]
kplants = np.array(
[[_['x'], _['y'], _['radius']] for _ in self.plants['known']])
for plant_coord in self.coordinate_locations:
plant_x = _round(plant_coord[0], 2)
plant_y = _round(plant_coord[1], 2)
plant_r = _round(plant_coord[2], 2)
plant_is = self.identify_plant(plant_x, plant_y, kplants)
if plant_is == 'remove':
self.plants['remove'].append(
{'x': plant_x, 'y': plant_y, 'radius': plant_r})
elif plant_is == 'safe_remove' and not second_pass:
self.plants['safe_remove'].append(
{'x': plant_x, 'y': plant_y, 'radius': plant_r})
else:
if not second_pass:
self.plants['save'].append(
{'x': plant_x, 'y': plant_y, 'radius': plant_r})
if self.plants['known'] == [{'x': 0, 'y': 0, 'radius': 0}]:
self.plants['known'] = []
def print_count(self, calibration=False):
"""Output text indicating the number of plants/objects detected."""
if calibration:
object_name = 'calibration objects'
else:
object_name = 'plants'
print("{} {} detected in image.".format(self.object_count,
object_name))
def print_identified(self):
"""Output text including data about identified detected plants."""
def _identified_plant_text_output(title, action, plants):
print("\n{} {}.".format(
len(self.plants[plants]), title))
if len(self.plants[plants]) > 0:
print("Plants at the following machine coordinates "
"( X Y ) with R = radius {}:".format(action))
for plant in self.plants[plants]:
print(" ( {x:5.0f} {y:5.0f} ) R = {r:.0f}".format(
x=plant['x'],
y=plant['y'],
r=plant['radius']))
# Print known
_identified_plant_text_output(
title='known plants inputted',
action='are to be saved',
plants='known')
# Print removal candidates
_identified_plant_text_output(
title='plants marked for removal',
action='are to be removed',
plants='remove')
# Print safe_remove plants
_identified_plant_text_output(
title='plants marked for safe removal',
action='were too close to the known plant to remove completely',
plants='safe_remove')
# Print saved
_identified_plant_text_output(
title='detected plants are known or have escaped removal',
action='have been saved',
plants='save')
def get_json_coordinates(self):
"""Return coordinate dictionaries."""
coordinate_list = []
for coordinate in self.coordinate_locations:
coordinate_list.append({"x": coordinate[0],
"y": coordinate[1],
"radius": coordinate[2]})
return coordinate_list
def print_coordinates(self):
"""Output coordinate data for detected (but not identified) plants."""
if len(self.coordinate_locations) > 0:
print("Detected object machine coordinates "
"( X Y ) with R = radius:")
for coordinate_location in self.coordinate_locations:
print(" ( {:5.0f} {:5.0f} ) R = {:.0f}".format(
coordinate_location[0],
coordinate_location[1],
coordinate_location[2]))
def print_pixel(self):
"""Output text pixel data for detected (but not identified) plants."""
if len(self.pixel_locations) > 0:
print("Detected object center pixel locations ( X Y ):")
for pixel_location in self.pixel_locations:
print(" ( {:5.0f}px {:5.0f}px )".format(pixel_location[0],
pixel_location[1]))
def output_celery_script(self):
"""Output JSON with identified plant coordinates and radii."""
unsent_cs = []
# Encode to CS
for mark in self.plants['remove']:
plant_x, plant_y = round(mark['x'], 2), round(mark['y'], 2)
plant_r = round(mark['radius'], 2)
unsent = CeleryPy.add_point(plant_x, plant_y, 0, plant_r)
unsent_cs.append(unsent)
return unsent_cs
@staticmethod
def prepare_point_data(point, name):
"""Prepare point payload for uploading to the FarmBot Web App."""
# color
if name == 'Weed':
color = 'red'
elif name == 'Detected Plant':
color = 'blue'
elif name == 'Known Plant':
color = 'green'
elif name == 'Safe-Remove Weed':
color = 'cyan'
else:
color = 'grey'
# payload
plant_x, plant_y = round(point['x'], 2), round(point['y'], 2)
plant_r = round(point['radius'], 2)
point_data = {'x': str(plant_x), 'y': str(plant_y), 'z': 0,
'radius': str(plant_r),
'meta': {'created_by': 'plant-detection',
'color': color},
'name': name, 'pointer_type': 'GenericPointer'}
return point_data
def upload_point(self, point, name, id_list):
"""Upload a point to the FarmBot Web App."""
payload = json.dumps(self.prepare_point_data(point, name))
# API Request
api = self._api_info('app')
response = requests.post(api['url'] + 'points',
data=payload, headers=api['headers'])
point_id = None
if response.status_code == 200:
point_id = response.json()['id']
id_list.append(point_id)
self.api_response_error_collector(response)
return id_list
def upload_plants(self):
"""Add plants to FarmBot Web App Farm Designer."""
point_ids = []
for plant in self.plants['remove']:
point_ids = self.upload_point(plant, 'Weed', point_ids)
for plant in self.plants['save']:
point_ids = self.upload_point(plant, 'Detected Plant', point_ids)
# for plant in self.plants['known']:
# point_ids = self.upload_point(plant, 'Known Plant', point_ids)
for plant in self.plants['safe_remove']:
point_ids = self.upload_point(plant, 'Safe-Remove Weed', point_ids)
self.api_response_error_printer()
if point_ids:
# Points have been added to the web app
# Indicate that a sync is required for the points
CeleryPy.data_update('points', point_ids)
| FBTUG/DevZone | ai/demoCamera/plant_detection/DB.py | Python | mit | 16,293 | 0 |
from turtle import *
mode('logo')
shape('turtle')
speed(5)
color('red', 'blue')
#draws a square
for i in range(4):
fd(200)
rt(90)
#draws roof
penup()
goto(0,200)
pendown()
rt(60)
goto(100,300)
rt(60)
goto(200,200)
penup()
#resizes turtle and changes his fill colour
shapesize(5,5)
color('red', 'orange')
goto(100,100)
rt(240)
width(200)
| cmulliss/turtles-doing-things | stars_etc/turtleHouse.py | Python | cc0-1.0 | 351 | 0.025641 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which also indirectly tests the output of the
pure-Python protocol compiler.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import copy
import gc
import operator
import struct
from google.apputils import basetest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf.internal import wire_format
from google.protobuf.internal import test_util
from google.protobuf.internal import decoder
class _MiniDecoder(object):
"""Decodes a stream of values from a string.
Once upon a time we actually had a class called decoder.Decoder. Then we
got rid of it during a redesign that made decoding much, much faster overall.
But a couple tests in this file used it to check that the serialized form of
a message was correct. So, this class implements just the methods that were
used by said tests, so that we don't have to rewrite the tests.
"""
def __init__(self, bytes):
self._bytes = bytes
self._pos = 0
def ReadVarint(self):
result, self._pos = decoder._DecodeVarint(self._bytes, self._pos)
return result
ReadInt32 = ReadVarint
ReadInt64 = ReadVarint
ReadUInt32 = ReadVarint
ReadUInt64 = ReadVarint
def ReadSInt64(self):
return wire_format.ZigZagDecode(self.ReadVarint())
ReadSInt32 = ReadSInt64
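  # ZigZag encoding interleaves signed values as 0, -1, 1, -2, ... ->
  # 0, 1, 2, 3, ..., so for example wire_format.ZigZagDecode(3) returns -2.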
def ReadFieldNumberAndWireType(self):
return wire_format.UnpackTag(self.ReadVarint())
def ReadFloat(self):
result = struct.unpack("<f", self._bytes[self._pos:self._pos+4])[0]
self._pos += 4
return result
def ReadDouble(self):
result = struct.unpack("<d", self._bytes[self._pos:self._pos+8])[0]
self._pos += 8
return result
def EndOfStream(self):
return self._pos == len(self._bytes)
class ReflectionTest(basetest.TestCase):
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testScalarConstructor(self):
# Constructor with only scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_double=54.321,
optional_string='optional_string')
self.assertEqual(24, proto.optional_int32)
self.assertEqual(54.321, proto.optional_double)
self.assertEqual('optional_string', proto.optional_string)
def testRepeatedScalarConstructor(self):
# Constructor with only repeated scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_int32=[1, 2, 3, 4],
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_string=["optional_string"])
self.assertEquals([1, 2, 3, 4], list(proto.repeated_int32))
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(["optional_string"], list(proto.repeated_string))
def testRepeatedCompositeConstructor(self):
# Constructor with only repeated composite types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
repeatedgroup=[
unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)])
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertEquals(
[unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)],
list(proto.repeatedgroup))
def testMixedConstructor(self):
# Constructor with only mixed types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_string='optional_string',
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)])
self.assertEqual(24, proto.optional_int32)
self.assertEqual('optional_string', proto.optional_string)
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
def testConstructorTypeError(self):
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_int32="foo")
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=["foo"])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=[1234])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=[1234])
def testConstructorInvalidatesCachedByteSize(self):
message = unittest_pb2.TestAllTypes(optional_int32 = 12)
self.assertEquals(2, message.ByteSize())
message = unittest_pb2.TestAllTypes(
optional_nested_message = unittest_pb2.TestAllTypes.NestedMessage())
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(repeated_int32 = [12])
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(
repeated_nested_message = [unittest_pb2.TestAllTypes.NestedMessage()])
self.assertEquals(3, message.ByteSize())
def testSimpleHasBits(self):
# Test a scalar.
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_int32'))
self.assertEqual(0, proto.optional_int32)
# HasField() shouldn't be true if all we've done is
# read the default value.
self.assertTrue(not proto.HasField('optional_int32'))
proto.optional_int32 = 1
# Setting a value however *should* set the "has" bit.
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
# And clearing that value should unset the "has" bit.
self.assertTrue(not proto.HasField('optional_int32'))
def testHasBitsWithSinglyNestedScalar(self):
# Helper used to test foreign messages and groups.
#
# composite_field_name should be the name of a non-repeated
# composite (i.e., foreign or group) field in TestAllTypes,
# and scalar_field_name should be the name of an integer-valued
# scalar field within that composite.
#
# I never thought I'd miss C++ macros and templates so much. :(
# This helper is semantically just:
#
# assert proto.composite_field.scalar_field == 0
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
#
# proto.composite_field.scalar_field = 10
# old_composite_field = proto.composite_field
#
# assert proto.composite_field.scalar_field == 10
# assert proto.composite_field.HasField('scalar_field')
# assert proto.HasField('composite_field')
#
# proto.ClearField('composite_field')
#
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
# assert proto.composite_field.scalar_field == 0
#
# # Now ensure that ClearField('composite_field') disconnected
# # the old field object from the object tree...
# assert old_composite_field is not proto.composite_field
# old_composite_field.scalar_field = 20
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
def TestCompositeHasBits(composite_field_name, scalar_field_name):
proto = unittest_pb2.TestAllTypes()
# First, check that we can get the scalar value, and see that it's the
      # default (0), but that proto.HasField('composite') and
# proto.composite.HasField('scalar') will still return False.
composite_field = getattr(proto, composite_field_name)
original_scalar_value = getattr(composite_field, scalar_field_name)
self.assertEqual(0, original_scalar_value)
# Assert that the composite object does not "have" the scalar.
self.assertTrue(not composite_field.HasField(scalar_field_name))
# Assert that proto does not "have" the composite field.
self.assertTrue(not proto.HasField(composite_field_name))
# Now set the scalar within the composite field. Ensure that the setting
# is reflected, and that proto.HasField('composite') and
# proto.composite.HasField('scalar') now both return True.
new_val = 20
setattr(composite_field, scalar_field_name, new_val)
self.assertEqual(new_val, getattr(composite_field, scalar_field_name))
# Hold on to a reference to the current composite_field object.
old_composite_field = composite_field
# Assert that the has methods now return true.
self.assertTrue(composite_field.HasField(scalar_field_name))
self.assertTrue(proto.HasField(composite_field_name))
# Now call the clear method...
proto.ClearField(composite_field_name)
# ...and ensure that the "has" bits are all back to False...
composite_field = getattr(proto, composite_field_name)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
# ...and ensure that the scalar field has returned to its default.
self.assertEqual(0, getattr(composite_field, scalar_field_name))
self.assertTrue(old_composite_field is not composite_field)
setattr(old_composite_field, scalar_field_name, new_val)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
self.assertEqual(0, getattr(composite_field, scalar_field_name))
# Test simple, single-level nesting when we set a scalar.
TestCompositeHasBits('optionalgroup', 'a')
TestCompositeHasBits('optional_nested_message', 'bb')
TestCompositeHasBits('optional_foreign_message', 'c')
TestCompositeHasBits('optional_import_message', 'd')
def testReferencesToNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
del proto
# A previous version had a bug where this would raise an exception when
# hitting a now-dead weak reference.
nested.bb = 23
def testDisconnectingNestedMessageBeforeSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testGetDefaultMessageAfterDisconnectingDefaultMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message')
del proto
del nested
# Force a garbage collect so that the underlying CMessages are freed along
# with the Messages they point to. This is to make sure we're not deleting
# default message instances.
gc.collect()
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
def testDisconnectingNestedMessageAfterSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
self.assertTrue(proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertEqual(5, nested.bb)
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testDisconnectingNestedMessageBeforeGettingField(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
def testDisconnectingNestedMessageAfterMerge(self):
# This test exercises the code path that does not use ReleaseMessage().
# The underlying fear is that if we use ReleaseMessage() incorrectly,
# we will have memory leaks. It's hard to check that that doesn't happen,
# but at least we can exercise that code path to make sure it works.
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_nested_message.bb = 5
proto1.MergeFrom(proto2)
self.assertTrue(proto1.HasField('optional_nested_message'))
proto1.ClearField('optional_nested_message')
self.assertTrue(not proto1.HasField('optional_nested_message'))
def testDisconnectingLazyNestedMessage(self):
# This test exercises releasing a nested message that is lazy. This test
# only exercises real code in the C++ implementation as Python does not
# support lazy parsing, but the current C++ implementation results in
# memory corruption and a crash.
if api_implementation.Type() != 'python':
return
proto = unittest_pb2.TestAllTypes()
proto.optional_lazy_message.bb = 5
proto.ClearField('optional_lazy_message')
del proto
gc.collect()
def testHasBitsWhenModifyingRepeatedFields(self):
# Test nesting when we add an element to a repeated field in a submessage.
proto = unittest_pb2.TestNestedMessageHasBits()
proto.optional_nested_message.nestedmessage_repeated_int32.append(5)
self.assertEqual(
[5], proto.optional_nested_message.nestedmessage_repeated_int32)
self.assertTrue(proto.HasField('optional_nested_message'))
# Do the same test, but with a repeated composite field within the
# submessage.
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.optional_nested_message.nestedmessage_repeated_foreignmessage.add()
self.assertTrue(proto.HasField('optional_nested_message'))
def testHasBitsForManyLevelsOfNesting(self):
# Test nesting many levels deep.
recursive_proto = unittest_pb2.TestMutualRecursionA()
self.assertTrue(not recursive_proto.HasField('bb'))
self.assertEqual(0, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(not recursive_proto.HasField('bb'))
recursive_proto.bb.a.bb.a.bb.optional_int32 = 5
self.assertEqual(5, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(recursive_proto.HasField('bb'))
self.assertTrue(recursive_proto.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.HasField('bb'))
self.assertTrue(recursive_proto.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.HasField('bb'))
self.assertTrue(not recursive_proto.bb.a.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.bb.HasField('optional_int32'))
def testSingularListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_fixed32 = 1
proto.optional_int32 = 5
proto.optional_string = 'foo'
# Access sub-message but don't set it yet.
nested_message = proto.optional_nested_message
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo') ],
proto.ListFields())
proto.optional_nested_message.bb = 123
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo'),
(proto.DESCRIPTOR.fields_by_name['optional_nested_message' ],
nested_message) ],
proto.ListFields())
def testRepeatedListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.repeated_fixed32.append(1)
proto.repeated_int32.append(5)
proto.repeated_int32.append(11)
proto.repeated_string.extend(['foo', 'bar'])
proto.repeated_string.extend([])
proto.repeated_string.append('baz')
proto.repeated_string.extend(str(x) for x in xrange(2))
proto.optional_int32 = 21
proto.repeated_bool # Access but don't set anything; should not be listed.
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 21),
(proto.DESCRIPTOR.fields_by_name['repeated_int32' ], [5, 11]),
(proto.DESCRIPTOR.fields_by_name['repeated_fixed32'], [1]),
(proto.DESCRIPTOR.fields_by_name['repeated_string' ],
['foo', 'bar', 'baz', '0', '1']) ],
proto.ListFields())
def testSingularListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.optional_fixed32_extension] = 1
proto.Extensions[unittest_pb2.optional_int32_extension ] = 5
proto.Extensions[unittest_pb2.optional_string_extension ] = 'foo'
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 5),
(unittest_pb2.optional_fixed32_extension, 1),
(unittest_pb2.optional_string_extension , 'foo') ],
proto.ListFields())
def testRepeatedListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.repeated_fixed32_extension].append(1)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(5)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(11)
proto.Extensions[unittest_pb2.repeated_string_extension ].append('foo')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('bar')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('baz')
proto.Extensions[unittest_pb2.optional_int32_extension ] = 21
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 21),
(unittest_pb2.repeated_int32_extension , [5, 11]),
(unittest_pb2.repeated_fixed32_extension, [1]),
(unittest_pb2.repeated_string_extension , ['foo', 'bar', 'baz']) ],
proto.ListFields())
def testListFieldsAndExtensions(self):
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
unittest_pb2.my_extension_int
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['my_int' ], 1),
(unittest_pb2.my_extension_int , 23),
(proto.DESCRIPTOR.fields_by_name['my_string'], 'foo'),
(unittest_pb2.my_extension_string , 'bar'),
(proto.DESCRIPTOR.fields_by_name['my_float' ], 1.0) ],
proto.ListFields())
def testDefaultValues(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
self.assertEqual(0, proto.optional_int64)
self.assertEqual(0, proto.optional_uint32)
self.assertEqual(0, proto.optional_uint64)
self.assertEqual(0, proto.optional_sint32)
self.assertEqual(0, proto.optional_sint64)
self.assertEqual(0, proto.optional_fixed32)
self.assertEqual(0, proto.optional_fixed64)
self.assertEqual(0, proto.optional_sfixed32)
self.assertEqual(0, proto.optional_sfixed64)
self.assertEqual(0.0, proto.optional_float)
self.assertEqual(0.0, proto.optional_double)
self.assertEqual(False, proto.optional_bool)
self.assertEqual('', proto.optional_string)
self.assertEqual(b'', proto.optional_bytes)
self.assertEqual(41, proto.default_int32)
self.assertEqual(42, proto.default_int64)
self.assertEqual(43, proto.default_uint32)
self.assertEqual(44, proto.default_uint64)
self.assertEqual(-45, proto.default_sint32)
self.assertEqual(46, proto.default_sint64)
self.assertEqual(47, proto.default_fixed32)
self.assertEqual(48, proto.default_fixed64)
self.assertEqual(49, proto.default_sfixed32)
self.assertEqual(-50, proto.default_sfixed64)
self.assertEqual(51.5, proto.default_float)
self.assertEqual(52e3, proto.default_double)
self.assertEqual(True, proto.default_bool)
self.assertEqual('hello', proto.default_string)
self.assertEqual(b'world', proto.default_bytes)
self.assertEqual(unittest_pb2.TestAllTypes.BAR, proto.default_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_BAR, proto.default_foreign_enum)
self.assertEqual(unittest_import_pb2.IMPORT_BAR,
proto.default_import_enum)
proto = unittest_pb2.TestExtremeDefaultValues()
self.assertEqual(u'\u1234', proto.utf8_string)
def testHasFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.HasField, 'nonexistent_field')
def testClearFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.ClearField, 'nonexistent_field')
def testClearRemovesChildren(self):
# Make sure there aren't any implementation bugs that are only partially
# clearing the message (which can happen in the more complex C++
# implementation which has parallel message lists).
proto = unittest_pb2.TestRequiredForeign()
for i in range(10):
proto.repeated_message.add()
proto2 = unittest_pb2.TestRequiredForeign()
proto.CopyFrom(proto2)
self.assertRaises(IndexError, lambda: proto.repeated_message[5])
def testDisallowedAssignments(self):
# It's illegal to assign values directly to repeated fields
# or to nonrepeated composite fields. Ensure that this fails.
proto = unittest_pb2.TestAllTypes()
# Repeated fields.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', 10)
# Lists shouldn't work, either.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', [10])
# Composite fields.
self.assertRaises(AttributeError, setattr, proto,
'optional_nested_message', 23)
# Assignment to a repeated nested message field without specifying
# the index in the array of nested messages.
self.assertRaises(AttributeError, setattr, proto.repeated_nested_message,
'bb', 34)
# Assignment to an attribute of a repeated field.
self.assertRaises(AttributeError, setattr, proto.repeated_float,
'some_attribute', 34)
# proto.nonexistent_field = 23 should fail as well.
self.assertRaises(AttributeError, setattr, proto, 'nonexistent_field', 23)
def testSingleScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 1.1)
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 'foo')
self.assertRaises(TypeError, setattr, proto, 'optional_string', 10)
self.assertRaises(TypeError, setattr, proto, 'optional_bytes', 10)
def testIntegerTypes(self):
def TestGetAndDeserialize(field_name, value, expected_type):
proto = unittest_pb2.TestAllTypes()
setattr(proto, field_name, value)
self.assertTrue(isinstance(getattr(proto, field_name), expected_type))
proto2 = unittest_pb2.TestAllTypes()
proto2.ParseFromString(proto.SerializeToString())
self.assertTrue(isinstance(getattr(proto2, field_name), expected_type))
TestGetAndDeserialize('optional_int32', 1, int)
TestGetAndDeserialize('optional_int32', 1 << 30, int)
TestGetAndDeserialize('optional_uint32', 1 << 30, int)
if struct.calcsize('L') == 4:
      # Python only has signed ints, so 32-bit python can't fit a uint32
# in an int.
TestGetAndDeserialize('optional_uint32', 1 << 31, long)
else:
# 64-bit python can fit uint32 inside an int
TestGetAndDeserialize('optional_uint32', 1 << 31, int)
TestGetAndDeserialize('optional_int64', 1 << 30, long)
TestGetAndDeserialize('optional_int64', 1 << 60, long)
TestGetAndDeserialize('optional_uint64', 1 << 30, long)
TestGetAndDeserialize('optional_uint64', 1 << 60, long)
def testSingleScalarBoundsChecking(self):
def TestMinAndMaxIntegers(field_name, expected_min, expected_max):
pb = unittest_pb2.TestAllTypes()
setattr(pb, field_name, expected_min)
self.assertEqual(expected_min, getattr(pb, field_name))
setattr(pb, field_name, expected_max)
self.assertEqual(expected_max, getattr(pb, field_name))
self.assertRaises(ValueError, setattr, pb, field_name, expected_min - 1)
self.assertRaises(ValueError, setattr, pb, field_name, expected_max + 1)
TestMinAndMaxIntegers('optional_int32', -(1 << 31), (1 << 31) - 1)
TestMinAndMaxIntegers('optional_uint32', 0, 0xffffffff)
TestMinAndMaxIntegers('optional_int64', -(1 << 63), (1 << 63) - 1)
TestMinAndMaxIntegers('optional_uint64', 0, 0xffffffffffffffff)
pb = unittest_pb2.TestAllTypes()
pb.optional_nested_enum = 1
self.assertEqual(1, pb.optional_nested_enum)
def testRepeatedScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, proto.repeated_int32.append, 1.1)
self.assertRaises(TypeError, proto.repeated_int32.append, 'foo')
self.assertRaises(TypeError, proto.repeated_string, 10)
self.assertRaises(TypeError, proto.repeated_bytes, 10)
proto.repeated_int32.append(10)
proto.repeated_int32[0] = 23
self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23)
self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc')
# Repeated enums tests.
#proto.repeated_nested_enum.append(0)
def testSingleScalarGettersAndSetters(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
proto.optional_int32 = 1
self.assertEqual(1, proto.optional_int32)
proto.optional_uint64 = 0xffffffffffff
self.assertEqual(0xffffffffffff, proto.optional_uint64)
proto.optional_uint64 = 0xffffffffffffffff
self.assertEqual(0xffffffffffffffff, proto.optional_uint64)
# TODO(robinson): Test all other scalar field types.
def testSingleScalarClearField(self):
proto = unittest_pb2.TestAllTypes()
# Should be allowed to clear something that's not there (a no-op).
proto.ClearField('optional_int32')
proto.optional_int32 = 1
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
self.assertEqual(0, proto.optional_int32)
self.assertTrue(not proto.HasField('optional_int32'))
# TODO(robinson): Test all other scalar field types.
def testEnums(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)
def testEnum_Name(self):
self.assertEqual('FOREIGN_FOO',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_FOO))
self.assertEqual('FOREIGN_BAR',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_BAR))
self.assertEqual('FOREIGN_BAZ',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_BAZ))
self.assertRaises(ValueError,
unittest_pb2.ForeignEnum.Name, 11312)
proto = unittest_pb2.TestAllTypes()
self.assertEqual('FOO',
proto.NestedEnum.Name(proto.FOO))
self.assertEqual('FOO',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.FOO))
self.assertEqual('BAR',
proto.NestedEnum.Name(proto.BAR))
self.assertEqual('BAR',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.BAR))
self.assertEqual('BAZ',
proto.NestedEnum.Name(proto.BAZ))
self.assertEqual('BAZ',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.BAZ))
self.assertRaises(ValueError,
proto.NestedEnum.Name, 11312)
self.assertRaises(ValueError,
unittest_pb2.TestAllTypes.NestedEnum.Name, 11312)
def testEnum_Value(self):
self.assertEqual(unittest_pb2.FOREIGN_FOO,
unittest_pb2.ForeignEnum.Value('FOREIGN_FOO'))
self.assertEqual(unittest_pb2.FOREIGN_BAR,
unittest_pb2.ForeignEnum.Value('FOREIGN_BAR'))
self.assertEqual(unittest_pb2.FOREIGN_BAZ,
unittest_pb2.ForeignEnum.Value('FOREIGN_BAZ'))
self.assertRaises(ValueError,
unittest_pb2.ForeignEnum.Value, 'FO')
proto = unittest_pb2.TestAllTypes()
self.assertEqual(proto.FOO,
proto.NestedEnum.Value('FOO'))
self.assertEqual(proto.FOO,
unittest_pb2.TestAllTypes.NestedEnum.Value('FOO'))
self.assertEqual(proto.BAR,
proto.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAR,
unittest_pb2.TestAllTypes.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAZ,
proto.NestedEnum.Value('BAZ'))
self.assertEqual(proto.BAZ,
unittest_pb2.TestAllTypes.NestedEnum.Value('BAZ'))
self.assertRaises(ValueError,
proto.NestedEnum.Value, 'Foo')
self.assertRaises(ValueError,
unittest_pb2.TestAllTypes.NestedEnum.Value, 'Foo')
def testEnum_KeysAndValues(self):
self.assertEqual(['FOREIGN_FOO', 'FOREIGN_BAR', 'FOREIGN_BAZ'],
unittest_pb2.ForeignEnum.keys())
self.assertEqual([4, 5, 6],
unittest_pb2.ForeignEnum.values())
self.assertEqual([('FOREIGN_FOO', 4), ('FOREIGN_BAR', 5),
('FOREIGN_BAZ', 6)],
unittest_pb2.ForeignEnum.items())
proto = unittest_pb2.TestAllTypes()
self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], proto.NestedEnum.keys())
self.assertEqual([1, 2, 3, -1], proto.NestedEnum.values())
self.assertEqual([('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)],
proto.NestedEnum.items())
def testRepeatedScalars(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(15)
self.assertTrue(proto.repeated_int32)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual([5, 10, 15], proto.repeated_int32)
# Test single retrieval.
self.assertEqual(5, proto.repeated_int32[0])
self.assertEqual(15, proto.repeated_int32[-1])
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234)
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo')
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None)
# Test single assignment.
proto.repeated_int32[1] = 20
self.assertEqual([5, 20, 15], proto.repeated_int32)
# Test insertion.
proto.repeated_int32.insert(1, 25)
self.assertEqual([5, 25, 20, 15], proto.repeated_int32)
# Test slice retrieval.
proto.repeated_int32.append(30)
self.assertEqual([25, 20, 15], proto.repeated_int32[1:4])
self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:])
# Test slice assignment with an iterator
proto.repeated_int32[1:4] = (i for i in xrange(3))
self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32)
# Test slice assignment.
proto.repeated_int32[1:4] = [35, 40, 45]
self.assertEqual([5, 35, 40, 45, 30], proto.repeated_int32)
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_int32:
result.append(i)
self.assertEqual([5, 35, 40, 45, 30], result)
# Test single deletion.
del proto.repeated_int32[2]
self.assertEqual([5, 35, 45, 30], proto.repeated_int32)
# Test slice deletion.
del proto.repeated_int32[2:]
self.assertEqual([5, 35], proto.repeated_int32)
# Test extending.
proto.repeated_int32.extend([3, 13])
self.assertEqual([5, 35, 3, 13], proto.repeated_int32)
# Test clearing.
proto.ClearField('repeated_int32')
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(1)
self.assertEqual(1, proto.repeated_int32[-1])
# Test assignment to a negative index.
proto.repeated_int32[-1] = 2
self.assertEqual(2, proto.repeated_int32[-1])
# Test deletion at negative indices.
proto.repeated_int32[:] = [0, 1, 2, 3]
del proto.repeated_int32[-1]
self.assertEqual([0, 1, 2], proto.repeated_int32)
del proto.repeated_int32[-2]
self.assertEqual([0, 2], proto.repeated_int32)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, -3)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, 300)
del proto.repeated_int32[-2:-1]
self.assertEqual([2], proto.repeated_int32)
del proto.repeated_int32[100:10000]
self.assertEqual([2], proto.repeated_int32)
def testRepeatedScalarsRemove(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(5)
proto.repeated_int32.append(5)
self.assertEqual(4, len(proto.repeated_int32))
proto.repeated_int32.remove(5)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
self.assertEqual(5, proto.repeated_int32[2])
proto.repeated_int32.remove(5)
self.assertEqual(2, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
proto.repeated_int32.remove(10)
self.assertEqual(1, len(proto.repeated_int32))
self.assertEqual(5, proto.repeated_int32[0])
# Remove a non-existent element.
self.assertRaises(ValueError, proto.repeated_int32.remove, 123)
def testRepeatedComposites(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
m1 = proto.repeated_nested_message.add()
self.assertTrue(proto.repeated_nested_message)
self.assertEqual(2, len(proto.repeated_nested_message))
self.assertListsEqual([m0, m1], proto.repeated_nested_message)
self.assertTrue(isinstance(m0, unittest_pb2.TestAllTypes.NestedMessage))
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
1234)
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
-1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
'foo')
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
None)
# Test slice retrieval.
m2 = proto.repeated_nested_message.add()
m3 = proto.repeated_nested_message.add()
m4 = proto.repeated_nested_message.add()
self.assertListsEqual(
[m1, m2, m3], proto.repeated_nested_message[1:4])
self.assertListsEqual(
[m0, m1, m2, m3, m4], proto.repeated_nested_message[:])
self.assertListsEqual(
[m0, m1], proto.repeated_nested_message[:2])
self.assertListsEqual(
[m2, m3, m4], proto.repeated_nested_message[2:])
self.assertEqual(
m0, proto.repeated_nested_message[0])
self.assertListsEqual(
[m0], proto.repeated_nested_message[:1])
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_nested_message:
result.append(i)
self.assertListsEqual([m0, m1, m2, m3, m4], result)
# Test single deletion.
del proto.repeated_nested_message[2]
self.assertListsEqual([m0, m1, m3, m4], proto.repeated_nested_message)
# Test slice deletion.
del proto.repeated_nested_message[2:]
self.assertListsEqual([m0, m1], proto.repeated_nested_message)
# Test extending.
n1 = unittest_pb2.TestAllTypes.NestedMessage(bb=1)
n2 = unittest_pb2.TestAllTypes.NestedMessage(bb=2)
proto.repeated_nested_message.extend([n1,n2])
self.assertEqual(4, len(proto.repeated_nested_message))
self.assertEqual(n1, proto.repeated_nested_message[2])
self.assertEqual(n2, proto.repeated_nested_message[3])
# Test clearing.
proto.ClearField('repeated_nested_message')
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
# Test constructing an element while adding it.
proto.repeated_nested_message.add(bb=23)
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(23, proto.repeated_nested_message[0].bb)
def testRepeatedCompositeRemove(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
# Need to set some differentiating variable so m0 != m1 != m2:
m0.bb = len(proto.repeated_nested_message)
m1 = proto.repeated_nested_message.add()
m1.bb = len(proto.repeated_nested_message)
self.assertTrue(m0 != m1)
m2 = proto.repeated_nested_message.add()
m2.bb = len(proto.repeated_nested_message)
self.assertListsEqual([m0, m1, m2], proto.repeated_nested_message)
self.assertEqual(3, len(proto.repeated_nested_message))
proto.repeated_nested_message.remove(m0)
self.assertEqual(2, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0])
self.assertEqual(m2, proto.repeated_nested_message[1])
    # Removing m0 again or removing None should raise an error
self.assertRaises(ValueError, proto.repeated_nested_message.remove, m0)
self.assertRaises(ValueError, proto.repeated_nested_message.remove, None)
self.assertEqual(2, len(proto.repeated_nested_message))
proto.repeated_nested_message.remove(m2)
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0])
def testHandWrittenReflection(self):
    # Hand written descriptors/reflection are only supported by the pure-Python
# implementation of the API.
if api_implementation.Type() != 'python':
return
FieldDescriptor = descriptor.FieldDescriptor
foo_field_descriptor = FieldDescriptor(
name='foo_field', full_name='MyProto.foo_field',
index=0, number=1, type=FieldDescriptor.TYPE_INT64,
cpp_type=FieldDescriptor.CPPTYPE_INT64,
label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
containing_type=None, message_type=None, enum_type=None,
is_extension=False, extension_scope=None,
options=descriptor_pb2.FieldOptions())
mydescriptor = descriptor.Descriptor(
name='MyProto', full_name='MyProto', filename='ignored',
containing_type=None, nested_types=[], enum_types=[],
fields=[foo_field_descriptor], extensions=[],
options=descriptor_pb2.MessageOptions())
class MyProtoClass(message.Message):
DESCRIPTOR = mydescriptor
__metaclass__ = reflection.GeneratedProtocolMessageType
myproto_instance = MyProtoClass()
self.assertEqual(0, myproto_instance.foo_field)
self.assertTrue(not myproto_instance.HasField('foo_field'))
myproto_instance.foo_field = 23
self.assertEqual(23, myproto_instance.foo_field)
self.assertTrue(myproto_instance.HasField('foo_field'))
def testDescriptorProtoSupport(self):
# Hand written descriptors/reflection are only supported by the pure-Python
# implementation of the API.
if api_implementation.Type() != 'python':
return
def AddDescriptorField(proto, field_name, field_type):
AddDescriptorField.field_index += 1
new_field = proto.field.add()
new_field.name = field_name
new_field.type = field_type
new_field.number = AddDescriptorField.field_index
new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
AddDescriptorField.field_index = 0
desc_proto = descriptor_pb2.DescriptorProto()
desc_proto.name = 'Car'
fdp = descriptor_pb2.FieldDescriptorProto
AddDescriptorField(desc_proto, 'name', fdp.TYPE_STRING)
AddDescriptorField(desc_proto, 'year', fdp.TYPE_INT64)
AddDescriptorField(desc_proto, 'automatic', fdp.TYPE_BOOL)
AddDescriptorField(desc_proto, 'price', fdp.TYPE_DOUBLE)
# Add a repeated field
AddDescriptorField.field_index += 1
new_field = desc_proto.field.add()
new_field.name = 'owners'
new_field.type = fdp.TYPE_STRING
new_field.number = AddDescriptorField.field_index
new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED
desc = descriptor.MakeDescriptor(desc_proto)
self.assertTrue(desc.fields_by_name.has_key('name'))
self.assertTrue(desc.fields_by_name.has_key('year'))
self.assertTrue(desc.fields_by_name.has_key('automatic'))
self.assertTrue(desc.fields_by_name.has_key('price'))
self.assertTrue(desc.fields_by_name.has_key('owners'))
class CarMessage(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = desc
prius = CarMessage()
prius.name = 'prius'
prius.year = 2010
prius.automatic = True
prius.price = 25134.75
prius.owners.extend(['bob', 'susan'])
serialized_prius = prius.SerializeToString()
new_prius = reflection.ParseMessage(desc, serialized_prius)
self.assertTrue(new_prius is not prius)
self.assertEqual(prius, new_prius)
# these are unnecessary assuming message equality works as advertised but
# explicitly check to be safe since we're mucking about in metaclass foo
self.assertEqual(prius.name, new_prius.name)
self.assertEqual(prius.year, new_prius.year)
self.assertEqual(prius.automatic, new_prius.automatic)
self.assertEqual(prius.price, new_prius.price)
self.assertEqual(prius.owners, new_prius.owners)
def testTopLevelExtensionsForOptionalScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_int32_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension])
# As with normal scalar fields, just doing a read doesn't actually set the
# "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
# Actually set the thing.
extendee_proto.Extensions[extension] = 23
self.assertEqual(23, extendee_proto.Extensions[extension])
self.assertTrue(extendee_proto.HasExtension(extension))
# Ensure that clearing works as well.
extendee_proto.ClearExtension(extension)
self.assertEqual(0, extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
def testTopLevelExtensionsForRepeatedScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeated_string_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
extendee_proto.Extensions[extension].append('foo')
self.assertEqual(['foo'], extendee_proto.Extensions[extension])
string_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(string_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForOptionalMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_foreign_message_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension].c)
# As with normal (non-extension) fields, merely reading from the
# thing shouldn't set the "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
extendee_proto.Extensions[extension].c = 23
self.assertEqual(23, extendee_proto.Extensions[extension].c)
self.assertTrue(extendee_proto.HasExtension(extension))
# Save a reference here.
foreign_message = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertTrue(foreign_message is not extendee_proto.Extensions[extension])
# Setting a field on foreign_message now shouldn't set
# any "has" bits on extendee_proto.
foreign_message.c = 42
self.assertEqual(42, foreign_message.c)
self.assertTrue(foreign_message.HasField('c'))
self.assertTrue(not extendee_proto.HasExtension(extension))
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForRepeatedMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeatedgroup_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
group = extendee_proto.Extensions[extension].add()
group.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension][0].a)
group.a = 42
self.assertEqual(42, extendee_proto.Extensions[extension][0].a)
group_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(group_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testNestedExtensions(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
# We just test the non-repeated case.
self.assertTrue(not extendee_proto.HasExtension(extension))
required = extendee_proto.Extensions[extension]
self.assertEqual(0, required.a)
self.assertTrue(not extendee_proto.HasExtension(extension))
required.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension].a)
self.assertTrue(extendee_proto.HasExtension(extension))
extendee_proto.ClearExtension(extension)
self.assertTrue(required is not extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
def testRegisteredExtensions(self):
self.assertTrue('protobuf_unittest.optional_int32_extension' in
unittest_pb2.TestAllExtensions._extensions_by_name)
self.assertTrue(1 in unittest_pb2.TestAllExtensions._extensions_by_number)
# Make sure extensions haven't been registered into types that shouldn't
# have any.
self.assertEquals(0, len(unittest_pb2.TestAllTypes._extensions_by_name))
# If message A directly contains message B, and
# a.HasField('b') is currently False, then mutating any
# extension in B should change a.HasField('b') to True
# (and so on up the object tree).
def testHasBitsForAncestorsOfExtendedMessage(self):
# Optional scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension] = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Repeated scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual([], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension].append(23)
self.assertEqual([23], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Optional message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(toplevel.HasField('submessage'))
# Repeated message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, len(toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension]))
self.assertTrue(not toplevel.HasField('submessage'))
foreign = toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension].add()
self.assertEqual(foreign, toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension][0])
self.assertTrue(toplevel.HasField('submessage'))
def testDisconnectionAfterClearingEmptyMessage(self):
toplevel = more_extensions_pb2.TopLevelMessage()
extendee_proto = toplevel.submessage
extension = more_extensions_pb2.optional_message_extension
extension_proto = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
extension_proto.foreign_message_int = 23
self.assertTrue(extension_proto is not extendee_proto.Extensions[extension])
def testExtensionFailureModes(self):
extendee_proto = unittest_pb2.TestAllExtensions()
# Try non-extension-handle arguments to HasExtension,
# ClearExtension(), and Extensions[]...
self.assertRaises(KeyError, extendee_proto.HasExtension, 1234)
self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5)
# Try something that *is* an extension handle, just not for
# this message...
unknown_handle = more_extensions_pb2.optional_int_extension
self.assertRaises(KeyError, extendee_proto.HasExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.ClearExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__,
unknown_handle, 5)
    # Try calling HasExtension() with a valid handle, but for a
# *repeated* field. (Just as with non-extension repeated
# fields, Has*() isn't supported for extension repeated fields).
self.assertRaises(KeyError, extendee_proto.HasExtension,
unittest_pb2.repeated_string_extension)
def testStaticParseFrom(self):
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
string1 = proto1.SerializeToString()
proto2 = unittest_pb2.TestAllTypes.FromString(string1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
def testMergeFromSingularField(self):
# Test merge with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = unittest_pb2.TestAllTypes()
# This shouldn't get overwritten.
proto2.optional_string = 'value'
proto2.MergeFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('value', proto2.optional_string)
def testMergeFromRepeatedField(self):
# Test merge with just a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.MergeFrom(proto1)
self.assertEqual(0, proto2.repeated_int32[0])
self.assertEqual(1, proto2.repeated_int32[1])
self.assertEqual(2, proto2.repeated_int32[2])
def testMergeFromOptionalGroup(self):
# Test merge with an optional group.
proto1 = unittest_pb2.TestAllTypes()
proto1.optionalgroup.a = 12
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
self.assertEqual(12, proto2.optionalgroup.a)
def testMergeFromRepeatedNestedMessage(self):
# Test merge with a repeated nested message.
proto1 = unittest_pb2.TestAllTypes()
m = proto1.repeated_nested_message.add()
m.bb = 123
m = proto1.repeated_nested_message.add()
m.bb = 321
proto2 = unittest_pb2.TestAllTypes()
m = proto2.repeated_nested_message.add()
m.bb = 999
proto2.MergeFrom(proto1)
self.assertEqual(999, proto2.repeated_nested_message[0].bb)
self.assertEqual(123, proto2.repeated_nested_message[1].bb)
self.assertEqual(321, proto2.repeated_nested_message[2].bb)
proto3 = unittest_pb2.TestAllTypes()
proto3.repeated_nested_message.MergeFrom(proto2.repeated_nested_message)
self.assertEqual(999, proto3.repeated_nested_message[0].bb)
self.assertEqual(123, proto3.repeated_nested_message[1].bb)
self.assertEqual(321, proto3.repeated_nested_message[2].bb)
def testMergeFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testMergeFromExtensionsSingular(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.optional_int32_extension] = 1
proto2 = unittest_pb2.TestAllExtensions()
proto2.MergeFrom(proto1)
self.assertEqual(
1, proto2.Extensions[unittest_pb2.optional_int32_extension])
def testMergeFromExtensionsRepeated(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1)
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2)
proto2 = unittest_pb2.TestAllExtensions()
proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0)
proto2.MergeFrom(proto1)
self.assertEqual(
3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension]))
self.assertEqual(
0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0])
self.assertEqual(
1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1])
self.assertEqual(
2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2])
def testMergeFromExtensionsNestedMessage(self):
proto1 = unittest_pb2.TestAllExtensions()
ext1 = proto1.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext1.add()
m.bb = 222
m = ext1.add()
m.bb = 333
proto2 = unittest_pb2.TestAllExtensions()
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext2.add()
m.bb = 111
proto2.MergeFrom(proto1)
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
self.assertEqual(3, len(ext2))
self.assertEqual(111, ext2[0].bb)
self.assertEqual(222, ext2[1].bb)
self.assertEqual(333, ext2[2].bb)
def testMergeFromBug(self):
message1 = unittest_pb2.TestAllTypes()
message2 = unittest_pb2.TestAllTypes()
# Cause optional_nested_message to be instantiated within message1, even
# though it is not considered to be "present".
message1.optional_nested_message
self.assertFalse(message1.HasField('optional_nested_message'))
    # Merge into message2. This should not instantiate the field in message2.
message2.MergeFrom(message1)
self.assertFalse(message2.HasField('optional_nested_message'))
def testCopyFromSingularField(self):
# Test copy with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto1.optional_string = 'important-text'
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_string = 'value'
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('important-text', proto2.optional_string)
def testCopyFromRepeatedField(self):
# Test copy with a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.repeated_int32[0])
self.assertEqual(2, proto2.repeated_int32[1])
def testCopyFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.CopyFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testCopyFromSelf(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.optional_int32 = 2
proto1.optional_string = 'important-text'
proto1.CopyFrom(proto1)
self.assertEqual(1, proto1.repeated_int32[0])
self.assertEqual(2, proto1.optional_int32)
self.assertEqual('important-text', proto1.optional_string)
def testCopyFromBadType(self):
# The python implementation doesn't raise an exception in this
# case. In theory it should.
if api_implementation.Type() == 'python':
return
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllExtensions()
self.assertRaises(TypeError, proto1.CopyFrom, proto2)
def testDeepCopy(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = copy.deepcopy(proto1)
self.assertEqual(1, proto2.optional_int32)
proto1.repeated_int32.append(2)
proto1.repeated_int32.append(3)
container = copy.deepcopy(proto1.repeated_int32)
self.assertEqual([2, 3], container)
# TODO(anuraag): Implement deepcopy for repeated composite / extension dict
def testClear(self):
proto = unittest_pb2.TestAllTypes()
# C++ implementation does not support lazy fields right now so leave it
# out for now.
if api_implementation.Type() == 'python':
test_util.SetAllFields(proto)
else:
test_util.SetAllNonLazyFields(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllTypes()
self.assertEquals(proto, empty_proto)
# Test if extensions which were set are cleared.
proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllExtensions()
self.assertEquals(proto, empty_proto)
def testDisconnectingBeforeClear(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.Clear()
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
foreign = proto.optional_foreign_message
foreign.c = 6
proto.Clear()
self.assertTrue(nested is not proto.optional_nested_message)
self.assertTrue(foreign is not proto.optional_foreign_message)
self.assertEqual(5, nested.bb)
self.assertEqual(6, foreign.c)
nested.bb = 15
foreign.c = 16
self.assertFalse(proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertFalse(proto.HasField('optional_foreign_message'))
self.assertEqual(0, proto.optional_foreign_message.c)
def testOneOf(self):
proto = unittest_pb2.TestAllTypes()
proto.oneof_uint32 = 10
proto.oneof_nested_message.bb = 11
self.assertEqual(11, proto.oneof_nested_message.bb)
self.assertFalse(proto.HasField('oneof_uint32'))
nested = proto.oneof_nested_message
proto.oneof_string = 'abc'
self.assertEqual('abc', proto.oneof_string)
self.assertEqual(11, nested.bb)
self.assertFalse(proto.HasField('oneof_nested_message'))
def assertInitialized(self, proto):
self.assertTrue(proto.IsInitialized())
# Neither method should raise an exception.
proto.SerializeToString()
proto.SerializePartialToString()
def assertNotInitialized(self, proto):
self.assertFalse(proto.IsInitialized())
# "Partial" serialization doesn't care if message is uninitialized.
proto.SerializePartialToString()
def testIsInitialized(self):
# Trivial cases - all optional fields and extensions.
proto = unittest_pb2.TestAllTypes()
self.assertInitialized(proto)
proto = unittest_pb2.TestAllExtensions()
self.assertInitialized(proto)
# The case of uninitialized required fields.
proto = unittest_pb2.TestRequired()
self.assertNotInitialized(proto)
proto.a = proto.b = proto.c = 2
self.assertInitialized(proto)
# The case of uninitialized submessage.
proto = unittest_pb2.TestRequiredForeign()
self.assertInitialized(proto)
proto.optional_message.a = 1
self.assertNotInitialized(proto)
proto.optional_message.b = 0
proto.optional_message.c = 0
self.assertInitialized(proto)
# Uninitialized repeated submessage.
message1 = proto.repeated_message.add()
self.assertNotInitialized(proto)
message1.a = message1.b = message1.c = 0
self.assertInitialized(proto)
# Uninitialized repeated group in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.multi
message1 = proto.Extensions[extension].add()
message2 = proto.Extensions[extension].add()
self.assertNotInitialized(proto)
message1.a = 1
message1.b = 1
message1.c = 1
self.assertNotInitialized(proto)
message2.a = 2
message2.b = 2
message2.c = 2
self.assertInitialized(proto)
# Uninitialized nonrepeated message in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
proto.Extensions[extension].a = 1
self.assertNotInitialized(proto)
proto.Extensions[extension].b = 2
proto.Extensions[extension].c = 3
self.assertInitialized(proto)
# Try passing an errors list.
errors = []
proto = unittest_pb2.TestRequired()
self.assertFalse(proto.IsInitialized(errors))
self.assertEqual(errors, ['a', 'b', 'c'])
@basetest.unittest.skipIf(
api_implementation.Type() != 'cpp' or api_implementation.Version() != 2,
'Errors are only available from the most recent C++ implementation.')
def testFileDescriptorErrors(self):
file_name = 'test_file_descriptor_errors.proto'
package_name = 'test_file_descriptor_errors.proto'
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.name = file_name
file_descriptor_proto.package = package_name
m1 = file_descriptor_proto.message_type.add()
m1.name = 'msg1'
# Compiles the proto into the C++ descriptor pool
descriptor.FileDescriptor(
file_name,
package_name,
serialized_pb=file_descriptor_proto.SerializeToString())
# Add a FileDescriptorProto that has duplicate symbols
another_file_name = 'another_test_file_descriptor_errors.proto'
file_descriptor_proto.name = another_file_name
m2 = file_descriptor_proto.message_type.add()
m2.name = 'msg2'
with self.assertRaises(TypeError) as cm:
descriptor.FileDescriptor(
another_file_name,
package_name,
serialized_pb=file_descriptor_proto.SerializeToString())
self.assertTrue(hasattr(cm, 'exception'), '%s not raised' %
getattr(cm.expected, '__name__', cm.expected))
self.assertIn('test_file_descriptor_errors.proto', str(cm.exception))
# Error message will say something about this definition being a
# duplicate, though we don't check the message exactly to avoid a
# dependency on the C++ logging code.
self.assertIn('test_file_descriptor_errors.msg1', str(cm.exception))
def testStringUTF8Encoding(self):
proto = unittest_pb2.TestAllTypes()
# Assignment of a unicode object to a field of type 'bytes' is not allowed.
self.assertRaises(TypeError,
setattr, proto, 'optional_bytes', u'unicode object')
# Check that the default value is of python's 'unicode' type.
self.assertEqual(type(proto.optional_string), unicode)
proto.optional_string = unicode('Testing')
self.assertEqual(proto.optional_string, str('Testing'))
# Assign a value of type 'str' which can be encoded in UTF-8.
proto.optional_string = str('Testing')
self.assertEqual(proto.optional_string, unicode('Testing'))
# Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', b'a\x80a')
if str is bytes: # PY2
# Assign a 'str' object which contains a UTF-8 encoded string.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', 'Тест')
else:
proto.optional_string = 'Тест'
# No exception thrown.
proto.optional_string = 'abc'
def testStringUTF8Serialization(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message = unittest_mset_pb2.TestMessageSetExtension2
extension = extension_message.message_set_extension
test_utf8 = u'Тест'
test_utf8_bytes = test_utf8.encode('utf-8')
# 'Test' in another language, using UTF-8 charset.
proto.Extensions[extension].str = test_utf8
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
# Check byte size.
self.assertEqual(proto.ByteSize(), len(serialized))
raw = unittest_mset_pb2.RawMessageSet()
bytes_read = raw.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_read)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(1, len(raw.item))
# Check that the type_id is the same as the tag ID in the .proto file.
self.assertEqual(raw.item[0].type_id, 1547769)
# Check the actual bytes on the wire.
self.assertTrue(
raw.item[0].message.endswith(test_utf8_bytes))
bytes_read = message2.MergeFromString(raw.item[0].message)
self.assertEqual(len(raw.item[0].message), bytes_read)
self.assertEqual(type(message2.str), unicode)
self.assertEqual(message2.str, test_utf8)
# The pure Python API throws an exception on MergeFromString(),
# if any of the string fields of the message can't be UTF-8 decoded.
# The C++ implementation of the API has no way to check that on
# MergeFromString and thus has no way to throw the exception.
#
# The pure Python API always returns objects of type 'unicode' (UTF-8
# encoded), or 'bytes' (in 7 bit ASCII).
badbytes = raw.item[0].message.replace(
test_utf8_bytes, len(test_utf8_bytes) * b'\xff')
unicode_decode_failed = False
try:
message2.MergeFromString(badbytes)
except UnicodeDecodeError:
unicode_decode_failed = True
string_field = message2.str
self.assertTrue(unicode_decode_failed or type(string_field) is bytes)
def testBytesInTextFormat(self):
proto = unittest_pb2.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff')
self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n',
unicode(proto))
def testEmptyNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.MergeFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.CopyFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
bytes_read = proto.optional_nested_message.MergeFromString(b'')
self.assertEqual(0, bytes_read)
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.ParseFromString(b'')
self.assertTrue(proto.HasField('optional_nested_message'))
serialized = proto.SerializeToString()
proto2 = unittest_pb2.TestAllTypes()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertTrue(proto2.HasField('optional_nested_message'))
def testSetInParent(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.HasField('optionalgroup'))
proto.optionalgroup.SetInParent()
self.assertTrue(proto.HasField('optionalgroup'))
# Since we had so many tests for protocol buffer equality, we broke these out
# into separate TestCase classes.
class TestAllTypesEqualityTest(basetest.TestCase):
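  """Equality tests between two default-constructed (empty) TestAllTypes protos."""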
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
def testNotHashable(self):
self.assertRaises(TypeError, hash, self.first_proto)
def testSelfEquality(self):
self.assertEqual(self.first_proto, self.first_proto)
def testEmptyProtosEqual(self):
self.assertEqual(self.first_proto, self.second_proto)
class FullProtosEqualityTest(basetest.TestCase):
"""Equality tests using completely-full protos as a starting point."""
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(self.first_proto)
test_util.SetAllFields(self.second_proto)
def testNotHashable(self):
self.assertRaises(TypeError, hash, self.first_proto)
def testNoneNotEqual(self):
self.assertNotEqual(self.first_proto, None)
self.assertNotEqual(None, self.second_proto)
def testNotEqualToOtherMessage(self):
third_proto = unittest_pb2.TestRequired()
self.assertNotEqual(self.first_proto, third_proto)
self.assertNotEqual(third_proto, self.second_proto)
def testAllFieldsFilledEquality(self):
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalar(self):
# Nonrepeated scalar field change should cause inequality.
self.first_proto.optional_int32 += 1
self.assertNotEqual(self.first_proto, self.second_proto)
# ...as should clearing a field.
self.first_proto.ClearField('optional_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedComposite(self):
# Change a nonrepeated composite field.
self.first_proto.optional_nested_message.bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Clear a field in the nested message.
self.first_proto.optional_nested_message.ClearField('bb')
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb = (
self.second_proto.optional_nested_message.bb)
self.assertEqual(self.first_proto, self.second_proto)
# Remove the nested message entirely.
self.first_proto.ClearField('optional_nested_message')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedScalar(self):
# Change a repeated scalar field.
self.first_proto.repeated_int32.append(5)
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.ClearField('repeated_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedComposite(self):
# Change value within a repeated composite field.
self.first_proto.repeated_nested_message[0].bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.repeated_nested_message[0].bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Add a value to a repeated composite field.
self.first_proto.repeated_nested_message.add()
self.assertNotEqual(self.first_proto, self.second_proto)
self.second_proto.repeated_nested_message.add()
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalarHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated scalar field.
self.first_proto.ClearField('optional_int32')
self.second_proto.optional_int32 = 0
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedCompositeHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated composite field.
self.first_proto.ClearField('optional_nested_message')
self.second_proto.optional_nested_message.ClearField('bb')
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb = 0
self.first_proto.optional_nested_message.ClearField('bb')
self.assertEqual(self.first_proto, self.second_proto)
class ExtensionEqualityTest(basetest.TestCase):
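  """Equality semantics for extension fields, including value and "has"-bit checks."""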
def testExtensionEquality(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(first_proto, second_proto)
test_util.SetAllExtensions(first_proto)
self.assertNotEqual(first_proto, second_proto)
test_util.SetAllExtensions(second_proto)
self.assertEqual(first_proto, second_proto)
# Ensure that we check value equality.
first_proto.Extensions[unittest_pb2.optional_int32_extension] += 1
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] -= 1
self.assertEqual(first_proto, second_proto)
# Ensure that we also look at "has" bits.
first_proto.ClearExtension(unittest_pb2.optional_int32_extension)
second_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertEqual(first_proto, second_proto)
# Ensure that differences in cached values
# don't matter if "has" bits are both false.
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(
0, first_proto.Extensions[unittest_pb2.optional_int32_extension])
self.assertEqual(first_proto, second_proto)
class MutualRecursionEqualityTest(basetest.TestCase):
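  """Equality tests for mutually recursive message types."""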
def testEqualityWithMutualRecursion(self):
first_proto = unittest_pb2.TestMutualRecursionA()
second_proto = unittest_pb2.TestMutualRecursionA()
self.assertEqual(first_proto, second_proto)
first_proto.bb.a.bb.optional_int32 = 23
self.assertNotEqual(first_proto, second_proto)
second_proto.bb.a.bb.optional_int32 = 23
self.assertEqual(first_proto, second_proto)
class ByteSizeTest(basetest.TestCase):
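  """Tests that ByteSize() reports correct sizes and invalidates its cache on mutation."""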
def setUp(self):
self.proto = unittest_pb2.TestAllTypes()
self.extended_proto = more_extensions_pb2.ExtendedMessage()
self.packed_proto = unittest_pb2.TestPackedTypes()
self.packed_extended_proto = unittest_pb2.TestPackedExtensions()
def Size(self):
return self.proto.ByteSize()
def testEmptyMessage(self):
self.assertEqual(0, self.proto.ByteSize())
def testSizedOnKwargs(self):
# Use a separate message to ensure testing right after creation.
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.ByteSize())
proto_kwargs = unittest_pb2.TestAllTypes(optional_int64 = 1)
# One byte for the tag, one to encode varint 1.
self.assertEqual(2, proto_kwargs.ByteSize())
def testVarints(self):
def Test(i, expected_varint_size):
self.proto.Clear()
self.proto.optional_int64 = i
# Add one to the varint size for the tag info
# for tag 1.
self.assertEqual(expected_varint_size + 1, self.Size())
Test(0, 1)
Test(1, 1)
for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
Test((1 << i) - 1, num_bytes)
Test(-1, 10)
Test(-2, 10)
Test(-(1 << 63), 10)
def testStrings(self):
self.proto.optional_string = ''
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2, self.Size())
self.proto.optional_string = 'abc'
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2 + len(self.proto.optional_string), self.Size())
self.proto.optional_string = 'x' * 128
# Need one byte for tag info (tag #14), and TWO bytes for length.
self.assertEqual(3 + len(self.proto.optional_string), self.Size())
def testOtherNumerics(self):
self.proto.optional_fixed32 = 1234
# One byte for tag and 4 bytes for fixed32.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_fixed64 = 1234
# One byte for tag and 8 bytes for fixed64.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_float = 1.234
# One byte for tag and 4 bytes for float.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_double = 1.234
# One byte for tag and 8 bytes for float.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_sint32 = 64
# One byte for tag and 2 bytes for zig-zag-encoded 64.
self.assertEqual(3, self.Size())
self.proto = unittest_pb2.TestAllTypes()
def testComposites(self):
# 3 bytes.
self.proto.optional_nested_message.bb = (1 << 14)
# Plus one byte for bb tag.
# Plus 1 byte for optional_nested_message serialized size.
# Plus two bytes for optional_nested_message tag.
self.assertEqual(3 + 1 + 1 + 2, self.Size())
def testGroups(self):
# 4 bytes.
self.proto.optionalgroup.a = (1 << 21)
# Plus two bytes for |a| tag.
# Plus 2 * two bytes for START_GROUP and END_GROUP tags.
self.assertEqual(4 + 2 + 2*2, self.Size())
def testRepeatedScalars(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsExtend(self):
self.proto.repeated_int32.extend([10, 128]) # 3 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsRemove(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
self.proto.repeated_int32.remove(128)
self.assertEqual(1 + 2, self.Size())
def testRepeatedComposites(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 7
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
def testRepeatedCompositesDelete(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 9
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[0]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
# Now add a new message.
foreign_message_2 = self.proto.repeated_nested_message.add()
foreign_message_2.bb = 12
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[1]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
del self.proto.repeated_nested_message[0]
self.assertEqual(0, self.Size())
def testRepeatedGroups(self):
# 2-byte START_GROUP plus 2-byte END_GROUP.
group_0 = self.proto.repeatedgroup.add()
# 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
# plus 2-byte END_GROUP.
group_1 = self.proto.repeatedgroup.add()
group_1.a = 7
self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())
def testExtensions(self):
proto = unittest_pb2.TestAllExtensions()
self.assertEqual(0, proto.ByteSize())
extension = unittest_pb2.optional_int32_extension # Field #1, 1 byte.
proto.Extensions[extension] = 23
# 1 byte for tag, 1 byte for value.
self.assertEqual(2, proto.ByteSize())
def testCacheInvalidationForNonrepeatedScalar(self):
# Test non-extension.
self.proto.optional_int32 = 1
self.assertEqual(2, self.proto.ByteSize())
self.proto.optional_int32 = 128
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_int_extension
self.extended_proto.Extensions[extension] = 1
self.assertEqual(2, self.extended_proto.ByteSize())
self.extended_proto.Extensions[extension] = 128
self.assertEqual(3, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedScalar(self):
# Test non-extension.
self.proto.repeated_int32.append(1)
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_int32.append(1)
self.assertEqual(6, self.proto.ByteSize())
self.proto.repeated_int32[1] = 128
self.assertEqual(7, self.proto.ByteSize())
self.proto.ClearField('repeated_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_int_extension
repeated = self.extended_proto.Extensions[extension]
repeated.append(1)
self.assertEqual(2, self.extended_proto.ByteSize())
repeated.append(1)
self.assertEqual(4, self.extended_proto.ByteSize())
repeated[1] = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForNonrepeatedMessage(self):
# Test non-extension.
self.proto.optional_foreign_message.c = 1
self.assertEqual(5, self.proto.ByteSize())
self.proto.optional_foreign_message.c = 128
self.assertEqual(6, self.proto.ByteSize())
self.proto.optional_foreign_message.ClearField('c')
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
if api_implementation.Type() == 'python':
# This is only possible in pure-Python implementation of the API.
child = self.proto.optional_foreign_message
self.proto.ClearField('optional_foreign_message')
child.c = 128
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_message_extension
child = self.extended_proto.Extensions[extension]
self.assertEqual(0, self.extended_proto.ByteSize())
child.foreign_message_int = 1
self.assertEqual(4, self.extended_proto.ByteSize())
child.foreign_message_int = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedMessage(self):
# Test non-extension.
child0 = self.proto.repeated_foreign_message.add()
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_foreign_message.add()
self.assertEqual(6, self.proto.ByteSize())
child0.c = 1
self.assertEqual(8, self.proto.ByteSize())
self.proto.ClearField('repeated_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_message_extension
child_list = self.extended_proto.Extensions[extension]
child0 = child_list.add()
self.assertEqual(2, self.extended_proto.ByteSize())
child_list.add()
self.assertEqual(4, self.extended_proto.ByteSize())
child0.foreign_message_int = 1
self.assertEqual(6, self.extended_proto.ByteSize())
child0.ClearField('foreign_message_int')
self.assertEqual(4, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testPackedRepeatedScalars(self):
self.assertEqual(0, self.packed_proto.ByteSize())
self.packed_proto.packed_int32.append(10) # 1 byte.
self.packed_proto.packed_int32.append(128) # 2 bytes.
# The tag is 2 bytes (the field number is 90), and the varint
# storing the length is 1 byte.
int_size = 1 + 2 + 3
self.assertEqual(int_size, self.packed_proto.ByteSize())
self.packed_proto.packed_double.append(4.2) # 8 bytes
self.packed_proto.packed_double.append(3.25) # 8 bytes
# 2 more tag bytes, 1 more length byte.
double_size = 8 + 8 + 3
self.assertEqual(int_size+double_size, self.packed_proto.ByteSize())
self.packed_proto.ClearField('packed_int32')
self.assertEqual(double_size, self.packed_proto.ByteSize())
def testPackedExtensions(self):
self.assertEqual(0, self.packed_extended_proto.ByteSize())
extension = self.packed_extended_proto.Extensions[
unittest_pb2.packed_fixed32_extension]
extension.extend([1, 2, 3, 4]) # 16 bytes
# Tag is 3 bytes.
self.assertEqual(19, self.packed_extended_proto.ByteSize())
# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
# including ordering between extensions and non-extension
# fields.
# * Consistent serialization of negative numbers, especially
# negative int32s.
# * Handling of empty submessages (with and without "has"
# bits set).
class SerializationTest(basetest.TestCase):
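  """Round-trip serialization/parsing tests, including wire-format details."""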
def testSerializeEmtpyMessage(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeAllFields(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeAllExtensions(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeWithOptionalGroup(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
first_proto.optionalgroup.a = 242
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeNegativeValues(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.optional_int32 = -1
first_proto.optional_int64 = -(2 << 40)
first_proto.optional_sint32 = -3
first_proto.optional_sint64 = -(4 << 40)
first_proto.optional_sfixed32 = -5
first_proto.optional_sfixed64 = -(6 << 40)
second_proto = unittest_pb2.TestAllTypes.FromString(
first_proto.SerializeToString())
self.assertEqual(first_proto, second_proto)
def testParseTruncated(self):
# This test is only applicable for the Python implementation of the API.
if api_implementation.Type() != 'python':
return
first_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
for truncation_point in xrange(len(serialized) + 1):
try:
second_proto = unittest_pb2.TestAllTypes()
unknown_fields = unittest_pb2.TestEmptyMessage()
pos = second_proto._InternalParse(serialized, 0, truncation_point)
# If we didn't raise an error then we read exactly the amount expected.
self.assertEqual(truncation_point, pos)
# Parsing to unknown fields should not throw if parsing to known fields
# did not.
try:
pos2 = unknown_fields._InternalParse(serialized, 0, truncation_point)
self.assertEqual(truncation_point, pos2)
except message.DecodeError:
self.fail('Parsing unknown fields failed when parsing known fields '
'did not.')
except message.DecodeError:
# Parsing unknown fields should also fail.
self.assertRaises(message.DecodeError, unknown_fields._InternalParse,
serialized, 0, truncation_point)
def testCanonicalSerializationOrder(self):
proto = more_messages_pb2.OutOfOrderFields()
# These are also their tag numbers. Even though we're setting these in
# reverse-tag order AND they're listed in reverse tag-order in the .proto
# file, they should nonetheless be serialized in tag order.
proto.optional_sint32 = 5
proto.Extensions[more_messages_pb2.optional_uint64] = 4
proto.optional_uint32 = 3
proto.Extensions[more_messages_pb2.optional_int64] = 2
proto.optional_int32 = 1
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((1, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(1, d.ReadInt32())
self.assertEqual((2, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(2, d.ReadInt64())
self.assertEqual((3, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(3, d.ReadUInt32())
self.assertEqual((4, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(4, d.ReadUInt64())
self.assertEqual((5, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(5, d.ReadSInt32())
def testCanonicalSerializationOrderSameAsCpp(self):
# Copy of the same test we use for C++.
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
serialized = proto.SerializeToString()
test_util.ExpectAllFieldsAndExtensionsInOrder(serialized)
def testMergeFromStringWhenFieldsAlreadySet(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.repeated_string.append('foobar')
first_proto.optional_int32 = 23
first_proto.optional_nested_message.bb = 42
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestAllTypes()
second_proto.repeated_string.append('baz')
second_proto.optional_int32 = 100
second_proto.optional_nested_message.bb = 999
bytes_parsed = second_proto.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_parsed)
# Ensure that we append to repeated fields.
self.assertEqual(['baz', 'foobar'], list(second_proto.repeated_string))
    # Ensure that we overwrite nonrepeated scalars.
self.assertEqual(23, second_proto.optional_int32)
# Ensure that we recursively call MergeFromString() on
# submessages.
self.assertEqual(42, second_proto.optional_nested_message.bb)
def testMessageSetWireFormat(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension_message2 = unittest_mset_pb2.TestMessageSetExtension2
extension1 = extension_message1.message_set_extension
extension2 = extension_message2.message_set_extension
proto.Extensions[extension1].i = 123
proto.Extensions[extension2].str = 'foo'
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
raw = unittest_mset_pb2.RawMessageSet()
self.assertEqual(False,
raw.DESCRIPTOR.GetOptions().message_set_wire_format)
self.assertEqual(
len(serialized),
raw.MergeFromString(serialized))
self.assertEqual(2, len(raw.item))
message1 = unittest_mset_pb2.TestMessageSetExtension1()
self.assertEqual(
len(raw.item[0].message),
message1.MergeFromString(raw.item[0].message))
self.assertEqual(123, message1.i)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(
len(raw.item[1].message),
message2.MergeFromString(raw.item[1].message))
self.assertEqual('foo', message2.str)
# Deserialize using the MessageSet wire format.
proto2 = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(123, proto2.Extensions[extension1].i)
self.assertEqual('foo', proto2.Extensions[extension2].str)
# Check byte size.
self.assertEqual(proto2.ByteSize(), len(serialized))
self.assertEqual(proto.ByteSize(), len(serialized))
def testMessageSetWireFormatUnknownExtension(self):
# Create a message using the message set wire format with an unknown
# message.
raw = unittest_mset_pb2.RawMessageSet()
# Add an item.
item = raw.item.add()
item.type_id = 1545008
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12345
item.message = message1.SerializeToString()
# Add a second, unknown extension.
item = raw.item.add()
item.type_id = 1545009
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12346
item.message = message1.SerializeToString()
# Add another unknown extension.
item = raw.item.add()
item.type_id = 1545010
message1 = unittest_mset_pb2.TestMessageSetExtension2()
message1.str = 'foo'
item.message = message1.SerializeToString()
serialized = raw.SerializeToString()
# Parse message using the message set wire format.
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto.MergeFromString(serialized))
# Check that the message parsed well.
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension1 = extension_message1.message_set_extension
self.assertEquals(12345, proto.Extensions[extension1].i)
def testUnknownFields(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
# Now test with a int64 field set.
proto = unittest_pb2.TestAllTypes()
proto.optional_int64 = 0x0fffffffffffffff
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
def _CheckRaises(self, exc_class, callable_obj, exception):
"""This method checks if the excpetion type and message are as expected."""
try:
callable_obj()
except exc_class as ex:
# Check if the exception message is the right one.
self.assertEqual(exception, str(ex))
return
else:
raise self.failureException('%s not raised' % str(exc_class))
def testSerializeUninitialized(self):
proto = unittest_pb2.TestRequired()
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertFalse(proto2.HasField('a'))
# proto2 ParseFromString does not check that required fields are set.
proto2.ParseFromString(partial)
self.assertFalse(proto2.HasField('a'))
proto.a = 1
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.b = 2
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.c = 3
serialized = proto.SerializeToString()
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
self.assertEqual(
len(partial),
proto2.MergeFromString(partial))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
def testSerializeUninitializedSubMessage(self):
proto = unittest_pb2.TestRequiredForeign()
# Sub-message doesn't exist yet, so this succeeds.
proto.SerializeToString()
proto.optional_message.a = 1
proto.optional_message.b = 2
proto.optional_message.c = 3
proto.SerializeToString()
proto.repeated_message.add().a = 1
proto.repeated_message.add().b = 2
proto.repeated_message[0].b = 2
proto.repeated_message[0].c = 3
proto.repeated_message[1].a = 1
proto.repeated_message[1].c = 3
proto.SerializeToString()
def testSerializeAllPackedFields(self):
first_proto = unittest_pb2.TestPackedTypes()
second_proto = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testSerializeAllPackedExtensions(self):
first_proto = unittest_pb2.TestPackedExtensions()
second_proto = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(first_proto)
serialized = first_proto.SerializeToString()
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testMergePackedFromStringWhenSomeFieldsAlreadySet(self):
first_proto = unittest_pb2.TestPackedTypes()
first_proto.packed_int32.extend([1, 2])
first_proto.packed_double.append(3.0)
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestPackedTypes()
second_proto.packed_int32.append(3)
second_proto.packed_double.extend([1.0, 2.0])
second_proto.packed_sint32.append(4)
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual([3, 1, 2], second_proto.packed_int32)
self.assertEqual([1.0, 2.0, 3.0], second_proto.packed_double)
self.assertEqual([4], second_proto.packed_sint32)
def testPackedFieldsWireFormat(self):
proto = unittest_pb2.TestPackedTypes()
proto.packed_int32.extend([1, 2, 150, 3]) # 1 + 1 + 2 + 1 bytes
proto.packed_double.extend([1.0, 1000.0]) # 8 + 8 bytes
proto.packed_float.append(2.0) # 4 bytes, will be before double
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((90, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
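    # Payload length of the packed int32 field: the varints for 1, 2, 150, 3 take 1+1+2+1 = 5 bytes.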
self.assertEqual(1+1+1+2, d.ReadInt32())
self.assertEqual(1, d.ReadInt32())
self.assertEqual(2, d.ReadInt32())
self.assertEqual(150, d.ReadInt32())
self.assertEqual(3, d.ReadInt32())
self.assertEqual((100, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(4, d.ReadInt32())
self.assertEqual(2.0, d.ReadFloat())
self.assertEqual((101, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(8+8, d.ReadInt32())
self.assertEqual(1.0, d.ReadDouble())
self.assertEqual(1000.0, d.ReadDouble())
self.assertTrue(d.EndOfStream())
def testParsePackedFromUnpacked(self):
unpacked = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(unpacked)
packed = unittest_pb2.TestPackedTypes()
serialized = unpacked.SerializeToString()
self.assertEqual(
len(serialized),
packed.MergeFromString(serialized))
expected = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(expected)
self.assertEqual(expected, packed)
def testParseUnpackedFromPacked(self):
packed = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(packed)
unpacked = unittest_pb2.TestUnpackedTypes()
serialized = packed.SerializeToString()
self.assertEqual(
len(serialized),
unpacked.MergeFromString(serialized))
expected = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(expected)
self.assertEqual(expected, unpacked)
def testFieldNumbers(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(unittest_pb2.TestAllTypes.NestedMessage.BB_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONAL_INT32_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONALGROUP_FIELD_NUMBER, 16)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_MESSAGE_FIELD_NUMBER, 18)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_ENUM_FIELD_NUMBER, 21)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATED_INT32_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATEDGROUP_FIELD_NUMBER, 46)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_MESSAGE_FIELD_NUMBER, 48)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_ENUM_FIELD_NUMBER, 51)
def testExtensionFieldNumbers(self):
self.assertEqual(unittest_pb2.TestRequired.single.number, 1000)
self.assertEqual(unittest_pb2.TestRequired.SINGLE_FIELD_NUMBER, 1000)
self.assertEqual(unittest_pb2.TestRequired.multi.number, 1001)
self.assertEqual(unittest_pb2.TestRequired.MULTI_FIELD_NUMBER, 1001)
self.assertEqual(unittest_pb2.optional_int32_extension.number, 1)
self.assertEqual(unittest_pb2.OPTIONAL_INT32_EXTENSION_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.optionalgroup_extension.number, 16)
self.assertEqual(unittest_pb2.OPTIONALGROUP_EXTENSION_FIELD_NUMBER, 16)
self.assertEqual(unittest_pb2.optional_nested_message_extension.number, 18)
self.assertEqual(
unittest_pb2.OPTIONAL_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 18)
self.assertEqual(unittest_pb2.optional_nested_enum_extension.number, 21)
self.assertEqual(unittest_pb2.OPTIONAL_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
21)
self.assertEqual(unittest_pb2.repeated_int32_extension.number, 31)
self.assertEqual(unittest_pb2.REPEATED_INT32_EXTENSION_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.repeatedgroup_extension.number, 46)
self.assertEqual(unittest_pb2.REPEATEDGROUP_EXTENSION_FIELD_NUMBER, 46)
self.assertEqual(unittest_pb2.repeated_nested_message_extension.number, 48)
self.assertEqual(
unittest_pb2.REPEATED_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 48)
self.assertEqual(unittest_pb2.repeated_nested_enum_extension.number, 51)
self.assertEqual(unittest_pb2.REPEATED_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
51)
def testInitKwargs(self):
proto = unittest_pb2.TestAllTypes(
optional_int32=1,
optional_string='foo',
optional_bool=True,
optional_bytes=b'bar',
optional_nested_message=unittest_pb2.TestAllTypes.NestedMessage(bb=1),
optional_foreign_message=unittest_pb2.ForeignMessage(c=1),
optional_nested_enum=unittest_pb2.TestAllTypes.FOO,
optional_foreign_enum=unittest_pb2.FOREIGN_FOO,
repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_int32'))
self.assertTrue(proto.HasField('optional_string'))
self.assertTrue(proto.HasField('optional_bool'))
self.assertTrue(proto.HasField('optional_bytes'))
self.assertTrue(proto.HasField('optional_nested_message'))
self.assertTrue(proto.HasField('optional_foreign_message'))
self.assertTrue(proto.HasField('optional_nested_enum'))
self.assertTrue(proto.HasField('optional_foreign_enum'))
self.assertEqual(1, proto.optional_int32)
self.assertEqual('foo', proto.optional_string)
self.assertEqual(True, proto.optional_bool)
self.assertEqual(b'bar', proto.optional_bytes)
self.assertEqual(1, proto.optional_nested_message.bb)
self.assertEqual(1, proto.optional_foreign_message.c)
self.assertEqual(unittest_pb2.TestAllTypes.FOO,
proto.optional_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_FOO, proto.optional_foreign_enum)
self.assertEqual([1, 2, 3], proto.repeated_int32)
def testInitArgsUnknownFieldName(self):
def InitalizeEmptyMessageWithExtraKeywordArg():
unused_proto = unittest_pb2.TestEmptyMessage(unknown='unknown')
self._CheckRaises(ValueError,
InitalizeEmptyMessageWithExtraKeywordArg,
'Protocol message has no "unknown" field.')
def testInitRequiredKwargs(self):
proto = unittest_pb2.TestRequired(a=1, b=1, c=1)
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('a'))
self.assertTrue(proto.HasField('b'))
self.assertTrue(proto.HasField('c'))
self.assertTrue(not proto.HasField('dummy2'))
self.assertEqual(1, proto.a)
self.assertEqual(1, proto.b)
self.assertEqual(1, proto.c)
def testInitRequiredForeignKwargs(self):
proto = unittest_pb2.TestRequiredForeign(
optional_message=unittest_pb2.TestRequired(a=1, b=1, c=1))
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_message'))
self.assertTrue(proto.optional_message.IsInitialized())
self.assertTrue(proto.optional_message.HasField('a'))
self.assertTrue(proto.optional_message.HasField('b'))
self.assertTrue(proto.optional_message.HasField('c'))
self.assertTrue(not proto.optional_message.HasField('dummy2'))
self.assertEqual(unittest_pb2.TestRequired(a=1, b=1, c=1),
proto.optional_message)
self.assertEqual(1, proto.optional_message.a)
self.assertEqual(1, proto.optional_message.b)
self.assertEqual(1, proto.optional_message.c)
def testInitRepeatedKwargs(self):
proto = unittest_pb2.TestAllTypes(repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertEqual(1, proto.repeated_int32[0])
self.assertEqual(2, proto.repeated_int32[1])
self.assertEqual(3, proto.repeated_int32[2])
class OptionsTest(basetest.TestCase):
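  """Tests for reading message and field options from generated descriptors."""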
def testMessageOptions(self):
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(True,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
proto = unittest_pb2.TestAllTypes()
self.assertEqual(False,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
def testPackedOptions(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_int32 = 1
proto.optional_double = 3.0
for field_descriptor, _ in proto.ListFields():
self.assertEqual(False, field_descriptor.GetOptions().packed)
proto = unittest_pb2.TestPackedTypes()
proto.packed_int32.append(1)
proto.packed_double.append(3.0)
for field_descriptor, _ in proto.ListFields():
self.assertEqual(True, field_descriptor.GetOptions().packed)
self.assertEqual(reflection._FieldDescriptor.LABEL_REPEATED,
field_descriptor.label)
class ClassAPITest(basetest.TestCase):
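  """Tests for creating message classes at runtime from descriptors."""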
def testMakeClassWithNestedDescriptor(self):
leaf_desc = descriptor.Descriptor('leaf', 'package.parent.child.leaf', '',
containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
child_desc = descriptor.Descriptor('child', 'package.parent.child', '',
containing_type=None, fields=[],
nested_types=[leaf_desc], enum_types=[],
extensions=[])
sibling_desc = descriptor.Descriptor('sibling', 'package.parent.sibling',
'', containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
parent_desc = descriptor.Descriptor('parent', 'package.parent', '',
containing_type=None, fields=[],
nested_types=[child_desc, sibling_desc],
enum_types=[], extensions=[])
message_class = reflection.MakeClass(parent_desc)
self.assertIn('child', message_class.__dict__)
self.assertIn('sibling', message_class.__dict__)
self.assertIn('leaf', message_class.child.__dict__)
def _GetSerializedFileDescriptor(self, name):
"""Get a serialized representation of a test FileDescriptorProto.
Args:
name: All calls to this must use a unique message name, to avoid
collisions in the cpp descriptor pool.
Returns:
A string containing the serialized form of a test FileDescriptorProto.
"""
file_descriptor_str = (
'message_type {'
' name: "' + name + '"'
' field {'
' name: "flat"'
' number: 1'
' label: LABEL_REPEATED'
' type: TYPE_UINT32'
' }'
' field {'
' name: "bar"'
' number: 2'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Bar"'
' }'
' nested_type {'
' name: "Bar"'
' field {'
' name: "baz"'
' number: 3'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Baz"'
' }'
' nested_type {'
' name: "Baz"'
' enum_type {'
' name: "deep_enum"'
' value {'
' name: "VALUE_A"'
' number: 0'
' }'
' }'
' field {'
' name: "deep"'
' number: 4'
' label: LABEL_OPTIONAL'
' type: TYPE_UINT32'
' }'
' }'
' }'
'}')
file_descriptor = descriptor_pb2.FileDescriptorProto()
text_format.Merge(file_descriptor_str, file_descriptor)
return file_descriptor.SerializeToString()
def testParsingFlatClassWithExplicitClassDeclaration(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('A'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
class MessageClass(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = msg_descriptor
msg = MessageClass()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingFlatClass(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('B'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingNestedClass(self):
"""Test that the generated class can parse a nested message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('C'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'bar {'
' baz {'
' deep: 4'
' }'
'}')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.bar.baz.deep, 4)
if __name__ == '__main__':
basetest.main()
| blazbratanic/protobuf | python/google/protobuf/internal/reflection_test.py | Python | bsd-3-clause | 119,310 | 0.002984 |
#Embedded file name: scripts/client/WeatherManager.py
import BigWorld
import db.DBLogic
from debug_utils import *
def InitWeather():
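    """Apply the current arena's wind speed and gustiness to the BigWorld weather system."""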
arenaData = db.DBLogic.g_instance.getArenaData(BigWorld.player().arenaType)
LOG_DEBUG("WeatherManager:InitWeather() '%s': %s, %s" % (arenaData.geometry, arenaData.weatherWindSpeed, arenaData.weatherWindGustiness))
try:
BigWorld.weather().windAverage(arenaData.weatherWindSpeed[0], arenaData.weatherWindSpeed[1])
BigWorld.weather().windGustiness(arenaData.weatherWindGustiness)
except ValueError:
pass
except EnvironmentError:
pass
def load_mods():
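    """Import all compiled mods (*.pyc) found under scripts/client/mods in the configured resource paths."""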
import ResMgr, os, glob
print 'Mod loader, Monstrofil'
res = ResMgr.openSection('../paths.xml')
sb = res['Paths']
vals = sb.values()[0:2]
for vl in vals:
mp = vl.asString + '/scripts/client/mods/*.pyc'
for fp in glob.iglob(mp):
_, hn = os.path.split(fp)
zn, _ = hn.split('.')
if zn != '__init__':
print 'executing: ' + zn
try:
exec 'import mods.' + zn
except Exception as err:
print err
load_mods()
| Monstrofil/wowp_free_camera | install/scripts/client/WeatherManager.py | Python | apache-2.0 | 1,201 | 0.004163 |
"""
Test for unwanted reference cycles
"""
import pyqtgraph as pg
import numpy as np
import gc, weakref
import six
import pytest
app = pg.mkQApp()
skipreason = ('Unclear why this test fails on Python 3; skipping until someone '
              'has time to fix it. The test also fails under PySide for an '
              'unknown reason.')
def assert_alldead(refs):
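    """Assert that every weakref in *refs* now points to a dead (collected) object."""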
for ref in refs:
assert ref() is None
def qObjectTree(root):
"""Return root and its entire tree of qobject children"""
childs = [root]
for ch in pg.QtCore.QObject.children(root):
childs += qObjectTree(ch)
return childs
def mkrefs(*objs):
"""Return a list of weakrefs to each object in *objs.
QObject instances are expanded to include all child objects.
"""
allObjs = {}
for obj in objs:
if isinstance(obj, pg.QtCore.QObject):
obj = qObjectTree(obj)
else:
obj = [obj]
for o in obj:
allObjs[id(o)] = o
return map(weakref.ref, allObjs.values())
@pytest.mark.skipif(six.PY3 or pg.Qt.QT_LIB == 'PySide', reason=skipreason)
def test_PlotWidget():
def mkobjs(*args, **kwds):
w = pg.PlotWidget(*args, **kwds)
data = pg.np.array([1,5,2,4,3])
c = w.plot(data, name='stuff')
w.addLegend()
# test that connections do not keep objects alive
w.plotItem.vb.sigRangeChanged.connect(mkrefs)
app.focusChanged.connect(w.plotItem.vb.invertY)
# return weakrefs to a bunch of objects that should die when the scope exits.
return mkrefs(w, c, data, w.plotItem, w.plotItem.vb, w.plotItem.getMenu(), w.plotItem.getAxis('left'))
for i in range(5):
assert_alldead(mkobjs())
@pytest.mark.skipif(six.PY3 or pg.Qt.QT_LIB == 'PySide', reason=skipreason)
def test_ImageView():
def mkobjs():
iv = pg.ImageView()
data = np.zeros((10,10,5))
iv.setImage(data)
return mkrefs(iv, iv.imageItem, iv.view, iv.ui.histogram, data)
for i in range(5):
assert_alldead(mkobjs())
@pytest.mark.skipif(six.PY3 or pg.Qt.QT_LIB == 'PySide', reason=skipreason)
def test_GraphicsWindow():
def mkobjs():
w = pg.GraphicsWindow()
p1 = w.addPlot()
v1 = w.addViewBox()
return mkrefs(w, p1, v1)
for i in range(5):
assert_alldead(mkobjs())
if __name__ == '__main__':
    test_PlotWidget()
| pbmanis/acq4 | acq4/pyqtgraph/tests/test_ref_cycles.py | Python | mit | 2,523 | 0.010305 |
# -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from Ui_load import Ui_Load
class Loading(QDialog, Ui_Load):
"""
little loading screen
"""
def __init__(self, maximum, parent=None):
"""
Constructor
"""
QDialog.__init__(self, parent)
self.setupUi(self)
self.progressBar.setMaximum(maximum)
self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
def set_loading(self, progress):
self.progressBar.setValue(progress)
| cedi4155476/musicmanager | music_manager/load.py | Python | mit | 547 | 0.003656 |
#!/usr/bin/env python
#
# Curriculum Module Run Script
# - Run once per run of the module by a user
# - Run inside job submission. So in an allocation.
# - onramp_run_params.cfg file is available in current working directory
#
import os
import sys
from subprocess import call
from configobj import ConfigObj
#
# Read the configobj values
#
# This will always be the name of the file, so fine to hardcode here
conf_file = "onramp_runparams.cfg"
# Already validated the file in our onramp_preprocess.py script - no need to do it again
config = ConfigObj(conf_file)
#
# Run my program
#
os.chdir('src')
#
# TODO
#
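# A sketch of what the run step might look like; the MPI launcher, process
# count and binary name ('xhpl') below are assumptions rather than values
# defined by this module, so adjust them to the actual build before enabling:
#
# status = call(['mpirun', '-np', '4', './xhpl'])
# if status != 0:
#     sys.exit(status)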
# Exit 0 if all is ok
sys.exit(0)
| koepked/onramp | modules/hpl/bin/onramp_run.py | Python | bsd-3-clause | 653 | 0.003063 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a SimpleRNN on the IMDB sentiment classification task.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF+LogReg.
"""
from __future__ import print_function
import tensorflow.keras as keras
import tensorflow.keras.preprocessing.sequence as sequence
from tensorflow_model_optimization.python.core.clustering.keras import cluster
from tensorflow_model_optimization.python.core.clustering.keras import cluster_config
max_features = 20000
maxlen = 100 # cut texts after this number of words
batch_size = 32
print("Loading data...")
(x_train,
y_train), (x_test,
y_test) = keras.datasets.imdb.load_data(num_words=max_features)
print(len(x_train), "train sequences")
print(len(x_test), "test sequences")
print("Pad sequences (samples x time)")
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)
print("Build model...")
model = keras.models.Sequential()
model.add(keras.layers.Embedding(max_features, 128, input_length=maxlen))
model.add(keras.layers.SimpleRNN(128))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Activation("sigmoid"))
model = cluster.cluster_weights(
model,
number_of_clusters=16,
cluster_centroids_init=cluster_config.CentroidInitialization
.KMEANS_PLUS_PLUS,
)
model.compile(loss="binary_crossentropy",
optimizer="adam",
metrics=["accuracy"])
print("Train...")
model.fit(x_train, y_train, batch_size=batch_size, epochs=3,
validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size)
print("Test score:", score)
print("Test accuracy:", acc)
| tensorflow/model-optimization | tensorflow_model_optimization/python/examples/clustering/keras/imdb/imdb_rnn.py | Python | apache-2.0 | 2,554 | 0.000392 |
"""Quick Assistant: Regex based proposals.
This module combines AssistProposal, regexes and string formatting to
provide a way of swiftly coding your own custom Quick Assistant proposals.
These proposals are ready for instatiation and registering with
assist_proposal.register_proposal(): AssignToAttributeOfSelf,
AssignEmptyDictToVarIfNone, AssignEmptyDictToVarIfNone and
AssignAttributeOfSelfToVarIfNone. Using these as examples it should be
straightforward to code your own regex driven Quick Assistant proposals.
"""
__author__ = """Joel Hedlund <joel.hedlund at gmail.com>"""
__version__ = "1.0.0"
__copyright__ = '''Available under the same conditions as PyDev.
See PyDev license for details.
http://pydev.sourceforge.net
'''
import re
from org.python.pydev.core.docutils import PySelection #@UnresolvedImport
from org.python.pydev.editor.actions import PyAction #@UnresolvedImport
import assist_proposal
# For older python versions.
True, False = 1,0
class RegexBasedAssistProposal(assist_proposal.AssistProposal):
"""Base class for regex driven Quick Assist proposals.
More docs available in base class source.
New class data members
======================
regex = re.compile(r'^(?P<initial>\s*)(?P<name>\w+)\s*$'): <regex>
Must .match() current line for .isValid() to return true. Any named
groups will be available in self.vars.
template = "%(initial)sprint 'Hello World!'": <str>
This will replace what's currently on the line on .apply(). May use
string formatters with names from self.vars.
base_vars = {}: <dict <str>:<str>>
Used to initiallize self.vars.
New instance data members
=========================
vars = <dict <str>:<str>>
Variables used with self.template to produce the code that replaces
the current line. This will contain values from self.base_vars, all
named groups in self.regex, as well with these two additional ones:
'indent': the static indentation string
'newline': the line delimiter string
selection, current_line, editor, offset:
Same as the corresponding args to .isValid().
"""
template = ""
base_vars = {}
regex = re.compile(r'^(?P<initial>\s*)(?P<name>\w+)\s*$')
def isValid(self, selection, current_line, editor, offset):
"""Is this proposal applicable to this line of code?
If current_line .match():es against self.regex then we will store
a lot of information on the match and environment, and return True.
Otherwise return False.
IN:
pyselection: <PySelection>
The current selection. Highly useful.
current_line: <str>
The text on the current line.
editor: <PyEdit>
The current editor.
offset: <int>
The current position in the editor.
OUT:
Boolean. Is the proposal applicable in the current situation?
"""
m = self.regex.match(current_line)
if not m:
return False
self.vars = {'indent': editor.getIndentPrefs().getIndentationString()}
self.vars.update(self.base_vars)
self.vars.update(m.groupdict())
self.selection = selection
self.current_line = current_line
self.editor = editor
self.offset = offset
return True
def apply(self, document):
"""Replace the current line with the populated template.
IN:
document: <IDocument>
The edited document.
OUT:
None.
"""
self.vars['newline'] = PyAction.getDelimiter(document)
sNewCode = self.template % self.vars
# Move to insert point:
iStartLineOffset = self.selection.getLineOffset()
iEndLineOffset = iStartLineOffset + len(self.current_line)
self.editor.setSelection(iEndLineOffset, 0)
self.selection = PySelection(self.editor)
# Replace the old code with the new assignment expression:
self.selection.replaceLineContentsToSelection(sNewCode)
#mark the value so that the user can change it
selection = PySelection(self.editor)
absoluteCursorOffset = selection.getAbsoluteCursorOffset()
val = self.vars['value']
self.editor.selectAndReveal(absoluteCursorOffset-len(val),len(val))
class AssignToAttributeOfSelf(RegexBasedAssistProposal):
"""Assign variable to attribute of self.
Effect
======
Generates code that assigns a variable to attribute of self with the
same name.
Valid when
==========
When the current line contains exactly one alphanumeric word. No check
is performed to see if the word is defined or valid in any other way.
Use case
========
It's often a good idea to use the same names in args, variables and
data members. This keeps the terminology consistent. This way
customer_id should always contain a customer id, and any other
variants are misspellings that probably will lead to bugs. This
proposal helps you do this by assigning variables to data members with
the same name.
"""
description = "Assign to attribute of self"
tag = "ASSIGN_VARIABLE_TO_ATTRIBUTE_OF_SELF"
regex = re.compile(r'^(?P<initial> {8}\s*)(?P<name>\w+)\s*$')
template = "%(initial)sself.%(name)s = %(name)s"
class AssignDefaultToVarIfNone(RegexBasedAssistProposal):
"""Assign default value to variable if None.
This is a base class intended for subclassing.
Effect
======
Generates code that tests if a variable is none, and if so, assigns a
default value to it.
Valid when
==========
When the current line contains exactly one alphanumeric word. No check
is performed to see if the word is defined or valid in any other way.
Use case
========
It's generally a bad idea to use mutable objects as default values to
methods and functions. The common way around it is to use None as the
default value, check the arg in the fuction body, and then assign
the desired mutable to it. This proposal does the check/assignment for
you. You only need to type the arg name where you want the check, and
then activate the Quick Assistant.
"""
description = "Assign default value to var if None"
tag = "ASSIGN_DEFAULT_VALUE_TO_VARIABLE_IF_NONE"
regex = re.compile(r'^(?P<initial>\s*)(?P<name>\w+)\s*$')
template = ("%(initial)sif %(name)s is None:%(newline)s"
"%(initial)s%(indent)s%(name)s = %(value)s")
base_vars = {'value': "[]"}
class AssignValueToVarIfNone(AssignDefaultToVarIfNone):
"""Assign value to variable if None."""
description = "Assign value to var if None"
tag = "ASSIGN_VALUE_TO_VARIABLE_IF_NONE"
class AssignEmptyListToVarIfNone(AssignDefaultToVarIfNone):
"""Assign empty list to variable if None."""
description = "Assign empty list to var if None"
tag = "ASSIGN_EMPTY_LIST_TO_VARIABLE_IF_NONE"
class AssignEmptyDictToVarIfNone(AssignEmptyListToVarIfNone):
"""Assign empty dictionary to variable if None."""
description = "Assign empty dict to var if None"
tag = "ASSIGN_EMPTY_DICT_TO_VARIABLE_IF_NONE"
base_vars = {'value': "dict()"}
class AssignAttributeOfSelfToVarIfNone(AssignDefaultToVarIfNone):
"""Assign an attribute of self with same name to variable if None.
Valid when
==========
When the current line contains exactly one alphanumeric word indented
by more than 8 spaces. This script does not check if the word is
defined or valid in any other way.
Use case
========
If a method does something using a data member, but just as well could do
the same thing using an argument, it's generally a good idea to let the
implementation reflect that. This makes the code more flexible. This is
usually done like so:
--------------------------
class MyClass:
        def func(self, arg=None):
if arg is None:
arg = self.arg
...
--------------------------
This proposal does the check/assignment for you. You only need to type the
arg name where you want the check, and then activate the Quick Assistant.
"""
description = "Assign attribute of self to var if None"
tag = "ASSIGN_ATTRIBUTE_OF_SELF_TO_VARIABLE_IF_NONE"
regex = re.compile(r'^(?P<initial> {8}\s*)(?P<name>\w+)\s*$')
template = ("%(initial)sif %(name)s is None:%(newline)s"
"%(initial)s%(indent)s%(name)s = self.%(name)s")
| smkr/pyclipse | plugins/org.python.pydev.jython/jysrc/assist_regex_based_proposal.py | Python | epl-1.0 | 8,719 | 0.007455 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
# Copyright (c) 2015 Wikimedia Foundation
# EXIF optimizer, aims to reduce thumbnail weight as much as possible
# while retaining some critical metadata
import os
import subprocess
from thumbor.optimizers import BaseOptimizer
from thumbor.utils import logger
class Optimizer(BaseOptimizer):
def __init__(self, context):
super(Optimizer, self).__init__(context)
self.runnable = True
self.exiftool_path = self.context.config.EXIFTOOL_PATH
self.exif_fields_to_keep = self.context.config.EXIF_FIELDS_TO_KEEP
self.tinyrgb_path = self.context.config.EXIF_TINYRGB_PATH
self.tinyrgb_icc_replace = self.context.config.EXIF_TINYRGB_ICC_REPLACE
if not (os.path.isfile(self.exiftool_path)
and os.access(self.exiftool_path, os.X_OK)):
logger.error(
"ERROR exiftool path '{0}' is not accessible"
.format(self.exiftool_path)
)
self.runnable = False
if not (os.path.isfile(self.tinyrgb_path)
and os.access(self.tinyrgb_path, os.R_OK)):
logger.error(
"ERROR tinyrgb path '{0}' is not accessible"
.format(self.tinyrgb_path)
)
self.tinyrgb_path = False
def should_run(self, image_extension, buffer):
good_extension = 'jpg' in image_extension or 'jpeg' in image_extension
return good_extension and self.runnable
def optimize(self, buffer, input_file, output_file):
exif_fields = self.exif_fields_to_keep
# TinyRGB is a lightweight sRGB swap-in replacement created by Facebook
# If the image is sRGB, swap the existing heavy profile for TinyRGB
# Only works if icc_profile is configured to be preserved in
# EXIF_FIELDS_TO_KEEP
if (self.tinyrgb_path):
output = subprocess.check_output([
self.exiftool_path,
'-DeviceModelDesc',
'-S',
'-T',
input_file
])
logger.debug("[EXIFTOOL] exiftool output: " + output)
if (output.rstrip().lower() == self.tinyrgb_icc_replace.lower()):
new_icc = 'icc_profile<=%s' % (
self.tinyrgb_path
)
exif_fields = [
new_icc if i == 'icc_profile' else i for i in exif_fields
]
# Strip all EXIF fields except the ones we want to
        # explicitly copy over
command = [
self.exiftool_path,
input_file,
'-all=',
'-tagsFromFile',
'@'
]
command += ['-{0}'.format(i) for i in exif_fields]
command += [
'-m',
'-o',
'-'
]
        with open(output_file, 'wb') as output:
            subprocess.call(command, stdout=output)
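# The optimizer reads the following thumbor configuration keys; the values
# shown here are illustrative placeholders, not defaults shipped with the
# package:
#
# EXIFTOOL_PATH = '/usr/bin/exiftool'
# EXIF_FIELDS_TO_KEEP = ['Artist', 'Copyright', 'ImageDescription', 'icc_profile']
# EXIF_TINYRGB_PATH = '/usr/local/share/tinyrgb.icc'
# EXIF_TINYRGB_ICC_REPLACE = 'sRGB IEC61966-2.1'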
| wikimedia/thumbor-exif-optimizer | wikimedia_thumbor_exif_optimizer/__init__.py | Python | mit | 3,157 | 0 |
from distutils.core import setup
import py2exe
setup(console=[
'../src/mouseup.py',
'../src/mousedown.py',
'../src/mouseclick.py',
'../src/mousemove.py',
'../src/keyboarddown.py',
'../src/keyboardkey.py',
'../src/keyboardtype.py',
'../src/keyboardup.py',
]) | nawarian/PHPBot | ext/pyautogui/bin/setup.py | Python | mit | 320 | 0.003125 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import six
import sys
from functools import wraps
from twisted.internet import defer
from twisted.trial import unittest
log = logging.getLogger(__name__)
class TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
self.disableLogToStdout()
def assertNotEmpty(self, collec):
self.assertNotEqual(len(collec), 0, msg="Collection unexpectedly empty")
@defer.inlineCallbacks
def assertInlineCallbacksRaises(self, exceptionClass, deferred, *args, **kwargs):
yield self.assertFailure(deferred(*args, **kwargs), exceptionClass)
def assertLengthEquals(self, collection, length):
        self.assertEqual(len(collection), length, msg="Invalid length. Expecting: {}. Got: {}"
.format(length, len(collection)))
def enableLogging(self, level=logging.DEBUG):
self.rootLogger = logging.getLogger()
self.oldLoggingLevel = self.rootLogger.getEffectiveLevel()
self.rootLogger.setLevel(level)
self.streamHandler = logging.StreamHandler(sys.stdout)
self.streamHandler.setLevel(level)
# Complete format <date> - <module name> - <level> - <message>:
# '%(asctime)s - %(name)-40s - %(levelname)-7s - %(message)s'
formatter = logging.Formatter('%(name)-45s - %(levelname)-7s - %(message)s')
self.streamHandler.setFormatter(formatter)
self.rootLogger.addHandler(self.streamHandler)
# Simply write an empty string, in order to be sure the first line starts at the
# beginning of the line
sys.stdout.write("\n")
def disableLogging(self):
self.rootLogger.removeHandler(self.streamHandler)
self.rootLogger.setLevel(self.oldLoggingLevel)
@classmethod
def verboseLogging(cls, level=logging.DEBUG):
# Overwrite XTestCase.verboseLogging with deferred support
'''
I enable full logging for the given function or methods. This is extremely useful in order
to enable full log only for a simple test case during debugging, and don't display them
during normal execution.
Simply comment in or out the decorator in order to enable or disable the log display
Example:
.. code-block:: python
class TestClass(TxTestCase):
@TxTestCase.verboseLogging()
def testNormalTest(self):
...
@TxTestCase.verboseLogging()
@defer.inlineCallbacks
def testInlineCallbacksTest(self):
...
@TxTestCase.verboseLogging()
@patch("patch.this.object")
@defer.inlineCallbacks
def testWithPatchAndInlineCallbacksTest(self):
...
'''
def decorator(func):
@wraps(func)
def impl(*args, **kwargs):
# In order to reuse the enableLogging, we need a self to store some values in it,
# but we are in a classmethod (verboseLogging is a method decorator). I don't want
# to store the value in the class object, so I create a temporary object named self,
# used in order to execute the enableLogging method.
self = TestCase()
TestCase.enableLogging(self)
log.info("Log to stdout enabled")
try:
res = func(*args, **kwargs)
finally:
log.info("Log to stdout disabled")
TestCase.disableLogging(self)
return res
return impl
return decorator
def disableLogToStdout(self):
'''
I disable the output of the loggings to the console by filtering all logs out.
'''
root = logging.getLogger()
root.setLevel(logging.CRITICAL + 1)
#############################
# Python 3 compatibility
# See: http://pythonhosted.org/six/#unittest-assertions
    def assertCountEqual(self, actual, expected, msg=None):
        return six.assertCountEqual(self, actual, expected, msg=msg)
    def assertRaisesRegex(self, exception, regexp, callable, *args, **kwds):
        return six.assertRaisesRegex(self, exception, regexp, callable, *args, **kwds)
    def assertRegex(self, text, regex, msg=None):
        return six.assertRegex(self, text, regex, msg=msg)
#############################
| Stibbons/Squirrel | backend/squirrel/common/unittest.py | Python | gpl-3.0 | 4,675 | 0.002781 |
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
from pywps import Process
from pywps.inout import LiteralInput, LiteralOutput
from pywps.inout.literaltypes import ValuesReference
class SimpleProcess(Process):
identifier = "simpleprocess"
def __init__(self):
self.add_input(LiteralInput())
class UltimateQuestion(Process):
def __init__(self):
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
title='Ultimate Question',
outputs=[LiteralOutput('outvalue', 'Output Value', data_type='string')])
@staticmethod
def _handler(request, response):
response.outputs['outvalue'].data = '42'
return response
class Greeter(Process):
def __init__(self):
super(Greeter, self).__init__(
self.greeter,
identifier='greeter',
title='Greeter',
inputs=[LiteralInput('name', 'Input name', data_type='string')],
outputs=[LiteralOutput('message', 'Output message', data_type='string')]
)
@staticmethod
def greeter(request, response):
name = request.inputs['name'][0].data
        assert isinstance(name, str)
response.outputs['message'].data = "Hello {}!".format(name)
return response
class InOut(Process):
def __init__(self):
super(InOut, self).__init__(
self.inout,
identifier='inout',
title='In and Out',
inputs=[
LiteralInput('string', 'String', data_type='string'),
LiteralInput('time', 'Time', data_type='time',
default='12:00:00'),
LiteralInput('ref_value', 'Referenced Value', data_type='string',
allowed_values=ValuesReference(reference="https://en.wikipedia.org/w/api.php?action=opensearch&search=scotland&format=json"), # noqa
default='Scotland',),
],
outputs=[
LiteralOutput('string', 'Output', data_type='string')
]
)
@staticmethod
def inout(request, response):
a_string = request.inputs['string'][0].data
response.outputs['string'].data = "".format(a_string)
return response
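# Illustrative only: in pywps test suites these processes are typically
# handed to a Service instance, roughly along these lines (exact wiring may
# vary between pywps versions):
#
# from pywps import Service
# service = Service(processes=[UltimateQuestion(), Greeter(), InOut()])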
| bird-house/PyWPS | tests/processes/__init__.py | Python | mit | 2,519 | 0.001191 |
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import logging
import random
import threading
import time
import typing
from typing import Dict, Iterable, Optional, Union
try:
from collections.abc import KeysView
KeysView[None] # KeysView is only subscriptable in Python 3.9+
except TypeError:
# Deprecated since Python 3.9, thus only use as a fallback in older Python versions
from typing import KeysView
from google.cloud.pubsub_v1.subscriber._protocol import requests
if typing.TYPE_CHECKING: # pragma: NO COVER
from google.cloud.pubsub_v1.subscriber._protocol.streaming_pull_manager import (
StreamingPullManager,
)
_LOGGER = logging.getLogger(__name__)
_LEASE_WORKER_NAME = "Thread-LeaseMaintainer"
class _LeasedMessage(typing.NamedTuple):
sent_time: float
"""The local time when ACK ID was initially leased in seconds since the epoch."""
size: int
ordering_key: Optional[str]
class Leaser(object):
def __init__(self, manager: "StreamingPullManager"):
self._thread: Optional[threading.Thread] = None
self._manager = manager
# a lock used for start/stop operations, protecting the _thread attribute
self._operational_lock = threading.Lock()
# A lock ensuring that add/remove operations are atomic and cannot be
# intertwined. Protects the _leased_messages and _bytes attributes.
self._add_remove_lock = threading.Lock()
# Dict of ack_id -> _LeasedMessage
self._leased_messages: Dict[str, _LeasedMessage] = {}
self._bytes = 0
"""The total number of bytes consumed by leased messages."""
self._stop_event = threading.Event()
@property
def message_count(self) -> int:
"""The number of leased messages."""
return len(self._leased_messages)
@property
def ack_ids(self) -> KeysView[str]:
"""The ack IDs of all leased messages."""
return self._leased_messages.keys()
@property
def bytes(self) -> int:
"""The total size, in bytes, of all leased messages."""
return self._bytes
def add(self, items: Iterable[requests.LeaseRequest]) -> None:
"""Add messages to be managed by the leaser."""
with self._add_remove_lock:
for item in items:
# Add the ack ID to the set of managed ack IDs, and increment
# the size counter.
if item.ack_id not in self._leased_messages:
self._leased_messages[item.ack_id] = _LeasedMessage(
sent_time=float("inf"),
size=item.byte_size,
ordering_key=item.ordering_key,
)
self._bytes += item.byte_size
else:
_LOGGER.debug("Message %s is already lease managed", item.ack_id)
def start_lease_expiry_timer(self, ack_ids: Iterable[str]) -> None:
"""Start the lease expiry timer for `items`.
Args:
items: Sequence of ack-ids for which to start lease expiry timers.
"""
with self._add_remove_lock:
for ack_id in ack_ids:
lease_info = self._leased_messages.get(ack_id)
# Lease info might not exist for this ack_id because it has already
# been removed by remove().
if lease_info:
self._leased_messages[ack_id] = lease_info._replace(
sent_time=time.time()
)
def remove(
self,
items: Iterable[
Union[requests.AckRequest, requests.DropRequest, requests.NackRequest]
],
) -> None:
"""Remove messages from lease management."""
with self._add_remove_lock:
# Remove the ack ID from lease management, and decrement the
# byte counter.
for item in items:
if self._leased_messages.pop(item.ack_id, None) is not None:
self._bytes -= item.byte_size
else:
_LOGGER.debug("Item %s was not managed.", item.ack_id)
if self._bytes < 0:
_LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes)
self._bytes = 0
def maintain_leases(self) -> None:
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
# Also update the deadline currently used if enough new ACK data has been
# gathered since the last deadline update.
deadline = self._manager._obtain_ack_deadline(maybe_update=True)
_LOGGER.debug("The current deadline value is %d seconds.", deadline)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are beyond the max lease time. This ensures
# that in the event of a badly behaving actor, we can drop messages
# and allow the Pub/Sub server to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size, item.ordering_key)
for ack_id, item in leased_messages.items()
if item.sent_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
assert self._manager.dispatcher is not None
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a modack request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
assert self._manager.dispatcher is not None
ack_id_gen = (ack_id for ack_id in ack_ids)
self._manager._send_lease_modacks(ack_id_gen, deadline)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
snooze = random.uniform(0.0, deadline * 0.9)
_LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
self._stop_event.wait(timeout=snooze)
_LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
def start(self) -> None:
with self._operational_lock:
if self._thread is not None:
raise ValueError("Leaser is already running.")
# Create and start the helper thread.
self._stop_event.clear()
thread = threading.Thread(
name=_LEASE_WORKER_NAME, target=self.maintain_leases
)
thread.daemon = True
thread.start()
_LOGGER.debug("Started helper thread %s", thread.name)
self._thread = thread
def stop(self) -> None:
with self._operational_lock:
self._stop_event.set()
if self._thread is not None:
# The thread should automatically exit when the consumer is
# inactive.
self._thread.join()
self._thread = None
| googleapis/python-pubsub | google/cloud/pubsub_v1/subscriber/_protocol/leaser.py | Python | apache-2.0 | 9,379 | 0.001279 |
#!/usr/bin/python2.4
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import common
import os
import re
import socket # for gethostname
import xml.dom
import xml_fix
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.doc = None
def Create(self, name):
"""Creates the user file document.
Args:
name: Name of the user file.
"""
self.name = name
# Create XML doc
xml_impl = xml.dom.getDOMImplementation()
self.doc = xml_impl.createDocument(None, 'VisualStudioUserFile', None)
# Add attributes to root element
self.n_root = self.doc.documentElement
self.n_root.setAttribute('Version', self.version.ProjectVersion())
self.n_root.setAttribute('Name', self.name)
# Add configurations section
self.n_configs = self.doc.createElement('Configurations')
self.n_root.appendChild(self.n_configs)
def _AddConfigToNode(self, parent, config_type, config_name):
"""Adds a configuration to the parent node.
Args:
parent: Destination node.
config_type: Type of configuration node.
config_name: Configuration name.
"""
# Add configuration node and its attributes
n_config = self.doc.createElement(config_type)
n_config.setAttribute('Name', config_name)
parent.appendChild(n_config)
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self._AddConfigToNode(self.n_configs, 'Configuration', name)
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      environment: dict of environment variables to set for the debug target. (optional)
      working_directory: directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
n_cmd = self.doc.createElement('DebugSettings')
abs_command = _FindCommandInPath(command[0])
n_cmd.setAttribute('Command', abs_command)
n_cmd.setAttribute('WorkingDirectory', working_directory)
n_cmd.setAttribute('CommandArguments', " ".join(command[1:]))
n_cmd.setAttribute('RemoteMachine', socket.gethostname())
if environment and isinstance(environment, dict):
n_cmd.setAttribute('Environment',
" ".join(['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]))
else:
n_cmd.setAttribute('Environment', '')
n_cmd.setAttribute('EnvironmentMerge', 'true')
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
n_cmd.setAttribute('Attach', 'false')
n_cmd.setAttribute('DebuggerType', '3') # 'auto' debugger
n_cmd.setAttribute('Remote', '1')
n_cmd.setAttribute('RemoteCommand', '')
n_cmd.setAttribute('HttpUrl', '')
n_cmd.setAttribute('PDBPath', '')
n_cmd.setAttribute('SQLDebugging', '')
n_cmd.setAttribute('DebuggerFlavor', '0')
n_cmd.setAttribute('MPIRunCommand', '')
n_cmd.setAttribute('MPIRunArguments', '')
n_cmd.setAttribute('MPIRunWorkingDirectory', '')
n_cmd.setAttribute('ApplicationCommand', '')
n_cmd.setAttribute('ApplicationArguments', '')
n_cmd.setAttribute('ShimCommand', '')
n_cmd.setAttribute('MPIAcceptMode', '')
n_cmd.setAttribute('MPIAcceptFilter', '')
# Find the config, and add it if it doesn't exist.
found = False
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
found = True
if not found:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
config.appendChild(n_cmd)
break
def Write(self, writer=common.WriteOnDiff):
"""Writes the user file."""
f = writer(self.user_file_path)
self.doc.writexml(f, encoding='Windows-1252', addindent=' ', newl='\r\n')
f.close()
#------------------------------------------------------------------------------
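# Illustrative usage sketch (not part of gyp itself); the file name, project
# name and configuration below are made-up values, and `version` stands for a
# version object obtained from gyp's MSVS version detection:
#
#   user_file = Writer('example.vcproj.user', version)
#   user_file.Create('example')
#   user_file.AddConfig('Debug|Win32')
#   user_file.AddDebugSettings('Debug|Win32', ['python', 'run_tests.py'],
#                              working_directory='$(ProjectDir)')
#   user_file.Write()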
| nawawi/wkhtmltopdf | webkit/Source/ThirdParty/gyp/pylib/gyp/MSVSUserFile.py | Python | lgpl-3.0 | 6,250 | 0.0056 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app # type: ignore
from absl import flags
import os # type: ignore
import time
from typing import Callable
import flax # type: ignore
from flax import linen as nn
from flax.training import common_utils # type: ignore
import jax # type: ignore
from jax import lax
from jax import numpy as jnp
from jax.experimental.jax2tf.examples import saved_model_lib # type: ignore
import numpy as np # type: ignore
import tensorflow as tf # type: ignore
from tensorflowjs.converters import convert_tf_saved_model # type: ignore
from jax.config import config # type: ignore
config.config_with_absl()
import utils
flags.DEFINE_boolean("run_eval_on_train", False,
("Also run eval on the train set after each epoch. This "
"slows down training considerably."))
flags.DEFINE_integer("num_epochs", 5,
("Number of epochs to train for."))
flags.DEFINE_integer("num_classes", 100, "Number of classification classes.")
flags.register_validator("num_classes",
lambda value: value >= 1 and value <= 100,
message="--num_classes must be in range [1, 100]")
FLAGS = flags.FLAGS
# The code below is an adaptation for Flax from the work published here:
# https://blog.tensorflow.org/2018/07/train-model-in-tfkeras-with-colab-and-run-in-browser-tensorflowjs.html
class QuickDrawModule(nn.Module):
@nn.compact
def __call__(self, x):
x = nn.Conv(features=16, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=32, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=64, kernel_size=(3, 3), padding='SAME')(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = nn.Dense(features=128)(x)
x = nn.relu(x)
x = nn.Dense(features=FLAGS.num_classes)(x)
x = nn.softmax(x)
return x
def predict(params, inputs):
"""A functional interface to the trained Module."""
return QuickDrawModule().apply({'params': params}, inputs)
def categorical_cross_entropy_loss(logits, labels):
onehot_labels = common_utils.onehot(labels, logits.shape[-1])
return jnp.mean(-jnp.sum(onehot_labels * jnp.log(logits), axis=1))
def update(optimizer, inputs, labels):
def loss_fn(params):
logits = predict(params, inputs)
return categorical_cross_entropy_loss(logits, labels)
grad = jax.grad(loss_fn)(optimizer.target)
optimizer = optimizer.apply_gradient(grad)
return optimizer
def accuracy(predict: Callable, params, dataset):
def top_k_classes(x, k):
bcast_idxs = jnp.broadcast_to(np.arange(x.shape[-1]), x.shape)
sorted_vals, sorted_idxs = lax.sort_key_val(x, bcast_idxs)
topk_idxs = (
lax.slice_in_dim(sorted_idxs, -k, sorted_idxs.shape[-1], axis=-1))
return topk_idxs
def _per_batch(inputs, labels):
logits = predict(params, inputs)
predicted_classes = top_k_classes(logits, 1)
predicted_classes = predicted_classes.reshape((predicted_classes.shape[0],))
return jnp.mean(predicted_classes == labels)
batched = [_per_batch(inputs, labels) for inputs, labels in dataset]
return jnp.mean(jnp.stack(batched))
def train_one_epoch(optimizer, train_ds):
for inputs, labels in train_ds:
optimizer = jax.jit(update)(optimizer, inputs, labels)
return optimizer
def init_model():
rng = jax.random.PRNGKey(0)
init_shape = jnp.ones((1, 28, 28, 1), jnp.float32)
initial_params = QuickDrawModule().init(rng, init_shape)["params"]
optimizer = flax.optim.Adam(
learning_rate=0.001, beta1=0.9, beta2=0.999).create(initial_params)
return optimizer, initial_params
def train(train_ds, test_ds, classes):
optimizer, params = init_model()
for epoch in range(1, FLAGS.num_epochs+1):
start_time = time.time()
optimizer = train_one_epoch(optimizer, train_ds)
if FLAGS.run_eval_on_train:
train_acc = accuracy(predict, optimizer.target, train_ds)
print("Training set accuracy {}".format(train_acc))
test_acc = accuracy(predict, optimizer.target, test_ds)
print("Test set accuracy {}".format(test_acc))
epoch_time = time.time() - start_time
print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
return optimizer.target
def main(*args):
base_model_path = "/tmp/jax2tf/tf_js_quickdraw"
dataset_path = os.path.join(base_model_path, "data")
num_classes = FLAGS.num_classes
classes = utils.download_dataset(dataset_path, num_classes)
assert len(classes) == num_classes, classes
print(f"Classes are: {classes}")
print("Loading dataset into memory...")
train_ds, test_ds = utils.load_classes(dataset_path, classes)
print(f"Starting training for {FLAGS.num_epochs} epochs...")
flax_params = train(train_ds, test_ds, classes)
model_dir = os.path.join(base_model_path, "saved_models")
# the model must be converted with with_gradient set to True to be able to
# convert the saved model to TF.js, as "PreventGradient" is not supported
saved_model_lib.convert_and_save_model(predict, flax_params, model_dir,
input_signatures=[tf.TensorSpec([1, 28, 28, 1])],
with_gradient=True, compile_model=False,
enable_xla=False)
conversion_dir = os.path.join(base_model_path, 'tfjs_models')
convert_tf_saved_model(model_dir, conversion_dir)
if __name__ == "__main__":
app.run(main)
| google/jax | jax/experimental/jax2tf/examples/tf_js/quickdraw/quickdraw.py | Python | apache-2.0 | 6,160 | 0.011364 |
from autosar.base import splitRef
from autosar.element import Element
import sys
class SystemSignal(Element):
def __init__(self,name,dataTypeRef,initValueRef,length,desc=None,parent=None):
super().__init__(name,parent)
self.dataTypeRef=dataTypeRef
self.initValueRef=initValueRef
self.length=length
self.desc=desc
self.parent=parent
def asdict(self):
data={'type': self.__class__.__name__,'name':self.name,
'dataTypeRef': self.dataTypeRef,
'initValueRef': self.initValueRef,
'length': self.length
}
if self.desc is not None: data['desc']=self.desc
return data
class SystemSignalGroup(Element):
def __init__(self, name, systemSignalRefs=None,parent=None):
super().__init__(name,parent)
if isinstance(systemSignalRefs,list):
self.systemSignalRefs=systemSignalRefs
else:
self.systemSignalRefs=[]
| cogu/autosar | autosar/signal.py | Python | mit | 1,012 | 0.024704 |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "S20Control",
version = "0.1",
author = "Glen Pitt-Pladdy / Guy Sheffer",
author_email = "glenpp@users.noreply.github.com",
description = ("Python management utility for Orvibo S20 WiFi Plug"),
license = "GNU",
keywords = "s20 orvibo orvibos20",
url = "https://github.com/glenpp/OrviboS20",
packages=['S20control'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Beta",
"Topic :: Utilities",
"License :: GNU License",
],
entry_points = {
'console_scripts': [
'S20control = S20control.S20control:main',
],
},
)
| glenpp/OrviboS20 | setup.py | Python | gpl-2.0 | 1,055 | 0.020853 |
from __future__ import unicode_literals
import uuid
from random import randint, random
from moto.core import BaseBackend
from moto.ec2 import ec2_backends
from copy import copy
class BaseObject(object):
def camelCase(self, key):
words = []
for i, word in enumerate(key.split('_')):
if i > 0:
words.append(word.title())
else:
words.append(word)
return ''.join(words)
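    # For example (illustrative note, not original code): an attribute named
    # 'pending_tasks_count' is emitted under the key 'pendingTasksCount'.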
def gen_response_object(self):
response_object = copy(self.__dict__)
for key, value in response_object.items():
if '_' in key:
response_object[self.camelCase(key)] = value
del response_object[key]
return response_object
@property
def response_object(self):
return self.gen_response_object()
class Cluster(BaseObject):
def __init__(self, cluster_name):
self.active_services_count = 0
self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format(cluster_name)
self.name = cluster_name
self.pending_tasks_count = 0
self.registered_container_instances_count = 0
self.running_tasks_count = 0
self.status = 'ACTIVE'
@property
def physical_resource_id(self):
return self.name
@property
def response_object(self):
response_object = self.gen_response_object()
response_object['clusterArn'] = self.arn
response_object['clusterName'] = self.name
del response_object['arn'], response_object['name']
return response_object
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a random name if necessary
cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
)
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if original_resource.name != properties['ClusterName']:
ecs_backend = ecs_backends[region_name]
ecs_backend.delete_cluster(original_resource.arn)
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a random name if necessary
cluster_name=properties.get('ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
)
else:
# no-op when nothing changed between old and new resources
return original_resource
class TaskDefinition(BaseObject):
def __init__(self, family, revision, container_definitions, volumes=None):
self.family = family
self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(family, revision)
self.container_definitions = container_definitions
if volumes is None:
self.volumes = []
else:
self.volumes = volumes
@property
def response_object(self):
response_object = self.gen_response_object()
response_object['taskDefinitionArn'] = response_object['arn']
del response_object['arn']
return response_object
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
container_definitions = properties['ContainerDefinitions']
volumes = properties['Volumes']
ecs_backend = ecs_backends[region_name]
return ecs_backend.register_task_definition(
family=family, container_definitions=container_definitions, volumes=volumes)
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
family = properties.get('Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
container_definitions = properties['ContainerDefinitions']
volumes = properties['Volumes']
if (original_resource.family != family or
original_resource.container_definitions != container_definitions or
original_resource.volumes != volumes
# currently TaskRoleArn isn't stored at TaskDefinition instances
):
ecs_backend = ecs_backends[region_name]
ecs_backend.deregister_task_definition(original_resource.arn)
return ecs_backend.register_task_definition(
family=family, container_definitions=container_definitions, volumes=volumes)
else:
# no-op when nothing changed between old and new resources
return original_resource
class Task(BaseObject):
    def __init__(self, cluster, task_definition, container_instance_arn, overrides=None, started_by=''):
self.cluster_arn = cluster.arn
self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(str(uuid.uuid1()))
self.container_instance_arn = container_instance_arn
self.last_status = 'RUNNING'
self.desired_status = 'RUNNING'
self.task_definition_arn = task_definition.arn
        self.overrides = overrides or {}
self.containers = []
self.started_by = started_by
self.stopped_reason = ''
@property
def response_object(self):
response_object = self.gen_response_object()
return response_object
class Service(BaseObject):
def __init__(self, cluster, service_name, task_definition, desired_count):
self.cluster_arn = cluster.arn
self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(service_name)
self.name = service_name
self.status = 'ACTIVE'
self.running_count = 0
self.task_definition = task_definition.arn
self.desired_count = desired_count
self.events = []
self.load_balancers = []
self.pending_count = 0
@property
def physical_resource_id(self):
return self.arn
@property
def response_object(self):
response_object = self.gen_response_object()
del response_object['name'], response_object['arn']
response_object['serviceName'] = self.name
response_object['serviceArn'] = self.arn
return response_object
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if isinstance(properties['Cluster'], Cluster):
cluster = properties['Cluster'].name
else:
cluster = properties['Cluster']
if isinstance(properties['TaskDefinition'], TaskDefinition):
task_definition = properties['TaskDefinition'].family
else:
task_definition = properties['TaskDefinition']
service_name = '{0}Service{1}'.format(cluster, int(random() * 10 ** 6))
desired_count = properties['DesiredCount']
# TODO: LoadBalancers
# TODO: Role
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_service(
cluster, service_name, task_definition, desired_count)
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if isinstance(properties['Cluster'], Cluster):
cluster_name = properties['Cluster'].name
else:
cluster_name = properties['Cluster']
if isinstance(properties['TaskDefinition'], TaskDefinition):
task_definition = properties['TaskDefinition'].family
else:
task_definition = properties['TaskDefinition']
desired_count = properties['DesiredCount']
ecs_backend = ecs_backends[region_name]
service_name = original_resource.name
if original_resource.cluster_arn != Cluster(cluster_name).arn:
# TODO: LoadBalancers
# TODO: Role
ecs_backend.delete_service(cluster_name, service_name)
new_service_name = '{0}Service{1}'.format(cluster_name, int(random() * 10 ** 6))
return ecs_backend.create_service(
cluster_name, new_service_name, task_definition, desired_count)
else:
return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count)
class ContainerInstance(BaseObject):
def __init__(self, ec2_instance_id):
self.ec2_instance_id = ec2_instance_id
self.status = 'ACTIVE'
self.registeredResources = []
self.agentConnected = True
self.containerInstanceArn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(str(uuid.uuid1()))
self.pendingTaskCount = 0
self.remainingResources = []
self.runningTaskCount = 0
self.versionInfo = {
'agentVersion': "1.0.0",
'agentHash': '4023248',
'dockerVersion': 'DockerVersion: 1.5.0'
}
@property
def response_object(self):
response_object = self.gen_response_object()
        # ContainerInstance defines no 'name' or 'arn' attributes, so only
        # drop these keys if they are present.
        response_object.pop('name', None)
        response_object.pop('arn', None)
return response_object
class ContainerInstanceFailure(BaseObject):
def __init__(self, reason, container_instance_id):
self.reason = reason
self.arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(container_instance_id)
@property
def response_object(self):
response_object = self.gen_response_object()
response_object['reason'] = self.reason
response_object['arn'] = self.arn
return response_object
class EC2ContainerServiceBackend(BaseBackend):
def __init__(self):
self.clusters = {}
self.task_definitions = {}
self.tasks = {}
self.services = {}
self.container_instances = {}
def create_cluster(self, cluster_name):
cluster = Cluster(cluster_name)
self.clusters[cluster_name] = cluster
return cluster
def list_clusters(self):
"""
maxSize and pagination not implemented
"""
return [cluster.arn for cluster in self.clusters.values()]
def describe_clusters(self, list_clusters_name=None):
list_clusters = []
if list_clusters_name is None:
if 'default' in self.clusters:
list_clusters.append(self.clusters['default'].response_object)
else:
for cluster in list_clusters_name:
cluster_name = cluster.split('/')[-1]
if cluster_name in self.clusters:
list_clusters.append(self.clusters[cluster_name].response_object)
else:
raise Exception("{0} is not a cluster".format(cluster_name))
return list_clusters
def delete_cluster(self, cluster_str):
cluster_name = cluster_str.split('/')[-1]
if cluster_name in self.clusters:
return self.clusters.pop(cluster_name)
else:
raise Exception("{0} is not a cluster".format(cluster_name))
def register_task_definition(self, family, container_definitions, volumes):
if family in self.task_definitions:
revision = len(self.task_definitions[family]) + 1
else:
self.task_definitions[family] = []
revision = 1
task_definition = TaskDefinition(family, revision, container_definitions, volumes)
self.task_definitions[family].append(task_definition)
return task_definition
def list_task_definitions(self):
"""
Filtering not implemented
"""
task_arns = []
for task_definition_list in self.task_definitions.values():
task_arns.extend([task_definition.arn for task_definition in task_definition_list])
return task_arns
def describe_task_definition(self, task_definition_str):
task_definition_name = task_definition_str.split('/')[-1]
if ':' in task_definition_name:
family, revision = task_definition_name.split(':')
revision = int(revision)
else:
family = task_definition_name
revision = len(self.task_definitions.get(family, []))
if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
return self.task_definitions[family][revision-1]
else:
raise Exception("{0} is not a task_definition".format(task_definition_name))
def deregister_task_definition(self, task_definition_str):
task_definition_name = task_definition_str.split('/')[-1]
family, revision = task_definition_name.split(':')
revision = int(revision)
if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
return self.task_definitions[family].pop(revision - 1)
else:
raise Exception("{0} is not a task_definition".format(task_definition_name))
def run_task(self, cluster_str, task_definition_str, count, overrides, started_by):
cluster_name = cluster_str.split('/')[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
if cluster_name not in self.tasks:
self.tasks[cluster_name] = {}
tasks = []
container_instances = list(self.container_instances.get(cluster_name, {}).keys())
if not container_instances:
raise Exception("No instances found in cluster {}".format(cluster_name))
for _ in range(count or 1):
container_instance_arn = self.container_instances[cluster_name][
container_instances[randint(0, len(container_instances) - 1)]
].containerInstanceArn
task = Task(cluster, task_definition, container_instance_arn, overrides or {}, started_by or '')
tasks.append(task)
self.tasks[cluster_name][task.task_arn] = task
return tasks
def start_task(self, cluster_str, task_definition_str, container_instances, overrides, started_by):
cluster_name = cluster_str.split('/')[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
if cluster_name not in self.tasks:
self.tasks[cluster_name] = {}
tasks = []
if not container_instances:
raise Exception("No container instance list provided")
container_instance_ids = [x.split('/')[-1] for x in container_instances]
for container_instance_id in container_instance_ids:
container_instance_arn = self.container_instances[cluster_name][
container_instance_id
].containerInstanceArn
task = Task(cluster, task_definition, container_instance_arn, overrides or {}, started_by or '')
tasks.append(task)
self.tasks[cluster_name][task.task_arn] = task
return tasks
def describe_tasks(self, cluster_str, tasks):
cluster_name = cluster_str.split('/')[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
if not tasks:
raise Exception("tasks cannot be empty")
response = []
for cluster, cluster_tasks in self.tasks.items():
for task_id, task in cluster_tasks.items():
if task_id in tasks or task.task_arn in tasks:
response.append(task)
return response
def list_tasks(self, cluster_str, container_instance, family, started_by, service_name, desiredStatus):
filtered_tasks = []
for cluster, tasks in self.tasks.items():
for arn, task in tasks.items():
filtered_tasks.append(task)
if cluster_str:
cluster_name = cluster_str.split('/')[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
filtered_tasks = list(filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks))
if container_instance:
filtered_tasks = list(filter(lambda t: container_instance in t.container_instance_arn, filtered_tasks))
if started_by:
filtered_tasks = list(filter(lambda t: started_by == t.started_by, filtered_tasks))
return [t.task_arn for t in filtered_tasks]
def stop_task(self, cluster_str, task_str, reason):
cluster_name = cluster_str.split('/')[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
if not task_str:
raise Exception("A task ID or ARN is required")
task_id = task_str.split('/')[-1]
tasks = self.tasks.get(cluster_name, None)
if not tasks:
raise Exception("Cluster {} has no registered tasks".format(cluster_name))
for task in tasks.keys():
if task.endswith(task_id):
tasks[task].last_status = 'STOPPED'
tasks[task].desired_status = 'STOPPED'
tasks[task].stopped_reason = reason
return tasks[task]
raise Exception("Could not find task {} on cluster {}".format(task_str, cluster_name))
def create_service(self, cluster_str, service_name, task_definition_str, desired_count):
cluster_name = cluster_str.split('/')[-1]
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
raise Exception("{0} is not a cluster".format(cluster_name))
task_definition = self.describe_task_definition(task_definition_str)
desired_count = desired_count if desired_count is not None else 0
service = Service(cluster, service_name, task_definition, desired_count)
cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
self.services[cluster_service_pair] = service
return service
def list_services(self, cluster_str):
cluster_name = cluster_str.split('/')[-1]
service_arns = []
for key, value in self.services.items():
if cluster_name + ':' in key:
service_arns.append(self.services[key].arn)
return sorted(service_arns)
def describe_services(self, cluster_str, service_names_or_arns):
cluster_name = cluster_str.split('/')[-1]
result = []
for existing_service_name, existing_service_obj in sorted(self.services.items()):
for requested_name_or_arn in service_names_or_arns:
cluster_service_pair = '{0}:{1}'.format(cluster_name, requested_name_or_arn)
if cluster_service_pair == existing_service_name or existing_service_obj.arn == requested_name_or_arn:
result.append(existing_service_obj)
return result
def update_service(self, cluster_str, service_name, task_definition_str, desired_count):
cluster_name = cluster_str.split('/')[-1]
cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
if cluster_service_pair in self.services:
if task_definition_str is not None:
task_definition = self.describe_task_definition(task_definition_str)
self.services[cluster_service_pair].task_definition = task_definition_str
if desired_count is not None:
self.services[cluster_service_pair].desired_count = desired_count
return self.services[cluster_service_pair]
else:
raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name))
def delete_service(self, cluster_name, service_name):
cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
if cluster_service_pair in self.services:
service = self.services[cluster_service_pair]
if service.desired_count > 0:
raise Exception("Service must have desiredCount=0")
else:
return self.services.pop(cluster_service_pair)
else:
raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name))
def register_container_instance(self, cluster_str, ec2_instance_id):
cluster_name = cluster_str.split('/')[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
container_instance = ContainerInstance(ec2_instance_id)
if not self.container_instances.get(cluster_name):
self.container_instances[cluster_name] = {}
container_instance_id = container_instance.containerInstanceArn.split('/')[-1]
self.container_instances[cluster_name][container_instance_id] = container_instance
return container_instance
def list_container_instances(self, cluster_str):
cluster_name = cluster_str.split('/')[-1]
container_instances_values = self.container_instances.get(cluster_name, {}).values()
container_instances = [ci.containerInstanceArn for ci in container_instances_values]
return sorted(container_instances)
def describe_container_instances(self, cluster_str, list_container_instance_ids):
cluster_name = cluster_str.split('/')[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
failures = []
container_instance_objects = []
for container_instance_id in list_container_instance_ids:
container_instance = self.container_instances[cluster_name].get(container_instance_id, None)
if container_instance is not None:
container_instance_objects.append(container_instance)
else:
failures.append(ContainerInstanceFailure('MISSING', container_instance_id))
return container_instance_objects, failures
    def deregister_container_instance(self, cluster_str, container_instance_str):
        # Not implemented in this backend; calls are accepted and silently ignored.
        pass
ecs_backends = {}
for region, ec2_backend in ec2_backends.items():
ecs_backends[region] = EC2ContainerServiceBackend()
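# Usage sketch (illustrative): each region gets its own in-memory backend instance, so tests can
# drive the mocked API directly. The names below are hypothetical and assume a cluster named
# 'test_cluster' and a 'web:1' task definition have already been registered with the backend.
#   backend = ecs_backends['us-east-1']
#   backend.register_container_instance('test_cluster', 'i-0123456789abcdef0')
#   tasks = backend.run_task('test_cluster', 'web:1', count=1, overrides={}, started_by='tester')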
| silveregg/moto | moto/ecs/models.py | Python | apache-2.0 | 23,919 | 0.002843 |
"""
The `~certbot_dns_nsone.dns_nsone` plugin automates the process of completing
a ``dns-01`` challenge (`~acme.challenges.DNS01`) by creating, and subsequently
removing, TXT records using the NS1 API.
.. note::
The plugin is not installed by default. It can be installed by heading to
`certbot.eff.org <https://certbot.eff.org/instructions#wildcard>`_, choosing your system and
selecting the Wildcard tab.
Named Arguments
---------------
======================================== =====================================
``--dns-nsone-credentials`` NS1 credentials_ INI file.
(Required)
``--dns-nsone-propagation-seconds`` The number of seconds to wait for DNS
to propagate before asking the ACME
server to verify the DNS record.
(Default: 30)
======================================== =====================================
Credentials
-----------
Use of this plugin requires a configuration file containing NS1 API credentials,
obtained from your NS1
`account page <https://my.nsone.net/#/account/settings>`_.
.. code-block:: ini
:name: credentials.ini
:caption: Example credentials file:
# NS1 API credentials used by Certbot
dns_nsone_api_key = MDAwMDAwMDAwMDAwMDAw
The path to this file can be provided interactively or using the
``--dns-nsone-credentials`` command-line argument. Certbot records the path
to this file for use during renewal, but does not store the file's contents.
.. caution::
You should protect these API credentials as you would the password to your
NS1 account. Users who can read this file can use these credentials to issue
arbitrary API calls on your behalf. Users who can cause Certbot to run using
these credentials can complete a ``dns-01`` challenge to acquire new
certificates or revoke existing certificates for associated domains, even if
those domains aren't being managed by this server.
Certbot will emit a warning if it detects that the credentials file can be
accessed by other users on your system. The warning reads "Unsafe permissions
on credentials configuration file", followed by the path to the credentials
file. This warning will be emitted each time Certbot uses the credentials file,
including for renewal, and cannot be silenced except by addressing the issue
(e.g., by using a command like ``chmod 600`` to restrict access to the file).
Examples
--------
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``
certbot certonly \\
--dns-nsone \\
--dns-nsone-credentials ~/.secrets/certbot/nsone.ini \\
-d example.com
.. code-block:: bash
:caption: To acquire a single certificate for both ``example.com`` and
``www.example.com``
certbot certonly \\
--dns-nsone \\
--dns-nsone-credentials ~/.secrets/certbot/nsone.ini \\
-d example.com \\
-d www.example.com
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``, waiting 60 seconds
for DNS propagation
certbot certonly \\
--dns-nsone \\
--dns-nsone-credentials ~/.secrets/certbot/nsone.ini \\
--dns-nsone-propagation-seconds 60 \\
-d example.com
"""
| letsencrypt/letsencrypt | certbot-dns-nsone/certbot_dns_nsone/__init__.py | Python | apache-2.0 | 3,339 | 0.000599 |
__author__ = 'anthony <>'
from collections import OrderedDict
from django import forms
class FormOrderMixin(object):
def order_fields(self, field_order):
"""
Rearranges the fields according to field_order.
field_order is a list of field names specifying the order. Fields not
included in the list are appended in the default order for backward
compatibility with subclasses not overriding field_order. If field_order
is None, all fields are kept in the order defined in the class.
Unknown fields in field_order are ignored to allow disabling fields in
form subclasses without redefining ordering.
"""
if field_order is None:
return
fields = OrderedDict()
for key in field_order:
try:
fields[key] = self.fields.pop(key)
except KeyError: # ignore unknown fields
pass
fields.update(self.fields) # add remaining fields in original order
self.fields = fields
def get_form_field_no_validation(fieldname):
    # 'fieldname' is a form field class (e.g. forms.CharField); the subclass returned
    # here overrides clean() so submitted values are passed through without validation.
    class FieldNoValidation(fieldname):
def clean(self, value):
return value
return FieldNoValidation
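# Usage sketch (illustrative): wrap a standard Django field class so submitted values
# bypass that field's validation.
#   EmailFieldNoValidation = get_form_field_no_validation(forms.EmailField)
#   email = EmailFieldNoValidation(required=False)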
class Icons(object):
icons = {}
| unicefuganda/uSurvey | survey/forms/form_helper.py | Python | bsd-3-clause | 1,258 | 0.00159 |
"""
vUSBf: A KVM/QEMU based USB-fuzzing framework.
Copyright (C) 2015 Sergej Schumilo, OpenSource Security Ralf Spenneberg
This file is part of vUSBf.
See the file LICENSE for copying permission.
"""
__author__ = 'Sergej Schumilo'
from scapy.all import *
#####################################
####### SCAPY EXTENSION STUFF #######
#####################################
# XLEShortField
class XLEShortField(LEShortField, XShortField):
def i2repr(self, pkt, x):
return XShortField.i2repr(self, pkt, x)
# XLEIntField
class XLEIntField(LEIntField, XIntField):
def i2repr(self, pkt, x):
return XIntField.i2repr(self, pkt, x)
####################################
####### REDIR SPECIFIC STUFF #######
####################################
usbredir_type_enum = { # CONTROL PACKETS
0: "hello",
1: "device_connect",
2: "device_disconnect",
3: "reset",
4: "interface_info",
5: "ep_info",
6: "set_configuration",
7: "get_configuration",
8: "configuration_status",
9: "set_alt_setting",
10: "get_alt_setting",
11: "alt_setting_status",
12: "start_iso_stream",
13: "stop_iso_stream",
14: "iso_stream_status",
15: "start_interrupt_receiving",
16: "stop_interrupt_receiving",
17: "interrupt_receiving_status",
18: "alloc_bulk_streams",
19: "free_bulk_streams",
20: "bulk_streams_status",
21: "cancel_data_packet",
22: "filter_reject",
23: "filter_filter",
24: "device_disconnect_ack", # DATA PACKETS
100: "data_control_packet",
101: "data_bulk_packet",
102: "data_iso_packet",
103: "data_interrupt_packet"}
# DO NOT FUZZ THE FOLLOWING REDIR-SPECIFIC PACKETS! FUZZING THEM WILL CAUSE QEMU TO CRASH!
class usbredirheader(Packet):
name = "UsbredirPacket"
fields_desc = [LEIntEnumField("Htype", -1, usbredir_type_enum),
LEIntField("HLength", 0),
LEIntField("Hid", -1)]
# Redir Packet No. 0 (redir hello)
class hello_redir_header(Packet):
name = "Hello_Packet"
fields_desc = [StrLenField("version", "", length_from=64), # StrLenField("caps", "", length_from=4)]
LEIntField("capabilites", 1)]
class hello_redir_header_host(Packet):
name = "Hello_Packet_Host"
fields_desc = [StrLenField("version", "", length_from=56)]
# Redir Packet No. 1 (redir connect)
class connect_redir_header(Packet):
name = "Connect_Packet"
fields_desc = [ByteField("speed", 0),
XByteField("device_class", 0),
XByteField("device_subclass", 0),
XByteField("device_protocol", 0),
XLEShortField("vendor_id", 0),
XLEShortField("product_id", 0),
XLEShortField("device_version_bcd", 0)]
# Redir Packet No. 4 (interface info) [SIZE 132 BYTES]
class if_info_redir_header(Packet):
name = "Interface Info Packet"
fields_desc = [LEIntField("interface_count", None),
FieldListField("interface", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_class", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_subclass", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface_protocol", None, ByteField("Value", 0), length_from=lambda p: 32)]
# Redir Packet No. 5 (endpoint info) [SIZE 160 BYTES]
class ep_info_redir_header(Packet):
name = "Endpoint Info Packet"
fields_desc = [FieldListField("ep_type", None, ByteEnumField("type_value", 0, {0: "type_control",
1: "type_iso",
2: "type interrupt",
255: "type invalid", })
, length_from=lambda p: 32),
FieldListField("interval", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("interface", None, ByteField("Value", 0), length_from=lambda p: 32),
FieldListField("max_packet_size", None, XLEShortField("Value", 0), length_from=lambda p: 32 * 2)]
# Redir Packet No. 100 (data control) [SIZE 10 BYTES]
class data_control_redir_header(Packet):
name = "Data_Control_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("request", 0),
ByteField("requesttype", 0),
ByteField("status", 0),
XLEShortField("value", 0),
LEShortField("index", 0),
LEShortField("length", 0)]
# Redir Packet No. 101 (data bulk) [SIZE 8 BYTES]
class data_bulk_redir_header(Packet):
name = "Data_Bulk_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", None),
LEIntField("stream_id", None),
LEShortField("length_high", None)]
# Redir Packet No. 102 (data iso) [SIZE 4 BYTES]
class data_iso_redir_header(Packet):
name = "Data_Iso_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", 0)]
# Redir Packet No. 103 (data interrupt) [SIZE 4 BYTES]
class data_interrupt_redir_header(Packet):
name = "Data_Interrupt_Packet"
fields_desc = [ByteField("endpoint", 0),
ByteField("status", 0),
LEShortField("length", 0)]
redir_specific_type = [[0, hello_redir_header],
[1, connect_redir_header],
[100, data_control_redir_header],
[101, data_bulk_redir_header],
[102, data_iso_redir_header],
[103, data_interrupt_redir_header]]
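# Usage sketch (illustrative; field values are arbitrary placeholders): a usbredir data_control
# packet is built by stacking the generic usbredirheader on the type-specific header.
#   pkt = usbredirheader(Htype=100, HLength=10, Hid=1) / data_control_redir_header(
#             endpoint=0x80, request=0x06, requesttype=0x80, length=18)
#   raw_bytes = str(pkt)   # scapy (python 2) serialization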
##################################
####### USB SPECIFIC STUFF #######
####### ENUMARATION PHASE #######
##################################
# USB Header (URB - replaced by usbredirheader)
class usb_header(Packet):
name = "USB_Packet"
fields_desc = [XLongField("id", 0xffff88003720d540),
ByteField("type", 43),
ByteField("transfer type", 2),
ByteField("endpoint", 80),
ByteField("device", 0),
LEShortField("bus_id", 0),
ByteField("device_setup_request", 0),
ByteField("data_present", 0),
LELongField("urb_sec", 0),
LEIntField("urb_usec", 0),
LEIntField("urb_status", 0),
LEIntField("urb_length", 0),
LEIntField("data_length", 0)]
# Generic USB Descriptor Header
class usb_generic_descriptor_header(Packet):
name = "USB_GENERIC_DESCRIPTOR_HEADER"
fields_desc = [ByteField("bLength", 0),
XByteField("bDescriptorType", 0x1)]
# USB Device Descriptor Packet (DescriptorType 0x01)
class usb_device_descriptor(Packet):
name = "USB_Device_Descriptor"
fields_desc = [ByteField("bLength", 18),
XByteField("bDescriptorType", 0x01),
XLEShortField("bcdUSB", 0x0),
XByteField("bDeviceClass", 0x1),
ByteField("bDeviceSubClass", 0),
ByteField("bDeviceProtocol", 0),
ByteField("bMaxPacketSize", 0),
XLEShortField("isVendor", 0x0),
XLEShortField("idProduct", 0x0),
XLEShortField("bcdDevice", 0x0),
ByteField("iManufacturer", 0),
ByteField("iProduct", 0),
ByteField("iSerialNumber", 0),
ByteField("bNumConfigurations", 1)]
# USB Configuration Descriptor
class usb_configuration_descriptor(Packet):
name = "USB_Configuration_Descriptor"
fields_desc = [ByteField("bLength", 9), # Size of Descriptor in Bytes
XByteField("bDescriptorType", 0x02), # Configuration Descriptor (0x02)
XLEShortField("wTotalLength", 0), # Total length in bytes of data returned
ByteField("bNumInterfaces", None), # Number of Interfaces
ByteField("bConfigurationValue", None), # Value to use as an argument to select this configuration
ByteField("iConfiguration", None), # Index of String Descriptor describing this configuration
FlagsField("bmAttributes", 0b11100000, 8, [
"Reserved_D0", # Reserved Bit
"Reserved_D1", # Reserved Bit
"Reserved_D2", # Reserved Bit
"Reserved_D3", # Reserved Bit
"Reserved_D4", # Reserved Bit
"Remote_Wakeup", # D5 Remote Wakeup
"Self_Powered", # D6 Self Powered
"Reserved_D7", # D7 Reserved: Must be 1 for USB1.1 and higher
]),
ByteField("bMaxPower", None) # Maximum Power consumption in 2mA units
]
# USB Interface_Descriptor
class usb_interface_descriptor(Packet):
name = "USB_Interface_Descriptor"
fields_desc = [ByteField("bLength", 9), # Size of Descriptor in Bytes (9 Bytes)
XByteField("bDescriptorType", 0x04), # Configuration Descriptor (0x04)
ByteField("bInterfaceNumber", None), # Number of Interface
ByteField("bAlternateSetting", None), # Value used to select alternative setting
ByteField("bNumEndpoints", None), # Number of Endpoints used for this interface
XByteField("bInterfaceClass", None), # Class Code [0x08: MASSSTORAGE, ...]
XByteField("bInterfaceSubClass", None), # Subclass Code
XByteField("bInterfaceProtocol", None), # Protocol Code
ByteField("iInterface", None) # Index of String Descriptor describing this interface
]
# USB Endpoint Descriptors
class usb_endpoint_descriptor(Packet):
name = "USB_Endpoint_Descriptor"
fields_desc = [ByteField("bLength", 7), # Size of Descriptor in Bytes (7 Bytes)
XByteField("bDescriptorType", 0x05), # Configuration Descriptor (0x05)
XByteField("bEndpointAddress", None), # Endpoint Adress TODO!
XByteField("bmAttribut", None), # TODO
LEShortField("wMaxPacketSize", None),
# Maximum Packet Size this endpoint is cabable of sending or recving
ByteField("bInterval", None) # Interval for polling endpoint data transfer. Value in frame counts
]
class usb_string_descriptor_langid(Packet):
name = "USB_String_Descriptor_LangID"
fields_desc = [ByteField("bLength", 0),
ByteField("bDescriptorType", 0),
FieldListField("wLANGID", 0x00, XLEShortField("Value", 1), count_from=lambda p: p.bLength)
]
class usb_string_descriptor(Packet):
name = "USB_String_Descriptor"
fields_desc = [ByteField("bLength", 0),
ByteField("bDescriptorType", 0),
FieldListField("UnicodeData", 0x00, XLEShortField("Char", 1), count_from=lambda p: p.bLength)
]
class usb_hid_descriptor(Packet):
name = "USB_HID_Descriptor"
fields_desc = [ByteField("bLength", 0x9),
ByteField("bDescriptorType", 0x21),
XLEShortField("bcdHID", 0x0),
ByteField("bCountryCode", 0x00),
                   ByteField("bNumDescriptors", 0x00),  # repeats according to the number of descriptors
                   XByteField("bDescriptorType2", 0x22),  # 0x22 REPORT DESCRIPTOR, 0x23 PHYSICAL DESCRIPTOR
LEShortField("wDescriptorLength", 0x00)
]
class usb_hid_report_extension(Packet):
name = "USB_HID_Report_Extension"
    fields_desc = [XByteField("bDescriptorType2", 0x22),  # 0x22 REPORT DESCRIPTOR, 0x23 PHYSICAL DESCRIPTOR
LEShortField("wDescriptorLength", 0x00)
]
class usb_hid_report_descriptor(Packet):
name = "USB_HID_Report_Descriptor"
fields_desc = []
descriptor_types = { 0x01: usb_device_descriptor,
0x02: usb_configuration_descriptor,
0x03: usb_string_descriptor,
0x04: usb_interface_descriptor,
0x05: usb_endpoint_descriptor,
0x09: usb_hid_descriptor
}
## PROTOTYPE FOR USB_HUB_DESCRIPTOR ##
##
## typedef struct _USB_HUB_DESCRIPTOR {
## UCHAR bDescriptorLength;
## UCHAR bDescriptorType;
## UCHAR bNumberOfPorts;
## USHORT wHubCharacteristics;
## UCHAR bPowerOnToPowerGood;
## UCHAR bHubControlCurrent;
## UCHAR bRemoveAndPowerMask[64];
## } USB_HUB_DESCRIPTOR, *PUSB_HUB_DESCRIPTOR;
##############################################
####### USB MASSSTORAGE SPECIFIC STUFF #######
###### SCSI #######
##############################################
# dCBWSignatur
dCBWSignature_magic_number = 0x43425355
#dCSWSignatur
dCSWSignature_magic_number = 0x53425355
# Command Generic Header
class massstorage_generic(Packet):
name = "Massstorage_Generic"
fields_desc = [ XLEIntField("dSignature", 0)]
# Command Block Wrapper (CBW) [SIZE: 12 Bytes]
class massstorage_cbw(Packet):
name = "Massstorage_CBW"
fields_desc = [ XLEIntField("dCBWSignature", 0),
IntField("dCBWTag", None),
XLEIntField("dCBWDataTransferLength", None),
ByteField("bmCBWFlags", None),
ByteField("bCBWLUN", None),
ByteField("bCBWCBLength", None)
]
# Command Status Wrapper (CSW)
class massstorage_csw(Packet):
name = "Massstorage_CSW"
fields_desc = [ XLEIntField("dCSWSignature", 0),
IntField("dCSWTag", None),
XLEIntField("dCSWDataResidue", None),
ByteField("bCSWStatus", None)
]
###################################
####### SCSI SPECIFIC STUFF #######
###################################
# SCSI_INQUIRY STRING LENGTH
SCSI_INQUIRY_VENDOR_ID_LENGTH = 8
SCSI_INQUIRY_PRODUCT_ID_LENGTH = 16
SCSI_INQUIRY_PRODUCT_REVISION_LEVEL_LENGTH = 4
# INQUIRY SCSI (SIZE: 36 Bytes)
class scsi_inquiry(Packet):
name = "SCSI_Inquiry"
fields_desc = [ ByteField("peripheral", None),
ByteField("RMB", None),
ByteField("version", None),
ByteField("?", None),
ByteField("additional_length", None),
ByteField("??", None),
ByteField("???", None),
ByteField("????", None),
StrFixedLenField("vendor_id", None, SCSI_INQUIRY_VENDOR_ID_LENGTH),
StrFixedLenField("product_id", None, SCSI_INQUIRY_PRODUCT_ID_LENGTH),
StrFixedLenField("product_revision_level", None, SCSI_INQUIRY_PRODUCT_REVISION_LEVEL_LENGTH)
]
# Raw INQUIRY SCSI
class scsi_raw_inquiry(Packet):
name = "SCSI_Raw_Inquiry"
fields_desc = [ ByteField("peripheral", None),
ByteField("RMB", None),
ByteField("version", None),
ByteField("?", None),
ByteField("additional_length", None),
ByteField("??", None),
ByteField("???", None),
ByteField("????", None),
#PAYLOAD VENDOR ID[8] PRODUCT ID[16] PRODUCT REV[4]
]
# READ CAPACITY SCSI
#class scsi_read_capicity(Packet):
# name = "SCSI_READ_CAPICITY"
# fields_desc = [ ByteField("opcode", 0x25),
# ByteField("reserved", None),
# XLEIntField("logical_block_adress", None),
# ShortField("reserverd", None),
# ByteField("reserverd", None),
# XByteField("control", None)
# ]
# READ CAPACITY SCSI RESPONSE
class scsi_read_capicity(Packet):
name = "SCSI_READ_CAPICITY_RESPONSE"
fields_desc = [ XLEIntField("returned_logic_block_addr", None),
XLEIntField("block_length", None) ]
# MODE SELECT (6) SCSI RESPONSE
class scsi_mode_6(Packet):
name = "SCSI_MODE_SELECT_(6)_RESPONSE"
fields_desc = [ ByteField("mode_data_length", None),
ByteField("medium_field", None),
ByteField("dev-specific_parameter", None),
ByteField("block_desc_length", None) ]
# SCSI COMMAND LIST [OPCODE, NAME, SCAPYNAME]
SCSI_COMMAND_LIST = [ ['\x04', "FORMAT UNIT", None],
['\x12', "INQUIRY", scsi_inquiry],
['\x15', "MODE SELECT (6)", scsi_mode_6],
['\x55', "MODE SELECT (10)", None],
['\x1a', "MODE SENSE (6)", scsi_mode_6],
['\x5a', "MODE SENSE (10)", None],
['\x1e', "PREVENT ALLOW MEDIUM REMOVAL", None],
['\x08', "READ (6)", None],
['\x28', "READ (10)", None],
['\xa8', "READ (12)", None],
['\x25', "READ CAPACITY (10)", scsi_read_capicity],
['\x23', "READ FORMAT CAPACITY", None],
['\x43', "READ TOC/PMA/ATIP", None],
['\xa0', "REPORT LUNS", None],
['\x03', "REQUEST SENSE", None],
                      ['\x1d', "SEND DIAGNOSTIC", None],
['\x1b', "START STOP UNIT", None],
['\x35', "SYNCHRONIZE CACHE (10)", None],
['\x00', "TEST UNIT READY", None],
['\x2f', "VERIFY (10)", None],
['\x0a', "WRITE (6)", None],
['\x2a', "WRITE (10)", None],
['\xaa', "WRITE (12)", None]
]
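# Usage sketch (illustrative): SCSI_COMMAND_LIST doubles as an opcode lookup table, e.g. to find
# the scapy class that parses an INQUIRY (opcode 0x12) response.
#   opcode, name, scapy_cls = next(cmd for cmd in SCSI_COMMAND_LIST if cmd[0] == '\x12')
#   # -> ('\x12', "INQUIRY", scsi_inquiry)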
| schumilo/vUSBf | usbscapy.py | Python | gpl-2.0 | 17,727 | 0.011677 |
from django.db.models import Model, CharField, URLField
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore import blocks
from wagtail.wagtailembeds.blocks import EmbedBlock
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.whitelist import attribute_rule, check_url, allow_without_attributes
class RenderInlineMixin(object):
def __init__(self):
pass
def render_inline(self):
template = get_template(self.ajax_template)
return template.render({
'self': self
})
class RelatedHowToMixin(object):
def __init__(self):
pass
def related_how_tos(self):
return [related_how_to_page.how_to_page
for related_how_to_page
in self.how_to_page.select_related().all()]
def related_how_to_theory_articles(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_theory_articles = []
for related_how_to in related_how_tos:
how_to_articles = related_how_to.theory_page_list()
related_articles = self.related_how_to_pages(how_to_articles, self_idx)
related_how_to_theory_articles.append({
'how_to': related_how_to,
'articles': related_articles
})
return related_how_to_theory_articles
def related_how_to_story_articles(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_story_articles = []
for related_how_to in related_how_tos:
how_to_articles = related_how_to.story_page_list()
related_articles = self.related_how_to_pages(how_to_articles, self_idx)
related_how_to_story_articles.append({
'how_to': related_how_to,
'articles': related_articles
})
return related_how_to_story_articles
def related_how_to_news_articles(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_news_articles = []
for related_how_to in related_how_tos:
how_to_articles = related_how_to.news_page_list()
related_articles = self.related_how_to_pages(how_to_articles, self_idx)
related_how_to_news_articles.append({
'how_to': related_how_to,
'articles': related_articles
})
return related_how_to_news_articles
def related_how_to_events(self, related_how_tos=None, self_idx=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
related_how_to_events = []
for related_how_to in related_how_tos:
how_to_events = related_how_to.event_page_list()
related_events = self.related_how_to_pages(how_to_events, self_idx)
related_how_to_events.append(related_events)
return related_how_to_events
def upcoming_related_event(self, related_how_tos=None):
if related_how_tos is None:
related_how_tos = self.related_how_tos()
how_to_event_lists = [how_to_page.event_page_list()
for how_to_page
in related_how_tos]
event_pages = []
for how_to_event_list in how_to_event_lists:
if len(how_to_event_list) > 0:
for how_to_event in how_to_event_list:
if how_to_event and how_to_event.is_upcoming:
event_pages.append(how_to_event)
if len(event_pages) > 0:
return sorted(event_pages, key=lambda event: event.start_date)[0]
return event_pages
@staticmethod
def related_how_to_pages(how_to_pages, self_idx=None):
previous_page_idx = 0
next_article_idx = 1
if self_idx:
for idx, page in enumerate(how_to_pages):
                if page.id == self_idx:  # compare by value, not identity
                    self_idx = idx
                    break
previous_page_idx = self_idx - 1
next_article_idx = self_idx + 1
previous_page = None
next_page = None
if 0 <= previous_page_idx < len(how_to_pages):
previous_page = how_to_pages[previous_page_idx]
if 0 <= next_article_idx < len(how_to_pages):
next_page = how_to_pages[next_article_idx]
return (previous_page, next_page)
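    # Usage sketch (illustrative): given an ordered list of pages and the id of the current page,
    # the helper above returns that page's (previous, next) neighbours, with None at either end.
    #   previous_page, next_page = RelatedHowToMixin.related_how_to_pages(pages, self_idx=pages[0].id)
    #   # previous_page is None; next_page is pages[1] (assuming at least two pages)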
# Blocks
class PullQuoteBlock(blocks.StructBlock):
pull_quote = blocks.TextBlock(verbose_name=_('Pull quote'),
required=True,
rows=2)
attribution = blocks.CharBlock(verbose_name=_('Quote attribution to'),
help_text=_('The name of the person or organization that '
                                               'the quote can be attributed to'),
required=False)
link = blocks.URLBlock(verbose_name=_('Link'),
help_text=_("Click quote to go to link."),
required=False)
class Meta:
template = 'articles/blocks/pullquote.html'
icon = 'openquote'
label = 'Pull Quote'
class FeaturedImageBlock(blocks.StructBlock):
image = ImageChooserBlock(required=True)
class Meta:
icon='image'
label=_('Image')
template='articles/blocks/featured_image.html'
help_text=_('The featured image is shown in the list-view and detail-view')
class FeaturedVideoBlock(blocks.StructBlock):
video = EmbedBlock(required=True)
class Meta:
icon='media'
label=_('Video')
template='articles/blocks/featured_video.html'
        help_text=_('The featured video is only shown in the detail-view, make sure to also select a featured image')
class FeaturedAudioBlock(blocks.StructBlock):
audio = EmbedBlock(required=True)
class Meta:
icon='media'
label=_('Audio')
template='articles/blocks/featured_audio.html'
        help_text=_('The featured audio is only shown in the detail-view, make sure to also select a featured image')
class PageFormat:
TEXT = ('text', _('Story'))
THEORY = ('theory', _('Theory'))
VIDEO = ('video', _('Video'))
AUDIO = ('audio', _('Audio'))
IMAGES = ('images', _('Photo report'))
EVENT = ('event', _('Activity'))
ORGANIZATION = ('organization', _('Practitioner'))
LINK = ('link', _('Link'))
DOCUMENT = ('document', _('Document'))
ALL = (
TEXT,
THEORY,
VIDEO,
AUDIO,
IMAGES,
EVENT,
ORGANIZATION,
LINK,
DOCUMENT
)
def __init__(self):
pass
| jeremy-c/unusualbusiness | unusualbusiness/utils/models.py | Python | bsd-3-clause | 7,002 | 0.003999 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
vivopump -- module of helper functions for the pump
"""
import sys
import csv
import string
import random
import logging
__author__ = "Michael Conlon"
__copyright__ = "Copyright (c) 2016 Michael Conlon"
__license__ = "New BSD license"
__version__ = "0.8.7"
logger = logging.getLogger(__name__)
class DefNotFoundException(Exception):
"""
Raise this exception when update definition fle is not found
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class InvalidDefException(Exception):
"""
Raise this exception when update definition contains values that can not be processed
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class InvalidSourceException(Exception):
"""
Raise this exception when update data contains values that can not be processed
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class PathLengthException(Exception):
"""
Raise this exception when update def has a path length greater than support
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class UnicodeCsvReader(object):
"""
From http://stackoverflow.com/questions/1846135/python-csv-
library-with-unicode-utf-8-support-that-just-works. Added errors='ignore'
to handle cases when the input file misrepresents itself as utf-8.
"""
def __init__(self, f, encoding="utf-8", **kwargs):
self.csv_reader = csv.reader(f, **kwargs)
self.encoding = encoding
def __iter__(self):
return self
def next(self):
"""
Read and split the csv row into fields
"""
row = self.csv_reader.next()
# now decode
return [unicode(cell, self.encoding, errors='ignore') for cell in row]
@property
def line_num(self):
"""
Return line number
"""
return self.csv_reader.line_num
class UnicodeDictReader(csv.DictReader):
"""
A Unicode CSV Reader
"""
def __init__(self, f, encoding="utf-8", fieldnames=None, **kwds):
csv.DictReader.__init__(self, f, fieldnames=fieldnames, **kwds)
self.reader = UnicodeCsvReader(f, encoding=encoding, **kwds)
def read_csv(filename, skip=True, delimiter='|'):
"""
Read a CSV file, return dictionary object
:param filename: name of file to read
:param skip: should lines with invalid number of columns be skipped? False=Throw Exception
:param delimiter: The delimiter for CSV files
:return: Dictionary object
"""
fp = open(filename, 'rU')
data = read_csv_fp(fp, skip, delimiter)
fp.close()
return data
def read_csv_fp(fp, skip=True, delimiter="|"):
"""
    Given a file pointer fp, read the CSV data it contains. We use "|" as a
separator in CSV files to allow commas to appear in values.
CSV files read by this function follow these conventions:
-- use delimiter as a separator. Defaults to vertical bar.
-- have a first row that contains column headings.
-- all elements must have values. To specify a missing value, use
the string "None" or "NULL" between separators, that is |None| or |NULL|
-- leading and trailing whitespace in values is ignored. | The | will be
read as "The"
-- if skip=True, rows with too many or too few data elements are skipped.
if skip=False, a RowError is thrown
CSV files processed by read_csv will be returned as a dictionary of
dictionaries, one dictionary per row keyed by an integer row number. This supports
maintaining the order of the data input, which is important for some applications
"""
class RowError(Exception):
"""
Thrown when the number of data elements on a row in a CSV is not equal to the number of header elements
"""
pass
heading = []
row_number = 0
data = {}
for row in UnicodeCsvReader(fp, delimiter=delimiter):
i = 0
for r in row:
# remove white space fore and aft
row[i] = r.strip(string.whitespace).encode("utf-8")
i += 1
if len(heading) == 0:
heading = row # the first row is the heading
continue
row_number += 1
if len(row) == len(heading):
data[row_number] = {}
i = 0
for r in row:
data[row_number][heading[i]] = r
i += 1
elif not skip:
raise RowError("On row " + str(row_number) + ", expecting " +
str(len(heading)) + " data values. Found " +
str(len(row)) + " data values. Row contents = " +
str(row))
else:
pass # row has wrong number of columns and skip is True
logger.debug("loader returns {} rows".format(len(data)))
return data
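# Usage sketch (illustrative): a three-line source file such as
#
#   name|ufid
#   Alice|10021
#   Bob|10022
#
# is returned by read_csv/read_csv_fp as a row-numbered dictionary of dictionaries:
#   {1: {'name': 'Alice', 'ufid': '10021'}, 2: {'name': 'Bob', 'ufid': '10022'}}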
def write_csv_fp(fp, data, delimiter='|'):
"""
Write a CSV to a file pointer. Used to support stdout.
:param fp: File pointer. Could be stdout.
:param data: data to be written
:param delimiter: field delimiter for output
:return:
"""
assert(len(data.keys()) > 0)
# create a list of var_names from the first row
var_names = data[data.keys()[0]].keys()
fp.write(delimiter.join(var_names).encode('utf-8') + '\n')
for key in sorted(data.keys()):
fp.write(delimiter.join([data[key][x] for x in var_names]) + '\n')
def write_csv(filename, data, delimiter='|'):
"""
Given a filename, a data structure as produced by read_csv and an optional
delimiter, write a file that can be read by read_csv
The data structure is a dictionary keyed by an integer of "row numbers"
preserving the natural order of the data. Each element is in turn a
dictionary of name value pairs. All values are strings.
:param filename: name of file to write
:param data: data structure to be written to the file
:param delimiter: field delimiter. Popular choices are '|', '\t' and ','
:return:
"""
with open(filename, 'w') as f:
f.write(delimiter.join(data[data.keys()[0]].keys()).encode('utf-8') + '\n')
for key in sorted(data.keys()):
f.write(delimiter.join(data[key].values()).encode('utf-8') + '\n')
def replace_initials(s):
"""
For a string s, find all occurrences of A. B. etc and replace them with A B etc
:param s:
:return: string with replacements made
"""
import re
def repl_function(m):
"""
Helper function for re.sub
"""
return m.group(0)[0]
t = re.sub('[A-Z]\.', repl_function, s)
return t
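# Usage sketch (illustrative):
#   replace_initials('Conlon, M. A.')  ->  'Conlon, M A'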
def key_string(s):
"""
Given a string s, return a string with a bunch of punctuation and special
characters removed and then everything lower cased. Useful for matching
strings in which case, punctuation and special characters should not be
considered in the match
"""
k = s.encode("utf-8", "ignore").translate(None,
""" \t\n\r\f!@#$%^&*()_+:"<>?-=[]\\;'`~,./""")
k = k.lower()
return k
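# Usage sketch (illustrative): case, whitespace and punctuation are ignored, so variant
# spellings collapse to the same key.
#   key_string('The Journal of Wildlife Management')   ->  'thejournalofwildlifemanagement'
#   key_string('THE JOURNAL OF WILDLIFE MANAGEMENT.')  ->  'thejournalofwildlifemanagement'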
def get_vivo_types(selector, parms, separator=';'):
"""
Query VIVO using the selector and return a dictionary with keys of all uri satisfying the selector and
data of all the types for each uri, separated by the separator
:param: selector: query fragment for selecting the entities whose types will be returned
:param: parms: vivo_query parms
:return: dictionary of types keyed by uri
"""
query = """
select ?uri (GROUP_CONCAT(?type; separator="{{separator}}") AS ?types)
where {
{{selector}}
?uri rdf:type ?type .}
GROUP BY ?uri
"""
q = query.replace("{{separator}}", separator)
q = q.replace("{{selector}}", selector)
a = vivo_query(q, parms)
types = [x['types']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(uri, types))
def get_vivo_ufid(parms):
"""
Query VIVO and return a list of all the ufid found in VIVO
:param: parms: vivo_query parameters
:return: dictionary of uri keyed by ufid
"""
query = "select ?uri ?ufid where {?uri uf:ufid ?ufid .}"
a = vivo_query(query, parms)
ufid = [x['ufid']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(ufid, uri))
def get_vivo_publishers(parms):
"""
Query VIVO and return a list of all the publishers found in VIVO
:param: parms: vivo_query parameters
:return: dictionary of uri keyed by simplified publisher name
"""
query = "select ?uri ?label where {?uri a vivo:Publisher . ?uri rdfs:label ?label .}"
a = vivo_query(query, parms)
label = [key_string(x['label']['value']) for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(label, uri))
def get_vivo_journals(parms):
"""
Query VIVO and return a list of all the journals.
@see uf_examples/publications/filters/journal_match_filter.py
:param: parms: vivo_query params
:return: dictionary of uri keyed by ISSN
"""
query = "select ?uri ?issn where {?uri bibo:issn ?issn .}"
a = vivo_query(query, parms)
issn = [x['issn']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(issn, uri))
def get_vivo_ccn(parms):
"""
Query VIVO and return a list of all the ccn found in VIVO.
@see uf_examples/courses/merge_filter.py
:param: parms: vivo_query parms
:return: dictionary of uri keyed by ccn
"""
query = "select ?uri ?ccn where {?uri uf:ccn ?ccn .}"
a = vivo_query(query, parms)
ccn = [x['ccn']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(ccn, uri))
def get_vivo_sponsorid(parms):
"""
Query VIVO and return a list of all the sponsorid found in VIVO
:param: parms: vivo_query parms
:return: dictionary of uri keyed by sponsorid
"""
query = "select ?uri ?sponsorid where {?uri a vivo:FundingOrganization . ?uri ufVivo:sponsorID ?sponsorid .}"
a = vivo_query(query, parms)
sponsorid = [x['sponsorid']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(sponsorid, uri))
def get_vivo_authors(parms):
"""
Query VIVO and return a list of all the authors found in VIVO. Authors are people connected to
publications through authorships
:param: parms: vivo_query parms
:return: dictionary of author uri keyed by display_name (that won't work!)
"""
query = """
SELECT ?uri ?display_name
WHERE
{
?art a bibo:AcademicArticle .
?art bibo:doi ?doi .
?art vivo:relatedBy ?a .
?a a vivo:Authorship .
?a vivo:relates ?author .
?uri a foaf:Person .
?uri rdfs:label ?display_name .
}
"""
a = vivo_query(query, parms)
display_name = [x['display_name']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(display_name, uri))
def get_vivo_positions(parms):
"""
Query VIVO and return a list of all the UF positions found in VIVO. UF positions will
have an hrTitle. Non UF positions will not have this property
:param: parms: vivo_query parameters
:return: dictionary of position uri keyed by ufid, deptid, hr_title, start_date
"""
query = """
select ?uri ?ufid ?deptid ?hr_title ?start_date
where {
?uri a vivo:Position .
?uri vivo:relates ?x . ?x uf:ufid ?ufid .
?uri vivo:relates ?y . ?y uf:deptid ?deptid .
?uri uf:hrTitle ?hr_title .
?uri vivo:dateTimeInterval ?dti . ?dti vivo:start ?start . ?start vivo:dateTimeValue ?start_date .
}
"""
a = vivo_query(query, parms)
ufids = [x['ufid']['value'] for x in a['results']['bindings']]
deptids = [x['deptid']['value'] for x in a['results']['bindings']]
hr_titles = [x['hr_title']['value'] for x in a['results']['bindings']]
start_dates = [x['start_date']['value'] for x in a['results']['bindings']]
keys = [';'.join(x) for x in zip(ufids, deptids, hr_titles, start_dates)]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(keys, uri))
def read_update_def(filename, prefix):
"""
Read an update_def in JSON format, from a file
:param filename: name of file to read
:param prefix: text prefix for sparql queries
:rtype: dict
:return: JSON-like object from file, replacing all URI strings with URIRef objects
"""
def make_prefix_dict(prefix_text):
"""
Given prefix text, return a prefix dictionary with tags as keys and url strings as values
:param prefix_text:
:return: dictionary
:rtype: dict
"""
prefix_dictionary = {}
prefix_list = prefix_text.split()
for i in range(len(prefix_list) - 2):
if prefix_list[i].upper() == "PREFIX":
prefix_dictionary[prefix_list[i + 1]] = prefix_list[i + 2].replace('<', '').replace('>', '')
return prefix_dictionary
def cast_to_rdflib(t):
"""
        Given a string t containing the name of an rdflib object, return the rdflib object. For now
        this returns xsd data types only.
        Will throw a KeyError if t is not a known data type
:param t:
:return: an xsd data type
"""
from rdflib import XSD
cast_table = {
'xsd:integer': XSD.integer,
'xsd:string': XSD.string,
            'xsd:datetime': XSD.dateTime,  # rdflib's XSD namespace uses the mixed-case dateTime term
'xsd:boolean': XSD.boolean,
'xsd:decimal': XSD.decimal,
'xsd:anyURI': XSD.anyURI
}
r = cast_table[t]
return r
def fixit(current_object, prefix_dictionary):
"""
Read the def data structure and replace all string URIs with URIRef entities
        :param current_object: the piece of the data structure to be fixed
        :param prefix_dictionary: dictionary mapping prefix tags to namespace URLs
        :return current_object: the piece repaired in place
"""
from rdflib import URIRef
if isinstance(current_object, dict):
for k in current_object.keys():
current_object[k] = fixit(current_object[k], prefix_dictionary)
elif isinstance(current_object, list):
for i in range(0, len(current_object)):
current_object[i] = fixit(current_object[i], prefix_dictionary)
elif isinstance(current_object, basestring):
if current_object.startswith("http://"):
current_object = URIRef(current_object)
elif current_object.startswith("xsd:"):
current_object = cast_to_rdflib(current_object)
elif ':' in current_object:
k = current_object.find(':')
tag = str(current_object[0:k + 1])
if tag in prefix_dictionary:
current_object = URIRef(str(current_object).replace(tag, prefix_dictionary[tag]))
return current_object
def add_order(a, b):
"""
Given an update_def (a) and the string of the input file containing the update_def (b),
add an "order" parameter to the entity_def, specifying the column_def ordering. This
is used in subsequent processing to insure that the order in the input file is preserved
when output is created.
:param a: update_def
:param b: string of update_def from file
:return a new update_def dictionary with an order list in the entity def
"""
defn = a
loc = []
var_list = []
k = b.find("column_defs")
b = b[k:]
for var in defn['column_defs'].keys():
var_list.append(var)
loc.append(b.find(var + '": ['))
seq = sorted(loc)
order = [var_list[loc.index(v)] for v in seq]
defn['entity_def']['order'] = order
return defn
def validate_update_def(a):
"""
Validate the update_def. Throw InvalidDef if errors
:param a: update_def
:return None
"""
col_names = a['column_defs'].keys()
# Test that each closure_def name can be found in the column_def names
for name in a.get('closure_defs', {}).keys():
if name not in col_names:
                raise InvalidDefException(name + ' in closure_def, not in column_def.')
# Test for agreement between closure_def and column_def last step object type and datatype
if 'closure_defs' in a:
for name in a.get('closure_defs').keys():
col_object = a['column_defs'][name][-1]['object'] # last object in the column_def
clo_object = a['closure_defs'][name][-1]['object'] # last object in the closure_def
                if col_object.get('datatype', '') == clo_object.get('datatype', '') and \
col_object.get('type', '') == clo_object.get('type', ''):
continue
else:
raise InvalidDefException(name + ' has inconsistent datatype or type in closure')
# Test for paths having more than one multiple predicate
for name in col_names:
multiple = 0
for step in a['column_defs'][name]:
if step['predicate']['single'] == False:
multiple += 1
if multiple > 1:
raise InvalidDefException(name + ' has more than one multiple predicate')
# Test for presence of required boolean value
for name in col_names:
for step in a['column_defs'][name]:
if step['predicate']['single'] == 'boolean' and 'value' not in step['object']:
                    raise InvalidDefException(name + ' is boolean with no value')
return None
def add_object_names_and_step_attributes(a):
"""
handed an update_def structure a, return an improved structure b in which each object has a generated name
attribute based on the column_def or closure_def name
Assign multiple to each object. Object is multiple if any preceding predicate is not single
"""
b = dict(a)
for name, path in b['column_defs'].items():
multiple = False
for i in range(len(path)):
multiple = multiple or (b['column_defs'][name][i]['predicate']['single'] == False)
b['column_defs'][name][i]['closure'] = False
b['column_defs'][name][i]['column_name'] = name
b['column_defs'][name][i]['object']['multiple'] = multiple
                if i == len(path) - 1:
b['column_defs'][name][i]['object']['name'] = name
b['column_defs'][name][i]['last'] = True
else:
b['column_defs'][name][i]['object']['name'] = name + '_' + str(len(path) - i - 1)
b['column_defs'][name][i]['last'] = False
if 'closure_defs' in b:
for name, path in b['closure_defs'].items():
multiple = False
for i in range(len(path)):
multiple = multiple or (b['closure_defs'][name][i]['predicate']['single'] == False)
b['closure_defs'][name][i]['closure'] = True
b['closure_defs'][name][i]['column_name'] = name
b['closure_defs'][name][i]['object']['multiple'] = multiple
                    if i == len(path) - 1:
b['closure_defs'][name][i]['object']['name'] = name
b['closure_defs'][name][i]['last'] = True
else:
b['closure_defs'][name][i]['object']['name'] = name + '_' + str(len(path) - i - 1)
b['closure_defs'][name][i]['last'] = False
return b
import json
with open(filename, "r") as my_file:
data = my_file.read()
prefix_dict = make_prefix_dict(prefix)
update_def = fixit(json.loads(data), prefix_dict)
update_def = add_order(update_def, data)
update_def = add_object_names_and_step_attributes(update_def)
validate_update_def(update_def)
return update_def
def add_qualifiers(input_path):
"""
Given an update_def input_path, generate the SPARQL fragment to express the qualifiers in the path, if any
:param input_path:
:return: qualifer SPARQL string
"""
return ' '.join([x['object'].get('qualifier', '') for x in input_path])
def gather_types(input_step, varname):
"""
    Given an input step, return a SPARQL fragment to gather the types for the step
    :param input_step: the step whose object types should be collected
    :param varname: name of the SPARQL variable that will hold the gathered type
    :return: SPARQL fragment as string
"""
if not input_step['object']['literal']:
return ' ?' + input_step['object']['name'] + ' a ?' + varname + ' . '
else:
return ''
def make_update_query(entity_sparql, path):
"""
Given a path from an update_def data structure, generate the query needed to pull the triples from VIVO that might
    be updated. Here's what the queries look like (pseudo code) by path length
Path length 1 example:
select ?uri (vivo:subOrganizationWithin as ?p) (?column_name as ?o)
where {
... entity sparql goes here ...
?uri vivo:subOrganizationWithin ?column_name . # ?uri ?p ?o
}
Path Length 2 example:
select ?uri (vivo:webpage as ?p1) (?column_name_1 as ?o1) (vivo:linkURI as ?p) (?column_name as ?o)
where {
... entity sparql goes here ...
?uri vivo:webpage ?column_name_1 . # ?uri ?p1 ?o1
?column_name_1 vivo:linkURI ?column_name . # ?o1 ?p ?o
}
Path length 3 example:
select ?uri (vivo:dateTimeInterval as ?p2) (?column_name_2 as ?o2) (vivo:end as ?p1)
(?column_name_1 as ?o1) (vivo:dateTime as ?p)
(?column_name as ?o)
where {
... entity sparql goes here ...
?uri vivo:dateTimeInterval ?column_name_2 . # ?uri ?p2 ?o2
?column_name_2 vivo:end ?column_name_1 . # ?o2 ?p1 ?o1
?column_name_1 vivo:dateTime ?column_name . # ?o1 ?p ?o
}
:return: a sparql query string
"""
query = ""
if len(path) == 1:
query = 'select ?uri (<' + str(path[0]['predicate']['ref']) + '> as ?p) (?' + path[0]['object']['name'] + \
' as ?o) ?t\n' + \
' where { ' + entity_sparql + '\n ?uri <' + str(path[0]['predicate']['ref']) + '> ?' + \
path[0]['object']['name'] + \
' . ' + gather_types(path[0], 't') + add_qualifiers(path) + ' \n}'
elif len(path) == 2:
query = 'select ?uri (<' + str(path[0]['predicate']['ref']) + '> as ?p1) ' + \
'(?' + path[0]['object']['name'] + ' as ?o1) ?t1 (<' + \
str(path[1]['predicate']['ref']) + '> as ?p) (?' + path[1]['object']['name'] + ' as ?o) ?t\n' + \
' where { ' + entity_sparql + '\n ?uri <' + str(path[0]['predicate']['ref']) + '> ?' + \
path[0]['object']['name'] + ' . ' + gather_types(path[0], 't1') + '?' + \
path[0]['object']['name'] + ' <' + str(path[1]['predicate']['ref']) + '> ?' + \
path[1]['object']['name'] + ' . ' + gather_types(path[1], 't') + add_qualifiers(path) + ' \n}'
elif len(path) == 3:
query = 'select ?uri (<' + str(path[0]['predicate']['ref']) + '> as ?p2) ' + \
'(?' + path[0]['object']['name'] + ' as ?o2) ?t2 (<' + str(path[1]['predicate']['ref']) + \
'> as ?p1) (?' + path[1]['object']['name'] + ' as ?o1) ?t1 (<' + str(path[2]['predicate']['ref']) + \
'> as ?p) (?' + path[2]['object']['name'] + ' as ?o) ?t\n' + \
'where { ' + entity_sparql + '\n ?uri <' + \
str(path[0]['predicate']['ref']) + '> ?' + path[0]['object']['name'] + ' . ' + \
gather_types(path[0], 't2') + ' ?' + \
path[0]['object']['name'] + ' <' + str(path[1]['predicate']['ref']) + '> ?' + \
path[1]['object']['name'] + ' . ' + gather_types(path[1], 't1') + ' ?' + \
path[1]['object']['name'] + ' <' + \
str(path[2]['predicate']['ref']) + '> ?' + path[2]['object']['name'] + ' . ' + \
gather_types(path[2], 't') + add_qualifiers(path) + ' \n}'
return query
def make_rdf_term(row_term):
"""
Given a row term from a JSON object returned by a SPARQL query (whew!) return a corresponding
rdflib term -- either a Literal or a URIRef
:param row_term:
:return: an rdf_term, either Literal or URIRef
"""
from rdflib import Literal, URIRef
if row_term['type'] == 'literal' or row_term['type'] == 'typed-literal':
rdf_term = Literal(row_term['value'], datatype=row_term.get('datatype', None),
lang=row_term.get('xml:lang', None))
else:
rdf_term = URIRef(row_term['value'])
return rdf_term
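# Usage sketch (illustrative): terms from a SPARQL JSON result map onto rdflib terms.
#   make_rdf_term({'type': 'uri', 'value': 'http://vivo.school.edu/individual/n123'})
#       -> URIRef('http://vivo.school.edu/individual/n123')
#   make_rdf_term({'type': 'literal', 'value': 'Conlon, Michael'})
#       -> Literal('Conlon, Michael')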
def get_graph(update_def, query_parms):
"""
Given the update def, get a graph from VIVO of the triples eligible for updating
:return: graph of triples
"""
from rdflib import Graph, URIRef, RDF
a = Graph()
entity_query = 'select ?uri (<http://www.w3.org/1999/02/22-rdf-syntax-ns#type> as ?p) (<' + \
str(update_def['entity_def']['type']) + '> as ?o)\nwhere {\n ' + \
update_def['entity_def']['entity_sparql'] + '\n}'
result = vivo_query(entity_query, query_parms)
for row in result['results']['bindings']:
s = URIRef(row['uri']['value'])
p = URIRef(row['p']['value'])
o = make_rdf_term(row['o'])
a.add((s, p, o))
for column_name, path in update_def['column_defs'].items() + \
update_def.get('closure_defs', {}).items():
update_query = make_update_query(update_def['entity_def']['entity_sparql'], path)
if len(update_query) == 0:
continue
result = vivo_query(update_query, query_parms)
for row in result['results']['bindings']:
if 'p2' in row and 'o2' in row:
uri = URIRef(row['uri']['value'])
p2 = URIRef(row['p2']['value'])
o2 = make_rdf_term(row['o2'])
a.add((uri, p2, o2))
if 't2' in row:
a.add((o2, RDF.type, make_rdf_term(row['t2'])))
p1 = URIRef(row['p1']['value'])
o1 = make_rdf_term(row['o1'])
a.add((o2, p1, o1))
if 't1' in row:
a.add((o1, RDF.type, make_rdf_term(row['t1'])))
p = URIRef(row['p']['value'])
o = make_rdf_term(row['o'])
a.add((o1, p, o))
if 't' in row:
a.add((o, RDF.type, make_rdf_term(row['t'])))
elif 'p1' in row and 'o1' in row:
uri = URIRef(row['uri']['value'])
p1 = URIRef(row['p1']['value'])
o1 = make_rdf_term(row['o1'])
a.add((uri, p1, o1))
if 't1' in row:
a.add((o1, RDF.type, make_rdf_term(row['t1'])))
p = URIRef(row['p']['value'])
o = make_rdf_term(row['o'])
a.add((o1, p, o))
if 't' in row:
a.add((o, RDF.type, make_rdf_term(row['t'])))
elif 'p' in row and 'o' in row:
uri = URIRef(row['uri']['value'])
p = URIRef(row['p']['value'])
o = make_rdf_term(row['o'])
a.add((uri, p, o))
if 't' in row:
a.add((o, RDF.type, make_rdf_term(row['t'])))
logger.debug(u"Triples in original graph {}".format(len(a)))
return a
def new_uri(parms):
"""
Find an unused VIVO URI in the VIVO defined by the parms
:param parms: dictionary with queryuri, username, password and uriprefix
:return: a URI not in VIVO
"""
test_uri = ""
while True:
test_uri = parms['uriprefix'] + str(random.randint(1, 9999999999))
query = """
SELECT (COUNT(?z) AS ?count) WHERE {
<""" + test_uri + """> ?y ?z
}"""
response = vivo_query(query, parms)
if int(response["results"]["bindings"][0]['count']['value']) == 0:
break
return test_uri
def vivo_query(query, parms):
"""
A new VIVO query function using SPARQLWrapper. Tested with Stardog, UF VIVO and Dbpedia
:param query: SPARQL query. VIVO PREFIX will be added
:param parms: dictionary with query parms: queryuri, username and password
:return: result object, typically JSON
:rtype: dict
"""
from SPARQLWrapper import SPARQLWrapper, JSON
logger.debug(u"in vivo_query\n{}".format(parms))
sparql = SPARQLWrapper(parms['queryuri'])
new_query = parms['prefix'] + '\n' + query
sparql.setQuery(new_query)
logger.debug(new_query)
sparql.setReturnFormat(JSON)
sparql.addParameter("email", parms['username'])
sparql.addParameter("password", parms['password'])
# sparql.setCredentials(parms['username'], parms['password'])
results = sparql.query()
results = results.convert()
return results
def write_update_def(update_def, filename):
"""
    Write update_def to a json file
    :param update_def: the update_def data structure to be written
    :param filename: name of file to write
:return: None. A file is written
"""
import json
out_file = open(filename, "w")
json.dump(update_def, out_file, indent=4)
out_file.close()
return
def parse_pages(pages):
"""
    Given a string possibly containing a start and end page, return the start and end page if any
:param pages:
:return: list with start and end pages
"""
if '-' in pages:
k = pages.find('-')
start = pages[0:k]
end = pages[k + 1:]
else:
start = pages
end = ''
return [start, end]
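# Usage sketch (illustrative):
#   parse_pages('101-110')  ->  ['101', '110']
#   parse_pages('101')      ->  ['101', '']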
def parse_date_parts(month, year):
"""
Given a month string and a year string from publisher data, parse apart the month, day and year and create
a standard date string that can be used as input to VIVO
:param month: string from publisher data. May be text such as 'JUN' or 'Jun 15' with day number included
:param year: string of year such as '2015'
:return: date string in isoformat
"""
month_numbers = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12,
'SUM': 6, 'FAL': 9, 'WIN': 12, 'SPR': 3, '': 1}
from datetime import datetime
if ' ' in month:
k = month.find(' ')
month_name = month[0:k]
month_day = month[k + 1:]
elif '-' in month:
k = month.find('-')
month_name = month[0:k]
month_day = '1'
else:
month_name = month
month_day = '1'
month_number = month_numbers[month_name.upper()]
date_value = datetime(int(year), month_number, int(month_day))
return date_value.isoformat()
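# Usage sketch (illustrative): month strings from publisher data may carry a day number or a
# season abbreviation; both collapse to an ISO datetime string.
#   parse_date_parts('JUN 15', '2015')  ->  '2015-06-15T00:00:00'
#   parse_date_parts('FAL', '2014')     ->  '2014-09-01T00:00:00'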
def get_args():
"""
Get the args specified by the user. Arg values are determined:
1. from hard coded values (see below)
2. Overridden by values in a specified config file (see below)
3. Overridden by values on the command line
Set the logging level based on args
:return: args structure as defined by argparser
"""
import argparse
import ConfigParser
program_defaults = {
'action': 'summarize',
'defn': 'pump_def.json',
'inter': '\t',
'intra': ';',
'username': 'vivo_root@school.edu',
'password': 'password',
'prefix':
'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n'
'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n'
'PREFIX owl: <http://www.w3.org/2002/07/owl#>\n'
'PREFIX vitro: <http://vitro.mannlib.cornell.edu/ns/vitro/0.7#>\n'
'PREFIX bibo: <http://purl.org/ontology/bibo/>\n'
'PREFIX event: <http://purl.org/NET/c4dm/event.owl#>\n'
'PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n'
'PREFIX obo: <http://purl.obolibrary.org/obo/>\n'
'PREFIX skos: <http://www.w3.org/2004/02/skos/core#>\n'
'PREFIX uf: <http://vivo.school.edu/ontology/uf-extension#>\n'
'PREFIX vitrop: <http://vitro.mannlib.cornell.edu/ns/vitro/public#>\n'
'PREFIX vivo: <http://vivoweb.org/ontology/core#>\n',
'rdfprefix': 'pump',
'queryuri': 'http://localhost:8080/vivo/api/sparqlQuery',
'uriprefix': 'http://vivo.school.edu/individual/n',
'src': 'pump_data.txt',
'config': 'sv.cfg',
'verbose': logging.WARNING,
'debug': logging.WARNING,
'nofilters': False
}
parser = argparse.ArgumentParser(description="Get or update row and column data from and to VIVO",
epilog="For more info, see http://github.com/mconlon17/vivo-pump")
parser.add_argument("-a", "--action", help="desired action. get = get data from VIVO. update = update VIVO "
"data from a spreadsheet. summarize = show def summary. serialize = serial version of the pump"
". test = test pump configuration.",
nargs='?')
parser.add_argument("-d", "--defn", help="name of definition file", nargs="?")
parser.add_argument("-i", "--inter", help="interfield delimiter", nargs="?")
parser.add_argument("-j", "--intra", help="intrafield delimiter", nargs="?")
parser.add_argument("-u", "--username", help="username for API", nargs="?")
parser.add_argument("-p", "--password", help="password for API", nargs="?")
parser.add_argument("-q", "--queryuri", help="URI for API", nargs="?")
parser.add_argument("-r", "--rdfprefix", help="RDF prefix", nargs="?")
parser.add_argument("-x", "--uriprefix", help="URI prefix", nargs="?")
parser.add_argument("-s", "--src", help="name of source file containing data to be updated in VIVO", nargs='?')
parser.add_argument("-c", "--config", help="name of file containing config data. Config data overrides program "
"defaults. Command line overrides config file values", nargs='?')
parser.add_argument("-v", "--verbose", action="store_const", dest='loglevel', const=logging.INFO,
help="write informational messages to the log")
parser.add_argument("-b", "--debug", action="store_const", dest='loglevel', const=logging.DEBUG,
default=logging.WARNING, help="write debugging messages to the log")
parser.add_argument("-n", "--nofilters", action="store_true", help="turn off filters")
args = parser.parse_args()
if args.config is None:
args.config = program_defaults['config']
logger.debug(u"No config file specified -- using hardcoded defaults")
else:
logger.debug(u"Reading config file: {}".format(args.config))
# Read the config parameters from the file specified in the command line
config = ConfigParser.ConfigParser()
try:
config.read(args.config)
except IOError:
logger.error(u"Config file {} not found.".format(args.config))
sys.exit(1)
# Config file values overwrite program defaults
for section in config.sections():
for name, val in config.items(section):
program_defaults[name] = val
if 'prefix' != name:
logger.debug(u"Param {} = {}".format(name, val))
# Non null command line values overwrite the config file values
for name, val in vars(args).items():
if val is not None:
program_defaults[name] = val
# Put the final values back in args
for name, val in program_defaults.items():
if val == 'tab':
val = '\t'
vars(args)[name] = val
# Set the level of logging if verbose and/or debug args were used
if args.loglevel:
logging.basicConfig(level=args.loglevel)
return args
def get_parms():
"""
Use get_args to get the args, and return a dictionary of the args ready for
use in pump software.
@see get_args()
:return: dict: parms
"""
parms = {}
args = get_args()
for name, val in vars(args).items():
if val is not None:
parms[name] = val
return parms
def add_type_restriction(step):
"""
    For a given step, look for an object type and construct a SPARQL fragment to restrict the graph
    to objects of that type. If the object does not have a type restriction, return an empty string.
    :param step: The step for which an object restriction is requested
    :return: the SPARQL fragment for the restriction, or an empty string if no type is specified
"""
if 'type' in step['object']:
return '?' + step['object']['name'] + ' a <' + str(step['object']['type']) + '> . '
else:
return ""
def make_get_query(update_def):
"""
Given an update_def, return the sparql query needed to produce a spreadsheet of the data to be managed.
See do_get
:return: a sparql query string
"""
front_query = 'SELECT ?uri ?' + ' ?'.join(update_def['column_defs'].keys()) + '\nWHERE {\n ' + \
update_def['entity_def']['entity_sparql'] + '\n'
# Fake recursion here to depth 3. Could be replaced by real recursion to arbitrary path length
middle_query = ""
for name, path in update_def['column_defs'].items():
middle_query += ' OPTIONAL { ?uri <' + str(path[0]['predicate']['ref']) + '> ?'
if len(path) == 1:
middle_query += name + ' . ' + add_type_restriction(path[0]) + add_qualifiers(path) + ' }\n'
else:
middle_query += path[0]['object']['name'] + ' . ' + add_type_restriction(path[0]) + '?' + \
path[0]['object']['name'] + ' <' + str(path[1]['predicate']['ref']) + '> ?'
if len(path) == 2:
middle_query += name + ' . ' + add_type_restriction(path[1]) + add_qualifiers(path) + ' }\n'
else:
middle_query += path[1]['object']['name'] + ' . ' + add_type_restriction(path[1]) + '?' + \
path[1]['object']['name'] + ' <' + str(path[2]['predicate']['ref']) + '> ?'
if len(path) == 3:
middle_query += name + ' . ' + add_type_restriction(path[2]) + add_qualifiers(path) + ' }\n'
else:
raise PathLengthException('Path length >3 not supported in do_get')
if 'order_by' in update_def['entity_def']:
back_query = '}\nORDER BY ?' + update_def['entity_def']['order_by']
else:
back_query = '}\n'
return front_query + middle_query + back_query
def unique_path(path):
"""
Given a path, determine if all its elements are single-valued predicates. If so, the path is unique,
    regardless of length. If any one of the steps in the path has a non-single-valued predicate, the path is not
unique.
:param path: a definition path
:return: True if path is unique
:rtype: boolean
"""
unique = True
for elem in path:
if elem['predicate']['single'] != True:
unique = False
break
return unique
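# Illustrative example of unique_path (the step dicts are abbreviated and hypothetical):
#   unique_path([{'predicate': {'single': True}}, {'predicate': {'single': True}}])   # -> True
#   unique_path([{'predicate': {'single': True}}, {'predicate': {'single': False}}])  # -> False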
def make_get_data(update_def, result_set):
"""
Given a query result set, produce a dictionary keyed by uri with values of dictionaries keyed by column
names. Where columns have multiple values, create sets of values.
:param result_set: SPARQL result set
:return: dictionary
:rtype: dict
"""
data = {}
for binding in result_set['results']['bindings']:
uri = str(binding['uri']['value'])
if uri not in data:
data[uri] = {}
for name in ['uri'] + update_def['column_defs'].keys():
if name != 'uri':
last_step = update_def['column_defs'][name][len(update_def['column_defs'][name]) - 1]
if name != 'uri' and last_step['predicate']['single'] == 'boolean':
if name in binding and (str(last_step['object']['value']) == binding[name]['value']):
data[uri][name] = '1'
elif name not in data[uri]:
data[uri][name] = '0'
else:
if name in binding:
if name in data[uri]:
data[uri][name].add(binding[name]['value'])
else:
data[uri][name] = {binding[name]['value']}
return data
def make_rdf_term_from_source(value, step):
"""
Given a text string value and a step definition, return the rdflib term as defined by the step def
:param: value: string from source
:param: step: step definition from update_def
:return: rdf_term: an rdf_term from rdflib -- either Literal or URIRef
"""
from rdflib import Literal, URIRef
if step["object"]["literal"]:
datatype = step["object"].get('datatype', None)
if datatype is not None and datatype[:4] == 'xsd:':
datatype = datatype.replace('xsd:', 'http://www.w3.org/2001/XMLSchema#')
rdf_term = Literal(value, datatype=datatype, lang=step["object"].get('lang', None))
else:
rdf_term = URIRef(value)
return rdf_term
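# Illustrative examples of make_rdf_term_from_source (the step dicts are hypothetical):
#   make_rdf_term_from_source('42', {'object': {'literal': True, 'datatype': 'xsd:integer'}})
#       # -> Literal('42', datatype='http://www.w3.org/2001/XMLSchema#integer')
#   make_rdf_term_from_source('http://vivo.school.edu/individual/n123', {'object': {'literal': False}})
#       # -> URIRef('http://vivo.school.edu/individual/n123')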
def prepare_column_values(update_string, intra, step_def, enum, row, column_name):
"""
Given the string of data from the update file, the step definition, the row and column name of the
update_string in the update file, enumerations and filters, prepare the column values and return them
as a list of rdflib terms
    :return: column_terms, a list of rdflib terms (Literal or URIRef)
    :rtype: list
"""
# Three cases: boolean, single valued and multiple valued
if step_def['predicate']['single'] == 'boolean':
update_string = update_string.strip()
if update_string == '':
column_values = ['']
elif update_string == '0' or update_string == 'None' or update_string.lower() == 'false' or \
update_string.lower() == 'n' or update_string.lower() == 'no':
column_values = ['0']
else:
column_values = ['1']
elif not step_def['object']['multiple']:
column_values = [update_string.strip()]
else:
column_values = update_string.split(intra)
if 'include' in step_def['predicate']:
column_values += step_def['predicate']['include']
for i in range(len(column_values)):
column_values[i] = column_values[i].strip()
# Check column values for consistency with single and multi-value paths
if step_def['object']['multiple'] != True and len(column_values) > 1:
raise InvalidSourceException(str(row) + str(column_name) +
'Path is single-valued, multiple values in source.')
while '' in column_values:
column_values.remove('')
if 'None' in column_values and len(column_values) > 1:
raise InvalidSourceException(str(row) + str(column_name) +
'None value in multi-valued predicate set')
# Handle enumerations
if 'enum' in step_def['object']:
for i in range(len(column_values)):
try:
column_values[i] = enum[step_def['object']['enum']]['update'][column_values[i]]
except KeyError:
logger.error(u"{} not found in enumeration. Blank value substituted.".format(column_values[i]))
column_values[i] = ''
# Convert to rdflib terms
column_terms = [make_rdf_term_from_source(column_value, step_def) for column_value in column_values]
return column_terms
def load_enum(update_def):
"""
    Find all enumerations in the update_def. For each, read the corresponding enum file and build the corresponding
pair of enum dictionaries.
The two columns in the tab delimited input file must be called "short" and "vivo". "vivo" is the value to put in
vivo (update) or get from vivo. short is the human usable short form.
The input file name appears as the 'enum' value in update_def
:return enumeration structure. Pairs of dictionaries, one pair for each enumeration. short -> vivo, vivo -> short
"""
enum = {}
for path in update_def['column_defs'].values():
for step in path:
if 'object' in step and 'enum' in step['object']:
enum_name = step['object']['enum']
if enum_name not in enum:
enum[enum_name] = {}
enum[enum_name]['get'] = {}
enum[enum_name]['update'] = {}
enum_data = read_csv(enum_name, delimiter='\t')
for enum_datum in enum_data.values():
try:
enum[enum_name]['get'][enum_datum['vivo']] = enum_datum['short']
except KeyError:
logger.error(
u"Enumeration {} does not have required columns named short and vivo".format(enum_name))
raise KeyError
enum[enum_name]['update'][enum_datum['short']] = enum_datum['vivo']
return enum
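# An enumeration file read by load_enum is a two-column, tab-delimited file whose header
# names the columns "short" and "vivo"; the rows below are hypothetical:
#   short	vivo
#   Professor	http://vivoweb.org/ontology/core#FacultyMember
#   Student	http://vivoweb.org/ontology/core#Student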
def create_enum(filename, query, parms, trim=0, skip=0):
"""
    Given a query, parms and a filename, execute the query and write the enum into the file
:param: filename: name of the file to contain the enumeration
:param: query: the query to be used to create the columns for the enumeration
:param: parms: dictionary of VIVO SPARQL API parameters
:param: trim: If 0, no trim. If k, return the first k characters as a trimmed value for short
:param: skip: If 0, no skip. If k, skip the first k characters as a trimmed value for short
:return: None
"""
import codecs
data = vivo_query(query, parms)
outfile = codecs.open(filename, mode='w', encoding='utf_8', errors='xmlcharrefreplace')
outfile.write("short\tvivo\n")
for item in data['results']['bindings']:
        if trim == 0 and skip == 0:
outfile.write(item["short"]["value"] + "\t" + item["vivo"]["value"] + "\n")
elif trim != 0 and skip == 0:
outfile.write(item["short"]["value"][:trim] + "\t" + item["vivo"]["value"] + "\n")
elif trim == 0 and skip != 0:
outfile.write(item["short"]["value"][skip:] + "\t" + item["vivo"]["value"] + "\n")
else:
outfile.write(item["short"]["value"][skip:-trim] + "\t" + item["vivo"]["value"] + "\n")
outfile.close() | dofeldsc/vivo_uos | my_pump/pump/vivopump.py | Python | gpl-3.0 | 47,667 | 0.003168 |
class URLOpener(object):
def __init__(self, x):
self.x = x
def urlopen(self):
return file(self.x) | idea4bsd/idea4bsd | python/testData/refactoring/move/class/before/src/lib1.py | Python | apache-2.0 | 122 | 0.008197 |
import time
import logging
from typing import Callable, List, TypeVar, Text
from psycopg2.extensions import cursor
CursorObj = TypeVar('CursorObj', bound=cursor)
from django.db import connection
from zerver.models import UserProfile
'''
NOTE! Be careful modifying this library, as it is used
in a migration, and it needs to be valid for the state
of the database that is in place when the 0104_fix_unreads
migration runs.
'''
logger = logging.getLogger('zulip.fix_unreads')
logger.setLevel(logging.WARNING)
def build_topic_mute_checker(cursor, user_profile):
# type: (CursorObj, UserProfile) -> Callable[[int, Text], bool]
'''
This function is similar to the function of the same name
in zerver/lib/topic_mutes.py, but it works without the ORM,
so that we can use it in migrations.
'''
query = '''
SELECT
recipient_id,
topic_name
FROM
zerver_mutedtopic
WHERE
user_profile_id = %s
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
tups = {
(recipient_id, topic_name.lower())
for (recipient_id, topic_name) in rows
}
def is_muted(recipient_id, topic):
# type: (int, Text) -> bool
return (recipient_id, topic.lower()) in tups
return is_muted
def update_unread_flags(cursor, user_message_ids):
# type: (CursorObj, List[int]) -> None
um_id_list = ', '.join(str(id) for id in user_message_ids)
query = '''
UPDATE zerver_usermessage
SET flags = flags | 1
WHERE id IN (%s)
''' % (um_id_list,)
cursor.execute(query)
def get_timing(message, f):
# type: (str, Callable) -> None
start = time.time()
logger.info(message)
f()
elapsed = time.time() - start
logger.info('elapsed time: %.03f\n' % (elapsed,))
def fix_unsubscribed(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
recipient_ids = []
def find_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
(NOT zerver_subscription.active)
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'get recipients',
find_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
query = '''
SELECT
zerver_usermessage.id
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
user_message_ids.append(row[0])
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding unread messages for non-active streams',
find
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for non-active streams',
fix
)
def fix_pre_pointer(cursor, user_profile):
# type: (CursorObj, UserProfile) -> None
pointer = user_profile.pointer
if not pointer:
return
recipient_ids = []
def find_non_muted_recipients():
# type: () -> None
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
zerver_subscription.in_home_view AND
zerver_subscription.active
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'find_non_muted_recipients',
find_non_muted_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find_old_ids():
# type: () -> None
recips = ', '.join(str(id) for id in recipient_ids)
is_topic_muted = build_topic_mute_checker(cursor, user_profile)
query = '''
SELECT
zerver_usermessage.id,
zerver_message.recipient_id,
zerver_message.subject
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
zerver_usermessage.message_id <= %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, pointer, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for (um_id, recipient_id, topic) in rows:
if not is_topic_muted(recipient_id, topic):
user_message_ids.append(um_id)
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding pre-pointer messages that are not muted',
find_old_ids
)
if not user_message_ids:
return
def fix():
# type: () -> None
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for pre-pointer non-muted messages',
fix
)
def fix(user_profile):
# type: (UserProfile) -> None
logger.info('\n---\nFixing %s:' % (user_profile.email,))
with connection.cursor() as cursor:
fix_unsubscribed(cursor, user_profile)
fix_pre_pointer(cursor, user_profile)
| amanharitsh123/zulip | zerver/lib/fix_unreads.py | Python | apache-2.0 | 6,949 | 0.000863 |
"""Test cli"""
import asyncio
import contextlib
import io
import json
import sys
import traceback
from unittest import mock
import pytest
from egtaonline import __main__ as main
from egtaonline import api
from egtaonline import mockserver
# TODO async fixtures may be possible with python 3.6, but it's not possible
# with async_generator
async def run(*args):
"""Run a command line and return if it ran successfully"""
try:
await main.amain(*args)
except SystemExit as ex:
return not int(str(ex))
except Exception: # pylint: disable=broad-except
traceback.print_exc()
return False
return True
def stdin(inp):
"""Patch stdin with input"""
return mock.patch.object(sys, 'stdin', io.StringIO(inp))
# This is a hack to allow "writing" to the underlying buffer of a stringio
class _StringBytes(io.BytesIO):
"""A wrapper for bytes io that allows getting the string
This is necessary because for zip files, the result needs to be written to
a byte stream."""
def close(self):
pass
def getvalue(self):
return super().getvalue().decode('utf8')
@contextlib.contextmanager
def stdout():
"""Patch stdout and return stringio"""
buff = _StringBytes()
with mock.patch.object(sys, 'stdout', io.TextIOWrapper(buff)):
yield buff
def stderr():
"""Patch stderr and return stringio"""
return mock.patch.object(sys, 'stderr', io.StringIO())
@pytest.mark.asyncio
async def test_help():
"""Test getting help by itself"""
with stderr() as err:
assert await run('-h'), err.getvalue()
@pytest.mark.asyncio
@pytest.mark.parametrize('cmd', ['sim', 'game', 'sched', 'sims'])
async def test_cmd_help(cmd):
"""Test getting help from commands"""
with stderr() as err:
assert await run(cmd, '-h'), err.getvalue()
@pytest.mark.asyncio
async def test_sim():
"""Test sim functionality"""
async with mockserver.server() as server:
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sim'), err.getvalue()
assert not out.getvalue()
server.create_simulator('sim', '1')
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sim'), err.getvalue()
# get by id
sim = json.loads(out.getvalue())
with stderr() as err:
assert await run('-a', '', 'sim', str(sim['id'])), err.getvalue()
# get by name
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sim', sim['name'], '-n',
sim['version']), err.getvalue()
assert sim['id'] == json.loads(out.getvalue())['id']
assert not await run('-a', '', 'sim', '--', '-1')
# add role
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-rr'), err.getvalue()
# add strategy
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-rr', '-ss'), err.getvalue()
# add strategies
with stdin(json.dumps({'r': ['q'], 'a': ['b']})), stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-j-'), err.getvalue()
# remove strategy
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-drr', '-sq'), err.getvalue()
# remove role
with stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-dra'), err.getvalue()
# remove strategies
with stdin(json.dumps({'r': ['s']})), stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-dj-'), err.getvalue()
# get zip
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sim', str(sim['id']), '-z'), err.getvalue()
@pytest.mark.asyncio
async def test_game(tmpdir): # pylint: disable=too-many-statements
"""Test game functionality"""
conf = str(tmpdir.join('conf.json'))
with open(conf, 'w') as fil:
json.dump({}, fil)
async with mockserver.server() as server:
with stdout() as out, stderr() as err:
assert await run('-a', '', 'game'), err.getvalue()
assert not out.getvalue()
sim_id = server.create_simulator('sim', '1')
game_spec = {
'players': {
'r': 2,
},
'strategies': {
'r': ['s0', 's1'],
},
}
with stdin(json.dumps(game_spec['strategies'])), stderr() as err:
assert await run(
'-a', '', 'sim', str(sim_id), '-j-'), err.getvalue()
# get canon game
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf',
conf), err.getvalue()
game = json.loads(out.getvalue())
# verify its now listed with games
with stdout() as out, stderr() as err:
assert await run('-a', '', 'game'), err.getvalue()
game2 = json.loads(out.getvalue())
assert game == game2
# get game structure
with stdout() as out, stderr() as err:
assert await run('-a', '', 'game', str(game['id'])), err.getvalue()
struct = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf',
conf), err.getvalue()
assert struct == json.loads(out.getvalue())
# get game summary
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '--summary'), err.getvalue()
summ = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf', conf,
'--summary'), err.getvalue()
assert summ == json.loads(out.getvalue())
# get observations
        with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']),
'--observations'), err.getvalue()
obs = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf', conf,
'--observations'), err.getvalue()
assert obs == json.loads(out.getvalue())
# get full data
        with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '--full'), err.getvalue()
full = json.loads(out.getvalue())
with stdin(json.dumps(game_spec)), stdout() as out, \
stderr() as err:
assert await run(
'-a', '', 'game', str(sim_id), '-j-', '--fetch-conf', conf,
'--full'), err.getvalue()
assert full == json.loads(out.getvalue())
# test name works
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'game', game['name'], '-n'), err.getvalue()
assert game['id'] == json.loads(out.getvalue())['id']
# remove strategy
with stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-drr',
'-ss0'), err.getvalue()
        # remove strategies
with stdin(json.dumps({'r': ['s1']})), stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-dj-'), err.getvalue()
# remove role
with stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-drr'), err.getvalue()
# add role
assert not await run('-a', '', 'game', str(game['id']), '-rr')
with stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-rr',
'-c2'), err.getvalue()
# add strategies
with stdin(json.dumps({'r': ['s1']})), stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-j-'), err.getvalue()
# add strategy
with stderr() as err:
assert await run(
'-a', '', 'game', str(game['id']), '-rr',
'-ss0'), err.getvalue()
@pytest.mark.asyncio
async def test_sched():
"""Test scheduler functionality"""
async with mockserver.server() as server:
# verify no schedulers
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sched'), err.getvalue()
assert not out.getvalue()
# create one
sim_id = server.create_simulator('sim', '1')
async with api.api() as egta:
await egta.create_generic_scheduler(
sim_id, 'sched', True, 1, 2, 1, 1)
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sched'), err.getvalue()
sched = json.loads(out.getvalue())
with stderr() as err:
assert await run(
'-a', '', 'sched', str(sched['id'])), err.getvalue()
with stderr() as err:
assert await run(
'-a', '', 'sched', str(sched['id']), '-r'), err.getvalue()
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sched', sched['name'], '-n'), err.getvalue()
assert sched['id'] == json.loads(out.getvalue())['id']
# deactivate scheduler
with stderr() as err:
assert await run(
'-a', '', 'sched', str(sched['id']),
'--deactivate'), err.getvalue()
# verify deactivated
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sched', str(sched['id'])), err.getvalue()
assert not json.loads(out.getvalue())['active']
# delete scheduler
with stderr() as err:
assert await run(
'-a', '', 'sched', str(sched['id']), '-d'), err.getvalue()
# assert no schedulers
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sched'), err.getvalue()
assert not out.getvalue()
@pytest.mark.asyncio
async def test_sched_running():
"""Test running scheduler functionality"""
async with mockserver.server() as server:
# verify no schedulers
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sched'), err.getvalue()
assert not out.getvalue()
# create one
sim_id = server.create_simulator('sim', '1', delay_dist=lambda: 1)
async with api.api() as egta:
sim = await egta.get_simulator(sim_id)
await sim.add_strategies({'r': ['s0', 's1']})
sched = await egta.create_generic_scheduler(
sim_id, 'sched', True, 1, 2, 1, 1)
await sched.add_role('r', 2)
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sched', '--running'), err.getvalue()
assert not out.getvalue()
await sched.add_profile('r: 1 s0, 1 s1', 1)
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sched', '--running'), err.getvalue()
lines = out.getvalue()[:-1].split('\n')
assert len(lines) == 1
running = json.loads(lines[0])
assert running['active']
# Wait to complete
await asyncio.sleep(1.5)
with stdout() as out, stderr() as err:
assert await run(
'-a', '', 'sched', '--running'), err.getvalue()
assert not out.getvalue()
@pytest.mark.asyncio
async def test_sims():
"""Test getting simulations"""
async with mockserver.server() as server:
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sched'), err.getvalue()
assert not out.getvalue()
sim_id = server.create_simulator('sim', '1')
async with api.api() as egta:
sim = await egta.get_simulator(sim_id)
await sim.add_strategies({'r': ['s0', 's1']})
sched = await egta.create_generic_scheduler(
sim_id, 'sched', True, 1, 2, 1, 1)
await sched.add_role('r', 2)
# This fails because we don't implement search, so this is as if no
# job exists
assert not await run('-a', '', 'sims', '-j', '0')
await sched.add_profile('r: 1 s0, 1 s1', 1)
# This works because there's only one simulation so far and we
# don't implement search
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sims', '-j', '0'), err.getvalue()
await sched.add_profile('r: 2 s0', 2)
# This fails because we don't implement search and now there are
# several simulations
assert not await run('-a', '', 'sims', '-j', '0')
with stdout() as out, stderr() as err:
assert await run('-a', '', 'sims'), err.getvalue()
sims = [json.loads(line) for line in out.getvalue()[:-1].split('\n')]
assert len(sims) == 3
with stderr() as err:
assert await run(
'-a', '', 'sims', str(sims[0]['folder'])), err.getvalue()
@pytest.mark.asyncio
async def test_authfile():
"""Test supplying auth file"""
async with mockserver.server():
with stdin(''):
assert await run('-f-', 'sim')
| egtaonline/egtaonline-api | test/test_eo.py | Python | apache-2.0 | 14,077 | 0.000142 |
def SimplenoteList():
if (float(vim.eval("a:0"))>=1):
try:
# check for valid date string
datetime.datetime.strptime(vim.eval("a:1"), "%Y-%m-%d")
interface.list_note_index_in_scratch_buffer(since=vim.eval("a:1"))
except ValueError:
interface.list_note_index_in_scratch_buffer(tags=vim.eval("a:1").split(","))
else:
interface.list_note_index_in_scratch_buffer()
try:
set_cred()
SimplenoteList()
except simplenote.SimplenoteLoginFailed:
# Note: error has to be caught here and not in __init__
reset_user_pass('Login Failed. Check token?')
# vim: expandtab
| mrtazz/simplenote.vim | autoload/SimplenoteList.py | Python | mit | 651 | 0.004608 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='scrapy-sci',
version='0.1.0',
description='Improve your scrapy pipeline with machine learning',
long_description=readme + '\n\n' + history,
author='John Cadigan',
author_email='johnpaulcadigan@gmail.com',
url='https://github.com/johncadigan/scrapy-sci',
packages=[
'scrapy_sci',
"scrapy_sci.commands",
"scrapy_sci.templates",
],
package_dir={'scrapy_sci':
'scrapy_sci'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='machine learning',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=test_requirements
)
| dangra/scrapy-sci | setup.py | Python | bsd-3-clause | 1,322 | 0 |
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bson.max_key import *
| reedobrien/mongo-python-driver | pymongo/max_key.py | Python | apache-2.0 | 604 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-22 09:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('polls', '0013_auto_20180109_1302'),
]
operations = [
migrations.CreateModel(
name='WorkaroundPoll',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('condorcet', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.AlterModelOptions(
name='option',
options={'ordering': ('id',)},
),
migrations.AlterModelOptions(
name='vote',
options={'ordering': ('time_updated',)},
),
migrations.AddField(
model_name='option',
name='poll_new1',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='options_new', to='polls.WorkaroundPoll'),
),
]
| stadtgestalten/stadtgestalten | grouprise/features/polls/migrations/0014_auto_20180222_1033.py | Python | agpl-3.0 | 1,195 | 0.001674 |
'''
Created on 21 de oct. de 2015
@author: cgimenop
'''
from xml.dom import minidom
class TLCase:
CODE = 0;
TITLE = 1
SUMMARY = 2
IMPORTANCE = 3
PRECONDITIONS = 4
STEPS = 5
EXPECTED_RESULT = 6
EXTRA_DETAILS = 7 #UNUSED
EXECUTION_TYPE = 8
E2E = 9 #UNUSED
REGRESSION = 10 #UNUSED
LINKED_STORIES = 11
STATE = 12 #UNUSED
COMMENTS = 13 #UNUSED
EXECUTION_TYPE_MANUAL = "1"
EXECUTION_TYPE_AUTO = "2"
EXCEL_IMPORTANCE_LOW = "L"
EXCEL_IMPORTANCE_MEDIUM = "M"
EXCEL_IMPORTANCE_HIGH = "H"
IMPORTANCE_LOW = "1"
IMPORTANCE_MEDIUM = "2"
IMPORTANCE_HIGH = "3"
    '''
    A TestLink test case built from one row of the source Excel worksheet.
    '''
def __init__(self, params = [], req_spec = None):
'''
Constructor
'''
if (len(params) == 0 or req_spec is None):
print("Invalid test case parameters")
            raise ValueError("Invalid test case parameters")
self.req_spec_title = req_spec
self.title = params[self.TITLE].value
self.summary = "</br>".join([params[self.CODE].value, params[self.TITLE].value, "Covers: ", params[self.LINKED_STORIES].value.strip()])
self.importance = self.importance_value(params[self.IMPORTANCE].value)
self.preconditions = params[self.PRECONDITIONS].value.replace("\n", "</br>")
#TODO: This will need further work to split the excel cell in multiple steps
self.steps = params[self.STEPS].value.replace("\n", "</br>")
self.expected_result = "</br>".join([str(params[self.EXPECTED_RESULT].value), str(params[self.EXTRA_DETAILS].value)])
self.expected_result = self.expected_result.replace("\n", "</br>")
if (params[self.EXECUTION_TYPE].value == "yes"):
self.execution_type = self.EXECUTION_TYPE_AUTO
else:
self.execution_type = self.EXECUTION_TYPE_MANUAL
self.requirements = dict()
self.get_requirements(params[self.LINKED_STORIES].value.split(","))
def importance_value(self, value):
        if (value is None):
return self.IMPORTANCE_MEDIUM
switcher = {
self.EXCEL_IMPORTANCE_LOW: self.IMPORTANCE_LOW,
self.EXCEL_IMPORTANCE_MEDIUM: self.IMPORTANCE_MEDIUM,
self.EXCEL_IMPORTANCE_HIGH: self.IMPORTANCE_HIGH,
}
return switcher.get(value.upper(), self.IMPORTANCE_MEDIUM)
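    # Illustrative mapping performed by importance_value (the inputs are hypothetical):
    #   importance_value('H')   # -> '3' (IMPORTANCE_HIGH)
    #   importance_value('m')   # -> '2' (IMPORTANCE_MEDIUM)
    #   importance_value(None)  # -> '2' (default when no importance is given)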
def get_requirements(self, requirements_list = None):
if (requirements_list is None):
return self.requirements
xml_doc = minidom.Document()
self.requirements = dict()
for requirement in requirements_list:
stripped_requirement = requirement.strip()
xml_requirement = xml_doc.createElement("requirement")
req_spec = xml_doc.createElement("req_spec_title")
cdata = xml_doc.createCDATASection(self.req_spec_title)
req_spec.appendChild(cdata)
title = xml_doc.createElement("title")
title_cdata = xml_doc.createCDATASection(stripped_requirement)
title.appendChild(title_cdata)
xml_requirement.appendChild(req_spec)
xml_requirement.appendChild(title)
if (stripped_requirement not in self.requirements):
self.requirements[stripped_requirement] = xml_requirement
return self.requirements
def to_xml(self):
xml_doc = minidom.Document()
xml_test_case = xml_doc.createElement("testcase")
xml_test_case.setAttribute("name", self.title)
summary = xml_doc.createElement("summary")
cdata = xml_doc.createCDATASection(self.summary)
summary.appendChild(cdata)
xml_test_case.appendChild(summary)
preconditions = xml_doc.createElement("preconditions")
cdata = xml_doc.createCDATASection(self.preconditions)
preconditions.appendChild(cdata)
xml_test_case.appendChild(preconditions)
steps = xml_doc.createElement("steps")
xml_test_case.appendChild(steps)
step = xml_doc.createElement("step")
steps.appendChild(step)
actions = xml_doc.createElement("actions")
step.appendChild(actions)
cdata = xml_doc.createCDATASection(self.steps)
actions.appendChild(cdata)
expected_results = xml_doc.createElement("expectedresults")
step.appendChild(expected_results)
cdata = xml_doc.createCDATASection(self.expected_result)
expected_results.appendChild(cdata)
#TODO: When test description is correctly splited into steps this will have to change accordingly
step_number = xml_doc.createElement("step_number")
step.appendChild(step_number)
cdata = xml_doc.createCDATASection("1")
step_number.appendChild(cdata)
execution_type = xml_doc.createElement("execution_type")
cdata = xml_doc.createCDATASection(self.execution_type)
execution_type.appendChild(cdata)
xml_test_case.appendChild(execution_type)
importance = xml_doc.createElement("importance")
cdata = xml_doc.createCDATASection(self.importance)
importance.appendChild(cdata)
xml_test_case.appendChild(importance)
xml_requirements = xml_doc.createElement("requirements")
for requirement_index in self.requirements:
case_requirement = self.requirements[requirement_index]
doc_id = xml_doc.createElement("doc_id")
doc_id_cdata = xml_doc.createCDATASection(requirement_index)
doc_id.appendChild(doc_id_cdata)
case_requirement.appendChild(doc_id)
xml_requirements.appendChild(case_requirement)
xml_test_case.appendChild(xml_requirements)
return xml_test_case
| cgimenop/Excel2Testlink | ExcelParser/TLCase.py | Python | mit | 6,315 | 0.012985 |
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2015 Jérémy Bobbio <lunar@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import pytest
from diffoscope.config import Config
from diffoscope.comparators.fonts import TtfFile
from diffoscope.comparators.missing_file import MissingFile
from utils.data import data, load_fixture
from utils.tools import skip_unless_tools_exist
ttf1 = load_fixture('Samyak-Malayalam1.ttf')
ttf2 = load_fixture('Samyak-Malayalam2.ttf')
def test_identification(ttf1):
assert isinstance(ttf1, TtfFile)
def test_no_differences(ttf1):
difference = ttf1.compare(ttf1)
assert difference is None
@pytest.fixture
def differences(ttf1, ttf2):
return ttf1.compare(ttf2).details
@skip_unless_tools_exist('showttf')
def test_diff(differences):
expected_diff = open(data('ttf_expected_diff')).read()
assert differences[0].unified_diff == expected_diff
@skip_unless_tools_exist('showttf')
def test_compare_non_existing(monkeypatch, ttf1):
monkeypatch.setattr(Config(), 'new_file', True)
difference = ttf1.compare(MissingFile('/nonexisting', ttf1))
assert difference.source2 == '/nonexisting'
assert len(difference.details) > 0
| brettcs/diffoscope | tests/comparators/test_fonts.py | Python | gpl-3.0 | 1,851 | 0.002706 |
#!/usr/bin/env vpython3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Using colorama.Fore/Back/Style members
# pylint: disable=no-member
from __future__ import print_function
import argparse
import collections
import json
import logging
import os
import pipes
import posixpath
import random
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import textwrap
import zipfile
import adb_command_line
import devil_chromium
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_errors
from devil.android import device_utils
from devil.android import flag_changer
from devil.android.sdk import adb_wrapper
from devil.android.sdk import build_tools
from devil.android.sdk import intent
from devil.android.sdk import version_codes
from devil.utils import run_tests_helper
_DIR_SOURCE_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', '..'))
_JAVA_HOME = os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'jdk', 'current')
with devil_env.SysPath(
os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'colorama', 'src')):
import colorama
from incremental_install import installer
from pylib import constants
from pylib.symbols import deobfuscator
from pylib.utils import simpleperf
from pylib.utils import app_bundle_utils
with devil_env.SysPath(
os.path.join(_DIR_SOURCE_ROOT, 'build', 'android', 'gyp')):
import bundletool
BASE_MODULE = 'base'
def _Colorize(text, style=''):
return (style
+ text
+ colorama.Style.RESET_ALL)
def _InstallApk(devices, apk, install_dict):
def install(device):
if install_dict:
installer.Install(device, install_dict, apk=apk, permissions=[])
else:
device.Install(apk, permissions=[], allow_downgrade=True, reinstall=True)
logging.info('Installing %sincremental apk.', '' if install_dict else 'non-')
device_utils.DeviceUtils.parallel(devices).pMap(install)
# A named tuple containing the information needed to convert a bundle into
# an installable .apks archive.
# Fields:
# bundle_path: Path to input bundle file.
# bundle_apk_path: Path to output bundle .apks archive file.
# aapt2_path: Path to aapt2 tool.
# keystore_path: Path to keystore file.
# keystore_password: Password for the keystore file.
# keystore_alias: Signing key name alias within the keystore file.
# system_image_locales: List of Chromium locales to include in system .apks.
BundleGenerationInfo = collections.namedtuple(
'BundleGenerationInfo',
'bundle_path,bundle_apks_path,aapt2_path,keystore_path,keystore_password,'
'keystore_alias,system_image_locales')
def _GenerateBundleApks(info,
output_path=None,
minimal=False,
minimal_sdk_version=None,
mode=None,
optimize_for=None):
"""Generate an .apks archive from a bundle on demand.
Args:
info: A BundleGenerationInfo instance.
output_path: Path of output .apks archive.
minimal: Create the minimal set of apks possible (english-only).
minimal_sdk_version: When minimal=True, use this sdkVersion.
mode: Build mode, either None, or one of app_bundle_utils.BUILD_APKS_MODES.
optimize_for: Override split config, either None, or one of
app_bundle_utils.OPTIMIZE_FOR_OPTIONS.
"""
logging.info('Generating .apks file')
app_bundle_utils.GenerateBundleApks(
info.bundle_path,
# Store .apks file beside the .aab file by default so that it gets cached.
output_path or info.bundle_apks_path,
info.aapt2_path,
info.keystore_path,
info.keystore_password,
info.keystore_alias,
system_image_locales=info.system_image_locales,
mode=mode,
minimal=minimal,
minimal_sdk_version=minimal_sdk_version,
optimize_for=optimize_for)
def _InstallBundle(devices, apk_helper_instance, modules, fake_modules):
def Install(device):
device.Install(
apk_helper_instance,
permissions=[],
modules=modules,
fake_modules=fake_modules,
allow_downgrade=True)
# Basic checks for |modules| and |fake_modules|.
# * |fake_modules| cannot include 'base'.
# * If |fake_modules| is given, ensure |modules| includes 'base'.
# * They must be disjoint (checked by device.Install).
modules_set = set(modules) if modules else set()
fake_modules_set = set(fake_modules) if fake_modules else set()
if BASE_MODULE in fake_modules_set:
raise Exception('\'-f {}\' is disallowed.'.format(BASE_MODULE))
if fake_modules_set and BASE_MODULE not in modules_set:
raise Exception(
'\'-f FAKE\' must be accompanied by \'-m {}\''.format(BASE_MODULE))
logging.info('Installing bundle.')
device_utils.DeviceUtils.parallel(devices).pMap(Install)
def _UninstallApk(devices, install_dict, package_name):
def uninstall(device):
if install_dict:
installer.Uninstall(device, package_name)
else:
device.Uninstall(package_name)
device_utils.DeviceUtils.parallel(devices).pMap(uninstall)
def _IsWebViewProvider(apk_helper_instance):
meta_data = apk_helper_instance.GetAllMetadata()
meta_data_keys = [pair[0] for pair in meta_data]
return 'com.android.webview.WebViewLibrary' in meta_data_keys
def _SetWebViewProvider(devices, package_name):
def switch_provider(device):
if device.build_version_sdk < version_codes.NOUGAT:
logging.error('No need to switch provider on pre-Nougat devices (%s)',
device.serial)
else:
device.SetWebViewImplementation(package_name)
device_utils.DeviceUtils.parallel(devices).pMap(switch_provider)
def _NormalizeProcessName(debug_process_name, package_name):
if not debug_process_name:
debug_process_name = package_name
elif debug_process_name.startswith(':'):
debug_process_name = package_name + debug_process_name
elif '.' not in debug_process_name:
debug_process_name = package_name + ':' + debug_process_name
return debug_process_name
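# Illustrative examples of _NormalizeProcessName (the package and process names are hypothetical):
#   _NormalizeProcessName(None, 'org.chromium.chrome')           # -> 'org.chromium.chrome'
#   _NormalizeProcessName(':privileged', 'org.chromium.chrome')  # -> 'org.chromium.chrome:privileged'
#   _NormalizeProcessName('renderer', 'org.chromium.chrome')     # -> 'org.chromium.chrome:renderer'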
def _LaunchUrl(devices, package_name, argv=None, command_line_flags_file=None,
url=None, apk=None, wait_for_java_debugger=False,
debug_process_name=None, nokill=None):
if argv and command_line_flags_file is None:
raise Exception('This apk does not support any flags.')
if url:
# TODO(agrieve): Launch could be changed to require only package name by
# parsing "dumpsys package" rather than relying on the apk.
if not apk:
raise Exception('Launching with URL is not supported when using '
'--package-name. Use --apk-path instead.')
view_activity = apk.GetViewActivityName()
if not view_activity:
raise Exception('APK does not support launching with URLs.')
debug_process_name = _NormalizeProcessName(debug_process_name, package_name)
def launch(device):
# --persistent is required to have Settings.Global.DEBUG_APP be set, which
# we currently use to allow reading of flags. https://crbug.com/784947
if not nokill:
cmd = ['am', 'set-debug-app', '--persistent', debug_process_name]
if wait_for_java_debugger:
cmd[-1:-1] = ['-w']
# Ignore error since it will fail if apk is not debuggable.
device.RunShellCommand(cmd, check_return=False)
# The flags are first updated with input args.
if command_line_flags_file:
changer = flag_changer.FlagChanger(device, command_line_flags_file)
flags = []
if argv:
adb_command_line.CheckBuildTypeSupportsFlags(device,
command_line_flags_file)
flags = shlex.split(argv)
try:
changer.ReplaceFlags(flags)
except device_errors.AdbShellCommandFailedError:
logging.exception('Failed to set flags')
if url is None:
# Simulate app icon click if no url is present.
cmd = [
'am', 'start', '-p', package_name, '-c',
'android.intent.category.LAUNCHER', '-a', 'android.intent.action.MAIN'
]
device.RunShellCommand(cmd, check_return=True)
else:
launch_intent = intent.Intent(action='android.intent.action.VIEW',
activity=view_activity, data=url,
package=package_name)
device.StartActivity(launch_intent)
device_utils.DeviceUtils.parallel(devices).pMap(launch)
if wait_for_java_debugger:
print('Waiting for debugger to attach to process: ' +
_Colorize(debug_process_name, colorama.Fore.YELLOW))
def _ChangeFlags(devices, argv, command_line_flags_file):
if argv is None:
_DisplayArgs(devices, command_line_flags_file)
else:
flags = shlex.split(argv)
def update(device):
adb_command_line.CheckBuildTypeSupportsFlags(device,
command_line_flags_file)
changer = flag_changer.FlagChanger(device, command_line_flags_file)
changer.ReplaceFlags(flags)
device_utils.DeviceUtils.parallel(devices).pMap(update)
def _TargetCpuToTargetArch(target_cpu):
if target_cpu == 'x64':
return 'x86_64'
if target_cpu == 'mipsel':
return 'mips'
return target_cpu
def _RunGdb(device, package_name, debug_process_name, pid, output_directory,
target_cpu, port, ide, verbose):
if not pid:
debug_process_name = _NormalizeProcessName(debug_process_name, package_name)
pid = device.GetApplicationPids(debug_process_name, at_most_one=True)
if not pid:
# Attaching gdb makes the app run so slow that it takes *minutes* to start
# up (as of 2018). Better to just fail than to start & attach.
raise Exception('App not running.')
gdb_script_path = os.path.dirname(__file__) + '/adb_gdb'
cmd = [
gdb_script_path,
'--package-name=%s' % package_name,
'--output-directory=%s' % output_directory,
'--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(),
'--device=%s' % device.serial,
'--pid=%s' % pid,
'--port=%d' % port,
]
if ide:
cmd.append('--ide')
# Enable verbose output of adb_gdb if it's set for this script.
if verbose:
cmd.append('--verbose')
if target_cpu:
cmd.append('--target-arch=%s' % _TargetCpuToTargetArch(target_cpu))
logging.warning('Running: %s', ' '.join(pipes.quote(x) for x in cmd))
print(_Colorize('All subsequent output is from adb_gdb script.',
colorama.Fore.YELLOW))
os.execv(gdb_script_path, cmd)
def _PrintPerDeviceOutput(devices, results, single_line=False):
for d, result in zip(devices, results):
if not single_line and d is not devices[0]:
sys.stdout.write('\n')
sys.stdout.write(
_Colorize('{} ({}):'.format(d, d.build_description),
colorama.Fore.YELLOW))
sys.stdout.write(' ' if single_line else '\n')
yield result
def _RunMemUsage(devices, package_name, query_app=False):
cmd_args = ['dumpsys', 'meminfo']
if not query_app:
cmd_args.append('--local')
def mem_usage_helper(d):
ret = []
for process in sorted(_GetPackageProcesses(d, package_name)):
meminfo = d.RunShellCommand(cmd_args + [str(process.pid)])
ret.append((process.name, '\n'.join(meminfo)))
return ret
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(mem_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print('No processes found.')
else:
for name, usage in sorted(result):
print(_Colorize('==== Output of "dumpsys meminfo %s" ====' % name,
colorama.Fore.GREEN))
print(usage)
def _DuHelper(device, path_spec, run_as=None):
"""Runs "du -s -k |path_spec|" on |device| and returns parsed result.
Args:
device: A DeviceUtils instance.
path_spec: The list of paths to run du on. May contain shell expansions
(will not be escaped).
run_as: Package name to run as, or None to run as shell user. If not None
and app is not android:debuggable (run-as fails), then command will be
run as root.
Returns:
A dict of path->size in KiB containing all paths in |path_spec| that exist
on device. Paths that do not exist are silently ignored.
"""
# Example output for: du -s -k /data/data/org.chromium.chrome/{*,.*}
# 144 /data/data/org.chromium.chrome/cache
# 8 /data/data/org.chromium.chrome/files
# <snip>
# du: .*: No such file or directory
# The -d flag works differently across android version, so use -s instead.
# Without the explicit 2>&1, stderr and stdout get combined at random :(.
cmd_str = 'du -s -k ' + path_spec + ' 2>&1'
lines = device.RunShellCommand(cmd_str, run_as=run_as, shell=True,
check_return=False)
output = '\n'.join(lines)
# run-as: Package 'com.android.chrome' is not debuggable
if output.startswith('run-as:'):
# check_return=False needed for when some paths in path_spec do not exist.
lines = device.RunShellCommand(cmd_str, as_root=True, shell=True,
check_return=False)
ret = {}
try:
for line in lines:
# du: .*: No such file or directory
if line.startswith('du:'):
continue
size, subpath = line.split(None, 1)
ret[subpath] = int(size)
return ret
except ValueError:
logging.error('du command was: %s', cmd_str)
logging.error('Failed to parse du output:\n%s', output)
raise
def _RunDiskUsage(devices, package_name):
# Measuring dex size is a bit complicated:
# https://source.android.com/devices/tech/dalvik/jit-compiler
#
# For KitKat and below:
# dumpsys package contains:
# dataDir=/data/data/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-1.apk
# resourcePath=/data/app/org.chromium.chrome-1.apk
# nativeLibraryPath=/data/app-lib/org.chromium.chrome-1
# To measure odex:
# ls -l /data/dalvik-cache/data@app@org.chromium.chrome-1.apk@classes.dex
#
# For Android L and M (and maybe for N+ system apps):
# dumpsys package contains:
# codePath=/data/app/org.chromium.chrome-1
# resourcePath=/data/app/org.chromium.chrome-1
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-1/lib
# To measure odex:
# # Option 1:
# /data/dalvik-cache/arm/data@app@org.chromium.chrome-1@base.apk@classes.dex
# /data/dalvik-cache/arm/data@app@org.chromium.chrome-1@base.apk@classes.vdex
# ls -l /data/dalvik-cache/profiles/org.chromium.chrome
# (these profiles all appear to be 0 bytes)
# # Option 2:
# ls -l /data/app/org.chromium.chrome-1/oat/arm/base.odex
#
# For Android N+:
# dumpsys package contains:
# dataDir=/data/user/0/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# resourcePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-GUID/lib
# Instruction Set: arm
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm/base.odex[status=kOatUpToDate, compilation_f
# ilter=quicken]
# Instruction Set: arm64
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm64/base.odex[status=..., compilation_filter=q
# uicken]
# To measure odex:
# ls -l /data/app/.../oat/arm/base.odex
# ls -l /data/app/.../oat/arm/base.vdex (optional)
# To measure the correct odex size:
# cmd package compile -m speed org.chromium.chrome # For webview
# cmd package compile -m speed-profile org.chromium.chrome # For others
def disk_usage_helper(d):
package_output = '\n'.join(d.RunShellCommand(
['dumpsys', 'package', package_name], check_return=True))
# Does not return error when apk is not installed.
if not package_output or 'Unable to find package:' in package_output:
return None
# Ignore system apks that have updates installed.
package_output = re.sub(r'Hidden system packages:.*?^\b', '',
package_output, flags=re.S | re.M)
try:
data_dir = re.search(r'dataDir=(.*)', package_output).group(1)
code_path = re.search(r'codePath=(.*)', package_output).group(1)
lib_path = re.search(r'(?:legacyN|n)ativeLibrary(?:Dir|Path)=(.*)',
package_output).group(1)
except AttributeError as e:
raise Exception('Error parsing dumpsys output: ' + package_output) from e
if code_path.startswith('/system'):
      logging.warning('Measurement of system image apks can be inaccurate')
compilation_filters = set()
# Match "compilation_filter=value", where a line break can occur at any spot
# (refer to examples above).
awful_wrapping = r'\s*'.join('compilation_filter=')
for m in re.finditer(awful_wrapping + r'([\s\S]+?)[\],]', package_output):
compilation_filters.add(re.sub(r'\s+', '', m.group(1)))
# Starting Android Q, output looks like:
# arm: [status=speed-profile] [reason=install]
for m in re.finditer(r'\[status=(.+?)\]', package_output):
compilation_filters.add(m.group(1))
compilation_filter = ','.join(sorted(compilation_filters))
data_dir_sizes = _DuHelper(d, '%s/{*,.*}' % data_dir, run_as=package_name)
# Measure code_cache separately since it can be large.
code_cache_sizes = {}
code_cache_dir = next(
(k for k in data_dir_sizes if k.endswith('/code_cache')), None)
if code_cache_dir:
data_dir_sizes.pop(code_cache_dir)
code_cache_sizes = _DuHelper(d, '%s/{*,.*}' % code_cache_dir,
run_as=package_name)
apk_path_spec = code_path
if not apk_path_spec.endswith('.apk'):
apk_path_spec += '/*.apk'
apk_sizes = _DuHelper(d, apk_path_spec)
if lib_path.endswith('/lib'):
# Shows architecture subdirectory.
lib_sizes = _DuHelper(d, '%s/{*,.*}' % lib_path)
else:
lib_sizes = _DuHelper(d, lib_path)
# Look at all possible locations for odex files.
odex_paths = []
for apk_path in apk_sizes:
mangled_apk_path = apk_path[1:].replace('/', '@')
apk_basename = posixpath.basename(apk_path)[:-4]
for ext in ('dex', 'odex', 'vdex', 'art'):
# Easier to check all architectures than to determine active ones.
for arch in ('arm', 'arm64', 'x86', 'x86_64', 'mips', 'mips64'):
odex_paths.append(
'%s/oat/%s/%s.%s' % (code_path, arch, apk_basename, ext))
# No app could possibly have more than 6 dex files.
for suffix in ('', '2', '3', '4', '5'):
odex_paths.append('/data/dalvik-cache/%s/%s@classes%s.%s' % (
arch, mangled_apk_path, suffix, ext))
# This path does not have |arch|, so don't repeat it for every arch.
if arch == 'arm':
odex_paths.append('/data/dalvik-cache/%s@classes%s.dex' % (
mangled_apk_path, suffix))
odex_sizes = _DuHelper(d, ' '.join(pipes.quote(p) for p in odex_paths))
return (data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter)
def print_sizes(desc, sizes):
print('%s: %d KiB' % (desc, sum(sizes.values())))
for path, size in sorted(sizes.items()):
print(' %s: %s KiB' % (path, size))
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(disk_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print('APK is not installed.')
continue
(data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter) = result
total = sum(sum(sizes.values()) for sizes in result[:-1])
print_sizes('Apk', apk_sizes)
print_sizes('App Data (non-code cache)', data_dir_sizes)
print_sizes('App Data (code cache)', code_cache_sizes)
print_sizes('Native Libs', lib_sizes)
show_warning = compilation_filter and 'speed' not in compilation_filter
compilation_filter = compilation_filter or 'n/a'
print_sizes('odex (compilation_filter=%s)' % compilation_filter, odex_sizes)
if show_warning:
logging.warning('For a more realistic odex size, run:')
logging.warning(' %s compile-dex [speed|speed-profile]', sys.argv[0])
print('Total: %s KiB (%.1f MiB)' % (total, total / 1024.0))
class _LogcatProcessor:
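  """Parses, filters, and colorizes logcat lines for the target package."""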
ParsedLine = collections.namedtuple(
'ParsedLine',
['date', 'invokation_time', 'pid', 'tid', 'priority', 'tag', 'message'])
class NativeStackSymbolizer:
"""Buffers lines from native stacks and symbolizes them when done."""
# E.g.: #06 pc 0x0000d519 /apex/com.android.runtime/lib/libart.so
# E.g.: #01 pc 00180c8d /data/data/.../lib/libbase.cr.so
_STACK_PATTERN = re.compile(r'\s*#\d+\s+(?:pc )?(0x)?[0-9a-f]{8,16}\s')
def __init__(self, stack_script_context, print_func):
# To symbolize native stacks, we need to pass all lines at once.
self._stack_script_context = stack_script_context
self._print_func = print_func
self._crash_lines_buffer = None
def _FlushLines(self):
"""Prints queued lines after sending them through stack.py."""
crash_lines = self._crash_lines_buffer
self._crash_lines_buffer = None
with tempfile.NamedTemporaryFile(mode='w') as f:
f.writelines(x[0].message + '\n' for x in crash_lines)
f.flush()
proc = self._stack_script_context.Popen(
input_file=f.name, stdout=subprocess.PIPE)
lines = proc.communicate()[0].splitlines()
for i, line in enumerate(lines):
parsed_line, dim = crash_lines[min(i, len(crash_lines) - 1)]
d = parsed_line._asdict()
d['message'] = line
parsed_line = _LogcatProcessor.ParsedLine(**d)
self._print_func(parsed_line, dim)
def AddLine(self, parsed_line, dim):
# Assume all lines from DEBUG are stacks.
# Also look for "stack-looking" lines to catch manual stack prints.
# It's important to not buffer non-stack lines because stack.py does not
# pass them through.
is_crash_line = parsed_line.tag == 'DEBUG' or (self._STACK_PATTERN.match(
parsed_line.message))
if is_crash_line:
if self._crash_lines_buffer is None:
self._crash_lines_buffer = []
self._crash_lines_buffer.append((parsed_line, dim))
return
if self._crash_lines_buffer is not None:
self._FlushLines()
self._print_func(parsed_line, dim)
# Logcat tags for messages that are generally relevant but are not from PIDs
# associated with the apk.
_ALLOWLISTED_TAGS = {
'ActivityManager', # Shows activity lifecycle messages.
'ActivityTaskManager', # More activity lifecycle messages.
'AndroidRuntime', # Java crash dumps
'DEBUG', # Native crash dump.
}
  # Matches spammy, unimportant messages that only occur on pre-L (Dalvik).
_DALVIK_IGNORE_PATTERN = re.compile('|'.join([
r'^Added shared lib',
r'^Could not find ',
r'^DexOpt:',
r'^GC_',
r'^Late-enabling CheckJNI',
r'^Link of class',
r'^No JNI_OnLoad found in',
r'^Trying to load lib',
r'^Unable to resolve superclass',
r'^VFY:',
r'^WAIT_',
]))
def __init__(self,
device,
package_name,
stack_script_context,
deobfuscate=None,
verbose=False):
self._device = device
self._package_name = package_name
self._verbose = verbose
self._deobfuscator = deobfuscate
self._native_stack_symbolizer = _LogcatProcessor.NativeStackSymbolizer(
stack_script_context, self._PrintParsedLine)
# Process ID for the app's main process (with no :name suffix).
self._primary_pid = None
# Set of all Process IDs that belong to the app.
self._my_pids = set()
# Set of all Process IDs that we've parsed at some point.
self._seen_pids = set()
# Start proc 22953:com.google.chromeremotedesktop/
self._pid_pattern = re.compile(r'Start proc (\d+):{}/'.format(package_name))
# START u0 {act=android.intent.action.MAIN \
# cat=[android.intent.category.LAUNCHER] \
# flg=0x10000000 pkg=com.google.chromeremotedesktop} from uid 2000
self._start_pattern = re.compile(r'START .*pkg=' + package_name)
self.nonce = 'Chromium apk_operations.py nonce={}'.format(random.random())
# Holds lines buffered on start-up, before we find our nonce message.
self._initial_buffered_lines = []
self._UpdateMyPids()
# Give preference to PID reported by "ps" over those found from
# _start_pattern. There can be multiple "Start proc" messages from prior
# runs of the app.
self._found_initial_pid = self._primary_pid is not None
# Retrieve any additional patterns that are relevant for the User.
self._user_defined_highlight = None
user_regex = os.environ.get('CHROMIUM_LOGCAT_HIGHLIGHT')
if user_regex:
self._user_defined_highlight = re.compile(user_regex)
if not self._user_defined_highlight:
print(_Colorize(
'Rejecting invalid regular expression: {}'.format(user_regex),
colorama.Fore.RED + colorama.Style.BRIGHT))
def _UpdateMyPids(self):
# We intentionally do not clear self._my_pids to make sure that the
# ProcessLine method below also includes lines from processes which may
# have already exited.
self._primary_pid = None
for process in _GetPackageProcesses(self._device, self._package_name):
# We take only the first "main" process found in order to account for
# possibly forked() processes.
if ':' not in process.name and self._primary_pid is None:
self._primary_pid = process.pid
self._my_pids.add(process.pid)
def _GetPidStyle(self, pid, dim=False):
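    """Returns the colorama style for a line from the given PID."""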
if pid == self._primary_pid:
return colorama.Fore.WHITE
if pid in self._my_pids:
# TODO(wnwen): Use one separate persistent color per process, pop LRU
return colorama.Fore.YELLOW
if dim:
return colorama.Style.DIM
return ''
def _GetPriorityStyle(self, priority, dim=False):
# pylint:disable=no-self-use
if dim:
return ''
style = colorama.Fore.BLACK
if priority in ('E', 'F'):
style += colorama.Back.RED
elif priority == 'W':
style += colorama.Back.YELLOW
elif priority == 'I':
style += colorama.Back.GREEN
elif priority == 'D':
style += colorama.Back.BLUE
return style
def _ParseLine(self, line):
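    """Parses a logcat "threadtime" line into a ParsedLine namedtuple."""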
tokens = line.split(None, 6)
def consume_token_or_default(default):
return tokens.pop(0) if len(tokens) > 0 else default
def consume_integer_token_or_default(default):
if len(tokens) == 0:
return default
try:
return int(tokens.pop(0))
except ValueError:
return default
date = consume_token_or_default('')
invokation_time = consume_token_or_default('')
pid = consume_integer_token_or_default(-1)
tid = consume_integer_token_or_default(-1)
priority = consume_token_or_default('')
tag = consume_token_or_default('')
original_message = consume_token_or_default('')
# Example:
# 09-19 06:35:51.113 9060 9154 W GCoreFlp: No location...
# 09-19 06:01:26.174 9060 10617 I Auth : [ReflectiveChannelBinder]...
# Parsing "GCoreFlp:" vs "Auth :", we only want tag to contain the word,
# and we don't want to keep the colon for the message.
if tag and tag[-1] == ':':
tag = tag[:-1]
elif len(original_message) > 2:
original_message = original_message[2:]
return self.ParsedLine(
date, invokation_time, pid, tid, priority, tag, original_message)
def _PrintParsedLine(self, parsed_line, dim=False):
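    """Colorizes a ParsedLine, deobfuscates it if possible, and prints it."""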
tid_style = colorama.Style.NORMAL
user_match = self._user_defined_highlight and (
re.search(self._user_defined_highlight, parsed_line.tag)
or re.search(self._user_defined_highlight, parsed_line.message))
# Make the main thread bright.
if not dim and parsed_line.pid == parsed_line.tid:
tid_style = colorama.Style.BRIGHT
pid_style = self._GetPidStyle(parsed_line.pid, dim)
msg_style = pid_style if not user_match else (colorama.Fore.GREEN +
colorama.Style.BRIGHT)
# We have to pad before adding color as that changes the width of the tag.
pid_str = _Colorize('{:5}'.format(parsed_line.pid), pid_style)
tid_str = _Colorize('{:5}'.format(parsed_line.tid), tid_style)
tag = _Colorize('{:8}'.format(parsed_line.tag),
pid_style + ('' if dim else colorama.Style.BRIGHT))
priority = _Colorize(parsed_line.priority,
self._GetPriorityStyle(parsed_line.priority))
messages = [parsed_line.message]
if self._deobfuscator:
messages = self._deobfuscator.TransformLines(messages)
for message in messages:
message = _Colorize(message, msg_style)
sys.stdout.write('{} {} {} {} {} {}: {}\n'.format(
parsed_line.date, parsed_line.invokation_time, pid_str, tid_str,
priority, tag, message))
def _TriggerNonceFound(self):
# Once the nonce is hit, we have confidence that we know which lines
# belong to the current run of the app. Process all of the buffered lines.
if self._primary_pid:
for args in self._initial_buffered_lines:
self._native_stack_symbolizer.AddLine(*args)
self._initial_buffered_lines = None
self.nonce = None
def ProcessLine(self, line):
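    """Parses one logcat line, then filters, buffers, or prints it."""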
if not line or line.startswith('------'):
return
if self.nonce and self.nonce in line:
self._TriggerNonceFound()
nonce_found = self.nonce is None
log = self._ParseLine(line)
if log.pid not in self._seen_pids:
self._seen_pids.add(log.pid)
if nonce_found:
# Update list of owned PIDs each time a new PID is encountered.
self._UpdateMyPids()
# Search for "Start proc $pid:$package_name/" message.
if not nonce_found:
# Capture logs before the nonce. Start with the most recent "am start".
if self._start_pattern.match(log.message):
self._initial_buffered_lines = []
# If we didn't find the PID via "ps", then extract it from log messages.
# This will happen if the app crashes too quickly.
if not self._found_initial_pid:
m = self._pid_pattern.match(log.message)
if m:
# Find the most recent "Start proc" line before the nonce.
# Track only the primary pid in this mode.
# The main use-case is to find app logs when no current PIDs exist.
# E.g.: When the app crashes on launch.
self._primary_pid = m.group(1)
self._my_pids.clear()
self._my_pids.add(m.group(1))
owned_pid = log.pid in self._my_pids
if owned_pid and not self._verbose and log.tag == 'dalvikvm':
if self._DALVIK_IGNORE_PATTERN.match(log.message):
return
if owned_pid or self._verbose or (log.priority == 'F' or # Java crash dump
log.tag in self._ALLOWLISTED_TAGS):
if nonce_found:
self._native_stack_symbolizer.AddLine(log, not owned_pid)
else:
self._initial_buffered_lines.append((log, not owned_pid))
def _RunLogcat(device, package_name, stack_script_context, deobfuscate,
verbose):
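  """Streams "adb logcat", running each line through a _LogcatProcessor."""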
logcat_processor = _LogcatProcessor(
device, package_name, stack_script_context, deobfuscate, verbose)
device.RunShellCommand(['log', logcat_processor.nonce])
for line in device.adb.Logcat(logcat_format='threadtime'):
try:
logcat_processor.ProcessLine(line)
except:
sys.stderr.write('Failed to process line: ' + line + '\n')
# Skip stack trace for the common case of the adb server being
# restarted.
if 'unexpected EOF' in line:
sys.exit(1)
raise
def _GetPackageProcesses(device, package_name):
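  """Returns the package's processes (its main process and :sub-processes)."""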
return [
p for p in device.ListProcesses(package_name)
if p.name == package_name or p.name.startswith(package_name + ':')]
def _RunPs(devices, package_name):
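  """Prints PIDs of the app's currently-running processes on each device."""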
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_processes = parallel_devices.pMap(
lambda d: _GetPackageProcesses(d, package_name)).pGet(None)
for processes in _PrintPerDeviceOutput(devices, all_processes):
if not processes:
print('No processes found.')
else:
proc_map = collections.defaultdict(list)
for p in processes:
proc_map[p.name].append(str(p.pid))
for name, pids in sorted(proc_map.items()):
print(name, ','.join(pids))
def _RunShell(devices, package_name, cmd):
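  """Runs |cmd| via run-as on each device, or execs an interactive shell."""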
if cmd:
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(
cmd, run_as=package_name).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print(line)
else:
adb_path = adb_wrapper.AdbWrapper.GetAdbPath()
cmd = [adb_path, '-s', devices[0].serial, 'shell']
# Pre-N devices do not support -t flag.
if devices[0].build_version_sdk >= version_codes.NOUGAT:
cmd += ['-t', 'run-as', package_name]
else:
print('Upon entering the shell, run:')
print('run-as', package_name)
print()
os.execv(adb_path, cmd)
def _RunCompileDex(devices, package_name, compilation_filter):
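  """Runs "cmd package compile" with the given filter and prints the output."""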
cmd = ['cmd', 'package', 'compile', '-f', '-m', compilation_filter,
package_name]
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(cmd, timeout=120).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print(line)
def _RunProfile(device, package_name, host_build_directory, pprof_out_path,
process_specifier, thread_specifier, extra_args):
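  """Profiles the app with simpleperf and converts the data to pprof."""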
simpleperf.PrepareDevice(device)
device_simpleperf_path = simpleperf.InstallSimpleperf(device, package_name)
with tempfile.NamedTemporaryFile() as fh:
host_simpleperf_out_path = fh.name
with simpleperf.RunSimpleperf(device, device_simpleperf_path, package_name,
process_specifier, thread_specifier,
extra_args, host_simpleperf_out_path):
sys.stdout.write('Profiler is running; press Enter to stop...')
sys.stdin.read(1)
sys.stdout.write('Post-processing data...')
sys.stdout.flush()
simpleperf.ConvertSimpleperfToPprof(host_simpleperf_out_path,
host_build_directory, pprof_out_path)
print(textwrap.dedent("""
Profile data written to %(s)s.
To view profile as a call graph in browser:
pprof -web %(s)s
To print the hottest methods:
pprof -top %(s)s
pprof has many useful customization options; `pprof --help` for details.
""" % {'s': pprof_out_path}))
class _StackScriptContext:
"""Maintains temporary files needed by stack.py."""
def __init__(self,
output_directory,
apk_path,
bundle_generation_info,
quiet=False):
self._output_directory = output_directory
self._apk_path = apk_path
self._bundle_generation_info = bundle_generation_info
self._staging_dir = None
self._quiet = quiet
def _CreateStaging(self):
# In many cases, stack decoding requires APKs to map trace lines to native
# libraries. Create a temporary directory, and either unpack a bundle's
# APKS into it, or simply symlink the standalone APK into it. This
# provides an unambiguous set of APK files for the stack decoding process
# to inspect.
logging.debug('Creating stack staging directory')
self._staging_dir = tempfile.mkdtemp()
bundle_generation_info = self._bundle_generation_info
if bundle_generation_info:
# TODO(wnwen): Use apk_helper instead.
_GenerateBundleApks(bundle_generation_info)
logging.debug('Extracting .apks file')
with zipfile.ZipFile(bundle_generation_info.bundle_apks_path, 'r') as z:
files_to_extract = [
f for f in z.namelist() if f.endswith('-master.apk')
]
z.extractall(self._staging_dir, files_to_extract)
elif self._apk_path:
# Otherwise an incremental APK and an empty apks directory is correct.
output = os.path.join(self._staging_dir, os.path.basename(self._apk_path))
os.symlink(self._apk_path, output)
def Close(self):
if self._staging_dir:
logging.debug('Clearing stack staging directory')
shutil.rmtree(self._staging_dir)
self._staging_dir = None
def Popen(self, input_file=None, **kwargs):
if self._staging_dir is None:
self._CreateStaging()
stack_script = os.path.join(
constants.host_paths.ANDROID_PLATFORM_DEVELOPMENT_SCRIPTS_PATH,
'stack.py')
cmd = [
stack_script, '--output-directory', self._output_directory,
'--apks-directory', self._staging_dir
]
if self._quiet:
cmd.append('--quiet')
if input_file:
cmd.append(input_file)
logging.info('Running stack.py')
return subprocess.Popen(cmd, universal_newlines=True, **kwargs)
def _GenerateAvailableDevicesMessage(devices):
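  """Returns a message listing attached devices and their descriptions."""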
devices_obj = device_utils.DeviceUtils.parallel(devices)
descriptions = devices_obj.pMap(lambda d: d.build_description).pGet(None)
msg = 'Available devices:\n'
for d, desc in zip(devices, descriptions):
msg += ' %s (%s)\n' % (d, desc)
return msg
# TODO(agrieve): Add "--all" to the MultipleDevicesError message and use it here.
def _GenerateMissingAllFlagMessage(devices):
return ('More than one device available. Use --all to select all devices, ' +
'or use --device to select a device by serial.\n\n' +
_GenerateAvailableDevicesMessage(devices))
def _DisplayArgs(devices, command_line_flags_file):
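  """Prints the contents of the command-line flags file on each device."""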
def flags_helper(d):
changer = flag_changer.FlagChanger(d, command_line_flags_file)
return changer.GetCurrentFlags()
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.pMap(flags_helper).pGet(None)
print('Existing flags per-device (via /data/local/tmp/{}):'.format(
command_line_flags_file))
for flags in _PrintPerDeviceOutput(devices, outputs, single_line=True):
quoted_flags = ' '.join(pipes.quote(f) for f in flags)
print(quoted_flags or 'No flags set.')
def _DeviceCachePath(device, output_directory):
file_name = 'device_cache_%s.json' % device.serial
return os.path.join(output_directory, file_name)
def _LoadDeviceCaches(devices, output_directory):
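  """Loads (then deletes) cached device state from the output directory."""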
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
if os.path.exists(cache_path):
logging.debug('Using device cache: %s', cache_path)
with open(cache_path) as f:
d.LoadCacheData(f.read())
# Delete the cached file so that any exceptions cause it to be cleared.
os.unlink(cache_path)
else:
logging.debug('No cache present for device: %s', d)
def _SaveDeviceCaches(devices, output_directory):
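  """Writes per-device state caches into the output directory."""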
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
with open(cache_path, 'w') as f:
f.write(d.DumpCacheData())
logging.info('Wrote device cache: %s', cache_path)
class _Command:
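  """Base class for all commands; handles argument registration and setup."""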
name = None
description = None
long_description = None
needs_package_name = False
needs_output_directory = False
needs_apk_helper = False
supports_incremental = False
accepts_command_line_flags = False
accepts_args = False
need_device_args = True
all_devices_by_default = False
calls_exec = False
supports_multiple_devices = True
def __init__(self, from_wrapper_script, is_bundle):
self._parser = None
self._from_wrapper_script = from_wrapper_script
self.args = None
self.apk_helper = None
self.additional_apk_helpers = None
self.install_dict = None
self.devices = None
self.is_bundle = is_bundle
self.bundle_generation_info = None
# Only support incremental install from APK wrapper scripts.
if is_bundle or not from_wrapper_script:
self.supports_incremental = False
def RegisterBundleGenerationInfo(self, bundle_generation_info):
self.bundle_generation_info = bundle_generation_info
def _RegisterExtraArgs(self, group):
pass
def RegisterArgs(self, parser):
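    """Creates this command's subparser and registers its arguments."""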
subp = parser.add_parser(
self.name, help=self.description,
description=self.long_description or self.description,
formatter_class=argparse.RawDescriptionHelpFormatter)
self._parser = subp
subp.set_defaults(command=self)
if self.need_device_args:
subp.add_argument('--all',
action='store_true',
default=self.all_devices_by_default,
help='Operate on all connected devices.',)
subp.add_argument('-d',
'--device',
action='append',
default=[],
dest='devices',
help='Target device for script to work on. Enter '
'multiple times for multiple devices.')
subp.add_argument('-v',
'--verbose',
action='count',
default=0,
dest='verbose_count',
help='Verbose level (multiple times for more)')
group = subp.add_argument_group('%s arguments' % self.name)
if self.needs_package_name:
# Three cases to consider here, since later code assumes
# self.args.package_name always exists, even if None:
#
# - Called from a bundle wrapper script, the package_name is already
# set through parser.set_defaults(), so don't call add_argument()
# to avoid overriding its value.
#
# - Called from an apk wrapper script. The --package-name argument
# should not appear, but self.args.package_name will be gleaned from
# the --apk-path file later.
#
# - Called directly, then --package-name is required on the command-line.
#
if not self.is_bundle:
group.add_argument(
'--package-name',
help=argparse.SUPPRESS if self._from_wrapper_script else (
"App's package name."))
if self.needs_apk_helper or self.needs_package_name:
# Adding this argument to the subparser would override the set_defaults()
      # value set on the parent parser (even if None).
if not self._from_wrapper_script and not self.is_bundle:
group.add_argument(
'--apk-path', required=self.needs_apk_helper, help='Path to .apk')
if self.supports_incremental:
group.add_argument('--incremental',
action='store_true',
default=False,
help='Always install an incremental apk.')
group.add_argument('--non-incremental',
action='store_true',
default=False,
help='Always install a non-incremental apk.')
# accepts_command_line_flags and accepts_args are mutually exclusive.
# argparse will throw if they are both set.
if self.accepts_command_line_flags:
group.add_argument(
'--args', help='Command-line flags. Use = to assign args.')
if self.accepts_args:
group.add_argument(
'--args', help='Extra arguments. Use = to assign args')
if not self._from_wrapper_script and self.accepts_command_line_flags:
# Provided by wrapper scripts.
group.add_argument(
'--command-line-flags-file',
help='Name of the command-line flags file')
self._RegisterExtraArgs(group)
def _CreateApkHelpers(self, args, incremental_apk_path, install_dict):
"""Returns true iff self.apk_helper was created and assigned."""
if self.apk_helper is None:
if args.apk_path:
self.apk_helper = apk_helper.ToHelper(args.apk_path)
elif incremental_apk_path:
self.install_dict = install_dict
self.apk_helper = apk_helper.ToHelper(incremental_apk_path)
elif self.is_bundle:
_GenerateBundleApks(self.bundle_generation_info)
self.apk_helper = apk_helper.ToHelper(
self.bundle_generation_info.bundle_apks_path)
if args.additional_apk_paths and self.additional_apk_helpers is None:
self.additional_apk_helpers = [
apk_helper.ToHelper(apk_path)
for apk_path in args.additional_apk_paths
]
return self.apk_helper is not None
def ProcessArgs(self, args):
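    """Validates args, creates APK helpers, and finds devices to use."""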
self.args = args
# Ensure these keys always exist. They are set by wrapper scripts, but not
# always added when not using wrapper scripts.
args.__dict__.setdefault('apk_path', None)
args.__dict__.setdefault('incremental_json', None)
incremental_apk_path = None
install_dict = None
if args.incremental_json and not (self.supports_incremental and
args.non_incremental):
with open(args.incremental_json) as f:
install_dict = json.load(f)
incremental_apk_path = os.path.join(args.output_directory,
install_dict['apk_path'])
if not os.path.exists(incremental_apk_path):
incremental_apk_path = None
if self.supports_incremental:
if args.incremental and args.non_incremental:
self._parser.error('Must use only one of --incremental and '
'--non-incremental')
elif args.non_incremental:
if not args.apk_path:
self._parser.error('Apk has not been built.')
elif args.incremental:
if not incremental_apk_path:
self._parser.error('Incremental apk has not been built.')
args.apk_path = None
if args.apk_path and incremental_apk_path:
self._parser.error('Both incremental and non-incremental apks exist. '
'Select using --incremental or --non-incremental')
# Gate apk_helper creation with _CreateApkHelpers since for bundles it takes
# a while to unpack the apks file from the aab file, so avoid this slowdown
# for simple commands that don't need apk_helper.
if self.needs_apk_helper:
if not self._CreateApkHelpers(args, incremental_apk_path, install_dict):
self._parser.error('App is not built.')
if self.needs_package_name and not args.package_name:
if self._CreateApkHelpers(args, incremental_apk_path, install_dict):
args.package_name = self.apk_helper.GetPackageName()
elif self._from_wrapper_script:
self._parser.error('App is not built.')
else:
self._parser.error('One of --package-name or --apk-path is required.')
self.devices = []
if self.need_device_args:
abis = None
if self._CreateApkHelpers(args, incremental_apk_path, install_dict):
abis = self.apk_helper.GetAbis()
self.devices = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.devices,
enable_device_files_cache=bool(args.output_directory),
default_retries=0,
abis=abis)
# TODO(agrieve): Device cache should not depend on output directory.
# Maybe put into /tmp?
_LoadDeviceCaches(self.devices, args.output_directory)
try:
if len(self.devices) > 1:
if not self.supports_multiple_devices:
self._parser.error(device_errors.MultipleDevicesError(self.devices))
if not args.all and not args.devices:
self._parser.error(_GenerateMissingAllFlagMessage(self.devices))
# Save cache now if command will not get a chance to afterwards.
if self.calls_exec:
_SaveDeviceCaches(self.devices, args.output_directory)
except:
_SaveDeviceCaches(self.devices, args.output_directory)
raise
class _DevicesCommand(_Command):
name = 'devices'
description = 'Describe attached devices.'
all_devices_by_default = True
def Run(self):
print(_GenerateAvailableDevicesMessage(self.devices))
class _PackageInfoCommand(_Command):
name = 'package-info'
description = 'Show various attributes of this app.'
need_device_args = False
needs_package_name = True
needs_apk_helper = True
def Run(self):
# Format all (even ints) as strings, to handle cases where APIs return None
print('Package name: "%s"' % self.args.package_name)
print('versionCode: %s' % self.apk_helper.GetVersionCode())
print('versionName: "%s"' % self.apk_helper.GetVersionName())
print('minSdkVersion: %s' % self.apk_helper.GetMinSdkVersion())
print('targetSdkVersion: %s' % self.apk_helper.GetTargetSdkVersion())
print('Supported ABIs: %r' % self.apk_helper.GetAbis())
class _InstallCommand(_Command):
name = 'install'
description = 'Installs the APK or bundle to one or more devices.'
needs_apk_helper = True
supports_incremental = True
default_modules = []
def _RegisterExtraArgs(self, group):
if self.is_bundle:
group.add_argument(
'-m',
'--module',
action='append',
default=self.default_modules,
help='Module to install. Can be specified multiple times.')
group.add_argument(
'-f',
'--fake',
action='append',
default=[],
help='Fake bundle module install. Can be specified multiple times. '
'Requires \'-m {0}\' to be given, and \'-f {0}\' is illegal.'.format(
BASE_MODULE))
# Add even if |self.default_modules| is empty, for consistency.
group.add_argument('--no-module',
action='append',
choices=self.default_modules,
default=[],
help='Module to exclude from default install.')
def Run(self):
if self.additional_apk_helpers:
for additional_apk_helper in self.additional_apk_helpers:
_InstallApk(self.devices, additional_apk_helper, None)
if self.is_bundle:
modules = list(
set(self.args.module) - set(self.args.no_module) -
set(self.args.fake))
_InstallBundle(self.devices, self.apk_helper, modules, self.args.fake)
else:
_InstallApk(self.devices, self.apk_helper, self.install_dict)
class _UninstallCommand(_Command):
name = 'uninstall'
description = 'Removes the APK or bundle from one or more devices.'
needs_package_name = True
def Run(self):
_UninstallApk(self.devices, self.install_dict, self.args.package_name)
class _SetWebViewProviderCommand(_Command):
name = 'set-webview-provider'
description = ("Sets the device's WebView provider to this APK's "
"package name.")
needs_package_name = True
needs_apk_helper = True
def Run(self):
if not _IsWebViewProvider(self.apk_helper):
raise Exception('This package does not have a WebViewLibrary meta-data '
'tag. Are you sure it contains a WebView implementation?')
_SetWebViewProvider(self.devices, self.args.package_name)
class _LaunchCommand(_Command):
name = 'launch'
description = ('Sends a launch intent for the APK or bundle after first '
'writing the command-line flags file.')
needs_package_name = True
accepts_command_line_flags = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument('-w', '--wait-for-java-debugger', action='store_true',
help='Pause execution until debugger attaches. Applies '
'only to the main process. To have renderers wait, '
'use --args="--renderer-wait-for-java-debugger"')
group.add_argument('--debug-process-name',
help='Name of the process to debug. '
'E.g. "privileged_process0", or "foo.bar:baz"')
group.add_argument('--nokill', action='store_true',
help='Do not set the debug-app, nor set command-line '
'flags. Useful to load a URL without having the '
'app restart.')
group.add_argument('url', nargs='?', help='A URL to launch with.')
def Run(self):
if self.args.url and self.is_bundle:
# TODO(digit): Support this, maybe by using 'dumpsys' as described
# in the _LaunchUrl() comment.
raise Exception('Launching with URL not supported for bundles yet!')
_LaunchUrl(self.devices, self.args.package_name, argv=self.args.args,
command_line_flags_file=self.args.command_line_flags_file,
url=self.args.url, apk=self.apk_helper,
wait_for_java_debugger=self.args.wait_for_java_debugger,
debug_process_name=self.args.debug_process_name,
nokill=self.args.nokill)
class _StopCommand(_Command):
name = 'stop'
description = 'Force-stops the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ForceStop(
self.args.package_name)
class _ClearDataCommand(_Command):
name = 'clear-data'
  description = 'Clears all app data.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ClearApplicationState(
self.args.package_name)
class _ArgvCommand(_Command):
name = 'argv'
description = 'Display and optionally update command-line flags file.'
needs_package_name = True
accepts_command_line_flags = True
all_devices_by_default = True
def Run(self):
_ChangeFlags(self.devices, self.args.args,
self.args.command_line_flags_file)
class _GdbCommand(_Command):
name = 'gdb'
description = 'Runs //build/android/adb_gdb with apk-specific args.'
long_description = description + """
To attach to a process other than the APK's main process, use --pid=1234.
To list all PIDs, use the "ps" command.
If no apk process is currently running, sends a launch intent.
"""
needs_package_name = True
needs_output_directory = True
calls_exec = True
supports_multiple_devices = False
def Run(self):
_RunGdb(self.devices[0], self.args.package_name,
self.args.debug_process_name, self.args.pid,
self.args.output_directory, self.args.target_cpu, self.args.port,
self.args.ide, bool(self.args.verbose_count))
def _RegisterExtraArgs(self, group):
pid_group = group.add_mutually_exclusive_group()
pid_group.add_argument('--debug-process-name',
help='Name of the process to attach to. '
'E.g. "privileged_process0", or "foo.bar:baz"')
pid_group.add_argument('--pid',
help='The process ID to attach to. Defaults to '
'the main process for the package.')
group.add_argument('--ide', action='store_true',
help='Rather than enter a gdb prompt, set up the '
'gdb connection and wait for an IDE to '
'connect.')
# Same default port that ndk-gdb.py uses.
group.add_argument('--port', type=int, default=5039,
help='Use the given port for the GDB connection')
class _LogcatCommand(_Command):
name = 'logcat'
  description = 'Runs "adb logcat" with filters relevant to the current APK.'
long_description = description + """
"Relevant filters" means:
* Log messages from processes belonging to the apk,
* Plus log messages from log tags: ActivityManager|DEBUG,
* Plus fatal logs from any process,
  * Minus spammy dalvikvm logs (for pre-L devices).
Colors:
* Primary process is white
* Other processes (gpu, renderer) are yellow
* Non-apk processes are grey
* UI thread has a bolded Thread-ID
Java stack traces are detected and deobfuscated (for release builds).
To disable filtering, (but keep coloring), use --verbose.
"""
needs_package_name = True
supports_multiple_devices = False
def Run(self):
deobfuscate = None
if self.args.proguard_mapping_path and not self.args.no_deobfuscate:
deobfuscate = deobfuscator.Deobfuscator(self.args.proguard_mapping_path)
stack_script_context = _StackScriptContext(
self.args.output_directory,
self.args.apk_path,
self.bundle_generation_info,
quiet=True)
try:
_RunLogcat(self.devices[0], self.args.package_name, stack_script_context,
deobfuscate, bool(self.args.verbose_count))
except KeyboardInterrupt:
pass # Don't show stack trace upon Ctrl-C
finally:
stack_script_context.Close()
if deobfuscate:
deobfuscate.Close()
def _RegisterExtraArgs(self, group):
if self._from_wrapper_script:
group.add_argument('--no-deobfuscate', action='store_true',
help='Disables ProGuard deobfuscation of logcat.')
else:
group.set_defaults(no_deobfuscate=False)
group.add_argument('--proguard-mapping-path',
help='Path to ProGuard map (enables deobfuscation)')
class _PsCommand(_Command):
name = 'ps'
description = 'Show PIDs of any APK processes currently running.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunPs(self.devices, self.args.package_name)
class _DiskUsageCommand(_Command):
name = 'disk-usage'
description = 'Show how much device storage is being consumed by the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunDiskUsage(self.devices, self.args.package_name)
class _MemUsageCommand(_Command):
name = 'mem-usage'
description = 'Show memory usage of currently running APK processes.'
needs_package_name = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument('--query-app', action='store_true',
help='Do not add --local to "dumpsys meminfo". This will output '
'additional metrics (e.g. Context count), but also cause memory '
'to be used in order to gather the metrics.')
def Run(self):
_RunMemUsage(self.devices, self.args.package_name,
query_app=self.args.query_app)
class _ShellCommand(_Command):
name = 'shell'
description = ('Same as "adb shell <command>", but runs as the apk\'s uid '
'(via run-as). Useful for inspecting the app\'s data '
'directory.')
needs_package_name = True
@property
def calls_exec(self):
return not self.args.cmd
@property
def supports_multiple_devices(self):
return not self.args.cmd
def _RegisterExtraArgs(self, group):
group.add_argument(
'cmd', nargs=argparse.REMAINDER, help='Command to run.')
def Run(self):
_RunShell(self.devices, self.args.package_name, self.args.cmd)
class _CompileDexCommand(_Command):
name = 'compile-dex'
description = ('Applicable only for Android N+. Forces .odex files to be '
'compiled with the given compilation filter. To see existing '
'filter, use "disk-usage" command.')
needs_package_name = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument(
'compilation_filter',
choices=['verify', 'quicken', 'space-profile', 'space',
'speed-profile', 'speed'],
help='For WebView/Monochrome, use "speed". For other apks, use '
'"speed-profile".')
def Run(self):
_RunCompileDex(self.devices, self.args.package_name,
self.args.compilation_filter)
class _PrintCertsCommand(_Command):
name = 'print-certs'
description = 'Print info about certificates used to sign this APK.'
need_device_args = False
needs_apk_helper = True
def _RegisterExtraArgs(self, group):
group.add_argument(
'--full-cert',
action='store_true',
help=("Print the certificate's full signature, Base64-encoded. "
"Useful when configuring an Android image's "
"config_webview_packages.xml."))
def Run(self):
keytool = os.path.join(_JAVA_HOME, 'bin', 'keytool')
if self.is_bundle:
# Bundles are not signed until converted to .apks. The wrapper scripts
# record which key will be used to sign though.
with tempfile.NamedTemporaryFile() as f:
logging.warning('Bundles are not signed until turned into .apk files.')
logging.warning('Showing signing info based on associated keystore.')
cmd = [
keytool, '-exportcert', '-keystore',
self.bundle_generation_info.keystore_path, '-storepass',
self.bundle_generation_info.keystore_password, '-alias',
self.bundle_generation_info.keystore_alias, '-file', f.name
]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
cmd = [keytool, '-printcert', '-file', f.name]
logging.warning('Running: %s', ' '.join(cmd))
subprocess.check_call(cmd)
if self.args.full_cert:
# Redirect stderr to hide a keytool warning about using non-standard
# keystore format.
full_output = subprocess.check_output(
            cmd + ['-rfc'], stderr=subprocess.STDOUT).decode()
else:
cmd = [
build_tools.GetPath('apksigner'), 'verify', '--print-certs',
'--verbose', self.apk_helper.path
]
logging.warning('Running: %s', ' '.join(cmd))
env = os.environ.copy()
env['PATH'] = os.path.pathsep.join(
[os.path.join(_JAVA_HOME, 'bin'),
env.get('PATH')])
      stdout = subprocess.check_output(cmd, env=env).decode()
print(stdout)
if self.args.full_cert:
if 'v1 scheme (JAR signing): true' not in stdout:
raise Exception(
'Cannot print full certificate because apk is not V1 signed.')
cmd = [keytool, '-printcert', '-jarfile', self.apk_helper.path, '-rfc']
# Redirect stderr to hide a keytool warning about using non-standard
# keystore format.
        full_output = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT).decode()
if self.args.full_cert:
m = re.search(
r'-+BEGIN CERTIFICATE-+([\r\n0-9A-Za-z+/=]+)-+END CERTIFICATE-+',
full_output, re.MULTILINE)
if not m:
raise Exception('Unable to parse certificate:\n{}'.format(full_output))
signature = re.sub(r'[\r\n]+', '', m.group(1))
print()
print('Full Signature:')
print(signature)
class _ProfileCommand(_Command):
name = 'profile'
description = ('Run the simpleperf sampling CPU profiler on the currently-'
'running APK. If --args is used, the extra arguments will be '
'passed on to simpleperf; otherwise, the following default '
'arguments are used: -g -f 1000 -o /data/local/tmp/perf.data')
needs_package_name = True
needs_output_directory = True
supports_multiple_devices = False
accepts_args = True
def _RegisterExtraArgs(self, group):
group.add_argument(
'--profile-process', default='browser',
help=('Which process to profile. This may be a process name or pid '
'such as you would get from running `%s ps`; or '
'it can be one of (browser, renderer, gpu).' % sys.argv[0]))
group.add_argument(
'--profile-thread', default=None,
help=('(Optional) Profile only a single thread. This may be either a '
'thread ID such as you would get by running `adb shell ps -t` '
'(pre-Oreo) or `adb shell ps -e -T` (Oreo and later); or it may '
'be one of (io, compositor, main, render), in which case '
'--profile-process is also required. (Note that "render" thread '
'refers to a thread in the browser process that manages a '
'renderer; to profile the main thread of the renderer process, '
'use --profile-thread=main).'))
group.add_argument('--profile-output', default='profile.pb',
help='Output file for profiling data')
def Run(self):
extra_args = shlex.split(self.args.args or '')
_RunProfile(self.devices[0], self.args.package_name,
self.args.output_directory, self.args.profile_output,
self.args.profile_process, self.args.profile_thread,
extra_args)
class _RunCommand(_InstallCommand, _LaunchCommand, _LogcatCommand):
name = 'run'
description = 'Install, launch, and show logcat (when targeting one device).'
all_devices_by_default = False
supports_multiple_devices = True
def _RegisterExtraArgs(self, group):
_InstallCommand._RegisterExtraArgs(self, group)
_LaunchCommand._RegisterExtraArgs(self, group)
_LogcatCommand._RegisterExtraArgs(self, group)
group.add_argument('--no-logcat', action='store_true',
help='Install and launch, but do not enter logcat.')
def Run(self):
logging.warning('Installing...')
_InstallCommand.Run(self)
logging.warning('Sending launch intent...')
_LaunchCommand.Run(self)
if len(self.devices) == 1 and not self.args.no_logcat:
logging.warning('Entering logcat...')
_LogcatCommand.Run(self)
class _BuildBundleApks(_Command):
name = 'build-bundle-apks'
description = ('Build the .apks archive from an Android app bundle, and '
'optionally copy it to a specific destination.')
need_device_args = False
def _RegisterExtraArgs(self, group):
group.add_argument(
'--output-apks', required=True, help='Destination path for .apks file.')
group.add_argument(
'--minimal',
action='store_true',
help='Build .apks archive that targets the bundle\'s minSdkVersion and '
        'contains only English splits. It still contains optional splits.')
group.add_argument(
'--sdk-version', help='The sdkVersion to build the .apks for.')
group.add_argument(
'--build-mode',
choices=app_bundle_utils.BUILD_APKS_MODES,
help='Specify which type of APKs archive to build. "default" '
'generates regular splits, "universal" generates an archive with a '
'single universal APK, "system" generates an archive with a system '
'image APK, while "system_compressed" generates a compressed system '
'APK, with an additional stub APK for the system image.')
group.add_argument(
'--optimize-for',
choices=app_bundle_utils.OPTIMIZE_FOR_OPTIONS,
help='Override split configuration.')
def Run(self):
_GenerateBundleApks(
self.bundle_generation_info,
output_path=self.args.output_apks,
minimal=self.args.minimal,
minimal_sdk_version=self.args.sdk_version,
mode=self.args.build_mode,
optimize_for=self.args.optimize_for)
class _ManifestCommand(_Command):
name = 'dump-manifest'
  description = 'Dump the Android manifest from this bundle, as XML, to stdout.'
need_device_args = False
def Run(self):
sys.stdout.write(
bundletool.RunBundleTool([
'dump', 'manifest', '--bundle',
self.bundle_generation_info.bundle_path
]))
class _StackCommand(_Command):
name = 'stack'
description = 'Decodes an Android stack.'
need_device_args = False
def _RegisterExtraArgs(self, group):
group.add_argument(
'file',
nargs='?',
help='File to decode. If not specified, stdin is processed.')
def Run(self):
context = _StackScriptContext(self.args.output_directory,
self.args.apk_path,
self.bundle_generation_info)
try:
proc = context.Popen(input_file=self.args.file)
if proc.wait():
raise Exception('stack script returned {}'.format(proc.returncode))
finally:
context.Close()
# Shared commands for regular APKs and app bundles.
_COMMANDS = [
_DevicesCommand,
_PackageInfoCommand,
_InstallCommand,
_UninstallCommand,
_SetWebViewProviderCommand,
_LaunchCommand,
_StopCommand,
_ClearDataCommand,
_ArgvCommand,
_GdbCommand,
_LogcatCommand,
_PsCommand,
_DiskUsageCommand,
_MemUsageCommand,
_ShellCommand,
_CompileDexCommand,
_PrintCertsCommand,
_ProfileCommand,
_RunCommand,
_StackCommand,
]
# Commands specific to app bundles.
_BUNDLE_COMMANDS = [
_BuildBundleApks,
_ManifestCommand,
]
def _ParseArgs(parser, from_wrapper_script, is_bundle):
subparsers = parser.add_subparsers()
command_list = _COMMANDS + (_BUNDLE_COMMANDS if is_bundle else [])
commands = [clazz(from_wrapper_script, is_bundle) for clazz in command_list]
for command in commands:
if from_wrapper_script or not command.needs_output_directory:
command.RegisterArgs(subparsers)
# Show extended help when no command is passed.
argv = sys.argv[1:]
if not argv:
argv = ['--help']
return parser.parse_args(argv)
def _RunInternal(parser,
output_directory=None,
additional_apk_paths=None,
bundle_generation_info=None):
colorama.init()
parser.set_defaults(
additional_apk_paths=additional_apk_paths,
output_directory=output_directory)
from_wrapper_script = bool(output_directory)
args = _ParseArgs(parser, from_wrapper_script, bool(bundle_generation_info))
run_tests_helper.SetLogLevel(args.verbose_count)
if bundle_generation_info:
args.command.RegisterBundleGenerationInfo(bundle_generation_info)
if args.additional_apk_paths:
for path in additional_apk_paths:
if not path or not os.path.exists(path):
raise Exception('Invalid additional APK path "{}"'.format(path))
args.command.ProcessArgs(args)
args.command.Run()
# Incremental install depends on the cache being cleared when uninstalling.
if args.command.name != 'uninstall':
_SaveDeviceCaches(args.command.devices, output_directory)
def Run(output_directory, apk_path, additional_apk_paths, incremental_json,
command_line_flags_file, target_cpu, proguard_mapping_path):
"""Entry point for generated wrapper scripts."""
constants.SetOutputDirectory(output_directory)
devil_chromium.Initialize(output_directory=output_directory)
parser = argparse.ArgumentParser()
exists_or_none = lambda p: p if p and os.path.exists(p) else None
parser.set_defaults(
command_line_flags_file=command_line_flags_file,
target_cpu=target_cpu,
apk_path=exists_or_none(apk_path),
incremental_json=exists_or_none(incremental_json),
proguard_mapping_path=proguard_mapping_path)
_RunInternal(
parser,
output_directory=output_directory,
additional_apk_paths=additional_apk_paths)
def RunForBundle(output_directory, bundle_path, bundle_apks_path,
additional_apk_paths, aapt2_path, keystore_path,
keystore_password, keystore_alias, package_name,
command_line_flags_file, proguard_mapping_path, target_cpu,
system_image_locales, default_modules):
"""Entry point for generated app bundle wrapper scripts.
Args:
    output_directory: Chromium output directory path.
bundle_path: Input bundle path.
bundle_apks_path: Output bundle .apks archive path.
additional_apk_paths: Additional APKs to install prior to bundle install.
aapt2_path: Aapt2 tool path.
keystore_path: Keystore file path.
keystore_password: Keystore password.
keystore_alias: Signing key name alias in keystore file.
package_name: Application's package name.
command_line_flags_file: Optional. Name of an on-device file that will be
used to store command-line flags for this bundle.
proguard_mapping_path: Input path to the Proguard mapping file, used to
deobfuscate Java stack traces.
target_cpu: Chromium target CPU name, used by the 'gdb' command.
system_image_locales: List of Chromium locales that should be included in
system image APKs.
default_modules: List of modules that are installed in addition to those
given by the '-m' switch.
"""
constants.SetOutputDirectory(output_directory)
devil_chromium.Initialize(output_directory=output_directory)
bundle_generation_info = BundleGenerationInfo(
bundle_path=bundle_path,
bundle_apks_path=bundle_apks_path,
aapt2_path=aapt2_path,
keystore_path=keystore_path,
keystore_password=keystore_password,
keystore_alias=keystore_alias,
system_image_locales=system_image_locales)
_InstallCommand.default_modules = default_modules
parser = argparse.ArgumentParser()
parser.set_defaults(
package_name=package_name,
command_line_flags_file=command_line_flags_file,
proguard_mapping_path=proguard_mapping_path,
target_cpu=target_cpu)
_RunInternal(
parser,
output_directory=output_directory,
additional_apk_paths=additional_apk_paths,
bundle_generation_info=bundle_generation_info)
def main():
devil_chromium.Initialize()
_RunInternal(argparse.ArgumentParser())
if __name__ == '__main__':
main()
| ric2b/Vivaldi-browser | chromium/build/android/apk_operations.py | Python | bsd-3-clause | 73,936 | 0.009157 |
__version__ = '20.0.0'
| ofek/pypinfo | pypinfo/__init__.py | Python | mit | 23 | 0 |
""" Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
import struct
import sys
import unittest
import warnings
from test import support, string_tests
import _string
# Error handling (bad decoder return)
def search_function(encoding):
def decode1(input, errors="strict"):
return 42 # not a tuple
def encode1(input, errors="strict"):
return 42 # not a tuple
def encode2(input, errors="strict"):
return (42, 42) # no unicode
def decode2(input, errors="strict"):
return (42, 42) # no unicode
if encoding=="test.unicode1":
return (encode1, decode1, None, None)
elif encoding=="test.unicode2":
return (encode2, decode2, None, None)
else:
return None
codecs.register(search_function)
class UnicodeTest(string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest):
type2test = str
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(str):
def __repr__(self):
return 'usub(%r)' % str.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(object is not realresult)
def test_literals(self):
self.assertEqual('\xff', '\u00ff')
self.assertEqual('\uffff', '\U0000ffff')
self.assertRaises(SyntaxError, eval, '\'\\Ufffffffe\'')
self.assertRaises(SyntaxError, eval, '\'\\Uffffffff\'')
self.assertRaises(SyntaxError, eval, '\'\\U%08x\'' % 0x110000)
# raw strings should not have unicode escapes
self.assertNotEqual(r"\u0020", " ")
def test_ascii(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(ascii('abc'), "'abc'")
self.assertEqual(ascii('ab\\c'), "'ab\\\\c'")
self.assertEqual(ascii('ab\\'), "'ab\\\\'")
self.assertEqual(ascii('\\c'), "'\\\\c'")
self.assertEqual(ascii('\\'), "'\\\\'")
self.assertEqual(ascii('\n'), "'\\n'")
self.assertEqual(ascii('\r'), "'\\r'")
self.assertEqual(ascii('\t'), "'\\t'")
self.assertEqual(ascii('\b'), "'\\x08'")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'"), '''"'"''')
self.assertEqual(ascii('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = ascii(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test ascii works on wide unicode escapes without overflow.
self.assertEqual(ascii("\U00010000" * 39 + "\uffff" * 4096),
ascii("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, ascii, WrongRepr())
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr('abc'), "'abc'")
self.assertEqual(repr('ab\\c'), "'ab\\\\c'")
self.assertEqual(repr('ab\\'), "'ab\\\\'")
self.assertEqual(repr('\\c'), "'\\\\c'")
self.assertEqual(repr('\\'), "'\\\\'")
self.assertEqual(repr('\n'), "'\\n'")
self.assertEqual(repr('\r'), "'\\r'")
self.assertEqual(repr('\t'), "'\\t'")
self.assertEqual(repr('\b'), "'\\x08'")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'"), '''"'"''')
self.assertEqual(repr('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9"
"\xaa\xab\xac\\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5"
"\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3"
"\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1"
"\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd"
"\xfe\xff'")
testrepr = repr(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test repr works on wide unicode escapes without overflow.
self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
repr("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, repr, WrongRepr())
def test_iterators(self):
# Make sure unicode objects have an __iter__ method
it = "\u1111\u2222\u3333".__iter__()
self.assertEqual(next(it), "\u1111")
self.assertEqual(next(it), "\u2222")
self.assertEqual(next(it), "\u3333")
self.assertRaises(StopIteration, next, it)
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(1, 'aaa', 'count', 'a', -1)
self.checkequalnofix(3, 'aaa', 'count', 'a', -10)
self.checkequalnofix(2, 'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, 'aaa', 'count', 'a', 0, -10)
def test_find(self):
self.checkequalnofix(0, 'abcdefghiabc', 'find', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequalnofix(-1, 'abcdefghiabc', 'find', 'def', 4)
self.assertRaises(TypeError, 'hello'.find)
self.assertRaises(TypeError, 'hello'.find, 42)
def test_rfind(self):
string_tests.CommonTest.test_rfind(self)
# check mixed argument types
self.checkequalnofix(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
def test_index(self):
string_tests.CommonTest.test_index(self)
self.checkequalnofix(0, 'abcdefghiabc', 'index', '')
self.checkequalnofix(3, 'abcdefghiabc', 'index', 'def')
self.checkequalnofix(0, 'abcdefghiabc', 'index', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'index', 'abc', 1)
self.assertRaises(ValueError, 'abcdefghiabc'.index, 'hib')
self.assertRaises(ValueError, 'abcdefghiab'.index, 'abc', 1)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', 8)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', -1)
def test_rindex(self):
string_tests.CommonTest.test_rindex(self)
self.checkequalnofix(12, 'abcdefghiabc', 'rindex', '')
self.checkequalnofix(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequalnofix(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequalnofix(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghiabc'.rindex, 'hib')
self.assertRaises(ValueError, 'defghiabc'.rindex, 'def', 1)
self.assertRaises(ValueError, 'defghiabc'.rindex, 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, 8)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, -1)
def test_maketrans_translate(self):
# these work with plain translate()
self.checkequalnofix('bbbc', 'abababc', 'translate',
{ord('a'): None})
self.checkequalnofix('iiic', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i')})
self.checkequalnofix('iiix', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
self.checkequalnofix('c', 'abababc', 'translate',
{ord('a'): None, ord('b'): ''})
self.checkequalnofix('xyyx', 'xzx', 'translate',
{ord('z'): 'yy'})
# this needs maketrans()
self.checkequalnofix('abababc', 'abababc', 'translate',
{'b': '<i>'})
tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
# test alternative way of calling maketrans()
tbl = self.type2test.maketrans('abc', 'xyz', 'd')
self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)
self.assertRaises(TypeError, self.type2test.maketrans)
self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})
self.assertRaises(TypeError, 'hello'.translate)
self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')
def test_split(self):
string_tests.CommonTest.test_split(self)
# Mixed arguments
self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequalnofix(['endcase ', ''], 'endcase test', 'split', 'test')
def test_join(self):
string_tests.MixinStrUnicodeUserStringTest.test_join(self)
class MyWrapper:
def __init__(self, sval): self.sval = sval
def __str__(self): return self.sval
# mixed arguments
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkraises(TypeError, ' ', 'join', ['1', '2', MyWrapper('foo')])
self.checkraises(TypeError, ' ', 'join', ['1', '2', '3', bytes()])
self.checkraises(TypeError, ' ', 'join', [1, 2, 3])
self.checkraises(TypeError, ' ', 'join', ['1', '2', 3])
def test_replace(self):
string_tests.CommonTest.test_replace(self)
# method call forwarded from str implementation because of unicode argument
self.checkequalnofix('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.assertRaises(TypeError, 'replace'.replace, "r", 42)
def test_bytes_comparison(self):
with support.check_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertEqual('abc' == b'abc', False)
self.assertEqual('abc' != b'abc', True)
self.assertEqual('abc' == bytearray(b'abc'), False)
self.assertEqual('abc' != bytearray(b'abc'), True)
def test_comparison(self):
# Comparisons:
self.assertEqual('abc', 'abc')
self.assertTrue('abcd' > 'abc')
self.assertTrue('abc' < 'abcd')
if 0:
# Move these tests to a Unicode collation module test...
# Testing UTF-16 code point order comparisons...
# No surrogates, no fixup required.
self.assertTrue('\u0061' < '\u20ac')
# Non surrogate below surrogate value, no fixup required
self.assertTrue('\u0061' < '\ud800\udc02')
# Non surrogate above surrogate value, fixup required
def test_lecmp(s, s2):
self.assertTrue(s < s2)
def test_fixup(s):
s2 = '\ud800\udc01'
test_lecmp(s, s2)
s2 = '\ud900\udc01'
test_lecmp(s, s2)
s2 = '\uda00\udc01'
test_lecmp(s, s2)
s2 = '\udb00\udc01'
test_lecmp(s, s2)
s2 = '\ud800\udd01'
test_lecmp(s, s2)
s2 = '\ud900\udd01'
test_lecmp(s, s2)
s2 = '\uda00\udd01'
test_lecmp(s, s2)
s2 = '\udb00\udd01'
test_lecmp(s, s2)
s2 = '\ud800\ude01'
test_lecmp(s, s2)
s2 = '\ud900\ude01'
test_lecmp(s, s2)
s2 = '\uda00\ude01'
test_lecmp(s, s2)
s2 = '\udb00\ude01'
test_lecmp(s, s2)
s2 = '\ud800\udfff'
test_lecmp(s, s2)
s2 = '\ud900\udfff'
test_lecmp(s, s2)
s2 = '\uda00\udfff'
test_lecmp(s, s2)
s2 = '\udb00\udfff'
test_lecmp(s, s2)
test_fixup('\ue000')
test_fixup('\uff61')
# Surrogates on both sides, no fixup required
self.assertTrue('\ud800\udc02' < '\ud84d\udc56')
def test_islower(self):
string_tests.MixinStrUnicodeUserStringTest.test_islower(self)
self.checkequalnofix(False, '\u1FFc', 'islower')
def test_isupper(self):
string_tests.MixinStrUnicodeUserStringTest.test_isupper(self)
if not sys.platform.startswith('java'):
self.checkequalnofix(False, '\u1FFc', 'isupper')
def test_istitle(self):
string_tests.MixinStrUnicodeUserStringTest.test_title(self)
self.checkequalnofix(True, '\u1FFc', 'istitle')
self.checkequalnofix(True, 'Greek \u1FFcitlecases ...', 'istitle')
def test_isspace(self):
string_tests.MixinStrUnicodeUserStringTest.test_isspace(self)
self.checkequalnofix(True, '\u2000', 'isspace')
self.checkequalnofix(True, '\u200a', 'isspace')
self.checkequalnofix(False, '\u2014', 'isspace')
def test_isalpha(self):
string_tests.MixinStrUnicodeUserStringTest.test_isalpha(self)
self.checkequalnofix(True, '\u1FFc', 'isalpha')
def test_isdecimal(self):
self.checkequalnofix(False, '', 'isdecimal')
self.checkequalnofix(False, 'a', 'isdecimal')
self.checkequalnofix(True, '0', 'isdecimal')
self.checkequalnofix(False, '\u2460', 'isdecimal') # CIRCLED DIGIT ONE
self.checkequalnofix(False, '\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
self.checkequalnofix(True, '\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
self.checkequalnofix(True, '0123456789', 'isdecimal')
self.checkequalnofix(False, '0123456789a', 'isdecimal')
self.checkraises(TypeError, 'abc', 'isdecimal', 42)
def test_isdigit(self):
string_tests.MixinStrUnicodeUserStringTest.test_isdigit(self)
self.checkequalnofix(True, '\u2460', 'isdigit')
self.checkequalnofix(False, '\xbc', 'isdigit')
self.checkequalnofix(True, '\u0660', 'isdigit')
def test_isnumeric(self):
self.checkequalnofix(False, '', 'isnumeric')
self.checkequalnofix(False, 'a', 'isnumeric')
self.checkequalnofix(True, '0', 'isnumeric')
self.checkequalnofix(True, '\u2460', 'isnumeric')
self.checkequalnofix(True, '\xbc', 'isnumeric')
self.checkequalnofix(True, '\u0660', 'isnumeric')
self.checkequalnofix(True, '0123456789', 'isnumeric')
self.checkequalnofix(False, '0123456789a', 'isnumeric')
self.assertRaises(TypeError, "abc".isnumeric, 42)
def test_isidentifier(self):
self.assertTrue("a".isidentifier())
self.assertTrue("Z".isidentifier())
self.assertTrue("_".isidentifier())
self.assertTrue("b0".isidentifier())
self.assertTrue("bc".isidentifier())
self.assertTrue("b_".isidentifier())
self.assertTrue("µ".isidentifier())
self.assertTrue("𝔘𝔫𝔦𝔠𝔬𝔡𝔢".isidentifier())
self.assertFalse(" ".isidentifier())
self.assertFalse("[".isidentifier())
self.assertFalse("©".isidentifier())
self.assertFalse("0".isidentifier())
def test_isprintable(self):
self.assertTrue("".isprintable())
self.assertTrue(" ".isprintable())
self.assertTrue("abcdefg".isprintable())
self.assertFalse("abcdefg\n".isprintable())
# some defined Unicode character
self.assertTrue("\u0374".isprintable())
# undefined character
self.assertFalse("\u0378".isprintable())
# single surrogate character
self.assertFalse("\ud800".isprintable())
def test_contains(self):
# Testing Unicode contains method
self.assertIn('a', 'abdb')
self.assertIn('a', 'bdab')
self.assertIn('a', 'bdaba')
self.assertIn('a', 'bdba')
self.assertNotIn('a', 'bdb')
self.assertIn('a', 'bdba')
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertNotIn('a', ('x',1,'y'))
self.assertNotIn('a', ('x',1,None))
self.assertNotIn('abcd', 'abcxxxx')
self.assertIn('ab', 'abcd')
self.assertIn('ab', 'abc')
self.assertIn('ab', (1,None,'ab'))
self.assertIn('', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertNotIn('\0', 'abc')
self.assertIn('\0', '\0abc')
self.assertIn('\0', 'abc\0')
self.assertIn('a', '\0abc')
self.assertIn('asdf', 'asdf')
self.assertNotIn('asdf', 'asd')
self.assertNotIn('asdf', '')
self.assertRaises(TypeError, "abc".__contains__)
def test_format(self):
self.assertEqual(''.format(), '')
self.assertEqual('a'.format(), 'a')
self.assertEqual('ab'.format(), 'ab')
self.assertEqual('a{{'.format(), 'a{')
self.assertEqual('a}}'.format(), 'a}')
self.assertEqual('{{b'.format(), '{b')
self.assertEqual('}}b'.format(), '}b')
self.assertEqual('a{{b'.format(), 'a{b')
# examples from the PEP:
import datetime
self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
"My name is Fred")
self.assertEqual("My name is {0} :-{{}}".format('Fred'),
"My name is Fred :-{}")
d = datetime.date(2007, 8, 18)
self.assertEqual("The year is {0.year}".format(d),
"The year is 2007")
# classes we'll use for testing
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
class D:
def __init__(self, x):
self.x = x
def __format__(self, spec):
return str(self.x)
# class with __str__, but no __format__
class E:
def __init__(self, x):
self.x = x
def __str__(self):
return 'E(' + self.x + ')'
# class with __repr__, but no __format__ or __str__
class F:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'F(' + self.x + ')'
# class with __format__ that forwards to string, for some format_spec's
class G:
def __init__(self, x):
self.x = x
def __str__(self):
return "string is " + self.x
def __format__(self, format_spec):
if format_spec == 'd':
return 'G(' + self.x + ')'
return object.__format__(self, format_spec)
class I(datetime.date):
def __format__(self, format_spec):
return self.strftime(format_spec)
class J(int):
def __format__(self, format_spec):
return int.__format__(self * 2, format_spec)
self.assertEqual(''.format(), '')
self.assertEqual('abc'.format(), 'abc')
self.assertEqual('{0}'.format('abc'), 'abc')
self.assertEqual('{0:}'.format('abc'), 'abc')
# self.assertEqual('{ 0 }'.format('abc'), 'abc')
self.assertEqual('X{0}'.format('abc'), 'Xabc')
self.assertEqual('{0}X'.format('abc'), 'abcX')
self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
self.assertEqual('{1}'.format(1, 'abc'), 'abc')
self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
self.assertEqual('{0}'.format(-15), '-15')
self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
self.assertEqual('{{'.format(), '{')
self.assertEqual('}}'.format(), '}')
self.assertEqual('{{}}'.format(), '{}')
self.assertEqual('{{x}}'.format(), '{x}')
self.assertEqual('{{{0}}}'.format(123), '{123}')
self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
self.assertEqual('}}{{'.format(), '}{')
self.assertEqual('}}x{{'.format(), '}x{')
# weird field names
self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
self.assertEqual("{0[ ]}".format({' ':3}), '3')
self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
# strings
self.assertEqual('{0:.3s}'.format('abc'), 'abc')
self.assertEqual('{0:.3s}'.format('ab'), 'ab')
self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
self.assertEqual('{0:.0s}'.format('abcdef'), '')
self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
self.assertEqual('{0:x<0s}'.format('result'), 'result')
self.assertEqual('{0:x<5s}'.format('result'), 'result')
self.assertEqual('{0:x<6s}'.format('result'), 'result')
self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
self.assertEqual('{0: <7s}'.format('result'), 'result ')
self.assertEqual('{0:<7s}'.format('result'), 'result ')
self.assertEqual('{0:>7s}'.format('result'), ' result')
self.assertEqual('{0:>8s}'.format('result'), ' result')
self.assertEqual('{0:^8s}'.format('result'), ' result ')
self.assertEqual('{0:^9s}'.format('result'), ' result ')
self.assertEqual('{0:^10s}'.format('result'), ' result ')
self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
# format specifiers for user defined type
self.assertEqual('{0:abc}'.format(C()), 'abc')
# !r, !s and !a coercions
self.assertEqual('{0!s}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:15}'.format('Hello'), 'Hello ')
self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello ')
self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!r}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!r}'.format('\u0374'), "'\u0374'") # printable
self.assertEqual('{0!r}'.format(F('\u0374')), 'F(\u0374)')
self.assertEqual('{0!a}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!a}'.format('\u0374'), "'\\u0374'") # printable
self.assertEqual('{0!a:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!a}'.format(F('\u0374')), 'F(\\u0374)')
# test fallback to object.__format__
self.assertEqual('{0}'.format({}), '{}')
self.assertEqual('{0}'.format([]), '[]')
self.assertEqual('{0}'.format([1]), '[1]')
self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
self.assertEqual('{0!s}'.format(G('data')), 'string is data')
msg = 'object.__format__ with a non-empty format string is deprecated'
with support.check_warnings((msg, PendingDeprecationWarning)):
self.assertEqual('{0:^10}'.format(E('data')), ' E(data) ')
self.assertEqual('{0:^10s}'.format(E('data')), ' E(data) ')
self.assertEqual('{0:>15s}'.format(G('data')), ' string is data')
self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
month=8,
day=27)),
"date: 2007-08-27")
# test deriving from a builtin type and overriding __format__
self.assertEqual("{0}".format(J(10)), "20")
# string format specifiers
self.assertEqual('{0:}'.format('a'), 'a')
# computed format specifiers
self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello ')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello ')
# test various errors
self.assertRaises(ValueError, '{'.format)
self.assertRaises(ValueError, '}'.format)
self.assertRaises(ValueError, 'a{'.format)
self.assertRaises(ValueError, 'a}'.format)
self.assertRaises(ValueError, '{a'.format)
self.assertRaises(ValueError, '}a'.format)
self.assertRaises(IndexError, '{0}'.format)
self.assertRaises(IndexError, '{1}'.format, 'abc')
self.assertRaises(KeyError, '{x}'.format)
self.assertRaises(ValueError, "}{".format)
self.assertRaises(ValueError, "abc{0:{}".format)
self.assertRaises(ValueError, "{0".format)
self.assertRaises(IndexError, "{0.}".format)
self.assertRaises(ValueError, "{0.}".format, 0)
self.assertRaises(IndexError, "{0[}".format)
self.assertRaises(ValueError, "{0[}".format, [])
self.assertRaises(KeyError, "{0]}".format)
self.assertRaises(ValueError, "{0.[]}".format, 0)
self.assertRaises(ValueError, "{0..foo}".format, 0)
self.assertRaises(ValueError, "{0[0}".format, 0)
self.assertRaises(ValueError, "{0[0:foo}".format, 0)
self.assertRaises(KeyError, "{c]}".format)
self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
self.assertRaises(ValueError, "{0}}".format, 0)
self.assertRaises(KeyError, "{foo}".format, bar=3)
self.assertRaises(ValueError, "{0!x}".format, 3)
self.assertRaises(ValueError, "{0!}".format, 0)
self.assertRaises(ValueError, "{0!rs}".format, 0)
self.assertRaises(ValueError, "{!}".format)
self.assertRaises(IndexError, "{:}".format)
self.assertRaises(IndexError, "{:s}".format)
self.assertRaises(IndexError, "{}".format)
big = "23098475029384702983476098230754973209482573"
self.assertRaises(ValueError, ("{" + big + "}").format)
self.assertRaises(ValueError, ("{[" + big + "]}").format, [0])
# issue 6089
self.assertRaises(ValueError, "{0[0]x}".format, [None])
self.assertRaises(ValueError, "{0[0](10)}".format, [None])
# can't have a replacement on the field name portion
self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
# exceed maximum recursion depth
self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
0, 1, 2, 3, 4, 5, 6, 7)
# string format spec errors
self.assertRaises(ValueError, "{0:-s}".format, '')
self.assertRaises(ValueError, format, "", "-")
self.assertRaises(ValueError, "{0:=s}".format, '')
# Alternate formatting is not supported
self.assertRaises(ValueError, format, '', '#')
self.assertRaises(ValueError, format, '', '#20')
def test_format_map(self):
self.assertEqual(''.format_map({}), '')
self.assertEqual('a'.format_map({}), 'a')
self.assertEqual('ab'.format_map({}), 'ab')
self.assertEqual('a{{'.format_map({}), 'a{')
self.assertEqual('a}}'.format_map({}), 'a}')
self.assertEqual('{{b'.format_map({}), '{b')
self.assertEqual('}}b'.format_map({}), '}b')
self.assertEqual('a{{b'.format_map({}), 'a{b')
# using mappings
class Mapping(dict):
def __missing__(self, key):
return key
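        # With __missing__ returning the key itself, unknown fields format as
        # their own names instead of raising KeyError.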
self.assertEqual('{hello}'.format_map(Mapping()), 'hello')
self.assertEqual('{a} {world}'.format_map(Mapping(a='hello')), 'hello world')
class InternalMapping:
def __init__(self):
self.mapping = {'a': 'hello'}
def __getitem__(self, key):
return self.mapping[key]
self.assertEqual('{a}'.format_map(InternalMapping()), 'hello')
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{foo._x}'.format_map({'foo': C(20)}), '20')
# test various errors
self.assertRaises(TypeError, '{'.format_map)
self.assertRaises(TypeError, '}'.format_map)
self.assertRaises(TypeError, 'a{'.format_map)
self.assertRaises(TypeError, 'a}'.format_map)
self.assertRaises(TypeError, '{a'.format_map)
self.assertRaises(TypeError, '}a'.format_map)
# issue #12579: can't supply positional params to format_map
self.assertRaises(ValueError, '{}'.format_map, {'a' : 2})
self.assertRaises(ValueError, '{}'.format_map, 'a')
self.assertRaises(ValueError, '{a} {}'.format_map, {"a" : 2, "b" : 1})
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{}'.format(10), '10')
self.assertEqual('{:5}'.format('s'), 's ')
self.assertEqual('{!r}'.format('s'), "'s'")
self.assertEqual('{._x}'.format(C(10)), '10')
self.assertEqual('{[1]}'.format([1, 2]), '2')
self.assertEqual('{[a]}'.format({'a':4, 'b':2}), '4')
self.assertEqual('a{}b{}c'.format(0, 1), 'a0b1c')
self.assertEqual('a{:{}}b'.format('x', '^10'), 'a x b')
self.assertEqual('a{:{}x}b'.format(20, '#'), 'a0x14b')
# can't mix and match numbering and auto-numbering
self.assertRaises(ValueError, '{}{1}'.format, 1, 2)
self.assertRaises(ValueError, '{1}{}'.format, 1, 2)
self.assertRaises(ValueError, '{:{1}}'.format, 1, 2)
self.assertRaises(ValueError, '{0:{}}'.format, 1, 2)
# can mix and match auto-numbering and named
self.assertEqual('{f}{}'.format(4, f='test'), 'test4')
self.assertEqual('{}{f}'.format(4, f='test'), '4test')
self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % ("abc", "abc"), 'abc, abc')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, 2, 3), 'abc, abc, 1, 2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, -2, 3), 'abc, abc, 1, -2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.5), 'abc, abc, -1, -2.000000, 3.50')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.57), 'abc, abc, -1, -2.000000, 3.57')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 1003.57), 'abc, abc, -1, -2.000000, 1003.57')
if not sys.platform.startswith('java'):
self.assertEqual("%r, %r" % (b"abc", "abc"), "b'abc', 'abc'")
self.assertEqual("%r" % ("\u1234",), "'\u1234'")
self.assertEqual("%a" % ("\u1234",), "'\\u1234'")
self.assertEqual("%(x)s, %(y)s" % {'x':"abc", 'y':"def"}, 'abc, def')
self.assertEqual("%(x)s, %(\xfc)s" % {'x':"abc", '\xfc':"def"}, 'abc, def')
self.assertEqual('%c' % 0x1234, '\u1234')
self.assertEqual('%c' % 0x21483, '\U00021483')
self.assertRaises(OverflowError, "%c".__mod__, (0x110000,))
self.assertEqual('%c' % '\U00021483', '\U00021483')
self.assertRaises(TypeError, "%c".__mod__, "aa")
self.assertRaises(ValueError, "%.1\u1032f".__mod__, (1.0/3))
self.assertRaises(TypeError, "%i".__mod__, "aa")
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,"abc"), '...1...2...3...abc...')
self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,"abc"), '...%...%s...1...2...3...abc...')
self.assertEqual('...%s...' % "abc", '...abc...')
self.assertEqual('%*s' % (5,'abc',), ' abc')
self.assertEqual('%*s' % (-5,'abc',), 'abc ')
self.assertEqual('%*.*s' % (5,2,'abc',), ' ab')
self.assertEqual('%*.*s' % (5,3,'abc',), ' abc')
self.assertEqual('%i %*.*s' % (10, 5,3,'abc',), '10 abc')
self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, 'abc',), '103 abc')
self.assertEqual('%c' % 'a', 'a')
class Wrapper:
def __str__(self):
return '\u1234'
self.assertEqual('%s' % Wrapper(), '\u1234')
# issue 3382
NAN = float('nan')
INF = float('inf')
self.assertEqual('%f' % NAN, 'nan')
self.assertEqual('%F' % NAN, 'NAN')
self.assertEqual('%f' % INF, 'inf')
self.assertEqual('%F' % INF, 'INF')
def test_startswith_endswith_errors(self):
for meth in ('foo'.startswith, 'foo'.endswith):
with self.assertRaises(TypeError) as cm:
meth(['f'])
exc = str(cm.exception)
self.assertIn('str', exc)
self.assertIn('tuple', exc)
@support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_format_float(self):
# should not format with a comma, but always with C locale
self.assertEqual('1.0', '%.1f' % 1.0)
def test_constructor(self):
# unicode(obj) tests (this maps to PyObject_Unicode() at C level)
self.assertEqual(
str('unicode remains unicode'),
'unicode remains unicode'
)
class UnicodeSubclass(str):
pass
self.assertEqual(
str(UnicodeSubclass('unicode subclass becomes unicode')),
'unicode subclass becomes unicode'
)
self.assertEqual(
str('strings are converted to unicode'),
'strings are converted to unicode'
)
class StringCompat:
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
self.assertEqual(
str(StringCompat('__str__ compatible objects are recognized')),
'__str__ compatible objects are recognized'
)
# unicode(obj) is compatible to str():
o = StringCompat('unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
for obj in (123, 123.45, 123):
self.assertEqual(str(obj), str(str(obj)))
# unicode(obj, encoding, error) tests (this maps to
# PyUnicode_FromEncodedObject() at C level)
if not sys.platform.startswith('java'):
self.assertRaises(
TypeError,
str,
'decoding unicode is not supported',
'utf-8',
'strict'
)
self.assertEqual(
str(b'strings are decoded to unicode', 'utf-8', 'strict'),
'strings are decoded to unicode'
)
if not sys.platform.startswith('java'):
self.assertEqual(
str(
memoryview(b'character buffers are decoded to unicode'),
'utf-8',
'strict'
),
'character buffers are decoded to unicode'
)
self.assertRaises(TypeError, str, 42, 42, 42)
def test_codecs_utf7(self):
utfTests = [
('A\u2262\u0391.', b'A+ImIDkQ.'), # RFC2152 example
('Hi Mom -\u263a-!', b'Hi Mom -+Jjo--!'), # RFC2152 example
('\u65E5\u672C\u8A9E', b'+ZeVnLIqe-'), # RFC2152 example
('Item 3 is \u00a31.', b'Item 3 is +AKM-1.'), # RFC2152 example
('+', b'+-'),
('+-', b'+--'),
('+?', b'+-?'),
('\?', b'+AFw?'),
('+?', b'+-?'),
(r'\\?', b'+AFwAXA?'),
(r'\\\?', b'+AFwAXABc?'),
(r'++--', b'+-+---'),
('\U000abcde', b'+2m/c3g-'), # surrogate pairs
('/', b'/'),
]
for (x, y) in utfTests:
self.assertEqual(x.encode('utf-7'), y)
# Unpaired surrogates not supported
self.assertRaises(UnicodeError, str, b'+3ADYAA-', 'utf-7')
self.assertEqual(str(b'+3ADYAA-', 'utf-7', 'replace'), '\ufffd\ufffd')
# Issue #2242: crash on some Windows/MSVC versions
self.assertEqual(b'+\xc1'.decode('utf-7'), '\xc1')
# Direct encoded characters
set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?"
# Optional direct characters
set_o = '!"#$%&*;<=>@[]^_`{|}'
for c in set_d:
self.assertEqual(c.encode('utf7'), c.encode('ascii'))
self.assertEqual(c.encode('ascii').decode('utf7'), c)
for c in set_o:
self.assertEqual(c.encode('ascii').decode('utf7'), c)
def test_codecs_utf8(self):
self.assertEqual(''.encode('utf-8'), b'')
self.assertEqual('\u20ac'.encode('utf-8'), b'\xe2\x82\xac')
if sys.maxunicode == 65535:
self.assertEqual('\ud800\udc02'.encode('utf-8'), b'\xf0\x90\x80\x82')
self.assertEqual('\ud84d\udc56'.encode('utf-8'), b'\xf0\xa3\x91\x96')
self.assertEqual('\ud800'.encode('utf-8', 'surrogatepass'), b'\xed\xa0\x80')
self.assertEqual('\udc00'.encode('utf-8', 'surrogatepass'), b'\xed\xb0\x80')
if sys.maxunicode == 65535:
self.assertEqual(
('\ud800\udc02'*1000).encode('utf-8'),
b'\xf0\x90\x80\x82'*1000)
self.assertEqual(
'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
' Nunstuck git und'.encode('utf-8'),
b'\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
b'\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
b'\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
b'\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
b'\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
b'\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
b'\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
b'\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
b'\xe3\x80\x8cWenn ist das Nunstuck git und'
)
# UTF-8 specific decoding tests
self.assertEqual(str(b'\xf0\xa3\x91\x96', 'utf-8'), '\U00023456' )
self.assertEqual(str(b'\xf0\x90\x80\x82', 'utf-8'), '\U00010002' )
self.assertEqual(str(b'\xe2\x82\xac', 'utf-8'), '\u20ac' )
# Other possible utf-8 test cases:
# * strict decoding testing for all of the
# UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_utf8_decode_valid_sequences(self):
sequences = [
# single byte
(b'\x00', '\x00'), (b'a', 'a'), (b'\x7f', '\x7f'),
# 2 bytes
(b'\xc2\x80', '\x80'), (b'\xdf\xbf', '\u07ff'),
# 3 bytes
(b'\xe0\xa0\x80', '\u0800'), (b'\xed\x9f\xbf', '\ud7ff'),
(b'\xee\x80\x80', '\uE000'), (b'\xef\xbf\xbf', '\uffff'),
# 4 bytes
(b'\xF0\x90\x80\x80', '\U00010000'),
(b'\xf4\x8f\xbf\xbf', '\U0010FFFF')
]
for seq, res in sequences:
self.assertEqual(seq.decode('utf-8'), res)
def test_utf8_decode_invalid_sequences(self):
# continuation bytes in a sequence of 2, 3, or 4 bytes
continuation_bytes = [bytes([x]) for x in range(0x80, 0xC0)]
        # start bytes of a 2-byte sequence equivalent to codepoints <= 0x7F
invalid_2B_seq_start_bytes = [bytes([x]) for x in range(0xC0, 0xC2)]
# start bytes of a 4-byte sequence equivalent to codepoints > 0x10FFFF
invalid_4B_seq_start_bytes = [bytes([x]) for x in range(0xF5, 0xF8)]
invalid_start_bytes = (
continuation_bytes + invalid_2B_seq_start_bytes +
invalid_4B_seq_start_bytes + [bytes([x]) for x in range(0xF7, 0x100)]
)
for byte in invalid_start_bytes:
self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
for sb in invalid_2B_seq_start_bytes:
for cb in continuation_bytes:
self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
for sb in invalid_4B_seq_start_bytes:
for cb1 in continuation_bytes[:3]:
for cb3 in continuation_bytes[:3]:
self.assertRaises(UnicodeDecodeError,
(sb+cb1+b'\x80'+cb3).decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0xA0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\xBF').decode, 'utf-8')
# surrogates
for cb in [bytes([x]) for x in range(0xA0, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0x90)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\xBF\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x90, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\xBF\xBF').decode, 'utf-8')
def test_issue8271(self):
# Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
# only the start byte and the continuation byte(s) are now considered
# invalid, instead of the number of bytes specified by the start byte.
# See http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
# table 3-8, Row 2) for more information about the algorithm used.
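        # For example, b'\xf1\x80\x41' (a truncated 4-byte sequence followed
        # by an ASCII byte) now decodes with 'replace' to '\ufffd' + 'A'
        # instead of swallowing the ASCII byte; the same pair appears in the
        # table below.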
FFFD = '\ufffd'
sequences = [
# invalid start bytes
(b'\x80', FFFD), # continuation byte
(b'\x80\x80', FFFD*2), # 2 continuation bytes
(b'\xc0', FFFD),
(b'\xc0\xc0', FFFD*2),
(b'\xc1', FFFD),
(b'\xc1\xc0', FFFD*2),
(b'\xc0\xc1', FFFD*2),
# with start byte of a 2-byte sequence
(b'\xc2', FFFD), # only the start byte
(b'\xc2\xc2', FFFD*2), # 2 start bytes
            (b'\xc2\xc2\xc2', FFFD*3), # 3 start bytes
(b'\xc2\x41', FFFD+'A'), # invalid continuation byte
# with start byte of a 3-byte sequence
(b'\xe1', FFFD), # only the start byte
(b'\xe1\xe1', FFFD*2), # 2 start bytes
(b'\xe1\xe1\xe1', FFFD*3), # 3 start bytes
(b'\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
(b'\xe1\x80', FFFD), # only 1 continuation byte
(b'\xe1\x41', FFFD+'A'), # invalid continuation byte
(b'\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
(b'\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
(b'\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
(b'\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
(b'\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
# with start byte of a 4-byte sequence
(b'\xf1', FFFD), # only the start byte
(b'\xf1\xf1', FFFD*2), # 2 start bytes
(b'\xf1\xf1\xf1', FFFD*3), # 3 start bytes
(b'\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
(b'\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
            (b'\xf1\x80', FFFD), # only 1 continuation byte
(b'\xf1\x80\x80', FFFD), # only 2 continuation bytes
(b'\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
(b'\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 1 invalid
(b'\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
            (b'\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cb and 1 valid
(b'\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb and 2 invalid
(b'\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 2 invalid cb and 1 invalid
(b'\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 valid cb and 1 invalid
(b'\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
(b'\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
(b'\xf1\xf1\x80\x41', FFFD*2+'A'),
(b'\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
# with invalid start byte of a 4-byte sequence (rfc2279)
(b'\xf5', FFFD), # only the start byte
(b'\xf5\xf5', FFFD*2), # 2 start bytes
(b'\xf5\x80', FFFD*2), # only 1 continuation byte
            (b'\xf5\x80\x80', FFFD*3), # only 2 continuation bytes
(b'\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
(b'\xf5\x80\x41', FFFD*2+'A'), # 1 valid cb and 1 invalid
(b'\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
(b'\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
# with invalid start byte of a 5-byte sequence (rfc2279)
(b'\xf8', FFFD), # only the start byte
(b'\xf8\xf8', FFFD*2), # 2 start bytes
(b'\xf8\x80', FFFD*2), # only one continuation byte
(b'\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
(b'\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5 bytes seq with 5 bytes
# with invalid start byte of a 6-byte sequence (rfc2279)
(b'\xfc', FFFD), # only the start byte
(b'\xfc\xfc', FFFD*2), # 2 start bytes
(b'\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
(b'\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
# invalid start byte
(b'\xfe', FFFD),
(b'\xfe\x80\x80', FFFD*3),
# other sequences
(b'\xf1\x80\x41\x42\x43', '\ufffd\x41\x42\x43'),
(b'\xf1\x80\xff\x42\x43', '\ufffd\ufffd\x42\x43'),
(b'\xf1\x80\xc2\x81\x43', '\ufffd\x81\x43'),
(b'\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
]
for n, (seq, res) in enumerate(sequences):
self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((seq+b'b').decode('utf-8', 'replace'), res+'b')
self.assertEqual(seq.decode('utf-8', 'ignore'),
res.replace('\uFFFD', ''))
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual("www.python.org.".encode("idna"), b"www.python.org.")
def test_codecs_errors(self):
# Error handling (encoding)
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii')
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii','strict')
self.assertEqual('Andr\202 x'.encode('ascii','ignore'), b"Andr x")
self.assertEqual('Andr\202 x'.encode('ascii','replace'), b"Andr? x")
self.assertEqual('Andr\202 x'.encode('ascii', 'replace'),
'Andr\202 x'.encode('ascii', errors='replace'))
self.assertEqual('Andr\202 x'.encode('ascii', 'ignore'),
'Andr\202 x'.encode(encoding='ascii', errors='ignore'))
# Error handling (decoding)
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii')
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict')
self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x")
self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x')
# Error handling (unknown character names)
self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx")
# Error handling (truncated escape sequence)
self.assertRaises(UnicodeError, b"\\".decode, "unicode-escape")
self.assertRaises(TypeError, b"hello".decode, "test.unicode1")
self.assertRaises(TypeError, str, b"hello", "test.unicode2")
self.assertRaises(TypeError, "hello".encode, "test.unicode1")
self.assertRaises(TypeError, "hello".encode, "test.unicode2")
# executes PyUnicode_Encode()
import imp
self.assertRaises(
ImportError,
imp.find_module,
"non-existing module",
["non-existing dir"]
)
# Error handling (wrong arguments)
self.assertRaises(TypeError, "hello".encode, 42, 42, 42)
# Error handling (lone surrogate in PyUnicode_TransformDecimalToASCII())
self.assertRaises(UnicodeError, int, "\ud800")
self.assertRaises(UnicodeError, int, "\udf00")
self.assertRaises(UnicodeError, float, "\ud800")
self.assertRaises(UnicodeError, float, "\udf00")
self.assertRaises(UnicodeError, complex, "\ud800")
self.assertRaises(UnicodeError, complex, "\udf00")
def test_codecs(self):
# Encoding
self.assertEqual('hello'.encode('ascii'), b'hello')
self.assertEqual('hello'.encode('utf-7'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf8'), b'hello')
self.assertEqual('hello'.encode('utf-16-le'), b'h\000e\000l\000l\000o\000')
self.assertEqual('hello'.encode('utf-16-be'), b'\000h\000e\000l\000l\000o')
self.assertEqual('hello'.encode('latin-1'), b'hello')
# Roundtrip safety for BMP (just the first 1024 chars)
for c in range(1024):
u = chr(c)
for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
'utf-16-be', 'raw_unicode_escape',
'unicode_escape', 'unicode_internal'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 256 chars)
for c in range(256):
u = chr(c)
for encoding in ('latin-1',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 128 chars)
for c in range(128):
u = chr(c)
for encoding in ('ascii',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for non-BMP (just a few chars)
u = '\U00010001\U00020002\U00030003\U00040004\U00050005'
for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
#'raw_unicode_escape',
'unicode_escape', 'unicode_internal'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# UTF-8 must be roundtrip safe for all UCS-2 code points
# This excludes surrogates: in the full range, there would be
# a surrogate pair (\udbff\udc00), which gets converted back
# to a non-BMP character (\U0010fc00)
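        # (A lone surrogate such as '\ud800' cannot be encoded with the
        # default 'strict' handler at all -- see the surrogatepass cases in
        # test_codecs_utf8 above.)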
u = ''.join(map(chr, list(range(0,0xd800)) +
list(range(0xe000,0x10000))))
for encoding in ('utf-8',):
self.assertEqual(str(u.encode(encoding),encoding), u)
def test_codecs_charmap(self):
# 0-127
s = bytes(range(128))
for encoding in (
'cp037', 'cp1026',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
'mac_cyrillic', 'mac_latin2',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
'cp1006', 'iso8859_8',
### These have undefined mappings:
#'cp424',
### These fail the round-trip:
#'cp875'
):
self.assertEqual(str(s, encoding).encode(encoding), s)
# 128-255
s = bytes(range(128, 256))
for encoding in (
'cp037', 'cp1026',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_4', 'iso8859_5',
'iso8859_9', 'koi8_r', 'latin_1',
'mac_cyrillic', 'mac_latin2',
### These have undefined mappings:
#'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
#'cp1256', 'cp1257', 'cp1258',
#'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
#'iso8859_3', 'iso8859_6', 'iso8859_7',
#'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
### These fail the round-trip:
#'cp1006', 'cp875', 'iso8859_8',
):
self.assertEqual(str(s, encoding).encode(encoding), s)
def test_concatenation(self):
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
def test_printing(self):
class BitBucket:
def write(self, text):
pass
out = BitBucket()
print('abc', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc\n', file=out)
print('abc\n', end=' ', file=out)
print('abc\n', end=' ', file=out)
print('def\n', file=out)
print('def\n', file=out)
def test_ucs4(self):
x = '\U00100000'
y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00100000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00010000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
try:
br'\U11111111'.decode("raw-unicode-escape")
except UnicodeDecodeError as e:
self.assertEqual(e.start, 0)
self.assertEqual(e.end, 10)
else:
self.fail("Should have raised UnicodeDecodeError")
def test_conversion(self):
# Make sure __unicode__() works properly
class Foo0:
def __str__(self):
return "foo"
class Foo1:
def __str__(self):
return "foo"
class Foo2(object):
def __str__(self):
return "foo"
class Foo3(object):
def __str__(self):
return "foo"
class Foo4(str):
def __str__(self):
return "foo"
class Foo5(str):
def __str__(self):
return "foo"
class Foo6(str):
def __str__(self):
return "foos"
def __str__(self):
return "foou"
class Foo7(str):
def __str__(self):
return "foos"
def __str__(self):
return "foou"
class Foo8(str):
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
class Foo9(str):
def __str__(self):
return "not unicode"
self.assertEqual(str(Foo0()), "foo")
self.assertEqual(str(Foo1()), "foo")
self.assertEqual(str(Foo2()), "foo")
self.assertEqual(str(Foo3()), "foo")
self.assertEqual(str(Foo4("bar")), "foo")
self.assertEqual(str(Foo5("bar")), "foo")
self.assertEqual(str(Foo6("bar")), "foou")
self.assertEqual(str(Foo7("bar")), "foou")
self.assertEqual(str(Foo8("foo")), "foofoo")
self.assertEqual(str(Foo9("foo")), "not unicode")
def test_unicode_repr(self):
class s1:
def __repr__(self):
return '\\n'
class s2:
def __repr__(self):
return '\\n'
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
def test_printable_repr(self):
self.assertEqual(repr('\U00010000'), "'%c'" % (0x10000,)) # printable
self.assertEqual(repr('\U00014000'), "'\\U00014000'") # nonprintable
def test_expandtabs_overflows_gracefully(self):
# This test only affects 32-bit platforms because expandtabs can only take
# an int as the max value, not a 64-bit C long. If expandtabs is changed
# to take a 64-bit long, this test should apply to all platforms.
if sys.maxsize > (1 << 32) or struct.calcsize('P') != 4:
return
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxsize)
def test_raiseMemError(self):
# Ensure that the freelist contains a consistent object, even
# when a string allocation fails with a MemoryError.
# This used to crash the interpreter,
# or leak references when the number was smaller.
charwidth = 4 if sys.maxunicode >= 0x10000 else 2
# Note: sys.maxsize is half of the actual max allocation because of
# the signedness of Py_ssize_t.
alloc = lambda: "a" * (sys.maxsize // charwidth * 2)
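        # The requested string needs roughly 2 * sys.maxsize bytes of storage,
        # i.e. more than the whole address space, so the allocation must fail
        # cleanly with MemoryError instead of crashing.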
self.assertRaises(MemoryError, alloc)
self.assertRaises(MemoryError, alloc)
def test_format_subclass(self):
class S(str):
def __str__(self):
return '__str__ overridden'
s = S('xxx')
self.assertEqual("%s" % s, '__str__ overridden')
self.assertEqual("{}".format(s), '__str__ overridden')
# Test PyUnicode_FromFormat()
def test_from_format(self):
support.import_module('ctypes')
from ctypes import pythonapi, py_object, c_int
if sys.maxunicode == 65535:
name = "PyUnicodeUCS2_FromFormat"
else:
name = "PyUnicodeUCS4_FromFormat"
_PyUnicode_FromFormat = getattr(pythonapi, name)
_PyUnicode_FromFormat.restype = py_object
def PyUnicode_FromFormat(format, *args):
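            # str arguments are wrapped in py_object so ctypes passes them
            # through as PyObject* instead of converting them to C strings.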
cargs = tuple(
py_object(arg) if isinstance(arg, str) else arg
for arg in args)
return _PyUnicode_FromFormat(format, *cargs)
# ascii format, non-ascii argument
text = PyUnicode_FromFormat(b'ascii\x7f=%U', 'unicode\xe9')
self.assertEqual(text, 'ascii\x7f=unicode\xe9')
# non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
# raises an error
self.assertRaisesRegex(ValueError,
'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
'string, got a non-ASCII byte: 0xe9$',
PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')
self.assertEqual(PyUnicode_FromFormat(b'%c', c_int(0xabcd)), '\uabcd')
self.assertEqual(PyUnicode_FromFormat(b'%c', c_int(0x10ffff)), '\U0010ffff')
# other tests
text = PyUnicode_FromFormat(b'%%A:%A', 'abc\xe9\uabcd\U0010ffff')
self.assertEqual(text, r"%A:'abc\xe9\uabcd\U0010ffff'")
text = PyUnicode_FromFormat(b'repr=%V', 'abc', b'xyz')
self.assertEqual(text, 'repr=abc')
# Test string decode from parameter of %s using utf-8.
# b'\xe4\xba\xba\xe6\xb0\x91' is utf-8 encoded byte sequence of
# '\u4eba\u6c11'
text = PyUnicode_FromFormat(b'repr=%V', None, b'\xe4\xba\xba\xe6\xb0\x91')
self.assertEqual(text, 'repr=\u4eba\u6c11')
#Test replace error handler.
text = PyUnicode_FromFormat(b'repr=%V', None, b'abc\xff')
self.assertEqual(text, 'repr=abc\ufffd')
# Test PyUnicode_AsWideChar()
def test_aswidechar(self):
from _testcapi import unicode_aswidechar
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
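        # unicode_aswidechar(s, buflen) copies at most buflen wide characters
        # and returns (buffer, size); 'size' does not count the trailing NUL
        # that is appended whenever the buffer has room for it.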
wchar, size = unicode_aswidechar('abcdef', 2)
self.assertEqual(size, 2)
self.assertEqual(wchar, 'ab')
wchar, size = unicode_aswidechar('abc', 3)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc')
wchar, size = unicode_aswidechar('abc', 4)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc', 10)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc\0def', 20)
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
buflen = 3
nchar = 2
else: # sizeof(c_wchar) == 4
buflen = 2
nchar = 1
wchar, size = unicode_aswidechar(nonbmp, buflen)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsWideCharString()
def test_aswidecharstring(self):
from _testcapi import unicode_aswidecharstring
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidecharstring('abc')
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidecharstring('abc\0def')
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
nchar = 2
else: # sizeof(c_wchar) == 4
nchar = 1
wchar, size = unicode_aswidecharstring(nonbmp)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
class StringModuleTest(unittest.TestCase):
def test_formatter_parser(self):
def parse(format):
return list(_string.formatter_parser(format))
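        # formatter_parser() yields one
        # (literal_text, field_name, format_spec, conversion) tuple per chunk
        # of the format string, as exercised below.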
formatter = parse("prefix {2!s}xxx{0:^+10.3f}{obj.attr!s} {z[0]!s:10}")
self.assertEqual(formatter, [
('prefix ', '2', '', 's'),
('xxx', '0', '^+10.3f', None),
('', 'obj.attr', '', 's'),
(' ', 'z[0]', '10', 's'),
])
formatter = parse("prefix {} suffix")
self.assertEqual(formatter, [
('prefix ', '', '', None),
(' suffix', None, None, None),
])
formatter = parse("str")
self.assertEqual(formatter, [
('str', None, None, None),
])
formatter = parse("")
self.assertEqual(formatter, [])
formatter = parse("{0}")
self.assertEqual(formatter, [
('', '0', '', None),
])
self.assertRaises(TypeError, _string.formatter_parser, 1)
def test_formatter_field_name_split(self):
def split(name):
items = list(_string.formatter_field_name_split(name))
items[1] = list(items[1])
return items
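        # formatter_field_name_split() returns (first, rest) where 'rest'
        # iterates (is_attribute, value) pairs: True for ".attr" lookups,
        # False for "[key]" index lookups.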
self.assertEqual(split("obj"), ["obj", []])
self.assertEqual(split("obj.arg"), ["obj", [(True, 'arg')]])
self.assertEqual(split("obj[key]"), ["obj", [(False, 'key')]])
self.assertEqual(split("obj.arg[key1][key2]"), [
"obj",
[(True, 'arg'),
(False, 'key1'),
(False, 'key2'),
]])
self.assertRaises(TypeError, _string.formatter_field_name_split, 1)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| theheros/kbengine | kbe/res/scripts/common/Lib/test/test_unicode.py | Python | lgpl-3.0 | 71,973 | 0.00335 |
"""
Course Group Configurations page.
"""
from .course_page import CoursePage
class GroupConfigurationsPage(CoursePage):
"""
Course Group Configurations page.
"""
url_path = "group_configurations"
def is_browser_on_page(self):
return self.q(css='body.view-group-configurations').present
def group_configurations(self):
"""
Return list of the group configurations for the course.
"""
css = '.group-configurations-list-item'
return [GroupConfiguration(self, index) for index in xrange(len(self.q(css=css)))]
def create(self):
"""
Creates new group configuration.
"""
self.q(css=".new-button").first.click()
class GroupConfiguration(object):
"""
Group Configuration wrapper.
"""
def __init__(self, page, index):
self.page = page
self.SELECTOR = '.group-configurations-list-item-{}'.format(index)
self.index = index
def get_selector(self, css=''):
return ' '.join([self.SELECTOR, css])
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.get_selector(css=selector))
def toggle(self):
"""
Expand/collapse group configuration.
"""
css = 'a.group-toggle'
self.find_css(css).first.click()
def get_text(self, css):
"""
        Return text for the element defined by the css locator.
"""
return self.find_css(css).first.text[0]
def edit(self):
"""
Open editing view for the group configuration.
"""
css = '.action-edit .edit'
self.find_css(css).first.click()
def save(self):
"""
Save group configuration.
"""
css = '.action-primary'
self.find_css(css).first.click()
self.page.wait_for_ajax()
def cancel(self):
"""
Cancel group configuration.
"""
css = '.action-secondary'
self.find_css(css).first.click()
@property
def mode(self):
"""
Return group configuration mode.
"""
if self.find_css('.group-configuration-edit').present:
return 'edit'
elif self.find_css('.group-configuration-details').present:
return 'details'
@property
def id(self):
"""
Return group configuration id.
"""
return self.get_text('.group-configuration-id .group-configuration-value')
@property
def validation_message(self):
"""
Return validation message.
"""
return self.get_text('.message-status.error')
@property
def name(self):
"""
Return group configuration name.
"""
return self.get_text('.group-configuration-title')
@name.setter
def name(self, value):
"""
Set group configuration name.
"""
css = '.group-configuration-name-input'
self.find_css(css).first.fill(value)
@property
def description(self):
"""
Return group configuration description.
"""
return self.get_text('.group-configuration-description')
@description.setter
def description(self, value):
"""
Set group configuration description.
"""
css = '.group-configuration-description-input'
self.find_css(css).first.fill(value)
@property
def groups(self):
"""
Return list of groups.
"""
css = '.group'
def group_selector(config_index, group_index):
return self.get_selector('.groups-{} .group-{} '.format(config_index, group_index))
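        # Build one Group wrapper per matched '.group' element, scoped by both
        # the configuration index and the group index.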
return [Group(self.page, group_selector(self.index, index)) for index, element in enumerate(self.find_css(css))]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
class Group(object):
"""
Group wrapper.
"""
def __init__(self, page, prefix_selector):
self.page = page
self.prefix = prefix_selector
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.prefix + selector)
@property
def name(self):
"""
Return group name.
"""
css = '.group-name'
return self.find_css(css).first.text[0]
@property
def allocation(self):
"""
Return allocation for the group.
"""
css = '.group-allocation'
return self.find_css(css).first.text[0]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
| LICEF/edx-platform | common/test/acceptance/pages/studio/settings_group_configurations.py | Python | agpl-3.0 | 4,712 | 0.000849 |
# coding: utf-8
from datetime import datetime
from decimal import Decimal
from unittest import TestCase
import httpretty
from pyqiwi import QiwiError, Qiwi
class QiwiErrorTestCase(TestCase):
def test_error_code(self):
error = QiwiError(143)
self.assertEqual(error.code, 143)
@httpretty.activate
class QiwiClientTestCase(TestCase):
shop_id = '123'
api_id = '456'
api_password = '123qwe'
notification_password = 'qwe123'
def setUp(self):
self.client = Qiwi(self.shop_id, self.api_id, self.api_password, self.notification_password)
def tearDown(self):
httpretty.reset()
def parse(self, data):
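        # Turn an urlencoded request body such as b'a=1&b=2' into
        # {'a': '1', 'b': '2'} so assertions do not depend on parameter order.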
if isinstance(data, bytes):
data = data.decode('utf-8')
return dict(map(lambda x: x.split('='), data.split('&')))
def test__get_invoice_url(self):
self.assertEqual(
self.client._get_invoice_url('10001'),
'https://api.qiwi.com/api/v2/prv/123/bills/10001'
)
def test__get_refund_url(self):
self.assertEqual(
self.client._get_refund_url('1', '002'),
'https://api.qiwi.com/api/v2/prv/123/bills/1/refund/002'
)
def test_url_encode(self):
encoded = self.client._urlencode({
'foo': 'bar',
'ext': '',
'user': 'tel:+79998887766',
})
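        # The empty 'ext' value is expected to be dropped entirely, and
        # reserved characters such as ':' and '+' to be percent-encoded.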
self.assertEqual(self.parse(encoded), {
'foo': 'bar',
'user': 'tel%3A%2B79998887766',
})
def test_make_auth(self):
self.assertEqual(
self.client._make_auth('user1', 'password'),
b'Basic dXNlcjE6cGFzc3dvcmQ='
)
self.assertEqual(
self.client._make_auth('123456', 'zLQkZDdRvBNUkf9spassword'),
b'Basic MTIzNDU2OnpMUWtaRGRSdkJOVWtmOXNwYXNzd29yZA=='
)
def test__make_signature(self):
signature = self.client._make_signature({
'b': 'bar',
'a': 'foo',
'some': 'param',
'comment': u'Заказ №101'
})
self.assertEqual(signature, b'7nHZIf/w6DLq+CuvzV2BmhT71xA=')
def test__request(self):
url = 'http://example.com'
auth = self.client._make_auth(self.api_id, self.api_password).decode('utf-8')
httpretty.register_uri(httpretty.GET, url, '{"response": {"result_code": 0}}')
response = self.client._request(url)
request = httpretty.HTTPretty.last_request
self.assertEqual(response, {'result_code': 0})
self.assertEqual(request.headers.get('Accept'), 'application/json')
self.assertEqual(request.headers.get('Authorization'), auth)
httpretty.register_uri(httpretty.PUT, url, '{"response": {"result_code": 0}}')
response = self.client._request(url, {'user': 'tel:+79998887766'})
request = httpretty.HTTPretty.last_request
self.assertEqual(response, {'result_code': 0})
self.assertEqual(request.headers.get('Accept'), 'application/json')
self.assertEqual(request.headers.get('Authorization'), auth)
self.assertEqual(request.headers.get('Content-Type'), 'application/x-www-form-urlencoded')
self.assertEqual(request.body, b'user=tel%3A%2B79998887766')
httpretty.reset()
httpretty.register_uri(
httpretty.GET, url, '{"response": {"result_code": 33}}', status=400)
try:
self.client._request(url)
except QiwiError as e:
self.assertEqual(e.code, 33)
else:
self.fail('QiwiError not raised')
def test_create_invoice(self):
invoice_id = '101'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.PUT, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "101"
}
}
}""")
invoice = self.client.create_invoice(
invoice_id=invoice_id,
amount=Decimal('22.00'),
currency='RUB',
comment='Order #101',
user='tel:+79998887766',
lifetime=datetime(2017, 1, 2, 15, 22, 33),
)
self.assertEqual(invoice, {'invoice_id': '101'})
self.assertEqual(self.parse(httpretty.HTTPretty.last_request.body), {
'amount': '22.00',
'ccy': 'RUB',
'comment': 'Order+%23101',
'user': 'tel%3A%2B79998887766',
'lifetime': '2017-01-02T15%3A22%3A33',
})
def test_cancel_invoice(self):
invoice_id = '102'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.PATCH, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "102",
"status": "rejected"
}
}
}""")
invoice = self.client.cancel_invoice(invoice_id)
self.assertEqual(invoice, {
'invoice_id': '102',
'status': "rejected",
})
self.assertEqual(
httpretty.HTTPretty.last_request.body,
b'status=rejected'
)
def test_get_invoice(self):
invoice_id = '103'
url = self.client._get_invoice_url(invoice_id)
httpretty.register_uri(httpretty.GET, url, body="""{
"response": {
"result_code": 0,
"bill": {
"invoice_id": "103",
"status": "paid"
}
}
}""")
invoice = self.client.get_invoice(invoice_id)
self.assertEqual(invoice, {
'invoice_id': '103',
'status': "paid",
})
def test_create_refund(self):
invoice_id = '104'
refund_id = '1'
url = self.client._get_refund_url(invoice_id, refund_id)
httpretty.register_uri(httpretty.PUT, url, body="""{
"response": {
"result_code": 0,
"refund": {
"invoice_id": "104",
"refund_id": "1",
"amount": "100.00"
}
}
}""")
refund = self.client.create_refund(invoice_id, refund_id, Decimal('100.00'))
self.assertEqual(refund, {
'invoice_id': '104',
'refund_id': '1',
'amount': '100.00',
})
self.assertEqual(
httpretty.HTTPretty.last_request.body,
b'amount=100.00'
)
def test_get_refund(self):
invoice_id = '105'
refund_id = '1'
url = self.client._get_refund_url(invoice_id, refund_id)
httpretty.register_uri(httpretty.GET, url, body="""{
"response": {
"result_code": 0,
"refund": {
"invoice_id": "104",
"refund_id": "1",
"amount": "100.00",
"status": "fail"
}
}
}""")
refund = self.client.get_refund(invoice_id, refund_id)
self.assertEqual(refund, {
'invoice_id': '104',
'refund_id': '1',
'amount': '100.00',
'status': 'fail',
})
def test_get_invoice_url(self):
url = self.client.get_invoice_url('106')
expected = 'https://bill.qiwi.com/order/external/main.action?' + self.client._urlencode({
'shop': self.client.shop_id,
'transaction': '106',
})
self.assertEqual(url, expected)
url = self.client.get_invoice_url('107', True, 'http://google.com/success', 'http://google.com/fail', 'iframe', 'qw')
expected = 'https://bill.qiwi.com/order/external/main.action?' + self.client._urlencode({
'shop': self.client.shop_id,
'transaction': '107',
'iframe': True,
'success_url': 'http://google.com/success',
'fail_url': 'http://google.com/fail',
'target': 'iframe',
'pay_source': 'qw',
})
self.assertEqual(url, expected)
def test_check_auth(self):
self.assertFalse(self.client.check_auth(''))
self.assertFalse(self.client.check_auth(None))
self.assertFalse(self.client.check_auth(b'Basic MTExOjIyMg=='))
self.assertTrue(self.client.check_auth(b'Basic MTIzOnF3ZTEyMw=='))
def test_check_signature(self):
self.assertFalse(self.client.check_signature('', {}))
self.assertFalse(self.client.check_signature('', {'foo': 'bar'}))
self.assertFalse(self.client.check_signature(b'W18ltrPJoSb2N7AEM5Iik02wE10=', {'foo': '111'}))
self.assertTrue(self.client.check_signature(b'4C8pyw0rweDE0gZDYWT3E1B92aQ=', {
'foo': 'bar',
'commend': u'Заказ №102',
}))
| onrik/pyqiwi | tests/test_client.py | Python | mit | 8,897 | 0.001351 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clone Google Cloud Platform git repository.
"""
import textwrap
from googlecloudsdk.api_lib.source import git
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.credentials import store as c_store
class Clone(base.Command):
"""Clone project git repository in the current directory."""
detailed_help = {
'DESCRIPTION': """\
This command clones git repository for the currently active
Google Cloud Platform project into the specified folder in the
current directory.
""",
'EXAMPLES': textwrap.dedent("""\
To use the default Google Cloud repository for development, use the
following commands. We recommend that you use your project name as
TARGET_DIR to make it apparent which directory is used for which
project. We also recommend to clone the repository named 'default'
since it is automatically created for each project, and its
contents can be browsed and edited in the Developers Console.
$ gcloud init
$ gcloud source repos clone default TARGET_DIR
$ cd TARGET_DIR
... create/edit files and create one or more commits ...
$ git push origin master
"""),
}
@staticmethod
def Args(parser):
parser.add_argument(
'--dry-run',
action='store_true',
help=('If provided, prints the command that would be run to standard '
'out instead of executing it.'))
parser.add_argument(
'src',
metavar='REPOSITORY_NAME',
help=('Name of the repository. '
'Note: Google Cloud Platform projects generally have (if '
'created) a repository named "default"'))
parser.add_argument(
'dst',
metavar='DIRECTORY_NAME',
nargs='?',
help=('Directory name for the cloned repo. Defaults to the repository '
'name.'))
@c_exc.RaiseToolExceptionInsteadOf(git.Error, c_store.Error)
def Run(self, args):
"""Clone a GCP repository to the current directory.
Args:
args: argparse.Namespace, the arguments this command is run with.
Raises:
ToolException: on project initialization errors.
Returns:
The path to the new git repository.
"""
# Ensure that we're logged in.
c_store.Load()
project_id = properties.VALUES.core.project.Get(required=True)
project_repo = git.Git(project_id, args.src)
path = project_repo.Clone(destination_path=args.dst or args.src,
dry_run=args.dry_run)
if path and not args.dry_run:
log.status.write('Project [{prj}] repository [{repo}] was cloned to '
'[{path}].\n'.format(prj=project_id, path=path,
repo=project_repo.GetName()))
| flgiordano/netcash | +/google-cloud-sdk/lib/surface/source/repos/clone.py | Python | bsd-3-clause | 3,580 | 0.002235 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-24 00:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Menus', '0002_auto_20170824_0051'),
]
operations = [
migrations.CreateModel(
name='MenuItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('price', models.FloatField()),
('item_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Menus.MenuItemType')),
],
),
]
| AlexHagerman/code_louisville_django | LouiePizza/Menus/migrations/0003_menuitem.py | Python | mpl-2.0 | 780 | 0.002564 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1,6))
fib = 0
for num in it:
fib += num
self.assertEqual(__ , fib)
def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual(__, next(stages))
next(stages)
self.assertEqual(__, next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegexpMatches(err_msg, __)
# ------------------------------------------------------------------
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = list()
mapping = map(self.add_ten, seq)
self.assertNotEqual(list, type(mapping).__name__)
self.assertEqual(__, type(mapping).__name__)
        # In Python 3 built-in iterator funcs return iterable view objects
# instead of lists
for item in mapping:
mapped_seq.append(item)
self.assertEqual(__, mapped_seq)
        # Note: iterator methods actually return objects of iter type in
# python 3. In python 2 map() would give you a list.
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = list()
for item in filter(is_even, seq):
even_numbers.append(item)
self.assertEqual(__, even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
name = None
iterator = filter(is_big_name, names)
try:
name = next(iterator)
except StopIteration:
msg = 'Ran out of big names'
self.assertEqual(__, name)
# ------------------------------------------------------------------
def add(self,accum,item):
return accum + item
def multiply(self,accum,item):
return accum * item
def test_reduce_will_blow_your_mind(self):
import functools
# As of Python 3 reduce() has been demoted from a builtin function
# to the functools module.
result = functools.reduce(self.add, [2, 3, 4])
self.assertEqual(__, type(result).__name__)
# Reduce() syntax is same as Python 2
self.assertEqual(__, result)
result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(__, result2)
# Extra Credit:
# Describe in your own words what reduce does.
# ------------------------------------------------------------------
def test_creating_lists_with_list_comprehensions(self):
feast = ['lambs', 'sloths', 'orangutans', 'breakfast cereals', 'fruit bats']
comprehension = [delicacy.capitalize() for delicacy in feast]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, comprehension[2])
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1,5):
pass
self.assertEqual(__, num)
# ------------------------------------------------------------------
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
        # Ranges are an iterable sequence
result = map(self.add_ten, range(1,4))
self.assertEqual(__, list(result))
try:
# Files act like a collection of lines
file = open("example_file.txt")
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, file.readlines())
self.assertEqual(__, list(upcase_lines))
# NOTE: You can create your own collections that work with each,
# map, select, etc.
finally:
# Arg, this is ugly.
# We will figure out how to fix this later.
if file:
file.close() | caalle/Python-koans | python 3/koans/about_iteration.py | Python | mit | 4,469 | 0.010293 |
"""
:created: 24.04.2018 by Jens Diemer
:copyleft: 2018 by the django-cms-tools team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import os
from django.core.management import call_command
from cms.models import Page
# https://github.com/jedie/django-tools
from django_tools.unittest_utils.django_command import DjangoCommandMixin
from django_tools.unittest_utils.stdout_redirect import StdoutStderrBuffer
from django_tools.unittest_utils.unittest_base import BaseUnittestCase
# Django CMS Tools
import django_cms_tools_test_project
from django_cms_tools.fixture_helper.pages import CmsPageCreator
MANAGE_DIR = os.path.abspath(os.path.dirname(django_cms_tools_test_project.__file__))
class CmsPluginUnittestGeneratorTestCase(DjangoCommandMixin, BaseUnittestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
page, created = CmsPageCreator().create()
assert created
assert Page.objects.all().count() == 2 # draft + publish
def test_list_all_plugins(self):
with StdoutStderrBuffer() as buff:
call_command("list_page_by_plugin")
output = buff.get_output()
print(output)
self.assertEqual_dedent(output,
"""
No plugin-type given.
All CMS plugin types:
12 instances: 'djangocms_text_ckeditor.TextPlugin'
There are 1 plugins.
"""
)
def test_wrong_plugin_type(self):
with StdoutStderrBuffer() as buff:
call_command("list_page_by_plugin", "foobar_app.FooBarPlugin")
output = buff.get_output()
print(output)
self.assertEqual_dedent(output,
"""
ERROR: Given plugin type 'foobar_app.FooBarPlugin' doesn't exists!
Hint: Maybe you mean: 'FooBarPlugin' ?!?
All CMS plugin types:
12 instances: 'djangocms_text_ckeditor.TextPlugin'
There are 1 plugins.
"""
)
def test_TextPlugin(self):
with StdoutStderrBuffer() as buff:
call_command("list_page_by_plugin", "TextPlugin")
output = buff.get_output()
print(output)
self.assertIn("Found 12 'TextPlugin' plugins... 2 placeholders... 1 pages:", output)
self.assertIn("* CmsPageCreator in en", output)
self.assertIn("* /de/", output)
self.assertIn("* /en/", output)
self.assertIn("There are 2 app models with PlaceholderFields:", output)
self.assertIn("* StaticPlaceholder 'draft,public' - 0 total entries Skip", output)
self.assertIn("* LandingPageModel 'content' - 0 total entries Skip", output)
| jedie/django-cms-tools | django_cms_tools_tests/test_command_list_page_by_plugin.py | Python | gpl-3.0 | 2,800 | 0.005357 |
#!/usr/bin/python3.3
import somnTCP
import somnUDP
import somnPkt
import somnRouteTable
from somnLib import *
import struct
import queue
import threading
import socket
import time
import random
PING_TIMEOUT = 5
class somnData():
def __init__(self, ID, data):
self.nodeID = ID
self.data = data
class somnMesh(threading.Thread):
TCPTxQ = queue.Queue()
TCPRxQ = queue.Queue()
UDPRxQ = queue.Queue()
UDPAlive = threading.Event()
networkAlive = threading.Event()
routeTable = somnRouteTable.somnRoutingTable()
cacheId = [0,0,0,0]
cacheRoute = [0,0,0,0]
cacheNextIndex = 0
_mainLoopRunning = 0
enrolled = False
nodeID = 0
nodeIP = "127.0.0.1"
nodePort = 0
lastEnrollReq = 0
connCache = [('',0),('',0),('',0)]
_printCallbackFunction = None
def __init__(self, TxDataQ, RxDataQ, printCallback = None):
threading.Thread.__init__(self)
self.CommTxQ = TxDataQ
self.CommRxQ = RxDataQ
random.seed()
self.nodeID = random.getrandbits(16)
self.nextConnCacheIndex = 0
self._printCallbackFunction = printCallback
TCPTxQ = queue.Queue()
TCPRxQ = queue.Queue()
UDPRxQ = queue.Queue()
self.pendingRouteID = 0
self.pendingRoute = 0
self.pendingRouteHTL = 1
self.routeLock = threading.Lock()
self.routeBlock = threading.Event()
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingCache = [0,0,0,0,0]
self.pingLock = threading.Lock()
def printinfo(self, outputStr):
if self._printCallbackFunction == None:
print("{0:04X}: {1}".format(self.nodeID, outputStr))
else:
self._printCallbackFunction(self.nodeID, outputStr)
def enroll(self):
#self.printinfo("enrolling")
tcpRespTimeout = False
ACK = random.getrandbits(16)
enrollPkt = somnPkt.SomnPacket()
enrollPkt.InitEmpty("NodeEnrollment")
enrollPkt.PacketFields['ReqNodeID'] = self.nodeID
enrollPkt.PacketFields['ReqNodeIP'] = IP2Int(self.nodeIP)
enrollPkt.PacketFields['ReqNodePort'] = self.nodePort
enrollPkt.PacketFields['AckSeq'] = ACK
udp = somnUDP.somnUDPThread(enrollPkt, self.UDPRxQ, self.networkAlive, self.UDPAlive)
udp.start()
while not tcpRespTimeout and self.routeTable.getNodeCount() < 3:
try:
enrollResponse = self.TCPRxQ.get(timeout = 1)
except queue.Empty:
tcpRespTimeout = True
break
respNodeID = enrollResponse.PacketFields['RespNodeID']
respNodeIP = enrollResponse.PacketFields['RespNodeIP']
respNodePort = enrollResponse.PacketFields['RespNodePort']
#self.printinfo("Got enrollment response from {0:04X}".format(respNodeID))
self.routeTable.getNodeIndexFromId(respNodeID)
if self.routeTable.getNodeIndexFromId(respNodeID) > 0:
self.TCPRxQ.task_done()
continue
elif enrollResponse.PacketType == somnPkt.SomnPacketType.NodeEnrollment and enrollResponse.PacketFields['AckSeq'] == ACK:
if self.routeTable.addNode(respNodeID, Int2IP(respNodeIP), respNodePort) < 0:
self.printinfo("Something went wrong in adding the node")
#TODO: Can we make this an exception?
packedEnrollResponse = somnPkt.SomnPacketTxWrapper(enrollResponse, Int2IP(respNodeIP),respNodePort)
self.TCPTxQ.put(packedEnrollResponse)
self.enrolled = True
self.printinfo("Enrolled to: {0:04X}".format(respNodeID))
self.TCPRxQ.task_done()
#break
return udp
def run(self):
socket.setdefaulttimeout(5)
self.networkAlive.set()
Rx = somnTCP.startSomnRx(self.nodeIP, self.nodePort, self.networkAlive, self.TCPRxQ)
Tx = somnTCP.startSomnTx(self.networkAlive, self.TCPTxQ)
while True:
if Rx.bound and Tx.bound: break
self.nodePort = Rx.port
#self.printinfo("Port: {0}".format(self.nodePort))
enrollAttempts = 0
while not self.enrolled:
self.UDPAlive.set()
UDP = self.enroll()
if self.enrolled:
break
elif enrollAttempts < 2:
self.UDPAlive.clear()
UDP.join()
enrollAttempts = enrollAttempts + 1
else:
self.enrolled = True
self.printinfo("Enrolled as Alpha Node")
break
#start main loop to handle incoming queueus
self._mainLoopRunning = 1
rxThread = threading.Thread(target = self._handleTcpRx)
rxThread.start()
self.pingTimer.start()
while self._mainLoopRunning:
self._handleUdpRx()
self._handleTx()
# Do a bunch of stuff
try:
self.pingTimer.cancel()
except:
pass
self.networkAlive.clear()
UDP.networkAlive = False
UDP.join()
Rx.join()
Tx.join()
self.TCPRxQ.join()
self.TCPTxQ.join()
self.CommTxQ.join()
self.CommRxQ.join()
def _pingRouteTable(self):
# check if previous route requests were returned
self.pingLock.acquire()
for idx, node in enumerate(self.pingCache):
if node != 0:
# remove nodes where no response was returned
self.printinfo("Dropping Node: {0:04X}".format(node))
self.routeTable.removeNodeByIndex(self.routeTable.getNodeIndexFromId(node))
# unset returned route cache
self.pingCache[idx] = 0
self.pingLock.release()
# send a RouteReqeust for node 0xFFFF to each entry in the routing table
for node in self.routeTable.getConnectedNodes():
nodeIndex = self.routeTable.getNodeIndexFromId(node)
self.pingLock.acquire()
self.pingCache[nodeIndex - 1] = node
self.pingLock.release()
pingPkt = somnPkt.SomnPacket()
pingPkt.InitEmpty(somnPkt.SomnPacketType.RouteRequest)
pingPkt.PacketFields['SourceID'] = self.nodeID
pingPkt.PacketFields['LastNodeID'] = self.nodeID
pingPkt.PacketFields['DestID'] = 0xFFFF
pingPkt.PacketFields['HTL'] = 1
TxInfo = self.routeTable.getNodeInfoByIndex(nodeIndex)
TxPkt = somnPkt.SomnPacketTxWrapper(pingPkt, TxInfo.nodeAddress, TxInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingTimer.start()
def _handleTx(self):
#print("Handle TX")
try:
TxData = self.CommTxQ.get(False)
except:
return
#TODO: Tx Data coming from the Comm Layer needs to packetized
route = 0
#check cache for route to dest ID
if TxData.nodeID in self.cacheId:
route = self.cacheRoute[self.cacheId.index(TxData.nodeID)]
else:
route = self._getRoute(TxData.nodeID)
#TODO Lock around this
self.pendingRouteID = 0
self.pendingRouteHTL = 1
if route == 0: # no valid rout found
self.printinfo(" *** NO ROUTE FOUND *** ")
return
# inset path into cache, for now this is a FIFO eviction policy, should upgrade to an LFU policy
self.cacheId[self.cacheNextIndex] = TxData.nodeID
self.cacheRoute[self.cacheNextIndex] = route
self.cacheNextIndex = self.cacheNextIndex + 1
if self.cacheNextIndex > 3:
self.cacheNextIndex = 0
#pop first step in route from route string
nextRoute, newRoute = self._popRoute(route)
#nextRouteStep = newRoute[0]
#set route string in packet
TxPkt = somnPkt.SomnPacket()
TxPkt.InitEmpty(somnPkt.SomnPacketType.Message)
TxPkt.PacketFields['SourceID'] = self.nodeID
TxPkt.PacketFields['DestID'] = TxData.nodeID
TxPkt.PacketFields['Message'] = TxData.data
TxPkt.PacketFields['Route'] = newRoute
#create wrapper packet to send to next step in route
TxNodeInfo = self.routeTable.getNodeInfoByIndex(nextRoute)
if TxNodeInfo is None:
self.cacheRoute[self.cacheId.index(TxData.nodeID)] = 0
self.CommTxQ.task_done()
self.CommTxQ.put(TxData)
return
txPktWrapper = somnPkt.SomnPacketTxWrapper(TxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
#send packet to TX layer
self.TCPTxQ.put(txPktWrapper)
self.CommTxQ.task_done()
def _handleTcpRx(self):
while self._mainLoopRunning:
try:
RxPkt = self.TCPRxQ.get(False)
except:
continue
pktType = RxPkt.PacketType
#self.printinfo("Rx'd TCP packet of type: {0}".format(pktType))
if pktType == somnPkt.SomnPacketType.NodeEnrollment:
#print("Enrollment Packet Received")
self.pingTimer.cancel()
# There is a potential for stale enroll responses from enrollment phase, drop stale enroll responses
if RxPkt.PacketFields['ReqNodeID'] == self.nodeID: continue
# We need to disable a timer, enroll the node, if timer has expired, do nothing
for idx, pendingEnroll in enumerate(self.connCache):
if (RxPkt.PacketFields['ReqNodeID'], RxPkt.PacketFields['AckSeq']) == pendingEnroll[0]:
# disable timer
pendingEnroll[1].cancel()
# clear connCache entry
self.connCache[idx] = (('',0),)
# add node
self.routeTable.addNode(RxPkt.PacketFields['ReqNodeID'], Int2IP(RxPkt.PacketFields['ReqNodeIP']), RxPkt.PacketFields['ReqNodePort'])
#self.printinfo("Enrolled Node:{0:04X} ".format(RxPkt.PacketFields['ReqNodeID']))
break
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingTimer.start()
elif pktType == somnPkt.SomnPacketType.Message:
#print("({0:X}) Message Packet Received".format(self.nodeID))
# Check if we are the dest node
if RxPkt.PacketFields['DestID'] == self.nodeID:
self.printinfo("{0:04X} -> {1:04X}: {2}".format(RxPkt.PacketFields['SourceID'], self.nodeID, RxPkt.PacketFields['Message']))
# strip headers before pushing onto queue
commData = somnData(RxPkt.PacketFields['SourceID'], RxPkt.PacketFields['Message'])
self.CommRxQ.put(commData)
# otherwise, propagate the message along the route
elif not RxPkt.PacketFields['Route']:
# generate bad_route event
print("nothing to see here, move along folks")
else:
nextHop, RxPkt.PacketFields['Route'] = self._popRoute(RxPkt.PacketFields['Route'])
TxNodeInfo = self.routeTable.getNodeInfoByIndex(nextHop)
if TxNodeInfo is None:
# this should generate a bad route pacekt
self.printinfo("Invalid Route Event")
self.TCPRxQ.task_done()
continue
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
elif pktType == somnPkt.SomnPacketType.RouteRequest:
#print("Route Req Packet Received")
if RxPkt.PacketFields['SourceID'] == self.nodeID:
# this our route request, deal with it.
if self.pendingRouteID == RxPkt.PacketFields['DestID']:
self.routeLock.acquire()
#self.printinfo("Servicing Returned Route for {0:04X}".format(self.pendingRouteID))
if RxPkt.PacketFields['Route'] != 0:
self.pendingRoute = self._pushRoute(RxPkt.PacketFields['Route'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
self.routeBlock.set()
self.routeLock.release()
self.TCPRxQ.task_done()
continue
elif RxPkt.PacketFields['HTL'] < 10:
self.routeLock.release()
self.pendingRouteHTL = self.pendingRouteHTL + 1
RxPkt.PacketFields['HTL'] = self.pendingRouteHTL
RxPkt.PacketFields['ReturnRoute'] = 0
TxNodeInfo = self.routeTable.getNodeInfoByIndex(self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
elif RxPkt.PacketFields['DestID'] == 0xFFFF:
self.pingLock.acquire()
for idx, node in enumerate(self.pingCache):
if node == RxPkt.PacketFields['LastNodeID']:
self.pingCache[idx] = 0
self.pingLock.release()
self.TCPRxQ.task_done()
continue
else: # this route has been served
#self.routeLock.release()
#RxPkt.Reset()
self.TCPRxQ.task_done()
continue
# if route field is -0-, then it is an in-progress route request
# otherwise it is a returning route request
elif not RxPkt.PacketFields['Route']:
# check if we have the destid in our routeTable
idx = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['DestID'])
if idx < 0: # Continue route request
if RxPkt.PacketFields['HTL'] > 1:
#print("got multi Hop route request")
RxPkt.PacketFields['ReturnRoute'] = self._pushRoute(RxPkt.PacketFields['ReturnRoute'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['HTL'] = RxPkt.PacketFields['HTL'] - 1
lastID = RxPkt.PacketFields['LastNodeID']
RxPkt.PacketFields['LastNodeID'] = self.nodeID
#transmit to all nodes, except the transmitting node
i = 1
while i <= self.routeTable.getNodeCount():
TxNodeInfo = self.routeTable.getNodeInfoByIndex(i)
i = i + 1
if not TxNodeInfo.nodeID == lastID:
#continue
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
elif RxPkt.PacketFields['HTL'] == 1: # Last Node in query path
RxPkt.PacketFields['HTL'] = RxPkt.PacketFields['HTL'] - 1
TxNodeInfo = self.routeTable.getNodeInfoByIndex(self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
else:
#if RxPkt.PacketFields['ReturnRoute'] == 0:
# TxIndex = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['SourceID'])
#else:
TxIndex, RxPkt.PacketFields['ReturnRoute'] = self._popRoute(RxPkt.PacketFields['ReturnRoute'])
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxNodeInfo = self.routeTable.getNodeInfoByIndex(TxIndex)
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
else: # Dest Node is contained in route table
RxPkt.PacketFields['HTL'] = 0
RxPkt.PacketFields['Route'] = self._pushRoute(RxPkt.PacketFields['Route'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['DestID']))
#if RxPkt.PacketFields['ReturnRoute'] == 0: # Route did not go past HTL = 1
# TxIndex = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['SourceID'])
#else:
# TxIndex, RxPkt.PacketFields['ReturnRoute'] = self._popRoute(RxPkt.PacketFields['ReturnRoute'])
TxIndex = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID'])
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxNodeInfo = self.routeTable.getNodeInfoByIndex(TxIndex)
#print("Dest Node Found: ",RxPkt.PacketFields)
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
else: # route path is non-empty
RxPkt.PacketFields['Route'] = self._pushRoute(RxPkt.PacketFields['Route'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['LastNodeID'] = self.nodeID
#print("Route Non Empty: ",RxPkt.PacketFields)
TxIndex, RxPkt.PacketFields['ReturnRoute'] = self._popRoute(RxPkt.PacketFields['ReturnRoute'])
TxNodeInfo = self.routeTable.getNodeInfoByIndex(TxIndex)
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
elif pktType == somnPkt.SomnPacketType.BadRoute:
print("Bad Route Packet Received")
self.TCPRxQ.task_done()
continue
elif pktType == somnPkt.SomnPacketType.AddConnection:
for pendingConn in self.connCache:
if (RxPkt.PacketFields['RespNodeID'], RxPkt.PacketFields['AckSeq']) == pendingConn[1]: # This is response
# cancel timer
pendingConn[2].cancel()
# add node
            self.routeTable.addNode(RxPkt.PacketFields['RespNodeID'], Int2IP(RxPkt.PacketFields['RespNodeIP']), RxPkt.PacketFields['RespNodePort'])
# send AddConnection ACK packet
packedTxPkt = somnPkt.SomnPacketTxWrapper(somnPkt.SomnPacket(RxPkt.ToBytes()),Int2IP(RxPkt.PacketFields['RespNodeIP']), RxPkt.PacketFields['RespNodePort'])
self.TCPTxQ.put(packedTxPkt)
continue
# This is an incoming request
# generate a TCP Tx packet, start a timer, store ReqNodeID and timer object
TxPkt = somnPkt.SomnPacket(RxPkt.ToBytes())
        TxPkt.PacketFields['RespNodeID'] = self.nodeID
        TxPkt.PacketFields['RespNodeIP'] = IP2Int(self.nodeIP)
        TxPkt.PacketFields['RespNodePort'] = self.nodePort
        connCacheTag = (TxPkt.PacketFields['ReqNodeID'], TxPkt.PacketFields['AckSeq'])
        TxTimer = threading.Timer(5.0, self._connTimeout, connCacheTag)
        self.connCache[self.nextConnCacheIndex] = (connCacheTag, TxTimer)
        self.nextConnCacheIndex = self.nextConnCacheIndex + 1
        if self.nextConnCacheIndex >= len(self.connCache):
          self.nextConnCacheIndex = 0
print("Add Conn Packet Received")
elif pktType == somnPkt.SomnPacketType.DropConnection:
print("Drop Conn Packet Received")
else:
#RxPkt.Reset()
self.TCPRxQ.task_done()
continue
#RxPkt.Reset()
self.TCPRxQ.task_done()
continue
def _handleUdpRx(self):
#print("handleUDP")
try:
enrollPkt = self.UDPRxQ.get(False)
except:
return
enrollRequest = somnPkt.SomnPacket(enrollPkt)
self.UDPRxQ.task_done()
#ignore incoming enrollment requests from self
if enrollRequest.PacketFields['ReqNodeID'] == self.nodeID:
return
#self.printinfo("Got enrollment request from {0:04X}".format(enrollRequest.PacketFields['ReqNodeID']))
if self.routeTable.getNodeIndexFromId(enrollRequest.PacketFields['ReqNodeID']) > 0:
#self.printinfo("Node already connected, ignoring")
#self.UDPRxQ.task_done()
return
if self.routeTable.getAvailRouteCount() > 4 or (self.lastEnrollReq == enrollRequest.PacketFields['ReqNodeID'] and self.routeTable.getAvailRouteCount() > 0):
enrollRequest.PacketFields['RespNodeID'] = self.nodeID
enrollRequest.PacketFields['RespNodeIP'] = IP2Int(self.nodeIP)
enrollRequest.PacketFields['RespNodePort'] = self.nodePort
packedEnrollResponse = somnPkt.SomnPacketTxWrapper(enrollRequest, Int2IP(enrollRequest.PacketFields['ReqNodeIP']), enrollRequest.PacketFields['ReqNodePort'])
connCacheTag = (enrollRequest.PacketFields['ReqNodeID'], enrollRequest.PacketFields['AckSeq'])
TxTimer = threading.Timer(10.0, self._enrollTimeout, connCacheTag)
self.connCache[self.nextConnCacheIndex] = (connCacheTag, TxTimer)
self.nextConnCacheIndex = self.nextConnCacheIndex + 1
if self.nextConnCacheIndex >= len(self.connCache): self.nextConnCacheIndex = 0
#print("------- START UDP LISTEN -----------")
#print(self.routeTable.getAvailRouteCount())
#print("Responded to Enroll Request")
#print("---------- END UDP LISTEN-----------")
self.TCPTxQ.put(packedEnrollResponse)
TxTimer.start()
else:
self.lastEnrollReq = enrollRequest.PacketFields['ReqNodeID']
#self.UDPRxQ.task_done()
return
#get route from this node to dest node
def _getRoute(self, destId):
#first, check if the dest is a neighboring node
routeIndex = self.routeTable.getNodeIndexFromId(destId)
if routeIndex != -1:
return routeIndex & 0x7
#unknown route (discover from mesh)
routePkt = somnPkt.SomnPacket()
routePkt.InitEmpty(somnPkt.SomnPacketType.RouteRequest)
routePkt.PacketFields['SourceID'] = self.nodeID
routePkt.PacketFields['LastNodeID'] = self.nodeID
routePkt.PacketFields['RouteRequestCode'] = 1 #random.getrandbits(16)
routePkt.PacketFields['DestID'] = destId
routePkt.PacketFields['HTL'] = 1
self.pendingRouteID = destId
self.pendingRoute = 0
t = threading.Timer(10, self._routeTimeout)
idx = 1
while idx <= self.routeTable.getNodeCount():
TxNodeInfo = self.routeTable.getNodeInfoByIndex(idx)
#print("getRoute Packet Type: ", routePkt.PacketFields)
TxPkt = somnPkt.SomnPacketTxWrapper(routePkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
idx = idx + 1
t.start()
#self.printinfo("Waiting for route")
self.routeBlock.wait()
self.routeBlock.clear()
#self.printinfo("Waiting Done")
try:
t.cancel()
except:
pass
return self.pendingRoute
def _routeTimeout(self):
self.routeLock.acquire()
if not self.routeBlock.isSet():
#self.printinfo("routeTimer Activate")
self.pendingRoute = 0
self.pendingRouteID = 0
self.routeBlock.set()
self.routeLock.release()
#self.printinfo("routeTimer exit")
def _popRoute(self, route):
firstStep = route & 0x7
newRoute = route >> 3
return (firstStep, newRoute)
def _pushRoute(self, route, nextStep):
newRoute = (route << 3) | (nextStep & 0x7)
return newRoute
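  # Worked example of the route encoding above (a sketch inferred from the
  # 3-bit shifts): each hop is a routing-table index in 1..7 packed into
  # successive octal digits, e.g. _pushRoute(0, 3) -> 0o3,
  # _pushRoute(0o3, 5) -> 0o35, and _popRoute(0o35) -> (5, 0o3), so hops pop
  # in last-pushed-first order.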
def _enrollTimeout(self, nodeID, ACK):
for idx, pendingEnroll in enumerate(self.connCache):
if (nodeID, ACK) == pendingEnroll[0]:
self.connCache[idx] = (('',0),)
break
return
def _connTimeout(self, nodeIP, nodePort):
for idx, connAttempt in enumerate(self.connCache):
if (nodeIP, nodePort) == connAttempt[0]:
self.connCache[idx] = (('',0),)
break
return
def addConnection(self, DestNodeID):
    addConnPkt = somnPkt.SomnPacket()
    addConnPkt.InitEmpty(somnPkt.SomnPacketType.AddConnection)
    addConnPkt.PacketFields['ReqNodeID'] = self.nodeID
    addConnPkt.PacketFields['ReqNodeIP'] = IP2Int(self.nodeIP)
    addConnPkt.PacketFields['ReqNodePort'] = self.nodePort
    addConnPkt.PacketFields['AckSeq'] = random.getrandbits(16)
route = self._getRoute(DestNodeID)
if route > 0:
addConnPkt.PacketFields['Route'] = route
else:
self.printinfo("AddConnection Failed to get route")
def CreateNode(printCallback = None):
mesh = somnMesh(queue.Queue(), queue.Queue(), printCallback)
return mesh
if __name__ == "__main__":
mesh = CreateNode()
mesh.start()
| squidpie/somn | src/somnMesh.py | Python | mit | 23,564 | 0.018673 |
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.utils.encoding import smart_text
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.utils.http import urlquote
from unittest import mock
from pyquery import PyQuery
from olympia import amo
from olympia.access import acl
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon
from olympia.amo.templatetags.jinja_helpers import user_media_url
from olympia.amo.tests import TestCase, addon_factory
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import urlencode, urlparams
from olympia.files.models import File
from olympia.users.models import UserProfile
def decode_http_header_value(value):
"""
Reverse the encoding that django applies to bytestrings in
HttpResponse._convert_to_charset(). Needed to test header values that we
explicitly pass as bytes such as filenames for content-disposition or
xsendfile headers.
"""
return value.encode('latin-1').decode('utf-8')
class TestViews(TestCase):
def setUp(self):
super(TestViews, self).setUp()
self.addon = addon_factory(
slug=u'my-addôn', file_kw={'size': 1024},
version_kw={'version': '1.0'})
self.version = self.addon.current_version
self.addon.current_version.update(created=self.days_ago(3))
def test_version_update_info(self):
self.version.release_notes = {
'en-US': u'Fix for an important bug',
'fr': u'Quelque chose en français.\n\nQuelque chose d\'autre.'
}
self.version.save()
file_ = self.version.files.all()[0]
file_.update(platform=amo.PLATFORM_WIN.id)
# Copy the file to create a new one attached to the same version.
# This tests https://github.com/mozilla/addons-server/issues/8950
file_.pk = None
file_.platform = amo.PLATFORM_MAC.id
file_.save()
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
# pyquery is annoying to use with XML and namespaces. Use the HTML
# parser, but do check that xmlns attribute is present (required by
# Firefox for the notes to be shown properly).
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == 'Fix for an important bug'
# Test update info in another language.
with self.activate(locale='fr'):
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 200
assert response['Content-Type'] == 'application/xhtml+xml'
assert b'<br/>' in response.content, (
'Should be using XHTML self-closing tags!')
doc = PyQuery(response.content, parser='html')
assert doc('html').attr('xmlns') == 'http://www.w3.org/1999/xhtml'
assert doc('p').html() == (
u"Quelque chose en français.<br/><br/>Quelque chose d'autre.")
def test_version_update_info_legacy_redirect(self):
response = self.client.get('/versions/updateInfo/%s' % self.version.id,
follow=True)
url = reverse('addons.versions.update_info',
args=(self.version.addon.slug, self.version.version))
self.assert3xx(response, url, 302)
def test_version_update_info_legacy_redirect_deleted(self):
self.version.delete()
response = self.client.get(
'/en-US/firefox/versions/updateInfo/%s' % self.version.id)
assert response.status_code == 404
def test_version_update_info_no_unlisted(self):
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(
reverse('addons.versions.update_info',
args=(self.addon.slug, self.version.version)))
assert response.status_code == 404
class TestDownloadsBase(TestCase):
fixtures = ['base/addon_5299_gcal', 'base/users']
def setUp(self):
super(TestDownloadsBase, self).setUp()
self.addon = Addon.objects.get(id=5299)
self.file = File.objects.get(id=33046)
self.file_url = reverse('downloads.file', args=[self.file.id])
self.latest_url = reverse('downloads.latest', args=[self.addon.slug])
def assert_served_by_host(self, response, host, file_=None):
if not file_:
file_ = self.file
assert response.status_code == 302
assert response.url == (
urlparams('%s%s/%s' % (
host, self.addon.id, urlquote(file_.filename)
), filehash=file_.hash))
assert response['X-Target-Digest'] == file_.hash
def assert_served_internally(self, response, guarded=True):
assert response.status_code == 200
file_path = (self.file.guarded_file_path if guarded else
self.file.file_path)
assert response[settings.XSENDFILE_HEADER] == file_path
def assert_served_locally(self, response, file_=None, attachment=False):
path = user_media_url('addons')
if attachment:
path += '_attachments/'
self.assert_served_by_host(response, path, file_)
def assert_served_by_cdn(self, response, file_=None):
assert response.url.startswith(settings.MEDIA_URL)
assert response.url.startswith('http')
self.assert_served_by_host(response, user_media_url('addons'), file_)
class TestDownloadsUnlistedVersions(TestDownloadsBase):
def setUp(self):
super(TestDownloadsUnlistedVersions, self).setUp()
self.make_addon_unlisted(self.addon)
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_returns_404(self):
"""File downloading isn't allowed for unlisted addons."""
assert self.client.get(self.file_url).status_code == 404
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: True)
def test_download_for_unlisted_addon_owner(self):
"""File downloading is allowed for addon owners."""
self.assert_served_internally(self.client.get(self.file_url), False)
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: True)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_reviewer(self):
"""File downloading isn't allowed for reviewers."""
assert self.client.get(self.file_url).status_code == 404
assert self.client.get(self.latest_url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_unlisted_reviewer(self):
"""File downloading is allowed for unlisted reviewers."""
self.assert_served_internally(self.client.get(self.file_url), False)
assert self.client.get(self.latest_url).status_code == 404
class TestDownloads(TestDownloadsBase):
def test_file_404(self):
r = self.client.get(reverse('downloads.file', args=[234]))
assert r.status_code == 404
def test_public(self):
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_APPROVED
self.assert_served_by_cdn(self.client.get(self.file_url))
def test_public_addon_unreviewed_file(self):
self.file.status = amo.STATUS_AWAITING_REVIEW
self.file.save()
self.assert_served_locally(self.client.get(self.file_url))
def test_unreviewed_addon(self):
self.addon.status = amo.STATUS_NULL
self.addon.save()
self.assert_served_locally(self.client.get(self.file_url))
def test_type_attachment(self):
self.assert_served_by_cdn(self.client.get(self.file_url))
url = reverse('downloads.file', args=[self.file.id, 'attachment'])
self.assert_served_locally(self.client.get(url), attachment=True)
def test_trailing_filename(self):
url = self.file_url + self.file.filename
self.assert_served_by_cdn(self.client.get(url))
def test_null_datestatuschanged(self):
self.file.update(datestatuschanged=None)
self.assert_served_locally(self.client.get(self.file_url))
def test_unicode_url(self):
self.file.update(filename=u'图像浏览器-0.5-fx.xpi')
self.assert_served_by_cdn(self.client.get(self.file_url))
class TestDisabledFileDownloads(TestDownloadsBase):
def test_admin_disabled_404(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.get(self.file_url).status_code == 404
def test_user_disabled_404(self):
self.addon.update(disabled_by_user=True)
assert self.client.get(self.file_url).status_code == 404
def test_file_disabled_anon_404(self):
self.file.update(status=amo.STATUS_DISABLED)
assert self.client.get(self.file_url).status_code == 404
def test_file_disabled_unprivileged_404(self):
assert self.client.login(email='regular@mozilla.com')
self.file.update(status=amo.STATUS_DISABLED)
assert self.client.get(self.file_url).status_code == 404
def test_file_disabled_ok_for_author(self):
self.file.update(status=amo.STATUS_DISABLED)
assert self.client.login(email='g@gmail.com')
self.assert_served_internally(self.client.get(self.file_url))
def test_file_disabled_ok_for_reviewer(self):
self.file.update(status=amo.STATUS_DISABLED)
self.client.login(email='reviewer@mozilla.com')
self.assert_served_internally(self.client.get(self.file_url))
def test_file_disabled_ok_for_admin(self):
self.file.update(status=amo.STATUS_DISABLED)
self.client.login(email='admin@mozilla.com')
self.assert_served_internally(self.client.get(self.file_url))
def test_admin_disabled_ok_for_author(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.login(email='g@gmail.com')
self.assert_served_internally(self.client.get(self.file_url))
def test_admin_disabled_ok_for_admin(self):
self.addon.update(status=amo.STATUS_DISABLED)
self.client.login(email='admin@mozilla.com')
self.assert_served_internally(self.client.get(self.file_url))
def test_user_disabled_ok_for_author(self):
self.addon.update(disabled_by_user=True)
assert self.client.login(email='g@gmail.com')
self.assert_served_internally(self.client.get(self.file_url))
def test_user_disabled_ok_for_admin(self):
self.addon.update(disabled_by_user=True)
self.client.login(email='admin@mozilla.com')
self.assert_served_internally(self.client.get(self.file_url))
class TestUnlistedDisabledFileDownloads(TestDisabledFileDownloads):
def setUp(self):
super(TestDisabledFileDownloads, self).setUp()
self.make_addon_unlisted(self.addon)
self.grant_permission(
UserProfile.objects.get(email='reviewer@mozilla.com'),
'Addons:ReviewUnlisted')
class TestDownloadsLatest(TestDownloadsBase):
def setUp(self):
super(TestDownloadsLatest, self).setUp()
self.platform = 5
def test_404(self):
url = reverse('downloads.latest', args=[123])
assert self.client.get(url).status_code == 404
def test_type_none(self):
r = self.client.get(self.latest_url)
assert r.status_code == 302
url = '%s?%s' % (self.file.filename,
urlencode({'filehash': self.file.hash}))
assert r['Location'].endswith(url), r['Location']
def test_success(self):
assert self.addon.current_version
self.assert_served_by_cdn(self.client.get(self.latest_url))
def test_platform(self):
# We still match PLATFORM_ALL.
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 5})
self.assert_served_by_cdn(self.client.get(url))
# And now we match the platform in the url.
self.file.platform = self.platform
self.file.save()
self.assert_served_by_cdn(self.client.get(url))
# But we can't match platform=3.
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 3})
assert self.client.get(url).status_code == 404
def test_type(self):
url = reverse('downloads.latest', kwargs={'addon_id': self.addon.slug,
'type': 'attachment'})
self.assert_served_locally(self.client.get(url), attachment=True)
def test_platform_and_type(self):
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 5,
'type': 'attachment'})
self.assert_served_locally(self.client.get(url), attachment=True)
def test_trailing_filename(self):
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 5,
'type': 'attachment'})
url += self.file.filename
self.assert_served_locally(self.client.get(url), attachment=True)
def test_platform_multiple_objects(self):
f = File.objects.create(platform=3, version=self.file.version,
filename='unst.xpi', status=self.file.status)
url = reverse('downloads.latest',
kwargs={'addon_id': self.addon.slug, 'platform': 3})
self.assert_served_locally(self.client.get(url), file_=f)
class TestDownloadSource(TestCase):
fixtures = ['base/addon_3615', 'base/admin']
def setUp(self):
super(TestDownloadSource, self).setUp()
self.addon = Addon.objects.get(pk=3615)
# Make sure non-ascii is ok.
self.addon.update(slug=u'crosswarpex-확장')
self.version = self.addon.current_version
tdir = temp.gettempdir()
self.source_file = temp.NamedTemporaryFile(suffix=".zip", dir=tdir)
self.source_file.write(b'a' * (2 ** 21))
self.source_file.seek(0)
self.version.source = DjangoFile(self.source_file)
self.version.save()
self.filename = os.path.basename(self.version.source.path)
self.user = UserProfile.objects.get(email="del@icio.us")
self.group = Group.objects.create(
name='Editors BinarySource',
rules='Editors:BinarySource'
)
self.url = reverse('downloads.source', args=(self.version.pk, ))
def test_owner_should_be_allowed(self):
self.client.login(email=self.user.email)
response = self.client.get(self.url)
assert response.status_code == 200
assert response[settings.XSENDFILE_HEADER]
assert 'Content-Disposition' in response
filename = smart_text(self.filename)
content_disposition = response['Content-Disposition']
assert filename in decode_http_header_value(content_disposition)
expected_path = smart_text(self.version.source.path)
xsendfile_header = decode_http_header_value(
response[settings.XSENDFILE_HEADER])
assert xsendfile_header == expected_path
def test_anonymous_should_not_be_allowed(self):
response = self.client.get(self.url)
assert response.status_code == 404
def test_deleted_version(self):
self.version.delete()
GroupUser.objects.create(user=self.user, group=self.group)
self.client.login(email=self.user.email)
response = self.client.get(self.url)
assert response.status_code == 404
def test_group_binarysource_should_be_allowed(self):
GroupUser.objects.create(user=self.user, group=self.group)
self.client.login(email=self.user.email)
response = self.client.get(self.url)
assert response.status_code == 200
assert response[settings.XSENDFILE_HEADER]
assert 'Content-Disposition' in response
filename = smart_text(self.filename)
content_disposition = response['Content-Disposition']
assert filename in decode_http_header_value(content_disposition)
expected_path = smart_text(self.version.source.path)
xsendfile_header = decode_http_header_value(
response[settings.XSENDFILE_HEADER])
assert xsendfile_header == expected_path
def test_no_source_should_go_in_404(self):
self.version.source = None
self.version.save()
response = self.client.get(self.url)
assert response.status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_returns_404(self):
"""File downloading isn't allowed for unlisted addons."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: True)
def test_download_for_unlisted_addon_owner(self):
"""File downloading is allowed for addon owners."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 200
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: True)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_reviewer(self):
"""File downloading isn't allowed for reviewers."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 404
@mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
@mock.patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
@mock.patch.object(acl, 'check_addon_ownership',
lambda *args, **kwargs: False)
def test_download_for_unlisted_addon_unlisted_reviewer(self):
"""File downloading is allowed for unlisted reviewers."""
self.make_addon_unlisted(self.addon)
assert self.client.get(self.url).status_code == 200
| psiinon/addons-server | src/olympia/versions/tests/test_views.py | Python | bsd-3-clause | 19,783 | 0 |
#!/usr/bin/env python3
"""
MIT License
Copyright (c) 2016 Steven P. Crain, SUNY Plattsburgh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""First, read in the office stats.
STATS Office Precincts VoteLimit Votes Voters
STATS Lammersville Joint USD Governing Board Members 1 3 150 50
"""
stats=dict()
fstats=open("offices.stats","r")
for line in fstats:
if line[-1]=="\n":
line=line[:-1]
line=line.split("\t")
stats[line[1]]=line[1:]+[0,]
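    # stats[office] = [Office, Precincts, VoteLimit, Votes, Voters, 0]; the
    # trailing 0 accumulates the voters available across precincts (tallied below).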
fstats.close()
fin=open("precincts.tsv", "r")
"""Read the header line containing the names of the offices."""
line=fin.readline()
if(line[-1]=='\n'):
line=line[:-1]
offices=line.split("\t")
"""Read the office assignments for each precinct."""
for line in fin:
if line[-1]=="\n":
line=line[:-1]
line=line.split("\t")
for i in range(1,len(line)):
if line[i]:
"""This precinct votes for this office, so tally the number of
voters that we have available.
"""
stats[offices[i]][5]+=int(line[0])
fin.close()
for office in offices[1:]:
if float(stats[office][4])>stats[office][5]:
print(stats[office])
| scrain777/MassivelyUnreliableSystems | Voting/Utilities/check.py | Python | mit | 2,156 | 0.009276 |
#! python
from carousel.core.data_sources import DataSource, DataParameter
from carousel.core.outputs import Output, OutputParameter
from carousel.core.formulas import Formula, FormulaParameter
from carousel.core.calculations import Calc, CalcParameter
from carousel.core.simulations import Simulation, SimParameter
from carousel.core.models import Model, ModelParameter
from carousel.contrib.readers import ArgumentReader
from carousel.core import UREG
import numpy as np
import os
DATA = {'PythagoreanData': {'adjacent_side': 3.0, 'opposite_side': 4.0}}
class PythagoreanData(DataSource):
adjacent_side = DataParameter(units='cm', uncertainty=1.0)
opposite_side = DataParameter(units='cm', uncertainty=1.0)
def __prepare_data__(self):
for k, v in self.parameters.iteritems():
self.uncertainty[k] = {k: v['uncertainty'] * UREG.percent}
class Meta:
data_cache_enabled = False
data_reader = ArgumentReader
class PythagoreanOutput(Output):
hypotenuse = OutputParameter(units='cm')
def f_pythagorean(a, b):
a, b = np.atleast_1d(a), np.atleast_1d(b)
return np.sqrt(a * a + b * b).reshape(1, -1)
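# Illustrative check of the formula above: f_pythagorean(3.0, 4.0) returns a
# 1 x 1 array containing 5.0 (inputs are broadcast and the result is reshaped
# to a 1 x n row).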
class PythagoreanFormula(Formula):
f_pythagorean = FormulaParameter(
units=[('=A', ), ('=A', '=A')],
isconstant=[]
)
class Meta:
module = __name__
class PythagoreanCalc(Calc):
pythagorean_thm = CalcParameter(
formula='f_pythagorean',
args={'data': {'a': 'adjacent_side', 'b': 'opposite_side'}},
returns=['hypotenuse']
)
class PythagoreanSim(Simulation):
settings = SimParameter(
ID='Pythagorean Theorem',
commands=['start', 'load', 'run'],
sim_length=[0, 'hour'],
write_fields={
'data': ['adjacent_side', 'opposite_side'],
'outputs': ['hypotenuse']
}
)
class PythagoreanModel(Model):
data = ModelParameter(sources=[PythagoreanData])
outputs = ModelParameter(sources=[PythagoreanOutput])
formulas = ModelParameter(sources=[PythagoreanFormula])
calculations = ModelParameter(sources=[PythagoreanCalc])
simulations = ModelParameter(sources=[PythagoreanSim])
class Meta:
modelpath = os.path.dirname(__file__)
if __name__ == '__main__':
m = PythagoreanModel()
m.command('run', data=DATA)
out_reg = m.registries['outputs']
fmt = {
'output': out_reg['hypotenuse'],
'uncertainty': out_reg.uncertainty['hypotenuse']['hypotenuse']
}
print 'hypotenuse = %(output)s +/- %(uncertainty)s' % fmt
| mikofski/Carousel | examples/PythagoreanThm/pythagorean_thm.py | Python | bsd-3-clause | 2,570 | 0 |
import os
import xbmc
import time
os.system("sudo service HelgeInterface stop")
time.sleep(1)
xbmc.executebuiltin('XBMC.Powerdown')
pass
| harryberlin/BMW-RaspControl-Skin | skin.confluence-vertical/scripts/system_shutdown.py | Python | gpl-2.0 | 138 | 0 |
# Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from functools import wraps
from htmlmin.minify import html_minify
def minified_response(f):
@wraps(f)
def minify(*args, **kwargs):
response = f(*args, **kwargs)
minifiable_status = response.status_code == 200
minifiable_content = 'text/html' in response['Content-Type']
if minifiable_status and minifiable_content:
response.content = html_minify(response.content)
return response
return minify
def not_minified_response(f):
@wraps(f)
def not_minify(*args, **kwargs):
response = f(*args, **kwargs)
response.minify_response = False
return response
return not_minify
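# Illustrative usage sketch (the view functions below are hypothetical, not
# part of this module):
#
#   @minified_response
#   def home(request):
#       ...  # a 200 text/html response has its content minified
#
#   @not_minified_response
#   def raw_page(request):
#       ...  # sets minify_response = False so the response is left untouched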
| erikdejonge/django-htmlmin | htmlmin/decorators.py | Python | bsd-2-clause | 837 | 0 |
__author__ = 'Chao'
import numpy as np
from sklearn import svm, cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
activity_label = {'1': 'WALKING',
'2': 'WALKING_UPSTAIRS',
'3': 'WALKING_DOWNSTAIRS',
'4': 'SITTING',
'5': 'STANDING',
'6': 'LAYING'}
# ############################# Open data set ###############################
X = []
y = []
X_fin = []
y_fin = []
print "Opening dataset..."
try:
with open("X_train.txt", 'rU') as f:
res = list(f)
for line in res:
line.strip("\n")
pair = line.split(" ")
while pair.__contains__(""):
pair.remove("")
for i in xrange(pair.__len__()):
pair[i] = float(pair[i])
X.append(pair)
f.close()
with open("y_train.txt", 'rU') as f:
res = list(f)
for line in res:
y.append(int(line.strip("\n")[0]))
f.close()
except:
print "Error in reading the train set file."
exit()
try:
with open("X_test.txt", 'rU') as f:
res = list(f)
for line in res:
line.strip("\n")
pair = line.split(" ")
while pair.__contains__(""):
pair.remove("")
for i in xrange(pair.__len__()):
pair[i] = float(pair[i])
X_fin.append(pair)
f.close()
with open("y_test.txt", 'rU') as f:
res = list(f)
for line in res:
y_fin.append(int(line.strip("\n")[0]))
f.close()
except:
print "Error in reading the train set file."
exit()
print "Dataset opened."
X = np.array(X)
y = np.array(y)
###### Separate data set into 70% training set and 30% test set
print "Separating data into 70% training set & 30% test set..."
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3)
print "Dataset separated."
###### Get best parameters ######
############################### Kernel=Linear ###############################
print "######## SVM, Kernel = Linear #########"
#C_linear = [0.1, 1, 10, 100]
C_linear = [3]
result_linear = []
print "C value chosen from: ", C_linear
print "Calculating accuracy with K-fold..."
for C in C_linear:
svc_linear = svm.SVC(kernel='linear', C=C)
scores = cross_validation.cross_val_score(svc_linear, X_train, y_train, scoring='accuracy', cv=6)
result_linear.append(scores.mean())
print "result:", result_linear
#Results with different C values are equal, so choose a single C directly as the best parameter.
best_param_linear = {"C": 3}
#linear_test_score = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X_test, y_test).score(X_test, y_test)
#rbf_test_score = svm.SVC(kernel='rbf', C=best_param_rbf.get("C"), gamma=best_param_rbf.get("gamma")).fit(X_test, y_test).score(X_test, y_test)
#poly_test_score = svm.SVC(kernel='poly', C=best_param_poly.get("C"), degree=best_param_poly.get("degree")).fit(X_test, y_test).score(X_test, y_test)
linear_test = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
count2 += 1
a = linear_test.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
#print "Linear Kernel test score: ", linear_test_score
#print "RBF Kernel test score: ", rbf_test_score
#print "Poly Kernel test score: ", poly_test_score
################################### Random Forests ####################################
print "##### Random Forest ######"
n_estimators_list = range(1, 16, 1)
result_random_forests = []
max_score_rf = float("-inf")
best_param_rf = None
for n_estimators in n_estimators_list:
print "Testing n_estimators = ", n_estimators
rf_clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=None, min_samples_split=1, random_state=0)
scores = cross_validation.cross_val_score(rf_clf, X_train, y_train, scoring="accuracy", cv=6)
result_random_forests.append(scores.mean())
if scores.mean() > max_score_rf:
max_score_rf = scores.mean()
best_param_rf = {"n_estimators": n_estimators}
print "number of trees: ", n_estimators_list
print "results: ", result_random_forests
print "best accuracy: ", max_score_rf
print "best parameter: ", best_param_rf
rf_clf_test_score = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None,
min_samples_split=1, random_state=0).fit(X_test, y_test).score(X_test,
y_test)
print "Test set accuracy: ", rf_clf_test_score
rf_clf = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None, min_samples_split=1,
random_state=0).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
count2 += 1
a = rf_clf.predict(X_fin[i])
b = y_fin[i]
print "+ ", a[0],
print "- ", b
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
################################### K Nearest Neighbors ####################################
print "##### K Nearest Neighbors ######"
n_neighbors_list = range(1, 6, 1)
result_n_neighbors = []
max_score_knn = float("-inf")
best_param_knn = None
for n_neighbors in n_neighbors_list:
print "Testing n_neighbors = ", n_neighbors
neigh = KNeighborsClassifier(n_neighbors=n_neighbors)
scores = cross_validation.cross_val_score(neigh, X_train, y_train, scoring="accuracy", cv=6)
result_n_neighbors.append(scores.mean())
if scores.mean() > max_score_knn:
max_score_knn = scores.mean()
best_param_knn = {"n_neighbors": n_neighbors}
print "number of neighbors: ", n_neighbors_list
print "results: ", result_n_neighbors
print "best accuracy: ", max_score_knn
print "best parameter: ", best_param_knn
neigh_test_score = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X_test, y_test).score(X_test, y_test)
print "Test set accuracy: ", neigh_test_score
neigh = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(X_fin.__len__()):
count2 += 1
a = neigh.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
| Sapphirine/Human-Activity-Monitoring-and-Prediction | analysis.py | Python | apache-2.0 | 6,718 | 0.004614 |
import sys
if sys.version_info >= (3, 8):
from functools import singledispatchmethod
else:
from functools import singledispatch, update_wrapper
def singledispatchmethod(func):
dispatcher = singledispatch(func)
def wrapper(*args, **kw):
return dispatcher.dispatch(args[1].__class__)(*args, **kw)
wrapper.register = dispatcher.register
update_wrapper(wrapper, func)
return wrapper
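# Illustrative usage sketch (not part of the original module): the Handler class
# and its registrations below are hypothetical names, used only to show how
# singledispatchmethod dispatches on the type of the first argument after self.
if __name__ == "__main__":
    class Handler:
        @singledispatchmethod
        def handle(self, value):
            return "default:{}".format(value)
        @handle.register(int)
        def _(self, value):
            return "int:{}".format(value * 2)
        @handle.register(str)
        def _(self, value):
            return "str:{}".format(value.upper())
    h = Handler()
    assert h.handle(3) == "int:6"
    assert h.handle("hi") == "str:HI"
    assert h.handle(1.5) == "default:1.5"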
| adamcharnock/lightbus | lightbus/utilities/singledispatch.py | Python | apache-2.0 | 447 | 0 |
#!/usr/bin/env python
import functools
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import time
import kmc2
from collections import namedtuple
from scipy import stats
from sklearn import cluster
from sklearn.decomposition import TruncatedSVD
from numba import jit
from datasets import load_dataset, Datasets
from joblib import Memory
_memory = Memory('.', verbose=0)
# ================================================================ Distances
def dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def dists_l1(X, q):
diffs = np.abs(X - q)
return np.sum(diffs, axis=-1)
def _learn_expected_dists_for_diffs(X_embed, X_quant, base_dist_func=dists_sq,
samples_per_bin=1e3):
# TODO try fitting dists based on orig data, not embedded
assert np.array_equal(X_quant[:10], X_quant[:10].astype(np.int))
assert X_embed.shape == X_quant.shape
uniqs = np.unique(X_quant)
cardinality = len(uniqs)
dists = np.zeros(cardinality)
counts = np.zeros(cardinality)
assert np.max(uniqs) == (cardinality - 1) # must be ints 0..b for some b
nsamples = int(counts.size * samples_per_bin)
for n in range(nsamples):
row1, row2 = np.random.randint(X_embed.shape[0], size=2)
col1, col2 = np.random.randint(X_embed.shape[1], size=2)
diff = np.abs(X_quant[row1, col1] - X_quant[row2, col2]).astype(np.int)
dist = base_dist_func(X_embed[row1, col1], X_embed[row2, col2])
counts[diff] += 1
dists[diff] += dist
assert np.min(counts) > 0
dists /= counts
return dists - np.min(dists)
# return dists / counts # TODO uncomment
# return np.array([base_dist_func(i, 0) for i in np.arange(cardinality)])
def learn_dists_func(X_embed, X_quant, base_dist_func=dists_sq,
samples_per_bin=1e3):
"""
Args:
X_embed (2D, array-like): the data just before quantization
X_quant (2D, array-like): quantized version of `X_embed`
base_dist_func (f(a, b) -> R+): function used to compute distances
between pairs of scalars
samples_per_bin (scalar > 0): the expected number of samples per bin
Returns:
f(X, q), a function with the same signature as `dists_sq` and `dists_l1`
"""
expected_dists = _learn_expected_dists_for_diffs(
X_embed, X_quant, base_dist_func, samples_per_bin)
print "expected_dists: ", expected_dists
def f(X, q, expected_dists=expected_dists):
diffs = np.abs(X - q)
orig_shape = diffs.shape
# assert np.min(diffs)
dists = expected_dists[diffs.ravel().astype(np.int)]
return dists.reshape(orig_shape).sum(axis=-1)
return f
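def _example_learn_dists_func():
    """Illustrative sketch only (not part of the original experiment): learn a
    distance function from toy embedded + quantized data and evaluate it for one
    quantized query; the shapes and the 2-bit quantization here are made up."""
    X_embed = np.random.randn(500, 16)
    X_quant = np.clip(np.floor(X_embed + 2), 0, 3)  # toy 2-bit codes in {0..3}
    dist_func = learn_dists_func(X_embed, X_quant, base_dist_func=dists_sq)
    q_quant = X_quant[0]
    return dist_func(X_quant, q_quant)  # one approximate distance per row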
def dists_elemwise_sq(x, q):
diffs = x - q
return diffs * diffs
def dists_elemwise_l1(x, q):
return np.abs(x - q)
LUT_QUANTIZE_FLOOR = 'floor'
def learn_query_lut(X_embed, X_quant, q_embed,
elemwise_dist_func=dists_elemwise_sq,
samples_per_bin=1e3,
quantize_algo=LUT_QUANTIZE_FLOOR):
assert np.array_equal(X_embed.shape, X_quant.shape)
# assert np.array_equal(q_embed.shape, q_quant.shape)
assert np.equal(X_embed.shape[-1], q_embed.shape[-1])
ndims = q_embed.shape[-1]
uniqs = np.unique(X_quant)
cardinality = len(uniqs)
distances = np.zeros((cardinality, ndims))
counts = np.zeros((cardinality, ndims))
# assert cardinality == 4 # TODO rm
assert np.max(uniqs) == (cardinality - 1) # must be ints 0..b for some b
nsamples = min(int(cardinality * samples_per_bin), X_embed.shape[0])
all_cols = np.arange(ndims, dtype=np.int)
for n in range(nsamples):
bins = X_quant[n].astype(np.int)
dists = elemwise_dist_func(X_embed[n], q_embed)
counts[bins, all_cols] += 1
distances[bins, all_cols] += dists.ravel()
# TODO also learn avg dist and set scale factor such that avg point will
# just barely integer overflow
assert np.min(counts) > 0
return np.asfortranarray(distances / counts)
# def _inner_dists_lut(X_quant, q_lut, all_cols):
# @jit
def _inner_dists_lut(X_quant, q_lut):
# ret = np.zeros(X_quant.shape[0])
# offset cols of X_quant so that col i has offset of i * `cardinality`;
# this will allow us to directly index into q_lut all at once
cardinality, ndims = q_lut.shape
offsets = np.arange(ndims, dtype=np.int) * cardinality
X_quant_offset = X_quant + offsets
dists = q_lut.T.ravel()[X_quant_offset.ravel()]
# dists = q_lut.T.ravel()[X_quant_offset.reshape(-1)]
dists = dists.reshape(X_quant.shape)
return np.sum(dists, axis=-1)
# ndims = q_lut.shape[-1]
# for i in range(X_quant.shape[0]):
# row = X_quant[i]
# for i, row in enumerate(X_quant):
# for j in range(ndims):
# ret[i] += q_lut[row[j], j]
# dists = q_lut[row, all_cols]
# ret[i] = np.sum(dists)
# return ret
# @profile
def dists_lut(X_quant, q_lut): # q_lut is [cardinality, ndims]
"""
>>> X_quant = np.array([[0, 2], [1, 0]], dtype=np.int)
>>> q_lut = np.array([[10, 11, 12], [20, 21, 22]]).T
>>> dists_lut(X_quant, q_lut)
array([ 32., 31.])
"""
assert X_quant.shape[-1] == q_lut.shape[-1]
# return dists_sq(X_quant, q_lut) # TODO rm # yep, identical output
# ndims = q_lut.shape[-1]
X_quant = np.atleast_2d(X_quant.astype(np.int))
# all_cols = np.arange(ndims, dtype=np.int)
return _inner_dists_lut(X_quant, q_lut)
# cardinality, ndims = q_lut.shape
# offsets = np.arange(ndims, dtype=np.int) * cardinality
# X_quant_offset = X_quant + offsets
# dists = q_lut.T.ravel()[X_quant_offset.ravel()]
# # dists = q_lut.T.ravel()[X_quant_offset.reshape(-1)]
# dists = dists.reshape(X_quant.shape)
# return np.sum(dists, axis=-1)
def dists_to_vects(X, q):
row_norms = np.sum(X*X, axis=1, keepdims=True)
q_norms = np.sum(q*q, axis=1)
prods = np.dot(X, q.T)
return -2 * prods + row_norms + q_norms
def hamming_dist(v1, v2):
return np.count_nonzero(v1 != v2)
def hamming_dists(X, q):
return np.array([hamming_dist(row, q) for row in X])
# ================================================================ Misc
def randwalk(*args):
ret = np.random.randn(*args)
ret = np.cumsum(ret, axis=-1)
return ret / np.linalg.norm(ret, axis=-1, keepdims=True) * ret.shape[-1]
def top_k_idxs(elements, k, smaller_better=False):
if smaller_better:
which_nn = np.arange(k)
return np.argpartition(elements, kth=which_nn)[:k]
else:
which_nn = len(elements) - 1 - np.arange(k)
return np.argpartition(elements, kth=which_nn)[-k:][::-1]
def find_knn(X, q, k):
dists = dists_sq(X, q)
idxs = top_k_idxs(dists, k, smaller_better=True)
return idxs, dists[idxs]
def orthogonalize_rows(A):
Q, R = np.linalg.qr(A.T)
return Q.T
# ================================================================ Clustering
@_memory.cache
def kmeans(X, k):
seeds = kmc2.kmc2(X, k)
# plt.imshow(centroids, interpolation=None)
estimator = cluster.MiniBatchKMeans(k, init=seeds, max_iter=16).fit(X)
# estimator = cluster.KMeans(k, max_iter=4).fit(X)
return estimator.cluster_centers_, estimator.labels_
def groups_from_labels(X, labels, num_centroids):
# form groups associated with each centroid
groups = [[] for _ in range(num_centroids)]
for i, lbl in enumerate(labels):
groups[lbl].append(X[i])
for i, g in enumerate(groups[:]):
groups[i] = np.array(g, order='F')
# groups[i] = np.array(g)
# group_sizes = [len(g) for g in groups]
# huh; these are like 80% singleton clusters in 64D and 32 kmeans iters...
# print sorted(group_sizes)
# plt.hist(labels)
# plt.hist(group_sizes, bins=num_centroids)
# plt.show()
return groups
@_memory.cache
def load_dataset_and_groups(which_dataset, num_centroids=256,
**load_dataset_kwargs):
X, q = load_dataset(which_dataset, **load_dataset_kwargs)
assert q.shape[-1] == X.shape[-1]
centroids, labels = kmeans(X, num_centroids)
groups = groups_from_labels(X, labels, num_centroids)
return Dataset(X, q, centroids, groups)
Dataset = namedtuple('Dataset', ['X', 'q', 'centroids', 'groups'])
# ================================================================ Preproc
# ------------------------------------------------ Z-Normalization
class Normalizer(object):
def __init__(self, X):
self.means = np.mean(X, axis=0)
self.stds = np.std(X, axis=0)
self.std = np.max(self.stds)
def znormalize(self, A):
return (A - self.means) / self.std
# ------------------------------------------------ Quantization (incl DBQ)
def cutoff_quantize(A, thresholds):
out = np.empty(A.shape, dtype=np.int)
if len(thresholds.shape) == 1:
return np.digitize(A, thresholds)
for i, col in enumerate(A.T):
threshs = thresholds[:, i] # use col i of threshs for col i of A
out[:, i] = np.digitize(col, threshs)
return out
def gauss_quantize_old(A, means, std, nbits=8, max_sigma=-1, normalize=True):
nbins = int(2 ** nbits)
if max_sigma <= 0:
# set this such that end bins each have 1/nbins of the distro
max_sigma = -stats.norm.ppf(1. / nbins)
max_sigma *= (nbins / 2) / ((nbins / 2) - 1)
# print "gauss_quantize: nbits = ", nbits
# print "gauss_quantize: nbins = ", nbins
# assert nbits == 2
A_z = (A - means) / std if normalize else A
max_val = 2 ** int(nbits - 1) - 1
# max_val = 2 ** int(nbits) - 1 # TODO remove after debug
min_val = -(max_val + 1)
print "gauss_quantize: minval, maxval = ", min_val, max_val
# print "gauss_quantize: nbins = ", nbins
# assert nbits == 2
scale_by = max_val / float(max_sigma)
quantized = np.floor(A_z * scale_by)
return np.clip(quantized, min_val, max_val).astype(np.int)
def fit_gauss_thresholds(A, nbits, shared=True, max_sigma=-1):
nbins = int(2 ** nbits)
quantiles = np.arange(1, nbins) / float(nbins)
threshs = stats.norm.ppf(quantiles)
thresholds = np.empty((nbins - 1, A.shape[1]))
means = np.mean(A, axis=0)
stds = np.std(A, axis=0)
    if shared:
        std = np.mean(stds)  # one shared scale factor for every column
        # XXX assumes means subtracted off
        # return threshs * std
        for i in range(A.shape[1]):
            thresholds[:, i] = threshs * std + means[i]
    else:
        # thresholds = np.empty(nbins - 1, A.shape[1])
        for i, std in enumerate(stds):
            thresholds[:, i] = threshs * std + means[i]
return thresholds
# if max_sigma <= 0:
# # set this such that end bins each have 1/nbins of the distro
# max_sigma = -stats.norm.ppf(1. / nbins)
# max_sigma *= (nbins / 2) / ((nbins / 2) - 1)
def fit_quantile_thresholds(X, nbits=-1, shared=True, nbins=-1):
if nbins < 1:
nbins = int(2 ** nbits)
quantiles = np.arange(1, nbins) / float(nbins)
percentiles = quantiles * 100
if shared:
return np.percentile(X, q=percentiles, interpolation='midpoint')
return np.percentile(X, q=percentiles, axis=0, interpolation='midpoint')
def fit_kmeans_thresholds(X, nbits, shared=True): # for manhattan hashing
nbins = int(2 ** nbits)
if shared or X.shape[1] == 1: # one set of thresholds shared by all dims
centroids, _ = kmeans(X.reshape((-1, 1)), nbins)
centroids = np.sort(centroids.ravel())
return (centroids[:-1] + centroids[1:]) / 2.
# uniq set of thresholds for each dim
thresholds = np.empty((nbins - 1, X.shape[1]))
for i, col in enumerate(X.T):
thresholds[:, i] = fit_kmeans_thresholds(col, nbits, shared=True)
return thresholds
def dbq_quantize(A, lower_threshs, upper_threshs):
# we take sqrt so dist_sq() will yield hamming dist
# EDIT: no, this is broken cuz (sqrt(2) - 1)^2 != (1 - 0)^2
# return np.sqrt((A > lower_threshs).astype(np.float) + (A > upper_threshs))
return (A > lower_threshs).astype(np.float) + (A > upper_threshs)
def fit_dbq_thresholds(A, shared=True):
# return fit_quantile_thresholds(A, nbins=2, shared=shared)
if shared:
return np.percentile(A, q=[33, 67], interpolation='midpoint')
return np.percentile(A, q=[33, 67], axis=0, interpolation='midpoint')
class Quantizer(object):
GAUSS = 'gauss'
DBQ = 'dbq'
KMEANS = 'kmeans'
QUANTILE = 'quantile'
def __init__(self, X, nbits=2, how=GAUSS, shared_bins=True):
self.X = X
self.nbits = nbits
self.how = how
self.normalizer = Normalizer(X) # just to store means and std
if how == Quantizer.DBQ:
self.dbq_thresholds = fit_dbq_thresholds(X, shared=shared_bins)
elif how == Quantizer.KMEANS:
self.kmeans_thresholds = fit_kmeans_thresholds(
X, nbits=nbits, shared=shared_bins)
elif how == Quantizer.QUANTILE:
self.quantile_thresholds = fit_quantile_thresholds(
X, nbits=nbits, shared=shared_bins)
elif how == Quantizer.GAUSS:
self.gauss_thresholds = fit_gauss_thresholds(
X, nbits=nbits, shared=shared_bins)
else:
raise ValueError("Unrecognized quantization method: {}".format(how))
def gauss_quantize(self, A, **kwargs):
# return cutoff_quantize(A, self.gauss_thresholds)
ret = cutoff_quantize(A, self.gauss_thresholds)
assert self.nbits == 2
# print "min, max quantized value: ", np.min(ret), np.max(ret)
assert np.min(ret) >= 0
assert np.max(ret) <= 3
return ret
# return gauss_quantize(A, self.normalizer.means, self.normalizer.std,
# nbits=self.nbits, **kwargs)
# ret = gauss_quantize(A, self.normalizer.means, self.normalizer.std,
# nbits=self.nbits, **kwargs)
# assert self.nbits == 2
# print "min, max quantized value: ", np.min(ret), np.max(ret)
# assert np.min(ret) >= -2
# assert np.max(ret) <= 1
# return ret
# def dbq_quantize(self, A, **kwargs):
# ret = dbq_quantize(A, self.dbq_thresholds[0], self.dbq_thresholds[1])
# assert self.nbits == 2
# assert np.min(ret) == 0
# assert np.max(ret) == 2
def transform(self, A):
if self.how == Quantizer.DBQ:
return dbq_quantize(A, self.dbq_thresholds[0], self.dbq_thresholds[1])
# return self.dbq_quantize(A)
# return cutoff_quantize(A, self.dbq_thresholds)
elif self.how == Quantizer.KMEANS:
return cutoff_quantize(A, self.kmeans_thresholds)
elif self.how == Quantizer.QUANTILE:
return cutoff_quantize(A, self.quantile_thresholds)
elif self.how == Quantizer.GAUSS:
return cutoff_quantize(A, self.gauss_thresholds)
# return self.gauss_quantize(A)
else:
raise ValueError("Unrecognized quantization method: {}".format(
self.how))
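def _example_quantizer():
    """Illustrative sketch only: quantize toy embedded data to 2-bit codes the
    way QuantizedRandomIsoHash does internally (data and sizes are made up)."""
    X_embed = np.random.randn(1000, 8)
    quantizer = Quantizer(X_embed, nbits=2, how=Quantizer.QUANTILE)
    return quantizer.transform(X_embed)  # integer codes in {0, 1, 2, 3}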
# ================================================================ Embedding
# ------------------------------------------------ CBE
class CirculantBinaryEmbedder(object):
def __init__(self, X, nbits):
D = X.shape[-1]
self.nbits = nbits
self.r = np.random.randn(D)
self.R = np.fft.fft(self.r)
self.signs = (np.random.randn(D) > 0) * 2 - 1
def transform(self, X):
# X_preproc = (X - centroids[idx]) * signs
        X_preproc = X * self.signs  # this yields correlations of about .9 on randwalks
# X_preproc = (X - np.ones(D)) * signs # alright, this is fine
X_fft = np.fft.fft(X_preproc, axis=-1)
X_rotated = np.fft.ifft(X_fft * self.R, axis=-1)
X_rotated = X_rotated[..., :self.nbits]
return np.real(X_rotated) > 0
# ------------------------------------------------ Random hyperplanes
class SignedRandomProjections(object):
def __init__(self, X, nbits, orthogonal=False):
self.hyperplanes = np.random.randn(nbits, X.shape[-1])
if orthogonal:
self.hyperplanes = orthogonalize_rows(self.hyperplanes)
def transform(self, X):
return np.dot(X, self.hyperplanes.T) > 0
# ------------------------------------------------ Striped rand projections
class StripedRandomProjections(object):
def __init__(self, X, nbits, orthogonal=False):
self.hyperplanes = np.random.randn(nbits, X.shape[-1])
if orthogonal:
self.hyperplanes = orthogonalize_rows(self.hyperplanes)
def transform(self, X):
prod = np.dot(X, self.hyperplanes.T)
interval = np.max(prod) - np.min(prod)
stripe_width = interval / 3.
bins = np.floor(prod / stripe_width).astype(np.int)
return np.mod(bins, 2).astype(np.bool)
# ------------------------------------------------ Partially Orthogonal SRP
class SuperbitLSH(object):
def __init__(self, X, nbits, subvect_len=64):
D = X.shape[-1]
self.nbits = nbits
self.subvect_len = subvect_len
num_subvects = D / subvect_len
assert D % subvect_len == 0
self.projections = np.random.randn(nbits / num_subvects, subvect_len)
# orthagonalize groups of subvect_len projections
for i in range(0, len(self.projections), subvect_len):
self.projections[i:i+subvect_len] = orthogonalize_rows(
self.projections[i:i+subvect_len])
def transform(self, X):
new_shape = list(X.shape)
new_shape[-1] = self.nbits
X = X.reshape((-1, self.subvect_len))
prods = np.dot(X, self.projections.T)
prods = prods.reshape(new_shape)
return prods > 0
# ------------------------------------------------ Sample subset of dims
class SampleDimsSketch(object):
def __init__(self, X, ndims=64):
self.keep_idxs = np.random.randint(X.shape[-1], size=ndims)
def transform(self, X):
return X[:, self.keep_idxs] if len(X.shape) == 2 else X[self.keep_idxs]
class QuantizedSampleDimsSketch(object):
def __init__(self, X, ndims=64, **quantize_kwargs):
self.inner_sketch = SampleDimsSketch(X, ndims=ndims)
X = self.inner_sketch.transform(X)
self.quantizer = Quantizer(X, **quantize_kwargs)
def transform(self, X):
ret = self.inner_sketch.transform(X)
return self.quantizer.transform(ret)
# ------------------------------------------------ PCA / IsoHash
@_memory.cache
def _fit_svd(X_train, n_components):
return TruncatedSVD(n_components=n_components).fit(X_train)
@_memory.cache
def _pca(svd, X, ndims):
return svd.transform(X)[:, 1:(ndims+1)]
class Pca(object):
DEFAULT_MAX_NUM_DIMS = 64
def __init__(self, X, ndims=DEFAULT_MAX_NUM_DIMS):
self.means = np.mean(X, axis=0)
self.pre_normalizer = Normalizer(X)
X_train = self.pre_normalizer.znormalize(X)
self.svd = _fit_svd(X_train, ndims + 1)
X_pca = self.transform(X_train, postnormalize=False)
self.post_normalizer = Normalizer(X_pca)
def transform(self, A, ndims=DEFAULT_MAX_NUM_DIMS, postnormalize=True):
A_in = self.pre_normalizer.znormalize(A)
A_pca = _pca(self.svd, A_in, ndims=ndims)
# A_pca = self.svd.transform(A_in)[:, 1:(ndims+1)]
if postnormalize:
return self.post_normalizer.znormalize(A_pca)
return A_pca
class PcaSketch(object):
def __init__(self, X, ndims=64):
self.ndims = ndims
self.pca = Pca(X, ndims=ndims)
def transform(self, X):
return self.pca.transform(np.atleast_2d(X))
class RandomIsoHash(object):
def __init__(self, X, ndims=64):
self.inner_sketch = PcaSketch(X, ndims=ndims)
hyperplanes = np.random.randn(ndims, ndims)
self.rotation = orthogonalize_rows(hyperplanes)
def rotate(self, A):
return np.dot(A, self.rotation.T)
def transform(self, X):
ret = self.inner_sketch.transform(X)
return self.rotate(ret)
class QuantizedRandomIsoHash(object):
def __init__(self, X, ndims=64, **quantize_kwargs):
self.inner_sketch = RandomIsoHash(X, ndims=ndims)
X = self.inner_sketch.transform(X)
self.quantizer = Quantizer(X, **quantize_kwargs)
def transform(self, X):
ret = self.inner_sketch.transform(X)
return self.quantizer.transform(ret)
# ------------------------------------------------ Random rotation
class RandomProjections(object):
def __init__(self, X, ndims=64, orthogonal=False):
self.ndims = ndims
self.hyperplanes = np.random.randn(ndims, X.shape[-1])
if orthogonal:
self.hyperplanes = orthogonalize_rows(self.hyperplanes)
def transform(self, X):
return np.dot(X, self.hyperplanes.T)
class QuantizedRandomProjections(object):
def __init__(self, X, ndims=64, orthogonal=False, **quantize_kwargs):
self.inner_sketch = RandomProjections(
X, ndims=ndims, orthogonal=orthogonal)
X = self.inner_sketch.transform(X)
self.quantizer = Quantizer(X, **quantize_kwargs)
def transform(self, X):
ret = self.inner_sketch.transform(X)
return self.quantizer.transform(ret)
# ------------------------------------------------ Product Quantization
class PQEncoder(object):
    def __init__(self, dataset, code_bits=64, bits_per_subvect=4,
                 elemwise_dist_func=dists_elemwise_sq):
X = dataset.X
ncols = X.shape[1]
nsubvects = code_bits // bits_per_subvect
ncentroids = int(2 ** bits_per_subvect)
subvect_len = ncols / nsubvects
assert code_bits % bits_per_subvect == 0
assert ncols % subvect_len == 0 # TODO rm this constraint
        self.centroids = np.empty((ncentroids, nsubvects, subvect_len))
for i in range(nsubvects):
start_col = i * subvect_len
end_col = start_col + subvect_len
X_in = X[:, start_col:end_col]
centroids, labels = kmeans(X_in, ncentroids)
self.centroids[:, i, :] = centroids
self.elemwise_dist_func = elemwise_dist_func
        self.nsubvects = nsubvects
self.subvect_len = subvect_len
self.offsets = np.arange(self.nsubvects, dtype=np.int) * ncentroids
def encode_X(self, X, **sink):
assert X.shape[1] == (self.nsubvects * self.subvect_len)
        idxs = np.empty((X.shape[0], self.nsubvects), dtype=np.int)
X = X.reshape((X.shape[0], self.nsubvects, self.subvect_len))
for i, row in enumerate(X):
row = row.reshape((1, self.nsubvects, self.subvect_len))
            dists = self.elemwise_dist_func(self.centroids, row)
            dists = np.sum(dists, axis=-1)  # total distance to each centroid
            idxs[i, :] = np.argmin(dists, axis=0)
return idxs + self.offsets # offsets let us index into raveled dists
def encode_q(self, q, **sink):
return None # we use fit_query() instead, so fail fast
# return np.encode_X(np.atleast_2d(q))
def dists_true(self, X, q):
return np.sum(self.elemwise_dist_func(X, q), axis=-1)
def fit_query(self, q, **sink):
assert len(q) == self.nsubvects * self.subvect_len
q = q.reshape((1, self.nsubvects, self.subvect_len))
self.q_dists_ = self.elemwise_dist_func(self.centroids, q)
self.q_dists_ = np.sum(self.q_dists_, axis=-1)
self.q_dists_ = np.asfortranarray(self.q_dists_)
return self
def dists_enc(self, X_enc, q_unused):
# this line has each element of X_enc index to the flattened
# version of q's distances to the centroids; we had to add
# offsets to each col of X_enc above for this to work
centroid_dists = self.q_dists_.T.ravel()[X_enc.ravel()]
return np.sum(centroid_dists.reshape(X_enc.shape), axis=-1)
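def _example_pq_encoder():
    """Illustrative sketch only: product-quantize toy data and compare LUT-based
    distances against exact distances for one query (all sizes are made up)."""
    X = np.random.randn(200, 16)
    q = np.random.randn(16)
    toy = Dataset(X=X, q=q, centroids=None, groups=None)
    enc = PQEncoder(toy, code_bits=16, bits_per_subvect=4,
                    elemwise_dist_func=dists_elemwise_sq)
    X_enc = enc.encode_X(X)  # one centroid index (plus offset) per subvector
    enc.fit_query(q)         # builds the per-query LUT q_dists_
    approx = enc.dists_enc(X_enc, None)
    exact = enc.dists_true(X, q)
    return approx, exact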
# ================================================================ Main
def create_q_encoding_func(X, encoder, elemwise_dist_func):
X_embed = encoder.inner_sketch.transform(X)
X_quant = encoder.quantizer.transform(X_embed)
def q_encoding_func(q, X_embed=X_embed, X_quant=X_quant,
encoder=encoder, elemwise_dist_func=elemwise_dist_func):
q_embed = encoder.inner_sketch.transform(q)
return learn_query_lut(X_embed, X_quant, q_embed,
elemwise_dist_func=elemwise_dist_func)
return q_encoding_func
def eval_embedding(dataset, encoding_func, dist_func=dists_sq, plot=False,
q_encoding_func=None, bits_dist_func=None):
X, queries, centroids, groups = dataset
if len(queries.shape) == 1:
queries = [queries.ravel()]
if q_encoding_func is None:
q_encoding_func = encoding_func
if bits_dist_func is None:
bits_dist_func = dist_func
search_k = 20
fracs = []
for i, q in enumerate(queries):
# # print "q: ", q
# plt.plot(encoder.inner_sketch.transform(q))
q_bits = q_encoding_func(q)
# plt.plot(q_bits.T, 'r-')
# # # print "q lut: ", q_bits
# plt.show()
# return
all_true_dists = []
all_bit_dists = []
dists_to_centroids = dists_sq(centroids, q)
idxs = top_k_idxs(dists_to_centroids, search_k, smaller_better=True)
for idx in idxs:
X = groups[idx]
true_dists = dists_sq(X, q)
all_true_dists.append(true_dists)
X_bits = encoding_func(X)
bit_dists = bits_dist_func(X_bits, q_bits)
all_bit_dists.append(bit_dists)
all_true_dists = np.hstack(all_true_dists)
all_bit_dists = np.hstack(all_bit_dists)
# ------------------------ begin analysis / reporting code
knn_idxs = top_k_idxs(all_true_dists, 10, smaller_better=True)
cutoff = all_true_dists[knn_idxs[-1]]
knn_bit_dists = all_bit_dists[knn_idxs]
max_bit_dist = np.max(knn_bit_dists)
num_below_max = np.sum(all_bit_dists <= max_bit_dist)
frac_below_max = float(num_below_max) / len(all_bit_dists)
fracs.append(frac_below_max)
# print "bit dists: {}; max = {:.1f};\tfrac = {:.4f}".format(
# np.round(knn_bit_dists).astype(np.int), max_bit_dist, frac_below_max)
# print stats.describe(all_true_dists)
# print stats.describe(all_bit_dists)
if plot and i < 3: # at most 3 plots
# plt.figure()
# xlim = [np.min(all_true_dists + .5), np.max(all_true_dists)]
# xlim = [0, np.max(all_true_dists) / 2]
# ylim = [-1, num_bits]
# ylim = [-1, np.max(all_bit_dists) / 2]
num_nn = min(10000, len(all_true_dists) - 1)
xlim = [0, np.partition(all_true_dists, num_nn)[num_nn]]
ylim = [0, np.partition(all_bit_dists, num_nn)[num_nn]]
grid = sb.jointplot(x=all_true_dists, y=all_bit_dists,
xlim=xlim, ylim=ylim, joint_kws=dict(s=10))
# hack to bully the sb JointGrid into plotting a vert line
grid.x = [cutoff, cutoff]
grid.y = ylim
grid.plot_joint(plt.plot, color='r', linestyle='--')
# also make it plot cutoff in terms of quantized dist
grid.x = xlim
grid.y = [max_bit_dist, max_bit_dist]
grid.plot_joint(plt.plot, color='k', linestyle='--')
if plot:
# plt.figure()
# plt.plot(queries.T)
plt.show()
    frac_stats = np.array(fracs)  # avoid shadowing the scipy.stats import
    print "mean, 90th pctile, std of fracs to search: " \
        "{:.3f}, {:.3f}, {:.3f}".format(np.mean(frac_stats),
                                        np.percentile(frac_stats, q=90),
                                        np.std(frac_stats))
return fracs
def main():
import doctest
# doctest.testmod() # TODO uncomment after debug
# N = -1 # set this to not limit real datasets to first N entries
# N = 10 * 1000
N = 50 * 1000
# N = 1000 * 1000
D = 128
# D = 66
num_centroids = 256
num_queries = 128
dataset_func = functools.partial(load_dataset_and_groups,
num_centroids=num_centroids, N=N, D=D,
num_queries=num_queries)
# dataset = dataset_func(Datasets.RAND_WALK, norm_len=True) # 1.002
# dataset = dataset_func(Datasets.RAND_UNIF, norm_len=True) # 1.002
# dataset = dataset_func(Datasets.RAND_GAUSS, norm_len=True) # 1.03
# dataset = dataset_func(Datasets.RAND_GAUSS, norm_mean=True) # 1.03
# dataset = dataset_func(Datasets.GLOVE_100, norm_mean=True) # 2.5ish?
dataset = dataset_func(Datasets.SIFT_100, norm_mean=True) # 5ish?
# dataset = dataset_func(Datasets.GLOVE_200, norm_mean=True) #
# dataset = dataset_func(Datasets.SIFT_200, norm_mean=True) #
# dataset = dataset_func(Datasets.GLOVE, norm_mean=True) #
# dataset = dataset_func(Datasets.SIFT, norm_mean=True) #
# X, q, centroids, groups = dataset
# encoder = PcaSketch(dataset.X, 64)
# encoder = RandomIsoHash(dataset.X, 64)
# encoder = SampleDimsSketch(dataset.X, 64)
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, how=Quantizer.GAUSS)
# encoder = QuantizedRandomIsoHash(dataset.X, 32, nbits=2, how=Quantizer.GAUSS)
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, how=Quantizer.DBQ)
# encoder = QuantizedRandomIsoHash(dataset.X, 32, nbits=2, how=Quantizer.DBQ)
# encoder = QuantizedSampleDimsSketch(dataset.X, 64, nbits=2)
# encoder = QuantizedSampleDimsSketch(dataset.X, 32, nbits=2)
# encoder = CirculantBinaryEmbedder(dataset.X, 256)
# encoder = SignedRandomProjections(dataset.X, 64, orthogonal=False)
# encoder = SignedRandomProjections(dataset.X, 64, orthogonal=True)
# encoder = StripedRandomProjections(dataset.X, 64, orthogonal=False)
# encoder = StripedRandomProjections(dataset.X, 64, orthogonal=True)
# encoder = SuperbitLSH(dataset.X, 64, subvect_len=16)
# encoder = SuperbitLSH(dataset.X, 64, subvect_len=32)
# encoder = SuperbitLSH(dataset.X, 64, subvect_len=64)
print "------------------------ dbq l1"
encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, how=Quantizer.DBQ)
eval_embedding(dataset, encoder.transform, dist_func=dists_l1)
print "------------------------ dbq l2"
# # encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, how=Quantizer.DBQ)
eval_embedding(dataset, encoder.transform, dist_func=dists_sq)
print "------------------------ manhattan l1"
# note that we need shared_bins=False to mimic the paper
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, how=Quantizer.KMEANS)
encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2,
how=Quantizer.KMEANS, shared_bins=False)
eval_embedding(dataset, encoder.transform, dist_func=dists_l1)
print "------------------------ manhattan l2"
eval_embedding(dataset, encoder.transform, dist_func=dists_sq)
print "------------------------ gauss l1"
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2,
# how=Quantizer.GAUSS, shared_bins=False)
encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, how=Quantizer.GAUSS)
# encoder = QuantizedRandomProjections(dataset.X, 64, nbits=2, how=Quantizer.GAUSS)
eval_embedding(dataset, encoder.transform, dist_func=dists_l1)
print "------------------------ gauss l2"
eval_embedding(dataset, encoder.transform, dist_func=dists_sq)
print "------------------------ pca l1"
encoder = PcaSketch(dataset.X, 64)
eval_embedding(dataset, encoder.transform, dist_func=dists_l1)
print "------------------------ pca l2" # hmm; much better than quantized
eval_embedding(dataset, encoder.transform, dist_func=dists_sq)
print "------------------------ quantile l1" # yep, same performance as gauss
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2,
# how=Quantizer.QUANTILE, shared_bins=False)
encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2,
how=Quantizer.QUANTILE)
eval_embedding(dataset, encoder.transform, dist_func=dists_l1)
print "------------------------ quantile l2"
eval_embedding(dataset, encoder.transform, dist_func=dists_sq)
print "------------------------ q lut l1"
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=8, # 8 -> same as pca
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=4, # 4 -> little worse
encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, # 2 ->
how=Quantizer.QUANTILE, shared_bins=False)
q_encoding_func = create_q_encoding_func(dataset.X, encoder,
dists_elemwise_l1)
t1 = time.time()
eval_embedding(dataset, encoder.transform, dist_func=dists_sq,
q_encoding_func=q_encoding_func, bits_dist_func=dists_lut)
print "time to compute dists with lut: ", time.time() - t1
print "------------------------ q lut l2"
# t0 = time.time()
q_encoding_func = create_q_encoding_func(dataset.X, encoder,
dists_elemwise_sq)
t1 = time.time()
# print "time to learn lut: ", t1 - t0
eval_embedding(dataset, encoder.transform, dist_func=dists_sq,
q_encoding_func=q_encoding_func, bits_dist_func=dists_lut)
print "time to compute dists with lut: ", time.time() - t1
# print "===="
# TODO this should be encapsulated in the encoder and/or in a func
# that knows how to reach into encoders and just takes in X and the encoder
X_embed = encoder.inner_sketch.transform(dataset.X)
X_quant = encoder.quantizer.transform(X_embed)
learned_sq = learn_dists_func(X_embed, X_quant, base_dist_func=dists_sq)
learned_l1 = learn_dists_func(X_embed, X_quant, base_dist_func=dists_l1)
print "------------------------ quantile l1, learned dists"
encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2,
how=Quantizer.GAUSS, shared_bins=False)
# encoder = QuantizedRandomIsoHash(dataset.X, 64, nbits=2, how=Quantizer.QUANTILE)
eval_embedding(dataset, encoder.transform, dist_func=learned_l1)
print "------------------------ quantile l2, learned dists"
eval_embedding(dataset, encoder.transform, dist_func=learned_sq)
if __name__ == '__main__':
main()
| dblalock/dig | tests/exper_bits.py | Python | mit | 34,782 | 0.000633 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Dictionary
# Copyright 2008 Santhosh Thottingal <santhosh.thottingal@gmail.com>
# http://www.smc.org.in
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# If you find any bugs or have any suggestions email: santhosh.thottingal@gmail.com
# URL: http://www.smc.org.in
from common import *
from utils import silpalogger
import os
from dictdlib import DictDB
from jsonrpc import *
from wiktionary import get_def
try:
from modules.render import render
except:
silpalogger.exception("Failed to import render module")
# Have the render instance initialized only once
renderer = render.getInstance()
# Pre-rendered image used when no meaning is found
no_meaning_found = renderer.render_text("No meanings found","png",0,0,"Red",font_size=10)
class Dictionary(SilpaModule):
def __init__(self):
self.template=os.path.join(os.path.dirname(__file__), 'dictionary.html')
self.response = SilpaResponse(self.template)
self.imageyn=None
self.text=None
self.dictionaryname=None
self.fontsize=16
self.imagewidth=300
self.imageheight=300
def set_request(self,request):
self.request=request
self.response.populate_form(self.request)
self.text=self.request.get('text')
self.imageyn=self.request.get('image')
if self.request.get('fontsize')!=None:
self.fontsize= int( self.request.get('fontsize'))
if self.request.get('imagewidth')!=None:
self.imagewidth=int(self.request.get('imagewidth'))
if self.request.get('imageheight')!=None:
self.imageheight=int(self.request.get('imageheight'))
self.dictionaryname=self.request.get('dict')
def get_response(self):
if self.imageyn != None:
if self.imageyn.lower()=="y":
image_url = self.getdef_image(self.text,self.dictionaryname,"png",self.imagewidth,self.imageheight,"Black",self.fontsize)
self.response.response_code = "303 see other"
self.response.header = [('Location', image_url)]
elif self.imageyn.lower() == "w":
image_url = self.get_wiktionary_def_image(self.text,self.dictionaryname,"png",self.imageheight,self.imagewidth,"Black",self.fontsize)
self.response.response_code = "303 See other"
self.response.header = [('Location',image_url)]
else:
wordmeaning=self.getdef(self.text,self.dictionaryname)
self.response.content = wordmeaning.decode("utf-8")
self.response.response_code = "200 OK"
self.response.mime_type="text/plain;charset=UTF-8"
self.response.header = [('Content-Type','text/plain;charset=UTF-8')]
return self.response
    def get_json_result(self):
        error = None
        _id = 0
        data = dumps({"result": None, "id": _id, "error": error})
        try:
            if self.request.get('word'):
                definition = self.getdef(self.request.get('word'), self.request.get('dictionary'))
                data = dumps({"result": definition, "id": _id, "error": error})
        except JSONEncodeException:
            # translate the exception also to the error
            error = {"name": "JSONEncodeException", "message": "Result Object Not Serializable"}
            data = dumps({"result": None, "id": _id, "error": error})
        return data
def get_form(self):
page = open(self.template,'r').read()
return page
def get_free_dict(self, src, dest):
dict_dir=os.path.join(os.path.dirname(__file__), 'dictionaries')
dictdata=dict_dir+ "/freedict-"+src+"-"+dest
if os.path.isfile(dictdata+".index"):
return dictdata
return None
@ServiceMethod
def getdef(self, word, dictionary):
meaningstring= ""
src = dictionary.split("-")[0]
dest = dictionary.split("-")[1]
dictdata = self.get_free_dict(src,dest)
if dictdata:
            dictdb = DictDB(dictdata)
            clean_word = word.lower()
            clean_word = clean_word.strip()
            meanings = dictdb.getdef(clean_word)
for meaning in meanings:
meaningstring += meaning
if meaningstring == "None":
meaningstring = "No definition found"
return meaningstring
return meaningstring.decode("utf-8")
@ServiceMethod
def getdef_image(self,word,dictionary,file_type='png', width=0, height=0,color="Black",fontsize=10):
meaning = self.getdef(word,dictionary)
if meaning == "No definition found":
return no_meaning_found
else:
return renderer.render_text(meaning,file_type,width,height,color,font_size=fontsize)
@ServiceMethod
def get_wiktionary_def_image(self,word,dictionary,file_type='png',width=0,height=0,color="Black",fontsize=10):
tmp = dictionary.split("-")
src_lang = tmp[0]
dest_lang = tmp[1]
meaning = get_def(word,src_lang,dest_lang)
if meaning == None:
return no_meaning_found
else:
return renderer.render_text(meaning,file_type,0,0,color,font_size=fontsize)
def get_module_name(self):
return "Dictionary"
def get_info(self):
return "Bilingual Dictionaries"
def getInstance():
return Dictionary()
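# Illustrative usage sketch (comments only; the word and the 'en-ml' dictionary
# name are assumptions and require the matching freedict data to be installed):
#
#   dictionary = getInstance()
#   meaning = dictionary.getdef('hello', 'en-ml')
#   image = dictionary.getdef_image('hello', 'en-ml', 'png', 300, 300, 'Black', 16)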
| santhoshtr/silpa | src/silpa/modules/dictionary/dictionary.py | Python | agpl-3.0 | 6,167 | 0.020593 |
"""
"""
# Created on 2016.08.09
#
# Author: Giovanni Cannata
#
# Copyright 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from ... import SEQUENCE_TYPES, STRING_TYPES
from .formatters import format_time
from ...utils.conv import to_raw
# Validators return True if the value is valid, False if it is not valid,
# or a value different from True and False that is a valid substitute for the input value
def check_type(input_value, value_type):
if isinstance(input_value, value_type):
return True
if isinstance(input_value, SEQUENCE_TYPES):
for value in input_value:
if not isinstance(value, value_type):
return False
return True
return False
def always_valid(input_value):
return True
def validate_generic_single_value(input_value):
if not isinstance(input_value, SEQUENCE_TYPES):
return True
    try:  # the object might not have a __len__ method
if len(input_value) == 1:
return True
except Exception:
pass
return False
def validate_integer(input_value):
if check_type(input_value, (float, bool)):
return False
if str is bytes: # Python 2, check for long too
if check_type(input_value, (int, long)):
return True
else: # Python 3, int only
if check_type(input_value, int):
return True
    if not isinstance(input_value, SEQUENCE_TYPES):
        sequence = False  # a single value was passed in, so return a single value
        input_value = [input_value]
    else:
        sequence = True  # indicates if a sequence must be returned
valid_values = [] # builds a list of valid int values
for element in input_value:
        try:  # try to convert any type to int; an invalid conversion raises TypeError or ValueError, and if the float and int forms agree the int() value is used
float_value = float(element)
int_value = int(element)
if float_value == int_value:
valid_values.append(int(element))
else:
return False
except (ValueError, TypeError):
return False
if sequence:
return valid_values
else:
return valid_values[0]
def validate_bytes(input_value):
return check_type(input_value, bytes)
def validate_boolean(input_value):
    # it can be a real bool or the string TRUE or FALSE; only a single value is allowed
if validate_generic_single_value(input_value): # valid only if a single value or a sequence with a single element
if isinstance(input_value, SEQUENCE_TYPES):
input_value = input_value[0]
if isinstance(input_value, bool):
if input_value:
return 'TRUE'
else:
return 'FALSE'
if isinstance(input_value, STRING_TYPES):
if input_value.lower() == 'true':
return 'TRUE'
elif input_value.lower() == 'false':
return 'FALSE'
return False
def validate_time(input_value):
# if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC
if not isinstance(input_value, SEQUENCE_TYPES):
sequence = False
input_value = [input_value]
else:
sequence = True # indicates if a sequence must be returned
valid_values = []
changed = False
for element in input_value:
        if isinstance(element, STRING_TYPES):  # check whether it is already a Generalized Time string
if isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string
valid_values.append(element)
else:
return False
elif isinstance(element, datetime):
changed = True
if element.tzinfo: # a datetime with a timezone
valid_values.append(element.strftime('%Y%m%d%H%M%S%z'))
else: # datetime without timezone, assumed local and adjusted to UTC
offset = datetime.now() - datetime.utcnow()
valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ'))
else:
return False
if changed:
if sequence:
return valid_values
else:
return valid_values[0]
else:
return True
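def _example_validator_usage():
    """Illustrative sketch only (not part of ldap3): typical validator results,
    including substituted values returned in place of True."""
    assert validate_boolean(True) == 'TRUE'             # substituted value
    assert validate_boolean('false') == 'FALSE'
    assert validate_integer('12') == 12                 # converted to int
    assert validate_integer(3.5) is False               # floats are rejected
    assert validate_generic_single_value(['a']) is True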
| Varbin/EEH | _vendor/ldap3/protocol/formatters/validators.py | Python | bsd-2-clause | 5,077 | 0.001773 |
# -*- coding: utf-8 -*-
"""
DrQueue main module
Copyright (C) 2011-2013 Andreas Schroeder
This file is part of DrQueue.
Licensed under GNU General Public License version 3. See LICENSE for details.
"""
import platform
import os
import sys
import smtplib
import json
from email.mime.text import MIMEText
from .client import Client
from .job import Job
from .computer import Computer
supported_renderers = ['3delight', '3dsmax', 'aftereffects', 'aqsis', \
'blender', 'cinema4d', 'general', 'lightwave', 'luxrender', 'mantra', \
'maya', 'mentalray', 'nuke', 'shake', 'terragen', 'turtle', 'vray', 'xsi']
supported_os = ['Windows', 'Mac OSX', 'Linux', 'FreeBSD', 'NetBSD', 'OpenBSD', \
'AIX', 'Solaris']
def check_renderer_support(renderer):
"""Check if renderer is supported."""
if renderer in supported_renderers:
return True
else:
return False
def get_rendertemplate(renderer):
"""Return template filename from renderer name"""
filename = ""
if renderer == '3delight':
filename = '3delight_sg.py'
if renderer == '3dsmax':
filename = '3dsmax_sg.py'
if renderer == 'aftereffects':
filename = 'aftereffects_sg.py'
if renderer == 'aqsis':
filename = 'aqsis_sg.py'
if renderer == 'blender':
filename = 'blender_sg.py'
if renderer == 'cinema4d':
filename = 'cinema4d_sg.py'
if renderer == 'general':
filename = 'general_sg.py'
if renderer == 'lightwave':
filename = 'lightwave_sg.py'
if renderer == 'luxrender':
filename = 'luxrender_sg.py'
if renderer == 'mantra':
filename = 'mantra_sg.py'
if renderer == 'maya':
filename = 'maya_sg.py'
if renderer == 'mentalray':
filename = 'mentalray_sg.py'
if renderer == 'nuke':
filename = 'nuke_sg.py'
if renderer == 'pixie':
filename = 'pixie_sg.py'
if renderer == 'shake':
filename = 'shake_sg.py'
if renderer == 'terragen':
filename = 'terragen_sg.py'
if renderer == 'turtle':
filename = 'turtle_sg.py'
if renderer == 'vray':
filename = 'vray_sg.py'
if renderer == 'xsi':
filename = 'xsi_sg.py'
return filename
def get_osname():
"""Return operating system name"""
osname = platform.system()
if osname == 'Darwin':
osname = 'Mac OSX'
return osname
def run_script_with_env(render_script, env_dict):
"""Run template script on IPython engine"""
import platform, os, sys
# set some variables on target machine
env_dict['DRQUEUE_OS'] = platform.system()
env_dict['DRQUEUE_ETC'] = os.path.join(os.getenv('DRQUEUE_ROOT'), "etc")
env_dict['DRQUEUE_LOGFILE'] = os.path.join(os.getenv('DRQUEUE_ROOT'),
"logs", env_dict['DRQUEUE_LOGFILE'])
# import specific render template
sys.path.append(env_dict['DRQUEUE_ETC'])
impmod = render_script.replace('.py', '')
__import__(impmod)
template = sys.modules[impmod]
# run template with env_dict
status = template.run_renderer(env_dict)
return status
def check_deps(dep_dict):
"""Run all dependency checking functions. This method runs directly on the engine."""
if ('os_name' in dep_dict) and (engine_has_os(dep_dict['os_name']) == False):
return False
elif ('minram' in dep_dict) and (engine_has_minram(dep_dict['minram']) == False):
return False
elif ('mincores' in dep_dict) and (engine_has_mincores(dep_dict['mincores']) == False):
return False
elif ('pool_name' in dep_dict) and (engine_is_in_pool(dep_dict['pool_name']) == False):
return False
elif ('job_id' in dep_dict) and (job_is_enabled(dep_dict['job_id']) == False):
return False
else:
return True
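# Illustrative sketch (assumed values, comments only): the dependency dict that
# check_deps() consumes on an engine before a task is run.
#
#   dep_dict = {'os_name': 'Linux',      # engine must run this OS
#               'minram': 4,             # at least 4 GB of RAM
#               'mincores': 2,           # at least 2 CPU cores
#               'pool_name': 'render',   # engine must belong to this pool
#               'job_id': some_job_id}   # job must still be enabled in the DB
#   if check_deps(dep_dict):
#       run_script_with_env(render_script, env_dict)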
def engine_is_in_pool(pool_name):
"""Check if engine belongs to certain pool. This method runs directly on the engine."""
# check os.environ["DRQUEUE_POOL"]
if ("DRQUEUE_POOL" in os.environ) and (pool_name in os.environ["DRQUEUE_POOL"]):
return True
else:
return False
def engine_has_os(os_name):
"""Check if engine is running on certain OS. This method runs directly on the engine."""
running_os = get_osname()
if os_name == running_os:
return True
else:
return False
def engine_has_minram(minram):
"""Check if engine has at least minram GB RAM. This method runs directly on the engine."""
mem = Computer.get_memory()
if mem >= minram:
return True
else:
return False
def engine_has_mincores(mincores):
"""Check if engine has at least mincores CPU cores. This method runs directly on the engine."""
ncpus = Computer.get_ncpus()
ncorescpu = Computer.get_ncorescpu()
cores = ncpus * ncorescpu
if cores >= mincores:
return True
else:
return False
def job_is_enabled(job_id):
"""Check if job is enabled. This method runs directly on the engine."""
job = Job.query_db(job_id)
if (job != None) and (job["enabled"] == True):
return True
else:
return False
def send_email(job_name, recipients):
"""Notify recipients about finish of job."""
# load email configuration
user_dir = os.path.expanduser("~")
config_file = os.path.join(user_dir, ".drqueue", "email_config.json")
    try:
        fp = open(config_file, "rb")
    except IOError:
        print("Email configuration could not be loaded.")
        return
    try:
        config = json.load(fp)
    except ValueError:
        print("Email configuration could not be parsed.")
        return
    print(config)
mail_from = config['from']
body_text = "Your render job \"%s\" is finished." % job_name
# Create a text/plain message
msg = MIMEText(body_text)
# subject, sender and recipients
msg['Subject'] = "Job \"%s\" is finished" % job_name
msg['From'] = mail_from
msg['To'] = recipients
if config['smtp_ssl'] == "1":
# connect via SSL
smtp = smtplib.SMTP_SSL(config['smtp_server'], int(config['smtp_port']))
else:
# connect without SSL
smtp = smtplib.SMTP(config['smtp_server'], int(config['smtp_port']))
# start TLS encryption
if config['smtp_tls'] == "1":
smtp.starttls()
if config['smtp_auth'] == "1":
# authenticate if required
smtp.login(config['smtp_user'], config['smtp_passwd'])
try:
smtp.sendmail(msg['From'], msg['To'], msg.as_string())
except:
print("Email could not be sent.")
smtp.quit()
| jedie/DrQueueIPython | DrQueue/__init__.py | Python | gpl-3.0 | 6,522 | 0.005213 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class VM:
def __init__(self, num_reg = 4):
self.regs = [0 for _ in range(num_reg)] # registers
self.pc = 0 # program counter
self.prog = None
self.reg1 = self.reg2 = self.reg3 = self.imm = None
self.running = False
def fetch(self):
instruction = self.prog[self.pc]
self.pc += 1
return instruction
def decode(self, instr):
instrNum = (instr & 0xF000) >> 12
        self.reg1 = (instr & 0xF00) >> 8
        self.reg2 = (instr & 0xF0) >> 4
        self.reg3 = (instr & 0xF)
        self.imm = (instr & 0xFF)
return instrNum
def eval(self, instrNum):
if (instrNum == 0):
print("halt")
self.running = False
elif (instrNum == 1):
print(f"loadi r{self.reg1} #{self.imm}")
self.regs[self.reg1] = self.imm
elif (instrNum == 2):
print(f"add r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] + self.regs[self.reg3]
elif (instrNum == 3):
print(f"sub r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] - self.regs[self.reg3]
elif (instrNum == 4):
print(f"mult r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] * self.regs[self.reg3]
elif (instrNum == 5):
print(f"div r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] / self.regs[self.reg3]
elif (instrNum == 6):
print(f"and r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] & self.regs[self.reg3]
elif (instrNum == 7):
print(f"or r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] | self.regs[self.reg3]
elif (instrNum == 8):
print(f"xor r{self.reg1} r{self.reg2} r{self.reg3}")
self.regs[self.reg1] = self.regs[self.reg2] ^ self.regs[self.reg3]
def showRegs(self):
res = "regs ="
for k in range(len(self.regs)):
res += " " + str(hex(self.regs[k]))[2:].zfill(4)
print(res)
def run(self, prog, show_regs=True):
self.prog = prog
self.running = True
while self.running:
instruction = self.fetch()
instrNum = self.decode(instruction)
self.eval(instrNum)
if show_regs: self.showRegs()
self.prog = None
if __name__ == "__main__":
    # Instruction layout:
    #   2          3           0           1     = 0x2301
    #   instr_num  reg_addr_1  reg_addr_2  reg_addr_3
    #
    # Variant (for loading an immediate integer)
    #   1          0         6 4  = 0x1064
    #   instr_num  reg_addr  immediate_value (hex)
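    #
    # Illustrative worked example (decoding 0x2301 with the masks in decode()):
    #   instrNum = (0x2301 & 0xF000) >> 12 = 2   -> add
    #   reg1     = (0x2301 & 0x0F00) >> 8  = 3   -> destination register r3
    #   reg2     = (0x2301 & 0x00F0) >> 4  = 0   -> source register r0
    #   reg3     =  0x2301 & 0x000F        = 1   -> source register r1
    #   i.e. regs[3] = regs[0] + regs[1]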
prog = [0x1064, 0x11C8, 0x12FA, 0x2301, 0x3132, 0x2201, 0x0000]
vm = VM(num_reg=4)
vm.run(prog)
| NightlySide/nightlyside.github.io | cb7b5b05c87711611f4700ff52c23409/iss.py | Python | mit | 3,088 | 0.006479 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify the user that we are in a transient state and the application
    is still converging, potentially remotely, or that we may be in a detached
    loop wait state. '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. This may be the first unit online in a service pool waiting
    # to self-host the dns pod, which will later configure itself to query the
    # dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
_apply_node_label(label, delete=True)
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
# Set label for application name
_apply_node_label('juju-application={}'.format(hookenv.service_name()),
overwrite=True)
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
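    '''Synchronise Docker registry logins with the docker-logins config
    option: log out of servers removed since the previous value, log in to
    every server in the new value, then schedule a service restart.'''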
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
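    '''Parse a space-separated list of key=value pairs from the given config
    option into a dict; bare keys (no '=') are treated as flags set to
    'true'.'''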
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
def configure_kubernetes_service(service, base_args, extra_args_key):
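    '''Configure a kubernetes snap service: clear any arguments set on a
    previous run (by setting them to null), apply the base arguments plus the
    operator-supplied extras via `snap set`, and remember the result for the
    next invocation.'''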
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
def configure_kubelet(dns):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend:1.4"
if arch() == 's390x':
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend-s390x:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = \
"gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
if arch() == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
    '''Return the list of kubernetes api server URLs (https://host:port) for
    this relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
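    '''(Re)register the nagios checks for the kubelet and kube-proxy
    daemons.'''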
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
        # master changes (master leader dies and is replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
class GetNodeNameFailed(Exception):
pass
def get_node_name():
# Get all the nodes in the cluster
cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
try:
raw = check_output(cmd)
break
except CalledProcessError:
hookenv.log('Failed to get node name for node %s.'
' Will retry.' % (gethostname()))
time.sleep(1)
else:
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
result = json.loads(raw.decode('utf-8'))
if 'items' in result:
for node in result['items']:
if 'status' not in node:
continue
if 'addresses' not in node['status']:
continue
# find the hostname
for address in node['status']['addresses']:
if address['type'] == 'Hostname':
if address['address'] == gethostname():
return node['metadata']['name']
# if we didn't match, just bail to the next node
break
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
class ApplyNodeLabelFailed(Exception):
pass
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
nodename = get_node_name()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, nodename, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
break
hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
label, code))
time.sleep(1)
else:
msg = 'Failed to apply label %s' % label
raise ApplyNodeLabelFailed(msg)
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
    return sanitized_labels
| cncf/cross-cloud | validate-cluster/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | Python | apache-2.0 | 36,222 | 0.000055 |
from random import randint
import re
STARTTAG = "<start>"
ENDTAG = "<end>"
class MarkovChainBot:
''' A Markov Chain text generator
    data is a list of strings that it is trained on, i.e. a list of books.
'''
def __init__(self, exclusion_list):
        ''' exclusion_list is a list of regex pattern strings; generated
        words matching any of them are omitted from the output. '''
self.data = []
self.probs = {STARTTAG: [ENDTAG]}
self.trained = True
self.exclusions = [re.compile(x) for x in exclusion_list]
def Train(self):
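        ''' Rebuild the transition table (self.probs) from the texts in
        self.data. '''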
assert type(self.data) == list
for obj in self.data:
assert type(obj) == str
if len(self.data) == 0:
return
self.probs = {}
def addWordToProbsDict(dic, index, target):
if index in dic.keys():
dic[index].append(target)
else:
dic[index] = [target]
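        # Build the transition table: map each word (and the start tag) to the
        # list of words observed to follow it; repeated entries act as weights.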
for text in self.data:
words = list(map(lambda x: x.lower(), text.split()))
if not words:
continue
addWordToProbsDict(self.probs, STARTTAG, words[0])
for i in range(len(words)-1):
addWordToProbsDict(self.probs, words[i], words[i+1])
addWordToProbsDict(self.probs, words[len(words)-1], ENDTAG)
def GenerateText(self):
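        ''' Generate a single string by randomly walking the chain from the
        start tag to the end tag, skipping words that match an exclusion
        pattern. '''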
ret = ''
curWord = STARTTAG
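        # Walk the chain: each step picks a uniformly random successor, so
        # words that followed more often are proportionally more likely.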
while(curWord != ENDTAG):
nextIn = randint(0, len(self.probs[curWord])-1)
curWord = self.probs[curWord][nextIn]
if(curWord == ENDTAG or curWord == STARTTAG):
continue
render = True
for pat in self.exclusions:
if(pat.match(curWord)):
render = False
if render:
ret += curWord
if(curWord != ENDTAG):
ret += ' '
return ret
| lukejuusola/MarkovMimic | MarkovChainBot.py | Python | mit | 1,445 | 0.039446 |
# -*- coding: utf-8 -*-
import http_lazy_headers as hlh
from . import utils
class TransferEncodingTest(utils.FieldTestCase):
field = hlh.TransferEncoding
def test_raw_values(self):
self.assertFieldRawEqual(
['gzip, chunked', 'foobar;bar=qux'],
((hlh.Encodings.gzip, hlh.ParamsCI()),
(hlh.Encodings.chunked, hlh.ParamsCI()),
('foobar', hlh.ParamsCI([('bar', 'qux')]))))
self.assertFieldRawEqual(
['GziP'],
((hlh.Encodings.gzip, hlh.ParamsCI()),))
def test_str(self):
self.assertFieldStrEqual(
((hlh.Encodings.gzip, hlh.ParamsCI()),
(hlh.Encodings.chunked, hlh.ParamsCI()),
('foobar', hlh.ParamsCI([('bar', 'qux')]))),
'transfer-encoding: gzip, chunked, foobar; bar=qux')
def test_raw_empty(self):
"""
Should NOT allow empty raw value
"""
self.assertRaisesHeaderError([''])
def test_empty(self):
"""
Should NOT allow empty value
"""
self.assertRaisesInternalError(())
def test_raw_bad_values(self):
"""
Should not allow bad raw values
"""
self.assertRawOK(['foo'])
self.assertRawOK(['foo;bar=baz'])
self.assertRaisesHeaderError(['^='])
self.assertRaisesHeaderError(['foo;'])
self.assertRaisesHeaderError(['foo;='])
self.assertRaisesHeaderError(['foo;bar='])
self.assertRaisesHeaderError(['foo;bar = baz'])
self.assertRaisesHeaderError(['foo;bar= baz'])
self.assertRaisesHeaderError(['foo;bar =baz'])
def test_bad_values(self):
"""
Should not allow bad values
"""
good_te = ('foo', hlh.ParamsCI())
self.assertOK([good_te])
self.assertRaisesInternalError([1])
self.assertRaisesInternalError(['foo'])
self.assertRaisesInternalError([None])
self.assertRaisesInternalError([('', hlh.ParamsCI())])
self.assertRaisesInternalError([(None, hlh.ParamsCI())])
self.assertRaisesInternalError([('foo', None)])
| nitely/http-lazy-headers | tests/tests_fields_/tests_transfer_encoding.py | Python | mit | 2,131 | 0 |