text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
__author__ = 'mark'
"""
User Profile Extension based on One-to-One fields code in Django Docs here:
https://docs.djangoproject.com/en/1.7/topics/auth/customizing/
"""
from django.db import models
from django.contrib.auth.models import User
from uuid import uuid4
class Member(models.Model):
    """Profile extension for ``auth.User`` via a one-to-one link.

    Follows the one-to-one profile pattern from the Django docs
    (https://docs.djangoproject.com/en/1.7/topics/auth/customizing/).
    """

    # One profile per user. on_delete is stated explicitly: CASCADE is the
    # implicit default in Django < 2.0, and the argument becomes mandatory
    # from Django 2.0 onward, so this is behavior-preserving.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Optional identifier strings (semantics defined by callers; all may be
    # empty/NULL).
    member_guid = models.CharField(max_length=100, null=True, blank=True)
    ext_uid = models.CharField(max_length=100, null=True, blank=True)
    user_token = models.CharField(max_length=100, null=True, blank=True)
|
ekivemark/my_device
|
bbp/bbp/member/models.py
|
Python
|
apache-2.0
| 548 | 0.00365 |
# toontown.pets.PetBase
from toontown.pets.PetConstants import AnimMoods
from toontown.pets import PetMood
class PetBase:
    """Shared helpers for pet classes: setter-name building and mapping the
    pet's dominant mood onto one of three animation moods."""

    def getSetterName(self, valueName, prefix='set'):
        # e.g. 'mood' -> 'setMood': capitalize the first letter, keep the rest.
        return '%s%s%s' % (prefix, valueName[0].upper(), valueName[1:])

    def getAnimMood(self):
        # NOTE(review): relies on self.mood being set by the concrete subclass.
        if self.mood.getDominantMood() in PetMood.PetMood.ExcitedMoods:
            return AnimMoods.EXCITED
        if self.mood.getDominantMood() in PetMood.PetMood.UnhappyMoods:
            return AnimMoods.SAD
        return AnimMoods.NEUTRAL

    def isExcited(self):
        return AnimMoods.EXCITED == self.getAnimMood()

    def isSad(self):
        return AnimMoods.SAD == self.getAnimMood()
|
DedMemez/ODS-August-2017
|
pets/PetBase.py
|
Python
|
apache-2.0
| 778 | 0.006427 |
#!/usr/bin/env python
import os
import numpy as np
from cereal import car
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot
from selfdrive.swaglog import cloudlog
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET, get_events
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.honda.carstate import CarState, get_can_parser
from selfdrive.car.honda.values import CruiseButtons, CM, BP, AH, CAR, HONDA_BOSCH
from selfdrive.controls.lib.planner import A_ACC_MAX
try:
from selfdrive.car.honda.carcontroller import CarController
except ImportError:
CarController = None
# msgs sent for steering controller by camera module on can 0.
# those messages are mutually exclusive on CRV and non-CRV cars
CAMERA_MSGS = [0xe4, 0x194]
def compute_gb_honda(accel, speed):
    """Map a desired acceleration to a normalized gas/brake value for Hondas.

    The acceleration is scaled by a fixed 4.8; below ``creep_speed`` a small
    extra brake term is blended in (linearly growing as speed drops to zero)
    to counteract engine creep.
    """
    CREEP_SPEED = 2.3
    CREEP_BRAKE_VALUE = 0.15
    creep_brake = 0.0
    if speed < CREEP_SPEED:
        creep_brake = (CREEP_SPEED - speed) / CREEP_SPEED * CREEP_BRAKE_VALUE
    return float(accel) / 4.8 - creep_brake
def get_compute_gb_acura():
    """Build the Acura ILX gas/brake mapping function.

    Returns a function taking [desired_accel, current_speed] -> [-1.0, 1.0],
    where -1.0 is max brake and 1.0 is max gas.  The mapping is a small
    fitted feed-forward network; see debug/dump_accel_from_fiber.py to see
    how the parameters below were generated.  The constants and the exact
    arithmetic order ARE the fitted model — do not alter them.
    """
    # Fitted weights and biases for the three dense layers.
    w0 = np.array([[ 1.22056961, -0.39625418, 0.67952657],
                   [ 1.03691769, 0.78210306, -0.41343188]])
    b0 = np.array([ 0.01536703, -0.14335321, -0.26932889])
    w2 = np.array([[-0.59124422, 0.42899439, 0.38660881],
                   [ 0.79973811, 0.13178682, 0.08550351],
                   [-0.15651935, -0.44360259, 0.76910877]])
    b2 = np.array([ 0.15624429, 0.02294923, -0.0341086 ])
    w4 = np.array([[-0.31521443],
                   [-0.38626176],
                   [ 0.52667892]])
    b4 = np.array([-0.02922216])

    def compute_output(dat, w0, b0, w2, b2, w4, b4):
        # Forward pass: dense -> leaky ReLU -> dense -> leaky ReLU -> dense.
        m0 = np.dot(dat, w0) + b0
        m0 = leakyrelu(m0, 0.1)
        m2 = np.dot(m0, w2) + b2
        m2 = leakyrelu(m2, 0.1)
        m4 = np.dot(m2, w4) + b4
        return m4

    def leakyrelu(x, alpha):
        # Elementwise max(x, alpha * x).
        return np.maximum(x, alpha * x)

    def _compute_gb_acura(accel, speed):
        # The network is only valid above v1; below that, linearly extrap
        # below v1 using v1 and v2 data.
        v1 = 5.
        v2 = 10.
        dat = np.array([accel, speed])
        if speed > 5.:
            m4 = compute_output(dat, w0, b0, w2, b2, w4, b4)
        else:
            dat[1] = v1
            m4v1 = compute_output(dat, w0, b0, w2, b2, w4, b4)
            dat[1] = v2
            m4v2 = compute_output(dat, w0, b0, w2, b2, w4, b4)
            m4 = (speed - v1) * (m4v2 - m4v1) / (v2 - v1) + m4v1
        return float(m4)

    return _compute_gb_acura
class CarInterface(object):
  """openpilot car interface for Honda/Acura vehicles.

  Bridges the CAN parser / CarState, the vehicle dynamics model and the
  actuator CarController behind the generic openpilot car-interface API:
  ``get_params`` builds static car parameters, ``update`` produces a
  car.CarState each cycle, and ``apply`` sends a car.CarControl to the car.
  """

  def __init__(self, CP, sendcan=None):
    # CP: car.CarParams built by get_params().
    self.CP = CP

    self.frame = 0
    # sec_since_boot timestamps used to debounce cruise-button enable events.
    self.last_enable_pressed = 0
    self.last_enable_sent = 0
    self.gas_pressed_prev = False
    self.brake_pressed_prev = False
    self.can_invalid_count = 0

    self.cp = get_can_parser(CP)

    # *** init the major players ***
    self.CS = CarState(CP)
    self.VM = VehicleModel(CP)

    # sending if read only is False
    if sendcan is not None:
      self.sendcan = sendcan
      self.CC = CarController(self.cp.dbc_name, CP.enableCamera)

    # Acura ILX uses the fitted-network gas/brake map; other Hondas the
    # analytic one.
    if self.CS.CP.carFingerprint == CAR.ACURA_ILX:
      self.compute_gb = get_compute_gb_acura()
    else:
      self.compute_gb = compute_gb_honda

  @staticmethod
  def calc_accel_override(a_ego, a_target, v_ego, v_target):
    """Return a scale factor for the PCM accel command.

    Limit the pcm accel cmd if:
    - v_ego exceeds v_target, or
    - a_ego exceeds a_target and v_ego is close to v_target
    """
    # accel-error interpolation table
    eA = a_ego - a_target
    valuesA = [1.0, 0.1]
    bpA = [0.3, 1.1]

    # speed-error interpolation tables
    eV = v_ego - v_target
    valuesV = [1.0, 0.1]
    bpV = [0.0, 0.5]

    valuesRangeV = [1., 0.]
    bpRangeV = [-1., 0.]

    # only limit if v_ego is close to v_target
    speedLimiter = interp(eV, bpV, valuesV)
    accelLimiter = max(interp(eA, bpA, valuesA), interp(eV, bpRangeV, valuesRangeV))

    # accelOverride is more or less the max throttle allowed to pcm: usually set to a constant
    # unless aTargetMax is very high and then we scale with it; this help in quicker restart
    return float(max(0.714, a_target / A_ACC_MAX)) * min(speedLimiter, accelLimiter)

  @staticmethod
  def get_params(candidate, fingerprint):
    """Build car.CarParams for the fingerprinted car *candidate*.

    Per-model tuning (mass, wheelbase, steering ratio, PID gains) is set in
    the candidate branches below; unknown dynamic params are scaled from the
    2016 Civic reference values.  Raises ValueError for unsupported cars.
    """
    ret = car.CarParams.new_message()
    ret.carName = "honda"
    ret.carFingerprint = candidate

    # Bosch-radar Hondas use a different safety model and have no radar on CAN.
    if candidate in HONDA_BOSCH:
      ret.safetyModel = car.CarParams.SafetyModels.hondaBosch
      ret.enableCamera = True
      ret.radarOffCan = True
    else:
      ret.safetyModel = car.CarParams.SafetyModels.honda
      # Camera is "simulated" (openpilot replaces it) when no camera CAN
      # messages are seen in the fingerprint.
      ret.enableCamera = not any(x for x in CAMERA_MSGS if x in fingerprint)
      # 0x201 is the comma pedal (gas interceptor) message.
      ret.enableGasInterceptor = 0x201 in fingerprint
    cloudlog.warn("ECU Camera Simulated: %r", ret.enableCamera)
    cloudlog.warn("ECU Gas Interceptor: %r", ret.enableGasInterceptor)

    # Stock cruise is used unless a gas interceptor is present.
    ret.enableCruise = not ret.enableGasInterceptor

    # kg of standard extra cargo to count for drive, gas, etc...
    std_cargo = 136

    # FIXME: hardcoding honda civic 2016 touring params so they can be used to
    # scale unknown params for other cars
    mass_civic = 2923 * CV.LB_TO_KG + std_cargo
    wheelbase_civic = 2.70
    centerToFront_civic = wheelbase_civic * 0.4
    centerToRear_civic = wheelbase_civic - centerToFront_civic
    rotationalInertia_civic = 2500
    tireStiffnessFront_civic = 192150
    tireStiffnessRear_civic = 202500

    # Optimized car params: tire_stiffness_factor and steerRatio are a result of a vehicle
    # model optimization process. Certain Hondas have an extra steering sensor at the bottom
    # of the steering rack, which improves controls quality as it removes the steering column
    # torsion from feedback.
    # Tire stiffness factor fictitiously lower if it includes the steering column torsion effect.
    # For modeling details, see p.198-200 in "The Science of Vehicle Dynamics (2014), M. Guiggiani"
    ret.steerKiBP, ret.steerKpBP = [[0.], [0.]]
    ret.steerKf = 0.00006 # conservative feed-forward

    if candidate == CAR.CIVIC:
      stop_and_go = True
      ret.mass = mass_civic
      ret.wheelbase = wheelbase_civic
      ret.centerToFront = centerToFront_civic
      ret.steerRatio = 14.63  # 10.93 is end-to-end spec
      tire_stiffness_factor = 1.
      # Civic at comma has modified steering FW, so different tuning for the Neo in that car
      is_fw_modified = os.getenv("DONGLE_ID") in ['99c94dc769b5d96e']
      ret.steerKpV, ret.steerKiV = [[0.33], [0.10]] if is_fw_modified else [[0.8], [0.24]]
      if is_fw_modified:
        ret.steerKf = 0.00003
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [3.6, 2.4, 1.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.54, 0.36]

    elif candidate == CAR.CIVIC_HATCH:
      stop_and_go = True
      ret.mass = 2916. * CV.LB_TO_KG + std_cargo
      ret.wheelbase = wheelbase_civic
      ret.centerToFront = centerToFront_civic
      ret.steerRatio = 14.63  # 10.93 is spec end-to-end
      tire_stiffness_factor = 1.
      ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.ACCORD:
      stop_and_go = True
      ret.safetyParam = 1  # Accord and CRV 5G use an alternate user brake msg
      ret.mass = 3279. * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 2.83
      ret.centerToFront = ret.wheelbase * 0.39
      ret.steerRatio = 15.96  # 11.82 is spec end-to-end
      tire_stiffness_factor = 0.8467
      ret.steerKpV, ret.steerKiV = [[0.6], [0.18]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.ACURA_ILX:
      stop_and_go = False
      ret.mass = 3095 * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 2.67
      ret.centerToFront = ret.wheelbase * 0.37
      ret.steerRatio = 18.61  # 15.3 is spec end-to-end
      tire_stiffness_factor = 0.72
      # Acura at comma has modified steering FW, so different tuning for the Neo in that car
      is_fw_modified = os.getenv("DONGLE_ID") in ['ff83f397542ab647']
      ret.steerKpV, ret.steerKiV = [[0.45], [0.00]] if is_fw_modified else [[0.8], [0.24]]
      if is_fw_modified:
        ret.steerKf = 0.00003
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.CRV:
      stop_and_go = False
      ret.mass = 3572 * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 2.62
      ret.centerToFront = ret.wheelbase * 0.41
      ret.steerRatio = 15.3  # as spec
      tire_stiffness_factor = 0.444  # not optimized yet
      ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.CRV_5G:
      stop_and_go = True
      ret.safetyParam = 1  # Accord and CRV 5G use an alternate user brake msg
      ret.mass = 3410. * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 2.66
      ret.centerToFront = ret.wheelbase * 0.41
      ret.steerRatio = 16.0  # 12.3 is spec end-to-end
      tire_stiffness_factor = 0.677
      ret.steerKpV, ret.steerKiV = [[0.6], [0.18]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.ACURA_RDX:
      stop_and_go = False
      ret.mass = 3935 * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 2.68
      ret.centerToFront = ret.wheelbase * 0.38
      ret.steerRatio = 15.0  # as spec
      tire_stiffness_factor = 0.444  # not optimized yet
      ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.ODYSSEY:
      stop_and_go = False
      ret.mass = 4354 * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 3.00
      ret.centerToFront = ret.wheelbase * 0.41
      ret.steerRatio = 14.35  # as spec
      tire_stiffness_factor = 0.444  # not optimized yet
      ret.steerKpV, ret.steerKiV = [[0.6], [0.18]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.PILOT:
      stop_and_go = False
      ret.mass = 4303 * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 2.81
      ret.centerToFront = ret.wheelbase * 0.41
      ret.steerRatio = 16.0  # as spec
      tire_stiffness_factor = 0.444  # not optimized yet
      ret.steerKpV, ret.steerKiV = [[0.38], [0.11]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    elif candidate == CAR.RIDGELINE:
      stop_and_go = False
      ret.mass = 4515 * CV.LB_TO_KG + std_cargo
      ret.wheelbase = 3.18
      ret.centerToFront = ret.wheelbase * 0.41
      ret.steerRatio = 15.59  # as spec
      tire_stiffness_factor = 0.444  # not optimized yet
      ret.steerKpV, ret.steerKiV = [[0.38], [0.11]]
      ret.longitudinalKpBP = [0., 5., 35.]
      ret.longitudinalKpV = [1.2, 0.8, 0.5]
      ret.longitudinalKiBP = [0., 35.]
      ret.longitudinalKiV = [0.18, 0.12]

    else:
      raise ValueError("unsupported car %s" % candidate)

    ret.steerControlType = car.CarParams.SteerControlType.torque

    # min speed to enable ACC. if car can do stop and go, then set enabling speed
    # to a negative value, so it won't matter. Otherwise, add 0.5 mph margin to not
    # conflict with PCM acc
    ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 25.5 * CV.MPH_TO_MS

    centerToRear = ret.wheelbase - ret.centerToFront
    # TODO: get actual value, for now starting with reasonable value for
    # civic and scaling by mass and wheelbase
    ret.rotationalInertia = rotationalInertia_civic * \
                            ret.mass * ret.wheelbase**2 / (mass_civic * wheelbase_civic**2)

    # TODO: start from empirically derived lateral slip stiffness for the civic and scale by
    # mass and CG position, so all cars will have approximately similar dyn behaviors
    ret.tireStiffnessFront = (tireStiffnessFront_civic * tire_stiffness_factor) * \
                             ret.mass / mass_civic * \
                             (centerToRear / ret.wheelbase) / (centerToRear_civic / wheelbase_civic)
    ret.tireStiffnessRear = (tireStiffnessRear_civic * tire_stiffness_factor) * \
                            ret.mass / mass_civic * \
                            (ret.centerToFront / ret.wheelbase) / (centerToFront_civic / wheelbase_civic)

    # no rear steering, at least on the listed cars above
    ret.steerRatioRear = 0.

    # no max steer limit VS speed
    ret.steerMaxBP = [0.]  # m/s
    ret.steerMaxV = [1.]   # max steer allowed

    ret.gasMaxBP = [0.]  # m/s
    ret.gasMaxV = [0.6] if ret.enableGasInterceptor else [0.]  # max gas allowed
    ret.brakeMaxBP = [5., 20.]  # m/s
    ret.brakeMaxV = [1., 0.8]   # max brake allowed

    ret.longPidDeadzoneBP = [0.]
    ret.longPidDeadzoneV = [0.]

    ret.stoppingControl = True
    ret.steerLimitAlert = True
    ret.startAccel = 0.5

    ret.steerActuatorDelay = 0.1
    ret.steerRateCost = 0.5

    return ret

  # returns a car.CarState
  def update(self, c):
    """Read CAN, fill a car.CarState, and derive this cycle's events.

    ``c`` is the previous car.CarControl (used for brake lights and the
    cruise-disable logic).  Returns a read-only CarState.
    """
    # ******************* do can recv *******************
    canMonoTimes = []
    self.cp.update(int(sec_since_boot() * 1e9), False)
    self.CS.update(self.cp)

    # create message
    ret = car.CarState.new_message()

    # speeds
    ret.vEgo = self.CS.v_ego
    ret.aEgo = self.CS.a_ego
    ret.vEgoRaw = self.CS.v_ego_raw
    ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
    ret.standstill = self.CS.standstill
    ret.wheelSpeeds.fl = self.CS.v_wheel_fl
    ret.wheelSpeeds.fr = self.CS.v_wheel_fr
    ret.wheelSpeeds.rl = self.CS.v_wheel_rl
    ret.wheelSpeeds.rr = self.CS.v_wheel_rr

    # gas pedal
    ret.gas = self.CS.car_gas / 256.0
    if not self.CP.enableGasInterceptor:
      ret.gasPressed = self.CS.pedal_gas > 0
    else:
      ret.gasPressed = self.CS.user_gas_pressed

    # brake pedal
    ret.brake = self.CS.user_brake
    ret.brakePressed = self.CS.brake_pressed != 0
    # FIXME: read sendcan for brakelights
    brakelights_threshold = 0.02 if self.CS.CP.carFingerprint == CAR.CIVIC else 0.1
    ret.brakeLights = bool(self.CS.brake_switch or
                           c.actuators.brake > brakelights_threshold)

    # steering wheel
    ret.steeringAngle = self.CS.angle_steers
    ret.steeringRate = self.CS.angle_steers_rate

    # gear shifter lever
    ret.gearShifter = self.CS.gear_shifter

    ret.steeringTorque = self.CS.steer_torque_driver
    ret.steeringPressed = self.CS.steer_override

    # cruise state
    ret.cruiseState.enabled = self.CS.pcm_acc_status != 0
    ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
    ret.cruiseState.available = bool(self.CS.main_on)
    ret.cruiseState.speedOffset = self.CS.cruise_speed_offset
    ret.cruiseState.standstill = False

    # TODO: button presses
    buttonEvents = []
    ret.leftBlinker = bool(self.CS.left_blinker_on)
    ret.rightBlinker = bool(self.CS.right_blinker_on)

    ret.doorOpen = not self.CS.door_all_closed
    ret.seatbeltUnlatched = not self.CS.seatbelt

    # emit a button event on each blinker edge (on or off)
    if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'leftBlinker'
      be.pressed = self.CS.left_blinker_on != 0
      buttonEvents.append(be)

    if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'rightBlinker'
      be.pressed = self.CS.right_blinker_on != 0
      buttonEvents.append(be)

    if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'unknown'
      # on release, classify by the button that was previously held
      if self.CS.cruise_buttons != 0:
        be.pressed = True
        but = self.CS.cruise_buttons
      else:
        be.pressed = False
        but = self.CS.prev_cruise_buttons
      if but == CruiseButtons.RES_ACCEL:
        be.type = 'accelCruise'
      elif but == CruiseButtons.DECEL_SET:
        be.type = 'decelCruise'
      elif but == CruiseButtons.CANCEL:
        be.type = 'cancel'
      elif but == CruiseButtons.MAIN:
        be.type = 'altButton3'
      buttonEvents.append(be)

    if self.CS.cruise_setting != self.CS.prev_cruise_setting:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'unknown'
      if self.CS.cruise_setting != 0:
        be.pressed = True
        but = self.CS.cruise_setting
      else:
        be.pressed = False
        but = self.CS.prev_cruise_setting
      if but == 1:
        be.type = 'altButton1'
      # TODO: more buttons?
      buttonEvents.append(be)
    ret.buttonEvents = buttonEvents

    # events
    # TODO: I don't like the way capnp does enums
    # These strings aren't checked at compile time
    events = []
    # debounce CAN validity: 5 consecutive bad cycles before commIssue
    if not self.CS.can_valid:
      self.can_invalid_count += 1
      if self.can_invalid_count >= 5:
        events.append(create_event('commIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
    else:
      self.can_invalid_count = 0
    if self.CS.steer_error:
      events.append(create_event('steerUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
    elif self.CS.steer_warning:
      events.append(create_event('steerTempUnavailable', [ET.WARNING]))
    if self.CS.brake_error:
      events.append(create_event('brakeUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
    if not ret.gearShifter == 'drive':
      events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if ret.doorOpen:
      events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if ret.seatbeltUnlatched:
      events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if self.CS.esp_disabled:
      events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if not self.CS.main_on:
      events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
    if ret.gearShifter == 'reverse':
      events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
    if self.CS.brake_hold:
      events.append(create_event('brakeHold', [ET.NO_ENTRY, ET.USER_DISABLE]))
    if self.CS.park_brake:
      events.append(create_event('parkBrake', [ET.NO_ENTRY, ET.USER_DISABLE]))

    if self.CP.enableCruise and ret.vEgo < self.CP.minEnableSpeed:
      events.append(create_event('speedTooLow', [ET.NO_ENTRY]))

    # disable on pedals rising edge or when brake is pressed and speed isn't zero
    if (ret.gasPressed and not self.gas_pressed_prev) or \
       (ret.brakePressed and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
      events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))

    if ret.gasPressed:
      events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))

    # it can happen that car cruise disables while comma system is enabled: need to
    # keep braking if needed or if the speed is very low
    if self.CP.enableCruise and not ret.cruiseState.enabled and c.actuators.brake <= 0.:
      # non loud alert if cruise disables below 25mph as expected (+ a little margin)
      if ret.vEgo < self.CP.minEnableSpeed + 2.:
        events.append(create_event('speedTooLow', [ET.IMMEDIATE_DISABLE]))
      else:
        events.append(create_event("cruiseDisabled", [ET.IMMEDIATE_DISABLE]))
    if self.CS.CP.minEnableSpeed > 0 and ret.vEgo < 0.001:
      events.append(create_event('manualRestart', [ET.WARNING]))

    cur_time = sec_since_boot()
    enable_pressed = False
    # handle button presses
    for b in ret.buttonEvents:

      # do enable on both accel and decel buttons
      if b.type in ["accelCruise", "decelCruise"] and not b.pressed:
        self.last_enable_pressed = cur_time
        enable_pressed = True

      # do disable on button down
      if b.type == "cancel" and b.pressed:
        events.append(create_event('buttonCancel', [ET.USER_DISABLE]))

    if self.CP.enableCruise:
      # KEEP THIS EVENT LAST! send enable event if button is pressed and there are
      # NO_ENTRY events, so controlsd will display alerts. Also not send enable events
      # too close in time, so a no_entry will not be followed by another one.
      # TODO: button press should be the only thing that triggers enable
      if ((cur_time - self.last_enable_pressed) < 0.2 and
          (cur_time - self.last_enable_sent) > 0.2 and
          ret.cruiseState.enabled) or \
         (enable_pressed and get_events(events, [ET.NO_ENTRY])):
        events.append(create_event('buttonEnable', [ET.ENABLE]))
        self.last_enable_sent = cur_time
    elif enable_pressed:
      events.append(create_event('buttonEnable', [ET.ENABLE]))

    ret.events = events
    ret.canMonoTimes = canMonoTimes

    # update previous brake/gas pressed
    self.gas_pressed_prev = ret.gasPressed
    self.brake_pressed_prev = ret.brakePressed

    # cast to reader so it can't be modified
    return ret.as_reader()

  # pass in a car.CarControl
  # to be called @ 100hz
  def apply(self, c):
    """Translate a car.CarControl into actuator/HUD CAN messages."""
    # 255 means "hide the set-speed" on the HUD
    if c.hudControl.speedVisible:
      hud_v_cruise = c.hudControl.setSpeed * CV.MS_TO_KPH
    else:
      hud_v_cruise = 255

    # map generic alert enums onto Honda-specific HUD/chime codes
    hud_alert = {
        "none": AH.NONE,
        "fcw": AH.FCW,
        "steerRequired": AH.STEER,
        "brakePressed": AH.BRAKE_PRESSED,
        "wrongGear": AH.GEAR_NOT_D,
        "seatbeltUnbuckled": AH.SEATBELT,
        "speedTooHigh": AH.SPEED_TOO_HIGH}[str(c.hudControl.visualAlert)]

    snd_beep, snd_chime = {
        "none": (BP.MUTE, CM.MUTE),
        "beepSingle": (BP.SINGLE, CM.MUTE),
        "beepTriple": (BP.TRIPLE, CM.MUTE),
        "beepRepeated": (BP.REPEATED, CM.MUTE),
        "chimeSingle": (BP.MUTE, CM.SINGLE),
        "chimeDouble": (BP.MUTE, CM.DOUBLE),
        "chimeRepeated": (BP.MUTE, CM.REPEATED),
        "chimeContinuous": (BP.MUTE, CM.CONTINUOUS)}[str(c.hudControl.audibleAlert)]

    # PCM accel command is scaled to 0..0xc6
    pcm_accel = int(clip(c.cruiseControl.accelOverride, 0, 1) * 0xc6)

    self.CC.update(self.sendcan, c.enabled, self.CS, self.frame,
                   c.actuators,
                   c.cruiseControl.speedOverride,
                   c.cruiseControl.override,
                   c.cruiseControl.cancel,
                   pcm_accel,
                   hud_v_cruise, c.hudControl.lanesVisible,
                   hud_show_car=c.hudControl.leadVisible,
                   hud_alert=hud_alert,
                   snd_beep=snd_beep,
                   snd_chime=snd_chime)

    self.frame += 1
|
TheMutley/openpilot
|
selfdrive/car/honda/interface.py
|
Python
|
mit
| 23,054 | 0.013794 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
__all__ = []
# imports
import sys
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.util import *
from pysollib.stack import *
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import AbstractHint, DefaultHint, CautiousDefaultHint, Yukon_Hint
from pysollib.games.gypsy import Gypsy
# ************************************************************************
# * Sanibel
# * play similar to Yukon
# ************************************************************************
class Sanibel(Gypsy):
    """Sanibel: a Gypsy-family solitaire game played similarly to Yukon.

    Ten row stacks, a waste pile, and a single-round talon; foundations do
    not allow manual moves (max_move=0), and row play uses Yukon rules.
    """
    Layout_Method = Layout.klondikeLayout
    Talon_Class = StackWrapper(WasteTalonStack, max_rounds=1)
    Foundation_Class = StackWrapper(SS_FoundationStack, max_move=0)
    RowStack_Class = Yukon_AC_RowStack
    Hint_Class = Yukon_Hint

    def createGame(self):
        # 10 rows plus one waste stack, with room for 23 play cards.
        Gypsy.createGame(self, rows=10, waste=1, playcards=23)

    def startGame(self):
        # Deal three face-down rows and six face-up rows without animation,
        # then the final animated row and the first waste card.
        for i in range(3):
            self.s.talon.dealRow(flip=0, frames=0)
        for i in range(6):
            self.s.talon.dealRow(frames=0)
        self.startDealSample()
        self.s.talon.dealRow()
        self.s.talon.dealCards()          # deal first card to WasteStack

    def getHighlightPilesStacks(self):
        # No piles are highlighted in this game.
        return ()
# Register Sanibel (game id 201) with the PySol game database: a Yukon-type,
# contributed original game for 2 decks, 0 redeals, mostly-skill category.
registerGame(GameInfo(201, Sanibel, "Sanibel",
                      GI.GT_YUKON | GI.GT_CONTRIB | GI.GT_ORIGINAL, 2, 0, GI.SL_MOSTLY_SKILL))
|
TrevorLowing/PyGames
|
pysollib/games/sanibel.py
|
Python
|
gpl-2.0
| 2,437 | 0.007386 |
import tty
import sys
import termios

# Interactive key tester: put the terminal into raw mode, echo the ordinal
# of every key pressed, and always restore the original terminal settings.
# An ESC byte (27) causes the following byte to be read and echoed as well;
# Ctrl-C (ordinal 3) exits the loop.
fd = sys.stdin.fileno()
saved_attrs = termios.tcgetattr(fd)
try:
    tty.setraw(fd)
    while True:
        ch = sys.stdin.read(1)
        sys.stdout.write('test: %s\r\n' % ord(ch))
        if ord(ch) == 27:
            ch = sys.stdin.read(1)
            sys.stdout.write('esc: %s\r\n' % ord(ch))
        if ord(ch) == 3:
            break
finally:
    # Restore cooked mode even if the loop raises.
    termios.tcsetattr(fd, termios.TCSADRAIN, saved_attrs)
|
coreyabshire/marv
|
bin/experiments/key_test.py
|
Python
|
mit
| 472 | 0.002119 |
from collections import namedtuple
class HitsChecker:
    """Decides, per read, which species' hits (if any) the read is assigned to.

    Given one "hits manager" per species, the checker computes per-species
    threshold data (multimap count, primary-hit mismatches, CIGAR quality)
    and assigns each read to the best species, or marks it rejected /
    ambiguous.  Thresholds are supplied as percentages and stored as
    fractions.

    Fixes vs. previous revision: corrected misspelled debug messages
    ("assigne" -> "assigned", "primart" -> "primary", "Ambigous" ->
    "ambiguous"); no logic changes.
    """

    # Assignment outcomes that are not a hits-manager index.
    REJECTED = -1
    AMBIGUOUS = -2

    # CIGAR quality ordering: lower is better.
    CIGAR_GOOD = 0
    CIGAR_LESS_GOOD = 1
    CIGAR_FAIL = 2

    # CIGAR operation codes (values match pysam's encoding).
    CIGAR_OP_MATCH = 0          # From pysam
    CIGAR_OP_REF_INSERTION = 1  # From pysam
    CIGAR_OP_REF_DELETION = 2   # From pysam
    CIGAR_OP_REF_SKIP = 3       # From pysam

    # Per-species summary used to compare competing hits managers.
    ThresholdData = namedtuple(
        'ThresholdData',
        ['index', 'violated', 'multimaps', 'mismatches', 'cigar_check'])

    def __init__(self, mismatch_thresh, minmatch_thresh, multimap_thresh,
                 reject_multimaps, logger):
        """Store thresholds and pick the assignment strategy.

        mismatch_thresh, minmatch_thresh: percentages (0-100).
        multimap_thresh: max allowed number of multimaps.
        reject_multimaps: if True, any multimapping read is rejected outright.
        """
        self.logger = logger
        self.mismatch_thresh = mismatch_thresh / 100.0
        self.minmatch_thresh = minmatch_thresh / 100.0
        self.multimap_thresh = multimap_thresh

        # Strategy selection happens once, here, instead of per-read.
        self._assign_hits = self._assign_hits_reject_multimaps \
            if reject_multimaps else self._assign_hits_standard

        logger.debug(("PARAMS: mismatch - {mism}, minmatch - {minm}, " +
                      "multimap - {mult}").format(
            mism=self.mismatch_thresh,
            minm=self.minmatch_thresh,
            mult=self.multimap_thresh))

    def compare_and_write_hits(self, hits_managers):
        """Assign one read's hits to a single species, or reject/ambiguate.

        Compare the hits for a particular read in each species and decide
        whether the read can be assigned to one species or another, or if it
        must be rejected as ambiguous.  Stats are updated on every manager
        and all managers' current hits are cleared afterwards.
        """
        for manager in hits_managers:
            manager.update_hits_info()

        threshold_data = [self._check_thresholds(i, manager)
                          for i, manager in enumerate(hits_managers)]

        if __debug__:
            for datum in threshold_data:
                self.logger.debug(datum)

        assignee = self._assign_hits(threshold_data)

        if assignee == self.REJECTED:
            for hits_manager in hits_managers:
                hits_manager.add_rejected_hits_to_stats()
        elif assignee == self.AMBIGUOUS:
            for hits_manager in hits_managers:
                hits_manager.add_ambiguous_hits_to_stats()
        else:
            for i, hits_manager in enumerate(hits_managers):
                if i == assignee:
                    hits_manager.add_accepted_hits_to_stats()
                    hits_manager.write_hits()
                    # self.check_and_write_hits_for_read(hits_manager)
                else:
                    hits_manager.add_rejected_hits_to_stats()

        for hits_manager in hits_managers:
            hits_manager.clear_hits()

    def check_and_write_hits_for_read(self, hits_manager):
        """Accept or reject a single manager's current read (no competition)."""
        if hits_manager.hits_info is None:
            hits_manager.update_hits_info()

        if self.check_hits(hits_manager.hits_info):
            hits_manager.add_accepted_hits_to_stats()
            hits_manager.write_hits()
        else:
            hits_manager.add_rejected_hits_to_stats()

        hits_manager.clear_hits()

    def check_and_write_hits_for_remaining_reads(self, hits_manager):
        """Drain a manager: check and write hits for all its remaining reads."""
        try:
            while True:
                if hits_manager.hits_for_read is None:
                    hits_manager.get_next_read_hits()
                self.check_and_write_hits_for_read(hits_manager)
        except StopIteration:
            # Manager exhausted; nothing more to process.
            pass

    def check_hits(self, hits_info):
        """Return True if a read's hits satisfy all thresholds on their own.

        Used when only one species is in play: check that the hits for a read
        are - in themselves - satisfactory to be assigned to a species.
        """
        violated = False

        if hits_info.get_multimaps() > self.multimap_thresh:
            violated = True
            if __debug__:
                self.logger.debug(
                    'only one competing hits manager but violated multimap.')
        if hits_info.get_primary_mismatches() > \
                round(self.mismatch_thresh * hits_info.get_total_length()):
            violated = True
            if __debug__:
                self.logger.debug(
                    'only one competing hits manager but violated primary mismatches.')
        if self._check_cigars(hits_info) == self.CIGAR_FAIL:
            violated = True
            if __debug__:
                self.logger.debug(
                    'only one competing hits manager but violated primary CIGAR.')

        if __debug__:
            if not violated:
                self.logger.debug('assigned due to only one competing filterer!')

        return not violated

    def _assign_hits_standard(self, threshold_data):
        """Pick the winning species index, or REJECTED/AMBIGUOUS.

        Tie-breaking cascade: drop violators, then prefer (in order) fewest
        primary mismatches, best CIGAR check, fewest multimaps.
        """
        threshold_data = [t for t in threshold_data if not t.violated]

        num_hits_managers = len(threshold_data)
        if num_hits_managers == 0:
            return self.REJECTED
        elif num_hits_managers == 1:
            if __debug__:
                self.logger.debug('assigned due to only one filter left after checking threshold!')
            return threshold_data[0].index

        min_mismatches = min([m.mismatches for m in threshold_data])
        threshold_data = [t for t in threshold_data
                          if t.mismatches == min_mismatches]
        if len(threshold_data) == 1:
            if __debug__:
                self.logger.debug('assigned due to primary hit min_mismatches!')
            return threshold_data[0].index

        min_cigar_check = min([m.cigar_check for m in threshold_data])
        threshold_data = [t for t in threshold_data
                          if t.cigar_check == min_cigar_check]
        if len(threshold_data) == 1:
            if __debug__:
                self.logger.debug('assigned due to primary hit CIGAR!')
            return threshold_data[0].index

        min_multimaps = min([m.multimaps for m in threshold_data])
        threshold_data = [t for t in threshold_data
                          if t.multimaps == min_multimaps]
        if len(threshold_data) == 1:
            # # todo remove debug multimap
            if __debug__:
                self.logger.debug('assigned due to number of multimap!')
            return threshold_data[0].index

        if __debug__:
            self.logger.debug('assigned due to ambiguous!')
        return self.AMBIGUOUS

    def _assign_hits_reject_multimaps(self, threshold_data):
        """Like the standard strategy, but any multimapping read is rejected."""
        if len([t for t in threshold_data if t.multimaps > 1]) > 0:
            return self.REJECTED
        return self._assign_hits_standard(threshold_data)

    def _check_thresholds(self, index, hits_manager):
        """Summarize one manager's hits into a ThresholdData tuple."""
        hits_info = hits_manager.hits_info
        violated = False

        multimaps = hits_info.get_multimaps()
        if multimaps > self.multimap_thresh:
            # # todo remove debug multimap
            if __debug__:
                self.logger.debug('violated due to multimap!')
            violated = True

        mismatches = hits_info.get_primary_mismatches()
        if mismatches > round(self.mismatch_thresh *
                              hits_info.get_total_length()):
            if __debug__:
                self.logger.debug('violated due to primary mismatches!')
            violated = True

        cigar_check = self._check_cigars(hits_info)
        if cigar_check == self.CIGAR_FAIL:
            if __debug__:
                self.logger.debug('violated due to primary CIGAR!')
            violated = True

        return self.ThresholdData(
            index, violated, multimaps, mismatches, cigar_check)

    def _check_cigars(self, hits_info):
        """Grade the primary hits' CIGARs: GOOD, LESS_GOOD, or FAIL.

        FAIL if matched bases fall below the min-match threshold; LESS_GOOD
        if there are indels or any non-matched bases; otherwise GOOD.
        """
        total_length = hits_info.get_total_length()
        min_match = total_length - round(self.minmatch_thresh * total_length)

        cigars = hits_info.get_primary_cigars()
        response = self.CIGAR_GOOD

        num_matches = 0
        for cigar in cigars:
            for operation, length in cigar:
                if operation == self.CIGAR_OP_MATCH:
                    num_matches += length
                elif operation == self.CIGAR_OP_REF_INSERTION or \
                        operation == self.CIGAR_OP_REF_DELETION:
                    response = self.CIGAR_LESS_GOOD

        if num_matches < min_match:
            return self.CIGAR_FAIL
        elif num_matches < total_length:
            return self.CIGAR_LESS_GOOD

        return response
|
statbio/Sargasso
|
sargasso/filter/hits_checker.py
|
Python
|
mit
| 8,017 | 0.000624 |
#!/usr/bin/env python3

import os
import shutil
import subprocess
import sys

# Meson post-install hook.  argv[1] is the datadir; when DESTDIR is set we
# are doing a staged install and must not touch the live system.
destdir = os.environ.get('DESTDIR')
if destdir:
    install_root = destdir + os.path.abspath(sys.argv[1])
else:
    install_root = sys.argv[1]

# Compile schemas only for a real (non-staged) install.
if not destdir:
    schemadir = os.path.join(install_root, 'glib-2.0', 'schemas')
    print('Compile gsettings schemas...')
    subprocess.call(['glib-compile-schemas', schemadir])

# FIXME: Meson is unable to copy a generated target file:
# https://groups.google.com/forum/#!topic/mesonbuild/3iIoYPrN4P0
session_dir = os.path.join(install_root, 'wayland-sessions')
if not os.path.exists(session_dir):
    os.makedirs(session_dir)
shutil.copyfile(os.path.join(install_root, 'xsessions', 'gnome.desktop'),
                os.path.join(session_dir, 'gnome.desktop'))
|
GNOME/gnome-session
|
meson_post_install.py
|
Python
|
gpl-2.0
| 789 | 0.007605 |
####################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import os
import subprocess
####################################################################################################
def file_name_has_extension(file_name, extension):
    """Return True when *file_name* ends with the suffix *extension*."""
    suffix_matches = file_name.endswith(extension)
    return suffix_matches
####################################################################################################
def file_extension(filename):
    """Return the extension of *filename* including the leading dot,
    or the empty string when there is none (os.path.splitext semantics)."""
    _root, extension = os.path.splitext(filename)
    return extension
####################################################################################################
def run_shasum(filename, algorithm=1, text=False, binary=False, portable=False):
    """Run the ``shasum`` utility on *filename* and return the hex digest.

    Parameters
    ----------
    algorithm : one of 1, 224, 256, 384, 512, 512224, 512256
    text, binary, portable : mutually exclusive mode flags (first truthy wins)

    Raises
    ------
    ValueError for an unknown algorithm;
    subprocess.CalledProcessError if shasum exits non-zero.
    """
    if algorithm not in (1, 224, 256, 384, 512, 512224, 512256):
        raise ValueError
    args = ['shasum', '--algorithm=' + str(algorithm)]
    if text:
        args.append('--text')
    elif binary:
        args.append('--binary')
    elif portable:
        args.append('--portable')
    args.append(filename)
    # check_output() returns *bytes* on Python 3; decode before searching,
    # otherwise `output.find(' ')` raises TypeError (str pattern on bytes).
    output = subprocess.check_output(args).decode('ascii')
    # shasum prints "<digest> <filename>"; keep only the digest.
    shasum = output[:output.find(' ')]
    return shasum
####################################################################################################
class Path:

    """Thin object-oriented wrapper around a file-system path string."""

    def __init__(self, path):
        self._path = str(path)

    def __str__(self):
        return self._path

    def __bool__(self):
        # Truthy when the path exists on disk.
        return os.path.exists(self._path)

    @property
    def path(self):
        return self._path

    def clone_for_path(self, path):
        """Build a new instance of the same concrete class for *path*."""
        return self.__class__(path)

    def is_absolut(self):
        return os.path.isabs(self._path)

    def absolut(self):
        return self.clone_for_path(os.path.abspath(self._path))

    def normalise(self):
        return self.clone_for_path(os.path.normpath(self._path))

    def normalise_case(self):
        return self.clone_for_path(os.path.normcase(self._path))

    def expand_vars_and_user(self):
        return self.clone_for_path(os.path.expandvars(os.path.expanduser(self._path)))

    def real_path(self):
        return self.clone_for_path(os.path.realpath(self._path))

    def relative_to(self, directory):
        return self.clone_for_path(os.path.relpath(self._path, str(directory)))

    def split(self):
        """Split on the platform path separator."""
        return self._path.split(os.path.sep)

    def directory_part(self):
        """Directory component as a Directory instance."""
        return Directory(os.path.dirname(self._path))

    def filename_part(self):
        """Final path component (may be empty for a trailing separator)."""
        return os.path.basename(self._path)

    def is_directory(self):
        return os.path.isdir(self._path)

    def is_file(self):
        return os.path.isfile(self._path)

    @property
    def inode(self):
        return os.stat(self._path).st_ino

    @property
    def creation_time(self):
        return os.stat(self._path).st_ctime
####################################################################################################
class Directory(Path):

    """A Path that is expected to designate a directory."""

    def __bool__(self):
        # Fixed: the original called super().__nonzero__(), a Python 2 name
        # that does not exist in Python 3 (Path defines __bool__), so this
        # always raised AttributeError.
        return super().__bool__() and self.is_directory()

    def join_directory(self, directory):
        """Return a new Directory for self / *directory*."""
        return self.__class__(os.path.join(self._path, str(directory)))

    def join_filename(self, filename):
        """Return a File located inside this directory."""
        return File(filename, self._path)

    def iter_file(self, followlinks=False):
        """Recursively yield every file below this directory as a File."""
        for root, directories, files in os.walk(self._path, followlinks=followlinks):
            for filename in files:
                yield File(filename, root)

    def iter_directories(self, followlinks=False):
        """Recursively yield every sub-directory below this directory as a Path."""
        for root, directories, files in os.walk(self._path, followlinks=followlinks):
            for directory in directories:
                yield Path(os.path.join(root, directory))
####################################################################################################
class File(Path):

    """A Path that is expected to designate a regular file, with a lazily
    computed shasum digest."""

    # Algorithm used when compute_shasum() is called without one.
    default_shasum_algorithm = 256

    def __init__(self, filename, path=''):
        super().__init__(os.path.join(str(path), str(filename)))
        self._filename = self.filename_part()
        if not self._filename:
            raise ValueError('empty filename')
        self._directory = self.directory_part()
        self._shasum = None  # computed on first access

    def __bool__(self):
        # Fixed: super().__nonzero__() is a Python 2 relic; in Python 3 the
        # parent class defines __bool__, so the old call raised AttributeError.
        return super().__bool__() and os.path.isfile(self._path)

    @property
    def directory(self):
        return self._directory

    @property
    def filename(self):
        return self._filename

    @property
    def extension(self):
        return file_extension(self._filename)

    @property
    def shasum(self):
        """Digest of the file content, computed on first access."""
        if self._shasum is None:
            return self.compute_shasum()
        else:
            return self._shasum

    def compute_shasum(self, algorithm=None):
        """(Re)compute and cache the shasum with *algorithm* (default 256)."""
        if algorithm is None:
            algorithm = self.default_shasum_algorithm
        self._shasum = run_shasum(self._path, algorithm, portable=True)
        return self._shasum
####################################################################################################
#
# End
#
####################################################################################################
|
thomaslima/PySpice
|
PySpice/Tools/File.py
|
Python
|
gpl-3.0
| 7,812 | 0.005248 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
"""
LANG_INFO is a dictionary structure to provide meta information about languages.
About name_local: capitalize it as if your language name was appearing
inside a sentence in your language.
The 'fallback' key can be used to specify a special fallback logic which doesn't
follow the traditional 'fr-ca' -> 'fr' fallback logic.
"""
LANG_INFO = {
'af': {
'bidi': False,
'code': 'af',
'name': 'Afrikaans',
'name_local': 'Afrikaans',
},
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': 'العربيّة',
},
'ast': {
'bidi': False,
'code': 'ast',
'name': 'Asturian',
'name_local': 'asturianu',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': 'Azərbaycanca',
},
'be': {
'bidi': False,
'code': 'be',
'name': 'Belarusian',
'name_local': 'беларуская',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': 'български',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': 'বাংলা',
},
'br': {
'bidi': False,
'code': 'br',
'name': 'Breton',
'name_local': 'brezhoneg',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': 'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': 'català',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': 'česky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': 'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': 'dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': 'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': 'Ελληνικά',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': 'English',
},
'en-au': {
'bidi': False,
'code': 'en-au',
'name': 'Australian English',
'name_local': 'Australian English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': 'British English',
},
'eo': {
'bidi': False,
'code': 'eo',
'name': 'Esperanto',
'name_local': 'Esperanto',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': 'español',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': 'español de Argentina',
},
'es-co': {
'bidi': False,
'code': 'es-co',
'name': 'Colombian Spanish',
'name_local': 'español de Colombia',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': 'español de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': 'español de Nicaragua',
},
'es-ve': {
'bidi': False,
'code': 'es-ve',
'name': 'Venezuelan Spanish',
'name_local': 'español de Venezuela',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': 'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': 'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': 'فارسی',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': 'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': 'français',
},
'fy': {
'bidi': False,
'code': 'fy',
'name': 'Frisian',
'name_local': 'frysk',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': 'Gaeilge',
},
'gd': {
'bidi': False,
'code': 'gd',
'name': 'Scottish Gaelic',
'name_local': 'Gàidhlig',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': 'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': 'עברית',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': 'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': 'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': 'Magyar',
},
'ia': {
'bidi': False,
'code': 'ia',
'name': 'Interlingua',
'name_local': 'Interlingua',
},
'io': {
'bidi': False,
'code': 'io',
'name': 'Ido',
'name_local': 'ido',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': 'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': 'Íslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': 'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': '日本語',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': 'ქართული',
},
'kk': {
'bidi': False,
'code': 'kk',
'name': 'Kazakh',
'name_local': 'Қазақ',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': 'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': 'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': '한국어',
},
'lb': {
'bidi': False,
'code': 'lb',
'name': 'Luxembourgish',
'name_local': 'Lëtzebuergesch',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': 'Lietuviškai',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': 'latviešu',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': 'Македонски',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': 'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': 'Mongolian',
},
'mr': {
'bidi': False,
'code': 'mr',
'name': 'Marathi',
'name_local': 'मराठी',
},
'my': {
'bidi': False,
'code': 'my',
'name': 'Burmese',
'name_local': 'မြန်မာဘာသာ',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': 'norsk (bokmål)',
},
'ne': {
'bidi': False,
'code': 'ne',
'name': 'Nepali',
'name_local': 'नेपाली',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': 'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': 'norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': 'norsk',
},
'os': {
'bidi': False,
'code': 'os',
'name': 'Ossetic',
'name_local': 'Ирон',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': 'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': 'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': 'Português',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': 'Português Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': 'Română',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': 'Русский',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': 'Slovensky',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': 'Slovenščina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': 'shqip',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': 'српски',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': 'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': 'svenska',
},
'sw': {
'bidi': False,
'code': 'sw',
'name': 'Swahili',
'name_local': 'Kiswahili',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': 'தமிழ்',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': 'తెలుగు',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': 'ภาษาไทย',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': 'Türkçe',
},
'tt': {
'bidi': False,
'code': 'tt',
'name': 'Tatar',
'name_local': 'Татарча',
},
'udm': {
'bidi': False,
'code': 'udm',
'name': 'Udmurt',
'name_local': 'Удмурт',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': 'Українська',
},
'ur': {
'bidi': True,
'code': 'ur',
'name': 'Urdu',
'name_local': 'اردو',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': 'Tiếng Việt',
},
'zh-cn': {
'fallback': ['zh-hans'],
},
'zh-hans': {
'bidi': False,
'code': 'zh-hans',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hant': {
'bidi': False,
'code': 'zh-hant',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
'zh-hk': {
'fallback': ['zh-hant'],
},
'zh-mo': {
'fallback': ['zh-hant'],
},
'zh-my': {
'fallback': ['zh-hans'],
},
'zh-sg': {
'fallback': ['zh-hans'],
},
'zh-tw': {
'fallback': ['zh-hant'],
},
}
|
yephper/django
|
django/conf/locale/__init__.py
|
Python
|
bsd-3-clause
| 12,721 | 0.000161 |
# Author: Seamus Wassman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
# This file was adapted for MoreThanTV from the freshontv scraper by
# Sparhawk76, this is my first foray into python, so there most likely
# are some mistakes or things I could have done better.
import re
import requests
import traceback
import logging
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.exceptions import AuthException
class MoreThanTVProvider(generic.TorrentProvider):

    """Search/torrent provider for the MoreThanTV private tracker."""

    def __init__(self):

        generic.TorrentProvider.__init__(self, "MoreThanTV")

        self.supportsBacklog = True

        # Session cookie fragments, cached after the first successful login.
        self._uid = None
        self._hash = None
        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None
        # self.freeleech = False

        self.urls = {'base_url': 'https://www.morethan.tv/',
                     'login': 'https://www.morethan.tv/login.php',
                     'detail': 'https://www.morethan.tv/torrents.php?id=%s',
                     'search': 'https://www.morethan.tv/torrents.php?tags_type=1&order_by=time&order_way=desc&action=basic&searchsubmit=1&searchstr=%s',
                     'download': 'https://www.morethan.tv/torrents.php?action=download&id=%s'}

        # The dict above has unicode keys (unicode_literals), so it is indexed
        # with plain string literals; b'...' keys would KeyError on Python 3.
        self.url = self.urls['base_url']

        self.cookies = None

        self.proper_strings = ['PROPER', 'REPACK']

        self.cache = MoreThanTVCache(self)

    def _checkAuth(self):
        """Raise AuthException unless both username and password are configured."""
        if not self.username or not self.password:
            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")

        return True

    def _doLogin(self):
        """Log in to the tracker; return True when a session cookie is available."""
        if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
            return True

        if self._uid and self._hash:
            requests.utils.add_dict_to_cookiejar(self.session.cookies, self.cookies)
        else:
            login_params = {'username': self.username,
                            'password': self.password,
                            'login': 'Log in',
                            'keeplogged': '1'}

            response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
            if not response:
                logging.warning("Unable to connect to provider")
                return False

            if re.search('Your username or password was incorrect.', response):
                logging.warning("Invalid username or password. Check your settings")
                return False

        return True

    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
        """Scrape search results.

        Returns a list of (title, download_url, size, seeders, leechers)
        tuples, sorted by seeders (descending) within each search mode.
        """
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            logging.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:

                # Fixed: the original used `mode is not 'RSS'`, which compares
                # object identity, not string content.
                if mode != 'RSS':
                    logging.debug("Search string: %s " % search_string)

                searchURL = self.urls['search'] % (search_string.replace('(', '').replace(')', ''))
                logging.debug("Search URL: %s" % searchURL)

                # returns top 15 results by default, expandable in user profile to 100
                data = self.getURL(searchURL)
                if not data:
                    continue

                try:
                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
                        torrent_table = html.find('table', attrs={'class': 'torrent_table'})
                        torrent_rows = torrent_table.findChildren('tr') if torrent_table else []

                        # Continue only if at least one release is found
                        # (row 0 is the column header).
                        if len(torrent_rows) < 2:
                            logging.debug("Data returned from provider does not contain any torrents")
                            continue

                        for result in torrent_rows[1:]:
                            cells = result.findChildren('td')

                            link = cells[1].find('a', attrs={'title': 'Download'})

                            # skip if torrent has been nuked due to poor quality
                            if cells[1].find('img', alt='Nuked') is not None:
                                continue

                            torrent_id_long = link['href'].replace('torrents.php?action=download&id=', '')

                            try:
                                # Fixed: has_key() is a removed Python 2 idiom;
                                # bs4 Tags provide has_attr().
                                if link.has_attr('title'):
                                    title = cells[1].find('a', {'title': 'View torrent'}).contents[0].strip()
                                else:
                                    title = link.contents[0]

                                download_url = self.urls['download'] % (torrent_id_long)
                                # Convert to int so the min-seed/leech filter and
                                # the seeder sort below compare numbers, not text.
                                seeders = int(cells[6].contents[0].replace(',', ''))
                                leechers = int(cells[7].contents[0].replace(',', ''))
                                size = -1
                                if re.match(r'\d+([,\.]\d+)?\s*[KkMmGgTt]?[Bb]', cells[4].contents[0]):
                                    size = self._convertSize(cells[4].text.strip())
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logging.debug(
                                            "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                                    title, seeders, leechers))
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logging.debug("Found result: %s " % title)

                            items[mode].append(item)

                except Exception:
                    logging.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results

    def seedRatio(self):
        """Return the configured seed ratio for this provider."""
        return self.ratio

    def _convertSize(self, sizeString):
        """Convert a human-readable size such as '1.2 GB' to bytes (-1 on failure)."""
        size = sizeString[:-2].strip()
        modifier = sizeString[-2:].upper()
        try:
            size = float(size)
            # Fixed: `modifier in 'KB'` tested substring membership, so a bare
            # 'K' (or 'B') silently matched; exact comparison is intended.
            if modifier == 'KB':
                size = size * 1024
            elif modifier == 'MB':
                size = size * 1024 ** 2
            elif modifier == 'GB':
                size = size * 1024 ** 3
            elif modifier == 'TB':
                size = size * 1024 ** 4
        except Exception:
            size = -1
        return int(size)
class MoreThanTVCache(tvcache.TVCache):

    """RSS cache for the MoreThanTV provider."""

    def __init__(self, provider_obj):
        tvcache.TVCache.__init__(self, provider_obj)
        self.minTime = 20  # poll delay in minutes

    def _getRSSData(self):
        """Fetch the provider's RSS-mode search results for the cache."""
        return {'entries': self.provider._doSearch({'RSS': ['']})}
provider = MoreThanTVProvider()
|
mcus/SickRage
|
sickbeard/providers/morethantv.py
|
Python
|
gpl-3.0
| 8,361 | 0.00311 |
import numpy
def doPCA(data, dim):
    """Run principal component analysis on *data* (observations as rows)
    and return the top *dim* components (None if dim exceeds the count)."""
    matrix = makeDataMatrix(data)
    centered = normalizeData(matrix, getMeanVector(matrix))
    eigvals, eigvecs = getEigs(getCov(centered))
    ordered = sortEigs(eigvals, eigvecs)
    return getDimensions(dim, ordered)
def getDimensions(d, pc):
    """Return the first *d* components of *pc* as rows (row i is column i
    of *pc*), or None when d exceeds the number of components."""
    if d > len(pc):
        return None
    selected = numpy.zeros((d, len(pc[0])))
    for row in range(d):
        selected[row] = pc[:, row]
    return selected
def sortEigs(vals, vecs):
    """Return the rows of *vecs* reordered by descending eigenvalue.

    Fixes the original hand-rolled selection sort, which used a strict
    `vals[j] < lastMax` test and therefore skipped duplicate eigenvalues,
    leaving all-zero rows in the result.

    NOTE(review): this reorders *rows* of `vecs`, while numpy.linalg.eig
    stores eigenvectors in *columns*; the downstream getDimensions() takes
    columns again — confirm the intended orientation before changing.
    """
    order = numpy.argsort(vals)[::-1]
    return numpy.asarray(vecs)[order]
def getEigs(cov):
    """Eigen-decompose *cov*; returns (eigenvalues, eigenvectors)."""
    eigvals, eigvecs = numpy.linalg.eig(cov)
    return eigvals, eigvecs
def getCov(data):
    """Covariance matrix of *data*, rows as variables (numpy.cov convention)."""
    covariance = numpy.cov(data)
    return covariance
def getMeanVector(data):
    """Per-row means of *data* as a 1-D float array."""
    return numpy.array([numpy.mean(row) for row in data])
def normalizeData(data, means):
    """Subtract means[i] from row i of *data*; returns a new float array."""
    centered = numpy.zeros((len(data), len(data[0])))
    for index, row in enumerate(data):
        centered[index] = row - means[index]
    return centered
def makeDataMatrix(data):
    """Transpose *data* so observations become columns (variables as rows)."""
    return numpy.asarray(data).T
|
hakuliu/inf552
|
hw3/pca.py
|
Python
|
apache-2.0
| 1,541 | 0.008436 |
from polybori import BooleSet, interpolate_smallest_lex
class PartialFunction(object):
"""docstring for PartialFunction"""
def __init__(self, zeros, ones):
super(PartialFunction, self).__init__()
self.zeros = zeros.set()
self.ones = ones.set()
def interpolate_smallest_lex(self):
return interpolate_smallest_lex(self.zeros, self.ones)
def __str__(self):
return "PartialFunction(zeros=" + str(self.zeros) + ", ones=" + str(
self.ones) + ")"
def definedOn(self):
return self.zeros.union(self.ones)
def __add__(self, other):
domain = self.definedOn().intersect(other.definedOn())
zeros = self.zeros.intersect(other.zeros).union(self.ones.intersect(
other.ones))
ones = self.zeros.intersect(other.ones).union(self.ones.intersect(
other.zeros))
assert zeros.diff(domain).empty()
assert ones.diff(domain).empty()
return PartialFunction(zeros, ones)
def __repr__(self):
return str(self)
def __mul__(self, other):
zeros = self.zeros.union(other.zeros)
ones = self.ones.intersect(other.ones)
return PartialFunction(zeros, ones)
def __or__(self, other):
zeros = self.zeros.intersect(other.zeros)
ones = self.ones.union(other.ones)
return PartialFunction(zeros, ones)
def __xor__(self, other):
return self + other
def __and__(self, other):
return self * other
|
ohanar/PolyBoRi
|
pyroot/polybori/partial.py
|
Python
|
gpl-2.0
| 1,509 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Example program to receive packets from the radio link
#
import virtGPIO as GPIO
from lib_nrf24 import NRF24
import time
# Two 5-byte pipe addresses: pipes[0] is opened for writing,
# pipes[1] for reading (must mirror the transmitter's configuration).
pipes = [[0xe7, 0xe7, 0xe7, 0xe7, 0xe7], [0xc2, 0xc2, 0xc2, 0xc2, 0xc2]]
radio2 = NRF24(GPIO, GPIO.SpiDev())
# NOTE(review): begin(9, 7) presumably selects the CE/CSN pins on the
# virtual GPIO board -- confirm against lib_nrf24 and the actual wiring.
radio2.begin(9, 7)
radio2.setRetries(15,15)
radio2.setPayloadSize(32)
# RF channel 0x60; must match the transmitter's channel.
radio2.setChannel(0x60)
radio2.setDataRate(NRF24.BR_2MBPS)
radio2.setPALevel(NRF24.PA_MIN)
radio2.setAutoAck(True)
# Dynamic payloads + ACK payloads let us piggy-back a reply on the auto-ACK.
radio2.enableDynamicPayloads()
radio2.enableAckPayload()
radio2.openWritingPipe(pipes[0])
radio2.openReadingPipe(1, pipes[1])
radio2.startListening()
radio2.stopListening()
# Dump the radio configuration to stdout for debugging.
radio2.printDetails()
radio2.startListening()
# Packet counter, echoed back as the first byte of the ACK payload.
c=1
while True:
    akpl_buf = [c,1, 2, 3,4,5,6,7,8,9,0,1, 2, 3,4,5,6,7,8]
    pipe = [0]
    # Poll every 10 ms until a packet arrives on any open pipe.
    while not radio2.available(pipe):
        time.sleep(10000/1000000.0)
    recv_buffer = []
    radio2.read(recv_buffer, radio2.getDynamicPayloadSize())
    print ("Received:") ,
    print (recv_buffer)
    c = c + 1
    # Load an ACK payload for the *next* packet only on every other iteration.
    if (c&1) == 0:
        radio2.writeAckPayload(1, akpl_buf, len(akpl_buf))
        print ("Loaded payload reply:"),
        print (akpl_buf)
    else:
        print ("(No return payload)")
|
CarlosPena00/Mobbi
|
Rasp/nrf/lib_nrf24/example-nrf24-recv.py
|
Python
|
mit
| 1,196 | 0.020067 |
#!/usr/bin/env python
"""Test runner for programming puzzles.

Runs a program against each *.in file and compares its output with the
corresponding *.ans file.
"""

import sys
import os
import shlex
import glob
import filecmp
import time

from argparse import ArgumentParser

parser = ArgumentParser(description="""Testrunner for programming puzzles, runs a program against each
.in-file and checks the output against the corresponding .out-file using unix diff""")
parser.add_argument("-v", "--verbose", action="store_true", help="Be verbose", required=False, default=False)
parser.add_argument("-e", "--executor", dest="executor", default="", help="Execute the program with this executor (ex: java or python)")
parser.add_argument("-d", "--directory", dest="directory", default="", help="""The directory where test files with extensions .in
and .ans can be found (default is a folder named test placed as
a subfolder to the folder where the program is located)""")
parser.add_argument("program")
args = parser.parse_args()

program = args.program
if program[0] != '.':
    program = "./" + program

# Fail fast with a clear message. The original opened the file purely as an
# existence check and never closed it, leaking the handle.
if not os.path.isfile(program):
    sys.exit("Program not found: %s" % program)

program_path = os.path.dirname(program)
if args.directory:
    test_search_path = "%s/*.in" % args.directory
else:
    test_search_path = "%s/test/*.in" % program_path

success = True
tests_found = False
try:
    for test_file in glob.glob(test_search_path):
        tests_found = True
        start = time.time()
        # Quote the paths so names with spaces/metacharacters survive the shell;
        # the executor string is deliberately left unquoted (it may carry flags).
        command = "%s %s < %s > answer.tmp" % (args.executor, shlex.quote(program), shlex.quote(test_file))
        os.system(command)
        end = time.time()
        test_exp_file = test_file.replace(".in", ".ans")
        # shallow=False forces a byte-for-byte comparison; the default compares
        # only os.stat() signatures and can report a false "equal".
        if not filecmp.cmp(test_exp_file, "answer.tmp", shallow=False):
            success = False
            print(test_file + ", FAILED")
        elif args.verbose:
            print(test_file + ", success")
        if args.verbose:
            print(test_file + ", execution time = " + str(end - start))
finally:
    # Always clean up the scratch output file.
    if os.path.isfile("answer.tmp"):
        os.remove("answer.tmp")

if not tests_found:
    print("No test files found")
elif success:
    print("Success")
else:
    print("Failed (%s)" % program)
|
plilja/algolib
|
util/checksol.py
|
Python
|
apache-2.0
| 2,044 | 0.005382 |
# -*- coding: utf-8 -*-
"""Root URLconf for the therapyinvoicing project."""
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
    # Static template pages.
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin, use {% url 'admin:index' %}
    url(r'^' + settings.ADMIN_URL, include(admin.site.urls)),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # User management
    url(r'^users/', include("therapyinvoicing.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    url(r'^customers/', include("therapyinvoicing.customers.urls", namespace="customers")),
    url(r'^customerinvoicing/', include("therapyinvoicing.customerinvoicing.urls", namespace="customerinvoicing")),
    url(r'^kelainvoicing/', include("therapyinvoicing.kelainvoicing.urls", namespace="kelainvoicing")),
    url(r'^api/', include("therapyinvoicing.api.urls", namespace="api")),
    url(r'^reporting/', include("therapyinvoicing.reporting.urls", namespace="reporting")),
    # Serve user-uploaded media from MEDIA_ROOT (development setup).
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
|
ylitormatech/terapialaskutus
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,837 | 0.003266 |
# -*- coding: utf-8 -*-
#
# davos documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 29 08:01:32 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# Sphinx build configuration for the "davos" documentation.
# Generated by sphinx-quickstart; values below are module-level attributes
# read by the Sphinx build process.
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'davos'
copyright = u'2017, Josh Stark'
author = u'Josh Stark'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.2'
# The full version, including alpha/beta/rc tags.
release = u'2.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# NOTE(review): the entries below ('about.html', 'donate.html') ship with the
# alabaster theme, but the active theme is sphinx_rtd_theme -- confirm these
# templates are actually resolvable, or drop this dict.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'davosdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'davos.tex', u'davos Documentation',
     u'Josh Stark', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'davos', u'davos Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'davos', u'davos Documentation',
     author, 'davos', 'One line description of project.',
     'Miscellaneous'),
]
|
linuxserver/davos
|
docs/source/conf.py
|
Python
|
mit
| 5,191 | 0.000385 |
#!/usr/bin/env python
import os
import setuptools
def _clean_line(line):
line = line.strip()
line = line.split("#")[0]
line = line.strip()
return line
def read_requires(base):
    """Read a requirements file from the tools/ directory.

    Returns a list of cleaned requirement strings; an empty list when the
    file does not exist. Comment-only and blank lines are skipped.
    """
    path = os.path.join('tools', base)
    requires = []
    if not os.path.isfile(path):
        return requires
    # Open in text mode: the original opened with 'rb', which yields bytes
    # and makes _clean_line's str split("#") fail under Python 3.
    with open(path) as h:
        for line in h.read().splitlines():
            line = _clean_line(line)
            if not line:
                continue
            requires.append(line)
    return requires
# Packaging metadata for the taskflow library. Requirement lists are loaded
# from tools/test-requires and tools/pip-requires (empty when those files
# are absent, see read_requires above).
setuptools.setup(
    name='taskflow',
    version='0.0.1',
    author='OpenStack',
    license='Apache Software License',
    description='Taskflow structured state management library.',
    long_description='The taskflow library provides core functionality that '
                     'can be used to build [resumable, reliable, '
                     'easily understandable, ...] highly available '
                     'systems which process workflows in a structured manner.',
    author_email='openstack-dev@lists.openstack.org',
    url='http://www.openstack.org/',
    packages=setuptools.find_packages(),
    tests_require=read_requires('test-requires'),
    install_requires=read_requires('pip-requires'),
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.6', ],
)
|
JohnGarbutt/taskflow-1
|
setup.py
|
Python
|
apache-2.0
| 1,467 | 0 |
def es_vocal(letra):
    """Return True when *letra* is one of the five unaccented lowercase
    vowels, False otherwise."""
    return letra in 'aeiou'
def contar_vocales_y_consonantes(palabra):
    """Count characters of *palabra*, returning (vowels, non-vowels).

    Every character that is not an unaccented lowercase vowel counts as a
    "consonant", exactly as the original loop did.
    """
    num_vocales = sum(1 for caracter in palabra if caracter in 'aeiou')
    return (num_vocales, len(palabra) - num_vocales)
# Script entry point. NOTE: this is Python 2 code (raw_input and the
# print statement); it will not run unmodified under Python 3.
palabra = raw_input("Ingrese palabra: ")
vocal, consonante = contar_vocales_y_consonantes(palabra)
print "Tiene", vocal, "vocales y", consonante, "consonantes"
|
csaldias/python-usm
|
Ejercicios progra.usm.cl/Parte 2/7- Procesamiento de Texto/vocales_consonantes.py
|
Python
|
mit
| 466 | 0.032189 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Logging-like module for creating artifacts.
In order to actually create artifacts, RegisterArtifactImplementation must be
called from somewhere with an artifact implementation to use, otherwise
CreateArtifact will just end up logging the first 100 characters of the given
data.
This registration is automatically handled in tests that use Telemetry or typ as
their test runner, so it should only really need to be used if you are adding a
new test runner type.
Example usage:
# During test setup.
artifact_logger.RegisterArtifactImplementation(self.results)
# At any point in the test afterwards, from any module.
artifact_logger.CreateArtifact('some/crash/stack.txt', GetStackTrace())
"""
import datetime
from telemetry.internal.results import (
artifact_compatibility_wrapper as artifact_wrapper)
artifact_impl = artifact_wrapper.ArtifactCompatibilityWrapperFactory(None)
def CreateArtifact(name, data):
    """Write *data* to an artifact called *name*.

    Args:
      name: Artifact name; may contain '/' to build a hierarchy.
      data: The data to write.

    The call is delegated to whichever implementation is currently
    registered (by default, a logging-only fallback).
    """
    artifact_impl.CreateArtifact(name, data)
def RegisterArtifactImplementation(artifact_implementation):
    """Install the implementation used by all subsequent CreateArtifact calls.

    Args:
      artifact_implementation: An implementation accepted by
        artifact_compatibility_wrapper.ArtifactCompatibilityWrapperFactory.
    """
    global artifact_impl  # pylint: disable=global-statement
    wrapper = artifact_wrapper.ArtifactCompatibilityWrapperFactory(
        artifact_implementation)
    artifact_impl = wrapper
def GetTimestampSuffix():
    """Return the current local time as a filename-friendly string.

    Format is YYYY-MM-DD-HH-MM-SS-microseconds; the microsecond component
    avoids name collisions when two artifacts are created in close
    succession.
    """
    now = datetime.datetime.now()
    return '{:%Y-%m-%d-%H-%M-%S-%f}'.format(now)
|
endlessm/chromium-browser
|
third_party/catapult/telemetry/telemetry/internal/results/artifact_logger.py
|
Python
|
bsd-3-clause
| 2,300 | 0.004783 |
import atexit
# Module-level state shared by the helpers below.
connection = None           # current DB connection object (or None)
connection_function = None  # zero-argument factory that opens a connection
reconnect_function = None   # callback invoked with the fresh connection
hooks = None                # optional extra zero-arg callbacks run after reconnect
def set_connection_function(_connection_function):
    """Register *_connection_function* as the connection factory and
    immediately open a connection with it."""
    global connection, connection_function
    connection_function = _connection_function
    connection = _connection_function()
def disconnect():
    """Best-effort close of the module-level connection.

    Errors are deliberately swallowed: this runs via atexit, where the
    connection may already be closed, broken, or never have been opened.
    """
    global connection
    try:
        connection.close()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer silently swallowed.
        pass
def set_on_reconnect(_reconnect_function):
    """Register the callback that reconnect() will invoke with the newly
    opened connection."""
    global reconnect_function
    reconnect_function = _reconnect_function
def set_hooks(_hooks):
    """Register an iterable of zero-argument callbacks run after each
    reconnect."""
    global hooks
    hooks = _hooks
def reconnect():
    """Open a fresh connection and notify the registered callbacks."""
    global connection
    connection = connection_function()
    print("***********RECONNECTING DATABASE************")
    # First the main reconnect callback, then any extra hooks.
    reconnect_function(connection)
    if hooks is not None:
        for hook in hooks:
            hook()
atexit.register(disconnect)
|
Dark-Bob/mro
|
mro/connection.py
|
Python
|
mit
| 910 | 0.002198 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C)
# 2004-2011: Pexego Sistemas Informáticos. (http://pexego.es)
# 2013: Top Consultant Software Creations S.L.
# (http://www.topconsultant.es/)
# 2014: Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
# Autores originales: Luis Manuel Angueira Blanco (Pexego)
# Omar Castiñeira Saavedra(omar@pexego.es)
# Migración OpenERP 7.0: Ignacio Martínez y Miguel López.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, exceptions, _
class Mod349ExportToBoe(models.TransientModel):
    """Serialise AEAT model 349 reports into the fixed-width BOE format.

    Every record is 500 characters plus CR+LF (502 total). BOE formatting
    rules: amounts are always positive; numeric fields are right aligned and
    zero padded; alphanumeric fields are uppercase, left aligned and space
    padded. The helper names _formatString/_formatNumber/_formatBoolean come
    from the inherited export_to_boe wizard.
    """
    _inherit = "l10n.es.aeat.report.export_to_boe"
    _name = "l10n.es.aeat.mod349.export_to_boe"
    _description = "Export AEAT Model 349 to BOE format"

    @api.multi
    def _get_company_name_with_title(self, company_obj):
        """Return the company name, appending the partner title when set."""
        if company_obj.partner_id and company_obj.partner_id.title:
            return company_obj.name + ' ' + \
                company_obj.partner_id.title.name.capitalize()
        return company_obj.name

    @api.multi
    def _get_formatted_declaration_record(self, report):
        """Build the type-1 (declarant/company) record.

        Layout (positions, per the official AEAT model 349 file spec):
          1        record type, constant '1'
          2-4      declaration model, constant '349'
          5-8      fiscal year
          9-17     declarant VAT number (NIF)
          18-57    declarant name
          58       support type
          59-67    contact phone
          68-107   contact name
          108-120  declaration id number
          121-122  complementary/substitutive declaration flag
          123-135  previous declaration id number
          136-137  period
          138-146  total number of intra-community operators
          147-161  total amount of intra-community operations
          162-170  number of operators with rectifications
          171-185  total amount of rectifications
          186      periodicity-change indicator ('X' or blank)
          187-390  blanks
          391-399  legal representative VAT number
          400-487  blanks
          488-500  electronic seal
        Positions 1-135 are produced by the inherited super() call.
        """
        assert report, 'No Report defined'
        # 'MO' means a monthly declaration: use the concrete month instead.
        period = (report.period_selection == 'MO' and report.month_selection or
                  report.period_selection)
        text = super(Mod349ExportToBoe,
                     self)._get_formatted_declaration_record(report)
        text += self._formatString(period, 2)  # Period
        # Total number of intra-community operators
        text += self._formatNumber(report.total_partner_records, 9)
        # Total amount of intra-community operations (11 int + 2 dec digits)
        text += self._formatNumber(report.total_partner_records_amount, 13, 2)
        # Number of intra-community operators with rectifications
        text += self._formatNumber(report.total_partner_refunds, 9)
        # Total amount of the rectifications
        text += self._formatNumber(report.total_partner_refunds_amount, 13, 2)
        # Indicator of periodicity change in the obligation to declare
        text += self._formatBoolean(report.frequency_change)
        text += 204 * ' '  # Blanks
        # Legal representative VAT number
        text += self._formatString(report.representative_vat, 9)
        text += 88 * ' '  # Blanks
        text += 13 * ' '  # Electronic seal
        text += '\r\n'  # CR + LF record terminator
        assert len(text) == 502, \
            _("The type 1 record must be 502 characters long")
        return text

    @api.multi
    def _get_formatted_main_record(self, report):
        """Concatenate all partner and refund type-2 records of *report*."""
        file_contents = ''
        for partner_record in report.partner_record_ids:
            file_contents += self._get_formated_partner_record(
                report, partner_record)
        for refund_record in report.partner_refund_ids:
            file_contents += self._get_formatted_partner_refund(
                report, refund_record)
        return file_contents

    @api.multi
    def _get_formated_partner_record(self, report, partner_record):
        """Build a type-2 partner record.

        (Method name keeps the historical 'formated' spelling: it is part of
        the wizard's interface.)

        Layout:
          1        record type, constant '2'
          2-4      declaration model, constant '349'
          5-8      fiscal year
          9-17     declarant VAT number (NIF)
          18-75    blanks
          76-92    intra-community operator VAT (country code + NIF)
          93-132   intra-community operator name
          133      operation key
          134-146  taxable base (11 int + 2 dec digits)
          147-500  blanks
        """
        assert report, 'No AEAT 349 Report defined'
        assert partner_record, 'No Partner record defined'
        text = ''
        try:
            fiscal_year = int((report.fiscalyear_id.code or '')[:4])
        except (ValueError, TypeError):
            # Narrowed from a bare `except:`; only conversion failures of
            # the fiscal year code should trigger this user-facing error.
            raise exceptions.Warning(
                _('First four characters of fiscal year code must be numeric '
                  'and contain the fiscal year number. Please, fix it and try '
                  'again.'))
        # Normalise fields that may arrive in a non-canonical form:
        # strip the 2-letter country prefix from the VAT when present.
        company_vat = report.company_vat
        if len(report.company_vat) > 9:
            company_vat = report.company_vat[2:]
        text += '2'  # Record type
        text += '349'  # Declaration model
        text += self._formatNumber(fiscal_year, 4)  # Fiscal year
        text += self._formatString(company_vat, 9)  # Declarant VAT
        text += 58 * ' '  # Blanks
        # Intra-community operator VAT number
        text += self._formatString(partner_record.partner_vat, 17)
        # Intra-community operator name
        text += self._formatString(partner_record.partner_id.name, 40)
        # Operation key
        text += self._formatString(partner_record.operation_key, 1)
        # Taxable base (integer part 11 digits, decimal part 2 digits)
        text += self._formatNumber(partner_record.total_operation_amount, 11,
                                   2)
        text += 354 * ' '  # Blanks
        text += '\r\n'  # CR + LF record terminator
        assert len(text) == 502, \
            _("The type 2 record must be 502 characters long")
        return text

    @api.multi
    def _get_formatted_partner_refund(self, report, refund_record):
        """Build a type-2 rectification (refund) record.

        Same layout as the partner record up to position 133; positions
        147-178 then carry the rectification data:
          147-150  fiscal year of the rectified declaration
          151-152  period of the rectified declaration
          153-165  rectified taxable base
          166-178  taxable base previously declared
          179-500  blanks
        """
        assert report, 'No AEAT 349 Report defined'
        assert refund_record, 'No Refund record defined'
        text = ''
        # 'MO' means a monthly rectification: use the concrete month instead.
        period = (refund_record.period_selection == 'MO' and
                  refund_record.month_selection or
                  refund_record.period_selection)
        text += '2'  # Record type
        text += '349'  # Declaration model
        # Fiscal year
        text += self._formatNumber(report.fiscalyear_id.code[:4], 4)
        text += self._formatString(report.company_vat, 9)  # Declarant VAT
        text += 58 * ' '  # Blanks
        # Intra-community operator VAT number
        text += self._formatString(refund_record.partner_id.vat, 17)
        # Intra-community operator name
        text += self._formatString(refund_record.partner_id.name, 40)
        # Operation key
        text += self._formatString(refund_record.operation_key, 1)
        text += 13 * ' '  # Blanks
        # Fiscal year (of the rectified declaration)
        text += self._formatNumber(refund_record.fiscalyear_id.code[:4], 4)
        # Period (of the rectified declaration)
        text += self._formatString(period, 2)
        # Rectified taxable base
        text += self._formatNumber(refund_record.total_operation_amount, 11, 2)
        # Taxable base previously declared
        text += self._formatNumber(refund_record.total_origin_amount, 11, 2)
        text += 322 * ' '  # Blanks
        text += '\r\n'  # CR + LF record terminator
        assert len(text) == 502, _("The type 2 record must be 502 characters "
                                   "long")
        return text
|
Jortolsa/l10n-spain
|
l10n_es_aeat_mod349/wizard/export_mod349_to_boe.py
|
Python
|
agpl-3.0
| 13,634 | 0.000074 |
import unittest
import transaction
from pyramid import testing
from climasng.tests import ProseMakerTestCase
from climasng.parsing.prosemaker import ProseMaker
# ===================================================================
class TestProseMakerConditions(ProseMakerTestCase):
    """Exercise ProseMaker numeric range conditions such as [[a =d= b]].

    Each test feeds a mapping of expected-output -> sample docs to the
    shared _assert_samples helper (extracted because the identical
    iteration loop was duplicated in every test method).
    """

    # --------------------------------------------------- helper --
    def _assert_samples(self, samples):
        """Assert each sample doc parses to its expected output.

        *samples* maps an expected result string to the list of source
        docs that should produce it.
        """
        for expected, docs in samples.items():
            for doc in docs:
                self.assertParses(doc, expected)

    # ------------------------------------------------------- test --
    def test_pm_condition_rangeequality_litnum_comparison(self):
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[10 =2= 11]]showing',
                        '[[11 =2= 10]]showing',
                        '[[10 =5= 6]]showing',
                        '[[1.0 =0.1= 1.1]]showing',
                        '[[1 =0= 1]]showing',
                        ],
            # all these docs should result in ''
            '': ['[[10 =3= 6]]hiding',
                 '[[6 =3= 10]]hiding',
                 '[[1 =0= 1.01]]hiding',
                 '[[1 =0.1= 1.2]]hiding',
                 ]
        })

    # ------------------------------------------------------- test --
    def test_pm_condition_rangeequality_litnumpercent_comparison(self):
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[1 =15%= 1.1]]showing',
                        '[[10 =15%= 11]]showing',
                        '[[1000 =15%= 1100]]showing',
                        '[[79 =25%= 100]]showing',
                        '[[1234 =1%= 1236]]showing',
                        ],
            # all these docs should result in ''
            '': ['[[10 =10%= 6]]hiding',
                 '[[100 =25%= 79]]hiding',
                 '[[1.01 =10%= 10]]hiding',
                 '[[99.5 =0.1%= 100]]hiding',
                 ]
        })

    # ------------------------------------------------------- test --
    def test_pm_condition_rangeequality_varnum_comparison(self):
        self.pm.data = {'one': 1, 'two': 2, 'aten': 10}
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[aten =2= 11]]showing',
                        '[[11 =2= aten]]showing',
                        '[[aten =5= 6]]showing',
                        '[[1 =0= one]]showing'
                        ],
            # all these docs should result in ''
            '': ['[[aten =3= 6]]hiding',
                 '[[6 =3= aten]]hiding'
                 ]
        })

    # ------------------------------------------------------- test --
    def test_pm_condition_rangeleftrocket_litnum_comparison(self):
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[10 <2= 11]]showing',
                        '[[6 <5= 10]]showing',
                        '[[1.0 <0.1= 1.1]]showing',
                        ],
            # all these docs should result in ''
            '': ['[[10 <3= 6]]hiding',
                 '[[1 <0= 1]]hiding',
                 '[[10 <5= 6]]hiding',
                 '[[11 <2= 10]]hiding',
                 '[[6 <3= 10]]hiding',
                 '[[1 <0= 1.01]]hiding',
                 '[[1 <0.1= 1.2]]hiding',
                 ]
        })

    # ------------------------------------------------------- test --
    def test_pm_condition_rangeleftrocket_varnum_comparison(self):
        self.pm.data = {'one': 1, 'two': 2, 'aten': 10}
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[aten <2= 11]]showing',
                        '[[6 <5= aten]]showing',
                        '[[one <0.1= 1.1]]showing',
                        ],
            # all these docs should result in ''
            '': ['[[aten <3= 6]]hiding',
                 '[[one <0= one]]hiding',
                 '[[aten <5= 6]]hiding',
                 '[[11 <2= aten]]hiding',
                 '[[6 <3= aten]]hiding',
                 '[[one <0.1= 1.2]]hiding',
                 ]
        })

    # ------------------------------------------------------- test --
    def test_pm_condition_rangerightrocket_litnum_comparison(self):
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[11 =2> 10]]showing',
                        '[[10 =5> 6]]showing',
                        '[[1.1 =0.1> 1.0]]showing',
                        ],
            # all these docs should result in ''
            '': ['[[6 =3> 10]]hiding',
                 '[[1 =0> 1]]hiding',
                 '[[6 =5> 10]]hiding',
                 '[[10 =2> 11]]hiding',
                 '[[10 =3> 6]]hiding',
                 '[[1.01 =0> 1]]hiding',
                 '[[1.2 =0.1> 1]]hiding',
                 ]
        })

    # ------------------------------------------------------- test --
    def test_pm_condition_rangemuchlessthan_litnum_comparison(self):
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[6 <3< 10]]showing',
                        '[[1.0 <0.1< 1.101]]showing',
                        '[[1.0 <0.1< 1.2]]showing',
                        '[[0.99 <0< 1]]showing',
                        ],
            # all these docs should result in ''
            '': ['[[1.01 <0.1< 1.1]]hiding',
                 '[[1 <0.1< 1.1]]hiding',
                 '[[6 <5< 10]]hiding',
                 '[[1 <0< 1]]hiding',
                 '[[1 <1< 0.99]]hiding',
                 '[[10 <2< 11]]hiding',
                 '[[1.01 <0< 1]]hiding',
                 '[[1.2 <0.1< 1]]hiding',
                 ]
        })

    # ------------------------------------------------------- test --
    def test_pm_condition_rangemuchgreaterthan_litnum_comparison(self):
        self._assert_samples({
            # these sources should result in 'showing'
            'showing': ['[[10 >3> 6]]showing',
                        '[[1.101 >0.1> 1.0]]showing',
                        '[[1.2 >0.1> 1.0]]showing',
                        '[[1 >0> 0.99]]showing',
                        ],
            # all these docs should result in ''
            '': ['[[1.1 >0.1> 1.01]]hiding',
                 '[[1.1 >0.1> 1]]hiding',
                 '[[10 >5> 6]]hiding',
                 '[[1 >0> 1]]hiding',
                 '[[0.99 >1> 1]]hiding',
                 '[[11 >2> 10]]hiding',
                 '[[1 >0> 1.01]]hiding',
                 '[[1 >0.1> 1.2]]hiding',
                 ]
        })
# ===================================================================
|
DanielBaird/CliMAS-Next-Generation
|
climas-ng/climasng/tests/test_prosemaker_conditions_rangenum.py
|
Python
|
mit
| 8,241 | 0.005096 |
from .clev import *
|
infoscout/weighted-levenshtein
|
weighted_levenshtein/__init__.py
|
Python
|
mit
| 20 | 0 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.optimizers import SGD
from keras.optimizers import Adam
from keras.optimizers import adadelta
from keras.optimizers import rmsprop
from keras.layers import Layer
from keras import backend as K
K.set_image_dim_ordering('tf')
import socket
import os
# -------------------------------------------------
# Background config:


def _ensure_dir(path):
    """Create *path* if it does not exist yet and return it unchanged."""
    if not os.path.exists(path):
        os.mkdir(path)
    return path


# Per-host output prefix; unknown hosts fall back to 'zhora/'.
hostname = socket.gethostname()
_HOST_PATH_VARS = {
    'baymax': 'baymax/',
    'walle': 'walle/',
    'bender': 'bender/',
}
path_var = _HOST_PATH_VARS.get(hostname, 'zhora/')

# Dataset locations (JAAD, resized to 128px / 256px).
DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_128/train/'
# DATA_DIR= '/local_home/data/KITTI_data/'
HD_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_256/train/'
VAL_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_128/val/'
VAL_HD_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_256/val/'
TEST_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_128/test/'

# Output directories, created on import (same side effect as before,
# now via a single helper instead of repeated exists/mkdir blocks).
MODEL_DIR = _ensure_dir('./../' + path_var + 'models')
CHECKPOINT_DIR = _ensure_dir('./../' + path_var + 'checkpoints')
ATTN_WEIGHTS_DIR = _ensure_dir('./../' + path_var + 'attn_weights')
GEN_IMAGES_DIR = _ensure_dir('./../' + path_var + 'generated_images')
CLA_GEN_IMAGES_DIR = _ensure_dir(GEN_IMAGES_DIR + '/cla_gen/')
LOG_DIR = _ensure_dir('./../' + path_var + 'logs')
TF_LOG_DIR = _ensure_dir('./../' + path_var + 'tf_logs')
TF_LOG_GAN_DIR = _ensure_dir('./../' + path_var + 'tf_gan_logs')
TEST_RESULTS_DIR = _ensure_dir('./../' + path_var + 'test_results')

# Behaviour switches.
PRINT_MODEL_SUMMARY = True
SAVE_MODEL = True
PLOT_MODEL = True
SAVE_GENERATED_IMAGES = True
SHUFFLE = True
# Model/training hyper-parameters.
VIDEO_LENGTH = 30
IMG_SIZE = (128, 128, 3)
ADVERSARIAL = False
BUF_SIZE = 10
LOSS_WEIGHTS = [1, 1]
ATTN_COEFF = 0
KL_COEFF = 0
# -------------------------------------------------
# Network configuration:
print ("Loading network/training configuration.")
print ("Config file: " + str(__name__))
BATCH_SIZE = 7
NB_EPOCHS_AUTOENCODER = 30
NB_EPOCHS_GAN = 0
# Optimizers: autoencoder, generator and discriminator respectively.
OPTIM_A = Adam(lr=0.0001, beta_1=0.5)
OPTIM_G = Adam(lr=0.00001, beta_1=0.5)
# OPTIM_D = Adam(lr=0.000001, beta_1=0.5)
# OPTIM_D = SGD(lr=0.000001, momentum=0.5, nesterov=True)
OPTIM_D = rmsprop(lr=0.000001)
lr_schedule = [10, 20, 30]  # epoch_step
def schedule(epoch_idx, steps=None):
    """Piecewise-constant learning-rate schedule.

    Args:
        epoch_idx: zero-based epoch index.
        steps: optional sequence of three boundary epochs; defaults to the
            module-level ``lr_schedule`` so existing callers are unchanged.

    Returns:
        The learning rate for the given epoch.
    """
    if steps is None:
        steps = lr_schedule
    if (epoch_idx + 1) < steps[0]:
        return 0.0001
    elif (epoch_idx + 1) < steps[1]:
        # NOTE(review): same rate as the first segment; the original comment
        # said "lr_decay_ratio = 10" -- confirm whether 0.00001 was intended.
        return 0.0001
    elif (epoch_idx + 1) < steps[2]:
        return 0.00001
    return 0.000001
|
AutonomyLab/deep_intent
|
code/autoencoder_model/scripts/config_nmta.py
|
Python
|
bsd-3-clause
| 3,079 | 0.003573 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp import addons
class AccountWizard_cd(osv.osv_memory):
    """Austrian localisation tweak of the chart-of-accounts setup wizard:
    default the number of digits in account codes to 0 (keep the codes
    exactly as defined in the template)."""
    _inherit='wizard.multi.charts.accounts'
    _defaults = {
        'code_digits' : 0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
diogocs1/comps
|
web/addons/l10n_at/account_wizard.py
|
Python
|
apache-2.0
| 1,234 | 0.009724 |
#! /usr/bin/env python
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <nils@we155.de>
# Copyright (C) Enrico Pozzobon <enricopozzobon@gmail.com>
# Copyright (C) Alexander Schroeder <alexander1.schroeder@st.othr.de>
# This program is published under a GPLv2 license
# scapy.contrib.description = ISO-TP (ISO 15765-2)
# scapy.contrib.status = loads
"""
ISOTPSocket.
"""
import ctypes
from ctypes.util import find_library
import struct
import socket
import time
from threading import Thread, Event, Lock, Semaphore
from scapy.packet import Packet
from scapy.fields import BitField, FlagsField, StrLenField, \
ThreeBytesField, XBitField, ConditionalField, \
BitEnumField, ByteField, XByteField, BitFieldLenField, StrField
from scapy.compat import chb, orb
from scapy.layers.can import CAN
import scapy.modules.six as six
import scapy.automaton as automaton
import six.moves.queue as queue
from scapy.error import Scapy_Exception, warning, log_loading
from scapy.supersocket import SuperSocket
from scapy.config import conf
from scapy.consts import LINUX
from scapy.contrib.cansocket import PYTHON_CAN
from scapy.sendrecv import sniff
from scapy.sessions import DefaultSession
__all__ = ["ISOTP", "ISOTPHeader", "ISOTPHeaderEA", "ISOTP_SF", "ISOTP_FF",
"ISOTP_CF", "ISOTP_FC", "ISOTPSoftSocket", "ISOTPSession",
"ISOTPSocket", "ISOTPSocketImplementation", "ISOTPMessageBuilder",
"ISOTPScan"]
# Whether to back ISOTPSocket with the Linux can-isotp kernel module.
# Off by default; enabled only on Python 3 + Linux when the user opts in
# via conf.contribs (see the log message below).
USE_CAN_ISOTP_KERNEL_MODULE = False
if six.PY3 and LINUX:
    # libc handle used for the kernel-module socket implementation.
    LIBC = ctypes.cdll.LoadLibrary(find_library("c"))
    try:
        if conf.contribs['ISOTP']['use-can-isotp-kernel-module']:
            USE_CAN_ISOTP_KERNEL_MODULE = True
    except KeyError:
        log_loading.info("Specify 'conf.contribs['ISOTP'] = "
                         "{'use-can-isotp-kernel-module': True}' to enable "
                         "usage of can-isotp kernel module.")
CAN_MAX_IDENTIFIER = (1 << 29) - 1  # Maximum 29-bit identifier
CAN_MTU = 16
CAN_MAX_DLEN = 8
ISOTP_MAX_DLEN_2015 = (1 << 32) - 1  # Maximum for 32-bit FF_DL
ISOTP_MAX_DLEN = (1 << 12) - 1  # Maximum for 12-bit FF_DL
# ISO-TP protocol control information values (high nibble of the first
# payload byte), identifying the four frame types.
N_PCI_SF = 0x00  # /* single frame */
N_PCI_FF = 0x10  # /* first frame */
N_PCI_CF = 0x20  # /* consecutive frame */
N_PCI_FC = 0x30  # /* flow control */
class ISOTP(Packet):
    """An ISO-TP (ISO 15765-2) message: the reassembled payload carried by
    one or more CAN frames.

    The addressing attributes (src/dst CAN identifiers, exsrc/exdst extended
    address bytes) are plain slots rather than Packet fields.
    """
    name = 'ISOTP'
    fields_desc = [
        StrField('data', B"")
    ]
    __slots__ = Packet.__slots__ + ["src", "dst", "exsrc", "exdst"]

    def answers(self, other):
        if other.__class__ == self.__class__:
            return self.payload.answers(other.payload)
        return 0

    def __init__(self, *args, **kwargs):
        # Pop addressing kwargs before Packet.__init__ sees them, since
        # they are not Packet fields.
        self.src = kwargs.pop("src", None)
        self.dst = kwargs.pop("dst", None)
        self.exsrc = kwargs.pop("exsrc", None)
        self.exdst = kwargs.pop("exdst", None)
        Packet.__init__(self, *args, **kwargs)
        self.validate_fields()

    def validate_fields(self):
        """Raise Scapy_Exception when an addressing attribute is out of
        range (29-bit CAN identifiers, single-byte extended addresses)."""
        if self.src is not None:
            if not 0 <= self.src <= CAN_MAX_IDENTIFIER:
                raise Scapy_Exception("src is not a valid CAN identifier")
        if self.dst is not None:
            if not 0 <= self.dst <= CAN_MAX_IDENTIFIER:
                raise Scapy_Exception("dst is not a valid CAN identifier")
        if self.exsrc is not None:
            if not 0 <= self.exsrc <= 0xff:
                raise Scapy_Exception("exsrc is not a byte")
        if self.exdst is not None:
            if not 0 <= self.exdst <= 0xff:
                raise Scapy_Exception("exdst is not a byte")

    def fragment(self):
        """Split this message into a list of CAN frames.

        Returns a single-frame packet when the data fits, otherwise a first
        frame followed by consecutive frames. An extended address byte
        (exdst) costs one data byte per frame.
        """
        data_bytes_in_frame = 7
        if self.exdst is not None:
            data_bytes_in_frame = 6
        if len(self.data) > ISOTP_MAX_DLEN_2015:
            raise Scapy_Exception("Too much data in ISOTP message")
        if len(self.data) <= data_bytes_in_frame:
            # We can do this in a single frame
            frame_data = struct.pack('B', len(self.data)) + self.data
            if self.exdst:
                frame_data = struct.pack('B', self.exdst) + frame_data
            pkt = CAN(identifier=self.dst, data=frame_data)
            return [pkt]
        # Construct the first frame: 12-bit FF_DL when possible, otherwise
        # the escape sequence with a 32-bit length (ISO 15765-2:2015).
        if len(self.data) <= ISOTP_MAX_DLEN:
            frame_header = struct.pack(">H", len(self.data) + 0x1000)
        else:
            frame_header = struct.pack(">HI", 0x1000, len(self.data))
        if self.exdst:
            frame_header = struct.pack('B', self.exdst) + frame_header
        idx = 8 - len(frame_header)
        frame_data = self.data[0:idx]
        frame = CAN(identifier=self.dst, data=frame_header + frame_data)
        # Construct consecutive frames with a wrapping 4-bit sequence number
        n = 1
        pkts = [frame]
        while idx < len(self.data):
            frame_data = self.data[idx:idx + data_bytes_in_frame]
            frame_header = struct.pack("b", (n % 16) + N_PCI_CF)
            n += 1
            idx += len(frame_data)
            if self.exdst:
                frame_header = struct.pack('B', self.exdst) + frame_header
            pkt = CAN(identifier=self.dst, data=frame_header + frame_data)
            pkts.append(pkt)
        return pkts

    @staticmethod
    def defragment(can_frames, use_extended_addressing=None):
        """Reassemble one ISOTP message from a list of CAN frames.

        Returns None when nothing could be reassembled; when several
        messages are found, returns the first one after a warning.
        """
        if len(can_frames) == 0:
            raise Scapy_Exception("ISOTP.defragment called with 0 frames")
        dst = can_frames[0].identifier
        for frame in can_frames:
            if frame.identifier != dst:
                warning("Not all CAN frames have the same identifier")
        parser = ISOTPMessageBuilder(use_extended_addressing)
        for c in can_frames:
            parser.feed(c)
        results = []
        while parser.count > 0:
            p = parser.pop()
            if (use_extended_addressing is True and p.exdst is not None) \
                    or (use_extended_addressing is False and p.exdst is None) \
                    or (use_extended_addressing is None):
                results.append(p)
        if len(results) == 0:
            return None
        if len(results) > 1:
            # Fixed: was `> 0`, which emitted this warning even when exactly
            # one message was reassembled.
            warning("More than one ISOTP frame could be defragmented from the "
                    "provided CAN frames, returning the first one.")
        return results[0]
class ISOTPHeader(CAN):
    """CAN header used when dissecting ISOTP traffic frame-by-frame.

    Binds the CAN payload to the matching ISOTP_* frame class based on the
    N_PCI nibble of the payload.
    """
    name = 'ISOTPHeader'
    fields_desc = [
        FlagsField('flags', 0, 3, ['error',
                                   'remote_transmission_request',
                                   'extended']),
        XBitField('identifier', 0, 29),
        ByteField('length', None),
        ThreeBytesField('reserved', 0),
    ]

    def extract_padding(self, p):
        # Everything after the header is payload; there is no padding layer.
        return p, None

    def post_build(self, pkt, pay):
        """
        This will set the ByteField 'length' to the correct value.
        """
        if self.length is None:
            # byte 4 of the CAN header is the DLC
            pkt = pkt[:4] + chb(len(pay)) + pkt[5:]
        return pkt + pay

    def guess_payload_class(self, payload):
        """
        ISOTP encodes the frame type in the first nibble of a frame.
        """
        t = (orb(payload[0]) & 0xf0) >> 4
        if t == 0:
            return ISOTP_SF
        elif t == 1:
            return ISOTP_FF
        elif t == 2:
            return ISOTP_CF
        else:
            return ISOTP_FC
class ISOTPHeaderEA(ISOTPHeader):
    """Variant of ISOTPHeader for ISOTP traffic using extended addressing:
    one extra address byte precedes the ISOTP frame payload."""
    name = 'ISOTPHeaderExtendedAddress'
    fields_desc = ISOTPHeader.fields_desc + [
        XByteField('extended_address', 0),
    ]

    def post_build(self, p, pay):
        """
        This will set the ByteField 'length' to the correct value.
        'chb(len(pay) + 1)' is required, because the field 'extended_address'
        is counted as payload on the CAN layer
        """
        if self.length is None:
            p = p[:4] + chb(len(pay) + 1) + p[5:]
        return p + pay
# Human-readable names for the 4-bit ISOTP frame type (high nibble of the
# N_PCI byte), used by the BitEnumField of the ISOTP_* frame classes below.
ISOTP_TYPE = {0: 'single',
              1: 'first',
              2: 'consecutive',
              3: 'flow_control'}
class ISOTP_SF(Packet):
    """ISOTP Single Frame: a complete message whose payload fits in one
    CAN frame; the low nibble of the first byte holds the payload size."""
    name = 'ISOTPSingleFrame'
    fields_desc = [
        BitEnumField('type', 0, 4, ISOTP_TYPE),
        BitFieldLenField('message_size', None, 4, length_of='data'),
        StrLenField('data', '', length_from=lambda pkt: pkt.message_size)
    ]
class ISOTP_FF(Packet):
    """ISOTP First Frame: opens a segmented transfer and announces the total
    message size (12-bit field, with a 32-bit escape when message_size == 0)."""
    name = 'ISOTPFirstFrame'
    fields_desc = [
        BitEnumField('type', 1, 4, ISOTP_TYPE),
        BitField('message_size', 0, 12),
        ConditionalField(BitField('extended_message_size', 0, 32),
                         lambda pkt: pkt.message_size == 0),
        StrField('data', '', fmt="B")
    ]
class ISOTP_CF(Packet):
    """ISOTP Consecutive Frame: carries the next chunk of a segmented
    transfer; 'index' is the 4-bit sequence number (wraps modulo 16)."""
    name = 'ISOTPConsecutiveFrame'
    fields_desc = [
        BitEnumField('type', 2, 4, ISOTP_TYPE),
        BitField('index', 0, 4),
        StrField('data', '', fmt="B")
    ]
class ISOTP_FC(Packet):
    """ISOTP Flow Control Frame: sent by the receiver of a segmented transfer
    to grant (or pause/abort) the transmission of Consecutive Frames."""
    name = 'ISOTPFlowControlFrame'
    fields_desc = [
        BitEnumField('type', 3, 4, ISOTP_TYPE),
        BitEnumField('fc_flag', 0, 4, {0: 'continue',
                                       1: 'wait',
                                       2: 'abort'}),
        ByteField('block_size', 0),
        ByteField('separation_time', 0),
    ]
class ISOTPMessageBuilderIter(object):
    """Iterator over the ready messages of an ISOTPMessageBuilder.

    Yields each message the wrapped builder has completed and stops once
    the builder is drained.
    """
    # The original code spelled this 'slots', which had no effect;
    # '__slots__' is what actually suppresses the per-instance __dict__.
    __slots__ = ["builder"]

    def __init__(self, builder):
        self.builder = builder

    def __iter__(self):
        return self

    def __next__(self):
        if self.builder.count:
            return self.builder.pop()
        raise StopIteration

    next = __next__  # Python 2 compatibility
class ISOTPMessageBuilder:
    """
    Utility class to build ISOTP messages out of CAN frames, used by both
    ISOTP.defragment() and ISOTPSession.

    This class attempts to interpret some CAN frames as ISOTP frames, both with
    and without extended addressing at the same time. For example, if an
    extended address of 07 is being used, all frames will also be interpreted
    as ISOTP single-frame messages.

    CAN frames are fed to an ISOTPMessageBuilder object with the feed() method
    and the resulting ISOTP frames can be extracted using the pop() method.
    """

    class Bucket:
        # Accumulator for the payload of one in-flight ISOTP message.
        # 'ready' stays None until 'total_len' bytes have been pushed.
        def __init__(self, total_len, first_piece, ts=None):
            self.pieces = list()
            self.total_len = total_len
            self.current_len = 0
            self.ready = None
            self.src = None    # source identifier, learned from a FC frame
            self.exsrc = None  # extended source address, from a FC frame
            self.time = ts     # timestamp of the first received frame
            self.push(first_piece)

        def push(self, piece):
            self.pieces.append(piece)
            self.current_len += len(piece)
            if self.current_len >= self.total_len:
                if six.PY3:
                    isotp_data = b"".join(self.pieces)
                else:
                    isotp_data = "".join(map(str, self.pieces))
                # discard padding bytes beyond the announced length
                self.ready = isotp_data[:self.total_len]

    def __init__(self, use_ext_addr=None, did=None, basecls=None):
        """
        Initialize a ISOTPMessageBuilder object

        :param use_ext_addr: True for only attempting to defragment with
                             extended addressing, False for only attempting
                             to defragment without extended addressing,
                             or None for both
        :param did: destination CAN identifier(s) to accept; frames with any
                    other identifier are silently ignored (None accepts all)
        :param basecls: the class of packets that will be returned,
                        defaults to ISOTP
        """
        self.ready = []     # list of (identifier, ext_addr, Bucket) tuples
        self.buckets = {}   # (ea, identifier, next_seq) -> Bucket
        self.use_ext_addr = use_ext_addr
        self.basecls = basecls or ISOTP
        self.dst_ids = None
        self.last_ff = None     # bucket key of the last FF (plain addressing)
        self.last_ff_ex = None  # bucket key of the last FF (extended)
        if did is not None:
            if hasattr(did, "__iter__"):
                self.dst_ids = did
            else:
                self.dst_ids = [did]

    def feed(self, can):
        """Attempt to feed an incoming CAN frame into the state machine"""
        if not isinstance(can, Packet) and hasattr(can, "__iter__"):
            for p in can:
                self.feed(p)
            return

        identifier = can.identifier
        if self.dst_ids is not None and identifier not in self.dst_ids:
            return
        data = bytes(can.data)

        # Try both interpretations unless use_ext_addr pins one of them:
        # first without, then with an extended address byte.
        if len(data) > 1 and self.use_ext_addr is not True:
            self._try_feed(identifier, None, data, can.time)
        if len(data) > 2 and self.use_ext_addr is not False:
            ea = six.indexbytes(data, 0)
            self._try_feed(identifier, ea, data[1:], can.time)

    @property
    def count(self):
        """Returns the number of ready ISOTP messages built from the provided
        can frames"""
        return len(self.ready)

    def __len__(self):
        return self.count

    def pop(self, identifier=None, ext_addr=None):
        """
        Returns a built ISOTP message

        :param identifier: if not None, only return isotp messages with this
                           destination
        :param ext_addr: if identifier is not None, only return isotp messages
                         with this extended address for destination
        :returns: an ISOTP packet, or None if no message is ready
        """
        if identifier is not None:
            for i in range(len(self.ready)):
                b = self.ready[i]
                iden = b[0]
                ea = b[1]
                if iden == identifier and ext_addr == ea:
                    return ISOTPMessageBuilder._build(self.ready.pop(i),
                                                      self.basecls)
            return None

        if len(self.ready) > 0:
            return ISOTPMessageBuilder._build(self.ready.pop(0), self.basecls)
        return None

    def __iter__(self):
        return ISOTPMessageBuilderIter(self)

    @staticmethod
    def _build(t, basecls=ISOTP):
        # t is a (identifier, extended_address, Bucket) tuple; addressing
        # attributes are copied onto the packet only if basecls has them.
        bucket = t[2]
        p = basecls(bucket.ready)
        if hasattr(p, "dst"):
            p.dst = t[0]
        if hasattr(p, "exdst"):
            p.exdst = t[1]
        if hasattr(p, "src"):
            p.src = bucket.src
        if hasattr(p, "exsrc"):
            p.exsrc = bucket.exsrc
        if hasattr(p, "time"):
            p.time = bucket.time
        return p

    def _feed_first_frame(self, identifier, ea, data, ts):
        if len(data) < 3:
            # At least 3 bytes are necessary: 2 for length and 1 for data
            return False
        header = struct.unpack('>H', bytes(data[:2]))[0]
        expected_length = header & 0x0fff
        isotp_data = data[2:]
        # A 12-bit length of 0 escapes to a 32-bit length (ISO 15765-2:2015)
        if expected_length == 0 and len(data) >= 6:
            expected_length = struct.unpack('>I', bytes(data[2:6]))[0]
            isotp_data = data[6:]

        # The next expected consecutive frame carries sequence number 1.
        key = (ea, identifier, 1)
        if ea is None:
            self.last_ff = key
        else:
            self.last_ff_ex = key
        self.buckets[key] = self.Bucket(expected_length, isotp_data, ts)
        return True

    def _feed_single_frame(self, identifier, ea, data, ts):
        if len(data) < 2:
            # At least 2 bytes are necessary: 1 for length and 1 for data
            return False
        length = six.indexbytes(data, 0) & 0x0f
        isotp_data = data[1:length + 1]

        if length > len(isotp_data):
            # CAN frame has less data than expected
            return False

        # A single frame is complete immediately; no bucket bookkeeping.
        self.ready.append((identifier, ea,
                           self.Bucket(length, isotp_data, ts)))
        return True

    def _feed_consecutive_frame(self, identifier, ea, data):
        if len(data) < 2:
            # At least 2 bytes are necessary: 1 for sequence number and
            # 1 for data
            return False
        first_byte = six.indexbytes(data, 0)
        seq_no = first_byte & 0x0f
        isotp_data = data[1:]

        key = (ea, identifier, seq_no)
        bucket = self.buckets.pop(key, None)
        if bucket is None:
            # There is no message constructor waiting for this frame
            return False

        bucket.push(isotp_data)
        if bucket.ready is None:
            # full ISOTP message is not ready yet, put it back in
            # buckets list under the next expected sequence number
            next_seq = (seq_no + 1) % 16
            key = (ea, identifier, next_seq)
            self.buckets[key] = bucket
        else:
            self.ready.append((identifier, ea, bucket))
        return True

    def _feed_flow_control_frame(self, identifier, ea, data):
        if len(data) < 3:
            # At least 3 bytes are necessary: flow status, block size and
            # separation time minimum
            return False

        # A FC frame flows in the opposite direction, so its identifier and
        # extended address reveal the *source* of the message being built.
        keys = [self.last_ff, self.last_ff_ex]
        if not any(keys):
            return False

        buckets = [self.buckets.pop(k, None) for k in keys]
        self.last_ff = None
        self.last_ff_ex = None

        if not any(buckets):
            # There is no message constructor waiting for this frame
            return False

        for key, bucket in zip(keys, buckets):
            if bucket is None:
                continue
            bucket.src = identifier
            bucket.exsrc = ea
            self.buckets[key] = bucket

        return True

    def _try_feed(self, identifier, ea, data, ts):
        # Dispatch on the N_PCI type encoded in the high nibble of the
        # first (post-address) byte.
        first_byte = six.indexbytes(data, 0)
        if len(data) > 1 and first_byte & 0xf0 == N_PCI_SF:
            self._feed_single_frame(identifier, ea, data, ts)
        if len(data) > 2 and first_byte & 0xf0 == N_PCI_FF:
            self._feed_first_frame(identifier, ea, data, ts)
        if len(data) > 1 and first_byte & 0xf0 == N_PCI_CF:
            self._feed_consecutive_frame(identifier, ea, data)
        if len(data) > 1 and first_byte & 0xf0 == N_PCI_FC:
            self._feed_flow_control_frame(identifier, ea, data)
class ISOTPSession(DefaultSession):
    """Defragment ISOTP packets 'on-the-flow'.

    Usage:
        >>> sniff(session=ISOTPSession)
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): the builder-specific kwargs are forwarded to
        # DefaultSession first and popped afterwards — this relies on
        # DefaultSession tolerating extra keyword arguments.
        DefaultSession.__init__(self, *args, **kwargs)
        self.m = ISOTPMessageBuilder(
            use_ext_addr=kwargs.pop("use_ext_addr", None),
            did=kwargs.pop("did", None),
            basecls=kwargs.pop("basecls", None))

    def on_packet_received(self, pkt):
        """Feed a sniffed CAN frame (or list of frames) into the builder and
        forward every completed ISOTP message up the session chain."""
        if not pkt:
            return
        if isinstance(pkt, list):
            for p in pkt:
                ISOTPSession.on_packet_received(self, p)
            return
        self.m.feed(pkt)
        while len(self.m) > 0:
            rcvd = self.m.pop()
            if self._supersession:
                self._supersession.on_packet_received(rcvd)
            else:
                DefaultSession.on_packet_received(self, rcvd)
class ISOTPSoftSocket(SuperSocket):
    """
    This class is a wrapper around the ISOTPSocketImplementation, for the
    reasons described below.

    The ISOTPSoftSocket aims to be fully compatible with the Linux ISOTP
    sockets provided by the can-isotp kernel module, while being usable on any
    operating system.
    Therefore, this socket needs to be able to respond to an incoming FF frame
    with a FC frame even before the recv() method is called.
    A thread is needed for receiving CAN frames in the background, and since
    the lower layer CAN implementation is not guaranteed to have a functioning
    POSIX select(), each ISOTP socket needs its own CAN receiver thread.
    Additionally, 2 timers are necessary to keep track of the timeouts and
    frame separation times, and each timer is implemented in its own thread.

    In total, each ISOTPSoftSocket spawns 3 background threads when
    constructed, which must be terminated afterwards by calling the close()
    method.

    SuperSocket automatically calls the close() method when the GC destroys an
    ISOTPSoftSocket. However, note that if any thread holds a reference to
    an ISOTPSoftSocket object, it will not be collected by the GC.

    The implementation of the ISOTP protocol, along with the necessary
    threads, are stored in the ISOTPSocketImplementation class, and therefore:

    * There no reference from ISOTPSocketImplementation to ISOTPSoftSocket
    * ISOTPSoftSocket can be normally garbage collected
    * Upon destruction, ISOTPSoftSocket.close() will be called
    * ISOTPSoftSocket.close() will call ISOTPSocketImplementation.close()
    * All background threads can be stopped by the garbage collector
    """
    nonblocking_socket = True

    def __init__(self,
                 can_socket=None,
                 sid=0,
                 did=0,
                 extended_addr=None,
                 extended_rx_addr=None,
                 rx_block_size=0,
                 rx_separation_time_min=0,
                 padding=False,
                 listen_only=False,
                 basecls=ISOTP):
        """
        Initialize an ISOTPSoftSocket using the provided underlying can socket

        :param can_socket: a CANSocket instance, preferably filtering only can
                           frames with identifier equal to did
        :param sid: the CAN identifier of the sent CAN frames
        :param did: the CAN identifier of the received CAN frames
        :param extended_addr: the extended address of the sent ISOTP frames
                              (can be None)
        :param extended_rx_addr: the extended address of the received ISOTP
                                 frames (can be None)
        :param rx_block_size: block size sent in Flow Control ISOTP frames
        :param rx_separation_time_min: minimum desired separation time sent in
                                       Flow Control ISOTP frames
        :param padding: If True, pads sending packets with 0x00 which not
                        count to the payload.
                        Does not affect receiving packets.
        :param listen_only: if True, no Flow Control frames are sent back
        :param basecls: base class of the packets emitted by this socket
        """
        if six.PY3 and LINUX and isinstance(can_socket, six.string_types):
            from scapy.contrib.cansocket import CANSocket
            can_socket = CANSocket(can_socket)
        elif isinstance(can_socket, six.string_types):
            raise Scapy_Exception("Provide a CANSocket object instead")
        self.exsrc = extended_addr
        self.exdst = extended_rx_addr
        self.src = sid
        self.dst = did

        impl = ISOTPSocketImplementation(
            can_socket,
            src_id=sid,
            dst_id=did,
            padding=padding,
            extended_addr=extended_addr,
            extended_rx_addr=extended_rx_addr,
            rx_block_size=rx_block_size,
            rx_separation_time_min=rx_separation_time_min,
            listen_only=listen_only
        )

        # ins/outs are the attributes SuperSocket reads from / writes to.
        self.ins = impl
        self.outs = impl
        self.impl = impl

        if basecls is None:
            warning('Provide a basecls ')
        self.basecls = basecls

    def close(self):
        # Stop the implementation's background threads exactly once.
        if not self.closed:
            self.impl.close()
            self.outs = None
            self.ins = None
            SuperSocket.close(self)

    def begin_send(self, p):
        """Begin the transmission of message p. This method returns after
        sending the first frame. If multiple frames are necessary to send the
        message, this socket will be unable to send other messages until
        either the transmission of this frame succeeds or it fails."""
        if hasattr(p, "sent_time"):
            p.sent_time = time.time()

        return self.outs.begin_send(bytes(p))

    def recv_raw(self, x=0xffff):
        """Receive a complete ISOTP message, blocking until a message is
        received or the specified timeout is reached.
        If self.timeout is 0, then this function doesn't block and returns the
        first frame in the receive buffer or None if there isn't any."""
        msg = self.ins.recv()
        t = time.time()
        return self.basecls, msg, t

    def recv(self, x=0xffff):
        msg = SuperSocket.recv(self, x)

        # Copy this socket's addressing onto the packet, when supported.
        if hasattr(msg, "src"):
            msg.src = self.src
        if hasattr(msg, "dst"):
            msg.dst = self.dst
        if hasattr(msg, "exsrc"):
            msg.exsrc = self.exsrc
        if hasattr(msg, "exdst"):
            msg.exdst = self.exdst
        return msg

    @staticmethod
    def select(sockets, remain=None):
        """This function is called during sendrecv() routine to wait for
        sockets to be ready to receive
        """
        blocking = remain is None or remain > 0

        def find_ready_sockets():
            return list(filter(lambda x: not x.ins.rx_queue.empty(), sockets))

        ready_sockets = find_ready_sockets()
        if len(ready_sockets) > 0 or not blocking:
            return ready_sockets, None

        # No socket is ready yet: register a callback on each socket's
        # receive path and sleep until one fires or 'remain' expires.
        exit_select = Event()

        def my_cb(msg):
            exit_select.set()

        try:
            for s in sockets:
                s.ins.rx_callbacks.append(my_cb)

            exit_select.wait(remain)

        finally:
            for s in sockets:
                try:
                    s.ins.rx_callbacks.remove(my_cb)
                except ValueError:
                    pass

        ready_sockets = find_ready_sockets()
        return ready_sockets, None
# Public alias for the pure-Python ISOTP socket implementation.
ISOTPSocket = ISOTPSoftSocket
class CANReceiverThread(Thread):
    """
    Helper class that receives CAN frames and feeds them to the provided
    callback. It relies on CAN frames being enqueued in the CANSocket object
    and not being lost if they come before the sniff method is called. This is
    true in general since sniff is usually implemented as repeated recv(), but
    might be false in some implementation of CANSocket
    """

    def __init__(self, can_socket, callback):
        """
        Initialize the thread. In order for this thread to be able to be
        stopped by the destructor of another object, it is important to not
        keep a reference to the object in the callback function.

        :param can_socket: the CANSocket upon which this class will call the
                           sniff() method
        :param callback: function to call whenever a CAN frame is received
        """
        self.socket = can_socket
        self.callback = callback
        self.exiting = False
        self._thread_started = Event()
        self.exception = None  # set if run() dies with an unexpected error

        Thread.__init__(self)
        self.name = "CANReceiver" + self.name

    def start(self):
        # Block until run() has actually begun executing.
        Thread.start(self)
        self._thread_started.wait()

    def run(self):
        self._thread_started.set()
        try:
            def prn(msg):
                if not self.exiting:
                    self.callback(msg)
            while 1:
                try:
                    # sniff in 1-second slices so 'exiting' is polled
                    # regularly even when no traffic arrives
                    sniff(store=False, timeout=1, count=1,
                          stop_filter=lambda x: self.exiting,
                          prn=prn, opened_socket=self.socket)
                except ValueError as ex:
                    # the underlying socket may raise while being closed;
                    # only re-raise if we were not asked to stop
                    if not self.exiting:
                        raise ex
                if self.exiting:
                    return
        except Exception as ex:
            self.exception = ex

    def stop(self):
        # Ask the thread to terminate at the next sniff() timeout.
        self.exiting = True
class TimeoutThread(Thread):
    """
    Utility class implementing a timer, useful for both timeouts and
    waiting between sent CAN frames.

    Contrary to the threading.Timer implementation, this timer thread can be
    reused for multiple timeouts. This avoids the overhead of creating a new
    pthread every time a timeout is planned.
    """

    def __init__(self):
        Thread.__init__(self)
        self._thread_started = Event()
        self._cancelled = Event()
        # _ready_sem is held while a timeout is pending (released by the
        # worker once the wait ends); _busy_sem wakes the worker loop when
        # a new timeout has been scheduled via set_timeout().
        self._ready_sem = Semaphore(1)
        self._busy_sem = Semaphore(0)
        self._timeout = 1
        self._callback = None
        self._exception = None
        self._killed = False
        self._dead = False
        self.name = "ISOTP Timer " + self.name

    def run(self):
        self._thread_started.set()
        try:
            while not self._killed:
                self._busy_sem.acquire()
                # wait() returns True if cancelled, False on timeout
                f = self._cancelled.wait(self._timeout)
                self._ready_sem.release()
                if f is False:
                    if self._callback is not None:
                        self._callback()
        except Exception as ex:
            self._exception = ex
            warning(self.name + " is now stopped")
            raise ex
        finally:
            self._dead = True

    def start(self):
        """Start the thread, and make sure it is running"""
        Thread.start(self)
        self._thread_started.wait()

    def set_timeout(self, timeout, callback):
        """Call 'callback' in 'timeout' seconds, unless cancelled."""
        # A non-blocking acquire fails if a timeout is already pending.
        if not self._ready_sem.acquire(False):
            raise Scapy_Exception("Timer was already started")

        self._callback = callback
        self._timeout = timeout
        self._cancelled.clear()
        self._busy_sem.release()

    def cancel(self):
        """Stop the timer without executing the callback."""
        self._cancelled.set()
        if not self._dead:
            # wait until the worker has fully consumed the pending timeout
            self._ready_sem.acquire()
            self._ready_sem.release()

    def stop(self):
        """Stop the thread, making this object unusable."""
        if not self._dead:
            self._killed = True
            self._cancelled.set()
            self._busy_sem.release()
            self.join()

            if not self._ready_sem.acquire(False):
                warning("ISOTP Timer thread may not have stopped "
                        "correctly")
"""ISOTPSoftSocket definitions."""
# Enum states
ISOTP_IDLE = 0
ISOTP_WAIT_FIRST_FC = 1
ISOTP_WAIT_FC = 2
ISOTP_WAIT_DATA = 3
ISOTP_SENDING = 4
# /* Flow Status given in FC frame */
ISOTP_FC_CTS = 0 # /* clear to send */
ISOTP_FC_WT = 1 # /* wait */
ISOTP_FC_OVFLW = 2 # /* overflow */
class ISOTPSocketImplementation(automaton.SelectableObject):
    """
    Implementation of an ISOTP "state machine".

    Most of the ISOTP logic was taken from
    https://github.com/hartkopp/can-isotp/blob/master/net/can/isotp.c

    This class is separated from ISOTPSoftSocket to make sure the background
    threads can't hold a reference to ISOTPSoftSocket, allowing it to be
    collected by the GC.
    """

    def __init__(self,
                 can_socket,
                 src_id,
                 dst_id,
                 padding=False,
                 extended_addr=None,
                 extended_rx_addr=None,
                 rx_block_size=0,
                 rx_separation_time_min=0,
                 listen_only=False):
        """
        :param can_socket: a CANSocket instance, preferably filtering only can
                           frames with identifier equal to did
        :param src_id: the CAN identifier of the sent CAN frames
        :param dst_id: the CAN identifier of the received CAN frames
        :param padding: If True, pads sending packets with 0x00 which not
                        count to the payload.
                        Does not affect receiving packets.
        :param extended_addr: Extended Address byte to be added at the
            beginning of every CAN frame _sent_ by this object. Can be None
            in order to disable extended addressing on sent frames.
        :param extended_rx_addr: Extended Address byte expected to be found at
            the beginning of every CAN frame _received_ by this object. Can
            be None in order to disable extended addressing on received
            frames.
        :param rx_block_size: Block Size byte to be included in every Control
            Flow Frame sent by this object. The default value of 0 means
            that all the data will be received in a single block.
        :param rx_separation_time_min: Time Minimum Separation byte to be
            included in every Control Flow Frame sent by this object. The
            default value of 0 indicates that the peer will not wait any
            time between sending frames.
        :param listen_only: Disables send of flow control frames
        """
        automaton.SelectableObject.__init__(self)
        self.can_socket = can_socket
        self.dst_id = dst_id
        self.src_id = src_id
        self.padding = padding
        self.fc_timeout = 1  # seconds to wait for a Flow Control frame
        self.cf_timeout = 1  # seconds to wait for a Consecutive Frame
        self.filter_warning_emitted = False

        self.extended_rx_addr = extended_rx_addr
        self.ea_hdr = b""
        if extended_addr is not None:
            self.ea_hdr = struct.pack("B", extended_addr)
        self.listen_only = listen_only

        self.rxfc_bs = rx_block_size
        self.rxfc_stmin = rx_separation_time_min

        # --- receive-side state ---
        self.rx_queue = queue.Queue()
        self.rx_len = -1
        self.rx_buf = None
        self.rx_sn = 0
        self.rx_bs = 0
        self.rx_idx = 0
        self.rx_state = ISOTP_IDLE

        # --- transmit-side state ---
        self.txfc_bs = 0
        self.txfc_stmin = 0
        self.tx_gap = 0

        self.tx_buf = None
        self.tx_sn = 0
        self.tx_bs = 0
        self.tx_idx = 0
        self.rx_ll_dl = 0
        self.tx_state = ISOTP_IDLE

        # Background threads: two reusable timers and one CAN receiver.
        self.tx_timer = TimeoutThread()
        self.rx_timer = TimeoutThread()
        self.rx_thread = CANReceiverThread(can_socket, self.on_can_recv)

        self.tx_mutex = Lock()
        self.rx_mutex = Lock()
        self.send_mutex = Lock()

        self.tx_done = Event()
        self.tx_exception = None

        self.tx_callbacks = []
        self.rx_callbacks = []

        self.tx_timer.start()
        self.rx_timer.start()
        self.rx_thread.start()

    def __del__(self):
        self.close()

    def can_send(self, load):
        """Send one CAN frame with this socket's source identifier,
        optionally padded to the full CAN payload length."""
        if self.padding:
            load += bytearray(CAN_MAX_DLEN - len(load))
        self.can_socket.send(CAN(identifier=self.src_id, data=load))

    def on_can_recv(self, p):
        """Callback invoked by the receiver thread for each sniffed frame."""
        if not isinstance(p, CAN):
            raise Scapy_Exception("argument is not a CAN frame")
        if p.identifier != self.dst_id:
            if not self.filter_warning_emitted:
                # Fixed missing space between the two string fragments.
                warning("You should put a filter for identifier=%x on your "
                        "CAN socket" % self.dst_id)
                self.filter_warning_emitted = True
        else:
            self.on_recv(p)

    def close(self):
        """Stop all background threads (both timers and the CAN receiver)."""
        self.rx_timer.stop()
        self.tx_timer.stop()
        self.rx_thread.stop()

    def _rx_timer_handler(self):
        """Method called every time the rx_timer times out, due to the peer not
        sending a consecutive frame within the expected time window"""

        with self.rx_mutex:
            if self.rx_state == ISOTP_WAIT_DATA:
                # we did not get new data frames in time.
                # reset rx state
                self.rx_state = ISOTP_IDLE
                warning("RX state was reset due to timeout")

    def _tx_timer_handler(self):
        """Method called every time the tx_timer times out, which can happen in
        two situations: either a Flow Control frame was not received in time,
        or the Separation Time Min is expired and a new frame must be sent."""

        with self.tx_mutex:
            if (self.tx_state == ISOTP_WAIT_FC or
                    self.tx_state == ISOTP_WAIT_FIRST_FC):
                # we did not get any flow control frame in time
                # reset tx state
                self.tx_state = ISOTP_IDLE
                self.tx_exception = "TX state was reset due to timeout"
                self.tx_done.set()
                raise Scapy_Exception(self.tx_exception)
            elif self.tx_state == ISOTP_SENDING:
                # push out the next segmented pdu
                src_off = len(self.ea_hdr)
                max_bytes = 7 - src_off

                while 1:
                    load = self.ea_hdr
                    load += struct.pack("B", N_PCI_CF + self.tx_sn)
                    load += self.tx_buf[self.tx_idx:self.tx_idx + max_bytes]
                    self.can_send(load)

                    self.tx_sn = (self.tx_sn + 1) % 16
                    self.tx_bs += 1
                    self.tx_idx += max_bytes

                    if len(self.tx_buf) <= self.tx_idx:
                        # we are done
                        self.tx_state = ISOTP_IDLE
                        self.tx_done.set()
                        for cb in self.tx_callbacks:
                            cb()
                        return

                    if self.txfc_bs != 0 and self.tx_bs >= self.txfc_bs:
                        # stop and wait for FC
                        self.tx_state = ISOTP_WAIT_FC
                        self.tx_timer.set_timeout(self.fc_timeout,
                                                  self._tx_timer_handler)
                        return

                    if self.tx_gap == 0:
                        continue
                    else:
                        # wait for the separation time before the next CF.
                        # BUGFIX: a 'return' was missing here; without it the
                        # loop kept sending immediately (ignoring STmin) and
                        # the next set_timeout() raised "Timer was already
                        # started", killing the timer thread.
                        self.tx_timer.set_timeout(self.tx_gap,
                                                  self._tx_timer_handler)
                        return

    def on_recv(self, cf):
        """Function that must be called every time a CAN frame is received, to
        advance the state machine."""

        data = bytes(cf.data)

        if len(data) < 2:
            return

        ae = 0
        if self.extended_rx_addr is not None:
            # frames for us must carry our extended address as first byte
            ae = 1
            if len(data) < 3:
                return
            if six.indexbytes(data, 0) != self.extended_rx_addr:
                return

        n_pci = six.indexbytes(data, ae) & 0xf0

        if n_pci == N_PCI_FC:
            with self.tx_mutex:
                self._recv_fc(data[ae:])
        elif n_pci == N_PCI_SF:
            with self.rx_mutex:
                self._recv_sf(data[ae:])
        elif n_pci == N_PCI_FF:
            with self.rx_mutex:
                self._recv_ff(data[ae:])
        elif n_pci == N_PCI_CF:
            with self.rx_mutex:
                self._recv_cf(data[ae:])

    def _recv_fc(self, data):
        """Process a received 'Flow Control' frame"""
        if (self.tx_state != ISOTP_WAIT_FC and
                self.tx_state != ISOTP_WAIT_FIRST_FC):
            return 0

        self.tx_timer.cancel()

        if len(data) < 3:
            self.tx_state = ISOTP_IDLE
            self.tx_exception = "CF frame discarded because it was too short"
            self.tx_done.set()
            raise Scapy_Exception(self.tx_exception)

        # get communication parameters only from the first FC frame
        if self.tx_state == ISOTP_WAIT_FIRST_FC:
            self.txfc_bs = six.indexbytes(data, 1)
            self.txfc_stmin = six.indexbytes(data, 2)

        if ((self.txfc_stmin > 0x7F) and
                ((self.txfc_stmin < 0xF1) or (self.txfc_stmin > 0xF9))):
            # reserved STmin values are treated as the maximum (127 ms)
            self.txfc_stmin = 0x7F

        if six.indexbytes(data, 2) <= 127:
            # STmin 0x00-0x7F: milliseconds
            tx_gap = six.indexbytes(data, 2) / 1000.0
        elif 0xf1 <= six.indexbytes(data, 2) <= 0xf9:
            # STmin 0xF1-0xF9: 100-900 microseconds
            tx_gap = (six.indexbytes(data, 2) & 0x0f) / 10000.0
        else:
            tx_gap = 0
        self.tx_gap = tx_gap

        self.tx_state = ISOTP_WAIT_FC

        isotp_fc = six.indexbytes(data, 0) & 0x0f

        if isotp_fc == ISOTP_FC_CTS:
            self.tx_bs = 0
            self.tx_state = ISOTP_SENDING
            # start cyclic timer for sending CF frame
            self.tx_timer.set_timeout(self.tx_gap, self._tx_timer_handler)
        elif isotp_fc == ISOTP_FC_WT:
            # start timer to wait for next FC frame
            self.tx_state = ISOTP_WAIT_FC
            self.tx_timer.set_timeout(self.fc_timeout, self._tx_timer_handler)
        elif isotp_fc == ISOTP_FC_OVFLW:
            # overflow in receiver side
            self.tx_state = ISOTP_IDLE
            self.tx_exception = "Overflow happened at the receiver side"
            self.tx_done.set()
            raise Scapy_Exception(self.tx_exception)
        else:
            self.tx_state = ISOTP_IDLE
            self.tx_exception = "Unknown FC frame type"
            self.tx_done.set()
            raise Scapy_Exception(self.tx_exception)

        return 0

    def _recv_sf(self, data):
        """Process a received 'Single Frame' frame"""
        self.rx_timer.cancel()
        if self.rx_state != ISOTP_WAIT_DATA:
            pass
        if self.rx_state != ISOTP_IDLE:
            warning("RX state was reset because single frame was received")
            self.rx_state = ISOTP_IDLE

        length = six.indexbytes(data, 0) & 0xf
        if len(data) - 1 < length:
            return 1

        msg = data[1:1 + length]
        self.rx_queue.put(msg)
        for cb in self.rx_callbacks:
            cb(msg)
        self.call_release()
        return 0

    def _recv_ff(self, data):
        """Process a received 'First Frame' frame"""
        self.rx_timer.cancel()
        if self.rx_state != ISOTP_IDLE:
            warning("RX state was reset because first frame was received")
            self.rx_state = ISOTP_IDLE

        if len(data) < 7:
            return 1
        self.rx_ll_dl = len(data)

        # get the FF_DL
        self.rx_len = (six.indexbytes(data, 0) & 0x0f) * 256 + six.indexbytes(
            data, 1)
        ff_pci_sz = 2

        # Check for FF_DL escape sequence supporting 32 bit PDU length
        if self.rx_len == 0:
            # FF_DL = 0 => get real length from next 4 bytes
            self.rx_len = six.indexbytes(data, 2) << 24
            self.rx_len += six.indexbytes(data, 3) << 16
            self.rx_len += six.indexbytes(data, 4) << 8
            self.rx_len += six.indexbytes(data, 5)
            ff_pci_sz = 6

        # copy the first received data bytes
        data_bytes = data[ff_pci_sz:]
        self.rx_idx = len(data_bytes)
        self.rx_buf = data_bytes

        # initial setup for this pdu reception
        self.rx_sn = 1
        self.rx_state = ISOTP_WAIT_DATA

        # in listen-only mode no flow control frames are created
        if not self.listen_only:
            # send our first FC frame
            load = self.ea_hdr
            load += struct.pack("BBB", N_PCI_FC, self.rxfc_bs, self.rxfc_stmin)
            self.can_send(load)

        # wait for a CF
        self.rx_bs = 0
        self.rx_timer.set_timeout(self.cf_timeout, self._rx_timer_handler)
        return 0

    def _recv_cf(self, data):
        """Process a received 'Consecutive Frame' frame"""
        if self.rx_state != ISOTP_WAIT_DATA:
            return 0

        self.rx_timer.cancel()

        # CFs are never longer than the FF
        if len(data) > self.rx_ll_dl:
            return 1

        # CFs have usually the LL_DL length
        if len(data) < self.rx_ll_dl:
            # this is only allowed for the last CF
            if self.rx_len - self.rx_idx > self.rx_ll_dl:
                warning("Received a CF with insufficient length")
                return 1

        if six.indexbytes(data, 0) & 0x0f != self.rx_sn:
            # Wrong sequence number
            warning("RX state was reset because wrong sequence number was "
                    "received")
            self.rx_state = ISOTP_IDLE
            return 1

        self.rx_sn = (self.rx_sn + 1) % 16
        self.rx_buf += data[1:]
        self.rx_idx = len(self.rx_buf)

        if self.rx_idx >= self.rx_len:
            # we are done
            self.rx_buf = self.rx_buf[0:self.rx_len]
            self.rx_state = ISOTP_IDLE
            self.rx_queue.put(self.rx_buf)
            for cb in self.rx_callbacks:
                cb(self.rx_buf)
            self.call_release()
            self.rx_buf = None
            return 0

        # perform blocksize handling, if enabled
        if self.rxfc_bs != 0:
            self.rx_bs += 1

            # check if we reached the end of the block
            if self.rx_bs >= self.rxfc_bs and not self.listen_only:
                # send our FC frame
                load = self.ea_hdr
                load += struct.pack("BBB", N_PCI_FC, self.rxfc_bs,
                                    self.rxfc_stmin)
                self.can_send(load)

        # wait for another CF
        self.rx_timer.set_timeout(self.cf_timeout, self._rx_timer_handler)
        return 0

    def begin_send(self, x):
        """Begins sending an ISOTP message. This method does not block."""
        with self.tx_mutex:
            if self.tx_state != ISOTP_IDLE:
                raise Scapy_Exception("Socket is already sending, retry later")

            self.tx_done.clear()
            self.tx_exception = None
            self.tx_state = ISOTP_SENDING

            length = len(x)
            if length > ISOTP_MAX_DLEN_2015:
                raise Scapy_Exception("Too much data for ISOTP message")

            if len(self.ea_hdr) + length <= 7:
                # send a single frame
                data = self.ea_hdr
                data += struct.pack("B", length)
                data += x
                self.tx_state = ISOTP_IDLE
                self.can_send(data)
                self.tx_done.set()
                for cb in self.tx_callbacks:
                    cb()
                return

            # send the first frame
            data = self.ea_hdr
            if length > ISOTP_MAX_DLEN:
                # 32-bit length escape sequence
                data += struct.pack(">HI", 0x1000, length)
            else:
                data += struct.pack(">H", 0x1000 | length)
            load = x[0:8 - len(data)]
            data += load
            self.can_send(data)

            self.tx_buf = x
            self.tx_sn = 1
            self.tx_bs = 0
            self.tx_idx = len(load)

            self.tx_state = ISOTP_WAIT_FIRST_FC
            self.tx_timer.set_timeout(self.fc_timeout, self._tx_timer_handler)

    def send(self, p):
        """Send an ISOTP frame and block until the message is sent or an error
        happens."""
        with self.send_mutex:
            self.begin_send(p)

            # Wait until the tx callback is called
            self.tx_done.wait()
            if self.tx_exception is not None:
                raise Scapy_Exception(self.tx_exception)
            return

    def recv(self, timeout=None):
        """Receive an ISOTP frame, blocking if none is available in the buffer
        for at most 'timeout' seconds."""

        try:
            # block only when timeout is None (forever) or positive
            return self.rx_queue.get(timeout is None or timeout > 0, timeout)
        except queue.Empty:
            return None

    def check_recv(self):
        """Implementation for SelectableObject"""
        return not self.rx_queue.empty()
if six.PY3 and LINUX:
from scapy.arch.linux import get_last_packet_timestamp, SIOCGIFINDEX
"""ISOTPNativeSocket definitions:"""
CAN_ISOTP = 6 # ISO 15765-2 Transport Protocol
SOL_CAN_BASE = 100 # from can.h
SOL_CAN_ISOTP = SOL_CAN_BASE + CAN_ISOTP
# /* for socket options affecting the socket (not the global system) */
CAN_ISOTP_OPTS = 1 # /* pass struct can_isotp_options */
CAN_ISOTP_RECV_FC = 2 # /* pass struct can_isotp_fc_options */
# /* sockopts to force stmin timer values for protocol regression tests */
CAN_ISOTP_TX_STMIN = 3 # /* pass __u32 value in nano secs */
CAN_ISOTP_RX_STMIN = 4 # /* pass __u32 value in nano secs */
CAN_ISOTP_LL_OPTS = 5 # /* pass struct can_isotp_ll_options */
CAN_ISOTP_LISTEN_MODE = 0x001 # /* listen only (do not send FC) */
CAN_ISOTP_EXTEND_ADDR = 0x002 # /* enable extended addressing */
CAN_ISOTP_TX_PADDING = 0x004 # /* enable CAN frame padding tx path */
CAN_ISOTP_RX_PADDING = 0x008 # /* enable CAN frame padding rx path */
CAN_ISOTP_CHK_PAD_LEN = 0x010 # /* check received CAN frame padding */
CAN_ISOTP_CHK_PAD_DATA = 0x020 # /* check received CAN frame padding */
CAN_ISOTP_HALF_DUPLEX = 0x040 # /* half duplex error state handling */
CAN_ISOTP_FORCE_TXSTMIN = 0x080 # /* ignore stmin from received FC */
CAN_ISOTP_FORCE_RXSTMIN = 0x100 # /* ignore CFs depending on rx stmin */
CAN_ISOTP_RX_EXT_ADDR = 0x200 # /* different rx extended addressing */
# /* default values */
CAN_ISOTP_DEFAULT_FLAGS = 0
CAN_ISOTP_DEFAULT_EXT_ADDRESS = 0x00
CAN_ISOTP_DEFAULT_PAD_CONTENT = 0xCC # /* prevent bit-stuffing */
CAN_ISOTP_DEFAULT_FRAME_TXTIME = 0
CAN_ISOTP_DEFAULT_RECV_BS = 0
CAN_ISOTP_DEFAULT_RECV_STMIN = 0x00
CAN_ISOTP_DEFAULT_RECV_WFTMAX = 0
CAN_ISOTP_DEFAULT_LL_MTU = CAN_MTU
CAN_ISOTP_DEFAULT_LL_TX_DL = CAN_MAX_DLEN
CAN_ISOTP_DEFAULT_LL_TX_FLAGS = 0
    class SOCKADDR(ctypes.Structure):
        # Generic socket address as passed to bind(2)/ioctl(2).
        # See /usr/include/i386-linux-gnu/bits/socket.h for original struct
        _fields_ = [("sa_family", ctypes.c_uint16),
                    ("sa_data", ctypes.c_char * 14)]
    class TP(ctypes.Structure):
        # ISOTP transport-protocol address pair (rx and tx CAN ids).
        # This struct is only used within the SOCKADDR_CAN struct
        _fields_ = [("rx_id", ctypes.c_uint32),
                    ("tx_id", ctypes.c_uint32)]
    class ADDR_INFO(ctypes.Union):
        # This struct is only used within the SOCKADDR_CAN struct
        # This union is to future proof for future can address information
        _fields_ = [("tp", TP)]
    class SOCKADDR_CAN(ctypes.Structure):
        # CAN-specific sockaddr: address family, interface index and
        # protocol-dependent addressing info.
        # See /usr/include/linux/can.h for original struct
        _fields_ = [("can_family", ctypes.c_uint16),
                    ("can_ifindex", ctypes.c_int),
                    ("can_addr", ADDR_INFO)]
    class IFREQ(ctypes.Structure):
        # Interface request block for the SIOCGIFINDEX ioctl: name in,
        # interface index out.
        # The two fields in this struct were originally unions.
        # See /usr/include/net/if.h for original struct
        _fields_ = [("ifr_name", ctypes.c_char * 16),
                    ("ifr_ifindex", ctypes.c_int)]
class ISOTPNativeSocket(SuperSocket):
desc = "read/write packets at a given CAN interface using CAN_ISOTP " \
"socket "
can_isotp_options_fmt = "@2I4B"
can_isotp_fc_options_fmt = "@3B"
can_isotp_ll_options_fmt = "@3B"
sockaddr_can_fmt = "@H3I"
def __build_can_isotp_options(
self,
flags=CAN_ISOTP_DEFAULT_FLAGS,
frame_txtime=0,
ext_address=CAN_ISOTP_DEFAULT_EXT_ADDRESS,
txpad_content=0,
rxpad_content=0,
rx_ext_address=CAN_ISOTP_DEFAULT_EXT_ADDRESS):
return struct.pack(self.can_isotp_options_fmt,
flags,
frame_txtime,
ext_address,
txpad_content,
rxpad_content,
rx_ext_address)
# == Must use native not standard types for packing ==
# struct can_isotp_options {
# __u32 flags; /* set flags for isotp behaviour. */
# /* __u32 value : flags see below */
#
# __u32 frame_txtime; /* frame transmission time (N_As/N_Ar) */
# /* __u32 value : time in nano secs */
#
# __u8 ext_address; /* set address for extended addressing */
# /* __u8 value : extended address */
#
# __u8 txpad_content; /* set content of padding byte (tx) */
# /* __u8 value : content on tx path */
#
# __u8 rxpad_content; /* set content of padding byte (rx) */
# /* __u8 value : content on rx path */
#
# __u8 rx_ext_address; /* set address for extended addressing */
# /* __u8 value : extended address (rx) */
# };
def __build_can_isotp_fc_options(self,
bs=CAN_ISOTP_DEFAULT_RECV_BS,
stmin=CAN_ISOTP_DEFAULT_RECV_STMIN,
wftmax=CAN_ISOTP_DEFAULT_RECV_WFTMAX):
return struct.pack(self.can_isotp_fc_options_fmt,
bs,
stmin,
wftmax)
# == Must use native not standard types for packing ==
# struct can_isotp_fc_options {
#
# __u8 bs; /* blocksize provided in FC frame */
# /* __u8 value : blocksize. 0 = off */
#
# __u8 stmin; /* separation time provided in FC frame */
# /* __u8 value : */
# /* 0x00 - 0x7F : 0 - 127 ms */
# /* 0x80 - 0xF0 : reserved */
# /* 0xF1 - 0xF9 : 100 us - 900 us */
# /* 0xFA - 0xFF : reserved */
#
# __u8 wftmax; /* max. number of wait frame transmiss. */
# /* __u8 value : 0 = omit FC N_PDU WT */
# };
def __build_can_isotp_ll_options(self,
mtu=CAN_ISOTP_DEFAULT_LL_MTU,
tx_dl=CAN_ISOTP_DEFAULT_LL_TX_DL,
tx_flags=CAN_ISOTP_DEFAULT_LL_TX_FLAGS
):
return struct.pack(self.can_isotp_ll_options_fmt,
mtu,
tx_dl,
tx_flags)
# == Must use native not standard types for packing ==
# struct can_isotp_ll_options {
#
# __u8 mtu; /* generated & accepted CAN frame type */
# /* __u8 value : */
# /* CAN_MTU (16) -> standard CAN 2.0 */
# /* CANFD_MTU (72) -> CAN FD frame */
#
# __u8 tx_dl; /* tx link layer data length in bytes */
# /* (configured maximum payload length) */
# /* __u8 value : 8,12,16,20,24,32,48,64 */
# /* => rx path supports all LL_DL values */
#
# __u8 tx_flags; /* set into struct canfd_frame.flags */
# /* at frame creation: e.g. CANFD_BRS */
# /* Obsolete when the BRS flag is fixed */
# /* by the CAN netdriver configuration */
# };
def __get_sock_ifreq(self, sock, iface):
socket_id = ctypes.c_int(sock.fileno())
ifr = IFREQ()
ifr.ifr_name = iface.encode('ascii')
ret = LIBC.ioctl(socket_id, SIOCGIFINDEX, ctypes.byref(ifr))
if ret < 0:
m = u'Failure while getting "{}" interface index.'.format(
iface)
raise Scapy_Exception(m)
return ifr
def __bind_socket(self, sock, iface, sid, did):
socket_id = ctypes.c_int(sock.fileno())
ifr = self.__get_sock_ifreq(sock, iface)
if sid > 0x7ff:
sid = sid | socket.CAN_EFF_FLAG
if did > 0x7ff:
did = did | socket.CAN_EFF_FLAG
# select the CAN interface and bind the socket to it
addr = SOCKADDR_CAN(ctypes.c_uint16(socket.PF_CAN),
ifr.ifr_ifindex,
ADDR_INFO(TP(ctypes.c_uint32(did),
ctypes.c_uint32(sid))))
error = LIBC.bind(socket_id, ctypes.byref(addr),
ctypes.sizeof(addr))
if error < 0:
warning("Couldn't bind socket")
def __set_option_flags(self, sock, extended_addr=None,
extended_rx_addr=None,
listen_only=False,
padding=False,
transmit_time=100):
option_flags = CAN_ISOTP_DEFAULT_FLAGS
if extended_addr is not None:
option_flags = option_flags | CAN_ISOTP_EXTEND_ADDR
else:
extended_addr = CAN_ISOTP_DEFAULT_EXT_ADDRESS
if extended_rx_addr is not None:
option_flags = option_flags | CAN_ISOTP_RX_EXT_ADDR
else:
extended_rx_addr = CAN_ISOTP_DEFAULT_EXT_ADDRESS
if listen_only:
option_flags = option_flags | CAN_ISOTP_LISTEN_MODE
if padding:
option_flags = option_flags | CAN_ISOTP_TX_PADDING \
| CAN_ISOTP_RX_PADDING
sock.setsockopt(SOL_CAN_ISOTP,
CAN_ISOTP_OPTS,
self.__build_can_isotp_options(
frame_txtime=transmit_time,
flags=option_flags,
ext_address=extended_addr,
rx_ext_address=extended_rx_addr))
def __init__(self,
iface=None,
sid=0,
did=0,
extended_addr=None,
extended_rx_addr=None,
listen_only=False,
padding=False,
transmit_time=100,
basecls=ISOTP):
self.iface = conf.contribs['NativeCANSocket']['iface'] \
if iface is None else iface
self.can_socket = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM,
CAN_ISOTP)
self.__set_option_flags(self.can_socket,
extended_addr,
extended_rx_addr,
listen_only,
padding,
transmit_time)
self.src = sid
self.dst = did
self.exsrc = extended_addr
self.exdst = extended_rx_addr
self.can_socket.setsockopt(SOL_CAN_ISOTP,
CAN_ISOTP_RECV_FC,
self.__build_can_isotp_fc_options())
self.can_socket.setsockopt(SOL_CAN_ISOTP,
CAN_ISOTP_LL_OPTS,
self.__build_can_isotp_ll_options())
self.__bind_socket(self.can_socket, iface, sid, did)
self.ins = self.can_socket
self.outs = self.can_socket
if basecls is None:
warning('Provide a basecls ')
self.basecls = basecls
def recv_raw(self, x=0xffff):
"""
Receives a packet, then returns a tuple containing
(cls, pkt_data, time)
""" # noqa: E501
try:
pkt = self.can_socket.recvfrom(x)[0]
except BlockingIOError: # noqa: F821
warning('Captured no data, socket in non-blocking mode.')
return None
except socket.timeout:
warning('Captured no data, socket read timed out.')
return None
except OSError:
# something bad happened (e.g. the interface went down)
warning("Captured no data.")
return None
ts = get_last_packet_timestamp(self.can_socket)
return self.basecls, pkt, ts
def recv(self, x=0xffff):
msg = SuperSocket.recv(self, x)
if hasattr(msg, "src"):
msg.src = self.src
if hasattr(msg, "dst"):
msg.dst = self.dst
if hasattr(msg, "exsrc"):
msg.exsrc = self.exsrc
if hasattr(msg, "exdst"):
msg.exdst = self.exdst
return msg
    # Only exported when it could actually be defined (Python 3 on Linux).
    __all__.append("ISOTPNativeSocket")
# When can-isotp kernel module support is enabled (flag computed earlier
# in this file), make the native socket the default ISOTPSocket.
if USE_CAN_ISOTP_KERNEL_MODULE:
    ISOTPSocket = ISOTPNativeSocket
# ###################################################################
# #################### ISOTPSCAN ####################################
# ###################################################################
def send_multiple_ext(sock, ext_id, packet, number_of_packets):
    """Send the same first frame under a run of extended addresses.

    Args:
        sock: socket for the CAN interface
        ext_id: first extended address to use
        packet: packet to send (its extended_address field is mutated)
        number_of_packets: how many additional addresses to cover

    Used by the extended scan: transmits *packet* once for every
    extended address from ext_id up to ext_id + number_of_packets
    (capped at 255, inclusive).  Only the extended address is iterated,
    NOT the CAN identifier of the packet itself.
    """
    last_addr = min(ext_id + number_of_packets, 255)
    current = ext_id
    while current <= last_addr:
        packet.extended_address = current
        sock.send(packet)
        current += 1
def get_isotp_packet(identifier=0x0, extended=False):
    """Craft the dummy ISOTP first frame used for scanning.

    Args:
        identifier: CAN identifier of the crafted packet
        extended: build the packet with an extended address prefix
    """
    if extended:
        packet = ISOTPHeaderEA() / ISOTP_FF()
        packet.extended_address = 0
        # One payload byte less: the extended address occupies it.
        packet.data = b'\x00\x00\x00\x00\x00'
    else:
        packet = ISOTPHeader() / ISOTP_FF()
        packet.data = b'\x00\x00\x00\x00\x00\x00'
    packet.identifier = identifier
    # Announce a 100 byte message so listeners answer with flow control.
    packet.message_size = 100
    return packet
def filter_periodic_packets(packet_dict, verbose=False):
    """Remove periodic background packets from *packet_dict* (in place).

    Args:
        packet_dict: dict mapping Send-to-ID -> (received packet, Recv_ID)
        verbose: print a line for every identifier filtered out

    A receive identifier that produced three or more packets with an
    (almost) constant inter-arrival time is considered periodic bus
    traffic rather than a genuine flow-control answer; every entry with
    that identifier is deleted from packet_dict.
    """
    # Group the Send-to-IDs and packets by their receive identifier.
    filter_dict = {}
    for key, (pkt, idn) in packet_dict.items():
        if idn not in filter_dict:
            filter_dict[idn] = ([key], [pkt])
        else:
            key_lst, pkt_lst = filter_dict[idn]
            filter_dict[idn] = (key_lst + [key], pkt_lst + [pkt])
    for idn, (key_lst, pkt_lst) in filter_dict.items():
        if len(pkt_lst) < 3:
            # Too few samples to decide periodicity.
            continue
        # Inter-arrival times between consecutive packets.
        tg = [p1.time - p2.time for p1, p2 in zip(pkt_lst[1:], pkt_lst[:-1])]
        if all(abs(t1 - t2) < 0.001 for t1, t2 in zip(tg[1:], tg[:-1])):
            if verbose:
                # Bug fix: the original format call was missing "% idn",
                # so it printed the literal "0x%03x".
                print("[i] Identifier 0x%03x seems to be periodic. "
                      "Filtered." % idn)
            for k in key_lst:
                del packet_dict[k]
def get_isotp_fc(id_value, id_list, noise_ids, extended, packet,
                 verbose=False):
    """Sniff callback that records flow-control answers during a scan.

    Args:
        id_value: identifier under test (the one the probe was sent to)
        id_list: collector — either a dict mapping id_value to
            (packet, received identifier), or a plain list of id_values
        noise_ids: identifiers to ignore; responders without a valid FC
            frame get appended here
        extended: whether the scan uses extended addressing
        packet: the sniffed CAN packet
        verbose: print a line for every flow-control frame found
    """
    # Frames with flags set (error/remote frames) are never ISOTP FC.
    if packet.flags:
        return
    if noise_ids is not None and packet.identifier in noise_ids:
        return
    try:
        # With extended addressing the PCI byte is shifted by one.
        offset = 1 if extended else 0
        pci_type = orb(packet.data[offset]) >> 4
        fc_status = orb(packet.data[offset]) & 0x0f
        if pci_type != 3 or not 0 <= fc_status <= 2:
            # Talks on this identifier, but not a valid FC frame: noise.
            noise_ids.append(packet.identifier)
            return
        if verbose:
            print("[+] Found flow-control frame from identifier 0x%03x"
                  " when testing identifier 0x%03x" %
                  (packet.identifier, id_value))
        if isinstance(id_list, dict):
            id_list[id_value] = (packet, packet.identifier)
        elif isinstance(id_list, list):
            id_list.append(id_value)
        else:
            raise TypeError("Unknown type of id_list")
    except Exception as e:
        print("[!] Unknown message Exception: %s on packet: %s" %
              (e, repr(packet)))
def scan(sock, scan_range=range(0x800), noise_ids=None, sniff_time=0.1,
         verbose=False):
    """ISOTP scan over normal (non-extended) identifiers.

    Args:
        sock: socket for the can interface
        scan_range: iterable of CAN identifiers to probe
            (default 0x0 - 0x7ff)
        noise_ids: identifiers to ignore as background noise
        sniff_time: seconds to wait for flow-control responses after
            each first frame
        verbose: print information during the scan

    Returns:
        dict mapping Send-to-ID -> (received packet, Recv_ID)
    """
    found = dict()
    for probe_id in scan_range:
        # The probe is sent by started_callback, i.e. once sniffing runs.
        sock.sniff(prn=lambda pkt, i=probe_id: get_isotp_fc(
                       i, found, noise_ids, False, pkt, verbose),
                   timeout=sniff_time,
                   started_callback=lambda i=probe_id: sock.send(
                       get_isotp_packet(i)))
    return found
def scan_extended(sock, scan_range=range(0x800), scan_block_size=100,
                  noise_ids=None, sniff_time=0.1, verbose=False):
    """Scan for ISOTP endpoints that use extended addressing.

    Args:
        sock: socket for can interface
        scan_range: iterable of CAN identifiers to probe
            (default 0x0 - 0x7ff)
        scan_block_size: count of packets send at once
        noise_ids: list of packet IDs which will not be considered when
            received during scan
        sniff_time: time the scan waits for isotp flow control responses
            after sending a first frame
        verbose: displays information during scan

    For every identifier a fast pass sends blocks of first frames with
    increasing extended addresses; every block that triggered an answer
    is then re-scanned slowly with single packets, one extended address
    (0 - 255) at a time.

    Returns:
        dict with Send-to-ID ((identifier << 8) + ext address) as key
        and a tuple (received packet, Recv_ID) as value.
    """
    return_values = dict()
    scan_block_size = scan_block_size or 1
    for value in scan_range:
        pkt = get_isotp_packet(value, extended=True)
        id_list = []
        for extended_id in range(0, 256, scan_block_size):
            # Bug fix: the original passed the *result* of
            # send_multiple_ext() (None) as started_callback, so the
            # probes were transmitted before sniffing had started and
            # fast answers could be missed.  Defer the send with a
            # lambda, exactly like scan() does.
            sock.sniff(prn=lambda p, eid=extended_id: get_isotp_fc(
                           eid, id_list, noise_ids, True, p, verbose),
                       timeout=sniff_time * 3,
                       started_callback=lambda eid=extended_id:
                       send_multiple_ext(sock, eid, pkt, scan_block_size))
        # sleep to prevent flooding
        time.sleep(1)
        # remove duplicate IDs
        id_list = list(set(id_list))
        for extended_id in id_list:
            for ext_id in range(extended_id, min(extended_id +
                                                 scan_block_size, 256)):
                pkt.extended_address = ext_id
                full_id = (value << 8) + ext_id
                sock.sniff(prn=lambda p, fid=full_id: get_isotp_fc(
                               fid, return_values, noise_ids, True, p,
                               verbose),
                           timeout=sniff_time,
                           started_callback=lambda: sock.send(pkt))
    return return_values
def ISOTPScan(sock, scan_range=range(0x7ff + 1), extended_addressing=False,
              noise_listen_time=2,
              sniff_time=0.1,
              output_format=None,
              can_interface="can0",
              verbose=False):
    """Scan a bus for ISOTP sockets and return the findings.

    Args:
        sock: CANSocket object to communicate with the bus under scan
        scan_range: iterable of CAN identifiers to scan
            (default 0x0 - 0x7ff)
        extended_addressing: scan with ISOTP extended addressing
        noise_listen_time: seconds to record default bus traffic
        sniff_time: seconds the scan waits for flow-control responses
            after sending a first frame
        output_format: "text" (human readable), "code" (copy&paste-able
            python) or None for a list of ready-made ISOTPSockets
        can_interface: interface string used for the code/socket output
        verbose: print information during the scan
    """
    if verbose:
        print("Filtering background noise...")
    # A dummy frame usually provokes chatter from regular bus
    # participants, so their identifiers can be recorded as noise first.
    dummy_pkt = CAN(identifier=0x123,
                    data=b'\xaa\xbb\xcc\xdd\xee\xff\xaa\xbb')
    background_pkts = sock.sniff(timeout=noise_listen_time,
                                 started_callback=lambda:
                                 sock.send(dummy_pkt))
    noise_ids = list(set(pkt.identifier for pkt in background_pkts))
    # Both scanners share the same calling convention.
    scanner = scan_extended if extended_addressing else scan
    found_packets = scanner(sock, scan_range, noise_ids=noise_ids,
                            sniff_time=sniff_time, verbose=verbose)
    filter_periodic_packets(found_packets, verbose)
    if output_format == "text":
        return generate_text_output(found_packets)
    if output_format == "code":
        return generate_code_output(found_packets, can_interface)
    return generate_isotp_list(found_packets, can_interface)
def generate_text_output(found_packets):
    """Render the scan results as a human readable report.

    Args:
        found_packets: result of the `scan` or `scan_extended` function
    """
    if not found_packets:
        return "No packets found."
    pieces = ["\nFound %s ISOTP-FlowControl Packet(s):" % len(found_packets)]
    for send_to in found_packets:
        resp = found_packets[send_to][0]
        if send_to > 0x7ff:
            # Extended scan keys are (identifier << 8) + extended address.
            normal_id, ext_addr = divmod(send_to, 256)
            recv_ext = hex(orb(resp.data[0]))
            pieces.append("\nSend to ID: %s"
                          "\nSend to extended ID: %s"
                          "\nReceived ID: %s"
                          "\nReceived extended ID: %s"
                          "\nMessage: %s" %
                          (hex(normal_id), hex(ext_addr),
                           hex(resp.identifier), recv_ext,
                           repr(resp)))
        else:
            pieces.append("\nSend to ID: %s"
                          "\nReceived ID: %s"
                          "\nMessage: %s" %
                          (hex(send_to),
                           hex(resp.identifier),
                           repr(resp)))
        # A full 8 byte flow-control frame means the peer pads its frames.
        pieces.append("\nPadding enabled" if resp.length == 8
                      else "\nNo Padding")
        pieces.append("\n")
    return "".join(pieces)
def generate_code_output(found_packets, can_interface):
    """Render the scan results as copy&paste-able python code.

    Args:
        found_packets: result of the `scan` or `scan_extended` function
        can_interface: description string for a CAN interface to be
            used for the creation of the output.
    """
    if not found_packets:
        return ""
    header = "\n\nimport can\n" \
             "conf.contribs['CANSocket'] = {'use-python-can': %s}\n" \
             "load_contrib('cansocket')\n" \
             "load_contrib('isotp')\n\n" % PYTHON_CAN
    body = []
    for send_to in found_packets:
        resp = found_packets[send_to][0]
        padded = resp.length == 8
        if send_to > 0x7ff:
            # Extended scan keys are (identifier << 8) + extended address.
            normal_id, ext_addr = divmod(send_to, 256)
            body.append("ISOTPSocket(%s, sid=%s, did=%s, padding=%s, "
                        "extended_addr=%s, extended_rx_addr=%s, "
                        "basecls=ISOTP)\n" %
                        (can_interface, hex(normal_id),
                         hex(int(resp.identifier)),
                         padded,
                         hex(ext_addr),
                         hex(orb(resp.data[0]))))
        else:
            body.append("ISOTPSocket(%s, sid=%s, did=%s, padding=%s, "
                        "basecls=ISOTP)\n" %
                        (can_interface, hex(send_to),
                         hex(int(resp.identifier)),
                         padded))
    return header + "".join(body)
def generate_isotp_list(found_packets, can_interface):
    """Build ready-to-use ISOTPSocket objects for every scan result.

    Args:
        found_packets: result of the `scan` or `scan_extended` function
        can_interface: description string for a CAN interface to be
            used for the creation of the output.
    """
    sockets = []
    for send_to in found_packets:
        resp = found_packets[send_to][0]
        dest_id = resp.identifier
        # A full 8 byte flow-control frame means the peer pads frames.
        pad = resp.length == 8
        if send_to > 0x7ff:
            # Extended scan keys are (identifier << 8) + extended address.
            source_id = send_to >> 8
            source_ext = int(send_to - (source_id * 256))
            dest_ext = orb(resp.data[0])
            sockets.append(ISOTPSocket(can_interface, sid=source_id,
                                       extended_addr=source_ext,
                                       did=dest_id,
                                       extended_rx_addr=dest_ext,
                                       padding=pad,
                                       basecls=ISOTP))
        else:
            sockets.append(ISOTPSocket(can_interface, sid=send_to,
                                       did=dest_id, padding=pad,
                                       basecls=ISOTP))
    return sockets
|
mtury/scapy
|
scapy/contrib/isotp.py
|
Python
|
gpl-2.0
| 75,260 | 0 |
"""//***********************************************************************
* Exp6_LineFollowing_IRSensors -- RedBot Experiment 6
*
* This code reads the three line following sensors on A3, A6, and A7
* and prints them out to the Serial Monitor. Upload this example to your
* RedBot and open up the Serial Monitor by clicking the magnifying glass
* in the upper-right hand corner.
*
* This sketch was written by SparkFun Electronics,with lots of help from
* the Arduino community. This code is completely free for any use.
*
* 8 Oct 2013 M. Hord
* Revised, 31 Oct 2014 B. Huang
 * Revised, 2 Oct 2015 L. Mathews
***********************************************************************/"""
import sys
import signal
from pymata_aio.pymata3 import PyMata3
from library.redbot import RedBotSensor
WIFLY_IP_ADDRESS = None  # Leave set as None if not using WiFly
# NOTE(review): the next line unconditionally overrides the None above,
# so the WiFly branch is always taken; comment it out for USB/XBee use.
WIFLY_IP_ADDRESS = "10.0.1.18"  # If using a WiFly on the RedBot, set the ip address here.
if WIFLY_IP_ADDRESS:
    board = PyMata3(ip_address=WIFLY_IP_ADDRESS)
else:
    # Use a USB cable to RedBot or an XBee connection instead of WiFly.
    COM_PORT = None  # Use None for automatic com port detection, or set if needed i.e. "COM7"
    board = PyMata3(com_port=COM_PORT)
LEFT_LINE_FOLLOWER = 3  # pin number assignments for each IR sensor
CENTRE_LINE_FOLLOWER = 6
RIGHT_LINE_FOLLOWER = 7
# Analog IR reflectance sensors used for line following.
IR_sensor_1 = RedBotSensor(board, LEFT_LINE_FOLLOWER)
IR_sensor_2 = RedBotSensor(board, CENTRE_LINE_FOLLOWER)
IR_sensor_3 = RedBotSensor(board, RIGHT_LINE_FOLLOWER)
def signal_handler(sig, frame):
    """Ctrl-C handler: reset and shut down the RedBot, then exit."""
    print('\nYou pressed Ctrl+C')
    if board is None:
        sys.exit(0)
    # A board is connected: put it back into a clean state first.
    board.send_reset()
    board.shutdown()
    sys.exit(0)
def setup():
    """Register the Ctrl-C handler and greet the user."""
    signal.signal(signal.SIGINT, signal_handler)
    for line in ("Welcome to Experiment 6!", "------------------------"):
        print(line)
def loop():
    """Poll the three line-following IR sensors and print the readings."""
    board.sleep(0.1)
    readings = (IR_sensor_1.read(), IR_sensor_2.read(), IR_sensor_3.read())
    print("IR Sensor Readings: {}, {}, {}".format(*readings))
if __name__ == "__main__":
setup()
while True:
loop()
|
Rosebotics/pymata-aio
|
examples/sparkfun_redbot/sparkfun_experiments/Exp6_LineFollowing_IRSensors.py
|
Python
|
gpl-3.0
| 2,131 | 0.001877 |
from cavicapture import CaviCapture
from process import CaviProcess
import sys, os, getopt
import time, datetime
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Entry point: parse CLI options and run a calibration session.

    Supported options: -c <path> / --config <path> (ini file,
    default ./config.ini).
    """
    config_path = './config.ini'  # default
    try:
        # "c:" declares that -c takes an argument; the original "c" did
        # not, so -c could never carry a path.
        opts, args = getopt.getopt(sys.argv[1:], "c:", ["config="])
    except getopt.GetoptError:
        print("Argument error")
        sys.exit(2)
    for opt, arg in opts:
        # Bug fix: the original tested `opt in ("--config")`, which is a
        # substring match against a plain string (no tuple), so "-c"
        # matched and clobbered config_path with an empty value.
        if opt in ("-c", "--config"):
            config_path = arg
    calibrator = CaviCalibrate(config_path)
    calibrator.init_calibration()
class CaviCalibrate:
    """Captures a short series of images and quantifies sensor noise.

    Results (captures, difference images, histograms and logs) are
    written to a timestamped sub-directory under ./calibration.
    """

    def __init__(self, config_path):
        """Prepare the output directory and the capture/process helpers.

        :param config_path: path to the cavicapture ini configuration
        """
        self.output_dir = "./calibration"
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # One sub-directory per calibration run.
        self.output_dir = self.output_dir + "/" + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.cavi_capture = CaviCapture(config_path)
        self.cavi_capture.log_file = self.output_dir + "/capture.log.txt"
        self.cavi_capture.get_ini_config()
        self.cavi_capture.setup_gpio()
        self.cavi_capture.setup_camera()
        self.cavi_process = CaviProcess(self.output_dir)
        self.cavi_process.log_file = self.output_dir + "/process.log.txt"

    def init_calibration(self):
        """Capture four images with the lights on, then analyse them."""
        files = []
        self.cavi_capture.lights(True)
        time.sleep(3)  # Let lights settle
        for i in range(1, 5):
            files.append(self.capture_image("%s/image_%d.png" % (self.output_dir, i)))
        self.cavi_capture.lights(False)
        self.process_files(files)

    def process_files(self, files):
        """Build difference images from the two capture pairs and
        summarise each, plus their per-pixel minimum.

        :param files: list of four image paths, in capture order
        """
        img_group_1_diff = self.cavi_process.subtract_images(files[0], files[1])
        self.cavi_process.write_image(self.output_dir + "/image_group_1_diff.png", img_group_1_diff)
        self.summarise(img_group_1_diff, self.output_dir + "/image_group_1_diff_hist.png")
        img_group_2_diff = self.cavi_process.subtract_images(files[2], files[3])
        self.cavi_process.write_image(self.output_dir + "/image_group_2_diff.png", img_group_2_diff)
        self.summarise(img_group_2_diff, self.output_dir + "/image_group_2_diff_hist.png")
        # Per-pixel minimum of the two difference images: only noise that
        # shows up in both pairs, i.e. persistent sensor noise.
        groups_min = np.minimum(img_group_1_diff, img_group_2_diff)
        self.cavi_process.write_image(self.output_dir + "/groups_min.png", groups_min)
        self.summarise(groups_min, self.output_dir + "/groups_min_hist.png")

    def summarise(self, img, hist_path):
        """Log statistics over the non-zero pixels of *img* and save a
        histogram of the whole image to *hist_path*."""
        positives = img[img > 0]
        if positives.size == 0:
            # Bug fix: np.min/np.max raise on an empty selection when an
            # image pair shows no difference at all.
            self.cavi_process.log("No noise detected (all pixels zero)")
            return
        average_pixel = np.average(positives)
        max_pixel = np.max(positives)
        min_pixel = np.min(positives)
        total_area = len(positives)
        self.cavi_process.log("Noise max: " + str(max_pixel))
        self.cavi_process.log("Noise min: " + str(min_pixel))
        self.cavi_process.log("Noise average: " + str(average_pixel))
        self.cavi_process.log("Noise area: " + str(total_area))
        # Bug fix: start a fresh figure for every histogram; the original
        # kept drawing into the same implicit figure, so each saved
        # histogram contained the data of all previous summarise() calls.
        plt.figure()
        plt.hist(img.ravel(), max_pixel, [min_pixel, max_pixel])
        plt.savefig(hist_path)
        plt.close()

    def gen_file_path(self):
        """Return a fresh timestamped png path in the output directory."""
        return self.output_dir + "/" + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + ".png"

    def capture_image(self, file_path):
        """Capture a single png to *file_path* and return the path."""
        self.cavi_capture.camera.capture(file_path, 'png')
        return file_path
# Run a calibration session when executed as a script.
if __name__ == '__main__':
    main()
|
OpenSourceOV/cavicapture
|
calibrate.py
|
Python
|
gpl-3.0
| 3,982 | 0.010296 |
# -*- coding:utf-8 -*-
import re
# Phone number parsing (Python 2 example).
# The pattern captures area code, exchange, subscriber number and an
# optional extension, each separated by any run of non-digits.
phonePattern = re.compile(r'^(\d{3})\D*(\d{3})\D*(\d{4})\D*(\d*)$')
print phonePattern.search('80055512121234').groups()
# ('800', '555', '1212', '1234')
print phonePattern.search('800.555.1212 x1234').groups()
# ('800', '555', '1212', '1234')
print phonePattern.search('800-555-1212').groups()
# ('800', '555', '1212', '')
# The pattern is anchored at a leading digit, so the '(' prefix does not
# match and this search prints None.
print phonePattern.search('(800)5551212 x1234')
|
janusnic/21v-python
|
unit_13/re6.py
|
Python
|
mit
| 469 | 0.013544 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, bitcoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transactions in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
from decimal import Decimal
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
wait_until,
)
class MempoolPersistTest(BitcoinTestFramework):
    """Functional test for mempool persistence: mempool.dat dump/load
    across restarts and the -persistmempool command line option."""
    def set_test_params(self):
        # node1 runs with -persistmempool=0: no mempool dump on shutdown.
        self.num_nodes = 3
        self.extra_args = [[], ["-persistmempool=0"], []]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        """Run the scenario described in the module docstring."""
        self.log.debug("Send 5 transactions from node2 (to its own address)")
        tx_creation_time_lower = int(time.time())
        for i in range(5):
            last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        node2_balance = self.nodes[2].getbalance()
        self.sync_all()
        tx_creation_time_higher = int(time.time())
        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)
        self.log.debug("Prioritize a transaction on node0")
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'], fees['modified'])
        self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
        # The entry's recorded time must fall inside the window measured
        # around the sends above; it is re-checked after reload below.
        tx_creation_time = self.nodes[0].getmempoolentry(txid=last_txid)['time']
        assert_greater_than_or_equal(tx_creation_time, tx_creation_time_lower)
        assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time)
        self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
        self.stop_nodes()
        # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
        # Also don't store the mempool, to keep the datadir clean
        self.start_node(1, extra_args=["-persistmempool=0"])
        self.start_node(0)
        self.start_node(2)
        wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"], timeout=1)
        wait_until(lambda: self.nodes[2].getmempoolinfo()["loaded"], timeout=1)
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[2].getrawmempool()), 5)
        # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        self.log.debug('Verify prioritization is loaded correctly')
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
        self.log.debug('Verify time is loaded correctly')
        assert_equal(tx_creation_time, self.nodes[0].getmempoolentry(txid=last_txid)['time'])
        # Verify accounting of mempool transactions after restart is correct
        self.nodes[2].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
        assert_equal(node2_balance, self.nodes[2].getbalance())
        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        self.stop_nodes()
        self.start_node(0)
        wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
        mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
        self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)
        self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        wait_until(lambda: self.nodes[1].getmempoolinfo()["loaded"])
        assert_equal(len(self.nodes[1].getrawmempool()), 5)
        self.log.debug("Prevent bitcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
        # to test the exception we are creating a tmp folder called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        os.mkdir(mempooldotnew1)
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
        os.rmdir(mempooldotnew1)
# Run the functional test when executed directly.
if __name__ == '__main__':
    MempoolPersistTest().main()
|
tjps/bitcoin
|
test/functional/mempool_persist.py
|
Python
|
mit
| 6,912 | 0.002604 |
#!/usr/bin/env python3
"""Demonstrate os.walk by dumping the directory tree under /var/log."""
import os

# Explain the three values yielded by os.walk before printing them.
for line in ("root prints out directories only from what you specified",
             "dirs prints out sub-directories from root",
             "files prints out all files from root and directories",
             "*" * 20):
    print(line)

for root, dirs, files in os.walk("/var/log"):
    for item in (root, dirs, files):
        print(item)
|
talapus/Ophidian
|
Academia/Filesystem/demo_os_walk.py
|
Python
|
bsd-3-clause
| 483 | 0.00207 |
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2020 Didotech S.r.l. (<http://www.didotech.com/>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: module metadata read at install/update time.
{
    "name": "Purchase Order Lines With Combined Discounts",
    "author": "Didotech.com",
    "version": "1.0.0",
    "category": "Generic Modules/Sales & Purchases",
    'description': """ """,
    # Modules that must be installed before this one.
    "depends": [
        "stock",
        'product',
        "purchase",
        "purchase_discount",
    ],
    # Data/view files loaded when the module is installed or updated.
    "data": [
        "views/purchase_discount_view.xml",
    ],
    "active": False,
    "installable": True
}
iw3hxn/LibrERP
|
purchase_discount_combined/__openerp__.py
|
Python
|
agpl-3.0
| 1,361 | 0 |
# -*- coding: utf-8 -*-
###############################################################################
# License, author and contributors information in: #
# __manifest__.py file at the root folder of this module. #
###############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
from itertools import groupby
from operator import itemgetter
from collections import defaultdict
class WizardValuationStockInventory(models.TransientModel):
    """Wizard printing an inventory-valuation PDF for a stock location.

    The user picks a location (and optionally a product sub-category); the
    wizard collects the matching stock quants, merges them into one line
    per product/location and renders them through a QWeb PDF report.
    """
    _name = 'wizard.valuation.stock.inventory'
    _description = 'Wizard that opens the stock Inventory by Location'

    location_id = fields.Many2one('stock.location', string='Location', required=True)
    product_categ_id = fields.Many2one('product.category', string='Category')
    product_sub_categ_id = fields.Many2one('product.category', string='Sub Category')
    line_ids = fields.One2many('wizard.valuation.stock.inventory.line', 'wizard_id', required=True, ondelete='cascade')

    @api.multi
    def print_pdf_stock_inventory(self, data):
        """Collect, aggregate and report the stock of the chosen location.

        :param data: form values; ``data["product_sub_categ_id"]`` optionally
            restricts the report to one product (sub-)category.
        :raises ValidationError: when no stock matches the selection.
        :return: the ``ir.actions.report.xml`` action rendering the PDF.
        """
        # Drop any lines left over from a previous run of this wizard
        # (the search domain already restricts to this wizard's id).
        for line in self.env['wizard.valuation.stock.inventory.line'].search(
                [('wizard_id', '=', self.id)]):
            self.write({'line_ids': [(3, line.id)]})
        # Also look into every child of the selected location.
        child_loc_ids = []
        if self.location_id:
            child_loc_ids = self.env['stock.location'].sudo().search(
                [('location_id', 'child_of', self.location_id.id)]).mapped('id')
        sub_categ_id = data["product_sub_categ_id"]
        # One raw entry per matching quant.  When a sub-category filter is
        # given, a product matches when its category *or* that category's
        # parent is the selected one (same rule as before, deduplicated).
        raw_lines = []
        quants = self.env['stock.quant'].search(
            ['|', ('location_id', '=', self.location_id.id), ('location_id', 'in', child_loc_ids)])
        for quant in quants:
            categ = quant.product_id.categ_id
            if sub_categ_id and sub_categ_id not in (categ.id, categ.parent_id.id):
                continue
            raw_lines.append({
                'location_id': quant.location_id.id,
                'product_id': quant.product_id.id,
                'product_categ_id': categ.parent_id.id,
                'product_sub_categ_id': categ.id,
                'product_uom_id': quant.product_id.uom_id.id,
                'qty': quant.qty,
                'standard_price': quant.product_id.standard_price,
            })
        # Merge quants of the same product/location into a single line.
        group_fields = ("product_id", "product_categ_id", "product_sub_categ_id",
                        "location_id", "product_uom_id", "standard_price")
        grouper = itemgetter(*group_fields)
        line_ids = []
        for key, grp in groupby(sorted(raw_lines, key=grouper), grouper):
            vals = dict(zip(group_fields, key))
            vals["qty"] = sum(item["qty"] for item in grp)
            vals["amount"] = vals["standard_price"] * vals["qty"]
            line_ids.append((0, 0, vals))
        if not line_ids:
            raise ValidationError(_('Material is not available on this location.'))
        # Attach the computed lines to the wizard and launch the report.
        self.write({'line_ids': line_ids})
        context = {
            'lang': 'en_US',
            'active_ids': [self.id],
        }
        return {
            'context': context,
            'data': None,
            'type': 'ir.actions.report.xml',
            'report_name': 'dvit_report_inventory_valuation_multi_uom.report_stock_inventory_location',
            'report_type': 'qweb-pdf',
            'report_file': 'dvit_report_inventory_valuation_multi_uom.report_stock_inventory_location',
            'name': 'Stock Inventory',
            'flags': {'action_buttons': True},
        }
class WizardValuationStockInventoryLine(models.TransientModel):
    """One aggregated report row of the inventory-valuation wizard."""
    _name = 'wizard.valuation.stock.inventory.line'
    # Parent wizard owning this transient row.
    wizard_id = fields.Many2one('wizard.valuation.stock.inventory', required=True, ondelete='cascade')
    location_id = fields.Many2one('stock.location', 'Location')
    product_id = fields.Many2one('product.product', 'Product')
    product_categ_id = fields.Many2one('product.category', string='Category')
    product_sub_categ_id = fields.Many2one('product.category', string='Sub Category')
    product_uom_id = fields.Many2one('product.uom')
    qty = fields.Float('Quantity')
    standard_price = fields.Float('Rate')
    amount = fields.Float('Amount')
    @api.model
    def convert_qty_in_uom(self, from_uom, to_uom, qty):
        """Convert *qty* expressed in *from_uom* into *to_uom* via the
        UoM factors (both assumed relative to the same reference unit)."""
        return (qty / from_uom.factor) * to_uom.factor
|
mohamedhagag/dvit-odoo
|
dvit_report_inventory_valuation_multi_uom/wizard/stock_quant_report.py
|
Python
|
agpl-3.0
| 6,534 | 0.004132 |
import sys
import os
import time
import resetMbed
import serialMonitor
# Program the mbed, restart it, launch a serial monitor to record streaming logs
def runMbedProgramWithLogging(argv):
for arg in argv:
if 'startup=1' in arg:
time.sleep(10)
# If a bin file was given as argument, program it onto the mbed
remount = True
for arg in argv:
if '.bin' in arg:
print 'Copying bin file...'
#os.system('sudo rm /media/MBED/*.bin')
#time.sleep(1)
#os.system('sudo cp /home/pi/Downloads/*.bin /media/MBED')
#time.sleep(1)
os.system('sudo /home/pi/fish/mbed/programMbed.sh ' + arg)
if 'remount' in arg:
remount=int(arg.split('=')[1].strip())==1
# Remount mbed
if remount:
os.system("sudo /home/pi/fish/mbed/remountMbed.sh")
# Start mbed program and serial monitor
print 'Resetting mbed and starting serial monitor'
print ''
resetMbed.reset()
print '============'
serialMonitor.run(argv)
if __name__ == '__main__':
runMbedProgramWithLogging(sys.argv)
|
tarquasso/softroboticfish6
|
fish/pi/runMbedProgramWithLogging.py
|
Python
|
mit
| 1,134 | 0.0097 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Workflow:
# 1. Check if the folder has been analysed before
# 1.1 Status: checked, converted, reported, compiled, emailed, running, error, completed
# 2. If the sequencer is NextSeq:
# 2.1 Run bcl2fastq to create the FASTQ files
# 2.1.1 Execution:
# nohup /usr/local/bin/bcl2fastq
# --runfolder-dir 160225_NB501279_0002_AHTGNYBGXX/
# --output-dir 160225_NB501279_0002_AHTGNYBGXX_fastq &
# 3. Run FastQC with the files created on output-dir on 2.1
# 3.1 /data/runs/FastQC/FastQC/fastqc --extract -t 8 Undetermined_S0_L00?_R1_001.fastq.gz
# 4. Compile tex with the results on 3.1
# 4.1 pdflatex -output-directory [DIR] tex.tex
# 5. Send email with the PDF created on 4.1
# 5.1 sendmail ...
import argparse
import os
import subprocess
import shutil
import csv
import re
from collections import OrderedDict
from bs4 import BeautifulSoup
import datetime
BCL2FASTQ_PATH = '/usr/local/bin/bcl2fastq'
FASTQC_PATH = '/data/runs/FastQC/FastQC/fastqc'
WORKING_DIR = os.path.dirname(os.path.abspath(__file__))
REPORT_FILE = 'FastQC_report.tex'
REPORTS_PATH = 'FastQC_reports'
STATUS_FILE = 'run_report'
# informações do experimento
SAMPLESHEET = 'SampleSheet.csv'
BCL2FASTQ_REPORT = 'laneBarcode.html'
def getDatetime():
    """Return the current local time as a 'DDMMYYYY_HHMMSS' string.

    Used to build unique logfile names.  Every field is zero-padded so the
    names have a fixed width and are unambiguous -- the previous unpadded
    form made e.g. 1 Dec ('1122016...') and 11 Feb ('1122016...') collide.
    """
    return datetime.datetime.now().strftime("%d%m%Y_%H%M%S")
def getLogfile():
    """Create a fresh timestamped logfile in WORKING_DIR.

    Returns a raw OS file descriptor (an int) suitable for use as
    subprocess stdout/stderr; mode 0600 keeps the log private.
    (The former try/except that only re-raised was removed: ``raise e``
    discards the original traceback in Python 2.)
    """
    stamp = getDatetime()
    return os.open(
        os.path.join(WORKING_DIR, 'logfile-%s.log' % stamp),
        os.O_WRONLY | os.O_CREAT, 0o600)
def get_status_folder(file_status):
    """Return the first line (stripped) of *file_status*, or False when the
    file does not exist.

    The file holds the pipeline state marker ('checked', 'converted',
    'reported', 'compiled', ...).  Uses ``with`` so the handle is closed
    even if the read fails.
    """
    if not os.path.exists(file_status):
        return False
    with open(file_status, 'r') as fs:
        return fs.readline().strip()
def get_run_details(args):
    """Parse the run's SampleSheet.csv into an OrderedDict of sections.

    Keys are the upper-cased section markers ('[HEADER]', '[READS]',
    '[SETTINGS]', '[DATA]'); each value is the list of csv rows that
    follow that marker.  Returns None when no SampleSheet is present
    (callers test the result for truthiness).

    Fixes: the csv file handle was never closed; the try/except that only
    re-raised was dropped.
    """
    sheet_path = os.path.join(WORKING_DIR, args.runPath, SAMPLESHEET)
    if not os.path.exists(sheet_path):
        return None
    sections = OrderedDict([])
    key = ''
    with open(sheet_path, 'rb') as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            if len(row) == 0:
                continue  # skip blank lines
            if row[0].startswith('['):
                key = row[0].upper()
                sections[key] = []
            else:
                sections[key].append(row)
    return sections
def get_bcl2fastq_report(args, fastq_path):
    """Scrape bcl2fastq's HTML lane report into (column_count, tables).

    Returns ``(ncolums, result)`` where ``result['h2']`` lists section
    headers and ``result['table-<i>']`` maps 'head'/'<row>-col' to cell
    text lists.  Returns None implicitly when the Reports dir is missing.

    NOTE(review): the function returns from inside the frame loop, so only
    the first <frame> of index.html is processed -- presumably intentional
    (there is one report frame); confirm.  The open(...).read() handles are
    never explicitly closed.
    """
    try:
        if(os.path.exists(
                os.path.join(fastq_path, 'Reports'))):
            html = open(os.path.join(
                fastq_path,
                'Reports', 'html', 'index.html'), 'r').read()
            soup = BeautifulSoup(html, 'html.parser')
            for fr in soup.find_all('frame'):
                # Follow the frame to the per-lane barcode report page.
                src = fr.get('src')
                src = src.replace('lane.html', BCL2FASTQ_REPORT)
                report = open(os.path.join(
                    fastq_path,
                    'Reports', 'html', src), 'r').read()
                soup = BeautifulSoup(report, 'html.parser')
                result = OrderedDict([])
                ncolums = 0
                hs = soup.find_all('h2')
                result['h2'] = [ele.text.strip() for ele in hs]
                tables = soup.find_all(id="ReportTable")
                for i, table in enumerate(tables):
                    result['table-%i' % i] = OrderedDict([])
                    for j, row in enumerate(table.find_all('tr')):
                        # First row with <th> cells becomes the table head.
                        if('head' not in result['table-%i' % i]):
                            heads = row.find_all('th')
                            heads = [ele.text.strip() for ele in heads]
                            result['table-%i' % i]['head'] = heads
                            if(len(heads) > ncolums):
                                ncolums = len(heads)
                        cols = row.find_all('td')
                        cols = [ele.text.strip() for ele in cols]
                        if(len(cols) > 0):
                            result['table-%i' % i]['%i-col' % j] = cols
                            if(len(cols) > ncolums):
                                ncolums = len(cols)
                return ncolums, result
    except Exception as e:
        raise e
def rreplace(s, old, new, occurrence):
    """Replace the last *occurrence* instances of *old* in *s* with *new*."""
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
def build_run_details_tex_table(args, data):
    """Render the SampleSheet sections (from get_run_details) as a LaTeX
    tabular: returns (column_spec, body_rows).

    NOTE(review): when *data* is falsy the function implicitly returns
    None, but the caller unpacks two values -- that path would raise a
    TypeError; confirm whether data can ever be None there.
    """
    if(data):
        tex_table = ''
        # Column count is taken from the first [DATA] row.
        ncoluns = len(data['[DATA]'][0])
        # Build the tabular column spec, e.g. {|l|l|l|l|l|l|l|}
        columns_table = '{'
        for c in range(ncoluns):
            columns_table += '|l'
        columns_table += '|}'
        for key in data.keys():
            # Section title spanning the whole table width.
            values = data.get(key)
            tex_table += "\multicolumn{%s}{|c|}{%s} \\\\ \hline\n" % (
                ncoluns, key.replace('[', '').replace(']', ''))
            # HEADER: key/value pairs, value spans the remaining columns.
            if(key == '[HEADER]'):
                for value in values:
                    tex_table += "%s & \multicolumn{%s}{l|}{%s} \\\\ \hline\n" % (
                        value[0].replace('_', '\_'), ncoluns - 1, value[1].replace('_', '\_'))
            # READS: single value per row, full width.
            elif(key == '[READS]'):
                for value in values:
                    tex_table += "\multicolumn{%s}{|l|}{%s} \\\\ \hline\n" % (
                        ncoluns, value[0].replace('_', '\_'))
            # SETTINGS: same layout as HEADER.
            elif(key == '[SETTINGS]'):
                for value in values:
                    tex_table += "%s & \multicolumn{%s}{l|}{%s} \\\\ \hline\n" % (
                        value[0].replace('_', '\_'), ncoluns - 1, value[1].replace('_', '\_'))
            # DATA: one cell per column; backslashes and underscores escaped,
            # the trailing '&' is turned into a space via rreplace.
            elif(key == '[DATA]'):
                for value in values:
                    tex_table += ''.join('%s & ' % v.replace('\\', '_').replace(
                        '_', '\_') for v in value)
                    tex_table = rreplace(tex_table, '&', ' ', 1)
                    tex_table += '\\\\ \hline\n'
        return columns_table, tex_table
def build_bcl2fastq_report_tex_table(args, fastq_path):
    """Turn the scraped bcl2fastq report tables into LaTeX bodies.

    Returns an OrderedDict mapping each section header to its rendered
    rows; the 'Top Unknown Barcodes' section is deliberately skipped.

    NOTE(review): ``ncoluns, data = get_bcl2fastq_report(...)`` raises a
    TypeError when the helper returns None (Reports dir missing); confirm
    callers only reach this after a successful bcl2fastq run.
    """
    ncoluns, data = get_bcl2fastq_report(args, fastq_path)
    if(data):
        tex_table = OrderedDict([])
        headers = data.get('h2')
        for i, head in enumerate(headers):
            if(head == 'Top Unknown Barcodes'):
                pass
            else:
                tb = data.get('table-%i' % i)
                tex = ''
                cols = len(tb['head'])
                tex += "\multicolumn{%s}{|c|}{%s} \\\\ \hline\n" % (cols, head)
                for key in tb.keys():
                    values = tb.get(key)
                    if(key == 'head'):
                        # Header cells: split long labels onto two lines
                        # inside a nested tabular, escaping '_' and '%'.
                        for v in values:
                            if(len(v.rsplit(" ", 1)) > 1):
                                v = "%s\\\\ %s" % (
                                    v.rsplit(" ", 1)[0], v.rsplit(" ", 1)[1])
                            line = "\\begin{tabular}[c]{@{}l@{}}%s\\end{tabular} &" % v.replace(
                                '_', '\_').replace('%', '\%')
                            tex += line
                        tex = rreplace(tex, '&', ' ', 1)
                        tex += '\\\\ \hline\n'
                    else:
                        # Data cells: escape '_' and drop the trailing '&'.
                        tex += ''.join('%s & ' % v.replace('_', '\_') for v in values)
                        tex = rreplace(tex, '&', ' ', 1)
                        tex += '\\\\ \hline\n'
                tex_table[head] = tex
        return tex_table
def check_analysed_folder(args, file_status):
    """Claim the run folder for processing.

    Returns False when a previous (or concurrent) run already moved the
    folder past re-processing ('emailed', 'running' or 'completed');
    otherwise ensures the status file exists (initialised to 'checked')
    and returns True.
    """
    status = get_status_folder(file_status)
    if status and status in ['emailed', 'running', 'completed']:
        return False
    if not os.path.exists(file_status):
        # 'with' guarantees the marker is flushed/closed.
        with open(file_status, 'w+') as fs:
            fs.write('checked\n')
    return True
def run_blc2fastq(args, file_status, fastq_path, logfile):
    """Run bcl2fastq to convert the run folder into FASTQ files.

    Skips (returning True) when the folder was already converted or the
    output dir exists.  Status file transitions: 'running' -> 'converted'
    on success, 'error' on a non-zero exit code.

    NOTE(review): *logfile* is a raw fd (int), so ``'tail %s' % logfile``
    shells out e.g. ``tail 7`` -- the failure diagnostic is broken; it
    would need the logfile *path* to work.
    """
    status = get_status_folder(file_status)
    if(status and status in ['converted']):
        return True
    if(os.path.exists(fastq_path)):
        return True
    cl = [
        '/usr/local/bin/bcl2fastq',
        '--runfolder-dir',
        args.runPath,
        '--output-dir',
        fastq_path]
    print('running blc2fastq')
    fs = open(file_status, 'w+')
    fs.write('running\n')
    fs.close()
    # stdout/stderr of the converter are captured into the logfile fd.
    retProcess = subprocess.Popen(
        cl, 0, stdout=logfile, stderr=logfile, shell=False)
    retCode = retProcess.wait()
    if(retCode != 0):
        fs = open(file_status, 'w+')
        fs.write('error\n')
        fs.close()
        print(os.system('tail %s' % logfile))
        return False
    fs = open(file_status, 'w+')
    fs.write('converted\n')
    fs.close()
    print('finished')
    return True
def rename_fastq_file(args, fastq_path):
    """Symlink bcl2fastq outputs to canonical 'L<l>_L00<l>_R<r>_<nnn>.*' names.

    Scans *fastq_path* (and its immediate sub-directories) for .gz files of
    each lane/read combination and creates sequentially numbered symlinks
    next to them.  Returns a dict mapping lane name ('L001', ...) to the
    list of new link names.  NextSeq runs have 4 lanes, others 1.
    """
    try:
        fastq_files = {}
        for read in range(1, 3):  # R1 and R2
            npattern = 'L{0}_L00{0}_R{1}_{2}.'
            regex = '.*L00%d\\_R%d.*\\.gz\\Z(?ms)'
            lane = 'L00%d'
            if(args.sequencerName.upper() == 'NEXTSEQ'):
                lanes = 4
            else:
                lanes = 1
            for l in range(1, lanes + 1):  # NextSeq has 4 lanes
                clane = lane % l
                reobj = re.compile(regex % (l, read))
                # Matching files directly in fastq_path ...
                files = [f for f in os.listdir(fastq_path) if reobj.match(f)]
                filedirs = [f for f in os.listdir(fastq_path) if os.path.isdir(
                    os.path.join(fastq_path, f))]
                # ... plus matches one directory level down (project dirs).
                for d in filedirs:
                    filelist = [f for f in os.listdir(
                        os.path.join(fastq_path, d)) if reobj.match(f)]
                    for f in filelist:
                        files.append(os.path.join(d, f))
                nfiles = []
                for i, f in enumerate(files):
                    name, ext = f.split('.', 1)
                    # Zero-pad the sequence number to three digits.
                    digits = len(str(i + 1))
                    if(digits == 1):
                        group = '00' + str(i + 1)
                    elif(digits == 2):
                        group = '0' + str(i + 1)
                    else:
                        group = str(i + 1)
                    f_npattern = npattern.format(l, read, group)
                    nname = f_npattern + ext
                    # Create the link only once (idempotent re-runs).
                    if(not os.path.islink(
                            os.path.join(
                                fastq_path, nname))):
                        os.symlink(
                            os.path.join(fastq_path, f),
                            os.path.join(fastq_path, nname))
                    nfiles.append(nname)
                if(nfiles):
                    # Accumulate R1 and R2 names under the same lane key.
                    if(clane in fastq_files):
                        values = fastq_files.get(clane)
                        for f in nfiles:
                            values.append(f)
                        fastq_files[clane] = values
                    else:
                        fastq_files[clane] = (nfiles)
        return fastq_files
    except Exception as e:
        raise e
def run_fastqc(args, file_status, fastq_path, logfile):
    """Run FastQC over the (symlinked) FASTQ files of every lane.

    Skips when already 'reported' or when .html reports exist; otherwise
    invokes fastqc once per lane, then removes the symlinks created by
    rename_fastq_file.  Status transitions: 'running' -> 'reported', or
    'error' on a non-zero exit.

    NOTE(review): ``fasta_files[lane]`` raises KeyError when a lane yielded
    no files -- presumably cannot happen after a successful conversion;
    confirm.
    """
    status = get_status_folder(file_status)
    if(status and status in ['reported']):
        return True
    if(not os.path.exists(fastq_path)):
        return False
    fasta_files = rename_fastq_file(args, fastq_path)
    if(not fasta_files):
        return False
    # Check if there already is a fastq folder
    regex = '.*\\.html\\Z(?ms)'
    reobj = re.compile(regex)
    paths = [f for f in os.listdir(fastq_path) if reobj.match(f)]
    if(paths):
        return True
    if(args.sequencerName.upper() == 'NEXTSEQ'):
        lanes = 4
    else:
        lanes = 1
    for l in range(1, lanes + 1):
        lane = 'L00%d' % l
        files = fasta_files[lane]
        fileList = [' '.join(
            os.path.join(fastq_path, f) for f in files)]
        # shell=True: the whole command line is a single string.
        cl = ['/data/runs/FastQC/FastQC/fastqc --extract --casava -t 8 ' + fileList[0]]
        print('running fastqc')
        fs = open(file_status, 'w+')
        fs.write('running\n')
        fs.close()
        retProcess = subprocess.Popen(
            cl, 0, stdout=logfile, stderr=logfile, shell=True)
        retCode = retProcess.wait()
        if(retCode != 0):
            fs = open(file_status, 'w+')
            fs.write('error\n')
            fs.close()
            return False
        # Clean up the temporary symlinks for this lane.
        files = fasta_files[lane]
        for f in files:
            if(os.path.islink(os.path.join(fastq_path, f))):
                try:
                    os.unlink(os.path.join(fastq_path, f))
                except OSError as e:
                    # NOTE(review): this bare string expression is a no-op --
                    # the intended warning is built and discarded, nothing
                    # is printed or logged.
                    'It was not possible to unlink the file \n%s. Error: %s' % (
                        os.path.join(fastq_path, f), e)
    fs = open(file_status, 'w+')
    fs.write('reported\n')
    fs.close()
    print('finished')
    return True
def compile_tex(args, file_status, fastq_path, logfile):
    """Fill the LaTeX report template per FastQC result and run pdflatex.

    For every '<name>_fastqc' result dir it substitutes the $...$ template
    placeholders (image path, sequencer, SampleSheet table, bcl2fastq
    tables, lane and read) and compiles the resulting .tex.  Status
    transitions: 'running' -> 'compiled', or 'error' on pdflatex failure.

    NOTE(review): the name ``tex`` is reused for both a file handle and the
    table string inside the placeholder loop; it works only because the
    handle was closed before the loop.  The lane/read extraction via
    ``report_dir.rsplit('_', ...)`` assumes the exact naming produced by
    rename_fastq_file.
    """
    status = get_status_folder(file_status)
    if(status and status in ['compiled']):
        return True
    images_dir = []
    reports_dir = []
    regex = '.*\\.html\\Z(?ms)'
    reobj = re.compile(regex)
    paths = [f for f in os.listdir(fastq_path) if reobj.match(f)]
    for path in paths:
        path_fastqc = path.split('.', 1)[0]
        s_image = os.path.join(fastq_path, path_fastqc, 'Images')
        report_dir = os.path.join(
            WORKING_DIR, args.runPath, REPORTS_PATH, path_fastqc)
        if(not os.path.exists(os.path.join(fastq_path, path_fastqc))):
            return False
        images_dir.append(s_image)
        reports_dir.append(report_dir)
    # Load the report template once.
    tex = open(os.path.join(WORKING_DIR, REPORT_FILE), 'r')
    rel = tex.read()
    tex.close()
    # Recreate the reports dir from scratch.
    if(os.path.exists(os.path.join(WORKING_DIR, args.runPath, REPORTS_PATH))):
        shutil.rmtree(os.path.join(WORKING_DIR, args.runPath, REPORTS_PATH))
    os.mkdir(os.path.join(WORKING_DIR, args.runPath, REPORTS_PATH))
    data = get_run_details(args)
    tex_columns_table, tex_table_run_details = build_run_details_tex_table(args, data)
    tex_table_bcl2fastq_report = build_bcl2fastq_report_tex_table(args, fastq_path)
    for image_dir, report_dir in zip(images_dir, reports_dir):
        # Substitute the template placeholders for this result.
        new_rel = rel.replace("$PATH$", image_dir)
        new_rel = new_rel.replace("$EQUIPAMENTO$", args.sequencerName)
        new_rel = new_rel.replace("$TABLECOLUMNS$", tex_columns_table)
        new_rel = new_rel.replace("$TABLECONTENTS$", tex_table_run_details)
        lane = report_dir.rsplit('_', 3)[1][-1]
        new_rel = new_rel.replace("$LANE$", lane)
        read = report_dir.rsplit('_', 2)[1]  # R1 or R2
        new_rel = new_rel.replace("$READ$", read)
        # One $TABLE<A..>$ pair per bcl2fastq report section.
        for i, key in enumerate(tex_table_bcl2fastq_report.keys()):
            char = chr(i + ord('A'))
            tex = tex_table_bcl2fastq_report.get(key)
            new_rel = new_rel.replace("$TABLE%sHEADER$" % char, key.encode('utf-8'))
            new_rel = new_rel.replace("$TABLE%sCONTENTS$" % char, tex.encode('utf-8'))
        os.mkdir(os.path.join(WORKING_DIR, args.runPath, REPORTS_PATH, report_dir))
        tex = open(
            os.path.join(WORKING_DIR, args.runPath, REPORTS_PATH, report_dir, REPORT_FILE), 'w+')
        tex.write(new_rel)
        tex.close()
        filename = '{0}-L00{1}-{2}'.format(REPORT_FILE.rsplit('.', 1)[0], lane, read)
        cl = [
            'pdflatex',
            '-output-directory',
            os.path.join(WORKING_DIR, args.runPath, REPORTS_PATH, report_dir),
            '--jobname=%s' % filename,
            os.path.join(WORKING_DIR, args.runPath, REPORTS_PATH, report_dir, REPORT_FILE)
        ]
        print('compiling tex')
        fs = open(file_status, 'w+')
        fs.write('running\n')
        fs.close()
        retProcess = subprocess.Popen(
            cl, 0, stdout=logfile, stderr=logfile, shell=False)
        retCode = retProcess.wait()
        if(retCode != 0):
            fs = open(file_status, 'w+')
            fs.write('error\n')
            fs.close()
            return False
    fs = open(file_status, 'w+')
    fs.write('compiled\n')
    fs.close()
    print('tex compiled')
    return True
def send_email():
    """TODO: e-mail the generated PDF report (workflow step 5) -- not implemented."""
    pass
def main():
    """CLI entry point: drive the full pipeline for one run folder.

    check folder -> bcl2fastq -> FastQC -> pdflatex, then remove the
    status file so the folder can be processed again later.
    """
    parser = argparse.ArgumentParser(description='Generate a PDF report with FastQC analysis')
    parser.add_argument(
        '--runPath', '-p', required=True,
        default=None, help='Path with the files of the run (default: %(default)s)')
    parser.add_argument(
        '--sequencerName', '-s',
        default='miseq',
        choices=['miseq', 'nextseq'],
        required=True,
        help='Sequencer name (default: %(default)s)')
    parser.add_argument(
        '--runName', '-r',
        default=None, help='Name of the run (default: %(default)s)')
    args = parser.parse_args()
    args.sequencerName = args.sequencerName.upper()
    file_status = os.path.join(WORKING_DIR, args.runPath, STATUS_FILE)
    # Derive the run name from the last path component when not supplied.
    if(not args.runName):
        if(args.runPath.endswith('/')):
            args.runName = os.path.join(WORKING_DIR, args.runPath).rsplit('/', 2)[-2]
        elif('/' in args.runPath):
            args.runName = os.path.join(WORKING_DIR, args.runPath).rsplit('/', 1)[-1]
        else:
            args.runName = os.path.join(WORKING_DIR, args.runPath)
    if(not os.path.exists(os.path.join(WORKING_DIR, args.runPath))):
        raise Exception(
            "Path of the run not found. \n %s" % os.path.join(WORKING_DIR, args.runPath))
    print('path exist')
    if(not check_analysed_folder(args, file_status)):
        raise Exception(
            'The folder has the status "%s". Execution aborted.' %
            get_status_folder(file_status).strip())
    print('path checked')
    fastq_path = ''
    logfile = getLogfile()
    fastq_path = os.path.join(WORKING_DIR, args.runPath, '%s_fastq/' % args.runName)
    if(not run_blc2fastq(args, file_status, fastq_path, logfile)):
        raise Exception("Error on bcl2fastq. Execution aborted.")
    print('converted')
    if(not run_fastqc(args, file_status, fastq_path, logfile)):
        raise Exception("Error on fastqc. Execution aborted.")
    print('reported')
    if(not compile_tex(args, file_status, fastq_path, logfile)):
        raise Exception("Error on compile tex. Execution aborted.")
    print('generated pdf')
    # NOTE(review): result discarded -- looks like leftover debug; confirm
    # before removing.
    build_bcl2fastq_report_tex_table(args, fastq_path)
    try:
        os.remove(file_status)
    except Exception as e:
        raise e

if __name__ == '__main__':
    main()
|
CEFAP-USP/fastqc-report
|
RunFastQC.py
|
Python
|
gpl-3.0
| 18,788 | 0.002715 |
from typing import Any, Sequence, Union
from dataclasses import dataclass
from . import RequestMsg, ReplyMsg, Message, SimpleMessage
from hedgehog.protocol.proto import ack_pb2
from hedgehog.utils import protobuf
__all__ = ['Acknowledgement']
# <GSL customizable: module-header>
from hedgehog.protocol.proto.ack_pb2 import OK, UNKNOWN_COMMAND, INVALID_COMMAND, UNSUPPORTED_COMMAND, FAILED_COMMAND
__all__ += ['OK', 'UNKNOWN_COMMAND', 'INVALID_COMMAND', 'UNSUPPORTED_COMMAND', 'FAILED_COMMAND']
# </GSL customizable: module-header>
@ReplyMsg.message(ack_pb2.Acknowledgement, 'acknowledgement', fields=('code', 'message',))
@dataclass(frozen=True, repr=False)
class Acknowledgement(SimpleMessage):
    """Generic server reply: a result code (OK, UNKNOWN_COMMAND, ...) plus
    an optional human-readable message.

    Generated scaffolding (GSL) -- only the marked customizable sections
    survive regeneration.
    """
    code: int = OK
    message: str = ''

    def __post_init__(self):
        # <default GSL customizable: Acknowledgement-init-validation>
        pass
        # </GSL customizable: Acknowledgement-init-validation>

    # <default GSL customizable: Acknowledgement-extra-members />

    @classmethod
    def _parse(cls, msg: ack_pb2.Acknowledgement) -> 'Acknowledgement':
        # Build the immutable dataclass from its protobuf counterpart.
        code = msg.code
        message = msg.message
        return cls(code, message=message)

    def _serialize(self, msg: ack_pb2.Acknowledgement) -> None:
        # Copy the dataclass fields into the (mutable) protobuf message.
        msg.code = self.code
        msg.message = self.message
|
PRIArobotics/HedgehogProtocol
|
hedgehog/protocol/messages/ack.py
|
Python
|
agpl-3.0
| 1,301 | 0.002306 |
# coding: utf-8
import pygame
import sys
from pygame.locals import *
from gui import *
from conexao import *
from jogador import *
from Queue import Queue
from threading import Thread
"""
Cliente
Tp de Redes - Truco
UFSJ
Carlos Magno
Lucas Geraldo
Requisitos:
*python 2.7
*pygame
Modulo Principal.
"""
class Principal(Gui):
"""
Classe Principal
"""
    def __init__(self):
        # --- TRUCO BUTTON SWITCH ---
        # Set to 1 to enable the option of requesting "truco".
        self.truco_habilitado = 1
        # --------------------
        self.mensagem_servidor = ""
        self.carta_selecionada = -1
        self.sua_vez = 0
        # Connect to the game server before anything else; recebe_cartas()
        # below blocks until the initial hand message arrives.
        self.conexao = Conexao()
        self.conexao.conectar()
        self.gui = Gui()
        self.jogador = Jogador()
        self.recebe_cartas()
        self.gui.carrega_cartas()
        # -------------------- protocol/game state defaults
        self.pede_truco = "0"
        self.rodada = 1
        self.gui.valor_rodada = "0"
        self.flag_truco = 0
        self.gui.pontos = "0000"
        self.gui.partidas = "000"
        self.question_truco = "0"
        self.proposta_truco_equipe = "0"
        self.resposta_proposta_truco = "0"
        self.mesa_jogo = "000000"
        self.gui.mensagem_vez = "Aguarde..."
        self.gui.cont_cartas = 3
        # ----------------- background reader thread draining the socket
        # into self.quee ("quee" is a typo for "queue", kept as-is).
        self.quee = Queue()
        self.verifica = Thread(target=self.verifica_resposta_servidor, args=(
            self.quee, self.conexao))
        self.verifica.daemon = True
        self.verifica.start()
def atualiza_mensagem(self):
"Atualiza o campo de mensagens.."
if(self.sua_vez is 0):
self.gui.mensagem_vez = "Aguarde..."
self.gui.escrever(self.gui.mensagem_vez, (40, 430), (255, 0, 0))
if(self.sua_vez is 1):
self.gui.mensagem_vez = "Sua Vez..."
self.gui.escrever(self.gui.mensagem_vez, (40, 430), (0, 255, 0))
def agrupa_cartas(self, lista):
"""Agrupa as cartas recebidas do servidor"""
final = ""
c1 = ""
for i in lista:
c1 = c1 + i
if(len(c1) == 2):
final = final + c1 + ","
c1 = ""
lista = final.split(',')
lista.pop()
return lista
    def recebe_cartas(self):
        """
        Load the cards dealt by the server.

        Parses the initial fixed-layout handshake message: player id,
        team, turn flag and the six characters encoding the 3-card hand.
        """
        self.mensagem_servidor = self.conexao.ler_socket()
        # Extract the header fields from the fixed message layout.
        self.jogador.id = self.mensagem_servidor[0:1]
        self.jogador.equipe = self.mensagem_servidor[1:2]
        self.sua_vez = int(self.mensagem_servidor[2:3])
        cartas = self.mensagem_servidor[4:10]
        print "ID ", self.jogador.id, "Equipe ", self.jogador.equipe, "Sua Vez ", self.sua_vez
        self.jogador.cartas_mao = cartas
        # Split "4p7c7o" into ["4p","7c","7o"] and hand it to the GUI.
        cartas = self.agrupa_cartas(cartas)
        for i in cartas:
            self.gui.cartas_recebidas.append(i)
    def verifica_resposta_servidor(self, fila, conexao):
        """Reader-thread loop: forward every socket message into the queue.

        NOTE(review): the *fila* parameter is ignored -- the loop pushes
        into self.quee directly (they are the same object at the call
        site); confirm before relying on the parameter.
        """
        while (True):
            palavra = conexao.ler_socket()
            if(palavra is not None):
                self.quee.put(palavra)
def verifica_erro_mensagem(self,lista):
"""Verifica e corrige erro na mensagem recebida"""
tamanho=len(lista)
if(tamanho<30):
lista = lista[
:0] + "00" + lista[1:]
print "Mensagem corrigida ",lista
return lista
    def processa_resposta(self, lista):
        """Process one raw state message from the server: update turn,
        round, hand, scores, truco flags and the table, then redraw."""
        self.mensagem_servidor = lista
        if(lista is not None):
            print "resposta vinda do servidor ", lista
            #lista = self.verifica_erro_mensagem(lista)
            self.sua_vez = int(lista[2:3])
            self.atualiza_mensagem()
            # Clear the table first if the round number changed.
            self.finaliza_rodada(int(lista[3:4]))
            self.rodada = int(lista[3:4])
            cartas = lista[4:10]
            # NOTE(review): 'is not' compares string identity; the slice is
            # always a fresh object, so this branch is always taken and the
            # hand-reset 'else' below appears unreachable -- confirm.
            if(cartas is not "000000"):
                pass
            else:
                # The server does not re-send cards mid-match; reset the
                # player's hand from the message instead.
                self.gui.cartas_recebidas = []
                self.jogador.cartas_mao = cartas
                cartas = self.agrupa_cartas(cartas)
                for i in cartas:
                    self.gui.cartas_recebidas.append(i)
            # Fixed-layout score/flag fields.
            self.gui.pontos = lista[10:14]
            self.gui.partidas = lista[14:17]
            self.gui.valor_rodada = lista[17:19]
            self.question_truco = lista[19:20]
            self.proposta_truco_equipe = lista[20:21]
            self.mesa_jogo = lista[22:30]
            self.renderiza_mesa()
            print self.sua_vez
            if(self.gui.cont_cartas > 1):
                self.gui.cont_cartas = self.gui.cont_cartas - 1
def renderiza_mesa(self):
"""Função que renderiza_mesa"""
# 00 00 00 00
self.gui.caminho_cartas
print self.mensagem_servidor
cartas = self.agrupa_cartas(self.mesa_jogo)
print "Cartas Mesa ", cartas
cont = 0
for i in cartas:
if not (i == "00" or i == "0"):
i = self.gui.caminho_cartas + i + ".png"
if(self.jogador.id == "0"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
if cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
if cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
2, self.gui.cont_cartas)
if cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif(self.jogador.id == "1"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
elif cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
elif cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif(self.jogador.id == "2"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
2, self.gui.cont_cartas)
elif cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
elif cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
elif (self.jogador.id == "3"):
if cont is 0:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_1)
self.gui.update_card_adversario(
1, self.gui.cont_cartas)
elif cont is 1:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_2)
self.gui.update_card_adversario(
2, self.gui.cont_cartas)
elif cont is 2:
self.gui.renderiza_cartas_jogadas(
i, self.gui.pos_cartas_jog_3)
self.gui.update_card_adversario(
3, self.gui.cont_cartas)
elif cont is 3:
self.gui.renderiza_cartas_jogadas(
i, self.gui.sua_pos_carta)
cont = cont + 1
def finaliza_rodada(self, valor):
"""Verifica se a rodada terminou e limpa a tela"""
if(int(self.rodada) is not valor):
self.gui.tela_padrao(self.jogador.equipe)
print "Limpando a rodada"
def prepara_mensagem(self, carta_jogada):
"""Prepara uma mensagem da carta jogada para o envio"""
# Acerta a posicao da carta na mesa
if(int(self.jogador.id) is 0):
self.mensagem_servidor = self.mensagem_servidor[
:22] + carta_jogada + self.mensagem_servidor[24:]
if(int(self.jogador.id) is 1):
self.mensagem_servidor = self.mensagem_servidor[
:24] + carta_jogada + self.mensagem_servidor[26:]
if(int(self.jogador.id) is 2):
self.mensagem_servidor = self.mensagem_servidor[
:26] + carta_jogada + self.mensagem_servidor[28:]
if(int(self.jogador.id) is 3):
self.mensagem_servidor = self.mensagem_servidor[
:28] + carta_jogada + self.mensagem_servidor[30:]
def verifica_proposta_truco(self):
"""Exibe a tela de Truco"""
if(self.question_truco == "1") and self.sua_vez is 1:
self.gui.tela_truco()
self.flag_truco = 1
def solicita_truco(self):
"""Solicitar Truco"""
if(self.sua_vez is 1):
print "Solicitando Truco.."
self.mensagem_servidor = self.mensagem_servidor[
:19] + self.pede_truco + self.mensagem_servidor[20:]
print "Mensagem enviada na solicitação de Truco..", self.mensagem_servidor
self.conexao.envia_mensagem(self.mensagem_servidor)
self.pede_truco = "0"
def responde_truco(self):
"""Envia uma mensagem para o servidor com a resposta do truco"""
self.mensagem_servidor = self.mensagem_servidor[
:21] + self.resposta_proposta_truco + self.mensagem_servidor[22:]
print "Enviando a Seguinte resposta de Truco ", self.mensagem_servidor
self.conexao.envia_mensagem(self.mensagem_servidor)
    def envia_carta_servidor(self, carta_jogada):
        """Send a played card to the server, updating the message fields.

        *carta_jogada* is an image path like '.../4p.png'; the card code
        is the file stem ('4p').
        """
        if carta_jogada is not None:
            carta_jogada = carta_jogada.split("/")[1].split(".")[0]
            # Message layout: 1(ID)|a(team)|0(turn)|0(round)|4p7c7o(hand)|
            # 0000(game score)|000(round score)|00(round value)|0(question)|
            # 0(question team)|0(truco answer)|00000000(table)|0(turned)
            self.prepara_mensagem(carta_jogada)
            # Send the updated message to the server.
            print "mensagem para o envio ", self.mensagem_servidor
            self.conexao.envia_mensagem(self.mensagem_servidor)
def main(self):
"""Realiza a renderização.."""
pygame.init()
pygame.display.set_caption("Truco")
pygame.DOUBLEBUF
self.gui.iniciar()
self.carta_selecionada = -1
select = 0
# print "Mensagem das Cartas ",self.mensagem_servidor
while True:
for event in pygame.event.get():
self.gui.mostra_pontuacao(self.jogador.equipe)
self.gui.rodadas(self.jogador.equipe)
self.atualiza_mensagem()
self.verifica_proposta_truco()
self.gui.desenha_botao_truco(
self.gui.valor_rodada, self.proposta_truco_equipe)
if event.type == QUIT:
print "Encerrando conexão...."
pygame.quit()
sys.exit()
self.verifica.exit()
self.quee.join()
if event.type == KEYDOWN and self.sua_vez == 1:
op = event.unicode
print op
op = str(op)
if op is "":
op = str(event.key)
print op
if op == "1":
self.gui.update_card(
self.gui.mao[0], self.gui.pos_cartas_jog)
self.carta_selecionada = 0
if op == "2":
self.gui.update_card(
self.gui.mao[1], self.gui.pos_cartas_jog)
self.carta_selecionada = 1
if op == "3":
self.gui.update_card(
self.gui.mao[2], self.gui.pos_cartas_jog)
self.carta_selecionada = 2
if (op == "275" or op == "276") and self.rodada is not 1:
"""Teclas de Seta esq e dir
carta oculta
"""
self.gui.update_card(
self.gui.mao[3], self.gui.pos_cartas_jog)
self.carta_selecionada = 3
else:
print "Jogada não permitida."
if op == "273":
print "carta jogada", self.gui.mao[self.carta_selecionada]
if (self.carta_selecionada != -1):
self.sua_vez = 1 # Bloqueia a mão ..
self.envia_carta_servidor(
self.gui.mao[self.carta_selecionada])
if self.carta_selecionada is not 3:
self.gui.mao[self.carta_selecionada] = None
self.gui.verifica_mao(self.gui.mao, self.conexao)
if event.type == MOUSEBUTTONDOWN and select == 0:
"""Define a mudança da tela"""
print event.button, event.pos
fundo = pygame.image.load(
self.gui.caminho_background + "fundo.jpg")
self.gui.novo_tamanho_janela()
self.gui.tela.blit(fundo, [0, 0])
self.gui.update_card_adversario(0, 3)
self.gui.escrever(
"Para selecionar cartas escolha [1,2,3]", (30, 30),
self.gui.branco)
self.gui.escrever(
"Para Jogar a carta utilize seta para frente", (
30, 50),
self.gui.branco)
self.gui.escrever(
"Utilize as setas direcionais para ocultar", (30, 70),
self.gui.branco)
select = 1
if event.type == MOUSEBUTTONDOWN and self.sua_vez == 1:
pos = event.pos
print "Posicao ", pos
if (pos[0] > 670 and pos[0] < 780):
if(pos[1] > 471 and pos[1] < 471 + 20):
# self.gui.desenha_botao_truco(self.gui.valor_rodada)
if (self.truco_habilitado is 1):
print "entrouuu"
print "Variaveis do truco Sua Vez ", self.sua_vez, type(self.sua_vez), "Minha equipe ", self.jogador.equipe, type(self.jogador.equipe), "Proposta truco equipe ", self.proposta_truco_equipe, type(self.proposta_truco_equipe)
if(self.sua_vez is 1 and (self.jogador.equipe == self.proposta_truco_equipe or self.proposta_truco_equipe == "0")):
print "pedindo truco"
self.pede_truco = "1"
self.solicita_truco()
self.flag_truco = 1
else:
print self.gui.mao
print "Não é permitido pedir truco na mão de 12"
else:
print "A opção de truco não está Habilitada."
if (pos[0] > 363 and pos[0] < 392) and self.flag_truco is 1:
if (pos[1] > 236 and pos[1] < 276):
print "Truco Aceito"
self.resposta_proposta_truco = "1"
self.responde_truco()
self.gui.tela_padrao(self.jogador.equipe)
self.flag_truco = 0
if (pos[0] > 410 and pos[0] < 441) and self.flag_truco is 1:
if (pos[1] > 237 and pos[1] < 266):
print "Truco Não Foi aceito"
self.gui.tela_padrao(self.jogador.equipe)
self.resposta_proposta_truco = "0"
se.responde_truco()
self.flag_truco = 0
# self.cartas_jogadas()
pygame.display.update()
for i in range(0, 1):
# Percorre a fila lendo as mensagens recebidas do servidor
if not self.quee.empty():
retorno = self.quee.get(i)
self.verifica_erro_mensagem(retorno)
self.processa_resposta(retorno)
# Adiciona um evento na pilha de eventos para atualizar a
# tela.
evento = pygame.event.Event(USEREVENT)
pygame.event.post(evento)
# Entry point: build the client controller and start the pygame loop.
if __name__ == '__main__':
    new = Principal()
    new.main()
|
Exterminus/Redes
|
Cliente/Cliente_Interface/cliente_gui.py
|
Python
|
mit
| 18,741 | 0.001122 |
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import unittest
import openapi_client
from openapi_client.api.blue_ocean_api import BlueOceanApi # noqa: E501
class TestBlueOceanApi(unittest.TestCase):
    """BlueOceanApi unit test stubs.

    Auto-generated placeholders, one per API operation; each currently
    passes vacuously and should be filled in with real assertions.
    """
    def setUp(self):
        # Fresh, unconfigured client per test (no host/auth supplied here).
        self.api = BlueOceanApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_delete_pipeline_queue_item(self):
        """Test case for delete_pipeline_queue_item
        """
        pass
    def test_get_authenticated_user(self):
        """Test case for get_authenticated_user
        """
        pass
    def test_get_classes(self):
        """Test case for get_classes
        """
        pass
    def test_get_json_web_key(self):
        """Test case for get_json_web_key
        """
        pass
    def test_get_json_web_token(self):
        """Test case for get_json_web_token
        """
        pass
    def test_get_organisation(self):
        """Test case for get_organisation
        """
        pass
    def test_get_organisations(self):
        """Test case for get_organisations
        """
        pass
    def test_get_pipeline(self):
        """Test case for get_pipeline
        """
        pass
    def test_get_pipeline_activities(self):
        """Test case for get_pipeline_activities
        """
        pass
    def test_get_pipeline_branch(self):
        """Test case for get_pipeline_branch
        """
        pass
    def test_get_pipeline_branch_run(self):
        """Test case for get_pipeline_branch_run
        """
        pass
    def test_get_pipeline_branches(self):
        """Test case for get_pipeline_branches
        """
        pass
    def test_get_pipeline_folder(self):
        """Test case for get_pipeline_folder
        """
        pass
    def test_get_pipeline_folder_pipeline(self):
        """Test case for get_pipeline_folder_pipeline
        """
        pass
    def test_get_pipeline_queue(self):
        """Test case for get_pipeline_queue
        """
        pass
    def test_get_pipeline_run(self):
        """Test case for get_pipeline_run
        """
        pass
    def test_get_pipeline_run_log(self):
        """Test case for get_pipeline_run_log
        """
        pass
    def test_get_pipeline_run_node(self):
        """Test case for get_pipeline_run_node
        """
        pass
    def test_get_pipeline_run_node_step(self):
        """Test case for get_pipeline_run_node_step
        """
        pass
    def test_get_pipeline_run_node_step_log(self):
        """Test case for get_pipeline_run_node_step_log
        """
        pass
    def test_get_pipeline_run_node_steps(self):
        """Test case for get_pipeline_run_node_steps
        """
        pass
    def test_get_pipeline_run_nodes(self):
        """Test case for get_pipeline_run_nodes
        """
        pass
    def test_get_pipeline_runs(self):
        """Test case for get_pipeline_runs
        """
        pass
    def test_get_pipelines(self):
        """Test case for get_pipelines
        """
        pass
    def test_get_scm(self):
        """Test case for get_scm
        """
        pass
    def test_get_scm_organisation_repositories(self):
        """Test case for get_scm_organisation_repositories
        """
        pass
    def test_get_scm_organisation_repository(self):
        """Test case for get_scm_organisation_repository
        """
        pass
    def test_get_scm_organisations(self):
        """Test case for get_scm_organisations
        """
        pass
    def test_get_user(self):
        """Test case for get_user
        """
        pass
    def test_get_user_favorites(self):
        """Test case for get_user_favorites
        """
        pass
    def test_get_users(self):
        """Test case for get_users
        """
        pass
    def test_post_pipeline_run(self):
        """Test case for post_pipeline_run
        """
        pass
    def test_post_pipeline_runs(self):
        """Test case for post_pipeline_runs
        """
        pass
    def test_put_pipeline_favorite(self):
        """Test case for put_pipeline_favorite
        """
        pass
    def test_put_pipeline_run(self):
        """Test case for put_pipeline_run
        """
        pass
    def test_search(self):
        """Test case for search
        """
        pass
    def test_search_classes(self):
        """Test case for search_classes
        """
        pass
# Allow running the stub suite directly with ``python test_blue_ocean_api.py``.
if __name__ == '__main__':
    unittest.main()
|
cliffano/swaggy-jenkins
|
clients/python-experimental/generated/test/test_blue_ocean_api.py
|
Python
|
mit
| 4,757 | 0 |
"""Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
"""
import re
import operator
_itemgetter = operator.itemgetter
_property = property
_tuple = tuple
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp',
                 'svn', 'svn+ssh']
# Schemes whose URLs carry a //network_location component.
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh']
# Schemes that may carry ;parameters in the last path segment.
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp', 'tel']
# These are not actually used anymore, but should stay for backwards
# compatibility.  (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')
# urlsplit() memoizes results in _parse_cache; the cache is wiped once it
# reaches MAX_CACHE_SIZE entries to bound memory use.
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
    """Clear the urlsplit() parse cache (see MAX_CACHE_SIZE)."""
    _parse_cache.clear()
class ResultMixin(object):
    """Shared methods for the parsed result objects.

    Exposes username/password/hostname/port as read-only properties
    derived from the ``netloc`` field of the result tuple.
    """
    # NOTE: properties are assigned via ``property(...)`` rather than the
    # decorator syntax, presumably for the target runtime -- confirm.
    # @property
    def username(self):
        # netloc format: [user[:password]@]host[:port]
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                userinfo = userinfo.split(":", 1)[0]
            return userinfo
        return None
    username = property(username)
    # @property
    def password(self):
        # Part of the userinfo after the first ':', or None when absent.
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)[1]
        return None
    password = property(password)
    # @property
    def hostname(self):
        # Lowercased host; IPv6 literals have their '[...]' brackets stripped.
        netloc = self.netloc.split('@')[-1]
        if '[' in netloc and ']' in netloc:
            return netloc.split(']')[0][1:].lower()
        elif ':' in netloc:
            return netloc.split(':')[0].lower()
        elif netloc == '':
            return None
        else:
            return netloc.lower()
    hostname = property(hostname)
    # @property
    def port(self):
        # Numeric port when present and in 0..65535; otherwise None.
        netloc = self.netloc.split('@')[-1].split(']')[-1]
        if ':' in netloc:
            port = netloc.split(':')[1]
            if port:
                port = int(port, 10)
                # verify legal port
                if (0 <= port <= 65535):
                    return port
        return None
    port = property(port)
# from collections import namedtuple
# Hand-expanded equivalent of ``namedtuple('SplitResult', ...)``: the code
# namedtuple would generate is inlined here instead of built at import
# time, presumably for the target runtime -- confirm.
class _SplitResult(tuple):
    'SplitResult(scheme, netloc, path, query, fragment)'
    __slots__ = ()
    _fields = ('scheme', 'netloc', 'path', 'query', 'fragment')
    def __new__(_cls, scheme, netloc, path, query, fragment):
        'Create new instance of SplitResult(scheme, netloc, path, query, fragment)'
        return _tuple.__new__(_cls, (scheme, netloc, path, query, fragment))
    # @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new SplitResult object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 5:
            raise TypeError('Expected 5 arguments, got %d' % len(result))
        return result
    _make = classmethod(_make)
    def __repr__(self):
        'Return a nicely formatted representation string'
        return 'SplitResult(scheme=%r, netloc=%r, path=%r, query=%r, fragment=%r)' % self
    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values'
        # NOTE(review): OrderedDict is never imported in this file, so
        # calling _asdict() (or reading __dict__) raises NameError -- confirm.
        return OrderedDict(zip(self._fields, self))
    def _replace(_self, **kwds):
        'Return a new SplitResult object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, ('scheme', 'netloc', 'path', 'query', 'fragment'), _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % kwds.keys())
        return result
    def __getnewargs__(self):
        'Return self as a plain tuple.  Used by copy and pickle.'
        return tuple(self)
    __dict__ = _property(_asdict)
    def __getstate__(self):
        'Exclude the OrderedDict from pickling'
        pass
    scheme = _property(_itemgetter(0), doc='Alias for field number 0')
    netloc = _property(_itemgetter(1), doc='Alias for field number 1')
    path = _property(_itemgetter(2), doc='Alias for field number 2')
    query = _property(_itemgetter(3), doc='Alias for field number 3')
    fragment = _property(_itemgetter(4), doc='Alias for field number 4')
# class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
class SplitResult(_SplitResult, ResultMixin):
    """Concrete 5-tuple result type returned by urlsplit()."""
    __slots__ = ()
    def geturl(self):
        """Reassemble this parse result into a URL string."""
        return urlunsplit(self)
# Hand-expanded equivalent of ``namedtuple('ParseResult', ...)``; see the
# note on _SplitResult above.
class _ParseResult(tuple):
    'ParseResult(scheme, netloc, path, params, query, fragment)'
    __slots__ = ()
    _fields = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
    def __new__(_cls, scheme, netloc, path, params, query, fragment):
        'Create new instance of ParseResult(scheme, netloc, path, params, query, fragment)'
        return _tuple.__new__(_cls, (scheme, netloc, path, params, query, fragment))
    # @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new ParseResult object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 6:
            raise TypeError('Expected 6 arguments, got %d' % len(result))
        return result
    _make = classmethod(_make)
    def __repr__(self):
        'Return a nicely formatted representation string'
        return 'ParseResult(scheme=%r, netloc=%r, path=%r, params=%r, query=%r, fragment=%r)' % self
    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values'
        # NOTE(review): OrderedDict is never imported in this file -- see
        # the matching note on _SplitResult._asdict.
        return OrderedDict(zip(self._fields, self))
    def _replace(_self, **kwds):
        'Return a new ParseResult object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, ('scheme', 'netloc', 'path', 'params', 'query', 'fragment'), _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % kwds.keys())
        return result
    def __getnewargs__(self):
        'Return self as a plain tuple.  Used by copy and pickle.'
        return tuple(self)
    __dict__ = _property(_asdict)
    def __getstate__(self):
        'Exclude the OrderedDict from pickling'
        pass
    scheme = _property(_itemgetter(0), doc='Alias for field number 0')
    netloc = _property(_itemgetter(1), doc='Alias for field number 1')
    path = _property(_itemgetter(2), doc='Alias for field number 2')
    params = _property(_itemgetter(3), doc='Alias for field number 3')
    query = _property(_itemgetter(4), doc='Alias for field number 4')
    fragment = _property(_itemgetter(5), doc='Alias for field number 5')
# class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
class ParseResult(_ParseResult, ResultMixin):
    """Concrete 6-tuple result type returned by urlparse()."""
    __slots__ = ()
    def geturl(self):
        """Reassemble this parse result into a URL string."""
        return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    # Fix: the local holding urlsplit()'s result was named ``tuple``,
    # shadowing the builtin; renamed.
    split = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = split
    # Only split off ;params for schemes that actually use them.
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    allow_fragments = bool(allow_fragments)
    # Memoize per (url, default scheme, fragment flag, input types).
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                # An IPv6 literal needs balanced brackets in the netloc.
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        # General case: only treat the prefix as a scheme if every
        # character before ':' is a legal scheme character.
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest
    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v
def urlunparse(data):
    """Reassemble a 6-tuple as produced by urlparse() into a URL string.

    The result may differ superficially from the originally-parsed URL if
    that URL contained redundant delimiters (e.g. a '?' with an empty
    query), but it denotes an equivalent URL.
    """
    scheme, netloc, path, params, query, fragment = data
    if params:
        path = path + ';' + params
    return urlunsplit((scheme, netloc, path, query, fragment))
def urlunsplit(data):
    """Combine a 5-item iterable (scheme, netloc, path, query, fragment),
    as returned by urlsplit(), back into a complete URL string.

    Redundant delimiters in the originally-parsed URL (such as a '?' with
    an empty query) are not restored; the result is an equivalent URL.
    """
    scheme, netloc, path, query, fragment = data
    wants_netloc = netloc or (
        scheme and scheme in uses_netloc and path[:2] != '//')
    if wants_netloc:
        if path and not path.startswith('/'):
            path = '/' + path
        path = '//' + (netloc or '') + path
    if scheme:
        path = scheme + ':' + path
    if query:
        path = path + '?' + query
    if fragment:
        path = path + '#' + fragment
    return path
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    # Degenerate inputs: an empty side contributes nothing.
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
        urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
        urlparse(url, bscheme, allow_fragments)
    # Different scheme, or one that doesn't allow relative refs: url wins.
    if scheme != bscheme or scheme not in uses_relative:
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    # Absolute path: keep everything from url except the inherited netloc.
    if path[:1] == '/':
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path and not params:
        # Relative reference with no path: inherit base path/params, and
        # the base query too when url supplies none.
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    # Merge: base directory segments + relative segments, then resolve
    # '.' and '..' components.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))
def urldefrag(url):
    """Remove any fragment from *url*.

    Returns ``(defragmented_url, fragment)``.  When the URL carries no
    fragment, the second element is the empty string.
    """
    if '#' not in url:
        return url, ''
    scheme, netloc, path, params, query, fragment = urlparse(url)
    rebuilt = urlunparse((scheme, netloc, path, params, query, ''))
    return rebuilt, fragment
# Python 2/3 compatibility shim: Python 3 has no ``unicode`` builtin, so
# there _is_unicode() degenerates to a constant false and the unicode
# branch of unquote() is never taken.
try:
    unicode
except NameError:
    def _is_unicode(x):
        return 0
else:
    def _is_unicode(x):
        return isinstance(x, unicode)
# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin). If you update this function,
# update it also in urllib. This code duplication does not exist in Python3.
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit string (all case combinations) to its character,
# so a '%XX' escape decodes with a single dict lookup.
_hextochr = dict((a+b, chr(int(a+b,16)))
                 for a in _hexdig for b in _hexdig)
# Matches maximal runs of ASCII characters inside a unicode string.
_asciire = re.compile('([\x00-\x7f]+)')
def unquote(s):
    """unquote('abc%20def') -> 'abc def'.

    Malformed escapes (a '%' not followed by two hex digits) are passed
    through unchanged rather than raising.
    """
    if _is_unicode(s):
        if '%' not in s:
            return s
        # Decode only the ASCII runs; non-ASCII parts of the unicode
        # string are carried through untouched.  The latin1 round-trip
        # re-widens decoded bytes back to unicode.
        bits = _asciire.split(s)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(unquote(str(bits[i])).decode('latin1'))
            append(bits[i + 1])
        return ''.join(res)
    bits = s.split('%')
    # fastpath
    if len(bits) == 1:
        return s
    res = [bits[0]]
    append = res.append
    for item in bits[1:]:
        try:
            append(_hextochr[item[:2]])
            append(item[2:])
        except KeyError:
            # Not a valid two-digit escape: keep the literal '%'.
            append('%')
            append(item)
    return ''.join(res)
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.

    Returns a dict mapping each name to the list of its values.
    """
    # Fix: the accumulator was named ``dict``, shadowing the builtin.
    result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        result.setdefault(name, []).append(value)
    return result
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.  If
        false (the default), errors are silently ignored.  If true,
        errors raise a ValueError exception.

    Returns a list of (name, value) pairs.
    """
    # Fields may be separated by either '&' or ';'.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %r" % (name_value,)
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            # '+' encodes a space in query strings; %XX escapes are decoded.
            name = unquote(nv[0].replace('+', ' '))
            value = unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
|
google/grumpy
|
third_party/stdlib/urlparse.py
|
Python
|
apache-2.0
| 19,619 | 0.001988 |
'''
Created on 21.03.2012
@author: michi
'''
from PyQt4.QtGui import QTableView
from ems.qt4.gui.mapper.base import BaseStrategy #@UnresolvedImport
from ems.xtype.base import DictType, ObjectInstanceType #@UnresolvedImport
from ems.qt4.gui.itemdelegate.xtypes.objectinstancetype import ObjectInstanceDelegate #@UnresolvedImport
class DictStrategy(BaseStrategy):
    """Mapper strategy handling DictType fields through an
    ObjectInstanceDelegate."""
    def match(self, param):
        """Return True when *param* is a DictType instance."""
        return isinstance(param, DictType)
    def getDelegateForItem(self, mapper, type_, parent=None):
        """Build the item delegate used to render/edit values of *type_*."""
        return ObjectInstanceDelegate(type_, parent)
    def addMapping(self, mapper, widget, columnName, type_):
        """Map *widget* to the model column named *columnName*.

        Only QTableView widgets are mapped; anything else is ignored.
        """
        if not isinstance(widget, QTableView):
            return
        column = mapper.model.columnOfName(columnName)
        mapper.dataWidgetMapper.addMapping(widget, column)
|
mtils/ems
|
ems/qt4/gui/mapper/strategies/dict_strategy.py
|
Python
|
mit
| 857 | 0.016336 |
# -*-coding:UTF-8 -*
import os
import Auth.authentication as auth
import Auth.login as log
import Menu.barreOutils as barre
import Users.Model as U
import Projects.Model as P
# Launch the authentication dialog (comment the next two lines out and
# uncomment the two stub assignments below when testing other modules).
login = log.Login()
login.fenetre.mainloop()
#auth.Auth.access = True
#auth.Auth.current_user_id = 1
# Main loop: runs as long as the user is authenticated.
while auth.Auth.access == True:
    print("programme en cours")
    user = U.User(auth.Auth.current_user_id)
    print("Bonjour", user.nom, user.prenom, "vous êtes dans la boucle")
    # Build and show the toolbar window (blocks until it is closed).
    barreOutils = barre.BarreOutils()
    barreOutils.fenetre.mainloop()
    # ``fermer`` is set to True when the user pressed the logout button.
    print("fermer = ",barreOutils.fermer)
    if barreOutils.fermer == True:
        auth.Auth.access = False
    else:
        # Windows-only console pause -- presumably targets Windows; confirm.
        os.system("pause")
    # ``access`` decides whether the while loop runs again.
    print("access = ", auth.Auth.access)
|
Aveias/gestt
|
main.py
|
Python
|
gpl-3.0
| 1,104 | 0.009991 |
"""
policy.py
Janbaanz Launde
Apr 1, 2017
"""
class Policy(object):
    """Abstract base class for all policies.

    Subclasses must override predict_arm() and pull_arm().
    """
    # Identifier for the policy; subclasses are expected to override it.
    name = 'POLICY'
    def __init__(self, contexts):
        # Context data available to the policy; format is defined by the
        # caller and not constrained here.
        self.contexts = contexts
    def predict_arm(self, contexts=None):
        """Choose an arm to play, optionally given fresh contexts."""
        raise NotImplementedError("You need to override this function in child class.")
    def pull_arm(self, arm, reward, contexts=None):
        """Record the reward observed after playing *arm*."""
        raise NotImplementedError("You need to override this function in child class.")
|
rakshify/News_Recommender
|
policy/policy.py
|
Python
|
mit
| 472 | 0.004237 |
"""
Extensions called during training to generate samples and diagnostic plots and printouts.
"""
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import theano.tensor as T
import theano
from blocks.extensions import SimpleExtension
import viz
import sampler
class PlotSamples(SimpleExtension):
    def __init__(self, model, algorithm, X, path, n_samples=49, **kwargs):
        """
        Generate samples from the model. The do() function is called as an extension during training.
        Generates 3 types of samples:
        - Sample from generative model
        - Sample from image denoising posterior distribution (default signal to noise of 1)
        - Sample from image inpainting posterior distribution (inpaint left half of image)
        """
        super(PlotSamples, self).__init__(**kwargs)
        self.model = model
        self.path = path
        # Keep a fixed batch of n_samples true images for conditioning the
        # denoising / inpainting samplers.
        self.X = X[:n_samples].reshape(
            (n_samples, model.n_colors, model.spatial_width, model.spatial_width))
        self.n_samples = n_samples
        # Compile the reverse-diffusion mean/variance function once at
        # construction so sampling does not recompile every epoch.
        X_noisy = T.tensor4('X noisy samp')
        t = T.matrix('t samp')
        self.get_mu_sigma = theano.function([X_noisy, t], model.get_mu_sigma(X_noisy, t),
            allow_input_downcast=True)
    def do(self, callback_name, *args):
        # Called by the Blocks main loop; filenames embed the epoch number.
        print "generating samples"
        base_fname_part1 = self.path + '/samples-'
        base_fname_part2 = '_epoch%04d'%self.main_loop.status['epochs_done']
        # Unconditional samples from the generative model.
        sampler.generate_samples(self.model, self.get_mu_sigma,
            n_samples=self.n_samples, inpaint=False, denoise_sigma=None, X_true=None,
            base_fname_part1=base_fname_part1, base_fname_part2=base_fname_part2)
        # Inpainting posterior samples conditioned on the stored images.
        sampler.generate_samples(self.model, self.get_mu_sigma,
            n_samples=self.n_samples, inpaint=True, denoise_sigma=None, X_true=self.X,
            base_fname_part1=base_fname_part1, base_fname_part2=base_fname_part2)
        # Denoising posterior samples at signal-to-noise 1.
        sampler.generate_samples(self.model, self.get_mu_sigma,
            n_samples=self.n_samples, inpaint=False, denoise_sigma=1, X_true=self.X,
            base_fname_part1=base_fname_part1, base_fname_part2=base_fname_part2)
class PlotParameters(SimpleExtension):
    """Training extension: dump a plot of every model parameter each time
    it fires."""
    def __init__(self, model, blocks_model, path, **kwargs):
        super(PlotParameters, self).__init__(**kwargs)
        self.path = path
        self.model = model
        self.blocks_model = blocks_model
    def do(self, callback_name, *args):
        print "plotting parameters"
        for param_name, param in self.blocks_model.params.iteritems():
            # Blocks parameter names look like /model/layer/param; drop the
            # first two path components and make the rest filename-safe.
            filename_safe_name = '-'.join(param_name.split('/')[2:]).replace(' ', '_')
            base_fname_part1 = self.path + '/params-' + filename_safe_name
            base_fname_part2 = '_epoch%04d'%self.main_loop.status['epochs_done']
            viz.plot_parameter(param.get_value(), base_fname_part1, base_fname_part2,
                title=param_name, n_colors=self.model.n_colors)
class PlotGradients(SimpleExtension):
    """Training extension: evaluate and plot the gradient of each model
    parameter on a fixed batch X."""
    def __init__(self, model, blocks_model, algorithm, X, path, **kwargs):
        super(PlotGradients, self).__init__(**kwargs)
        self.path = path
        self.X = X
        self.model = model
        self.blocks_model = blocks_model
        # Collect gradient expressions in sorted-parameter-name order so
        # they line up with ``keynames`` in do() below.
        gradients = []
        for param_name in sorted(self.blocks_model.params.keys()):
            gradients.append(algorithm.gradients[self.blocks_model.params[param_name]])
        self.grad_f = theano.function(algorithm.inputs, gradients, allow_input_downcast=True)
    def do(self, callback_name, *args):
        print "plotting gradients"
        grad_vals = self.grad_f(self.X)
        # Same sort order as in __init__, so indices match grad_vals.
        keynames = sorted(self.blocks_model.params.keys())
        for ii in xrange(len(keynames)):
            param_name = keynames[ii]
            val = grad_vals[ii]
            filename_safe_name = '-'.join(param_name.split('/')[2:]).replace(' ', '_')
            base_fname_part1 = self.path + '/grads-' + filename_safe_name
            base_fname_part2 = '_epoch%04d'%self.main_loop.status['epochs_done']
            viz.plot_parameter(val, base_fname_part1, base_fname_part2,
                title="grad " + param_name, n_colors=self.model.n_colors)
class PlotInternalState(SimpleExtension):
    """Training extension: evaluate the given internal-state variables on a
    fixed batch X and plot each one."""
    def __init__(self, model, blocks_model, state, features, X, path, **kwargs):
        super(PlotInternalState, self).__init__(**kwargs)
        self.path = path
        self.X = X
        self.model = model
        self.blocks_model = blocks_model
        # One compiled function returning every requested state variable.
        self.internal_state_f = theano.function([features], state, allow_input_downcast=True)
        # Remember variable names in the same order as the outputs.
        self.internal_state_names = []
        for var in state:
            self.internal_state_names.append(var.name)
    def do(self, callback_name, *args):
        print "plotting internal state of network"
        state = self.internal_state_f(self.X)
        for ii in xrange(len(state)):
            param_name = self.internal_state_names[ii]
            val = state[ii]
            filename_safe_name = param_name.replace(' ', '_').replace('/', '-')
            base_fname_part1 = self.path + '/state-' + filename_safe_name
            base_fname_part2 = '_epoch%04d'%self.main_loop.status['epochs_done']
            viz.plot_parameter(val, base_fname_part1, base_fname_part2,
                title="state " + param_name, n_colors=self.model.n_colors)
class PlotMonitors(SimpleExtension):
    """Training extension: plot the cost/train/test monitoring channels
    from the Blocks training log, skipping an initial burn-in period."""
    def __init__(self, path, burn_in_iters=0, **kwargs):
        super(PlotMonitors, self).__init__(**kwargs)
        self.path = path
        # Number of initial iterations excluded from the plots.
        self.burn_in_iters = burn_in_iters
    def do(self, callback_name, *args):
        print "plotting monitors"
        try:
            df = self.main_loop.log.to_dataframe()
        except AttributeError:
            # This starting breaking after a Blocks update.
            print "Failed to generate monitoring plots due to Blocks interface change."
            return
        iter_number = df.tail(1).index
        # Throw out the first burn_in values
        # as the objective is often much larger
        # in that period.
        if iter_number > self.burn_in_iters:
            df = df.loc[self.burn_in_iters:]
        cols = [col for col in df.columns if col.startswith(('cost', 'train', 'test'))]
        df = df[cols].interpolate(method='linear')
        # If we don't have any non-nan dataframes, don't plot
        if len(df) == 0:
            return
        try:
            axs = df.interpolate(method='linear').plot(
                subplots=True, legend=False, figsize=(5, len(cols)*2))
        except TypeError:
            # This starting breaking after a different Blocks update.
            print "Failed to generate monitoring plots due to Blocks interface change."
            return
        for ax, cname in zip(axs, cols):
            ax.set_title(cname)
        # One figure with a subplot per channel...
        fn = os.path.join(self.path,
            'monitors_subplots_epoch%04d.png' % self.main_loop.status['epochs_done'])
        plt.savefig(fn, bbox_inches='tight')
        plt.clf()
        # ...and one figure with every channel overlaid.
        df.plot(subplots=False, figsize=(15,10))
        plt.gcf().tight_layout()
        fn = os.path.join(self.path,
            'monitors_epoch%04d.png' % self.main_loop.status['epochs_done'])
        plt.savefig(fn, bbox_inches='tight')
        plt.close('all')
def decay_learning_rate(iteration, old_value, decay_epochs=1000.,
                        decay_factor=0.1, min_value=1e-5):
    """Geometrically anneal the learning rate; called once per epoch.

    The rate is multiplied each call by a constant chosen so that it shrinks
    by `decay_factor` over `decay_epochs` epochs, and is clamped from below.
    The previously hard-coded numbers are now keyword parameters with the
    original values as defaults, so existing two-argument calls are unchanged.

    :param iteration: current iteration/epoch index (unused; kept for the
                      callback signature expected by the training loop).
    :param old_value: the current learning rate.
    :param decay_epochs: epochs over which the rate decays by `decay_factor`.
    :param decay_factor: total shrink factor applied every `decay_epochs`.
    :param min_value: floor below which the rate is never reduced.
    :return: the new learning rate as np.float32.
    """
    decay_rate = np.exp(np.log(decay_factor) / decay_epochs)
    new_value = max(decay_rate * old_value, min_value)
    print("learning rate %g" % new_value)
    return np.float32(new_value)
|
JesseLivezey/Diffusion-Probabilistic-Models
|
extensions.py
|
Python
|
mit
| 7,673 | 0.006907 |
import sys
import os
import glob
import inspect
import pylab as pl
from numpy import *
from scipy import optimize
import pickle
import time
import copy
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]) + "/templates")
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from templutils import *
import pylabsetup
pl.ion()
#fits the vacca leibundgut model to data:
# a linear decay, with a gaussian peak on top, an exponential rise, and possibly a second gaussian (typically the Ia second bump around phase=25 days
def minfunc(p, y, x, e, secondg, plot=False):
    """Chi-square objective for the Vacca & Leibundgut light-curve model.

    Parameters
    ----------
    p : sequence of 10 model parameters
        p[0] first gaussian normalization (negative if fitting mag)
        p[1] first gaussian mean
        p[2] first gaussian sigma
        p[3] linear decay offset
        p[4] linear decay slope
        p[5] exponential rise slope
        p[6] exponential zero point
        p[7] second gaussian normalization (negative if fitting mag)
        p[8] second gaussian mean
        p[9] second gaussian sigma
    y, x, e : data magnitudes, epochs and uncertainties.
    secondg : include the second gaussian (the Ia secondary bump near
        phase ~25 days) when > 0.
    plot : when True, show the data being fit in figure 3.
    """
    if plot:
        pl.figure(3)
        pl.errorbar(x, y, yerr=e, color='k')
    model = mycavvaccaleib(x, p, secondg=(secondg > 0))
    return sum(((y - model) ** 2) / e ** 2)
import scipy.optimize
if __name__ == '__main__':
    # Fit the Vacca & Leibundgut model to a supernova light curve given as a
    # whitespace-separated file: columns are MJD, epoch, magnitude, error
    # (as indexed below).
    # NOTE(review): `np` is never imported in this module directly; it is
    # presumably exposed by templutils' star import -- confirm.
    lcv = np.loadtxt(sys.argv[1], unpack=True)
    # Optional second CLI argument > 0 enables the second gaussian bump.
    secondg = False
    try:
        if int(sys.argv[2]) > 0:
            secondg = True
    except:
        pass
    x = lcv[1]
    y = lcv[2]
    e = lcv[3]
    mjd = lcv[0]

    ax = pl.figure(0, figsize=(10,5)).add_subplot(111)
    #pl.errorbar(x, y, yerr=e, color="#47b56c", label="data")

    # Initial guess for the 10 model parameters (see minfunc's docstring).
    p0 = [0] * 10
    p0[0] = -4
    # Peak epoch = brightest point (minimum magnitude).
    peakdate = x[np.where(y == min(y))[0]]
    if len(peakdate) > 1:
        peakdate = peakdate[0]
    p0[1] = peakdate + 5
    p0[2] = 10  # sigma
    #pl.draw()
    # Seed the linear tail from data more than 50 days past peak; if there
    # is none, fall back to the last two points.
    lintail = np.where(x > peakdate + 50)[0]
    if len(lintail) < 1:
        print "no tail data"
        linfit = np.polyfit(x[-2:], y[-2:], 1)
        p0[3] = linfit[1]
        p0[4] = linfit[0]
    else:
        linfit = np.polyfit(x[lintail], y[lintail], 1)
        p0[3] = linfit[1]
        p0[4] = linfit[0]
    p0[5] = 0.1
    p0[6] = peakdate - 20
    p0[7] = -1
    p0[8] = peakdate + 25
    p0[9] = 10

    pl.figure(3)
    pl.clf()
    # pf= scipy.optimize.minimize(minfunc,p0,args=(y,x,1), method='Powell')#,options={'maxiter':5})
    # Derivative-free Powell minimization; the first 10 points are skipped.
    if secondg:
        p0[0] += 1.5
        p0[1] *= 2
        pl.plot(x[10:], mycavvaccaleib(x[10:], p0, secondg=True), 'm')
        pf = scipy.optimize.minimize(minfunc, p0, args=(y[10:], x[10:], e[10:], 1), method='Powell')  # ,options={'maxiter':5})
    else:
        pl.plot(x[10:], mycavvaccaleib(x[10:], p0, secondg=False), 'k')
        pf = scipy.optimize.minimize(minfunc, p0, args=(y[10:], x[10:], e[10:], 0), method='Powell')  # ,options={'maxiter':5})

    #pl.figure(4)
    pl.figure(0)
    ax.errorbar(mjd+0.5-53000, y, yerr=e, fmt=None, ms=7,
                alpha = 0.5, color='k', markersize=10,)
    ax.plot(mjd+0.5-53000, y, '.', ms=7,
            alpha = 0.5, color='#47b56c', markersize=10,
            label = "SN 19"+sys.argv[1].split('/')[-1].\
            replace('.dat', '').replace('.', ' '))
    # mycavvaccaleib(x,pf.x, secondg=True)
    mycavvaccaleib(x, pf.x, secondg=secondg)
    ax.plot(mjd[10:]+0.5-53000, mycavvaccaleib(x[10:], pf.x, secondg=secondg), 'k',
            linewidth=2, label="vacca leibundgut fit")  # , alpha=0.5)
    # pl.plot(x,mycavvaccaleib(x,pf.x, secondg=True), 'k',linewidth=2, label="fit")

    xlen = mjd.max() - mjd.min()
    ax.set_xlim(mjd.min()-xlen*0.02+0.5-53000, mjd.max()+xlen*0.02+0.5-53000)
    # Inverted y-axis: magnitudes grow downward (brighter = smaller).
    ax.set_ylim(max(y + 0.1), min(y - 0.1))
    ax2 = ax.twiny()
    # NOTE(review): presumably the JD of V-band maximum minus 2453000, used
    # as the phase zero point for the top axis -- confirm.
    Vmax = 2449095.23-2453000
    ax2.tick_params('both', length=10, width=1, which='major')
    ax2.tick_params('both', length=5, width=1, which='minor')
    ax2.set_xlabel("phase (days)")
    ax2.set_xlim((ax.get_xlim()[0] - Vmax, ax.get_xlim()[1] - Vmax))
    # pl.ylim(10,21)
    pl.draw()
    pl.legend()
    ax.set_xlabel("JD - 24530000")
    ax.set_ylabel("magnitude")
    #pl.title(sys.argv[1].split('/')[-1].replace('.dat', '').replace('.', ' '))
    #pl.show()
    pl.tight_layout()
    pl.savefig("../fits/" + sys.argv[1].split('/')[-1].replace('.dat', '.vdfit.pdf'))
    # Crop whitespace off the saved PDF with the external pdfcrop tool.
    cmd = "pdfcrop " + "../fits/" + sys.argv[1].split('/')[-1].replace('.dat', '.vdfit.pdf')
    print cmd
    os.system(cmd)
|
fedhere/SESNCfAlib
|
vaccaleibundgut.py
|
Python
|
mit
| 4,704 | 0.00744 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openipmi(AutotoolsPackage):
    """The Open IPMI project aims to develop an open code base
    to allow access to platform information using Intelligent
    Platform Management Interface (IPMI)."""

    homepage = "https://sourceforge.net/projects/openipmi/"
    url = "https://sourceforge.net/projects/openipmi/files/OpenIPMI%202.0%20Library/OpenIPMI-2.0.29.tar.gz"

    version('2.0.28', sha256='8e8b1de2a9a041b419133ecb21f956e999841cf2e759e973eeba9a36f8b40996')
    version('2.0.27', sha256='f3b1fafaaec2e2bac32fec5a86941ad8b8cb64543470bd6d819d7b166713d20b')

    depends_on('popt')
    depends_on('python')
    depends_on('termcap')
    depends_on('ncurses')

    def configure_args(self):
        # Pass the ncurses link flags through LIBS so configure links it.
        args = ['LIBS=' + self.spec['ncurses'].libs.link_flags]
        return args

    def install(self, spec, prefix):
        # Serial install; NOTE(review): presumably the install tree is not
        # parallel-make safe -- confirm before removing parallel=False.
        make('install', parallel=False)
|
iulian787/spack
|
var/spack/repos/builtin/packages/openipmi/package.py
|
Python
|
lgpl-2.1
| 1,087 | 0.00368 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy for reward prediction and boltzmann exploration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Tuple, Sequence
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.networks import heteroscedastic_q_network
from tf_agents.bandits.policies import constraints as constr
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.distributions import shifted_categorical
from tf_agents.policies import tf_policy
from tf_agents.policies import utils as policy_utilities
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.typing import types
@gin.configurable
class BoltzmannRewardPredictionPolicy(tf_policy.TFPolicy):
  """Class to build Reward Prediction Policies with Boltzmann exploration."""

  def __init__(self,
               time_step_spec: types.TimeStep,
               action_spec: types.NestedTensorSpec,
               reward_network: types.Network,
               temperature: types.FloatOrReturningFloat = 1.0,
               boltzmann_gumbel_exploration_constant: Optional[
                   types.Float] = None,
               observation_and_action_constraint_splitter: Optional[
                   types.Splitter] = None,
               accepts_per_arm_features: bool = False,
               constraints: Tuple[constr.NeuralConstraint, ...] = (),
               emit_policy_info: Tuple[Text, ...] = (),
               num_samples_list: Sequence[tf.Variable] = (),
               name: Optional[Text] = None):
    """Builds a BoltzmannRewardPredictionPolicy given a reward network.

    This policy takes a tf_agents.Network predicting rewards and chooses an
    action with weighted probabilities (i.e., using a softmax over the network
    estimates of value for each action).

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps.
      action_spec: A nest of BoundedTensorSpec representing the actions.
      reward_network: An instance of a `tf_agents.network.Network`,
        callable via `network(observation, step_type) -> (output, final_state)`.
      temperature: float or callable that returns a float. The temperature used
        in the Boltzmann exploration.
      boltzmann_gumbel_exploration_constant: optional positive float. When
        provided, the policy implements Neural Bandit with Boltzmann-Gumbel
        exploration from the paper:
        N. Cesa-Bianchi et al., "Boltzmann Exploration Done Right", NIPS 2017.
      observation_and_action_constraint_splitter: A function used for masking
        valid/invalid actions with each state of the environment. The function
        takes in a full observation and returns a tuple consisting of 1) the
        part of the observation intended as input to the network and 2) the
        mask. The mask should be a 0-1 `Tensor` of shape
        `[batch_size, num_actions]`. This function should also work with a
        `TensorSpec` as input, and should output `TensorSpec` objects for the
        observation and mask.
      accepts_per_arm_features: (bool) Whether the policy accepts per-arm
        features.
      constraints: iterable of constraints objects that are instances of
        `tf_agents.bandits.agents.NeuralConstraint`.
      emit_policy_info: (tuple of strings) what side information we want to get
        as part of the policy info. Allowed values can be found in
        `policy_utilities.PolicyInfo`.
      num_samples_list: list or tuple of tf.Variable's. Used only in
        Boltzmann-Gumbel exploration. Otherwise, empty.
      name: The name of this policy. All variables in this module will fall
        under that name. Defaults to the class name.

    Raises:
      NotImplementedError: If `action_spec` contains more than one
        `BoundedTensorSpec` or the `BoundedTensorSpec` is not valid.
    """
    policy_utilities.check_no_mask_with_arm_features(
        accepts_per_arm_features, observation_and_action_constraint_splitter)
    flat_action_spec = tf.nest.flatten(action_spec)
    if len(flat_action_spec) > 1:
      raise NotImplementedError(
          'action_spec can only contain a single BoundedTensorSpec.')

    self._temperature = temperature
    action_spec = flat_action_spec[0]
    # The spec must describe a single scalar discrete action.
    if (not tensor_spec.is_bounded(action_spec) or
        not tensor_spec.is_discrete(action_spec) or
        action_spec.shape.rank > 1 or
        action_spec.shape.num_elements() != 1):
      raise NotImplementedError(
          'action_spec must be a BoundedTensorSpec of type int32 and shape (). '
          'Found {}.'.format(action_spec))
    self._expected_num_actions = action_spec.maximum - action_spec.minimum + 1
    # Non-zero when the action range does not start at 0; sampled indices are
    # shifted by this amount before being returned.
    self._action_offset = action_spec.minimum
    reward_network.create_variables()
    self._reward_network = reward_network
    self._constraints = constraints

    self._boltzmann_gumbel_exploration_constant = (
        boltzmann_gumbel_exploration_constant)
    self._num_samples_list = num_samples_list
    # Boltzmann-Gumbel exploration has extra preconditions; validate early.
    if self._boltzmann_gumbel_exploration_constant is not None:
      if self._boltzmann_gumbel_exploration_constant <= 0.0:
        raise ValueError(
            'The Boltzmann-Gumbel exploration constant is expected to be ',
            'positive. Found: ', self._boltzmann_gumbel_exploration_constant)
      if self._action_offset > 0:
        raise NotImplementedError('Action offset is not supported when ',
                                  'Boltzmann-Gumbel exploration is enabled.')
      if accepts_per_arm_features:
        raise NotImplementedError(
            'Boltzmann-Gumbel exploration is not supported ',
            'for arm features case.')
      # One per-action sample counter is required.
      if len(self._num_samples_list) != self._expected_num_actions:
        raise ValueError(
            'Size of num_samples_list: ', len(self._num_samples_list),
            ' does not match the expected number of actions:',
            self._expected_num_actions)

    self._emit_policy_info = emit_policy_info
    predicted_rewards_mean = ()
    if policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info:
      predicted_rewards_mean = tensor_spec.TensorSpec(
          [self._expected_num_actions])
    bandit_policy_type = ()
    if policy_utilities.InfoFields.BANDIT_POLICY_TYPE in emit_policy_info:
      bandit_policy_type = (
          policy_utilities.create_bandit_policy_type_tensor_spec(shape=[1]))
    if accepts_per_arm_features:
      # The features for the chosen arm is saved to policy_info.
      chosen_arm_features_info = (
          policy_utilities.create_chosen_arm_features_info_spec(
              time_step_spec.observation))
      info_spec = policy_utilities.PerArmPolicyInfo(
          predicted_rewards_mean=predicted_rewards_mean,
          bandit_policy_type=bandit_policy_type,
          chosen_arm_features=chosen_arm_features_info)
    else:
      info_spec = policy_utilities.PolicyInfo(
          predicted_rewards_mean=predicted_rewards_mean,
          bandit_policy_type=bandit_policy_type)

    self._accepts_per_arm_features = accepts_per_arm_features

    super(BoltzmannRewardPredictionPolicy, self).__init__(
        time_step_spec, action_spec,
        policy_state_spec=reward_network.state_spec,
        clip=False,
        info_spec=info_spec,
        emit_log_probability='log_probability' in emit_policy_info,
        observation_and_action_constraint_splitter=(
            observation_and_action_constraint_splitter),
        name=name)

  @property
  def accepts_per_arm_features(self):
    """Whether observations are expected to carry per-arm features."""
    return self._accepts_per_arm_features

  def _variables(self):
    """Returns the reward network's variables plus constraint variables."""
    policy_variables = self._reward_network.variables
    for c in self._constraints:
      policy_variables.append(c.variables)
    return policy_variables

  def _get_temperature_value(self):
    """Resolves `temperature`, which may be a constant or a callable."""
    if callable(self._temperature):
      return self._temperature()
    return self._temperature

  def _distribution(self, time_step, policy_state):
    """Computes the action distribution for `time_step`.

    Predicts per-action rewards with the reward network, applies the
    action mask (if any), then either takes the Boltzmann-Gumbel argmax or
    samples from a temperature-scaled categorical distribution.
    """
    observation = time_step.observation
    if self.observation_and_action_constraint_splitter is not None:
      observation, _ = self.observation_and_action_constraint_splitter(
          observation)

    predictions, policy_state = self._reward_network(
        observation, time_step.step_type, policy_state)
    batch_size = tf.shape(predictions)[0]

    if isinstance(self._reward_network,
                  heteroscedastic_q_network.HeteroscedasticQNetwork):
      predicted_reward_values = predictions.q_value_logits
    else:
      predicted_reward_values = predictions

    predicted_reward_values.shape.with_rank_at_least(2)
    predicted_reward_values.shape.with_rank_at_most(3)
    if predicted_reward_values.shape[
        -1] is not None and predicted_reward_values.shape[
            -1] != self._expected_num_actions:
      raise ValueError(
          'The number of actions ({}) does not match the reward_network output'
          ' size ({}).'.format(self._expected_num_actions,
                               predicted_reward_values.shape[1]))

    mask = constr.construct_mask_from_multiple_sources(
        time_step.observation, self._observation_and_action_constraint_splitter,
        self._constraints, self._expected_num_actions)

    if self._boltzmann_gumbel_exploration_constant is not None:
      logits = predicted_reward_values

      # Apply masking if needed. Overwrite the logits for invalid actions to
      # logits.dtype.min.
      if mask is not None:
        almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)
        logits = tf.compat.v2.where(
            tf.cast(mask, tf.bool), logits, almost_neg_inf)

      gumbel_dist = tfp.distributions.Gumbel(loc=0., scale=1.)
      gumbel_samples = gumbel_dist.sample(tf.shape(logits))
      num_samples_list_float = tf.stack(
          [tf.cast(x.read_value(), tf.float32) for x in self._num_samples_list],
          axis=-1)
      # Per-action exploration bonus shrinks with the number of samples.
      exploration_weights = tf.math.divide_no_nan(
          self._boltzmann_gumbel_exploration_constant,
          tf.sqrt(num_samples_list_float))
      final_logits = logits + exploration_weights * gumbel_samples
      actions = tf.cast(
          tf.math.argmax(final_logits, axis=1), self._action_spec.dtype)
      # Log probability is not available in closed form. We treat this as a
      # deterministic policy at the moment.
      log_probability = tf.zeros([batch_size], tf.float32)
    else:
      # Apply the temperature scaling, needed for Boltzmann exploration.
      logits = predicted_reward_values / self._get_temperature_value()

      # Apply masking if needed. Overwrite the logits for invalid actions to
      # logits.dtype.min.
      if mask is not None:
        almost_neg_inf = tf.constant(logits.dtype.min, dtype=logits.dtype)
        logits = tf.compat.v2.where(
            tf.cast(mask, tf.bool), logits, almost_neg_inf)

      if self._action_offset != 0:
        distribution = shifted_categorical.ShiftedCategorical(
            logits=logits,
            dtype=self._action_spec.dtype,
            shift=self._action_offset)
      else:
        distribution = tfp.distributions.Categorical(
            logits=logits,
            dtype=self._action_spec.dtype)

      actions = distribution.sample()
      log_probability = distribution.log_prob(actions)

    bandit_policy_values = tf.fill([batch_size, 1],
                                   policy_utilities.BanditPolicyType.BOLTZMANN)

    if self._accepts_per_arm_features:
      # Saving the features for the chosen action to the policy_info.
      def gather_observation(obs):
        return tf.gather(params=obs, indices=actions, batch_dims=1)

      chosen_arm_features = tf.nest.map_structure(
          gather_observation,
          observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])
      policy_info = policy_utilities.PerArmPolicyInfo(
          log_probability=log_probability if
          policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info
          else (),
          predicted_rewards_mean=(
              predicted_reward_values if policy_utilities.InfoFields
              .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),
          bandit_policy_type=(bandit_policy_values
                              if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                              in self._emit_policy_info else ()),
          chosen_arm_features=chosen_arm_features)
    else:
      policy_info = policy_utilities.PolicyInfo(
          log_probability=log_probability if
          policy_utilities.InfoFields.LOG_PROBABILITY in self._emit_policy_info
          else (),
          predicted_rewards_mean=(
              predicted_reward_values if policy_utilities.InfoFields
              .PREDICTED_REWARDS_MEAN in self._emit_policy_info else ()),
          bandit_policy_type=(bandit_policy_values
                              if policy_utilities.InfoFields.BANDIT_POLICY_TYPE
                              in self._emit_policy_info else ()))

    return policy_step.PolicyStep(
        tfp.distributions.Deterministic(loc=actions), policy_state, policy_info)
|
tensorflow/agents
|
tf_agents/bandits/policies/boltzmann_reward_prediction_policy.py
|
Python
|
apache-2.0
| 13,821 | 0.004197 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Tools/Analysis and Exploration/Interactive Descendant Browser"""
#------------------------------------------------------------------------
#
# GTK/GNOME modules
#
#------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE
from gramps.gen.display.name import displayer as name_displayer
from gramps.gui.plug import tool
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().sgettext
from gramps.gui.glade import Glade
from gramps.gui.editors import EditPerson
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Interactive_Descendant_Browser...')
class DesBrowse(tool.ActivePersonTool, ManagedWindow):
    """Interactive browser over the active person's descendant tree;
    double-clicking a row opens that person in the editor."""

    def __init__(self, dbstate, uistate, options_class, name, callback=None):
        tool.ActivePersonTool.__init__(self, dbstate, uistate, options_class,
                                       name)

        if self.fail:
            # Base class found no usable active person; do not build the UI.
            return

        self.dbstate = dbstate
        active_handle = uistate.get_active('Person')
        self.active = dbstate.db.get_person_from_handle(active_handle)
        self.callback = callback
        self.active_name = _("Descendant Browser: %s") % (
            name_displayer.display(self.active)
            )
        ManagedWindow.__init__(self, uistate, [], self)

        self.glade = Glade()
        self.glade.connect_signals({
            "destroy_passed_object" : self.close,
            "on_help_clicked" : self.on_help_clicked,
            "on_delete_event" : self.close,
            })

        window = self.glade.toplevel
        self.set_window(window,self.glade.get_object('title'),
                        self.active_name)

        # Single-column tree: column 0 shows the name, column 1 (hidden)
        # stores the person handle.
        self.tree = self.glade.get_object("tree1")
        col = Gtk.TreeViewColumn('',Gtk.CellRendererText(),text=0)
        self.tree.append_column(col)
        self.tree.set_rules_hint(True)
        self.tree.set_headers_visible(False)
        self.tree.connect('button-press-event', self.button_press_event)
        self.make_new_model()
        self.show()

    def build_menu_names(self, obj):
        """Return the (window, menu) titles used by the window manager."""
        return (self.active_name,_("Descendant Browser tool"))

    def make_new_model(self):
        """(Re)build the tree model from the active person's descendants."""
        self.model = Gtk.TreeStore(str, object)
        self.tree.set_model(self.model)
        self.add_to_tree(None, None, self.active.get_handle())
        self.tree.expand_all()

    def on_help_clicked(self, obj):
        """Display the relevant portion of GRAMPS manual"""
        display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)

    def add_to_tree(self, parent_id, sib_id, person_handle):
        """Recursively insert `person_handle` and all of its descendants
        under row `parent_id`, after sibling `sib_id`; return the new row."""
        item_id = self.model.insert_after(parent_id, sib_id)

        person = self.db.get_person_from_handle(person_handle)

        self.model.set(item_id, 0, name_displayer.display(person))
        self.model.set(item_id, 1, person_handle)
        prev_id = None
        for family_handle in person.get_family_handle_list():
            family = self.db.get_family_from_handle(family_handle)
            for child_ref in family.get_child_ref_list():
                prev_id = self.add_to_tree(item_id, prev_id, child_ref.ref)
        return item_id

    def button_press_event(self, obj, event):
        """On a double left-click, open the selected person for editing."""
        if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
            store, node = self.tree.get_selection().get_selected()
            if node:
                person_handle = store.get_value(node, 1)
                person = self.db.get_person_from_handle(person_handle)
                EditPerson(self.dbstate, self.uistate, self.track, person,
                           self.this_callback)

    def this_callback(self, obj):
        """After an edit completes: notify the caller and rebuild the tree."""
        self.callback()
        self.make_new_model()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class DesBrowseOptions(tool.ToolOptions):
    """
    Defines options and provides handling interface.
    """

    def __init__(self, name,person_id=None):
        # No tool-specific options; defer entirely to the base class.
        tool.ToolOptions.__init__(self, name,person_id)
|
Forage/Gramps
|
gramps/plugins/tool/desbrowser.py
|
Python
|
gpl-2.0
| 5,538 | 0.004514 |
import os
import subprocess

from utlz import func_has_arg, namedtuple
# Result record for run_cmd().  `exitcode` is None when the command timed
# out.  stdout/stderr hold the raw bytes; the lazy stdout_str/stderr_str
# attributes decode them as UTF-8 on first access.
CmdResult = namedtuple(
    typename='CmdResult',
    field_names=[
        'exitcode',
        'stdout',  # type: bytes
        'stderr',  # type: bytes
        'cmd',
        'input',
    ],
    lazy_vals={
        'stdout_str': lambda self: self.stdout.decode('utf-8'),
        'stderr_str': lambda self: self.stderr.decode('utf-8'),
    }
)
def run_cmd(cmd, input=None, timeout=30, max_try=3, num_try=1):
    '''Run command `cmd` and return a `CmdResult`.

    It's like that, and that's the way it is.

    :param cmd: command as a whitespace-separated string or argument list.
    :param input: optional bytes written to the child's stdin.  (Previously
                  this was silently discarded, because stdin was always
                  bound to /dev/null rather than a pipe.)
    :param timeout: seconds to wait before aborting an attempt; None waits
                    forever.
    :param max_try: total number of attempts made when a timeout occurs.
    :param num_try: current attempt number (internal, used for recursion).
    :return: `CmdResult`; its `exitcode` is None if every attempt timed out.
    '''
    if isinstance(cmd, str):
        cmd = cmd.split()
    # Use a pipe only when there is data to feed; otherwise read from the
    # platform's null device (os.devnull is portable, unlike '/dev/null',
    # and the handle is closed in the finally block instead of leaking).
    if input is None:
        stdin = open(os.devnull, 'r')
    else:
        stdin = subprocess.PIPE
    try:
        process = subprocess.Popen(cmd,
                                   stdin=stdin,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        communicate_has_timeout = func_has_arg(func=process.communicate,
                                               arg='timeout')

        exception = Exception
        if communicate_has_timeout:
            exception = subprocess.TimeoutExpired  # python 3.x

        stdout = stderr = b''
        exitcode = None
        try:
            if communicate_has_timeout:
                # python 3.x
                stdout, stderr = process.communicate(input, timeout)
                exitcode = process.wait()
            else:
                # python 2.x
                if timeout is None:
                    stdout, stderr = process.communicate(input)
                    exitcode = process.wait()
                else:
                    # thread-recipe: https://stackoverflow.com/a/4825933
                    def target():
                        # closure-recipe: https://stackoverflow.com/a/23558809
                        target.out, target.err = process.communicate(input)
                    import threading
                    thread = threading.Thread(target=target)
                    thread.start()
                    thread.join(timeout)
                    if thread.is_alive():
                        process.terminate()
                        thread.join()
                        exitcode = None
                    else:
                        exitcode = process.wait()
                    stdout = target.out
                    stderr = target.err
        except exception:
            # Kill and reap the timed-out child so it cannot linger as a
            # zombie while we retry (the py3 docs prescribe kill() +
            # communicate() after TimeoutExpired).
            try:
                process.kill()
                process.communicate()
            except OSError:
                pass
            if num_try < max_try:
                return run_cmd(cmd, input, timeout, max_try, num_try + 1)
    finally:
        if stdin is not subprocess.PIPE:
            stdin.close()
    return CmdResult(exitcode, stdout, stderr, cmd, input)
|
theno/utlz
|
utlz/cmd.py
|
Python
|
mit
| 2,420 | 0 |
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import pytest
import radish.utils as utils
@pytest.mark.filterwarnings("ignore")
def test_getting_any_debugger():
    """When asking for a debugger it should return one

    It shouldn't matter if IPython is installed or not,
    just give me that debugger.
    """
    # when
    debugger = utils.get_debugger()

    # then
    # `runcall` is the common entry point exposed by pdb-style debuggers.
    assert callable(debugger.runcall)
def test_utils_should_locate_arbitrary_python_object():
    """locate_python_object should resolve a builtin from its name."""
    located = utils.locate_python_object("str")

    assert located == str
def test_converting_pos_args_into_kwargs():
    """Positional values are mapped onto the function's parameter names,
    skipping the first (placeholder) parameter."""
    def func(_, arg1, arg2, kwarg1=1, kwargs2=2):
        pass

    positional_values = ["arg1-value", "arg2-value"]

    converted = utils.get_func_pos_args_as_kwargs(func, positional_values)

    assert converted == {"arg1": "arg1-value", "arg2": "arg2-value"}
|
radish-bdd/radish
|
tests/unit/test_utils.py
|
Python
|
mit
| 1,022 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from airflow.models import Connection
from airflow.models.dag import DAG
from airflow.providers.mongo.hooks.mongo import MongoHook
from airflow.providers.mongo.sensors.mongo import MongoSensor
from airflow.utils import db, timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
@pytest.mark.integration("mongo")
class TestMongoSensor(unittest.TestCase):
    """Integration test: MongoSensor poking a live MongoDB instance."""

    def setUp(self):
        """Register the mongo connection, seed one document, build the sensor."""
        db.merge_conn(
            Connection(conn_id='mongo_test', conn_type='mongo', host='mongo', port='27017', schema='test')
        )
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG('test_dag_id', default_args=args)

        hook = MongoHook('mongo_test')
        hook.insert_one('foo', {'bar': 'baz'})

        self.sensor = MongoSensor(
            task_id='test_task',
            mongo_conn_id='mongo_test',
            dag=self.dag,
            collection='foo',
            query={'bar': 'baz'},
        )

    def test_poke(self):
        # The seeded document matches the sensor's query, so poke succeeds.
        assert self.sensor.poke(None)
|
nathanielvarona/airflow
|
tests/providers/mongo/sensors/test_mongo.py
|
Python
|
apache-2.0
| 1,820 | 0.000549 |
# Copyright 2009 - 2014 Insight Centre for Data Analytics, UCC
# Solver outcome codes.
UNSAT, SAT, UNKNOWN, LIMITOUT = 0, 1, 2, 3
# Restart-sequence policies (Luby / geometric).
LUBY, GEOMETRIC = 0, 1
# Large cost constant used as an upper bound for optimisation.
MAXCOST = 100000000

from .solvers import available_solvers
import weakref
import exceptions
import datetime
import types
import sys

#SDG: extend recursive limit for predicate decomposition
sys.setrecursionlimit(10000)

#SDG: needed by the default eval method in BinPredicate
import operator

# Names of the value- and variable-ordering heuristics accepted by solvers.
val_heuristics = ['Lex', 'AntiLex', 'Random', 'RandomMinMax', 'DomainSplit', 'RandomSplit', 'Promise', 'Impact', 'No', 'Guided']
var_heuristics = ['No', 'MinDomain', 'Lex', 'AntiLex', 'MaxDegree', 'MinDomainMinVal', 'Random', 'MinDomainMaxDegree', 'DomainOverDegree', 'DomainOverWDegree', 'DomainOverWLDegree', 'Neighbour', 'Impact', 'ImpactOverDegree', 'ImpactOverWDegree', 'ImpactOverWLDegree', 'Scheduling']
def flatten(x):
    """Recursively flatten nested iterables into a single list.

    Strings and Expression subclasses are treated as atomic leaves and are
    never descended into.
    """
    flat = []
    for item in x:
        is_nested = (hasattr(item, "__iter__")
                     and not isinstance(item, basestring)
                     and not issubclass(type(item), Expression))
        if is_nested:
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
def numeric(x):
    """True iff x is exactly an int or a float.

    Note the exact-type check: subclasses such as bool (and numpy scalar
    types) do not count as numeric here.
    """
    return type(x) in (int, float)
# Numberjack exceptions:
class ConstraintNotSupportedError(exceptions.Exception):
    """
    Raised if the solver being loaded does not support the constraint, and no
    decomposition is available for the constraint. For example in the case of
    loading a division expression with a Mixed Integer Programming solver.
    """

    def __init__(self, value, solver=None):
        # value: the unsupported constraint; solver: the rejecting solver.
        self.value = value
        self.solver = solver

    def __str__(self):
        return "ERROR: Constraint %s not supported by solver %s and no decomposition is available." % (self.value, self.solver)
class UnsupportedSolverFunction(exceptions.Exception):
    """
    Raised if a solver does not support a particular API call.
    """

    def __init__(self, solver, func_name, msg=""):
        # solver: the offending solver; func_name: the unsupported API call;
        # msg: optional extra context appended to the error string.
        self.solver = solver
        self.func_name = func_name
        self.msg = msg

    def __str__(self):
        return "ERROR: The solver %s does not support the function '%s'. %s" % (self.solver, self.func_name, self.msg)
class InvalidEncodingException(exceptions.Exception):
    """
    Raised if an invalid encoding was specified, for example if no domain
    encoding is turned on.
    """

    def __init__(self, msg=""):
        # msg: description of the invalid encoding configuration.
        self.msg = msg

    def __str__(self):
        return "ERROR: Invalid encoding configuration. %s" % self.msg
class InvalidConstraintSpecification(exceptions.Exception):
    """
    Raised in the case of the invalid use of a constraint.
    """

    def __init__(self, msg=""):
        # msg: description of what was wrong with the constraint usage.
        self.msg = msg

    def __str__(self):
        return "ERROR: Invalid constraint specification. %s" % self.msg
class ModelSizeError(exceptions.Exception):
    """
    Raised if the size of a model has grown excessively large when decomposing
    some constraints for a solver.
    """

    def __init__(self, value, solver=None):
        # value: the offending size; solver: the solver being decomposed for.
        self.value = value
        self.solver = solver

    def __str__(self):
        return "ERROR: Model decomposition size too big %s for solver %s." % (self.value, self.solver)
# Numberjack domain and expressions:
class Domain(list):

    def __init__(self, arg1, arg2=None):
        """
        \internal
        This class is used to wrap the domain of variables
        in order to print them and/or iterate over values

        Initialised from a list of values, or a lower and an upper bound
        """
        if arg2 is None:
            # Extensional domain: an explicit, sorted list of values.
            list.__init__(self, arg1)
            self.sort()
            self.is_bound = False
        else:
            # Interval domain: only the [lower, upper] bounds are stored.
            list.__init__(self, [arg1, arg2])
            self.is_bound = True
        # Cursor for the manual iteration protocol implemented by next().
        self.current = -1

    def next(self):
        """
        \internal
        Returns the next value when iterating
        """
        self.current += 1
        if self.is_bound:
            # Interval domain: enumerate lb, lb+1, ..., ub.
            if self[0] + self.current > self[-1]:
                raise StopIteration
            else:
                return self[0] + self.current
        else:
            # Extensional domain: walk the stored values in order.
            if self.current >= list.__len__(self):
                raise StopIteration
            else:
                return list.__getitem__(self, self.current)

    def __str__(self):
        """
        \internal
        """
        if self.is_bound:
            lb = self[0]
            ub = self[-1]
            # Two adjacent ints print as '{lb,ub}', otherwise '{lb..ub}'.
            if lb + 1 == ub and type(lb) is int:
                return '{' + str(lb) + ',' + str(ub) + '}'
            else:
                return '{' + str(lb) + '..' + str(ub) + '}'

        def extend(idx):
            # Grow a run of consecutive integers starting at self[idx];
            # returns (first, last, index one past the run).
            x = self[idx]
            y = x
            idx += 1
            while idx < len(self):
                if type(self[idx]) is int and self[idx] == y + 1:
                    y = self[idx]
                else:
                    break
                idx += 1
            return (x, y, idx)

        # Compress consecutive integer runs into 'a..b' notation.
        ret_str = '{'
        idx = 0
        while idx < len(self):
            if idx > 0:
                ret_str += ','
            (x, y, idx) = extend(idx)
            ret_str += str(x)
            if type(x) is int and x + 1 < y:
                ret_str += ('..' + str(y))
            elif x != y:
                ret_str += (',' + str(y))
        return ret_str + '}'
class Expression(object):
"""
Base class from which all expressions and variables inherit.
:param str operator: the name of this expression operator or variable name.
"""
    def __init__(self, operator):
        """Initialise a fresh, unloaded expression named `operator`."""
        #self.mod = None
        # Identifier assigned when the expression is loaded into a solver;
        # -1 means "not loaded yet".
        self.ident = -1
        self.operator = operator

        # This is the stuff for maintaining multiple representations of the
        # model among different solvers
        self.var_list = []
        self.encoding = None
        self.solver = None
def __iter__(self):
return self.get_domain()
def get_solver(self):
"""
Returns the solver with which this expression was last loaded.
:return: The last loaded solver, or `None` if it has not been loaded
anywhere.
:rtype: `NBJ_STD_Solver`
"""
if getattr(self, 'solver', False):
return self.solver
else:
return None
def initial(self):
"""
Returns a string representing the initial domain of the expression. For
example:
.. code-block:: python
var1 = Variable(0, 10)
print var1.initial()
>>> x0 in {0..10}
:return: A String representation of original expression definition
:rtype: str
"""
output = self.name()
if self.domain_ is None:
output += ' in ' + str(Domain(self.lb, self.ub))
else:
output += ' in ' + str(Domain(self.domain_))
return output
def domain(self, solver=None):
"""
Returns a string representing the current domain of the expression.
:param `NBJ_STD_Solver` solver: If specified, the solver for
which this expression has been loaded. If not specified, the solver
that has most recenlty loaded the expression will be used.
"""
output = self.name() + ' in ' + str(self.get_domain(solver=solver))
return output
## Returns a string containing the value of the expression
# @param solver Solver from which expression solution will be sourced
# @return String representation of expression solution
#
# solution(self, solver=None) :- Returns a string representing the current solution
# of the expression in the solver specified. If no solver is specified then
# the returned string represents the solution to the expression in the solver
# that has most recently loaded and solved the expression.
#
def solution(self, solver=None):
"""
.. deprecated:: 1.1
Instead you should use :func:`get_value`, this function is equivalent
to calling :func:`str` on that.
Returns a string containing the solution value of the expression. For a
native representation of the solution value, use :func:`get_value`
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
solution will be sourced, if `None` then the most recently loaded
solver is used.
:return: String representation of the expressions solution.
:rtype: str
"""
return str(self.get_value(solver))
def name(self):
"""
The name of the variable or the operator if this is an expression.
"""
return self.operator
def __str__(self):
if self.is_built() and self.solver.is_sat():
return self.solution()
else:
return self.domain()
def is_str(self):
lb = getattr(self, 'lb', None)
if lb is not None:
return not numeric(lb)
return False
def getVar(self, solver_id):
# \internal
return self.var_list[solver_id - 1]
def setVar(self, solver_id, solver_name, variable, new_solver=None):
# \internal
if (solver_id - 1) < len(self.var_list):
self.var_list[solver_id - 1] = variable
else:
self.var_list.append(variable)
def has_children(self):
# \internal
return hasattr(self, 'children')
def has_parameters(self):
# \internal
return hasattr(self, 'parameters')
def is_built(self, solver=None):
if solver is None:
return len(self.var_list) > 0
else:
return solver.solver_id - 1 < len(self.var_list)
def is_var(self):
return not issubclass(type(self), Predicate)
def close(self):
# \internal
if self.has_children():
for child in self.children:
tc = type(child)
if tc not in [int, long, float, str, bool]:
child.close()
def get_domain(self, solver=None):
"""
Creates a new :class:`Domain` instance representing the current domain
of the expression.
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
domain will be sourced, if `None` then the most recently loaded
solver is used.
:return: The current domain of the expression.
:rtype: Domain
"""
if self.is_built(solver):
if solver is None:
solver = self.solver
lb, ub = self.get_min(solver), self.get_max(solver)
if self.get_size(solver) == (ub - lb + 1):
dom = range(lb, ub + 1)
else:
# we should make that more efficient by using the underlying
# solvers to iterate
dom = [lb]
while True:
v = solver.next(self, dom[-1])
if v <= dom[-1]:
break
else:
dom.append(v)
return Domain(dom)
elif self.domain_ is not None:
return Domain(self.domain_)
else:
return Domain(self.lb, self.ub)
def get_value(self, solver=None):
"""
The current value of the expression. Should be used to retrieve the
assigned value of a variable and the value of expressions like the
objective function.
In the case of variables, it may not be passed in to the solver if it is
not involved in a non-trivial constraint. For example, `x <= 1`, `x`
will not get added (by this constraint alone) if it has a upper bound
which is less or equal to 1. The variable's lower bound will be returned
as the value in this case.
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
value will be sourced, if `None` then the most recently loaded
solver is used.
:return: The current value of the expression.
:rtype: The same as the original domain, either `int`, `float`, or
`str`.
"""
has_value = False
if self.is_built(solver):
if self.solver.is_sat():
has_value = True
value = None
# In the case of a variable not being created in the interface, return
# lb as per the doc above.
if len(self.var_list) == 0 or \
(solver and ((solver.solver_id - 1) < len(self.var_list) or
(solver.solver_id - 1) == 0)):
has_value = False
value = self.lb
if has_value:
if solver is not None:
var = self.var_list[solver.solver_id - 1]
else:
var = self.var_list[-1]
if self.is_str():
value = self.model.strings[var.get_value()]
else:
value = var.get_value()
if isinstance(self, Variable):
value = type(self.lb)(value)
return value
def get_size(self, solver=None):
"""
The current size of the expression's domain.
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
domain size will be sourced, if `None` then the most recently loaded
solver is used.
:return: The size of the expression's domain.
:rtype: `int`
"""
if solver is not None:
if self.is_built(solver):
return self.var_list[solver.solver_id - 1].get_size()
else:
return self.ub - self.lb + 1
elif self.is_built():
return self.var_list[-1].get_size()
else:
return self.ub - self.lb + 1
def get_min(self, solver=None):
"""
Current lower bound of the expression.
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
lower bound will be sourced, if `None` then the most recently loaded
solver is used.
:return: The current lower bound of the expression.
:rtype: The same as the original domain, either `int`, `float`, or
`str`.
"""
the_min = self.lb
if solver is not None:
if self.is_built(solver):
the_min = self.var_list[solver.solver_id - 1].get_min()
elif self.is_built():
the_min = self.var_list[-1].get_min()
if self.is_str():
return self.model.strings[the_min]
return the_min
def get_max(self, solver=None):
"""
Current upper bound of variable.
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
upper bound will be sourced, if `None` then the most recently loaded
solver is used.
:return: The current upper bound of the variable.
:rtype: The same as the original domain, either `int`, `float`, or
`str`.
"""
the_max = self.ub
if solver is not None:
if self.is_built(solver):
the_max = self.var_list[solver.solver_id - 1].get_max()
elif self.is_built():
the_max = self.var_list[-1].get_max()
if self.is_str():
return self.model.strings[the_max]
return the_max
# SDG: methods to access initial lb and ub and domain of the expression or
# one of its children
def get_ub(self, child=None):
if (child == None):
return self.ub
else:
return self.children[child].ub if issubclass(type(self.children[child]), Expression) else self.children[child]
def get_lb(self, child=None):
if (child == None):
return self.lb
else:
return self.children[child].lb if issubclass(type(self.children[child]), Expression) else self.children[child]
# not safe! FIXME
def get_domain_tuple(self):
if self.is_str():
tmp_domain = sorted([self.model.string_map[value] for value in self.domain_])
return (tmp_domain[0], tmp_domain[len(tmp_domain) - 1], tmp_domain)
else:
return (self.lb, self.ub, self.domain_)
def get_children(self):
# \internal
if self.has_children():
return self.children
else:
return None
def get_operator(self):
# \internal
return self.operator
def __and__(self, pred):
return And([self, pred])
def __rand__(self, pred):
return And([self, pred])
def __or__(self, pred):
return Or([self, pred])
def __ror__(self, pred):
return Or([self, pred])
def __add__(self, pred):
return Sum([self, pred], [1, 1])
def __radd__(self, pred):
return Sum([pred, self], [1, 1])
def __sub__(self, pred):
var = Sum([self, pred], [1, -1])
var.name = '(' + str(self) + '-' + str(pred) + ')'
return var
def __rsub__(self, pred):
return Sum([pred, self], [1, -1])
def __div__(self, pred):
return Div([self, pred])
def __rdiv__(self, pred):
return Div([pred, self])
def __mul__(self, pred):
return Mul([self, pred])
def __rmul__(self, pred):
return Mul([self, pred])
def __mod__(self, pred):
return Mod([self, pred])
def __rmod__(self, pred):
return Mod([pred, self])
def __eq__(self, pred):
return Eq([self, pred])
def __ne__(self, pred):
return Ne([self, pred])
def __lt__(self, pred):
return Lt([self, pred])
def __gt__(self, pred):
return Gt([self, pred])
def __le__(self, pred):
return Le([self, pred])
def __ge__(self, pred):
return Ge([self, pred])
def __neg__(self):
return Neg([self])
def __contains__(self, v):
if self.is_built():
return self.var_list[-1].contain(v)
elif self.domain_ is None:
return self.lb <= v <= self.ub
else:
return v in self.domain_
class Model(object):
    """
    Model object which stores the variables and constraints. The constraints
    declarations are trees, whose internal nodes are predicates or constraints
    and leaves are variables.
    Model can be initialized with any number of arguments. Each argument will be
    treated as an :class:`Expression` or a list of :class:`Expression` to be
    added into the model. If no argument is given the Model will be initially
    empty. An :class:`Expression` can be subsequently added using the method
    :func:`add` or the operator '+='.
    """
    def __init__(self, *expr):
        ## \internal - List of expressions (predicate trees) that where added to
        # the model
        self.__expressions = []
        #: A :class:`VarArray`: containing the leaves of the predicate trees.
        self.variables = VarArray([])
        #: A :class:`VarArray`: containing the roots of the predicate trees.
        self.constraints = VarArray([])
        #SDG: initial bounds for optimization problems
        ## Initial upper bound for a minimization problem
        self.upper_bound = MAXCOST
        ## Initial lower bound for a maximization problem
        self.lower_bound = -MAXCOST
        ## \internal - Before giving an expression to a solver, or before
        # printing it, it needs to
        self.closed = 0
        ## \internal - Every new solver get assigned an unique id
        self.current_id = 0
        ## \internal - Initialise from an expression?
        if len(expr) > 0:
            self.add_prime(expr)
    def getSolverId(self):
        # \internal - generates an ident for each new solver
        self.current_id += 1
        return self.current_id
    def add(self, *expr):
        """Add an expresion, or a list/tuple/dict of expressions to the model.
        :param expr: Any number of (or nested lists of) Expression instances.
        """
        self.add_prime(expr)
    def add_prime(self, expr):
        ## \internal - Used to distinguish between a single Expression and a
        # list of Expressions
        if issubclass(type(expr), list):
            for exp in expr:
                self.add_prime(exp)
        elif issubclass(type(expr), tuple):
            for exp in expr:
                self.add_prime(exp)
        elif issubclass(type(expr), dict):
            for key in expr:
                # BUGFIX: was 'exp[key]' which raised NameError -- 'exp' is
                # only bound in the list/tuple branches above.
                self.add_prime(expr[key])
        else:
            self.__expressions.append(expr)
        self.close_exp()
    def __iadd__(self, *expr):
        """Can be used to add an expression or a collection of expressions to
        the model like: `model += expression`
        :param expr: Any number of (or nested lists of) Expression instances.
        """
        self.add_prime(expr)
        return self
    def add_expression(self, exp, level):
        ## \internal - add the Expression tree to the model and assign identifiers to the nodes
        # this expression is new, choose an identifiant for it
        te = type(exp)
        if te not in [int, long, float, str, bool]:
            ## THIS IS BUGGY, WE CANNOT ADD THE SAME VARIABLE TO SEVERAL MODELS
            if exp.ident == -1:
                #if exp.mod != self:
                #exp.mod = self
                if exp.get_children() is None:
                    # Leaf node: register it as a variable.
                    if exp.is_var():
                        exp.ident = len(self.variables)
                        self.variables.append(exp)
                else: # it is a constraint
                    exp.ident = -2 - len(self.constraints)
                    self.constraints.append(exp)
                    for child in exp.get_children():
                        self.add_expression(child, level + 1)
    def close_exp(self):
        ## \internal - close() is used to fire up preprocessing requiring
        # knowledge about the whole model
        for i in range(self.closed, len(self.__expressions)):
            self.add_expression(self.__expressions[i], 0)
        self.closed = len(self.__expressions)
    def close(self, solver=None):
        ## \internal - close() is used to fire up preprocessing requiring
        # knowledge about the whole model and the solver used.
        #SDG: check if it is an optimization problem
        if not any([issubclass(type(expr), Minimise) or issubclass(type(expr), Maximise) or issubclass(type(expr), CostFunction) for expr in self.__expressions]):
            self.upper_bound = 1
        # BUGFIX: compare the library name with '==' rather than 'is';
        # identity of equal strings is not guaranteed (the check further
        # below already used '==').
        if solver is not None and solver.Library == 'Toulbar2' and self.upper_bound is not None:
            solver.setOption('updateUb', str(self.upper_bound))
        if self.closed == len(self.__expressions):
            # Build the global string table mapping string domain values to
            # their integer encoding used by the solvers.
            tmp_strings = []
            for var in self.variables:
                if var.is_str():
                    var.model = self
                    for value in var.domain_:
                        tmp_strings.append(value)
            self.strings = sorted(set(tmp_strings))
            self.string_map = {}.fromkeys(self.strings)
            for k in range(len(self.strings)):
                self.string_map[self.strings[k]] = k
            self.closed += 1
        if self.closed == len(self.__expressions) + 1:
            for expr in self.get_exprs():
                expr.close()
        if getattr(solver, "Library", None) == 'Toulbar2':
            #print self #SDG: VERY USEFUL FOR DEBUGGING
            # Toulbar2-specific rewriting: eliminate intermediate variables
            # that occur exactly twice (once in a defining equality / table,
            # once in the expression using them).
            occur = {}
            def rec_occur(expr):
                # Count how many times each leaf variable occurs in the model.
                if not(issubclass(type(expr), Expression)):
                    return
                if expr.is_var():
                    occur[expr] = occur.get(expr, 0) + 1
                else:
                    for j,subexpr in enumerate(expr.children):
                        rec_occur(subexpr)
            for expr in self.__expressions:
                rec_occur(expr)
            def rec_functional(objexpr, objvar, j, minimization):
                # Inline the definition of intermediate variable `objvar`
                # (child j of objexpr); `minimization` is True/False inside the
                # objective (min/max) and None inside ordinary constraints.
                if not(issubclass(type(objvar), Expression)):
                    return
                if objvar.is_var():
                    if objvar.lb==objvar.ub or occur[objvar] != 2: # avoid replacing non intermediate variables
                        return
                    # replace [Predicate(obj,..),Eq(Sum([obj]+vars, [+-1]+coefs),expr)] by [Predicate(Sum(vars+[expr],[-+]coefs+[-1]),..),Eq(0,0)]
                    # and [Predicate(obj,..),Eq(expr,Sum([obj]+vars, [+-1]+coefs))] by [Predicate(Sum(vars+[expr],[-+]coefs+[-1]),..),Eq(0,0)]
                    objconstr = filter(lambda expr: issubclass(type(expr), Eq) and ((issubclass(type(expr.children[0]), Sum) and any(map(lambda u: expr.children[0].children[u] is objvar, xrange(len(expr.children[0].children))))) or (issubclass(type(expr.children[1]), Sum) and any(map(lambda u: expr.children[1].children[u] is objvar, xrange(len(expr.children[1].children)))))), self.__expressions)
                    if (len(objconstr)==1):
                        if issubclass(type(objconstr[0].children[0]), Sum):
                            mysum = 0
                        else:
                            mysum = 1
                        pos = filter(lambda u: objconstr[0].children[mysum].children[u] is objvar, xrange(len(objconstr[0].children[mysum].children)))[0]
                        coefobj = objconstr[0].children[mysum].parameters[0][pos]
                        if (coefobj != -1 and coefobj != 1):
                            return
                        del objconstr[0].children[mysum].children[pos]
                        del objconstr[0].children[mysum].parameters[0][pos]
                        coefeq = objconstr[0].children[1-mysum]
                        if not issubclass(type(coefeq), int) or coefeq != 0:
                            objconstr[0].children[mysum].children.append(Variable(coefeq,coefeq,str(coefeq)) if issubclass(type(coefeq), int) else coefeq)
                            objconstr[0].children[mysum].parameters[0].append(-1)
                        if (coefobj==1):
                            for u in xrange(len(objconstr[0].children[mysum].children)):
                                objconstr[0].children[mysum].parameters[0][u] = -objconstr[0].children[mysum].parameters[0][u]
                        objexpr.children[j] = objconstr[0].children[mysum]
                        #print "REPLACE",objvar,"by",objexpr.children[j],"in",objexpr
                        objconstr[0].children[0] = Variable(0,0,'0')
                        objconstr[0].children[1] = 0
                        occur[objvar] -= 2
                        rec_functional(objexpr, objexpr.children[j], j, minimization)
                        if issubclass(type(objexpr.children[j]), Expression): objexpr.children[j].close()
                    else:
                        # replace [Predicate(obj,..),Eq(obj,expr)] by [Predicate(expr,..),Eq(0,0)]
                        # and [Predicate(obj,..),Eq(expr,obj)] by [Predicate(expr,..),Eq(0,0)]
                        objconstr = filter(lambda expr: issubclass(type(expr), Eq) and ((expr.children[0] is objvar) or (expr.children[1] is objvar)), self.__expressions)
                        if (len(objconstr)==1):
                            if (objconstr[0].children[0] is objvar):
                                objexpr.children[j] = objconstr[0].children[1]
                            else:
                                objexpr.children[j] = objconstr[0].children[0]
                            #print "REPLACE",objvar,"by",objexpr.children[j],"in",objexpr
                            objconstr[0].children[0] = Variable(0,0,'0')
                            objconstr[0].children[1] = 0
                            occur[objvar] -= 2
                            rec_functional(objexpr, objexpr.children[j], j, minimization)
                            if issubclass(type(objexpr.children[j]), Expression): objexpr.children[j].close()
                        elif minimization is not None:
                            # ONLY in the objective function: replace [Predicate(obj,..),Table([obj]+vars,tuples,'support')] by [Predicate(Function(vars,dict),..), Table([],[],'support')]
                            objconstr = filter(lambda expr: issubclass(type(expr), Table) and any(map(lambda u: expr.children[u] is objvar, xrange(len(expr.children)))) and (expr.parameters[1] == 'support'), self.__expressions)
                            if (len(objconstr)==1):
                                pos = filter(lambda u: objconstr[0].children[u] is objvar, xrange(len(objconstr[0].children)))[0]
                                dictionary = {}
                                for t in objconstr[0].parameters[0]:
                                    mytuple = t[:pos]+t[pos+1:]
                                    if minimization:
                                        dictionary[mytuple] = min(dictionary.get(mytuple, MAXCOST), t[pos])
                                    else:
                                        dictionary[mytuple] = max(dictionary.get(mytuple, -MAXCOST), t[pos])
                                objexpr.children[j] = Function(objconstr[0].children[:pos] + objconstr[0].children[pos+1:], dictionary, MAXCOST if minimization else -MAXCOST)
                                objconstr[0].children = []
                                objconstr[0].parameters = [[],0]
                                occur[objvar] -= 2
                                rec_functional(objexpr, objexpr.children[j], j, minimization)
                                if issubclass(type(objexpr.children[j]), Expression): objexpr.children[j].close()
                else:
                    for j,var in enumerate(objvar.children):
                        rec_functional(objvar, var, j, minimization)
            objexpr = filter(lambda expr: issubclass(type(expr), Minimise) or issubclass(type(expr), Maximise), self.__expressions)
            if (len(objexpr)==1 and issubclass(type(objexpr[0].children[0]), Expression) and objexpr[0].children[0].is_var()):
                objvar = objexpr[0].children[0]
                # replace Eq('objective',obj) or Eq('obj',obj) by Eq(0,0) #SDG: VERY HUGLY!!! (avoid creating an objective variable just for Minizinc output purposes)
                objconstr = filter(lambda expr: issubclass(type(expr), Eq) and expr.children[0].is_var() and (expr.children[0].name()=='objective' or expr.children[0].name()=='obj') and (expr.children[1] is objvar), self.__expressions)
                if (len(objconstr)==1):
                    objconstr[0].children[0] = Variable(0,0,'0')
                    objconstr[0].children[1] = 0
                    occur[objvar] -= 1
                # remove first intermediate variables in the objective function
                rec_functional(objexpr[0], objvar, 0, issubclass(type(objexpr[0]), Minimise))
            # remove intermediate variables in the constraints if possible
            for expr in self.__expressions:
                if issubclass(type(expr), Predicate) and not(issubclass(type(expr), (Minimise, Maximise))):
                    for (j,var) in enumerate(expr.children):
                        if not( issubclass(type(expr), Eq) and (issubclass(type(var), Sum) or (issubclass(type(var), Expression) and var.is_var())) ):
                            rec_functional(expr, var, j, None)
            # remove dummy equations or dummy Table
            pos = 0
            while (pos < len(self.__expressions)):
                expr = self.__expressions[pos]
                if (issubclass(type(expr), Eq) and issubclass(type(expr.children[0]), Variable) and issubclass(type(expr.children[1]), int) and expr.children[0].get_lb()==expr.children[0].get_ub()==expr.children[1]) or (issubclass(type(expr), Table) and len(expr.children)==0):
                    del self.__expressions[pos]
                else:
                    pos += 1
            #print self #SDG: VERY USEFUL FOR DEBUGGING
    def __str__(self):
        ## \internal - print the variable domains then the constraints.
        mod = 'assign:\n '
        for var in self.variables:
            mod += var.domain() + '\n '
        mod += '\nsubject to:\n '
        for con in self.__expressions:
            mod += con.__str__() + '\n '
        return mod
    def get_exprs(self):
        ## \internal - return the list of Expressions
        return self.__expressions
    def load(self, solvername, X=None, encoding=None):
        """
        The solver is passed as a string, the corresponding module is imported,
        a Solver object created, initialised, and returned.
        :param str solvername: the name of the solver being loaded. Should be
            one of the modules in :mod:`Numberjack.solvers`.
        :param str X: the decision variables.
        :type X: :class:`list` or :class:`VarArray`
        :param EncodingConfiguration encoding: An :class:`EncodingConfiguration`
            instance defining the default encoding for expressions.
        :type encoding: :class:`EncodingConfiguration`
        :raises ImportError: if the named solver could not be loaded.
        :returns: an instance of a :class:`NBJ_STD_Solver` subclass.
        """
        try:
            solverspkg = "Numberjack.solvers"
            solverstring = "%s.%s" % (solverspkg, solvername)
            lib = __import__(solverstring, fromlist=[solverspkg])
            solver = lib.Solver(self, X, encoding=encoding)
        except ImportError:
            raise ImportError(
                "ERROR: Failed during import, wrong module name? (%s)" %
                solvername)
        return solver
    def solve_with(self, library, encoding=None):
        """
        .. deprecated:: 1.1
            Instead you should use :func:`load` first and call solve on that
            solver object instead.
        The solver is passed as a string, the corresponding module is
        imported, a Solver object created, initialised and called.
        A Solution object (dictionary: var -> val) is returned, if the
        Model is unsatisfiable, or the solver fails to solve it, the
        Solution will be empty (None)
        """
        # BUGFIX: 'encoding' was previously passed positionally, binding it to
        # load()'s X (decision variables) parameter instead of 'encoding'.
        solver = self.load(library, encoding=encoding)
        solver.solve()
        return solver.get_solution()
    def set_upper_bound(self, ub):
        """
        For weighted CSPs, sets the initial upper bound.
        :param int ub: The initial upper bound.
        """
        # NOTE(review): stored as a string here, while __init__ and close()
        # use numeric values -- confirm downstream consumers accept both.
        self.upper_bound = str(ub)
    def set_lower_bound(self, lb):
        """
        For weighted CSPs, sets the initial lower bound.
        :param int lb: The initial lower bound.
        """
        self.lower_bound = lb
class Variable(Expression):
    """
    Creates a new decision variable. The form of the domain is decided by the
    arguments given to the constructor:
    .. code-block:: python
        Variable()          # Binary variable
        Variable(N)         # Variable in the domain of {0, N-1}
        Variable('x')       # Binary variable called 'x'
        Variable(N, 'x')    # Variable in the domain of {0, N-1} called 'x'
        Variable(l,u)       # Variable in the domain of {l, u}
        Variable(l,u, 'x')  # Variable in the domain of {l, u} called 'x'
        Variable(list)      # Variable with domain specified as a list
        Variable(list, 'x') # Variable with domain specified as a list called x
    Using float bounds gives a continuous variable (typically only supported
    by the MIP back-ends; other solvers use finite domains). A domain may also
    be given as a list of strings; Numberjack handles the translation to and
    from the solver's integer encoding, e.g.:
    .. code-block:: python
        v = Variable(['red', 'green', 'blue'])
        # ... load and solve the model
        v.get_value()  # Returns 'red'
    """
    def __init__(self, argopt1=None, argopt2=None, argopt3=None):
        # Defaults describe a Boolean variable named 'x'.
        domain = None
        lb, ub, name = 0, 1, 'x'
        if argopt3 is not None:
            # Variable(lb, ub, name)
            lb, ub, name = argopt1, argopt2, argopt3
        elif argopt2 is not None:
            if type(argopt2) is str:
                # Variable(N, name) or Variable(domain_list, name)
                name = argopt2
                if numeric(argopt1):
                    ub = argopt1 - 1
                    lb = type(ub)(lb)  # Ensure lb has the same datatype as ub
                else:
                    domain = sorted(argopt1)
                    lb, ub = domain[0], domain[-1]
            else:
                # Variable(lb, ub)
                lb, ub = argopt1, argopt2
        elif argopt1 is not None:
            # Single argument: a name, a domain size, or an explicit domain.
            if type(argopt1) is str:
                name = argopt1
            elif numeric(argopt1):
                ub = argopt1 - 1
                lb = type(ub)(lb)  # Ensure lb has the same datatype as ub
            else:
                domain = sorted(argopt1)
                lb, ub = domain[0], domain[-1]
        # Validate the parsed arguments before initialising the expression.
        if type(lb) not in [int, long, float, str]:
            raise TypeError("Warning lower bound of %s is not an int or a float or a string" % name)
        elif type(ub) not in [int, long, float, str]:
            raise TypeError("Warning upper bound of %s is not an int or a float or a string" % name)
        elif type(name) is not str:
            raise TypeError("Warning name variable is not a string")
        elif lb > ub:
            raise ValueError("Warning lower bound (%r) of %s greater than upper bound (%r)" % (lb, name, ub))
        Expression.__init__(self, name)
        self.domain_ = domain
        self.lb = lb
        self.ub = ub
class VarArray(list):
    """
    A VarArray is a list of :class:`Expression` objects. Various methods are
    overloaded to allow easy declaration, formatted printing, and syntactic
    sugars for modelling. The following tables shows example calls to the
    constructor which results in different kinds of variable arrays.
    .. code-block:: python
        VarArray(l) # creates an array from a list l
        VarArray(n) # creates an array of n Boolean variables
        VarArray(n, 'x') # creates an array of n Boolean variables with
                         # names 'x0..xn-1'
        VarArray(n, m) # creates an array of n variables with domains [0..m-1]
        VarArray(n, m, 'x') # creates an array of n variables with domains
                            # [0..m-1] and names 'x0..xn-1'
        VarArray(n, d) # creates an array of n variables with domains specified
                       # in the list 'd'
        VarArray(n, d, 'x') # creates an array of n variables with domains
                            # specified in the list 'd' and names 'x0..xn-1'
        VarArray(n, l, u, 'x') # creates an array of n variables with domains
                               # [l..u] and names 'x0..xn-1'
        VarArray(n, l, u) # creates an array of n variables with domains [l..u]
    VarArray's allow you to state :class:`Element` and lexicographic ordering
    constraints over a sequence of variables using, respectively the operator
    '[]' and '<', '>', '<=', '>='. For instance, given two VarArray X and Y, and
    an Expression x:
    .. code-block:: python
        X[x] # returns an Element expression, that is, a variable equal to the
             # xth element of the array X
        X <= Y # returns a LeqLex constraint between X and Y
    """
    def __init__(self, n, optarg1=None, optarg2=None, optarg3=None):
        domain = None
        if hasattr(n, '__iter__'):
            # Initialised directly from an iterable of expressions/values.
            list.__init__(self, n)
            return
        else:
            lb = 0
            ub = 1
            name = 'x'
            if optarg1 is not None:
                if type(optarg1) is str:
                    # VarArray(n, 'x')
                    name = optarg1
                elif type(optarg2) is int or type(optarg2) is float:
                    # VarArray(n, l, u[, 'x'])
                    lb = optarg1
                    ub = optarg2
                    if optarg3 is not None:
                        name = optarg3
                else:
                    # BUGFIX: the second test previously repeated 'list', so a
                    # tuple domain was silently treated as an upper bound.
                    # Also use sorted() so the caller's sequence is not
                    # mutated in place.
                    if issubclass(type(optarg1), list) or issubclass(type(optarg1), tuple):
                        # VarArray(n, d[, 'x']) with an explicit domain d
                        domain = sorted(optarg1)
                        lb = domain[0]
                        ub = domain[-1]
                    else:
                        # VarArray(n, m[, 'x'])
                        ub = optarg1 - 1
                    if optarg2 is not None:
                        name = optarg2
        names = name
        if type(name) is str:
            names = [name + str(i) for i in range(n)]
        if domain is None:
            self.__init__([Variable(lb, ub, names[i]) for i in range(n)])
        else:
            self.__init__([Variable(domain, names[i]) for i in range(n)])
    def initial(self):
        """
        Returns a string representing the initial definition of the content of
        the array.
        :rtype: str
        """
        return "[" + ", ".join([var.initial() for var in self]) + "]"
    def domain(self, solver=None):
        """
        Returns a string representing the current state of the content of the
        array.
        :param `NBJ_STD_Solver` solver: If specified, the solver from which the
            state will be sourced, if `None` then the most recently loaded
            solver is used.
        :rtype: str
        """
        return "[" + ", ".join([var.domain(solver) for var in self]) + "]"
    def name(self):
        """
        Returns a string containing a brief view of the content of the array.
        :rtype: str
        """
        return "[" + ", ".join([var.name() for var in self]) + "]"
    def solution(self, solver=None):
        """
        .. deprecated:: 1.1
            Instead you should use :func:`Expression.get_value` on each item and
            call :func:`str` on that.
        Returns a string containing the valuation of the content of the array.
        :param `NBJ_STD_Solver` solver: If specified, the solver from which the
            state will be sourced, if `None` then the most recently loaded
            solver is used.
        :rtype: str
        """
        return "[" + ", ".join([var.solution(solver) for var in self]) + "]"
    def __str__(self):
        return "[" + ", ".join([var.__str__() for var in self]) + "]"
    def __getitem__(self, expr):
        # Integer index: plain list access; Expression index: Element constraint.
        if type(expr) is int:
            return list.__getitem__(self, expr)
        else:
            return Element(self, expr)
    def __getslice__(self, i, j):
        # Python 2 slicing support; keeps the VarArray type for slices.
        return VarArray(list.__getslice__(self, i, j))
    def __lt__(self, other):
        """
        Syntactic sugar for the lexicographic order constraint :class:`LessLex`
        so it can be specified on two VarArray like so: `X < Y`
        :param VarArray other: Another VarArray of the same length.
        :rtype: LessLex
        """
        return LessLex(self, other)
    def __le__(self, other):
        """
        Syntactic sugar for the lexicographic order constraint :class:`LeqLex`
        so it can be specified on two VarArray like so: `X <= Y`
        :param VarArray other: Another VarArray of the same length.
        :rtype: LeqLex
        """
        return LeqLex(self, other)
    def __gt__(self, other):
        """
        Syntactic sugar for the lexicographic order constraint :class:`LessLex`
        so it can be specified on two VarArray like so: `X > Y`
        :param VarArray other: Another VarArray of the same length.
        :rtype: LessLex
        """
        return LessLex(other, self)
    def __ge__(self, other):
        """
        Syntactic sugar for the lexicographic order constraint :class:`LeqLex`
        so it can be specified on two VarArray like so: `X >= Y`
        :param VarArray other: Another VarArray of the same length.
        :rtype: LeqLex
        """
        return LeqLex(other, self)
    def __eq__(self, other):
        """
        Syntactic sugar for the equality constraint `X == Y`.
        :param VarArray other: Another VarArray of the same length.
        :rtype: A list of equality (:class:`Eq`) expressions.
        """
        return [Eq((x, y)) for x, y in zip(self, other)]
class Matrix(list):
"""
A Matrix is a two-dimensional list of variables or
:class:`.Expression` objects. Various methods are overloaded to
allow easy declaration, formatted printing, and syntactic sugars for
modelling. The following tables shows example calls to the constructor
which results in different kinds of matrices.
.. code-block:: python
Matrix(l) # creates a Matrix from a list l
Matrix(n, m) # creates a n x m Matrix of Boolean variables
Matrix(n, m, 'x') # creates a n x m Matrix of Boolean variables with
# names 'x0.0..xn-1.m-1'
Matrix(n, m, u) # creates a n x m Matrix of variables with domains
# [0..u-1]
Matrix(n, m, u, 'x') # creates a n x m Matrix of variables with
# domains [0..u-1] and names 'x0.0..xn-1.m-1'
Matrix(n, m, l, u) # creates a n x m Matrix of variables with domains
# [l..u]
Matrix(n, m, l, u, 'x') # creates a n x m Matrix of variables with
# domains [l..u] and names 'x0.0..xn-1.m-1'
Matrices feature specific handlers to access (subsets of) rows and columns.
The fields `row`, `col`, and `flat` respectively refer to the list of rows,
columns and cells in the matrix. For instance:
.. code-block:: python
m = Matrix(5,4,1,3,'cell_')
print m
>>> [[cell_0.0, cell_0.1, cell_0.2, cell_0.3],
>>> [cell_1.0, cell_1.1, cell_1.2, cell_1.3],
>>> [cell_2.0, cell_2.1, cell_2.2, cell_2.3],
>>> [cell_3.0, cell_3.1, cell_3.2, cell_3.3],
>>> [cell_4.0, cell_4.1, cell_4.2, cell_4.3]]
print m.row
>>> [[cell_0.0, cell_0.1, cell_0.2, cell_0.3],
>>> [cell_1.0, cell_1.1, cell_1.2, cell_1.3],
>>> [cell_2.0, cell_2.1, cell_2.2, cell_2.3],
>>> [cell_3.0, cell_3.1, cell_3.2, cell_3.3],
>>> [cell_4.0, cell_4.1, cell_4.2, cell_4.3]]
print m.col
>>> [[cell_0.0, cell_1.0, cell_2.0, cell_3.0, cell_4.0],
>>> [cell_0.1, cell_1.1, cell_2.1, cell_3.1, cell_4.1],
>>> [cell_0.2, cell_1.2, cell_2.2, cell_3.2, cell_4.2],
>>> [cell_0.3, cell_1.3, cell_2.3, cell_3.3, cell_4.3]]
print m.flat
>>> [cell_0.0, cell_0.1, cell_0.2, cell_0.3, cell_1.0, cell_1.1, ...]
Matrices support Element constraints on row, column or flatten views.
"""
    def __init__(self, optarg1=None, optarg2=None, optarg3=None, optarg4=None, optarg5=None):
        """
        Overloaded constructor; see the class docstring for the supported
        call signatures (a list of rows, or dimensions with optional
        bounds and a name prefix).
        """
        # Defaults: a 1x1 matrix of Boolean (0/1) variables named 'x...'.
        n = 1
        m = 1
        lb = 0
        ub = 1
        name = 'x'
        self.row = None  # accessor to the list of rows
        self.col = None  # accessor to the list of columns
        self.flat = None  # accessor to the list of cells
        if optarg2 == None:
            # Single-argument form: wrap an existing list of rows.
            if optarg1 != None:
                # BH: This could create rows with varying numbers of columns if given a list with different values.
                # Should this be allowed? If so, then we need to verify any assumptions being made in this code.
                list.__init__(self, [VarArray(row, "%s%d." % (name, i)) for i, row in enumerate(optarg1)])
            else:
                list.__init__(self)
            return
        else:
            n = optarg1
            m = optarg2
        # Decode the optional bound/name arguments; which signature was
        # used is inferred from arity and argument types (str => name).
        if optarg5 is not None:
            lb = optarg3
            ub = optarg4
            name = optarg5
        elif optarg4 is not None:
            if type(optarg4) is str:
                name = optarg4
                ub = optarg3 - 1  # optarg3 is an exclusive upper bound here
            else:
                ub = optarg4
                lb = optarg3
        elif optarg3 is not None:
            if type(optarg3) is str:
                name = optarg3
            else:
                ub = optarg3 - 1  # optarg3 is an exclusive upper bound here
        # Build the n rows, then the column view and the flattened view.
        list.__init__(self, [VarArray(m, lb, ub, name + str(j) + '.') for j in range(n)])
        self.row = self
        self.col = Matrix()
        for column in zip(*self):
            self.col.append(VarArray(column))
        # The column view's own row/col accessors point back appropriately.
        self.col.col = self
        self.col.row = self.col
        self.flat = VarArray([var for row in self for var in row])
        self.col.flat = self.flat
def initial(self):
"""
Returns a string representing the initial definition of the content of
the matrix.
:rtype: str
"""
return "[" + ",\n ".join([row.initial() for row in self]) + "]"
def domain(self, solver=None):
"""
Returns a string representing the current state of the content of the
matrix.
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
state will be sourced, if `None` then the most recently loaded
solver is used.
:rtype: str
"""
return "[" + ",\n ".join([row.domain(solver) for row in self]) + "]"
def name(self):
"""
Returns a string containing a brief view of the content of the matrix.
:rtype: str
"""
return "[" + ",\n ".join([row.name() for row in self]) + "]"
def solution(self, solver=None):
"""
.. deprecated:: 1.1
Instead you should use :func:`Expression.get_value` on each item and
call :func:`str` on that.
Returns a string containing the valuation of the content of the array.
:param `NBJ_STD_Solver` solver: If specified, the solver from which the
state will be sourced, if `None` then the most recently loaded
solver is used.
:rtype: str
"""
return "[" + ",\n ".join([row.solution(solver) for row in self]) + "]"
def __str__(self):
return "[" + ",\n ".join([row.__str__() for row in self]) + "]"
    def __getitem__(self, i):
        """
        Overloaded indexing: supports plain row access, (row, col) pairs
        with int/slice/Expression components, and a single Expression
        index (which defers to a :class:`MatrixWrapper`).
        """
        if type(i) is int:
            # Plain integer index: return the row.
            return list.__getitem__(self, i)
        elif type(i) is tuple:
            if type(i[0]) is int:
                # (int, anything): index the row, then index within it.
                return list.__getitem__(self.row, i[0]).__getitem__(i[1])
            elif type(i[1]) is int:
                # (non-int, int): index the column, then index within it.
                return list.__getitem__(self.col, i[1]).__getitem__(i[0])
            elif type(i[0]) is slice:
                # (slice, slice): slice rows, then columns, yielding a
                # sub-matrix (the double .col restores row orientation).
                aux = Matrix(list.__getitem__(self, i[0])).col
                aux = Matrix(list.__getitem__(aux, i[1])).col
                return aux
            else:
                # Two Expression indices: an Element constraint over the
                # flattened cells at position row*width + col.
                return Element(self.flat, (i[0] * len(self.col)) + i[1])
        else:
            # Single Expression index: defer the second index lookup.
            return MatrixWrapper(i, self)
    def __getslice__(self, i, j):
        # Python 2 slicing hook: keep slices of a Matrix as a Matrix
        # rather than a plain list.
        return Matrix(list.__getslice__(self, i, j))
class MatrixWrapper(list):
    """
    Helper returned by ``Matrix.__getitem__`` when the first index is an
    Expression: it holds the pending first index so that a subsequent
    ``wrapper[item]`` resolves to ``matrix[var, item]``.
    """

    def __init__(self, var, matrix):
        self.var = var        # the deferred first index (an Expression)
        self.matrix = matrix  # the Matrix being indexed

    def __getitem__(self, item):
        # Complete the two-dimensional lookup with the stored first index.
        return self.matrix[self.var, item]

    def __str__(self):
        return str(self.var) + " th index of " + str(self.matrix)
## Class that all constraints inherit from
# All constraints in Numberjack extend the Predicate class. It provides
# accessors to get information about the predicate trees and the variables
# the constraints constrain.
#
# A given predicate can have a different meaning when posted at the top-level
# or nested in a predicate tree. For instance:
#
# \code
# from Numberjack import *
# x = Variable(1,5)
# y = Variable(1,4)
# x_lt_y = (x<y)
# m1 = Model( x_lt_y )
# print m1
# >>> assign:
# >>> x0 in {1..5}
# >>> x1 in {1..4}
# >>>
# >>> subject to:
# >>> (x0 < x1)
#
# x = Variable(1,5)
# y = Variable(1,4)
# x_gt_y = (x>y)
# m2 = Model( x_lt_y | x_gt_y )
# print m2
# >>> assign:
# >>> x0 in {1..5}
# >>> x1 in {1..4}
# >>>
# >>> subject to:
# >>> ((x0 < x1) or (x0 > x1))
# \endcode
#
# - In the first Model (m1), the object x_lt_y is understood as a precedence Constraint
# between the Variables x and y
#
# - In the second Model (m2), the same object x_lt_y is understood as a Boolean variable,
# whose truth value corresponds to the relation (x<y) and that can be constrained,
# here with an "Or" constraint.
class Predicate(Expression):
    """
    All constraints in Numberjack extend this class. It provides accessors
    to get information about the predicate trees and the variables the
    constraints constrain.
    """  # FIXME add doc about creating subclasses for custom constraints

    def __init__(self, vars, op):
        Expression.__init__(self, op)
        self.set_children(vars)

    def set_children(self, children):
        #self.children = children
        ## List of children of the predicate
        #self.children = [child for child in children]
        # Flatten nested lists so self.children is a flat list.
        self.children = flatten(children)

    def initial(self):
        """
        Returns a string representing the initial definition of the content of
        the predicate and all its children. For example:

        .. code-block:: python

            var1 = Variable(0, 10)
            constraint = var1 < 10
            print constraint.initial()
            >>> (x0 < 10)

        :rtype: str
        """
        # Temporarily redirect Expression.__str__ so the recursive printing
        # inside __str__ renders every sub-expression in its initial form;
        # the original method is restored before returning. NOTE: this
        # global class-attribute swap is not re-entrant or thread-safe.
        save_str = Expression.__str__
        Expression.__str__ = Expression.initial
        output = self.__str__()
        Expression.__str__ = save_str
        return output

    def domain(self, solver=None):
        """
        Returns a string representing the current domain of the predicate.

        :param `NBJ_STD_Solver` solver: If specified, the solver from which the
            state will be sourced, if `None` then the most recently loaded
            solver is used.
        :rtype: str
        """
        # Same __str__-swapping trick as initial(), rendering domains.
        save_str = Expression.__str__
        Expression.__str__ = lambda x: x.domain(solver)
        output = self.__str__()
        Expression.__str__ = save_str
        return output

    def solution(self, solver=None):
        """
        .. deprecated:: 1.1
            Instead you should use :func:`Expression.get_value` and call
            :func:`str` on that.

        Returns a string containing the valuation of the predicate.

        :param `NBJ_STD_Solver` solver: If specified, the solver from which the
            state will be sourced, if `None` then the most recently loaded
            solver is used.
        :rtype: str
        """
        # Same __str__-swapping trick as initial(), rendering solutions.
        save_str = Expression.__str__
        Expression.__str__ = lambda x: x.solution(solver)
        output = self.__str__()
        Expression.__str__ = save_str
        return output

    def name(self):
        """
        Returns a string that represents the name of the Predicate and the name
        of all its children.

        :rtype: str
        """
        return self.__str__()

    def __str__(self):
        # Default rendering: operator(child, child, ...) with children
        # printed by name.
        save_str = Expression.__str__
        Expression.__str__ = Expression.name
        output = self.operator + "(" + ", ".join(map(str, self.children)) + ")"
        Expression.__str__ = save_str
        return output
class BinPredicate(Predicate):
    """
    All binary predicates such as And, LessThan and GreaterThan extend this
    class. They are separated from the base Predicate class to facilitate
    ease of print representations of the predicates.
    """

    def __init__(self, vars, op):
        Predicate.__init__(self, vars, op)

    def get_symbol(self):
        # Placeholder symbol; subclasses override with their operator.
        return 'x'

    def __str__(self):
        # Infix rendering: (left symbol right), with children printed by
        # name via the temporary Expression.__str__ swap.
        save_str = Expression.__str__
        Expression.__str__ = Expression.name
        output = '(' + str(self.children[0]) + ' ' + self.get_symbol() + ' ' + str(self.children[1]) + ')'
        Expression.__str__ = save_str
        return output

    #SDG: generic eval method using Predicate operator name
    def eval(self, x, y):
        """
        Evaluate the predicate on two concrete values, returning an int.

        First tries the function of the same name in the :mod:`operator`
        module; operators with no such function (e.g. 'and', 'or') fall
        back to evaluating the printable infix form with :func:`eval`.
        """
        try:
            return int(getattr(operator, self.operator)(x, y))
        except AttributeError:
            return int(eval(str(x) + ' ' + self.get_symbol() + ' ' + str(y)))
class And(BinPredicate):
    """
    Logical 'and' expression. Can be used at both the top-level to specify an
    'and' constraint, or reified to equal the truth value of the relation.

    .. code-block:: python

        var1 = Variable() # Binary variable
        var2 = Variable() # Binary variable
        model.add(var1 & var2) # Post var1 And var2 constraint

        var1 = Variable() # Binary variable
        var2 = Variable() # Binary variable
        var3 = Variable() # Binary variable
        model.add( var3 == (var1 & var2) ) # Used as an expression
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "and")
        # Truth-value bounds: the conjunction is certainly true only if
        # both children are (min of lbs), and can be true only if both
        # children can be (min of ubs).
        self.lb = min(self.get_lb(0), self.get_lb(1))
        self.ub = min(self.get_ub(0), self.get_ub(1))

    def get_symbol(self):
        return '&'
class Or(BinPredicate):
    """
    Logical 'or' expression. Can be used at both the top-level to specify an
    'or' constraint, or reified to equal the truth value of the relation.

    .. code-block:: python

        var1 = Variable() # Binary variable
        var2 = Variable() # Binary variable
        model.add(var1 | var2) # Post var1 Or var2 constraint

        var1 = Variable() # Binary variable
        var2 = Variable() # Binary variable
        var3 = Variable() # Binary variable
        model.add( var3 == (var1 | var2) ) # Used as an expression
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "or")
        # Truth-value bounds: dual of And -- max of the children's bounds.
        self.lb = max(self.get_lb(0), self.get_lb(1))  #SDG: initialize lb,ub
        self.ub = max(self.get_ub(0), self.get_ub(1))

    def get_symbol(self):
        return 'or'
class Div(BinPredicate):
    """
    Division expression to equal the integral division of the two operands.
    Cannot be used as a top-level constraint.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        divexp1 = var2 / var1
        divexp2 = var2 / 10

    .. warning::

        Cannot be used with all solvers and
        :class:`.ConstraintNotSupportedError` will be raised when loading the
        model if this is the case.
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "div")
        #SDG: initialize lb,ub
        if (self.get_lb(1) < 0 and self.get_ub(1) > 0):
            # Divisor domain straddles zero: bound the quotient by the
            # numerator's magnitude.
            self.lb = min(self.get_lb(0), -1 * self.get_ub(0))  #SDG: Warning! It assumes var2 can be equal to -1 or 1
            self.ub = max(self.get_ub(0), -1 * self.get_lb(0))
        elif (self.get_ub(1) < 0):
            # Strictly negative divisor: extremes occur at the divisor's
            # upper bound (closest to zero).
            self.lb = min(self.get_lb(0) / self.get_ub(1), self.get_ub(0) / self.get_ub(1))
            self.ub = max(self.get_lb(0) / self.get_ub(1), self.get_ub(0) / self.get_ub(1))
        elif (self.get_lb(1) > 0):
            # Strictly positive divisor: extremes occur at the divisor's
            # lower bound (closest to zero).
            self.lb = min(self.get_lb(0) / self.get_lb(1), self.get_ub(0) / self.get_lb(1))
            self.ub = max(self.get_lb(0) / self.get_lb(1), self.get_ub(0) / self.get_lb(1))
        else:
            # Remaining case: bounds left undefined.
            self.lb = None
            self.ub = None

    def get_symbol(self):
        return '/'
class Mul(BinPredicate):
    """
    Multiplication expression equal to the product of its two operands.
    Cannot be used as a top-level constraint.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        mulxp1 = var2 * var1
        mulexp2 = var2 * 10

    .. warning::

        Cannot be used with all solvers and
        :class:`.ConstraintNotSupportedError` will be raised when loading the
        model if this is the case.
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "mul")
        # The product's bounds are the extreme values among the four
        # products of the operands' bounds (handles mixed signs).
        corners = [a * b
                   for a in (self.get_lb(0), self.get_ub(0))
                   for b in (self.get_lb(1), self.get_ub(1))]
        self.lb = min(corners)
        self.ub = max(corners)

    def get_symbol(self):
        return '*'
class Mod(BinPredicate):
    """
    Modulus expression to equal the modulo of two expressions or an expression
    and a constant. Cannot be used as a top-level constraint.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        modexp1 = var2 % var1
        modexp2 = var2 % 10

    .. note::

        For MIP and SAT, the constraint is encoded such that the remainder takes
        the sign of the numerator, as per the C standard. This differs from
        Python where the remainder takes the sign of the denominator.

    .. warning::

        Cannot be used with all solvers and
        :class:`.ConstraintNotSupportedError` will be raised when loading the
        model if this is the case.
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "mod")
        #SDG: initialize lb,ub
        if (self.get_ub(1) > 0):
            # Positive divisor: remainder lies in [0, divisor_ub - 1].
            self.lb = 0
            self.ub = self.get_ub(1) - 1
        else:
            self.lb = None  #SDG: Warning! lb and ub undefined if var2 can be negative
            self.ub = None

    def get_symbol(self):
        return '%'
class Eq(BinPredicate):
    """
    Equality expression between two expressions, or an expression and a
    constant. It can be used as either a top-level hard constraint or reified
    as a sub-expression.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        var3 = Variable()
        model.add(var1 == var2)
        model.add(var1 == 5)
        model.add( var3 == (var1 == var2) )
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "eq")
        #SDG: initialize lb,ub
        # Certainly true (lb=1) only when both sides are fixed to the same
        # value; possibly true (ub=1) unless the two domains are disjoint.
        self.lb = int((self.get_lb(0) == self.get_ub(0)) and (self.get_lb(1) == self.get_ub(1)) and (self.get_lb(0) == self.get_lb(1)))
        self.ub = int(not((self.get_ub(0) < self.get_lb(1)) or (self.get_lb(0) > self.get_ub(1))))

    def get_symbol(self):
        return '=='
class Ne(BinPredicate):
    """
    Disequality expression between two expressions, or an expression and a
    constant. It can be used as either a top-level hard constraint or reified as
    a sub-expression.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        var3 = Variable()
        model.add(var1 != var2)
        model.add(var1 != 5)
        model.add( var3 != (var1 != var2) )
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "ne")
        #SDG: initialize lb,ub
        # Mirror image of Eq: certainly true when the domains are disjoint,
        # possibly true unless both sides are fixed to the same value.
        self.lb = int((self.get_ub(0) < self.get_lb(1)) or (self.get_lb(0) > self.get_ub(1)))
        self.ub = int(not((self.get_lb(0) == self.get_ub(0)) and (self.get_lb(1) == self.get_ub(1)) and (self.get_lb(0) == self.get_lb(1))))

    def get_symbol(self):
        return '!='  #SDG: operator '=/=' does not belong to python language
class Lt(BinPredicate):
    """
    Strict less-than expression between two expressions, or an expression
    and a constant. Usable as a top-level hard constraint or reified as a
    sub-expression.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        var3 = Variable()
        model.add(var1 < var2)
        model.add(var1 < 5)
        model.add( var3 < (var1 < var2) )
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "lt")
        # Truth-value bounds: certainly true when the left side is always
        # below the right side; possibly true when it still can be.
        lhs_lb, lhs_ub = self.get_lb(0), self.get_ub(0)
        rhs_lb, rhs_ub = self.get_lb(1), self.get_ub(1)
        self.lb = int(lhs_ub < rhs_lb)
        self.ub = int(lhs_lb < rhs_ub)

    def get_symbol(self):
        return '<'
class Gt(BinPredicate):
    """
    Strict greater-than expression between two expressions, or an expression
    and a constant. Usable as a top-level hard constraint or reified as a
    sub-expression.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        var3 = Variable()
        model.add(var1 > var2)
        model.add(var1 > 5)
        model.add( var3 > (var1 > var2) )
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "gt")
        # Truth-value bounds: certainly true when the left side is always
        # above the right side; possibly true when it still can be.
        lhs_lb, lhs_ub = self.get_lb(0), self.get_ub(0)
        rhs_lb, rhs_ub = self.get_lb(1), self.get_ub(1)
        self.lb = int(lhs_lb > rhs_ub)
        self.ub = int(lhs_ub > rhs_lb)

    def get_symbol(self):
        return '>'
class Le(BinPredicate):
    """
    Less-than-or-equal expression between two expressions, or an expression
    and a constant. Usable as a top-level hard constraint or reified as a
    sub-expression.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        var3 = Variable()
        model.add(var1 <= var2)
        model.add(var1 <= 5)
        model.add( var3 <= (var1 <= var2) )
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "le")
        # Truth-value bounds: certainly true when the left side can never
        # exceed the right side; possibly true when it can still be below
        # or equal.
        lhs_lb, lhs_ub = self.get_lb(0), self.get_ub(0)
        rhs_lb, rhs_ub = self.get_lb(1), self.get_ub(1)
        self.lb = int(lhs_ub <= rhs_lb)
        self.ub = int(lhs_lb <= rhs_ub)

    def get_symbol(self):
        return '<='
class Ge(BinPredicate):
    """
    Greater-than-or-equal expression between two expressions, or an
    expression and a constant. Usable as a top-level hard constraint or
    reified as a sub-expression.

    .. code-block:: python

        var1 = Variable(0, 10)
        var2 = Variable(0, 100)
        var3 = Variable()
        model.add(var1 >= var2)
        model.add(var1 >= 5)
        model.add( var3 >= (var1 >= var2) )
    """

    def __init__(self, vars):
        BinPredicate.__init__(self, vars, "ge")
        # Truth-value bounds: certainly true when the left side can never
        # fall below the right side; possibly true when it can still be
        # above or equal.
        lhs_lb, lhs_ub = self.get_lb(0), self.get_ub(0)
        rhs_lb, rhs_ub = self.get_lb(1), self.get_ub(1)
        self.lb = int(lhs_lb >= rhs_ub)
        self.ub = int(lhs_ub >= rhs_lb)

    def get_symbol(self):
        return '>='
class Neg(Predicate):
    """
    Negate expression, used to negate another expression. It is equivalent to
    multiplying by -1.

    .. code-block:: python

        var = Variable(1, 10)
        model.add(-var < 3)
    """

    def __init__(self, vars):
        Predicate.__init__(self, vars, "neg")
        # Negation flips and swaps the child's bounds.
        self.lb = -(self.get_ub(0))  #SDG: initialize lb/ub
        self.ub = -(self.get_lb(0))

    def get_min(self, solver=None):
        return -1 * self.children[0].get_max(solver)

    def get_max(self, solver=None):
        return -1 * self.children[0].get_min(solver)

    def __str__(self):
        return '-' + str(self.children[0])

    def decompose(self):
        # Decompose into an equivalent multiplication by -1.
        return [self.children[0] * -1]
class Abs(Predicate):
    """
    Absolute expression, represents the absolute value of an expression or
    variable.

    .. code-block:: python

        var = Variable(-5, 5)
        model.add(Abs(var) < 3)
    """

    def __init__(self, vars):
        Predicate.__init__(self, [vars], "Abs")
        if (self.get_lb(0) < 0 and self.get_ub(0) > 0):  #SDG: initialize lb/ub
            # The child's domain straddles zero, so |child| can be 0.
            self.lb = 0
            self.ub = max(abs(self.get_lb(0)), abs(self.get_ub(0)))
        else:
            # Same-sign domain: both bounds map directly through abs().
            self.lb = min(abs(self.get_lb(0)), abs(self.get_ub(0)))
            self.ub = max(abs(self.get_lb(0)), abs(self.get_ub(0)))

    def __str__(self):
        return "Abs(" + str(self.children[0]) + ")"

    def decompose(self):
        # |x| == max(x, -x)
        return [Max([self.children[0], Neg([self.children[0]])])]
class Table(Predicate):
    """
    Table constraint explicitly specifying the list of allowed or forbidden
    tuples. Must be used as a top-level constraint, it cannot be used as a
    sub-expression.

    :param list vars: the variables to be constrained by the constraint.
    :param tuples: list of tuples used for the table constraint.
    :param type: type of table constraint, either 'support' or 'conflict'.
    """

    def __init__(self, vars, tuples=[], type='support'):
        Predicate.__init__(self, vars, "Table")
        # NOTE(review): the mutable default argument is harmless here only
        # because the list is copied by the comprehension below.
        self.parameters = [[tuple for tuple in tuples], type]
        self.lb = None  #SDG: initial lb/ub undefined
        self.ub = None

    def addSupport(self, tuple):
        """
        Adds in a support tuple to the list of tuples, if the Table constraint is
        of type support. If the table constraint is of type conflict then if the
        provided tuple is a conflict tuple in the tuple list, it is removed.

        :param tuple: tuple to be added.
        """
        if self.parameters[1] == 'support':
            self.parameters[0].append(tuple)
        else:
            self.parameters[0].remove(tuple)

    def addConflict(self, tuple):
        """
        Adds in a conflict tuple to the list of tuples, if the Table constraint
        is of type conflict. If the table constraint is of type support then if
        the provided tuple is a support tuple in the tuple list, it is removed.

        :param tuple: tuple to be added.
        """
        if self.parameters[1] != 'support':
            self.parameters[0].append(tuple)
        else:
            self.parameters[0].remove(tuple)

    def printTable(self):
        """
        Prints the table of tuples to standard output.
        """
        print self.parameters[1]
        for var in self.children:
            print var,
        print '\n (' + self.parameters[1] + ')', self.parameters[0]

    def __str__(self):  #SDG: pretty print of Table Predicate
        return self.operator + "([" + ",".join([str(var) for var in self.children]) + "]," + str(self.parameters[0]) + ",'" + self.parameters[1] + "')"
class Sum(Predicate):
    """
    Sum expression with linear coefficients. Numberjack will detect inline sum
    expressions to extract the sum expression. For example, the following three
    statements are equivalent however the last one requires the least amount of
    overhead by Numberjack.

    .. code-block:: python

        2*a + b + 0.5*c + 3*d == e
        Sum([2*a, b, 0.5*c, 3*d]) == e
        Sum([a,b,c,d], [2, 1, 0.5, 3]) == e

    .. note::

        Cannot be used as a top-level constraint, but can be used as the objective funcion.

    :param vars: the variables to be summed.
    :param coefs: list of coefficients, which is [1,1,..,1] by default.
    """

    def __init__(self, vars, coefs=None, offset=0):
        Predicate.__init__(self, vars, "Sum")
        if coefs is None:
            coefs = [1 for var in self.children]
        # parameters = [coefficient list, constant offset]
        self.parameters = [coefs, offset]
        #SDG: initial bounds
        # For a positive coefficient the child's lb contributes to the sum's
        # lb; for a negative coefficient the child's ub does (and vice versa).
        self.lb = sum(c*self.get_lb(i) if (c >= 0) else c*self.get_ub(i) for i,c in enumerate(coefs)) + offset
        self.ub = sum(c*self.get_ub(i) if (c >= 0) else c*self.get_lb(i) for i,c in enumerate(coefs)) + offset

    def close(self):
        # This handles the scalar constraint, i.e. with weights
        Predicate.close(self)

        def extract_sum(var, coef):
            """
            Recursively flatten nested Sum / scaled-by-constant ('mul')
            sub-expressions into a flat list of (variable, coefficient)
            pairs, multiplying coefficients down the tree.
            """
            if hasattr(var, 'operator'):
                if var.operator == "Sum":
                    # Nested Sum: recurse on its children, scaling by coef,
                    # and carry its constant offset as a (constant, 1) pair.
                    res = []
                    for (s_var, s_coef) in zip(var.get_children(), var.parameters[0]):
                        res.extend(extract_sum(s_var, s_coef * coef))
                    if var.parameters[1] != 0:
                        res.append((var.parameters[1], 1))
                    return res
                elif var.operator == "mul":
                    if (var.get_children()[0].operator == "var" and
                        (type(var.get_children()[1]) == types.IntType or
                         type(var.get_children()[1]) == types.FloatType)):
                        # variable * constant: fold the constant into coef.
                        return [(var.get_children()[0], var.get_children()[1] * coef)]
                    elif (type(var.get_children()[1]) == types.IntType or
                          type(var.get_children()[1]) == types.FloatType):
                        # (sub-expression) * constant: recurse then scale.
                        return [(new_var, new_coef * var.get_children()[1] * coef)
                                for (new_var, new_coef) in extract_sum(var.get_children()[0], 1)]
                    else:
                        return [(var, 1 * coef)]  # It is quadratic?
                else:
                    return [(var, 1 * coef)]
            else:
                return [(var, 1 * coef)]

        # This is where it should go looking for +s
        set_vars = set([])
        list_vars = []   # preserves first-seen order of the variables
        map_coefs = {}   # accumulated coefficient per variable
        offset = self.parameters[1]
        for (var, coef) in zip(self.children, self.parameters[0]):
            # NOTE(review): 'list' shadows the builtin within this loop.
            list = extract_sum(var, coef)
            for (nVar, nCoef) in list:
                if type(nVar) == types.IntType or type(nVar) == types.FloatType:
                    # Constant term: fold into the offset.
                    offset += (nVar * nCoef)
                else:
                    if nVar in set_vars:
                        # Repeated variable: merge coefficients.
                        map_coefs[nVar] += nCoef
                    else:
                        set_vars.add(nVar)
                        list_vars.append(nVar)
                        map_coefs[nVar] = nCoef
        # Drop variables whose merged coefficient cancelled to zero.
        flat_vars = []
        flat_coefs = []
        for nVar in list_vars:
            if map_coefs[nVar] != 0:
                flat_vars.append(nVar)
                flat_coefs.append(map_coefs[nVar])
        self.set_children(flat_vars)
        self.parameters = [flat_coefs, offset]

    def __str__(self):
        #print len(self.children)
        op = '('
        if len(self.parameters[0]):
            if self.parameters[0][0] != 1:
                op += (str(self.parameters[0][0]) + '*')
            op += (self.children[0].__str__())
            for i in range(1, len(self.children)):
                # Render '+ x', '- x', '+ c*x' or '- c*x' per coefficient.
                if self.parameters[0][i] == 1:
                    op += (' + ' + self.children[i].__str__())
                elif self.parameters[0][i] == -1:
                    op += (' - ' + self.children[i].__str__())
                elif self.parameters[0][i] > 0:
                    op += (' + ' + str(self.parameters[0][i]) + '*' + self.children[i].__str__())
                elif self.parameters[0][i] < 0:
                    op += (' - ' + str(-self.parameters[0][i]) + '*' + self.children[i].__str__())
        if self.parameters[1] > 0:
            op += (' + ' + str(self.parameters[1]))
        elif self.parameters[1] < 0:
            op += (' - ' + str(-self.parameters[1]))
        return op + ')'

    def decompose(self):
        def addition(X):
            # Right-fold the list into nested binary additions.
            if len(X) == 1:
                return X[0]
            else:
                return Add([X[0], addition(X[1:])])  #SDG: use specific Add BinPredicate instead of Sum
        # NOTE(review): 'coef is 1' / 'e is not 0' are identity tests on
        # ints and rely on CPython small-integer caching -- confirm.
        return [addition([(child if coef is 1 else (child * Variable(coef,coef,str(coef)))) for child, coef in zip(self.children, self.parameters[0])] + [Variable(e,e,str(e)) for e in self.parameters[1:] if e is not 0])]
class OrderedSum(Predicate):
    """
    Conjunction of a chain of precedence with a sum expression (without
    linear coefficients).

    The following:

        OrderedSum([a,b,c,d], l, u)

    is logically equivalent to:

        Sum([a,b,c,d]) >= l
        Sum([a,b,c,d]) <= u
        a >= b
        b >= c
        c >= d

    :param vars: the variables to be summed/sequenced.
    :param l: lower bound of the sum.
    :param u: upper bound of the sum.
    """

    def __init__(self, vars, l, u):
        Predicate.__init__(self, vars, "OrderedSum")
        self.parameters = [l, u]  # [lower bound, upper bound] of the sum

    def close(self):
        Predicate.close(self)

    def __str__(self):
        #print len(self.children)
        # Rendered as: l <= (x0 + x1 + ... + xn) <= u
        op = str(self.parameters[0]) + ' <= (' + (self.children[0].__str__())
        for i in range(1, len(self.children)):
            op += (' + ' + self.children[i].__str__())
        return op + ') <= ' + str(self.parameters[1])
class AllDiff(Predicate):
    """
    All-different constraint on a list of :class:`.Expression`, enforces that
    each takes a different value.

    :param vars: the variables or expressions which must take different values.
        This should be a :class:`.VarArray` or `list` with at least two items.

    .. note::

        Can only be used as a top-level constraint, not reified.
    """

    def __init__(self, vars, type=None):
        Predicate.__init__(self, vars, "AllDiff")
        if len(vars) < 2:
            raise InvalidConstraintSpecification("AllDiff requires a list of at least 2 expressions.")
        if type != None:
            # Optional flag forwarded to the solver as a parameter
            # (semantics are solver-specific -- TODO confirm).
            self.parameters = [type]
        self.lb = None  #SDG: initial lb/ub undefined
        self.ub = None

    #def __str__(self):
    #    return " AllDiff(" + " ".join(map(str, self.children)) + " ) "
class AllDiffExcept0(Predicate):
"""
All-different except zero constraint on a list of :class:`.Expression`,
enforces that each takes a different value, except those which take the
value 0.
:param vs: the variables or expressions which must take different values.
This should be a :class:`.VarArray` or `list` with at least two items.
.. note::
Can only be used as a top-level constraint, not reified.
"""
def __init__(self, vs):
Predicate.__init__(self, vs, "AllDiffExcept0")
if len(vs) < 2:
raise InvalidConstraintSpecification("AllDiff requires a list of at least 2 expressions.")
def decompose(self):
from itertools import combinations
return [Disjunction([x == 0, y == 0, x != y]) for x, y in combinations(self.children, 2)]
class Gcc(Predicate):
    """
    The Global Cardinality Constraint limits the number of times that certain
    values can be used within a set of variables. For example, we might want the
    value 1 to occur at least once and at most twice, the value 2 to occur
    exactly twice, the value 3 at most four times, and so on.

    :param vars: the variables which are being constrained. This should be a
        :class:`.VarArray` or `list` with at least two items.
    :param dict cards: A dictionary mapping each constrained value to a two item
        tuple for the lower and upper bounds on the number of occurrences of
        that value.

    .. code-block:: python

        X = VarArray(5, 1, 4)
        cards = {1: (1, 2), 2: (2, 2), 3: (0, 3), 4: (1, 2)}
        model = Model(Gcc(X,cards))

    .. note::

        Can only be used as a top-level constraint, not reified.
    """

    def __init__(self, vars, cards):
        Predicate.__init__(self, vars, "Gcc")
        # Python 2: dict.keys() returns a list, sorted in place below.
        values = cards.keys()
        values.sort()
        # Parallel lists of occurrence bounds, aligned with sorted values.
        lb = []
        ub = []
        for val in values:
            lb.append(cards[val][0])
            ub.append(cards[val][1])
        self.parameters = [values, lb, ub]
        self.lb = None  #SDG: initial lb/ub undefined
        self.ub = None

    def __str__(self):
        save_str = Expression.__str__
        Expression.__str__ = Expression.name
        output = " Gcc(" + " ".join(map(str, self.children)) + " | "
        for v, l, u in zip(*(self.parameters)):
            output += str(v) + ' in [' + str(l) + ',' + str(u) + '] '
        Expression.__str__ = save_str
        return output + ')'

    def decompose(self):
        # One bounded Cardinality per constrained value.
        X = self.children
        decomp = []
        for val, l, u in zip(self.parameters[0], self.parameters[1], self.parameters[2]):
            card = Variable(l, u)
            decomp.append((card == Cardinality(X, val)))
        return decomp
class Max(Predicate):
    """
    The maximum value of a set of Variables.

    :param vars: the variables or expressions. This should be a
        :class:`.VarArray` or `list` with at least two items.

    .. note::

        Cannot be used as a top-level constraint, only as a sub-expression.
    """

    def __init__(self, vars):
        Predicate.__init__(self, vars, "Max")
        #SDG: initial bounds
        self.lb = max(self.get_lb(i) for i in range(len(vars)))
        self.ub = max(self.get_ub(i) for i in range(len(vars)))

    def get_min(self, solver=None):
        # Children may be plain constants; only query expression objects.
        return max([x.get_min(solver) if type(x) not in [int, long, float, str, bool] else x for x in self.children])

    def get_max(self, solver=None):
        return max([x.get_max(solver) if type(x) not in [int, long, float, str, bool] else x for x in self.children])

    def get_value(self, solver=None):
        return max([x.get_value(solver) if type(x) not in [int, long, float, str, bool] else x for x in self.children])

    def decompose(self):
        # M is an upper bound of every child, and equals at least one of
        # them (the disjunction forces M down onto some child).
        X = self.children
        M = Variable(self.get_min(), self.get_max(), 'Max')
        decomp = [M]
        decomp.extend([M >= x for x in X])
        decomp.append(Disjunction([M <= x for x in X]))
        return decomp

    #def __str__(self):
    #    return " MAX ( " + " ".join(map(str, self.children)) + " ) "
class Min(Predicate):
    """
    The minimum value of a set of Variables.

    :param vars: the variables or expressions. This should be a
        :class:`.VarArray` or `list` with at least two items.

    .. note::

        Cannot be used as a top-level constraint, only as a sub-expression.
    """

    def __init__(self, vars):
        Predicate.__init__(self, vars, "Min")
        #SDG: initial bounds
        self.lb = min(self.get_lb(i) for i in range(len(vars)))
        self.ub = min(self.get_ub(i) for i in range(len(vars)))

    def get_min(self, solver=None):
        # Children may be plain constants; only query expression objects.
        return min([x.get_min(solver) if type(x) not in [int, long, float, str, bool] else x for x in self.children])

    def get_max(self, solver=None):
        return min([x.get_max(solver) if type(x) not in [int, long, float, str, bool] else x for x in self.children])

    def get_value(self, solver=None):
        return min([x.get_value(solver) if type(x) not in [int, long, float, str, bool] else x for x in self.children])

    def decompose(self):
        # M is a lower bound of every child, and equals at least one of
        # them (the disjunction forces M up onto some child).
        X = self.children
        M = Variable(self.get_min(), self.get_max(), 'Min')
        decomp = [M]
        decomp.extend([M <= x for x in X])
        decomp.append(Disjunction([M >= x for x in X]))
        return decomp

    #def __str__(self):
    #    return " MIN ( " + " ".join(map(str, self.children)) + " ) "
class Element(Predicate):
    """
    Given an integer Variable `index` and a VarArray `vars`, Element is the
    Predicate holding the value of the variable at index `index` of the array
    `vars`.

    :param vars: the variables or expressions. This should be a
        :class:`.VarArray` or `list` with at least two items.
    :param index: the integer expression used to select within `vars`.

    .. code-block:: python

        vars = VarArray(5, 1, 4)
        index = Variable(0, 4)
        elt1 = Element(vars, index)
        elt2 = vars[index]

    .. note::

        Cannot be used as a top-level constraint, only as a sub-expression.
    """

    def __init__(self, vars, index):
        # The index expression is stored as the last child, after the
        # array entries.
        children = list(vars)
        children.append(index)
        Predicate.__init__(self, children, "Element")
        #SDG: initial bounds
        # The result ranges over the union of the array entries' bounds
        # (the index child, at position len(vars), is excluded).
        self.lb = min(self.get_lb(i) for i in range(len(vars)))
        self.ub = max(self.get_ub(i) for i in range(len(vars)))
## Boolean Clause
class Clause(Predicate):
    """
    SAT-style Boolean clause over literals. Negated literals (operator
    'neg') are stored as their underlying variable with polarity 0; plain
    literals with polarity 1. Polarities live in ``parameters[0]``.
    """

    def __init__(self, *vars):
        Predicate.__init__(self, [], "Clause")
        polarity = []
        self.children = []
        for literal in vars:
            if literal.operator == 'neg':
                # Strip the negation; remember it via polarity 0.
                polarity.append(0)
                self.children.append(literal.children[0])
            else:
                polarity.append(1)
                self.children.append(literal)
        self.parameters = [polarity]
        self.lb = None  #SDG: initial lb/ub undefined
        self.ub = None

    def add(self, literal):
        """Append one more literal to the clause."""
        if literal.operator == 'neg':
            self.parameters[0].append(0)
            self.children.append(literal.children[0])
        else:
            self.parameters[0].append(1)
            self.children.append(literal)

    def __str__(self):
        ret_str = "Clause("
        for i in range(len(self.children)):
            ret_str += ' '
            if self.parameters[0][i] == 1:
                ret_str += str(self.children[i])
            else:
                # Negative polarity rendered with a leading '~'.
                ret_str += ('~' + str(self.children[i]))
        return ret_str + ' )'
class LessLex(Predicate):
    """
    Less-than lexicographic ordering constraint between two lists of
    expressions.

    :param vars_1: the first list of variables or expressions.
    :param vars_2: the second list of variables or expressions.

    .. note::

        Can only be used as a top-level constraint, not reified.
    """

    def __init__(self, vars_1, vars_2):
        # Both lists are stored consecutively in self.children; the split
        # point is recovered in __str__ as half the total length.
        children = list(vars_1)
        children.extend(vars_2)
        Predicate.__init__(self, children, "LessLex")
        self.lb = None  #SDG: initial lb/ub undefined
        self.ub = None

    def __str__(self):
        # NOTE(review): '/' is integer division under Python 2; this line
        # would need '//' under Python 3.
        length = len(self.children) / 2
        toprint = '[' + str(self.children[0])
        for i in range(1, length):
            toprint += (', ' + str(self.children[i]))
        toprint += '] < [' + str(self.children[length])
        for i in range(length + 1, 2 * length):
            toprint += (', ' + str(self.children[i]))
        return toprint + ']'
class LeqLex(Predicate):
    """
    Less-than-or-equal lexicographic ordering constraint between two lists of
    expressions.

    :param vars_1: the first list of variables or expressions.
    :param vars_2: the second list of variables or expressions.

    .. note::

        Can only be used as a top-level constraint, not reified.
    """

    def __init__(self, vars_1, vars_2):
        # Both lists are stored consecutively in self.children; the split
        # point is recovered in __str__ as half the total length.
        children = list(vars_1)
        children.extend(vars_2)
        Predicate.__init__(self, children, "LeqLex")
        self.lb = None  #SDG: initial lb/ub undefined
        self.ub = None

    def __str__(self):
        # NOTE(review): '/' is integer division under Python 2; this line
        # would need '//' under Python 3.
        length = len(self.children) / 2
        toprint = '[' + str(self.children[0])
        for i in range(1, length):
            toprint += (', ' + str(self.children[i]))
        toprint += '] <= [' + str(self.children[length])
        for i in range(length + 1, 2 * length):
            toprint += (', ' + str(self.children[i]))
        return toprint + ']'
class Maximise(Predicate):
    """
    Maximisation objective function, sets the goal of search to be the
    maximisation of its arguments.

    :param vars: The :class:`.Variable` or :class:`.Expression` to be maximized.
    """

    def __init__(self, vars):
        # The single objective expression is wrapped as the only child.
        Predicate.__init__(self, [vars], "Maximise")
        self.lb = None  #SDG: initial lb/ub undefined
        self.ub = None

    #def __str__(self):
    #    return " Maximise ( " + " ".join(map(str, self.children)) + " ) "
def Maximize(var):
    """
    Alias for American spelling of :class:`.Maximise`.

    :param var: the :class:`.Variable` or :class:`.Expression` to be maximized.
    """
    return Maximise(var)
class Minimise(Predicate):
    """
    Minimisation objective function, sets the goal of search to be the
    minimisation of its arguments.

    :param vars: The :class:`.Variable` or :class:`.Expression` to be minimized.
    """

    def __init__(self, vars):
        Predicate.__init__(self, [vars], "Minimise")
        # Bounds are undefined until the objective is loaded into a solver.
        self.lb = self.ub = None
def Minimize(var):
    """
    Alias for American spelling of :class:`.Minimise`.

    :param var: the :class:`.Variable` or :class:`.Expression` to be minimized.
    """
    return Minimise(var)
class Disjunction(Predicate):
    """
    Disjunction specifying that at least one of the sub-expressions should be
    true. This can be a top-level constraint or reified as a sub-expression.

    :param vars: the variables or expressions. This should be a
        :class:`.VarArray` or `list` with at least two items.
    """

    def __init__(self, vars):
        Predicate.__init__(self, vars, "OR")
        # An OR is bounded by the largest lower/upper bound of its children.
        bounds = [(self.get_lb(i), self.get_ub(i)) for i in range(len(vars))]
        self.lb = max(lo for lo, _ in bounds)
        self.ub = max(hi for _, hi in bounds)

    def decompose(self):
        # At least one (boolean) child must be true, i.e. their sum is
        # strictly positive.
        return [Sum(self.children) > 0]
class Conjunction(Predicate):
    """
    Conjunction specifying that all the sub-expressions should be true. This
    should only be used as a reified sub-expression, otherwise, there is an
    implicit conjunction across all top-level constraints anyway.

    :param vars: the variables or expressions. This should be a
        :class:`.VarArray` or `list` with at least two items.
    """

    def __init__(self, vars):
        Predicate.__init__(self, vars, "AND")
        # An AND is bounded by the smallest lower/upper bound of its children.
        bounds = [(self.get_lb(i), self.get_ub(i)) for i in range(len(vars))]
        self.lb = min(lo for lo, _ in bounds)
        self.ub = min(hi for _, hi in bounds)

    def decompose(self):
        # Every (boolean) child must be true.
        n_children = len(self.children)
        return [Sum(self.children) == n_children]
# BH (2014/10/15): disabled as it appears to be buggy and not used anywhere.
# class Convex(Predicate):
# def __init__(self, vars):
# Predicate.__init__(self, [var for var in vars], "Convex")
# self.lb = None #SDG: initial lb/ub undefined
# self.ub = None
# def __str__(self):
# return "[" + " ".join(map(str, self.children)) + "] is row-convex"
# def decompose(self):
# ### BUGGY!!
# print "Decomposing Row convexity constraint", self
# X = self.children
# n = len(X)
# first = Variable(n)
# last = Variable(n)
# decomposition = [X[i] <= (first <= i) for i in range(n)]
# decomposition.extend([X[i] <= (last >= i) for i in range(n)])
# decomposition.extend([((first <= i) & (i <= last)) <= X[i] for i in range(n)])
# decomposition.append(first <= last)
# print VarArray(decomposition)
# return decomposition
class Cardinality(Predicate):
    """
    Counts the number of expressions which have been assigned a specific value.

    :param vars: the variables or expressions. This should be a
        :class:`.VarArray` or `list` with at least two items.
    :param int value: the value for which the cardinality of is being counted.

    .. note::
        Cannot be used as a top-level constraint, only as a sub-expression.
    """

    def __init__(self, vars, value):
        Predicate.__init__(self, list(vars), "Card")
        self.parameters = [value]
        # lb counts the children already fixed to `value`, ub counts the
        # children whose domain still contains `value`.
        self.lb = sum((x.get_lb() == value and x.get_ub() == value) for x in vars)
        self.ub = sum((x.get_lb() <= value <= x.get_ub()) for x in vars)

    def __str__(self):
        return "card of " + str(self.parameters[0]) + " in [" + " ".join(map(str, self.children)) + "]"

    def decompose(self):
        target = self.parameters[0]
        return [Sum([child == target for child in self.children])]
# BH (2014/10/15): disabled as it does not appear to be used anywhere
# class Cmp2(BinPredicate):
# ## expression equal to -1 if x_0 < x_1, 1 if x_1 < x_0 and 0 otherwise
# def __init__(self, vars):
# BinPredicate.__init__(self, vars, "Cmp")
# def get_symbol(self):
# return 'cmp'
# def decompose(self):
# X = self.children
# return [(X[1] < X[0]) - (X[0] < X[1])]
## @defgroup sched_group Scheduling constructs
# The scheduling constructs and constraints
# @{
#
## A special class for simple representations of scheduling tasks.
#
# The Task class allows for simplified modeling of tasks in scheduling
# applications. It encapsulates the earliest start time, latest end time
# (makespan), and duration.
#
# There are various ways of declaring a Task:
#
# - M = Task() creates a Task with an earliest start time of 0, latest end time of 1, and duration 1
# - M = Task(ub) creates a Task with an earliest start time of 0, latest end time of 'ub', and duration 1
# - M = Task(ub, dur) creates a Task with an earliest start time of 0, latest end time of 'ub', and duration 'dur'
# - M = Task(lb, ub, dur) creates a Task with an earliest start time of 0, latest end time of 'ub', and duration 'dur'
#
# When the model is solved, @get_value() returns the start
# time of the task.
#
class Task(Expression):
    """
    The Task class allows for simplified modeling of tasks in scheduling
    applications. It encapsulates the earliest start time, latest end time
    (makespan), and duration. The following table shows example calls to the
    constructor which result in different kinds of tasks.

    .. code-block:: python

        Task() # creates a Task with an earliest start time of 0, latest end
               # time of 1, and duration 1
        Task(ub) # creates a Task with an earliest start time of 0, latest end
                 # time of 'ub', and duration 1
        Task(ub, dur) # creates a Task with an earliest start time of 0,
                      # latest end time of 'ub', and duration 'dur'
        Task(lb, ub, dur) # creates a Task with an earliest start time of
                          # 'lb', latest end time of 'ub', and duration 'dur'

    When the model is solved, :func:`Numberjack.Expression.get_value` returns
    the start time of the task.
    """

    def __init__(self, arg1=None, arg2=None, arg3=None):
        # The variable's domain [lb, ub] ranges over the possible *start*
        # times, so the latest start time is the makespan minus the duration.
        lb = 0
        ub = 1
        self.duration = 1
        if arg1 is not None:
            if arg2 is not None:
                if arg3 is not None:  # 3 args: <release date, makespan, duration>
                    lb = arg1
                    ub = arg2 - arg3
                    self.duration = arg3
                else:  # only 2 args, read as <makespan, duration>
                    ub = arg1 - arg2
                    self.duration = arg2
            else:  # only 1 arg, read as the makespan
                # BUGFIX: was 'ub = arg2 - 1', but arg2 is None in this
                # branch which raised a TypeError; the makespan is arg1.
                ub = arg1 - 1
        Expression.__init__(self, "t")
        self.lb = lb
        self.ub = ub
        self.domain_ = None

    def __str__(self):
        if self.is_built() and self.solver.is_sat():
            return str(self.get_value())
        else:
            ident = str(self.ident)
            if self.ident == -1:
                ident = ''
            # Unsolved tasks render as t<id>: [min:duration:max end time].
            return 't' + str(ident) + ': [' + str(self.get_min()) + ':' + str(self.duration) + ':' + str(self.get_max() + self.duration) + ']'

    def __lt__(self, pred):
        """
        Creates a precedence expression on the task. Can be used to specify
        precedence between two tasks or that a task has finished before a
        certain time.

        :param pred: if `pred` is an `int` then constrains that the task is
            finished by `pred`, if `pred` is another :class:`.Task` instance,
            then creates a top-level :class:`.Precedence` constraint.
        """
        if type(pred) is int:
            # Finished by `pred` means started by `pred - duration`.
            return Le([self, pred - self.duration])
        return Precedence(self, pred, self.duration)

    def __gt__(self, pred):
        # Symmetric to __lt__: either a lower bound on the start time, or a
        # precedence constraint with the roles of the two tasks swapped.
        if type(pred) is int:
            return Gt([self, pred])
        return pred.__lt__(self)

    def requires(self, resource):
        "Register this task on `resource`, e.g. a :class:`.UnaryResource`."
        resource.add(self)

    def reset(self, makespan):
        "Update the latest start time for a new overall makespan."
        self.ub = makespan - self.duration
class Precedence(Predicate):
    """
    Precedence constraint enforces a certain gap between the start time of two
    tasks. Equivalent to `task_i + duration <= task_j`.

    :param Task task_i: the first task.
    :param Task task_j: the second task.
    :param int dur: the gap to maintain between the two tasks.
    """

    def __init__(self, task_i, task_j, dur):
        Predicate.__init__(self, [task_i, task_j], "Precedence")
        self.parameters = [dur]
        # Bounds are undefined until the constraint is built by a solver.
        self.lb = None
        self.ub = None

    def decompose(self):
        first, second = self.children
        gap = self.parameters[0]
        return [(first + gap) <= second]

    def __str__(self):
        return str(self.children[0]) + ' + ' + str(self.parameters[0]) + ' <= ' + str(self.children[1])
class NoOverlap(Predicate):
    """
    Binary disjunctive constraint enforces that two tasks do not overlap.
    Equivalent to `(task_i + duration_i <= task_j) | (task_j + duration_j <=
    task_i)` .

    :param Task task_i: the first task.
    :param Task task_j: the second task.
    :param int dur_i: the duration of the first task, if `None` then
        `task_i.duration` will be used.
    :param int dur_j: the duration of the second task, if `None` then
        `task_j.duration` will be used.
    """

    def __init__(self, task_i, task_j, dur_i=None, dur_j=None):
        # Default each duration to the task's own when not overridden.
        if dur_i == None:
            dur_i = task_i.duration
        if dur_j == None:
            dur_j = task_j.duration
        Predicate.__init__(self, [task_i, task_j], "NoOverlap")
        self.parameters = [dur_i, dur_j]
        # Bounds are undefined until the constraint is built by a solver.
        self.lb = None
        self.ub = None

    def decompose(self):
        # Either the first task finishes before the second starts, or
        # vice versa.
        first, second = self.children
        dur_first, dur_second = self.parameters
        return [((first + dur_first) <= second) | ((second + dur_second) <= first)]

    def __str__(self):
        return str(self.children[0]) + ' + ' + str(self.parameters[0]) + ' <= ' + str(self.children[1]) + ' OR ' + str(self.children[1]) + ' + ' + str(self.parameters[1]) + ' <= ' + str(self.children[0])
class UnaryResource(list):
    """
    Unary resource constraint ensures that only one of the specified list of
    tasks are running at each time point. An optional distance between tasks
    can be specified also. The instance is a list of the pairwise
    :class:`.NoOverlap` constraints.

    :param arg: a list of :class:`.Task` instances.
    :param int distance: optional distance between tasks.
    """

    def __init__(self, arg=[], distance=0):
        list.__init__(self)
        self.tasks = list(arg)
        self.distance = distance
        # One pairwise NoOverlap for every pair of tasks, in the same order
        # as a nested (i, j < i) iteration.
        for i in range(1, len(self.tasks)):
            for j in range(i):
                self.append(self._make_no_overlap(self.tasks[i], self.tasks[j]))

    def _make_no_overlap(self, one, other):
        # Disjunctive constraint for a single pair of tasks, padding each
        # duration by the required distance.
        return NoOverlap(one, other,
                         one.duration + self.distance,
                         other.duration + self.distance)

    def add(self, new_task):
        """
        Add an additional task to the existing list of tasks obeying this unary
        resource.

        :param Task new_task: the additional task to include.
        """
        for task in self.tasks:
            self.append(self._make_no_overlap(new_task, task))
        self.tasks.append(new_task)

    def __str__(self):
        return "[" + " ".join(map(str, self.tasks)) + "] share a unary resource"
# BH (2014/10/15): disabled as it does not appear to be used anywhere and
# appears to duplicate UnaryResource
# class UnaryResourceB(Predicate):
# def __init__(self, tasks, distance=0):
# Predicate.__init__(self, [task for task in tasks], "UnaryResource")
# self.distance = distance
# def add(self, new_task):
# self.children.append(new_task)
# def __str__(self):
# return "[" + " ".join(map(str, self.children)) + "] share a unary resource"
# def decompose(self):
# return [NoOverlap(task1, task2) # , task1.duration+self.distance, task2.duration+self.distance)
# for task1, task2 in pair_of(self.children)]
class ParamList(dict):
    """
    Dictionary of named parameters which can additionally be *called* with one
    or more keys to retrieve the corresponding values as a list, e.g.
    ``p('solver', 'N')`` returns ``[p['solver'], p['N']]``.
    """

    def __init__(self, X):
        dict.__init__(self, X)

    def __call__(self, *args):
        return [self[key] for key in args]
def input(default):
    """
    Read command line arguments from the user. This is useful to establish some
    default parameters of your model and solving process, and to subsequently
    allow these to be easily changed by specifying a command line argument.

    The `default` argument allows you to specify the list of allowed options, as
    well as their default values. Any option given on the command line that is
    not present in this list will raise an error. Values specified on the
    command line will be coerced into the same data type as is given for that
    option in `default`.

    :param dict default: a dictionary of the valid options and their default
        values.
    :return: a dictionary of the options from `default`, possibly with updated
        values from the command line.
    :rtype: dict

    For example, if launching I can change the default parameters of a model
    like so:

    .. code-block:: bash

        python numberjackfile.py -solver MiniSat -N 10

    .. code-block:: python

        # numberjackfile.py
        default = {'N': 5, 'solver': 'Mistral', 'tcutoff': 30}
        param = input(default)
        # param will be a dict {'N': 10, 'solver': 'MiniSat', 'tcutoff': 30}

    .. deprecated:: 1.1
        This function will be renamed or replaced in 2.0 to avoid the naming
        clash with the builtin input function when imported with `*`.
    """
    import sys
    param_list = ParamList(default)
    option = None       # name of the option currently collecting values
    params = []         # values collected for `option` so far
    commandline = [arg for arg in sys.argv[1:]]
    # Sentinel so that the final option's collected values get flushed by
    # the loop below.
    commandline.append('-end_argument')
    for arg in commandline:
        #print arg
        # NOTE(review): the "'-1.0'"/"'1.0'" guards look like workarounds so
        # a negative float value is not mistaken for an option -- confirm.
        if arg[0] == '-' and arg != '-1.0': # new argument
            # first take previous param into account
            #print 'end of option:', params
            if option != None and option != '1.0':
                # Flush the values collected for the previous option,
                # coercing each to the type of its default value.
                if len(params) == 0:
                    # Flag-style option with no value.
                    param_list[option] = 'yes'
                elif len(params) == 1:
                    if type(param_list[option]) == int:
                        #print 'int'
                        param_list[option] = int(params[0])
                    elif type(param_list[option]) == float:
                        #print 'float'
                        param_list[option] = float(params[0])
                    else:
                        #print 'string'
                        param_list[option] = params[0]
                else:
                    # Multiple values: coerce element-wise based on the
                    # type of the default list's first element.
                    if len(param_list[option]) > 0:
                        if type(param_list[option][0]) == int:
                            #print 'int'
                            np = [int(p) for p in params]
                            params = np
                        elif type(param_list[option][0]) == float:
                            #print 'float'
                            np = [float(p) for p in params]
                            params = np
                    param_list[option] = params
            option = arg[1:]
            #print 'new option', option
            if option != 'end_argument':
                if not param_list.has_key(option):
                    #print 'unknwn option'
                    if option == 'h' or option == '-h' or option == 'help' or option == '-help':
                        #print 'help'
                        # Print a usage summary derived from the defaults
                        # and exit.
                        the_keys = param_list.keys()
                        the_keys.sort()
                        for key in the_keys:
                            print ('-' + key).ljust(20) + ":",
                            if type(param_list[key]) == int:
                                print 'int'.ljust(14),
                            if type(param_list[key]) == float:
                                print 'float'.ljust(14),
                            elif type(param_list[key]) == str:
                                print 'string'.ljust(14),
                            elif hasattr(param_list[key], '__iter__'):
                                print 'list',
                                if len(param_list[key]) > 0:
                                    if type(param_list[key][0]) == int:
                                        print 'of int ',
                                    elif type(param_list[key][0]) == float:
                                        print 'of float ',
                                    else:
                                        print 'of string',
                                else:
                                    print 'of string',
                            print ' (default=' + str(param_list[key]) + ')'
                        exit(1)
                    else:
                        print 'Warning: wrong parameter name, ignoring arguments following', option
                else:
                    #print 'fine option init param list'
                    params = []
        else:
            params.append(arg)
            #print 'input param:', params
    return param_list
def pair_of(l):
    # All unordered pairs of elements of l, ordered by increasing index gap:
    # first every pair of adjacent elements, then pairs two apart, and so on.
    pairs = []
    for gap in range(1, len(l)):
        pairs.extend(zip(l, l[gap:]))
    return pairs
def value(x):
    # Convenience accessor for an expression's solution value, handy as a
    # callable for map() over a list of variables.
    return x.get_value()
def total_seconds(td):
    """Return the duration of timedelta `td` in seconds as a float."""
    # Python 2.6 doesn't have timedelta.total_seconds; fall back to
    # computing it from the individual components.
    native = getattr(td, 'total_seconds', None)
    if native:
        return native()
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def load_in_decompositions():
    """
    Import the Decomp module and graft its constraint classes and their
    decomposition methods onto this module.
    """
    import Decomp
    # First add the constraint classes that are not already defined here.
    for attr_name in dir(Decomp):
        attr = getattr(Decomp, attr_name)
        # BUGFIX: compare with '==' instead of 'is'; identity comparison of
        # integers only works due to CPython's small-int cache and is not a
        # language guarantee.
        if hasattr(attr, "__name__") and attr.__name__.find("decompose") == -1:
            if not hasattr(sys.modules[__name__], attr.__name__):
                setattr(sys.modules[__name__], attr.__name__, attr)

    # Now load up the decompositions, introspection is great huh :)
    for attr_name in dir(sys.modules[__name__]):
        if hasattr(Decomp, "decompose_" + attr_name):
            setattr(getattr(sys.modules[__name__], attr_name),
                    "decompose", getattr(Decomp, "decompose_" + attr_name))

load_in_decompositions()
class Solution(list):
    """
    Class which will extract a list of the solution values for a given list of
    variables. Solution values are ordered by the order in which the variables
    are given in `vars`. This functionality is equivalent to calling
    :func:`Expression.get_value` on each variable, but it is just wrapped here.

    :param vars: a Matrix, VarArray, or list of variables to extract the
        solution for.
    """

    def __init__(self, vars):
        list.__init__(self)
        if issubclass(type(vars), Matrix):
            # For a Matrix, store one list of values per matrix row.
            self.variables = vars.flat
            for row in vars.row:
                self.append([cell.get_value() for cell in row])
        else:
            self.variables = vars
            self.extend([var.get_value() for var in vars])
        # Also index the values by variable for direct lookup.
        self.dico = dict((var, var.get_value()) for var in self.variables)

    def __getitem__(self, i):
        # Integer indices address the positional list; anything else is
        # treated as a variable key.
        if type(i) is int:
            return list.__getitem__(self, i)
        return self.dico[i]

    def __contains__(self, x):
        return x in self.dico

    def __str__(self):
        if len(self) == 0:
            return '[]'
        if type(self[0]) is list:
            # Matrix solutions print one row per line.
            return '[' + ',\n '.join([str(row) for row in self]) + ']'
        return list.__str__(self)
class Nogood(object):
    """
    Lightweight pairing of a learnt clause with the solver that produced it,
    so the clause can be pretty-printed on demand.
    """

    def __init__(self, clause, solver):
        self.clause = clause
        self.solver = solver

    def __str__(self):
        # Delegate formatting to the owning solver.
        return self.solver.print_clause(self.clause)
keep_alive = []
class NBJ_STD_Solver(object):
"""
Generic solver class which will be subclassed by the solver interfaces.
Provides common functionality which will be used in each of the solver
interfaces.
.. note::
The user should not need to instantiate this class, instead
:func:`Numberjack.Model.load` should be used to return an instance
of a subclass. However the following methods can be used on that
instance.
"""
def __init__(self, Library, Wrapper, model=None, X=None, FD=False,
             clause_limit=-1, encoding=None):
    """
    Build a solver front-end for the backend named `Library`/`Wrapper`,
    optionally loading `model` into it immediately.

    :param str Library: name of the backend package under Numberjack.solvers.
    :param str Wrapper: name of the SWIG wrapper module for that backend.
    :param model: the :class:`.Model` to load, or None.
    :param X: optional decision variables to initialise search on.
    :param bool FD: force finite domains on the decision variables.
    :param int clause_limit: clause limit for SAT-based backends.
    :param encoding: optional EncodingConfiguration for SAT encodings.
    """
    self.decomposition_store = []
    self.enc_config_cache = {}
    self.free_memory = None
    self.verbosity = 0
    # Time to load the model into the solver. Can be used to track the time
    # to encode time for linearization and SAT. Only if the model has been
    # passed to the constructor of the solver though or model.load() is
    # used.
    self.load_time = None
    # self.solver = getattr(sys.modules[Library], Library + "Solver", None)()
    solverpkg = "Numberjack.solvers"
    libstr = "%s.%s" % (solverpkg, Library)
    wrapstr = "%s.%s" % (solverpkg, Wrapper)
    # Instantiate the backend's native solver object by name.
    self.solver = getattr(sys.modules[libstr], Library + "Solver", None)()
    if hasattr(self.solver, "setClauseLimit"):
        self.solver.setClauseLimit(clause_limit)
    self.LibraryPacakge = libstr
    self.WrapperPackage = wrapstr
    self.Library = Library
    self.Wrapper = Wrapper
    # Resolve the wrapper's container/variable factory types; any that the
    # backend does not provide are left as None.
    self.ExpArray = getattr(sys.modules[wrapstr],
                            Wrapper + "ExpArray", None)
    self.IntArray = getattr(sys.modules[wrapstr],
                            Wrapper + "IntArray", None)
    self.DoubleArray = getattr(sys.modules[wrapstr],
                               Wrapper + "DoubleArray", None)
    self.IntVar = getattr(sys.modules[wrapstr],
                          Wrapper + "_IntVar", None)
    self.FloatVar = getattr(sys.modules[wrapstr],
                            Wrapper + "_FloatVar", None)
    self.EncodingConfiguration = getattr(sys.modules[wrapstr],
                                         "EncodingConfiguration", None)
    if "_" + Library in sys.modules:
        # Keep a handle on the SWIG destructor so native memory can be
        # released explicitly later.
        self.free_memory = getattr(sys.modules["_" + Library],
                                   "delete_" + Library + "Solver", None)
    self.variables = None
    if model is not None:
        var_array = None
        self.solver_id = model.getSolverId()
        # weakref avoids a reference cycle between model and solver.
        self.model = weakref.proxy(model)
        self.model.close(self) #SDG: needs to know for which solver the model is built
        if self.EncodingConfiguration:
            if not encoding:
                encoding = EncodingConfiguration()
            self.solver.encoding = self.getEncodingConfiguration(encoding)
        # Load each expression and its variables into the solver. Record
        # time to do so.
        loadstart = datetime.datetime.now()
        for expr in self.model.get_exprs():
            self.solver.add(self.load_expr(expr))
        self.load_time = total_seconds(datetime.datetime.now() - loadstart)
        if X != None:
            # Initialise search on the user-supplied decision variables
            # (only those already built for this solver).
            self.variables = VarArray(flatten(X))
            var_array = self.ExpArray()
            for x in self.variables:
                if len(x.var_list) > self.solver_id - 1:
                    var_array.add(x.var_list[self.solver_id - 1])
            if FD:
                self.solver.forceFiniteDomain(var_array)
            if var_array.size() > 0:
                self.solver.initialise(var_array)
        else:
            self.variables = weakref.proxy(self.model.variables)
            self.solver.initialise()
def add_to_store(self, x):
if issubclass(type(x), Predicate):
self.decomposition_store.append(x)
for child in x.children:
self.add_to_store(child)
def getIntVar(self, arg1, arg2, argopt1=None):
    """
    Build a backend-native integer variable.

    Called either as getIntVar(lb, ub, ident), or for variables with a
    sparse domain as getIntVar(domain_array, ident) in which case
    `argopt1` is None.
    """
    try:
        if argopt1 is None:
            return self.IntVar(arg1, arg2)
        return self.IntVar(arg1, arg2, argopt1)
    except Exception as e:
        # BUGFIX: narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit, and include the underlying cause
        # instead of discarding it.
        raise Exception("ERROR while creating variable: %s" % e)
def getFloatVar(self, lb, ub, ident):
    """
    Build a backend-native real-valued variable with bounds [lb, ub].

    :raises ValueError: if the underlying solver has no real variables.
    """
    try:
        return self.FloatVar(lb, ub, ident)
    except Exception:
        # BUGFIX: narrowed from a bare 'except:' so that e.g.
        # KeyboardInterrupt is not converted into a misleading ValueError.
        raise ValueError("ERROR: Solver does not support real variables")
def getEncodingConfiguration(self, enc_config):
# Returns the C++ EncodingConfiguration equivalent of enc_config from
# the wrapper.
if not self.EncodingConfiguration:
raise UnsupportedSolverFunction(self.Library, "EncodingConfiguration", "This solver does not support custom encoding settings.")
if enc_config not in self.enc_config_cache:
try:
self.enc_config_cache[enc_config] = self.EncodingConfiguration(
enc_config.direct, enc_config.order,
enc_config.conflict, enc_config.support,
enc_config.amo_encoding, enc_config.alldiff_encoding)
except Exception as e:
raise e
return self.enc_config_cache[enc_config]
def load_expr(self, expr):
    """
    Recursively translate a Numberjack expression into the backend's native
    representation, building native variables on first encounter and falling
    back to decomposition when the backend has no factory for the operator.
    """
    #print 'load', expr , expr.get_lb() if issubclass(type(expr),Expression) else None, expr.get_ub() if issubclass(type(expr),Expression) else None #SDG: VERY USEFUL FOR DEBUGGING
    if type(expr) is str:
        return self.model.string_map[expr]
    if type(expr) is bool:
        return int(expr)
    if type(expr) in [int, long, float]:
        # It is a constant, handle appropriatly
        return expr
    if expr.is_var():
        # It is a leaf
        alreadyBuild = False
        if expr.is_built(self):
            # Already assigned a native variable for this solver.
            alreadyBuild = True
        if alreadyBuild:
            return expr.var_list[self.solver_id - 1]
        else:
            # It is probably a variable
            (lb, ub, domain) = expr.get_domain_tuple()
            var = None
            if domain is None or (ub - lb + 1) == len(domain):
                # Contiguous domain: build from the bounds.
                if type(lb) is int:
                    var = self.getIntVar(lb, ub, expr.ident)
                else:
                    var = self.getFloatVar(lb, ub, expr.ident)
            else:
                # Sparse domain: pass the explicit value list.
                w_array = self.IntArray()
                for val in domain:
                    w_array.add(val)
                var = self.getIntVar(w_array, expr.ident)
            if expr.encoding:
                var.encoding = self.getEncodingConfiguration(expr.encoding)
            expr.setVar(self.solver_id, self.Library, var, self)
            expr.solver = self
            return var
    else:
        # Look up the wrapper factory for this operator, e.g. Wrapper_Sum.
        factory = getattr(sys.modules[self.WrapperPackage],
                          "%s_%s" % (self.Wrapper, expr.get_operator()), None)
        if factory is not None:
            arguments = None
            if len(expr.get_children()) <= 2:
                arguments = [self.load_expr(child) for child in expr.get_children()]
            else:
                # More than two children are passed as a single array.
                var_array = self.ExpArray()
                for child in expr.get_children():
                    myarg = self.load_expr(child)
                    var_array.add(myarg)
                    keep_alive.append(myarg)
                arguments = [var_array]
            if expr.has_parameters():  # assumes an array of integers
                for param in expr.parameters:
                    if hasattr(param, '__iter__'):
                        w_array = None
                        if any((type(w) == float for w in param)):
                            w_array = self.DoubleArray()
                        else:
                            w_array = self.IntArray()
                        # BUGFIX: compare operator names with '==' rather
                        # than 'is'; identity of equal string literals is
                        # an interning accident, not a guarantee.
                        if expr.get_operator() == "Table":
                            # Table parameters are a list of tuples which
                            # get flattened into a single array.
                            for w in param:
                                for v in w:
                                    w_array.add(v)
                            arguments.append(w_array)
                        else:
                            for w in param:
                                w_array.add(w)
                            arguments.append(w_array)
                    else:
                        arguments.append(param)
            try:
                var = factory(*arguments)
            except NotImplementedError as e:
                print >> sys.stderr, "Error the solver does not support this expression:", str(expr)
                print >> sys.stderr, "Type:", type(expr), "Children:", str(expr.children), "Params:", str(getattr(expr, 'parameters', None))
                raise e
            if expr.encoding:
                var.encoding = self.getEncodingConfiguration(expr.encoding)
            expr.setVar(self.solver_id, self.Library, var, self)
            expr.solver = self
            return var
        else:
            # No native support for this operator: decompose it.
            return self.decompose_expression(expr)
def decompose_expression(self, expr):
    """
    Fallback when the backend has no native factory for `expr`: expand it
    via its `decompose()` rewriting and load the resulting expressions.

    :raises ConstraintNotSupportedError: if `expr` has no decomposition.
    """
    if hasattr(expr, "decompose"):
        expr_list = expr.decompose()
        #print expr_list #SDG: VERY USEFUL FOR DEBUGGING
        obj_exp = []
        #SDG: all decomposed expressions except the first are assumed to be constraints (ie, at the top-level)
        for exp in expr_list[1:]:
            exp.encoding = expr.encoding
            obj = self.load_expr(exp)
            obj_exp.append(obj)
            self.solver.add(obj)
        expr.solver = self
        #expr_list[0].close()
        for exp in expr_list:
            exp.close()
            self.add_to_store(exp)
        # The first decomposed expression stands in for `expr` itself.
        decomp = self.load_expr(expr_list[0])
        return decomp
    else:
        raise ConstraintNotSupportedError(expr.get_operator(), self.Library)
def solve(self):
    """
    Calls solve on the underlying solver.

    Captures :exc:`KeyboardInterrupt` or :exc:`SystemExit` signals and
    returns None in this case.

    :return: `True` if the solver found a satisfiable solution, `False`
        otherwise.
    """
    try:
        # NOTE(review): SAT appears to be a module-level status constant
        # shared with the backends -- defined outside this chunk.
        if self.solver.solve() == SAT:
            return True
        return False
    except (KeyboardInterrupt, SystemExit):
        print 'Program Interrupted'
        return
def solveAndRestart(self, policy=GEOMETRIC, base=64, factor=1.3, decay=0.0, reinit=-1):
"""
Calls solve with restarts on the underlying solver.
.. deprecated:: 1.1
Restarting is the default in most underlying solvers, currently only
Mistral version 1.55 will not perform restarts unless started with
this method. Instead you should use :func:`solve` which will use
restarts by default in other solvers.
"""
if reinit == -1:
if self.solver.solveAndRestart(policy, base, factor, decay) == SAT:
return True
else:
if self.solver.solveAndRestart(policy, base, factor, decay, reinit) == SAT:
return True
return False
def startNewSearch(self):
"Initialise structures for a depth first search."
self.solver.startNewSearch()
def getNextSolution(self):
"Search for the next solution"
return self.solver.getNextSolution()
def solutions(self):
"A generator which will yield `True` until no other solution exists."
while self.getNextSolution():
yield True # Could return something more useful??
def next(self, exp, v):
# \internal used to get the next value in the domain of 'exp' after v
#return self.solver.next(exp.var_list[self.solver_id-1], v)
return exp.var_list[self.solver_id - 1].next(v)
##@name Search programming methods
# @{
def propagate(self):
"""
Tell the solver to reach a fixed point.
:return: `False` if an inconsistency is found, `True` otherwise.
"""
return self.solver.propagate()
def save(self):
"Tell the solver to save the current state."
self.solver.save()
def undo(self, bj=1):
"""
Tell the solver to restore its state to the one last enqueued by
'save()'
"""
return self.solver.undo(bj)
def post(self, exp):
"""
Tell the solver to add a constraint in the current state.
:param exp: should be a unary constraint, for example `x == 5`.
"""
self.solver.post(exp.operator, exp.children[0].var_list[self.solver_id - 1], exp.children[1])
def deduce(self, exp=None):
"""
Tell the solver to post the negation of the last decision in the current
states.
"""
if exp is None:
self.solver.deduce()
else:
self.solver.post(exp.operator, exp.children[0].var_list[self.solver_id - 1], exp.children[1])
def deduce_print(self, lvl):
x = self.variables[self.solver.get_decision_id()]
print lvl * ' ', x.domain(self)
self.solver.deduce()
## Follow a 'left' branch in a binary serach tree (eq. to save() + post())
def branch_left(self, exp):
self.solver.save()
self.solver.post(exp.operator, exp.children[0].var_list[self.solver_id - 1], exp.children[1])
## Follow a 'right' branch in a binary serach tree (eq. to undo() + deduce())
def branch_right(self):
return self.solver.branch_right()
def reset(self, full=False):
"""
Resets the data structure of the solver to the initial state.
:param bool full: whether the top-level deduction should be undone too.
"""
self.solver.reset(full)
## @}
def analyze_conflict(self):
btlevel = self.solver.analyze_conflict()
if btlevel < 0:
btlevel = None
return (self.solver.get_learnt_clause(), btlevel)
def print_all_clauses(self):
for i in range(self.solver.nbClauses()):
self.solver.get_clause(i)
self.print_clause()
def get_last_nogood(self):
#self.solver.get_last_nogood(i)
self.print_clause()
def print_clause(self):
    """
    Pretty-print the clause most recently fetched from the backend, one
    literal per position, e.g. ``( x == 3 or y > 2 )``.
    """
    def get_literal(i):
        # Decode the i-th literal of the backend's nogood buffer: `type`
        # selects equality vs bound atoms, `sign` the polarity.
        var = self.solver.get_nogood_var(i)
        val = self.solver.get_nogood_val(i)
        type = self.solver.get_nogood_type(i)
        sign = self.solver.get_nogood_sign(i)
        lit = str(self.variables[var])
        if type == 0:
            if sign == 0:
                lit += ' == '
            else:
                lit += ' != '
        else:
            if sign == 0:
                lit += ' <= '
            else:
                lit += ' > '
        lit += str(val)
        return lit
    print '(',
    for i in range(self.solver.get_nogood_size()):
        if i > 0:
            print 'or',
        print get_literal(i),
    print ')'
def sacPreprocess(self, type):
self.solver.sacPreprocess(type)
return (not (self.solver.is_unsat()))
##@name Parameter tuning methods
# @{
def setHeuristic(self, arg1, arg2='No', arg3=0):
    """
    Sets the variable and value heuristics.

    :param arg1: name of the variable ordering heuristic.
    :param arg2: name of the value ordering heuristic, or (two-argument
        form) the randomization degree.
    :param int arg3: randomization degree.

    .. note::
        Currently only supports setting the heuristics for Mistral solvers
        but a generic method for all solvers is in the works.
    """
    var_name = arg1
    val_name = arg2
    randomization = arg3
    # Two-argument form setHeuristic(var, randomization): the value
    # ordering defaults to 'No'.
    if type(val_name) is int:
        randomization = val_name
        val_name = 'No'
    # NOTE(review): var_heuristics/val_heuristics appear to be module-level
    # collections of recognised orderings (defined outside this chunk);
    # unknown names only produce a warning, they are still passed through.
    if var_name not in var_heuristics:
        print 'c Warning: "' + var_name + '" unknown, use MinDomain instead'
        print 'c legal variable orderings: ', var_heuristics
    if val_name not in val_heuristics:
        print 'c Warning: "' + val_name + '" unknown, use Lex instead'
        print 'c legal value orderings: ', val_heuristics
    self.solver.setHeuristic(str(var_name), str(val_name), randomization)
def setFailureLimit(self, cutoff):
"""
Sets a limit on the number of failures encountered before aborting
search.
"""
self.solver.setFailureLimit(cutoff)
def setTimeLimit(self, cutoff):
"""
Sets a limit on the CPU time before aborting search.
"""
self.solver.setTimeLimit(cutoff)
def setNodeLimit(self, cutoff):
"""
Sets a limit on the number of nodes explored before aborting search.
"""
self.solver.setNodeLimit(cutoff)
def setVerbosity(self, degree):
"""
Sets the verbosity level of the solver.
"""
self.verbosity = max(0, degree)
self.solver.setVerbosity(degree)
def setThreadCount(self, num_threads):
"""
Sets the number of threads a solver should use.
"""
f = getattr(self.solver, 'setThreadCount', None)
if f:
f(num_threads)
else:
if self.verbosity > 0:
print >> sys.stderr, "Warning: this solver does not support " \
"the ability to specify a thread count."
def setOptimalityGap(self, gap):
"""
Sets the target relative optimality gap tolerance.
"""
if hasattr(self.solver, 'setOptimalityGap'):
return self.solver.setOptimalityGap(gap)
else:
raise UnsupportedSolverFunction(
self.Library, "setOptimalityGap", "This solver does not support"
" setting the optimility gap.")
return None
def setRandomSeed(self, seed):
"""
Sets the initial random seed.
"""
self.solver.setRandomSeed(seed)
def setWorkMem(self, mb):
"""
Set the limit of working memory, only used for CPLEX.
"""
if hasattr(self.solver, 'setWorkMem'):
return self.solver.setWorkMem(mb)
else:
raise UnsupportedSolverFunction(
self.Library, "setWorkMem", "This solver does not support"
" setting the work memory.")
return None
def getWorkMem(self):
"""
Get the limit of working memory, only used for CPLEX.
"""
if hasattr(self.solver, 'getWorkMem'):
return self.solver.getWorkMem()
else:
raise UnsupportedSolverFunction(
self.Library, "getWorkMem", "This solver does not support"
" getting the work memory.")
return None
def setOption(self,func,param=None):
    """
    Sets an option in Toulbar2 whose name is passed as the first parameter,
    and value as a second one.
    """
    try:
        # Option names map directly onto methods of the native solver.
        function = getattr(self.solver,func)
    except AttributeError:
        print "Warning: "+func+" option does not exist in this solver!"
    else:
        # No value means the option is a parameterless toggle.
        if param is None:
            function()
        else:
            function(param)
## @}
def setRandomized(self, degree):
self.solver.setRandomized(degree)
def addNogood(self, vars, vals):
var_array = self.ExpArray()
for var in vars:
var_array.add(var.var_list[self.solver_id - 1])
val_array = self.IntArray()
for val in vals:
val_array.add(val)
self.solver.addNogood(var_array, val_array)
def setAntiLex(self, vars):
var_array = self.ExpArray()
for var in vars:
var_array.add(var.var_list[self.solver_id - 1])
self.solver.setAntiLex(var_array)
def guide(self, vars, vals=None, probs=[]):
var_array = self.ExpArray()
val_array = self.IntArray()
pro_array = self.DoubleArray()
if vals is not None:
for var in vars:
var_array.add(var.var_list[self.solver_id - 1])
for val in vals:
val_array.add(val)
else:
for var in vars.variables:
var_array.add(var.var_list[self.solver_id - 1])
if var in vars:
val_array.add(vars[var])
for pro in probs:
pro_array.add(pro)
self.solver.guide(var_array, val_array, pro_array)
#def backtrackTo(self, level):
# self.solver.backtrackTo(level)
#def assign(self,x,v):
# self.solver.assign(x.var,v)
#def upOneLevel(self):
# self.solver.upOneLevel()
    def setLowerBounds(self, vars, lb):
        # Apply, pairwise, lower bound lb[i] to variable vars[i].
        var_array = self.ExpArray()
        lob_array = self.IntArray()
        for (x, l) in zip(vars, lb):
            var_array.add(x.var_list[self.solver_id - 1])
            lob_array.add(l)
        self.solver.setLowerBounds(var_array, lob_array)
    def setUpperBounds(self, vars, ub):
        # Apply, pairwise, upper bound ub[i] to variable vars[i].
        var_array = self.ExpArray()
        upb_array = self.IntArray()
        for (x, u) in zip(vars, ub):
            var_array.add(x.var_list[self.solver_id - 1])
            upb_array.add(u)
        self.solver.setUpperBounds(var_array, upb_array)
    def setRestartNogood(self):
        # Enable nogood recording on restarts in the backend solver.
        self.solver.setRestartNogood()
##@name Accessors
# @{
    def get_solution(self):
        """
        Extract a :class:`.Solution` object from the solver representing the
        list of assigned values.

        :rtype: :class:`.Solution`
        """
        # Snapshot the backend's current assignment first, then build the
        # Python-side wrapper over this model's variables.
        self.solver.store_solution()
        solution = Solution(self.variables)
        return solution
    def is_opt(self):
        """
        Returns `True` if the solver found a solution and proved its
        optimality, `False` otherwise.
        """
        return self.solver.is_opt()
    def is_sat(self):
        "Returns `True` if the solver found a solution, `False` otherwise."
        return self.solver.is_sat()
    def is_unsat(self):
        """
        Returns `True` if the solver proved unsatisfiability, `False` otherwise.
        """
        return self.solver.is_unsat()
    def getOptimum(self):
        """
        Returns the current best solution cost from Toulbar2.

        :raises UnsupportedSolverFunction: if called on a solver other than
            Toulbar2.
        """
        if hasattr(self.solver, 'getOptimum'):
            return self.solver.getOptimum()
        else:
            raise UnsupportedSolverFunction(
                self.Library, "getOptimum", "This solver does not support "
                "getOptimum, this is a Toulbar2 function only.")
    def getOptimalityGap(self):
        """
        Returns the optimality gap from the solver. Valid for MIP solvers only.

        :raises UnsupportedSolverFunction: if called on a non MIP solver.
        """
        if hasattr(self.solver, 'getOptimalityGap'):
            return self.solver.getOptimalityGap()
        else:
            raise UnsupportedSolverFunction(
                self.Library, "getOptimalityGap", "This solver does not "
                "support getting the optimility gap.")
    def getBacktracks(self):
        "Returns the number of backtracks performed during the last search."
        return self.solver.getBacktracks()
    def getNodes(self):
        "Returns the number of nodes explored during the last search."
        return self.solver.getNodes()
    def getFailures(self):
        "Returns the number of failures encountered during the last search."
        return self.solver.getFailures()
    def getPropags(self):
        """
        Returns the number of constraint propagations performed during the last
        search.
        """
        return self.solver.getPropags()
    def getTime(self):
        "Returns the CPU time required for the last search."
        return self.solver.getTime()
    ## @}
    def getChecks(self):
        "Returns the number of constraint checks for the last search."
        return self.solver.getChecks()
    def printStatistics(self):
        """
        Asks the solver to print some basic statistics about its last search.

        .. deprecated:: 1.1
        """
        # Python 2 print statements; the empty prints frame the output.
        print ''
        self.solver.printStatistics()
        print ''
    def getNumVariables(self):
        """
        Get the number of variables that have been created in the underlying
        solver. This figure can be different to the number of variables that you
        created in your model. For SAT and MIP solvers, this figure will be the
        number of Boolean variables which had to be created during the encoding
        step, including any auxiliary variables.
        """
        return self.solver.getNumVariables()
    def getNumConstraints(self):
        """
        Get the number of constraints that have been created in the underlying
        solver. This figure can be different to the number of constraints that
        you created in your model. For SAT solvers it will be the number of CNF
        clauses created by the encoding, for MIP solvers it will be the number
        of linear expressions created.
        """
        return self.solver.getNumConstraints()
    def load_xml(self, file, type=4):
        """
        This function only allows you to load an XCSP instance into the Mistral
        solver. You should use the :mod:`Numberjack.XCSP` module to build a
        generic Numberjack model from an XCSP instance, which can be loaded with
        other underlying solvers.

        .. deprecated:: 1.1
            Use :mod:`Numberjack.XCSP` instead.
        """
        # NOTE: parameter names shadow the builtins ``file`` and ``type``;
        # kept unchanged because they are part of the public signature.
        self.solver.load_xml(file, type)
    def load_mps(self, filename, extension):
        """
        Asks the underlying MIP solver to load an MPS file.

        :param filename: the path to the file.
        :param extension: the file's extension.
        :raises UnsupportedSolverFunction: if called on a non MIP solver.
        """
        if not hasattr(self.solver, 'load_mps'):
            raise UnsupportedSolverFunction(
                str(type(self)), "load_mps", "Please load the model using a "
                "MIP solver to use this functionality.")
        self.solver.load_mps(filename, extension)
def load_gmpl(self, filename, data=None):
"""
Asks the underlying MIP solver to load a GMPL file, possibly with a
separate data file.
:param filename: the path to the file.
:param data: optional path to a data file.
:raises UnsupportedSolverFunction: if called on a non MIP solver.
"""
if not hasattr(self.solver, 'load_gmpl'):
raise UnsupportedSolverFunction(
str(type(self)), "load_gmpl", "Please load the model using a "
"MIP solver to use this functionality.")
if data == None:
self.solver.load_gmpl(filename)
else:
self.solver.load_gmpl(filename, data)
    def load_lp(self, filename, epsilon):
        """
        Asks the underlying MIP solver to load an LP file.

        :param filename: the path to the file.
        :param epsilon: numeric tolerance passed through to the backend
            (exact semantics are solver-specific — confirm against the
            backend's documentation).
        :raises UnsupportedSolverFunction: if called on a non MIP solver.
        """
        if not hasattr(self.solver, 'load_lp'):
            raise UnsupportedSolverFunction(
                str(type(self)), "load_lp", "Please load the model using a "
                "MIP solver to use this functionality.")
        self.solver.load_lp(filename, epsilon)
    def shuffle_cnf(self, *args, **kwargs):
        """
        Shuffle the internal CNF representation before writing it to a file.
        This renames the variables, shuffles their order in each clause, and
        shuffles the ordering of the clauses. This currently has no affect on
        the built-in MiniSat or WalkSat solvers since clauses are added directly
        via their API when they are generated but can be used with any of the
        other external file based SAT solvers. This should be called before
        :meth:`.output_cnf`.

        :param int seed: The seed for the random number generator.
        :raises UnsupportedSolverFunction: if called on a non SAT-based solver.
        """
        if hasattr(self.solver, 'shuffle_cnf'):
            self.solver.shuffle_cnf(*args, **kwargs)
        else:
            raise UnsupportedSolverFunction(
                str(type(self)), "shuffle_cnf", "Please load the model using "
                "a SAT solver to use this functionality.")
    def output_cnf(self, filename):
        """
        Output the CNF representation of a model to a file. The model must have
        been loaded with a SAT-based solver.

        :param str filename: The filename of where to output the CNF file.
        :raises UnsupportedSolverFunction: if called on a non SAT-based solver.
        """
        # Import here to avoid a hard dependency unless this path is used.
        from Numberjack.solvers.SatWrapper import SatWrapperSolver as sws
        if not issubclass(type(self.solver), sws):
            raise UnsupportedSolverFunction(
                str(type(self)), "output_cnf", "Please load the model using a "
                "SAT solver to use this functionality.")
        self.solver.output_cnf(filename)
def output_lp(self, filename):
"""
Output the LP representation of a model to a file. The model must have
been loaded with a MIP-based solver.
:param str filename: The filename of where to output the LP file.
:raises UnsupportedSolverFunction: if called on a non MIP-based solver.
"""
if hasattr(self.solver, 'output_lp'):
if not filename.endswith(".lp"):
filname = "%s.lp" % filename
self.solver.output_lp(filename)
else:
raise UnsupportedSolverFunction(
str(type(self)), "output_lp", "This solver does not support "
"outputing LP files.")
def output_mps(self, filename):
"""
Output the MPS representation of a model to a file. The model must have
been loaded with a MIP-based solver.
:param str filename: The filename of where to output the MPS file.
:raises UnsupportedSolverFunction: if called on a non MIP-based solver.
"""
if hasattr(self.solver, 'output_mps'):
if not filename.endswith(".mps"):
filname = "%s.mps" % filename
self.solver.output_mps(filename)
else:
raise UnsupportedSolverFunction(
str(type(self)), "output_mps", "This solver does not support "
"outputing MPS files.")
    def num_vars(self):
        """
        .. deprecated:: 1.1
            Use :meth:`.getNumVariables` intead.
        """
        if hasattr(self.solver, 'num_vars'):
            return self.solver.num_vars()
        raise UnsupportedSolverFunction(
            str(type(self)), "num_vars", "This functionality has been "
            "deprecated, use getNumVariables.")
    def extract_graph(self):
        # Ask the backend to build its constraint graph representation.
        self.solver.extract_graph()
    def numNodes(self):
        """
        .. deprecated:: 1.1
            Use :meth:`.getNodes` intead.
        """
        return self.solver.numNodes()
    def degree(self, var):
        # Degree of ``var`` in the constraint graph.
        return self.solver.get_degree(var)
    def get_neighbors(self, x):
        # Collect the neighbours of ``x`` in the constraint graph.
        # NOTE(review): this calls ``self.solver.degree`` while the method
        # above wraps ``self.solver.get_degree`` — confirm both backend
        # methods exist, or unify.
        neighbors = []
        for y in range(self.solver.degree(x)):
            neighbors.append(self.solver.get_neighbor(x, y))
        return neighbors
    def get_static_features(self):
        # Features 0-11 and 16-35 are the static (instance-structure)
        # features; 12-15 are dynamic and are skipped here.
        feats = {}
        for i in range(12):
            feats[self.solver.get_feature_name(i)] = self.solver.get_feature(i)
        for i in range(16, 36):
            feats[self.solver.get_feature_name(i)] = self.solver.get_feature(i)
        return feats
    def get_dynamic_features(self):
        # Features 12-15 are the dynamic (search-behaviour) features.
        feats = {}
        for i in range(12, 16):
            feats[self.solver.get_feature_name(i)] = self.solver.get_feature(i)
        return feats
    def get_features(self):
        """
        Compute and return the 36 CPHydra features. The feature set includes 32
        static features regarding the contraints, domains, etc, and 4 dynamic
        features about weights, nodes, propagations computed after a 2 second
        run of Mistral. Note that these can only be compute when the XCSP
        instance has been loaded directly by Mistral like in the following
        example:

        .. code-block:: python

            model = Model()
            solver = model.load('Mistral')
            solver.load_xml(xml_filename)
            features = solver.get_features()

        .. deprecated:: 1.1
            This will be replaced with more extensive and flexible functionality
            in future releases.
        """
        feats = {}
        for i in range(36):
            feats[self.solver.get_feature_name(i)] = self.solver.get_feature(i)
        return feats
    def __str__(self):
        # Printing is delegated to the backend as a side effect; a single
        # space is returned so ``print`` emits something harmless afterwards.
        #setActive(self)
        #return ''
        self.solver.printPython()
        return ' '
    def delete(self):
        # Release the native solver memory, if this backend provided a
        # deallocation hook.
        if self.free_memory:
            self.free_memory(self.solver)
def enum(*sequential):
    """Build a simple enum-like class from the given names.

    Each name is assigned a distinct power of two (1, 2, 4, ...), so the
    resulting values can be combined and tested as bit flags.
    """
    members = {name: 2 ** index for index, name in enumerate(sequential)}
    return type('Enum', (), members)
# This enum ordering must be the same as that specified in the enums
# EncodingConfiguration::AMOEncoding and AllDiffEncoding in SatWrapper.hpp
AMOEncoding = enum('Pairwise', 'Ladder')
AllDiffEncoding = enum('PairwiseDecomp', 'LadderAMO', 'PigeonHole')
class EncodingConfiguration(object):
    """
    Specifies a configuration for the SAT encoding that expressions will take
    when translated to conjunctive normal form for the SAT solver. Support for
    configuring the MIP encoding could be added later.

    At least one of ``direct`` or ``order`` should be ``True``. If both are set
    to ``True``, then the domain will be encoded using both representations and
    chanelled between each other. The default is to have both enabled,
    corresponding to the so called regular encoding.

    At least one of ``conflict`` or ``support`` should be ``True``, these
    specify which form of a constraint is encoded into CNF.

    :param bool direct: Whether the direct (or sparse) encoding of domains
        should be generated.
    :param bool order: Whether the order encoding of the domains should be
        generated.
    :param bool conflict: Whether the conflict clauses of a constraint should be
        generated.
    :param bool support: Whether the support clauses of a constraint should be
        generated.
    :param int amo_encoding: The at-most-one encoding to be used. An enum is
        defined with the supported possibilities: ``AMOEncoding.Pairwise``, and
        ``AMOEncoding.Ladder``.
    :param int alldiff_encoding: The encoding used when encoding all-different
        constraints. The possibilites are defined in the ``AllDiffEncoding``
        enum and can be binary or'd with each other to be passed as a single
        int, like so: ``AllDiffEncoding.PairwiseDecomp |
        AllDiffEncoding.LadderAMO | AllDiffEncoding.PigeonHole``.

    :raises InvalidEncodingException: if the combination of options is invalid.
    """

    def __init__(self, direct=True, order=True, conflict=True, support=False,
                 amo_encoding=AMOEncoding.Pairwise,
                 alldiff_encoding=AllDiffEncoding.PairwiseDecomp):
        # Domain encodings
        self.direct = direct
        self.order = order
        # Constraint encoding
        self.conflict = conflict
        self.support = support
        # At Most One encoding.
        self.amo_encoding = amo_encoding
        # All Different encoding.
        self.alldiff_encoding = alldiff_encoding

        # Check validity of the encoding config.
        if not self.direct and not self.order:
            raise InvalidEncodingException(
                "Domains must be encoded using at least one encoding: "
                "direct|order.")
        if not self.conflict and not self.support:
            raise InvalidEncodingException(
                "Constraints must be encoded using at least one encoding: "
                "conflict|support.")
        if not self.amo_encoding & AMOEncoding.Pairwise and \
                not self.amo_encoding & AMOEncoding.Ladder:
            raise InvalidEncodingException(
                "Invalid at-most-one encoding specified: %s" %
                (str(self.amo_encoding)))

    # Make EncodingConfiguration hashable so that it can be used as a dictionary
    # key for the cache of encoding configs during translation to SAT.
    # NOTE(review): instances are mutable; mutating one after it has been used
    # as a dict key will corrupt the lookup — treat instances as immutable.
    def __hash__(self):
        return hash((self.direct, self.order, self.conflict, self.support,
                     self.amo_encoding, self.alldiff_encoding))

    def __eq__(self, other):
        return (self.direct == other.direct) and \
            (self.order == other.order) and \
            (self.conflict == other.conflict) and \
            (self.support == other.support) and \
            (self.amo_encoding == other.amo_encoding) and \
            (self.alldiff_encoding == other.alldiff_encoding)

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive __ne__ from __eq__, so without
        # this, ``!=`` fell back to identity comparison and disagreed with
        # ``==`` for equal-valued configurations.
        return not self.__eq__(other)

    def __str__(self):
        return "EncodingConfig<direct:%r, order:%r, conflict:%r, support:%r, amo:%r, alldiff:%r>" % (
            self.direct, self.order, self.conflict, self.support, self.amo_encoding, self.alldiff_encoding)
# Named preset encoding configurations, selectable by name when translating a
# model to SAT. Each key describes the combination of domain encoding
# (direct/order), clause form (conflict/support), at-most-one encoding and
# all-different decomposition it enables.
NJEncodings = {
    # "directsupport": EncodingConfiguration(direct=True, order=False, conflict=True, support=True, amo_encoding=AMOEncoding.Pairwise),
    "direct": EncodingConfiguration(direct=True, order=False, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.PairwiseDecomp),
    "support": EncodingConfiguration(direct=True, order=False, conflict=False, support=True, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.PairwiseDecomp),
    "order": EncodingConfiguration(direct=False, order=True, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.PairwiseDecomp),
    "directorder": EncodingConfiguration(direct=True, order=True, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.PairwiseDecomp),
    "pairwiseconflictandsupport": EncodingConfiguration(direct=True, order=False, conflict=True, support=True, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.PairwiseDecomp),
    "ladder_direct": EncodingConfiguration(direct=True, order=False, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.LadderAMO),
    "ladder_support": EncodingConfiguration(direct=True, order=False, conflict=False, support=True, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.LadderAMO),
    "ladder_directorder": EncodingConfiguration(direct=True, order=True, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.LadderAMO),
    "pairwise_and_ladder_direct": EncodingConfiguration(direct=True, order=False, conflict=True, support=False, alldiff_encoding=AllDiffEncoding.PairwiseDecomp | AllDiffEncoding.LadderAMO),
    "pairwise_ladder_pigeon": EncodingConfiguration(direct=True, order=True, conflict=True, support=False, alldiff_encoding=AllDiffEncoding.PairwiseDecomp | AllDiffEncoding.LadderAMO | AllDiffEncoding.PigeonHole),
    "pairwisesupport_ladder_pigeon": EncodingConfiguration(direct=True, order=True, conflict=False, support=True, alldiff_encoding=AllDiffEncoding.PairwiseDecomp | AllDiffEncoding.LadderAMO | AllDiffEncoding.PigeonHole),
    "pairwise_order_pigeon": EncodingConfiguration(direct=False, order=True, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.PairwiseDecomp | AllDiffEncoding.PigeonHole),
    "pairwise_directorder_pigeon": EncodingConfiguration(direct=True, order=True, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.PairwiseDecomp | AllDiffEncoding.PigeonHole),
    "ladder_directorder_pigeon": EncodingConfiguration(direct=True, order=True, conflict=True, support=False, amo_encoding=AMOEncoding.Pairwise, alldiff_encoding=AllDiffEncoding.LadderAMO | AllDiffEncoding.PigeonHole),
}
|
JElchison/Numberjack
|
Numberjack/__init__.py
|
Python
|
lgpl-2.1
| 148,044 | 0.002742 |
"""
ESSArch is an open source archiving and digital preservation system
ESSArch
Copyright (C) 2005-2019 ES Solutions AB
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact information:
Web - http://www.essolutions.se
Email - essarch@essolutions.se
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-26 14:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.9): adds a nullable
    # self-referential ``parent_step`` FK to ProcessStep so steps can be
    # nested; children are reachable via the ``child_steps`` reverse accessor.
    # Do not edit generated migrations by hand.

    dependencies = [
        ('WorkflowEngine', '0018_auto_20160725_2120'),
    ]

    operations = [
        migrations.AddField(
            model_name='processstep',
            name='parent_step',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child_steps', to='WorkflowEngine.ProcessStep'),
        ),
    ]
|
ESSolutions/ESSArch_Core
|
ESSArch_Core/WorkflowEngine/migrations/0019_processstep_parent_step.py
|
Python
|
gpl-3.0
| 1,448 | 0.000691 |
import numpy as np
from random import randrange
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
    '''
    Naive centred-difference numerical gradient of ``f`` at ``x``.

    :param f: a function taking a single numpy-array argument.
    :param x: the point (numpy array) at which to evaluate the gradient; it is
        perturbed in place during evaluation but fully restored before return.
    :param verbose: if True, print each index and its gradient component.
    :param h: finite-difference step size.
    :returns: an array with the same shape as ``x`` holding df/dx.
    '''
    # Bug fix: the original evaluated ``fx = f(x)`` up front but never used
    # it — the centred difference only needs f(x+h) and f(x-h) — so that
    # wasted function evaluation has been removed.
    grad = np.zeros_like(x)
    # nditer with multi_index walks every element of x regardless of shape.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index  # current element's index
        oldval = x[ix]
        x[ix] = oldval + h
        fxph = f(x)  # f(x + h)
        x[ix] = oldval - h
        fxmh = f(x)  # f(x - h)
        x[ix] = oldval  # restore the original value
        grad[ix] = (fxph - fxmh) / (2 * h)
        if verbose:
            print(ix, grad[ix])
        it.iternext()
    return grad
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
    '''
    Sample a few random elements of x, compute the numerical gradient in
    those dimensions only, and print a comparison against the corresponding
    entries of ``analytic_grad``. ``x`` is perturbed in place but restored.
    '''
    for i in range(num_checks):
        # Pick one random multi-index into x.
        ix = tuple([randrange(m) for m in x.shape])
        oldval = x[ix]
        x[ix] = oldval + h  # bump the element up by a small h
        fxph = f(x)  # evaluate f(x + h)
        x[ix] = oldval - h  # bump the element down by a small h
        fxmh = f(x)  # evaluate f(x - h)
        x[ix] = oldval  # restore the original value
        grad_numerical = (fxph - fxmh) / (2 * h)
        grad_analytic = analytic_grad[ix]
        # Relative error |num - ana| / (|num| + |ana|); small values (~1e-7)
        # indicate agreement. NOTE: divides by zero if both gradients are 0.
        rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))
        print('数值计算求梯度:%f 分析法求梯度:%f,相对误差是:%e' % (grad_numerical, grad_analytic, rel_error))
|
BoyuanYan/CIFAR-10
|
cs231n/gradient_check.py
|
Python
|
apache-2.0
| 1,636 | 0.006541 |
# A basic web server using sockets
import socket
PORT = 8090
MAX_OPEN_REQUESTS = 5
def process_client(clientsocket):
    """
    Handle one HTTP client: read the request, respond with the contents of
    ``myhtml.html`` and close the connection.

    :param clientsocket: a connected socket-like object exposing
        recv/send/close.
    """
    print(clientsocket)
    data = clientsocket.recv(1024)
    print(data)
    # Read the page body; ``with`` guarantees the file handle is closed even
    # if reading raises. (The original also assigned a placeholder string to
    # web_contents that was immediately overwritten — removed.)
    with open("myhtml.html", "r") as f:
        web_contents = f.read()
    body = str.encode(web_contents)
    # Bug fix: the status line lacked a reason phrase ("HTTP/1.1 200").
    # NOTE(review): header lines should strictly be terminated with \r\n per
    # HTTP/1.1; kept as \n to preserve the original wire format otherwise.
    web_headers = "HTTP/1.1 200 OK"
    web_headers += "\n" + "Content-Type: text/html"
    web_headers += "\n" + "Content-Length: %i" % len(body)
    clientsocket.send(str.encode(web_headers + "\n\n") + body)
    clientsocket.close()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET,
                             socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# Let's use better the local interface name
# NOTE(review): this hard-coded override only changes the address shown in
# the "Waiting for connections" message below — bind() still uses ``ip``.
# Confirm this mismatch is intentional.
hostname = "10.10.104.17"
try:
    serversocket.bind((ip, PORT))
    # become a server socket
    # MAX_OPEN_REQUESTS connect requests before refusing outside connections
    serversocket.listen(MAX_OPEN_REQUESTS)
    while True:
        # accept connections from outside
        print ("Waiting for connections at %s %i" % (hostname, PORT))
        (clientsocket, address) = serversocket.accept()
        # now do something with the clientsocket
        # in this case, we'll pretend this is a non threaded server
        process_client(clientsocket)
except socket.error:
    print("Problemas using port %i. Do you have permission?" % PORT)
|
acs-test/openfda
|
PER_2017-18/clientServer/P1/server_web.py
|
Python
|
apache-2.0
| 1,505 | 0.008638 |
if __name__ == '__main__':
    # Read the grid bounds and the excluded coordinate sum from stdin.
    x = int(input())
    y = int(input())
    z = int(input())
    n = int(input())
    # Enumerate every coordinate [a, b, c] on the (x+1, y+1, z+1) grid and
    # keep those whose component sum differs from n.
    coords = []
    for a in range(x + 1):
        for b in range(y + 1):
            for c in range(z + 1):
                if a + b + c != n:
                    coords.append([a, b, c])
    print(coords)
|
kakaba2009/MachineLearning
|
python/src/algorithm/coding/basic/comprehension.py
|
Python
|
apache-2.0
| 240 | 0.0125 |
# -*- coding: utf-8 -*-
"""This directory is meant for special-purpose extensions to IPython.
This can include things which alter the syntax processing stage (see
PhysicalQ_Input for an example of how to do this).
Any file located here can be called with an 'execfile =' option as
execfile = Extensions/filename.py
since the IPython directory itself is already part of the search path for
files listed as 'execfile ='.
"""
|
mastizada/kuma
|
vendor/packages/ipython/IPython/Extensions/__init__.py
|
Python
|
mpl-2.0
| 429 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ironicclient import exceptions
from ironic_inspector import node_cache
from ironic_inspector import utils
def hook(introspection_data, **kwargs):
    """Enroll a new node in Ironic when introspection finds an unknown one.

    Creates a bare node with the ``fake`` driver and registers it in the
    inspector's node cache.

    :param introspection_data: introspection data for the unknown node
        (unused here; accepted for hook-interface compatibility).
    :returns: the node info returned by ``node_cache.add_node``.
    :raises utils.Error: if the node cannot be created in Ironic.
    """
    ironic = utils.get_client()
    try:
        node = ironic.node.create(**{'driver': 'fake'})
    except exceptions.HttpError as exc:
        # Bug fix: the implicitly-concatenated string literals were missing a
        # space and produced "...for unknownnode: %s".
        # NOTE(review): ``_`` (gettext) is not imported in this module —
        # confirm the i18n setup installs it as a builtin.
        raise utils.Error(_("Can not create node in ironic for unknown "
                            "node: %s") % exc)
    return node_cache.add_node(node.uuid, ironic=ironic)
|
Tehsmash/inspector-hooks
|
inspector_hooks/enroll_node_not_found.py
|
Python
|
apache-2.0
| 1,013 | 0 |
""" ToDo: document OpenStack driver on user level here.
"""
import json
from pebbles.services.openstack_service import OpenStackService
from pebbles.drivers.provisioning import base_driver
from pebbles.client import PBClient
from pebbles.models import Instance
from pebbles.utils import parse_ports_string
SLEEP_BETWEEN_POLLS = 3
POLL_MAX_WAIT = 180
class OpenStackDriver(base_driver.ProvisioningDriverBase):
    """ Provisioning driver that creates and manages instances on OpenStack.

    Blueprint configuration keys used: ``image``, ``flavor``,
    ``exposed_ports``, ``userdata`` and ``openstack_net_id``. OpenStack
    credentials come from the configured M2M credential store.
    """

    def get_oss(self):
        # Build a fresh OpenStackService bound to the machine-to-machine
        # credential store.
        return OpenStackService({'M2M_CREDENTIAL_STORE': self.config['M2M_CREDENTIAL_STORE']})

    def get_configuration(self):
        """Return the blueprint config schema with the ``image`` and
        ``flavor`` enums populated from what OpenStack currently offers."""
        from pebbles.drivers.provisioning.openstack_driver_config import CONFIG
        oss = self.get_oss()
        images = [x.name for x in oss.list_images()]
        flavors = [x.name for x in oss.list_flavors()]
        config = CONFIG.copy()
        config['schema']['properties']['image']['enum'] = images
        config['schema']['properties']['flavor']['enum'] = flavors
        return config

    def get_running_instance_logs(self, token, instance_id):
        # Not implemented for OpenStack; record that in the running log so
        # the user sees why no logs appear.
        running_log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='running')
        running_log_uploader.info('Cannot get running logs. This feature has not been implemented for the OpenStackDriver yet')

    def do_update_connectivity(self, token, instance_id):
        """Rebuild the instance's security-group rules from the blueprint's
        exposed-ports string, restricted to the client's current IP."""
        oss = self.get_oss()
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        security_group_id = instance_data['security_group_id']
        blueprint_config = pbclient.get_blueprint_description(instance['blueprint_id'])
        config = blueprint_config['full_config']

        # Delete all existing rules and add the rules using the input port string
        oss.clear_security_group_rules(security_group_id)
        ports_str = config['exposed_ports']
        if not ports_str:
            ports_str = '22'  # If the input port string is empty then use 22 as the default port
        ports_list = parse_ports_string(ports_str)
        for ports in ports_list:
            from_port = ports[0]
            to_port = ports[1]
            oss.create_security_group_rule(
                security_group_id,
                from_port=from_port,
                to_port=to_port,
                cidr="%s/32" % instance['client_ip'],
                ip_protocol='tcp',
                group_id=None
            )

    def do_provision(self, token, instance_id):
        """Create the OpenStack server for an instance and publish its
        connection details (floating IP, SSH endpoint) back through the API.

        :raises RuntimeError: on a bad ports definition or a missing user key.
        """
        self.logger.debug("do_provision %s" % instance_id)

        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)
        instance_name = instance['name']
        instance_user = instance['user_id']

        # fetch config
        blueprint_config = pbclient.get_blueprint_description(instance['blueprint_id'])
        config = blueprint_config['full_config']

        log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')
        log_uploader.info("Provisioning OpenStack instance (%s)\n" % instance_id)

        # Validate the exposed-ports string early so a bad blueprint fails
        # the instance with a clear error instead of failing mid-provision.
        ports_str = config['exposed_ports']
        if ports_str:
            try:
                parse_ports_string(ports_str)
            except Exception:
                # Bug fix: this was a bare ``except:``, which would also
                # swallow SystemExit/KeyboardInterrupt.
                error = 'Incorrect exposed ports definition in blueprint'
                error_body = {'state': Instance.STATE_FAILED, 'error_msg': error}
                pbclient.do_instance_patch(instance_id, error_body)
                self.logger.debug(error)
                raise RuntimeError(error)

        # fetch user public key; without it the user could never log in
        key_data = pbclient.get_user_key_data(instance_user).json()
        if not key_data:
            error = 'user\'s public key is missing'
            error_body = {'state': Instance.STATE_FAILED, 'error_msg': error}
            pbclient.do_instance_patch(instance_id, error_body)
            self.logger.debug(error)
            raise RuntimeError(error)

        oss = self.get_oss()
        result = oss.provision_instance(
            instance_name,
            config['image'],
            config['flavor'],
            nics=config.get('openstack_net_id', 'auto'),
            public_key=key_data[0]['public_key'],
            userdata=config.get('userdata'))

        if 'error' in result:
            log_uploader.warn('Provisioning failed %s' % result['error'])
            return

        ip = result['address_data']['public_ip']
        instance_data = {
            'server_id': result['server_id'],
            'floating_ip': ip,
            'allocated_from_pool': result['address_data']['allocated_from_pool'],
            'security_group_id': result['security_group'],
            'endpoints': [
                {'name': 'SSH', 'access': 'ssh cloud-user@%s' % ip},
            ]
        }
        log_uploader.info("Publishing server data\n")
        pbclient.do_instance_patch(
            instance_id,
            {'instance_data': json.dumps(instance_data), 'public_ip': ip})
        log_uploader.info("Provisioning complete\n")

    def do_deprovision(self, token, instance_id):
        """Destroy the OpenStack server backing the given instance."""
        log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='deprovisioning')
        log_uploader.info("Deprovisioning instance %s\n" % instance_id)
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        oss = self.get_oss()
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        if 'server_id' not in instance_data:
            # Nothing was ever provisioned (or provisioning failed early).
            log_uploader.info("Skipping, no server id in instance data")
            return
        server_id = instance_data['server_id']
        log_uploader.info("Destroying server instance . . ")
        oss.deprovision_instance(server_id)
        log_uploader.info("Deprovisioning ready\n")

    def do_housekeep(self, token):
        # No periodic maintenance is required for this driver.
        pass
|
CSC-IT-Center-for-Science/pouta-blueprints
|
pebbles/drivers/provisioning/openstack_driver.py
|
Python
|
mit
| 6,069 | 0.002636 |
import sys
import petsc4py
petsc4py.init(sys.argv)
from ecoli_in_pipe import head_tail
# import numpy as np
# from scipy.interpolate import interp1d
# from petsc4py import PETSc
# from ecoli_in_pipe import single_ecoli, ecoliInPipe, head_tail, ecoli_U
# from codeStore import ecoli_common
#
#
# def call_head_tial(uz_factor=1., wz_factor=1.):
# PETSc.Sys.Print('')
# PETSc.Sys.Print('################################################### uz_factor = %f, wz_factor = %f' %
# (uz_factor, wz_factor))
# t_head_U = head_U.copy()
# t_tail_U = tail_U.copy()
# t_head_U[2] = t_head_U[2] * uz_factor
# t_tail_U[2] = t_tail_U[2] * uz_factor
# # C1 = t_head_U[5] - t_tail_U[5]
# # C2 = t_head_U[5] / t_tail_U[5]
# # t_head_U[5] = wz_factor * C1 * C2 / (wz_factor * C2 - 1)
# # t_tail_U[5] = C1 / (wz_factor * C2 - 1)
# t_head_U[5] = wz_factor * t_head_U[5]
# t_kwargs = {'head_U': t_head_U,
# 'tail_U': t_tail_U, }
# total_force = head_tail.main_fun()
# return total_force
#
#
# OptDB = PETSc.Options()
# fileHandle = OptDB.getString('f', 'ecoliInPipe')
# OptDB.setValue('f', fileHandle)
# main_kwargs = {'fileHandle': fileHandle}
# # head_U, tail_U, ref_U = ecoli_common.ecoli_restart(**main_kwargs)
# # ecoli_common.ecoli_restart(**main_kwargs)
# head_U = np.array([0, 0, 1, 0, 0, 1])
# tail_U = np.array([0, 0, 1, 0, 0, 1])
# call_head_tial()
head_tail.main_fun()
|
pcmagic/stokes_flow
|
ecoli_in_pipe/wrapper_head_tail.py
|
Python
|
mit
| 1,450 | 0.001379 |
"""
WSGI config for cache_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blast_cache.settings")

# WSGI servers import this module and invoke ``application`` per request.
application = get_wsgi_application()
|
DanBuchan/cache_server
|
blast_cache/wsgi.py
|
Python
|
gpl-2.0
| 400 | 0 |
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.http import *
from django.template import Template, Context
from django.shortcuts import render_to_response, redirect, render, RequestContext, HttpResponseRedirect
def login(request):
    # Render the login template; authentication itself is presumably handled
    # elsewhere (e.g. a social-auth pipeline) — confirm against urls.py.
    return render(request, 'login.html')
@login_required
def home(request):
    # ``u`` is exposed to the template through locals().
    u = request.user
    return render_to_response("home.html", locals(), context_instance=RequestContext(request))
def logout(request):
    # Clear the session via Django's auth logout, then redirect to the root.
    auth_logout(request)
    return redirect('/')
|
COMU/lazimlik
|
lazimlik/social_app/views.py
|
Python
|
gpl-2.0
| 568 | 0.019366 |
from tensorflow.keras.applications.vgg16 import VGG16
import tensorflowjs as tfjs
# Load (downloading on first use) the ImageNet-pretrained VGG16 Keras model.
model = VGG16(weights='imagenet')
# Convert and save it in TensorFlow.js layers format under ./vgg16_tfjs.
tfjs.converters.save_keras_model(model, 'vgg16_tfjs')
|
tensorflow/tfjs-examples
|
visualize-convnet/get_vgg16.py
|
Python
|
apache-2.0
| 172 | 0 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
r"""Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
from math import exp
from lib.EoS.cubic import Cubic
class SRK(Cubic):
    r"""Equation of state of Soave-Redlich-Kwong (1972)
    .. math::
        \begin{array}[t]{l}
        P = \frac{RT}{V-b}-\frac{a}{V\left(V+b\right)}\\
        a = 0.42747\frac{R^2T_c^2}{P_c}\alpha\\
        b = 0.08664\frac{RT_c}{P_c}\\
        \alpha^{0.5} = 1 + m\left(1-Tr^{0.5}\right)\\
        m = 0.48 + 1.574\omega - 0.176\omega^2\\
        \end{array}
    In supercritical states, the α temperature dependence can use different
    extrapolation correlation:
    * Boston-Mathias expression, [3]_
    .. math::
        \begin{array}[t]{l}
        \alpha = \exp\left(c\left(1-T_r^d\right)\right)\\
        d = 1+\frac{m}{2}\\
        c = \frac{m}{d}\\
        \end{array}
    * Nasrifar-Bolland expression, [4]_
    .. math::
        \begin{array}[t]{l}
        \alpha = \frac{b_1}{T_r} + \frac{b_2}{T_r^2} + \frac{b_3}{T_r^3}\\
        b_1 = 0.25\left(12 - 11m + m^2\right)\\
        b_2 = 0.5\left(-6 + 9m - m^2\right)\\
        b_3 = 0.25\left(4 - 7m + m^2\right)\\
        \end{array}
    Parameters
    ----------
    alpha : int
        Correlation index for alpha expression at supercritical temperatures:
        * 0 - Original
        * 1 - Boston
        * 2 - Nasrifar
    Examples
    --------
    Example 4.3 from [2]_, Propane saturated at 300K
    >>> from lib.mezcla import Mezcla
    >>> mix = Mezcla(5, ids=[4], caudalMolar=1, fraccionMolar=[1])
    >>> eq = SRK(300, 9.9742e5, mix)
    >>> '%0.1f' % (eq.Vl.ccmol)
    '98.4'
    >>> eq = SRK(300, 42.477e5, mix)
    >>> '%0.1f' % (eq.Vg.ccmol)
    '95.1'
    Helmholtz energy formulation example for supplementary documentation from
    [4]_, the critical parameter are override for the valued used in paper to
    get the values of test with high precision
    >>> from lib.mezcla import Mezcla
    >>> from lib import unidades
    >>> from lib.compuestos import Componente
    >>> ch4 = Componente(2)
    >>> ch4.Tc, ch4.Pc, ch4.f_acent = 190.564, 4599200, 0.011
    >>> o2 = Componente(47)
    >>> o2.Tc, o2.Pc, o2.f_acent = 154.581, 5042800, 0.022
    >>> ar = Componente(98)
    >>> ar.Tc, ar.Pc, ar.f_acent = 150.687, 4863000, -0.002
    >>> mix = Mezcla(5, customCmp=[ch4, o2, ar], caudalMolar=1,
    ...              fraccionMolar=[0.5, 0.3, 0.2])
    >>> eq = SRK(800, 36451227.52066596, mix, R=8.3144598)
    >>> fir = eq._phir(800, 5000, eq.yi)
    >>> delta = 5000
    >>> tau = 1/800
    >>> print("fir: %0.14f" % (fir["fir"]))
    fir: 0.11586323513845
    >>> print("fird: %0.14f" % (fir["fird"]*delta))
    fird: 0.12741566551477
    >>> print("firt: %0.15f" % (fir["firt"]*tau))
    firt: -0.082603152680518
    >>> print("firdd: %0.15f" % (fir["firdd"]*delta**2))
    firdd: 0.024895937945147
    >>> print("firdt: %0.15f" % (fir["firdt"]*delta*tau))
    firdt: -0.077752734990782
    >>> print("firtt: %0.14f" % (fir["firtt"]*tau**2))
    firtt: -0.10404751064185
    >>> print("firddd: %0.16f" % (fir["firddd"]*delta**3))
    firddd: 0.0060986538256190
    >>> print("firddt: %0.16f" % (fir["firddt"]*delta**2*tau))
    firddt: 0.0089488831000362
    >>> print("firdtt: %0.15f" % (fir["firdtt"]*delta*tau**2))
    firdtt: -0.097937890490398
    >>> print("firttt: %0.14f" % (fir["firttt"]*tau**3))
    firttt: 0.15607126596277
    """
    __title__ = "Soave-Redlich-Kwong (1972)"
    __status__ = "SRK72"
    __doi__ = (
        {
            "autor": "Soave, G.",
            "title": "Equilibrium Constants from a modified Redlich-Kwong "
                     "Equation of State",
            "ref": "Chem. Eng. Sci. 27 (1972) 1197-1203",
            "doi": "10.1016/0009-2509(72)80096-4"},
        {
            "autor": "Poling, B.E, Prausnitz, J.M, O'Connell, J.P",
            "title": "The Properties of Gases and Liquids 5th Edition",
            "ref": "McGraw-Hill, New York, 2001",
            "doi": ""},
        {
            "autor": "Boston, J.F., Mathias, P.M.",
            "title": "Phase Equilibria in a Third-Generation Process Simulator",
            "ref": "Presented at: 'Phase Equilibria and Fluid Properties in the "
                   "Chemical Industries', Berlin, March 17-21, 1980.",
            "doi": ""},
        {
            "autor": "Nasrifar, Kh., Bolland, O.",
            "title": "Square-Well Potential and a New α Function for the Soave-"
                     "Redlich-Kwong Equation of State",
            "ref": "Ind. Eng. Chem. Res. 43(21) (2004) 6901-6909",
            "doi": "10.1021/ie049545i"},
        )
    def _cubicDefinition(self, T):
        """Definition of coefficients for generic cubic equation of state.

        Fills ``self.ao`` (temperature-independent attraction term),
        ``self.ai`` (alpha-corrected attraction term at T), ``self.bi``
        (covolume) and ``self.mi`` (acentric-factor polynomial) with one
        entry per component.
        """
        # Schmidt-Wenzel factorization of terms
        self.u = 1
        self.w = 0
        ao = []
        ai = []
        bi = []
        mi = []
        for cmp in self.componente:
            a0, b = self._lib(cmp)
            alfa = self._alfa(cmp, T)
            m = self._m(cmp)
            ao.append(a0)
            ai.append(a0*alfa)
            bi.append(b)
            mi.append(m)
        self.ao = ao
        self.ai = ai
        self.bi = bi
        self.mi = mi
    def _lib(self, cmp):
        # Pure-component a0 and b from critical properties (Soave 1972).
        ao = 0.42747*self.R**2*cmp.Tc**2/cmp.Pc                          # Eq 5
        b = 0.08664*self.R*cmp.Tc/cmp.Pc                                 # Eq 6
        return ao, b
    def _GEOS(self, xi):
        # Map the SRK mixture parameters onto the generic cubic form
        # P = RT/(V-b) - a/(V² + delta·V + epsilon).
        am, bm = self._mixture("SRK", xi, [self.ai, self.bi])
        delta = bm
        epsilon = 0
        return am, bm, delta, epsilon
    def _alfa(self, cmp, T):
        """α parameter calculation procedure, separate of general procedure
        to let define derived equation where only change this term.
        This method use the original alpha formulation for temperatures below
        the critical temperature and can choose by configuration between:
            * Boston-Mathias formulation
            * Nasrifar-Bolland formulation
        Parameters
        ----------
        cmp : componente.Componente
            Componente instance
        T : float
            Temperature, [K]
        Returns
        -------
        alpha : float
            alpha parameter of equation, [-]
        """
        Tr = T/cmp.Tc
        m = self._m(cmp)
        if Tr > 1:
            # Supercritical: pick the extrapolation from the "alpha" kwarg
            # (0 = original Soave, 1 = Boston-Mathias, 2 = Nasrifar-Bolland).
            alfa = self.kwargs.get("alpha", 0)
            if alfa == 0:
                alfa = (1+m*(1-Tr**0.5))**2                             # Eq 13
            elif alfa == 1:
                # Use the Boston-Mathias supercritical extrapolation, ref [3]_
                d = 1+m/2                                               # Eq 10
                c = m/d                                                 # Eq 11
                alfa = exp(c*(1-Tr**d))                                 # Eq 9
            elif alfa == 2:
                # Use the Nasrifar-Bolland supercritical extrapolation, ref [4]
                b1 = 0.25*(12-11*m+m**2)                                # Eq 17
                b2 = 0.5*(-6+9*m-m**2)                                  # Eq 18
                b3 = 0.25*(4-7*m+m**2)                                  # Eq 19
                alfa = b1/Tr + b2/Tr**2 + b3/Tr**3                      # Eq 16
        else:
            alfa = (1+m*(1-Tr**0.5))**2                                 # Eq 13
        return alfa
    def _m(self, cmp):
        """Calculate the intermediate parameter for alpha expression"""
        # Eq 15
        return 0.48 + 1.574*cmp.f_acent - 0.176*cmp.f_acent**2
    def _da(self, tau, x):
        """Calculate the derivatives of α, this procedure is used for Helmholtz
        energy formulation of EoS for calculation of properties, alternate alfa
        formulation must define this procedure for any change of formulation

        Parameters
        ----------
        tau : float
            Inverse reduced temperature variable of the Helmholtz formulation
        x : list
            Mole fractions of the mixture components

        Returns
        -------
        kw : dict
            dat/datt/dattt: first/second/third tau-derivative of mixture a;
            daxi: composition derivatives of mixture a.
        """
        Tr, rhor = self._Tr()
        # Eq 64-67: D term and its first three tau derivatives, per component
        Di = []
        Dt = []
        Dtt = []
        Dttt = []
        for cmp in self.componente:
            Di.append(1-(Tr/cmp.Tc)**0.5/tau**0.5)
            Dt.append((Tr/cmp.Tc)**0.5/2/tau**1.5)
            Dtt.append(-3*(Tr/cmp.Tc)**0.5/4/tau**2.5)
            Dttt.append(15*(Tr/cmp.Tc)**0.5/8/tau**3.5)
        # Eq 63
        Bi = []
        for c1, d in zip(self.mi, Di):
            Bi.append(1+c1*d)
        # Eq 69-71
        # NOTE(review): the d*...*d**-1 factors cancel algebraically, so
        # Btt == c1*dtt and Bttt == c1*dttt; kept verbatim to mirror the paper.
        Bt = []
        Btt = []
        Bttt = []
        for c1, d, dt, dtt, dttt in zip(self.mi, Di, Dt, Dtt, Dttt):
            Bt.append(c1*dt)
            Btt.append(c1*d*dtt*d**-1)
            Bttt.append(c1*d**2*dttt*d**-2)
        # Eq 73-75: tau derivatives of each pure-component a_i = ao_i*B_i²
        dait = []
        daitt = []
        daittt = []
        for a, B, bt, btt, bttt in zip(self.ao, Bi, Bt, Btt, Bttt):
            dait.append(2*a*B*bt)
            daitt.append(2*a*(B*btt+bt**2))
            daittt.append(2*a*(B*bttt+3*bt*btt))
        # Eq 52: pairwise products a_i*a_j
        uij = []
        for aii in self.ai:
            uiji = []
            for ajj in self.ai:
                uiji.append(aii*ajj)
            uij.append(uiji)
        # Eq 59-61: tau derivatives of the pairwise products (product rule)
        duijt = []
        duijtt = []
        duijttt = []
        for aii, diit, diitt, diittt in zip(self.ai, dait, daitt, daittt):
            duijit = []
            duijitt = []
            duijittt = []
            for ajj, djjt, djjtt, djjttt in zip(self.ai, dait, daitt, daittt):
                duijit.append(aii*djjt + ajj*diit)
                duijitt.append(aii*djjtt + 2*diit*djjt + ajj*diitt)
                duijittt.append(
                    aii*djjttt + 3*diit*djjtt + 3*diitt*djjt + ajj*diittt)
            duijt.append(duijit)
            duijtt.append(duijitt)
            duijttt.append(duijittt)
        # Eq 54-56: derivatives of a_ij = (1-k_ij)*sqrt(u_ij) (chain rule)
        daijt = []
        daijtt = []
        daijttt = []
        for uiji, duijit, duijitt, duijittt, kiji in zip(
                uij, duijt, duijtt, duijttt, self.kij):
            daijit = []
            daijitt = []
            daijittt = []
            for u, ut, utt, uttt, k in zip(
                    uiji, duijit, duijitt, duijittt, kiji):
                daijit.append((1-k)/2/u**0.5*ut)
                daijitt.append((1-k)/4/u**1.5*(2*u*utt-ut**2))
                daijittt.append(
                    (1-k)/8/u**2.5*(4*u**2*uttt - 6*u*ut*utt + 3*ut**3))
            daijt.append(daijit)
            daijtt.append(daijitt)
            daijttt.append(daijittt)
        # Eq 51: quadratic mixing of the pairwise derivatives
        damt = 0
        damtt = 0
        damttt = 0
        for xi, daijit, daijitt, daijittt in zip(x, daijt, daijtt, daijttt):
            for xj, dat, datt, dattt in zip(x, daijit, daijitt, daijittt):
                damt += xi*xj*dat
                damtt += xi*xj*datt
                damttt += xi*xj*dattt
        # Eq 126: composition derivative of mixture a
        # NOTE(review): this term ignores kij — presumably valid only for
        # kij=0 or by design of Eq 126; confirm against the reference.
        aij = []
        for a_i in self.ai:
            aiji = []
            for a_j in self.ai:
                aiji.append((a_i*a_j)**0.5)
            aij.append(aiji)
        daxi = []
        for i, (xi, aiji) in enumerate(zip(x, aij)):
            daxij = 0
            for xj, a in zip(x, aiji):
                daxij += 2*xj*a
            daxi.append(daxij)
        kw = {}
        kw["dat"] = damt
        kw["datt"] = damtt
        kw["dattt"] = damttt
        kw["daxi"] = daxi
        return kw
if __name__ == "__main__":
    # Ad-hoc manual smoke tests: run this module directly to exercise SRK
    # against textbook examples.  Most variants are kept commented out.
    from lib.mezcla import Mezcla
    from lib import unidades
    # # mix = Mezcla(5, ids=[4], caudalMolar=1, fraccionMolar=[1])
    # # eq = SRK(300, 9.9742e5, mix, alpha=1)
    # # print('%0.1f' % (eq.Vl.ccmol))
    # # eq = SRK(300, 42.477e5, mix)
    # # print('%0.1f' % (eq.Vg.ccmol))
    # mix = Mezcla(5, ids=[46, 2], caudalMolar=1, fraccionMolar=[0.2152, 0.7848])
    # eq = SRK(144.26, 2.0684e6, mix)
    # print(eq.rhoL.kmolm3)
    # # Ejemplo 6.6, Wallas, pag 340
    # mezcla = Mezcla(2, ids=[1, 2, 40, 41], caudalUnitarioMolar=[0.3177, 0.5894, 0.0715, 0.0214])
    # P = unidades.Pressure(500, "psi")
    # T = unidades.Temperature(120, "F")
    # eq = SRK(T, P, mezcla)
    # print(T)
    # print(eq.x)
    # print([x*(1-eq.x)*5597 for x in eq.xi])
    # print([y*eq.x*5597 for y in eq.yi])
    # print(eq.Ki)
    # Example 6.6 wallas
    P = unidades.Pressure(20, "atm")
    mix = Mezcla(5, ids=[23, 5], caudalMolar=1, fraccionMolar=[0.607, 0.393])
    eq1 = SRK(300, P, mix)
    eq2 = SRK(400, P, mix)
    print(eq1._Dew_T(P))
    print(eq2._Dew_T(P))
    # eq = SRK(500, P, mezcla)
    # print(eq._Dew_T(P))
|
jjgomera/pychemqt
|
lib/EoS/Cubic/SRK.py
|
Python
|
gpl-3.0
| 13,006 | 0.000385 |
import webapp2
class Pets(webapp2.RequestHandler):
    """Handler that answers GET requests with a plain-text greeting."""
    def get(self):
        # Serve as plain text instead of the default HTML content type.
        greeting = 'Hello Pets!'
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write(greeting)
app = webapp2.WSGIApplication([('/', Pets)], debug=True)
|
Trii/NoseGAE
|
examples/pets/pets.py
|
Python
|
bsd-2-clause
| 239 | 0 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time utilities.
In particular, routines to do basic arithmetic on numbers represented by two
doubles, using the procedure of Shewchuk, 1997, Discrete & Computational
Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
def day_frac(val1, val2, factor=1., divisor=1.):
    """
    Sum ``val1`` and ``val2`` exactly and split the result into an integer
    day and a fractional remainder, both float64.

    If ``factor`` is not 1.0 the exact sum is multiplied by it; if
    ``divisor`` is not 1.0 the sum is divided by it.  All arithmetic is
    done with error-free floating-point transformations, so no precision
    is lost to rounding.  The routine assumes the sum stays below about
    1e16; past that ``frac`` can exceed 1.0.

    Returns
    -------
    day, frac : float64
        Integer and fractional part of val1 + val2.
    """
    # Error-free addition: `total` is the rounded sum, `err` the exact
    # rounding error of that float64 addition.
    total, err = two_sum(val1, val2)
    if np.any(factor != 1.):
        total, residual = two_product(total, factor)
        residual += err * factor
        total, err = two_sum(total, residual)
    if np.any(divisor != 1.):
        # Accurate division: correct the naive quotient by the exact
        # remainder, computed via an error-free product and subtraction.
        quot = total / divisor
        prod, prod_err = two_product(quot, divisor)
        rem, rem_err = two_sum(total, -prod)
        rem_err += err
        rem_err -= prod_err
        correction = (rem + rem_err) / divisor  # 3-part float fine here; nothing can be lost
        total, err = two_sum(quot, correction)
    # Split into integer day and fractional remainder.
    day = np.round(total)
    extra, frac = two_sum(total, -day)
    frac += extra + err
    return day, frac
def two_sum(a, b):
    """
    Add ``a`` and ``b`` exactly, returning the result as two float64s:
    the rounded sum and the exact floating-point error of that rounding.

    Implements the branch-free TwoSum of Shewchuk, 1997,
    Discrete & Computational Geometry 18(3):305-363
    http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf

    Returns
    -------
    sum, err : float64
        Approximate sum of a + b and the exact floating point error
    """
    approx = a + b
    # Recover the "virtual" parts of each operand that survived rounding;
    # what each operand lost is its exact contribution to the error.
    b_virtual = approx - a
    b_err = b - b_virtual
    a_virtual = approx - b
    a_err = a - a_virtual
    return approx, a_err + b_err
def two_product(a, b):
    """
    Multiply ``a`` and ``b`` exactly, returning the result as two
    float64s: the rounded product and the exact floating-point error of
    that rounding.

    Implements the TwoProduct algorithm of Shewchuk, 1997,
    Discrete & Computational Geometry 18(3):305-363
    http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf

    Returns
    -------
    prod, err : float64
        Approximate product a * b and the exact floating point error
    """
    prod = a * b
    a_hi, a_lo = split(a)
    b_hi, b_lo = split(b)
    # Subtract three of the four exact partial products from the rounded
    # result; the remaining partial product minus that residue is the
    # exact rounding error.
    residue = prod - a_hi * b_hi
    residue -= a_lo * b_hi
    residue -= a_hi * b_lo
    return prod, a_lo * b_lo - residue
def split(a):
    """
    Split a float64 into two aligned halves with ``ah + al == a`` exactly,
    so that products of halves are themselves exact in double precision.

    Uses Veltkamp splitting as given by Shewchuk, 1997,
    Discrete & Computational Geometry 18(3):305-363
    http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
    """
    scaled = 134217729. * a  # splitter constant: 2**27 + 1
    high = scaled - (scaled - a)
    return high, a - high
|
joergdietrich/astropy
|
astropy/time/utils.py
|
Python
|
bsd-3-clause
| 3,571 | 0 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING, Any
# Bokeh imports
from ..models import glyphs
from ._decorators import glyph_method, marker_method
if TYPE_CHECKING:
from ..models.canvas import CoordinateMapping
from ..models.plots import Plot
from ..models.renderers import GlyphRenderer
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
"GlyphAPI",
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class GlyphAPI:
    """Collects the glyph-adding methods (``circle``, ``line``, ``vbar``,
    ...) that plotting figures expose.

    Each method body here is a stub: the ``@glyph_method`` /
    ``@marker_method`` decorators generate the real implementation from the
    corresponding model class, keeping only the docstring written here.
    """
    @property
    def plot(self) -> Plot | None:
        # The Plot this API adds renderers to (may be None when detached).
        return self._parent
    @property
    def coordinates(self) -> CoordinateMapping | None:
        # Optional coordinate mapping applied to glyphs added via this API.
        return self._coordinates
    def __init__(self, parent: Plot | None = None, coordinates: CoordinateMapping | None = None) -> None:
        self._parent = parent
        self._coordinates = coordinates
    @glyph_method(glyphs.AnnularWedge)
    def annular_wedge(self, **kwargs):
        pass
    @glyph_method(glyphs.Annulus)
    def annulus(self, **kwargs):
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
                             inner_radius=0.2, outer_radius=0.5)
                show(plot)
        """
    @glyph_method(glyphs.Arc)
    def arc(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass
    @marker_method()
    def asterisk(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")
                show(plot)
        """
    @glyph_method(glyphs.Bezier)
    def bezier(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass
    @glyph_method(glyphs.Circle)
    def circle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
            defaults to |data units|.
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)
                show(plot)
        """
    @glyph_method(glyphs.Block)
    def block(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.block(x=[1, 2, 3], y=[1,2,3], width=0.5, height=1, color="#CAB2D6")
                show(plot)
        """
    @marker_method()
    def circle_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
                                  color="#FB8072", fill_alpha=0.2, line_width=2)
                show(plot)
        """
    @marker_method()
    def circle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,
                                color="#FB8072", fill_color=None)
                show(plot)
        """
    @marker_method()
    def circle_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
                              color="#DD1C77", fill_alpha=0.2)
                show(plot)
        """
    @marker_method()
    def circle_y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,
                              color="#DD1C77", fill_alpha=0.2)
                show(plot)
        """
    @marker_method()
    def cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
                           color="#E6550D", line_width=2)
                show(plot)
        """
    @marker_method()
    def dash(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                          color="#99D594", line_width=2)
                show(plot)
        """
    @marker_method()
    def diamond(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
                             color="#1C9099", line_width=2)
                show(plot)
        """
    @marker_method()
    def diamond_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
                                   color="#386CB0", fill_color=None, line_width=2)
                show(plot)
        """
    @marker_method()
    def diamond_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
                                 color="#386CB0", fill_color=None)
                show(plot)
        """
    @marker_method()
    def dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")
                show(plot)
        """
    @glyph_method(glyphs.HArea)
    def harea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],
                           fill_color="#99D594")
                show(plot)
        """
    @glyph_method(glyphs.HBar)
    def hbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")
                show(plot)
        """
    @glyph_method(glyphs.Ellipse)
    def ellipse(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
                             color="#386CB0", fill_color=None, line_width=2)
                show(plot)
        """
    @marker_method()
    def hex(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
                show(plot)
        """
    @marker_method()
    def hex_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],
                             color="#74ADD1", fill_color=None)
                show(plot)
        """
    @glyph_method(glyphs.HexTile)
    def hex_tile(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300, match_aspect=True)
                plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")
                show(plot)
        """
    @glyph_method(glyphs.Image)
    def image(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
            exception will be raised. If neither is passed, then the ``Greys9``
            palette will be used as a default.
        """
    @glyph_method(glyphs.ImageRGBA)
    def image_rgba(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
            values (encoded as 32-bit integers).
        """
    @glyph_method(glyphs.ImageURL)
    def image_url(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass
    @marker_method()
    def inverted_triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
                show(plot)
        """
    @glyph_method(glyphs.Line)
    def line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                p = figure(title="line", width=300, height=300)
                p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])
                show(p)
        """
    @glyph_method(glyphs.MultiLine)
    def multi_line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            For this glyph, the data is not simply an array of scalars, it is an
            "array of arrays".
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                p = figure(width=300, height=300)
                p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
                             color=['red','green'])
                show(p)
        """
    @glyph_method(glyphs.MultiPolygons)
    def multi_polygons(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            For this glyph, the data is not simply an array of scalars, it is a
            nested array.
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                p = figure(width=300, height=300)
                p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],
                                 ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],
                                 color=['red', 'green'])
                show(p)
        """
    @glyph_method(glyphs.Patch)
    def patch(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                p = figure(width=300, height=300)
                p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")
                show(p)
        """
    @glyph_method(glyphs.Patches)
    def patches(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            For this glyph, the data is not simply an array of scalars, it is an
            "array of arrays".
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                p = figure(width=300, height=300)
                p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
                          color=["#43a2ca", "#a8ddb5"])
                show(p)
        """
    @marker_method()
    def plus(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
                show(plot)
        """
    @glyph_method(glyphs.Quad)
    def quad(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
                          right=[1.2, 2.5, 3.7], color="#B3DE69")
                show(plot)
        """
    @glyph_method(glyphs.Quadratic)
    def quadratic(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass
    @glyph_method(glyphs.Ray)
    def ray(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
                         line_width=2)
                show(plot)
        """
    @glyph_method(glyphs.Rect)
    def rect(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
                          width_units="screen", height_units="screen")
                show(plot)
        """
    @glyph_method(glyphs.Step)
    def step(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")
                show(plot)
        """
    @glyph_method(glyphs.Segment)
    def segment(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],
                             x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],
                             color="#F4A582", line_width=3)
                show(plot)
        """
    @marker_method()
    def square(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
                show(plot)
        """
    @marker_method()
    def square_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                  color="#7FC97F",fill_color=None, line_width=2)
                show(plot)
        """
    @marker_method()
    def square_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                color="#7FC97F", fill_color=None)
                show(plot)
        """
    @marker_method()
    def square_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                color="#7FC97F",fill_color=None, line_width=2)
                show(plot)
        """
    @marker_method()
    def square_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                              color="#FDAE6B",fill_color=None, line_width=2)
                show(plot)
        """
    @marker_method()
    def star(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.star(x=[1, 2, 3], y=[1, 2, 3], size=20,
                          color="#1C9099", line_width=2)
                show(plot)
        """
    @marker_method()
    def star_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.star_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
                              color="#386CB0", fill_color=None, line_width=2)
                show(plot)
        """
    @glyph_method(glyphs.Text)
    def text(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            The location and angle of the text relative to the ``x``, ``y`` coordinates
            is indicated by the alignment and baseline text properties.
        """
    @marker_method()
    def triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                              color="#99D594", line_width=2)
                show(plot)
        """
    @marker_method()
    def triangle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                  color="#99D594", fill_color=None)
                show(plot)
        """
    @marker_method()
    def triangle_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                  color="#99D594", line_width=2)
                show(plot)
        """
    @glyph_method(glyphs.VArea)
    def varea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],
                           fill_color="#99D594")
                show(plot)
        """
    @glyph_method(glyphs.VBar)
    def vbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")
                show(plot)
        """
    @glyph_method(glyphs.Wedge)
    def wedge(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
                           end_angle=4.1, radius_units="screen", color="#2b8cbe")
                show(plot)
        """
    @marker_method()
    def x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")
                show(plot)
        """
    @marker_method()
    def y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:
            .. code-block:: python
                from bokeh.plotting import figure, output_file, show
                plot = figure(width=300, height=300)
                plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
                show(plot)
        """
    # -------------------------------------------------------------------------
    # Internal generic scatter implementation; `scatter` dispatches to it
    # after resolving marker shortcuts.
    @glyph_method(glyphs.Scatter)
    def _scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass
    def scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        ''' Creates a scatter plot of the given x and y items.
        Args:
            x (str or seq[float]) : values or field names of center x coordinates
            y (str or seq[float]) : values or field names of center y coordinates
            size (str or list[float]) : values or field names of sizes in |screen units|
            marker (str, or list[str]): values or field names of marker types
            color (color value, optional): shorthand to set both fill and line color
            source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
                An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`
                if needed. If none is supplied, one is created for the user automatically.
            **kwargs: |line properties| and |fill properties|
        Examples:
            >>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")
            >>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)
        .. note::
            When passing ``marker="circle"`` it is also possible to supply a
            ``radius`` value in |data units|. When configuring marker type
            from a data source column, *all* markers including circles may only
            be configured with ``size`` in |screen units|.
        .. note::
            ``Scatter`` markers with multiple marker types may be drawn in a
            different order when using the WebGL output backend. This is an explicit
            trade-off made in the interests of performance.
        '''
        # Default marker is "circle"; single-character shortcuts like "o"
        # or "^." are expanded via _MARKER_SHORTCUTS.
        marker_type = kwargs.pop("marker", "circle")
        if isinstance(marker_type, str) and marker_type in _MARKER_SHORTCUTS:
            marker_type = _MARKER_SHORTCUTS[marker_type]
        # The original scatter implementation allowed circle scatters to set a
        # radius. We will leave this here for compatibility but note that it
        # only works when the marker type is "circle" (and not referencing a
        # data source column). Consider deprecating in the future.
        if marker_type == "circle" and "radius" in kwargs:
            return self.circle(*args, **kwargs)
        else:
            return self._scatter(*args, marker=marker_type, **kwargs)
# Single-character (matplotlib-style) aliases accepted by ``scatter`` for the
# full Bokeh marker-type names.
_MARKER_SHORTCUTS = {
    "*"  : "asterisk",
    "+"  : "cross",
    "o"  : "circle",
    "o+" : "circle_cross",
    "o." : "circle_dot",
    "ox" : "circle_x",
    "oy" : "circle_y",
    "-"  : "dash",
    "."  : "dot",
    "v"  : "inverted_triangle",
    "^"  : "triangle",
    "^." : "triangle_dot",
}
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
bokeh/bokeh
|
bokeh/plotting/glyph_api.py
|
Python
|
bsd-3-clause
| 25,080 | 0.001994 |
# coding=utf8
from __future__ import print_function
import re
import sys
import socket
from untwisted.mode import Mode
from untwisted.network import Work
from untwisted.event import DATA, BUFFER, FOUND, CLOSE, RECV_ERR
from untwisted.utils import std
from untwisted.utils.common import append, shrug
from untwisted.magic import sign
import util
import debug
import runtime
from util import NotInstalled, AlreadyInstalled
# Path of the UNIX domain socket where the external chess engine listens.
SOCKET_ADDRESS = 'state/chess'
# Seconds to wait before reconnecting after the engine connection drops.
RECONNECT_DELAY_SECONDS = 1

# Open engine connections (untwisted Work instances), managed by
# init_work()/kill_work().
ch_work = []
# Event mode and handler links for traffic on the engine socket: buffer
# incoming data and split it into newline-terminated messages.
ch_mode = Mode()
ch_mode.domain = 'ch'
ch_link = util.LinkSet()
ch_link.link_module(std)
ch_link.link(DATA, append)
ch_link.link(BUFFER, shrug, '\n')
if '--debug' in sys.argv: ch_link.link_module(debug)

# IRC-bot side: ab_mode is set by install() and cleared by uninstall().
ab_mode = None
ab_link = util.LinkSet()
@ab_link(('HELP', 'chess'))
def h_help(bot, reply, args):
    """Reply with one usage line per chess command."""
    for usage, description in (
        ('chess start',
         'Starts a new game of chess.'),
        ('chess rf RF',
         'Moves the piece at rank r file f to rank R file F.'),
        ('chess M [r|f|rf] RF',
         'Moves a piece of type M to rank R file F'
         ' (moving from rank r and/or file f, if specified).'),
        ('chess [r|f] RF',
         'Moves a pawn to rank R file F'
         ' (moving from rank r or file f, if specified).'),
        ('chess stop',
         'Cancels the current game of chess.'),
    ):
        reply(usage, description)
def init_work(address):
    """Open a non-blocking UNIX-socket connection to *address* and track it."""
    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    work = Work(ch_mode, conn)
    work.address = address
    ch_work.append(work)
    work.setblocking(0)
    work.connect_ex(address)
def kill_work(work):
    # Tear down a connection: unregister it from the event loop first so no
    # further events fire, then shut down and close the socket, and finally
    # forget it so it cannot be reused.
    # NOTE(review): socket.shutdown() raises if the socket never connected —
    # confirm callers only pass established connections.
    work.destroy()
    work.shutdown(socket.SHUT_RDWR)
    work.close()
    ch_work.remove(work)
def install(bot):
    """Attach the chess bridge to *bot*; raises AlreadyInstalled if active."""
    global ab_mode
    if ab_mode is not None:
        raise AlreadyInstalled
    ab_mode = bot
    ab_link.install(ab_mode)
    ch_link.install(ch_mode)
    init_work(SOCKET_ADDRESS)
def uninstall(bot):
    """Detach the chess bridge; raises NotInstalled if it is not active."""
    global ab_mode
    if ab_mode is None:
        raise NotInstalled
    ch_link.uninstall(ch_mode)
    # kill_work() removes entries from ch_work, so drain from the front.
    while ch_work:
        kill_work(ch_work[0])
    ab_link.uninstall(ab_mode)
    ab_mode = None
@ab_link('!chess')
def h_chess(bot, id, target, args, full_msg):
    """Forward a '!chess' IRC command to every connected engine socket."""
    if not target:
        return
    message = '%s <%s> %s\n' % (target, id.nick, args)
    for work in ch_work:
        work.dump(message)
@ch_link(FOUND)
def ch_found(work, line):
    """Relay a '#channel message' line from the engine back to IRC."""
    parsed = re.match(r'(#\S+) (.*)', line.strip())
    if parsed is None:
        return
    ab_mode.send_msg(parsed.group(1), parsed.group(2))
@ch_link(CLOSE)
@ch_link(RECV_ERR)
def ch_close_recv_error(work, *args):
    # On disconnect or receive error: drop the dead connection, wait a
    # moment, then reconnect to the same address. This is an untwisted
    # coroutine — the yield hands control back to the event loop.
    kill_work(work)
    yield runtime.sleep(RECONNECT_DELAY_SECONDS)
    init_work(work.address)
|
joodicator/PageBot
|
page/chess.py
|
Python
|
lgpl-3.0
| 2,574 | 0.007382 |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.7.3"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access and then replaces
    itself with the resolved object. ``_resolve()`` is supplied by the
    subclasses (``MovedModule`` / ``MovedAttribute``)."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        # This is a bit ugly, but it avoids running this again.
        delattr(obj.__class__, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy descriptor for a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # On Python 3 the new name defaults to the move's own name.
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # Called for attributes not yet cached: import the real module,
        # fetch the attribute, and cache it on this proxy for next time.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy descriptor for an attribute that lives in different modules
    (and possibly under different names) on Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # The Python 3 attribute name falls back to the old name, then
            # to the move's own name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # Registering on the class makes the move visible through the lazy
    # ``moves`` module object.
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not registered on the class: fall back to the module instance's
        # own namespace before declaring the move missing.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped):
def wrapper(f):
f = functools.wraps(wrapped)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # One-shot dummy metaclass: deriving from the returned temporary class
    # routes class creation through it exactly once, at which point it
    # builds the real class with *meta* and the requested *bases*.
    class _TemporaryMeta(meta):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)

    return type.__new__(_TemporaryMeta, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator that re-creates the decorated class under *metaclass*."""
    def rebuild(cls):
        # Copy the class body, dropping entries the class machinery will
        # re-create, and slot descriptors, which must not be passed back in.
        body = dict(cls.__dict__)
        for implicit in ('__dict__', '__weakref__'):
            body.pop(implicit, None)
        slots = body.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            for slot_name in slot_names:
                body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return rebuild
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
|
SimplyAutomationized/python-snap7
|
snap7/six.py
|
Python
|
mit
| 26,731 | 0.001459 |
#!/usr/bin/env python
from __future__ import print_function
import unittest
from forker import Request
import socket
import os
import sys
import re
_example_request = b"""GET /README.md?xyz HTTP/1.1
Host: localhost:8080
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
Accept-Encoding: gzip, deflate, sdch, br
Accept-Language: en-US,en;q=0.8
Cookie:trail=6231214290744395; scent=6457421329820405
"""
HELLO_WORLD = b"Hello world!\n"


def simple_app(environ, start_response):
    """Smallest valid WSGI application: always answer 200 with HELLO_WORLD.

    *environ* is accepted (and ignored) per the WSGI signature. The previous
    ``status = environ and '200 OK'`` made the status a non-string whenever
    the environ mapping was falsy, which violates PEP 3333; the status is
    now a constant.
    """
    status = '200 OK'
    response_headers = [('Content-type', 'text/plain')]
    start_response(status, response_headers)
    return [HELLO_WORLD]
class AppClass:
    """WSGI application written as an iterable class (PEP 3333 style)."""

    def __init__(self, environ, start_response):
        self.environ = environ
        self.start = start_response

    def __iter__(self):
        response_headers = [('Content-type', 'text/plain')]
        self.start('200 OK', response_headers)
        yield HELLO_WORLD
class TestRequest(unittest.TestCase):
    """Unit tests for forker.Request: header/cookie parsing, static file
    serving, directory listing, CGI execution, and the WSGI adapter."""

    def test_socket(self):
        # Sanity-check socketpair(): bytes written on one end arrive on the
        # other (the same transport the Request tests below rely on).
        test_data = b"hello\nworld!"
        client, server = socket.socketpair()
        client.send(test_data)
        buff = server.recv(4096)
        self.assertEqual(buff, test_data)
        client.close()
        server.close()

    def test_request(self):
        # Feed a canned HTTP request through a socket and verify the request
        # line, headers and cookies are parsed out.
        client, server = socket.socketpair()
        client.send(_example_request)
        request = Request(sock=server)
        client.close()
        server.close()
        self.assertEqual(request.method, "GET")
        self.assertEqual(request.requested_path, "/README.md")
        self.assertEqual(request.query_string, "xyz")
        self.assertEqual(request.headers["host"], "localhost:8080")
        self.assertFalse(request.body)
        self.assertEqual(request.cookies.get("scent"), "6457421329820405")
        self.assertEqual(request.cookies.get("trail"), "6231214290744395")

    def test_listing(self):
        # Serving '/' yields a directory listing containing file links.
        r = Request(requested_path='/')
        out = r.serve()
        line = b"<a href='/cgi_example.sh'>cgi_example.sh</a>"
        self.assertTrue(line in out)

    def test_read(self):
        # Serving this very file returns its contents — the magic string
        # below exists only in this file.
        magic = b"Y43j99j8p4Mk8S8B"
        r = Request(requested_path='/TestRequest.py')
        out = r.serve()
        self.assertTrue(magic in out)

    def test_cgi(self):
        # CGI execution: expect a 201 status line and the query string
        # exported into the child process environment.
        r = Request(requested_path='/cgi_example.sh', query_string="abc")
        out = r.serve(allow_cgi=True)
        print(out.decode())
        self.assertTrue(re.match(b"HTTP/1.. 201", out))
        self.assertTrue(b"QUERY_STRING=abc" in out)

    def test_wsgi1(self):
        # WSGI adapter with a plain function application.
        client, server = socket.socketpair()
        client.send(_example_request)
        request = Request(sock=server)
        client.close()
        server.close()
        out = request.wsgi(simple_app)
        self.assertTrue(isinstance(out, bytes))
        self.assertTrue(b'\r\n\r\n' in out)
        self.assertTrue(HELLO_WORLD in out)
        self.assertTrue(out.startswith(b'HTTP/1.0 200 OK'))

    def test_wsgi2(self):
        # WSGI adapter with an iterable-class application.
        client, server = socket.socketpair()
        client.send(_example_request)
        request = Request(sock=server)
        client.close()
        server.close()
        out = request.wsgi(AppClass)
        self.assertTrue(isinstance(out, bytes))
        self.assertTrue(b'\r\n\r\n' in out)
        self.assertTrue(HELLO_WORLD in out)
        self.assertTrue(out.startswith(b'HTTP/1.0 200 OK'))
if __name__ == "__main__":
    # Run from the tests directory so relative fixture paths resolve, and
    # make the package under test importable from the parent directory.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    sys.path.append("..")
    unittest.main()
|
darinmcgill/forker
|
tests/TestRequest.py
|
Python
|
gpl-3.0
| 3,544 | 0.000282 |
from abc import ABCMeta
from copy import deepcopy
from enum import Enum
from itertools import product
from typing import List, Dict, Tuple, Optional
from rxncon.core.reaction import Reaction, OutputReaction
from rxncon.core.rxncon_system import RxnConSystem
from rxncon.core.spec import Spec
from rxncon.core.state import State, InteractionState
from rxncon.venntastic.sets import Set as VennSet, ValueSet, Intersection, Union, Complement, UniversalSet, EmptySet
MAX_STEADY_STATE_ITERS = 20
class BooleanModel:
    """Holds all data describing a Boolean model: a list of targets, a list of update rules and
    a list of initial conditions."""
    def __init__(self, targets: List['Target'], update_rules: List['UpdateRule'],
                 initial_conditions: 'BooleanModelState') -> None:
        self.update_rules = sorted(update_rules)
        self.initial_conditions = initial_conditions
        # Index each kind of target by its string form for the *_by_name
        # lookups below.
        self._state_targets = {str(x): x for x in targets if isinstance(x, StateTarget)}
        self._reaction_targets = {str(x): x for x in targets if isinstance(x, ReactionTarget)}
        self._knockout_targets = {str(x): x for x in targets if isinstance(x, KnockoutTarget)}
        self._overexpression_targets = {str(x): x for x in targets if isinstance(x, OverexpressionTarget)}
        self._validate_update_rules()
        self._validate_initial_conditions()
        # Simulation state; lazily seeded from initial_conditions by step().
        self.current_state = None  # type: Optional[BooleanModelState]

    def set_initial_condition(self, target: 'Target', value: bool) -> None:
        """Set the initial boolean value of *target*."""
        self.initial_conditions.set_target(target, value)

    def update_rule_by_target(self, target: 'Target') -> 'UpdateRule':
        """Return the update rule whose LHS is *target*; raises KeyError if none."""
        for rule in self.update_rules:
            if rule.target == target:
                return rule
        raise KeyError

    def state_target_by_name(self, name: str) -> 'StateTarget':
        """Look up a state target by its string representation."""
        return self._state_targets[name]

    def reaction_target_by_name(self, name: str) -> 'ReactionTarget':
        """Look up a reaction target by its string representation."""
        return self._reaction_targets[name]

    def knockout_target_by_name(self, name: str) -> 'KnockoutTarget':
        """Look up a knockout target by its string representation."""
        return self._knockout_targets[name]

    def overexpression_target_by_name(self, name: str) -> 'OverexpressionTarget':
        """Look up an overexpression target by its string representation."""
        return self._overexpression_targets[name]

    def step(self) -> None:
        """Takes one timestep in the Boolean model. This is rather inefficient, but not meant for
        actual simulations, this is only used in the unit tests that test all different motifs
        and their desired steady states."""
        if not self.current_state:
            # First step: start from the initial conditions (copied, so the
            # originals are never mutated).
            self.current_state = deepcopy(self.initial_conditions)
        else:
            # Synchronous update: every rule is evaluated against the same
            # previous state before any target value changes.
            new_state = dict()
            for rule in self.update_rules:
                new_state[rule.target] = rule.factor.eval_boolean_func(self.current_state.target_to_value)
            self.current_state = BooleanModelState(new_state)

    def calc_steady_state(self) -> 'BooleanModelState':
        """Calculates the steady state by taking max MAX_STEADY_STATE_ITERS steps. If no steady state
        found, raises."""
        iters = 0
        while iters < MAX_STEADY_STATE_ITERS:
            prev = deepcopy(self.current_state)
            self.step()
            # Fixed point reached when a step leaves the state unchanged.
            if prev == self.current_state:
                assert isinstance(prev, BooleanModelState)
                return prev
            iters += 1
        raise AssertionError('Could not find steady state.')

    def _validate_update_rules(self) -> None:
        """Assert that all targets appearing on the RHS in an update rule have their own LHS."""
        all_lhs_targets = []  # type: List[Target]
        all_rhs_targets = []  # type: List[Target]
        for rule in self.update_rules:
            all_lhs_targets.append(rule.target)
            all_rhs_targets += rule.factor_targets
        assert all(x in all_lhs_targets for x in all_rhs_targets)

    def _validate_initial_conditions(self) -> None:
        # Delegates to the state object, which checks itself against this model.
        self.initial_conditions.validate_by_model(self)
class BooleanModelState:
    """Maps every target of a Boolean model to its current truth value."""

    def __init__(self, target_to_value: Dict['Target', bool]) -> None:
        self.target_to_value = target_to_value

    def __eq__(self, other):
        if isinstance(other, BooleanModelState):
            return self.target_to_value == other.target_to_value
        return NotImplemented

    def __getitem__(self, item):
        return self.target_to_value[item]

    def __str__(self):
        return str(self.target_to_value)

    def __repr__(self):
        return str(self)

    def set_target(self, target: 'Target', value: bool) -> None:
        """Assign the boolean `value` to `target` in this state."""
        self.target_to_value[target] = value

    def validate_by_model(self, model: BooleanModel) -> None:
        """Assert that all targets appearing in the model have a Boolean value assigned."""
        expected = [rule.target for rule in model.update_rules]
        given = self.target_to_value.keys()
        assert set(expected) == set(given) and len(expected) == len(given)
class Target(metaclass=ABCMeta):
"""Abstract base class for the different targets."""
def __hash__(self) -> int:
return hash(str(self))
def __repr__(self) -> str:
return str(self)
class ReactionTarget(Target):
    """Reaction target of the boolean model. For all non-degrading reactions the relation between
    rxncon reactions and Boolean targets is 1:1. The relation for degrading reactions is more difficult
    since (1) the contingencies determine what the reaction degrades (which obviously becomes problematic
    in the case of a logical disjunction), and (2) the degradation of bonds should produce empty binding
    partners. We refer to our paper.

    ``contingency_variant_index`` / ``interaction_variant_index`` distinguish the multiple Boolean
    targets a single degradation reaction may be split into; both are None for ordinary reactions."""
    def __init__(self, reaction_parent: Reaction, contingency_variant: Optional[int]=None,
                 interaction_variant: Optional[int] = None, contingency_factor: VennSet['StateTarget']=None) -> None:
        self.reaction_parent = reaction_parent  # type: Reaction
        # Wrap every produced/consumed/synthesised/degraded rxncon State into a StateTarget.
        self.produced_targets = [StateTarget(x) for x in reaction_parent.produced_states]  # type: List[StateTarget]
        self.consumed_targets = [StateTarget(x) for x in reaction_parent.consumed_states]  # type: List[StateTarget]
        self.synthesised_targets = [StateTarget(x) for x in
                                    reaction_parent.synthesised_states]  # type: List[StateTarget]
        self.degraded_targets = [StateTarget(x) for x in reaction_parent.degraded_states]  # type: List[StateTarget]
        self.contingency_variant_index = contingency_variant
        self.interaction_variant_index = interaction_variant
        # No contingency given means "always allowed" -> UniversalSet.
        if contingency_factor is None:
            self.contingency_factor = UniversalSet()  # type: VennSet[StateTarget]
        else:
            self.contingency_factor = contingency_factor  # type: VennSet[StateTarget]
    def __hash__(self) -> int:
        return hash(str(self))
    def __eq__(self, other: object) -> bool:
        # Equality includes both variant indices, so split degradation variants stay distinct.
        if not isinstance(other, Target):
            return NotImplemented
        return isinstance(other, ReactionTarget) and self.reaction_parent == other.reaction_parent and \
            self.contingency_variant_index == other.contingency_variant_index and \
            self.interaction_variant_index == other.interaction_variant_index
    def __str__(self) -> str:
        # Suffix encodes the variant indices: '#cN', '#iN' or '#cN/iM'.
        suffix = ''
        if self.interaction_variant_index is not None and self.contingency_variant_index is not None:
            suffix = '#c{}/i{}'.format(self.contingency_variant_index, self.interaction_variant_index)
        elif self.contingency_variant_index is not None and self.interaction_variant_index is None:
            suffix = '#c{}'.format(self.contingency_variant_index)
        elif self.interaction_variant_index is not None and self.contingency_variant_index is None:
            suffix = '#i{}'.format(self.interaction_variant_index)
        return str(self.reaction_parent) + suffix
    def __repr__(self) -> str:
        return str(self)
    def produces(self, state_target: 'StateTarget') -> bool:
        """True if this reaction produces `state_target`."""
        return state_target in self.produced_targets
    def consumes(self, state_target: 'StateTarget') -> bool:
        """True if this reaction consumes `state_target`."""
        return state_target in self.consumed_targets
    def synthesises(self, state_target: 'StateTarget') -> bool:
        """True if this reaction synthesises `state_target`."""
        return state_target in self.synthesised_targets
    def degrades(self, state_target: 'StateTarget') -> bool:
        """True if this reaction degrades `state_target`."""
        return state_target in self.degraded_targets
    @property
    def components_lhs(self) -> List[Spec]:
        # Components appearing on the left-hand side of the parent reaction.
        return self.reaction_parent.components_lhs
    @property
    def components_rhs(self) -> List[Spec]:
        # Components appearing on the right-hand side of the parent reaction.
        return self.reaction_parent.components_rhs
    @property
    def degraded_components(self) -> List[Spec]:
        # A component is degraded iff it appears on the LHS but not on the RHS.
        return [component for component in self.components_lhs if component not in self.components_rhs]
    @property
    def synthesised_components(self) -> List[Spec]:
        # A component is synthesised iff it appears on the RHS but not on the LHS.
        return [component for component in self.components_rhs if component not in self.components_lhs]
    def degrades_component(self, spec: Spec) -> bool:
        """True if this reaction degrades the component `spec` (must be a component spec)."""
        assert spec.is_component_spec
        return spec in self.degraded_components
    def synthesises_component(self, spec: Spec) -> bool:
        """True if this reaction synthesises the component `spec` (must be a component spec)."""
        assert spec.is_component_spec
        return spec in self.synthesised_components
    def is_output(self) -> bool:
        """True if the parent reaction is a global Output reaction (e.g. '[BLA]')."""
        return isinstance(self.reaction_parent, OutputReaction)
class StateTarget(Target):
    """State target of the Boolean model. The relation between rxncon states and Boolean state targets
    is generally 1:1, but not quite: the components that carry no internal state are assigned so-called
    ComponentStateTargets (see next class.)"""
    def __init__(self, state_parent: State) -> None:
        self.state_parent = state_parent
    def __hash__(self) -> int:
        return hash(str(self))
    def __str__(self) -> str:
        return str(self.state_parent)
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Target):
            return NotImplemented
        return isinstance(other, StateTarget) and self.state_parent == other.state_parent
    def is_produced_by(self, reaction_target: ReactionTarget) -> bool:
        """Inverse view of ReactionTarget.produces."""
        return reaction_target.produces(self)
    def is_consumed_by(self, reaction_target: ReactionTarget) -> bool:
        """Inverse view of ReactionTarget.consumes."""
        return reaction_target.consumes(self)
    def is_synthesised_by(self, reaction_target: ReactionTarget) -> bool:
        """Inverse view of ReactionTarget.synthesises."""
        return reaction_target.synthesises(self)
    def is_degraded_by(self, reaction_target: ReactionTarget) -> bool:
        """Inverse view of ReactionTarget.degrades."""
        return reaction_target.degrades(self)
    def is_input(self) -> bool:
        """True if the parent state is a global Input state (e.g. '[BLA]')."""
        return self.state_parent.is_global
    @property
    def components(self) -> List[Spec]:
        # Components this state lives on (two for interaction states, one otherwise).
        return self.state_parent.components
    def shares_component_with(self, other_target: 'StateTarget') -> bool:
        """True if this state and `other_target` have at least one component in common."""
        return any(x in other_target.components for x in self.components)
    @property
    def is_neutral(self) -> bool:
        # Neutral states are the unmodified / unbound forms; they start out True in the model.
        return self.state_parent.is_neutral
    @property
    def is_homodimer(self) -> bool:
        return self.state_parent.is_homodimer
    @property
    def neutral_targets(self) -> List['StateTarget']:
        # The neutral counterparts of this state, wrapped as StateTargets.
        return [StateTarget(x) for x in self.state_parent.neutral_states]
    @property
    def is_interaction(self) -> bool:
        # Interaction states bind two components; degradation treats them specially.
        return isinstance(self.state_parent, InteractionState)
    def is_mutually_exclusive_with(self, other: 'StateTarget') -> bool:
        """True if this state and `other` cannot hold simultaneously (same locus)."""
        return self.state_parent.is_mutually_exclusive_with(other.state_parent)
    def complementary_state_targets(self, rxnconsys: RxnConSystem, component: Spec) -> List['StateTarget']:
        """All states on `component` that are mutually exclusive with this one, as StateTargets."""
        others = rxnconsys.complement_states_for_component(component, self.state_parent)
        return [StateTarget(x) for x in others]
class ComponentStateTarget(StateTarget):
    """ComponentStateTarget describes a rxncon component that carries no states.

    Note: deliberately does not call ``StateTarget.__init__``; it stores the bare
    component instead of a parent state and overrides the accessors that would
    otherwise touch ``state_parent``."""

    def __init__(self, component: Spec) -> None:
        self.component = component

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Target):
            return isinstance(other, ComponentStateTarget) and self.component == other.component
        return NotImplemented

    def __str__(self) -> str:
        return str(self.component)

    def __repr__(self) -> str:
        return self.__str__()

    def __hash__(self) -> int:
        return str(self).__hash__()

    @property
    def components(self) -> List[Spec]:
        # A component target consists of exactly its own component.
        return [self.component]

    @property
    def is_neutral(self) -> bool:
        # A state-less component is always in its neutral form.
        return True

    @property
    def is_interaction(self) -> bool:
        return False
class KnockoutTarget(ComponentStateTarget):
    """When enabled, KnockoutTargets are ANDed into the update rule for each target, one KnockoutTarget
    per component. The KnockoutTarget itself has a trivial update rule. By changing the initial conditions
    for the KnockoutTarget we can knock out all states for a particular component, making them always FALSE."""

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Target):
            return isinstance(other, KnockoutTarget) and self.component == other.component
        return NotImplemented

    def __str__(self) -> str:
        return 'Knockout<{}>'.format(str(self.component))

    def __repr__(self) -> str:
        return self.__str__()

    def __hash__(self) -> int:
        return str(self).__hash__()
class OverexpressionTarget(ComponentStateTarget):
    """Similar to KnockoutTarget, but now (a set of) states per component can be made always TRUE."""

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Target):
            return isinstance(other, OverexpressionTarget) and self.component == other.component
        return NotImplemented

    def __str__(self) -> str:
        return 'Overexpression<{}>'.format(str(self.component))

    def __repr__(self) -> str:
        return self.__str__()

    def __hash__(self) -> int:
        return str(self).__hash__()
class UpdateRule:
    """The UpdateRule for a Target gives the Boolean value of the target at time t+1 when we
    evaluate the factor at time t."""

    def __init__(self, target: Target, factor: VennSet[Target]) -> None:
        self.target = target
        self.factor = factor

    def __eq__(self, other: object) -> bool:
        if isinstance(other, UpdateRule):
            return self.target == other.target and self.factor.is_equivalent_to(other.factor)
        return NotImplemented

    def __lt__(self, other: object) -> bool:
        # Ordering by the target's string form gives a deterministic sort of rules.
        if isinstance(other, UpdateRule):
            return str(self.target) < str(other.target)
        return NotImplemented

    def __str__(self) -> str:
        return "target: {0}, factors: {1}".format(self.target, self.factor)

    @property
    def factor_targets(self) -> List[Target]:
        """All targets appearing on the right-hand side of this rule."""
        return self.factor.values
class SmoothingStrategy(Enum):
    """To overcome non biological oscillatory behaviour during the simulation we introduced
    smoothings."""
    no_smoothing = 'no_smoothing'
    # Smooth over the (possibly transiently absent) source states of production reactions.
    smooth_production_sources = 'smooth_production_sources'
class KnockoutStrategy(Enum):
    """For which states should knockout rules be generated: none, the neutral ones, or all."""
    no_knockout = 'no_knockout'
    knockout_neutral_states = 'knockout_neutral_states'
    knockout_all_states = 'knockout_all_states'
class OverexpressionStrategy(Enum):
    """For which states should overexpression rules be generated: none, the neutral ones, or all."""
    no_overexpression = 'no_overexpression'
    overexpress_neutral_states = 'overexpress_neutral_states'
    overexpress_all_states = 'overexpress_all_states'
def boolean_model_from_rxncon(rxncon_sys: RxnConSystem,
smoothing_strategy: SmoothingStrategy=SmoothingStrategy.no_smoothing,
knockout_strategy: KnockoutStrategy=KnockoutStrategy.no_knockout,
overexpression_strategy: OverexpressionStrategy=OverexpressionStrategy.no_overexpression,
k_plus_strict: bool=True, k_minus_strict: bool=True) -> BooleanModel:
    def initial_conditions(reaction_targets: List[ReactionTarget], state_targets: List[StateTarget],
                           knockout_targets: List[KnockoutTarget], overexpression_targets: List[OverexpressionTarget]) \
            -> BooleanModelState:
        """As default all the neutral state targets are set to True. All other state targets as well
        as all reaction targets are set to False. Knockout and overexpression switches start out
        disabled (False)."""
        conds = {}  # type: Dict[Target, bool]
        for reaction_target in reaction_targets:
            conds[reaction_target] = False
        for state_target in state_targets:
            # Neutral state targets are True.
            if state_target.is_neutral:
                conds[state_target] = True
            # All reaction targets and non-neutral state targets are False.
            else:
                conds[state_target] = False
        for knockout_target in knockout_targets:
            conds[knockout_target] = False
        for overexpression_target in overexpression_targets:
            conds[overexpression_target] = False
        return BooleanModelState(conds)
    def calc_component_presence_factors() -> Tuple[Dict[Spec, VennSet[StateTarget]], List[ComponentStateTarget]]:
        """The form of the component presence factor is:
        (state_a1 | ... | state_an) & (state_b1 | ... | state_bm) & ...
        Mutually exclusive states are combined by boolean OR (state_a1 ... state_an , state_b1 ... state_bm).
        These ORs are then combined with ANDs.
        If a component does not carry states, this will be a ComponentStateTarget.
        """
        component_state_targets = []  # type: List[ComponentStateTarget]
        component_to_factor = {}  # type: Dict[Spec, VennSet[StateTarget]]
        for component in rxncon_sys.components():
            grouped_states = rxncon_sys.states_for_component_grouped(component)
            # component is not part of any state
            if not grouped_states.values():
                component_state_targets.append(ComponentStateTarget(component))
                component_to_factor[component] = ValueSet(ComponentStateTarget(component))
            # component is part of at least one state
            else:
                # mutually exclusive states are combined by OR
                component_to_factor[component] = \
                    Intersection(
                        *(Union(*(ValueSet(StateTarget(x)) for x in group)) for group in grouped_states.values()))
        return component_to_factor, component_state_targets
    def calc_reaction_targets_with_dnf_contingencies(k_plus_strict: bool, k_minus_strict: bool) -> List[ReactionTarget]:
        """Calculates contingency factors for reaction targets.
        Degradation reactions are handled differently then other reactions. An OR contingency will lead to a
        split of the degradation reaction in as many reactions as OR statements. Each OR will be assigned to one
        instance of the reaction."""
        # A set is used so equal ReactionTargets (same parent + variant indices) are deduplicated.
        reaction_targets = set()
        for reaction in rxncon_sys.reactions:
            # All contingencies on the reaction are ANDed into one VennSet over StateTargets.
            factors = (x.to_venn_set(k_plus_strict=k_plus_strict, k_minus_strict=k_minus_strict, structured=False,
                                     state_wrapper=StateTarget)
                       for x in rxncon_sys.contingencies_for_reaction(reaction))
            cont = Intersection(*factors).to_simplified_set()  # type: VennSet[StateTarget]
            # The reaction is not a degradation reaction or the DNF has just one term.
            if not reaction.degraded_components or len(cont.to_dnf_list()) == 1:
                reaction_targets.add(ReactionTarget(reaction, contingency_factor=cont))
            # The reaction is a degradation reaction
            else:
                # The reaction is split into separated entities according to the number of minterms of the
                # disjunctive normal form (dnf). Each minterm will be assigned to a entity of the degradation reaction.
                for index, factor in enumerate(cont.to_dnf_list()):
                    reaction_targets.add(
                        ReactionTarget(reaction, contingency_variant=index, contingency_factor=factor))
        return list(reaction_targets)
    def update_degs_add_component_states(reaction_targets: List[ReactionTarget],
                                         component_state_targets: List[ComponentStateTarget]) -> List[ReactionTarget]:
        """For degradation reactions, add the stateless components they degrade to the list of targets they degrade."""
        result = deepcopy(reaction_targets)
        for reaction_target in result:
            for degraded_component in reaction_target.degraded_components:
                if ComponentStateTarget(degraded_component) in component_state_targets:
                    reaction_target.degraded_targets.append(ComponentStateTarget(degraded_component))
        return result
    def update_degs_add_contingent_states(reaction_targets: List[ReactionTarget]) -> List[ReactionTarget]:
        """For degradation reactions, add their contingent states to the list of targets they degrade."""
        def degraded_state_targets(component: Spec, soln: Dict[StateTarget, bool]) -> List[StateTarget]:
            # Disregard input states, since they do not influence which states are degraded.
            soln = {k: v for k, v in soln.items() if not k.is_input() and component in k.components}
            # soln evaluates to False if solution is tautology, since when there are no constraints on which
            # states are required to be true/false, soln is an empty dict. Nicely counterintuitive.
            if not soln and ComponentStateTarget(component) in component_state_targets:
                # Unconstrained, state-less component: degrade the component target itself.
                return [ComponentStateTarget(component)]
            elif not soln:
                # Unconstrained: every state on the component is degraded.
                return [StateTarget(x) for x in rxncon_sys.states_for_component(component)]
            else:
                # Constrained: degrade the states required True, plus (for the states required
                # False) everything mutually exclusive with them.
                trues = [target for target, val in soln.items() if val]
                falses = [target for target, val in soln.items() if not val]
                for target in falses:
                    trues += target.complementary_state_targets(rxncon_sys, component)
                return trues
        result = deepcopy(reaction_targets)
        for reaction_target in result:
            solutions = reaction_target.contingency_factor.calc_solutions()
            if reaction_target.degraded_targets and len(solutions) == 1 and not solutions[0]:
                # No contingencies, but targeted degradation. Do not mess with the list of degraded targets.
                continue
            for degraded_component, solution in product(reaction_target.degraded_components, solutions):  # type: ignore
                reaction_target.degraded_targets.extend(degraded_state_targets(degraded_component, solution))
        return result
    def update_degs_add_interaction_state_partner(reaction_targets: List[ReactionTarget]) -> List[ReactionTarget]:
        """Update degradation reactions for interaction states.
        Interaction states are composed out of two components. A degradation reaction degrading an interaction
        state will degrade one of these components. The other component is then released, and thus produced
        by the degrading reaction."""
        result = []
        for reaction_target in reaction_targets:
            appended = False
            degraded_interaction_targets = [x for x in reaction_target.degraded_targets if x.is_interaction]
            for index, interaction_target in enumerate(degraded_interaction_targets):
                # The "empty partner" is the neutral state of the component that is NOT degraded.
                empty_partners = [neutral_target for neutral_target in interaction_target.neutral_targets
                                  if not any(component in reaction_target.degraded_components
                                             for component in neutral_target.components)]
                if interaction_target.is_homodimer:
                    # Both halves of a homodimer are degraded, so there is no released partner.
                    assert len(empty_partners) == 0, 'update_degs_add_interaction_state_partner::homodimer error, ' \
                                                     'reaction_target: {}, interaction_target: {}, empty_partners: {}' \
                                                     ''.format(reaction_target, interaction_target, empty_partners)
                    continue
                if len(empty_partners) != 1:
                    raise AssertionError('update_degs_add_interaction_state_partner::empty partners != 1 error\n'
                                         'The full list of degraded targets is {}\n'
                                         'The current reaction target is {}\n'
                                         'The current interaction target is {}\n'
                                         'The current empty partners that have been deduced are: {}\n'
                                         .format(', '.join(str(x) for x in degraded_interaction_targets),
                                                 str(reaction_target), str(interaction_target),
                                                 ', '.join(str(x) for x in empty_partners)))
                # Fork the reaction: this variant consumes the bond and produces the freed partner.
                new_reaction = deepcopy(reaction_target)
                new_reaction.interaction_variant_index = index if len(degraded_interaction_targets) > 1 else None
                new_reaction.consumed_targets.append(interaction_target)
                new_reaction.produced_targets.append(empty_partners[0])
                result.append(new_reaction)
                appended = True
            if not appended:
                result.append(deepcopy(reaction_target))
        return result
    def update_syns_with_component_states(reaction_targets: List[ReactionTarget],
                                          component_state_targets: List[ComponentStateTarget]) -> List[ReactionTarget]:
        """Update synthesis reaction with component states: stateless components that are synthesised have
        rights too."""
        result = deepcopy(reaction_targets)
        for reaction_target in result:
            for component in reaction_target.synthesised_components:
                if ComponentStateTarget(component) in component_state_targets:
                    reaction_target.synthesised_targets.append(ComponentStateTarget(component))
        return result
    def calc_knockout_targets(knockout_strategy: KnockoutStrategy) -> List[KnockoutTarget]:
        """One KnockoutTarget per rxncon component, unless knockouts are disabled."""
        if knockout_strategy == KnockoutStrategy.no_knockout:
            return []
        else:
            return [KnockoutTarget(component) for component in rxncon_sys.components()]
    def calc_overexpression_targets(overexpression_strategy: OverexpressionStrategy) -> List[OverexpressionTarget]:
        """One OverexpressionTarget per rxncon component, unless overexpression is disabled."""
        if overexpression_strategy == OverexpressionStrategy.no_overexpression:
            return []
        else:
            return [OverexpressionTarget(component) for component in rxncon_sys.components()]
    def calc_reaction_rules() -> None:
        """Calculate the rules of reaction targets: the component factor AND the contingencies."""
        for reaction_target in reaction_targets:
            # A reaction fires iff all its LHS components are present AND its contingencies hold.
            components = (component_presence_factor[x] for x in reaction_target.components_lhs)
            component_factor = Intersection(*components)  # type: VennSet[StateTarget]
            reaction_rules.append(UpdateRule(reaction_target, Intersection(
                component_factor, reaction_target.contingency_factor).to_simplified_set()))
    def calc_state_rules() -> None:
        """Calculate the rules of the state targets, includes smoothing. For details see our paper.

        Overall shape of a state rule (sigma):
            synthesised  OR  (not degraded AND components present AND (produced OR (current AND not consumed)))
        where pi covers production and kappa covers consumption."""
        def synthesis_factor(state_target: StateTarget) -> VennSet[Target]:
            # Union of (a) reactions that synthesise the state directly and (b) producing
            # reactions combined with a synthesis source for at least one consumed state.
            fac = EmptySet()  # type: VennSet[Target]
            for rxn in (x for x in reaction_targets if x.synthesises(state_target)):
                fac = Union(fac, ValueSet(rxn))
            for prod_rxn in (x for x in reaction_targets if x.produces(state_target)):
                sources = []
                for source in prod_rxn.consumed_targets:
                    sources.append([source] + [x for x in reaction_targets if x.synthesises(source)])
                for source_combi in product(*sources):
                    # At least one source should be synthesised.
                    if all(isinstance(x, StateTarget) for x in source_combi):
                        continue
                    assert any(isinstance(x, ReactionTarget) and x.synthesised_targets for x in source_combi)
                    fac = Union(fac, Intersection(ValueSet(prod_rxn), *(ValueSet(x) for x in source_combi)))
            return fac
        def component_factor(state_target: StateTarget) -> VennSet[StateTarget]:
            # All components carrying the state must be present.
            return Intersection(*(component_presence_factor[x] for x in state_target.components))
        def degradation_factor(state_target: StateTarget) -> VennSet[ReactionTarget]:
            # NOT (any reaction that degrades the state).
            return Complement(Union(*(ValueSet(x) for x in reaction_targets if x.degrades(state_target))))
        def pi(state_target: StateTarget, level: int) -> VennSet[Target]:
            """Production factor: reactions producing the state, each ANDed with its source states."""
            res = EmptySet()  # type: VennSet[Target]
            for r in (x for x in reaction_targets if x.produces(state_target)):
                rxn_term = ValueSet(r)  # type: VennSet[Target]
                for s in (x for x in state_targets if r.consumes(x)):
                    if r.degraded_targets:
                        state_term = ValueSet(s)  # type: VennSet[Target]
                    else:
                        state_term = Intersection(ValueSet(s), degradation_factor(s))
                    # Smoothing: at level > 0, also accept a source that is about to appear.
                    # NOTE(review): the loop variable is unused and each iteration unions the
                    # identical term sigma(s, level - 1); with level <= 1 this runs at most once --
                    # confirm intended for deeper smoothing levels.
                    for l in range(level):
                        state_term = Union(state_term, sigma(s, level - 1))
                    rxn_term = Intersection(rxn_term, state_term)
                res = Union(res, rxn_term)
            return res
        def kappa(state_target: StateTarget, level: int) -> VennSet[Target]:
            """Consumption factor: reactions consuming the state, each ANDed with their source states.

            NOTE(review): the `level` parameter is currently unused here."""
            res = EmptySet()  # type: VennSet[Target]
            for r in (x for x in reaction_targets if x.consumes(state_target)):
                rxn_term = ValueSet(r)  # type: VennSet[Target]
                for s in (x for x in state_targets if r.consumes(x)):
                    rxn_term = Intersection(rxn_term, ValueSet(s), degradation_factor(s))
                res = Union(res, rxn_term)
            return res
        def sigma(state_target: StateTarget, level: int) -> VennSet[Target]:
            # Full update factor for a state, see the rule shape in the docstring above.
            prod_cons_factor = Union(pi(state_target, level),
                                     Intersection(ValueSet(state_target), Complement(kappa(state_target, level))))
            return Union(synthesis_factor(state_target),
                         Intersection(degradation_factor(state_target),
                                      component_factor(state_target),
                                      prod_cons_factor))
        if smoothing_strategy == SmoothingStrategy.no_smoothing:
            level = 0
        elif smoothing_strategy == SmoothingStrategy.smooth_production_sources:
            level = 1
        else:
            raise AssertionError
        for state_target in state_targets:
            state_rules.append(UpdateRule(state_target, sigma(state_target, level).to_simplified_set()))
    def update_state_rules_with_knockouts(knockout_strategy: KnockoutStrategy) -> None:
        """AND a 'not knocked out' guard into each selected state rule (neutral-only or all)."""
        if knockout_strategy == KnockoutStrategy.no_knockout:
            return
        elif knockout_strategy in (KnockoutStrategy.knockout_all_states, KnockoutStrategy.knockout_neutral_states):
            for state_rule in state_rules:
                assert isinstance(state_rule.target, StateTarget)
                if knockout_strategy == KnockoutStrategy.knockout_neutral_states and not state_rule.target.is_neutral:
                    continue
                # Knocking out ANY component of the state kills the state.
                knockout_factor = Complement(
                    Union(*(ValueSet(KnockoutTarget(component)) for component in state_rule.target.components)))
                state_rule.factor = Intersection(knockout_factor, state_rule.factor)
    def update_state_rules_with_overexpressions(overexpression_strategy: OverexpressionStrategy) -> None:
        """OR an 'overexpressed' escape hatch into each selected state rule (neutral-only or all)."""
        if overexpression_strategy == OverexpressionStrategy.no_overexpression:
            return
        elif overexpression_strategy in (OverexpressionStrategy.overexpress_all_states,
                                         OverexpressionStrategy.overexpress_neutral_states):
            for state_rule in state_rules:
                assert isinstance(state_rule.target, StateTarget)
                if overexpression_strategy == OverexpressionStrategy.overexpress_neutral_states and not state_rule.target.is_neutral:
                    continue
                # Overexpression requires ALL components of the state to be overexpressed.
                overexpression_factor = Intersection(
                    *(ValueSet(OverexpressionTarget(component)) for component in state_rule.target.components))
                state_rule.factor = Union(overexpression_factor, state_rule.factor)
    def calc_knockout_rules() -> None:
        # Knockout switches simply hold their own value (identity update rule).
        for knockout_target in knockout_targets:
            knockout_rules.append(UpdateRule(knockout_target, ValueSet(knockout_target)))
    def calc_overexpression_rules() -> None:
        # Overexpression switches simply hold their own value (identity update rule).
        for overexpression_target in overexpression_targets:
            overexpression_rules.append(UpdateRule(overexpression_target, ValueSet(overexpression_target)))
def update_input_output_rules() -> None:
"""If an Input state and an Output reaction share the same name [BLA], they are assumed
to refer to the same global quantity. Therefore the update rule for the state (which was trivial),
becomes the update rule for the reaction."""
to_delete = [] # type: List[UpdateRule]
for reaction_rule in reaction_rules:
for state_rule in state_rules:
if (reaction_rule.target.is_output and state_rule.target.is_input and # type: ignore
str(reaction_rule.target) == str(state_rule.target)):
state_rule.factor = reaction_rule.factor
to_delete.append(reaction_rule)
for rule_to_delete in to_delete:
reaction_targets.remove(rule_to_delete.target)
reaction_rules.remove(rule_to_delete)
    # --- Assemble the model: targets first, then rules, then post-processing passes. ---
    component_presence_factor, component_state_targets = calc_component_presence_factors()
    state_targets = [StateTarget(x) for x in rxncon_sys.states]  # type: List[StateTarget]
    state_targets += component_state_targets
    # Degradation handling happens in three ordered passes over the reaction targets.
    reaction_targets = calc_reaction_targets_with_dnf_contingencies(k_plus_strict, k_minus_strict)
    reaction_targets = update_degs_add_component_states(reaction_targets, component_state_targets)
    reaction_targets = update_degs_add_contingent_states(reaction_targets)
    reaction_targets = update_degs_add_interaction_state_partner(reaction_targets)
    reaction_targets = update_syns_with_component_states(reaction_targets, component_state_targets)
    knockout_targets = calc_knockout_targets(knockout_strategy)
    overexpression_targets = calc_overexpression_targets(overexpression_strategy)
    # Rule lists are filled in-place by the calc_* closures below.
    reaction_rules = []  # type: List[UpdateRule]
    state_rules = []  # type: List[UpdateRule]
    knockout_rules = []  # type: List[UpdateRule]
    overexpression_rules = []  # type: List[UpdateRule]
    calc_reaction_rules()
    calc_state_rules()
    update_state_rules_with_knockouts(knockout_strategy)
    update_state_rules_with_overexpressions(overexpression_strategy)
    calc_knockout_rules()
    calc_overexpression_rules()
    # Merge Output reactions into same-named Input states (may delete reaction targets/rules).
    update_input_output_rules()
    return BooleanModel(state_targets + reaction_targets + knockout_targets + overexpression_targets,  # type: ignore
                        reaction_rules + state_rules + knockout_rules + overexpression_rules,
                        initial_conditions(reaction_targets, state_targets, knockout_targets, overexpression_targets))
|
rxncon/rxncon
|
rxncon/simulation/boolean/boolean_model.py
|
Python
|
lgpl-3.0
| 35,991 | 0.005057 |
"""PLoS-API-harvester
=================
<p>To run "harvester.py" please follow the instructions:</p>
<ol>
<li>Create an account on <a href="http://register.plos.org/ambra-registration/register.action">PLOS API</a></li>
<li>Sign in <a href="http://alm.plos.org/">here</a> and click on your account name. Retrieve your API key.</li>
<li>Create a new file in the folder named "settings.py". In the file, put<br>
<code>API_KEY = (your API key)</code></li>
</ol>
Sample API query: http://api.plos.org/search?q=publication_date:[2015-01-30T00:00:00Z%20TO%202015-02-02T00:00:00Z]&api_key=ayourapikeyhere&rows=999&start=0
"""
from __future__ import unicode_literals
import logging
from datetime import date, timedelta
from lxml import etree
from scrapi import requests
from scrapi import settings
from scrapi.base import XMLHarvester
from scrapi.linter.document import RawDocument
from scrapi.base.helpers import default_name_parser, build_properties, compose, single_result, date_formatter
logger = logging.getLogger(__name__)
try:
from scrapi.settings import PLOS_API_KEY
except ImportError:
PLOS_API_KEY = None
logger.error('No PLOS_API_KEY found, PLoS will always return []')
class PlosHarvester(XMLHarvester):
    """Harvests article metadata from the PLoS Solr search API.

    Queries by publication-date window, pages through results in chunks of
    MAX_ROWS_PER_REQUEST, and keeps only rows that have an abstract or authors."""
    short_name = 'plos'
    long_name = 'Public Library of Science'
    url = 'http://www.plos.org/'
    namespaces = {}
    # The PLoS API caps a single request at 999 rows.
    MAX_ROWS_PER_REQUEST = 999
    BASE_URL = 'http://api.plos.org/search'
    def fetch_rows(self, start_date, end_date):
        """Yield every <doc> element matching the date window, one request per page."""
        query = 'publication_date:[{}T00:00:00Z TO {}T00:00:00Z]'.format(start_date, end_date)
        # First request with rows=0 only fetches the total hit count.
        resp = requests.get(self.BASE_URL, params={
            'q': query,
            'rows': '0',
            'api_key': PLOS_API_KEY,
        })
        total_rows = etree.XML(resp.content).xpath('//result/@numFound')
        total_rows = int(total_rows[0]) if total_rows else 0
        current_row = 0
        while current_row < total_rows:
            # throttle=5 is handled by the scrapi requests wrapper (rate limiting).
            response = requests.get(self.BASE_URL, throttle=5, params={
                'q': query,
                'start': current_row,
                'api_key': PLOS_API_KEY,
                'rows': self.MAX_ROWS_PER_REQUEST,
            })
            for doc in etree.XML(response.content).xpath('//doc'):
                yield doc
            current_row += self.MAX_ROWS_PER_REQUEST
    def harvest(self, start_date=None, end_date=None):
        """Return RawDocuments for the window [start_date, end_date]; [] without an API key."""
        start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
        end_date = end_date or date.today()
        if not PLOS_API_KEY:
            return []
        return [
            RawDocument({
                'filetype': 'xml',
                'source': self.short_name,
                'doc': etree.tostring(row),
                'docID': row.xpath("str[@name='id']")[0].text,
            })
            for row in
            self.fetch_rows(start_date.isoformat(), end_date.isoformat())
            # Rows without abstract AND without authors are skipped.
            if row.xpath("arr[@name='abstract']")
            or row.xpath("str[@name='author_display']")
        ]
    # Declarative mapping from the Solr XML response to the normalized scrapi document.
    schema = {
        'uris': {
            'canonicalUri': ('//str[@name="id"]/node()', compose('http://dx.doi.org/{}'.format, single_result)),
        },
        'contributors': ('//arr[@name="author_display"]/str/node()', default_name_parser),
        # NOTE(review): field name 'publication_data' looks like a typo for 'publication_date';
        # if so, providerUpdatedDateTime never resolves -- confirm against the PLoS Solr schema.
        'providerUpdatedDateTime': ('//date[@name="publication_data"]/node()', compose(date_formatter, single_result)),
        'title': ('//str[@name="title_display"]/node()', single_result),
        'description': ('//arr[@name="abstract"]/str/node()', single_result),
        'publisher': {
            'name': ('//str[@name="journal"]/node()', single_result)
        },
        'otherProperties': build_properties(
            ('eissn', '//str[@name="eissn"]/node()'),
            ('articleType', '//str[@name="article_type"]/node()'),
            ('score', '//float[@name="score"]/node()')
        )
    }
|
jeffreyliu3230/scrapi
|
scrapi/harvesters/plos.py
|
Python
|
apache-2.0
| 3,868 | 0.002068 |
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from oauth_access.access import OAuthAccess
from oauth_access.exceptions import MissingToken
def oauth_login(request, service,
        redirect_field_name="next", redirect_to_session_key="redirect_to"):
    """Start the OAuth flow for `service` and redirect the user to the provider.

    For OAuth 1 providers a request token is fetched and stashed in the session;
    Facebook (OAuth 2) needs no request token. The post-auth redirect target is
    remembered in the session under `redirect_to_session_key`."""
    access = OAuthAccess(service)
    if service == "facebook":
        request_token = None
    else:
        request_token = access.unauthorized_token()
        request.session["%s_unauth_token" % service] = request_token.to_string()
    if hasattr(request, "session"):
        request.session[redirect_to_session_key] = request.GET.get(redirect_field_name)
    return HttpResponseRedirect(access.authorization_url(request_token))
def oauth_callback(request, service):
    """Handle the provider's redirect back after the user authorized us.

    Validates the token returned by the provider against the unauthorized
    token that oauth_login stored in the session.  On success, delegates
    to the service callback's redirect; on failure, renders an error page
    with either "token_missing" or "token_mismatch" in the context.
    """
    ctx = RequestContext(request)
    access = OAuthAccess(service)
    unauth_token = request.session.get("%s_unauth_token" % service, None)
    try:
        auth_token = access.check_token(unauth_token, request.GET)
    except MissingToken:
        ctx.update({"error": "token_missing"})
    else:
        if auth_token:
            cback = access.callback(request, access, auth_token)
            return cback.redirect()
        else:
            # @@@ not nice for OAuth 2
            ctx.update({"error": "token_mismatch"})
    return render_to_response("oauth_access/oauth_error.html", ctx)
def finish_signup(request, service):
    """Delegate completion of post-OAuth signup to the service's callback."""
    return OAuthAccess(service).callback.finish_signup(request, service)
|
DraXus/andaluciapeople
|
oauth_access/views.py
|
Python
|
agpl-3.0
| 1,715 | 0.004665 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import socket
import subprocess
import sys
import urlparse
from telemetry import util
class TemporaryHTTPServer(object):
  """Serves a local directory over HTTP for the lifetime of a test.

  Spawns `python -m SimpleHTTPServer` on a free local port with its output
  discarded, then creates a forwarder so the browser under test can reach
  it.  Close() (also invoked by __exit__ and __del__) tears everything
  down idempotently.
  """

  def __init__(self, browser_backend, path):
    self._server = None
    self._devnull = None
    self._path = path
    self._forwarder = None

    self._host_port = util.GetAvailableLocalPort()

    assert os.path.exists(path), path
    assert os.path.isdir(path), path

    self._devnull = open(os.devnull, 'w')
    self._server = subprocess.Popen(
        [sys.executable, '-m', 'SimpleHTTPServer', str(self._host_port)],
        cwd=self._path,
        stdout=self._devnull, stderr=self._devnull)

    self._forwarder = browser_backend.CreateForwarder(
        util.PortPair(self._host_port,
                      browser_backend.GetRemotePort(self._host_port)))

    def IsServerUp():
      # connect_ex returns 0 once the server accepts connections.
      # Fix: close the probe socket after every poll -- the original
      # leaked one file descriptor per attempt.
      probe = socket.socket()
      try:
        return not probe.connect_ex(('localhost', self._host_port))
      finally:
        probe.close()
    util.WaitFor(IsServerUp, 5)

  @property
  def path(self):
    return self._path

  def __enter__(self):
    return self

  def __exit__(self, *args):
    self.Close()

  def __del__(self):
    self.Close()

  def Close(self):
    # Tear down in reverse order of creation; each attribute is nulled so
    # repeated calls (e.g. __exit__ then __del__) are safe.
    if self._forwarder:
      self._forwarder.Close()
      self._forwarder = None
    if self._server:
      self._server.kill()
      self._server = None
    if self._devnull:
      self._devnull.close()
      self._devnull = None

  @property
  def url(self):
    return self._forwarder.url

  def UrlOf(self, path):
    # Joins relative to the forwarder's base URL.
    return urlparse.urljoin(self.url, path)
|
nacl-webkit/chrome_deps
|
tools/telemetry/telemetry/temporary_http_server.py
|
Python
|
bsd-3-clause
| 1,660 | 0.010843 |
# coding: utf-8
from __future__ import unicode_literals
"""
This module provides utility classes for io operations.
"""
__author__ = "Shyue Ping Ong, Rickard Armiento, Anubhav Jain, G Matteo, Ioannis Petousis"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import re
from monty.io import zopen
def clean_lines(string_list, remove_empty_lines=True):
    """
    Strip inline '#' comments and surrounding whitespace from strings.

    Args:
        string_list: Iterable of strings to clean.
        remove_empty_lines: If True (default), strings that are empty
            after cleaning are dropped instead of yielded.

    Yields:
        Cleaned strings with comments and surrounding whitespace removed.
    """
    for raw in string_list:
        # partition returns the full string as the head when '#' is absent,
        # so this covers both the commented and uncommented cases.
        cleaned = raw.partition('#')[0].strip()
        if cleaned or not remove_empty_lines:
            yield cleaned
def micro_pyawk(filename, search, results=None, debug=None, postdebug=None):
    """
    Small awk-mimicking search routine.

    'filename' is the file to search through.
    'search' is the "search program", a list of lists/tuples with 3 elements;
    i.e. [[regex,test,run],[regex,test,run],...]
    'results' is an object that your search program will have access to for
    storing results.

    Here regex is either a Regex object, or a string that we compile into a
    Regex. test and run are callable objects.

    This function goes through each line in filename, and if regex matches
    that line *and* test(results,line)==True (or test == None) we execute
    run(results,match), where match is the match object from running
    Regex.match.

    The default results is an empty dictionary. Passing a results object
    lets you interact with it in run() and test(). Hence, in many occasions
    it is thus clever to use results=self.

    NOTE: the 'search' list is mutated in place -- any string pattern is
    replaced by its compiled regex object, and callers see that change.

    Author: Rickard Armiento, Ioannis Petousis

    Returns:
        results
    """
    if results is None:
        results = {}

    # Compile strings into regexs (in place -- see NOTE in the docstring).
    for entry in search:
        entry[0] = re.compile(entry[0])

    # zopen transparently handles gzip/bz2 compressed files.
    with zopen(filename, "rt") as f:
        for line in f:
            for entry in search:
                match = re.search(entry[0], line)
                if match and (entry[1] is None
                              or entry[1](results, line)):
                    if debug is not None:
                        debug(results, match)
                    entry[2](results, match)
                    if postdebug is not None:
                        postdebug(results, match)

    return results
|
ctoher/pymatgen
|
pymatgen/util/io_utils.py
|
Python
|
mit
| 2,727 | 0.000367 |
import pygame
from pygame.locals import *
import constants as c
class Enemy:
    """An on-screen enemy; the "vertical" pattern tracks the avatar's height."""

    def __init__(self, x, y, health, movement_pattern, direction, img):
        """Store position, hit points, movement behaviour and sprite."""
        self.x = x
        self.y = y
        self.health = health
        self.movement_pattern = movement_pattern
        self.direction = direction
        self.img = img

    def update(self, platforms_list, WORLD, avatar):
        """Advance one frame: move according to movement_pattern, then draw."""
        if self.movement_pattern == "vertical":
            # Move 2px in the current direction; any other direction
            # (e.g. "stay") means no movement this frame.
            self.y += {"up": -2, "down": 2}.get(self.direction, 0)
            # Re-aim at the avatar, with a 30px band around its height in
            # which the enemy holds still.
            if self.y > avatar.y + 30:
                self.direction = "up"
            elif self.y < avatar.y - 30:
                self.direction = "down"
            else:
                self.direction = "stay"
        self.display(WORLD)

    def display(self, WORLD):
        """Blit the sprite onto WORLD at the enemy's current position."""
        WORLD.blit(self.img, (self.x, self.y))
|
naomi-/exploration
|
Enemy.py
|
Python
|
mit
| 1,023 | 0 |
# -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ CZ Form Fields
tests = r"""
# CZPostalCodeField #########################################################
>>> from django.contrib.localflavor.cz.forms import CZPostalCodeField
>>> f = CZPostalCodeField()
>>> f.clean('84545x')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
>>> f.clean('91909')
u'91909'
>>> f.clean('917 01')
u'91701'
>>> f.clean('12345')
u'12345'
>>> f.clean('123456')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
>>> f.clean('1234')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
>>> f.clean('123 4')
Traceback (most recent call last):
...
ValidationError: [u'Enter a postal code in the format XXXXX or XXX XX.']
# CZRegionSelect ############################################################
>>> from django.contrib.localflavor.cz.forms import CZRegionSelect
>>> w = CZRegionSelect()
>>> w.render('regions', 'TT')
u'<select name="regions">\n<option value="PR">Prague</option>\n<option value="CE">Central Bohemian Region</option>\n<option value="SO">South Bohemian Region</option>\n<option value="PI">Pilsen Region</option>\n<option value="CA">Carlsbad Region</option>\n<option value="US">Usti Region</option>\n<option value="LB">Liberec Region</option>\n<option value="HK">Hradec Region</option>\n<option value="PA">Pardubice Region</option>\n<option value="VY">Vysocina Region</option>\n<option value="SM">South Moravian Region</option>\n<option value="OL">Olomouc Region</option>\n<option value="ZL">Zlin Region</option>\n<option value="MS">Moravian-Silesian Region</option>\n</select>'
# CZBirthNumberField ########################################################
>>> from django.contrib.localflavor.cz.forms import CZBirthNumberField
>>> f = CZBirthNumberField()
>>> f.clean('880523/1237')
u'880523/1237'
>>> f.clean('8805231237')
u'8805231237'
>>> f.clean('880523/000')
u'880523/000'
>>> f.clean('880523000')
u'880523000'
>>> f.clean('882101/0011')
u'882101/0011'
>>> f.clean('880523/1237', 'm')
u'880523/1237'
>>> f.clean('885523/1231', 'f')
u'885523/1231'
>>> f.clean('123456/12')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('123456/12345')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('12345612')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('12345612345')
Traceback (most recent call last):
...
ValidationError: [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
>>> f.clean('881523/0000', 'm')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('885223/0000', 'm')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('881223/0000', 'f')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('886523/0000', 'f')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('880523/1239')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('8805231239')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
>>> f.clean('990101/0011')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid birth number.']
# CZICNumberField ########################################################
>>> from django.contrib.localflavor.cz.forms import CZICNumberField
>>> f = CZICNumberField()
>>> f.clean('12345679')
u'12345679'
>>> f.clean('12345601')
u'12345601'
>>> f.clean('12345661')
u'12345661'
>>> f.clean('12345610')
u'12345610'
>>> f.clean('1234567')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IC number.']
>>> f.clean('12345660')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IC number.']
>>> f.clean('12345600')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IC number.']
"""
|
Smarsh/django
|
tests/regressiontests/forms/localflavor/cz.py
|
Python
|
bsd-3-clause
| 4,319 | 0.001158 |
'''
Often used utility functions
Copyright 2020 by Massimo Del Fedele
'''
import sys
import uno
from com.sun.star.beans import PropertyValue
from datetime import date
import calendar
import PyPDF2
'''
ALCUNE COSE UTILI
La finestra che contiene il documento (o componente) corrente:
desktop.CurrentFrame.ContainerWindow
Non cambia nulla se è aperto un dialogo non modale,
ritorna SEMPRE il frame del documento.
desktop.ContainerWindow ritorna un None -- non so a che serva
Per ottenere le top windows, c'è il toolkit...
tk = ctx.ServiceManager.createInstanceWithContext("com.sun.star.awt.Toolkit", ctx)
tk.getTopWindowCount() ritorna il numero delle topwindow
tk.getTopWIndow(i) ritorna una topwindow dell'elenco
tk.getActiveTopWindow () ritorna la topwindow attiva
La topwindow attiva, per essere attiva deve, appunto, essere attiva, indi avere il focus
Se si fa il debug, ad esempio, è probabile che la finestra attiva sia None
Resta quindi SEMPRE il problema di capire come fare a centrare un dialogo sul componente corrente.
Se non ci sono dialoghi in esecuzione, il dialogo creato prende come parent la ContainerWindow(si suppone...)
e quindi viene posizionato in base a quella
Se c'è un dialogo aperto e nell'event handler se ne apre un altro, l'ultimo prende come parent il precedente,
e viene quindi posizionato in base a quello e non alla schermata principale.
Serve quindi un metodo per trovare le dimensioni DELLA FINESTRA PARENT di un dialogo, per posizionarlo.
L'oggetto UnoControlDialog permette di risalire al XWindowPeer (che non serve ad una cippa), alla XView
(che mi fornisce la dimensione del dialogo ma NON la parent...), al UnoControlDialogModel, che fornisce
la proprietà 'DesktopAsParent' che mi dice SOLO se il dialogo è modale (False) o non modale (True)
L'unica soluzione che mi viene in mente è tentare con tk.ActiveTopWindow e, se None, prendere quella del desktop
'''
def getComponentContext():
    '''
    Return the current application's UNO component context.

    Prefers an embedder-injected __global_context__ when present and not
    None; otherwise falls back to uno.getComponentContext().
    '''
    try:
        # __global_context__ may not be defined at all (NameError), which
        # the broad except below deliberately absorbs.
        if __global_context__ is not None:
            return __global_context__
        return uno.getComponentContext()
    except Exception:
        return uno.getComponentContext()
def getDesktop():
    '''
    Return the LibreOffice Desktop service of the current application.
    '''
    context = getComponentContext()
    manager = context.ServiceManager
    return manager.createInstanceWithContext(
        "com.sun.star.frame.Desktop", context)
def getDocument():
    '''
    Return the currently active document component.
    '''
    workspace = getDesktop()
    # Best-effort activation of the current frame: works around UNO
    # sometimes picking the wrong window (e.g. while debugging).
    try:
        workspace.getCurrentFrame().activate()
    except Exception:
        pass
    return workspace.getCurrentComponent()
def getServiceManager():
    '''
    Return the UNO service manager of the current component context.
    '''
    context = getComponentContext()
    return context.ServiceManager
def createUnoService(serv):
    '''
    Instantiate and return the UNO service named by `serv`.
    '''
    manager = getComponentContext().getServiceManager()
    return manager.createInstance(serv)
def MRI(target):
    '''
    Open the MRI introspection tool on `target`.

    Requires the MRI extension ("mytools.Mri") to be installed.
    '''
    context = getComponentContext()
    inspector = context.ServiceManager.createInstanceWithContext(
        "mytools.Mri", context)
    inspector.inspect(target)
def isLeenoDocument():
    '''
    Return True when the active document looks like a LeenO document,
    i.e. it is a spreadsheet containing a sheet named 'S2'.
    '''
    try:
        doc = getDocument()
        return doc.getSheets().hasByName('S2')
    except Exception:
        # No document open, or not a spreadsheet at all.
        return False
def DisableDocumentRefresh(oDoc):
    '''
    Disable refresh/repaint of oDoc to speed up long-running procedures.
    Pair every call with EnableDocumentRefresh.
    '''
    oDoc.lockControllers()
    oDoc.addActionLock()
def EnableDocumentRefresh(oDoc):
    '''
    Re-enable refresh/repaint of oDoc (undoes DisableDocumentRefresh,
    releasing the locks in reverse order).
    '''
    oDoc.removeActionLock()
    oDoc.unlockControllers()
def getGlobalVar(name):
    '''
    Read an application-wide global previously stored by setGlobalVar.
    Returns None when the variable was never set.
    '''
    # __builtins__ is the builtins module in __main__ but a plain dict in
    # imported modules; normalize to the underlying dict either way.
    if isinstance(__builtins__, type(sys)):
        namespace = __builtins__.__dict__
    else:
        namespace = __builtins__
    return namespace.get('LEENO_GLOBAL_' + name)
def setGlobalVar(name, value):
    '''
    Store `value` as the application-wide global `name`
    (readable back via getGlobalVar).
    '''
    # See getGlobalVar for why __builtins__ needs normalizing to a dict.
    if isinstance(__builtins__, type(sys)):
        namespace = __builtins__.__dict__
    else:
        namespace = __builtins__
    namespace['LEENO_GLOBAL_' + name] = value
def initGlobalVars(dict):
    '''
    Store every key/value pair of `dict` as an application-wide global,
    equivalent to calling setGlobalVar for each item.
    '''
    if isinstance(__builtins__, type(sys)):
        namespace = __builtins__.__dict__
    else:
        namespace = __builtins__
    for key, value in dict.items():
        namespace['LEENO_GLOBAL_' + key] = value
def dictToProperties(values, unoAny=False):
    '''
    Convert a dictionary into a tuple of UNO PropertyValue structs.

    When unoAny is True, the tuple is wrapped in a UNO Any of type
    []com.sun.star.beans.PropertyValue; otherwise a plain Python tuple
    is returned.
    '''
    props = tuple(PropertyValue(Name=key, Value=val)
                  for key, val in values.items())
    if unoAny:
        return uno.Any('[]com.sun.star.beans.PropertyValue', props)
    return props
def daysInMonth(dat):
    '''
    Return the number of days in the month containing `dat`.

    Bug fix: the previous implementation built a date in the following
    month with the same day-of-month, which raised ValueError whenever
    that day does not exist there (e.g. January 29-31, since February has
    no day 29-31 in most years).  calendar.monthrange handles every
    month correctly, including February in leap years.
    '''
    return calendar.monthrange(dat.year, dat.month)[1]
def firstWeekDay(dat):
    '''
    Return the weekday of the first day of dat's month (Monday == 0).
    '''
    return date(dat.year, dat.month, 1).weekday()
# Italian weekday abbreviations, Monday first (index 0 == Monday).
DAYNAMES = ['Lun', 'Mar', 'Mer', 'Gio', 'Ven', 'Sab', 'Dom']
# Italian month names, index 0 == January (date2String indexes month - 1).
MONTHNAMES = [
    'Gennaio', 'Febbraio', 'Marzo', 'Aprile',
    'Maggio', 'Giugno', 'Luglio', 'Agosto',
    'Settembre', 'Ottobre', 'Novembre', 'Dicembre'
]
def date2String(dat, fmt = 0):
    '''
    Format a date as a string.

    fmt = 0   "25 Febbraio 2020"  (Italian month name)
    fmt = 1   "25/02/2020"
    fmt = 2   "25-02-2020"
    fmt = 3   "25.02.2020"

    The day is not zero-padded; the numeric month always is.
    '''
    day = str(dat.day)
    month = str(dat.month).zfill(2)
    year = str(dat.year)
    separators = {1: '/', 2: '-', 3: '.'}
    if fmt in separators:
        return separators[fmt].join((day, month, year))
    # Any other fmt value falls back to the long Italian form.
    return day + ' ' + MONTHNAMES[dat.month - 1] + ' ' + year
def string2Date(s):
    '''
    Parse a date string written in day/month/year order, separated by
    '.', '/' or '-'.

    A string containing none of those separators yields today's date
    (preserved legacy behaviour).

    Raises:
        ValueError: when a separator is present but the string does not
            split into exactly three numeric fields.  (Previously a bare
            `Exception` was raised with no message; ValueError is a
            subclass, so existing `except Exception` callers still work.)
    '''
    for separator in ('.', '/', '-'):
        if separator in s:
            parts = s.split(separator)
            break
    else:
        return date.today()
    if len(parts) != 3:
        raise ValueError('invalid date string: %r' % (s,))
    day, month, year = (int(part) for part in parts)
    return date(day=day, month=month, year=year)
def countPdfPages(path):
    '''
    Returns the number of pages in a PDF document
    using external PyPDF2 module
    '''
    # The file must stay open while PdfFileReader inspects it, hence the
    # return inside the with-block.
    with open(path, 'rb') as f:
        pdf = PyPDF2.PdfFileReader(f)
        return pdf.getNumPages()
def replacePatternWithField(oTxt, pattern, oField):
    '''
    Replaces a string pattern in a Text object
    (for example '[PATTERN]') with the given field.

    Returns True when at least one occurrence was replaced, else False.
    NOTE(review): the same oField instance is inserted at every
    occurrence -- confirm UNO accepts re-inserting one field object.
    '''
    # pattern may be there many times...
    repl = False
    pos = oTxt.String.find(pattern)
    while pos >= 0:
        #create a cursor
        cursor = oTxt.createTextCursor()
        # use it to select the pattern
        cursor.collapseToStart()
        cursor.goRight(pos, False)
        cursor.goRight(len(pattern), True)
        # remove the pattern from text
        cursor.String = ''
        # insert the field at cursor's position
        cursor.collapseToStart()
        oTxt.insertTextContent(cursor, oField, False)
        # next occurrence of pattern (the string was mutated, so re-scan)
        pos = oTxt.String.find(pattern)
        repl = True
    return repl
|
giuserpe/leeno
|
src/Ultimus.oxt/python/pythonpath/LeenoUtils.py
|
Python
|
lgpl-2.1
| 7,316 | 0.002873 |
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.test.test_calcollections
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test for collections library.
"""
import logging
from mock import Mock, call, patch
from calico.calcollections import SetDelta, MultiDict
from unittest2 import TestCase
_log = logging.getLogger(__name__)
class TestSetDelta(TestCase):
    """Tests SetDelta: buffered add/remove operations applied atomically
    to an underlying set via apply_and_reset()."""
    def setUp(self):
        self.set = set("abc")
        self.delta = SetDelta(self.set)
    def test_add(self):
        self.delta.add("c")
        self.delta.add("d")
        # Only "d" added, "c" was already present.
        self.assertEqual(self.delta.added_entries, set(["d"]))
        # Now apply, should mutate the set.
        self.assertEqual(self.set, set("abc"))
        self.delta.apply_and_reset()
        self.assertEqual(self.set, set("abcd"))
        self.assertEqual(self.delta.added_entries, set())
    def test_remove(self):
        self.delta.remove("c")
        self.delta.remove("d")
        # Only "c" added, "d" was already missing.
        self.assertEqual(self.delta.removed_entries, set(["c"]))
        # Now apply, should mutate the set.
        self.assertEqual(self.set, set("abc"))
        self.delta.apply_and_reset()
        self.assertEqual(self.set, set("ab"))
        self.assertEqual(self.delta.removed_entries, set())
    def test_add_and_remove(self):
        # A remove cancels a buffered add (and vice versa) rather than
        # being recorded in both sets.
        self.delta.add("c")     # No-op, already present.
        self.delta.add("d")     # Put in added set.
        self.delta.add("e")     # Will remain in added set.
        self.delta.remove("c")  # Recorded in remove set.
        self.delta.remove("d")  # Cancels the pending add only.
        self.delta.remove("f")  # No-op.
        self.assertEqual(self.delta.added_entries, set("e"))
        self.assertEqual(self.delta.removed_entries, set("c"))
        self.delta.apply_and_reset()
        self.assertEqual(self.set, set("abe"))
    def test_size(self):
        # resulting_size tracks what len(set) would be after apply.
        self.assertTrue(self.delta.empty)
        self.assertEqual(self.delta.resulting_size, 3)
        self.delta.add("c")     # No-op, already present.
        self.assertEqual(self.delta.resulting_size, 3)
        self.delta.add("d")     # Put in added set.
        self.assertEqual(self.delta.resulting_size, 4)
        self.delta.add("e")     # Will remain in added set.
        self.assertEqual(self.delta.resulting_size, 5)
        self.delta.remove("c")  # Recorded in remove set.
        self.assertEqual(self.delta.resulting_size, 4)
        self.delta.remove("d")  # Cancels the pending add only.
        self.assertEqual(self.delta.resulting_size, 3)
        self.delta.remove("f")  # No-op.
        self.assertEqual(self.delta.resulting_size, 3)
class TestMultiDict(TestCase):
    """Tests MultiDict: a key -> set-of-values index whose keys vanish
    when their last value is discarded."""
    def setUp(self):
        super(TestMultiDict, self).setUp()
        self.index = MultiDict()
    def test_add_single(self):
        self.index.add("k", "v")
        self.assertTrue(self.index.contains("k", "v"))
        self.assertEqual(set(self.index.iter_values("k")),
                         set(["v"]))
    def test_add_remove_single(self):
        self.index.add("k", "v")
        self.index.discard("k", "v")
        self.assertFalse(self.index.contains("k", "v"))
        # Removing the last value removes the key entirely.
        self.assertEqual(self.index._index, {})
    def test_empty(self):
        # An empty index is falsy and iterating a missing key yields nothing.
        self.assertFalse(bool(self.index))
        self.assertEqual(self.index.num_items("k"), 0)
        self.assertEqual(list(self.index.iter_values("k")), [])
    def test_add_multiple(self):
        self.index.add("k", "v")
        self.assertTrue(bool(self.index))
        self.assertEqual(self.index.num_items("k"), 1)
        # Duplicate adds are idempotent.
        self.index.add("k", "v")
        self.assertEqual(self.index.num_items("k"), 1)
        self.index.add("k", "v2")
        self.assertEqual(self.index.num_items("k"), 2)
        self.index.add("k", "v3")
        self.assertEqual(self.index.num_items("k"), 3)
        self.assertIn("k", self.index)
        self.assertNotIn("k2", self.index)
        self.assertTrue(self.index.contains("k", "v"))
        self.assertTrue(self.index.contains("k", "v2"))
        self.assertTrue(self.index.contains("k", "v3"))
        self.assertEqual(self.index._index, {"k": set(["v", "v2", "v3"])})
        self.assertEqual(set(self.index.iter_values("k")),
                         set(["v", "v2", "v3"]))
        self.index.discard("k", "v")
        self.index.discard("k", "v2")
        self.assertTrue(self.index.contains("k", "v3"))
        self.index.discard("k", "v3")
        self.assertEqual(self.index._index, {})
|
TrimBiggs/calico
|
calico/test/test_calcollections.py
|
Python
|
apache-2.0
| 5,048 | 0 |
# -*- coding: iso-8859-5 -*-
# Ʋ³´µ¶
class DummyƲ³´µ¶(object):
def Print(self):
print ('Ʋ³´µ¶')
DummyƲ³´µ¶().Print()
|
fabioz/PyDev.Debugger
|
tests_python/resources/_pydev_coverage_cyrillic_encoding_py3.py
|
Python
|
epl-1.0
| 135 | 0.014815 |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys
import time
from runtime.dbapi.pyalisa.client import AlisaTaksStatus, Client
# waiting task completed
WAIT_INTEVERAL_SEC = 2
# read results while a task completed
READ_RESULTS_BATCH = 20
class Task(object): # noqa: R0205
    """Task encapsulates operations to submit the alisa task.

    Submits SQL or PyODPS code through an Alisa Client and polls the task
    until it completes, either streaming logs (verbose) or quietly.

    Args:
        config(Config): the config for building the task
    """
    def __init__(self, config):
        self.config = config
        self.cli = Client(config)
    def exec_sql(self, code, output=sys.stdout, resultful=False):
        """submit the sql statements to alisa server, write the logs to output

        Args:
            code: sql statements
            resultful: has result
            output: like sys.stdout

        Returns the task results (up to READ_RESULTS_BATCH rows) when
        resultful is True, else an empty list.
        """
        task_id, status = self.cli.create_sql_task(code)
        return self._tracking(task_id, status, output, resultful)
    def exec_pyodps(self, code, args, output=sys.stdout):
        """submit the python code to alisa server, write the logs to output

        Args:
            code: python code
            args: args for python code
            output: such as sys.stdout
        """
        task_id, status = self.cli.create_pyodps_task(code, args)
        return self._tracking(task_id, status, output, False)
    def _tracking(self, task_id, status, output, resultful):
        # Verbose mode streams server-side logs while polling; otherwise
        # poll silently and only surface the final status.
        return self._tracking_with_log(
            task_id, status, output,
            resultful) if self.config.verbose else self._tracking_quietly(
                task_id, status, resultful)
    def _tracking_with_log(self, task_id, status, output, resultful):
        # Poll every WAIT_INTEVERAL_SEC until the client reports the task
        # as completed, echoing logs along the way.
        log_idx = 0
        while not self.cli.completed(status):
            if status in (AlisaTaksStatus.ALISA_TASK_WAITING,
                          AlisaTaksStatus.ALISA_TASK_ALLOCATE):
                output.write('waiting for resources')
            elif status == AlisaTaksStatus.ALISA_TASK_RUNNING and log_idx >= 0:
                self.cli.read_logs(task_id, log_idx, output)
            time.sleep(WAIT_INTEVERAL_SEC)
            status = self.cli.get_status(task_id)
        if status == AlisaTaksStatus.ALISA_TASK_EXPIRED:
            # NOTE(review): only a message is written here; the generic
            # raise below still fires for EXPIRED -- confirm intended.
            output.write('timeout while waiting for resources')
        else:
            # assert log_idx>=0
            self.cli.read_logs(task_id, log_idx, output)
        # assert log_idex<0
        if status == AlisaTaksStatus.ALISA_TASK_COMPLETED:
            return self.cli.get_results(
                task_id, READ_RESULTS_BATCH) if resultful else []
        raise Exception('task={}, invalid status={}'.format(task_id, status))
    def _tracking_quietly(self, task_id, status, resultful):
        # Same polling loop as above, but without any log output.
        while not self.cli.completed(status):
            time.sleep(WAIT_INTEVERAL_SEC)
            status = self.cli.get_status(task_id)
        if status != AlisaTaksStatus.ALISA_TASK_COMPLETED:
            raise Exception(
                'task({}) status is {} which means incompleted.'.format(
                    task_id, status))
        if resultful:
            return self.cli.get_results(task_id, READ_RESULTS_BATCH)
        return []
|
sql-machine-learning/sqlflow
|
python/runtime/dbapi/pyalisa/task.py
|
Python
|
apache-2.0
| 3,699 | 0 |
#!/usr/bin/python2.6
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
"""type link."""
__author__ = 'bneutra@google.com (Brendan Neutra)'
# thanks warren for these dimetests
import google3
from pymql.mql import error
from pymql.test import mql_fixture
class MQLTest(mql_fixture.MQLTest):
"""type link tests."""
  def setUp(self):
    # Queries run against recorded responses, presumably replayed from
    # this mock file rather than a live graph -- verify fixture path.
    self.SetMockPath('data/type_link.yaml')
    super(MQLTest, self).setUp()
    # self.env (set after base setUp) pins evaluation to a fixed
    # as-of time so results stay stable over time.
    self.env = {'as_of_time': '2010-05-01'}
def testLinkMasterProperty(self):
"""link:null (master_property) of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": null,
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": "/people/person/place_of_birth",
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testLinkMasterValueProperty(self):
"""link:null (master_property) of value property."""
query = """
{
"/people/person/date_of_birth": {
"link": null,
"value": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/date_of_birth": {
"link": "/people/person/date_of_birth",
"value": "1941-05-24"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkMasterPropertyOfObjProperty(self):
"""read /type/link/master_property of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"master_property": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"master_property": "/people/person/place_of_birth"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTypeOfObjProperty(self):
"""read /type/link/type of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"type": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"type": "/type/link"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkReverseOfObjProperty(self):
"""read /type/link/reverse of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"reverse": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"reverse": false
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkAttributionOfObjProperty(self):
"""read /type/link/attribution of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"attribution": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"attribution": "/user/cvolkert"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkCreatorOfObjProperty(self):
"""read /type/link/creator of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"creator": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"creator": "/user/cvolkert"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTimestampOfObjProperty(self):
"""read /type/link/timestamp of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"timestamp": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"timestamp": "2007-10-23T09:07:43.0024Z"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkSourceOfObjProperty(self):
"""read /type/link/source of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"source": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"source": "Bob Dylan"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTargetOfObjProperty(self):
"""read /type/link/target of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"target": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"target": "Duluth"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTargetOfObjArrayProperty(self):
"""read /type/link/target of obj array property."""
query = """
{
"/people/person/children": [{
"link": {
"source": [
{
"id": null
}
]
},
"id": null
}],
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/children": [
{
"id": "/en/jakob_dylan",
"link": {
"source": [{
"id": "/en/jakob_dylan"
}]
}
},
{
"id": "/en/jesse_dylan",
"link": {
"source": [{
"id": "/en/jesse_dylan"
}]
}
},
{
"id": "/en/desiree_gabrielle_dennis_dylan",
"link": {
"source": [{
"id": "/en/desiree_gabrielle_dennis_dylan"
}]
}
},
{
"id": "/en/maria_dylan",
"link": {
"source": [{
"id": "/en/maria_dylan"
}]
}
},
{
"id": "/en/sam_dylan",
"link": {
"source": [{
"id": "/en/sam_dylan"
}]
}
},
{
"id": "/en/anna_dylan",
"link": {
"source": [{
"id": "/en/anna_dylan"
}]
}
}
],
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTargetOfValueProperty(self):
"""read /type/link/target of value property."""
query = """
{
"/people/person/date_of_birth": {
"link": {
"target": null
},
"value": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/date_of_birth": {
"link": {
"target": null
},
"value": "1941-05-24"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTargetValueOfObjProperty(self):
"""read /type/link/target_value of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"target_value": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"target_value": null
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTargetValueOfValueProperty(self):
"""read /type/link/target_value of value property."""
query = """
{
"/people/person/date_of_birth": {
"link": {
"target_value": null
},
"value": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/date_of_birth": {
"link": {
"target_value": "1941-05-24"
},
"value": "1941-05-24"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkOperationOfObjProperty(self):
"""read /type/link/operation of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"operation": null
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"operation": "insert"
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkOperationOfValueProperty(self):
"""read /type/link/operation of value property."""
query = """
{
"/people/person/date_of_birth": {
"link": {
"operation": null
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/date_of_birth": {
"link": {
"operation": "insert"
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkValidOfObjProperty(self):
"""read /type/link/valid of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"valid": null
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"valid": true
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkValidOfValueProperty(self):
"""read /type/link/valid of value property."""
query = """
{
"/people/person/date_of_birth": {
"link": {
"valid": null
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/date_of_birth": {
"link": {
"valid": true
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkMasterPropertyOfObjProperty(self):
"""constrain /type/link/master_property of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"master_property": "/people/person/place_of_birth"
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"master_property": "/people/person/place_of_birth"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkTypeOfObjProperty(self):
"""constrain /type/link/type of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"type": "/type/link"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"type": "/type/link"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkReverseOfObjProperty(self):
    """constrain /type/link/reverse of obj property."""
    # 'reverse' is read-only inside a link clause, so supplying a
    # concrete value must raise a parse error instead of acting as a
    # match constraint.
    query = """
    {
      "/people/person/place_of_birth": {
        "link": {
          "reverse": false
        },
        "id": "/en/duluth"
      },
      "id": "/en/bob_dylan"
    }
    """
    exc_response = (
        error.MQLParseError,
        "Can only ask for the value of 'reverse', not specify it"
    )
    self.DoQuery(query, exc_response=exc_response)
def testConstrainTypeLinkAttributionOfObjProperty(self):
"""constrain /type/link/attribution of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"attribution": "/user/cvolkert"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"attribution": "/user/cvolkert"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkCreatorOfObjProperty(self):
"""constrain /type/link/creator of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"creator": "/user/cvolkert"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"creator": "/user/cvolkert"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkTimestampOfObjProperty(self):
"""constrain /type/link/timestamp of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"timestamp": "2007-10-23T09:07:43.0024Z"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"timestamp": "2007-10-23T09:07:43.0024Z"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkSourceOfObjProperty(self):
"""constrain /type/link/source of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"source": "Bob Dylan"
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"source": "Bob Dylan"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkTargetOfObjProperty(self):
"""constrain /type/link/target of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"target": "Duluth"
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"target": "Duluth"
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkOperationOfObjProperty(self):
"""constrain /type/link/operation of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"operation": "insert"
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"operation": "insert"
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeLinkValidOfObjProperty(self):
"""constrain /type/link/valid of obj property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"valid": true
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"valid": true
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testSeveralTypeLinkPropertiesOfObjProperty(self):
    """several /type/link properties of obj property."""
    # Reads all link reflection fields in one query.  For an object
    # property, "target_value" is expected back as null (the literal
    # slot is unused when the link points at a node).
    query = """
    {
      "/people/person/place_of_birth": {
        "link": {
          "attribution": null,
          "reverse": null,
          "timestamp": null,
          "source": null,
          "target": null,
          "master_property": null,
          "type": null,
          "target_value": null
        },
        "id": null
      },
      "id": "/en/bob_dylan"
    }
    """
    exp_response = """
    {
      "/people/person/place_of_birth": {
        "link": {
          "attribution": "/user/cvolkert",
          "reverse": false,
          "timestamp": "2007-10-23T09:07:43.0024Z",
          "source": "Bob Dylan",
          "target": "Duluth",
          "master_property": "/people/person/place_of_birth",
          "type": "/type/link",
          "target_value": null
        },
        "id": "/en/duluth"
      },
      "id": "/en/bob_dylan"
    }
    """
    self.DoQuery(query, exp_response=exp_response)
def testSeveralTypeLinkPropertiesOfValueProperty(self):
"""several /type/link properties of value property."""
query = """
{
"/people/person/date_of_birth": {
"link": {
"attribution": null,
"reverse": null,
"timestamp": null,
"source": null,
"target": null,
"master_property": null,
"type": null,
"target_value": null
},
"value": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/date_of_birth": {
"link": {
"attribution": "/user/mwcl_musicbrainz",
"reverse": false,
"timestamp": "2006-12-10T16:16:13.0316Z",
"source": "Bob Dylan",
"target": null,
"master_property": "/people/person/date_of_birth",
"type": "/type/link",
"target_value": "1941-05-24"
},
"value": "1941-05-24"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadStructuredTypeLinkMasterProperty(self):
"""read structured /type/link/master_property."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"master_property": {
"name": null
}
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"master_property": {
"name": "Place of birth"
}
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadStructuredTypeLinkType(self):
"""read structured /type/link/type."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"type": {
"id": null
}
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exc_response = (
error.MQLParseError,
"Can't expand 'type' in a link clause (it is fixed as '/type/link')"
)
self.DoQuery(query, exc_response=exc_response)
def testReadStructuredTypeLinkReverse(self):
"""read structured /type/link/reverse."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"reverse": {
"value": null
}
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"reverse": {
"value": false
}
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadStructuredTypeLinkAttribution(self):
"""read structured /type/link/attribution."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"attribution": {
"id": null
}
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"attribution": {
"id": "/user/cvolkert"
}
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadStructuredTypeLinkCreator(self):
"""read structured /type/link/creator."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"creator": {
"id": null
}
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"creator": {
"id": "/user/cvolkert"
}
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadStructuredTypeLinkSource(self):
"""read structured /type/link/source."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"source": {
"id": null
}
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"source": {
"id": "/en/bob_dylan"
}
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadStructuredTypeLinkTarget(self):
"""read structured /type/link/target."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"target": {
"id": null
}
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/place_of_birth": {
"link": {
"target": {
"id": "/en/duluth"
}
},
"id": "/en/duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkMasterPropertyOfReverseProperty(self):
"""read /type/link/master_property of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"master_property": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"master_property": "/people/ethnicity/people"
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTypeOfReverseProperty(self):
"""read /type/link/type of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"type": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"type": "/type/link"
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkReverseOfReverseProperty(self):
"""read /type/link/reverse of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"reverse": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"reverse": true
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkAttributionOfReverseProperty(self):
"""read /type/link/attribution of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"attribution": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"attribution": "/user/skud"
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkCreatorOfReverseProperty(self):
"""read /type/link/creator of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"creator": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"creator": "/user/skud"
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTimestampOfReverseProperty(self):
"""read /type/link/timestamp of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"timestamp": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"timestamp": "2008-05-23T20:32:27.0008Z"
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkSourceOfReverseProperty(self):
"""read /type/link/source of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"source": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"source": "Ashkenazi Jews"
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTargetOfReverseProperty(self):
"""read /type/link/target of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"target": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"target": "Bob Dylan"
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkTargetValueOfReverseProperty(self):
"""read /type/link/target_value of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"target_value": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"target_value": null
},
"id": "/en/ashkenazi_jews"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkOperationOfReverseProperty(self):
"""read /type/link/operation of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"operation": null
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"operation": "insert"
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkValidOfReverseProperty(self):
"""read /type/link/valid of reverse property."""
query = """
{
"/people/person/ethnicity": {
"link": {
"valid": null
}
},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/ethnicity": {
"link": {
"valid": true
}
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadBareTypeLinkMasterProperty(self):
    """read bare /type/link/master_property."""
    # /type/link/* properties are only meaningful inside a "link"
    # sub-clause; using one directly on the property clause is expected
    # to fail with a type error.
    query = """
    [{
      "/people/person/place_of_birth": {
        "/type/link/master_property": null
      },
      "id": "/en/bob_dylan"
    }]
    """
    exc_response = (
        error.MQLTypeError,
        "Can't use /type/link properties on None"
    )
    self.DoQuery(query, exc_response=exc_response)
def testReadTypePropertyLinks(self):
"""read /type/property/links."""
query = """
{
"/type/property/links": [{
"limit" : 1,
"source": {
"id": null
},
"target": {
"id": null
}
}],
"id": "/people/person/place_of_birth"
}
"""
exp_response = """
{
"/type/property/links": [{
"source": {
"id": "/en/james_caviezel"
},
"target": {
"id": "/en/mount_vernon_washington"
}
}],
"id": "/people/person/place_of_birth"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypePropertyLinks(self):
"""constrain /type/property/links."""
query = """
{
"/type/property/links": [{
"source": {
"id": "/en/james_caviezel"
},
"target": {
"id": "/en/mount_vernon_washington"
}
}],
"id": "/people/person/place_of_birth"
}
"""
exp_response = """
{
"/type/property/links": [{
"source": {
"id": "/en/james_caviezel"
},
"target": {
"id": "/en/mount_vernon_washington"
}
}],
"id": "/people/person/place_of_birth"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeLinkAttributionAsBangTypeAttributionLinks(self):
"""read /type/link/attribution as !/type/attribution/links."""
query = """
{
"/people/person/place_of_birth": {
"link": {
"!/type/attribution/links": null
},
"id": null
},
"id": "/en/bob_dylan"
}
"""
exc_response = (
error.MQLParseError,
"Can't use reverse property queries in /type/link"
)
self.DoQuery(query, exc_response=exc_response)
def testReadTypeLinkMasterPropertyAsBangTypePropertyLinks(self):
"""read /type/link/master_property as !/type/property/links."""
query = """
{
"/people/person/place_of_birth": {
"!/type/property/links": {
"id": null
}
},
"id": "/en/bob_dylan"
}
"""
exc_response = (
error.MQLTypeError,
"Can't reverse artificial property /type/property/links"
)
self.DoQuery(query, exc_response=exc_response)
def testReadTypeAttributionLinks(self):
"""read /type/attribution/links."""
query = """
{
"/type/attribution/links": [{
"limit": 2,
"source": {
"id": null
},
"target": {
"id": null
}
}],
"id": "/user/warren"
}
"""
exp_response = """
{
"/type/attribution/links": [
{
"source": {
"id": "/m/022q56s"
},
"target": {
"id": "/common/document"
}
},
{
"source": {
"id": "/m/022q56s"
},
"target": {
"id": "/boot/all_permission"
}
}
],
"id": "/user/warren"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeAttributionLinks(self):
"""constrain /type/attribution/links."""
query = """
{
"/type/attribution/links": {
"source": {
"id": "/guid/9202a8c04000641f80000000042b14d8"
},
"target": {
"id": "/common/document"
}
},
"id": "/user/warren"
}
"""
exp_response = """
{
"/type/attribution/links": {
"source": {
"id": "/guid/9202a8c04000641f80000000042b14d8"
},
"target": {
"id": "/common/document"
}
},
"id": "/user/warren"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTimestampOfTopLevelTypeLinkQuery(self):
    """read timestamp of top-level /type/link query."""
    # Links can be queried directly via "type": "/type/link" rather
    # than through a property's "link" sub-clause; "limit": 2 keeps the
    # expected response small and deterministic.
    query = """
    [{
      "limit": 2,
      "timestamp": null,
      "type": "/type/link"
    }]
    """
    exp_response = """
    [
      {
        "timestamp": "2006-10-22T07:34:24.0004Z",
        "type": "/type/link"
      },
      {
        "timestamp": "2006-10-22T07:34:24.0005Z",
        "type": "/type/link"
      }
    ]
    """
    self.DoQuery(query, exp_response=exp_response)
def testTypeLinkQueryWithOptionalTargetValue(self):
"""/type/link query with optional target_value."""
query = """
[
{
"source": {
"id": "/en/bob_dylan"
},
"limit": 2,
"type": "/type/link",
"target": {
"id": null
},
"target_value": null
}
]
"""
exp_response = """
[
{
"source": {
"id": "/en/bob_dylan"
},
"type": "/type/link",
"target": {
"id": "/boot/all_permission"
},
"target_value": null
},
{
"source": {
"id": "/en/bob_dylan"
},
"type": "/type/link",
"target": {
"id": "/common/topic"
},
"target_value": null
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testTypeLinkQueryWithRequiredTargetValue(self):
"""/type/link query with required target_value."""
query = """
[
{
"source": {
"id": "/en/bob_dylan"
},
"limit": 2,
"type": "/type/link",
"target": {
"id": null
},
"target_value": {
"value": null
}
}
]
"""
exp_response = """
[
{
"source": {
"id": "/en/bob_dylan"
},
"type": "/type/link",
"target": {
"id": "/lang/en"
},
"target_value": {
"value": "Robert Zimmerman"
}
},
{
"source": {
"id": "/en/bob_dylan"
},
"type": "/type/link",
"target": {
"id": "/lang/he"
},
"target_value": {
"value": "\u05d1\u05d5\u05d1 \u05d3\u05d9\u05dc\u05df"
}
}
]
"""
self.DoQuery(query, exp_response=exp_response)
if __name__ == '__main__':
mql_fixture.main()
|
google/pymql
|
test/type_link_test.py
|
Python
|
apache-2.0
| 36,235 | 0.001711 |
class Solution(object):
    def search(self, grid, x, y, s):
        """Flood-fill the island containing (x, y).

        Adds every '1' cell 4-connected to (x, y) to the visited set
        ``s`` and returns it.  Rewritten from recursion to an explicit
        stack so a large island cannot overflow Python's recursion
        limit; signature and return contract are unchanged.

        :param grid: rectangular grid of '0'/'1' strings
        :param x, y: starting cell coordinates
        :param s: set of already-visited (x, y) tuples, updated in place
        :returns: the (same) visited set
        """
        stack = [(x, y)]
        while stack:
            cx, cy = stack.pop()
            # Skip water and anything already claimed by an island.
            if (cx, cy) in s or grid[cx][cy] == '0':
                continue
            s.add((cx, cy))
            if cx > 0:
                stack.append((cx - 1, cy))
            if cx + 1 < len(grid):
                stack.append((cx + 1, cy))
            if cy > 0:
                stack.append((cx, cy - 1))
            if cy + 1 < len(grid[0]):
                stack.append((cx, cy + 1))
        return s

    def numIslands(self, grid):
        """Count 4-connected islands of '1' cells.

        :type grid: List[List[str]]
        :rtype: int
        """
        count = 0
        visited = set()
        for x in range(len(grid)):
            for y in range(len(grid[0])):
                # An unvisited land cell starts a brand-new island.
                if grid[x][y] == '1' and (x, y) not in visited:
                    count += 1
                    visited = self.search(grid, x, y, visited)
        return count
|
zeyuanxy/leet-code
|
vol4/number-of-islands/number-of-islands.py
|
Python
|
mit
| 851 | 0.00235 |
import thread_pool
from tornado.testing import AsyncTestCase
from unittest import TestCase
import time, socket
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from functools import partial
class ThreadPoolTestCase(AsyncTestCase):
    """Exercises thread_pool's run/in_thread_pool/in_ioloop/blocking APIs."""

    def tearDown(self):
        # Swap in a fresh global pool so worker state never leaks
        # between tests.
        thread_pool.thread_pool = thread_pool.ThreadPool()

    def test_run(self):
        # run() should execute the callback; stop()/wait() is the
        # tornado AsyncTestCase handshake.
        def callback():
            self.stop()
        thread_pool.thread_pool.run(callback)
        self.wait(timeout=0.2)

    @thread_pool.in_thread_pool
    def sleep(self):
        # Helper decorated to run off the IOLoop: blocks 100 ms, then
        # releases wait().
        time.sleep(0.1)
        self.stop()

    def test_in_thread_pool(self):
        start = time.time()
        self.sleep()
        # The call returned before the 100 ms sleep could finish, so the
        # body must be running on another thread.
        self.assertLess(time.time(), start + 0.1)
        self.wait()
        self.assertGreater(time.time(), start + 0.1)

    def test_in_ioloop(self):
        self.done = False
        self._test_in_ioloop()
        # Block here until _test_in_ioloop_2 stops the loop.
        IOLoop.instance().start()
        self.assertTrue(self.done)

    @thread_pool.in_thread_pool
    def _test_in_ioloop(self):
        # Runs on a worker thread, then hops back onto the IOLoop.
        time.sleep(0.1)
        self._test_in_ioloop_2()

    @thread_pool.in_ioloop
    def _test_in_ioloop_2(self):
        self.done = True
        IOLoop.instance().stop()

    def test_blocking_warn(self):
        # A @blocking method invoked on the IOLoop thread should trigger
        # thread_pool.blocking_warning (monkey-patched to record itself).
        self._fired_warning = False
        thread_pool.blocking_warning = self.warning_fired
        self.blocking_method()
        self.assertTrue(self._fired_warning)

    @thread_pool.blocking
    def blocking_method(self):
        time.sleep(0.1)

    def warning_fired(self, fn):
        # Stand-in for thread_pool.blocking_warning; fn is the offender.
        self._fired_warning = True
class TheadPoolDoSTestCase(TestCase):
    """Floods the pool with jobs; all must run and idle threads be reaped.

    NOTE(review): class name misspells "Thread"; kept as-is since tests
    may be selected by name.  File uses xrange, i.e. Python 2.
    """

    def tearDown(self):
        # Fresh global pool between tests.
        thread_pool.thread_pool = thread_pool.ThreadPool()

    def setUp(self):
        self.entered = 0
        self.exited = 0

    def exit(self):
        # Job body: small sleep, then count completion.
        # NOTE(review): self.exited += 1 from many worker threads is not
        # atomic in general — presumably relies on CPython's GIL; verify.
        time.sleep(0.01)
        self.exited += 1

    def test_DoS(self):
        for i in xrange(100):
            self.entered += 1
            thread_pool.thread_pool.run(self.exit)
        # Give all 100 jobs time to finish.
        time.sleep(0.5)
        self.assertEqual(self.entered, self.exited)
        # After another second the pool should have reaped idle workers.
        time.sleep(1)
        self.assertEqual(len(thread_pool.thread_pool.threads), 0)
|
bobpoekert/tornado-threadpool
|
tests.py
|
Python
|
mit
| 2,117 | 0.001417 |
# file openpyxl/workbook.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Workbook is the top-level container for all document information."""
__docformat__ = "restructuredtext en"
# Python stdlib imports
import datetime
import os
import threading
# package imports
from openpyxl.worksheet import Worksheet
from openpyxl.writer.dump_worksheet import DumpWorksheet, save_dump
from openpyxl.writer.strings import StringTableBuilder
from openpyxl.namedrange import NamedRange
from openpyxl.style import Style
from openpyxl.writer.excel import save_workbook
from openpyxl.shared.exc import ReadOnlyWorkbookException
from openpyxl.shared.date_time import CALENDAR_WINDOWS_1900, CALENDAR_MAC_1904
from openpyxl.shared.xmltools import fromstring
from openpyxl.shared.ooxml import NAMESPACES, SHEET_MAIN_NS
class DocumentProperties(object):
    """High-level properties of the document."""

    def __init__(self):
        self.creator = 'Unknown'
        # A new document's last modifier starts out as its creator.
        self.last_modified_by = self.creator
        self.created = datetime.datetime.now()
        self.modified = datetime.datetime.now()
        self.title = 'Untitled'
        self.subject = ''
        self.description = ''
        self.keywords = ''
        self.category = ''
        self.company = 'Microsoft Corporation'
        # Excel serial-date epoch; the Windows 1900 calendar is the
        # default (CALENDAR_MAC_1904 is the imported alternative).
        self.excel_base_date = CALENDAR_WINDOWS_1900
class DocumentSecurity(object):
    """Security information about the document."""

    def __init__(self):
        # All protection flags start disabled.
        for flag in ('lock_revision', 'lock_structure', 'lock_windows'):
            setattr(self, flag, False)
        # Empty passwords mean "no password set".
        self.revision_password = ''
        self.workbook_password = ''
class Workbook(object):
    """Workbook is the container for all other parts of the document."""

    def __init__(self, optimized_write=False, encoding='utf-8',
                 worksheet_class=Worksheet,
                 optimized_worksheet_class=DumpWorksheet,
                 guess_types=True):
        # Ordered list of worksheets; list order is the tab order.
        self.worksheets = []
        self._active_sheet_index = 0
        self._named_ranges = []
        self.properties = DocumentProperties()
        self.style = Style()
        self.security = DocumentSecurity()
        # optimized_write selects the streaming (write-once) mode.
        self.__optimized_write = optimized_write
        self.__optimized_read = False
        # Per-thread scratch space for readers/writers.
        self.__thread_local_data = threading.local()
        self.strings_table_builder = StringTableBuilder()
        self.loaded_theme = None
        self._worksheet_class = worksheet_class
        self._optimized_worksheet_class = optimized_worksheet_class
        self.vba_archive = None
        self.style_properties = None
        self._guess_types = guess_types
        self.encoding = encoding
        # A normal workbook starts with one empty sheet; the streaming
        # writer creates its sheets on demand instead.
        if not optimized_write:
            self.worksheets.append(self._worksheet_class(parent_workbook=self))

    def read_workbook_settings(self, xml_source):
        """Restore the active-sheet index from workbook.xml content.

        NOTE(review): assumes a workbookView element is present —
        root.find returning None would raise AttributeError; confirm
        against the callers.
        """
        root = fromstring(xml_source)
        view = root.find('*/' '{%s}workbookView' % SHEET_MAIN_NS)
        if 'activeTab' in view.attrib:
            self._active_sheet_index = int(view.attrib['activeTab'])

    @property
    def _local_data(self):
        # Thread-local storage created in __init__.
        return self.__thread_local_data

    @property
    def excel_base_date(self):
        # Delegates to the document properties (1900 vs 1904 calendar).
        return self.properties.excel_base_date

    def _set_optimized_read(self):
        # Marks the workbook read-only; create_sheet will then refuse.
        self.__optimized_read = True

    def get_active_sheet(self):
        """Returns the current active sheet."""
        return self.worksheets[self._active_sheet_index]

    def create_sheet(self, index=None, title=None):
        """Create a worksheet (at an optional index).

        :param index: optional position at which the sheet will be inserted
        :type index: int
        :param title: optional sheet title
        :raises ReadOnlyWorkbookException: in optimized-read mode
        """
        if self.__optimized_read:
            raise ReadOnlyWorkbookException('Cannot create new sheet in a read-only workbook')
        if self.__optimized_write :
            # Streaming mode uses the dump worksheet class.
            new_ws = self._optimized_worksheet_class(
                parent_workbook=self, title=title)
        else:
            if title is not None:
                new_ws = self._worksheet_class(
                    parent_workbook=self, title=title)
            else:
                new_ws = self._worksheet_class(parent_workbook=self)
        self.add_sheet(worksheet=new_ws, index=index)
        return new_ws

    def add_sheet(self, worksheet, index=None):
        """Add an existing worksheet (at an optional index, default: end)."""
        assert isinstance(worksheet, self._worksheet_class), "The parameter you have given is not of the type '%s'" % self._worksheet_class.__name__
        if index is None:
            index = len(self.worksheets)
        self.worksheets.insert(index, worksheet)

    def remove_sheet(self, worksheet):
        """Remove a worksheet from this workbook."""
        self.worksheets.remove(worksheet)

    def get_sheet_by_name(self, name):
        """Returns a worksheet by its name.

        Returns None if no worksheet has the name specified.

        :param name: the name of the worksheet to look for
        :type name: string
        """
        requested_sheet = None
        for sheet in self.worksheets:
            if sheet.title == name:
                requested_sheet = sheet
                break
        return requested_sheet

    def get_index(self, worksheet):
        """Return the index of the worksheet."""
        return self.worksheets.index(worksheet)

    def get_sheet_names(self):
        """Returns the list of the names of worksheets in the workbook.

        Names are returned in the worksheets order.

        :rtype: list of strings
        """
        return [s.title for s in self.worksheets]

    def create_named_range(self, name, worksheet, range, scope=None):
        """Create a new named_range on a worksheet and register it."""
        assert isinstance(worksheet, self._worksheet_class)
        named_range = NamedRange(name, [(worksheet, range)], scope)
        self.add_named_range(named_range)

    def get_named_ranges(self):
        """Return all named ranges"""
        return self._named_ranges

    def add_named_range(self, named_range):
        """Add an existing named_range to the list of named_ranges."""
        self._named_ranges.append(named_range)

    def get_named_range(self, name):
        """Return the range specified by name, or None if absent."""
        requested_range = None
        for named_range in self._named_ranges:
            if named_range.name == name:
                requested_range = named_range
                break
        return requested_range

    def remove_named_range(self, named_range):
        """Remove a named_range from this workbook."""
        self._named_ranges.remove(named_range)

    def save(self, filename):
        """Save the current workbook under the given `filename`.

        Use this function instead of using an `ExcelWriter`.

        .. warning::
            When creating your workbook using `optimized_write` set to True,
            you will only be able to call this function once. Subsequents attempts to
            modify or save the file will raise an :class:`openpyxl.shared.exc.WorkbookAlreadySaved` exception.
        """
        # Streaming workbooks go through the dump writer; normal ones
        # through the full package writer.
        if self.__optimized_write:
            save_dump(self, filename)
        else:
            save_workbook(self, filename)
|
Jian-Zhan/customarrayformatter
|
openpyxl/workbook.py
|
Python
|
mit
| 8,298 | 0.000603 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cqlhandling import CqlParsingRuleSet, Hint
from cassandra.metadata import maybe_escape_name
# Scalar CQL3 type names offered by tab completion; collection types
# (set/map/list) are produced by the grammar rather than this set.
simple_cql_types = set(('ascii', 'bigint', 'blob', 'boolean', 'counter', 'date', 'decimal', 'double', 'duration', 'float',
                        'inet', 'int', 'smallint', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'uuid', 'varchar', 'varint'))
# NOTE(review): none of these three names appear in the literal above,
# so this difference_update is a defensive no-op.
simple_cql_types.difference_update(('set', 'map', 'list'))

from . import helptopics
# Help-topic lookup used by cqlsh's HELP command.
cqldocs = helptopics.CQL3HelpTopics()
class UnexpectedTableStructure(UserWarning):
    """Warning for table layouts that may not translate cleanly to CQL."""

    def __init__(self, msg):
        # Keep the raw message; the standard prefix is added by __str__.
        self.msg = msg

    def __str__(self):
        prefix = 'Unexpected table structure; may not translate correctly to CQL. '
        return prefix + self.msg
# Keyspaces shipped with Cassandra.  NONALTERBALE (sic — misspelled name
# kept because other modules import it) lists the subset that must never
# be altered.
SYSTEM_KEYSPACES = ('system', 'system_schema', 'system_traces', 'system_auth', 'system_distributed')
NONALTERBALE_KEYSPACES = ('system', 'system_schema')
class Cql3ParsingRuleSet(CqlParsingRuleSet):
    """CQL3-specific completion metadata plus quoting/escaping helpers."""

    # (CQL3 option name, legacy schema column name or None if identical)
    columnfamily_layout_options = (
        ('bloom_filter_fp_chance', None),
        ('comment', None),
        ('dclocal_read_repair_chance', 'local_read_repair_chance'),
        ('gc_grace_seconds', None),
        ('min_index_interval', None),
        ('max_index_interval', None),
        ('read_repair_chance', None),
        ('default_time_to_live', None),
        ('speculative_retry', None),
        ('memtable_flush_period_in_ms', None),
        ('cdc', None)
    )

    columnfamily_layout_map_options = (
        # (CQL3 option name, schema_columnfamilies column name (or None if same),
        #  list of known map keys)
        ('compaction', 'compaction_strategy_options',
         ('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction', 'only_purge_repaired_tombstones')),
        ('compression', 'compression_parameters',
         ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
        ('caching', None,
         ('rows_per_partition', 'keys')),
    )

    obsolete_cf_options = ()

    # Level names accepted by the CONSISTENCY command.
    consistency_levels = (
        'ANY',
        'ONE',
        'TWO',
        'THREE',
        'QUORUM',
        'ALL',
        'LOCAL_QUORUM',
        'EACH_QUORUM',
        'SERIAL'
    )

    # Per-strategy sub-option keys completed inside the 'compaction' map.
    size_tiered_compaction_strategy_options = (
        'min_sstable_size',
        'min_threshold',
        'bucket_high',
        'bucket_low'
    )

    leveled_compaction_strategy_options = (
        'sstable_size_in_mb',
        'fanout_size'
    )

    date_tiered_compaction_strategy_options = (
        'base_time_seconds',
        'max_sstable_age_days',
        'min_threshold',
        'max_window_size_seconds',
        'timestamp_resolution'
    )

    time_window_compaction_strategy_options = (
        'compaction_window_unit',
        'compaction_window_size',
        'min_threshold',
        'timestamp_resolution'
    )

    @classmethod
    def escape_value(cls, value):
        """Render a Python value as a CQL literal.

        Floats/ints pass through unquoted; everything else (including
        booleans, which are first lowercased to 'true'/'false') is
        single-quoted with embedded quotes doubled.
        """
        if value is None:
            return 'NULL' # this totally won't work
        if isinstance(value, bool):
            # Falls through to the quoted-string return below.
            value = str(value).lower()
        elif isinstance(value, float):
            return '%f' % value
        elif isinstance(value, int):
            return str(value)
        return "'%s'" % value.replace("'", "''")

    @classmethod
    def escape_name(cls, name):
        """Single-quote a name, doubling embedded quotes; None -> NULL."""
        if name is None:
            return 'NULL'
        return "'%s'" % name.replace("'", "''")

    @staticmethod
    def dequote_name(name):
        """Undo identifier quoting: strip "..." (undoubling ""), else lowercase."""
        name = name.strip()
        if name == '':
            return name
        if name[0] == '"' and name[-1] == '"':
            return name[1:-1].replace('""', '"')
        else:
            return name.lower()

    @staticmethod
    def dequote_value(cqlword):
        """Undo string-literal quoting: strip '...' and undouble ''."""
        cqlword = cqlword.strip()
        if cqlword == '':
            return cqlword
        if cqlword[0] == "'" and cqlword[-1] == "'":
            cqlword = cqlword[1:-1].replace("''", "'")
        return cqlword
# Singleton rule set consulted by every completer in this module.
CqlRuleSet = Cql3ParsingRuleSet()
# convenience for remainder of module
completer_for = CqlRuleSet.completer_for
explain_completion = CqlRuleSet.explain_completion
dequote_value = CqlRuleSet.dequote_value
dequote_name = CqlRuleSet.dequote_name
escape_value = CqlRuleSet.escape_value
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= <quotedStringLiteral>
| <pgStringLiteral> ;
<quotedStringLiteral> ::= /'([^']|'')*'/ ;
<pgStringLiteral> ::= /\$\$(?:(?!\$\$).)*\$\$/;
<quotedName> ::= /"([^"]|"")*"/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<blobLiteral> ::= /0x[0-9a-f]+/ ;
<wholenumber> ::= /[0-9]+/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<endtoken> ::= ";" ;
<op> ::= /[-+=%/,().]/ ;
<cmp> ::= /[<>!]=?/ ;
<brackets> ::= /[][{}]/ ;
<integer> ::= "-"? <wholenumber> ;
<boolean> ::= "true"
| "false"
;
<unclosedPgString>::= /\$\$(?:(?!\$\$).)*/ ;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedName> ::= /"([^"]|"")*/ ;
<unclosedComment> ::= /[/][*].*$/ ;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
| <boolean>
| <blobLiteral>
| <collectionLiteral>
| <functionLiteral> <functionArguments>
| "NULL"
;
<functionLiteral> ::= (<identifier> ( "." <identifier> )?)
| "TOKEN"
;
<functionArguments> ::= "(" ( <term> ( "," <term> )* )? ")"
;
<tokenDefinition> ::= token="TOKEN" "(" <term> ( "," <term> )* ")"
| <term>
;
<cident> ::= <quotedName>
| <identifier>
| <unreservedKeyword>
;
<colname> ::= <cident> ; # just an alias
<collectionLiteral> ::= <listLiteral>
| <setLiteral>
| <mapLiteral>
;
<listLiteral> ::= "[" ( <term> ( "," <term> )* )? "]"
;
<setLiteral> ::= "{" ( <term> ( "," <term> )* )? "}"
;
<mapLiteral> ::= "{" <term> ":" <term> ( "," <term> ":" <term> )* "}"
;
<anyFunctionName> ::= ( ksname=<cfOrKsName> dot="." )? udfname=<cfOrKsName> ;
<userFunctionName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? udfname=<cfOrKsName> ;
<refUserFunctionName> ::= udfname=<cfOrKsName> ;
<userAggregateName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? udaname=<cfOrKsName> ;
<functionAggregateName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? functionname=<cfOrKsName> ;
<aggregateName> ::= <userAggregateName>
;
<functionName> ::= <functionAggregateName>
| "TOKEN"
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <createMaterializedViewStatement>
| <createUserTypeStatement>
| <createFunctionStatement>
| <createAggregateStatement>
| <createTriggerStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <dropMaterializedViewStatement>
| <dropUserTypeStatement>
| <dropFunctionStatement>
| <dropAggregateStatement>
| <dropTriggerStatement>
| <alterTableStatement>
| <alterKeyspaceStatement>
| <alterUserTypeStatement>
;
<authenticationStatement> ::= <createUserStatement>
| <alterUserStatement>
| <dropUserStatement>
| <listUsersStatement>
| <createRoleStatement>
| <alterRoleStatement>
| <dropRoleStatement>
| <listRolesStatement>
;
<authorizationStatement> ::= <grantStatement>
| <grantRoleStatement>
| <revokeStatement>
| <revokeRoleStatement>
| <listPermissionsStatement>
;
# timestamp is included here, since it's also a keyword
<simpleStorageType> ::= typename=( <identifier> | <stringLiteral> | "timestamp" ) ;
<userType> ::= utname=<cfOrKsName> ;
<storageType> ::= <simpleStorageType> | <collectionType> | <frozenCollectionType> | <userType> ;
# Note: autocomplete for frozen collection types does not handle nesting past depth 1 properly,
# but that's a lot of work to fix for little benefit.
<collectionType> ::= "map" "<" <simpleStorageType> "," ( <simpleStorageType> | <userType> ) ">"
| "list" "<" ( <simpleStorageType> | <userType> ) ">"
| "set" "<" ( <simpleStorageType> | <userType> ) ">"
;
<frozenCollectionType> ::= "frozen" "<" "map" "<" <storageType> "," <storageType> ">" ">"
| "frozen" "<" "list" "<" <storageType> ">" ">"
| "frozen" "<" "set" "<" <storageType> ">" ">"
;
<columnFamilyName> ::= ( ksname=<cfOrKsName> dot="." )? cfname=<cfOrKsName> ;
<materializedViewName> ::= ( ksname=<cfOrKsName> dot="." )? mvname=<cfOrKsName> ;
<userTypeName> ::= ( ksname=<cfOrKsName> dot="." )? utname=<cfOrKsName> ;
<keyspaceName> ::= ksname=<cfOrKsName> ;
<nonSystemKeyspaceName> ::= ksname=<cfOrKsName> ;
<alterableKeyspaceName> ::= ksname=<cfOrKsName> ;
<cfOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<unreservedKeyword> ::= nocomplete=
( "key"
| "clustering"
# | "count" -- to get count(*) completion, treat count as reserved
| "ttl"
| "compact"
| "storage"
| "type"
| "values" )
;
<property> ::= [propname]=<cident> propeq="=" [propval]=<propertyValue>
;
<propertyValue> ::= propsimpleval=( <stringLiteral>
| <identifier>
| <integer>
| <float>
| <unreservedKeyword> )
# we don't use <mapLiteral> here so we can get more targeted
# completions:
| propsimpleval="{" [propmapkey]=<term> ":" [propmapval]=<term>
( ender="," [propmapkey]=<term> ":" [propmapval]=<term> )*
ender="}"
;
'''
def prop_equals_completer(ctxt, cass):
    """Offer '=' after a property name, except for the table pseudo-
    properties COMPACT/CLUSTERING which never take an equals sign."""
    if working_on_keyspace(ctxt):
        return ['=']
    # "COMPACT STORAGE" and "CLUSTERING ORDER BY" merely start with words
    # that look like property names; their syntax has no '=' at all.
    latest = ctxt.get_binding('propname')[-1].upper()
    return () if latest in ('COMPACT', 'CLUSTERING') else ['=']

completer_for('property', 'propeq')(prop_equals_completer)
@completer_for('property', 'propname')
def prop_name_completer(ctxt, cass):
    """Route property-name completion to the keyspace or table handler."""
    handler = ks_prop_name_completer if working_on_keyspace(ctxt) else cf_prop_name_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'propsimpleval')
def prop_val_completer(ctxt, cass):
    """Route property-value completion to the keyspace or table handler."""
    handler = ks_prop_val_completer if working_on_keyspace(ctxt) else cf_prop_val_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'propmapkey')
def prop_val_mapkey_completer(ctxt, cass):
    """Route map-key completion to the keyspace or table handler."""
    handler = ks_prop_val_mapkey_completer if working_on_keyspace(ctxt) else cf_prop_val_mapkey_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'propmapval')
def prop_val_mapval_completer(ctxt, cass):
    """Route map-value completion to the keyspace or table handler."""
    handler = ks_prop_val_mapval_completer if working_on_keyspace(ctxt) else cf_prop_val_mapval_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'ender')
def prop_val_mapender_completer(ctxt, cass):
    """Route map-terminator completion to the keyspace or table handler."""
    handler = ks_prop_val_mapender_completer if working_on_keyspace(ctxt) else cf_prop_val_mapender_completer
    return handler(ctxt, cass)
def ks_prop_name_completer(ctxt, cass):
    """Keyspace property names: 'replication' first, then 'durable_writes'."""
    seen = ctxt.get_binding('propname', ())
    return ['durable_writes'] if 'replication' in seen else ['replication']
def ks_prop_val_completer(ctxt, cass):
    """Suggest a value for the keyspace property currently being set."""
    prop = ctxt.get_binding('propname')[-1]
    suggestions = {
        'durable_writes': ["'true'", "'false'"],
        'replication': ["{'class': '"],
    }
    return suggestions.get(prop, ())
def ks_prop_val_mapkey_completer(ctxt, cass):
    """Suggest keys for the replication map: 'class' must come first, then
    the options appropriate to the chosen replication strategy."""
    optname = ctxt.get_binding('propname')[-1]
    if optname != 'replication':
        return ()
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    else:
        # No strategy class chosen yet; it is the mandatory first key.
        return ["'class'"]
    if repclass in CqlRuleSet.replication_factor_strategies:
        opts = set(('replication_factor',))
    elif repclass == 'NetworkTopologyStrategy':
        return [Hint('<dc_name>')]
    else:
        # Unknown/custom strategy class: no option names to offer.
        # (Without this branch, 'opts' was unbound below and the completer
        # raised UnboundLocalError.)
        return ()
    return map(escape_value, opts.difference(keysseen))
def ks_prop_val_mapval_completer(ctxt, cass):
    """Suggest a value for the replication-map key currently being set."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return ()
    key = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if key == 'class':
        # Strategy class names are the only values we can enumerate.
        return map(escape_value, CqlRuleSet.replication_strategies)
    return [Hint('<term>')]
def ks_prop_val_mapender_completer(ctxt, cass):
    """Decide whether the replication map can be closed ('}') or still
    needs more entries (',')."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return [',']
    keysseen = [dequote_value(k) for k in ctxt.get_binding('propmapkey', ())]
    valsseen = [dequote_value(v) for v in ctxt.get_binding('propmapval', ())]
    # First 'class' entry wins, matching the original scan order.
    repclass = next((v for k, v in zip(keysseen, valsseen) if k == 'class'), None)
    if repclass is None:
        # The mandatory 'class' key has not been supplied yet.
        return [',']
    if repclass in CqlRuleSet.replication_factor_strategies:
        if 'replication_factor' not in keysseen:
            return [',']
    if repclass == 'NetworkTopologyStrategy' and len(keysseen) == 1:
        # NTS needs at least one datacenter entry besides 'class'.
        return [',']
    return ['}']
def cf_prop_name_completer(ctxt, cass):
    """All known table option names, scalar and map-valued alike."""
    all_options = CqlRuleSet.columnfamily_layout_options + CqlRuleSet.columnfamily_layout_map_options
    return [option[0] for option in all_options]
def cf_prop_val_completer(ctxt, cass):
    """Suggest a value (or a typed hint) for the table option being set."""
    exist_opts = ctxt.get_binding('propname')
    this_opt = exist_opts[-1]
    # Map-valued options open with the one key users almost always want.
    if this_opt == 'compression':
        return ["{'sstable_compression': '"]
    if this_opt == 'compaction':
        return ["{'class': '"]
    if this_opt == 'caching':
        return ["{'keys': '"]
    if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
        return ["'<obsolete_option>'"]
    if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance',
                    'dclocal_read_repair_chance'):
        return [Hint('<float_between_0_and_1>')]
    if this_opt in ('min_compaction_threshold', 'max_compaction_threshold',
                    'gc_grace_seconds', 'min_index_interval', 'max_index_interval'):
        return [Hint('<integer>')]
    # Was `this_opt in ('cdc')`: a missing trailing comma made this a
    # substring test against the string 'cdc', so 'c', 'd', 'cd' and 'dc'
    # also matched.  Exact comparison is what was intended.
    if this_opt == 'cdc':
        return [Hint('<true|false>')]
    return [Hint('<option_value>')]
def cf_prop_val_mapkey_completer(ctxt, cass):
    """Suggest sub-option keys for a map-valued table option.

    For 'compaction' the candidates depend on which strategy class has
    already been chosen; 'class' itself is mandatory and offered first.
    """
    optname = ctxt.get_binding('propname')[-1]
    # Look up the known sub-option keys for this map option; the for/else
    # 'else' arm runs only when no break occurred (unknown option name).
    for cql3option, _, subopts in CqlRuleSet.columnfamily_layout_map_options:
        if optname == cql3option:
            break
    else:
        return ()
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    pairsseen = dict(zip(keysseen, valsseen))
    if optname == 'compression':
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'caching':
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'compaction':
        opts = set(subopts)
        try:
            csc = pairsseen['class']
        except KeyError:
            # The strategy class must be chosen before its options.
            return ["'class'"]
        # Tolerate fully-qualified strategy class names.
        csc = csc.split('.')[-1]
        if csc == 'SizeTieredCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.size_tiered_compaction_strategy_options))
        elif csc == 'LeveledCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.leveled_compaction_strategy_options))
        elif csc == 'DateTieredCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.date_tiered_compaction_strategy_options))
        elif csc == 'TimeWindowCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.time_window_compaction_strategy_options))
        return map(escape_value, opts)
    return ()
def cf_prop_val_mapval_completer(ctxt, cass):
    """Suggest a value for the current sub-option of a map-valued table option."""
    opt = ctxt.get_binding('propname')[-1]
    key = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if opt == 'compaction':
        if key == 'class':
            return map(escape_value, CqlRuleSet.available_compaction_classes)
        return [Hint('<option_value>')]
    if opt == 'compression':
        if key == 'sstable_compression':
            return map(escape_value, CqlRuleSet.available_compression_classes)
        return [Hint('<option_value>')]
    if opt == 'caching':
        if key == 'rows_per_partition':
            return ["'ALL'", "'NONE'", Hint('#rows_per_partition')]
        if key == 'keys':
            return ["'ALL'", "'NONE'"]
    return ()
def cf_prop_val_mapender_completer(ctxt, cass):
    """A map-valued table option may always gain another entry or be closed."""
    return [',', '}']
@completer_for('tokenDefinition', 'token')
def token_word_completer(ctxt, cass):
    """The only opener for a token definition is the function call itself."""
    return ['token(']
@completer_for('simpleStorageType', 'typename')
def storagetype_completer(ctxt, cass):
    """All single-value (scalar) CQL type names."""
    return simple_cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Every keyspace name, quoted where necessary."""
    return map(maybe_escape_name, cass.get_keyspace_names())
@completer_for('nonSystemKeyspaceName', 'ksname')
def non_system_ks_name_completer(ctxt, cass):
    """Keyspace names excluding Cassandra's own system keyspaces."""
    ksnames = [n for n in cass.get_keyspace_names() if n not in SYSTEM_KEYSPACES]
    return map(maybe_escape_name, ksnames)
@completer_for('alterableKeyspaceName', 'ksname')
def alterable_ks_name_completer(ctxt, cass):
    """Keyspace names excluding those that may not be ALTERed."""
    ksnames = [n for n in cass.get_keyspace_names() if n not in NONALTERBALE_KEYSPACES]
    return map(maybe_escape_name, ksnames)
def cf_ks_name_completer(ctxt, cass):
    """Offer every keyspace name, dot-suffixed, as a table-name prefix."""
    return [maybe_escape_name(name) + '.' for name in cass.get_keyspace_names()]

completer_for('columnFamilyName', 'ksname')(cf_ks_name_completer)
completer_for('materializedViewName', 'ksname')(cf_ks_name_completer)

def cf_ks_dot_completer(ctxt, cass):
    """Offer the '.' separator only when the prefix is a real keyspace."""
    ksname = dequote_name(ctxt.get_binding('ksname'))
    return ['.'] if ksname in cass.get_keyspace_names() else []

completer_for('columnFamilyName', 'dot')(cf_ks_dot_completer)
completer_for('materializedViewName', 'dot')(cf_ks_dot_completer)
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
    """Table names in the bound keyspace (or the current one when unbound)."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    try:
        cfnames = cass.get_columnfamily_names(ks)
    except Exception:
        # Only swallow the failure when no keyspace was given explicitly
        # (e.g. no USE yet); otherwise surface the error.
        if ks is not None:
            raise
        return ()
    return [maybe_escape_name(name) for name in cfnames]

@completer_for('materializedViewName', 'mvname')
def mv_name_completer(ctxt, cass):
    """Materialized-view names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    try:
        mvnames = cass.get_materialized_view_names(ks)
    except Exception:
        if ks is not None:
            raise
        return ()
    return [maybe_escape_name(name) for name in mvnames]

completer_for('userTypeName', 'ksname')(cf_ks_name_completer)
completer_for('userTypeName', 'dot')(cf_ks_dot_completer)

def ut_name_completer(ctxt, cass):
    """User-defined-type names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    try:
        utnames = cass.get_usertype_names(ks)
    except Exception:
        if ks is not None:
            raise
        return ()
    return [maybe_escape_name(name) for name in utnames]

completer_for('userTypeName', 'utname')(ut_name_completer)
completer_for('userType', 'utname')(ut_name_completer)
@completer_for('unreservedKeyword', 'nocomplete')
def unreserved_keyword_completer(ctxt, cass):
    """Deliberately complete nothing for unreserved keywords."""
    # we never want to provide completions through this production;
    # this is always just to allow use of some keywords as column
    # names, CF names, property values, etc.
    return ()
def get_table_meta(ctxt, cass):
    """Resolve the bound (keyspace, table) pair to driver table metadata."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    cf = dequote_name(ctxt.get_binding('cfname'))
    return cass.get_table_meta(ks, cf)

def get_ut_layout(ctxt, cass):
    """Resolve the bound (keyspace, type) pair to the user-type layout."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    ut = dequote_name(ctxt.get_binding('utname'))
    return cass.get_usertype_layout(ks, ut)
def working_on_keyspace(ctxt):
    """True when the statement being completed targets a keyspace/schema
    rather than a table (the 'wat' binding holds the typed keyword)."""
    return ctxt.get_binding('wat').upper() in ('KEYSPACE', 'SCHEMA')
syntax_rules += r'''
<useStatement> ::= "USE" <keyspaceName>
;
<selectStatement> ::= "SELECT" ( "JSON" )? <selectClause>
"FROM" (cf=<columnFamilyName> | mv=<materializedViewName>)
( "WHERE" <whereClause> )?
( "GROUP" "BY" <groupByClause> ( "," <groupByClause> )* )?
( "ORDER" "BY" <orderByClause> ( "," <orderByClause> )* )?
( "PER" "PARTITION" "LIMIT" perPartitionLimit=<wholenumber> )?
( "LIMIT" limit=<wholenumber> )?
( "ALLOW" "FILTERING" )?
;
<whereClause> ::= <relation> ( "AND" <relation> )*
;
<relation> ::= [rel_lhs]=<cident> ( "[" <term> "]" )? ( "=" | "<" | ">" | "<=" | ">=" | "CONTAINS" ( "KEY" )? ) <term>
| token="TOKEN" "(" [rel_tokname]=<cident>
( "," [rel_tokname]=<cident> )*
")" ("=" | "<" | ">" | "<=" | ">=") <tokenDefinition>
| [rel_lhs]=<cident> "IN" "(" <term> ( "," <term> )* ")"
;
<selectClause> ::= "DISTINCT"? <selector> ("AS" <cident>)? ("," <selector> ("AS" <cident>)?)*
| "*"
;
<udtSubfieldSelection> ::= <identifier> "." <identifier>
;
<selector> ::= [colname]=<cident>
| <udtSubfieldSelection>
| "WRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
| "COUNT" "(" star=( "*" | "1" ) ")"
| "CAST" "(" <selector> "AS" <storageType> ")"
| <functionName> <selectionFunctionArguments>
| <term>
;
<selectionFunctionArguments> ::= "(" ( <selector> ( "," <selector> )* )? ")"
;
<orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
;
<groupByClause> ::= [groupcol]=<cident>
;
'''
def udf_name_completer(ctxt, cass):
    """User-defined-function names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    try:
        names = cass.get_userfunction_names(ks)
    except Exception:
        # Tolerate the failure only when no keyspace was named explicitly.
        if ks is not None:
            raise
        return ()
    return [maybe_escape_name(n) for n in names]

def uda_name_completer(ctxt, cass):
    """User-defined-aggregate names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    try:
        names = cass.get_useraggregate_names(ks)
    except Exception:
        if ks is not None:
            raise
        return ()
    return [maybe_escape_name(n) for n in names]

def udf_uda_name_completer(ctxt, cass):
    """Both UDF and UDA names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    try:
        names = cass.get_userfunction_names(ks) + cass.get_useraggregate_names(ks)
    except Exception:
        if ks is not None:
            raise
        return ()
    return [maybe_escape_name(n) for n in names]

def ref_udf_name_completer(ctxt, cass):
    """All user-function names, keyspace-independent; best-effort only."""
    try:
        names = cass.get_userfunction_names(None)
    except Exception:
        return ()
    return [maybe_escape_name(n) for n in names]
# Wire the keyspace-prefix/dot completers and the UDF/UDA name completers
# onto the function- and aggregate-name grammar productions.
completer_for('functionAggregateName', 'ksname')(cf_ks_name_completer)
completer_for('functionAggregateName', 'dot')(cf_ks_dot_completer)
completer_for('functionAggregateName', 'functionname')(udf_uda_name_completer)
completer_for('anyFunctionName', 'ksname')(cf_ks_name_completer)
completer_for('anyFunctionName', 'dot')(cf_ks_dot_completer)
completer_for('anyFunctionName', 'udfname')(udf_name_completer)
completer_for('userFunctionName', 'ksname')(cf_ks_name_completer)
completer_for('userFunctionName', 'dot')(cf_ks_dot_completer)
completer_for('userFunctionName', 'udfname')(udf_name_completer)
completer_for('refUserFunctionName', 'udfname')(ref_udf_name_completer)
completer_for('userAggregateName', 'ksname')(cf_ks_name_completer)
completer_for('userAggregateName', 'dot')(cf_ks_dot_completer)
completer_for('userAggregateName', 'udaname')(uda_name_completer)
@completer_for('orderByClause', 'ordercol')
def select_order_column_completer(ctxt, cass):
    """Offer the next clustering column, in declaration order, for ORDER BY."""
    done = ctxt.get_binding('ordercol', ())
    keyname = ctxt.get_binding('keyname')
    if keyname is None:
        keyname = ctxt.get_binding('rel_lhs', ())
        if not keyname:
            # ORDER BY is only legal once the partition key is restricted.
            return [Hint("Can't ORDER BY here: need to specify partition key in WHERE clause")]
    candidates = [col.name for col in get_table_meta(ctxt, cass).clustering_key]
    if len(done) < len(candidates):
        return [maybe_escape_name(candidates[len(done)])]
    return [Hint('No more orderable columns here.')]

@completer_for('groupByClause', 'groupcol')
def select_group_column_completer(ctxt, cass):
    """Offer the next primary-key column, in declaration order, for GROUP BY."""
    done = ctxt.get_binding('groupcol', ())
    candidates = [col.name for col in get_table_meta(ctxt, cass).primary_key]
    if len(done) < len(candidates):
        return [maybe_escape_name(candidates[len(done)])]
    return [Hint('No more columns here.')]
@completer_for('relation', 'token')
def relation_token_word_completer(ctxt, cass):
    """A token relation starts with the TOKEN( call."""
    return ['TOKEN(']
@completer_for('relation', 'rel_tokname')
def relation_token_subject_completer(ctxt, cass):
    """TOKEN() takes the partition-key columns as its arguments."""
    layout = get_table_meta(ctxt, cass)
    return [key.name for key in layout.partition_key]
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
    """Columns usable on the left-hand side of a WHERE relation.

    Key columns are only filterable in prefix order: each partition or
    clustering column is offered only once its predecessor is already
    restricted.  Indexed columns are always filterable.
    """
    layout = get_table_meta(ctxt, cass)
    filterable = set()
    already_filtered_on = map(dequote_name, ctxt.get_binding('rel_lhs', ()))
    for num in range(0, len(layout.partition_key)):
        if num == 0 or layout.partition_key[num - 1].name in already_filtered_on:
            filterable.add(layout.partition_key[num].name)
        else:
            # Stop at the first gap in the key prefix.
            break
    for num in range(0, len(layout.clustering_key)):
        if num == 0 or layout.clustering_key[num - 1].name in already_filtered_on:
            filterable.add(layout.clustering_key[num].name)
        else:
            break
    for idx in layout.indexes.itervalues():
        filterable.add(idx.index_options["target"])
    return map(maybe_escape_name, filterable)
explain_completion('selector', 'colname')
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
( ( "(" [colname]=<cident> ( "," [colname]=<cident> )* ")"
"VALUES" "(" [newval]=<term> ( valcomma="," [newval]=<term> )* valcomma=")")
| ("JSON" <stringLiteral>))
( "IF" "NOT" "EXISTS")?
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "TIMESTAMP" <wholenumber>
| "TTL" <wholenumber>
;
'''
def regular_column_names(table_meta):
    """Names of the non-primary-key columns of *table_meta*; order is
    unspecified.  Returns [] for missing/empty metadata."""
    if not table_meta or not table_meta.columns:
        return []
    key_names = set(col.name for col in table_meta.partition_key)
    key_names.update(col.name for col in table_meta.clustering_key)
    return [name for name in table_meta.columns.keys() if name not in key_names]
@completer_for('insertStatement', 'colname')
def insert_colname_completer(ctxt, cass):
    """Primary-key columns first (in key order), then remaining columns."""
    layout = get_table_meta(ctxt, cass)
    already = set(map(dequote_name, ctxt.get_binding('colname', ())))
    for key_col in layout.primary_key:
        if key_col.name not in already:
            return [maybe_escape_name(key_col.name)]
    remaining = set(regular_column_names(layout)) - already
    return [maybe_escape_name(name) for name in remaining]

@completer_for('insertStatement', 'newval')
def insert_newval_completer(ctxt, cass):
    """Suggest a literal opener or a typed hint for the next VALUES item."""
    layout = get_table_meta(ctxt, cass)
    insertcols = [dequote_name(col) for col in ctxt.get_binding('colname')]
    valuesdone = ctxt.get_binding('newval', ())
    if len(valuesdone) >= len(insertcols):
        # One value per named column; nothing more to suggest.
        return []
    curcol = insertcols[len(valuesdone)]
    coltype = layout.columns[curcol].cql_type
    openers = {'map': '{', 'set': '{', 'list': '['}
    if coltype in openers:
        return [openers[coltype]]
    if coltype == 'boolean':
        return ['true', 'false']
    return [Hint('<value for %s (%s)>' % (maybe_escape_name(curcol),
                                          coltype))]

@completer_for('insertStatement', 'valcomma')
def insert_valcomma_completer(ctxt, cass):
    """Continue the VALUES list until each named column has a value."""
    numcols = len(ctxt.get_binding('colname', ()))
    numvals = len(ctxt.get_binding('newval', ()))
    return [','] if numcols > numvals else [')']

@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
    """USING options not already present on this INSERT."""
    used = set(opt.split()[0] for opt in ctxt.get_binding('insertopt', ()))
    return set(('TIMESTAMP', 'TTL')) - used
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <whereClause>
( "IF" ( "EXISTS" | <conditions> ))?
;
<assignment> ::= updatecol=<cident>
(( "=" update_rhs=( <term> | <cident> )
( counterop=( "+" | "-" ) inc=<wholenumber>
| listadder="+" listcol=<cident> )? )
| ( indexbracket="[" <term> "]" "=" <term> )
| ( udt_field_dot="." udt_field=<identifier> "=" <term> ))
;
<conditions> ::= <condition> ( "AND" <condition> )*
;
<condition_op_and_rhs> ::= (("=" | "<" | ">" | "<=" | ">=" | "!=") <term>)
| ("IN" "(" <term> ( "," <term> )* ")" )
;
<condition> ::= conditioncol=<cident>
( (( indexbracket="[" <term> "]" )
|( udt_field_dot="." udt_field=<identifier> )) )?
<condition_op_and_rhs>
;
'''
@completer_for('updateStatement', 'updateopt')
def update_option_completer(ctxt, cass):
    """USING options not already present on this UPDATE."""
    used = set(opt.split()[0] for opt in ctxt.get_binding('updateopt', ()))
    return set(('TIMESTAMP', 'TTL')) - used
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
    """Only non-primary-key columns may appear in a SET clause."""
    layout = get_table_meta(ctxt, cass)
    return map(maybe_escape_name, regular_column_names(layout))
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
    """RHS suggestion: the column itself for counters (c = c + 1),
    an opening bracket for collections, otherwise a typed hint."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    coltype = layout.columns[curcol].cql_type
    if coltype == 'counter':
        return [maybe_escape_name(curcol)]
    if coltype in ('map', 'set'):
        return ["{"]
    if coltype == 'list':
        return ["["]
    return [Hint('<term (%s)>' % coltype)]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
    """'+'/'-' apply only to counter columns."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return ['+', '-'] if layout.columns[curcol].cql_type == 'counter' else []
@completer_for('assignment', 'inc')
def update_counter_inc_completer(ctxt, cass):
    """A counter op is followed by an integer increment."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    if layout.columns[curcol].cql_type == 'counter':
        return [Hint('<wholenumber>')]
    return []
@completer_for('assignment', 'listadder')
def update_listadder_completer(ctxt, cass):
    """Offer '+' after a list literal (list concatenation syntax)."""
    rhs = ctxt.get_binding('update_rhs')
    if rhs.startswith('['):
        return ['+']
    return []
@completer_for('assignment', 'listcol')
def update_listcol_completer(ctxt, cass):
    """After "[...] +", the only valid operand is the column itself."""
    rhs = ctxt.get_binding('update_rhs')
    if rhs.startswith('['):
        colname = dequote_name(ctxt.get_binding('updatecol'))
        return [maybe_escape_name(colname)]
    return []
@completer_for('assignment', 'indexbracket')
def update_indexbracket_completer(ctxt, cass):
    """'[' indexes into map and list columns only."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    coltype = layout.columns[curcol].cql_type
    if coltype in ('map', 'list'):
        return ['[']
    return []
@completer_for('assignment', 'udt_field_dot')
def update_udt_field_dot_completer(ctxt, cass):
    """'.' accesses a field of a user-defined-type column."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return ["."] if _is_usertype(layout, curcol) else []
@completer_for('assignment', 'udt_field')
def assignment_udt_field_completer(ctxt, cass):
    """Field names of the UDT column being assigned."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return _usertype_fields(ctxt, cass, layout, curcol)
def _is_usertype(layout, curcol):
    """True when *curcol*'s declared type is a user-defined type, i.e.
    neither a built-in scalar nor a collection."""
    coltype = layout.columns[curcol].cql_type
    if coltype in simple_cql_types:
        return False
    return coltype not in ('map', 'set', 'list')

def _usertype_fields(ctxt, cass, layout, curcol):
    """Field names of the user type held by *curcol* ([] when not a UDT)."""
    if not _is_usertype(layout, curcol):
        return []
    coltype = layout.columns[curcol].cql_type
    ks = ctxt.get_binding('ksname', None)
    ks = dequote_name(ks) if ks is not None else None
    user_type = cass.get_usertype_layout(ks, coltype)
    return [field_name for (field_name, field_type) in user_type]
@completer_for('condition', 'indexbracket')
def condition_indexbracket_completer(ctxt, cass):
    """'[' in an IF condition indexes into map and list columns only."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('conditioncol', ''))
    coltype = layout.columns[curcol].cql_type
    if coltype in ('map', 'list'):
        return ['[']
    return []
@completer_for('condition', 'udt_field_dot')
def condition_udt_field_dot_completer(ctxt, cass):
    """'.' in an IF condition accesses a field of a UDT column."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('conditioncol', ''))
    return ["."] if _is_usertype(layout, curcol) else []
@completer_for('condition', 'udt_field')
def condition_udt_field_completer(ctxt, cass):
    """Field names of the UDT column being tested in an IF condition."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('conditioncol', ''))
    return _usertype_fields(ctxt, cass, layout, curcol)
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( <deleteSelector> ( "," <deleteSelector> )* )?
"FROM" cf=<columnFamilyName>
( "USING" [delopt]=<deleteOption> )?
"WHERE" <whereClause>
( "IF" ( "EXISTS" | <conditions> ) )?
;
<deleteSelector> ::= delcol=<cident>
( ( "[" <term> "]" )
| ( "." <identifier> ) )?
;
<deleteOption> ::= "TIMESTAMP" <wholenumber>
;
'''
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
    """USING options not already present on this DELETE (TIMESTAMP only)."""
    used = set(opt.split()[0] for opt in ctxt.get_binding('delopt', ()))
    return set(('TIMESTAMP',)) - used

@completer_for('deleteSelector', 'delcol')
def delete_delcol_completer(ctxt, cass):
    """Only non-primary-key columns may be deleted individually."""
    layout = get_table_meta(ctxt, cass)
    return [maybe_escape_name(name) for name in regular_column_names(layout)]
syntax_rules += r'''
<batchStatement> ::= "BEGIN" ( "UNLOGGED" | "COUNTER" )? "BATCH"
( "USING" [batchopt]=<usingOption>
( "AND" [batchopt]=<usingOption> )* )?
[batchstmt]=<batchStatementMember> ";"?
( [batchstmt]=<batchStatementMember> ";"? )*
"APPLY" "BATCH"
;
<batchStatementMember> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
;
'''
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
    """USING options not already present on this BATCH (TIMESTAMP only)."""
    used = set(opt.split()[0] for opt in ctxt.get_binding('batchopt', ()))
    return set(('TIMESTAMP',)) - used
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" ("COLUMNFAMILY" | "TABLE")? cf=<columnFamilyName>
;
'''
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" wat=( "KEYSPACE" | "SCHEMA" ) ("IF" "NOT" "EXISTS")? ksname=<cfOrKsName>
"WITH" <property> ( "AND" <property> )*
;
'''
@completer_for('createKeyspaceStatement', 'wat')
def create_ks_wat_completer(ctxt, cass):
    """Prefer KEYSPACE over the legacy SCHEMA spelling when nothing has
    been typed yet; once a prefix exists, allow both."""
    if ctxt.get_binding('partial', '') == '':
        return ['KEYSPACE']
    return ['KEYSPACE', 'SCHEMA']
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" wat=( "COLUMNFAMILY" | "TABLE" ) ("IF" "NOT" "EXISTS")?
( ks=<nonSystemKeyspaceName> dot="." )? cf=<cfOrKsName>
"(" ( <singleKeyCfSpec> | <compositeKeyCfSpec> ) ")"
( "WITH" <cfamProperty> ( "AND" <cfamProperty> )* )?
;
<cfamProperty> ::= <property>
| "COMPACT" "STORAGE" "CDC"
| "CLUSTERING" "ORDER" "BY" "(" <cfamOrdering>
( "," <cfamOrdering> )* ")"
;
<cfamOrdering> ::= [ordercol]=<cident> ( "ASC" | "DESC" )
;
<singleKeyCfSpec> ::= [newcolname]=<cident> <storageType> "PRIMARY" "KEY"
( "," [newcolname]=<cident> <storageType> )*
;
<compositeKeyCfSpec> ::= [newcolname]=<cident> <storageType>
"," [newcolname]=<cident> <storageType> ( "static" )?
( "," [newcolname]=<cident> <storageType> ( "static" )? )*
"," "PRIMARY" k="KEY" p="(" ( partkey=<pkDef> | [pkey]=<cident> )
( c="," [pkey]=<cident> )* ")"
;
<pkDef> ::= "(" [ptkey]=<cident> "," [ptkey]=<cident>
( "," [ptkey]=<cident> )* ")"
;
'''
@completer_for('cfamOrdering', 'ordercol')
def create_cf_clustering_order_colname_completer(ctxt, cass):
    """Offer the column names declared so far for CLUSTERING ORDER BY.

    Not every declared column is genuinely orderable, but narrowing that
    down here isn't worth the complexity.
    """
    return [dequote_name(name) for name in ctxt.get_binding('newcolname', ())]

@completer_for('createColumnFamilyStatement', 'wat')
def create_cf_wat_completer(ctxt, cass):
    """Prefer TABLE over the legacy COLUMNFAMILY spelling when nothing
    has been typed yet; once a prefix exists, allow both."""
    if ctxt.get_binding('partial', '') == '':
        return ['TABLE']
    return ['TABLE', 'COLUMNFAMILY']

explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('compositeKeyCfSpec', 'newcolname', '<new_column_name>')

@completer_for('createColumnFamilyStatement', 'dot')
def create_cf_ks_dot_completer(ctxt, cass):
    """Offer '.' only when the prefix names an existing keyspace."""
    ksname = dequote_name(ctxt.get_binding('ks'))
    return ['.'] if ksname in cass.get_keyspace_names() else []
@completer_for('pkDef', 'ptkey')
def create_cf_pkdef_declaration_completer(ctxt, cass):
    """Offer the next declared column for the parenthesized partition key.

    Columns already used as key pieces are skipped, and at least one
    declared column must remain for the rest of the primary key.
    """
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('ptkey', ())
    pieces_already = map(dequote_name, pieces_already)
    # Guard against exhausting the declared columns: without the
    # `cols_declared and` check this raised IndexError once every
    # declared column had already been used as a key piece.
    while cols_declared and cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    if len(cols_declared) < 2:
        return ()
    return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'pkey')
def create_cf_composite_key_declaration_completer(ctxt, cass):
    """Offer the next declared column for the composite primary key.

    Columns already used as partition-key or clustering pieces are
    skipped, and at least one declared column must remain as a value.
    """
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('ptkey', ()) + ctxt.get_binding('pkey', ())
    pieces_already = map(dequote_name, pieces_already)
    # Guard against exhausting the declared columns: without the
    # `cols_declared and` check this raised IndexError once every
    # declared column had already been used as a key piece.
    while cols_declared and cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    if len(cols_declared) < 2:
        return ()
    return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'k')
def create_cf_composite_primary_key_keyword_completer(ctxt, cass):
    """After "PRIMARY", the only continuation at this point is "KEY (" itself."""
    suggestion = 'KEY ('
    return [suggestion]
@completer_for('compositeKeyCfSpec', 'p')
def create_cf_composite_primary_key_paren_completer(ctxt, cass):
    """Complete the opening parenthesis of the PRIMARY KEY column list."""
    suggestion = '('
    return [suggestion]
@completer_for('compositeKeyCfSpec', 'c')
def create_cf_composite_primary_key_comma_completer(ctxt, cass):
    """Offer ',' only while declared columns remain to be added to the key."""
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('pkey', ())
    # The first declared column is consumed outside this list, hence the -1.
    return [','] if len(pieces_already) < len(cols_declared) - 1 else ()
syntax_rules += r'''
<idxName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<createIndexStatement> ::= "CREATE" "CUSTOM"? "INDEX" ("IF" "NOT" "EXISTS")? indexname=<idxName>? "ON"
cf=<columnFamilyName> "(" (
col=<cident> |
"keys(" col=<cident> ")" |
"full(" col=<cident> ")"
) ")"
( "USING" <stringLiteral> ( "WITH" "OPTIONS" "=" <mapLiteral> )? )?
;
<createMaterializedViewStatement> ::= "CREATE" "MATERIALIZED" "VIEW" ("IF" "NOT" "EXISTS")? <materializedViewName>?
"AS" <selectStatement>
"PRIMARY" "KEY" <pkDef>
;
<createUserTypeStatement> ::= "CREATE" "TYPE" ( ks=<nonSystemKeyspaceName> dot="." )? typename=<cfOrKsName> "(" newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )*
")"
;
<createFunctionStatement> ::= "CREATE" ("OR" "REPLACE")? "FUNCTION"
("IF" "NOT" "EXISTS")?
<userFunctionName>
( "(" ( newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )* )?
")" )?
("RETURNS" "NULL" | "CALLED") "ON" "NULL" "INPUT"
"RETURNS" <storageType>
"LANGUAGE" <cident> "AS" <stringLiteral>
;
<createAggregateStatement> ::= "CREATE" ("OR" "REPLACE")? "AGGREGATE"
("IF" "NOT" "EXISTS")?
<userAggregateName>
( "("
( <storageType> ( "," <storageType> )* )?
")" )?
"SFUNC" <refUserFunctionName>
"STYPE" <storageType>
( "FINALFUNC" <refUserFunctionName> )?
( "INITCOND" <term> )?
;
'''
# New index/type/field names cannot be completed from the schema; hint instead.
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
explain_completion('createUserTypeStatement', 'typename', '<new_type_name>')
explain_completion('createUserTypeStatement', 'newcol', '<new_field_name>')
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
    """Return the columns for which an index doesn't exist yet."""
    layout = get_table_meta(ctxt, cass)
    indexed_targets = [idx.index_options["target"] for idx in layout.indexes.itervalues()]
    return [maybe_escape_name(cd.name)
            for cd in layout.columns.values()
            if cd.name not in indexed_targets]
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ("IF" "EXISTS")? ksname=<nonSystemKeyspaceName>
;
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) ("IF" "EXISTS")? cf=<columnFamilyName>
;
<indexName> ::= ( ksname=<idxOrKsName> dot="." )? idxname=<idxOrKsName> ;
<idxOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<dropIndexStatement> ::= "DROP" "INDEX" ("IF" "EXISTS")? idx=<indexName>
;
<dropMaterializedViewStatement> ::= "DROP" "MATERIALIZED" "VIEW" ("IF" "EXISTS")? mv=<materializedViewName>
;
<dropUserTypeStatement> ::= "DROP" "TYPE" ut=<userTypeName>
;
<dropFunctionStatement> ::= "DROP" "FUNCTION" ( "IF" "EXISTS" )? <userFunctionName>
;
<dropAggregateStatement> ::= "DROP" "AGGREGATE" ( "IF" "EXISTS" )? <userAggregateName>
;
'''
@completer_for('indexName', 'ksname')
def idx_ks_name_completer(ctxt, cass):
    """Complete the keyspace-qualifier part of an index name."""
    return ['%s.' % maybe_escape_name(ks) for ks in cass.get_keyspace_names()]
@completer_for('indexName', 'dot')
def idx_ks_dot_completer(ctxt, cass):
    """Offer the '.' separator only when the prefix names a real keyspace."""
    ksname = dequote_name(ctxt.get_binding('ksname'))
    return ['.'] if ksname in cass.get_keyspace_names() else []
@completer_for('indexName', 'idxname')
def idx_ks_idx_name_completer(ctxt, cass):
    """Complete index names, scoped to the bound keyspace when one was given."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        idxnames = cass.get_index_names(ks)
    except Exception:
        # With an explicit keyspace the failure is real; without one we may
        # simply not have a current keyspace, so offer nothing.
        if ks is not None:
            raise
        return ()
    return [maybe_escape_name(name) for name in idxnames]
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
<alterInstructions>
;
<alterInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType> ("static")?
| "DROP" existcol=<cident>
| "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
<alterUserTypeStatement> ::= "ALTER" "TYPE" ut=<userTypeName>
<alterTypeInstructions>
;
<alterTypeInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType>
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
'''
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
    """Complete with the existing column names of the table being altered."""
    layout = get_table_meta(ctxt, cass)
    return [maybe_escape_name(str(col)) for col in layout.columns]
@completer_for('alterTypeInstructions', 'existcol')
def alter_type_field_completer(ctxt, cass):
    """Complete with the existing field names of the user type being altered.

    Each layout entry is a (field_name, field_type) pair; only the name is
    offered.  (The loop variable previously shadowed the builtin ``tuple``.)
    """
    layout = get_ut_layout(ctxt, cass)
    fields = [field[0] for field in layout]
    return map(maybe_escape_name, fields)
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
explain_completion('alterTypeInstructions', 'newcol', '<new_field_name>')
syntax_rules += r'''
<alterKeyspaceStatement> ::= "ALTER" wat=( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
"WITH" <property> ( "AND" <property> )*
;
'''
syntax_rules += r'''
<username> ::= name=( <identifier> | <stringLiteral> )
;
<createUserStatement> ::= "CREATE" "USER" ( "IF" "NOT" "EXISTS" )? <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<alterUserStatement> ::= "ALTER" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<dropUserStatement> ::= "DROP" "USER" ( "IF" "EXISTS" )? <username>
;
<listUsersStatement> ::= "LIST" "USERS"
;
'''
syntax_rules += r'''
<rolename> ::= <identifier>
| <quotedName>
| <unreservedKeyword>
;
<createRoleStatement> ::= "CREATE" "ROLE" <rolename>
( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
;
<alterRoleStatement> ::= "ALTER" "ROLE" <rolename>
( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
;
<roleProperty> ::= "PASSWORD" "=" <stringLiteral>
| "OPTIONS" "=" <mapLiteral>
| "SUPERUSER" "=" <boolean>
| "LOGIN" "=" <boolean>
;
<dropRoleStatement> ::= "DROP" "ROLE" <rolename>
;
<grantRoleStatement> ::= "GRANT" <rolename> "TO" <rolename>
;
<revokeRoleStatement> ::= "REVOKE" <rolename> "FROM" <rolename>
;
<listRolesStatement> ::= "LIST" "ROLES"
( "OF" <rolename> )? "NORECURSIVE"?
;
'''
syntax_rules += r'''
<grantStatement> ::= "GRANT" <permissionExpr> "ON" <resource> "TO" <rolename>
;
<revokeStatement> ::= "REVOKE" <permissionExpr> "ON" <resource> "FROM" <rolename>
;
<listPermissionsStatement> ::= "LIST" <permissionExpr>
( "ON" <resource> )? ( "OF" <rolename> )? "NORECURSIVE"?
;
<permission> ::= "AUTHORIZE"
| "CREATE"
| "ALTER"
| "DROP"
| "SELECT"
| "MODIFY"
| "DESCRIBE"
| "EXECUTE"
;
<permissionExpr> ::= ( <permission> "PERMISSION"? )
| ( "ALL" "PERMISSIONS"? )
;
<resource> ::= <dataResource>
| <roleResource>
| <functionResource>
| <jmxResource>
;
<dataResource> ::= ( "ALL" "KEYSPACES" )
| ( "KEYSPACE" <keyspaceName> )
| ( "TABLE"? <columnFamilyName> )
;
<roleResource> ::= ("ALL" "ROLES")
| ("ROLE" <rolename>)
;
<functionResource> ::= ( "ALL" "FUNCTIONS" ("IN KEYSPACE" <keyspaceName>)? )
| ( "FUNCTION" <functionAggregateName>
( "(" ( newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )* )?
")" )
)
;
<jmxResource> ::= ( "ALL" "MBEANS")
| ( ( "MBEAN" | "MBEANS" ) <stringLiteral> )
;
'''
@completer_for('username', 'name')
def username_name_completer(ctxt, cass):
    """Complete existing user names (ALTER/DROP USER and similar)."""
    def maybe_quote(name):
        if CqlRuleSet.is_valid_cql3_name(name):
            return name
        return "'%s'" % name

    # CREATE USER introduces a brand-new name: show a hint, not candidates.
    if ctxt.matched[0][1].upper() == 'CREATE':
        return [Hint('<username>')]
    rows = cass.session.execute("LIST USERS")
    return [maybe_quote(row.values()[0].replace("'", "''")) for row in rows]
@completer_for('rolename', 'role')
def rolename_completer(ctxt, cass):
    """Complete existing role names for the role-management statements."""
    def maybe_quote(name):
        if CqlRuleSet.is_valid_cql3_name(name):
            return name
        return "'%s'" % name

    # CREATE ROLE introduces a brand-new name: show a hint, not candidates.
    if ctxt.matched[0][1].upper() == 'CREATE':
        return [Hint('<rolename>')]
    rows = cass.session.execute("LIST ROLES")
    return [maybe_quote(row[0].replace("'", "''")) for row in rows]
syntax_rules += r'''
<createTriggerStatement> ::= "CREATE" "TRIGGER" ( "IF" "NOT" "EXISTS" )? <cident>
"ON" cf=<columnFamilyName> "USING" class=<stringLiteral>
;
<dropTriggerStatement> ::= "DROP" "TRIGGER" ( "IF" "EXISTS" )? triggername=<cident>
"ON" cf=<columnFamilyName>
;
'''
# The trigger implementation class cannot be discovered; hint at its format.
explain_completion('createTriggerStatement', 'class', '\'fully qualified class name\'')
def get_trigger_names(ctxt, cass):
    """Return trigger names in the bound keyspace (or the current one)."""
    ks = ctxt.get_binding('ksname', None)
    return cass.get_trigger_names(dequote_name(ks) if ks is not None else None)
@completer_for('dropTriggerStatement', 'triggername')
def drop_trigger_completer(ctxt, cass):
    """Complete with the existing trigger names for DROP TRIGGER."""
    return [maybe_escape_name(name) for name in get_trigger_names(ctxt, cass)]
# END SYNTAX/COMPLETION RULE DEFINITIONS
# Register everything accumulated above with the shared CQL rule set.
CqlRuleSet.append_rules(syntax_rules)
|
mambocab/cassandra
|
pylib/cqlshlib/cql3handling.py
|
Python
|
apache-2.0
| 55,793 | 0.001667 |
# django-drf imports
from rest_framework import serializers
# app level imports
from .models import Player, Team
class PlayerSerializer(serializers.ModelSerializer):
    """Serialize Player instances for the REST API."""

    class Meta:
        model = Player
        # Core player attributes plus the bookkeeping timestamps.
        fields = (
            'id',
            'name',
            'rating',
            'teams',
            'install_ts',
            'update_ts',
        )
class TeamSerializer(serializers.ModelSerializer):
    """Serialize Team instances for the REST API."""

    class Meta:
        model = Team
        # Core team attributes plus the bookkeeping timestamps.
        fields = (
            'id',
            'name',
            'rating',
            'players',
            'install_ts',
            'update_ts',
        )
|
manjitkumar/drf-url-filters
|
example_app/serializers.py
|
Python
|
mit
| 525 | 0 |
# coding=utf-8
import unittest
"""3. Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/description/
Given a string, find the length of the **longest substring** without repeating
characters.
**Examples:**
Given `"abcabcbb"`, the answer is `"abc"`, which the length is 3.
Given `"bbbbb"`, the answer is `"b"`, with the length of 1.
Given `"pwwkew"`, the answer is `"wke"`, with the length of 3. Note that the
answer must be a **substring** , `"pwke"` is a _subsequence_ and not a
substring.
Similar Questions:
Longest Substring with At Most Two Distinct Characters (longest-substring-with-at-most-two-distinct-characters)
"""
class Solution(unittest.TestCase):
    def lengthOfLongestSubstring(self, s):
        """
        Return the length of the longest substring of *s* without repeating
        characters.

        Single left-to-right sliding-window pass: ``start`` is the left edge
        of the current duplicate-free window and ``last_seen`` maps each
        character to the index of its most recent occurrence.  O(n) time,
        O(min(n, alphabet)) space.  The previous restart-and-rescan approach
        (jump back past the duplicate and clear the cache) re-read characters
        and was O(n^2) in the worst case.

        :type s: str
        :rtype: int
        """
        last_seen = {}
        best = 0
        start = 0
        for idx, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= start:
                # Duplicate inside the window: slide the left edge past it.
                start = last_seen[ch] + 1
            last_seen[ch] = idx
            best = max(best, idx - start + 1)
        return best

    def test(self):
        self.assertEqual(self.lengthOfLongestSubstring("abcabcbb"), 3)
        self.assertEqual(self.lengthOfLongestSubstring("bbbbb"), 1)
        self.assertEqual(self.lengthOfLongestSubstring("pwwkew"), 3)
        self.assertEqual(self.lengthOfLongestSubstring("c"), 1)
        self.assertEqual(self.lengthOfLongestSubstring(""), 0)
        self.assertEqual(self.lengthOfLongestSubstring("abba"), 2)
if __name__ == "__main__":
    # Run the embedded unit tests when the module is executed directly.
    unittest.main()
|
openqt/algorithms
|
leetcode/python/ac/lc003-longest-substring-without-repeating-characters.py
|
Python
|
gpl-3.0
| 1,545 | 0.000647 |
import unittest
from b.grammar import Parser
class ParserTests(unittest.TestCase):
    """Tests for b.grammar.Parser."""

    def test_parse(self):
        # Exercise a parse of a number followed by a quoted string, then
        # deliberately raise: this test is an unfinished placeholder.
        parser = Parser()
        parser.parse('123 "things"')
        raise NotImplementedError
|
blake-sheridan/py
|
test/test_grammar.py
|
Python
|
apache-2.0
| 200 | 0.005 |
from my.models import QueDoidura
# Optional. Number of entity migrations to run per task (default = 1000).
MIGRATIONS_PER_TASK = 2
# Human-friendly description of this database change ("multiplies by 2").
DESCRIPTION = 'multiplica por 2'
def get_query():
    """ Return a query object over the entities that still need migrating """
    return QueDoidura.query()
def migrate_one(entity):
    """Migrate a single entity: set v2 to double v1 and persist it."""
    entity.v2 = entity.v1 * 2
    entity.put()
|
qmagico/gae-migrations
|
tests/my/migrations_pau_na_migration/migration_paunamigration_0001.py
|
Python
|
mit
| 414 | 0.007246 |
import datetime
import logging
from decimal import Decimal
from django.db import transaction
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font
from .models import Transaction, LineItem, Layout, PosPayment, Item, Location, TWO_PLACES
from members.models import InvoiceItem, ItemType
from pos.models import VisitorBook
stdlogger = logging.getLogger(__name__)
class Error(Exception):
    """
    Base class for exceptions in this module.

    Catching ``Error`` catches every exception raised by this module.
    """
    pass
class PosServicesError(Error):
    """
    Error while processing payment.
    """
    def __init__(self, message):
        # Pass the message to the base Exception so str(exc) and log output
        # render it; keep the .message attribute for existing callers.
        super().__init__(message)
        self.message = message
@transaction.atomic
def create_transaction_from_receipt(
    creator_id, terminal, layout_id, receipt, total, people, attended, creation_date=None
):
    """
    Create Transaction, LineItem and PosPayment records in the database.

    ``total`` and the per-person ``amount`` values are integers in pence and
    are converted to two-place Decimals.  Returns a (payer_label, dec_total)
    tuple describing the sale: "Complimentary", "Cash", or the first payer's
    name.  Raises PosServicesError (with the original exception chained) if
    anything goes wrong; @transaction.atomic rolls back partial writes.
    """
    try:
        complimentary = False
        count = len(people)
        dec_total = (Decimal(total) / 100).quantize(TWO_PLACES)
        item_type = Layout.objects.get(pk=layout_id).item_type
        if count > 0:
            person_id = int(people[0]["id"])
            if person_id == -1:
                # id == -1 is the convention for a complimentary (free) sale
                complimentary = True
                person_id = None
        else:
            person_id = None
        trans = Transaction(
            creation_date=creation_date,
            creator_id=creator_id,
            person_id=person_id,
            terminal=terminal,
            item_type=item_type,
            total=dec_total,
            billed=Transaction.BilledState.UNBILLED.value,
            # No attributed person and not complimentary means a cash sale.
            cash=person_id is None and not complimentary,
            complimentary=complimentary,
            split=count > 1,
            attended=attended,
        )
        trans.save()
        for item_dict in receipt:
            line_item = LineItem(
                item_id=item_dict["id"],
                sale_price=Decimal(item_dict["sale_price"]).quantize(TWO_PLACES),
                cost_price=Decimal(item_dict["cost_price"]).quantize(TWO_PLACES),
                quantity=1,
                transaction=trans,
            )
            line_item.save()
        if complimentary:
            return ("Complimentary", dec_total)
        if trans.cash:
            return ("Cash", dec_total)
        # Member sale: record one PosPayment per participating person.
        pay_total = Decimal(0)
        for person in people:
            pos_payment = PosPayment(
                transaction=trans,
                person_id=person["id"],
                billed=False,
                total=(Decimal(person["amount"]) / 100).quantize(TWO_PLACES),
            )
            pay_total += pos_payment.total
            pos_payment.save()
        if pay_total != dec_total:
            stdlogger.error(
                f"ERROR: POS Transaction total: {dec_total} unequal to Payment total: {pay_total} Id: {trans.id}"
            )
        return (people[0]["name"], dec_total)
    except Exception as exc:
        # Log and chain the original exception instead of discarding it.
        stdlogger.exception("Error creating POS transaction")
        raise PosServicesError("Error creating transaction") from exc
def delete_billed_transactions(before_date):
    """
    Delete transactions that have been billed and linked items and payments.

    Only transactions created before *before_date* are removed; returns the
    number of matching transactions.
    """
    billed = Transaction.objects.filter(
        billed=Transaction.BilledState.BILLED.value,
        creation_date__lt=before_date,
    )
    removed = billed.count()
    billed.delete()
    return removed
def delete_billed_visitors(before_date):
    """
    Delete visitor book entries that have been billed.

    Only entries created before *before_date* are removed; returns the
    number of matching entries.
    """
    billed_entries = VisitorBook.objects.filter(billed=True, creation_date__lt=before_date)
    removed = billed_entries.count()
    billed_entries.delete()
    return removed
def dump_items_to_excel(item_type_id):
    """Return an xlsx HttpResponse listing items of one type with prices.

    https://djangotricks.blogspot.co.uk/2013/12/how-to-export-data-as-excel.html
    Uses enumerate/tuple unpacking rather than range(len(...)) index loops.
    """
    queryset = Item.objects.filter(item_type_id=item_type_id)
    response = HttpResponse(content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    response["Content-Disposition"] = "attachment; filename=Items.xlsx"
    wb = Workbook()
    ws = wb.active
    ws.title = "Items"
    # Header row (row 1): bold titles plus the column widths.
    columns = [("Description", 40), ("Price", 10)]
    for col_num, (title, width) in enumerate(columns, start=1):
        cell = ws.cell(row=1, column=col_num)
        cell.value = title
        cell.font = Font(sz=12, bold=True)
        ws.column_dimensions[get_column_letter(col_num)].width = width
    # One data row per item, starting at row 2; prices formatted as sterling.
    for row_num, obj in enumerate(queryset, start=2):
        ws.cell(row=row_num, column=1).value = obj.description
        price_cell = ws.cell(row=row_num, column=2)
        price_cell.value = obj.sale_price
        price_cell.number_format = "£0.00"
    wb.save(response)
    return response
def dump_layout_to_excel(layout):
    """Return an xlsx HttpResponse rendering *layout* as a flat price list.

    https://djangotricks.blogspot.co.uk/2013/12/how-to-export-data-as-excel.html
    """
    array, _items = build_pos_array(layout)
    response = HttpResponse(content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    response["Content-Disposition"] = "attachment; filename=Price list.xlsx"
    wb = Workbook()
    ws = wb.active
    ws.title = "Price List"
    for col_num, width in enumerate([10, 40, 10], start=1):
        ws.column_dimensions[get_column_letter(col_num)].width = width
    ws.cell(row=1, column=2, value="Price List")
    # Walk the grid; cells with a third element carry a payload (a section
    # description in column 0, an item everywhere else).
    row_num = 2
    for row in array:
        for col_num, cell in enumerate(row):
            if len(cell) <= 2:
                continue  # empty slot on the layout grid
            if col_num == 0:
                ws.cell(row=row_num, column=1, value=cell[2])
            else:
                item = cell[2]
                ws.cell(row=row_num, column=2, value=item.description)
                price_cell = ws.cell(row=row_num, column=3, value=item.sale_price)
                price_cell.number_format = "£0.00"
            row_num += 1
    wb.save(response)
    return response
def export_pos(transactions, payments):
    """Build an xlsx download: one sheet of transactions, one of payments."""
    response = HttpResponse(content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    response["Content-Disposition"] = "attachment; filename=POS data.xlsx"
    workbook = Workbook()
    transactions_sheet = workbook.active
    transactions_sheet.title = "Transactions"
    dump_transactions(transactions_sheet, transactions)
    payments_sheet = workbook.create_sheet(title="Payments")
    dump_payments(payments_sheet, payments)
    workbook.save(response)
    return response
def dump_transactions(ws, transactions):
    """Write one worksheet row per transaction, preceded by a header row.

    ``ws`` only needs an ``append(row)`` method (an openpyxl worksheet here).
    """
    ws.append([
        "Id", "Date", "Type", "Person_id", "Person", "Total",
        "Comp", "Cash", "Billed", "Split", "Attended", "Terminal",
    ])
    for trans in transactions:
        person_name = trans.person.fullname if trans.person_id else ""
        ws.append([
            trans.id,
            trans.creation_date,
            trans.item_type_id,
            trans.person_id,
            person_name,
            trans.total,
            trans.complimentary,
            trans.cash,
            trans.billed,
            trans.split,
            trans.attended,
            trans.terminal,
        ])
def dump_payments(ws, payments):
    """Write one worksheet row per POS payment, preceded by a header row.

    ``ws`` only needs an ``append(row)`` method (an openpyxl worksheet here).
    """
    ws.append(["Id", "Trans_id", "Person_id", "Person", "Total", "Billed"])
    for payment in payments:
        person_name = payment.person.fullname if payment.person_id else ""
        ws.append([
            payment.id,
            payment.transaction.id,
            payment.person_id,
            person_name,
            payment.total,
            payment.billed,
        ])
def build_pos_array(layout=None):
    """
    Build an array of rows and columns for the POS screen.

    Each cell starts as [row, col]; populated cells get a payload appended:
    the description for column 0, the Item for other columns.
    When *layout* is given (managing a layout), also returns the items of
    that layout's type, with ``used = True`` set on the first item matching
    each placed location's button text; otherwise items is None.
    """
    rows = []
    for r in range(1, Location.ROW_MAX + 1):
        rows.append([[r, c] for c in range(0, Location.COL_MAX + 1)])
    items = None
    if layout:  # true when managing a layout
        locations = (
            Location.objects.filter(layout_id=layout.id)
            .order_by("row", "col")
            .prefetch_related("item")
            .prefetch_related("item__colour")
        )
        items = Item.objects.filter(item_type_id=layout.item_type_id).order_by("button_text")
        for loc in locations:
            if loc.col == 0:
                rows[loc.row - 1][loc.col].append(loc.description)
            else:
                rows[loc.row - 1][loc.col].append(loc.item)
                if items:
                    # Mark the first matching item as placed.  next() stops at
                    # the first hit instead of building a full filtered list
                    # per location (and avoids the old item/item shadowing).
                    match = next(
                        (candidate for candidate in items
                         if candidate.button_text == loc.item.button_text),
                        None,
                    )
                    if match:
                        match.used = True
    return rows, items
|
ianastewart/cwltc-admin
|
pos/services.py
|
Python
|
mit
| 8,750 | 0.002058 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTryMasters(project, change):
  """Return the preferred try masters/builders for a change to src/net.

  Changes that touch NSS files additionally get the linux_redux builder,
  since they will likely need a corresponding OpenSSL edit (the 'nss' glob
  conveniently matches _openssl.* changes too).
  """
  masters = {}
  for master, builder in (
      ('tryserver.chromium.linux', 'linux_chromium_rel_swarming'),
      ('tryserver.chromium.mac', 'mac_chromium_rel_swarming'),
      ('tryserver.chromium.win', 'win_chromium_rel_swarming'),
  ):
    masters[master] = {builder: set(['defaulttests'])}
  touches_nss = any('nss' in f.LocalPath() for f in change.AffectedFiles())
  if touches_nss:
    masters['tryserver.chromium.linux'].setdefault(
        'linux_redux', set()).add('defaulttests')
  return masters
|
7kbird/chrome
|
net/PRESUBMIT.py
|
Python
|
bsd-3-clause
| 1,034 | 0.005803 |
# -*- coding: utf-8 -*-
#
# JKal-Filter documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 24 16:56:49 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
autodoc_default_flags = ['members', 'private-members','special-members', 'show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JKalFilter'
copyright = u'2014, jepio'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'JKal-Filterdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '12pt',
    # Additional stuff for the LaTeX preamble.
    # Raw string: a plain '\u...' literal is an invalid unicode escape and a
    # SyntaxError on Python 3.
    'preamble': r'\usepackage{amsmath} \usepackage{amssymb}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'JKal-Filter.tex', u'JKal-Filter Documentation',
     u'jepio', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'jkal-filter', u'JKal-Filter Documentation',
     [u'jepio'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'JKal-Filter', u'JKal-Filter Documentation',
     u'jepio', 'JKal-Filter', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
jepio/JKalFilter
|
docs/conf.py
|
Python
|
gpl-2.0
| 8,011 | 0.00699 |
# pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q, F
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE, EnrollStatusChange
from util.query import use_read_replica_if_available
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from shoppingcart.pdf import PDFInvoice
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
# Module-level logger for shopping-cart events.
log = logging.getLogger("shoppingcart")
# Lifecycle states of an Order: (stored value, human-readable label).
ORDER_STATUSES = (
    # The user is selecting what he/she wants to purchase.
    ('cart', 'cart'),
    # The user has been sent to the external payment processor.
    # At this point, the order should NOT be modified.
    # If the user returns to the payment flow, he/she will start a new order.
    ('paying', 'paying'),
    # The user has successfully purchased the items in the order.
    ('purchased', 'purchased'),
    # The user's order has been refunded.
    ('refunded', 'refunded'),
    # The user's order went through, but the order was erroneously left
    # in 'cart'.
    ('defunct-cart', 'defunct-cart'),
    # The user's order went through, but the order was erroneously left
    # in 'paying'.
    ('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
    'cart': 'defunct-cart',
    'paying': 'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
class OrderTypes(object):
    """
    Enumeration of purchase order types.
    """
    PERSONAL = 'personal'
    BUSINESS = 'business'

    # Django-style choices: (stored value, human-readable label) pairs.
    ORDER_TYPES = (
        (PERSONAL, PERSONAL),
        (BUSINESS, BUSINESS),
    )
class Order(models.Model):
    """
    This is the model for an order. Before purchase, an Order and its related OrderItems are used
    as the shopping cart.
    FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
    """
    class Meta(object):
        app_label = "shoppingcart"
    user = models.ForeignKey(User, db_index=True)
    currency = models.CharField(default="usd", max_length=8)  # lower case ISO currency codes
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
    purchase_time = models.DateTimeField(null=True, blank=True)
    refunded_time = models.DateTimeField(null=True, blank=True)
    # Now we store data needed to generate a reasonable receipt
    # These fields only make sense after the purchase
    bill_to_first = models.CharField(max_length=64, blank=True)
    bill_to_last = models.CharField(max_length=64, blank=True)
    bill_to_street1 = models.CharField(max_length=128, blank=True)
    bill_to_street2 = models.CharField(max_length=128, blank=True)
    bill_to_city = models.CharField(max_length=64, blank=True)
    bill_to_state = models.CharField(max_length=8, blank=True)
    bill_to_postalcode = models.CharField(max_length=16, blank=True)
    bill_to_country = models.CharField(max_length=64, blank=True)
    bill_to_ccnum = models.CharField(max_length=8, blank=True)  # last 4 digits
    bill_to_cardtype = models.CharField(max_length=32, blank=True)
    # a JSON dump of the CC processor response, for completeness
    processor_reply_dump = models.TextField(blank=True)
    # bulk purchase registration code workflow billing details
    company_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_email = models.CharField(max_length=255, null=True, blank=True)
    recipient_name = models.CharField(max_length=255, null=True, blank=True)
    recipient_email = models.CharField(max_length=255, null=True, blank=True)
    customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
    order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
    @classmethod
    def get_cart_for_user(cls, user):
        """
        Always use this to preserve the property that at most 1 order per user has status = 'cart'

        Returns the newest Order with status 'cart' for `user`, creating an
        empty one if none exists.
        """
        # find the newest element in the db
        try:
            cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
        except ObjectDoesNotExist:
            # if nothing exists in the database, create a new cart
            # NOTE(review): filter-then-create is not atomic; concurrent requests
            # could briefly create two carts -- TODO confirm this is acceptable here.
            cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
        return cart_order
    @classmethod
    def does_user_have_cart(cls, user):
        """
        Returns a boolean whether a shopping cart (Order) exists for the specified user
        """
        return cls.objects.filter(user=user, status='cart').exists()
    @classmethod
    def user_cart_has_items(cls, user, item_types=None):
        """
        Returns true if the user (anonymous user ok) has
        a cart with items in it. (Which means it should be displayed.)
        If a item_type is passed in, then we check to see if the cart has at least one of
        those types of OrderItems
        """
        # Anonymous users never have a cart.
        if not user.is_authenticated():
            return False
        cart = cls.get_cart_for_user(user)
        if not item_types:
            # check to see if the cart has at least some item in it
            return cart.has_items()
        else:
            # if the caller is explicitly asking to check for particular types
            for item_type in item_types:
                if cart.has_items(item_type):
                    return True
        return False
    @classmethod
    def remove_cart_item_from_order(cls, item, user):
        """
        Removes the item from the cart if the item.order.status == 'cart'.
        Also removes any code redemption associated with the order_item
        """
        if item.order.status == 'cart':
            log.info("order item %s removed for user %s", str(item.id), user)
            item.delete()
            # remove any redemption entry associated with the item
            CouponRedemption.remove_code_redemption_from_item(item, user)
    @property
    def total_cost(self):
        """
        Return the total cost of the cart. If the order has been purchased, returns total of
        all purchased and not refunded items.
        """
        # Only items whose (denormalized) status matches the order's status count.
        return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
    def has_items(self, item_type=None):
        """
        Does the cart have any items in it?
        If an item_type is passed in then we check to see if there are any items of that class type
        """
        if not item_type:
            return self.orderitem_set.exists()
        else:
            # select_subclasses() resolves each row to its concrete OrderItem
            # subclass so the isinstance check below works.
            items = self.orderitem_set.all().select_subclasses()
            for item in items:
                if isinstance(item, item_type):
                    return True
            return False
    def reset_cart_items_prices(self):
        """
        Reset the items price state in the user cart

        Undoes any coupon discount by restoring unit_cost to list_price.
        """
        for item in self.orderitem_set.all():
            if item.is_discounted:
                item.unit_cost = item.list_price
                item.save()
    def clear(self):
        """
        Clear out all the items in the cart
        """
        self.orderitem_set.all().delete()
    @transaction.atomic
    def start_purchase(self):
        """
        Start the purchase process. This will set the order status to "paying",
        at which point it should no longer be modified.
        Future calls to `Order.get_cart_for_user()` will filter out orders with
        status "paying", effectively creating a new (empty) cart.
        """
        if self.status == 'cart':
            self.status = 'paying'
            self.save()
            # Propagate the status change to every line item.
            for item in OrderItem.objects.filter(order=self).select_subclasses():
                item.start_purchase()
    def update_order_type(self):
        """
        updating order type. This method wil inspect the quantity associated with the OrderItem.
        In the application, it is implied that when qty > 1, then the user is to purchase
        'RegistrationCodes' which are randomly generated strings that users can distribute to
        others in order for them to enroll in paywalled courses.
        The UI/UX may change in the future to make the switching between PaidCourseRegistration
        and CourseRegCodeItems a more explicit UI gesture from the purchaser

        Returns a list of {"oldId": ..., "newId": ...} dicts mapping replaced
        cart item ids to their newly created counterparts.
        """
        cart_items = self.orderitem_set.all()
        is_order_type_business = False
        # Any quantity > 1 flips the whole order to a business purchase.
        for cart_item in cart_items:
            if cart_item.qty > 1:
                is_order_type_business = True
        items_to_delete = []
        old_to_new_id_map = []
        if is_order_type_business:
            # Convert single-seat registrations into registration-code items.
            for cart_item in cart_items:
                if hasattr(cart_item, 'paidcourseregistration'):
                    course_reg_code_item = CourseRegCodeItem.add_to_order(
                        self, cart_item.paidcourseregistration.course_id, cart_item.qty,
                    )
                    # update the discounted prices if coupon redemption applied
                    course_reg_code_item.list_price = cart_item.list_price
                    course_reg_code_item.unit_cost = cart_item.unit_cost
                    course_reg_code_item.save()
                    items_to_delete.append(cart_item)
                    old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
        else:
            # Convert registration-code items back into single-seat registrations.
            for cart_item in cart_items:
                if hasattr(cart_item, 'courseregcodeitem'):
                    paid_course_registration = PaidCourseRegistration.add_to_order(
                        self, cart_item.courseregcodeitem.course_id,
                    )
                    # update the discounted prices if coupon redemption applied
                    paid_course_registration.list_price = cart_item.list_price
                    paid_course_registration.unit_cost = cart_item.unit_cost
                    paid_course_registration.save()
                    items_to_delete.append(cart_item)
                    old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
        for item in items_to_delete:
            item.delete()
        self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
        self.save()
        return old_to_new_id_map
    def generate_pdf_receipt(self, order_items):
        """
        Generates the pdf receipt for the given order_items
        and returns the pdf_buffer.
        """
        items_data = []
        for item in order_items:
            item_total = item.qty * item.unit_cost
            items_data.append({
                'item_description': item.pdf_receipt_display_name,
                'quantity': item.qty,
                'list_price': item.get_list_price(),
                'discount': item.get_list_price() - item.unit_cost,
                'item_total': item_total
            })
        pdf_buffer = BytesIO()
        # A receipt is a fully-paid invoice: payment_received == total, balance 0.
        PDFInvoice(
            items_data=items_data,
            item_id=str(self.id),
            date=self.purchase_time,
            is_invoice=False,
            total_cost=self.total_cost,
            payment_received=self.total_cost,
            balance=0
        ).generate_pdf(pdf_buffer)
        return pdf_buffer
    def generate_registration_codes_csv(self, orderitems, site_name):
        """
        this function generates the csv file

        Returns (csv_file, course_names): an in-memory CSV listing every
        registration code generated for this order plus its redemption URL,
        and the display names of the courses involved.
        """
        course_names = []
        csv_file = StringIO.StringIO()
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
        for item in orderitems:
            course_id = item.course_id
            course = get_course_by_id(item.course_id, depth=0)
            registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
            course_names.append(course.display_name)
            for registration_code in registration_codes:
                redemption_url = reverse('register_code_redemption', args=[registration_code.code])
                url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
                # csv module on py2 needs encoded bytes for non-ASCII names.
                csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
        return csv_file, course_names
    def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, course_names):
        """
        send confirmation e-mail

        Sends one email per recipient (purchaser, optional company contact,
        optional named recipient). Business orders get an HTML email with the
        registration-code CSV and PDF receipt attached.
        """
        recipient_list = [(self.user.username, self.user.email, 'user')]  # pylint: disable=no-member
        if self.company_contact_email:
            recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
        joined_course_names = ""
        if self.recipient_email:
            recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
            joined_course_names = " " + ", ".join(course_names)
        if not is_order_type_business:
            subject = _("Order Payment Confirmation")
        else:
            subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
                course_name_list=joined_course_names
            )
        dashboard_url = '{base_url}{dashboard}'.format(
            base_url=site_name,
            dashboard=reverse('dashboard')
        )
        try:
            from_address = configuration_helpers.get_value(
                'email_from_address',
                settings.PAYMENT_CONFIRM_EMAIL
            )
            # Send a unique email for each recipient. Don't put all email addresses in a single email.
            for recipient in recipient_list:
                # Some of the names in the db end in white space.
                recipient_name = self.user.profile.name.strip()
                message = render_to_string(
                    'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
                    {
                        'order': self,
                        'recipient_name': recipient_name,
                        'recipient_type': recipient[2],
                        'site_name': site_name,
                        'order_items': orderitems,
                        'course_names': ", ".join(course_names),
                        'dashboard_url': dashboard_url,
                        'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
                        'order_placed_by': '{username} ({email})'.format(
                            username=self.user.username, email=self.user.email
                        ),
                        'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
                        'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
                        'payment_support_email': configuration_helpers.get_value(
                            'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
                        ),
                        'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
                        'payment_support_phone': configuration_helpers.get_value('payment_support_phone', settings.PAYMENT_SUPPORT_PHONE),
                        'payment_platform_name': configuration_helpers.get_value('payment_platform_name', settings.PAYMENT_PLATFORM_NAME),
                    }
                )
                email = EmailMessage(
                    subject=subject,
                    body=message,
                    from_email=from_address,
                    to=[recipient[1]]
                )
                # Only the business order is HTML formatted. A single seat order confirmation is plain text.
                if is_order_type_business:
                    email.content_subtype = "html"
                if csv_file:
                    email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
                if pdf_file is not None:
                    email.attach(u'ReceiptOrder{}.pdf'.format(str(self.id)), pdf_file.getvalue(), 'application/pdf')
                else:
                    # PDF generation failed upstream; attach a placeholder note instead.
                    file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
                    email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
                email.send()
        except (smtplib.SMTPException, BotoServerError):  # sadly need to handle diff. mail backends individually
            log.error('Failed sending confirmation e-mail for order %d', self.id)
    def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
                 country='', ccnum='', cardtype='', processor_reply_dump=''):
        """
        Call to mark this order as purchased. Iterates through its OrderItems and calls
        their purchased_callback
        `first` - first name of person billed (e.g. John)
        `last` - last name of person billed (e.g. Smith)
        `street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
        `street2` - second line of a street address of the billing address (e.g. Suite 101)
        `city` - city of the billing address (e.g. Cambridge)
        `state` - code of the state, province, or territory of the billing address (e.g. MA)
        `postalcode` - postal code of the billing address (e.g. 02142)
        `country` - country code of the billing address (e.g. US)
        `ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
        `cardtype` - 3-digit code representing the card type used (e.g. 001)
        `processor_reply_dump` - all the parameters returned by the processor
        """
        # Idempotence guard: a second call on a purchased order is a no-op.
        if self.status == 'purchased':
            log.error(
                u"`purchase` method called on order {}, but order is already purchased.".format(self.id)  # pylint: disable=no-member
            )
            return
        self.status = 'purchased'
        self.purchase_time = datetime.now(pytz.utc)
        self.bill_to_first = first
        self.bill_to_last = last
        self.bill_to_city = city
        self.bill_to_state = state
        self.bill_to_country = country
        self.bill_to_postalcode = postalcode
        # Street address and card details are only retained when the deployment
        # opts in to storing billing info.
        if settings.FEATURES['STORE_BILLING_INFO']:
            self.bill_to_street1 = street1
            self.bill_to_street2 = street2
            self.bill_to_ccnum = ccnum
            self.bill_to_cardtype = cardtype
            self.processor_reply_dump = processor_reply_dump
        # save these changes on the order, then we can tell when we are in an
        # inconsistent state
        self.save()
        # this should return all of the objects with the correct types of the
        # subclasses
        orderitems = OrderItem.objects.filter(order=self).select_subclasses()
        site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
        if self.order_type == OrderTypes.BUSINESS:
            self.update_order_type()
        for item in orderitems:
            item.purchase_item()
        csv_file = None
        course_names = []
        if self.order_type == OrderTypes.BUSINESS:
            #
            # Generate the CSV file that contains all of the RegistrationCodes that have already been
            # generated when the purchase has transacted
            #
            csv_file, course_names = self.generate_registration_codes_csv(orderitems, site_name)
        try:
            pdf_file = self.generate_pdf_receipt(orderitems)
        except Exception:  # pylint: disable=broad-except
            log.exception('Exception at creating pdf file.')
            pdf_file = None
        try:
            self.send_confirmation_emails(
                orderitems, self.order_type == OrderTypes.BUSINESS,
                csv_file, pdf_file, site_name, course_names
            )
        except Exception:  # pylint: disable=broad-except
            # Catch all exceptions here, since the Django view implicitly
            # wraps this in a transaction. If the order completes successfully,
            # we don't want to roll back just because we couldn't send
            # the confirmation email.
            log.exception('Error occurred while sending payment confirmation email')
        self._emit_order_event('Completed Order', orderitems)
    def refund(self):
        """
        Refund the given order. As of right now, this just marks the order as refunded.
        """
        self.status = 'refunded'
        self.save()
        orderitems = OrderItem.objects.filter(order=self).select_subclasses()
        self._emit_order_event('Refunded Order', orderitems)
    def _emit_order_event(self, event_name, orderitems):
        """
        Emit an analytics event with the given name for this Order. Will iterate over all associated
        OrderItems and add them as products in the event as well.
        """
        try:
            if settings.LMS_SEGMENT_KEY:
                tracking_context = tracker.get_tracker().resolve_context()
                analytics.track(self.user.id, event_name, {
                    'orderId': self.id,
                    'total': str(self.total_cost),
                    'currency': self.currency,
                    'products': [item.analytics_data() for item in orderitems]
                }, context={
                    'ip': tracking_context.get('ip'),
                    'Google Analytics': {
                        'clientId': tracking_context.get('client_id')
                    }
                })
        except Exception:  # pylint: disable=broad-except
            # Capturing all exceptions thrown while tracking analytics events. We do not want
            # an operation to fail because of an analytics event, so we will capture these
            # errors in the logs.
            log.exception(
                u'Unable to emit {event} event for user {user} and order {order}'.format(
                    event=event_name, user=self.user.id, order=self.id)
            )
    def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
                            recipient_email='', customer_reference_number=''):
        """
        This function is called after the user selects a purchase type of "Business" and
        is asked to enter the optional billing details. The billing details are updated
        for that order.
        company_name - Name of purchasing organization
        company_contact_name - Name of the key contact at the company the sale was made to
        company_contact_email - Email of the key contact at the company the sale was made to
        recipient_name - Name of the company should the invoice be sent to
        recipient_email - Email of the company should the invoice be sent to
        customer_reference_number - purchase order number of the organization associated with this Order
        """
        self.company_name = company_name
        self.company_contact_name = company_contact_name
        self.company_contact_email = company_contact_email
        self.recipient_name = recipient_name
        self.recipient_email = recipient_email
        self.customer_reference_number = customer_reference_number
        self.save()
    def generate_receipt_instructions(self):
        """
        Call to generate specific instructions for each item in the order. This gets displayed on the receipt
        page, typically. Instructions are something like "visit your dashboard to see your new courses".
        This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
        to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
        html instructions
        """
        instruction_set = set([])  # heh. not ia32 or alpha or sparc
        instruction_dict = {}
        order_items = OrderItem.objects.filter(order=self).select_subclasses()
        for item in order_items:
            item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
            instruction_dict[item_pk_with_subclass] = set_of_html
            instruction_set.update(set_of_html)
        return instruction_dict, instruction_set
    def retire(self):
        """
        Method to "retire" orders that have gone through to the payment service
        but have (erroneously) not had their statuses updated.
        This method only works on orders that satisfy the following conditions:
        1) the order status is either "cart" or "paying" (otherwise we raise
           an InvalidStatusToRetire error)
        2) the order's order item's statuses match the order's status (otherwise
           we throw an UnexpectedOrderItemStatus error)
        """
        # if an order is already retired, no-op:
        if self.status in ORDER_STATUS_MAP.values():
            return
        if self.status not in ORDER_STATUS_MAP.keys():
            raise InvalidStatusToRetire(
                "order status {order_status} is not 'paying' or 'cart'".format(
                    order_status=self.status
                )
            )
        # Validate every item first so we never partially retire an order.
        for item in self.orderitem_set.all():
            if item.status != self.status:
                raise UnexpectedOrderItemStatus(
                    "order_item status is different from order status"
                )
        self.status = ORDER_STATUS_MAP[self.status]
        self.save()
        for item in self.orderitem_set.all():
            item.retire()
    def find_item_by_course_id(self, course_id):
        """
        course_id: Course id of the item to find
        Returns OrderItem from the Order given a course_id
        Raises exception ItemNotFoundException when the item
        having the given course_id is not present in the cart
        """
        cart_items = OrderItem.objects.filter(order=self).select_subclasses()
        found_items = []
        for item in cart_items:
            # Not every OrderItem subclass is course-specific; skip those
            # without a truthy course_id attribute.
            if getattr(item, 'course_id', None):
                if item.course_id == course_id:
                    found_items.append(item)
        if not found_items:
            raise ItemNotFoundInCartException
        return found_items
class OrderItem(TimeStampedModel):
    """
    This is the basic interface for order items.
    Order items are line items that fill up the shopping carts and orders.
    Each implementation of OrderItem should provide its own purchased_callback as
    a method.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # InheritanceManager lets queries resolve rows to concrete subclasses
    # via select_subclasses().
    objects = InheritanceManager()
    order = models.ForeignKey(Order, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
    user = models.ForeignKey(User, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
    qty = models.IntegerField(default=1)
    unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
    line_desc = models.CharField(default="Misc. Item", max_length=1024)
    currency = models.CharField(default="usd", max_length=8)  # lower case ISO currency codes
    fulfilled_time = models.DateTimeField(null=True, db_index=True)
    refund_requested_time = models.DateTimeField(null=True, db_index=True)
    service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # general purpose field, not user-visible. Used for reporting
    report_comments = models.TextField(default="")
    @property
    def line_cost(self):
        """ Return the total cost of this OrderItem """
        return self.qty * self.unit_cost
    @classmethod
    def add_to_order(cls, order, *args, **kwargs):
        """
        A suggested convenience function for subclasses.
        NOTE: This does not add anything to the cart. That is left up to the
        subclasses to implement for themselves
        """
        # this is a validation step to verify that the currency of the item we
        # are adding is the same as the currency of the order we are adding it
        # to
        currency = kwargs.get('currency', 'usd')
        if order.currency != currency and order.orderitem_set.exists():
            raise InvalidCartItem(_("Trying to add a different currency into the cart"))
    @transaction.atomic
    def purchase_item(self):
        """
        This is basically a wrapper around purchased_callback that handles
        modifying the OrderItem itself
        """
        # Run the subclass fulfillment hook first; if it raises, the atomic
        # block rolls back the status/fulfilled_time changes below.
        self.purchased_callback()
        self.status = 'purchased'
        self.fulfilled_time = datetime.now(pytz.utc)
        self.save()
    def start_purchase(self):
        """
        Start the purchase process. This will set the order item status to "paying",
        at which point it should no longer be modified.
        """
        self.status = 'paying'
        self.save()
    def purchased_callback(self):
        """
        This is called on each inventory item in the shopping cart when the
        purchase goes through.
        """
        raise NotImplementedError
    def generate_receipt_instructions(self):
        """
        This is called on each item in a purchased order to generate receipt instructions.
        This should return a list of `ReceiptInstruction`s in HTML string
        Default implementation is to return an empty set
        """
        return self.pk_with_subclass, set([])
    @property
    def pk_with_subclass(self):
        """
        Returns a named tuple that annotates the pk of this instance with its class, to fully represent
        a pk of a subclass (inclusive) of OrderItem
        """
        return OrderItemSubclassPK(type(self), self.pk)
    @property
    def is_discounted(self):
        """
        Returns True if the item a discount coupon has been applied to the OrderItem and False otherwise.
        Earlier, the OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item to be non discounted if list_price is None or list_price == unit_cost. In
        these lines, an item is discounted if it's non-None and list_price and unit_cost mismatch.
        This should work with both new and old records.
        """
        return self.list_price and self.list_price != self.unit_cost
    def get_list_price(self):
        """
        Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
        """
        return self.list_price if self.list_price else self.unit_cost
    @property
    def single_item_receipt_template(self):
        """
        The template that should be used when there's only one item in the order
        """
        return 'shoppingcart/receipt.html'
    @property
    def single_item_receipt_context(self):
        """
        Extra variables needed to render the template specified in
        `single_item_receipt_template`
        """
        return {}
    def additional_instruction_text(self, **kwargs):  # pylint: disable=unused-argument
        """
        Individual instructions for this order item.
        Currently, only used for emails.
        """
        return ''
    @property
    def pdf_receipt_display_name(self):
        """
        How to display this item on a PDF printed receipt file.
        This can be overridden by the subclasses of OrderItem
        """
        course_key = getattr(self, 'course_id', None)
        if course_key:
            course = get_course_by_id(course_key, depth=0)
            return course.display_name
        else:
            # Non-course items must override this property themselves.
            raise Exception(
                "Not Implemented. OrderItems that are not Course specific should have"
                " a overridden pdf_receipt_display_name property"
            )
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        The default implementation returns defaults for most attributes. When no name or
        category is specified by the implementation, the string 'N/A' is placed for the
        name and category. This should be handled appropriately by all implementations.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        return {
            'id': self.id,
            'sku': type(self).__name__,
            'name': 'N/A',
            'price': str(self.unit_cost),
            'quantity': self.qty,
            'category': 'N/A',
        }
    def retire(self):
        """
        Called by the `retire` method defined in the `Order` class. Retires
        an order item if its (and its order's) status was erroneously not
        updated to "purchased" after the order was processed.
        """
        self.status = ORDER_STATUS_MAP[self.status]
        self.save()
class Invoice(TimeStampedModel):
    """
    This table capture all the information needed to support "invoicing"
    which is when a user wants to purchase Registration Codes,
    but will not do so via a Credit Card transaction.
    """
    class Meta(object):
        app_label = "shoppingcart"
    company_name = models.CharField(max_length=255, db_index=True)
    company_contact_name = models.CharField(max_length=255)
    company_contact_email = models.CharField(max_length=255)
    recipient_name = models.CharField(max_length=255)
    recipient_email = models.CharField(max_length=255)
    address_line_1 = models.CharField(max_length=255)
    address_line_2 = models.CharField(max_length=255, null=True, blank=True)
    address_line_3 = models.CharField(max_length=255, null=True, blank=True)
    city = models.CharField(max_length=255, null=True)
    state = models.CharField(max_length=255, null=True)
    zip = models.CharField(max_length=15, null=True)
    country = models.CharField(max_length=64, null=True)
    # This field has been deprecated.
    # The total amount can now be calculated as the sum
    # of each invoice item associated with the invoice.
    # For backwards compatibility, this field is maintained
    # and written to during invoice creation.
    total_amount = models.FloatField()
    # This field has been deprecated in order to support
    # invoices for items that are not course-related.
    # Although this field is still maintained for backwards
    # compatibility, you should use CourseRegistrationCodeInvoiceItem
    # to look up the course ID for purchased redeem codes.
    course_id = CourseKeyField(max_length=255, db_index=True)
    internal_reference = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Internal reference code for this invoice.")
    )
    customer_reference_number = models.CharField(
        max_length=63,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Customer's reference code for this invoice.")
    )
    is_valid = models.BooleanField(default=True)
    @classmethod
    def get_invoice_total_amount_for_course(cls, course_key):
        """
        returns the invoice total amount generated by course.

        Sums `total_amount` over all valid invoices for `course_key`;
        returns 0 when there are none (Sum yields None on an empty set).
        """
        result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
        total = result.get('total', 0)
        return total if total else 0
    def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
        """
        Generates the pdf invoice for the given course
        and returns the pdf_buffer.
        """
        # Per-item discount = full price minus per-item sale price
        # (note: `/ quantity` binds tighter than the subtraction, by design).
        discount_per_item = float(course_price) - sale_price / quantity
        # Effective per-item price actually charged.
        list_price = course_price - discount_per_item
        items_data = [{
            'item_description': course.display_name,
            'quantity': quantity,
            'list_price': list_price,
            'discount': discount_per_item,
            'item_total': quantity * list_price
        }]
        pdf_buffer = BytesIO()
        # An invoice (unlike a receipt) carries the full amount as balance due.
        PDFInvoice(
            items_data=items_data,
            item_id=str(self.id),
            date=datetime.now(pytz.utc),
            is_invoice=True,
            total_cost=float(self.total_amount),
            payment_received=0,
            balance=float(self.total_amount)
        ).generate_pdf(pdf_buffer)
        return pdf_buffer
    def snapshot(self):
        """Create a snapshot of the invoice.
        A snapshot is a JSON-serializable representation
        of the invoice's state, including its line items
        and associated transactions (payments/refunds).
        This is useful for saving the history of changes
        to the invoice.
        Returns:
            dict
        """
        return {
            'internal_reference': self.internal_reference,
            'customer_reference': self.customer_reference_number,
            'is_valid': self.is_valid,
            'contact_info': {
                'company_name': self.company_name,
                'company_contact_name': self.company_contact_name,
                'company_contact_email': self.company_contact_email,
                'recipient_name': self.recipient_name,
                'recipient_email': self.recipient_email,
                'address_line_1': self.address_line_1,
                'address_line_2': self.address_line_2,
                'address_line_3': self.address_line_3,
                'city': self.city,
                'state': self.state,
                'zip': self.zip,
                'country': self.country,
            },
            'items': [
                item.snapshot()
                for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
            ],
            'transactions': [
                trans.snapshot()
                for trans in InvoiceTransaction.objects.filter(invoice=self)
            ],
        }
    def __unicode__(self):
        """Human-readable label: internal reference (or placeholder) + created date."""
        label = (
            unicode(self.internal_reference)
            if self.internal_reference
            else u"No label"
        )
        created = (
            self.created.strftime("%Y-%m-%d")
            if self.created
            else u"No date"
        )
        return u"{label} ({date_created})".format(
            label=label, date_created=created
        )
# Valid states for an InvoiceTransaction (Django `choices` pairs).
INVOICE_TRANSACTION_STATUSES = (
    # A payment/refund is in process, but money has not yet been transferred
    ('started', 'started'),
    # A payment/refund has completed successfully
    # This should be set ONLY once money has been successfully exchanged.
    ('completed', 'completed'),
    # A payment/refund was promised, but was cancelled before
    # money had been transferred. An example would be
    # cancelling a refund check before the recipient has
    # a chance to deposit it.
    ('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
"""
class Meta(object):
app_label = "shoppingcart"
invoice = models.ForeignKey(Invoice)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default='started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
if found Returns the Invoice Transaction object for the given invoice_id
else returns None
"""
try:
return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
returns the total amount of the paid invoices.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum(
'amount',
output_field=models.DecimalField(decimal_places=2, max_digits=30)
)
)
total = result.get('total', 0)
return total if total else 0
    def snapshot(self):
        """Create a snapshot of the invoice transaction.

        The returned dictionary is JSON-serializable.

        Returns:
            dict
        """
        return {
            # Decimal is not JSON-serializable, so stringify the amount.
            'amount': unicode(self.amount),
            'currency': self.currency,
            'comments': self.comments,
            'status': self.status,
            'created_by': self.created_by.username,
            'last_modified_by': self.last_modified_by.username
        }
class InvoiceItem(TimeStampedModel):
    """
    This is the basic interface for invoice items.

    Each invoice item represents a "line" in the invoice.
    For example, in an invoice for course registration codes,
    there might be an invoice item representing 10 registration
    codes for the DemoX course.
    """
    class Meta(object):
        app_label = "shoppingcart"

    # InheritanceManager lets queries return the concrete subclass
    # (e.g. CourseRegistrationCodeInvoiceItem) via select_subclasses().
    objects = InheritanceManager()

    invoice = models.ForeignKey(Invoice, db_index=True)
    qty = models.IntegerField(
        default=1,
        help_text=ugettext_lazy("The number of items sold.")
    )
    # Per-unit price after any discounts have been applied.
    unit_price = models.DecimalField(
        default=0.0,
        decimal_places=2,
        max_digits=30,
        help_text=ugettext_lazy("The price per item sold, including discounts.")
    )
    currency = models.CharField(
        default="usd",
        max_length=8,
        help_text=ugettext_lazy("Lower-case ISO currency codes")
    )

    def snapshot(self):
        """Create a snapshot of the invoice item.

        The returned dictionary is JSON-serializable.

        Returns:
            dict
        """
        return {
            'qty': self.qty,
            # Decimal is not JSON-serializable, so stringify the price.
            'unit_price': unicode(self.unit_price),
            'currency': self.currency
        }
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
    """
    An invoice line item that represents payment for
    registration codes in a particular course.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(max_length=128, db_index=True)

    def snapshot(self):
        """Create a snapshot of the invoice item.

        Identical to the parent snapshot, with one extra
        `course_id` field.

        Returns:
            dict
        """
        data = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
        data['course_id'] = unicode(self.course_id)
        return data
class InvoiceHistory(models.Model):
    """History of changes to invoices.

    This table stores snapshots of invoice state,
    including the associated line items and transactions
    (payments/refunds).

    Entries in the table are created, but never deleted
    or modified.

    We use Django signals to save history entries on change
    events.  These signals are fired within a database
    transaction, so the history record is created only
    if the invoice change is successfully persisted.
    """
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    invoice = models.ForeignKey(Invoice)

    # JSON-serialized representation of the current state
    # of the invoice, including its line items and
    # transactions (payments/refunds).
    snapshot = models.TextField(blank=True)

    @classmethod
    def save_invoice_snapshot(cls, invoice):
        """Save a snapshot of the invoice's current state.

        Arguments:
            invoice (Invoice): The invoice to save.
        """
        cls.objects.create(
            invoice=invoice,
            snapshot=json.dumps(invoice.snapshot())
        )

    @staticmethod
    def snapshot_receiver(sender, instance, **kwargs):  # pylint: disable=unused-argument
        """Signal receiver that saves a snapshot of an invoice.

        Arguments:
            sender: Not used, but required by Django signals.
            instance (Invoice, InvoiceItem, or InvoiceTransaction)
        """
        if isinstance(instance, Invoice):
            InvoiceHistory.save_invoice_snapshot(instance)
        # Items/transactions carry an `invoice` FK; snapshot their parent.
        elif hasattr(instance, 'invoice'):
            InvoiceHistory.save_invoice_snapshot(instance.invoice)

    class Meta(object):
        get_latest_by = "timestamp"
        app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
class CourseRegistrationCode(models.Model):
    """
    This table contains registration codes.

    With a registration code, a user can register for a course for free.
    """
    class Meta(object):
        app_label = "shoppingcart"

    code = models.CharField(max_length=32, db_index=True, unique=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created_by = models.ForeignKey(User, related_name='created_by_user')
    created_at = models.DateTimeField(auto_now_add=True)
    # Set when the code was generated through a bulk purchase order.
    order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
    mode_slug = models.CharField(max_length=100, null=True)
    # Codes can be invalidated without being deleted.
    is_valid = models.BooleanField(default=True)

    # For backwards compatibility, we maintain the FK to "invoice"
    # In the future, we will remove this in favor of the FK
    # to "invoice_item" (which can be used to look up the invoice).
    invoice = models.ForeignKey(Invoice, null=True)
    invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)

    @classmethod
    def order_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via bulk purchase scenario.
        """
        return cls.objects.filter(order__isnull=False, course_id=course_id)

    @classmethod
    def invoice_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via invoice.
        """
        return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
    """
    This model contains the registration-code redemption info.
    """
    class Meta(object):
        app_label = "shoppingcart"

    order = models.ForeignKey(Order, db_index=True, null=True)
    registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
    redeemed_by = models.ForeignKey(User, db_index=True)
    redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)

    @classmethod
    def registration_code_used_for_enrollment(cls, course_enrollment):
        """
        Returns RegistrationCodeRedemption object if registration code
        has been used during the course enrollment else Returns None.
        """
        # theoretically there could be more than one (e.g. someone self-unenrolls
        # then re-enrolls with a different regcode)
        reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
        if reg_codes:
            # return the first one. In all normal use cases of registration codes
            # the user will only have one
            return reg_codes[0]
        return None

    @classmethod
    def is_registration_code_redeemed(cls, course_reg_code):
        """
        Checks the existence of the registration code
        in the RegistrationCodeRedemption.
        """
        return cls.objects.filter(registration_code__code=course_reg_code).exists()

    @classmethod
    def get_registration_code_redemption(cls, code, course_id):
        """
        Returns the registration code redemption object if found else returns None.
        """
        try:
            code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
        except cls.DoesNotExist:
            code_redemption = None
        return code_redemption

    @classmethod
    def create_invoice_generated_registration_redemption(cls, course_reg_code, user):  # pylint: disable=invalid-name
        """
        This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
        and thus the order_id is missing.
        """
        code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
        code_redemption.save()
        return code_redemption
class SoftDeleteCouponManager(models.Manager):
    """Manager for coupons that are soft-deleted via the is_active flag."""

    def get_active_coupons_queryset(self):
        """
        Return only coupons that have is_active=True.
        """
        all_coupons = super(SoftDeleteCouponManager, self).get_queryset()
        return all_coupons.filter(is_active=True)

    def get_queryset(self):
        """
        Return every coupon object, active or not.
        """
        return super(SoftDeleteCouponManager, self).get_queryset()
class Coupon(models.Model):
    """
    This table contains coupon codes.

    A user can get a discount offer on a course if they provide a coupon code.
    """
    class Meta(object):
        app_label = "shoppingcart"

    code = models.CharField(max_length=32, db_index=True)
    description = models.CharField(max_length=255, null=True, blank=True)
    # The single course this coupon applies to.
    course_id = CourseKeyField(max_length=255)
    # Whole-number percentage, e.g. 25 means 25% off.
    percentage_discount = models.IntegerField(default=0)
    created_by = models.ForeignKey(User)
    created_at = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag; see SoftDeleteCouponManager.
    is_active = models.BooleanField(default=True)
    expiration_date = models.DateTimeField(null=True, blank=True)

    def __unicode__(self):
        return "[Coupon] code: {} course: {}".format(self.code, self.course_id)

    objects = SoftDeleteCouponManager()

    @property
    def display_expiry_date(self):
        """
        Return the coupon expiration date in a readable format.
        """
        # The stored expiration is exclusive, so display the day before it
        # as the last valid day.
        return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
class CouponRedemption(models.Model):
    """
    Records that a coupon code was redeemed by a user against an order.
    """
    class Meta(object):
        app_label = "shoppingcart"

    order = models.ForeignKey(Order, db_index=True)
    user = models.ForeignKey(User, db_index=True)
    coupon = models.ForeignKey(Coupon, db_index=True)

    @classmethod
    def remove_code_redemption_from_item(cls, item, user):
        """
        If an item is removed from the shopping cart, remove
        the corresponding redemption info of its coupon code.

        Arguments:
            item: the OrderItem being removed from the cart.
            user (User): owner of the cart.
        """
        order_item_course_id = item.course_id
        try:
            # Try to remove redemption information of coupon code, if it exists.
            coupon_redemption = cls.objects.get(
                user=user,
                coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
                order=item.order_id
            )
            coupon_redemption.delete()
            log.info(
                u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
                coupon_redemption.coupon.code,
                user,
                str(item.id),
            )
        except CouponRedemption.DoesNotExist:
            log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))

    @classmethod
    def remove_coupon_redemption_from_cart(cls, user, cart):
        """
        Delete any coupon redemption recorded for ``user`` on the ``cart`` order.
        """
        coupon_redemption = cls.objects.filter(user=user, order=cart)
        if coupon_redemption:
            coupon_redemption.delete()
            log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)

    @classmethod
    def get_discount_price(cls, percentage_discount, value):
        """
        Return ``value`` reduced by ``percentage_discount`` percent,
        with the discount rounded to 2 decimal places.
        """
        # Compute entirely in Decimal.  The previous implementation built the
        # ratio via binary floating point (Decimal(percentage_discount / 100.00)),
        # which can misround half-way monetary amounts.
        discount = Decimal("{0:.2f}".format(Decimal(percentage_discount) / 100 * value))
        return value - discount

    @classmethod
    def add_coupon_redemption(cls, coupon, order, cart_items):
        """
        Apply ``coupon`` to the matching course item in ``cart_items`` and
        record the redemption against ``order``.

        Returns:
            bool: True if a discount was applied, False otherwise.

        Raises:
            MultipleCouponsNotAllowedException: if a coupon redemption
                already exists for this order.
        """
        is_redemption_applied = False
        coupon_redemptions = cls.objects.filter(order=order, user=order.user)
        for coupon_redemption in coupon_redemptions:
            if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
                # log.error, not log.exception: there is no active exception
                # here, so log.exception would emit a bogus traceback.
                log.error(
                    u"Coupon redemption already exist for user '%s' against order id '%s'",
                    order.user.username,
                    order.id,
                )
                raise MultipleCouponsNotAllowedException
        for item in cart_items:
            if item.course_id:
                if item.course_id == coupon.course_id:
                    coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
                    coupon_redemption.save()
                    discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
                    # Preserve the pre-discount price, then discount the unit cost.
                    item.list_price = item.unit_cost
                    item.unit_cost = discount_price
                    item.save()
                    log.info(
                        u"Discount generated for user %s against order id '%s'",
                        order.user.username,
                        order.id,
                    )
                    is_redemption_applied = True
                    return is_redemption_applied
        return is_redemption_applied

    @classmethod
    def get_top_discount_codes_used(cls, course_id):
        """
        Returns the top discount codes used.

        QuerySet = [
            {
                'coupon__percentage_discount': 22,
                'coupon__code': '12',
                'coupon__used_count': '2',
            },
            {
                ...
            }
        ]
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
            'coupon__code', 'coupon__percentage_discount'
        ).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')

    @classmethod
    def get_total_coupon_code_purchases(cls, course_id):
        """
        Returns total seat purchases made using coupon codes.
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
    """
    This is an inventory item for paying for a course registration.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(max_length=128, db_index=True)
    mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
    # Linked once the purchase completes and the user is enrolled.
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)

    @classmethod
    def get_self_purchased_seat_count(cls, course_key, status='purchased'):
        """
        Returns the count of paid_course items filtered by course_id and status.
        """
        return cls.objects.filter(course_id=course_key, status=status).count()

    @classmethod
    def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
        """
        Returns PaidCourseRegistration object if user has payed for
        the course enrollment else Returns None.
        """
        try:
            return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
                                      status='purchased').latest('id')
        except PaidCourseRegistration.DoesNotExist:
            return None

    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
            if isinstance(item, cls)
        ]

    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated.
        """
        total_cost = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum(
                F('qty') * F('unit_cost'),
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )
        # Sum() yields None when there are no matching rows.
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost

    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
                     cost=None, currency=None):  # pylint: disable=arguments-differ
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item.
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            log.warning(
                u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
                order.user.email,
                course_id,
                order.id,
            )
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_MODE
            course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
        # get_or_create: re-adding the same course updates the existing item.
        item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
        item.status = order.status
        item.mode = course_mode.slug
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        item.line_desc = _(u'Registration for Course: {course_name}').format(
            course_name=course.display_name_with_default_escaped)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        # Notify listeners that a paid enrollment has been started (not yet purchased).
        CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
                                          user=order.user, mode=item.mode, course_id=course_id,
                                          cost=cost, currency=currency)
        return item

    def purchased_callback(self):
        """
        When purchased, this should enroll the user in the course.  We are assuming that
        course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
        in CourseEnrollmentAllowed will the user be allowed to enroll.  Otherwise requiring payment
        would in fact be quite silly since there's a clear back door.
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)
        # enroll in course and link to the enrollment_id
        self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
        self.save()
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))
        self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
                                           cost=self.line_cost, currency=self.currency)

    def generate_receipt_instructions(self):
        """
        Generates instructions when the user has purchased a PaidCourseRegistration.
        Basically tells the user to visit the dashboard to see their new classes.
        """
        notification = _(
            u"Please visit your {link_start}dashboard{link_end} "
            u"to see your new course."
        ).format(
            link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
            link_end=u'</a>',
        )
        return self.pk_with_subclass, set([notification])

    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database.  If not found, returns u"".
        Otherwise returns the annotation.
        """
        try:
            return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
        except PaidCourseRegistrationAnnotation.DoesNotExist:
            return u""

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the Order Item is associated with a course, additional fields will be populated with
        course information.  If there is a mode associated, the mode data is included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(PaidCourseRegistration, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItem(OrderItem):
    """
    This is an inventory item for paying for
    generating course registration codes.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(max_length=128, db_index=True)
    mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)

    @classmethod
    def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
        """
        Returns the sum of bulk purchased seats.
        """
        total = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
        # Sum() yields None when there are no matching rows.
        if result['total'] is not None:
            total = result['total']
        return total

    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
            if isinstance(item, cls)
        ]

    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated.
        """
        total_cost = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum(
                F('qty') * F('unit_cost'),
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost

    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
                     cost=None, currency=None):  # pylint: disable=arguments-differ
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item.
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
                        .format(order.user.email, course_id, order.id))
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
            course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
        item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)  # pylint: disable=unused-variable
        item.status = order.status
        item.mode = course_mode.slug
        item.unit_cost = cost
        item.list_price = cost
        # qty is the number of enrollment codes being purchased.
        item.qty = qty
        item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
            course_name=course.display_name_with_default_escaped)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        return item

    def purchased_callback(self):
        """
        The purchase is completed, this OrderItem type will generate Registration Codes that will
        be redeemed by users.
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)
        total_registration_codes = int(self.qty)
        # we need to import here because of a circular dependency
        # we should ultimately refactor code to have save_registration_code in this models.py
        # file, but there's also a shared dependency on a random string generator which
        # is in another PR (for another feature)
        from lms.djangoapps.instructor.views.api import save_registration_code
        for i in range(total_registration_codes):  # pylint: disable=unused-variable
            save_registration_code(self.user, self.course_id, self.mode, order=self.order)
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))

    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database.  If not found, returns u"".
        Otherwise returns the annotation.
        """
        try:
            return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
        except CourseRegCodeItemAnnotation.DoesNotExist:
            return u""

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the OrderItem is associated with a course, additional fields will be populated with
        course information.  If a mode is available, it will be included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CourseRegCodeItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItemAnnotation(models.Model):
    """
    Maps a course_id to an additional annotation.

    This is specifically needed because when Stanford generates reports for
    the paid courses, each report item must contain the payment account
    associated with a course.  There was no "SKU" / stock-item concept to
    hold that association, so this model retrofits it.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    annotation = models.TextField(null=True)

    def __unicode__(self):
        # pylint: disable=no-member
        course = self.course_id.to_deprecated_string()
        return u"{} : {}".format(course, self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
    """
    Maps a course_id to an additional annotation.

    This is specifically needed because when Stanford generates reports for
    the paid courses, each report item must contain the payment account
    associated with a course.  There was no "SKU" / stock-item concept to
    hold that association, so this model retrofits it.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    annotation = models.TextField(null=True)

    def __unicode__(self):
        # pylint: disable=no-member
        course = self.course_id.to_deprecated_string()
        return u"{} : {}".format(course, self.annotation)
class CertificateItem(OrderItem):
    """
    This is an inventory item for purchasing certificates.
    """
    class Meta(object):
        app_label = "shoppingcart"

    course_id = CourseKeyField(max_length=128, db_index=True)
    course_enrollment = models.ForeignKey(CourseEnrollment)
    mode = models.SlugField()

    # NOTE: despite the method-style `self`-less signature, this is a
    # module-registered signal receiver bound at class-definition time.
    @receiver(UNENROLL_DONE)
    def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs):  # pylint: disable=no-self-argument,unused-argument
        """
        When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
        occurred in a verified certificate that was within the refund deadline.  If so, it actually performs the
        refund.

        Returns the refunded certificate on a successful refund; else, it returns nothing.
        """
        # Only refund verified cert unenrollments that are within bounds of the expiration date
        if (not course_enrollment.refundable()) or skip_refund:
            return
        target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
        try:
            target_cert = target_certs[0]
        except IndexError:
            log.warning(
                u"Matching CertificateItem not found while trying to refund.  User %s, Course %s",
                course_enrollment.user,
                course_enrollment.course_id,
            )
            return
        target_cert.status = 'refunded'
        target_cert.refund_requested_time = datetime.now(pytz.utc)
        target_cert.save()
        target_cert.order.refund()
        order_number = target_cert.order_id
        # send billing an email so they can handle refunding
        subject = _("[Refund] User-Requested Refund")
        message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
                                                                                                      user_email=course_enrollment.user.email,
                                                                                                      order_number=order_number)
        to_email = [settings.PAYMENT_SUPPORT_EMAIL]
        from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
        try:
            send_mail(subject, message, from_email, to_email, fail_silently=False)
        except Exception as exception:  # pylint: disable=broad-except
            # A failed refund-notification email must not abort the refund itself.
            err_str = ('Failed sending email to billing to request a refund for verified certificate'
                       ' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
            log.error(err_str.format(
                user=course_enrollment.user,
                course=course_enrollment.course_id,
                ce_id=course_enrollment.id,
                order=order_number,
                exception=exception,
            ))
        return target_cert

    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
        """
        Add a CertificateItem to an order.

        Returns the CertificateItem object after saving.

        `order` - an order that this item should be added to, generally the cart order
        `course_id` - the course that we would like to purchase as a CertificateItem
        `cost` - the amount the user will be paying for this CertificateItem
        `mode` - the course mode that this certificate is going to be issued for

        This item also creates a new enrollment if none exists for this user and this course.

        Example Usage:
            cart = Order.get_cart_for_user(user)
            CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
        """
        super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
        course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
        # do some validation on the enrollment mode
        valid_modes = CourseMode.modes_for_course_dict(course_id)
        if mode in valid_modes:
            mode_info = valid_modes[mode]
        else:
            msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
            log.error(msg)
            raise InvalidCartItem(
                _(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
            )
        # `_created` (not `_`) so we don't shadow the gettext alias used below.
        item, _created = cls.objects.get_or_create(
            order=order,
            user=order.user,
            course_id=course_id,
            course_enrollment=course_enrollment,
            mode=mode,
        )
        item.status = order.status
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        course_name = modulestore().get_course(course_id).display_name
        # Translators: In this particular case, mode_name refers to a
        # particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
        # by which a user could enroll in the given course.
        item.line_desc = _("{mode_name} for course {course}").format(
            mode_name=mode_info.name,
            course=course_name
        )
        item.currency = currency
        order.currency = currency
        order.save()
        item.save()
        # signal course added to cart
        course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)
        return item

    def purchased_callback(self):
        """
        When purchase goes through, activate and update the course enrollment for the correct mode.
        """
        self.course_enrollment.change_mode(self.mode)
        self.course_enrollment.activate()
        self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
                                           cost=self.unit_cost, currency=self.currency)

    def additional_instruction_text(self):
        """
        Build the receipt text: verification reminder (verified mode only)
        plus the refund-policy reminder with the billing contact address.
        """
        verification_reminder = ""
        refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course "
                                "start date. ")
        is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
        is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()
        if is_enrollment_mode_verified:
            domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
            path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
            verification_url = "http://{domain}{path}".format(domain=domain, path=path)
            verification_reminder = _(
                "If you haven't verified your identity yet, please start the verification process ({verification_url})."
            ).format(verification_url=verification_url)
        if is_professional_mode_verified:
            # Professional mode has a shorter (2-day) refund window.
            refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
                                    "course start date. ")
        refund_reminder = _(
            "{refund_reminder_msg}"
            "To receive your refund, contact {billing_email}. "
            "Please include your order number in your email. "
            "Please do NOT include your credit card information."
        ).format(
            refund_reminder_msg=refund_reminder_msg,
            billing_email=settings.PAYMENT_SUPPORT_EMAIL
        )
        # Need this to be unicode in case the reminder strings
        # have been translated and contain non-ASCII unicode
        return u"{verification_reminder} {refund_reminder}".format(
            verification_reminder=verification_reminder,
            refund_reminder=refund_reminder
        )

    @classmethod
    def verified_certificates_count(cls, course_id, status):
        """Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
        return use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())

    # TODO combine these three methods into one
    @classmethod
    def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
        """
        Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.

        Sample usages:
        - status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
        - status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
        etc
        """
        query = use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
        if query is None:
            return Decimal(0.00)
        else:
            return query

    @classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        # Count purchased verified certs whose unit cost exceeds the course's
        # minimum verified price (i.e. buyers who contributed extra).
        return use_read_replica_if_available(
            CertificateItem.objects.filter(
                course_id=course_id,
                mode='verified',
                status='purchased',
                unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the CertificateItem is associated with a course, additional fields will be populated with
        course information.  If there is a mode associated with the certificate, it is included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CertificateItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
return data
class DonationConfiguration(ConfigurationModel):
    """Configure whether donations are enabled on the site."""

    class Meta(ConfigurationModel.Meta):
        # Keep the table in the shoppingcart app's namespace.
        app_label = "shoppingcart"
class Donation(OrderItem):
    """A donation made by a user.

    Donations can be made for a specific course or to the organization as a whole.
    Users can choose the donation amount.
    """

    class Meta(object):
        app_label = "shoppingcart"

    # Types of donations
    DONATION_TYPES = (
        ("general", "A general donation"),
        ("course", "A donation to a particular course")
    )

    # The type of donation
    donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)

    # If a donation is made for a specific course, then store the course ID here.
    # If the donation is made to the organization as a whole,
    # set this field to CourseKeyField.Empty
    course_id = CourseKeyField(max_length=255, db_index=True)

    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
        """Add a donation to an order.

        Args:
            order (Order): The order to add this donation to.
            donation_amount (Decimal): The amount the user is donating.

        Keyword Args:
            course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the the donation.

        Raises:
            InvalidCartItem: The provided course ID is not valid.

        Returns:
            Donation
        """
        # This will validate the currency but won't actually add the item to the order.
        super(Donation, cls).add_to_order(order, currency=currency)

        # Create a line item description, including the name of the course
        # if this is a per-course donation.
        # This will raise an exception if the course can't be found.
        description = cls._line_item_description(course_id=course_id)

        params = {
            "order": order,
            "user": order.user,
            "status": order.status,
            "qty": 1,
            "unit_cost": donation_amount,
            "currency": currency,
            "line_desc": description
        }

        # Per-course donations record the course and a "course" type;
        # otherwise the donation is a "general" one to the organization.
        if course_id is not None:
            params["course_id"] = course_id
            params["donation_type"] = "course"
        else:
            params["donation_type"] = "general"

        return cls.objects.create(**params)

    def purchased_callback(self):
        """Donations do not need to be fulfilled, so this method does nothing."""
        pass

    def generate_receipt_instructions(self):
        """Provide information about tax-deductible donations in the receipt.

        Returns:
            tuple of (Donation, unicode)
        """
        return self.pk_with_subclass, set([self._tax_deduction_msg()])

    def additional_instruction_text(self, **kwargs):
        """Provide information about tax-deductible donations in the confirmation email.

        Returns:
            unicode
        """
        return self._tax_deduction_msg()

    def _tax_deduction_msg(self):
        """Return the translated version of the tax deduction message.

        Returns:
            unicode
        """
        return _(
            u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
            u"This receipt was prepared to support charitable contributions for tax purposes. "
            u"We confirm that neither goods nor services were provided in exchange for this gift."
        ).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))

    @classmethod
    def _line_item_description(cls, course_id=None):
        """Create a line-item description for the donation.

        Includes the course display name if provided.

        Keyword Arguments:
            course_id (CourseKey)

        Raises:
            CourseDoesNotExistException: The course ID is not valid.

        Returns:
            unicode
        """
        # If a course ID is provided, include the display name of the course
        # in the line item description.
        if course_id is not None:
            course = modulestore().get_course(course_id)
            if course is None:
                msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
                log.error(msg)
                raise CourseDoesNotExistException(
                    _(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
                )

            return _(u"Donation for {course}").format(course=course.display_name)

        # The donation is for the organization as a whole, not a specific course
        else:
            return _(u"Donation for {platform_name}").format(
                platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
            )

    @property
    def single_item_receipt_context(self):
        # Flag consumed by the receipt template to render donation wording.
        return {
            'receipt_has_donation_item': True,
        }

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the donation is associated with a course, additional fields will be populated with
        course information. When no name or category is specified by the implementation, the
        platform name is used as a default value for required event fields, to declare that
        the Order is specific to the platform, rather than a specific product name or category.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(Donation, self).analytics_data()
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        else:
            data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
            data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
        return data

    @property
    def pdf_receipt_display_name(self):
        """
        How to display this item on a PDF printed receipt file.
        """
        return self._line_item_description(course_id=self.course_id)
|
caesar2164/edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 91,861 | 0.003103 |
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Bram Cohen, Uoti Urpala and John Hoffman
# Converted to a kamaelia threadedcomponent by Ryan Lothian
from __future__ import division
from BitTorrent.platform import install_translation
install_translation()
import sys
import os
import threading
from time import time, strftime, sleep
from cStringIO import StringIO
from Axon.ThreadedComponent import threadedcomponent
from Axon.Component import component
from BitTorrent.download import Feedback, Multitorrent
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import printHelp
from BitTorrent.zurllib import urlopen
from BitTorrent.bencode import bdecode
from BitTorrent.ConvertedMetainfo import ConvertedMetainfo
from BitTorrent.prefs import Preferences
from BitTorrent import configfile
from BitTorrent import BTFailure
from BitTorrent import version
from BitTorrent import GetTorrent
class Lagger(component):
    """Co-operative component that sleeps briefly on every scheduler pass,
    throttling the overall Kamaelia loop rate."""
    def main(self):
        while True:
            yield 1
            sleep(0.05)
class TorrentClient(threadedcomponent):
    """Using threadedcomponent so we don't have to worry about blocking IO or making
    mainline yield periodically"""

    # Kamaelia mailbox declarations (the values are documentation only).
    Inboxes = { "inbox" : "Commands, e.g. shutdown",
        "control" : "NOT USED",
    }
    Outboxes = { "outbox" : "State change information, e.g. finished",
        "signal" : "NOT USED",
    }

    def __init__(self, torrentfilename):
        # torrentfilename: path or URL of the .torrent to download.
        super(TorrentClient, self).__init__()
        self.torrentfilename = torrentfilename
        self.done = False  # set True once the download completes

    def main(self):
        print "TorrentClient.run"
        """Main loop"""
        # Build a configuration the same way the console UI would, then
        # hand control to the (blocking) BitTorrent rawserver event loop
        # via DLKamaelia.
        uiname = 'bittorrent-console'
        defaults = get_defaults(uiname)
        defaults.append(('twisted', 0,
            _("Use Twisted network libraries for network connections. 1 means use twisted, 0 means do not use twisted, -1 means autodetect, and prefer twisted")))

        metainfo = None
        config, args = configfile.parse_configuration_and_args(defaults, uiname)
        try:
            # Fetch and parse the .torrent (local file or URL).
            metainfo, errors = GetTorrent.get( self.torrentfilename )
            if errors:
                raise BTFailure(_("Error reading .torrent file: ") + '\n'.join(errors))
            else:
                # Blocks here until the download finishes or is shut down.
                self.dl = DLKamaelia(metainfo, config, self)
                self.dl.run()
        except BTFailure, e:
            print str(e)
            sys.exit(1)

        # NOTE(review): writes to the raw outqueue rather than self.send();
        # presumably equivalent under Axon's threadedcomponent -- confirm.
        self.outqueues["outbox"].put("exited")

    def checkInboxes(self):
        # Polled from the rawserver loop (DLKamaelia passes this component
        # into listen_forever); drains any pending commands.
        while not self.inqueues["inbox"].empty():
            command = self.inqueues["inbox"].get()
            if command == "shutdown":
                # Ask the rawserver to exit its event loop.
                self.dl.multitorrent.rawserver.doneflag.set()

    def finished(self):
        """Called by DL class when the download has completed successfully"""
        self.done = True
        self.send("complete", "outbox")
        print "BitTorrent debug: finished"

    def error(self, errormsg):
        """Called by DL if an error occurs"""
        print strftime('[%H:%M:%S] ') + errormsg
        self.send("failed", "outbox")

    def display(self, statistics):
        """Called by DL to display status updates"""
        # Forward on to next component
        self.send(statistics, "outbox")

    def set_torrent_values(self, name, path, size, numpieces):
        # Record torrent metadata reported back by DLKamaelia.
        self.file = name
        self.downloadTo = path
        self.fileSize = size
        self.numpieces = numpieces
class DLKamaelia(Feedback):
    """This class accepts feedback from the multitorrent downloader class
    which it can then pass back to the inboxes of TorrentClient"""

    def __init__(self, metainfo, config, interface):
        self.doneflag = threading.Event()  # signalled to stop the torrent
        self.metainfo = metainfo           # raw bencoded .torrent contents
        self.config = Preferences().initWithDict(config)
        self.d = interface                 # the owning TorrentClient

    def run(self):
        """Start the torrent and block inside the rawserver event loop."""
        try:
            self.multitorrent = Multitorrent(self.config, self.doneflag,
                                            self.global_error)
            # raises BTFailure if bad
            metainfo = ConvertedMetainfo(bdecode(self.metainfo))
            torrent_name = metainfo.name_fs
            # Resolve the save location from --save_as / --save_in
            # (mutually exclusive); default to the torrent's own name.
            if self.config['save_as']:
                if self.config['save_in']:
                    raise BTFailure(_("You cannot specify both --save_as and "
                                      "--save_in"))
                saveas = self.config['save_as']
            elif self.config['save_in']:
                saveas = os.path.join(self.config['save_in'], torrent_name)
            else:
                saveas = torrent_name

            # Report torrent metadata back to the Kamaelia component.
            self.d.set_torrent_values(metainfo.name, os.path.abspath(saveas),
                                metainfo.total_bytes, len(metainfo.hashes))
            self.torrent = self.multitorrent.start_torrent(metainfo,
                                Preferences(self.config), self, saveas)
        except BTFailure, e:
            print str(e)
            return
        # Schedule periodic status reports, then block in the event loop
        # until doneflag is set (e.g. by a "shutdown" command).
        self.get_status()
        #self.multitorrent.rawserver.install_sigint_handler() - can only be done on the main thread so does not work with Kamaelia
        self.multitorrent.rawserver.listen_forever( self.d )
        self.d.display({'activity':_("shutting down"), 'fractionDone':0})
        self.torrent.shutdown()
        print "BitTorrent Debug: shutting down"

    def reread_config(self):
        # Reload on-disk configuration and push changes into the engine.
        try:
            newvalues = configfile.get_config(self.config, 'bittorrent-console')
        except Exception, e:
            self.d.error(_("Error reading config: ") + str(e))
            return
        self.config.update(newvalues)
        # The set_option call can potentially trigger something that kills
        # the torrent (when writing this the only possibility is a change in
        # max_files_open causing an IOError while closing files), and so
        # the self.failed() callback can run during this loop.
        for option, value in newvalues.iteritems():
            self.multitorrent.set_option(option, value)
        for option, value in newvalues.iteritems():
            self.torrent.set_option(option, value)

    def get_status(self):
        # Re-arm itself on the rawserver so status is emitted periodically.
        self.multitorrent.rawserver.add_task(self.get_status,
                                             self.config['display_interval'])
        status = self.torrent.get_status(self.config['spew'])
        self.d.display(status)

    def global_error(self, level, text):
        # Engine-wide error: forward to the component.
        self.d.error(text)

    def error(self, torrent, level, text):
        # Per-torrent error: forward to the component.
        self.d.error(text)

    def failed(self, torrent, is_external):
        # Torrent died; unblock the event loop.
        self.doneflag.set()

    def finished(self, torrent):
        # Download complete; notify the component.
        self.d.finished()
if __name__ == '__main__':
    from Kamaelia.Util.PipelineComponent import pipeline
    from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer

    # Demo: wire the torrent client between a console reader (for commands
    # such as "shutdown") and a console echoer (for status output).
    # download a linux distro
    pipeline(
        ConsoleReader(">>> ", ""),
        TorrentClient("http://www.tlm-project.org/public/distributions/damnsmall/current/dsl-2.4.iso.torrent"),
        ConsoleEchoer(),
    ).run()
|
sparkslabs/kamaelia
|
Sketches/RJL/bittorrent/BitTorrent/bittorrent-console.py
|
Python
|
apache-2.0
| 7,540 | 0.00809 |
from ml_buff.database import session_scope
from ml_buff.models import feature, feature_value, input_data, base_feature_record
from ml_buff.helpers.feature_value_helper import FeatureValueHelper
class TestFeature1(base_feature_record.BaseFeatureRecord):
    """Fixture feature whose computed value is always the constant [1]."""
    def calculate(self, input_data):
        # The input is ignored; this feature is a fixed sentinel.
        return [1]
class TestFeature2(base_feature_record.BaseFeatureRecord):
    """Fixture feature whose computed value is always the constant [2]."""
    def calculate(self, input_data):
        # The input is ignored; this feature is a fixed sentinel.
        return [2]
class TestFeatureCalculate(base_feature_record.BaseFeatureRecord):
    """Fixture feature that echoes back the stored input values.

    NOTE(review): relies on ``self._input_data_values`` being populated by
    the BaseFeatureRecord machinery before ``calculate`` runs -- confirm
    against the base class.
    """
    def calculate(self, input_data):
        # The ``input_data`` argument itself is ignored here.
        return self._input_data_values
def test_createAll():
    """FeatureValueHelper.createAll computes and persists every registered feature."""
    # Seed two input rows in a dedicated dataset so we can query them back.
    test_input_data = (input_data.InputData(1, 'createAll'), input_data.InputData(2, 'createAll'))
    with session_scope() as session:
        for test_input_datum in test_input_data:
            session.add(test_input_datum)

    # Re-fetch the rows (now carrying database ids) and detach them so they
    # can be used outside the session.
    with session_scope() as session:
        test_input_data = session.query(input_data.InputData).filter(input_data.InputData.dataset_name == 'createAll').all()
        for test_input_datum in test_input_data:
            session.expunge(test_input_datum)

    # Map each input id to its raw values, then compute all features at once.
    input_data_list = {}
    for test_input_datum in test_input_data:
        input_data_list[test_input_datum.id] = [1, 2, 3]
    FeatureValueHelper.createAll(input_data_list)

    # Each constant fixture feature should now have its stored value per input.
    for test_input_datum in test_input_data:
        value1 = TestFeature1().getValue(test_input_datum)
        value2 = TestFeature2().getValue(test_input_datum)
        assert value1.value == [1]
        assert value2.value == [2]
def test_forceUpdateForInput():
    """forceUpdateForInput recomputes feature values for a single input only."""
    # Seed two input rows in a dedicated dataset so we can query them back.
    test_input_data = (input_data.InputData(1, 'createAll'), input_data.InputData(2, 'createAll'))
    with session_scope() as session:
        for test_input_datum in test_input_data:
            session.add(test_input_datum)

    # Re-fetch the rows (now carrying database ids) and detach them.
    with session_scope() as session:
        test_input_data = session.query(input_data.InputData).filter(input_data.InputData.dataset_name == 'createAll').all()
        for test_input_datum in test_input_data:
            session.expunge(test_input_datum)

    # Initial computation: the echo feature stores the raw values [1, 2, 3].
    input_data_list = {}
    for test_input_datum in test_input_data:
        input_data_list[test_input_datum.id] = [1, 2, 3]
    FeatureValueHelper.createAll(input_data_list)
    for test_input_datum in test_input_data:
        value = TestFeatureCalculate().getValue(test_input_datum)
        assert value.value == [1,2,3]

    # Force-recompute only the first input with new raw values; its stored
    # value changes while the second input is untouched.
    FeatureValueHelper.forceUpdateForInput(test_input_data[0].id, [1])
    value = TestFeatureCalculate().getValue(test_input_data[0])
    assert value.value == [1]
|
tinenbruno/ml-buff
|
tests/helpers/feature_value_helper_test.py
|
Python
|
mit
| 2,534 | 0.007103 |
"""
QUESTION:
You want to build a house on an empty land which reaches all buildings in the shortest amount of distance. You are
given a 2D grid of values 0, 1 or 2, where:
Each 0 marks an empty land which you can pass by freely.
Each 1 marks a building which you cannot pass through.
Each 2 marks an obstacle which you cannot pass through.
The distance is calculated using Manhattan Distance, where distance(p1, p2) = |p2.x - p1.x| + |p2.y - p1.y|.
For example, given three buildings at (0,0), (0,4), (2,2), and an obstacle at (0,2):
1 - 0 - 2 - 0 - 1
| | | | |
0 - 0 - 0 - 0 - 0
| | | | |
0 - 0 - 1 - 0 - 0
The point (1,2) is an ideal empty land to build a house, as the total travel distance of 3+3+1=7 is minimal. So return 7.
Note:
There will be at least one building. If it is not possible to build such house according to the above rules, return -1.
Hide Company Tags Google Zenefits
Hide Tags Breadth-first Search
Hide Similar Problems (M) Walls and Gates (H) Best Meeting Point
ANSWER:
BFS
"""
class Solution(object):
    def shortestDistance(self, grid):
        """Return the minimal total travel distance from one empty cell to all buildings.

        Runs a BFS from every building over empty land (0 cells).
        ``times[i][j]`` counts how many buildings have reached cell (i, j);
        a cell is only expanded if every *previous* building reached it
        (``times == num - 1``), which prunes regions unreachable from any
        building.  ``dis[i][j]`` accumulates the summed distances.

        Returns:
            int: minimal total Manhattan travel distance, or -1 when no
            empty cell is reachable from every building.
        """
        from collections import deque

        m, n = len(grid), len(grid[0])
        times = [[0] * n for _ in range(m)]
        dis = [[0] * n for _ in range(m)]
        num = 0
        ans = float('inf')
        for i in range(m):
            for j in range(n):
                if grid[i][j] != 1:
                    continue
                num += 1
                # deque gives O(1) popleft (the original list.pop(0) is O(n)).
                queue = deque([(i, j, 0)])
                while queue:
                    x, y, d = queue.popleft()
                    for dx, dy in zip([1, 0, -1, 0], [0, 1, 0, -1]):
                        nx, ny = x + dx, y + dy
                        # Expand only empty cells already reached by every
                        # earlier building; bumping ``times`` immediately
                        # also prevents revisiting within this BFS.
                        if 0 <= nx < m and 0 <= ny < n and times[nx][ny] == num - 1 and grid[nx][ny] == 0:
                            queue.append((nx, ny, d + 1))
                            dis[nx][ny] += d + 1
                            times[nx][ny] += 1
        for i in range(m):
            for j in range(n):
                if times[i][j] == num:
                    ans = min(ans, dis[i][j])
        return ans if ans != float('inf') else -1
if __name__ == '__main__':
    # Smoke-test against the two sample grids (expected output: 7 then -1).
    # Parenthesized print works under both Python 2 and Python 3.
    print(Solution().shortestDistance([[1, 0, 2, 0, 1], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0]]))
    print(Solution().shortestDistance([[1, 1], [0, 1]]))
|
tktrungna/leetcode
|
Python/shortest-distance-from-all-buildings.py
|
Python
|
mit
| 2,335 | 0.0197 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without
# modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,
# this list of conditions and the following disclaimer in the
# documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re

from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network_common import to_list, ComplexList
_DEVICE_CONFIGS = {}
sros_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
'provider': dict(type='dict')
}
def check_args(module, warnings):
    """Append a deprecation warning for every legacy top-level argument the user set.

    Args:
        module: AnsibleModule whose params are inspected.
        warnings: list that collects warning strings (mutated in place).
    """
    # The original bound ``module.params['provider']`` to a local that was
    # never used; dropped here.
    for key in sros_argument_spec:
        if key != 'provider' and module.params.get(key):
            warnings.append('argument %s has been deprecated and will be '
                            'removed in a future version' % key)
def get_config(module, flags=None):
    """Return the device's running configuration, cached per exact command.

    Args:
        module: AnsibleModule with a connection capable of exec_command.
        flags: optional list of extra tokens appended to
            'admin display-config'.

    Calls module.fail_json when the device command fails.
    """
    # Avoid the shared mutable-default-argument pitfall; None means "no flags".
    flags = [] if flags is None else flags
    cmd = 'admin display-config '
    cmd += ' '.join(flags)
    cmd = cmd.strip()

    try:
        # Serve from the module-level cache when this exact command ran before.
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=err)
        cfg = str(out).strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg
def to_commands(module, commands):
    """Normalise raw command specs into the dict form exec_command expects."""
    command_spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict(),
    }
    return ComplexList(command_spec, module)(commands)
def run_commands(module, commands, check_rc=True):
    """Run each command on the device and return the list of raw outputs.

    When ``check_rc`` is true, a non-zero return code aborts the module
    via fail_json.
    """
    responses = []
    for cmd in to_commands(module, to_list(commands)):
        rc, out, err = exec_command(module, module.jsonify(cmd))
        if check_rc and rc != 0:
            module.fail_json(msg=err, rc=rc)
        responses.append(out)
    return responses
def load_config(module, commands):
    """Push configuration commands to the device, then drop back to the root context."""
    for command in to_list(commands):
        rc, _out, err = exec_command(module, command)
        if rc != 0:
            module.fail_json(msg=err, command=command, rc=rc)
    # Leave any nested configuration contexts entered by the commands.
    exec_command(module, 'exit all')
def rollback_enabled(self):
    # NOTE(review): module-level function written like a method (takes
    # ``self``); presumably bound onto a connection object elsewhere --
    # confirm against callers.  Returns (and caches) whether the device
    # has a rollback location configured.
    if self._rollback_enabled is not None:
        # Cached answer from a previous probe.
        return self._rollback_enabled
    # NOTE(review): requires the ``re`` module, which this file did not
    # import at the top as reviewed -- verify it is in scope where this
    # actually runs.
    resp = self.execute(['show system rollback'])
    match = re.search(r'^Rollback Location\s+:\s(\S+)', resp[0], re.M)
    self._rollback_enabled = match.group(1) != 'None'
    return self._rollback_enabled
def load_config_w_rollback(self, commands):
    # NOTE(review): module-level function written like a method; it also
    # references ``NetworkError``, which is neither defined nor imported
    # in this file -- confirm where this code is meant to live.
    # Take a rollback checkpoint before configuring, when supported.
    if self.rollback_enabled:
        self.execute(['admin rollback save'])

    try:
        self.configure(commands)
    except NetworkError:
        # Restore the checkpoint on failure, then re-raise for the caller.
        if self.rollback_enabled:
            self.execute(['admin rollback revert latest-rb',
                          'admin rollback delete latest-rb'])
        raise

    # Success: discard the checkpoint.
    if self.rollback_enabled:
        self.execute(['admin rollback delete latest-rb'])
|
bjolivot/ansible
|
lib/ansible/module_utils/sros.py
|
Python
|
gpl-3.0
| 4,609 | 0.004339 |
# USAGE
# python motion_detector.py
# python motion_detector.py --video videos/example_01.mp4

# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)

# otherwise, we are reading from a video file
else:
    # BUG FIX: open the file named on the command line; the previous code
    # opened a second camera device (cv2.VideoCapture(1)) here instead,
    # so --video was silently ignored.
    camera = cv2.VideoCapture(args["video"])
    time.sleep(0.25)

# initialize the first frame in the video stream; it serves as the static
# background model that later frames are differenced against
firstFrame = None

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied text
    (grabbed, frame) = camera.read()
    text = "Unoccupied"

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        break

    # resize the frame, convert it to grayscale, and blur it to smooth
    # out high-frequency noise before differencing
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and
    # first frame, then threshold to get a binary motion mask
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image
    # NOTE(review): this two-value unpacking of findContours matches
    # OpenCV 2.4 / 4.x; OpenCV 3.x returns three values -- confirm the
    # deployed OpenCV version.
    thresh = cv2.dilate(thresh, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the lop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
|
SahSih/ARStreaming360Display
|
RealTimeVideoStitch/motion_detector.py
|
Python
|
mit
| 2,815 | 0.019893 |
# $Id: 150_srtp_1_1.py 369517 2012-07-01 17:28:57Z file $
#
from inc_cfg import *

# Scenario: both endpoints offer SRTP as optional (--use-srtp=1) while
# allowing non-secure signalling transport (--srtp-secure=0); the call
# between them should negotiate successfully.
test_param = TestParam(
    "Callee=optional SRTP, caller=optional SRTP",
    [
        InstanceParam("callee", "--null-audio --use-srtp=1 --srtp-secure=0 --max-calls=1"),
        InstanceParam("caller", "--null-audio --use-srtp=1 --srtp-secure=0 --max-calls=1")
    ]
)
|
fluentstream/asterisk-p2p
|
res/pjproject/tests/pjsua/scripts-call/150_srtp_1_1.py
|
Python
|
gpl-2.0
| 340 | 0.023529 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Rename LabeledRelationEvidence.date to modification_date."""

    dependencies = [
        ('corpus', '0004_auto_20140923_1501'),
    ]

    operations = [
        migrations.RenameField(
            model_name='labeledrelationevidence',
            old_name='date',
            new_name='modification_date',
        ),
    ]
|
mrshu/iepy
|
iepy/webui/corpus/migrations/0005_auto_20140923_1502.py
|
Python
|
bsd-3-clause
| 412 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.