text (string, lengths 6-947k) | repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---
# -*- coding: utf-8 -*-
"""
Settings of ``critica.apps.notes`` application.
"""
from critica.apps.notes import choices
# Excluded categories
# ------------------------------------------------------------------------------
EXCLUDED_CATEGORIES = [
'epicurien',
'voyages',
'regions',
'coup-de-gueule',
]
# Type order
# ------------------------------------------------------------------------------
TYPE_ORDER = [
'vous-saviez',
'fallait-sen-douter',
'ca-cest-fait',
'linfo-off',
'criticons',
'aucun-interet',
'premiere-nouvelle',
'on-sen-serait-passe',
'on-en-rirait-presque',
'ils-ont-ose',
]
| brunobord/critica | apps/notes/settings.py | Python | gpl-3.0 | 652 | 0.001534 |
#! /usr/bin/env python3
# This was forked from https://github.com/rustyrussell/lightning-payencode/tree/acc16ec13a3fa1dc16c07af6ec67c261bd8aff23
import re
import time
from hashlib import sha256
from binascii import hexlify
from decimal import Decimal
from typing import Optional, TYPE_CHECKING, Type
import random
import bitstring
from .bitcoin import hash160_to_b58_address, b58_address_to_hash160, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC
from .segwit_addr import bech32_encode, bech32_decode, CHARSET
from . import segwit_addr
from . import constants
from .constants import AbstractNet
from . import ecc
from .bitcoin import COIN
if TYPE_CHECKING:
from .lnutil import LnFeatures
class LnInvoiceException(Exception): pass
class LnDecodeException(LnInvoiceException): pass
class LnEncodeException(LnInvoiceException): pass
# BOLT #11:
#
# A writer MUST encode `amount` as a positive decimal integer with no
# leading zeroes, SHOULD use the shortest representation possible.
def shorten_amount(amount):
""" Given an amount in bitcoin, shorten it
"""
# Convert to pico initially
amount = int(amount * 10**12)
units = ['p', 'n', 'u', 'm']
for unit in units:
if amount % 1000 == 0:
amount //= 1000
else:
break
else:
unit = ''
return str(amount) + unit
def unshorten_amount(amount) -> Decimal:
""" Given a shortened amount, convert it into a decimal
"""
# BOLT #11:
# The following `multiplier` letters are defined:
#
#* `m` (milli): multiply by 0.001
#* `u` (micro): multiply by 0.000001
#* `n` (nano): multiply by 0.000000001
#* `p` (pico): multiply by 0.000000000001
units = {
'p': 10**12,
'n': 10**9,
'u': 10**6,
'm': 10**3,
}
unit = str(amount)[-1]
# BOLT #11:
# A reader SHOULD fail if `amount` contains a non-digit, or is followed by
# anything except a `multiplier` in the table above.
if not re.fullmatch("\\d+[pnum]?", str(amount)):
raise LnDecodeException("Invalid amount '{}'".format(amount))
if unit in units.keys():
return Decimal(amount[:-1]) / units[unit]
else:
return Decimal(amount)
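# Illustrative sketch (not part of the original module): round-tripping an
# amount through shorten_amount()/unshorten_amount(). The figure 0.0012 BTC is
# an arbitrary example value.
def _example_amount_roundtrip():
    amount = Decimal("0.0012")          # 0.0012 BTC
    short = shorten_amount(amount)      # -> "1200u" (micro-bitcoin)
    assert unshorten_amount(short) == amount
    return short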
_INT_TO_BINSTR = {a: '0' * (5-len(bin(a)[2:])) + bin(a)[2:] for a in range(32)}
# Bech32 spits out array of 5-bit values. Shim here.
def u5_to_bitarray(arr):
b = ''.join(_INT_TO_BINSTR[a] for a in arr)
return bitstring.BitArray(bin=b)
def bitarray_to_u5(barr):
assert barr.len % 5 == 0
ret = []
s = bitstring.ConstBitStream(barr)
while s.pos != s.len:
ret.append(s.read(5).uint)
return ret
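# Illustrative sketch (not part of the original module): u5_to_bitarray() and
# bitarray_to_u5() are inverses of each other -- bech32 deals in 5-bit groups,
# bitstring in a flat bit array. The sample values are arbitrary.
def _example_u5_roundtrip():
    groups = [3, 1, 0, 31]              # four 5-bit values (0..31)
    bits = u5_to_bitarray(groups)       # 4 * 5 = 20 bits
    assert bits.len == 20
    assert bitarray_to_u5(bits) == groups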
def encode_fallback(fallback: str, net: Type[AbstractNet]):
""" Encode all supported fallback addresses.
"""
wver, wprog_ints = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, fallback)
if wver is not None:
wprog = bytes(wprog_ints)
else:
addrtype, addr = b58_address_to_hash160(fallback)
if addrtype == net.ADDRTYPE_P2PKH:
wver = 17
elif addrtype == net.ADDRTYPE_P2SH:
wver = 18
else:
raise LnEncodeException(f"Unknown address type {addrtype} for {net}")
wprog = addr
return tagged('f', bitstring.pack("uint:5", wver) + wprog)
def parse_fallback(fallback, net: Type[AbstractNet]):
wver = fallback[0:5].uint
if wver == 17:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2PKH)
elif wver == 18:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2SH)
elif wver <= 16:
witprog = fallback[5:] # cut witver
witprog = witprog[:len(witprog) // 8 * 8] # can only be full bytes
witprog = witprog.tobytes()
addr = segwit_addr.encode_segwit_address(net.SEGWIT_HRP, wver, witprog)
else:
return None
return addr
BOLT11_HRP_INV_DICT = {net.BOLT11_HRP: net for net in constants.NETS_LIST}
# Tagged field containing BitArray
def tagged(char, l):
# Tagged fields need to be zero-padded to 5 bits.
while l.len % 5 != 0:
l.append('0b0')
return bitstring.pack("uint:5, uint:5, uint:5",
CHARSET.find(char),
(l.len / 5) / 32, (l.len / 5) % 32) + l
# Tagged field containing bytes
def tagged_bytes(char, l):
return tagged(char, bitstring.BitArray(l))
def trim_to_min_length(bits):
"""Ensures 'bits' have min number of leading zeroes.
Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
"""
bits = bits[:] # copy
# make sure we can be split into 5 bit blocks
while bits.len % 5 != 0:
bits.prepend('0b0')
# Get minimal length by trimming leading 5 bits at a time.
while bits.startswith('0b00000'):
if len(bits) == 5:
break # v == 0
bits = bits[5:]
return bits
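# Illustrative sketch (not part of the original module): how variable-length
# integer fields such as the expiry ('x') are minimised before being tagged.
# The value 3600 is only an example (it happens to match the default expiry).
def _example_trim_expiry():
    bits = bitstring.pack('intbe:64', 3600)  # 64 bits, mostly leading zeroes
    bits = trim_to_min_length(bits)
    assert bits.len == 15                    # smallest multiple of 5 >= 12 bits
    assert bits.uint == 3600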
# Discard trailing bits, convert to bytes.
def trim_to_bytes(barr):
# Adds a byte if necessary.
b = barr.tobytes()
if barr.len % 8 != 0:
return b[:-1]
return b
# Try to pull out tagged data: returns tag, tagged data and remainder.
def pull_tagged(stream):
tag = stream.read(5).uint
length = stream.read(5).uint * 32 + stream.read(5).uint
return (CHARSET[tag], stream.read(length * 5), stream)
def lnencode(addr: 'LnAddr', privkey) -> str:
if addr.amount:
amount = addr.net.BOLT11_HRP + shorten_amount(addr.amount)
else:
amount = addr.net.BOLT11_HRP if addr.net else ''
hrp = 'ln' + amount
# Start with the timestamp
data = bitstring.pack('uint:35', addr.date)
tags_set = set()
# Payment hash
data += tagged_bytes('p', addr.paymenthash)
tags_set.add('p')
if addr.payment_secret is not None:
data += tagged_bytes('s', addr.payment_secret)
tags_set.add('s')
for k, v in addr.tags:
# BOLT #11:
#
# A writer MUST NOT include more than one `d`, `h`, `n` or `x` fields,
if k in ('d', 'h', 'n', 'x', 'p', 's'):
if k in tags_set:
raise LnEncodeException("Duplicate '{}' tag".format(k))
if k == 'r':
route = bitstring.BitArray()
for step in v:
pubkey, channel, feebase, feerate, cltv = step
route.append(bitstring.BitArray(pubkey) + bitstring.BitArray(channel) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv))
data += tagged('r', route)
elif k == 't':
pubkey, feebase, feerate, cltv = v
route = bitstring.BitArray(pubkey) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv)
data += tagged('t', route)
elif k == 'f':
data += encode_fallback(v, addr.net)
elif k == 'd':
# truncate to max length: 1024*5 bits = 639 bytes
data += tagged_bytes('d', v.encode()[0:639])
elif k == 'x':
expirybits = bitstring.pack('intbe:64', v)
expirybits = trim_to_min_length(expirybits)
data += tagged('x', expirybits)
elif k == 'h':
data += tagged_bytes('h', sha256(v.encode('utf-8')).digest())
elif k == 'n':
data += tagged_bytes('n', v)
elif k == 'c':
finalcltvbits = bitstring.pack('intbe:64', v)
finalcltvbits = trim_to_min_length(finalcltvbits)
data += tagged('c', finalcltvbits)
elif k == '9':
if v == 0:
continue
feature_bits = bitstring.BitArray(uint=v, length=v.bit_length())
feature_bits = trim_to_min_length(feature_bits)
data += tagged('9', feature_bits)
else:
# FIXME: Support unknown tags?
raise LnEncodeException("Unknown tag {}".format(k))
tags_set.add(k)
# BOLT #11:
#
# A writer MUST include either a `d` or `h` field, and MUST NOT include
# both.
if 'd' in tags_set and 'h' in tags_set:
raise ValueError("Cannot include both 'd' and 'h'")
if not 'd' in tags_set and not 'h' in tags_set:
raise ValueError("Must include either 'd' or 'h'")
# We actually sign the hrp, then data (padded to 8 bits with zeroes).
msg = hrp.encode("ascii") + data.tobytes()
privkey = ecc.ECPrivkey(privkey)
sig = privkey.sign_message(msg, is_compressed=False, algo=lambda x:sha256(x).digest())
recovery_flag = bytes([sig[0] - 27])
sig = bytes(sig[1:]) + recovery_flag
data += sig
return bech32_encode(segwit_addr.Encoding.BECH32, hrp, bitarray_to_u5(data))
class LnAddr(object):
def __init__(self, *, paymenthash: bytes = None, amount=None, net: Type[AbstractNet] = None, tags=None, date=None,
payment_secret: bytes = None):
self.date = int(time.time()) if not date else int(date)
self.tags = [] if not tags else tags
self.unknown_tags = []
self.paymenthash = paymenthash
self.payment_secret = payment_secret
self.signature = None
self.pubkey = None
self.net = constants.net if net is None else net # type: Type[AbstractNet]
self._amount = amount # type: Optional[Decimal] # in bitcoins
self._min_final_cltv_expiry = 18
@property
def amount(self) -> Optional[Decimal]:
return self._amount
@amount.setter
def amount(self, value):
if not (isinstance(value, Decimal) or value is None):
raise LnInvoiceException(f"amount must be Decimal or None, not {value!r}")
if value is None:
self._amount = None
return
assert isinstance(value, Decimal)
if value.is_nan() or not (0 <= value <= TOTAL_COIN_SUPPLY_LIMIT_IN_BTC):
raise LnInvoiceException(f"amount is out-of-bounds: {value!r} BTC")
if value * 10**12 % 10:
# max resolution is millisatoshi
raise LnInvoiceException(f"Cannot encode {value!r}: too many decimal places")
self._amount = value
def get_amount_sat(self) -> Optional[Decimal]:
# note that this has msat resolution potentially
if self.amount is None:
return None
return self.amount * COIN
def get_routing_info(self, tag):
# note: tag will be 't' for trampoline
r_tags = list(filter(lambda x: x[0] == tag, self.tags))
# strip the tag type, it's implicitly 'r' now
r_tags = list(map(lambda x: x[1], r_tags))
# if there are multiple hints, we will use the first one that works,
# from a random permutation
random.shuffle(r_tags)
return r_tags
def get_amount_msat(self) -> Optional[int]:
if self.amount is None:
return None
return int(self.amount * COIN * 1000)
def get_features(self) -> 'LnFeatures':
from .lnutil import LnFeatures
return LnFeatures(self.get_tag('9') or 0)
def __str__(self):
return "LnAddr[{}, amount={}{} tags=[{}]]".format(
hexlify(self.pubkey.serialize()).decode('utf-8') if self.pubkey else None,
self.amount, self.net.BOLT11_HRP,
", ".join([k + '=' + str(v) for k, v in self.tags])
)
def get_min_final_cltv_expiry(self) -> int:
return self._min_final_cltv_expiry
def get_tag(self, tag):
for k, v in self.tags:
if k == tag:
return v
return None
def get_description(self) -> str:
return self.get_tag('d') or ''
def get_expiry(self) -> int:
exp = self.get_tag('x')
if exp is None:
exp = 3600
return int(exp)
def is_expired(self) -> bool:
now = time.time()
# BOLT-11 does not specify what expiration of '0' means.
# we treat it as 0 seconds here (instead of never)
return now > self.get_expiry() + self.date
class SerializableKey:
def __init__(self, pubkey):
self.pubkey = pubkey
def serialize(self):
return self.pubkey.get_public_key_bytes(True)
def lndecode(invoice: str, *, verbose=False, net=None) -> LnAddr:
if net is None:
net = constants.net
decoded_bech32 = bech32_decode(invoice, ignore_long_length=True)
hrp = decoded_bech32.hrp
data = decoded_bech32.data
if decoded_bech32.encoding is None:
raise LnDecodeException("Bad bech32 checksum")
if decoded_bech32.encoding != segwit_addr.Encoding.BECH32:
raise LnDecodeException("Bad bech32 encoding: must be using vanilla BECH32")
# BOLT #11:
#
# A reader MUST fail if it does not understand the `prefix`.
if not hrp.startswith('ln'):
raise LnDecodeException("Does not start with ln")
if not hrp[2:].startswith(net.BOLT11_HRP):
raise LnDecodeException(f"Wrong Lightning invoice HRP {hrp[2:]}, should be {net.BOLT11_HRP}")
data = u5_to_bitarray(data)
# Final signature 65 bytes, split it off.
if len(data) < 65*8:
raise LnDecodeException("Too short to contain signature")
sigdecoded = data[-65*8:].tobytes()
data = bitstring.ConstBitStream(data[:-65*8])
addr = LnAddr()
addr.pubkey = None
m = re.search("[^\\d]+", hrp[2:])
if m:
addr.net = BOLT11_HRP_INV_DICT[m.group(0)]
amountstr = hrp[2+m.end():]
# BOLT #11:
#
# A reader SHOULD indicate if amount is unspecified, otherwise it MUST
# multiply `amount` by the `multiplier` value (if any) to derive the
# amount required for payment.
if amountstr != '':
addr.amount = unshorten_amount(amountstr)
addr.date = data.read(35).uint
while data.pos != data.len:
tag, tagdata, data = pull_tagged(data)
# BOLT #11:
#
# A reader MUST skip over unknown fields, an `f` field with unknown
# `version`, or a `p`, `h`, or `n` field which does not have
# `data_length` 52, 52, or 53 respectively.
data_length = len(tagdata) / 5
if tag == 'r':
# BOLT #11:
#
# * `r` (3): `data_length` variable. One or more entries
# containing extra routing information for a private route;
# there may be more than one `r` field, too.
# * `pubkey` (264 bits)
# * `short_channel_id` (64 bits)
# * `feebase` (32 bits, big-endian)
# * `feerate` (32 bits, big-endian)
# * `cltv_expiry_delta` (16 bits, big-endian)
route=[]
s = bitstring.ConstBitStream(tagdata)
while s.pos + 264 + 64 + 32 + 32 + 16 < s.len:
route.append((s.read(264).tobytes(),
s.read(64).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe))
addr.tags.append(('r',route))
elif tag == 't':
s = bitstring.ConstBitStream(tagdata)
e = (s.read(264).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe)
addr.tags.append(('t', e))
elif tag == 'f':
fallback = parse_fallback(tagdata, addr.net)
if fallback:
addr.tags.append(('f', fallback))
else:
# Incorrect version.
addr.unknown_tags.append((tag, tagdata))
continue
elif tag == 'd':
addr.tags.append(('d', trim_to_bytes(tagdata).decode('utf-8')))
elif tag == 'h':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.tags.append(('h', trim_to_bytes(tagdata)))
elif tag == 'x':
addr.tags.append(('x', tagdata.uint))
elif tag == 'p':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.paymenthash = trim_to_bytes(tagdata)
elif tag == 's':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.payment_secret = trim_to_bytes(tagdata)
elif tag == 'n':
if data_length != 53:
addr.unknown_tags.append((tag, tagdata))
continue
pubkeybytes = trim_to_bytes(tagdata)
addr.pubkey = pubkeybytes
elif tag == 'c':
addr._min_final_cltv_expiry = tagdata.uint
elif tag == '9':
features = tagdata.uint
addr.tags.append(('9', features))
from .lnutil import validate_features
validate_features(features)
else:
addr.unknown_tags.append((tag, tagdata))
if verbose:
print('hex of signature data (32 byte r, 32 byte s): {}'
.format(hexlify(sigdecoded[0:64])))
print('recovery flag: {}'.format(sigdecoded[64]))
print('hex of data for signing: {}'
.format(hexlify(hrp.encode("ascii") + data.tobytes())))
print('SHA256 of above: {}'.format(sha256(hrp.encode("ascii") + data.tobytes()).hexdigest()))
# BOLT #11:
#
# A reader MUST check that the `signature` is valid (see the `n` tagged
# field specified below).
addr.signature = sigdecoded[:65]
hrp_hash = sha256(hrp.encode("ascii") + data.tobytes()).digest()
if addr.pubkey: # Specified by `n`
# BOLT #11:
#
# A reader MUST use the `n` field to validate the signature instead of
# performing signature recovery if a valid `n` field is provided.
ecc.ECPubkey(addr.pubkey).verify_message_hash(sigdecoded[:64], hrp_hash)
pubkey_copy = addr.pubkey
class WrappedBytesKey:
serialize = lambda: pubkey_copy
addr.pubkey = WrappedBytesKey
else: # Recover pubkey from signature.
addr.pubkey = SerializableKey(ecc.ECPubkey.from_sig_string(sigdecoded[:64], sigdecoded[64], hrp_hash))
return addr
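# Illustrative sketch (not part of the original module): encoding and decoding
# a minimal invoice with the functions above. The private key and preimage are
# throwaway example values, not real secrets; the bech32 prefix depends on the
# configured network, and a production invoice would also carry a payment
# secret and feature bits.
def _example_invoice_roundtrip():
    privkey = bytes(range(1, 33))                      # dummy 32-byte key
    payment_hash = sha256(b'example preimage').digest()
    addr = LnAddr(paymenthash=payment_hash,
                  amount=Decimal('0.0012'),
                  tags=[('d', 'example description'), ('x', 3600)])
    invoice = lnencode(addr, privkey)                  # bech32 string, 'ln...'
    decoded = lndecode(invoice, net=addr.net)
    assert decoded.paymenthash == payment_hash
    assert decoded.get_amount_sat() == addr.get_amount_sat()
    return invoice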
| wakiyamap/electrum-mona | electrum_mona/lnaddr.py | Python | mit | 18,219 | 0.002744 |
# -*- coding: utf8 -*-
from injector import singleton, inject
try:
import configparser
except ImportError:
    import ConfigParser as configparser
from sht1x.Sht1x import Sht1x as SHT1x
@singleton
class Sensor:
name = "SHT1x"
@inject
def __init__(self, config_parser: configparser.ConfigParser):
data_pin = int(config_parser.get('sht1x_sensor', 'data_pin'))
sck_pin = int(config_parser.get('sht1x_sensor', 'sck_pin'))
self.sht1x = SHT1x(dataPin=data_pin, sckPin=sck_pin, gpioMode=SHT1x.GPIO_BCM)
def measure(self, data_builder):
(temperature, humidity) = self.sht1x.read_temperature_C_and_humidity()
if temperature > -40.0:
try:
dew_point = self.sht1x.calculate_dew_point(temperature, humidity)
dew_point = round(dew_point, 2)
except ValueError:
dew_point = None
temperature = round(temperature, 2)
humidity = round(humidity, 2)
else:
temperature = None
humidity = None
dew_point = None
if temperature and humidity and dew_point and -30 < temperature < 80 and 5 < humidity <= 100:
data_builder.add(self.name, "temperature", "°C", temperature)
if dew_point:
data_builder.add(self.name, "dew point", "°C", dew_point, True)
data_builder.add(self.name, "relative humidity", "%", humidity)
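# Illustrative sketch (not part of the original module): the [sht1x_sensor]
# configuration section this class expects. The pin numbers are placeholders
# (use the BCM numbers your SHT1x is wired to), read_string() assumes the
# Python 3 configparser, and instantiating Sensor touches the GPIO pins.
def _example_build_sensor():
    parser = configparser.ConfigParser()
    parser.read_string("[sht1x_sensor]\ndata_pin = 18\nsck_pin = 23\n")
    return Sensor(parser)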
| wuan/klimalogger | klimalogger/sensor/sht1x_sensor.py | Python | apache-2.0 | 1,458 | 0.00206 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.availability_sets_operations import AvailabilitySetsOperations
from .operations.virtual_machine_extension_images_operations import VirtualMachineExtensionImagesOperations
from .operations.virtual_machine_extensions_operations import VirtualMachineExtensionsOperations
from .operations.virtual_machine_images_operations import VirtualMachineImagesOperations
from .operations.usage_operations import UsageOperations
from .operations.virtual_machine_sizes_operations import VirtualMachineSizesOperations
from .operations.images_operations import ImagesOperations
from .operations.virtual_machines_operations import VirtualMachinesOperations
from .operations.virtual_machine_scale_sets_operations import VirtualMachineScaleSetsOperations
from .operations.virtual_machine_scale_set_vms_operations import VirtualMachineScaleSetVMsOperations
from .operations.container_services_operations import ContainerServicesOperations
from .operations.disks_operations import DisksOperations
from .operations.snapshots_operations import SnapshotsOperations
from . import models
class ComputeManagementClientConfiguration(AzureConfiguration):
"""Configuration for ComputeManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription credentials which uniquely identify
Microsoft Azure subscription. The subscription ID forms part of the URI
for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'https://management.azure.com'
super(ComputeManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('computemanagementclient/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class ComputeManagementClient(object):
"""Composite Swagger for Compute Client
:ivar config: Configuration for client.
:vartype config: ComputeManagementClientConfiguration
:ivar availability_sets: AvailabilitySets operations
:vartype availability_sets: .operations.AvailabilitySetsOperations
:ivar virtual_machine_extension_images: VirtualMachineExtensionImages operations
:vartype virtual_machine_extension_images: .operations.VirtualMachineExtensionImagesOperations
:ivar virtual_machine_extensions: VirtualMachineExtensions operations
:vartype virtual_machine_extensions: .operations.VirtualMachineExtensionsOperations
:ivar virtual_machine_images: VirtualMachineImages operations
:vartype virtual_machine_images: .operations.VirtualMachineImagesOperations
:ivar usage: Usage operations
:vartype usage: .operations.UsageOperations
:ivar virtual_machine_sizes: VirtualMachineSizes operations
:vartype virtual_machine_sizes: .operations.VirtualMachineSizesOperations
:ivar images: Images operations
:vartype images: .operations.ImagesOperations
:ivar virtual_machines: VirtualMachines operations
:vartype virtual_machines: .operations.VirtualMachinesOperations
:ivar virtual_machine_scale_sets: VirtualMachineScaleSets operations
:vartype virtual_machine_scale_sets: .operations.VirtualMachineScaleSetsOperations
:ivar virtual_machine_scale_set_vms: VirtualMachineScaleSetVMs operations
:vartype virtual_machine_scale_set_vms: .operations.VirtualMachineScaleSetVMsOperations
:ivar container_services: ContainerServices operations
:vartype container_services: .operations.ContainerServicesOperations
:ivar disks: Disks operations
:vartype disks: .operations.DisksOperations
:ivar snapshots: Snapshots operations
:vartype snapshots: .operations.SnapshotsOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription credentials which uniquely identify
Microsoft Azure subscription. The subscription ID forms part of the URI
for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = ComputeManagementClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.availability_sets = AvailabilitySetsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_extension_images = VirtualMachineExtensionImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_extensions = VirtualMachineExtensionsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_images = VirtualMachineImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.usage = UsageOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.images = ImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machines = VirtualMachinesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_scale_sets = VirtualMachineScaleSetsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_vms = VirtualMachineScaleSetVMsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.container_services = ContainerServicesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.disks = DisksOperations(
self._client, self.config, self._serialize, self._deserialize)
self.snapshots = SnapshotsOperations(
self._client, self.config, self._serialize, self._deserialize)
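# Illustrative usage sketch (not part of the generated client): wiring the
# client up with service-principal credentials. The tenant/client/secret and
# the subscription id below are placeholders.
def _example_list_virtual_machines():
    from msrestazure.azure_active_directory import ServicePrincipalCredentials
    credentials = ServicePrincipalCredentials(
        client_id='<client-id>',
        secret='<client-secret>',
        tenant='<tenant-id>',
    )
    client = ComputeManagementClient(credentials, '<subscription-id>')
    return [vm.name for vm in client.virtual_machines.list_all()]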
| rjschwei/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute_management_client.py | Python | mit | 7,735 | 0.002069 |
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
import sys
import timeit
import numpy
import numexpr
array_size = 1000*1000
iterations = 10
numpy_ttime = []
numpy_sttime = []
numpy_nttime = []
numexpr_ttime = []
numexpr_sttime = []
numexpr_nttime = []
def compare_times(expr, nexpr):
global numpy_ttime
global numpy_sttime
global numpy_nttime
global numexpr_ttime
global numexpr_sttime
global numexpr_nttime
print "******************* Expression:", expr
setup_contiguous = setupNP_contiguous
setup_strided = setupNP_strided
setup_unaligned = setupNP_unaligned
numpy_timer = timeit.Timer(expr, setup_contiguous)
numpy_time = round(numpy_timer.timeit(number=iterations), 4)
numpy_ttime.append(numpy_time)
print '%30s %.4f'%('numpy:', numpy_time / iterations)
numpy_timer = timeit.Timer(expr, setup_strided)
numpy_stime = round(numpy_timer.timeit(number=iterations), 4)
numpy_sttime.append(numpy_stime)
print '%30s %.4f'%('numpy strided:', numpy_stime / iterations)
numpy_timer = timeit.Timer(expr, setup_unaligned)
numpy_ntime = round(numpy_timer.timeit(number=iterations), 4)
numpy_nttime.append(numpy_ntime)
print '%30s %.4f'%('numpy unaligned:', numpy_ntime / iterations)
evalexpr = 'evaluate("%s", optimization="aggressive")' % expr
numexpr_timer = timeit.Timer(evalexpr, setup_contiguous)
numexpr_time = round(numexpr_timer.timeit(number=iterations), 4)
numexpr_ttime.append(numexpr_time)
print '%30s %.4f'%("numexpr:", numexpr_time/iterations,),
print "Speed-up of numexpr over numpy:", round(numpy_time/numexpr_time, 4)
evalexpr = 'evaluate("%s", optimization="aggressive")' % expr
numexpr_timer = timeit.Timer(evalexpr, setup_strided)
numexpr_stime = round(numexpr_timer.timeit(number=iterations), 4)
numexpr_sttime.append(numexpr_stime)
print '%30s %.4f'%("numexpr strided:", numexpr_stime/iterations,),
print "Speed-up of numexpr over numpy:", \
round(numpy_stime/numexpr_stime, 4)
evalexpr = 'evaluate("%s", optimization="aggressive")' % expr
numexpr_timer = timeit.Timer(evalexpr, setup_unaligned)
numexpr_ntime = round(numexpr_timer.timeit(number=iterations), 4)
numexpr_nttime.append(numexpr_ntime)
print '%30s %.4f'%("numexpr unaligned:", numexpr_ntime/iterations,),
print "Speed-up of numexpr over numpy:", \
round(numpy_ntime/numexpr_ntime, 4)
print
setupNP = """\
from numpy import arange, linspace, arctan2, sqrt, sin, cos, exp, log
from numpy import rec as records
#from numexpr import evaluate
from numexpr import %s
# Initialize a recarray of 16 MB in size
r=records.array(None, formats='a%s,i4,f4,f8', shape=%s)
c1 = r.field('f0')%s
i2 = r.field('f1')%s
f3 = r.field('f2')%s
f4 = r.field('f3')%s
c1[:] = "a"
i2[:] = arange(%s)/1000
f3[:] = linspace(0,1,len(i2))
f4[:] = f3*1.23
"""
eval_method = "evaluate"
setupNP_contiguous = setupNP % ((eval_method, 4, array_size,) + \
(".copy()",)*4 + \
(array_size,))
setupNP_strided = setupNP % (eval_method, 4, array_size,
"", "", "", "", array_size)
setupNP_unaligned = setupNP % (eval_method, 1, array_size,
"", "", "", "", array_size)
expressions = []
expressions.append('i2 > 0')
expressions.append('f3+f4')
expressions.append('f3+i2')
expressions.append('exp(f3)')
expressions.append('log(exp(f3)+1)/f4')
expressions.append('0.1*i2 > arctan2(f3, f4)')
expressions.append('sqrt(f3**2 + f4**2) > 1')
expressions.append('sin(f3)>cos(f4)')
expressions.append('f3**f4')
def compare(expression=False):
if expression:
compare_times(expression, 1)
sys.exit(0)
nexpr = 0
for expr in expressions:
nexpr += 1
compare_times(expr, nexpr)
print
if __name__ == '__main__':
import numexpr
numexpr.print_versions()
numpy.seterr(all='ignore')
numexpr.set_vml_accuracy_mode('low')
numexpr.set_vml_num_threads(2)
if len(sys.argv) > 1:
expression = sys.argv[1]
print "expression-->", expression
compare(expression)
else:
compare()
tratios = numpy.array(numpy_ttime) / numpy.array(numexpr_ttime)
stratios = numpy.array(numpy_sttime) / numpy.array(numexpr_sttime)
ntratios = numpy.array(numpy_nttime) / numpy.array(numexpr_nttime)
print "eval method: %s" % eval_method
print "*************** Numexpr vs NumPy speed-ups *******************"
# print "numpy total:", sum(numpy_ttime)/iterations
# print "numpy strided total:", sum(numpy_sttime)/iterations
# print "numpy unaligned total:", sum(numpy_nttime)/iterations
# print "numexpr total:", sum(numexpr_ttime)/iterations
print "Contiguous case:\t %s (mean), %s (min), %s (max)" % \
(round(tratios.mean(), 2),
round(tratios.min(), 2),
round(tratios.max(), 2))
# print "numexpr strided total:", sum(numexpr_sttime)/iterations
print "Strided case:\t\t %s (mean), %s (min), %s (max)" % \
(round(stratios.mean(), 2),
round(stratios.min(), 2),
round(stratios.max(), 2))
# print "numexpr unaligned total:", sum(numexpr_nttime)/iterations
print "Unaligned case:\t\t %s (mean), %s (min), %s (max)" % \
(round(ntratios.mean(), 2),
round(ntratios.min(), 2),
round(ntratios.max(), 2))
| Alwnikrotikz/numexpr | bench/vml_timing.py | Python | mit | 5,758 | 0.002431 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013,2014 Rodolphe Quiédeville <rodolphe@quiedeville.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from optparse import make_option
from random import randrange
import time
import sys
from django.core.management.base import BaseCommand
from foo.tuna.models import Book, Editor, Author, Company, Sinopsis
import utils
class Command(BaseCommand):
    help = 'Import data'
option_list = BaseCommand.option_list + (
make_option("-c",
"--code",
dest="code",
type="int",
help="number of values to input",
default=1),
)
def handle(self, *args, **options):
"""Lookup some objects
"""
code = options['code']
self.doit(code, Book, 'Book')
self.doit(code, Company, 'Company')
def doit(self, code, model, name):
print "{} : {}".format(name, model.objects.all().count())
# remove 10% of tuples, be in first
(count, delta) = utils.direct_delete(code, model)
utils.print_console('direct_delete', count, delta)
print "{} : {}".format(name, model.objects.all().count())
| rodo/django-perf | foo/tuna/management/commands/tuna_delete_direct.py | Python | gpl-3.0 | 1,873 | 0.000534 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Wrapper for *Flask-Cache* as engine for *JSONAlchemy*."""
import six
from invenio.ext.cache import cache
from invenio.modules.jsonalchemy.storage import Storage
class CacheStorage(Storage):
"""Implement storage engine for Flask-Cache useful for testing."""
def __init__(self, **kwargs):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.__init__`."""
self._prefix = kwargs.get('model', '')
def _set(self, data):
self._keys = self._keys | set([data['_id']])
cache.set(self._prefix + data['_id'], data, timeout=99999)
def _get(self, id):
value = cache.get(self._prefix + id)
if value is None:
raise KeyError()
return value
@property
def _keys(self):
return cache.get(self._prefix + '::keys') or set()
@_keys.setter
def _keys(self, value):
cache.set(self._prefix + '::keys', value)
def save_one(self, data, id=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.save_one`."""
if id is not None:
data['_id'] = id
self._set(data)
return data
def save_many(self, jsons, ids=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.save_many`."""
return map(lambda k: self.save_one(*k), zip(jsons, ids))
def update_one(self, data, id=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.update_one`."""
if id is not None:
data['_id'] = id
id = data['_id']
old_data = self._get(id)
old_data.update(data)
self._set(old_data)
return old_data
def update_many(self, jsons, ids=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.update_many`."""
return map(lambda k: self.update_one(*k), zip(jsons, ids))
def get_one(self, id):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_one`."""
return self._get(id)
def get_many(self, ids):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_many`."""
return map(self.get_one, ids)
def get_field_values(self, ids, field, repetitive_values=True, count=False,
include_recid=False, split_by=0):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_field_values`."""
raise NotImplementedError()
def get_fields_values(self, ids, fields, repetitive_values=True,
count=False, include_recid=False, split_by=0):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_fields_values`."""
raise NotImplementedError()
def search(self, query):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.search`."""
def _find(item):
for k, v in six.iteritems(query):
if item is None:
return False
test_v = item.get(k)
if test_v is None and v is not None:
return False
elif test_v != v:
return False
return True
return filter(_find, map(self._get, self._keys))
def create(self):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.create`."""
assert len(self._keys) == 0
def drop(self):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.create`."""
while self._keys:
cache.delete(self._prefix + self._keys.pop())
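# Illustrative sketch (not part of the original module): a basic save/get/search
# round trip. Running it requires a Flask application context so that `cache`
# is usable; the record contents are arbitrary.
def _example_cache_storage_roundtrip():
    storage = CacheStorage(model='demo')
    storage.save_one({'title': 'example'}, id='rec-1')
    assert storage.get_one('rec-1')['title'] == 'example'
    return list(storage.search({'title': 'example'}))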
| Lilykos/invenio | invenio/modules/jsonalchemy/jsonext/engines/cache.py | Python | gpl-2.0 | 4,277 | 0.00187 |
from django.db import models
import warnings
from django.utils import timezone
import requests
from image_cropping import ImageRatioField
class CompMember(models.Model):
"""A member of compsoc"""
class Meta:
verbose_name = 'CompSoc Member'
verbose_name_plural = 'CompSoc Members'
index = models.IntegerField(blank=False, help_text="This field is present just for ordering members based on their posts. President = 2, VPs = 1, Gen. Sec. = 0, Everyone else = -1", default=-1)
name = models.CharField(max_length=50, help_text='Enter your full name')
image = models.ImageField(blank=False, upload_to='member_images/', help_text='Please select a display image for yourself. This is necessary.')
cropping = ImageRatioField('image', '500x500')
alumni = models.BooleanField(default=False, help_text='Are you an alumni?')
role = models.CharField(max_length=100, help_text="Enter your post if you hold one. If not, enter 'Member'")
batch_of = models.CharField(max_length=4, default='2015', help_text='Enter the year you will graduate')
social_link = models.CharField(blank=True, max_length=256, help_text='Enter a link to your Facebook, Twitter, GitHub or any other social network profile. You can leave this blank if you wish!')
def get_social_link(self):
'''
        Returns the social_link if present. Otherwise, returns javascript:void(0)
'''
if self.social_link == '':
return 'javascript:void(0)'
else:
return self.social_link
def __str__(self):
return self.name
class Variable(models.Model): ##NOTE: This should not be used anymore
def __str__(self):
warnings.warn('''You are using a "General Variable".
Stop doing that.
This is bad design on Arjoonn's part so don't fall into the same trap.
If you are using this for Orfik, that has already been fixed. If you are using this for logos, same thing.
Over a few cycles this entire table will be removed.
''')
return self.name
name = models.CharField(max_length=100)
time = models.DateTimeField()
# Receive the pre_delete signal and delete the image associated with the model instance.
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
@receiver(pre_delete, sender=CompMember)
def compsoc_member_delete(sender, instance, **kwargs):
# Pass false so ImageField doesn't save the model.
instance.image.delete(False)
| compsoc-ssc/compsocssc | general/models.py | Python | mit | 2,517 | 0.005165 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import os
import pytest
from flaky import flaky
from telegram import Video, TelegramError, Voice, PhotoSize
@pytest.fixture(scope='function')
def video_file():
f = open('tests/data/telegram.mp4', 'rb')
yield f
f.close()
@pytest.fixture(scope='class')
def video(bot, chat_id):
with open('tests/data/telegram.mp4', 'rb') as f:
return bot.send_video(chat_id, video=f, timeout=50).video
class TestVideo(object):
width = 360
height = 640
duration = 5
file_size = 326534
mime_type = 'video/mp4'
supports_streaming = True
caption = u'<b>VideoTest</b> - *Caption*'
video_file_url = 'https://python-telegram-bot.org/static/testfiles/telegram.mp4'
def test_creation(self, video):
# Make sure file has been uploaded.
assert isinstance(video, Video)
assert isinstance(video.file_id, str)
        assert video.file_id != ''
assert isinstance(video.thumb, PhotoSize)
assert isinstance(video.thumb.file_id, str)
        assert video.thumb.file_id != ''
def test_expected_values(self, video):
assert video.width == self.width
assert video.height == self.height
assert video.duration == self.duration
assert video.file_size == self.file_size
assert video.mime_type == self.mime_type
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_all_args(self, bot, chat_id, video_file, video):
message = bot.send_video(chat_id, video_file, duration=self.duration,
caption=self.caption, supports_streaming=self.supports_streaming,
disable_notification=False, width=video.width,
height=video.height, parse_mode='Markdown')
assert isinstance(message.video, Video)
assert isinstance(message.video.file_id, str)
assert message.video.file_id != ''
assert message.video.width == video.width
assert message.video.height == video.height
assert message.video.duration == video.duration
assert message.video.file_size == video.file_size
assert isinstance(message.video.thumb, PhotoSize)
assert isinstance(message.video.thumb.file_id, str)
assert message.video.thumb.file_id != ''
assert message.video.thumb.width == video.thumb.width
assert message.video.thumb.height == video.thumb.height
assert message.video.thumb.file_size == video.thumb.file_size
assert message.caption == self.caption.replace('*', '')
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_get_and_download(self, bot, video):
new_file = bot.get_file(video.file_id)
assert new_file.file_size == self.file_size
assert new_file.file_id == video.file_id
assert new_file.file_path.startswith('https://')
new_file.download('telegram.mp4')
assert os.path.isfile('telegram.mp4')
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_mp4_file_url(self, bot, chat_id, video):
message = bot.send_video(chat_id, self.video_file_url, caption=self.caption)
assert isinstance(message.video, Video)
assert isinstance(message.video.file_id, str)
assert message.video.file_id != ''
assert message.video.width == video.width
assert message.video.height == video.height
assert message.video.duration == video.duration
assert message.video.file_size == video.file_size
assert isinstance(message.video.thumb, PhotoSize)
assert isinstance(message.video.thumb.file_id, str)
assert message.video.thumb.file_id != ''
assert message.video.thumb.width == video.thumb.width
assert message.video.thumb.height == video.thumb.height
assert message.video.thumb.file_size == video.thumb.file_size
assert message.caption == self.caption
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_resend(self, bot, chat_id, video):
message = bot.send_video(chat_id, video.file_id)
assert message.video == video
def test_send_with_video(self, monkeypatch, bot, chat_id, video):
def test(_, url, data, **kwargs):
return data['video'] == video.file_id
monkeypatch.setattr('telegram.utils.request.Request.post', test)
message = bot.send_video(chat_id, video=video)
assert message
def test_de_json(self, bot):
json_dict = {
'file_id': 'not a file id',
'width': self.width,
'height': self.height,
'duration': self.duration,
'mime_type': self.mime_type,
'file_size': self.file_size
}
json_video = Video.de_json(json_dict, bot)
assert json_video.file_id == 'not a file id'
assert json_video.width == self.width
assert json_video.height == self.height
assert json_video.duration == self.duration
assert json_video.mime_type == self.mime_type
assert json_video.file_size == self.file_size
def test_to_dict(self, video):
video_dict = video.to_dict()
assert isinstance(video_dict, dict)
assert video_dict['file_id'] == video.file_id
assert video_dict['width'] == video.width
assert video_dict['height'] == video.height
assert video_dict['duration'] == video.duration
assert video_dict['mime_type'] == video.mime_type
assert video_dict['file_size'] == video.file_size
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file(self, bot, chat_id):
with pytest.raises(TelegramError):
bot.send_video(chat_id, open(os.devnull, 'rb'))
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file_id(self, bot, chat_id):
with pytest.raises(TelegramError):
bot.send_video(chat_id, '')
def test_error_without_required_args(self, bot, chat_id):
with pytest.raises(TypeError):
bot.send_video(chat_id=chat_id)
def test_get_file_instance_method(self, monkeypatch, video):
def test(*args, **kwargs):
return args[1] == video.file_id
monkeypatch.setattr('telegram.Bot.get_file', test)
assert video.get_file()
def test_equality(self, video):
a = Video(video.file_id, self.width, self.height, self.duration)
b = Video(video.file_id, self.width, self.height, self.duration)
c = Video(video.file_id, 0, 0, 0)
d = Video('', self.width, self.height, self.duration)
e = Voice(video.file_id, self.duration)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
| Eldinnie/python-telegram-bot | tests/test_video.py | Python | gpl-3.0 | 7,707 | 0.000389 |
from gurtel.util import Url
class TestUrl(object):
def equal(self, one, two):
"""
For this test, want to ensure that compare-equal implies hash-equal.
"""
return (one == two) and (hash(one) == hash(two))
def test_no_qs(self):
assert self.equal(
Url("http://fake.base/path/"),
Url("http://fake.base/path/"))
def test_same_qs(self):
assert self.equal(
Url("http://fake.base/path/?foo=bar"),
Url("http://fake.base/path/?foo=bar"))
def test_different_key_order(self):
assert self.equal(
Url("http://fake.base/path/?foo=bar&arg=yo"),
Url("http://fake.base/path/?arg=yo&foo=bar"))
def test_different_value_order(self):
assert not self.equal(
Url("http://fake.base/path/?foo=bar&foo=yo"),
Url("http://fake.base/path/?foo=yo&foo=bar"))
def test_repr(self):
assert self.equal(
repr(Url("http://fake.base/path/?foo=bar")),
"Url(http://fake.base/path/?foo=bar)")
| oddbird/gurtel | tests/test_util.py | Python | bsd-3-clause | 1,076 | 0 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import pytest
from django.conf import settings
from shuup.core.models import Shipment, ShippingStatus, StockBehavior
from shuup.testing.factories import (
add_product_to_order, create_empty_order, create_product,
get_default_shop, get_default_supplier
)
from shuup.utils.excs import Problem
@pytest.mark.django_db
def test_shipment_identifier():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
shipment = order.create_shipment({line.product: 1}, supplier=supplier)
expected_key_start = "%s/%s" % (order.pk, i)
assert shipment.identifier.startswith(expected_key_start)
assert order.shipments.count() == int(line.quantity)
assert order.shipping_status == ShippingStatus.FULLY_SHIPPED # Check that order is now fully shipped
assert not order.can_edit()
@pytest.mark.django_db
def test_shipment_creation_from_unsaved_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
unsaved_shipment = Shipment(order=order, supplier=supplier)
shipment = order.create_shipment({line.product: 1}, shipment=unsaved_shipment)
expected_key_start = "%s/%s" % (order.pk, i)
assert shipment.identifier.startswith(expected_key_start)
assert order.shipments.count() == int(line.quantity)
@pytest.mark.django_db
def test_shipment_creation_without_supplier_and_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
with pytest.raises(AssertionError):
order.create_shipment({line.product: 1})
assert order.shipments.count() == 0
@pytest.mark.django_db
def test_shipment_creation_with_invalid_unsaved_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
second_order = create_empty_order(shop=shop)
second_order.full_clean()
second_order.save()
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
with pytest.raises(AssertionError):
unsaved_shipment = Shipment(supplier=supplier, order=second_order)
order.create_shipment({line.product: 1}, shipment=unsaved_shipment)
assert order.shipments.count() == 0
@pytest.mark.django_db
def test_partially_shipped_order_status():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
assert order.can_edit()
first_product_line = order.lines.exclude(product_id=None).first()
assert first_product_line.quantity > 1
order.create_shipment({first_product_line.product: 1}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert not order.can_edit()
@pytest.mark.django_db
def test_shipment_delete():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
assert order.can_edit()
first_product_line = order.lines.exclude(product_id=None).first()
assert first_product_line.quantity > 1
shipment = order.create_shipment({first_product_line.product: 1}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert order.shipments.all().count() == 1
# Test shipment delete
shipment.soft_delete()
assert order.shipments.all().count() == 1
assert order.shipments.all_except_deleted().count() == 0
# Check the shipping status update
assert order.shipping_status == ShippingStatus.NOT_SHIPPED
@pytest.mark.django_db
def test_shipment_with_insufficient_stock():
if "shuup.simple_supplier" not in settings.INSTALLED_APPS:
pytest.skip("Need shuup.simple_supplier in INSTALLED_APPS")
from shuup_tests.simple_supplier.utils import get_simple_supplier
shop = get_default_shop()
supplier = get_simple_supplier()
order = _get_order(shop, supplier, stocked=True)
product_line = order.lines.products().first()
product = product_line.product
assert product_line.quantity == 15
supplier.adjust_stock(product.pk, delta=10)
stock_status = supplier.get_stock_status(product.pk)
assert stock_status.physical_count == 10
order.create_shipment({product: 5}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert order.shipments.all().count() == 1
with pytest.raises(Problem):
order.create_shipment({product: 10}, supplier=supplier)
# Should be fine after adding more stock
supplier.adjust_stock(product.pk, delta=5)
order.create_shipment({product: 10}, supplier=supplier)
def _get_order(shop, supplier, stocked=False):
order = create_empty_order(shop=shop)
order.full_clean()
order.save()
for product_data in _get_product_data(stocked):
quantity = product_data.pop("quantity")
product = create_product(
sku=product_data.pop("sku"),
shop=shop,
supplier=supplier,
default_price=3.33,
**product_data)
add_product_to_order(order, supplier, product, quantity=quantity, taxless_base_unit_price=1)
order.cache_prices()
order.check_all_verified()
order.save()
return order
def _get_product_data(stocked=False):
return [
{
"sku": "sku1234",
"net_weight": decimal.Decimal("1"),
"gross_weight": decimal.Decimal("43.34257"),
"quantity": decimal.Decimal("15"),
"stock_behavior": StockBehavior.STOCKED if stocked else StockBehavior.UNSTOCKED
}
]
| hrayr-artunyan/shuup | shuup_tests/core/test_shipments.py | Python | agpl-3.0 | 6,394 | 0.001251 |
from copy import deepcopy
from .base import Strategy
class InsertionSort(Strategy):
def sort_by(self, field):
return self._sort(lambda x, y: x.grades[field] < y.grades[field])
def sort(self):
return self._sort(lambda x, y: x < y)
def _sort(self, compare):
for first_item in self.items:
items = deepcopy(self.items)
items.iterator_start = first_item.next
for second_item in items:
if compare(first_item, second_item):
self.items.interchange(first_item, second_item)
| vtemian/university_projects | practic_stage/hmw4/strategies/insertion.py | Python | apache-2.0 | 524 | 0.013359 |
# coding=utf-8
""" NodeChains are sequential orders of :mod:`~pySPACE.missions.nodes`
.. image:: ../../graphics/node_chain.png
:width: 500
There are two main use cases:
* the application for :mod:`~pySPACE.run.launch_live` and the
:mod:`~pySPACE.environments.live` using the default
:class:`NodeChain` and
* the benchmarking with :mod:`~pySPACE.run.launch` using
the :class:`BenchmarkNodeChain` with the
:mod:`~pySPACE.missions.operations.node_chain` operation.
.. seealso::
- :mod:`~pySPACE.missions.nodes`
- :ref:`node_list`
- :mod:`~pySPACE.missions.operations.node_chain` operation
.. image:: ../../graphics/launch_live.png
:width: 500
.. todo:: Documentation
This module extends/reimplements the original MDP flow class and
has some additional methods like reset(), save() etc.
Furthermore it supports the construction of NodeChains and
also running them inside nodes in parallel.
MDP is distributed under the following BSD license::
This file is part of Modular toolkit for Data Processing (MDP).
All the code in this package is distributed under the following conditions:
Copyright (c) 2003-2012, MDP Developers <mdp-toolkit-devel@lists.sourceforge.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Modular toolkit for Data Processing (MDP)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
if __name__ == '__main__':
# add root of the code to system path
file_path = os.path.dirname(os.path.abspath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
sys.path.append(pyspace_path)
import cPickle
import gc
import logging
import multiprocessing
import shutil
import socket
import time
import uuid
import yaml
import pySPACE
from pySPACE.tools.filesystem import create_directory
from pySPACE.tools.socket_utils import talk, inform
from pySPACE.tools.conversion import python2yaml, replace_parameters_and_convert, replace_parameters
import copy
import warnings
import traceback
import numpy
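# Illustrative sketch (not part of the original module): the typical pySPACE
# pattern described in the module docstring -- a chain whose first node is a
# source node is trained by simply calling train(), which pulls the data
# through the chain. `node_sequence` is assumed to be a list of already
# instantiated pySPACE nodes.
def _example_train_chain(node_sequence):
    chain = NodeChain(node_sequence)
    chain.train()     # data is requested from the source node at the front
    return chain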
class CrashRecoveryException(Exception):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, crashing_obj, parent_exception)
The crashing object is kept in self.crashing_obj
The triggering parent exception is kept in ``self.parent_exception``.
"""
errstr = args[0]
self.crashing_obj = args[1]
self.parent_exception = args[2]
# ?? python 2.5: super(CrashRecoveryException, self).__init__(errstr)
super(CrashRecoveryException,self).__init__(self, errstr)
def dump(self, filename = None):
"""
Save a pickle dump of the crashing object on filename.
If filename is None, the crash dump is saved on a file created by
the tempfile module.
Return the filename.
"""
import cPickle
import tempfile
if filename is None:
(fd, filename)=tempfile.mkstemp(suffix=".pic", prefix="NodeChainCrash_")
fl = os.fdopen(fd, 'w+b', -1)
else:
fl = open(filename, 'w+b', -1)
cPickle.dump(self.crashing_obj, fl)
fl.close()
return filename
class NodeChainException(Exception):
"""Base class for exceptions in node chains."""
pass
class NodeChainExceptionCR(CrashRecoveryException, NodeChainException):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, flow_instance, parent_exception)
The triggering parent exception is kept in self.parent_exception.
If ``flow_instance._crash_recovery`` is set, save a crash dump of
flow_instance on the file self.filename
"""
CrashRecoveryException.__init__(self, *args)
rec = self.crashing_obj._crash_recovery
errstr = args[0]
if rec:
if isinstance(rec, str):
name = rec
else:
name = None
name = CrashRecoveryException.dump(self, name)
dumpinfo = '\nA crash dump is available on: "%s"' % name
self.filename = name
errstr = errstr+dumpinfo
Exception.__init__(self, errstr)
class NodeChain(object):
""" Reimplement/overwrite mdp.Flow methods e.g., for supervised learning """
def __init__(self, node_sequence, crash_recovery=False, verbose=False):
""" Creates the NodeChain based on the node_sequence
        .. note:: The NodeChain cannot be executed until all trainable
            nodes have been trained, i.e. self.trained() == True.
"""
self._check_nodes_consistency(node_sequence)
self.flow = node_sequence
self.verbose = verbose
self.set_crash_recovery(crash_recovery)
# Register the direct predecessor of a node as its input
# (i.e. we assume linear flows)
for i in range(len(node_sequence) - 1):
node_sequence[i+1].register_input_node(node_sequence[i])
self.use_test_data = False
# set a default run number
self[-1].set_run_number(0)
# give this flow a unique identifier
self.id = str(uuid.uuid4())
self.handler = None
self.store_intermediate_results = True
def train(self, data_iterators=None):
""" Train NodeChain with data from iterator or source node
The method can proceed in two different ways:
* If no data is provided, it is checked that the first node of
the flow is a source node. If that is the case, the data provided
by this node is passed forward through the flow. During this
forward propagation, the flow is trained.
        The data request is issued by the last node.
        * If a list of data iterators is provided,
        it is checked that the NodeChain contains no source
        and no split nodes.
        These nodes already come with their own data handling
        and should not be used when the training data is supplied externally.
        Furthermore, split nodes are only relevant for benchmarking.
        One iterator has to be given for each node.
        If only a single iterator (or no list) is given, it is mapped to a list
        with the same iterator for each node.
.. note:: The iterator approach is normally not used in pySPACE,
because pySPACE supplies the data with special
source nodes and is doing the training automatically
without explicit calls on data samples.
The approach came with MDP.
        .. todo:: The iterator approach needs some use cases and testing,
            especially because it is not used in the normal setting.
"""
if data_iterators is not None:
# Check if no source and split nodes are contained in the node chain
assert(not self[0].is_source_node()), \
"Node chains with source nodes cannot be trained "\
"with external data_iterators!"
for node in self:
assert(not node.is_split_node()), \
"Node chains with split nodes cannot be trained "\
"with external data_iterators!"
# prepare iterables
            if not isinstance(data_iterators, list):
                data_iterators = [data_iterators] * len(self.flow)
            elif len(data_iterators) != len(self.flow):
                data_iterators = [data_iterators] * len(self.flow)
# Delegate to iterative training
self.iter_train(data_iterators)
else: # Use the pySPACE train semantic and not MDP type
# Check if the first node of the node chain is a source node
assert(self[0].is_source_node()), \
"Training of a node chain without source node requires a "\
"data_iterator argument!"
# Training is accomplished by requesting the iterator
# of the last node of the chain. This node will recursively call
# the train method of all its predecessor nodes.
# As soon as the first element is yielded the node has been trained.
for _ in self[-1].request_data_for_training(
use_test_data=self.use_test_data):
return
def iter_train(self, data_iterables):
""" Train all trainable nodes in the NodeChain with data from iterator
*data_iterables* is a list of iterables, one for each node in the chain.
The iterators returned by the iterables must return data arrays that
are then used for the node training (so the data arrays are the data for
the nodes).
Note that the data arrays are processed by the nodes
which are in front of the node that gets trained, so the data dimension
must match the input dimension of the first node.
If a node has only a single training phase then instead of an iterable
you can alternatively provide an iterator (including generator-type
iterators). For nodes with multiple training phases this is not
possible, since the iterator cannot be restarted after the first
iteration. For more information on iterators and iterables see
http://docs.python.org/library/stdtypes.html#iterator-types .
In the special case that *data_iterables* is one single array,
it is used as the data array *x* for all nodes and training phases.
Instead of a data array *x* the iterators can also return a list or
tuple, where the first entry is *x* and the following are args for the
training of the node (e.g., for supervised training).
"""
data_iterables = self._train_check_iterables(data_iterables)
# train each Node successively
for i in range(len(self.flow)):
if self.verbose:
print "Training node #%d (%s)" % (i, str(self.flow[i]))
self._train_node(data_iterables[i], i)
if self.verbose:
print "Training finished"
self._close_last_node()
def trained(self):
"""
Returns whether the complete training is finished, i.e. if all nodes have been trained.
"""
return self[-1].get_remaining_train_phase() == 0
def execute(self, data_iterators=None):
""" Process the data through all nodes """
if data_iterators is not None:
# Delegate to super class
return self.iter_execute(data_iterators)
else: # Use the evaluate semantic
# Check if the first node of the flow is a source node
assert (self[0].is_source_node()), \
"Evaluation of a node chain without source node requires a " \
"data_iterator argument!"
# This is accomplished by calling the request_data_for_testing
# method of the last node of the chain. This node will recursively
# call the request_data_for_testing method of all its predecessor
# nodes
return self[-1].process()
def iter_execute(self, iterable, nodenr = None):
""" Process the data through all nodes in the chain till *nodenr*
'iterable' is an iterable or iterator (note that a list is also an
iterable), which returns data arrays that are used as input.
Alternatively, one can specify one data array as input.
If 'nodenr' is specified, the flow is executed only up to
node nr. 'nodenr'. This is equivalent to 'flow[:nodenr+1](iterable)'.
        .. note:: In contrast to MDP, results are not concatenated
to one big object. Each data object remains separate.
"""
if isinstance(iterable, numpy.ndarray):
return self._execute_seq(iterable, nodenr)
res = []
empty_iterator = True
for x in iterable:
empty_iterator = False
res.append(self._execute_seq(x, nodenr))
if empty_iterator:
errstr = ("The execute data iterator is empty.")
raise NodeChainException(errstr)
return res
def _inc_train(self, data, class_label=None):
""" Iterate through the nodes to train them """
for node in self:
if node.is_retrainable() and not node.buffering and hasattr(node, "_inc_train"):
if not node.retraining_phase:
node.retraining_phase=True
node.start_retraining()
node._inc_train(data,class_label)
if not (hasattr(self, "buffering") and self.buffering):
data = node.execute(data)
else: # workaround to inherit meta data
self.buffering = False
data = node.execute(data)
self.buffering = True
def save(self, filename, protocol = -1):
""" Save a pickled representation to *filename*
If *filename* is None, return a string.
.. note:: the pickled NodeChain is not guaranteed to be upward or
backward compatible.
.. note:: Having C-Code in the node might cause problems with saving.
Therefore, the code has special handling for the
LibSVMClassifierNode.
.. todo:: Intrinsic node methods for storing should be used.
.. seealso:: :func:`store_node_chain`
"""
if self[-1].__class__.__name__ in ["LibSVMClassifierNode"] \
and self[-1].multinomial:
indx = filename.find(".pickle")
if indx != -1:
self[-1].save_model(filename[0:indx]+'.model')
else:
self[-1].save_model(filename+'.model')
import cPickle
odict = self.__dict__.copy() # copy the dict since we change it
# Remove other non-pickable stuff
        remove_keys = []
for key, value in odict.iteritems():
if key == "input_node" or key == "flow":
continue
try:
cPickle.dumps(value)
except (ValueError, TypeError, cPickle.PicklingError):
remove_keys.append(key)
for key in remove_keys:
odict.pop(key)
self.__dict__ = odict
if filename is None:
return cPickle.dumps(self, protocol)
else:
# if protocol != 0 open the file in binary mode
if protocol != 0:
mode = 'wb'
else:
mode = 'w'
flh = open(filename , mode)
cPickle.dump(self, flh, protocol)
flh.close()
def get_output_type(self, input_type, as_string=True):
"""
Returns the output type of the entire node chain
Recursively iterate over nodes in flow
"""
output = input_type
for i in range(len(self.flow)):
if i == 0:
output = self.flow[i].get_output_type(
input_type, as_string=True)
else:
output = self.flow[i].get_output_type(output, as_string=True)
if as_string:
return output
else:
return self.string_to_class(output)
@staticmethod
def string_to_class(string_encoding):
""" given a string variable, outputs a class instance
e.g. obtaining a TimeSeries
"""
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.prediction_vector import PredictionVector
if "TimeSeries" in string_encoding:
return TimeSeries
elif "PredictionVector" in string_encoding:
return PredictionVector
elif "FeatureVector" in string_encoding:
return FeatureVector
else:
raise NotImplementedError
#################
# MDP Code copy #
def _propagate_exception(self, exception, nodenr):
        # Capture the exception: the traceback of the error is formatted into
        # a new exception that identifies the failing node in the NodeChain,
        # and this new exception is raised. Allows crash recovery.
(etype, val, tb) = sys.exc_info()
prev = ''.join(traceback.format_exception(exception.__class__,
exception,tb))
act = "\n! Exception in node #%d (%s):\n" % (nodenr,
str(self.flow[nodenr]))
errstr = ''.join(('\n', 40*'-', act, 'Node Traceback:\n', prev, 40*'-'))
raise NodeChainExceptionCR(errstr, self, exception)
def _train_node(self, data_iterable, nodenr):
""" Train a single node in the flow.
nodenr -- index of the node in the flow
"""
node = self.flow[nodenr]
if (data_iterable is not None) and (not node.is_trainable()):
# attempted to train a node although it is not trainable.
# raise a warning and continue with the next node.
# wrnstr = "\n! Node %d is not trainable" % nodenr + \
# "\nYou probably need a 'None' iterable for"+\
# " this node. Continuing anyway."
#warnings.warn(wrnstr, UserWarning)
return
elif (data_iterable is None) and node.is_training():
# None instead of iterable is passed to a training node
err_str = ("\n! Node %d is training"
" but instead of iterable received 'None'." % nodenr)
raise NodeChainException(err_str)
elif (data_iterable is None) and (not node.is_trainable()):
# skip training if node is not trainable
return
try:
train_arg_keys = self._get_required_train_args(node)
train_args_needed = bool(len(train_arg_keys))
## We leave the last training phase open for the
## CheckpointFlow class.
## Checkpoint functions must close it explicitly if needed!
## Note that the last training_phase is closed
## automatically when the node is executed.
while True:
empty_iterator = True
for x in data_iterable:
empty_iterator = False
# the arguments following the first are passed only to the
# currently trained node, allowing the implementation of
# supervised nodes
if (type(x) is tuple) or (type(x) is list):
arg = x[1:]
x = x[0]
else:
arg = ()
# check if the required number of arguments was given
if train_args_needed:
if len(train_arg_keys) != len(arg):
err = ("Wrong number of arguments provided by " +
"the iterable for node #%d " % nodenr +
"(%d needed, %d given).\n" %
(len(train_arg_keys), len(arg)) +
"List of required argument keys: " +
str(train_arg_keys))
raise NodeChainException(err)
# filter x through the previous nodes
if nodenr > 0:
x = self._execute_seq(x, nodenr-1)
# train current node
node.train(x, *arg)
if empty_iterator:
if node.get_current_train_phase() == 1:
err_str = ("The training data iteration for node "
"no. %d could not be repeated for the "
"second training phase, you probably "
"provided an iterator instead of an "
"iterable." % (nodenr+1))
raise NodeChainException(err_str)
else:
err_str = ("The training data iterator for node "
"no. %d is empty." % (nodenr+1))
raise NodeChainException(err_str)
self._stop_training_hook()
# close the previous training phase
node.stop_training()
if node.get_remaining_train_phase() > 0:
continue
else:
break
except self.flow[-1].TrainingFinishedException, e:
# attempted to train a node although its training phase is already
# finished. raise a warning and continue with the next node.
wrnstr = ("\n! Node %d training phase already finished"
" Continuing anyway." % nodenr)
warnings.warn(wrnstr, UserWarning)
except NodeChainExceptionCR, e:
# this exception was already propagated,
# probably during the execution of a node upstream in the flow
(exc_type, val) = sys.exc_info()[:2]
prev = ''.join(traceback.format_exception_only(e.__class__, e))
prev = prev[prev.find('\n')+1:]
act = "\nWhile training node #%d (%s):\n" % (nodenr,
str(self.flow[nodenr]))
err_str = ''.join(('\n', 40*'=', act, prev, 40*'='))
raise NodeChainException(err_str)
except Exception, e:
# capture any other exception occurred during training.
self._propagate_exception(e, nodenr)
def _stop_training_hook(self):
"""Hook method that is called before stop_training is called."""
pass
@staticmethod
def _get_required_train_args(node):
"""Return arguments in addition to self and x for node.train.
Arguments that have a default value are ignored.
"""
import inspect
train_arg_spec = inspect.getargspec(node.train)
train_arg_keys = train_arg_spec[0][2:] # ignore self, x
if train_arg_spec[3]:
# subtract arguments with a default value
train_arg_keys = train_arg_keys[:-len(train_arg_spec[3])]
return train_arg_keys
def _train_check_iterables(self, data_iterables):
"""Return the data iterables after some checks and sanitizing.
Note that this method does not distinguish between iterables and
iterators, so this must be taken care of later.
"""
# verifies that the number of iterables matches that of
# the signal nodes and multiplies them if needed.
flow = self.flow
# # if a single array is given wrap it in a list of lists,
# # note that a list of 2d arrays is not valid
# if isinstance(data_iterables, numpy.ndarray):
# data_iterables = [[data_iterables]] * len(flow)
if not isinstance(data_iterables, list):
err_str = ("'data_iterables' must be either a list of "
"iterables or an array, but got %s" %
str(type(data_iterables)))
raise NodeChainException(err_str)
# check that all elements are iterable
for i, iterable in enumerate(data_iterables):
if (iterable is not None) and (not hasattr(iterable, '__iter__')):
err = ("Element number %d in the data_iterables"
" list is not an iterable." % i)
raise NodeChainException(err)
# check that the number of data_iterables is correct
if len(data_iterables) != len(flow):
err_str = ("%d data iterables specified,"
" %d needed" % (len(data_iterables), len(flow)))
raise NodeChainException(err_str)
return data_iterables
def _close_last_node(self):
if self.verbose:
print "Close the training phase of the last node"
try:
self.flow[-1].stop_training()
except self.flow[-1].TrainingFinishedException:
pass
except Exception, e:
self._propagate_exception(e, len(self.flow)-1)
def set_crash_recovery(self, state = True):
"""Set crash recovery capabilities.
When a node raises an Exception during training, execution, or
inverse execution that the flow is unable to handle, a NodeChainExceptionCR
is raised. If crash recovery is set, a crash dump of the flow
instance is saved for later inspection. The original exception
can be found as the 'parent_exception' attribute of the
NodeChainExceptionCR instance.
- If 'state' = False, disable crash recovery.
        - If 'state' is a string, the crash dump is saved to a file
          with that name.
        - If 'state' = True, the crash dump is saved to a file created by
          the tempfile module.
"""
self._crash_recovery = state
def _execute_seq(self, x, nodenr = None):
""" Executes input data 'x' through the nodes 0..'node_nr' included
If no *nodenr* is specified, the complete node chain is used for
processing.
"""
flow = self.flow
if nodenr is None:
nodenr = len(flow)-1
for node_index in range(nodenr+1):
try:
x = flow[node_index].execute(x)
except Exception, e:
self._propagate_exception(e, node_index)
return x
def copy(self, protocol=None):
"""Return a deep copy of the flow.
The protocol parameter should not be used.
"""
import copy
if protocol is not None:
warnings.warn("protocol parameter to copy() is ignored",
DeprecationWarning, stacklevel=2)
return copy.deepcopy(self)
def __call__(self, iterable, nodenr = None):
"""Calling an instance is equivalent to call its 'execute' method."""
return self.iter_execute(iterable, nodenr=nodenr)
###### string representation
def __str__(self):
nodes = ', '.join([str(x) for x in self.flow])
return '['+nodes+']'
def __repr__(self):
# this should look like a valid Python expression that
# could be used to recreate an object with the same value
# eval(repr(object)) == object
name = type(self).__name__
pad = len(name)+2
sep = ',\n'+' '*pad
nodes = sep.join([repr(x) for x in self.flow])
return '%s([%s])' % (name, nodes)
###### private container methods
def __len__(self):
return len(self.flow)
def _check_dimension_consistency(self, out, inp):
"""Raise ValueError when both dimensions are set and different."""
        if (out is not None) and (inp is not None) and out != inp:
errstr = "dimensions mismatch: %s != %s" % (str(out), str(inp))
raise ValueError(errstr)
def _check_nodes_consistency(self, flow = None):
"""Check the dimension consistency of a list of nodes."""
if flow is None:
flow = self.flow
len_flow = len(flow)
for i in range(1, len_flow):
out = flow[i-1].output_dim
inp = flow[i].input_dim
self._check_dimension_consistency(out, inp)
def _check_value_type_isnode(self, value):
if not isinstance(value, pySPACE.missions.nodes.base.BaseNode):
raise TypeError("flow item must be Node instance")
def __getitem__(self, key):
if isinstance(key, slice):
flow_slice = self.flow[key]
self._check_nodes_consistency(flow_slice)
return self.__class__(flow_slice)
else:
return self.flow[key]
def __setitem__(self, key, value):
if isinstance(key, slice):
[self._check_value_type_isnode(item) for item in value]
else:
self._check_value_type_isnode(value)
# make a copy of list
flow_copy = list(self.flow)
flow_copy[key] = value
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
self.flow = flow_copy
def __delitem__(self, key):
# make a copy of list
flow_copy = list(self.flow)
del flow_copy[key]
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
self.flow = flow_copy
def __contains__(self, item):
return self.flow.__contains__(item)
def __iter__(self):
return self.flow.__iter__()
def __add__(self, other):
# append other to self
if isinstance(other, NodeChain):
flow_copy = list(self.flow).__add__(other.flow)
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
return self.__class__(flow_copy)
elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
flow_copy = list(self.flow)
flow_copy.append(other)
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
return self.__class__(flow_copy)
else:
err_str = ('can only concatenate flow or node'
' (not \'%s\') to flow' % (type(other).__name__))
raise TypeError(err_str)
def __iadd__(self, other):
# append other to self
if isinstance(other, NodeChain):
self.flow += other.flow
elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
self.flow.append(other)
else:
err_str = ('can only concatenate flow or node'
' (not \'%s\') to flow' % (type(other).__name__))
raise TypeError(err_str)
self._check_nodes_consistency(self.flow)
return self
###### public container methods
def append(self, x):
"""flow.append(node) -- append node to flow end"""
self[len(self):len(self)] = [x]
def extend(self, x):
"""flow.extend(iterable) -- extend flow by appending
elements from the iterable"""
if not isinstance(x, NodeChain):
err_str = ('can only concatenate flow'
' (not \'%s\') to flow' % (type(x).__name__))
raise TypeError(err_str)
self[len(self):len(self)] = x
def insert(self, i, x):
"""flow.insert(index, node) -- insert node before index"""
self[i:i] = [x]
def pop(self, i = -1):
"""flow.pop([index]) -> node -- remove and return node at index
(default last)"""
x = self[i]
del self[i]
return x
def reset(self):
""" Reset the flow and obey permanent_attributes where available
        The method was moved to the end of the class definition because it
        requires the __getitem__ method to be defined beforehand.
"""
for i in range(len(self)):
self[i].reset()
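# The following is a minimal usage sketch for the iterator-based training
# described in NodeChain.train / NodeChain.iter_train above. It is not part
# of the original pySPACE API; ``node_sequence`` is assumed to be a list of
# already constructed BaseNode instances without source or split nodes, and
# ``samples`` a list of (data, label) tuples as expected by supervised
# trainable nodes.
def _example_iter_train(node_sequence, samples):
    chain = NodeChain(node_sequence)
    # one iterable per node; here the same sample list is reused for each node
    chain.train([samples] * len(chain))
    return chain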
class BenchmarkNodeChain(NodeChain):
""" This subclass overwrites the train method in order
to provide a more convenient way of doing supervised learning.
    Furthermore, it contains a benchmark method that evaluates the flow on
    all splits of a dataset and collects the results.
This includes logging, setting of run numbers,
delivering the result collection, handling of source and sink nodes, ...
:Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
:Created: 2008/08/18
"""
def __init__(self, node_sequence):
""" Creates the BenchmarkNodeChain based on the node_sequence """
super(BenchmarkNodeChain, self).__init__(node_sequence)
        # Each BenchmarkNodeChain must start with a source node
# and end with a sink node
assert(self[0].is_source_node()), \
"A benchmark flow must start with a source node"
assert(self[-1].is_sink_node()), \
"A benchmark flow must end with a sink node"
def use_next_split(self):
"""
Use the next split of the data into training and test data
This method is useful for pySPACE-benchmarking
"""
# This is handled by calling use_next_split() of the last node of
# the flow which will recursively call predecessor nodes in the flow
# until a node is found that handles the splitting
return self[-1].use_next_split()
def benchmark(self, input_collection, run=0,
persistency_directory=None, store_node_chain=False):
""" Perform the benchmarking of this data flow with the given collection
Benchmarking is accomplished by iterating through all splits of the
data into training and test data.
**Parameters**:
:input_collection:
A sequence of data/label-tuples that serves as a generator or a
BaseDataset which contains the data to be processed.
:run:
The current run which defines all random seeds within the flow.
:persistency_directory:
Optional information of the nodes as well as the trained node chain
(if *store_node_chain* is not False) are stored to the given
*persistency_directory*.
:store_node_chain:
If True the trained flow is stored to *persistency_directory*.
                If *store_node_chain* is a tuple of length 2, say (i1, i2),
only the subflow starting at the i1-th node and ending at the
(i2-1)-th node is stored. This may be useful when the stored
flow should be used in an ensemble.
"""
# Inform the first node of this flow about the input collection
if hasattr(input_collection,'__iter__'):
# assume a generator is given
self[0].set_generator(input_collection)
else: # assume BaseDataset
self[0].set_input_dataset(input_collection)
# Inform all nodes recursively about the number of the current run
self[-1].set_run_number(int(run))
# set temp file folder
if persistency_directory != None:
self[-1].set_temp_dir(persistency_directory+os.sep+"temp_dir")
split_counter = 0
# For every split of the dataset
while True: # As long as more splits are available
# Compute the results for the current split
# by calling the method on its last node
self[-1].process_current_split()
if persistency_directory != None:
if store_node_chain:
self.store_node_chain(persistency_directory + os.sep + \
"node_chain_sp%s.pickle" % split_counter, store_node_chain)
# Store nodes that should be persistent
self.store_persistent_nodes(persistency_directory)
# If no more splits are available
if not self.use_next_split():
break
split_counter += 1
# print "Input benchmark"
# print gc.get_referrers(self[0].collection)
        # During processing, numerous references to the flow are created but
        # not deleted, so the memory is not freed (this can be observed with
        # the commented lines above). Therefore we release the input
        # collection explicitly; only then can the garbage collector free the
        # memory. Otherwise, for reasons not yet understood, the references to
        # the input collection would remain even between processes.
if hasattr(input_collection,'__iter__'):
self[0].set_generator(None)
else:
self[0].set_input_dataset(None)
gc.collect()
# Return the result collection of this flow
return self[-1].get_result_dataset()
def __call__(self, iterable=None, train_instances=None, runs=[]):
""" Call *execute* or *benchmark* and return (id, PerformanceResultSummary)
If *iterable* is given, calling an instance is equivalent to call its
'execute' method.
If *train_instances* and *runs* are given, 'benchmark' is called for
every run number specified and results are merged. This is useful for
e.g. parallel execution of subflows with the multiprocessing module,
        since instance methods cannot be serialized in Python, but whole objects can.
"""
if iterable != None:
return self.execute(iterable)
elif train_instances != None and runs != []: # parallelization case
# we have to reinitialize logging cause otherwise deadlocks occur
# when parallelization is done via multiprocessing.Pool
self.prepare_logging()
for ind, run in enumerate(runs):
result = self.benchmark(train_instances, run=run)
if ind == 0:
result_collection = result
else:
result_collection.data.update(result.data)
# reset node chain for new training if another call of
# :func:`benchmark` is expected.
if not ind == len(runs) - 1:
self.reset()
self.clean_logging()
return (self.id, result_collection)
else:
import warnings
warnings.warn("__call__ methods needs at least one parameter (data)")
return None
def store_node_chain(self, result_dir, store_node_chain):
""" Pickle this flow into *result_dir* for later usage"""
if isinstance(store_node_chain,basestring):
store_node_chain = eval(store_node_chain)
if isinstance(store_node_chain,tuple):
assert(len(store_node_chain) == 2)
# Keep only subflow starting at the i1-th node and ending at the
# (i2-1) node.
flow = NodeChain(self.flow[store_node_chain[0]:store_node_chain[1]])
elif isinstance(store_node_chain,list):
# Keep only nodes with indices contained in the list
# nodes have to be copied, otherwise input_node-refs of current flow
# are overwritten
from copy import copy
store_node_list = [copy(node) for ind, node in enumerate(self.flow) \
if ind in store_node_chain]
flow = NodeChain(store_node_list)
else:
# Per default, get rid of source and sink nodes
flow = NodeChain(self.flow[1:-1])
input_node = flow[0].input_node
flow[0].input_node = None
flow.save(result_dir)
def prepare_logging(self):
""" Set up logging
This method is only needed if one forks subflows, i.e. to execute them
via multiprocessing.Pool
"""
# Prepare remote logging
root_logger = logging.getLogger("%s-%s" % (socket.gethostname(),
os.getpid()))
root_logger.setLevel(logging.DEBUG)
root_logger.propagate = False
if len(root_logger.handlers)==0:
self.handler = logging.handlers.SocketHandler(socket.gethostname(),
logging.handlers.DEFAULT_TCP_LOGGING_PORT)
root_logger.addHandler(self.handler)
def clean_logging(self):
""" Remove logging handlers if existing
Call this method only if you have called *prepare_logging* before.
"""
# Remove potential logging handlers
if self.handler is not None:
self.handler.close()
root_logger = logging.getLogger("%s-%s" % (socket.gethostname(),
os.getpid()))
root_logger.removeHandler(self.handler)
def store_persistent_nodes(self, result_dir):
""" Store all nodes that should be persistent """
        # For all nodes
for index, node in enumerate(self):
# Store them in the result dir if they enabled storing
node.store_state(result_dir, index)
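# A compact sketch of the benchmark workflow described in
# BenchmarkNodeChain.benchmark above: a flow is built from a YAML node chain
# specification and evaluated for a single run. ``yaml_spec`` (a YAML string
# or list of dicts describing a node chain that starts with a source node and
# ends with a sink node) and ``dataset`` (a BaseDataset or generator) are
# assumptions for illustration only.
def _example_benchmark(yaml_spec, dataset, result_dir=None):
    flow = NodeChainFactory.flow_from_yaml(BenchmarkNodeChain, yaml_spec)
    return flow.benchmark(dataset, run=0,
                          persistency_directory=result_dir,
                          store_node_chain=False)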
class NodeChainFactory(object):
""" Provide static methods to create and instantiate data flows
:Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
:Created: 2009/01/26
"""
@staticmethod
def flow_from_yaml(Flow_Class, flow_spec):
""" Creates a Flow object
Reads from the given *flow_spec*, which should be a valid YAML
specification of a NodeChain object, and returns this dataflow
object.
**Parameters**
:Flow_Class:
The class name of node chain to create. Valid are 'NodeChain' and
'BenchmarkNodeChain'.
:flow_spec:
A valid YAML specification stream; this could be a file object,
a string representation of the YAML file or the Python
representation of the YAML file (list of dicts)
"""
from pySPACE.missions.nodes.base_node import BaseNode
# Reads and parses the YAML file if necessary
if type(flow_spec) != list:
dataflow_spec = yaml.load(flow_spec)
else:
dataflow_spec = flow_spec
node_sequence = []
# For all nodes of the flow
for node_spec in dataflow_spec:
# Use factory method to create node
node_obj = BaseNode.node_from_yaml(node_spec)
# Append this node to the sequence of node
node_sequence.append(node_obj)
# Check if the nodes have to cache their outputs
for index, node in enumerate(node_sequence):
# If a node is trainable, it uses the outputs of its input node
# at least twice, so we have to cache.
if node.is_trainable():
node_sequence[index - 1].set_permanent_attributes(caching = True)
            # Split nodes might also request the data from their input nodes
            # (once for each split), depending on their implementation. We
            # assume the worst case and activate caching.
if node.is_split_node():
node_sequence[index - 1].set_permanent_attributes(caching = True)
# Create the flow based on the node sequence and the given flow class
# and return it
return Flow_Class(node_sequence)
@staticmethod
def instantiate(template, parametrization):
""" Instantiate a template recursively for the given parameterization
Instantiate means to replace the parameter in the template by the
chosen value.
**Parameters**
:template:
A dictionary with key-value pairs, where values might contain
parameter keys which have to be replaced. A typical example of a
template would be a Python representation of a node read from YAML.
:parametrization:
                A dictionary with parameter names as keys and exactly one
                value per parameter as value.
"""
instance = {}
for key, value in template.iteritems():
if value in parametrization.keys(): # Replacement
instance[key] = parametrization[value]
elif isinstance(value, dict): # Recursive call
instance[key] = NodeChainFactory.instantiate(value, parametrization)
elif isinstance(value, basestring): # String replacement
for param_key, param_value in parametrization.iteritems():
try:
value = value.replace(param_key, repr(param_value))
except:
value = value.replace(param_key, python2yaml(param_value))
instance[key] = value
elif hasattr(value, "__iter__"):
# Iterate over all items in sequence
instance[key] = []
for iter_item in value:
if iter_item in parametrization.keys(): # Replacement
instance[key].append(parametrization[iter_item])
elif isinstance(iter_item, dict):
instance[key].append(NodeChainFactory.instantiate(
iter_item, parametrization))
                    elif isinstance(iter_item, basestring): # String replacement
for param_key, param_value in parametrization.iteritems():
try:
iter_item = iter_item.replace(param_key,
repr(param_value))
except:
iter_item = iter_item.replace(
param_key, python2yaml(param_value))
                        instance[key].append(iter_item)
else:
instance[key].append(iter_item)
else: # Not parameterized
instance[key] = value
return instance
@staticmethod
def replace_parameters_in_node_chain(node_chain_template, parametrization):
node_chain_template = copy.copy(node_chain_template)
if parametrization == {}:
return node_chain_template
elif type(node_chain_template) == list:
return [NodeChainFactory.instantiate(
template=node,parametrization=parametrization)
for node in node_chain_template]
elif isinstance(node_chain_template, basestring):
node_chain_template = \
replace_parameters(node_chain_template, parametrization)
return node_chain_template
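# A small, self-contained sketch of NodeChainFactory.instantiate: every
# occurrence of a parameter key in the template is replaced by the chosen
# value. The node name and the parameter key used here are made up for
# illustration and do not refer to real pySPACE nodes.
def _example_instantiate():
    template = {"node": "SomeNormalizationNode",
                "parameters": {"outlier_probability": "__prob__"}}
    instance = NodeChainFactory.instantiate(template,
                                            parametrization={"__prob__": 0.05})
    # instance["parameters"]["outlier_probability"] is now 0.05
    return instance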
class SubflowHandler(object):
""" Interface for nodes to generate and execute subflows (subnode-chains)
A subflow means a node chain used inside a node for processing data.
This class provides functions that can be used by nodes to generate and
    execute subflows. It thereby serves as the communication interface to the
    backend (if one is used).
Most important when inheriting from this class is that the subclass MUST be
a node. The reason is that this class uses node functionality, e.g. logging,
the *temp_dir*-variable and so on.
**Parameters**
:processing_modality:
One of the valid strings: 'backend', 'serial', 'local'.
:backend:
The current backends modality is used. This is implemented
at the moment only for 'LoadlevelerBackend' and 'LocalBackend'.
:serial:
All subflows are executed sequentially, i.e. one after the
other.
:local:
            Subflows are executed in a Pool using *pool_size* CPUs. This
            may also be needed when no backend is used.
(*optional, default: 'serial'*)
:pool_size:
        If parallelization is based on running several processes on a local
        system in parallel, e.g. option 'backend' with the
        :class:`pySPACEMulticoreBackend` or option 'local',
        the number of worker processes for subflow evaluation has
        to be specified.
.. note:: When using the LocalBackend, there is also the possibility
to specify the pool size of parallel executed
            processes, e.g. data sets. Your total number of CPUs
should be pool size (pySPACE) + pool size (subflows).
(*optional, default: 2*)
:batch_size:
If parallelization of subflow execution is done together with the
:class:`~pySPACE.environments.backends.ll_backend.LoadLevelerBackend`,
*batch_size* determines how many subflows are executed in one
serial LoadLeveler job. This option is useful if execution of a
single subflow is really short (range of seconds) since there is
significant overhead in creating new jobs.
(*optional, default: 1*)
:Author: Anett Seeland (anett.seeland@dfki.de)
:Created: 2012/09/04
:LastChange: 2012/11/06 batch_size option added
"""
def __init__(self, processing_modality='serial', pool_size=2, batch_size=1,
**kwargs):
self.modality = processing_modality
self.pool_size = int(pool_size)
self.batch_size = int(batch_size)
# a flag to send pool_size / batch_size only once to the backend
self.already_send = False
self.backend_com = None
self.backend_name = None
# to indicate the end of a message received over a socket
self.end_token = '!END!'
if processing_modality not in ["serial", "local", "backend"]:
import warnings
warnings.warn("Processing modality not found! Serial mode is used!")
self.modality = 'serial'
@staticmethod
def generate_subflow(flow_template, parametrization=None, flow_class=None):
""" Return a *flow_class* object of the given *flow_template*
        This method wraps two function calls (NodeChainFactory.instantiate and
        NodeChainFactory.flow_from_yaml).
**Parameters**
:flow_template:
List of dicts - a valid representation of a node chain.
Alternatively, a YAML-String representation could be used,
which simplifies parameter replacement.
:parametrization:
                A dictionary with parameter names as keys and exactly one
                value per parameter as value. Passed to NodeChainFactory.instantiate
(*optional, default: None*)
:flow_class:
The flow class name of which an object should be returned
(*optional, default: BenchmarkNodeChain*)
"""
if flow_class is None:
flow_class = BenchmarkNodeChain
flow_spec = NodeChainFactory.replace_parameters_in_node_chain(
flow_template,parametrization)
# create a new Benchmark flow
flow = NodeChainFactory.flow_from_yaml(flow_class, flow_spec)
return flow
def execute_subflows(self, train_instances, subflows, run_numbers=None):
""" Execute subflows and return result collection.
**Parameters**
            :train_instances:
List of training instances which should be used to execute
*subflows*.
:subflows:
List of BenchmarkNodeChain objects.
                .. note:: Every subflow object is stored in memory!
:run_numbers:
All subflows will be executed with every run_number specified in
this list. If None, the current self.run_number (from the node
class) is used.
(*optional, default: None*)
"""
if run_numbers == None:
run_numbers = [self.run_number]
# in case of serial backend, modality is mapped to serial
# in the other case communication must be set up and
# jobs need to be submitted to backend
if self.modality == 'backend':
self.backend_com = pySPACE.configuration.backend_com
if not self.backend_com is None:
# ask for backend_name
# create a socket and keep it alive as long as possible since
# handshaking costs really time
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(self.backend_com)
client_socket, self.backend_name = talk('name' + self.end_token,
client_socket, self.backend_com)
else:
import warnings #necessary for serial backend!
warnings.warn("Seems that no backend is used! Modality of subflow execution "\
"has to be specified! Assuming serial backend.")
self.backend_name = 'serial'
self._log("Preparing subflows for backend execution.")
if self.backend_name in ['loadl','mcore'] :
# we have to pickle training instances and store it on disk
store_path = os.path.join(self.temp_dir,
"sp%d" % self.current_split)
create_directory(store_path)
filename = os.path.join(store_path, "subflow_data.pickle")
if not os.path.isfile(filename):
cPickle.dump(train_instances, open(filename,'wb'),
protocol=cPickle.HIGHEST_PROTOCOL)
subflows_to_compute = [subflows[ind].id for ind in \
range(len(subflows))]
if self.backend_name == 'loadl':
# send batch_size to backend if not already done
if not self.already_send:
client_socket = inform("subflow_batchsize;%d%s" % \
(self.batch_size, self.end_token),
client_socket, self.backend_com)
self.already_send = True
for subflow in subflows:
cPickle.dump(subflow, open(os.path.join(store_path,
subflow.id+".pickle"),"wb"),
protocol=cPickle.HIGHEST_PROTOCOL)
send_flows = subflows_to_compute
else: # backend_name == mcore
# send pool_size to backend if not already done
if not self.already_send:
client_socket = inform("subflow_poolsize;%d%s" % \
(self.pool_size, self.end_token),
client_socket, self.backend_com)
self.already_send = True
# send flow objects via socket
send_flows = [cPickle.dumps(subflow, cPickle.HIGHEST_PROTOCOL) \
for subflow in subflows]
# inform backend
client_socket,msg = talk('execute_subflows;%s;%d;%s;%s%s' % \
(store_path, len(subflows), str(send_flows),
str(run_numbers), self.end_token),
client_socket, self.backend_com)
time.sleep(10)
not_finished_subflows = set(subflows_to_compute)
while len(not_finished_subflows) != 0:
# ask backend for finished jobs
client_socket, msg = talk('is_ready;%d;%s%s' % \
(len(not_finished_subflows), str(not_finished_subflows),
self.end_token), client_socket, self.backend_com)
# parse message
finished_subflows = eval(msg) #should be a set
# set difference
not_finished_subflows -= finished_subflows
time.sleep(10)
if self.backend_name == 'loadl':
# read results and delete store_dir
result_pattern = os.path.join(store_path, '%s_result.pickle')
result_collections = [cPickle.load(open(result_pattern % \
subflows[ind].id,'rb')) for ind in range(len(subflows))]
# ..todo:: check if errors have occurred and if so do not delete!
shutil.rmtree(store_path)
else: # backend_name == mcore
# ask backend to send results
client_socket, msg = talk("send_results;%s!END!" % \
subflows_to_compute, client_socket, self.backend_com)
# should be a list of collections
results = eval(msg)
result_collections = [cPickle.loads(result) for result in results]
self._log("Finished subflow execution.")
client_socket.shutdown(socket.SHUT_RDWR)
client_socket.close()
return result_collections
elif self.backend_name == 'serial':
# do the same as modality=='serial'
self.modality = 'serial'
else: # e.g. mpi backend :
import warnings
warnings.warn("Subflow Handling with %s backend not supported,"\
" serial-modality is used!" % self.backend_name)
self.modality = 'serial'
if self.modality == 'serial':
# serial execution
            # .. note:: the flows executed here cannot store anything.
            #           meta data of the result collection is NOT updated!
results = [subflow(train_instances=train_instances,
runs=run_numbers) for subflow in subflows]
result_collections = [result[1] for result in results]
return result_collections
else: # modality local, e.g. usage without backend in application case
self._log("Subflow Handler starts processes in pool.")
pool = multiprocessing.Pool(processes=self.pool_size)
results = [pool.apply_async(func=subflow,
kwds={"train_instances": train_instances,
"runs": run_numbers}) \
for subflow in subflows]
pool.close()
self._log("Waiting for parallel processes to finish.")
pool.join()
result_collections = [result.get()[1] for result in results]
del pool
return result_collections
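# Hypothetical sketch of how a node that inherits from SubflowHandler could
# generate and evaluate subflows, following generate_subflow and
# execute_subflows above. ``flow_template`` is assumed to be a YAML string
# (or list of dicts) describing a benchmark node chain, ``parametrizations``
# a list of parameter dictionaries, and ``train_instances`` the training
# data; ``handler`` must be a node instance that also inherits from
# SubflowHandler.
def _example_run_subflows(handler, flow_template, parametrizations,
                          train_instances):
    subflows = [SubflowHandler.generate_subflow(flow_template, params)
                for params in parametrizations]
    return handler.execute_subflows(train_instances, subflows)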
|
Crespo911/pyspace
|
pySPACE/environments/chains/node_chain.py
|
Python
|
gpl-3.0
| 60,058 | 0.003014 |
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ImageNet."""
import tensorflow as tf
import uncertainty_baselines as ub
# TODO(dusenberrymw): Use TFDS mocking.
class ImageNetDatasetTest(ub.datasets.DatasetTest):
# TODO(dusenberrymw): Rename to `test_dataset_size`.
def testDatasetSize(self):
super()._testDatasetSize(
ub.datasets.ImageNetDataset, (224, 224, 3), validation_percent=0.1)
def test_expected_features(self):
builder = ub.datasets.ImageNetDataset('train')
dataset = builder.load(batch_size=1)
self.assertEqual(list(dataset.element_spec.keys()), ['features', 'labels'])
builder_with_file_name = ub.datasets.ImageNetDataset(
'train', include_file_name=True)
dataset_with_file_name = builder_with_file_name.load(batch_size=1)
self.assertEqual(
list(dataset_with_file_name.element_spec.keys()),
['features', 'labels', 'file_name'])
if __name__ == '__main__':
tf.test.main()
|
google/uncertainty-baselines
|
uncertainty_baselines/datasets/imagenet_test.py
|
Python
|
apache-2.0
| 1,536 | 0.002604 |
class Node(object):
"""Find if two nodes in a directed graph are connected.
Based on http://www.codewars.com/kata/53897d3187c26d42ac00040d
For example:
a -+-> b -> c -> e
|
+-> d
a.connected_to(a) == true
a.connected_to(b) == true
a.connected_to(c) == true
b.connected_to(d) == false"""
def __init__(self, value, edges=None):
self.value = value
        # `edges or []` avoids sharing one mutable default list between
        # instances (the mutable default argument pitfall).
self.edges = edges or []
def connected_to(self, target):
        raise NotImplementedError
def __eq__(self, other):
return self.value == other.value
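# One possible solution sketch for the kata described above (not part of the
# original stub, which intentionally leaves connected_to unimplemented): an
# iterative depth-first search over the outgoing edges.
def connected_to_sketch(start, target):
    seen, stack = set(), [start]
    while stack:
        node = stack.pop()
        if node is target:
            return True
        if id(node) in seen:
            continue
        seen.add(id(node))
        stack.extend(node.edges)
    return False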
|
intenthq/code-challenges
|
python/connected_graph/connected_graph.py
|
Python
|
mit
| 637 | 0.00314 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
@test_util.run_deprecated_v1
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c0 = c
self.assertTrue(c0.op in d.op.control_inputs)
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Verify that control dependencies are correctly moved to the subscription.
self.assertFalse(c0.op in d.op.control_inputs)
self.assertTrue(c.op in d.op.control_inputs)
with self.cached_session() as sess:
c_out = self.evaluate([c])
n_out = self.evaluate([n])
d_out = self.evaluate([d])
self.assertEqual(n_out, [-2])
self.assertEqual(c_out, [2])
self.assertEqual(d_out, [42])
self.assertEqual(shared, [2, 2, 2])
@test_util.run_deprecated_v1
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegex(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
@test_util.run_deprecated_v1
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
# If it was using outdated cached control_outputs then
# evaling would not trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
c_out = self.evaluate([c])
d_out = self.evaluate([d])
self.assertEqual(c_out, [42])
self.assertEqual(d_out, [11])
self.assertEqual(shared, {2: 1, 1: 1})
@test_util.run_deprecated_v1
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
@test_util.run_deprecated_v1
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
# Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEqual(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.cached_session() as sess:
self.evaluate([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
@test_util.run_v1_only('b/120545219')
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.VariableV1(0.0)
v2 = variables.VariableV1(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.cached_session() as sess:
# Initialize the variables first.
self.evaluate([v1.initializer])
self.evaluate([v2.initializer])
# Expect the side effects to be triggered when evaluating the add op as
# it will read the value of the variable.
self.evaluate([add])
self.assertEqual(1, len(shared))
# Expect the side effect not to be triggered when evaluating the assign
# op as it will not access the 'read' output of the variable.
self.evaluate([assign_v1])
self.assertEqual(1, len(shared))
self.evaluate([add])
self.assertEqual(2, len(shared))
# Make sure the values read from the variable match the expected ones.
self.assertEqual([0.0, 3.0], shared)
@test_util.run_v1_only('b/120545219')
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name='test',
size=3,
infer_shape=False)
writer = tensor_array.write(0, [[4.0, 5.0]])
reader = writer.read(0)
shared = []
def sub(t):
shared.append(t)
return t
# TensorArray's handle output tensor has a 'resource' type and cannot be
# subscribed as it's not 'numpy compatible' (see dtypes.py).
# Expect that the original tensor is returned when subscribing to it.
tensor_array_sub = subscribe.subscribe(
tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIs(tensor_array_sub, tensor_array.handle)
self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))
with self.cached_session() as sess:
self.evaluate([reader])
self.assertEqual(0, len(shared))
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
"""Handle subscriptions to multiple outputs from the same op."""
sparse_tensor_1 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
sparse_tensor_2 = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])
# This op has three outputs.
sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)
self.assertEqual(3, len(sparse_add.op.outputs))
c1 = constant_op.constant(1)
with ops.control_dependencies(sparse_add.op.outputs):
# This op depends on all the three outputs.
neg = -c1
shared = []
def sub(t):
shared.append(t)
return t
# Subscribe the three outputs at once.
subscribe.subscribe(sparse_add.op.outputs,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
self.evaluate([neg])
# All three ops have been processed.
self.assertEqual(3, len(shared))
@test_util.run_deprecated_v1
def test_subscribe_tensors_on_different_devices(self):
"""Side effect ops are added with the same device of the subscribed op."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
with ops.device('cpu:0'):
add = math_ops.add(c1, c2)
with ops.device('cpu:1'):
mul = math_ops.multiply(c1, c2)
def sub(t):
return t
add_sub = subscribe.subscribe(
add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
mul_sub = subscribe.subscribe(
mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Expect the identity tensors injected by subscribe to have been created
# on the same device as their original tensors.
self.assertNotEqual(add_sub.device, mul_sub.device)
self.assertEqual(add.device, add_sub.device)
self.assertEqual(mul.device, mul_sub.device)
@test_util.run_v1_only('b/120545219')
def test_subscribe_tensors_within_control_flow_context(self):
"""Side effect ops are added with the same control flow context."""
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
x1 = math_ops.add(c1, c2)
x2 = math_ops.multiply(c1, c2)
cond = control_flow_ops.cond(
x1 < x2,
lambda: math_ops.add(c1, c2, name='then'),
lambda: math_ops.subtract(c1, c2, name='else'),
name='cond')
branch = ops.get_default_graph().get_tensor_by_name('cond/then:0')
def context(tensor):
return tensor.op._get_control_flow_context()
self.assertIs(context(x1), context(x2))
self.assertIsNot(context(x1), context(branch))
results = []
def sub(tensor):
results.append(tensor)
return tensor
tensors = [x1, branch, x2]
subscriptions = subscribe.subscribe(
tensors, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
for tensor, subscription in zip(tensors, subscriptions):
self.assertIs(context(tensor), context(subscription))
# Verify that sub(x1) and sub(x2) are in the same context.
self.assertIs(context(subscriptions[0]), context(subscriptions[2]))
# Verify that sub(x1) and sub(branch) are not.
self.assertIsNot(context(subscriptions[0]), context(subscriptions[1]))
with self.cached_session() as sess:
self.evaluate(cond)
self.assertEqual(3, len(results))
if __name__ == '__main__':
googletest.main()
|
karllessard/tensorflow
|
tensorflow/python/framework/subscribe_test.py
|
Python
|
apache-2.0
| 13,361 | 0.005838 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_particle_geyser_center.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/static/particle/shared_particle_geyser_center.py
|
Python
|
mit
| 451 | 0.046563 |
#!/usr/bin/env python
# encoding: utf-8
from flask import Blueprint, redirect, session, url_for, flash
from flask_babel import _
from flask_babel import refresh
from flask_login import current_user
from app import db, constants
from app.views import redirect_back
blueprint = Blueprint('lang', __name__, url_prefix='/lang')
@blueprint.route('/set/<path:lang>', methods=['GET'])
def set_user_lang(lang=None):
if lang not in constants.LANGUAGES.keys():
flash(_('Language unsupported on this site') + ': ' + lang, 'warning')
return redirect(url_for('home.home'))
if current_user.is_anonymous:
flash(_('You need to be logged in to set a permanent language.'))
return redirect_back()
current_user.locale = lang
db.session.add(current_user)
db.session.commit()
refresh()
return redirect_back()
@blueprint.route('/<path:lang>', methods=['GET'])
def set_lang(lang=None):
if lang not in constants.LANGUAGES.keys():
flash(_('Language unsupported on this site') + ': ' + lang, 'warning')
return redirect(url_for('home.home'))
session['lang'] = lang
if current_user.is_authenticated:
msg = _("{} is now set as language for this session. To make this "
"setting permanent, <a href='{}'>click here</a>")
flash(msg.format(constants.LANGUAGES[lang],
url_for('lang.set_user_lang', lang=lang)),
'safe')
return redirect_back()
|
viaict/viaduct
|
app/views/lang.py
|
Python
|
mit
| 1,481 | 0 |
'''
YARN Cluster Metrics
--------------------
yarn.metrics.appsSubmitted The number of submitted apps
yarn.metrics.appsCompleted The number of completed apps
yarn.metrics.appsPending The number of pending apps
yarn.metrics.appsRunning The number of running apps
yarn.metrics.appsFailed The number of failed apps
yarn.metrics.appsKilled The number of killed apps
yarn.metrics.reservedMB The size of reserved memory
yarn.metrics.availableMB The amount of available memory
yarn.metrics.allocatedMB The amount of allocated memory
yarn.metrics.totalMB The amount of total memory
yarn.metrics.reservedVirtualCores The number of reserved virtual cores
yarn.metrics.availableVirtualCores The number of available virtual cores
yarn.metrics.allocatedVirtualCores The number of allocated virtual cores
yarn.metrics.totalVirtualCores The total number of virtual cores
yarn.metrics.containersAllocated The number of containers allocated
yarn.metrics.containersReserved The number of containers reserved
yarn.metrics.containersPending The number of containers pending
yarn.metrics.totalNodes The total number of nodes
yarn.metrics.activeNodes The number of active nodes
yarn.metrics.lostNodes The number of lost nodes
yarn.metrics.unhealthyNodes The number of unhealthy nodes
yarn.metrics.decommissionedNodes The number of decommissioned nodes
yarn.metrics.rebootedNodes The number of rebooted nodes
YARN App Metrics
----------------
yarn.app.progress The progress of the application as a percent
yarn.app.startedTime The time in which application started (in ms since epoch)
yarn.app.finishedTime The time in which the application finished (in ms since epoch)
yarn.app.elapsedTime The elapsed time since the application started (in ms)
yarn.app.allocatedMB The sum of memory in MB allocated to the applications running containers
yarn.app.allocatedVCores The sum of virtual cores allocated to the applications running containers
yarn.app.runningContainers The number of containers currently running for the application
yarn.app.memorySeconds The amount of memory the application has allocated (megabyte-seconds)
yarn.app.vcoreSeconds The amount of CPU resources the application has allocated (virtual core-seconds)
YARN Node Metrics
-----------------
yarn.node.lastHealthUpdate The last time the node reported its health (in ms since epoch)
yarn.node.usedMemoryMB The total amount of memory currently used on the node (in MB)
yarn.node.availMemoryMB The total amount of memory currently available on the node (in MB)
yarn.node.usedVirtualCores The total number of vCores currently used on the node
yarn.node.availableVirtualCores The total number of vCores available on the node
yarn.node.numContainers The total number of containers currently running on the node
YARN Capacity Scheduler Metrics
-----------------
yarn.queue.root.maxCapacity The configured maximum queue capacity in percentage for root queue
yarn.queue.root.usedCapacity The used queue capacity in percentage for root queue
yarn.queue.root.capacity The configured queue capacity in percentage for root queue
yarn.queue.numPendingApplications The number of pending applications in this queue
yarn.queue.userAMResourceLimit.memory The maximum memory resources a user can use for Application Masters (in MB)
yarn.queue.userAMResourceLimit.vCores The maximum vCpus a user can use for Application Masters
yarn.queue.absoluteCapacity The absolute capacity percentage this queue can use of the entire cluster
yarn.queue.userLimitFactor The user limit factor set in the configuration
yarn.queue.userLimit The minimum user limit percent set in the configuration
yarn.queue.numApplications The number of applications currently in the queue
yarn.queue.usedAMResource.memory The memory resources used for Application Masters (in MB)
yarn.queue.usedAMResource.vCores The vCpus used for Application Masters
yarn.queue.absoluteUsedCapacity The absolute used capacity percentage this queue is using of the entire cluster
yarn.queue.resourcesUsed.memory The total memory resources this queue is using (in MB)
yarn.queue.resourcesUsed.vCores The total vCpus this queue is using
yarn.queue.AMResourceLimit.vCores The maximum vCpus this queue can use for Application Masters
yarn.queue.AMResourceLimit.memory The maximum memory resources this queue can use for Application Masters (in MB)
yarn.queue.capacity The configured queue capacity in percentage relative to its parent queue
yarn.queue.numActiveApplications The number of active applications in this queue
yarn.queue.absoluteMaxCapacity The absolute maximum capacity percentage this queue can use of the entire cluster
yarn.queue.usedCapacity The used queue capacity in percentage
yarn.queue.numContainers The number of containers being used
yarn.queue.maxCapacity The configured maximum queue capacity in percentage relative to its parent queue
yarn.queue.maxApplications The maximum number of applications this queue can have
yarn.queue.maxApplicationsPerUser The maximum number of active applications per user this queue can have
'''
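# Illustrative instance configuration for this check (the keys mirror the
# instance.get() calls below; every value here is a placeholder, not a default):
#
#   instances:
#     - resourcemanager_uri: http://localhost:8088
#       cluster_name: staging_cluster
#       collect_app_metrics: true
#       application_tags:
#         app_queue: queue
#       queue_blacklist:
#         - uninteresting_queue
#       tags:
#         - env:staging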
# stdlib
from urlparse import urljoin, urlsplit, urlunsplit
# 3rd party
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError
import requests
# Project
from checks import AgentCheck
from config import _is_affirmative
# Default settings
DEFAULT_RM_URI = 'http://localhost:8088'
DEFAULT_TIMEOUT = 5
DEFAULT_CLUSTER_NAME = 'default_cluster'
DEFAULT_COLLECT_APP_METRICS = True
MAX_DETAILED_QUEUES = 100
# Path to retrieve cluster metrics
YARN_CLUSTER_METRICS_PATH = '/ws/v1/cluster/metrics'
# Path to retrieve YARN APPS
YARN_APPS_PATH = '/ws/v1/cluster/apps'
# Path to retrieve node statistics
YARN_NODES_PATH = '/ws/v1/cluster/nodes'
# Path to retrieve queue statistics
YARN_SCHEDULER_PATH = '/ws/v1/cluster/scheduler'
# Metric types
GAUGE = 'gauge'
INCREMENT = 'increment'
# Name of the service check
SERVICE_CHECK_NAME = 'yarn.can_connect'
# Application states to collect
YARN_APPLICATION_STATES = 'RUNNING'
# Cluster metrics identifier
YARN_CLUSTER_METRICS_ELEMENT = 'clusterMetrics'
# Cluster metrics for YARN
YARN_CLUSTER_METRICS = {
'appsSubmitted': ('yarn.metrics.apps_submitted', GAUGE),
'appsCompleted': ('yarn.metrics.apps_completed', GAUGE),
'appsPending': ('yarn.metrics.apps_pending', GAUGE),
'appsRunning': ('yarn.metrics.apps_running', GAUGE),
'appsFailed': ('yarn.metrics.apps_failed', GAUGE),
'appsKilled': ('yarn.metrics.apps_killed', GAUGE),
'reservedMB': ('yarn.metrics.reserved_mb', GAUGE),
'availableMB': ('yarn.metrics.available_mb', GAUGE),
'allocatedMB': ('yarn.metrics.allocated_mb', GAUGE),
'totalMB': ('yarn.metrics.total_mb', GAUGE),
'reservedVirtualCores': ('yarn.metrics.reserved_virtual_cores', GAUGE),
'availableVirtualCores': ('yarn.metrics.available_virtual_cores', GAUGE),
'allocatedVirtualCores': ('yarn.metrics.allocated_virtual_cores', GAUGE),
'totalVirtualCores': ('yarn.metrics.total_virtual_cores', GAUGE),
'containersAllocated': ('yarn.metrics.containers_allocated', GAUGE),
'containersReserved': ('yarn.metrics.containers_reserved', GAUGE),
'containersPending': ('yarn.metrics.containers_pending', GAUGE),
'totalNodes': ('yarn.metrics.total_nodes', GAUGE),
'activeNodes': ('yarn.metrics.active_nodes', GAUGE),
'lostNodes': ('yarn.metrics.lost_nodes', GAUGE),
'unhealthyNodes': ('yarn.metrics.unhealthy_nodes', GAUGE),
'decommissionedNodes': ('yarn.metrics.decommissioned_nodes', GAUGE),
'rebootedNodes': ('yarn.metrics.rebooted_nodes', GAUGE),
}
# Application metrics for YARN
YARN_APP_METRICS = {
'progress': ('yarn.apps.progress', INCREMENT),
'startedTime': ('yarn.apps.started_time', INCREMENT),
'finishedTime': ('yarn.apps.finished_time', INCREMENT),
'elapsedTime': ('yarn.apps.elapsed_time', INCREMENT),
'allocatedMB': ('yarn.apps.allocated_mb', INCREMENT),
'allocatedVCores': ('yarn.apps.allocated_vcores', INCREMENT),
'runningContainers': ('yarn.apps.running_containers', INCREMENT),
'memorySeconds': ('yarn.apps.memory_seconds', INCREMENT),
'vcoreSeconds': ('yarn.apps.vcore_seconds', INCREMENT),
}
# Node metrics for YARN
YARN_NODE_METRICS = {
'lastHealthUpdate': ('yarn.node.last_health_update', GAUGE),
'usedMemoryMB': ('yarn.node.used_memory_mb', GAUGE),
'availMemoryMB': ('yarn.node.avail_memory_mb', GAUGE),
'usedVirtualCores': ('yarn.node.used_virtual_cores', GAUGE),
'availableVirtualCores': ('yarn.node.available_virtual_cores', GAUGE),
'numContainers': ('yarn.node.num_containers', GAUGE),
}
# Root queue metrics for YARN
YARN_ROOT_QUEUE_METRICS = {
'maxCapacity': ('yarn.queue.root.max_capacity', GAUGE),
'usedCapacity': ('yarn.queue.root.used_capacity', GAUGE),
'capacity': ('yarn.queue.root.capacity', GAUGE)
}
# Queue metrics for YARN
YARN_QUEUE_METRICS = {
'numPendingApplications': ('yarn.queue.num_pending_applications', GAUGE),
'userAMResourceLimit.memory': ('yarn.queue.user_am_resource_limit.memory', GAUGE),
'userAMResourceLimit.vCores': ('yarn.queue.user_am_resource_limit.vcores', GAUGE),
'absoluteCapacity': ('yarn.queue.absolute_capacity', GAUGE),
'userLimitFactor': ('yarn.queue.user_limit_factor', GAUGE),
'userLimit': ('yarn.queue.user_limit', GAUGE),
'numApplications': ('yarn.queue.num_applications', GAUGE),
'usedAMResource.memory': ('yarn.queue.used_am_resource.memory', GAUGE),
'usedAMResource.vCores': ('yarn.queue.used_am_resource.vcores', GAUGE),
'absoluteUsedCapacity': ('yarn.queue.absolute_used_capacity', GAUGE),
'resourcesUsed.memory': ('yarn.queue.resources_used.memory', GAUGE),
'resourcesUsed.vCores': ('yarn.queue.resources_used.vcores', GAUGE),
'AMResourceLimit.vCores': ('yarn.queue.am_resource_limit.vcores', GAUGE),
'AMResourceLimit.memory': ('yarn.queue.am_resource_limit.memory', GAUGE),
'capacity': ('yarn.queue.capacity', GAUGE),
'numActiveApplications': ('yarn.queue.num_active_applications', GAUGE),
'absoluteMaxCapacity': ('yarn.queue.absolute_max_capacity', GAUGE),
'usedCapacity' : ('yarn.queue.used_capacity', GAUGE),
'numContainers': ('yarn.queue.num_containers', GAUGE),
'maxCapacity': ('yarn.queue.max_capacity', GAUGE),
'maxApplications': ('yarn.queue.max_applications', GAUGE),
'maxApplicationsPerUser': ('yarn.queue.max_applications_per_user', GAUGE)
}
class YarnCheck(AgentCheck):
'''
    Extract statistics from YARN's ResourceManager REST API
'''
_ALLOWED_APPLICATION_TAGS = [
'applicationTags',
'applicationType',
'name',
'queue',
'user'
]
def check(self, instance):
# Get properties from conf file
rm_address = instance.get('resourcemanager_uri', DEFAULT_RM_URI)
app_tags = instance.get('application_tags', {})
queue_blacklist = instance.get('queue_blacklist', [])
if type(app_tags) is not dict:
self.log.error('application_tags is incorrect: %s is not a dictionary', app_tags)
app_tags = {}
filtered_app_tags = {}
for dd_prefix, yarn_key in app_tags.iteritems():
if yarn_key in self._ALLOWED_APPLICATION_TAGS:
filtered_app_tags[dd_prefix] = yarn_key
app_tags = filtered_app_tags
# Collected by default
app_tags['app_name'] = 'name'
# Get additional tags from the conf file
tags = instance.get('tags', [])
if tags is None:
tags = []
else:
tags = list(set(tags))
# Get the cluster name from the conf file
cluster_name = instance.get('cluster_name')
if cluster_name is None:
self.warning("The cluster_name must be specified in the instance configuration, defaulting to '%s'" % (DEFAULT_CUSTER_NAME))
cluster_name = DEFAULT_CUSTER_NAME
tags.append('cluster_name:%s' % cluster_name)
# Get metrics from the Resource Manager
self._yarn_cluster_metrics(rm_address, tags)
if _is_affirmative(instance.get('collect_app_metrics', DEFAULT_COLLECT_APP_METRICS)):
self._yarn_app_metrics(rm_address, app_tags, tags)
self._yarn_node_metrics(rm_address, tags)
self._yarn_scheduler_metrics(rm_address, tags, queue_blacklist)
def _yarn_cluster_metrics(self, rm_address, addl_tags):
'''
Get metrics related to YARN cluster
'''
metrics_json = self._rest_request_to_json(rm_address, YARN_CLUSTER_METRICS_PATH)
if metrics_json:
yarn_metrics = metrics_json[YARN_CLUSTER_METRICS_ELEMENT]
if yarn_metrics is not None:
self._set_yarn_metrics_from_json(addl_tags, yarn_metrics, YARN_CLUSTER_METRICS)
def _yarn_app_metrics(self, rm_address, app_tags, addl_tags):
'''
Get metrics for running applications
'''
metrics_json = self._rest_request_to_json(
rm_address,
YARN_APPS_PATH,
states=YARN_APPLICATION_STATES
)
if (metrics_json and metrics_json['apps'] is not None and
metrics_json['apps']['app'] is not None):
for app_json in metrics_json['apps']['app']:
tags = []
for dd_tag, yarn_key in app_tags.iteritems():
try:
val = app_json[yarn_key]
if val:
tags.append("{tag}:{value}".format(
tag=dd_tag, value=val
))
except KeyError:
self.log.error("Invalid value %s for application_tag", yarn_key)
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, app_json, YARN_APP_METRICS)
def _yarn_node_metrics(self, rm_address, addl_tags):
'''
Get metrics related to YARN nodes
'''
metrics_json = self._rest_request_to_json(rm_address, YARN_NODES_PATH)
if (metrics_json and metrics_json['nodes'] is not None and
metrics_json['nodes']['node'] is not None):
for node_json in metrics_json['nodes']['node']:
node_id = node_json['id']
tags = ['node_id:%s' % str(node_id)]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, node_json, YARN_NODE_METRICS)
def _yarn_scheduler_metrics(self, rm_address, addl_tags, queue_blacklist):
'''
Get metrics from YARN scheduler
'''
metrics_json = self._rest_request_to_json(rm_address, YARN_SCHEDULER_PATH)
try:
metrics_json = metrics_json['scheduler']['schedulerInfo']
if metrics_json['type'] == 'capacityScheduler':
self._yarn_capacity_scheduler_metrics(metrics_json, addl_tags, queue_blacklist)
except KeyError:
pass
def _yarn_capacity_scheduler_metrics(self, metrics_json, addl_tags, queue_blacklist):
'''
        Get metrics from YARN scheduler if its type is capacityScheduler
'''
tags = ['queue_name:%s' % metrics_json['queueName']]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, metrics_json, YARN_ROOT_QUEUE_METRICS)
if metrics_json['queues'] is not None and metrics_json['queues']['queue'] is not None:
queues_count = 0
for queue_json in metrics_json['queues']['queue']:
queue_name = queue_json['queueName']
if queue_name in queue_blacklist:
self.log.debug('Queue "%s" is blacklisted. Ignoring it' % queue_name)
continue
queues_count += 1
if queues_count > MAX_DETAILED_QUEUES:
self.warning("Found more than 100 queues, will only send metrics on first 100 queues. " +
" Please filter the queues with the check's `queue_blacklist` parameter")
break
tags = ['queue_name:%s' % str(queue_name)]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, queue_json, YARN_QUEUE_METRICS)
def _set_yarn_metrics_from_json(self, tags, metrics_json, yarn_metrics):
'''
Parse the JSON response and set the metrics
'''
for dict_path, metric in yarn_metrics.iteritems():
metric_name, metric_type = metric
metric_value = self._get_value_from_json(dict_path, metrics_json)
if metric_value is not None:
self._set_metric(metric_name,
metric_type,
metric_value,
tags)
def _get_value_from_json(self, dict_path, metrics_json):
'''
Get a value from a dictionary under N keys, represented as str("key1.key2...key{n}")
'''
for key in dict_path.split('.'):
if key in metrics_json:
metrics_json = metrics_json.get(key)
else:
return None
return metrics_json
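    # Illustrative lookup (payload is made up): with dict_path 'resourcesUsed.memory'
    # and metrics_json {'resourcesUsed': {'memory': 2048}}, the helper above returns
    # 2048; a missing key anywhere along the path yields None instead.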
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
'''
Set a metric
'''
if metric_type == GAUGE:
self.gauge(metric_name, value, tags=tags, device_name=device_name)
elif metric_type == INCREMENT:
self.increment(metric_name, value, tags=tags, device_name=device_name)
else:
self.log.error('Metric type "%s" unknown', metric_type)
def _rest_request_to_json(self, address, object_path, *args, **kwargs):
'''
Query the given URL and return the JSON response
'''
response_json = None
service_check_tags = ['url:%s' % self._get_url_base(address)]
url = address
if object_path:
url = self._join_url_dir(url, object_path)
# Add args to the url
if args:
for directory in args:
url = self._join_url_dir(url, directory)
self.log.debug('Attempting to connect to "%s"' % url)
# Add kwargs as arguments
if kwargs:
query = '&'.join(['{0}={1}'.format(key, value) for key, value in kwargs.iteritems()])
url = urljoin(url, '?' + query)
try:
response = requests.get(url, timeout=self.default_integration_http_timeout)
response.raise_for_status()
response_json = response.json()
except Timeout as e:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request timeout: {0}, {1}".format(url, e))
raise
except (HTTPError,
InvalidURL,
ConnectionError) as e:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request failed: {0}, {1}".format(url, e))
raise
except ValueError as e:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message=str(e))
raise
else:
self.service_check(SERVICE_CHECK_NAME,
AgentCheck.OK,
tags=service_check_tags,
message='Connection to %s was successful' % url)
return response_json
def _join_url_dir(self, url, *args):
'''
Join a URL with multiple directories
'''
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
def _get_url_base(self, url):
'''
Return the base of a URL
'''
s = urlsplit(url)
return urlunsplit([s.scheme, s.netloc, '', '', ''])
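    # Illustrative behaviour of the two URL helpers above (host is an example):
    #   _join_url_dir('http://localhost:8088', '/ws/v1/cluster/apps')
    #     -> 'http://localhost:8088/ws/v1/cluster/apps'
    #   _get_url_base('http://localhost:8088/ws/v1/cluster/apps')
    #     -> 'http://localhost:8088'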
|
StackVista/sts-agent-integrations-core
|
yarn/check.py
|
Python
|
bsd-3-clause
| 20,533 | 0.00375 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-28 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0019_new_iface_form_factors'),
]
operations = [
migrations.AddField(
model_name='rack',
name='desc_units',
field=models.BooleanField(default=False, help_text=b'Units are numbered top-to-bottom', verbose_name=b'Descending units'),
),
]
|
lampwins/netbox
|
netbox/dcim/migrations/0020_rack_desc_units.py
|
Python
|
apache-2.0
| 493 | 0.002028 |
#!/usr/bin/env python
import numpy as np
from kmodes.kmodes import KModes
# reproduce results on small soybean data set
x = np.genfromtxt('soybean.csv', dtype=int, delimiter=',')[:, :-1]
y = np.genfromtxt('soybean.csv', dtype=str, delimiter=',', usecols=(35, ))
kmodes_huang = KModes(n_clusters=4, init='Huang', verbose=1)
kmodes_huang.fit(x)
# Print cluster centroids of the trained model.
print('k-modes (Huang) centroids:')
print(kmodes_huang.cluster_centroids_)
# Print training statistics
print('Final training cost: {}'.format(kmodes_huang.cost_))
print('Training iterations: {}'.format(kmodes_huang.n_iter_))
kmodes_cao = KModes(n_clusters=4, init='Cao', verbose=1)
kmodes_cao.fit(x)
# Print cluster centroids of the trained model.
print('k-modes (Cao) centroids:')
print(kmodes_cao.cluster_centroids_)
# Print training statistics
print('Final training cost: {}'.format(kmodes_cao.cost_))
print('Training iterations: {}'.format(kmodes_cao.n_iter_))
print('Results tables:')
for result in (kmodes_huang, kmodes_cao):
classtable = np.zeros((4, 4), dtype=int)
for ii, _ in enumerate(y):
classtable[int(y[ii][-1]) - 1, result.labels_[ii]] += 1
print("\n")
print(" | Cl. 1 | Cl. 2 | Cl. 3 | Cl. 4 |")
print("----|-------|-------|-------|-------|")
for ii in range(4):
prargs = tuple([ii + 1] + list(classtable[ii, :]))
print(" D{0} | {1:>2} | {2:>2} | {3:>2} | {4:>2} |".format(*prargs))
|
nicodv/kmodes
|
examples/soybean.py
|
Python
|
mit
| 1,461 | 0.000684 |
#-*- coding: utf-8 -*-
import phonenumbers
from django.conf import settings
from django.core import validators
from django.utils.six import string_types
from phonenumbers.phonenumberutil import NumberParseException
class PhoneNumber(phonenumbers.phonenumber.PhoneNumber):
"""
    An extended version of phonenumbers.phonenumber.PhoneNumber that provides some neat, more Pythonic and
    easy-to-access methods. This makes using a PhoneNumber instance much easier, especially in templates and such.
"""
format_map = {
'E164': phonenumbers.PhoneNumberFormat.E164,
'INTERNATIONAL': phonenumbers.PhoneNumberFormat.INTERNATIONAL,
'NATIONAL': phonenumbers.PhoneNumberFormat.NATIONAL,
'RFC3966': phonenumbers.PhoneNumberFormat.RFC3966,
}
@classmethod
def from_string(cls, phone_number, region=None):
phone_number_obj = cls()
if region is None:
region = getattr(settings, 'PHONENUMBER_DEFAULT_REGION', None) or getattr(settings, 'PHONENUMER_DEFAULT_REGION', None)
phonenumbers.parse(number=phone_number, region=region,
keep_raw_input=True, numobj=phone_number_obj)
return phone_number_obj
def __unicode__(self):
if self.is_valid():
if self.extension:
return u"%sx%s" % (self.as_e164, self.extension)
return self.as_e164
return self.raw_input
def __str__(self):
return str(self.__unicode__())
def original_unicode(self):
return super(PhoneNumber, self).__unicode__()
def is_valid(self):
"""
checks whether the number supplied is actually valid
"""
return phonenumbers.is_valid_number(self)
def format_as(self, format):
if self.is_valid():
return phonenumbers.format_number(self, format)
else:
return self.raw_input
@property
def as_international(self):
return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL)
@property
def as_e164(self):
return self.format_as(phonenumbers.PhoneNumberFormat.E164)
@property
def as_national(self):
return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL)
@property
def as_rfc3966(self):
return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966)
def __len__(self):
return len(self.__unicode__())
def __eq__(self, other):
if type(other) == PhoneNumber:
return self.as_rfc3966 == other.as_rfc3966
else:
return super(PhoneNumber, self).__eq__(other)
def __hash__(self):
return hash(self.as_rfc3966)
def to_python(value):
if value in validators.EMPTY_VALUES: # None or ''
phone_number = None
elif value and isinstance(value, string_types):
try:
phone_number = PhoneNumber.from_string(phone_number=value)
except NumberParseException:
# the string provided is not a valid PhoneNumber.
phone_number = PhoneNumber(raw_input=value)
elif isinstance(value, phonenumbers.phonenumber.PhoneNumber) and not isinstance(value, PhoneNumber):
phone_number = PhoneNumber(value)
elif isinstance(value, PhoneNumber):
phone_number = value
else:
# TODO: this should somehow show that it has invalid data, but not completely die for
# bad data in the database. (Same for the NumberParseException above)
phone_number = None
return phone_number
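# Illustrative usage (the number is the libphonenumber example number, not real
# user data; exact formatting depends on the phonenumbers metadata installed):
#   pn = to_python('+41524242424')
#   pn.is_valid()  # True
#   pn.as_e164     # '+41524242424'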
|
CloudNcodeInc/django-phonenumber-field
|
phonenumber_field/phonenumber.py
|
Python
|
mit
| 3,532 | 0.001982 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020 Gabriel Ferreira
# Copyright (C) 2020 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class ProgressCheckpoints:
def __init__(self, num_jobs, num_checkpoints=10):
"""Create a set of unique and evenly spaced indexes of jobs, used as checkpoints for progress"""
self.num_jobs = num_jobs
self._checkpoints = {}
if num_checkpoints > 0:
self._offset = num_jobs/num_checkpoints
for i in range(1, num_checkpoints):
self._checkpoints[int(i*self._offset)] = 100*i//num_checkpoints
if num_jobs > 0:
self._checkpoints[num_jobs-1] = 100
    def is_checkpoint(self, index):
        return index in self._checkpoints
def progress(self, index):
try:
return self._checkpoints[index]
except KeyError:
return None
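# Illustrative usage (numbers are arbitrary): ProgressCheckpoints(100, 10) marks
# indexes 10, 20, ..., 90 plus the final index 99, so is_checkpoint(10) is True,
# progress(10) == 10 and progress(99) == 100; any other index is not a checkpoint
# and progress(...) returns None for it.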
|
musicbrainz/picard
|
picard/util/progresscheckpoints.py
|
Python
|
gpl-2.0
| 1,663 | 0.001203 |
"""
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Simple starter for stand-alone RESTx server.
"""
import os
import sys
import time
import getopt
# RESTx imports
import restx.settings as settings
import restx.logger as logger
from restx.core import RequestDispatcher
from restx.platform_specifics import *
from org.mulesoft.restx import Settings
from org.mulesoft.restx.util import Url
from org.mulesoft.restx.component.api import *
def print_help():
print \
"""
RESTx server (c) 2010 MuleSoft
Usage: jython starter.py [options]
Options:
-h, --help
Print this help screen.
-P, --port <num>
Port on which the server listens for requests.
-p, --pidfile <filename>
If specified, the PID of the server is stored in <filename>.
-l, --logfile <filename>
If specified, the filename for the logfile. If not specified,
output will go to the console.
-r, --rootdir <dirname>
Root directory of the RESTx install
"""
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "hl:P:p:r:", ["help", "logfile=", "port=", "pidfile=", "rootdir="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
print_help()
sys.exit(1)
port = settings.LISTEN_PORT
for o, a in opts:
if o in ("-p", "--pidfile"):
# Writing our process ID
pid = os.getpid()
f = open(a, "w")
f.write(str(pid))
f.close()
elif o in ("-h", "--help"):
print_help()
sys.exit(0)
elif o in ("-P", "--port"):
port = int(a)
elif o in ("-r", "--rootdir"):
rootdir = str(a)
settings.set_root_dir(rootdir)
elif o in ("-l", "--logfile"):
logger.set_logfile(a)
my_server = HttpServer(port, RequestDispatcher())
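    # Illustrative invocation (paths and port are placeholders):
    #   jython starter.py --port 8001 --rootdir /opt/restx --logfile /tmp/restx.log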
|
yyamano/RESTx
|
src/python/starter.py
|
Python
|
gpl-3.0
| 2,796 | 0.011803 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 12:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('todo', '0006_auto_20160530_1210'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='category',
field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='todo.Category'),
preserve_default=False,
),
]
|
Azarn/mytodo
|
todo/migrations/0007_auto_20160530_1233.py
|
Python
|
apache-2.0
| 593 | 0.001686 |
# Copyright 2020-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
def main():
settings = get_settings_from_env()
server = server_factory(**settings)
server.serve_forever()
def get_settings_from_env(controller_port=None,
visualization_server_image=None, frontend_image=None,
visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None,
minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None):
"""
Returns a dict of settings from environment variables relevant to the controller
Environment settings can be overridden by passing them here as arguments.
Settings are pulled from the all-caps version of the setting name. The
following defaults are used if those environment variables are not set
to enable backwards compatibility with previous versions of this script:
visualization_server_image: gcr.io/ml-pipeline/visualization-server
visualization_server_tag: value of KFP_VERSION environment variable
frontend_image: gcr.io/ml-pipeline/frontend
frontend_tag: value of KFP_VERSION environment variable
disable_istio_sidecar: Required (no default)
minio_access_key: Required (no default)
minio_secret_key: Required (no default)
"""
settings = dict()
settings["controller_port"] = \
controller_port or \
os.environ.get("CONTROLLER_PORT", "8080")
settings["visualization_server_image"] = \
visualization_server_image or \
os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server")
settings["frontend_image"] = \
frontend_image or \
os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend")
# Look for specific tags for each image first, falling back to
# previously used KFP_VERSION environment variable for backwards
# compatibility
settings["visualization_server_tag"] = \
visualization_server_tag or \
os.environ.get("VISUALIZATION_SERVER_TAG") or \
os.environ["KFP_VERSION"]
settings["frontend_tag"] = \
frontend_tag or \
os.environ.get("FRONTEND_TAG") or \
os.environ["KFP_VERSION"]
settings["disable_istio_sidecar"] = \
disable_istio_sidecar if disable_istio_sidecar is not None \
else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
settings["minio_access_key"] = \
minio_access_key or \
base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
settings["minio_secret_key"] = \
minio_secret_key or \
base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
# KFP_DEFAULT_PIPELINE_ROOT is optional
settings["kfp_default_pipeline_root"] = \
kfp_default_pipeline_root or \
os.environ.get("KFP_DEFAULT_PIPELINE_ROOT")
return settings
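# Illustrative local run (all values are placeholders): with the defaults above it
# is enough to export KFP_VERSION, MINIO_ACCESS_KEY and MINIO_SECRET_KEY, e.g.
#   KFP_VERSION=1.7.0 MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 python sync.py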
def server_factory(visualization_server_image,
visualization_server_tag, frontend_image, frontend_tag,
disable_istio_sidecar, minio_access_key,
minio_secret_key, kfp_default_pipeline_root=None,
url="", controller_port=8080):
"""
    Returns an HTTPServer whose request handler is configured with the given settings
"""
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("pipelines.kubeflow.org/enabled")
if pipeline_enabled != "true":
return {"status": {}, "children": []}
desired_configmap_count = 1
desired_resources = []
if kfp_default_pipeline_root:
desired_configmap_count = 2
desired_resources += [{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "kfp-launcher",
"namespace": namespace,
},
"data": {
"defaultPipelineRoot": kfp_default_pipeline_root,
},
}]
# Compute status based on observed state.
desired_status = {
"kubeflow-pipelines-ready":
len(children["Secret.v1"]) == 1 and
len(children["ConfigMap.v1"]) == desired_configmap_count and
len(children["Deployment.apps/v1"]) == 2 and
len(children["Service.v1"]) == 2 and
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and
len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and
"True" or "False"
}
# Generate the desired child object(s).
desired_resources += [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image": f"{visualization_server_image}:{visualization_server_tag}",
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
"resources": {
"requests": {
"cpu": "50m",
"memory": "200Mi"
},
"limits": {
"cpu": "500m",
"memory": "1Gi"
},
}
}],
"serviceAccountName":
"default-editor",
},
},
},
},
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "DestinationRule",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"host": "ml-pipeline-visualizationserver",
"trafficPolicy": {
"tls": {
"mode": "ISTIO_MUTUAL"
}
}
}
},
{
"apiVersion": "security.istio.io/v1beta1",
"kind": "AuthorizationPolicy",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
}
},
"rules": [{
"from": [{
"source": {
"principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"]
}
}]
}]
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"ports": [{
"name": "http",
"port": 8888,
"protocol": "TCP",
"targetPort": 8888,
}],
"selector": {
"app": "ml-pipeline-visualizationserver",
},
},
},
# Artifact fetcher related resources below.
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-ui-artifact"
}
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"name":
"ml-pipeline-ui-artifact",
"image": f"{frontend_image}:{frontend_tag}",
"imagePullPolicy":
"IfNotPresent",
"ports": [{
"containerPort": 3000
}],
"env": [
{
"name": "MINIO_ACCESS_KEY",
"valueFrom": {
"secretKeyRef": {
"key": "accesskey",
"name": "mlpipeline-minio-artifact"
}
}
},
{
"name": "MINIO_SECRET_KEY",
"valueFrom": {
"secretKeyRef": {
"key": "secretkey",
"name": "mlpipeline-minio-artifact"
}
}
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "70Mi"
},
"limits": {
"cpu": "100m",
"memory": "500Mi"
},
}
}],
"serviceAccountName":
"default-editor"
}
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
"labels": {
"app": "ml-pipeline-ui-artifact"
}
},
"spec": {
"ports": [{
"name":
"http", # name is required to let istio understand request protocol
"port": 80,
"protocol": "TCP",
"targetPort": 3000
}],
"selector": {
"app": "ml-pipeline-ui-artifact"
}
}
},
]
print('Received request:\n', json.dumps(parent, sort_keys=True))
print('Desired resources except secrets:\n', json.dumps(desired_resources, sort_keys=True))
# Moved after the print argument because this is sensitive data.
desired_resources.append({
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mlpipeline-minio-artifact",
"namespace": namespace,
},
"data": {
"accesskey": minio_access_key,
"secretkey": minio_secret_key,
},
})
return {"status": desired_status, "children": desired_resources}
def do_POST(self):
# Serve the sync() function as a JSON webhook.
observed = json.loads(
self.rfile.read(int(self.headers.get("content-length"))))
desired = self.sync(observed["parent"], observed["children"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
return HTTPServer((url, int(controller_port)), Controller)
if __name__ == "__main__":
main()
|
kubeflow/pipelines
|
manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
|
Python
|
apache-2.0
| 16,824 | 0.00107 |
from sympy.utilities.source import get_mod_func, get_class
def test_get_mod_func():
assert get_mod_func('sympy.core.basic.Basic') == ('sympy.core.basic', 'Basic')
def test_get_class():
_basic = get_class('sympy.core.basic.Basic')
assert _basic.__name__ == 'Basic'
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sympy/utilities/tests/test_source.py
|
Python
|
agpl-3.0
| 278 | 0.010791 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
try:
_STRING_TYPE = basestring
except NameError: # pragma: no cover
_STRING_TYPE = str
from recipe_engine.config import config_item_context, ConfigGroup, BadConf
from recipe_engine.config import ConfigList, Dict, Single, Static, Set, List
from . import api as gclient_api
def BaseConfig(USE_MIRROR=True, CACHE_DIR=None,
BUILDSPEC_VERSION=None, deps_file='.DEPS.git', **_kwargs):
cache_dir = str(CACHE_DIR) if CACHE_DIR else None
return ConfigGroup(
solutions = ConfigList(
lambda: ConfigGroup(
name = Single(_STRING_TYPE),
url = Single((_STRING_TYPE, type(None)), empty_val=''),
deps_file = Single(_STRING_TYPE, empty_val=deps_file, required=False,
hidden=False),
managed = Single(bool, empty_val=True, required=False, hidden=False),
custom_deps = Dict(value_type=(_STRING_TYPE, type(None))),
custom_vars = Dict(value_type=(_STRING_TYPE, bool)),
safesync_url = Single(_STRING_TYPE, required=False),
revision = Single(
(_STRING_TYPE, gclient_api.RevisionResolver),
required=False, hidden=True),
)
),
deps_os = Dict(value_type=_STRING_TYPE),
hooks = List(_STRING_TYPE),
target_os = Set(_STRING_TYPE),
target_os_only = Single(bool, empty_val=False, required=False),
target_cpu = Set(_STRING_TYPE),
target_cpu_only = Single(bool, empty_val=False, required=False),
cache_dir = Static(cache_dir, hidden=False),
# If supplied, use this as the source root (instead of the first solution's
# checkout).
src_root = Single(_STRING_TYPE, required=False, hidden=True),
# Maps 'solution' -> build_property
# TODO(machenbach): Deprecate this in favor of the one below.
# http://crbug.com/713356
got_revision_mapping = Dict(hidden=True),
# Maps build_property -> 'solution'
got_revision_reverse_mapping = Dict(hidden=True),
# Addition revisions we want to pass in. For now there's a duplication
# of code here of setting custom vars AND passing in --revision. We hope
# to remove custom vars later.
revisions = Dict(
value_type=(_STRING_TYPE, gclient_api.RevisionResolver),
hidden=True),
# TODO(iannucci): HACK! The use of None here to indicate that we apply this
# to the solution.revision field is really terrible. I mostly blame
# gclient.
# Maps 'parent_build_property' -> 'custom_var_name'
# Maps 'parent_build_property' -> None
# If value is None, the property value will be applied to
# solutions[0].revision. Otherwise, it will be applied to
# solutions[0].custom_vars['custom_var_name']
parent_got_revision_mapping = Dict(hidden=True),
delete_unversioned_trees = Single(bool, empty_val=True, required=False),
# Maps canonical repo URL to (local_path, revision).
# - canonical gitiles repo URL is "https://<host>/<project>"
# where project does not have "/a/" prefix or ".git" suffix.
# - solution/path is then used to apply patches as patch root in
# bot_update.
# - if revision is given, it's passed verbatim to bot_update for
# corresponding dependency. Otherwise (i.e. None), the patch will be
# applied on top of version pinned in DEPS.
    # This is essentially an allowlist of which repos inside a solution
# can be patched automatically by bot_update based on
# api.buildbucket.build.input.gerrit_changes[0].project
# For example, if bare chromium solution has this entry in repo_path_map
# 'https://chromium.googlesource.com/angle/angle': (
# 'src/third_party/angle', 'HEAD')
# then a patch to Angle project can be applied to a chromium src's
# checkout after first updating Angle's repo to its main's HEAD.
repo_path_map = Dict(value_type=tuple, hidden=True),
# Check out refs/branch-heads.
# TODO (machenbach): Only implemented for bot_update atm.
with_branch_heads = Single(
bool,
empty_val=False,
required=False,
hidden=True),
# Check out refs/tags.
with_tags = Single(
bool,
empty_val=False,
required=False,
hidden=True),
USE_MIRROR = Static(bool(USE_MIRROR)),
BUILDSPEC_VERSION= Static(BUILDSPEC_VERSION, hidden=True),
)
config_ctx = config_item_context(BaseConfig)
def ChromiumGitURL(_c, *pieces):
return '/'.join(('https://chromium.googlesource.com',) + pieces)
# TODO(phajdan.jr): Move to proper repo and add coverage.
def ChromeInternalGitURL(_c, *pieces): # pragma: no cover
return '/'.join(('https://chrome-internal.googlesource.com',) + pieces)
@config_ctx()
def android(c):
c.target_os.add('android')
@config_ctx()
def nacl(c):
s = c.solutions.add()
s.name = 'native_client'
s.url = ChromiumGitURL(c, 'native_client', 'src', 'native_client.git')
m = c.got_revision_mapping
m['native_client'] = 'got_revision'
@config_ctx()
def webports(c):
s = c.solutions.add()
s.name = 'src'
s.url = ChromiumGitURL(c, 'webports.git')
m = c.got_revision_mapping
m['src'] = 'got_revision'
@config_ctx()
def emscripten_releases(c):
s = c.solutions.add()
s.name = 'emscripten-releases'
s.url = ChromiumGitURL(c, 'emscripten-releases.git')
m = c.got_revision_mapping
m['emscripten-releases'] = 'got_revision'
@config_ctx()
def gyp(c):
s = c.solutions.add()
s.name = 'gyp'
s.url = ChromiumGitURL(c, 'external', 'gyp.git')
m = c.got_revision_mapping
m['gyp'] = 'got_revision'
@config_ctx()
def build(c):
s = c.solutions.add()
s.name = 'build'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'build.git')
m = c.got_revision_mapping
m['build'] = 'got_revision'
@config_ctx()
def depot_tools(c): # pragma: no cover
s = c.solutions.add()
s.name = 'depot_tools'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'depot_tools.git')
m = c.got_revision_mapping
m['depot_tools'] = 'got_revision'
@config_ctx()
def skia(c): # pragma: no cover
s = c.solutions.add()
s.name = 'skia'
s.url = 'https://skia.googlesource.com/skia.git'
m = c.got_revision_mapping
m['skia'] = 'got_revision'
@config_ctx()
def skia_buildbot(c): # pragma: no cover
s = c.solutions.add()
s.name = 'skia_buildbot'
s.url = 'https://skia.googlesource.com/buildbot.git'
m = c.got_revision_mapping
m['skia_buildbot'] = 'got_revision'
@config_ctx()
def chrome_golo(c): # pragma: no cover
s = c.solutions.add()
s.name = 'chrome_golo'
s.url = 'https://chrome-internal.googlesource.com/chrome-golo/chrome-golo.git'
c.got_revision_mapping['chrome_golo'] = 'got_revision'
@config_ctx()
def infra_puppet(c): # pragma: no cover
s = c.solutions.add()
s.name = 'infra_puppet'
s.url = 'https://chrome-internal.googlesource.com/infra/puppet.git'
c.got_revision_mapping['infra_puppet'] = 'got_revision'
@config_ctx()
def build_internal(c):
s = c.solutions.add()
s.name = 'build_internal'
s.url = 'https://chrome-internal.googlesource.com/chrome/tools/build.git'
c.got_revision_mapping['build_internal'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
# from the first solution.
build(c)
c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx()
def build_internal_scripts_slave(c):
s = c.solutions.add()
s.name = 'build_internal/scripts/slave'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build_limited/scripts/slave.git')
c.got_revision_mapping['build_internal/scripts/slave'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
# from the first solution.
build(c)
c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx()
def master_deps(c):
s = c.solutions.add()
s.name = 'master.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/master.DEPS.git')
c.got_revision_mapping['master.DEPS'] = 'got_revision'
@config_ctx()
def slave_deps(c):
s = c.solutions.add()
s.name = 'slave.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/slave.DEPS.git')
c.got_revision_mapping['slave.DEPS'] = 'got_revision'
@config_ctx()
def internal_deps(c):
s = c.solutions.add()
s.name = 'internal.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/internal.DEPS.git')
c.got_revision_mapping['internal.DEPS'] = 'got_revision'
@config_ctx()
def pdfium(c):
soln = c.solutions.add()
soln.name = 'pdfium'
soln.url = 'https://pdfium.googlesource.com/pdfium.git'
m = c.got_revision_mapping
m['pdfium'] = 'got_revision'
@config_ctx()
def crashpad(c):
soln = c.solutions.add()
soln.name = 'crashpad'
soln.url = 'https://chromium.googlesource.com/crashpad/crashpad.git'
@config_ctx()
def boringssl(c):
soln = c.solutions.add()
soln.name = 'boringssl'
soln.url = 'https://boringssl.googlesource.com/boringssl.git'
soln.deps_file = 'util/bot/DEPS'
@config_ctx()
def dart(c):
soln = c.solutions.add()
soln.name = 'sdk'
soln.url = ('https://dart.googlesource.com/sdk.git')
soln.deps_file = 'DEPS'
soln.managed = False
@config_ctx()
def expect_tests(c):
soln = c.solutions.add()
soln.name = 'expect_tests'
soln.url = 'https://chromium.googlesource.com/infra/testing/expect_tests.git'
c.got_revision_mapping['expect_tests'] = 'got_revision'
@config_ctx()
def infra(c):
soln = c.solutions.add()
soln.name = 'infra'
soln.url = 'https://chromium.googlesource.com/infra/infra.git'
c.got_revision_mapping['infra'] = 'got_revision'
c.repo_path_map.update({
'https://chromium.googlesource.com/infra/luci/gae': (
'infra/go/src/go.chromium.org/gae', 'HEAD'),
'https://chromium.googlesource.com/infra/luci/luci-py': (
'infra/luci', 'HEAD'),
'https://chromium.googlesource.com/infra/luci/luci-go': (
'infra/go/src/go.chromium.org/luci', 'HEAD'),
'https://chromium.googlesource.com/infra/luci/recipes-py': (
'infra/recipes-py', 'HEAD')
})
@config_ctx()
def infra_internal(c): # pragma: no cover
soln = c.solutions.add()
soln.name = 'infra_internal'
soln.url = 'https://chrome-internal.googlesource.com/infra/infra_internal.git'
c.got_revision_mapping['infra_internal'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_gae(c):
# luci/gae is checked out as a part of infra.git solution at HEAD.
c.revisions['infra'] = 'refs/heads/main'
# luci/gae is developed together with luci-go, which should be at HEAD.
c.revisions['infra/go/src/go.chromium.org/luci'] = 'refs/heads/main'
c.revisions['infra/go/src/go.chromium.org/gae'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/go/src/go.chromium.org/gae'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_go(c):
# luci-go is checked out as a part of infra.git solution at HEAD.
c.revisions['infra'] = 'refs/heads/main'
c.revisions['infra/go/src/go.chromium.org/luci'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/go/src/go.chromium.org/luci'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_py(c):
# luci-py is checked out as part of infra just to have appengine
# pre-installed, as that's what luci-py PRESUBMIT relies on.
c.revisions['infra'] = 'refs/heads/main'
c.revisions['infra/luci'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/luci'] = 'got_revision'
@config_ctx(includes=['infra'])
def recipes_py(c):
c.revisions['infra'] = 'refs/heads/main'
c.revisions['infra/recipes-py'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/recipes-py'] = 'got_revision'
@config_ctx()
def recipes_py_bare(c):
soln = c.solutions.add()
soln.name = 'recipes-py'
soln.url = 'https://chromium.googlesource.com/infra/luci/recipes-py'
c.got_revision_mapping['recipes-py'] = 'got_revision'
@config_ctx()
def catapult(c):
soln = c.solutions.add()
soln.name = 'catapult'
soln.url = 'https://chromium.googlesource.com/catapult'
c.got_revision_mapping['catapult'] = 'got_revision'
@config_ctx(includes=['infra_internal'])
def infradata_master_manager(c):
soln = c.solutions.add()
soln.name = 'infra-data-master-manager'
soln.url = (
'https://chrome-internal.googlesource.com/infradata/master-manager.git')
del c.got_revision_mapping['infra_internal']
c.got_revision_mapping['infra-data-master-manager'] = 'got_revision'
@config_ctx()
def infradata_config(c):
soln = c.solutions.add()
soln.name = 'infra-data-config'
soln.url = 'https://chrome-internal.googlesource.com/infradata/config.git'
c.got_revision_mapping['infra-data-config'] = 'got_revision'
@config_ctx()
def infradata_rbe(c):
soln = c.solutions.add()
soln.name = 'infradata-rbe'
soln.url = 'https://chrome-internal.googlesource.com/infradata/rbe.git'
c.got_revision_mapping['infradata-rbe'] = 'got_revision'
@config_ctx()
def with_branch_heads(c):
c.with_branch_heads = True
@config_ctx()
def with_tags(c):
c.with_tags = True
@config_ctx()
def custom_tabs_client(c):
soln = c.solutions.add()
soln.name = 'custom_tabs_client'
# TODO(pasko): test custom-tabs-client within a full chromium checkout.
soln.url = 'https://chromium.googlesource.com/custom-tabs-client'
c.got_revision_mapping['custom_tabs_client'] = 'got_revision'
@config_ctx()
def gerrit_test_cq_normal(c):
soln = c.solutions.add()
soln.name = 'gerrit-test-cq-normal'
soln.url = 'https://chromium.googlesource.com/playground/gerrit-cq/normal.git'
@config_ctx()
def dawn(c):
soln = c.solutions.add()
soln.name = 'dawn'
soln.url = 'https://dawn.googlesource.com/dawn.git'
c.got_revision_mapping['dawn'] = 'got_revision'
@config_ctx()
def celab(c):
soln = c.solutions.add()
# soln.name must match the repo name for `dep` to work properly
soln.name = 'cel'
soln.url = 'https://chromium.googlesource.com/enterprise/cel.git'
c.got_revision_mapping['cel'] = 'got_revision'
@config_ctx()
def openscreen(c):
s = c.solutions.add()
s.name = 'openscreen'
s.url = 'https://chromium.googlesource.com/openscreen'
c.got_revision_mapping['openscreen'] = 'got_revision'
@config_ctx()
def devtools(c):
s = c.solutions.add()
s.name = 'devtools'
s.url = 'https://chromium.googlesource.com/devtools/devtools-frontend.git'
c.got_revision_mapping['devtools'] = 'got_revision'
c.repo_path_map.update({
'https://chromium.googlesource.com/devtools/devtools-frontend': (
'devtools/devtools-frontend', 'HEAD'),
})
@config_ctx()
def tint(c):
soln = c.solutions.add()
soln.name = 'tint'
soln.url = 'https://dawn.googlesource.com/tint.git'
c.got_revision_mapping['tint'] = 'got_revision'
@config_ctx()
def gerrit(c):
s = c.solutions.add()
s.name = 'gerrit'
s.url = 'https://gerrit.googlesource.com/gerrit.git'
c.revisions['gerrit'] = 'refs/heads/master'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_binary_size(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_binary_size'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'binary-size.git')
c.got_revision_mapping['gerrit_plugins_binary_size'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_buildbucket(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_buildbucket'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'buildbucket.git')
c.got_revision_mapping['gerrit_plugins_buildbucket'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_chromium_behavior(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chromium_behavior'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'chromium-behavior.git')
c.got_revision_mapping['gerrit_plugins_chromium_behavior'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_chromium_binary_size(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chromium_binary_size'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins',
'chromium-binary-size.git')
c.got_revision_mapping['gerrit_plugins_chromium_binary_size'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_chromium_style(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chromium_style'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'chromium-style.git')
c.got_revision_mapping['gerrit_plugins_binary_size'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_chumpdetector(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chumpdetector'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'chumpdetector.git')
c.got_revision_mapping['gerrit_plugins_chumpdetector'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_code_coverage(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_code_coverage'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'code-coverage.git')
c.got_revision_mapping['gerrit_plugins_code_coverage'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_git_numberer(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_git_numberer'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'git-numberer.git')
c.got_revision_mapping['gerrit_plugins_git_numberer'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_landingwidget(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_landingwidget'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'landingwidget.git')
c.got_revision_mapping['gerrit_plugins_landingwidget'] = 'got_revision'
@config_ctx(includes=['gerrit'])
def gerrit_plugins_tricium(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_tricium'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'tricium.git')
c.got_revision_mapping['gerrit_plugins_tricium'] = 'got_revision'
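# Illustrative use from a recipe (config name taken from the entries above; the
# exact call site depends on the recipe):
#   api.gclient.set_config('infra')
#   api.gclient.c.revisions['infra'] = 'refs/heads/main'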
|
CoherentLabs/depot_tools
|
recipes/recipe_modules/gclient/config.py
|
Python
|
bsd-3-clause
| 18,342 | 0.019082 |
"""Retrieve temporary access token by using refresh/offline token.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from fastlog import log
from urllib.parse import urljoin
import requests
# The following endpoint is used to retrieve the access token from OSIO AUTH service
_AUTH_ENDPOINT = "/api/token/refresh"
def check_access_token_attribute(token_structure):
"""Additional check for the access_token attribute."""
assert "access_token" in token_structure
item = token_structure["access_token"]
assert isinstance(item, str)
# 200 chars is quite conservative
assert len(token_structure["access_token"]) > 200
# TODO: better check for token internal structure
# 1) regexp-based
# 2) decode it + check if it has all required fields (possibly)
def check_token_type_attribute(token_structure):
"""Additional check for the token_type attribute."""
assert "token_type" in token_structure
item = token_structure["token_type"]
assert isinstance(item, str)
# we don't know about any other token type
assert item == "Bearer"
def check_expires_in_attribute(token_structure):
"""Additional check for the expires_in attribute."""
assert "token_type" in token_structure
item = token_structure["expires_in"]
assert isinstance(item, int)
assert item > 0
def check_refresh_expires_in_attribute(token_structure):
"""Additional check for the refresh_expires_in attribute."""
assert "token_type" in token_structure
item = token_structure["refresh_expires_in"]
assert isinstance(item, int)
assert item > 0
def check_not_before_policy_attribute(token_structure):
"""Additional check for the not-before-policy attribute."""
assert "token_type" in token_structure
item = token_structure["not-before-policy"]
assert isinstance(item, int)
assert item >= 0
def get_and_check_token_structure(data):
"""Get the token structure from returned data and check the basic format."""
assert "token" in data
token_structure = data["token"]
assert "expires_in" in token_structure
check_access_token_attribute(token_structure)
check_token_type_attribute(token_structure)
check_expires_in_attribute(token_structure)
check_refresh_expires_in_attribute(token_structure)
check_not_before_policy_attribute(token_structure)
return token_structure
def retrieve_access_token(refresh_token, auth_service_url):
"""Retrieve temporary access token by using refresh/offline token."""
log.info("Trying to retrieve access token")
if refresh_token is None:
log.error("aborting: RECOMMENDER_REFRESH_TOKEN environment variable is not set")
return None
if auth_service_url is None:
log.error("aborting: OSIO_AUTH_SERVICE environment variable is not set")
return None
payload = {'refresh_token': refresh_token}
url = urljoin(auth_service_url, _AUTH_ENDPOINT)
response = requests.post(url, json=payload)
assert response is not None and response.ok, "Error communicating with the OSIO AUTH service"
data = response.json()
# check the basic structure of the response
token_structure = get_and_check_token_structure(data)
log.info("Token seems to be correct")
# seems like everything's ok, let's read the temporary access token
return token_structure["access_token"]
|
tisnik/fabric8-analytics-common
|
a2t/src/auth.py
|
Python
|
apache-2.0
| 3,986 | 0.001254 |
# -*- coding: utf-8 -*-
import bottle
import sys
import unittest
import wsgiref
import wsgiref.util
import wsgiref.validate
import mimetypes
import uuid
from bottle import tob, tonat, BytesIO, py3k, unicode
def warn(msg):
sys.stderr.write('WARNING: %s\n' % msg.strip())
def tobs(data):
''' Transforms bytes or unicode into a byte stream. '''
return BytesIO(tob(data))
def api(introduced, deprecated=None, removed=None):
current = tuple(map(int, bottle.__version__.split('-')[0].split('.')))
introduced = tuple(map(int, introduced.split('.')))
deprecated = tuple(map(int, deprecated.split('.'))) if deprecated else (99,99)
removed = tuple(map(int, removed.split('.'))) if removed else (99,100)
assert introduced < deprecated < removed
def decorator(func):
if current < introduced:
return None
elif current < deprecated:
return func
elif current < removed:
func.__doc__ = '(deprecated) ' + (func.__doc__ or '')
return func
else:
return None
return decorator
def wsgistr(s):
if py3k:
return s.encode('utf8').decode('latin1')
else:
return s
class ServerTestBase(unittest.TestCase):
def setUp(self):
''' Create a new Bottle app set it as default_app '''
self.port = 8080
self.host = 'localhost'
self.app = bottle.app.push()
self.wsgiapp = wsgiref.validate.validator(self.app)
def urlopen(self, path, method='GET', post='', env=None):
result = {'code':0, 'status':'error', 'header':{}, 'body':tob('')}
def start_response(status, header):
result['code'] = int(status.split()[0])
result['status'] = status.split(None, 1)[-1]
for name, value in header:
name = name.title()
if name in result['header']:
result['header'][name] += ', ' + value
else:
result['header'][name] = value
env = env if env else {}
wsgiref.util.setup_testing_defaults(env)
env['REQUEST_METHOD'] = wsgistr(method.upper().strip())
env['PATH_INFO'] = wsgistr(path)
env['QUERY_STRING'] = wsgistr('')
if post:
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_LENGTH'] = str(len(tob(post)))
env['wsgi.input'].write(tob(post))
env['wsgi.input'].seek(0)
response = self.wsgiapp(env, start_response)
for part in response:
try:
result['body'] += part
except TypeError:
                raise TypeError('WSGI app yielded non-byte object %s' % type(part))
if hasattr(response, 'close'):
response.close()
del response
return result
def postmultipart(self, path, fields, files):
env = multipart_environ(fields, files)
return self.urlopen(path, method='POST', env=env)
def tearDown(self):
bottle.app.pop()
def assertStatus(self, code, route='/', **kargs):
self.assertEqual(code, self.urlopen(route, **kargs)['code'])
def assertBody(self, body, route='/', **kargs):
self.assertEqual(tob(body), self.urlopen(route, **kargs)['body'])
def assertInBody(self, body, route='/', **kargs):
result = self.urlopen(route, **kargs)['body']
if tob(body) not in result:
self.fail('The search pattern "%s" is not included in body:\n%s' % (body, result))
def assertHeader(self, name, value, route='/', **kargs):
self.assertEqual(value, self.urlopen(route, **kargs)['header'].get(name))
def assertHeaderAny(self, name, route='/', **kargs):
self.assertTrue(self.urlopen(route, **kargs)['header'].get(name, None))
def assertInError(self, search, route='/', **kargs):
bottle.request.environ['wsgi.errors'].errors.seek(0)
err = bottle.request.environ['wsgi.errors'].errors.read()
if search not in err:
self.fail('The search pattern "%s" is not included in wsgi.error: %s' % (search, err))
def multipart_environ(fields, files):
boundary = str(uuid.uuid1())
env = {'REQUEST_METHOD':'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary='+boundary}
wsgiref.util.setup_testing_defaults(env)
boundary = '--' + boundary
body = ''
for name, value in fields:
body += boundary + '\n'
body += 'Content-Disposition: form-data; name="%s"\n\n' % name
body += value + '\n'
for name, filename, content in files:
        mimetype = str(mimetypes.guess_type(filename)[0] or 'application/octet-stream')
body += boundary + '\n'
body += 'Content-Disposition: file; name="%s"; filename="%s"\n' % \
(name, filename)
body += 'Content-Type: %s\n\n' % mimetype
body += content + '\n'
body += boundary + '--\n'
if isinstance(body, unicode):
body = body.encode('utf8')
env['CONTENT_LENGTH'] = str(len(body))
env['wsgi.input'].write(body)
env['wsgi.input'].seek(0)
return env
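# A brief, hypothetical example of how these helpers are meant to be used; the
# route and assertions below are illustrative only and not part of the original file.
class _ExampleUsageTest(ServerTestBase):
    def test_hello(self):
        # routes registered inside a test attach to the app pushed by setUp()
        @bottle.route('/hello')
        def hello():
            return 'Hello World!'
        self.assertStatus(200, '/hello')
        self.assertBody('Hello World!', '/hello')
        self.assertStatus(404, '/missing')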
|
Eddy0402/Environment
|
vim/ycmd/third_party/bottle/test/tools.py
|
Python
|
gpl-3.0
| 5,129 | 0.005069 |
def agts(queue):
iron = queue.add('iron.py', ncpus=8, walltime=8 * 60)
queue.add('iron.agts.py', deps=[iron],
creates=['Fe_conv_k.png', 'Fe_conv_h.png'])
if __name__ == '__main__':
import numpy as np
import pylab as plt
from ase.utils.eos import EquationOfState
from ase.io import read
def f(width, k, g):
filename = 'Fe-FD-%.2f-%02d-%2d.traj' % (width, k, g)
configs = read(filename + '@::2')
# Extract volumes and energies:
volumes = [a.get_volume() for a in configs]
energies = [a.get_potential_energy() for a in configs]
eos = EquationOfState(volumes, energies)
v0, e0, B = eos.fit()
return v0, e0, B
kk = [2, 4, 6, 8, 10, 12]
plt.figure(figsize=(6, 4))
for width in [0.05, 0.1, 0.15, 0.2]:
a = []
for k in kk:
v0, e0, B = f(width, k, 12)
a.append((2 * v0)**(1.0 / 3.0))
print ('%7.3f ' * 7) % ((width,) + tuple(a))
plt.plot(kk, a, label='width = %.2f eV' % width)
plt.legend(loc='upper right')
#plt.axis(ymin=2.83, ymax=2.85)
plt.xlabel('number of k-points')
plt.ylabel('lattice constant [Ang]')
plt.savefig('Fe_conv_k.png')
plt.figure(figsize=(6, 4))
gg = np.arange(8, 32, 4)
a = []
for g in gg:
v0, e0, B = f(0.1, 8, g)
a.append((2 * v0)**(1.0 / 3.0))
plt.plot(2.84 / gg, a, 'o-')
#plt.axis(ymin=2.83, ymax=2.85)
plt.xlabel('grid-spacing [Ang]')
plt.ylabel('lattice constant [Ang]')
plt.savefig('Fe_conv_h.png')
|
qsnake/gpaw
|
doc/tutorials/lattice_constants/iron.agts.py
|
Python
|
gpl-3.0
| 1,569 | 0.003187 |
#
# Copyright (c) 2015 Open-RnD Sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
open-rnd/ros3d-www
|
ros3dui/system/__init__.py
|
Python
|
mit
| 1,104 | 0 |
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from dipde.internals.internalpopulation import InternalPopulation
from dipde.internals.externalpopulation import ExternalPopulation
from dipde.internals.network import Network
from dipde.internals.connection import Connection as Connection
def get_simulation(dv=.001, update_method='approx', approx_order=None, tol=1e-8):
import scipy.stats as sps
# Create simulation:
b1 = ExternalPopulation(100)
i1 = InternalPopulation(v_min=0, v_max=.02, dv=dv, update_method=update_method, approx_order=approx_order, tol=tol)
b1_i1 = Connection(b1, i1, 1, delays=0.0, weights=(sps.expon(0,.005), 201))
simulation = Network([b1, i1], [b1_i1])
return simulation
def example(show=True, save=False):
# Settings:
t0 = 0.
dt = .0001
dv = .0001
tf = .1
update_method = 'approx'
approx_order = 1
tol = 1e-14
# Run simulation:
simulation = get_simulation(dv=dv, update_method=update_method, approx_order=approx_order, tol=tol)
simulation.run(dt=dt, tf=tf, t0=t0)
i1 = simulation.population_list[1]
if show == True:
# Visualize:
plt.figure(figsize=(3,3))
plt.plot(i1.t_record, i1.firing_rate_record)
plt.plot([tf],[8.6687760498], 'r*')
plt.xlim([0,tf])
plt.ylim(ymin=0, ymax=10)
plt.xlabel('Time (s)')
plt.ylabel('Firing Rate (Hz)')
plt.tight_layout()
if save == True: plt.savefig('./singlepop_exponential_distribution.png')
plt.show()
return i1.t_record, i1.firing_rate_record
if __name__ == "__main__": example() # pragma: no cover
|
AllenInstitute/dipde
|
dipde/examples/singlepop_exponential_distribution.py
|
Python
|
gpl-3.0
| 2,322 | 0.009044 |
# -*- coding: utf-8 -*-
# Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
# For example, this binary tree is symmetric:
# 1
# / \
# 2 2
# / \ / \
# 3 4 4 3
# But the following is not:
# 1
# / \
# 2 2
# \ \
# 3 3
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return a boolean
def isSymmetric(self, root):
if root is not None:
if not isTreeSym(root.left, root.right):
return False
return True
def isTreeSym(p, q):
if p == None and q == None:
return True
elif p and q:
return p.val == q.val and isTreeSym(p.left, q.right) and isTreeSym(p.right, q.left)
else:
return False
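# A common iterative alternative (a sketch, not part of the original solution):
# compare mirrored node pairs with an explicit queue instead of recursion.
def isTreeSymIterative(root):
    if root is None:
        return True
    queue = [(root.left, root.right)]
    while queue:
        p, q = queue.pop(0)
        if p is None and q is None:
            continue
        if p is None or q is None or p.val != q.val:
            return False
        queue.append((p.left, q.right))
        queue.append((p.right, q.left))
    return True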
if __name__ == '__main__':
s = Solution()
p1 = TreeNode(1)
p2 = TreeNode(2)
p3 = TreeNode(2)
p4 = None
p5 = TreeNode(3)
p6 = None
p7 = TreeNode(3)
p1.left = p2
p1.right = p3
p2.left = p4
p2.right = p5
p3.left = p6
p3.right = p7
print s.isSymmetric(p1)
|
ammzen/SolveLeetCode
|
101SymmetricTree.py
|
Python
|
mit
| 1,224 | 0.00817 |
import yaml
# We try to dump using the CSafeDumper for speed improvements.
try:
from yaml import CSafeDumper as Dumper
except ImportError: #pragma: no cover
from yaml import SafeDumper as Dumper #pragma: no cover
from hepdata_converter.common import Option, OptionInitMixin
from hepdata_converter.writers import Writer
import os
def str_presenter(dumper, data):
if '\n' in data:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
class YAML(Writer):
help = 'Writes YAML output. Output should be defined as filepath to the directory where submission.yaml and associated ' \
'table files will be written'
@classmethod
def options(cls):
options = Writer.options()
options['single_file'] = Option('single-file', type=bool, default=False, variable_mapping='single_file',
required=False, help="If set output will be written to single yaml file, instead "
"of multiple files (separating data and metadata of the tables)")
return options
def __init__(self, *args, **kwargs):
super(YAML, self).__init__(single_file_output=True, *args, **kwargs)
Dumper.add_representer(str, str_presenter)
def write(self, data_in, data_out, *args, **kwargs):
"""
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: path of the directory to which yaml files will be written
:type data_out: str
:param args:
:param kwargs:
"""
tables = data_in.tables
data = data_in.data
if self.hepdata_doi:
data['hepdata_doi'] = self.hepdata_doi
for table in tables:
table.metadata['table_doi'] = self.hepdata_doi + '/t' + str(table.index)
if not isinstance(data_out, str) and not self.single_file:
raise ValueError("output is not string, and single_file flag is not specified")
if not self.single_file:
self.create_dir(data_out)
with open(os.path.join(data_out, 'submission.yaml'), 'w') as submission_file:
yaml.dump_all([data] + [table.metadata for table in tables], submission_file, Dumper=Dumper, default_flow_style=None)
for table in tables:
with open(os.path.join(data_out, table.data_file), 'w') as table_file:
yaml.dump(table.data, table_file, Dumper=Dumper, default_flow_style=None)
else:
if isinstance(data_out, str):
with open(data_out, 'w') as submission_file:
yaml.dump_all([data] + [table.all_data for table in tables], submission_file, Dumper=Dumper, default_flow_style=None)
else: # expect filelike object
yaml.dump_all([data] + [table.all_data for table in tables], data_out, Dumper=Dumper, default_flow_style=None)
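# A hypothetical usage sketch: how the writer object is constructed depends on
# hepdata_converter's option machinery, so the keyword argument below is an
# assumption; the write() call follows the signature documented above.
#
#     writer = YAML(single_file=True)               # assumed constructor handling
#     with open('submission.yaml', 'w') as out:
#         writer.write(parsed_data, out)            # parsed_data: a ParsedData object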
|
HEPData/hepdata-converter
|
hepdata_converter/writers/yaml_writer.py
|
Python
|
gpl-2.0
| 3,048 | 0.00689 |
import logging
from utils import common
from utils.lyric_base import LyricBase
site_class = 'UtaNet'
site_index = 'uta_net'
site_keyword = 'uta-net'
site_url = 'http://www.uta-net.com/'
test_url = 'http://www.uta-net.com/song/138139/'
test_expect_length = 1089
# current url format
# 'http://www.uta-net.com/song/138139/'
#
# former url
# 'http://www.uta-net.com/user/phplib/view_0.php?ID=17248'
class UtaNet(LyricBase):
def parse_page(self):
url = self.url
if not self.find_lyric(url):
logging.info('Failed to get lyric of url [%s]', url)
return False
if not self.find_song_info(url):
logging.info('Failed to get song info of url [%s]', url)
return True
def find_lyric(self, url):
pattern = '/[a-z]+/([0-9]+)/'
song_id = common.get_first_group_by_pattern(url, pattern)
if not song_id:
# try old pattern
# http://www.uta-net.com/user/phplib/view_0.php?ID=17248
pattern = 'ID=([0-9]+)'
song_id = common.get_first_group_by_pattern(url, pattern)
if not song_id:
logging.info('Failed to get id of url [%s]', url)
return False
# http://www.uta-net.com/user/phplib/svg/showkasi.php?ID=17248&WIDTH=560&HEIGHT=756&FONTSIZE=15&t=1489258939
showkasi_pattern = 'http://www.uta-net.com/user/phplib/svg/showkasi.php?ID=%s'
song_url = showkasi_pattern % (song_id, )
data = common.get_url_content(song_url)
if not data:
logging.info('Failed to get content of url [%s]', song_url)
return False
prefix = '<svg '
suffix = '</svg>'
lyric = common.find_string_by_prefix_suffix(data, prefix, suffix, True)
if not lyric:
logging.error('Failed to get lyric of url [%s]', url)
return False
lyric = lyric.replace('</text>', '\n')
lyric = common.strip_tags(lyric)
lyric = lyric.strip()
        # convert half-width characters to full-width
lyric = common.half2full(lyric)
self.lyric = lyric
return True
def find_song_info(self, url):
ret = True
html = common.get_url_content(url)
patterns = {
'title': '<h2[^>]*>([^<]+)</h2>',
'artist': '歌手:<h3.*?><a href="/artist/[0-9]+/".*?>(.+?)</a></h3>',
'lyricist': '作詞:<h4.*?>([^<]+)</h4>',
'composer': '作曲:<h4.*?>([^<]+)</h4>'
}
self.set_attr(patterns, html)
return ret
def get_lyric(url):
obj = UtaNet(url)
return obj.get()
def download_search_result():
url = 'http://www.uta-net.com/search/?Aselect=1&Bselect=3&Keyword=KOKIA&sort=6'
output = 'uta_net.search.txt'
html = common.get_url_content(url)
if not html:
logging.error('Failed to download url [%s]' % (url, ))
return False
pattern = '<td class="side td1"><a href="([^"]+)">'
import re
import urllib.parse
songs = re.findall(pattern, html)
out = open(output, 'wb')
for song in songs:
print(song)
song_url = urllib.parse.urljoin(site_url, song)
full = get_lyric(song_url)
out.write(full.encode('utf-8'))
        out.write(b'\n\n=====\n')
out.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# download_search_result()
# exit()
# url = 'http://www.uta-net.com/song/181206/'
url = test_url
full = get_lyric(url)
if not full:
print('damn !')
exit()
print(full)
|
franklai/lyric-get
|
lyric_engine/modules/uta_net.py
|
Python
|
mit
| 3,585 | 0.000561 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from warehouse.admin.services import ISponsorLogoStorage
from warehouse.utils.static import ManifestCacheBuster
def includeme(config):
sponsorlogos_storage_class = config.maybe_dotted(
config.registry.settings["sponsorlogos.backend"]
)
config.register_service_factory(
sponsorlogos_storage_class.create_service, ISponsorLogoStorage
)
# Setup Jinja2 Rendering for the Admin application
config.add_jinja2_search_path("templates", name=".html")
# Setup our static assets
prevent_http_cache = config.get_settings().get("pyramid.prevent_http_cache", False)
config.add_static_view(
"admin/static",
"warehouse.admin:static/dist",
# Don't cache at all if prevent_http_cache is true, else we'll cache
# the files for 10 years.
cache_max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.add_cache_buster(
"warehouse.admin:static/dist/",
ManifestCacheBuster(
"warehouse.admin:static/dist/manifest.json",
reload=config.registry.settings["pyramid.reload_assets"],
strict=not prevent_http_cache,
),
)
config.whitenoise_add_files("warehouse.admin:static/dist/", prefix="/admin/static/")
config.whitenoise_add_manifest(
"warehouse.admin:static/dist/manifest.json", prefix="/admin/static/"
)
# Add our routes
config.include(".routes")
# Add our flags
config.include(".flags")
|
pypa/warehouse
|
warehouse/admin/__init__.py
|
Python
|
apache-2.0
| 2,024 | 0.000988 |
"""SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://sympy.org
"""
from __future__ import absolute_import, print_function
from sympy.release import __version__
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, mathematica_code, octave_code, \
latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/__init__.py
|
Python
|
mit
| 2,176 | 0.011949 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2017-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os.path
from edb.testbase import server as tb
from edb.tools import test
import edgedb
class TestEdgeQLExprAliases(tb.QueryTestCase):
'''The scope is to test expression aliases.'''
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'cards.esdl')
SETUP = [os.path.join(os.path.dirname(__file__), 'schemas',
'cards_setup.edgeql')]
async def test_edgeql_aliases_basic_01(self):
await self.assert_query_result(
r'''
SELECT AirCard {
name,
owners: {
name
} ORDER BY .name
} ORDER BY AirCard.name;
''',
[
{
'name': 'Djinn',
'owners': [{'name': 'Carol'}, {'name': 'Dave'}]
},
{
'name': 'Giant eagle',
'owners': [{'name': 'Carol'}, {'name': 'Dave'}]
},
{
'name': 'Sprite',
'owners': [{'name': 'Carol'}, {'name': 'Dave'}]
}
],
)
async def test_edgeql_aliases_basic_02(self):
await self.con.execute('''
CREATE ALIAS expert_map := (
SELECT {
('Alice', 'pro'),
('Bob', 'noob'),
('Carol', 'noob'),
('Dave', 'casual'),
}
);
''')
await self.assert_query_result(
r'''
SELECT expert_map
ORDER BY expert_map;
''',
[
['Alice', 'pro'],
['Bob', 'noob'],
['Carol', 'noob'],
['Dave', 'casual'],
],
)
await self.con.execute('''
DROP ALIAS expert_map;
''')
async def test_edgeql_aliases_basic_03(self):
await self.con.execute('''
CREATE ALIAS scores := (
SELECT {
(name := 'Alice', score := 100, games := 10),
(name := 'Bob', score := 11, games := 2),
(name := 'Carol', score := 31, games := 5),
(name := 'Dave', score := 78, games := 10),
}
);
''')
await self.assert_query_result(
r'''
SELECT scores ORDER BY scores.name;
''',
[
{'name': 'Alice', 'score': 100, 'games': 10},
{'name': 'Bob', 'score': 11, 'games': 2},
{'name': 'Carol', 'score': 31, 'games': 5},
{'name': 'Dave', 'score': 78, 'games': 10},
],
)
await self.assert_query_result(
r'''
SELECT <tuple<str, int64, int64>>scores
ORDER BY scores.name;
''',
[
['Alice', 100, 10],
['Bob', 11, 2],
['Carol', 31, 5],
['Dave', 78, 10],
],
)
await self.assert_query_result(
r'''
SELECT <tuple<name: str, points: int64, plays: int64>>scores
ORDER BY scores.name;
''',
[
{'name': 'Alice', 'points': 100, 'plays': 10},
{'name': 'Bob', 'points': 11, 'plays': 2},
{'name': 'Carol', 'points': 31, 'plays': 5},
{'name': 'Dave', 'points': 78, 'plays': 10},
],
)
await self.con.execute('''
DROP ALIAS scores;
''')
async def test_edgeql_aliases_basic_04(self):
await self.con.execute('''
CREATE ALIAS levels := {'pro', 'casual', 'noob'};
''')
await self.assert_query_result(
r'''
SELECT levels;
''',
{'pro', 'casual', 'noob'},
)
async def test_edgeql_aliases_create_01(self):
await self.con.execute(r'''
CREATE ALIAS DCard := (
SELECT Card {
# This is an identical computable to the one
# present in the type, but it must be legal to
# override the link with any compatible
# expression.
owners := (
SELECT Card.<deck[IS User] {
name_upper := str_upper(.name)
}
)
} FILTER Card.name LIKE 'D%'
);
''')
await self.assert_query_result(
r'''
SELECT DCard {
name,
owners: {
name_upper,
} ORDER BY .name
} ORDER BY DCard.name;
''',
[
{
'name': 'Djinn',
'owners': [{'name_upper': 'CAROL'},
{'name_upper': 'DAVE'}],
},
{
'name': 'Dragon',
'owners': [{'name_upper': 'ALICE'},
{'name_upper': 'DAVE'}],
},
{
'name': 'Dwarf',
'owners': [{'name_upper': 'BOB'},
{'name_upper': 'CAROL'}],
}
],
)
await self.con.execute('DROP ALIAS DCard;')
# Check that we can recreate the alias.
await self.con.execute(r'''
CREATE ALIAS DCard := (
SELECT Card {
owners := (
SELECT Card.<deck[IS User] {
name_upper := str_upper(.name)
}
)
} FILTER Card.name LIKE 'D%'
);
''')
await self.assert_query_result(
r'''
WITH
MODULE schema,
DCardT := (SELECT ObjectType
FILTER .name = 'default::DCard'),
DCardOwners := (SELECT DCardT.links
FILTER .name = 'owners')
SELECT
DCardOwners {
target[IS ObjectType]: {
name,
pointers: {
name
} FILTER .name = 'name_upper'
}
}
''',
[{
'target': {
'name': 'default::__DCard__owners',
'pointers': [
{
'name': 'name_upper',
}
]
}
}]
)
async def test_edgeql_aliases_filter_01(self):
await self.assert_query_result(
r'''
SELECT FireCard {name}
FILTER FireCard = DaveCard
ORDER BY FireCard.name;
''',
[{'name': 'Dragon'}],
)
    async def test_edgeql_aliases_filter_02(self):
await self.assert_query_result(
r'''
SELECT AirCard {name}
FILTER AirCard NOT IN (SELECT Card FILTER Card.name LIKE 'D%')
ORDER BY AirCard.name;
''',
[
{'name': 'Giant eagle'},
{'name': 'Sprite'},
],
)
async def test_edgeql_computable_link_01(self):
await self.assert_query_result(
r'''
SELECT Card {
owners: {
name
} ORDER BY .name
}
FILTER .name = 'Djinn';
''',
[{
'owners': [
{'name': 'Carol'},
{'name': 'Dave'}
]
}]
)
async def test_edgeql_computable_link_02(self):
await self.assert_query_result(
r'''
SELECT User {
name,
deck_cost
}
ORDER BY User.name;
''',
[
{
'name': 'Alice',
'deck_cost': 11
},
{
'name': 'Bob',
'deck_cost': 9
},
{
'name': 'Carol',
'deck_cost': 16
},
{
'name': 'Dave',
'deck_cost': 20
}
]
)
async def test_edgeql_computable_aliased_link_01(self):
await self.assert_query_result(
r'''
SELECT AliasedFriends {
my_name,
my_friends: {
@nickname
} ORDER BY .name
}
FILTER .name = 'Alice';
''',
[{
'my_name': 'Alice',
'my_friends': [
{
'@nickname': 'Swampy'
},
{
'@nickname': 'Firefighter'
},
{
'@nickname': 'Grumpy'
},
]
}]
)
async def test_edgeql_computable_nested_01(self):
await self.assert_query_result(
r'''
SELECT Card {
name,
owned := (
WITH O := Card.<deck[IS User]
SELECT O {
name,
# simple computable
fr0 := count(O.friends),
# computable with an alias defined
fr1 := (WITH F := O.friends SELECT count(F)),
}
ORDER BY .name
)
} FILTER .name = 'Giant turtle';
''',
[{
'name': 'Giant turtle',
'owned': [
{'fr0': 3, 'fr1': 3, 'name': 'Alice'},
{'fr0': 0, 'fr1': 0, 'name': 'Bob'},
{'fr0': 0, 'fr1': 0, 'name': 'Carol'},
{'fr0': 1, 'fr1': 1, 'name': 'Dave'},
]
}]
)
async def test_edgeql_computable_nested_02(self):
await self.assert_query_result(
r'''
WITH C := Card { ava_owners := .<avatar }
SELECT C {
name,
ava_owners: {
typename := (
WITH name := C.ava_owners.__type__.name
SELECT name
)
}
}
FILTER EXISTS .ava_owners
ORDER BY .name
''',
[{
'name': 'Djinn',
'ava_owners': [{
'typename': 'default::Bot'
}],
}, {
'name': 'Dragon',
'ava_owners': [{
'typename': 'default::User'
}],
}]
)
async def test_edgeql_computable_nested_03(self):
# This SHOULD be identical to the previous test case, except
# for the cardinality being forced to be MULTI.
await self.assert_query_result(
r'''
WITH C := Card { ava_owners := .<avatar }
SELECT C {
name,
ava_owners: {
multi typename := (
WITH name := C.ava_owners.__type__.name
SELECT name
)
}
}
FILTER EXISTS .ava_owners
ORDER BY .name;
''',
[{
'name': 'Djinn',
'ava_owners': [{
'typename': {'default::Bot'}
}],
}, {
'name': 'Dragon',
'ava_owners': [{
'typename': {'default::User'}
}],
}]
)
async def test_edgeql_aliases_shape_propagation_01(self):
await self.assert_query_result(
r'''
SELECT _ := {
(SELECT User FILTER .name = 'Alice').deck,
(SELECT User FILTER .name = 'Bob').deck
} {name}
ORDER BY _.name;
''',
[
{'name': 'Bog monster'},
{'name': 'Bog monster'},
{'name': 'Dragon'},
{'name': 'Dwarf'},
{'name': 'Giant turtle'},
{'name': 'Giant turtle'},
{'name': 'Golem'},
{'name': 'Imp'},
],
)
async def test_edgeql_aliases_shape_propagation_02(self):
await self.assert_query_result(
r'''
# the alias should be propagated through _ := DISTINCT since it
# maps `any` to `any`
SELECT _ := DISTINCT {
(SELECT User FILTER .name = 'Alice').deck,
(SELECT User FILTER .name = 'Bob').deck
} {name}
ORDER BY _.name;
''',
[
{'name': 'Bog monster'},
{'name': 'Dragon'},
{'name': 'Dwarf'},
{'name': 'Giant turtle'},
{'name': 'Golem'},
{'name': 'Imp'},
],
)
async def test_edgeql_aliases_shape_propagation_03(self):
await self.assert_query_result(
r'''
# the alias should be propagated through _ := DETACHED
SELECT _ := DETACHED {
(SELECT User FILTER .name = 'Alice').deck,
(SELECT User FILTER .name = 'Bob').deck
} {name}
ORDER BY _.name;
''',
[
{'name': 'Bog monster'},
{'name': 'Bog monster'},
{'name': 'Dragon'},
{'name': 'Dwarf'},
{'name': 'Giant turtle'},
{'name': 'Giant turtle'},
{'name': 'Golem'},
{'name': 'Imp'},
],
)
async def test_edgeql_aliases_shape_propagation_04(self):
await self.assert_query_result(
r'''
# the alias should be propagated through _ := DETACHED
SELECT _ := DETACHED ({
(SELECT User FILTER .name = 'Alice').deck,
(SELECT User FILTER .name = 'Bob').deck
} {name})
ORDER BY _.name;
''',
[
{'name': 'Bog monster'},
{'name': 'Bog monster'},
{'name': 'Dragon'},
{'name': 'Dwarf'},
{'name': 'Giant turtle'},
{'name': 'Giant turtle'},
{'name': 'Golem'},
{'name': 'Imp'},
],
)
async def test_edgeql_aliases_if_else_01(self):
await self.assert_query_result(
r"""
SELECT
_ := 'yes' IF Card.cost > 4 ELSE 'no'
ORDER BY _;
""",
['no', 'no', 'no', 'no', 'no', 'no', 'no', 'no', 'yes'],
)
@test.xfail(
"Known collation issue on Heroku Postgres",
unless=os.getenv("EDGEDB_TEST_BACKEND_VENDOR") != "heroku-postgres"
)
async def test_edgeql_aliases_if_else_02(self):
await self.assert_query_result(
r"""
# working with singletons
SELECT
_ := 'ok' IF User.deck_cost < 19 ELSE User.deck.name
ORDER BY _;
""",
[
'Bog monster',
'Djinn',
'Dragon',
'Giant eagle',
'Giant turtle',
'Golem',
'Sprite',
'ok',
'ok',
'ok',
],
)
await self.assert_query_result(
r"""
# either result is a set, but the condition is a singleton
SELECT
_ := User.deck.element IF User.deck_cost < 19
ELSE User.deck.name
ORDER BY _;
""",
[
'Air',
'Air',
'Air',
'Bog monster',
'Djinn',
'Dragon',
'Earth',
'Earth',
'Earth',
'Earth',
'Fire',
'Fire',
'Giant eagle',
'Giant turtle',
'Golem',
'Sprite',
'Water',
'Water',
'Water',
'Water',
'Water',
'Water',
],
)
async def test_edgeql_aliases_if_else_03(self):
res = [
['Air', 'Air', 'Air', 'Earth', 'Earth', 'Fire', 'Fire', 'Water',
'Water'],
['1', '1', '1', '2', '2', '3', '3', '4', '5'],
[False, False, False, True, True],
]
await self.assert_query_result(
r"""
# get the data that this test relies upon in a format
# that's easy to analyze
SELECT _ := User.deck.element
ORDER BY _;
""",
res[0]
)
await self.assert_query_result(
r"""
SELECT _ := <str>User.deck.cost
ORDER BY _;
""",
res[1]
)
await self.assert_query_result(
r"""
SELECT _ := {User.name[0] = 'A', EXISTS User.friends}
ORDER BY _;
""",
res[2]
)
await self.assert_query_result(
r"""
# results and conditions are sets
SELECT _ :=
User.deck.element
# because the elements of {} are treated as SET OF,
# all of the paths in this expression are independent sets
IF {User.name[0] = 'A', EXISTS User.friends} ELSE
<str>User.deck.cost
ORDER BY _;
""",
sorted(res[1] + res[1] + res[1] + res[0] + res[0]),
)
async def test_edgeql_aliases_if_else_04(self):
await self.assert_query_result(
r"""
SELECT
1 IF User.name[0] = 'A' ELSE
10 IF User.name[0] = 'B' ELSE
100 IF User.name[0] = 'C' ELSE
0;
""",
{1, 10, 100, 0},
)
await self.assert_query_result(
r"""
SELECT (
User.name,
sum(
1 IF User.friends.name[0] = 'A' ELSE
10 IF User.friends.name[0] = 'B' ELSE
100 IF User.friends.name[0] = 'C' ELSE
0
),
) ORDER BY .0;
""",
[['Alice', 110], ['Bob', 0], ['Carol', 0], ['Dave', 10]],
)
async def test_edgeql_aliases_if_else_05(self):
await self.assert_query_result(
r"""
SELECT
(Card.name, 'yes' IF Card.cost > 4 ELSE 'no')
ORDER BY .0;
""",
[
['Bog monster', 'no'],
['Djinn', 'no'],
['Dragon', 'yes'],
['Dwarf', 'no'],
['Giant eagle', 'no'],
['Giant turtle', 'no'],
['Golem', 'no'],
['Imp', 'no'],
['Sprite', 'no'],
],
)
await self.assert_query_result(
r"""
SELECT
(Card.name, 'yes') IF Card.cost > 4 ELSE (Card.name, 'no')
ORDER BY .0;
""",
[
['Bog monster', 'no'],
['Djinn', 'no'],
['Dragon', 'yes'],
['Dwarf', 'no'],
['Giant eagle', 'no'],
['Giant turtle', 'no'],
['Golem', 'no'],
['Imp', 'no'],
['Sprite', 'no'],
],
)
async def test_edgeql_aliases_nested_01(self):
await self.assert_query_result(
r"""
SELECT AwardAlias {
name,
winner: {
name
}
} ORDER BY .name;
""",
[
{'name': '1st', 'winner': {'name': 'Alice'}},
{'name': '2nd', 'winner': {'name': 'Alice'}},
{'name': '3rd', 'winner': {'name': 'Bob'}},
],
)
async def test_edgeql_aliases_nested_02(self):
await self.assert_query_result(
r"""
SELECT {
foo := (
SELECT AwardAlias {
name,
winner: {
name
}
} ORDER BY .name
)
};
""",
[
{
'foo': [
{'name': '1st', 'winner': {'name': 'Alice'}},
{'name': '2nd', 'winner': {'name': 'Alice'}},
{'name': '3rd', 'winner': {'name': 'Bob'}},
]
}
],
)
async def test_edgeql_aliases_nested_03(self):
await self.assert_query_result(
r"""
SELECT AwardAlias {
winner: {
name_upper
}
}
FILTER
.winner.name_upper = 'ALICE';
""",
[
{'winner': {'name_upper': 'ALICE'}},
{'winner': {'name_upper': 'ALICE'}},
],
)
async def test_edgeql_aliases_deep_01(self):
# fetch the result we will compare to
res = await self.con.query_json(r"""
SELECT AwardAlias {
winner: {
deck: {
owners
}
}
}
FILTER .name = '1st'
LIMIT 1;
""")
res = json.loads(res)
# fetch the same data via a different alias, that should be
# functionally identical
await self.assert_query_result(
r"""
SELECT AwardAlias2 {
winner: {
deck: {
owners
}
}
}
FILTER .name = '1st';
""",
res
)
async def test_edgeql_aliases_clauses_01(self):
# fetch the result we will compare to
res = await self.con.query_json(r"""
SELECT User {
deck: {
id
} ORDER BY User.deck.cost DESC
LIMIT 1,
}
FILTER .name = 'Alice';
""")
res = json.loads(res)
# fetch the same data via an alias, that should be
# functionally identical
await self.assert_query_result(
r"""
SELECT UserAlias {
deck,
}
FILTER .name = 'Alice';
""",
res
)
async def test_edgeql_aliases_limit_01(self):
# Test interaction of aliases and the LIMIT clause
await self.con.execute("""
CREATE ALIAS FirstUser := (
SELECT User {
name_upper := str_upper(User.name)
}
ORDER BY .name
LIMIT 1
);
""")
await self.assert_query_result(
r"""
SELECT FirstUser {
name_upper,
}
""",
[
{
'name_upper': 'ALICE',
},
],
)
async def test_edgeql_aliases_ignore_alias(self):
await self.con.execute('''
CREATE ALIAS UserAlias2 := (
SELECT User {
deck: {
id
} ORDER BY User.deck.cost DESC
LIMIT 1,
}
);
''')
# Explicitly reset the default module alias to test
# that aliases don't care.
await self.con.execute('''
SET MODULE std;
''')
await self.assert_query_result(
r"""
SELECT default::UserAlias2 {
deck,
}
FILTER .name = 'Alice';
""",
[{
'deck': [
{}
]
}]
)
async def test_edgeql_aliases_esdl_01(self):
await self.assert_query_result(
r"""
SELECT WaterOrEarthCard {
name,
owned_by_alice,
}
FILTER .name ILIKE {'%turtle%', 'dwarf'}
ORDER BY .name;
""",
[
{
'name': 'Dwarf',
'owned_by_alice': True,
},
{
'name': 'Giant turtle',
'owned_by_alice': True,
},
]
)
await self.assert_query_result(
r"""
SELECT EarthOrFireCard {
name,
}
FILTER .name = {'Imp', 'Dwarf'}
ORDER BY .name;
""",
[
{
'name': 'Dwarf'
},
{
'name': 'Imp'
},
]
)
async def test_edgeql_aliases_collection_01(self):
await self.assert_query_result(
r"""
SELECT SpecialCardAlias {
name,
el_cost,
};
""",
[
{
'name': 'Djinn',
'el_cost': ['Air', 4],
},
]
)
async def test_edgeql_aliases_collection_02(self):
await self.assert_query_result(
r"""
SELECT SpecialCardAlias.el_cost;
""",
[
['Air', 4],
]
)
async def test_edgeql_aliases_collection_03(self):
await self.assert_query_result(
r"""
WITH
X := SpecialCard {
el_cost := (.element, .cost)
}
SELECT X.el_cost;
""",
[
['Air', 4],
]
)
async def test_edgeql_aliases_collection_04(self):
await self.assert_query_result(
r"""
SELECT (
SpecialCard {
el_cost := (.element,)
}
).el_cost;
""",
[
['Air'],
]
)
async def test_edgeql_aliases_collection_05(self):
await self.assert_query_result(
r"""
SELECT (
SpecialCard {
el_cost := [.element]
}
).el_cost;
""",
[
['Air'],
]
)
async def test_edgeql_aliases_subqueries_01(self):
await self.assert_query_result(
r"""
SELECT count((
(SELECT EarthOrFireCard.name),
(EarthOrFireCard.name)
))
""",
[4]
)
async def test_edgeql_aliases_subqueries_02(self):
await self.assert_query_result(
r"""
SELECT count((
(EarthOrFireCard.name),
(SELECT EarthOrFireCard.name)
))
""",
[4]
)
async def test_edgeql_aliases_subqueries_03(self):
await self.assert_query_result(
r"""
SELECT count((
(EarthOrFireCard.name),
(EarthOrFireCard.name)
))
""",
[4]
)
async def test_edgeql_aliases_subqueries_04(self):
await self.assert_query_result(
r"""
SELECT count((
(SELECT EarthOrFireCard.name),
(SELECT EarthOrFireCard.name)
))
""",
[16]
)
async def test_edgeql_aliases_introspection(self):
await self.assert_query_result(
r"""
WITH MODULE schema
SELECT Type {
name
}
FILTER .from_alias AND .name LIKE 'default::Air%'
ORDER BY .name
""",
[{
'name': 'default::AirCard',
}]
)
await self.con.execute('''
CREATE ALIAS tuple_alias := ('foo', 10);
''')
await self.assert_query_result(
r"""
WITH MODULE schema
SELECT Tuple {
name,
element_types: {
name := .type.name
} ORDER BY @index
}
FILTER
.from_alias
AND .name = 'default::tuple_alias'
ORDER BY .name
""",
[{
'name': 'default::tuple_alias',
'element_types': [{
'name': 'std::str',
}, {
'name': 'std::int64',
}]
}]
)
async def test_edgeql_aliases_backlinks_01(self):
async with self.assertRaisesRegexTx(
edgedb.InvalidReferenceError,
"cannot follow backlink 'owners'",
):
await self.con.execute("""
SELECT User.<owners[Is Card];
""")
async def test_edgeql_aliases_backlinks_02(self):
async with self.assertRaisesRegexTx(
edgedb.InvalidReferenceError,
"cannot follow backlink 'owners'",
):
await self.con.execute("""
SELECT User.<owners;
""")
|
edgedb/edgedb
|
tests/test_edgeql_expr_aliases.py
|
Python
|
apache-2.0
| 32,398 | 0 |
import sys
sentid_prev = 0
first_line = True
first_word = True
for line in sys.stdin:
row = line.strip().split()
if first_line:
word_ix = row.index('word')
sentid_ix = row.index('sentid')
first_line = False
else:
word = row[word_ix]
sentid = row[sentid_ix]
if first_word:
delim = ''
first_word = False
elif sentid == sentid_prev:
delim = ' '
else:
delim = '\n'
sentid_prev = sentid
sys.stdout.write(delim + word)
sys.stdout.write('\n')
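# Hypothetical usage sketch (file names are illustrative): the script reads a
# whitespace-delimited table on stdin whose header contains 'word' and 'sentid'
# columns, and writes one space-joined sentence per line, e.g.
#
#     python itemmeasures2lineitems.py < corpus.itemmeasures > corpus.lineitems
#
# so an input of
#     word sentid
#     The  1
#     dog  1
#     It   2
#     ran  2
# becomes
#     The dog
#     It ran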
|
modelblocks/modelblocks-release
|
resource-general/scripts/itemmeasures2lineitems.py
|
Python
|
gpl-3.0
| 588 | 0.001701 |
import os
import platform
from setuptools import setup, Extension
from distutils.util import convert_path
from Cython.Build import cythonize
system = platform.system()
## paths settings
# Linux
if 'Linux' in system:
CLFFT_DIR = r'/home/gregor/devel/clFFT'
CLFFT_LIB_DIRS = [r'/usr/local/lib64']
CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'src', 'include'), ]
CL_INCL_DIRS = ['/opt/AMDAPPSDK-3.0/include']
EXTRA_COMPILE_ARGS = []
EXTRA_LINK_ARGS = []
#Windows
elif 'Windows' in system:
CLFFT_DIR = r'C:\Users\admin\Devel\clFFT-Full-2.12.2-Windows-x64'
CLFFT_LIB_DIRS = [os.path.join(CLFFT_DIR, 'lib64\import')]
CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'include'), ]
CL_DIR = os.getenv('AMDAPPSDKROOT')
CL_INCL_DIRS = [os.path.join(CL_DIR, 'include')]
EXTRA_COMPILE_ARGS = []
EXTRA_LINK_ARGS = []
# macOS
elif 'Darwin' in system:
CLFFT_DIR = r'/Users/gregor/Devel/clFFT'
CLFFT_LIB_DIRS = [r'/Users/gregor/Devel/clFFT/src/library']
CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'src', 'include'), ]
CL_INCL_DIRS = []
EXTRA_COMPILE_ARGS = ['-stdlib=libc++']
EXTRA_LINK_ARGS = ['-stdlib=libc++']
import Cython.Compiler.Options
Cython.Compiler.Options.generate_cleanup_code = 2
extensions = [
Extension("gpyfft.gpyfftlib",
[os.path.join('gpyfft', 'gpyfftlib.pyx')],
include_dirs= CLFFT_INCL_DIRS + CL_INCL_DIRS,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
libraries=['clFFT'],
library_dirs = CLFFT_LIB_DIRS,
language='c++',
)
]
def copy_clfftdll_to_package():
import shutil
shutil.copy(
os.path.join(CLFFT_DIR, 'bin', 'clFFT.dll'),
'gpyfft')
shutil.copy(
os.path.join(CLFFT_DIR, 'bin', 'StatTimer.dll'),
'gpyfft')
print("copied clFFT.dll, StatTimer.dll")
package_data = {}
if 'Windows' in platform.system():
copy_clfftdll_to_package()
package_data.update({'gpyfft': ['clFFT.dll', 'StatTimer.dll']},)
def get_version():
main_ns = {}
version_path = convert_path('gpyfft/version.py')
with open(version_path) as version_file:
exec(version_file.read(), main_ns)
version = main_ns['__version__']
return version
def get_readme():
dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dirname, "README.md"), "r") as fp:
long_description = fp.read()
return long_description
install_requires = ["numpy", "pyopencl"]
setup_requires = ["numpy", "cython"]
setup(
name='gpyfft',
version=get_version(),
description='A Python wrapper for the OpenCL FFT library clFFT',
long_description=get_readme(),
url=r"https://github.com/geggo/gpyfft",
maintainer='Gregor Thalhammer',
maintainer_email='gregor.thalhammer@gmail.com',
license='LGPL',
packages=['gpyfft', "gpyfft.test"],
ext_modules=cythonize(extensions),
package_data=package_data,
install_requires=install_requires,
setup_requires=setup_requires,
)
|
geggo/gpyfft
|
setup.py
|
Python
|
lgpl-3.0
| 3,106 | 0.00322 |
import sys
import time
import commands
import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec
if len(sys.argv)>1:
site = sys.argv[1]
else:
site = None
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
destName = 'BNL_ATLAS_2'
jobList = []
for i in range(20):
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = commands.getoutput('uuidgen')
job.AtlasRelease = 'Atlas-11.0.41'
#job.AtlasRelease = 'Atlas-11.0.3'
job.homepackage = 'AnalysisTransforms'
job.transformation = 'https://gridui01.usatlas.bnl.gov:24443/dav/test/runAthena'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 100
job.prodSourceLabel = 'user'
job.computingSite = site
#job.prodDBlock = "pandatest.b1599dfa-cd36-4fc5-92f6-495781a94c66"
job.prodDBlock = "pandatest.f228b051-077b-4f81-90bf-496340644379"
fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = "lib.f228b051-077b-4f81-90bf-496340644379.tgz"
fileI.type = 'input'
job.addFile(fileI)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % commands.getoutput('uuidgen')
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
fileOZ = FileSpec()
fileOZ.lfn = "%s.pool.root" % commands.getoutput('uuidgen')
fileOZ.destinationDBlock = job.destinationDBlock
fileOZ.destinationSE = job.destinationSE
fileOZ.dataset = job.destinationDBlock
fileOZ.type = 'output'
job.addFile(fileOZ)
job.jobParameters="""-l %s -r PhysicsAnalysis/AnalysisCommon/UserAnalysis/UserAnalysis-00-05-11/run -j " jobOptions.pythia.py" -i "[]" -o "{'Stream1': '%s'}" """ % (fileI.lfn,fileOZ.lfn)
jobList.append(job)
s,o = Client.submitJobs(jobList)
print "---------------------"
print s
for x in o:
print "PandaID=%s" % x[0]
|
RRCKI/panda-server
|
pandaserver/test/execute.py
|
Python
|
apache-2.0
| 2,194 | 0.014585 |
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===========================
Upper Air Sounding Tutorial
===========================
Upper air analysis is a staple of many synoptic and mesoscale analysis
problems. In this tutorial we will gather weather balloon data, plot it,
perform a series of thermodynamic calculations, and summarize the results.
To learn more about the Skew-T diagram and its use in weather analysis and
forecasting, check out `this <https://homes.comet.ucar.edu/~alanbol/aws-tr-79-006.pdf>`_
Air Weather Service guide.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# Upper air data can be obtained using the siphon package, but for this tutorial we will use
# some of MetPy's sample data. This event is the Veterans Day tornado outbreak in 2002.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('nov11_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
##########################################################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.get_wind_components(wind_speed, wind_dir)
##########################################################################
# Thermodynamic Calculations
# --------------------------
#
# Often times we will want to calculate some thermodynamic parameters of a
# sounding. The MetPy calc module has many such calculations already implemented!
#
# * **Lifting Condensation Level (LCL)** - The level at which an air parcel's
# relative humidity becomes 100% when lifted along a dry adiabatic path.
# * **Parcel Path** - Path followed by a hypothetical parcel of air, beginning
# at the surface temperature/pressure and rising dry adiabatically until
#   reaching the LCL, then rising moist adiabatically.
# Calculate the LCL
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile.
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
##########################################################################
# Basic Skew-T Plotting
# ---------------------
#
# The Skew-T (log-P) diagram is the standard way to view rawinsonde data. The
# y-axis is height in pressure coordinates and the x-axis is temperature. The
# y coordinates are plotted on a logarithmic scale and the x coordinate system
# is skewed. An explanation of skew-T interpretation is beyond the scope of this
# tutorial, but here we will plot one that can be used for analysis or
# publication.
#
# The most basic skew-T can be plotted with only five lines of Python.
# These lines perform the following tasks:
#
# 1. Create a ``Figure`` object and set the size of the figure.
#
# 2. Create a ``SkewT`` object
#
# 3. Plot the pressure and temperature (note that the pressure,
# the independent variable, is first even though it is plotted on the y-axis).
#
# 4. Plot the pressure and dewpoint temperature.
#
# 5. Plot the wind barbs at the appropriate pressure using the u and v wind
# components.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(p, u, v)
# Show the plot
plt.show()
##########################################################################
# Advanced Skew-T Plotting
# ------------------------
#
# Fiducial lines indicating dry adiabats, moist adiabats, and mixing ratio are
# useful when performing further analysis on the Skew-T diagram. Often the
# 0C isotherm is emphasized and areas of CAPE and CIN are shaded.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
##########################################################################
# Adding a Hodograph
# ------------------
#
# A hodograph is a polar representation of the wind profile measured by the rawinsonde.
# Winds at different levels are plotted as vectors with their tails at the origin, the angle
# from the vertical axes representing the direction, and the length representing the speed.
# The line plotted on the hodograph is a line connecting the tips of these vectors,
# which are not drawn.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Create a hodograph
# Create an inset axes object that is 40% width and height of the
# figure and put it in the upper right hand corner.
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, wind_speed) # Plot a line colored by wind speed
# Show the plot
plt.show()
|
metpy/MetPy
|
v0.8/_downloads/upperair_soundings.py
|
Python
|
bsd-3-clause
| 7,536 | 0.001725 |
import numpy
from samuroi.gui.samuroiwindow import SamuROIWindow
from samuroi.plugins.tif import load_tif
from samuroi.plugins.swc import load_swc
from samuroi.masks.segmentation import Segmentation as SegmentationMask
# requirements for template matching and post processing
from samuroi.event.biexponential import BiExponentialParameters
from samuroi.event.template_matching import template_matching
from samuroi.util.postprocessors import PostProcessorPipe, DetrendPostProcessor
import sys
from PyQt4 import QtGui
import argparse
parser = argparse.ArgumentParser(description='Open SamuROI and load some data.')
parser.add_argument('filename', type=str, help='The filename of the tif file to use as data.')
parser.add_argument('--swc', dest='swcfiles', type=str, action='append', help='Filename of swc file to load.')
parser.add_argument('--segmentation', dest='segmentations', type=str, action='append',
help='Filename of segmentations to load. (.npy files)')
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
args = parser.parse_args()
data = load_tif(args.filename)
morphology = numpy.max(data, axis=-1)
from samuroi.plugins.baseline import linbleeched_deltaF
# data = linbleeched_deltaF(data)
# show the gui for the filtered data
mainwindow = SamuROIWindow(data=data, morphology=morphology)
    if args.swcfiles is not None:
        for filename in args.swcfiles:
            swc = load_swc(filename)
            mainwindow.segmentation.load_swc(swc)
if args.segmentations is not None:
for filename in args.segmentations:
segdata = numpy.load(filename)
seg = SegmentationMask(data=segdata, name="filename")
mainwindow.segmentation.masks.add(seg)
# here we can set the template parameters
params = BiExponentialParameters(tau1=150., tau2=1.)
kernel = params.kernel()
# crop the long decay phase of the kernel, otherwise boundary effects get to strong
# and bursts of events cannot be detected correctly, since the do not fully decay
kernel = kernel[0:120]
# if required one can zero pad the kernel to the left to enforce a "silent" phase before an event
# this will again lead to trouble when detecting bursts of events
# kernel = numpy.concatenate((numpy.zeros(number_of_required_silent_frames), kernel))
def matching_postprocess(trace):
# run the template matching algorithm
result = template_matching(data=trace, kernel=kernel, threshold=4.)
return result.crit
# we either can use the matching postprocessor directly, or add a detrend step in front of it
postprocessor = PostProcessorPipe()
postprocessor.append(DetrendPostProcessor())
postprocessor.append(matching_postprocess)
# add a button to the main window postprocessor toolbar for enabling the template matching
action = mainwindow.toolbar_postprocess.addAction("template matching")
action.setToolTip("Run first linear detrend and then apply the template matching to the trace, then show the"
"detection criterion instead of the trace data.")
# a variable for the line plotting the best fit in the trace widget
fitcurve = None
def install_pp(pp):
if fitcurve is not None:
fitcurve.remove()
mainwindow.segmentation.postprocessor = postprocessor
# if we click the button in the main window to install the postprocessor
action.triggered.connect(install_pp)
def redraw_fit():
global fitcurve
# the index of the frame of interest
i = mainwindow.segmentation.active_frame
# first shift to the active frame, then go back half the kernel size, because the values in we want to plot
# the kernel centered around the selected frame
x = numpy.arange(0, len(kernel)) + i - len(kernel) / 2
if fitcurve is not None:
fitcurve.remove()
# we want to calculate the fit for the first cuve in the trace widget, hence, get the y-data of the line
_, trace = mainwindow.tracedockwidget.canvas.axes.lines[0].get_data()
result = template_matching(data=trace, kernel=kernel, threshold=4.)
# we need to apply the best found scale and offset to the kernel
fitcurve, = mainwindow.tracedockwidget.canvas.axes.plot(x, kernel * result.s[i] + result.c[i])
mainwindow.segmentation.active_frame_changed.append(redraw_fit)
mainwindow.show()
sys.exit(app.exec_())
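# A hypothetical invocation (file names are illustrative only), matching the
# argparse definition above; --swc and --segmentation may be given repeatedly:
#
#     python script.py recording.tif --swc neuron.swc --segmentation rois.npy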
|
aolsux/SamuROI
|
doc/examples/script.py
|
Python
|
mit
| 4,482 | 0.004685 |
import urllib.request
import time
def pega_preço():
pagina = urllib.request.urlopen('http://beans.itcarlow.ie/prices-loyalty.html')
texto = pagina.read().decode('utf8')
onde = texto.find('>$')
inicio= onde + 2
fim = inicio + 4
return float(texto[inicio:fim])
opção = input("deseja comprar já? (S/N)")
if opção == 'S' :
preço = pega_preço()
print('Você comprou por %5.2f R$' % preço)
else:
preço = 99.99
while preço >= 4.74:
preço = pega_preço()
if preço >= 4.74:
time.sleep(5)
print ('comprar ! Preço: %5.2f' %preço)
|
andersonsilvade/python_C
|
Python32/aulas/hakeandositeprecodescontowhiletemposimounao.py
|
Python
|
mit
| 607 | 0.013559 |
# This file is part of the Perspectives Notary Server
#
# Copyright (C) 2011 Dan Wendlandt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import traceback
from client_common import verify_notary_signature, fetch_notary_xml, parse_http_notary_list
from generate_svg import get_svg_graph
if len(sys.argv) != 4:
print "usage: %s <service-id> <notary-list-file> <len-days>" % sys.argv[0]
exit(1)
sid = sys.argv[1]
server_list = parse_http_notary_list(sys.argv[2])
for s in server_list:
try:
s["results"] = None
server = s["host"].split(":")[0]
port = s["host"].split(":")[1]
code, xml_text = fetch_notary_xml(server,int(port), sid)
if code == 200 and verify_notary_signature(sid, xml_text, s["public_key"]):
s["results"] = xml_text
except Exception, e:
pass
print get_svg_graph(sid, server_list, int(sys.argv[3]), time.time())
|
hiviah/perspectives-observatory
|
utilities/svg_client.py
|
Python
|
gpl-3.0
| 1,448 | 0.015884 |
import os.path
from os.path import abspath
import re
import sys
import types
import pickle
from test import support
from test.support import import_helper
import test.test_importlib.util
import unittest
import unittest.mock
import unittest.test
class TestableTestProgram(unittest.TestProgram):
module = None
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test2.py', 'test1.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test4.py', 'test3.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
# The test suites found should be sorted alphabetically for reliable
# execution order.
expected = [[name + ' module tests'] for name in
('test1', 'test2', 'test_dir')]
expected.extend([[('test_dir.%s' % name) + ' module tests'] for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_socket(self):
# A socket is neither a directory nor a regular file.
# https://bugs.python.org/issue25320
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['socket']]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: False
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
self.assertEqual(suite, [])
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module.path + ' module tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the a_directory and test_directory2
# directly and via load_tests for the test_directory package, which
# still calls the baseline module loader.
self.assertEqual(suite,
[['a_directory module tests'],
['test_directory load_tests',
'test_directory module tests'],
['test_directory2 module tests']])
# The test module paths should be sorted for reliable execution order
self.assertEqual(Module.paths,
['a_directory', 'test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
# (but there are no tests in our stub module itself, so that is [] at
# the time of call).
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*')])
def test_find_tests_default_calls_package_load_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module.path + ' module tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*.py'))
# We should have loaded tests from the a_directory and test_directory2
# directly and via load_tests for the test_directory package, which
# still calls the baseline module loader.
self.assertEqual(suite,
[['a_directory module tests'],
['test_directory load_tests',
'test_directory module tests'],
['test_directory2 module tests']])
# The test module paths should be sorted for reliable execution order
self.assertEqual(Module.paths,
['a_directory', 'test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*.py')])
def test_find_tests_customize_via_package_pattern(self):
# This test uses the example 'do-nothing' load_tests from
# https://docs.python.org/3/library/unittest.html#load-tests-protocol
# to make sure that that actually works.
# Housekeeping
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
self.addCleanup(restore_listdir)
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
self.addCleanup(restore_isfile)
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
self.addCleanup(sys.path.remove, abspath('/foo'))
# Test data: we expect the following:
# a listdir to find our package, and isfile and isdir checks on it.
# a module-from-name call to turn that into a module
# followed by load_tests.
# then our load_tests will call discover() which is messy
# but that finally chains into find_tests again for the child dir -
# which is why we don't have an infinite loop.
# We expect to see:
# the module load tests for both package and plain module called,
# and the plain module result nested by the package module load_tests
# indicating that it was processed and could have been mutated.
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
def list_dir(path):
return list(vfs[path])
os.listdir = list_dir
os.path.isdir = lambda path: not path.endswith('.py')
os.path.isfile = lambda path: path.endswith('.py')
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if path.endswith('test_module'):
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
else:
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
# top level directory cached on loader instance
__file__ = '/foo/my_package/__init__.py'
this_dir = os.path.dirname(__file__)
pkg_tests = loader.discover(
start_dir=this_dir, pattern=pattern)
return [self.path + ' load_tests', tests
] + pkg_tests
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader = unittest.TestLoader()
loader._get_module_from_name = lambda name: Module(name)
loader.suiteClass = lambda thing: thing
loader._top_level_dir = abspath('/foo')
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests(abspath('/foo'), 'test*.py'))
# We should have loaded tests from both my_package and
# my_package.test_module, and also run the load_tests hook in both.
# (normally this would be nested TestSuites.)
self.assertEqual(suite,
[['my_package load_tests', [],
['my_package.test_module load_tests']]])
# Parents before children.
self.assertEqual(Module.paths,
['my_package', 'my_package.test_module'])
# load_tests should have been called twice with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*.py'),
(loader, [], 'test*.py')])
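    # For reference, the load_tests hooks exercised above all follow the
    # documented protocol: a module- or package-level function of the form
    #
    #   def load_tests(loader, standard_tests, pattern):
    #       ...
    #       return standard_tests
    #
    # The "do-nothing" package variant referenced in the previous test keeps
    # default behaviour by re-running discovery inside the package directory,
    # as the Module.load_tests stub above does.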
def test_discover(self):
loader = unittest.TestLoader()
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def restore_isfile():
os.path.isfile = original_isfile
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
orig_sys_path = sys.path[:]
def restore_path():
sys.path[:] = orig_sys_path
self.addCleanup(restore_path)
full_path = os.path.abspath(os.path.normpath('/foo'))
with self.assertRaises(ImportError):
loader.discover('/foo/bar', top_level_dir='/foo')
self.assertEqual(loader._top_level_dir, full_path)
self.assertIn(full_path, sys.path)
os.path.isfile = lambda path: True
os.path.isdir = lambda path: True
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
_find_tests_args = []
def _find_tests(start_dir, pattern, namespace=None):
_find_tests_args.append((start_dir, pattern))
return ['tests']
loader._find_tests = _find_tests
loader.suiteClass = str
suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
top_level_dir = os.path.abspath('/foo/bar')
start_dir = os.path.abspath('/foo/bar/baz')
self.assertEqual(suite, "['tests']")
self.assertEqual(loader._top_level_dir, top_level_dir)
self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
self.assertIn(top_level_dir, sys.path)
def test_discover_start_dir_is_package_calls_package_load_tests(self):
# This test verifies that the package load_tests in a package is indeed
# invoked when the start_dir is a package (and not the top level).
# http://bugs.python.org/issue22457
# Test data: we expect the following:
# an isfile to verify the package, then importing and scanning
# as per _find_tests' normal behaviour.
# We expect to see our load_tests hook called once.
vfs = {abspath('/toplevel'): ['startdir'],
abspath('/toplevel/startdir'): ['__init__.py']}
def list_dir(path):
return list(vfs[path])
self.addCleanup(setattr, os, 'listdir', os.listdir)
os.listdir = list_dir
self.addCleanup(setattr, os.path, 'isfile', os.path.isfile)
os.path.isfile = lambda path: path.endswith('.py')
self.addCleanup(setattr, os.path, 'isdir', os.path.isdir)
os.path.isdir = lambda path: not path.endswith('.py')
self.addCleanup(sys.path.remove, abspath('/toplevel'))
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
def load_tests(self, loader, tests, pattern):
return ['load_tests called ' + self.path]
def __eq__(self, other):
return self.path == other.path
loader = unittest.TestLoader()
loader._get_module_from_name = lambda name: Module(name)
loader.suiteClass = lambda thing: thing
suite = loader.discover('/toplevel/startdir', top_level_dir='/toplevel')
# We should have loaded tests from the package __init__.
# (normally this would be nested TestSuites.)
self.assertEqual(suite,
[['load_tests called startdir']])
def setup_import_issue_tests(self, fakefile):
listdir = os.listdir
os.listdir = lambda _: [fakefile]
isfile = os.path.isfile
os.path.isfile = lambda _: True
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
def setup_import_issue_package_tests(self, vfs):
self.addCleanup(setattr, os, 'listdir', os.listdir)
self.addCleanup(setattr, os.path, 'isfile', os.path.isfile)
self.addCleanup(setattr, os.path, 'isdir', os.path.isdir)
self.addCleanup(sys.path.__setitem__, slice(None), list(sys.path))
def list_dir(path):
return list(vfs[path])
os.listdir = list_dir
os.path.isdir = lambda path: not path.endswith('.py')
os.path.isfile = lambda path: path.endswith('.py')
def test_discover_with_modules_that_fail_to_import(self):
loader = unittest.TestLoader()
self.setup_import_issue_tests('test_this_does_not_exist.py')
suite = loader.discover('.')
self.assertIn(os.getcwd(), sys.path)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
self.assertTrue(
'Failed to import test module: test_this_does_not_exist' in error,
'missing error string in %r' % error)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.test_this_does_not_exist()
def test_discover_with_init_modules_that_fail_to_import(self):
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
self.setup_import_issue_package_tests(vfs)
import_calls = []
def _get_module_from_name(name):
import_calls.append(name)
raise ImportError("Cannot import Name")
loader = unittest.TestLoader()
loader._get_module_from_name = _get_module_from_name
suite = loader.discover(abspath('/foo'))
self.assertIn(abspath('/foo'), sys.path)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
self.assertTrue(
'Failed to import test module: my_package' in error,
'missing error string in %r' % error)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.my_package()
self.assertEqual(import_calls, ['my_package'])
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(test, proto))
def test_discover_with_module_that_raises_SkipTest_on_import(self):
if not unittest.BaseTestSuite._cleanup:
raise unittest.SkipTest("Suite cleanup is disabled")
loader = unittest.TestLoader()
def _get_module_from_name(name):
raise unittest.SkipTest('skipperoo')
loader._get_module_from_name = _get_module_from_name
self.setup_import_issue_tests('test_skip_dummy.py')
suite = loader.discover('.')
self.assertEqual(suite.countTestCases(), 1)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.skipped), 1)
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(suite, proto))
def test_discover_with_init_module_that_raises_SkipTest_on_import(self):
if not unittest.BaseTestSuite._cleanup:
raise unittest.SkipTest("Suite cleanup is disabled")
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
self.setup_import_issue_package_tests(vfs)
import_calls = []
def _get_module_from_name(name):
import_calls.append(name)
raise unittest.SkipTest('skipperoo')
loader = unittest.TestLoader()
loader._get_module_from_name = _get_module_from_name
suite = loader.discover(abspath('/foo'))
self.assertIn(abspath('/foo'), sys.path)
self.assertEqual(suite.countTestCases(), 1)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(import_calls, ['my_package'])
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(suite, proto))
def test_command_line_handling_parseArgs(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something', 'discover'])
self.assertEqual(args, [[]])
args[:] = []
program.parseArgs(['something', 'discover', 'foo', 'bar'])
self.assertEqual(args, [['foo', 'bar']])
def test_command_line_handling_discover_by_default(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something'])
self.assertEqual(args, [[]])
self.assertEqual(program.verbosity, 1)
self.assertIs(program.buffer, False)
self.assertIs(program.catchbreak, False)
self.assertIs(program.failfast, False)
def test_command_line_handling_discover_by_default_with_options(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something', '-v', '-b', '-v', '-c', '-f'])
self.assertEqual(args, [[]])
self.assertEqual(program.verbosity, 2)
self.assertIs(program.buffer, True)
self.assertIs(program.catchbreak, True)
self.assertIs(program.failfast, True)
def test_command_line_handling_do_discovery_too_many_arguments(self):
program = TestableTestProgram()
program.testLoader = None
with support.captured_stderr() as stderr, \
self.assertRaises(SystemExit) as cm:
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
self.assertEqual(cm.exception.args, (2,))
self.assertIn('usage:', stderr.getvalue())
def test_command_line_handling_do_discovery_uses_default_loader(self):
program = object.__new__(unittest.TestProgram)
program._initArgParsers()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program.testLoader = Loader()
program._do_discovery(['-v'])
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
def test_command_line_handling_do_discovery_calls_loader(self):
program = TestableTestProgram()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program._do_discovery(['-v'], Loader=Loader)
self.assertEqual(program.verbosity, 2)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['--verbose'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery([], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-s', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-t', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'fish', None)])
self.assertFalse(program.failfast)
self.assertFalse(program.catchbreak)
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
self.assertEqual(program.verbosity, 2)
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
def setup_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
full_path = os.path.abspath('foo')
original_listdir = os.listdir
original_isfile = os.path.isfile
original_isdir = os.path.isdir
original_realpath = os.path.realpath
def cleanup():
os.listdir = original_listdir
os.path.isfile = original_isfile
os.path.isdir = original_isdir
os.path.realpath = original_realpath
del sys.modules['foo']
if full_path in sys.path:
sys.path.remove(full_path)
self.addCleanup(cleanup)
def listdir(_):
return ['foo.py']
def isfile(_):
return True
def isdir(_):
return True
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
if os.name == 'nt':
# ntpath.realpath may inject path prefixes when failing to
# resolve real files, so we substitute abspath() here instead.
os.path.realpath = os.path.abspath
return full_path
def test_detect_module_clash(self):
full_path = self.setup_module_clash()
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?" % (mod_dir, expected_dir))
self.assertRaisesRegex(
ImportError, '^%s$' % msg, loader.discover,
start_dir='foo', pattern='foo.py'
)
self.assertEqual(sys.path[0], full_path)
def test_module_symlink_ok(self):
full_path = self.setup_module_clash()
original_realpath = os.path.realpath
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
def cleanup():
os.path.realpath = original_realpath
self.addCleanup(cleanup)
def realpath(path):
if path == os.path.join(mod_dir, 'foo.py'):
return os.path.join(expected_dir, 'foo.py')
return path
os.path.realpath = realpath
loader = unittest.TestLoader()
loader.discover(start_dir='foo', pattern='foo.py')
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern, namespace=None):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
def test_discovery_from_dotted_path_builtin_modules(self):
loader = unittest.TestLoader()
listdir = os.listdir
os.listdir = lambda _: ['test_this_does_not_exist.py']
isfile = os.path.isfile
isdir = os.path.isdir
os.path.isdir = lambda _: False
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.path.isdir = isdir
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
with self.assertRaises(TypeError) as cm:
loader.discover('sys')
self.assertEqual(str(cm.exception),
'Can not use builtin modules '
'as dotted module names')
def test_discovery_from_dotted_namespace_packages(self):
loader = unittest.TestLoader()
package = types.ModuleType('package')
package.__path__ = ['/a', '/b']
package.__spec__ = types.SimpleNamespace(
loader=None,
submodule_search_locations=['/a', '/b']
)
def _import(packagename, *args, **kwargs):
sys.modules[packagename] = package
return package
_find_tests_args = []
def _find_tests(start_dir, pattern, namespace=None):
_find_tests_args.append((start_dir, pattern))
return ['%s/tests' % start_dir]
loader._find_tests = _find_tests
loader.suiteClass = list
with unittest.mock.patch('builtins.__import__', _import):
# Since loader.discover() can modify sys.path, restore it when done.
with import_helper.DirsOnSysPath():
# Make sure to remove 'package' from sys.modules when done.
with test.test_importlib.util.uncache('package'):
suite = loader.discover('package')
self.assertEqual(suite, ['/a/tests', '/b/tests'])
def test_discovery_failed_discovery(self):
loader = unittest.TestLoader()
package = types.ModuleType('package')
def _import(packagename, *args, **kwargs):
sys.modules[packagename] = package
return package
with unittest.mock.patch('builtins.__import__', _import):
# Since loader.discover() can modify sys.path, restore it when done.
with import_helper.DirsOnSysPath():
# Make sure to remove 'package' from sys.modules when done.
with test.test_importlib.util.uncache('package'):
with self.assertRaises(TypeError) as cm:
loader.discover('package')
self.assertEqual(str(cm.exception),
'don\'t know how to discover from {!r}'
.format(package))
if __name__ == '__main__':
unittest.main()
|
brython-dev/brython
|
www/src/Lib/unittest/test/test_discovery.py
|
Python
|
bsd-3-clause
| 34,059 | 0.001556 |
"""This module can be used for finding similar code"""
import re
import rope.refactor.wildcards
from rope.base import libutils
from rope.base import codeanalyze, exceptions, ast, builtins
from rope.refactor import (patchedast, wildcards)
from rope.refactor.patchedast import MismatchedTokenError
class BadNameInCheckError(exceptions.RefactoringError):
pass
class SimilarFinder(object):
"""`SimilarFinder` can be used to find similar pieces of code
See the notes in the `rope.refactor.restructure` module for more
info.
"""
def __init__(self, pymodule, wildcards=None):
"""Construct a SimilarFinder"""
self.source = pymodule.source_code
try:
self.raw_finder = RawSimilarFinder(
pymodule.source_code, pymodule.get_ast(), self._does_match)
except MismatchedTokenError:
print("in file %s" % pymodule.resource.path)
raise
self.pymodule = pymodule
if wildcards is None:
self.wildcards = {}
for wildcard in [rope.refactor.wildcards.
DefaultWildcard(pymodule.pycore.project)]:
self.wildcards[wildcard.get_name()] = wildcard
else:
self.wildcards = wildcards
def get_matches(self, code, args={}, start=0, end=None):
self.args = args
if end is None:
end = len(self.source)
skip_region = None
if 'skip' in args.get('', {}):
resource, region = args['']['skip']
if resource == self.pymodule.get_resource():
skip_region = region
return self.raw_finder.get_matches(code, start=start, end=end,
skip=skip_region)
def get_match_regions(self, *args, **kwds):
for match in self.get_matches(*args, **kwds):
yield match.get_region()
def _does_match(self, node, name):
arg = self.args.get(name, '')
kind = 'default'
if isinstance(arg, (tuple, list)):
kind = arg[0]
arg = arg[1]
suspect = wildcards.Suspect(self.pymodule, node, name)
return self.wildcards[kind].matches(suspect, arg)
class RawSimilarFinder(object):
"""A class for finding similar expressions and statements"""
def __init__(self, source, node=None, does_match=None):
if node is None:
node = ast.parse(source)
if does_match is None:
self.does_match = self._simple_does_match
else:
self.does_match = does_match
self._init_using_ast(node, source)
def _simple_does_match(self, node, name):
return isinstance(node, (ast.expr, ast.Name))
def _init_using_ast(self, node, source):
self.source = source
self._matched_asts = {}
if not hasattr(node, 'region'):
patchedast.patch_ast(node, source)
self.ast = node
def get_matches(self, code, start=0, end=None, skip=None):
"""Search for `code` in source and return a list of `Match`\es
`code` can contain wildcards. ``${name}`` matches normal
        names and ``${?name}`` can match any expression.  You can use
`Match.get_ast()` for getting the node that has matched a
given pattern.
"""
if end is None:
end = len(self.source)
for match in self._get_matched_asts(code):
match_start, match_end = match.get_region()
if start <= match_start and match_end <= end:
if skip is not None and (skip[0] < match_end and
skip[1] > match_start):
continue
yield match
def _get_matched_asts(self, code):
if code not in self._matched_asts:
wanted = self._create_pattern(code)
matches = _ASTMatcher(self.ast, wanted,
self.does_match).find_matches()
self._matched_asts[code] = matches
return self._matched_asts[code]
def _create_pattern(self, expression):
expression = self._replace_wildcards(expression)
node = ast.parse(expression)
# Getting Module.Stmt.nodes
nodes = node.body
if len(nodes) == 1 and isinstance(nodes[0], ast.Expr):
# Getting Discard.expr
wanted = nodes[0].value
else:
wanted = nodes
return wanted
def _replace_wildcards(self, expression):
ropevar = _RopeVariable()
template = CodeTemplate(expression)
mapping = {}
for name in template.get_names():
mapping[name] = ropevar.get_var(name)
return template.substitute(mapping)
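# A short usage sketch of the wildcard syntax documented in
# RawSimilarFinder.get_matches() (the source snippet here is only
# illustrative): ``${name}`` matches normal names and ``${?name}`` matches
# any expression; matches expose get_region() and get_ast().
#
#   finder = RawSimilarFinder('a = b + c\nx = y + z\n')
#   for match in finder.get_matches('${first} + ${second}'):
#       start, end = match.get_region()   # character offsets in the source
#       node = match.get_ast('first')     # AST node bound to the wildcard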
class _ASTMatcher(object):
def __init__(self, body, pattern, does_match):
"""Searches the given pattern in the body AST.
body is an AST node and pattern can be either an AST node or
a list of ASTs nodes
"""
self.body = body
self.pattern = pattern
self.matches = None
self.ropevar = _RopeVariable()
self.matches_callback = does_match
def find_matches(self):
if self.matches is None:
self.matches = []
ast.call_for_nodes(self.body, self._check_node, recursive=True)
return self.matches
def _check_node(self, node):
if isinstance(self.pattern, list):
self._check_statements(node)
else:
self._check_expression(node)
def _check_expression(self, node):
mapping = {}
if self._match_nodes(self.pattern, node, mapping):
self.matches.append(ExpressionMatch(node, mapping))
def _check_statements(self, node):
for child in ast.get_children(node):
if isinstance(child, (list, tuple)):
self.__check_stmt_list(child)
def __check_stmt_list(self, nodes):
for index in range(len(nodes)):
if len(nodes) - index >= len(self.pattern):
current_stmts = nodes[index:index + len(self.pattern)]
mapping = {}
if self._match_stmts(current_stmts, mapping):
self.matches.append(StatementMatch(current_stmts, mapping))
def _match_nodes(self, expected, node, mapping):
if isinstance(expected, ast.Name):
if self.ropevar.is_var(expected.id):
return self._match_wildcard(expected, node, mapping)
if not isinstance(expected, ast.AST):
return expected == node
if expected.__class__ != node.__class__:
return False
children1 = self._get_children(expected)
children2 = self._get_children(node)
if len(children1) != len(children2):
return False
for child1, child2 in zip(children1, children2):
if isinstance(child1, ast.AST):
if not self._match_nodes(child1, child2, mapping):
return False
elif isinstance(child1, (list, tuple)):
if not isinstance(child2, (list, tuple)) or \
len(child1) != len(child2):
return False
for c1, c2 in zip(child1, child2):
if not self._match_nodes(c1, c2, mapping):
return False
else:
if type(child1) is not type(child2) or child1 != child2:
return False
return True
def _get_children(self, node):
"""Return not `ast.expr_context` children of `node`"""
children = ast.get_children(node)
return [child for child in children
if not isinstance(child, ast.expr_context)]
def _match_stmts(self, current_stmts, mapping):
if len(current_stmts) != len(self.pattern):
return False
for stmt, expected in zip(current_stmts, self.pattern):
if not self._match_nodes(expected, stmt, mapping):
return False
return True
def _match_wildcard(self, node1, node2, mapping):
name = self.ropevar.get_base(node1.id)
if name not in mapping:
if self.matches_callback(node2, name):
mapping[name] = node2
return True
return False
else:
return self._match_nodes(mapping[name], node2, {})
class Match(object):
def __init__(self, mapping):
self.mapping = mapping
def get_region(self):
"""Returns match region"""
def get_ast(self, name):
"""Return the ast node that has matched rope variables"""
return self.mapping.get(name, None)
class ExpressionMatch(Match):
def __init__(self, ast, mapping):
super(ExpressionMatch, self).__init__(mapping)
self.ast = ast
def get_region(self):
return self.ast.region
class StatementMatch(Match):
def __init__(self, ast_list, mapping):
super(StatementMatch, self).__init__(mapping)
self.ast_list = ast_list
def get_region(self):
return self.ast_list[0].region[0], self.ast_list[-1].region[1]
class CodeTemplate(object):
def __init__(self, template):
self.template = template
self._find_names()
def _find_names(self):
self.names = {}
for match in CodeTemplate._get_pattern().finditer(self.template):
if 'name' in match.groupdict() and \
match.group('name') is not None:
start, end = match.span('name')
name = self.template[start + 2:end - 1]
if name not in self.names:
self.names[name] = []
self.names[name].append((start, end))
def get_names(self):
return self.names.keys()
def substitute(self, mapping):
collector = codeanalyze.ChangeCollector(self.template)
for name, occurrences in self.names.items():
for region in occurrences:
collector.add_change(region[0], region[1], mapping[name])
result = collector.get_changed()
if result is None:
return self.template
return result
_match_pattern = None
@classmethod
def _get_pattern(cls):
if cls._match_pattern is None:
pattern = codeanalyze.get_comment_pattern() + '|' + \
codeanalyze.get_string_pattern() + '|' + \
r'(?P<name>\$\{[^\s\$\}]*\})'
cls._match_pattern = re.compile(pattern)
return cls._match_pattern
class _RopeVariable(object):
"""Transform and identify rope inserted wildcards"""
_normal_prefix = '__rope__variable_normal_'
_any_prefix = '__rope__variable_any_'
def get_var(self, name):
if name.startswith('?'):
return self._get_any(name)
else:
return self._get_normal(name)
def is_var(self, name):
return self._is_normal(name) or self._is_var(name)
def get_base(self, name):
if self._is_normal(name):
return name[len(self._normal_prefix):]
if self._is_var(name):
return '?' + name[len(self._any_prefix):]
def _get_normal(self, name):
return self._normal_prefix + name
def _get_any(self, name):
return self._any_prefix + name[1:]
def _is_normal(self, name):
return name.startswith(self._normal_prefix)
def _is_var(self, name):
return name.startswith(self._any_prefix)
def make_pattern(code, variables):
variables = set(variables)
collector = codeanalyze.ChangeCollector(code)
def does_match(node, name):
return isinstance(node, ast.Name) and node.id == name
finder = RawSimilarFinder(code, does_match=does_match)
for variable in variables:
for match in finder.get_matches('${%s}' % variable):
start, end = match.get_region()
collector.add_change(start, end, '${%s}' % variable)
result = collector.get_changed()
return result if result is not None else code
def _pydefined_to_str(pydefined):
address = []
if isinstance(pydefined,
(builtins.BuiltinClass, builtins.BuiltinFunction)):
return '__builtins__.' + pydefined.get_name()
else:
while pydefined.parent is not None:
address.insert(0, pydefined.get_name())
pydefined = pydefined.parent
module_name = libutils.modname(pydefined.resource)
return '.'.join(module_name.split('.') + address)
|
mcepl/rope
|
rope/refactor/similarfinder.py
|
Python
|
lgpl-3.0
| 12,522 | 0.00008 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dimension(Model):
"""Dimension of a resource metric. For e.g. instance specific HTTP requests
for a web app,
where instance name is dimension of the metric HTTP request.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param internal_name:
:type internal_name: str
:param to_be_exported_for_shoebox:
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'internal_name': {'key': 'internalName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(self, name=None, display_name=None, internal_name=None, to_be_exported_for_shoebox=None):
super(Dimension, self).__init__()
self.name = name
self.display_name = display_name
self.internal_name = internal_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
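# Usage sketch (the values are hypothetical): Dimension is a plain data
# holder; _attribute_map only tells the msrest (de)serializer how the Python
# attribute names map to the wire names (e.g. display_name <-> displayName).
#
#   dim = Dimension(name='Instance', display_name='Instance',
#                   internal_name='ServerName', to_be_exported_for_shoebox=True)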
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/dimension.py
|
Python
|
mit
| 1,562 | 0.00128 |
# coding: utf8
# retest.py
# 12/16/2012 jichi
__all__ = 'RegExpTester',
if __name__ == '__main__':
import sys
sys.path.append('..')
import debug
debug.initenv()
import re
from PySide.QtCore import Qt
from Qt5 import QtWidgets
from sakurakit import skqss
from sakurakit.skclass import memoizedproperty
from sakurakit.skdebug import dprint
from sakurakit.sktr import tr_
from mytr import mytr_
import rc
def create_label(text=""): # unicode -> QLabel
ret = QtWidgets.QLabel()
if text:
ret.setText(text + ":")
ret.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
return ret
class _RegExpTester(object):
def __init__(self, q):
self._createUi(q)
self._refresh()
def _createUi(self, q):
#url = "http://en.wikipedia.org/wiki/Regular_expression"
url = "http://www.regular-expressions.info/lookaround.html"
self.textEdit.appendHtml(
"""You can use this tester to play with the regular expression
(<a href="%s">%s</a>) used in the Shared Dictionary.
<br/><br/>
For example, "regular(?= exp)" will match all "regular" before " exp".
""" % (url, url))
self.patternEdit.setText("regular(?= exp)")
self.replaceEdit.setText("HELLO WORLD")
for sig in (
self.textEdit.textChanged,
self.patternEdit.textChanged,
self.replaceEdit.textChanged,
self.regexCheckBox.toggled,
self.icaseCheckBox.toggled,
):
sig.connect(self._refresh)
layout = QtWidgets.QVBoxLayout()
grid = QtWidgets.QGridLayout()
# 0
grid.addWidget(create_label(tr_("Pattern")), 0, 0)
grid.addWidget(self.patternEdit, 0, 1)
# 1
grid.addWidget(create_label(tr_("Translation")))
grid.addWidget(self.replaceEdit)
# 2
grid.addWidget(create_label(tr_("Status")))
grid.addWidget(self.messageEdit)
layout.addLayout(grid)
row = QtWidgets.QHBoxLayout()
row.addWidget(self.regexCheckBox)
row.addWidget(self.icaseCheckBox)
layout.addLayout(row)
splitter = QtWidgets.QSplitter(Qt.Vertical)
splitter.addWidget(self.textEdit)
splitter.addWidget(self.textView)
layout.addWidget(splitter)
q.setLayout(layout)
def _refresh(self):
"""
@param text unicode
@return unicode
"""
text = self.textEdit.toPlainText()
pattern = self.patternEdit.text().strip()
repl = self.replaceEdit.text().strip()
r = self.regexCheckBox.isChecked()
i = self.icaseCheckBox.isChecked()
result = text
try:
if r and i:
rx = re.compile(pattern, re.IGNORECASE|re.DOTALL)
result = rx.sub(repl, text)
elif r:
result = re.sub(pattern, repl, text)
elif i:
pattern = re.escape(pattern)
rx = re.compile(pattern, re.IGNORECASE|re.DOTALL)
result = rx.sub(repl, text)
else:
result = text.replace(pattern, repl)
matched = result != text
message = tr_("Found") if matched else tr_("Not found")
skqss.class_(self.messageEdit, 'default')
self.messageEdit.setText(message)
except Exception, e:
skqss.class_(self.messageEdit, 'error')
message = e.message or "%s" % e
self.messageEdit.setText(message)
self.textView.setHtml(result)
@memoizedproperty
def textView(self):
ret = QtWidgets.QTextBrowser()
skqss.class_(ret, 'texture')
ret.setToolTip(tr_("Target"))
ret.setOpenExternalLinks(True)
#ret.setAcceptRichText(False)
return ret
@memoizedproperty
def textEdit(self):
ret = QtWidgets.QPlainTextEdit()
skqss.class_(ret, 'normal')
ret.setToolTip(tr_("Source"))
return ret
@memoizedproperty
def patternEdit(self):
ret = QtWidgets.QLineEdit()
skqss.class_(ret, 'normal')
ret.setToolTip(mytr_("Matched text"))
ret.setPlaceholderText(ret.toolTip())
return ret
@memoizedproperty
def replaceEdit(self):
ret = QtWidgets.QLineEdit()
skqss.class_(ret, 'normal')
ret.setToolTip(mytr_("Replaced text"))
ret.setPlaceholderText(ret.toolTip())
return ret
@memoizedproperty
def messageEdit(self):
ret = QtWidgets.QLineEdit()
ret.setReadOnly(True)
ret.setToolTip(tr_("Status"))
ret.setPlaceholderText(ret.toolTip())
return ret
@memoizedproperty
def regexCheckBox(self):
ret = QtWidgets.QCheckBox()
ret.setText(tr_("Regular expression"))
ret.setToolTip(tr_("Regular expression"))
ret.setChecked(True)
return ret
@memoizedproperty
def icaseCheckBox(self):
ret = QtWidgets.QCheckBox()
ret.setText(tr_("Ignore case"))
ret.setToolTip(tr_("Ignore case"))
#ret.setChecked(True)
return ret
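# The core of _RegExpTester._refresh is plain `re` usage; for the default
# example pattern the lookahead replaces only "regular" that is directly
# followed by " exp" (illustrative snippet, independent of the GUI):
#
#   import re
#   re.sub("regular(?= exp)", "HELLO WORLD", "a regular expression, regular text")
#   # -> 'a HELLO WORLD expression, regular text'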
# I have to use QMainWindow, or the texture will not work
class RegExpTester(QtWidgets.QDialog):
def __init__(self, parent=None):
WINDOW_FLAGS = Qt.Dialog|Qt.WindowMinMaxButtonsHint
super(RegExpTester, self).__init__(parent, WINDOW_FLAGS)
skqss.class_(self, 'texture')
self.__d = _RegExpTester(self)
self.setWindowTitle(mytr_("Test Regular Expression"))
self.setWindowIcon(rc.icon('window-regexp'))
self.resize(380, 350)
dprint("pass")
if __name__ == '__main__':
a = debug.app()
w = RegExpTester()
w.show()
a.exec_()
# EOF
|
Dangetsu/vnr
|
Frameworks/Sakura/py/apps/reader/dialogs/retest.py
|
Python
|
gpl-3.0
| 5,176 | 0.010626 |
import json
import os
import math
import pytest
def calculate_max_velocity(**kwargs):
"""
    Calculate the maximum velocity the ADCP can measure, including the boat speed, in m/s. This is the
    maximum speed the ADCP is capable of measuring; if the measured speed exceeds this value, the data
    will be incorrect due to rollovers.
    :param CWPBB: Broadband or narrowband water profile mode (0 = narrowband).
    :param CWPBB_LagLength: WP lag length in meters.
    :param CWPBS: Bin size in meters.
    :param BeamAngle: Beam angle in degrees.
    :param SystemFrequency: System frequency in Hz.
    :param SpeedOfSound: Speed of sound in m/s.
    :param CyclesPerElement: Cycles per element.
:return: Maximum velocity the ADCP can read in m/s.
"""
# Get the configuration from the json file
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error getting the configuration file. MaxVelocity", e)
return 0.0
# _CWPBB_LagLength_, _BeamAngle_, _SystemFrequency_, _SpeedOfSound_, _CyclesPerElement_
return _calculate_max_velocity(kwargs.pop('CWPBB', config['DEFAULT']['CWPBB']),
kwargs.pop('CWPBB_LagLength', config['DEFAULT']['CWPBB_LagLength']),
kwargs.pop('CWPBS', config['DEFAULT']['CWPBS']),
kwargs.pop('BeamAngle', config['BeamAngle']),
kwargs.pop('SystemFrequency', config['DEFAULT']['SystemFrequency']),
kwargs.pop('SpeedOfSound', config['SpeedOfSound']),
kwargs.pop('CyclesPerElement', config['CyclesPerElement']))
def _calculate_max_velocity(_CWPBB_, _CWPBB_LagLength_, _CWPBS_, _BeamAngle_, _SystemFrequency_, _SpeedOfSound_, _CyclesPerElement_):
"""
    Calculate the maximum velocity the ADCP can measure, including the boat speed, in m/s. This is the
    maximum speed the ADCP is capable of measuring; if the measured speed exceeds this value, the data
    will be incorrect due to rollovers.
    :param _CWPBB_: Broadband or narrowband water profile mode (0 = narrowband).
    :param _CWPBB_LagLength_: WP lag length in meters.
    :param _CWPBS_: Bin size in meters.
    :param _BeamAngle_: Beam angle in degrees.
    :param _SystemFrequency_: System frequency in Hz.
    :param _SpeedOfSound_: Speed of sound in m/s.
    :param _CyclesPerElement_: Cycles per element.
:return: Maximum velocity the ADCP can read in m/s.
"""
# Get the configuration from the json file
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error getting the configuration file. MaxVelocity", e)
return 0.0
# Prevent divide by 0
if _CyclesPerElement_ == 0:
_CyclesPerElement_ = 1
if _SpeedOfSound_ == 0:
_SpeedOfSound_ = 1490
if _SystemFrequency_ == 0:
_SystemFrequency_ = config["DEFAULT"]["1200000"]["FREQ"]
# Sample Rate
sumSampling = 0.0;
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]: # 1200 khz
sumSampling += config["DEFAULT"]["1200000"]["SAMPLING"] * config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]): # 600 khz
sumSampling += config["DEFAULT"]["600000"]["SAMPLING"] * config["DEFAULT"]["600000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]): # 300 khz
sumSampling += config["DEFAULT"]["300000"]["SAMPLING"] * config["DEFAULT"]["300000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]): # 150 khz
sumSampling += config["DEFAULT"]["150000"]["SAMPLING"] * config["DEFAULT"]["150000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]): # 75 khz
sumSampling += config["DEFAULT"]["75000"]["SAMPLING"] * config["DEFAULT"]["75000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]): # 38 khz
sumSampling += config["DEFAULT"]["38000"]["SAMPLING"] * config["DEFAULT"]["38000"]["CPE"] / _CyclesPerElement_
sampleRate = _SystemFrequency_ * (sumSampling)
# Meters Per Sample
metersPerSample = 0
if sampleRate == 0:
metersPerSample = 0.0
else:
metersPerSample = math.cos(_BeamAngle_ / 180.0 * math.pi) * _SpeedOfSound_ / 2.0 / sampleRate
# Lag Samples
lagSamples = 0
if metersPerSample == 0:
lagSamples = 0
else:
lagSamples = 2 * math.trunc((math.trunc(_CWPBB_LagLength_ / metersPerSample) + 1.0) / 2.0)
# Ua Hz
uaHz = 0.0
if lagSamples == 0:
uaHz = 0.0
else:
uaHz = sampleRate / (2.0 * lagSamples)
# Ua Radial
uaRadial = 0.0
if _SystemFrequency_ == 0:
uaRadial = 0.0
else:
uaRadial = uaHz * _SpeedOfSound_ / (2.0 * _SystemFrequency_)
#### NARROWBAND ####
# Beam Angle Radian
beamAngleRad = _BeamAngle_ / 180.0 * math.pi
# Ta
Ta = 2.0 * _CWPBS_ / _SpeedOfSound_ / math.cos(beamAngleRad)
# L
L = 0.5 * _SpeedOfSound_ * Ta
    # Check for a vertical beam (no beam angle)
if _BeamAngle_ == 0:
return uaRadial
# Narrowband lag length
if _CWPBB_ == 0:
return L / math.sin(_BeamAngle_ / 180.0 * math.pi)
return uaRadial / math.sin(_BeamAngle_ / 180.0 * math.pi)
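# Summary of the computation above for the broadband case:
#
#   metersPerSample = cos(beamAngle) * c / (2 * sampleRate)
#   lagSamples      = 2 * trunc((trunc(lagLength / metersPerSample) + 1) / 2)
#   uaHz            = sampleRate / (2 * lagSamples)
#   uaRadial        = uaHz * c / (2 * f)
#   maxVelocity     = uaRadial / sin(beamAngle)
#
# where c is the speed of sound and f the system frequency; the narrowband
# case (CWPBB == 0) instead returns L / sin(beamAngle) with L = 0.5 * c * Ta,
# and a vertical beam (beam angle 0) returns uaRadial directly.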
# UNIT TEST
# Run with pytext MaxVelocity.py
def test_narrowband():
assert pytest.approx(calculate_max_velocity(CWPBB=0, CWPBB_LagLength=1.0, CWPBS=0.60, BeamAngle=20, SystemFrequency=1152000, SpeedOfSound=1467), 0.001) == 1.867
def test_broadband():
assert pytest.approx(calculate_max_velocity(CWPBB=1, CWPBB_LagLength=1.0, CWPBS=0.60, BeamAngle=20, SystemFrequency=1152000, SpeedOfSound=1490), 0.001) == 0.658
def test_broadband300():
assert pytest.approx(calculate_max_velocity(CWPBB=1, CWPBB_LagLength=1.0, CWPBS=4.0, BeamAngle=20, SystemFrequency=288000.0, SpeedOfSound=1490), 0.001) == 2.669
|
ricorx7/rti_python
|
ADCP/Predictor/MaxVelocity.py
|
Python
|
bsd-3-clause
| 6,635 | 0.004823 |
# encoding: UTF-8
__author__ = 'CHENXY'
from string import join
from sgit_struct import structDict
def processCallBack(line):
orignalLine = line
    line = line.replace(' virtual void ', '')  # strip the invalid content at the start of the line
    line = line.replace('{};\n', '')  # strip the invalid content at the end of the line
content = line.split('(')
    cbName = content[0]  # callback function name
    cbArgs = content[1]  # callback function arguments
if cbArgs[-1] == ' ':
cbArgs = cbArgs.replace(') ', '')
else:
cbArgs = cbArgs.replace(')', '')
    cbArgsList = cbArgs.split(', ')  # split the arguments into a list
cbArgsTypeList = []
cbArgsValueList = []
    for arg in cbArgsList:  # start processing the arguments
content = arg.split(' ')
if len(content) > 1:
if 'struct' not in content:
                cbArgsTypeList.append(content[0])  # argument type list
                cbArgsValueList.append(content[1])  # argument value list
else:
print content
                cbArgsTypeList.append(content[1])  # argument type list
                cbArgsValueList.append(content[2]+content[3])  # argument value list
createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine)
createProcess(cbName, cbArgsTypeList, cbArgsValueList)
    # generate the process section of the .h file
process_line = 'void process' + cbName[2:] + '(Task task);\n'
fheaderprocess.write(process_line)
fheaderprocess.write('\n')
    # generate the "on" callback section of the .h file
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error) {};\n'
elif 'OnRspQry' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data) {};\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error) {};\n'
else:
on_line = ''
fheaderon.write(on_line)
fheaderon.write('\n')
    # generate the wrapper section
createWrap(cbName)
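# Illustration with a hypothetical header line (the actual SgitFtdcMdApi.h
# content may differ): a line such as
#   'virtual void OnRspError(CSgitFtdcRspInfoField *pRspInfo, int nRequestID, bool bIsLast) {};'
# is split into cbName = 'OnRspError',
# cbArgsTypeList = ['CSgitFtdcRspInfoField', 'int', 'bool'] and
# cbArgsValueList = ['*pRspInfo', 'nRequestID', 'bIsLast'], which then drive
# createTask(), createProcess() and createWrap().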
#----------------------------------------------------------------------
def createWrap(cbName):
"""在Python封装段代码中进行处理"""
# 生成.h文件中的on部分
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error)\n'
override_line = '("on' + cbName[2:] + '")(error);\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last)\n'
override_line = '("on' + cbName[2:] + '")(data, error, id, last);\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
override_line = '("on' + cbName[2:] + '")(data);\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error)\n'
override_line = '("on' + cbName[2:] + '")(data, error);\n'
else:
on_line = ''
    if on_line != '':
fwrap.write(on_line)
fwrap.write('{\n')
fwrap.write('\ttry\n')
fwrap.write('\t{\n')
fwrap.write('\t\tthis->get_override'+override_line)
fwrap.write('\t}\n')
fwrap.write('\tcatch (error_already_set const &)\n')
fwrap.write('\t{\n')
fwrap.write('\t\tPyErr_Print();\n')
fwrap.write('\t}\n')
fwrap.write('};\n')
fwrap.write('\n')
def createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine):
    # build a Task object from the callback data and push it onto the task queue
funcline = orignalLine.replace(' virtual void ', 'void ' + apiName + '::')
funcline = funcline.replace('{};', '')
funcline = funcline.replace(' {}', '')
ftask.write(funcline)
ftask.write('{\n')
ftask.write("\tTask task = Task();\n")
ftask.write("\ttask.task_name = " + cbName.upper() + ";\n")
    # #define constants
global define_count
fdefine.write("#define " + cbName.upper() + ' ' + str(define_count) + '\n')
define_count = define_count + 1
    # switch-case block code
fswitch.write("case " + cbName.upper() + ':\n')
fswitch.write("{\n")
fswitch.write("\tthis->" + cbName.replace('On', 'process') + '(task);\n')
fswitch.write("\tbreak;\n")
fswitch.write("}\n")
fswitch.write("\n")
for i, type_ in enumerate(cbArgsTypeList):
if type_ == 'int':
ftask.write("\ttask.task_id = " + cbArgsValueList[i] + ";\n")
elif type_ == 'bool':
ftask.write("\ttask.task_last = " + cbArgsValueList[i] + ";\n")
elif 'CSgitFtdcRspInfoField' in type_:
ftask.write("\n")
ftask.write("\tif (pRspInfo)\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_error = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\tCSgitFtdcRspInfoField empty_error = CSgitFtdcRspInfoField();\n")
ftask.write("\t\tmemset(&empty_error, 0, sizeof(empty_error));\n")
ftask.write("\t\ttask.task_error = empty_error;\n")
ftask.write("\t}\n")
else:
ftask.write("\n")
ftask.write("\tif (" + cbArgsValueList[i][1:] + ")\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_data = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\t" + type_ + " empty_data = " + type_ + "();\n")
ftask.write("\t\tmemset(&empty_data, 0, sizeof(empty_data));\n")
ftask.write("\t\ttask.task_data = empty_data;\n")
ftask.write("\t}\n")
ftask.write("\tthis->task_queue.push(task);\n")
ftask.write("};\n")
ftask.write("\n")
def createProcess(cbName, cbArgsTypeList, cbArgsValueList):
    # pull the task from the queue and convert it into a Python dict
fprocess.write("void " + apiName + '::' + cbName.replace('On', 'process') + '(Task task)' + "\n")
fprocess.write("{\n")
fprocess.write("\tPyLock lock;\n")
onArgsList = []
for i, type_ in enumerate(cbArgsTypeList):
if 'CSgitFtdcRspInfoField' in type_:
fprocess.write("\t"+ type_ + ' task_error = any_cast<' + type_ + '>(task.task_error);\n')
fprocess.write("\t"+ "dict error;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'error["' + key + '"] = task_error.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('error')
elif type_ in structDict:
fprocess.write("\t"+ type_ + ' task_data = any_cast<' + type_ + '>(task.task_data);\n')
fprocess.write("\t"+ "dict data;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'data["' + key + '"] = task_data.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('data')
elif type_ == 'bool':
onArgsList.append('task.task_last')
elif type_ == 'int':
onArgsList.append('task.task_id')
onArgs = join(onArgsList, ', ')
fprocess.write('\tthis->' + cbName.replace('On', 'on') + '(' + onArgs +');\n')
fprocess.write("};\n")
fprocess.write("\n")
def processFunction(line):
    line = line.replace(' virtual int ', '')  # strip the invalid content at the start of the line
    line = line.replace(') = 0;\n', '')  # strip the invalid content at the end of the line
content = line.split('(')
    fcName = content[0]  # function name
    fcArgs = content[1]  # function arguments
fcArgs = fcArgs.replace(')', '')
    fcArgsList = fcArgs.split(',')  # split the arguments into a list
fcArgsTypeList = []
fcArgsValueList = []
    for arg in fcArgsList:  # start processing the arguments
content = arg.split(' ')
if len(content) >= 2:
            fcArgsTypeList.append(content[0])  # argument type list
            fcArgsValueList.append(content[1])  # argument value list
print line
print fcArgs
print fcArgsList
print fcArgsTypeList
if len(fcArgsTypeList)>0 and fcArgsTypeList[0] in structDict:
createFunction(fcName, fcArgsTypeList, fcArgsValueList)
    # generate the request-function section of the .h file
if 'Req' in fcName:
req_line = 'int req' + fcName[3:] + '(dict req, int nRequestID);\n'
fheaderfunction.write(req_line)
fheaderfunction.write('\n')
def createFunction(fcName, fcArgsTypeList, fcArgsValueList):
type_ = fcArgsTypeList[0]
struct = structDict[type_]
ffunction.write('int ' + apiName + '::req' + fcName[3:] + '(dict req, int nRequestID)\n')
ffunction.write('{\n')
ffunction.write('\t' + type_ +' myreq = ' + type_ + '();\n')
ffunction.write('\tmemset(&myreq, 0, sizeof(myreq));\n')
for key, value in struct.items():
if value == 'string':
line = '\tgetString(req, "' + key + '", myreq.' + key + ');\n'
elif value == 'char':
line = '\tgetChar(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'int':
line = '\tgetInt(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'long':
line = '\tgetLong(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'short':
line = '\tgetShort(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'double':
line = '\tgetDouble(req, "' + key + '", &myreq.' + key + ');\n'
ffunction.write(line)
ffunction.write('\tint i = this->api->' + fcName + '(&myreq, nRequestID);\n')
ffunction.write('\treturn i;\n')
ffunction.write('};\n')
ffunction.write('\n')
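# Illustrative note (added for clarity; not part of the original script): for a
# hypothetical request struct containing a single string field "UserID",
# createFunction would emit C++ along these lines into sgit_md_function.cpp:
#     int MdApi::reqUserLogin(dict req, int nRequestID)
#     {
#         CSgitFtdcReqUserLoginField myreq = CSgitFtdcReqUserLoginField();
#         memset(&myreq, 0, sizeof(myreq));
#         getString(req, "UserID", myreq.UserID);
#         int i = this->api->ReqUserLogin(&myreq, nRequestID);
#         return i;
#     };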
#########################################################
apiName = 'MdApi'
fcpp = open('SgitFtdcMdApi.h', 'r')
ftask = open('sgit_md_task.cpp', 'w')
fprocess = open('sgit_md_process.cpp', 'w')
ffunction = open('sgit_md_function.cpp', 'w')
fdefine = open('sgit_md_define.cpp', 'w')
fswitch = open('sgit_md_switch.cpp', 'w')
fheaderprocess = open('sgit_md_header_process.h', 'w')
fheaderon = open('sgit_md_header_on.h', 'w')
fheaderfunction = open('sgit_md_header_function.h', 'w')
fwrap = open('sgit_md_wrap.cpp', 'w')
define_count = 1
for line in fcpp:
if " virtual void On" in line:
print 'callback'
processCallBack(line)
elif " virtual int" in line:
print 'function'
processFunction(line)
fcpp.close()
ftask.close()
fprocess.close()
ffunction.close()
fswitch.close()
fdefine.close()
fheaderprocess.close()
fheaderon.close()
fheaderfunction.close()
fwrap.close()
|
lukesummer/vnpy
|
vn.sgit/pyscript/generate_md_functions.py
|
Python
|
mit
| 11,152 | 0.003639 |
"""Provide neighbour searches using OpenCl GPU-code."""
from pkg_resources import resource_filename
import numpy as np
from . import idtxl_exceptions as ex
try:
import pyopencl as cl
except ImportError as err:
ex.package_missing(err, 'PyOpenCl is not available on this system. Install'
' it using pip or the package manager to use '
'OpenCL-powered CMI estimation.')
def knn_search(pointset, n_dim, knn_k, theiler_t, n_chunks=1, gpuid=0):
"""Interface with OpenCL knn search from Python/IDTxl."""
# check for a data layout in memory as expected by the low level functions
# ndim * [n_points * n_chunks]
if n_dim != pointset.shape[0]:
assert n_dim == pointset.shape[1], ('Given dimension does not match '
'data.')
pointset = pointset.transpose().copy()
print('>>>search GPU: fixed shape of input data')
if pointset.flags['C_CONTIGUOUS'] is not True:
pointset = np.ascontiguousarray(pointset)
print('>>>search GPU: fixed memory layout of input data')
pointdim = pointset.shape[0]
n_points = pointset.shape[1]
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
success = clFindKnn(indexes, distances, pointset.astype('float32'),
pointset.astype('float32'), int(knn_k), int(theiler_t),
int(n_chunks), int(pointdim), int(n_points),
int(gpuid))
if success:
return (indexes, distances)
else:
print("Error in OpenCL knn search!")
return 1
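# --- illustrative usage sketch (added for clarity; not part of the original
# module). Shows how the knn_search wrapper above is typically called; the
# random data, dimensionality and k below are assumptions, and a working
# PyOpenCL/GPU setup is required.
def _example_knn_search():
    pointset = np.random.rand(2, 1000).astype(np.float32)  # n_dim x n_points
    indexes, distances = knn_search(pointset, n_dim=2, knn_k=4, theiler_t=0,
                                    n_chunks=1, gpuid=0)
    return indexes, distances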
def range_search(pointset, n_dim, radius, theiler_t, n_chunks=1, gpuid=0):
"""Interface with OpenCL range search from Python/IDTxl."""
# check for a data layout in memory as expected by the low level functions
# ndim * [n_points * n_chunks]
if n_dim != pointset.shape[0]:
assert n_dim == pointset.shape[1], ('Given dimension does not match '
'data axis.')
pointset = pointset.transpose().copy()
print('>>>search GPU: fixed shape input data')
if pointset.flags['C_CONTIGUOUS'] is not True:
pointset = np.ascontiguousarray(pointset)
print('>>>search GPU: fixed memory layout of input data')
pointdim = pointset.shape[0]
n_points = pointset.shape[1]
pointcount = np.zeros((n_points), dtype=np.int32)
success = clFindRSAll(pointcount, pointset.astype('float32'),
pointset.astype('float32'), radius, theiler_t,
n_chunks, pointdim, n_points, gpuid)
if success:
return pointcount
else:
print("Error in OpenCL range search!")
return 1
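# --- illustrative usage sketch (added for clarity; not part of the original
# module). Same idea for the range search wrapper; the per-point radius value
# is an arbitrary assumption.
def _example_range_search():
    pointset = np.random.rand(2, 1000).astype(np.float32)  # n_dim x n_points
    radius = np.full(1000, 0.1, dtype=np.float32)           # one radius per point
    return range_search(pointset, n_dim=2, radius=radius, theiler_t=0,
                        n_chunks=1, gpuid=0)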
def clFindKnn(h_bf_indexes, h_bf_distances, h_pointset, h_query, kth, thelier,
nchunks, pointdim, signallength, gpuid):
triallength = int(signallength / nchunks)
# print 'Values:', pointdim, triallength, signallength, kth, thelier
'''for platform in cl.get_platforms():
for device in platform.get_devices():
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
print("---------------------------------------------------------------")
print("Device name:", device.name)
print("Device type:", cl.device_type.to_string(device.type))
print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
print("Device max clock speed:", device.max_clock_frequency, 'MHz')
print("Device compute units:", device.max_compute_units)
print("Device max work group size:", device.max_work_group_size)
print("Device max work item sizes:", device.max_work_item_sizes)'''
# Set up OpenCL
# context = cl.create_some_context()
platform = cl.get_platforms()
platf_idx = find_nonempty(platform)
print('platform index chosen is: {0}'.format(platf_idx))
my_gpu_devices = platform[platf_idx].get_devices(device_type=cl.device_type.GPU)
context = cl.Context(devices=my_gpu_devices)
queue = cl.CommandQueue(context, my_gpu_devices[gpuid])
print(("Selected Device: ", my_gpu_devices[gpuid].name))
# Check memory resources.
    usedmem = int((h_query.nbytes + h_pointset.nbytes + h_bf_distances.nbytes + h_bf_indexes.nbytes) // 1024 // 1024)
totalmem = int(my_gpu_devices[gpuid].global_mem_size//1024//1024)
if (totalmem*0.90) < usedmem:
print(("WARNING:", usedmem, "Mb used out of", totalmem,
"Mb. The GPU could run out of memory."))
# Create OpenCL buffers
d_bf_query = cl.Buffer(context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=h_query)
d_bf_pointset = cl.Buffer(context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=h_pointset)
d_bf_distances = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
h_bf_distances.nbytes)
d_bf_indexes = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
h_bf_indexes.nbytes)
# Kernel Launch
kernelLocation = resource_filename(__name__, 'gpuKnnBF_kernel.cl')
kernelsource = open(kernelLocation).read()
program = cl.Program(context, kernelsource).build()
kernelKNNshared = program.kernelKNNshared
kernelKNNshared.set_scalar_arg_dtypes([None, None, None, None, np.int32,
np.int32, np.int32, np.int32,
np.int32, None, None])
# Size of workitems and NDRange
if signallength/nchunks < my_gpu_devices[gpuid].max_work_group_size:
workitems_x = 8
elif my_gpu_devices[gpuid].max_work_group_size < 256:
workitems_x = my_gpu_devices[gpuid].max_work_group_size
else:
workitems_x = 256
if signallength % workitems_x != 0:
temp = int(round(((signallength)/workitems_x), 0) + 1)
else:
temp = int(signallength/workitems_x)
NDRange_x = workitems_x * temp
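    # Worked example (added for clarity): with signallength = 1000 and
    # workitems_x = 256, 1000 % 256 != 0, so temp = round(1000/256) + 1 = 5 and
    # NDRange_x = 256 * 5 = 1280, i.e. the global size is rounded up to a
    # multiple of the work-group size.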
# Local memory for distances and indexes
localmem = (np.dtype(np.float32).itemsize*kth*workitems_x +
np.dtype(np.int32).itemsize*kth*workitems_x) / 1024
if localmem > my_gpu_devices[gpuid].local_mem_size / 1024:
        print('Localmem allocation will fail. {0} kb available, and it needs '
'{1} kb.'.format(my_gpu_devices[gpuid].local_mem_size / 1024,
localmem))
localmem1 = cl.LocalMemory(np.dtype(np.float32).itemsize*kth*workitems_x)
localmem2 = cl.LocalMemory(np.dtype(np.int32).itemsize*kth*workitems_x)
kernelKNNshared(queue, (NDRange_x,), (workitems_x,), d_bf_query,
d_bf_pointset, d_bf_indexes, d_bf_distances, pointdim,
triallength, signallength, kth, thelier, localmem1,
localmem2)
queue.finish()
# Download results
cl.enqueue_copy(queue, h_bf_distances, d_bf_distances)
cl.enqueue_copy(queue, h_bf_indexes, d_bf_indexes)
# Free buffers
d_bf_distances.release()
d_bf_indexes.release()
d_bf_query.release()
d_bf_pointset.release()
return 1
'''
* Range search where radius is a vector with one entry per point in queryset/pointset
'''
def clFindRSAll(h_bf_npointsrange, h_pointset, h_query, h_vecradius, thelier,
nchunks, pointdim, signallength, gpuid):
triallength = int(signallength / nchunks)
# print 'Values:', pointdim, triallength, signallength, kth, thelier
'''for platform in cl.get_platforms():
for device in platform.get_devices():
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
print("---------------------------------------------------------------")
print("Device name:", device.name)
print("Device type:", cl.device_type.to_string(device.type))
print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
print("Device max clock speed:", device.max_clock_frequency, 'MHz')
print("Device compute units:", device.max_compute_units)
print("Device max work group size:", device.max_work_group_size)
print("Device max work item sizes:", device.max_work_item_sizes)'''
# Set up OpenCL
# context = cl.create_some_context()
platform = cl.get_platforms()
platf_idx = find_nonempty(platform)
my_gpu_devices = platform[platf_idx].get_devices(
device_type=cl.device_type.GPU)
context = cl.Context(devices=my_gpu_devices)
queue = cl.CommandQueue(context, my_gpu_devices[gpuid])
print(("Selected Device: ", my_gpu_devices[gpuid].name))
# Check memory resources.
usedmem = int((h_query.nbytes + h_pointset.nbytes + h_vecradius.nbytes +
h_bf_npointsrange.nbytes) // 1024 // 1024)
totalmem = int(my_gpu_devices[gpuid].global_mem_size // 1024 // 1024)
if (totalmem * 0.90) < usedmem:
        print('WARNING: {0} Mb used from a total of {1} Mb. The GPU could run '
              'out of memory.'.format(usedmem, totalmem))
# Create OpenCL buffers
d_bf_query = cl.Buffer(context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=h_query)
d_bf_pointset = cl.Buffer(context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=h_pointset)
d_bf_vecradius = cl.Buffer(context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=h_vecradius)
d_bf_npointsrange = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
h_bf_npointsrange.nbytes)
# Kernel Launch
kernelLocation = resource_filename(__name__, 'gpuKnnBF_kernel.cl')
kernelsource = open(kernelLocation).read()
program = cl.Program(context, kernelsource).build()
kernelBFRSAllshared = program.kernelBFRSAllshared
kernelBFRSAllshared.set_scalar_arg_dtypes([None, None, None, None,
np.int32, np.int32, np.int32,
np.int32, None])
# Size of workitems and NDRange
if signallength/nchunks < my_gpu_devices[gpuid].max_work_group_size:
workitems_x = 8
elif my_gpu_devices[gpuid].max_work_group_size < 256:
workitems_x = my_gpu_devices[gpuid].max_work_group_size
else:
workitems_x = 256
if signallength % workitems_x != 0:
temp = int(round(((signallength)/workitems_x), 0) + 1)
else:
temp = int(signallength/workitems_x)
NDRange_x = workitems_x * temp
# Local memory for rangesearch. Actually not used, better results with
# private memory
localmem = cl.LocalMemory(np.dtype(np.int32).itemsize * workitems_x)
kernelBFRSAllshared(queue, (NDRange_x,), (workitems_x,), d_bf_query,
d_bf_pointset, d_bf_vecradius, d_bf_npointsrange,
pointdim, triallength, signallength, thelier, localmem)
queue.finish()
# Download results
cl.enqueue_copy(queue, h_bf_npointsrange, d_bf_npointsrange)
# Free buffers
d_bf_npointsrange.release()
d_bf_vecradius.release()
d_bf_query.release()
d_bf_pointset.release()
return 1
def find_nonempty(a_list):
"""Find non-empty device in list."""
for idx in range(0, len(a_list)):
if a_list[idx].get_devices(device_type=cl.device_type.GPU) != []:
break
else:
print('found empty platform')
    if a_list[idx].get_devices(device_type=cl.device_type.GPU) == []:
print('all platforms empty')
else:
return idx
|
pwollstadt/IDTxl
|
dev/search_GPU/neighbour_search_opencl_old.py
|
Python
|
gpl-3.0
| 12,468 | 0.000962 |
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface OptionalConstraints1 {
undefined foo(optional byte arg1, byte arg2);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw,
"Should not have thrown on non-optional argument following "
"optional argument.")
parser = parser.reset()
parser.parse("""
interface OptionalConstraints2 {
undefined foo(optional byte arg1 = 1, optional byte arg2 = 2,
optional byte arg3, optional byte arg4 = 4,
optional byte arg5, optional byte arg6 = 9);
};
""")
results = parser.finish()
args = results[0].members[0].signatures()[0][1]
harness.check(len(args), 6, "Should have 6 arguments")
harness.check(args[5].defaultValue.value, 9,
"Should have correct default value")
|
CYBAI/servo
|
components/script/dom/bindings/codegen/parser/tests/test_optional_constraints.py
|
Python
|
mpl-2.0
| 981 | 0.001019 |
import numpy as np
import time
import math
import cv2
from pylab import array, arange, uint8
from PIL import Image
import eventlet
from eventlet import Timeout
import multiprocessing as mp
# Change the path below to point to the directory where you installed the AirSim PythonClient
#sys.path.append('C:/Users/Kjell/Google Drive/MASTER-THESIS/AirSimpy')
from AirSimClient import *
class myAirSimClient(MultirotorClient):
def __init__(self):
self.img1 = None
self.img2 = None
MultirotorClient.__init__(self)
MultirotorClient.confirmConnection(self)
self.enableApiControl(True)
self.armDisarm(True)
self.home_pos = self.getPosition()
self.home_ori = self.getOrientation()
self.z = -6
def straight(self, duration, speed):
pitch, roll, yaw = self.getPitchRollYaw()
vx = math.cos(yaw) * speed
vy = math.sin(yaw) * speed
self.moveByVelocityZ(vx, vy, self.z, duration, DrivetrainType.ForwardOnly)
start = time.time()
return start, duration
def yaw_right(self, duration):
self.rotateByYawRate(30, duration)
start = time.time()
return start, duration
def yaw_left(self, duration):
self.rotateByYawRate(-30, duration)
start = time.time()
return start, duration
def take_action(self, action):
        # check whether the copter is level, because sometimes it climbs for no reason
x = 0
while self.getPosition().z_val < -7.0:
self.moveToZ(-6, 3)
time.sleep(1)
print(self.getPosition().z_val, "and", x)
x = x + 1
if x > 10:
return True
start = time.time()
duration = 0
collided = False
if action == 0:
start, duration = self.straight(1, 4)
while duration > time.time() - start:
if self.getCollisionInfo().has_collided == True:
return True
self.moveByVelocity(0, 0, 0, 1)
self.rotateByYawRate(0, 1)
if action == 1:
start, duration = self.yaw_right(0.8)
while duration > time.time() - start:
if self.getCollisionInfo().has_collided == True:
return True
self.moveByVelocity(0, 0, 0, 1)
self.rotateByYawRate(0, 1)
if action == 2:
start, duration = self.yaw_left(1)
while duration > time.time() - start:
if self.getCollisionInfo().has_collided == True:
return True
self.moveByVelocity(0, 0, 0, 1)
self.rotateByYawRate(0, 1)
return collided
def goal_direction(self, goal, pos):
pitch, roll, yaw = self.getPitchRollYaw()
yaw = math.degrees(yaw)
pos_angle = math.atan2(goal[1] - pos.y_val, goal[0]- pos.x_val)
pos_angle = math.degrees(pos_angle) % 360
track = math.radians(pos_angle - yaw)
return ((math.degrees(track) - 180) % 360) - 180
def getScreenDepthVis(self, track):
responses = self.simGetImages([ImageRequest(0, AirSimImageType.DepthPerspective, True, False)])
img1d = np.array(responses[0].image_data_float, dtype=np.float)
img1d = 255/np.maximum(np.ones(img1d.size), img1d)
img2d = np.reshape(img1d, (responses[0].height, responses[0].width))
image = np.invert(np.array(Image.fromarray(img2d.astype(np.uint8), mode='L')))
factor = 10
maxIntensity = 255.0 # depends on dtype of image data
        # Decrease intensity such that dark pixels become much darker while bright pixels become only slightly darker
newImage1 = (maxIntensity)*(image/maxIntensity)**factor
newImage1 = array(newImage1,dtype=uint8)
small = cv2.resize(newImage1, (0,0), fx=0.39, fy=0.38)
cut = small[20:40,:]
info_section = np.zeros((10,cut.shape[1]),dtype=np.uint8) + 255
info_section[9,:] = 0
line = np.int((((track - -180) * (100 - 0)) / (180 - -180)) + 0)
        if line != 0 and line != 100:
info_section[:,line-1:line+2] = 0
elif line == 0:
info_section[:,0:3] = 0
elif line == 100:
info_section[:,info_section.shape[1]-3:info_section.shape[1]] = 0
total = np.concatenate((info_section, cut), axis=0)
#cv2.imshow("Test", total)
#cv2.waitKey(0)
return total
def AirSim_reset(self):
self.reset()
time.sleep(0.2)
self.enableApiControl(True)
self.armDisarm(True)
time.sleep(1)
self.moveToZ(self.z, 3)
time.sleep(3)
def AirSim_reset_old(self):
reset = False
z = -6.0
while reset != True:
now = self.getPosition()
self.simSetPose(Pose(Vector3r(now.x_val, now.y_val, -30),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True)
now = self.getPosition()
if (now.z_val - (-30)) == 0:
self.simSetPose(Pose(Vector3r(self.home_pos.x_val, self.home_pos.y_val, -30),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True)
now = self.getPosition()
if (now.x_val - self.home_pos.x_val) == 0 and (now.y_val - self.home_pos.y_val) == 0 and (now.z_val - (-30)) == 0 :
self.simSetPose(Pose(Vector3r(self.home_pos.x_val, self.home_pos.y_val, self.home_pos.z_val),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True)
now = self.getPosition()
if (now.x_val - self.home_pos.x_val) == 0 and (now.y_val - self.home_pos.y_val) == 0 and (now.z_val - self.home_pos.z_val) == 0:
reset = True
self.moveByVelocity(0, 0, 0, 1)
time.sleep(1)
self.moveToZ(z, 3)
time.sleep(3)
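# --- illustrative usage sketch (added for clarity; not part of the original
# file). A hypothetical single step: connect, take the "straight" action, then
# compute the heading error towards a made-up goal and grab the depth
# observation. Requires a running AirSim simulator.
def _example_step():
    client = myAirSimClient()
    goal = [50.0, 0.0]                # hypothetical goal position (x, y)
    collided = client.take_action(0)  # 0 = straight, 1 = yaw right, 2 = yaw left
    track = client.goal_direction(goal, client.getPosition())
    observation = client.getScreenDepthVis(track)
    return collided, track, observation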
|
Kjell-K/AirGym
|
gym_airsim/envs/myAirSimClient.py
|
Python
|
mit
| 6,563 | 0.017218 |
# -*- coding: utf-8 -*-
"""Automatic reverse engineering of firmware files for embedded devices."""
from resyst import metadata
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
|
InfectedPacket/resyst
|
resyst/__init__.py
|
Python
|
gpl-2.0
| 262 | 0.003817 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Check whether PDB interfaces that share the same entity pair show the same phospho-binding patterns.
"""
import os
import sys
import urllib
import urllib2
import cPickle as pickle
from multiprocessing import Pool
def get_entityid(p):
pdbid,interface_id,chain1,chain2 = p
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?'
data = {
'pdbids':pdbid,
'customReportColumns':'structureId,entityId',
'service':'wsfile',
'format':'csv',
}
data = urllib.urlencode(data)
req = urllib2.Request(url,data)
response = urllib2.urlopen(req)
lines = response.readlines()
lines = [line.rstrip('\r\n') for line in lines[1:]]
lines = [line for line in lines if line]
lines = [line.split(',') for line in lines]
lines = [[w.strip('"') for w in line] for line in lines]
chain1_id = [line for line in lines if line[1] == chain1][0][2]
    chain2_id = [line for line in lines if line[1] == chain2][0][2]
return pdbid,interface_id,chain1_id,chain2_id
def filter_same_interface(pdb_interfaces):
pdbid_chain = [(p[0],p[1],p[-1][0][0],p[-1][1][0]) for p in pdb_interfaces]
p = Pool(4)
result = p.map(get_entityid,pdbid_chain)
p.close()
pdb_chain_entity = {}
for r in result:
if not (r[0],r[2],r[3]) in pdb_chain_entity.keys():
pdb_chain_entity[(r[0],r[2],r[3])] = [r]
else:
pdb_chain_entity[(r[0],r[2],r[3])].append(r)
with open('same_interface.txt','w') as w_f:
same = []
different = []
for k,v in pdb_chain_entity.iteritems():
if len(v) > 1:
print >> w_f,k
cluster = [p for p in pdb_interfaces if (p[0],p[1]) in [(vi[0],vi[1]) for vi in v]]
cluster_patterns = []
for c in cluster:
bonds = c[6]
phos_interacting_residues = {}
PHOS = ['TPO_ O1P','TPO_ O2P','TPO_ O3P','TPO_ OG1','SEP_ O1P','SEP_ O2P','SEP_ O3P','SEP_ OG ','PTR_ O1P','PTR_ O2P','PTR _O3P','PTR OH ']
for bond in bonds:
bond_type,bond_info = bond
for bondi in bond_info:
res1,res2,dist = bondi
if [p for p in PHOS if res1[-8:] == p]:
res1 = '_'.join(res1.split('_')[:3])
if not res1 in phos_interacting_residues.keys():
phos_interacting_residues[res1] = [res2]
else:
phos_interacting_residues[res1].append(res2)
elif [p for p in PHOS if res2[-8:] == p]:
res2 = '_'.join(res2.split('_')[:3])
if not res2 in phos_interacting_residues.keys():
phos_interacting_residues[res2] = [res1]
else:
phos_interacting_residues[res2].append(res1)
for phos,interacting_residues in phos_interacting_residues.items():
if interacting_residues:
interacting_residues = ['_'.join(r.split('_')[:3]) for r in interacting_residues]
interacting_residues = list(set(interacting_residues))
interacting_residues = [r.split('_')[2] for r in interacting_residues]
interacting_residues = sorted(interacting_residues)
interacting_residues = '_'.join(interacting_residues)
cluster_patterns.append(interacting_residues)
print >> w_f,c[0],c[1],interacting_residues
print cluster_patterns
if len(cluster_patterns) > 1 and len(set(cluster_patterns)) == 1:
same.append(1)
else:
different.append(1)
print 'same',len(same)
print 'different',len(different)
pdb_unique_interface = [(v[0][0],v[0][1]) for k,v in pdb_chain_entity.iteritems()]
pdb_interfaces = [p for p in pdb_interfaces if (p[0],p[1]) in pdb_unique_interface]
print 'after filter same entity',len(pdb_interfaces)
return pdb_interfaces
def filter_non_one_phos(pdb_interfaces):
zero_phos_interfaces = []
one_phos_interfaces = []
more_phos_interfaces = []
for interface in pdb_interfaces:
pdbid,p1,interface_area,p2,p3,p4,bonds = interface[:7]
phos_res = []
for bond in bonds:
bond_type,bond_info = bond
for bondi in bond_info:
res1,res2,dist = bondi
if 'TPO' in res1 or 'SEP' in res1 or 'PTR' in res1:
phos_res.append('_'.join(res1.split('_')[:3]))
if 'TPO' in res2 or 'SEP' in res2 or 'PTR' in res2:
phos_res.append('_'.join(res2.split('_')[:3]))
phos_res = set(phos_res)
if len(phos_res) == 1:
one_phos_interfaces.append(interface)
elif len(phos_res) > 1:
more_phos_interfaces.append(interface)
else:
zero_phos_interfaces.append(interface)
print 'after filter non_one_phos_interfaces',len(one_phos_interfaces)
return one_phos_interfaces
def main():
pdb_interfaces = pickle.load(open(sys.argv[-1]))
pdb_interfaces = [p for p in pdb_interfaces if p[7][0][2].lower() == 'x,y,z' and p[7][1][2].lower() == 'x,y,z']
pdb_interfaces = [p for p in pdb_interfaces if p[7][0][1] == 'Protein' and p[7][1][1] == 'Protein']
pdb_interfaces = filter_non_one_phos(pdb_interfaces)
pdb_interfaces = filter_same_interface(pdb_interfaces)
if __name__ == "__main__":
main()
|
lituan/tools
|
pisa/pisa_same_entity.py
|
Python
|
cc0-1.0
| 5,849 | 0.014361 |
from unittest import TestCase
from webtest import TestApp
from nose.tools import * # noqa
from ..main import app
class TestAUser(TestCase):
def setUp(self):
self.app = TestApp(app)
def tearDown(self):
pass
def test_can_see_homepage(self):
# Goes to homepage
res = self.app.get('/')
assert_equal(res.status_code, 200)
assert_in("All I want to do is", res)
def test_can_see_a_page(self):
# Goes to homepage
res = self.app.get('/')
# Sees titles for a page
assert_in('install Python', res)
# Clicks on a title
res = res.click('install Python 2 and/or 3')
assert_equal(res.status_code, 200)
# Is at the page
# Can see the title
assert_in("Install Python", res)
# And the OS's
assert_in("macosx", res)
# And the content
assert_in('brew install python3', res)
def test_can_see_deps(self):
# Goes to homepage
res = self.app.get('/')
# Clicks on a page
res = res.click('install Python 2 and/or 3')
# The page has dependency
# The dependency titles are listed
assert_in("install-homebrew", res)
# Clicks on the dependency link (full instructions)
res = res.click('full instructions', index=0)
# Is at the dependency's page
assert_in('ruby', res)
|
killtheyak/killtheyak.github.io
|
killtheyak/test/webtest_tests.py
|
Python
|
mit
| 1,412 | 0.000708 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016-Today GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Products - Send to Scales',
'summary': 'Synchronize Odoo database with Scales',
'version': '1.0',
'category': 'Tools',
'description': """
=============================================
Synchronize Odoo database with Bizerba Scales
=============================================
Roadmap / Know Issues
---------------------
* It would be great to split this module into several modules, with the
generic features in a module named 'product_to_scale' and the specific
features for each scale system: 'product_to_scale_bizerba',
'product_to_scale_metler_toledo', etc.
""",
'author': 'GRAP',
'website': 'http://www.grap.coop',
'license': 'AGPL-3',
'depends': [
'product',
],
'data': [
'security/ir_module_category.xml',
'security/res_groups.xml',
'security/ir.model.access.csv',
'data/ir_config_parameter.xml',
'data/ir_cron.xml',
'views/view_product_product.xml',
'views/view_product_uom.xml',
'views/view_product_scale_system.xml',
'views/view_product_scale_group.xml',
'views/view_product_scale_log.xml',
'views/action.xml',
'views/menu.xml',
],
'demo': [
'demo/res_users.xml',
'demo/product_scale_system.xml',
'demo/product_scale_system_product_line.xml',
'demo/product_scale_group.xml',
'demo/product_product.xml',
'demo/decimal_precision.xml',
],
}
|
houssine78/addons
|
product_to_scale_bizerba/__openerp__.py
|
Python
|
agpl-3.0
| 1,686 | 0 |
#!/usr/bin/python
import sys
service_name = "cherrypy-dns"
pidfile_path = "/var/run/" + service_name + ".pid"
port = 8001
if len(sys.argv) > 1 and sys.argv[1] == "service_name": print service_name; sys.exit(0)
if len(sys.argv) > 1 and sys.argv[1] == "pidfile_path": print pidfile_path; sys.exit(0)
if len(sys.argv) > 1 and sys.argv[1] == "port": print port; sys.exit(0)
import cherrypy, os, subprocess
from cherrypy.process.plugins import PIDFile
p = PIDFile(cherrypy.engine, pidfile_path)
p.subscribe()
script_dir = os.path.dirname(os.path.realpath(__file__))
class ScriptRunner:
@cherrypy.expose
def add(self, hostname="", ip=""):
return self.execute_command('bash ' + script_dir + '/add-domain-name.sh "' + hostname + '" "' + ip + '"')
@cherrypy.expose
def remove(self, hostname=None, ip=None):
if not hostname: raise cherrypy.HTTPError(400, "Hostname parameter is required.")
if not ip: raise cherrypy.HTTPError(400, "IP parameter is required.")
return self.execute_command('bash ' + script_dir + '/remove-domain-name.sh "' + hostname + '" "' + ip + '"')
#@cherrypy.expose
#def lookup(self, attr):
# return subprocess.check_output('bash -c "cat $HADOOP_CONF_DIR/slaves"', shell=True)
def execute_command(self, command):
try:
return subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError as e:
raise cherrypy.HTTPError(500, e.cmd + " exited with code " + str(e.returncode) + "\n" + e.output)
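    # Illustrative note (added for clarity): with the configuration below the
    # service listens on 127.0.0.1:8001 under /domain-names/, so a record can
    # be added with, e.g. (hostname and ip values are made up):
    #     curl "http://127.0.0.1:8001/domain-names/add?hostname=box1.example&ip=10.0.0.5"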
conf = {
'global': {
'server.socket_host': '127.0.0.1',
'server.socket_port': port,
'server.thread_pool': 1
}
}
cherrypy.quickstart(ScriptRunner(), '/domain-names/', conf)
|
sukharevd/hadoop-install
|
bin/cherrypy-dns.py
|
Python
|
apache-2.0
| 1,744 | 0.012615 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
from openerp.tools.safe_eval import safe_eval as eval
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.purchase_id,sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
WHERE
p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line)
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
wf_service = netsvc.LocalService("workflow")
for id in unlink_ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
move_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
order_line_ids += [po_line.id for po_line in order.order_line]
move_ids += [po_line.move_dest_id.id for po_line in order.order_line if po_line.move_dest_id]
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('move_id', 'in', move_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given purchase order ids. It can either be in a list or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing picking orders of the given purchase order ids.
'''
mod_obj = self.pool.get('ir.model.data')
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
ctx = eval(action['context'])
ctx.update({
'search_default_purchase_id': ids[0]
})
if pick_ids and len(pick_ids) == 1:
form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
view_id = form_view_ids and form_view_ids[0] or False
action.update({
'views': [],
'view_mode': 'form',
'view_id': view_id,
'res_id': pick_ids[0]
})
action.update({
'context': ctx,
})
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def print_confirm(self,cr,uid,ids,context=None):
print "Confirmed"
def print_double(self,cr,uid,ids,context=None):
print "double Approval"
def print_router(self,cr,uid,ids,context=None):
print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the request for quotation and marks it as sent, so that we can see more easily the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
datas = {
'model': 'purchase.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
for purchase in self.browse(cr, uid, ids, context=context):
self.pool['purchase.order.line'].write(cr, uid, [l.id for l in purchase.order_line], {'state': 'draft'})
wf_service = netsvc.LocalService("workflow")
for p_id in ids:
# Deleting the existing instance of workflow for PO
wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
wf_service.trg_create(uid, 'purchase.order', p_id, cr)
return True
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
pay_acc_id = order.partner_id.property_account_payable.id
journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', order.company_id.id)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
# generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
inv_lines = []
for po_line in order.order_line:
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]}, context=context)
# get invoice data and create invoice
inv_data = {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': pay_acc_id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.pricelist_id.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, inv_lines)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]}, context=context)
res = inv_id
return res
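    # Illustrative note (added for clarity; not part of the original module): a
    # typical caller, e.g. a wizard or the workflow, would use this as
    #     inv_id = self.pool.get('purchase.order').action_invoice_create(cr, uid, [po_id])
    # and then validate the returned draft supplier invoice.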
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
if pick.state not in ('draft','cancel'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('First cancel all receptions related to this purchase order.'))
for pick in purchase.picking_ids:
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel','draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all receptions related to this purchase order.'))
if inv:
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
self.pool['purchase.order.line'].write(cr, uid, [l.id for l in purchase.order_line],
{'state': 'cancel'})
for id in ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return True
def date_to_datetime(self, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 AM - because a time is needed.
        :param str userdate: date string in user time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'partner_id': order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
price_unit = order_line.price_unit
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit
}
def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates pickings and appropriate stock moves for given order lines, then
confirms the moves, makes them available, and confirms the picking.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise
a standard outgoing picking will be created to wrap the stock moves, as returned
by :meth:`~._prepare_order_picking`.
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: list of IDs of pickings used/created for the given order lines (usually just one)
"""
if not picking_id:
picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
todo_moves = []
stock_move = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
for order_line in order_lines:
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
if order_line.move_dest_id and order_line.move_dest_id.state != 'done':
order_line.move_dest_id.write({'location_id': order.location_id.id})
todo_moves.append(move)
stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
return [picking_id]
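    # Illustrative sketch (not part of the original module): how an extending module might
    # partition the stock moves over several pickings by overriding _create_pickings(), as
    # suggested in the docstring above. The class name and the split criterion are hypothetical.
    #
    #   def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
    #       urgent = [l for l in order_lines if l.date_planned == order.minimum_planned_date]
    #       others = [l for l in order_lines if l not in urgent]
    #       picking_ids = super(purchase_order_custom, self)._create_pickings(
    #           cr, uid, order, urgent, picking_id=False, context=context)
    #       if others:
    #           picking_ids += super(purchase_order_custom, self)._create_pickings(
    #               cr, uid, order, others, picking_id=False, context=context)
    #       return picking_ids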
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'partner_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
def do_merge(self, cr, uid, ids, context=None):
"""
        Merge purchase orders of a similar type.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
         * Purchase Orders have the same stock location and the same pricelist
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
        #TOFIX: merged order lines should be unlinked
wf_service = netsvc.LocalService("workflow")
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
# Compute what the new orders should contain
new_orders = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'warehouse_id': porder.warehouse_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
if not porder.origin in order_infos['origin'] and not order_infos['origin'] in porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
for order_line in porder.order_line:
line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
o_line = order_infos['order_line'].setdefault(line_key, {})
if o_line:
# merge the line with an existing line
o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
else:
# append a new "standalone" line
for field in ('product_qty', 'product_uom'):
field_val = getattr(order_line, field)
if isinstance(field_val, browse_record):
field_val = field_val.id
o_line[field] = field_val
o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
# create the new order
neworder_id = self.create(cr, uid, order_data)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
return orders_info
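    # Illustrative note (not part of the original module): do_merge() groups draft orders by the
    # tuple returned from make_key(), e.g. two orders for the same supplier, location and
    # pricelist could both map to the hypothetical key
    #   (('location_id', 12), ('partner_id', 7), ('pricelist_id', 1))
    # and therefore end up in the same merged order; their lines are grouped by name, date,
    # taxes, unit price, product, destination move and analytic account, and matching lines
    # only have their quantities summed (converted through the UoM factor).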
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
            help=' * The \'Draft\' status is set automatically when the purchase order is in draft status. \
                \n* The \'Confirmed\' status is set automatically when the purchase order is confirmed. \
                \n* The \'Done\' status is set automatically when the purchase order is set as done. \
                \n* The \'Cancelled\' status is set automatically when the user cancels the purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, context=None):
procurement_ids_to_cancel = []
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
if line.move_dest_id:
procurement_ids_to_cancel.extend(procurement.id for procurement in line.move_dest_id.procurements)
if procurement_ids_to_cancel:
self.pool['procurement.order'].action_cancel(cr, uid, procurement_ids_to_cancel)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order, as a string in
DEFAULT_SERVER_DATE_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)
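    # Illustrative note (not part of the original module): _get_date_planned() simply shifts the
    # order date by the supplier lead time; with a hypothetical supplierinfo whose delay is 5 days:
    #   >>> self._get_date_planned(cr, uid, supplier_info, '2013-06-01')
    #   datetime.datetime(2013, 6, 6, 0, 0)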
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_supplierinfo = self.pool.get('product.supplierinfo')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
if not name or not uom_id:
# The 'or not uom_id' part of the above condition can be removed in master. See commit message of the rev. introducing this line.
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.date.context_today(self,cr,uid,context=context)
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                if float_compare(min_qty, qty, precision_digits=precision) == 1: # If the supplier's minimal quantity is greater than the quantity entered by the user, use the minimal quantity.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
# - determine price_unit and taxes_id
if pricelist_id:
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
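    # Illustrative note (not part of the original module): the dictionary returned by
    # onchange_product_id() follows the usual onchange convention, roughly
    #   {'value':   {'name': ..., 'product_uom': ..., 'product_qty': ...,
    #                'date_planned': ..., 'price_unit': ..., 'taxes_id': ...},
    #    'domain':  {'product_uom': [('category_id', '=', <uom category id>)]},
    #    'warning': {'title': ..., 'message': ...}}    # only present when something needs attention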
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
purchase_order_line()
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
}
def check_buy(self, cr, uid, ids, context=None):
''' return True if the supply method of the mto product is 'buy'
'''
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
            if procurement.product_id.supply_method != 'buy':
return False
return True
def check_supplier_info(self, cr, uid, ids, context=None):
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id  # Main supplier of the procurement's product.
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
return True
def action_po_assign(self, cr, uid, ids, context=None):
""" This is action which call from workflow to assign purchase order to procurements
@return: True
"""
res = self.make_po(cr, uid, ids, context=context)
res = res.values()
        return len(res) and res[0] or 0 #TO CHECK: why the workflow generates an error if the return value is not an integer
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
        :param procurement: the procurement object generating the purchase order
        :param dict po_vals: field values for the new purchase order (the
                             ``order_line`` field will be overwritten with one
                             single line, as passed in ``line_vals``).
        :param dict line_vals: field values of the single purchase order line that
                               the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
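    # Illustrative note (not part of the original module): both dates above are derived backwards
    # from the procurement deadline; with hypothetical values date_planned = 2013-07-20,
    # company po_lead = 3 and seller_delay = 7:
    #   schedule_date = 2013-07-20 - 3 days = 2013-07-17   (_get_purchase_schedule_date)
    #   order_date    = 2013-07-17 - 7 days = 2013-07-10   (_get_purchase_order_date)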
def _get_warehouse(self, procurement, user_company):
"""
        Return the warehouse containing the procurement stock location (or one of its ancestors).
        If none match, return the first warehouse of the company.
"""
# TODO refactor the domain once we implement the "parent_of" domain operator
# NOTE This method has been copied in the `purchase_requisition` module to ensure
        # retro-compatibility. This code duplication will be deleted in the next stable version.
        # Do not forget to update both versions in case of modification.
company_id = (procurement.company_id or user_company).id
domains = [
[
'&', ('company_id', '=', company_id),
'|', '&', ('lot_stock_id.parent_left', '<', procurement.location_id.parent_left),
('lot_stock_id.parent_right', '>', procurement.location_id.parent_right),
('lot_stock_id', '=', procurement.location_id.id)
],
[('company_id', '=', company_id)]
]
cr, uid = procurement._cr, procurement._uid
context = procurement._context
Warehouse = self.pool['stock.warehouse']
for domain in domains:
ids = Warehouse.search(cr, uid, domain, context=context)
if ids:
return ids[0]
return False
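    # Illustrative note (not part of the original module): the first domain above matches
    # warehouses whose stock location is the procurement location itself or one of its ancestors
    # (via the parent_left/parent_right nested-set columns); only when that search returns nothing
    # does the second, company-wide domain act as a fallback.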
def make_po(self, cr, uid, ids, context=None):
""" Make purchase order from procurement
@return: New created Purchase Orders procurement wise
"""
res = {}
if context is None:
context = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
partner_obj = self.pool.get('res.partner')
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
seq_obj = self.pool.get('ir.sequence')
for procurement in self.browse(cr, uid, ids, context=context):
res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id  # Main supplier of the procurement's product.
seller_qty = procurement.product_id.seller_qty
partner_id = partner.id
address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
pricelist_id = partner.property_product_pricelist_purchase.id
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty,seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
            # Pass partner_id and its language in the context so the purchase order line name is computed in the supplier's language
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner_id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
name = product.partner_ref
if product.description_purchase:
name += '\n'+ product.description_purchase
line_vals = {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'move_dest_id': res_id,
'taxes_id': [(6,0,taxes)],
}
name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner_id,
'location_id': procurement.location_id.id,
'warehouse_id': self._get_warehouse(procurement, company),
'pricelist_id': pricelist_id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
'payment_term_id': partner.property_supplier_payment_term.id or False,
}
res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
return res
def _product_virtual_get(self, cr, uid, order_point):
procurement = order_point.procurement_id
if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
return None
return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None):
if mail.model == 'purchase.order':
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
}
_defaults = {
'purchase_ok': 1,
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', context['default_res_id'], 'send_rfq', cr)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
wf_service = netsvc.LocalService("workflow")
for order in purchase_order_obj.browse(cr, user_id, po_ids, context=context):
# Signal purchase order workflow that an invoice has been validated.
invoiced = []
shipped = True
# for invoice method manual or order, don't care about shipping state
            # for invoices based on incoming shipments, beware of partial deliveries
if (order.invoice_method == 'picking' and
not all(picking.invoice_state in ['invoiced'] for picking in order.picking_ids)):
shipped = False
for po_line in order.order_line:
if (po_line.invoice_lines and
all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
invoiced.append(po_line.id)
if invoiced and shipped:
self.pool['purchase.order.line'].write(cr, user_id, invoiced, {'invoiced': True})
wf_service.trg_write(user_id, 'purchase.order', order.id, cr)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
grap/OCB
|
addons/purchase/purchase.py
|
Python
|
agpl-3.0
| 71,343 | 0.007639 |
import os
from jedi._compatibility import FileNotFoundError, force_unicode, scandir
from jedi.api import classes
from jedi.api.strings import StringName, get_quote_ending
from jedi.api.helpers import match
from jedi.inference.helpers import get_str_or_none
class PathName(StringName):
api_type = u'path'
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
like_name, signatures_callback, code_lines, position, fuzzy):
# First we want to find out what can actually be changed as a name.
like_name_length = len(os.path.basename(string))
addition = _get_string_additions(module_context, start_leaf)
if string.startswith('~'):
string = os.path.expanduser(string)
if addition is None:
return
string = addition + string
# Here we use basename again, because if strings are added like
# `'foo' + 'bar`, it should complete to `foobar/`.
must_start_with = os.path.basename(string)
string = os.path.dirname(string)
sigs = signatures_callback(*position)
is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
if is_in_os_path_join:
to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
if to_be_added is None:
is_in_os_path_join = False
else:
string = to_be_added + string
base_path = os.path.join(inference_state.project.path, string)
try:
listed = sorted(scandir(base_path), key=lambda e: e.name)
# OSError: [Errno 36] File name too long: '...'
except (FileNotFoundError, OSError):
return
quote_ending = get_quote_ending(quote, code_lines, position)
for entry in listed:
name = entry.name
if match(name, must_start_with, fuzzy=fuzzy):
if is_in_os_path_join or not entry.is_dir():
name += quote_ending
else:
name += os.path.sep
yield classes.Completion(
inference_state,
PathName(inference_state, name[len(must_start_with) - like_name_length:]),
stack=None,
like_name_length=like_name_length,
is_fuzzy=fuzzy,
)
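# Illustrative note (not part of the original module): given the hypothetical source
#   open('src/uti
# complete_file_name() lists the entries of <project>/src, keeps those matching 'uti'
# (via match(), optionally fuzzy) and yields completions such as utils.py followed by the
# closing quote; directories get os.path.sep appended instead so completion can continue
# into them.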
def _get_string_additions(module_context, start_leaf):
def iterate_nodes():
node = addition.parent
was_addition = True
for child_node in reversed(node.children[:node.children.index(addition)]):
if was_addition:
was_addition = False
yield child_node
continue
if child_node != '+':
break
was_addition = True
addition = start_leaf.get_previous_leaf()
if addition != '+':
return ''
context = module_context.create_context(start_leaf)
return _add_strings(context, reversed(list(iterate_nodes())))
def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
values = context.infer_node(child_node)
if len(values) != 1:
return None
c, = values
s = get_str_or_none(c)
if s is None:
return None
if not first and add_slash:
string += os.path.sep
string += force_unicode(s)
first = False
return string
def _add_os_path_join(module_context, start_leaf, bracket_start):
def check(maybe_bracket, nodes):
if maybe_bracket.start_pos != bracket_start:
return None
if not nodes:
return ''
context = module_context.create_context(nodes[0])
return _add_strings(context, nodes, add_slash=True) or ''
if start_leaf.type == 'error_leaf':
# Unfinished string literal, like `join('`
value_node = start_leaf.parent
index = value_node.children.index(start_leaf)
if index > 0:
error_node = value_node.children[index - 1]
if error_node.type == 'error_node' and len(error_node.children) >= 2:
index = -2
if error_node.children[-1].type == 'arglist':
arglist_nodes = error_node.children[-1].children
index -= 1
else:
arglist_nodes = []
return check(error_node.children[index + 1], arglist_nodes[::2])
return None
# Maybe an arglist or some weird error case. Therefore checked below.
searched_node_child = start_leaf
while searched_node_child.parent is not None \
and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
searched_node_child = searched_node_child.parent
if searched_node_child.get_first_leaf() is not start_leaf:
return None
searched_node = searched_node_child.parent
if searched_node is None:
return None
index = searched_node.children.index(searched_node_child)
arglist_nodes = searched_node.children[:index]
if searched_node.type == 'arglist':
trailer = searched_node.parent
if trailer.type == 'error_node':
trailer_index = trailer.children.index(searched_node)
assert trailer_index >= 2
assert trailer.children[trailer_index - 1] == '('
return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
elif trailer.type == 'trailer':
return check(trailer.children[0], arglist_nodes[::2])
elif searched_node.type == 'trailer':
return check(searched_node.children[0], [])
elif searched_node.type == 'error_node':
# Stuff like `join(""`
return check(arglist_nodes[-1], [])
|
sserrot/champion_relationships
|
venv/Lib/site-packages/jedi/api/file_name.py
|
Python
|
mit
| 5,707 | 0.001752 |
import re
from unicodedata import normalize
from datetime import datetime
from django.shortcuts import render
from django.http import HttpResponse as response
from django.http import HttpResponseRedirect as redirect
from django.conf import settings
from models import Spreadable,Image,Playable,Spreaded,Product
from socialize.models import Profile
from socialize.stream import StreamService,Dropbox
from efforia.main import Efforia
from feedly.feed import Activity
def sp(x): return '!!' in x[1]
def pl(x): return '>!' in x[1]
def im(x): return '%!' in x[1]
class Application(Activity):
def __init__(self,user,app):
Activity.__init__(self,user,app)
def deadline(self):
playables = Playable.objects.filter(user=self.user)
for play in playables:
if not play.token and not play.visual: play.delete()
def relations(self,feed):
excludes = []; rels = Spreaded.objects.filter(user=self.user)
excludes.extend([(r.spreaded,'!!') for r in rels])
excludes.extend([(r.spread,r.token()) for r in rels])
for v in rels.values('spread').distinct():
t = rels.filter(spread=v['spread'],user=self.user)
if len(t) > 0: feed.append(t[len(t)-1])
return excludes
def duplicates(self,exclude,feed):
for o in self.objects:
objects = globals()[o].objects.filter(user=self.user)
if 'Spreadable' in o: e = filter(sp,exclude)
elif 'Playable' in o: e = filter(pl,exclude)
elif 'Image' in o: e = filter(im,exclude)
excludes = [x[0] for x in e]
feed.extend(objects.exclude(id__in=excludes))
class Images(Efforia):
def __init__(self): pass
def view_image(self,request):
return render(request,'image.jade',{'static_url':settings.STATIC_URL},content_type='text/html')
def upload_image(self,request):
photo = request.FILES['Filedata'].read()
dropbox = Dropbox()
link = dropbox.upload_and_share(photo)
res = self.url_request(link)
url = '%s?dl=1' % res
return url
def create_image(self,request):
u = self.current_user(request)
if 'description' in request.POST:
image = list(Image.objects.filter(user=u))[-1:][0]
descr = request.POST['description']
image.description = descr
image.save()
return response('Description added to image successfully')
i = Image(link=self.upload_image(request),user=u)
i.save()
return response('Image created successfully')
class Spreads(Efforia):
def __init__(self): pass
def start_spreadapp(self,request):
return render(request,'spreadapp.jade',{'static_url':settings.STATIC_URL},content_type='text/html')
def view_spread(self,request):
return render(request,"spread.jade",{},content_type='text/html')
def create_spread(self,request):
u = self.current_user(request)
name = u.first_name.lower()
text = unicode('%s' % (request.POST['content']))
post = Spreadable(user=u,content=text,name='!'+name)
post.save()
self.accumulate_points(1,request)
return response('Spreadable created successfully')
class Uploads(Efforia):
def __init__(self): pass
def view_upload(self,request):
return render(request,'content.jade',{'static_url':settings.STATIC_URL},content_type='text/html')
def set_thumbnail(self,request):
u = self.current_user(request)
service = StreamService()
token = request.GET['id']
access_token = u.profile.google_token
thumbnail = service.video_thumbnail(token,access_token)
play = Playable.objects.filter(user=u).latest('date')
play.visual = thumbnail
play.token = token
play.save()
self.accumulate_points(1,request)
r = redirect('/')
r.set_cookie('token',token)
return r
def view_content(self,request):
u = self.current_user(request)
content = title = ''
for k,v in request.REQUEST.iteritems():
if 'title' in k: title = v
elif 'content' in k: content = v
elif 'status' in k:
return self.set_thumbnail(request)
try:
url,token = self.parse_upload(request,title,content)
return render(request,'video.jade',{'static_url':settings.STATIC_URL,
'hostname':request.get_host(),
'url':url,'token':token},content_type='text/html')
except Exception: return response('Invalid file for uploading')
def parse_upload(self,request,title,content):
keys = ','; keywords = content.split(' ')
        keywords = [normalize('NFKD', k.decode('utf-8')).encode('ASCII', 'ignore') for k in keywords]
keys = keys.join(keywords)
playable = Playable(user=self.current_user(request),name='>'+title,description=content)
playable.save()
service = StreamService()
access_token = self.current_user(request).profile.google_token
return service.video_entry(title,content,keys,access_token)
def media_chooser(self,request):
return render(request,'chooser.jade')
|
efforia/eos-dashboard
|
pandora-hub/app.py
|
Python
|
lgpl-3.0
| 5,312 | 0.022779 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
if os.environ.get('DJANGO_SETTINGS_MODULE') is None:
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.base'
# When using an on-disk database for the test suite,
# Django asks us if we want to delete the database.
# We do.
if 'test' in sys.argv[0:3]:
# Catch warnings in tests and redirect them to be handled by the test runner. Otherwise build results are too
# noisy to be of much use.
import logging
logging.captureWarnings(True)
sys.argv.append('--noinput')
sys.argv.append('--logging-clear-handlers')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
edx/edx-ora2
|
manage.py
|
Python
|
agpl-3.0
| 762 | 0.001312 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("migrations", "0002_second")]
operations = [
migrations.CreateModel(
"OtherAuthor",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
]
|
edmorley/django
|
tests/migrations2/test_migrations_2/0001_initial.py
|
Python
|
bsd-3-clause
| 562 | 0 |
from requests.auth import HTTPBasicAuth
def apply_updates(doc, update_dict):
# updates the doc with items from the dict
# returns whether or not any updates were made
should_save = False
for key, value in update_dict.items():
if getattr(doc, key, None) != value:
setattr(doc, key, value)
should_save = True
return should_save
class EndpointMixin(object):
@classmethod
def from_config(cls, config):
return cls(config.url, config.username, config.password)
def _auth(self):
return HTTPBasicAuth(self.username, self.password)
def _urlcombine(self, base, target):
return '{base}{target}'.format(base=base, target=target)
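# Illustrative sketch (not part of the original module): a hypothetical endpoint built on
# EndpointMixin, showing how from_config(), _auth() and _urlcombine() are meant to be combined.
#
#   import requests
#
#   class StatusEndpoint(EndpointMixin):
#       def __init__(self, url, username, password):
#           self.url, self.username, self.password = url, username, password
#
#       def fetch_status(self):
#           return requests.get(self._urlcombine(self.url, '/status'), auth=self._auth())
#
#   endpoint = StatusEndpoint.from_config(config)   # config exposes url/username/password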
|
puttarajubr/commcare-hq
|
custom/api/utils.py
|
Python
|
bsd-3-clause
| 714 | 0.001401 |
# encoding: utf-8
import csv
from urllib2 import HTTPError
import django
from django.db import transaction
import urllib2
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from openfonacide.matcher import Matcher
from openfonacide.models import Importacion, RegistroImportacion, Adjudicacion, Planificacion, Temporal, Institucion
__author__ = 'Diego Ramírez'
def registrar_ultima_importacion(importacion=None, md5_sum=None):
registro = RegistroImportacion(ultimo=True, ultimo_md5=md5_sum, importacion=importacion, fecha=datetime.now())
registro.save()
@transaction.atomic
def do_import(lines_list=None, tipo=None):
header_flag = True
header = list()
reader = csv.reader(lines_list.splitlines())
for row in reader:
if header_flag:
for column in row:
header.append(column)
header_flag = False
else:
args = dict()
for element in range(len(row)):
# setattr(a, header[i], row[i])
args[header[element]] = row[element]
if tipo is None:
return
if tipo == u"planificacion":
# Planificación logic
try:
Planificacion.objects.update_or_create(id=args['id'], anio=args['anio'], defaults=args)
except Exception as e:
continue
if tipo == u"adjudicación":
# adjudicación logic
try:
Adjudicacion.objects.update_or_create(id=args['id'], defaults=args)
except Exception as e:
continue
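# Illustrative note (not part of the original module): do_import() expects the downloaded CSV to
# start with a header row whose column names match the model fields, e.g. for the 'planificacion'
# type something like (hypothetical columns beyond id/anio):
#   id,anio,nivel,monto
#   123,2015,Inicial,1500000
# Each data row becomes a kwargs dict passed to update_or_create(), so re-running an import
# updates existing rows instead of duplicating them.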
def read_url_file(url=None):
try:
_file = urllib2.urlopen(url)
data = _file.read()
_file.close()
return data
except HTTPError as e:
# apply log
print e.message
except:
print "We don't know exactly what happened"
return ""
class Command(BaseCommand):
def handle(self, *args, **options):
tareas = Importacion.objects.filter(activo=True)
worked = False
need_match = False
for t in tareas:
md5 = read_url_file(t.md5_url)
try:
registro = t.registroimportacion_set.get(ultimo=True)
if md5 == registro.ultimo_md5:
return
do_import(read_url_file(t.url), t.tipo)
registro.ultimo = False
registro.save()
registrar_ultima_importacion(importacion=t, md5_sum=md5)
except ObjectDoesNotExist:
do_import(read_url_file(t.url), t.tipo)
registrar_ultima_importacion(importacion=t, md5_sum=md5)
worked = True
if t.tipo == u'planificacion':
need_match = True
if worked and need_match:
m = Matcher(institucion_manager=Institucion.objects, planificacion_manager=Planificacion.objects,
temporal_manager=Temporal.objects
)
m.do_match()
# This Section is used just for debugging
if __name__ == "__main__":
django.setup()
c = Command()
c.handle()
|
nemesiscodex/openfonacide
|
openfonacide/management/commands/actualizar_datasets.py
|
Python
|
lgpl-3.0
| 3,284 | 0.002134 |
from django.http import HttpResponse
import pymongo
import MySQLdb
from course_dashboard_api.v2.dbv import *
sql_user = MYSQL_USER
sql_pswd = MYSQL_PSWD
mysql_db = MYSQL_DB
mongo_db = MONGO_DB
""" Description: Function to get quiz level grades of all students in a particular course.
Input Parameters:
course_name: name of the course for which grades are required (ex. CT101.1x)
course_run: run of the course for which grades are required (ex. 2016-17)
course_organization: organization of the course for which grades are required (ex. IITBombayX)
Output Type : List of grades of all students enrolled in the course
Author: Jay Goswami
Date of creation: 30 May 2017
"""
def get_all_student_grades(course_name, course_run, course_organization):
student_count = 1
try:
mongo_client = pymongo.MongoClient() # Establishing MongoDB connection
except:
print "MongoDB connection not established"
return HttpResponse("MongoDB connection not established") # MongoDB could not be connected
try:
db_mysql = MySQLdb.connect(user=sql_user, passwd=sql_pswd, db=mysql_db) # Establishing MySQL connection
except:
print "MySQL connection not established"
return HttpResponse("MySQL connection not established") # MySQL could not be connected
full_grade_list = []
problem_query = "Select grade,max_grade from courseware_studentmodule where max_grade is not null and grade is not null and student_id=%s and binary module_id=%s"
users_query = "select a.id, a.username, a.email, b.course_id from auth_user as a, student_courseenrollment as b where a.id=b.user_id"
# Query to retrieve the details of all students who have enrolled in any course
grading_policy = get_grading_policy(course_name, course_run, course_organization)
try:
grading_list = grading_policy['grader']
grade_cutoffs = grading_policy['grade_cutoffs']
except:
return None
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(users_query)
student_course_list = mysql_cursor.fetchall()
for student_course_pair in student_course_list:
found_course_id = student_course_pair[3].split(':')
if len(found_course_id)==1:
continue
course_id = course_organization + '+' + course_name + '+' + course_run
if course_id == found_course_id[1]: # Comparing course_id to get students enrolled in a particular course
student_grades = get_student_course_grades(str(student_course_pair[2]), str(student_course_pair[1]),
int(student_course_pair[0]), course_name, course_run, course_organization, student_count, db_mysql,
mongo_client, problem_query, grading_list, grade_cutoffs) # Calling function to get quiz grades of each student
student_count += 1 # Increment the count of students
full_grade_list.append(student_grades) # Appending student's grade list
mongo_client.close() # Closing MongoDB connection
db_mysql.close() # Closing MySQL connection
grade_list = {}
grade_list['course_name'] = course_name
grade_list['course_organization'] = course_organization
grade_list['course_run'] = course_run
grade_list['students'] = full_grade_list
return grade_list
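# Illustrative note (not part of the original module): the dictionary returned above has roughly
# the following shape (all values are hypothetical):
#   {'course_name': 'CT101.1x', 'course_organization': 'IITBombayX', 'course_run': '2016-17',
#    'students': [{'id': 12, 'name': 'jdoe', 'email': 'jdoe@example.com',
#                  'total_score': '0.82', 'grade': 'Pass',
#                  'units': {'HW1': '0.9', 'HW2': '0.75', 'HWAvg': '0.825'}}, ...]}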
""" Description: Function to get quiz level grades of each student in a particular course.This function is called by function get_all_student_grades().
Input Parameters:
email: Email id of student passed to this function by get_all_student_grades()
student_name: Username of student passed to this function by get_all_student_grades()
student_id: User ID of a student
course_id: ID of course for which grades are to be calculated
course_run: run of the course for which grades are to be calculated
course_organization: organization of the course for which grades are to be calculated
count: Number of students in the course including current student
db_mysql: MySQL Database connection object
mongo_client: MongoDB connection object
problem_query: Query passed to this function by get_all_student_grades()
Output Type: List
Author: Jay Goswami
Date of Creation: 30 May 2017
"""
def get_student_course_grades(email, student_name, student_id, course_id, course_run, course_organization, count, db_mysql, mongo_client, problem_query, grading_list, grade_cutoffs):
highchart_list = [] # List to be returned for highcharts
highchart_list2 = [] # List to be returned for highcharts
highchart_list3 = {}
highchart_list.append('total_score')
highchart_list3['id'] = student_id
highchart_list3['name'] = student_name
highchart_list3['email'] = email
db_mongo = mongo_client[mongo_db] # Getting the object for edxapp database of MongoDB
    mongo_cur = db_mongo.modulestore.active_versions.find({"course":course_id, "run":course_run, "org":course_organization}) # Find the active_versions document for this course
i = mongo_cur[0]
active_version = mongo_cur[0]
version = i["versions"]["published-branch"]
try:
stud_avg_tot = 0
completed = True
for j in range(len(grading_list)): # iterating over the formats
best_score_list = [] # This list will store the final scores for the particular format
drop_count = grading_list[j]['drop_count'] # Gives number of droppable sections for that problem
type = grading_list[j]['type'] # Gives the type of the format i.e. Quiz, Final Exam etc.
short_label = grading_list[j]['short_label']
weight = grading_list[j]['weight'] # Gives the weights of the formats
min_count = grading_list[j]['min_count'] # Gives the minimum number of sections of that type present in the course
mongo_cur2 = db_mongo.modulestore.structures.find({'_id': version})
blocks = mongo_cur2[0]['blocks']
mongo_cur2 = []
for block in blocks:
if 'format' in block['fields'] and block['fields']['format']==type and block['fields']['graded']==True:
mongo_cur2.append(block)
count_doc = len(mongo_cur2)
# Query to find the different sequentials having the format 'type'
sequential_coun = 0 # intializing sequential count to zero
for k in mongo_cur2:
sequential_coun += 1
avg_score_sequential = 0
sum_avg_prob_score = 0
sum_prob_score_obt = 0
sum_tot_prob_score = 0
coun_prob = 0 # Initializing problem count as zero
list2 = k['fields'][
'children'] # Getting the children list of the sequential, this will consist of vertical ids
for m in range(len(list2)): # Iterating over the list of vertical ids
child_id = list2[m] # Getting the vertical id
vertical_id = child_id[1]
mongo_cur3 = []
for block in blocks:
if block['block_id']==vertical_id: # query to get the vertical document with the _id.name as vertical id
mongo_cur3.append(block)
n = mongo_cur3[0]
                    list3 = n['fields']['children'] # getting the children array for this vertical, consisting of the list of component ids
for o in range(len(list3)): # Iterating over the list of component ids
comp_id = list3[o] # Getting the component id
component_id = comp_id[1]
mongo_cur4 = []
for block in blocks:
if block['block_id'] == component_id:
mongo_cur4.append(block)
# query to get the problem document with the _id.name as problem id and category as problem.
try:
p = mongo_cur4[0]
if p['block_type']!='library_content':
i = active_version
problem_id = 'block-v1:' + i['org'] + '+' + i['course'] + '+' + i['run'] + '+type@' + comp_id[0] + '+block@' + component_id
mysql_cur = db_mysql.cursor() # Getting MySQL cursor object
# Query to get the grades for the student for that particular problem
mysql_cur.execute(problem_query, (str(student_id), str(problem_id),)) # Executing query
row = mysql_cur.fetchone() # Fetching the row returned, only one row shall be returned
try:
grade = row[0] # Getting the grade of the student for this problem
maxgrade = row[1] # Getting the max_grade of the student for this problem
try:
weight_of_problem = p['fields']['weight'] # Getting the weight of the problem
except:
weight_of_problem=maxgrade #If no weight is defined, weight=maxgrade
score_obt = grade * weight_of_problem / maxgrade # Weighted score obtained for this problem
tot_score = weight_of_problem # Weighted total score for this problem
sum_prob_score_obt += score_obt
sum_tot_prob_score += tot_score
except:
try:
weight_of_problem=p['fields']['weight']
except:
weight_of_problem=0 #If weight is not defined and the problem has not been attempted
sum_tot_prob_score+=weight_of_problem
else:
list3 = p['fields'][
                            'children'] # getting the children array for this vertical, consisting of the list of component ids
for o in range(len(list3)): # Iterating over the list of component ids
comp_id = list3[o] # Getting the component id
component_id = comp_id[1]
mongo_cur4 = []
for block in blocks:
if block['block_id'] == component_id:
mongo_cur4.append(block)
try:
p = mongo_cur4[0]
if p['block_type'] == 'problem':
i = active_version
problem_id = 'block-v1:' + i['org'] + '+' + i['course'] + '+' + i[
'run'] + '+type@' + comp_id[0] + '+block@' + component_id
mysql_cur = db_mysql.cursor() # Getting MySQL cursor object
# Query to get the grades for the student for that particular problem
mysql_cur.execute(problem_query,
(str(student_id), str(problem_id),)) # Executing query
row = mysql_cur.fetchone() # Fetching the row returned, only one row shall be returned
try:
grade = row[0] # Getting the grade of the student for this problem
maxgrade = row[1] # Getting the max_grade of the student for this problem
try:
weight_of_problem = p['fields'][
'weight'] # Getting the weight of the problem
except:
weight_of_problem = maxgrade # If no weight is defined, weight=maxgrade
score_obt = grade * weight_of_problem / maxgrade # Weighted score obtained for this problem
tot_score = weight_of_problem # Weighted total score for this problem
sum_prob_score_obt += score_obt
sum_tot_prob_score += tot_score
except:
try:
weight_of_problem = p['fields']['weight']
except:
weight_of_problem = 0 # If weight is not defined and the problem has not been attempted
sum_tot_prob_score += weight_of_problem
except:
continue
except:
continue
if sum_tot_prob_score > 0:
avg_score_sequential = sum_prob_score_obt / sum_tot_prob_score # Calculating avg score of this sequential
else:
avg_score_sequential = 0
if count == 1:
if count_doc > 1:
highchart_list.append(str(short_label) + str(sequential_coun))
else:
highchart_list.append(str(short_label))
highchart_list2.append(str(avg_score_sequential))
else:
if count_doc > 1:
highchart_list.append(str(short_label) + str(sequential_coun))
else:
highchart_list.append(str(short_label))
highchart_list2.append(str(avg_score_sequential))
best_score_list.append(avg_score_sequential) # Adding the sequential score to best_score_list
best_score_list.sort(reverse=True) # Sorting the scores list for that format in descending order
sum_score_format = 0 # Initializing sum score of format to 0
if sequential_coun<min_count-drop_count:
completed = False
for q in range(sequential_coun - drop_count): # Getting the sum of best scores in the format
sum_score_format += best_score_list[q]
if sequential_coun - drop_count > 0:
avg_score_format = sum_score_format / (
sequential_coun - drop_count) # Getting average score of the format
if sequential_coun - drop_count > 1:
if count == 1:
highchart_list.append(str(short_label) + 'Avg')
highchart_list2.append(str(avg_score_format))
else:
highchart_list.append(str(short_label) + 'Avg')
highchart_list2.append(str(avg_score_format))
stud_avg_tot += avg_score_format * weight
else:
avg_score_format = 0
# Getting total student average score
if not completed:
highchart_list2.append(None)
elif len(highchart_list2) > 0:
if count == 1:
highchart_list2.append(str(stud_avg_tot))
else:
highchart_list2.append(str(stud_avg_tot))
else:
highchart_list2 = [None, ]
except:
highchart_list2=[None,]
highchart_list3['total_score'] = highchart_list2[(len(highchart_list2)-1)]
grade = ''
if highchart_list3['total_score'] != None:
prev = -1
for grades in grade_cutoffs.keys():
if float(highchart_list3['total_score'])>=grade_cutoffs[grades] and grade_cutoffs[grades]>prev:
grade = grades
prev = grade_cutoffs[grades]
if grade=='':
grade='Fail'
highchart_list3['grade'] = grade
h_list = {}
for k in range((len(highchart_list2) - 1)):
h_list[highchart_list[k+1]] = highchart_list2[k]
highchart_list3['units'] = h_list
return highchart_list3
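# Illustrative note (not part of the original module): the aggregation above boils down to, for
# each graded format (Quiz, Final Exam, ...):
#   section score  = sum(grade_i * weight_i / max_grade_i) / sum(weight_i)
#   format average = mean of the best (n_sections - drop_count) section scores
#   total score    = sum(format average * format weight) over all formats
# and the letter grade is the highest entry of grade_cutoffs that the total score still reaches.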
""" Description: Function to get grading policy of a course called by get_student_course_grade()
Input Parameters:
course_name: name of the course for which grading policy is required
course_run: run of the course for which grading policy is required
course_organization: organization of the course for which grading policy is required
Output Type: JSON Dictionary with course details and grading policy and cutoffs
Author: Jay Goswami
Date of Creation: 31 May 2017
"""
def get_grading_policy(course_name, course_run, course_organization):
try:
client = pymongo.MongoClient() # Establishing MongoDB connection
except:
print "MongoDB connection not established"
return HttpResponse("MongoDB connection not established") # MongoDB could not be connected
db_mongo = client[mongo_db]
mongo_cursor = db_mongo.modulestore.active_versions.find({"course": course_name, "run": course_run,
"org": course_organization})
grading_policy = {}
course_id = course_organization + "+" + course_name + "+" + course_run
grading_policy["course_id"] = str(course_id)
try:
course_version = mongo_cursor[0]
try:
published_version = course_version['versions']['published-branch']
mongo_cursor = db_mongo.modulestore.structures.find({'_id':published_version})
course_structures = mongo_cursor[0]['blocks']
for block in course_structures:
if block['block_type'] == 'course':
course_block = block
try:
course_start = course_block['fields']['start']
grading_policy["course_start"] = str(course_start.date())
except:
grading_policy["course_start"] = ""
#print "Course start date not found"
try:
course_end = course_block['fields']['end']
grading_policy["course_end"] = str(course_end.date())
except:
grading_policy["course_end"] = ""
#print "Course end date not found"
try:
course_registration_start = course_block['fields']['enrollment_start']
grading_policy["course_registration_start"] = str(course_registration_start.date())
except:
grading_policy["course_registration_start"] = ""
#print "Course registration start date not found"
try:
course_registration_end = course_block['fields']['enrollment_end']
grading_policy["course_registration_end"] = str(course_registration_end.date())
except:
grading_policy["course_registration_end"] = ""
#print "Course registration end date not found"
try:
course_display_name = course_block['fields']['display_name']
grading_policy["course_display_name"] = str(course_display_name)
except:
grading_policy["course_display_name"] = ""
#print "Course display name not found"
definition_id = course_block['definition']
mongo_cursor = db_mongo.modulestore.definitions.find({'_id':definition_id})
course_definition = mongo_cursor[0]
try:
grade_list = course_definition['fields']['grading_policy']['GRADER']
grader_result_list = []
for j in range(len(grade_list)):
grader_result_dict = {}
min_count = grade_list[j]['min_count']
drop_count = grade_list[j]['drop_count']
short_label = grade_list[j]['short_label']
display_name = grade_list[j]['type']
weight = grade_list[j]['weight']
grader_result_dict["min_count"] = min_count
grader_result_dict["drop_count"] = drop_count
grader_result_dict["short_label"] = str(short_label)
grader_result_dict["type"] = str(display_name)
grader_result_dict["weight"] = weight
grader_result_list.append(grader_result_dict)
grading_policy["grader"] = grader_result_list
try:
grade_cutoffs = course_definition['fields']['grading_policy']['GRADE_CUTOFFS']
grading_policy["grade_cutoffs"] = grade_cutoffs
except:
grading_policy["grade_cutoffs"] = {}
#print "No grade cutoffs mentioned"
except:
grading_policy["grade_cutoffs"] = {}
grading_policy["grader"] = []
#print "No grading policy found"
except:
grading_policy["course_start"] = ""
grading_policy["course_end"] = ""
grading_policy["course_registration_start"] = ""
grading_policy["course_registration_end"] = ""
grading_policy["course_display_name"] = ""
grading_policy["grade_cutoffs"] = {}
grading_policy["grader"] = []
#print "Course not found"
except:
client.close()
return None
client.close()
return grading_policy
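# Minimal usage sketch: fetches the grading policy of a hypothetical course and
# prints the weight of each graded assignment type. The course identifiers below
# are placeholders, and the call assumes MongoDB holds the course in its modulestore.
def example_print_grading_weights():
    policy = get_grading_policy("CS101", "2017_T2", "MITx")  # hypothetical course
    if policy is None:
        print "Course not found"
        return
    for grader in policy["grader"]:
        print grader["type"], grader["weight"]
    print "Grade cutoffs:", policy["grade_cutoffs"]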
""" Description: Function to get quiz level grades of all students in all courses
Input Parameters:
None
Output Type : List of all courses with a list of grades of all students enrolled in the course
Author: Jay Goswami
Date of creation: 17 June 2017
"""
def get_all_students_courses_grades():
try:
db_mysql = MySQLdb.connect(user=sql_user, passwd=sql_pswd, db=mysql_db) # Establishing MySQL connection
except:
print "MySQL connection not established"
return HttpResponse("MySQL connection not established") # MySQL could not be connected
query = "select distinct course_id from courseware_studentmodule where grade>0 and binary course_id like 'course%'"
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query)
course_list = mysql_cursor.fetchall()
courses_grade_list = []
for course in course_list:
course = course[0]
course = course.split(":")[1]
course = course.split("+")
course_name = course[1]
course_run = course[2]
course_organization = course[0]
grades = get_all_student_grades(course_name, course_run, course_organization)
if grades:
courses_grade_list.append(grades)
db_mysql.close()
return courses_grade_list
""" Description: Function to get quiz level grades of a student in all the enrolled courses
Input Parameters:
student_id: id of the student whose grades are required (ex. 12)
Output Type : List of grades of the student in all enrolled courses
Author: Jay Goswami
Date of creation: 17 June 2017
"""
def get_all_courses_student_grades(student_id):
try:
db_mysql = MySQLdb.connect(user=sql_user, passwd=sql_pswd, db=mysql_db) # Establishing MySQL connection
except:
print "MySQL connection not established"
return HttpResponse("MySQL connection not established") # MySQL could not be connected
try:
mongo_client = pymongo.MongoClient() # Establishing MongoDB connection
except:
print "MongoDB connection not established"
return HttpResponse("MongoDB connection not established") # MongoDB could not be connected
query = "select username, email from auth_user where id = %s"
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query, (str(student_id),))
student = mysql_cursor.fetchone()
query = "select course_id from student_courseenrollment where user_id = %s"
problem_query = "Select grade,max_grade from courseware_studentmodule where max_grade is not null and grade is not null and student_id=%s and module_id=%s"
mysql_cursor.execute(query, (str(student_id),))
course_list = mysql_cursor.fetchall()
grade_list = {}
grade_list['name'] = student[0]
grade_list['email'] = student[1]
grade_list['id'] = int(student_id)
courses_grade_list = []
for course in course_list:
course = course[0]
course = course.split(":")[1]
course = course.split("+")
course_name = course[1]
course_run = course[2]
course_organization = course[0]
dict = {}
dict['course_name'] = course_name
dict['course_organization'] = course_organization
dict['course_run'] = course_run
dict['grade'] = ''
dict['total_score'] = 0
dict['units'] = {}
grading_policy = get_grading_policy(course_name, course_run, course_organization)
try:
grading_list = grading_policy['grader']
grade_cutoffs = grading_policy['grade_cutoffs']
except:
continue
grades = get_student_course_grades(student[1], student[0], student_id, course_name, course_run, course_organization, 1, db_mysql,mongo_client, problem_query, grading_list, grade_cutoffs)
dict['grade'] = grades['grade']
dict['total_score'] = grades['total_score']
dict['units'] = grades['units']
courses_grade_list.append(dict)
grade_list['courses'] = courses_grade_list
mongo_client.close()
db_mysql.close()
return grade_list
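# Minimal usage sketch: pulls the quiz-level grades of a single (hypothetical)
# student with id 12 and prints one line per enrolled course. Assumes the MySQL
# and MongoDB services used above are reachable.
def example_print_student_grades():
    student = get_all_courses_student_grades(12)  # hypothetical student id
    print student['name'], student['email']
    for course in student['courses']:
        print course['course_name'], course['grade'], course['total_score']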
""" Description: Function to get quiz level grades of all students in all courses
Input Parameters:
None
Output Type : List of all students with a list of grades of the student in all enrolled courses
Author: Jay Goswami
Date of creation: 17 June 2017
"""
def get_all_students_grades():
try:
db_mysql = MySQLdb.connect(user=sql_user, passwd=sql_pswd, db=mysql_db) # Establishing MySQL connection
except:
print "MySQL connection not established"
return HttpResponse("MySQL connection not established") # MySQL could not be connected
query = "select id from auth_user order by id"
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query)
students = mysql_cursor.fetchall()
list = []
for student in students:
list.append(get_all_courses_student_grades(student[0]))
db_mysql.close()
return list
|
jaygoswami2303/course_dashboard_api
|
v2/GradeAPI/api.py
|
Python
|
mit
| 27,402 | 0.006861 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, defaults
import time, copy, traceback
try:
# docs: http://www.python-ldap.org/doc/html/index.html
import ldap
import ldap.filter
from ldap.controls import SimplePagedResultsControl
# be compatible to both python-ldap below 2.4 and above
try:
LDAP_CONTROL_PAGED_RESULTS = ldap.LDAP_CONTROL_PAGE_OID
ldap_compat = False
except:
LDAP_CONTROL_PAGED_RESULTS = ldap.CONTROL_PAGEDRESULTS
ldap_compat = True
except:
pass
from lib import *
g_ldap_user_cache = {}
g_ldap_group_cache = {}
# File for storing the time of the last success event
g_ldap_sync_time_file = defaults.var_dir + '/web/ldap_sync_time.mk'
# Exists when last ldap sync failed, contains exception text
g_ldap_sync_fail_file = defaults.var_dir + '/web/ldap_sync_fail.mk'
# LDAP attributes are case insensitive, we only use lower case!
# Please note: These are only default values. The user might override them
# in the configuration.
ldap_attr_map = {
'ad': {
'user_id': 'samaccountname',
'pw_changed': 'pwdlastset',
},
'openldap': {
'user_id': 'uid',
'pw_changed': 'pwdchangedtime',
# group attributes
'member': 'uniquemember',
},
'389directoryserver': {
'user_id': 'uid',
'pw_changed': 'krbPasswordExpiration',
# group attributes
'member': 'uniquemember',
},
}
# LDAP attributes are case insensitive, we only use lower case!
# Please note: These are only default values. The user might override them
# in the configuration.
ldap_filter_map = {
'ad': {
'users': '(&(objectclass=user)(objectcategory=person))',
'groups': '(objectclass=group)',
},
'openldap': {
'users': '(objectclass=person)',
'groups': '(objectclass=groupOfUniqueNames)',
},
'389directoryserver': {
'users': '(objectclass=person)',
'groups': '(objectclass=groupOfUniqueNames)',
},
}
#.
# .-General LDAP code----------------------------------------------------.
# | _ ____ _ ____ |
# | | | | _ \ / \ | _ \ |
# | | | | | | |/ _ \ | |_) | |
# | | |___| |_| / ___ \| __/ |
# | |_____|____/_/ \_\_| |
# | |
# +----------------------------------------------------------------------+
# | General LDAP handling code |
# '----------------------------------------------------------------------'
def ldap_log(s):
if config.ldap_debug_log is not None:
file(ldap_replace_macros(config.ldap_debug_log), "a").write('%s %s\n' %
(time.strftime('%Y-%m-%d %H:%M:%S'), s))
class MKLDAPException(MKGeneralException):
pass
ldap_connection = None
ldap_connection_options = None
def ldap_uri(server):
if 'use_ssl' in config.ldap_connection:
uri = 'ldaps://'
else:
uri = 'ldap://'
return uri + '%s:%d' % (server, config.ldap_connection['port'])
def ldap_test_module():
try:
ldap
except:
raise MKLDAPException(_("The python module python-ldap seems to be missing. You need to "
"install this extension to make the LDAP user connector work."))
def ldap_servers():
servers = [ config.ldap_connection['server'] ]
if config.ldap_connection.get('failover_servers'):
servers += config.ldap_connection.get('failover_servers')
return servers
def ldap_connect_server(server):
try:
uri = ldap_uri(server)
conn = ldap.ldapobject.ReconnectLDAPObject(uri)
conn.protocol_version = config.ldap_connection['version']
conn.network_timeout = config.ldap_connection.get('connect_timeout', 2.0)
conn.retry_delay = 0.5
# When using the domain top level as base-dn, the subtree search stumbles with referral objects.
# whatever. We simply disable them here when using active directory. Hope this fixes all problems.
if config.ldap_connection['type'] == 'ad':
conn.set_option(ldap.OPT_REFERRALS, 0)
ldap_default_bind(conn)
return conn, None
except (ldap.SERVER_DOWN, ldap.TIMEOUT, ldap.LOCAL_ERROR, ldap.LDAPError), e:
return None, '%s: %s' % (uri, e[0].get('info', e[0].get('desc', '')))
except MKLDAPException, e:
return None, str(e)
def ldap_disconnect():
global ldap_connection, ldap_connection_options
ldap_connection = None
ldap_connection_options = None
def ldap_connect(enforce_new = False, enforce_server = None):
global ldap_connection, ldap_connection_options
if not enforce_new \
and not "no_persistent" in config.ldap_connection \
and ldap_connection \
and config.ldap_connection == ldap_connection_options:
        ldap_log('LDAP CONNECT - Using existing connection')
return # Use existing connections (if connection settings have not changed)
else:
ldap_log('LDAP CONNECT - Connecting...')
ldap_test_module()
# Some major config var validations
if not config.ldap_connection.get('server'):
raise MKLDAPException(_('The LDAP connector is enabled in global settings, but the '
'LDAP server to connect to is not configured. Please fix this in the '
'<a href="wato.py?mode=ldap_config">LDAP '
'connection settings</a>.'))
if not config.ldap_userspec.get('dn'):
raise MKLDAPException(_('The distinguished name of the container object, which holds '
'the user objects to be authenticated, is not configured. Please '
'fix this in the <a href="wato.py?mode=ldap_config">'
'LDAP User Settings</a>.'))
try:
errors = []
if enforce_server:
servers = [ enforce_server ]
else:
servers = ldap_servers()
for server in servers:
ldap_connection, error_msg = ldap_connect_server(server)
if ldap_connection:
break # got a connection!
else:
errors.append(error_msg)
# Got no connection to any server
if ldap_connection is None:
raise MKLDAPException(_('LDAP connection failed:\n%s') %
('\n'.join(errors)))
# on success, store the connection options the connection has been made with
ldap_connection_options = config.ldap_connection
except Exception:
# Invalidate connection on failure
ldap_connection = None
ldap_connection_options = None
raise
# Bind with the default credentials
def ldap_default_bind(conn):
try:
if 'bind' in config.ldap_connection:
ldap_bind(ldap_replace_macros(config.ldap_connection['bind'][0]),
config.ldap_connection['bind'][1], catch = False, conn = conn)
else:
ldap_bind('', '', catch = False, conn = conn) # anonymous bind
except (ldap.INVALID_CREDENTIALS, ldap.INAPPROPRIATE_AUTH):
raise MKLDAPException(_('Unable to connect to LDAP server with the configured bind credentials. '
'Please fix this in the '
'<a href="wato.py?mode=ldap_config">LDAP connection settings</a>.'))
def ldap_bind(user_dn, password, catch = True, conn = None):
if conn is None:
conn = ldap_connection
ldap_log('LDAP_BIND %s' % user_dn)
try:
conn.simple_bind_s(user_dn, password)
ldap_log(' SUCCESS')
except ldap.LDAPError, e:
ldap_log(' FAILED (%s)' % e)
if catch:
raise MKLDAPException(_('Unable to authenticate with LDAP (%s)' % e))
else:
raise
def ldap_async_search(base, scope, filt, columns):
ldap_log(' ASYNC SEARCH')
# issue the ldap search command (async)
msgid = ldap_connection.search_ext(base, scope, filt, columns)
results = []
while True:
restype, resdata = ldap_connection.result(msgid = msgid,
timeout = config.ldap_connection.get('response_timeout', 5))
results.extend(resdata)
if restype == ldap.RES_SEARCH_RESULT or not resdata:
break
# no limit at the moment
#if sizelimit and len(users) >= sizelimit:
# ldap_connection.abandon_ext(msgid)
# break
time.sleep(0.1)
return results
def ldap_paged_async_search(base, scope, filt, columns):
ldap_log(' PAGED ASYNC SEARCH')
page_size = config.ldap_connection.get('page_size', 100)
if ldap_compat:
lc = SimplePagedResultsControl(size = page_size, cookie = '')
else:
lc = SimplePagedResultsControl(
LDAP_CONTROL_PAGED_RESULTS, True, (page_size, '')
)
results = []
while True:
# issue the ldap search command (async)
msgid = ldap_connection.search_ext(base, scope, filt, columns, serverctrls = [lc])
unused_code, response, unused_msgid, serverctrls = ldap_connection.result3(
msgid = msgid, timeout = config.ldap_connection.get('response_timeout', 5)
)
for result in response:
results.append(result)
# Mark current position in pagination control for next loop
cookie = None
for serverctrl in serverctrls:
if serverctrl.controlType == LDAP_CONTROL_PAGED_RESULTS:
if ldap_compat:
cookie = serverctrl.cookie
if cookie:
lc.cookie = cookie
else:
cookie = serverctrl.controlValue[1]
if cookie:
lc.controlValue = (page_size, cookie)
break
if not cookie:
break
return results
def ldap_search(base, filt = '(objectclass=*)', columns = [], scope = None):
if scope:
config_scope = scope
else:
config_scope = config.ldap_userspec.get('scope', 'sub')
if config_scope == 'sub':
scope = ldap.SCOPE_SUBTREE
elif config_scope == 'base':
scope = ldap.SCOPE_BASE
elif config_scope == 'one':
scope = ldap.SCOPE_ONELEVEL
ldap_log('LDAP_SEARCH "%s" "%s" "%s" "%r"' % (base, scope, filt, columns))
start_time = time.time()
    # In some environments, the connection to the LDAP server does not seem to
    # be as stable as needed, so if a query fails we retry it with a fresh connection.
tries_left = 2
success = False
last_exc = None
while not success:
tries_left -= 1
try:
ldap_connect()
result = []
try:
search_func = config.ldap_connection.get('page_size') \
and ldap_paged_async_search or ldap_async_search
for dn, obj in search_func(base, scope, filt, columns):
if dn is None:
continue # skip unwanted answers
new_obj = {}
for key, val in obj.iteritems():
# Convert all keys to lower case!
new_obj[key.lower().decode('utf-8')] = [ i.decode('utf-8') for i in val ]
result.append((dn.lower(), new_obj))
success = True
except ldap.NO_SUCH_OBJECT, e:
raise MKLDAPException(_('The given base object "%s" does not exist in LDAP (%s))') % (base, e))
except ldap.FILTER_ERROR, e:
raise MKLDAPException(_('The given ldap filter "%s" is invalid (%s)') % (filt, e))
except ldap.SIZELIMIT_EXCEEDED:
raise MKLDAPException(_('The response reached a size limit. This could be due to '
'a sizelimit configuration on the LDAP server.<br />Throwing away the '
'incomplete results. You should change the scope of operation '
'within the ldap or adapt the limit settings of the LDAP server.'))
except (ldap.SERVER_DOWN, ldap.TIMEOUT, MKLDAPException), e:
last_exc = e
if tries_left:
ldap_log(' Received %r. Retrying with clean connection...' % e)
ldap_disconnect()
time.sleep(0.5)
else:
ldap_log(' Giving up.')
break
duration = time.time() - start_time
if not success:
ldap_log(' FAILED')
if config.debug:
raise MKLDAPException(_('Unable to successfully perform the LDAP search '
'(Base: %s, Scope: %s, Filter: %s, Columns: %s): %s') %
(html.attrencode(base), html.attrencode(scope),
html.attrencode(filt), html.attrencode(','.join(columns)),
last_exc))
else:
raise MKLDAPException(_('Unable to successfully perform the LDAP search (%s)') % last_exc)
ldap_log(' RESULT length: %d, duration: %0.3f' % (len(result), duration))
return result
# Returns the ldap filter depending on the configured ldap directory type
def ldap_filter(key, handle_config = True):
value = ldap_filter_map[config.ldap_connection['type']].get(key, '(objectclass=*)')
if handle_config:
if key == 'users':
value = config.ldap_userspec.get('filter', value)
elif key == 'groups':
value = config.ldap_groupspec.get('filter', value)
return ldap_replace_macros(value)
# Returns the ldap attribute name depending on the configured ldap directory type.
# If a key is not present in the map, the assumption is that the key matches 1:1.
# Always use lower case here, just to prevent confusion.
def ldap_attr(key):
return ldap_attr_map[config.ldap_connection['type']].get(key, key).lower()
# Returns the given distinguished name template with replaced vars
def ldap_replace_macros(tmpl):
dn = tmpl
for key, val in [ ('$OMD_SITE$', defaults.omd_site) ]:
if val:
dn = dn.replace(key, val)
else:
dn = dn.replace(key, '')
return dn
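# Minimal usage sketch for the three helpers above, assuming an Active Directory
# connection (config.ldap_connection['type'] == 'ad'), no user defined filter
# overrides and an OMD site named "mysite":
def _example_type_dependent_lookups():
    # attribute names fall back to the per-directory-type map, lower-cased
    assert ldap_attr('user_id') == 'samaccountname'
    assert ldap_attr('mail') == 'mail'  # unknown keys pass through 1:1
    # object class filters work the same way
    assert ldap_filter('groups', handle_config=False) == '(objectclass=group)'
    # macro replacement in DN templates
    dn = ldap_replace_macros('ou=users,ou=$OMD_SITE$,dc=example,dc=com')
    assert dn == 'ou=users,ou=mysite,dc=example,dc=com'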
def ldap_rewrite_user_id(user_id):
if config.ldap_userspec.get('lower_user_ids', False):
user_id = user_id.lower()
umlauts = config.ldap_userspec.get('user_id_umlauts', 'replace')
new = ""
for c in user_id:
if c == u'ü':
new += 'ue'
elif c == u'ö':
new += 'oe'
elif c == u'ä':
new += 'ae'
elif c == u'ß':
new += 'ss'
elif c == u'Ü':
new += 'UE'
elif c == u'Ö':
new += 'OE'
elif c == u'Ä':
new += 'AE'
else:
new += c
if umlauts == 'replace':
user_id = new
elif umlauts == 'skip' and user_id != new:
return None # This makes the user being skipped
return user_id
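# Minimal sketch, assuming the default user settings (umlauts are replaced, user
# ids are not lower-cased): German umlauts in a login are transliterated so the
# resulting id is plain ASCII. The logins are hypothetical.
def _example_rewrite_user_id():
    return ldap_rewrite_user_id(u'Müller'), ldap_rewrite_user_id(u'jörg')
    # -> (u'Mueller', u'joerg')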
def ldap_user_id_attr():
return config.ldap_userspec.get('user_id', ldap_attr('user_id'))
def ldap_member_attr():
return config.ldap_groupspec.get('member', ldap_attr('member'))
def ldap_bind_credentials_configured():
return config.ldap_connection.get('bind', ('', ''))[0] != ''
def ldap_user_base_dn_configured():
return config.ldap_userspec.get('dn', '') != ''
def ldap_group_base_dn_configured():
return config.ldap_groupspec.get('dn', '') != ''
def ldap_user_base_dn_exists():
try:
result = ldap_search(ldap_replace_macros(config.ldap_userspec['dn']), columns = ['dn'], scope = 'base')
except Exception, e:
return False
if not result:
return False
else:
return len(result) == 1
def ldap_get_user(username, no_escape = False):
if username in g_ldap_user_cache:
return g_ldap_user_cache[username]
    # Check whether or not the user exists in the directory matching the username AND
# the user search filter configured in the "LDAP User Settings".
# It's only ok when exactly one entry is found. Returns the DN and user_id
# as tuple in this case.
result = ldap_search(
ldap_replace_macros(config.ldap_userspec['dn']),
'(&(%s=%s)%s)' % (ldap_user_id_attr(), ldap.filter.escape_filter_chars(username),
config.ldap_userspec.get('filter', '')),
[ldap_user_id_attr()],
)
if result:
dn = result[0][0]
user_id = ldap_rewrite_user_id(result[0][1][ldap_user_id_attr()][0])
if user_id is None:
return None
g_ldap_user_cache[username] = (dn, user_id)
if no_escape:
return (dn, user_id)
else:
return (dn.replace('\\', '\\\\'), user_id)
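# Minimal sketch of the contract of ldap_get_user(), assuming a reachable server:
# on a unique match it hands back the (dn, user_id) pair that the login hook needs
# (with backslashes escaped unless no_escape is set) and caches it; for an unknown
# login it returns None. The login below is hypothetical.
def _example_lookup_login():
    return ldap_get_user('hhirsch')
    # e.g. ('cn=harry hirsch,ou=users,dc=example,dc=com', 'hhirsch')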
def ldap_get_users(add_filter = ''):
columns = [
ldap_user_id_attr(), # needed in all cases as uniq id
] + ldap_needed_attributes()
filt = ldap_filter('users')
# Create filter by the optional filter_group
filter_group_dn = config.ldap_userspec.get('filter_group', None)
member_filter = ''
if filter_group_dn:
member_attr = ldap_member_attr().lower()
# posixGroup objects use the memberUid attribute to specify the group memberships.
        # This is the username instead of the user's DN. So the username needs to be used
# for filtering here.
user_cmp_attr = member_attr == 'memberuid' and ldap_user_id_attr() or 'distinguishedname'
# Apply configured group ldap filter
try:
group = ldap_search(ldap_replace_macros(filter_group_dn),
columns = [member_attr],
scope = 'base')
except MKLDAPException:
group = None
if not group:
raise MKLDAPException(_('The configured ldap user filter group could not be found. '
'Please check <a href="%s">your configuration</a>.') %
'wato.py?mode=ldap_config&varname=ldap_userspec')
members = group[0][1].values()[0]
member_filter_items = []
for member in members:
member_filter_items.append('(%s=%s)' % (user_cmp_attr, member))
add_filter += '(|%s)' % ''.join(member_filter_items)
if add_filter:
filt = '(&%s%s)' % (filt, add_filter)
result = {}
for dn, ldap_user in ldap_search(ldap_replace_macros(config.ldap_userspec['dn']),
filt, columns = columns):
if ldap_user_id_attr() not in ldap_user:
raise MKLDAPException(_('The configured User-ID attribute "%s" does not '
'exist for the user "%s"') % (ldap_user_id_attr(), dn))
user_id = ldap_rewrite_user_id(ldap_user[ldap_user_id_attr()][0])
if user_id:
ldap_user['dn'] = dn # also add the DN
result[user_id] = ldap_user
return result
def ldap_group_base_dn_exists():
group_base_dn = ldap_replace_macros(config.ldap_groupspec['dn'])
if not group_base_dn:
return False
try:
result = ldap_search(group_base_dn, columns = ['dn'], scope = 'base')
except Exception, e:
return False
if not result:
return False
else:
return len(result) == 1
def ldap_get_groups(specific_dn = None):
filt = ldap_filter('groups')
dn = ldap_replace_macros(config.ldap_groupspec['dn'])
if specific_dn:
# When using AD, the groups can be filtered by the DN attribute. With
# e.g. OpenLDAP this is not possible. In that case, change the DN.
if config.ldap_connection['type'] == 'ad':
filt = '(&%s(distinguishedName=%s))' % (filt, specific_dn)
else:
dn = specific_dn
return ldap_search(dn, filt, ['cn'])
def ldap_group_members(filters, filt_attr = 'cn', nested = False):
cache_key = '%s-%s-%s' % (filters, nested and 'n' or 'f', filt_attr)
if cache_key in g_ldap_group_cache:
return g_ldap_group_cache[cache_key]
    # When not searching for nested memberships, it is easy when using an AD-based LDAP.
    # The group objects can be queried using the attribute distinguishedname. Therefore we
    # create an OR match filter on that attribute when searching by DNs.
    # In OpenLDAP the distinguishedname is not a user attribute, therefore it cannot be used
    # as a filter expression. We have to do one ldap query per group. Maybe, in the future,
# we change the role sync plugin parameters to snapins to make this part a little easier.
if not nested:
groups = {}
filt = ldap_filter('groups')
member_attr = ldap_member_attr().lower()
if config.ldap_connection['type'] == 'ad' or filt_attr != 'distinguishedname':
if filters:
add_filt = '(|%s)' % ''.join([ '(%s=%s)' % (filt_attr, f) for f in filters ])
filt = '(&%s%s)' % (filt, add_filt)
for dn, obj in ldap_search(ldap_replace_macros(config.ldap_groupspec['dn']), filt, ['cn', member_attr]):
groups[dn] = {
'cn' : obj['cn'][0],
'members' : [ m.encode('utf-8').lower() for m in obj.get(member_attr,[]) ],
}
else:
# Special handling for OpenLDAP when searching for groups by DN
for f_dn in filters:
for dn, obj in ldap_search(ldap_replace_macros(f_dn), filt, ['cn', member_attr]):
groups[f_dn] = {
'cn' : obj['cn'][0],
'members' : [ m.encode('utf-8').lower() for m in obj.get(member_attr,[]) ],
}
else:
# Nested querying is more complicated. We have no option to simply do a query for group objects
# to make them resolve the memberships here. So we need to query all users with the nested
# memberof filter to get all group memberships of that group. We need one query for each group.
groups = {}
for filter_val in filters:
if filt_attr == 'cn':
result = ldap_search(ldap_replace_macros(config.ldap_groupspec['dn']),
'(&%s(cn=%s))' % (ldap_filter('groups'), filter_val),
columns = ['dn'])
if not result:
continue # Skip groups which can not be found
dn = result[0][0]
cn = filter_val
else:
dn = filter_val
# in case of asking with DNs in nested mode, the resulting objects have the
# cn set to None for all objects. We do not need it in that case.
cn = None
filt = '(&%s(memberOf:1.2.840.113556.1.4.1941:=%s))' % (ldap_filter('users'), dn)
groups[dn] = {
'members' : [],
'cn' : cn,
}
for user_dn, obj in ldap_search(ldap_replace_macros(config.ldap_userspec['dn']), filt, columns = ['dn']):
groups[dn]['members'].append(user_dn.lower())
g_ldap_group_cache[cache_key] = groups
return groups
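# Minimal sketch, assuming an Active Directory connection: the nested branch above
# issues one query per group with a filter of the following shape. The OID
# 1.2.840.113556.1.4.1941 is AD's LDAP_MATCHING_RULE_IN_CHAIN, which makes the
# memberOf comparison transitive over nested groups. The group DN is hypothetical.
def _example_nested_member_filter():
    group_dn = 'cn=cmk-users,ou=groups,dc=example,dc=com'
    return '(&%s(memberOf:1.2.840.113556.1.4.1941:=%s))' % (ldap_filter('users'), group_dn)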
#.
# .-Attributes-----------------------------------------------------------.
# | _ _ _ _ _ _ |
# | / \ | |_| |_ _ __(_) |__ _ _| |_ ___ ___ |
# | / _ \| __| __| '__| | '_ \| | | | __/ _ \/ __| |
# | / ___ \ |_| |_| | | | |_) | |_| | || __/\__ \ |
# | /_/ \_\__|\__|_| |_|_.__/ \__,_|\__\___||___/ |
# | |
# +----------------------------------------------------------------------+
# | Attribute plugin handling code goes here |
# '----------------------------------------------------------------------'
ldap_attribute_plugins = {}
# Returns a list of pairs (key, title) of all available attribute plugins
def ldap_list_attribute_plugins():
plugins = []
for key, plugin in ldap_attribute_plugins.items():
plugins.append((key, plugin['title']))
return plugins
# Returns a list of pairs (key, parameters) of all available attribute plugins
def ldap_attribute_plugins_elements():
elements = []
items = sorted(ldap_attribute_plugins.items(), key = lambda x: x[1]['title'])
for key, plugin in items:
if 'parameters' not in plugin:
param = []
elements.append((key, FixedValue(
title = plugin['title'],
help = plugin['help'],
value = {},
totext = 'no_param_txt' in plugin and plugin['no_param_txt'] \
or _('This synchronization plugin has no parameters.'),
)))
else:
elements.append((key, Dictionary(
title = plugin['title'],
help = plugin['help'],
elements = plugin['parameters'],
)))
return elements
# Returns a list of all needed LDAP attributes of all enabled plugins
def ldap_needed_attributes():
attrs = set([])
for key, params in config.ldap_active_plugins.items():
plugin = ldap_attribute_plugins[key]
if 'needed_attributes' in plugin:
attrs.update(plugin['needed_attributes'](params or {}))
return list(attrs)
def ldap_convert_simple(user_id, ldap_user, user, user_attr, attr):
if attr in ldap_user:
return {user_attr: ldap_user[attr][0]}
else:
return {}
def ldap_convert_mail(plugin, params, user_id, ldap_user, user):
mail = ''
mail_attr = params.get('attr', ldap_attr('mail')).lower()
if ldap_user.get(mail_attr):
mail = ldap_user[mail_attr][0].lower()
if mail:
return {'email': mail}
else:
return {}
ldap_attribute_plugins['email'] = {
'title': _('Email address'),
'help': _('Synchronizes the email of the LDAP user account into Check_MK.'),
# Attributes which must be fetched from ldap
'needed_attributes': lambda params: [ params.get('attr', ldap_attr('mail')).lower() ],
# Calculating the value of the attribute based on the configuration and the values
# gathered from ldap
'convert': ldap_convert_mail,
# User-Attributes to be written by this plugin and will be locked in WATO
'lock_attributes': [ 'email' ],
'parameters': [
("attr", TextAscii(
title = _("LDAP attribute to sync"),
help = _("The LDAP attribute containing the mail address of the user."),
default_value = lambda: ldap_attr('mail'),
)),
],
}
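# Minimal sketch of the plugin contract defined above, assuming a configured
# connection: during a sync the convert function is called with the plugin key,
# its parameters, the user id, the raw LDAP record and the current Check_MK user
# dict; whatever dict it returns is merged into the user. The record is hypothetical.
def _example_email_plugin_contract():
    ldap_user = {'mail': [u'Harry.Hirsch@Example.com']}
    return ldap_attribute_plugins['email']['convert']('email', {}, u'hhirsch', ldap_user, {})
    # -> {'email': u'harry.hirsch@example.com'}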
ldap_attribute_plugins['alias'] = {
'title': _('Alias'),
    'help': _('Populates the alias attribute of the WATO user by synchronizing an attribute '
'from the LDAP user account. By default the LDAP attribute <tt>cn</tt> is used.'),
'needed_attributes': lambda params: [ params.get('attr', ldap_attr('cn')).lower() ],
'convert': lambda plugin, params, user_id, ldap_user, user: \
ldap_convert_simple(user_id, ldap_user, user, 'alias',
params.get('attr', ldap_attr('cn')).lower()),
'lock_attributes': [ 'alias' ],
'parameters': [
("attr", TextAscii(
title = _("LDAP attribute to sync"),
help = _("The LDAP attribute containing the alias of the user."),
default_value = lambda: ldap_attr('cn'),
)),
],
}
# Checks whether or not the user auth must be invalidated (increasing the serial).
# In the first instance, it must parse the pw-changed field, then check whether or not
# a date has been stored in the user before and then maybe increase the serial.
def ldap_convert_auth_expire(plugin, params, user_id, ldap_user, user):
# Special handling for active directory: Is the user enabled / disabled?
if config.ldap_connection['type'] == 'ad' and ldap_user.get('useraccountcontrol'):
# see http://www.selfadsi.de/ads-attributes/user-userAccountControl.htm for details
if saveint(ldap_user['useraccountcontrol'][0]) & 2 and not user.get("locked", False):
return {
'locked': True,
'serial': user.get('serial', 0) + 1,
}
changed_attr = params.get('attr', ldap_attr('pw_changed')).lower()
if not changed_attr in ldap_user:
raise MKLDAPException(_('The "Authentication Expiration" attribute (%s) could not be fetched '
'from the LDAP server for user %s.') % (changed_attr, ldap_user))
# For keeping this thing simple, we don't parse the date here. We just store
# the last value of the field in the user data and invalidate the auth if the
# value has been changed.
if 'ldap_pw_last_changed' not in user:
return {'ldap_pw_last_changed': ldap_user[changed_attr][0]} # simply store
# Update data (and invalidate auth) if the attribute has changed
if user['ldap_pw_last_changed'] != ldap_user[changed_attr][0]:
return {
'ldap_pw_last_changed': ldap_user[changed_attr][0],
'serial': user.get('serial', 0) + 1,
}
return {}
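# Minimal sketch of the contract above, assuming an OpenLDAP connection where
# 'pwdchangedtime' is fetched: the first sync just stores the value, a later
# change bumps the serial (which invalidates existing sessions). Values are hypothetical.
def _example_auth_expire_contract():
    first = ldap_convert_auth_expire('auth_expire', {}, u'hhirsch',
                                     {'pwdchangedtime': [u'20170801120000Z']}, {})
    # first == {'ldap_pw_last_changed': u'20170801120000Z'}
    later = ldap_convert_auth_expire('auth_expire', {}, u'hhirsch',
                                     {'pwdchangedtime': [u'20170901120000Z']}, first)
    # later == {'ldap_pw_last_changed': u'20170901120000Z', 'serial': 1}
    return first, later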
def ldap_attrs_auth_expire(params):
attrs = [ params.get('attr', ldap_attr('pw_changed')).lower() ]
# Fetch user account flags to check locking
if config.ldap_connection['type'] == 'ad':
attrs.append('useraccountcontrol')
return attrs
ldap_attribute_plugins['auth_expire'] = {
'title': _('Authentication Expiration'),
    'help': _('This plugin fetches all information which is needed to check whether or '
'not an already authenticated user should be deauthenticated, e.g. because '
'the password has changed in LDAP or the account has been locked.'),
'needed_attributes': ldap_attrs_auth_expire,
'convert': ldap_convert_auth_expire,
# When a plugin introduces new user attributes, it should declare the output target for
# this attribute. It can either be written to the multisites users.mk or the check_mk
# contacts.mk to be forwarded to nagios. Undeclared attributes are stored in the check_mk
# contacts.mk file.
'multisite_attributes': ['ldap_pw_last_changed'],
'non_contact_attributes': ['ldap_pw_last_changed'],
'parameters': [
("attr", TextAscii(
title = _("LDAP attribute to be used as indicator"),
help = _("When the value of this attribute changes for a user account, all "
"current authenticated sessions of the user are invalidated and the "
"user must login again. By default this field uses the fields which "
"hold the time of the last password change of the user."),
default_value = lambda: ldap_attr('pw_changed'),
)),
],
}
ldap_attribute_plugins['pager'] = {
'title': _('Pager'),
'help': _('This plugin synchronizes a field of the users LDAP account to the pager attribute '
              'of the WATO user accounts, which is then forwarded to the monitoring core and can be used '
'for notifications. By default the LDAP attribute <tt>mobile</tt> is used.'),
'needed_attributes': lambda params: [ params.get('attr', ldap_attr('mobile')).lower() ],
'convert': lambda plugin, params, user_id, ldap_user, user: \
ldap_convert_simple(user_id, ldap_user, user, 'pager',
params.get('attr', ldap_attr('mobile')).lower()),
'lock_attributes': ['pager'],
'parameters': [
('attr', TextAscii(
title = _("LDAP attribute to sync"),
help = _("The LDAP attribute containing the pager number of the user."),
default_value = lambda: ldap_attr('mobile'),
)),
],
}
# Register sync plugins for all custom user attributes (assuming simple data types)
def register_user_attribute_sync_plugins():
# Remove old user attribute plugins
for attr_name in ldap_attribute_plugins.keys():
if attr_name not in ldap_builtin_attribute_plugin_names:
del ldap_attribute_plugins[attr_name]
for attr, val in get_user_attributes():
ldap_attribute_plugins[attr] = {
'title': val['valuespec'].title(),
'help': val['valuespec'].help(),
            'needed_attributes': lambda params, attr=attr: [ params.get('attr', ldap_attr(attr)).lower() ],
'convert': lambda plugin, params, user_id, ldap_user, user: \
ldap_convert_simple(user_id, ldap_user, user, plugin,
params.get('attr', ldap_attr(plugin)).lower()),
'lock_attributes': [ attr ],
'parameters': [
('attr', TextAscii(
title = _("LDAP attribute to sync"),
help = _("The LDAP attribute whose contents shall be synced into this custom attribute."),
                    default_value = lambda attr=attr: ldap_attr(attr),
)),
],
}
def ldap_convert_groups_to_contactgroups(plugin, params, user_id, ldap_user, user):
# 1. Fetch all existing group names in WATO
cg_names = load_group_information().get("contact", {}).keys()
# 2. Load all LDAP groups which have a CN matching one contact
# group which exists in WATO
ldap_groups = ldap_group_members(cg_names, nested = params.get('nested', False))
# 3. Only add groups which the user is member of
return {'contactgroups': [ g['cn'] for dn, g in ldap_groups.items() if ldap_user['dn'] in g['members']]}
ldap_attribute_plugins['groups_to_contactgroups'] = {
'title': _('Contactgroup Membership'),
'help': _('Adds the user to contactgroups based on the group memberships in LDAP. This '
'plugin adds the user only to existing contactgroups while the name of the '
'contactgroup must match the common name (cn) of the LDAP group.'),
'convert': ldap_convert_groups_to_contactgroups,
'lock_attributes': ['contactgroups'],
'parameters': [
('nested', FixedValue(
title = _('Handle nested group memberships (Active Directory only at the moment)'),
help = _('Once you enable this option, this plugin will not only handle direct '
'group memberships, instead it will also dig into nested groups and treat '
'the members of those groups as contact group members as well. Please mind '
'that this feature might increase the execution time of your LDAP sync.'),
value = True,
totext = _('Nested group memberships are resolved'),
)
)
],
}
def ldap_convert_groups_to_roles(plugin, params, user_id, ldap_user, user):
# Load the needed LDAP groups, which match the DNs mentioned in the role sync plugin config
ldap_groups = dict(ldap_group_members([ dn.lower() for role_id, dn in params.items() if isinstance(dn, str) ],
filt_attr = 'distinguishedname', nested = params.get('nested', False)))
roles = set([])
# Loop all roles mentioned in params (configured to be synchronized)
for role_id, dn in params.items():
if not isinstance(dn, str):
continue # skip non configured ones
dn = dn.lower() # lower case matching for DNs!
# if group could be found and user is a member, add the role
if dn in ldap_groups and ldap_user['dn'] in ldap_groups[dn]['members']:
roles.add(role_id)
# Load default roles from default user profile when the user got no role
# by the role sync plugin
if not roles:
roles = config.default_user_profile['roles'][:]
return {'roles': list(roles)}
def ldap_list_roles_with_group_dn():
elements = []
for role_id, role in load_roles().items():
elements.append((role_id, LDAPDistinguishedName(
title = role['alias'] + ' - ' + _("Specify the Group DN"),
help = _("Distinguished Name of the LDAP group to add users this role. "
"e. g. <tt>CN=cmk-users,OU=groups,DC=example,DC=com</tt><br> "
"This group must be defined within the scope of the "
"<a href=\"wato.py?mode=ldap_config&varname=ldap_groupspec\">LDAP Group Settings</a>."),
size = 80,
enforce_suffix = ldap_replace_macros(config.ldap_groupspec.get('dn', '')),
)))
elements.append(
('nested', FixedValue(
title = _('Handle nested group memberships (Active Directory only at the moment)'),
help = _('Once you enable this option, this plugin will not only handle direct '
'group memberships, instead it will also dig into nested groups and treat '
'the members of those groups as contact group members as well. Please mind '
'that this feature might increase the execution time of your LDAP sync.'),
value = True,
totext = _('Nested group memberships are resolved'),
)
)
)
return elements
ldap_attribute_plugins['groups_to_roles'] = {
'title': _('Roles'),
'help': _('Configures the roles of the user depending on its group memberships '
'in LDAP.<br><br>'
'Please note: Additionally the user is assigned to the '
'<a href="wato.py?mode=edit_configvar&varname=default_user_profile&site=&folder=">Default Roles</a>. '
'Deactivate them if unwanted.'),
'convert': ldap_convert_groups_to_roles,
'lock_attributes': ['roles'],
'parameters': ldap_list_roles_with_group_dn,
}
#.
# .-Hooks----------------------------------------------------------------.
# | _ _ _ |
# | | | | | ___ ___ | | _____ |
# | | |_| |/ _ \ / _ \| |/ / __| |
# | | _ | (_) | (_) | <\__ \ |
# | |_| |_|\___/ \___/|_|\_\___/ |
# | |
# +----------------------------------------------------------------------+
# | Hook functions used in this connector |
# '----------------------------------------------------------------------'
# This function only validates credentials, no locked checking or similar
def ldap_login(username, password):
ldap_connect()
    # Returns None when the user is not found or not unique, else returns the
# distinguished name and the username as tuple which are both needed for
# the further login process.
result = ldap_get_user(username, True)
if not result:
return None # The user does not exist. Skip this connector.
user_dn, username = result
# Try to bind with the user provided credentials. This unbinds the default
# authentication which should be rebound again after trying this.
try:
ldap_bind(user_dn, password)
result = username.encode('utf-8')
except:
result = False
ldap_default_bind(ldap_connection)
return result
def ldap_sync(add_to_changelog, only_username):
# Store time of the last sync. Don't store after sync since parallel
# requests to e.g. the page hook would cause duplicate calculations
file(g_ldap_sync_time_file, 'w').write('%s\n' % time.time())
if not config.ldap_connection or not ldap_user_base_dn_configured():
return # silently skip sync without configuration
# Flush ldap related before each sync to have a caching only for the
# current sync process
global g_ldap_user_cache, g_ldap_group_cache
g_ldap_user_cache = {}
g_ldap_group_cache = {}
start_time = time.time()
ldap_log(' SYNC PLUGINS: %s' % ', '.join(config.ldap_active_plugins.keys()))
# Unused at the moment, always sync all users
#filt = None
#if only_username:
# filt = '(%s=%s)' % (ldap_user_id_attr(), only_username)
ldap_users = ldap_get_users()
import wato
users = load_users(lock = True)
# Remove users which are controlled by this connector but can not be found in
# LDAP anymore
for user_id, user in users.items():
if user.get('connector') == 'ldap' and user_id not in ldap_users:
del users[user_id] # remove the user
wato.log_pending(wato.SYNCRESTART, None, "edit-users",
_("LDAP Connector: Removed user %s" % user_id), user_id = '')
for user_id, ldap_user in ldap_users.items():
if user_id in users:
user = copy.deepcopy(users[user_id])
mode_create = False
else:
user = new_user_template('ldap')
mode_create = True
# Skip all users not controlled by this connector
if user.get('connector') != 'ldap':
continue
# Gather config from convert functions of plugins
for key, params in config.ldap_active_plugins.items():
user.update(ldap_attribute_plugins[key]['convert'](key, params or {}, user_id, ldap_user, user))
if not mode_create and user == users[user_id]:
continue # no modification. Skip this user.
# Gather changed attributes for easier debugging
if not mode_create:
set_new, set_old = set(user.keys()), set(users[user_id].keys())
intersect = set_new.intersection(set_old)
added = set_new - intersect
removed = set_old - intersect
changed = set(o for o in intersect if users[user_id][o] != user[o])
users[user_id] = user # Update the user record
if mode_create:
wato.log_pending(wato.SYNCRESTART, None, "edit-users",
_("LDAP Connector: Created user %s" % user_id), user_id = '')
else:
details = []
if added:
details.append(_('Added: %s') % ', '.join(added))
if removed:
details.append(_('Removed: %s') % ', '.join(removed))
# Ignore password changes from ldap - do not log them. For now.
if 'ldap_pw_last_changed' in changed:
changed.remove('ldap_pw_last_changed')
if 'serial' in changed:
changed.remove('serial')
if changed:
                details.append(_('Changed: %s') % ', '.join(changed))
if details:
wato.log_pending(wato.SYNCRESTART, None, "edit-users",
_("LDAP Connector: Modified user %s (%s)") % (user_id, ', '.join(details)),
user_id = '')
duration = time.time() - start_time
ldap_log('SYNC FINISHED - Duration: %0.3f sec' % duration)
# delete the fail flag file after successful sync
try:
os.unlink(g_ldap_sync_fail_file)
except OSError:
pass
save_users(users)
# Calculates the attributes of the users which are locked for users managed
# by this connector
def ldap_locked_attributes():
    locked = set([ 'password' ]) # These attributes are locked in all cases!
for key in config.ldap_active_plugins.keys():
locked.update(ldap_attribute_plugins.get(key, {}).get('lock_attributes', []))
return list(locked)
# Calculates the attributes added in this connector which shall be written to
# the multisites users.mk
def ldap_multisite_attributes():
attrs = set([])
for key in config.ldap_active_plugins.keys():
attrs.update(ldap_attribute_plugins.get(key, {}).get('multisite_attributes', []))
return list(attrs)
# Calculates the attributes added in this connector which shall NOT be written to
# the check_mk contacts.mk
def ldap_non_contact_attributes():
attrs = set([])
for key in config.ldap_active_plugins.keys():
attrs.update(ldap_attribute_plugins.get(key, {}).get('non_contact_attributes', []))
return list(attrs)
ldap_builtin_attribute_plugin_names = []
# Is called on every multisite http request
def ldap_page():
try:
last_sync_time = float(file(g_ldap_sync_time_file).read().strip())
except:
last_sync_time = 0
# Save the builtin attribute names (to be able to delete removed user attributes)
global ldap_builtin_attribute_plugin_names
if not ldap_builtin_attribute_plugin_names:
ldap_builtin_attribute_plugin_names = ldap_attribute_plugins.keys()
register_user_attribute_sync_plugins()
    # in case of sync problems, synchronize every 20 seconds instead of the configured
    # regular cache lifetime
if os.path.exists(g_ldap_sync_fail_file):
cache_livetime = 20
else:
cache_livetime = config.ldap_cache_livetime
if last_sync_time + cache_livetime > time.time():
return # No action needed, cache is recent enough
# ok, cache is too old. Act!
try:
ldap_sync(False, None)
except:
# Do not let the exception through to the user. Instead write last
# error in a state file which is then visualized for the admin and
# will be deleted upon next successful sync.
file(g_ldap_sync_fail_file, 'w').write('%s\n%s' % (time.strftime('%Y-%m-%d %H:%M:%S'),
traceback.format_exc()))
multisite_user_connectors.append({
'id': 'ldap',
'title': _('LDAP (Active Directory, OpenLDAP)'),
'short_title': _('LDAP'),
'login': ldap_login,
'sync': ldap_sync,
'page': ldap_page,
'locked': user_locked, # no ldap check, just check the WATO attribute.
# This handles setups where the locked attribute is not
# synchronized and the user is enabled in LDAP and disabled
# in Check_MK. When the user is locked in LDAP a login is
# not possible.
'locked_attributes': ldap_locked_attributes,
'multisite_attributes': ldap_multisite_attributes,
    'non_contact_attributes': ldap_non_contact_attributes,
})
|
iceman1989/Check_mk
|
web/plugins/userdb/ldap.py
|
Python
|
gpl-2.0
| 48,692 | 0.010065 |
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
try:
import botocore.waiter as core_waiter
except ImportError:
pass # caught by HAS_BOTO3
ec2_data = {
"version": 2,
"waiters": {
"RouteTableExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeRouteTables",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(RouteTables[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidRouteTableID.NotFound",
"state": "retry"
},
]
},
"SecurityGroupExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSecurityGroups",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(SecurityGroups[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidGroup.NotFound",
"state": "retry"
},
]
},
"SubnetExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(Subnets[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "retry"
},
]
},
"SubnetHasMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetNoMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetHasAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
},
"SubnetNoAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
},
"SubnetDeleted": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(Subnets[]) > `0`",
"state": "retry"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "success"
},
]
},
"VpnGatewayExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeVpnGateways",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(VpnGateways[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidVpnGatewayID.NotFound",
"state": "retry"
},
]
},
}
}
waf_data = {
"version": 2,
"waiters": {
"ChangeTokenInSync": {
"delay": 20,
"maxAttempts": 60,
"operation": "GetChangeTokenStatus",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "ChangeTokenStatus == 'INSYNC'",
"state": "success"
},
{
"matcher": "error",
"expected": "WAFInternalErrorException",
"state": "retry"
}
]
}
}
}
eks_data = {
"version": 2,
"waiters": {
"ClusterActive": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeCluster",
"acceptors": [
{
"state": "success",
"matcher": "path",
"argument": "cluster.status",
"expected": "ACTIVE"
},
{
"state": "retry",
"matcher": "error",
"expected": "ResourceNotFoundException"
}
]
}
}
}
def ec2_model(name):
ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
return ec2_models.get_waiter(name)
def waf_model(name):
waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
return waf_models.get_waiter(name)
def eks_model(name):
eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
return eks_models.get_waiter(name)
waiters_by_name = {
('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
'route_table_exists',
ec2_model('RouteTableExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_route_tables
)),
('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
'security_group_exists',
ec2_model('SecurityGroupExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_security_groups
)),
('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
'subnet_exists',
ec2_model('SubnetExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_has_map_public',
ec2_model('SubnetHasMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_no_map_public',
ec2_model('SubnetNoMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_has_assign_ipv6',
ec2_model('SubnetHasAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_no_assign_ipv6',
ec2_model('SubnetNoAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
'subnet_deleted',
ec2_model('SubnetDeleted'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
'vpn_gateway_exists',
ec2_model('VpnGatewayExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_vpn_gateways
)),
('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
'change_token_in_sync',
waf_model('ChangeTokenInSync'),
core_waiter.NormalizedOperationMethod(
waf.get_change_token_status
)),
('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
'cluster_active',
eks_model('ClusterActive'),
core_waiter.NormalizedOperationMethod(
eks.describe_cluster
)),
}
def get_waiter(client, waiter_name):
try:
return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
except KeyError:
raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
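# Minimal usage sketch, assuming a boto3 EC2 client created elsewhere (for example
# boto3.client('ec2')): Waiter.wait() forwards its keyword arguments to the wrapped
# describe_subnets call, so the polling stops once the (hypothetical) subnet exists.
def example_wait_for_subnet(ec2_client, subnet_id='subnet-0123456789abcdef0'):
    waiter = get_waiter(ec2_client, 'subnet_exists')
    waiter.wait(SubnetIds=[subnet_id])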
|
tareqalayan/ansible
|
lib/ansible/module_utils/aws/waiters.py
|
Python
|
gpl-3.0
| 9,345 | 0.000428 |
""" NormalizeTuples removes implicit variable -> tuple conversion. """
from pythran.analyses import Identifiers
from pythran.passmanager import Transformation
import ast
class _ConvertToTuple(ast.NodeTransformer):
def __init__(self, tuple_id, renamings):
self.tuple_id = tuple_id
self.renamings = renamings
def visit_Name(self, node):
if node.id in self.renamings:
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Index(ast.Num(y)),
ast.Load()),
self.renamings[node.id],
ast.Name(self.tuple_id, ast.Load())
)
nnode.ctx = node.ctx
return nnode
return node
class NormalizeTuples(Transformation):
"""
Remove implicit tuple -> variable conversion.
>>> import ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("def foo(): a=(1,2.) ; i,j = a")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(NormalizeTuples, node)
>>> print pm.dump(backend.Python, node)
def foo():
a = (1, 2.0)
i = a[0]
j = a[1]
"""
tuple_name = "__tuple"
def __init__(self):
Transformation.__init__(self)
def get_new_id(self):
i = 0
while 1:
new_id = "{}{}".format(NormalizeTuples.tuple_name, i)
if new_id not in self.ids:
self.ids.add(new_id)
return new_id
else:
i += 1
def traverse_tuples(self, node, state, renamings):
if isinstance(node, ast.Name):
if state:
renamings[node.id] = state
self.update = True
elif isinstance(node, ast.Tuple) or isinstance(node, ast.List):
[self.traverse_tuples(n, state + (i,), renamings)
for i, n in enumerate(node.elts)]
elif isinstance(node, (ast.Subscript, ast.Attribute)):
if state:
renamings[node] = state
self.update = True
else:
raise NotImplementedError
def visit_comprehension(self, node):
renamings = dict()
self.traverse_tuples(node.target, (), renamings)
if renamings:
self.update = True
return self.get_new_id(), renamings
else:
return node
def visit_AnyComp(self, node, *fields):
for field in fields:
setattr(node, field, self.visit(getattr(node, field)))
generators = map(self.visit, node.generators)
nnode = node
for i, g in enumerate(generators):
if isinstance(g, tuple):
gtarget = "{0}{1}".format(g[0], i)
nnode.generators[i].target = ast.Name(
gtarget,
nnode.generators[i].target.ctx)
nnode = _ConvertToTuple(gtarget, g[1]).visit(nnode)
self.update = True
for field in fields:
setattr(node, field, getattr(nnode, field))
node.generators = nnode.generators
return node
def visit_ListComp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_SetComp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_DictComp(self, node):
return self.visit_AnyComp(node, 'key', 'value')
def visit_GeneratorExp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_Lambda(self, node):
self.generic_visit(node)
for i, arg in enumerate(node.args.args):
renamings = dict()
self.traverse_tuples(arg, (), renamings)
if renamings:
nname = self.get_new_id()
node.args.args[i] = ast.Name(nname, ast.Param())
node.body = _ConvertToTuple(nname, renamings).visit(node.body)
return node
def visit_Assign(self, node):
self.generic_visit(node)
# if the rhs is an identifier, we don't need to duplicate it
# otherwise, better duplicate it...
no_tmp = isinstance(node.value, ast.Name)
extra_assign = [] if no_tmp else [node]
for i, t in enumerate(node.targets):
if isinstance(t, ast.Tuple) or isinstance(t, ast.List):
renamings = dict()
self.traverse_tuples(t, (), renamings)
if renamings:
gtarget = node.value.id if no_tmp else self.get_new_id()
node.targets[i] = ast.Name(gtarget, node.targets[i].ctx)
for rename, state in sorted(renamings.iteritems()):
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Index(ast.Num(y)),
ast.Load()),
state,
ast.Name(gtarget, ast.Load()))
if isinstance(rename, str):
extra_assign.append(
ast.Assign(
[ast.Name(rename, ast.Store())],
nnode))
else:
extra_assign.append(ast.Assign([rename], nnode))
return extra_assign or node
def visit_For(self, node):
target = node.target
if isinstance(target, ast.Tuple) or isinstance(target, ast.List):
renamings = dict()
self.traverse_tuples(target, (), renamings)
if renamings:
gtarget = self.get_new_id()
node.target = ast.Name(gtarget, node.target.ctx)
for rename, state in sorted(renamings.iteritems()):
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Index(ast.Num(y)),
ast.Load()),
state,
ast.Name(gtarget, ast.Load()))
if isinstance(rename, str):
node.body.insert(0,
ast.Assign(
[ast.Name(rename, ast.Store())],
nnode)
)
else:
node.body.insert(0, ast.Assign([rename], nnode))
self.generic_visit(node)
return node
def visit_FunctionDef(self, node):
self.ids = self.passmanager.gather(Identifiers, node, self.ctx)
return self.generic_visit(node)
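# Minimal sketch of the _ConvertToTuple helper defined at the top of this module
# (plain CPython, no pass manager needed): it rewrites references to unpacked
# names into subscripts of the packed tuple variable.
def _example_convert_to_tuple():
    tree = ast.parse("i + j", mode="eval")
    renamed = _ConvertToTuple("__tuple0", {"i": (0,), "j": (1,)}).visit(tree)
    ast.fix_missing_locations(renamed)
    # now behaves like "__tuple0[0] + __tuple0[1]"
    return eval(compile(renamed, "<example>", "eval"), {"__tuple0": (20, 22)})  # -> 42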
|
pbrunet/pythran
|
pythran/transformations/normalize_tuples.py
|
Python
|
bsd-3-clause
| 6,743 | 0 |
from .responses import SecretsManagerResponse
url_bases = [r"https?://secretsmanager\.(.+)\.amazonaws\.com"]
url_paths = {"{0}/$": SecretsManagerResponse.dispatch}
|
spulec/moto
|
moto/secretsmanager/urls.py
|
Python
|
apache-2.0
| 166 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-08-02 20:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mindynode_nltk', '0009_auto_20170802_2046'),
]
operations = [
migrations.AddField(
model_name='keywordsum',
name='keyword_category',
field=models.CharField(default='china', max_length=255, null=True, verbose_name='类别'),
),
]
|
josherich/mindynode-parsers
|
mindynode_nltk/migrations/0010_keywordsum_keyword_category.py
|
Python
|
mit
| 519 | 0.001942 |
# -*- coding: utf-8 -*-
from django import forms
from django.apps import apps
from django.contrib.auth import get_user_model, get_permission_codename
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.template.defaultfilters import slugify
from django.utils.encoding import force_text
from django.utils.translation import ugettext, ugettext_lazy as _
from cms import api
from cms.apphook_pool import apphook_pool
from cms.exceptions import PluginLimitReached
from cms.extensions import extension_pool
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_DIRTY, ROOT_USER_LEVEL
from cms.forms.validators import validate_relative_url, validate_url_uniqueness
from cms.forms.widgets import UserSelectAdminWidget, AppHookSelect, ApplicationConfigSelect
from cms.models import (CMSPlugin, Page, PageType, PagePermission, PageUser, PageUserGroup, Title,
Placeholder, GlobalPagePermission, TreeNode)
from cms.models.permissionmodels import User
from cms.plugin_pool import plugin_pool
from cms.signals.apphook import set_restart_trigger
from cms.utils.conf import get_cms_setting
from cms.utils.compat.forms import UserChangeForm
from cms.utils.i18n import get_language_list, get_language_object
from cms.utils.permissions import (
get_current_user,
get_subordinate_users,
get_subordinate_groups,
get_user_permission_level,
)
from menus.menu_pool import menu_pool
def get_permission_accessor(obj):
User = get_user_model()
if isinstance(obj, (PageUser, User,)):
rel_name = 'user_permissions'
else:
rel_name = 'permissions'
return getattr(obj, rel_name)
def get_page_changed_by_filter_choices():
# This is not site-aware
# Been like this forever
# Would be nice for it to filter out by site
values = (
Page
.objects
.filter(publisher_is_draft=True)
.distinct()
.order_by('changed_by')
.values_list('changed_by', flat=True)
)
yield ('', _('All'))
for value in values:
yield (value, value)
def get_page_template_filter_choices():
yield ('', _('All'))
for value, name in get_cms_setting('TEMPLATES'):
yield (value, name)
def save_permissions(data, obj):
models = (
(Page, 'page'),
(PageUser, 'pageuser'),
(PageUserGroup, 'pageuser'),
(PagePermission, 'pagepermission'),
)
if not obj.pk:
# save obj, otherwise we can't assign permissions to him
obj.save()
permission_accessor = get_permission_accessor(obj)
for model, name in models:
content_type = ContentType.objects.get_for_model(model)
for key in ('add', 'change', 'delete'):
# add permission `key` for model `model`
codename = get_permission_codename(key, model._meta)
permission = Permission.objects.get(content_type=content_type, codename=codename)
field = 'can_%s_%s' % (key, name)
if data.get(field):
permission_accessor.add(permission)
elif field in data:
permission_accessor.remove(permission)
class CopyPermissionForm(forms.Form):
"""
Holds the specific field for permissions
"""
copy_permissions = forms.BooleanField(
label=_('Copy permissions'),
required=False,
initial=True,
)
class BasePageForm(forms.ModelForm):
_user = None
_site = None
_language = None
title = forms.CharField(label=_("Title"), max_length=255, widget=forms.TextInput(),
help_text=_('The default title'))
slug = forms.CharField(label=_("Slug"), max_length=255, widget=forms.TextInput(),
help_text=_('The part of the title that is used in the URL'))
menu_title = forms.CharField(label=_("Menu Title"), widget=forms.TextInput(),
help_text=_('Overwrite what is displayed in the menu'), required=False)
page_title = forms.CharField(label=_("Page Title"), widget=forms.TextInput(),
help_text=_('Overwrites what is displayed at the top of your browser or in bookmarks'),
required=False)
meta_description = forms.CharField(label=_('Description meta tag'), required=False,
widget=forms.Textarea(attrs={'maxlength': '155', 'rows': '4'}),
help_text=_('A description of the page used by search engines.'),
max_length=155)
class Meta:
model = Page
fields = []
def clean_slug(self):
slug = slugify(self.cleaned_data['slug'])
if not slug:
raise ValidationError(_("Slug must not be empty."))
return slug
class AddPageForm(BasePageForm):
source = forms.ModelChoiceField(
label=_(u'Page type'),
queryset=Page.objects.filter(
is_page_type=True,
publisher_is_draft=True,
),
required=False,
)
parent_node = forms.ModelChoiceField(
queryset=TreeNode.objects.all(),
required=False,
widget=forms.HiddenInput(),
)
class Meta:
model = Page
fields = ['source']
def __init__(self, *args, **kwargs):
super(AddPageForm, self).__init__(*args, **kwargs)
source_field = self.fields.get('source')
if not source_field or source_field.widget.is_hidden:
return
root_page = PageType.get_root_page(site=self._site)
if root_page:
# Set the choicefield's choices to the various page_types
descendants = root_page.get_descendant_pages().filter(is_page_type=True)
titles = Title.objects.filter(page__in=descendants, language=self._language)
choices = [('', '---------')]
choices.extend((title.page_id, title.title) for title in titles)
source_field.choices = choices
else:
choices = []
if len(choices) < 2:
source_field.widget = forms.HiddenInput()
def clean(self):
data = self.cleaned_data
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
parent_node = data.get('parent_node')
if parent_node:
slug = data['slug']
parent_path = parent_node.item.get_path(self._language)
path = u'%s/%s' % (parent_path, slug) if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def clean_parent_node(self):
parent_node = self.cleaned_data.get('parent_node')
if parent_node and parent_node.site_id != self._site.pk:
raise ValidationError("Site doesn't match the parent's page site")
return parent_node
def create_translation(self, page):
data = self.cleaned_data
title_kwargs = {
'page': page,
'language': self._language,
'slug': data['slug'],
'path': data['path'],
'title': data['title'],
}
if 'menu_title' in data:
title_kwargs['menu_title'] = data['menu_title']
if 'page_title' in data:
title_kwargs['page_title'] = data['page_title']
if 'meta_description' in data:
title_kwargs['meta_description'] = data['meta_description']
return api.create_title(**title_kwargs)
def from_source(self, source, parent=None):
new_page = source.copy(
site=self._site,
parent_node=parent,
language=self._language,
translations=False,
permissions=False,
extensions=False,
)
new_page.update(is_page_type=False, in_navigation=True)
return new_page
def get_template(self):
return Page.TEMPLATE_DEFAULT
def save(self, *args, **kwargs):
source = self.cleaned_data.get('source')
parent = self.cleaned_data.get('parent_node')
if source:
new_page = self.from_source(source, parent=parent)
for lang in source.get_languages():
source._copy_contents(new_page, lang)
else:
new_page = super(AddPageForm, self).save(commit=False)
new_page.template = self.get_template()
new_page.set_tree_node(self._site, target=parent, position='last-child')
new_page.save()
translation = self.create_translation(new_page)
if source:
extension_pool.copy_extensions(
source_page=source,
target_page=new_page,
languages=[translation.language],
)
is_first = not (
TreeNode
.objects
.get_for_site(self._site)
.exclude(pk=new_page.node_id)
.exists()
)
new_page.rescan_placeholders()
if is_first and not new_page.is_page_type:
# its the first page. publish it right away
new_page.publish(translation.language)
new_page.set_as_homepage(self._user)
return new_page
class AddPageTypeForm(AddPageForm):
menu_title = None
meta_description = None
page_title = None
source = forms.ModelChoiceField(
queryset=Page.objects.drafts(),
required=False,
widget=forms.HiddenInput(),
)
def get_or_create_root(self):
"""
Creates the root node used to store all page types
for the current site if it doesn't exist.
"""
root_page = PageType.get_root_page(site=self._site)
if not root_page:
root_page = Page(
publisher_is_draft=True,
in_navigation=False,
is_page_type=True,
)
root_page.set_tree_node(self._site)
root_page.save()
if not root_page.has_translation(self._language):
api.create_title(
language=self._language,
title=ugettext('Page Types'),
page=root_page,
slug=PAGE_TYPES_ID,
path=PAGE_TYPES_ID,
)
return root_page.node
def clean_parent_node(self):
parent_node = super(AddPageTypeForm, self).clean_parent_node()
if parent_node and not parent_node.item.is_page_type:
raise ValidationError("Parent has to be a page type.")
if not parent_node:
# parent was not explicitly selected.
# fallback to the page types root
parent_node = self.get_or_create_root()
return parent_node
def from_source(self, source, parent=None):
new_page = source.copy(
site=self._site,
parent_node=parent,
language=self._language,
translations=False,
permissions=False,
extensions=False,
)
new_page.update(is_page_type=True, in_navigation=False)
return new_page
def save(self, *args, **kwargs):
new_page = super(AddPageTypeForm, self).save(*args, **kwargs)
if not self.cleaned_data.get('source'):
# User has created a page-type via "Add page"
# instead of from another page.
new_page.update(
draft_only=True,
is_page_type=True,
in_navigation=False,
)
return new_page
class DuplicatePageForm(AddPageForm):
source = forms.ModelChoiceField(
queryset=Page.objects.drafts(),
required=True,
widget=forms.HiddenInput(),
)
class ChangePageForm(BasePageForm):
translation_fields = (
'slug',
'title',
'meta_description',
'menu_title',
'page_title',
)
def __init__(self, *args, **kwargs):
super(ChangePageForm, self).__init__(*args, **kwargs)
title_obj = self.instance.get_title_obj(
language=self._language,
fallback=False,
force_reload=True,
)
for field in self.translation_fields:
if field in self.fields:
self.fields[field].initial = getattr(title_obj, field)
def clean(self):
data = super(ChangePageForm, self).clean()
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
page = self.instance
if page.is_home:
data['path'] = ''
return data
if 'slug' not in self.fields:
# the {% edit_title_fields %} template tag
# allows users to edit specific fields for a translation.
# as a result, slug might not always be there.
return data
if page.parent_page:
slug = data['slug']
parent_path = page.parent_page.get_path(self._language)
path = u'%s/%s' % (parent_path, slug) if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
exclude_page=page,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def save(self, commit=True):
data = self.cleaned_data
cms_page = super(ChangePageForm, self).save(commit=False)
translation_data = {field: data[field]
for field in self.translation_fields if field in data}
if 'path' in data:
# this field is managed manually
translation_data['path'] = data['path']
update_count = cms_page.update_translations(
self._language,
publisher_state=PUBLISHER_STATE_DIRTY,
**translation_data
)
if self._language in cms_page.title_cache:
del cms_page.title_cache[self._language]
if update_count == 0:
api.create_title(language=self._language, page=cms_page, **translation_data)
else:
cms_page._update_title_path_recursive(self._language)
return cms_page
class PublicationDatesForm(forms.ModelForm):
class Meta:
model = Page
fields = ['publication_date', 'publication_end_date']
class AdvancedSettingsForm(forms.ModelForm):
from cms.forms.fields import PageSmartLinkField
_user = None
_site = None
_language = None
application_urls = forms.ChoiceField(label=_('Application'),
choices=(), required=False,
help_text=_('Hook application to this page.'))
overwrite_url = forms.CharField(label=_('Overwrite URL'), max_length=255, required=False,
help_text=_('Keep this field empty if standard path should be used.'))
xframe_options = forms.ChoiceField(
choices=Page._meta.get_field('xframe_options').choices,
label=_('X Frame Options'),
help_text=_('Whether this page can be embedded in other pages or websites'),
initial=Page._meta.get_field('xframe_options').default,
required=False
)
redirect = PageSmartLinkField(label=_('Redirect'), required=False,
help_text=_('Redirects to this URL.'),
placeholder_text=_('Start typing...'),
ajax_view='admin:cms_page_get_published_pagelist'
)
# This is really a 'fake' field which does not correspond to any Page attribute
# But creates a stub field to be populate by js
application_configs = forms.CharField(
label=_('Application configurations'),
required=False,
widget=ApplicationConfigSelect,
)
fieldsets = (
(None, {
'fields': ('overwrite_url', 'redirect'),
}),
(_('Language independent options'), {
'fields': ('template', 'reverse_id', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', 'application_configs',
'xframe_options',)
})
)
class Meta:
model = Page
fields = [
'template', 'reverse_id', 'overwrite_url', 'redirect', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', "xframe_options",
]
def __init__(self, *args, **kwargs):
super(AdvancedSettingsForm, self).__init__(*args, **kwargs)
self.title_obj = self.instance.get_title_obj(
language=self._language,
fallback=False,
force_reload=True,
)
if 'navigation_extenders' in self.fields:
navigation_extenders = self.get_navigation_extenders()
self.fields['navigation_extenders'].widget = forms.Select(
{}, [('', "---------")] + navigation_extenders)
if 'application_urls' in self.fields:
# Prepare a dict mapping the apps by class name ('PollApp') to
# their app_name attribute ('polls'), if any.
app_namespaces = {}
app_configs = {}
for hook in apphook_pool.get_apphooks():
app = apphook_pool.get_apphook(hook[0])
if app.app_name:
app_namespaces[hook[0]] = app.app_name
if app.app_config:
app_configs[hook[0]] = app
self.fields['application_urls'].widget = AppHookSelect(
attrs={'id': 'application_urls'},
app_namespaces=app_namespaces
)
self.fields['application_urls'].choices = [('', "---------")] + apphook_pool.get_apphooks()
page_data = self.data if self.data else self.initial
if app_configs:
self.fields['application_configs'].widget = ApplicationConfigSelect(
attrs={'id': 'application_configs'},
app_configs=app_configs,
)
if page_data.get('application_urls', False) and page_data['application_urls'] in app_configs:
configs = app_configs[page_data['application_urls']].get_configs()
self.fields['application_configs'].widget.choices = [(config.pk, force_text(config)) for config in configs]
try:
config = configs.get(namespace=self.initial['application_namespace'])
self.fields['application_configs'].initial = config.pk
except ObjectDoesNotExist:
# Provided apphook configuration doesn't exist (anymore),
# just skip it
# The user will choose another value anyway
pass
if 'redirect' in self.fields:
self.fields['redirect'].widget.language = self._language
self.fields['redirect'].initial = self.title_obj.redirect
if 'overwrite_url' in self.fields and self.title_obj.has_url_overwrite:
self.fields['overwrite_url'].initial = self.title_obj.path
def get_apphooks(self):
for hook in apphook_pool.get_apphooks():
yield (hook[0], apphook_pool.get_apphook(hook[0]))
def get_apphooks_with_config(self):
return {key: app for key, app in self.get_apphooks() if app.app_config}
def get_navigation_extenders(self):
return menu_pool.get_menus_by_attribute("cms_enabled", True)
def _check_unique_namespace_instance(self, namespace):
return Page.objects.drafts().on_site(self._site).filter(
application_namespace=namespace
).exclude(pk=self.instance.pk).exists()
def clean(self):
cleaned_data = super(AdvancedSettingsForm, self).clean()
if self._errors:
# Fail fast if there's errors in the form
return cleaned_data
# Language has been validated already
# so we know it exists.
language_name = get_language_object(
self._language,
site_id=self._site.pk,
)['name']
if not self.title_obj.slug:
# This covers all cases where users try to edit
# page advanced settings without setting a title slug
# for page titles that already exist.
message = _("Please set the %(language)s slug "
"before editing its advanced settings.")
raise ValidationError(message % {'language': language_name})
if 'reverse_id' in self.fields:
reverse_id = cleaned_data['reverse_id']
if reverse_id:
lookup = Page.objects.drafts().on_site(self._site).filter(reverse_id=reverse_id)
if lookup.exclude(pk=self.instance.pk).exists():
self._errors['reverse_id'] = self.error_class(
[_('A page with this reverse URL id exists already.')])
apphook = cleaned_data.get('application_urls', None)
# The field 'application_namespace' is a misnomer. It should be
# 'instance_namespace'.
instance_namespace = cleaned_data.get('application_namespace', None)
application_config = cleaned_data.get('application_configs', None)
if apphook:
apphooks_with_config = self.get_apphooks_with_config()
# application_config wins over application_namespace
if apphook in apphooks_with_config and application_config:
# the value of the application config namespace is saved in
# the 'usual' namespace field to be backward compatible
# with existing apphooks
try:
appconfig_pk = forms.IntegerField(required=True).to_python(application_config)
except ValidationError:
self._errors['application_configs'] = ErrorList([
_('Invalid application config value')
])
return self.cleaned_data
try:
config = apphooks_with_config[apphook].get_configs().get(pk=appconfig_pk)
except ObjectDoesNotExist:
self._errors['application_configs'] = ErrorList([
_('Invalid application config value')
])
return self.cleaned_data
if self._check_unique_namespace_instance(config.namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_configs'] = ErrorList([
_('An application instance using this configuration already exists.')
])
else:
self.cleaned_data['application_namespace'] = config.namespace
else:
if instance_namespace:
if self._check_unique_namespace_instance(instance_namespace):
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# The attribute on the apps 'app_name' is a misnomer, it should be
# 'application_namespace'.
application_namespace = apphook_pool.get_apphook(apphook).app_name
if application_namespace and not instance_namespace:
if self._check_unique_namespace_instance(application_namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# OK, there are zero instances of THIS app that use the
# default instance namespace, so, since the user didn't
# provide one, we'll use the default. NOTE: The following
# line is really setting the "instance namespace" of the
# new app to the app’s "application namespace", which is
# the default instance namespace.
self.cleaned_data['application_namespace'] = application_namespace
if instance_namespace and not apphook:
self.cleaned_data['application_namespace'] = None
if application_config and not apphook:
self.cleaned_data['application_configs'] = None
return self.cleaned_data
def clean_xframe_options(self):
if 'xframe_options' not in self.fields:
return # nothing to do, field isn't present
xframe_options = self.cleaned_data['xframe_options']
if xframe_options == '':
return Page._meta.get_field('xframe_options').default
return xframe_options
def clean_overwrite_url(self):
path_override = self.cleaned_data.get('overwrite_url')
if path_override:
path = path_override.strip('/')
else:
path = self.instance.get_path_for_slug(self.title_obj.slug, self._language)
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
exclude_page=self.instance,
)
self.cleaned_data['path'] = path
return path_override
def has_changed_apphooks(self):
changed_data = self.changed_data
if 'application_urls' in changed_data:
return True
return 'application_namespace' in changed_data
def update_apphooks(self):
# User has changed the apphooks on the page.
# Update the public version of the page to reflect this change immediately.
public_id = self.instance.publisher_public_id
self._meta.model.objects.filter(pk=public_id).update(
application_urls=self.instance.application_urls,
application_namespace=(self.instance.application_namespace or None),
)
# Connects the apphook restart handler to the request finished signal
set_restart_trigger()
def save(self, *args, **kwargs):
data = self.cleaned_data
page = super(AdvancedSettingsForm, self).save(*args, **kwargs)
page.update_translations(
self._language,
path=data['path'],
redirect=(data.get('redirect') or None),
publisher_state=PUBLISHER_STATE_DIRTY,
has_url_overwrite=bool(data.get('overwrite_url')),
)
is_draft_and_has_public = page.publisher_is_draft and page.publisher_public_id
if is_draft_and_has_public and self.has_changed_apphooks():
self.update_apphooks()
return page
class PagePermissionForm(forms.ModelForm):
class Meta:
model = Page
fields = ['login_required', 'limit_visibility_in_menu']
class PageTreeForm(forms.Form):
position = forms.IntegerField(initial=0, required=True)
target = forms.ModelChoiceField(queryset=Page.objects.none(), required=False)
def __init__(self, *args, **kwargs):
self.page = kwargs.pop('page')
self._site = kwargs.pop('site', Site.objects.get_current())
super(PageTreeForm, self).__init__(*args, **kwargs)
self.fields['target'].queryset = Page.objects.drafts().filter(
node__site=self._site,
is_page_type=self.page.is_page_type,
)
def get_root_nodes(self):
# TODO: this needs to avoid using the pages accessor directly
nodes = TreeNode.get_root_nodes()
return nodes.exclude(cms_pages__is_page_type=not(self.page.is_page_type))
def get_tree_options(self):
position = self.cleaned_data['position']
target_page = self.cleaned_data.get('target')
parent_node = target_page.node if target_page else None
if parent_node:
return self._get_tree_options_for_parent(parent_node, position)
return self._get_tree_options_for_root(position)
def _get_tree_options_for_root(self, position):
siblings = self.get_root_nodes().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
# Add the node as the last root node,
# relative to the current site.
return (siblings.reverse()[0], 'right')
return (target_node, 'left')
def _get_tree_options_for_parent(self, parent_node, position):
if position == 0:
return (parent_node, 'first-child')
siblings = parent_node.get_children().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
# Add the node to be the parent's first child
return (parent_node, 'last-child')
return (target_node, 'left')
class MovePageForm(PageTreeForm):
def get_tree_options(self):
options = super(MovePageForm, self).get_tree_options()
target_node, target_node_position = options
if target_node_position != 'left':
return (target_node, target_node_position)
node = self.page.node
node_is_first = node.path < target_node.path
if node_is_first and node.is_sibling_of(target_node):
# The node being moved appears before the target node
# and is a sibling of the target node.
# The user is moving from left to right.
target_node_position = 'right'
elif node_is_first:
# The node being moved appears before the target node
# but is not a sibling of the target node.
# The user is moving from right to left.
target_node_position = 'left'
else:
# The node being moved appears after the target node.
# The user is moving from right to left.
target_node_position = 'left'
return (target_node, target_node_position)
def move_page(self):
self.page.move_page(*self.get_tree_options())
class CopyPageForm(PageTreeForm):
source_site = forms.ModelChoiceField(queryset=Site.objects.all(), required=True)
copy_permissions = forms.BooleanField(initial=False, required=False)
def copy_page(self):
target, position = self.get_tree_options()
copy_permissions = self.cleaned_data.get('copy_permissions', False)
new_page = self.page.copy_with_descendants(
target_node=target,
position=position,
copy_permissions=copy_permissions,
target_site=self._site,
)
return new_page
def _get_tree_options_for_root(self, position):
try:
return super(CopyPageForm, self)._get_tree_options_for_root(position)
except IndexError:
# The user is copying a page to a site with no pages
# Add the node as the last root node.
siblings = self.get_root_nodes().reverse()
return (siblings[0], 'right')
class ChangeListForm(forms.Form):
BOOLEAN_CHOICES = (
('', _('All')),
('1', _('Yes')),
('0', _('No')),
)
q = forms.CharField(required=False, widget=forms.HiddenInput())
in_navigation = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES)
template = forms.ChoiceField(required=False)
changed_by = forms.ChoiceField(required=False)
soft_root = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES)
def __init__(self, *args, **kwargs):
super(ChangeListForm, self).__init__(*args, **kwargs)
self.fields['changed_by'].choices = get_page_changed_by_filter_choices()
self.fields['template'].choices = get_page_template_filter_choices()
def is_filtered(self):
data = self.cleaned_data
if self.cleaned_data.get('q'):
return True
return any(bool(data.get(field.name)) for field in self.visible_fields())
def get_filter_items(self):
for field in self.visible_fields():
value = self.cleaned_data.get(field.name)
if value:
yield (field.name, value)
def run_filters(self, queryset):
for field, value in self.get_filter_items():
query = {'{}__exact'.format(field): value}
queryset = queryset.filter(**query)
return queryset
class BasePermissionAdminForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(BasePermissionAdminForm, self).__init__(*args, **kwargs)
permission_fields = self._meta.model.get_all_permissions()
for field in permission_fields:
if field not in self.base_fields:
setattr(self.instance, field, False)
class PagePermissionInlineAdminForm(BasePermissionAdminForm):
"""
    Page permission inline admin form used in the inline admin. Required because
    the user and group querysets must be restricted: a user can only see users at
    the same level or below in the chosen page tree, plus users they created
    themselves, provided those users are not assigned to a higher page level than
    the current user.
"""
page = forms.ModelChoiceField(
queryset=Page.objects.all(),
label=_('user'),
widget=HiddenInput(),
required=True,
)
def __init__(self, *args, **kwargs):
super(PagePermissionInlineAdminForm, self).__init__(*args, **kwargs)
user = get_current_user() # current user from threadlocals
site = Site.objects.get_current()
sub_users = get_subordinate_users(user, site)
limit_choices = True
use_raw_id = False
# Unfortunately, if there are > 500 users in the system, non-superusers
# won't see any benefit here because if we ask Django to put all the
# user PKs in limit_choices_to in the query string of the popup we're
# in danger of causing 414 errors so we fall back to the normal input
# widget.
if get_cms_setting('RAW_ID_USERS'):
if sub_users.count() < 500:
# If there aren't too many users, proceed as normal and use a
# raw id field with limit_choices_to
limit_choices = True
use_raw_id = True
elif get_user_permission_level(user, site) == ROOT_USER_LEVEL:
# If there are enough choices to possibly cause a 414 request
# URI too large error, we only proceed with the raw id field if
# the user is a superuser & thus can legitimately circumvent
# the limit_choices_to condition.
limit_choices = False
use_raw_id = True
# We don't use the fancy custom widget if the admin form wants to use a
# raw id field for the user
if use_raw_id:
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
# This check will be False if the number of users in the system
# is less than the threshold set by the RAW_ID_USERS setting.
if isinstance(self.fields['user'].widget, ForeignKeyRawIdWidget):
# We can't set a queryset on a raw id lookup, but we can use
# the fact that it respects the limit_choices_to parameter.
if limit_choices:
self.fields['user'].widget.rel.limit_choices_to = dict(
id__in=list(sub_users.values_list('pk', flat=True))
)
else:
self.fields['user'].widget = UserSelectAdminWidget()
self.fields['user'].queryset = sub_users
self.fields['user'].widget.user = user # assign current user
self.fields['group'].queryset = get_subordinate_groups(user, site)
class Meta:
fields = [
'user',
'group',
'can_add',
'can_change',
'can_delete',
'can_publish',
'can_change_advanced_settings',
'can_change_permissions',
'can_move_page',
'grant_on',
]
model = PagePermission
class ViewRestrictionInlineAdminForm(BasePermissionAdminForm):
page = forms.ModelChoiceField(
queryset=Page.objects.all(),
label=_('user'),
widget=HiddenInput(),
required=True,
)
can_view = forms.BooleanField(
label=_('can_view'),
widget=HiddenInput(),
initial=True,
)
class Meta:
fields = [
'user',
'group',
'grant_on',
'can_view',
]
model = PagePermission
def clean_can_view(self):
return True
class GlobalPagePermissionAdminForm(BasePermissionAdminForm):
class Meta:
fields = [
'user',
'group',
'can_add',
'can_change',
'can_delete',
'can_publish',
'can_change_advanced_settings',
'can_change_permissions',
'can_move_page',
'can_view',
'sites',
]
model = GlobalPagePermission
class GenericCmsPermissionForm(forms.ModelForm):
    """Generic form for User & Group permissions in the CMS.
    """
_current_user = None
can_add_page = forms.BooleanField(label=_('Add'), required=False, initial=True)
can_change_page = forms.BooleanField(label=_('Change'), required=False, initial=True)
can_delete_page = forms.BooleanField(label=_('Delete'), required=False)
# pageuser is for pageuser & group - they are combined together,
# and read out from PageUser model
can_add_pageuser = forms.BooleanField(label=_('Add'), required=False)
can_change_pageuser = forms.BooleanField(label=_('Change'), required=False)
can_delete_pageuser = forms.BooleanField(label=_('Delete'), required=False)
can_add_pagepermission = forms.BooleanField(label=_('Add'), required=False)
can_change_pagepermission = forms.BooleanField(label=_('Change'), required=False)
can_delete_pagepermission = forms.BooleanField(label=_('Delete'), required=False)
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance')
initial = kwargs.get('initial') or {}
if instance:
initial = initial or {}
initial.update(self.populate_initials(instance))
kwargs['initial'] = initial
super(GenericCmsPermissionForm, self).__init__(*args, **kwargs)
def clean(self):
data = super(GenericCmsPermissionForm, self).clean()
# Validate Page options
if not data.get('can_change_page'):
if data.get('can_add_page'):
message = _("Users can't create a page without permissions "
"to change the created page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_page'):
message = _("Users can't delete a page without permissions "
"to change the page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_add_pagepermission'):
message = _("Users can't set page permissions without permissions "
"to change a page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_pagepermission'):
message = _("Users can't delete page permissions without permissions "
"to change a page. Edit permissions required.")
raise ValidationError(message)
# Validate PagePermission options
if not data.get('can_change_pagepermission'):
if data.get('can_add_pagepermission'):
message = _("Users can't create page permissions without permissions "
"to change the created permission. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_pagepermission'):
message = _("Users can't delete page permissions without permissions "
"to change permissions. Edit permissions required.")
raise ValidationError(message)
def populate_initials(self, obj):
"""Read out permissions from permission system.
"""
initials = {}
permission_accessor = get_permission_accessor(obj)
for model in (Page, PageUser, PagePermission):
name = model.__name__.lower()
content_type = ContentType.objects.get_for_model(model)
permissions = permission_accessor.filter(content_type=content_type).values_list('codename', flat=True)
for key in ('add', 'change', 'delete'):
codename = get_permission_codename(key, model._meta)
initials['can_%s_%s' % (key, name)] = codename in permissions
return initials
def save(self, commit=True):
instance = super(GenericCmsPermissionForm, self).save(commit=False)
instance.save()
save_permissions(self.cleaned_data, instance)
return instance
class PageUserAddForm(forms.ModelForm):
_current_user = None
user = forms.ModelChoiceField(queryset=User.objects.none())
class Meta:
fields = ['user']
model = PageUser
def __init__(self, *args, **kwargs):
super(PageUserAddForm, self).__init__(*args, **kwargs)
self.fields['user'].queryset = self.get_subordinates()
def get_subordinates(self):
subordinates = get_subordinate_users(self._current_user, self._current_site)
return subordinates.filter(pageuser__isnull=True)
def save(self, commit=True):
user = self.cleaned_data['user']
instance = super(PageUserAddForm, self).save(commit=False)
instance.created_by = self._current_user
for field in user._meta.fields:
# assign all the fields - we can do this, because object is
# subclassing User (one to one relation)
value = getattr(user, field.name)
setattr(instance, field.name, value)
if commit:
instance.save()
return instance
class PageUserChangeForm(UserChangeForm):
_current_user = None
class Meta:
fields = '__all__'
model = PageUser
def __init__(self, *args, **kwargs):
super(PageUserChangeForm, self).__init__(*args, **kwargs)
if not self._current_user.is_superuser:
# Limit permissions to include only
# the permissions available to the manager.
permissions = self.get_available_permissions()
self.fields['user_permissions'].queryset = permissions
# Limit groups to include only those where
# the manager is a member.
self.fields['groups'].queryset = self.get_available_groups()
def get_available_permissions(self):
permissions = self._current_user.get_all_permissions()
permission_codes = (perm.rpartition('.')[-1] for perm in permissions)
return Permission.objects.filter(codename__in=permission_codes)
def get_available_groups(self):
return self._current_user.groups.all()
class PageUserGroupForm(GenericCmsPermissionForm):
class Meta:
model = PageUserGroup
fields = ('name', )
def save(self, commit=True):
if not self.instance.pk:
self.instance.created_by = self._current_user
return super(PageUserGroupForm, self).save(commit=commit)
class PluginAddValidationForm(forms.Form):
placeholder_id = forms.ModelChoiceField(
queryset=Placeholder.objects.all(),
required=True,
)
plugin_language = forms.CharField(required=True)
plugin_parent = forms.ModelChoiceField(
CMSPlugin.objects.all(),
required=False,
)
plugin_type = forms.CharField(required=True)
def clean_plugin_type(self):
plugin_type = self.cleaned_data['plugin_type']
try:
plugin_pool.get_plugin(plugin_type)
except KeyError:
message = ugettext("Invalid plugin type '%s'") % plugin_type
raise ValidationError(message)
return plugin_type
def clean(self):
from cms.utils.plugins import has_reached_plugin_limit
data = self.cleaned_data
if self.errors:
return data
language = data['plugin_language']
placeholder = data['placeholder_id']
parent_plugin = data.get('plugin_parent')
if language not in get_language_list():
message = ugettext("Language must be set to a supported language!")
self.add_error('plugin_language', message)
return self.cleaned_data
if parent_plugin:
if parent_plugin.language != language:
message = ugettext("Parent plugin language must be same as language!")
self.add_error('plugin_language', message)
return self.cleaned_data
if parent_plugin.placeholder_id != placeholder.pk:
message = ugettext("Parent plugin placeholder must be same as placeholder!")
self.add_error('placeholder_id', message)
return self.cleaned_data
page = placeholder.page
template = page.get_template() if page else None
try:
has_reached_plugin_limit(
placeholder,
data['plugin_type'],
language,
template=template
)
except PluginLimitReached as error:
self.add_error(None, force_text(error))
return self.cleaned_data
class RequestToolbarForm(forms.Form):
obj_id = forms.CharField(required=False)
obj_type = forms.CharField(required=False)
cms_path = forms.CharField(required=False)
def clean(self):
data = self.cleaned_data
obj_id = data.get('obj_id')
obj_type = data.get('obj_type')
if not bool(obj_id or obj_type):
return data
if (obj_id and not obj_type) or (obj_type and not obj_id):
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
app, sep, model = obj_type.rpartition('.')
try:
model_class = apps.get_model(app_label=app, model_name=model)
except LookupError:
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
try:
generic_obj = model_class.objects.get(pk=obj_id)
except model_class.DoesNotExist:
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
else:
data['attached_obj'] = generic_obj
return data
def clean_cms_path(self):
path = self.cleaned_data.get('cms_path')
if path:
validate_relative_url(path)
return path
|
czpython/django-cms
|
cms/admin/forms.py
|
Python
|
bsd-3-clause
| 48,437 | 0.001755 |
tests=[
("python","UnitTestBuildComposite.py",{}),
("python","UnitTestScreenComposite.py",{}),
("python","UnitTestAnalyzeComposite.py",{}),
]
for dir in ['Cluster','Composite','Data','DecTree','Descriptors','FeatureSelect','InfoTheory','KNN','ModelPackage','NaiveBayes','Neural','SLT']:
tests.append(('python','test_list.py',{'dir':dir}))
longTests=[
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
rdkit/rdkit-orig
|
rdkit/ML/test_list.py
|
Python
|
bsd-3-clause
| 523 | 0.059273 |
def passive(cls):
passive_note = '''
.. note:: This object is a "passive" object. Any changes you make to it will not be reflected in the core and vice-versa. If you wish to update a core version of this object you should use the appropriate API.
'''
if hasattr(cls, "__doc__") and cls.__doc__:
cls.__doc__ += passive_note
else:
cls.__doc__ = passive_note
return cls
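# Minimal usage sketch (illustrative only; the class below is hypothetical and
# not part of the original module):
if __name__ == "__main__":
    @passive
    class _ExampleSnapshot:
        """A detached copy of some core object."""
    # The decorator appends the "passive object" note to the class docstring.
    assert "passive" in _ExampleSnapshot.__doc__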
|
Vector35/binaryninja-api
|
python/decorators.py
|
Python
|
mit
| 380 | 0.026316 |
import sublime, sublime_plugin
import functools
import os
import shutil
class IntellijCopyCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
selection = v.sel();
if len(selection) == 0:
v.run_command('expand_selection', { "to": "line" })
v.run_command('copy')
class IntellijCutCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
selection = v.sel();
if len(selection) == 0:
v.run_command('expand_selection', { "to": "line" })
v.run_command('cut')
class IntellijRenameFileCommand(sublime_plugin.WindowCommand):
def run(self):
window = self.window
view = window.active_view()
filename = view.file_name()
if filename == None:
return
branch, leaf = os.path.split(filename)
v = window.show_input_panel("New Name:", leaf, functools.partial(self.on_done, filename, branch, view), None, None)
name, ext = os.path.splitext(leaf)
v.sel().clear()
v.sel().add(sublime.Region(0, len(name)))
def on_done(self, old, branch, view, leaf):
new = os.path.join(branch, leaf)
try:
os.rename(old, new)
            print('finding open file [' + old + ']')
# v = self.window.find_open_file(old)
if view != None:
view.retarget(new)
except:
sublime.status_message("Unable to rename")
class IntellijCopyFileCommand(sublime_plugin.WindowCommand):
def run(self):
window = self.window
view = window.active_view()
filename = view.file_name()
if filename == None:
return
branch, leaf = os.path.split(filename)
v = window.show_input_panel("New File Name:", filename, functools.partial(self.on_done, filename), None, None)
name, ext = os.path.splitext(leaf)
v.sel().clear()
start_index = len(filename) - len(leaf)
v.sel().add(sublime.Region(start_index, start_index + len(name)))
    def on_done(self, src_path, dest_path):
        try:
            shutil.copyfile(src_path, dest_path)
            self.window.open_file(dest_path)
        except:
            sublime.status_message("Unable to copy")
|
uboness/sublime-plugins
|
Intellij/Intellij.py
|
Python
|
apache-2.0
| 2,360 | 0.007627 |
from typing import List
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
red, white, blue = 0, 0, len(nums) - 1
while white <= blue:
if nums[white] == 0: # red
nums[red], nums[white] = nums[white], nums[red]
red += 1
white += 1
elif nums[white] == 1: # white
white += 1
else: # blue
nums[blue], nums[white] = nums[white], nums[blue]
blue -= 1
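# Minimal usage sketch (illustrative only; not part of the original solution):
if __name__ == "__main__":
    colors = [2, 0, 2, 1, 1, 0]
    Solution().sortColors(colors)
    assert colors == [0, 0, 1, 1, 2, 2]  # single-pass Dutch national flag partition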
|
tanchao/algo
|
leetcode/py/75_sort_colors.py
|
Python
|
mit
| 579 | 0.008636 |
import numpy as np;
np.set_printoptions(linewidth=40, precision=5, suppress=True)
import pandas as pd; pd.options.display.max_rows=80;pd.options.display.expand_frame_repr=False;pd.options.display.max_columns=20
import pylab as plt;
import os; home=os.path.expanduser('~') +'/'
import sys;sys.path.insert(1,'/home/arya/workspace/bio/')
from CLEAR.Libs.Markov import Markov
import Utils.Util as utl
import Utils.Simulation as Simulation
import matplotlib as mpl
import seaborn as sns
import Utils.Plots as pplt
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 56});
mpl.rc('text', usetex=True)
sns.set_style("whitegrid", {"grid.color": "1", 'axes.linewidth': .5, "grid.linewidth": ".09"})
subptitle = list('ABCDEFGHI')
def createSelectionSimulations(s=0.1,maxGen=100):
def runSim(i):
try:
sim = Simulation.Simulation(maxGeneration=maxGen, generationStep=1, s=s, foldInitialAFs=False,
ModelName='Markov', L=1000, numReplicates=1,
makeSureSelectedSiteDontGetLost=False, experimentID=0)
x=sim.X[:,sim.siteUnderSelection,0]
except:
x=np.zeros(sim.X.shape[0])
x[:]=None
if not i%1000: print s, i
return x
X=map(runSim,range(10000))
a=pd.DataFrame(X)
a.to_pickle(utl.outpath + 'markov/T{}.S{:03.0f}.obs.df'.format(maxGen, s * 1000))
print 'done!'
def plotNull(subp, nu0=0.005, fontsize=5):
obs = pd.read_pickle(utl.outpath + 'markov/neutral.obs.{}.pkl'.format(nu0))
T = Markov.computeTransition(0, N=1000)
dfplt = pd.concat([pd.Series({'scale': 10, 'xlim': [0.0, 0.01], 'ylim': [0, 1]}, name=(0.005, 1)),
pd.Series({'scale': 30, 'xlim': [0.06, 0.14], 'ylim': [0, 0.15]}, name=(0.1, 1)),
pd.Series({'scale': 30, 'xlim': [0.0, 0.015], 'ylim': [0, 0.3]}, name=(0.005, 10)),
pd.Series({'scale': 45, 'xlim': [0.0, 0.2], 'ylim': [0, 0.025]}, name=(0.1, 10)),
pd.Series({'scale':30, 'xlim':[0.0,0.03],'ylim': [0,0.2]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
xx=np.arange(0,1,0.00001)
N=200; tau=1;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx);
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.subplot(3, 3, subp[0]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[1].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[1].flatten(),bins=500,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*4)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0)$')
tau=10
for _ in range(9):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[10].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[10].flatten(),bins=100,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*20)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
tau=100
for _ in range(90):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2]);
brownian.plot(color='r');
markov.plot(color='b')
o=pd.Series(obs.X[100].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[100].flatten(),bins=30,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*60)
o.name = 'Observation';
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
if subp[2] == 3:
plt.legend(loc='center right', fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[2] - 1]), fontsize=fontsize)
def plotAlternative(subp, s=0.1, fontsize=5):
nu0=0.005
obs = pd.read_pickle(utl.outpath + 'markov/T100.S{:03.0f}.obs.df'.format(s * 1000))
T = Markov.computeTransition(s, 1000)
dfplt= pd.concat([pd.Series({'scale':10, 'xlim':[0.0,0.01],'ylim': [0,0.2]},name=(0.005,1)),pd.Series({'scale':30, 'xlim':[0.06,0.14],'ylim': [0,0.15]},name=(0.1,1)),
pd.Series({'scale':30, 'xlim':[0.0,0.015],'ylim': [0,0.15]},name=(0.005,10)),pd.Series({'scale':45, 'xlim':[0.0,0.2],'ylim': [0,0.025]},name=(0.1,10)),
pd.Series({'scale':30, 'xlim':[0.0,1],'ylim': [0,0.01]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
plt.subplot(3, 3, subp[0])
tau=1
o=(obs[1].value_counts().sort_index()/obs.shape[0])
o.loc[0.0055]=0.1211
o.index=o.index-0.0005/2
markov.plot(color='b');
o.plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0,s)$')
plt.xlabel('$s$')
tau=10
for _ in range(9):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1])
markov.plot(color='b');
(obs[10].value_counts().sort_index() / obs.shape[0]).plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
tau=100
for _ in range(90):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2])
counts,limits=np.histogram(obs[100].values,bins=50,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/obs.shape[0]
o/=35
o.loc[0.0] = o.iloc[0]
o = o.sort_index()
o.iloc[1] = o.iloc[2]
# o=(obs[100].value_counts().sort_index()/obs.shape[0])
o.name = 'Observation';
o.plot(color='g');
markov.plot(color='b');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[2] - 1]), fontsize=fontsize)
if __name__ == '__main__':
# createNeutralSimulations()
# createSelectionSimulations(s=0.01)
# createSelectionSimulations(s=0.1)
reload(pplt)
dpi = 200;
fig = plt.figure(figsize=(6.2, 4), dpi=dpi);
pplt.setStyle(lw=1);
fontsize = 7
plotNull(range(1, 4), fontsize=fontsize);
plotNull(range(4, 7), 0.1, fontsize=fontsize);
plotAlternative(range(7, 10), fontsize=fontsize);
plt.tight_layout()
pplt.savefig('markovDists', dpi=dpi);
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
print 'Done'
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Plot/Markov.py
|
Python
|
mit
| 8,854 | 0.038288 |
"""
KINCluster is clustering like KIN.
release note:
- version 0.1.6
fix settings
update pipeline
delete unused arguments
fix convention by pylint
now logging
- version 0.1.5.5
fix using custom settings
support both moudle and dict
- version 0.1.5.4
Update tokenizer, remove stopwords eff
- version 0.1.5.3
now custom setting available.
see settings.py
- version 0.1.5.2
change item, extractor, pipeline module
now, pipeline.dress_item pass just item(extractor.dump)
fix prev versions error (too many value to unpack)
"""
__version__ = '0.1.6'
__all__ = ['KINCluster',
'Cluster', 'Extractor', 'Item', 'Pipeline',
'tokenizer', 'stopwords']
from KINCluster.KINCluster import KINCluster
from KINCluster.core.cluster import Cluster
from KINCluster.core.extractor import Extractor
from KINCluster.core.item import Item
from KINCluster.core.pipeline import Pipeline
from KINCluster.lib.tokenizer import tokenizer
from KINCluster.lib.stopwords import stopwords
|
memento7/KINCluster
|
KINCluster/__init__.py
|
Python
|
mit
| 1,038 | 0 |
import copy
import datetime
import logging
import math
import operator
import traceback
from collections import namedtuple
from typing import Any, Dict, Optional, Tuple
from pyparsing import (
CaselessKeyword,
Combine,
Forward,
Group,
Literal,
ParseException,
Regex,
Suppress,
Word,
alphanums,
alphas,
delimitedList,
dictOf,
)
from great_expectations.core.urn import ge_urn
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.exceptions import EvaluationParameterError
logger = logging.getLogger(__name__)
_epsilon = 1e-12
class EvaluationParameterParser:
"""
This Evaluation Parameter Parser uses pyparsing to provide a basic expression language capable of evaluating
parameters using values available only at run time.
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
The parser is modified from: https://github.com/pyparsing/pyparsing/blob/master/examples/fourFn.py
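    A short usage sketch follows the class definition below.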
"""
# map operator symbols to corresponding arithmetic operations
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": lambda a: int(a),
"round": round,
"sgn": lambda a: -1 if a < -_epsilon else 1 if a > _epsilon else 0,
"now": datetime.datetime.now,
"datetime": datetime.datetime,
"timedelta": datetime.timedelta,
}
def __init__(self):
self.exprStack = []
self._parser = None
def push_first(self, toks):
self.exprStack.append(toks[0])
def push_unary_minus(self, toks):
for t in toks:
if t == "-":
self.exprStack.append("unary -")
else:
break
def clear_stack(self):
del self.exprStack[:]
def get_parser(self):
self.clear_stack()
if not self._parser:
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?(?:\d+|\.\d+)(?:\.\d+)?(?:[eE][+-]?\d+)?")
ge_urn = Combine(
Literal("urn:great_expectations:")
+ Word(alphas, f"{alphanums}_$:?=%.&")
)
variable = Word(alphas, f"{alphanums}_$")
ident = ge_urn | variable
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# We will allow functions either to accept *only* keyword
# expressions or *only* non-keyword expressions
# define function keyword arguments
key = Word(f"{alphas}_") + Suppress("=")
# value = (fnumber | Word(alphanums))
value = expr
keyval = dictOf(key.setParseAction(self.push_first), value)
kwarglist = delimitedList(keyval)
# add parse action that replaces the function identifier with a (name, number of args, has_fn_kwargs) tuple
# 20211009 - JPC - Note that it's important that we consider kwarglist
# first as part of disabling backtracking for the function's arguments
fn_call = (ident + lpar + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), 0, False))
) | (
(ident + lpar - Group(expr_list) + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), len(t[0]), False))
)
^ (ident + lpar - Group(kwarglist) + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), len(t[0]), True))
)
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(self.push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(self.push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
# exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(self.push_first)[...]
term = factor + (multop + factor).setParseAction(self.push_first)[...]
expr <<= term + (addop + term).setParseAction(self.push_first)[...]
self._parser = expr
return self._parser
def evaluate_stack(self, s):
op, num_args, has_fn_kwargs = s.pop(), 0, False
if isinstance(op, tuple):
op, num_args, has_fn_kwargs = op
if op == "unary -":
return -self.evaluate_stack(s)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = self.evaluate_stack(s)
op1 = self.evaluate_stack(s)
return self.opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
# note: args are pushed onto the stack in reverse order
if has_fn_kwargs:
kwargs = dict()
for _ in range(num_args):
v = self.evaluate_stack(s)
k = s.pop()
kwargs.update({k: v})
return self.fn[op](**kwargs)
else:
args = reversed([self.evaluate_stack(s) for _ in range(num_args)])
return self.fn[op](*args)
else:
# try to evaluate as int first, then as float if int fails
# NOTE: JPC - 20200403 - Originally I considered returning the raw op here if parsing as float also
# fails, but I decided against it to instead require that the *entire* expression evaluates
# numerically UNLESS there is *exactly one* expression to substitute (see cases where len(L) == 1 in the
# parse_evaluation_parameter method.
try:
return int(op)
except ValueError:
return float(op)
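# A minimal sketch of how the class above is used on its own (illustrative only;
# the helper name below is hypothetical and not part of the original module):
def _example_evaluate_arithmetic(expression: str = "trunc(2 + 3 * 4 / 2)") -> int:
    ep_parser = EvaluationParameterParser()
    parser = ep_parser.get_parser()  # builds the grammar and clears the internal stack
    parser.parseString(expression, parseAll=True)  # parse actions fill ep_parser.exprStack
    # evaluate_stack consumes the postfix stack built during parsing; here it yields 8
    return ep_parser.evaluate_stack(ep_parser.exprStack)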
def build_evaluation_parameters(
expectation_args: dict,
evaluation_parameters: Optional[dict] = None,
interactive_evaluation: bool = True,
data_context=None,
) -> Tuple[dict, dict]:
"""Build a dictionary of parameters to evaluate, using the provided evaluation_parameters,
AND mutate expectation_args by removing any parameter values passed in as temporary values during
exploratory work.
"""
evaluation_args = copy.deepcopy(expectation_args)
substituted_parameters = {}
# Iterate over arguments, and replace $PARAMETER-defined args with their
# specified parameters.
for key, value in evaluation_args.items():
if isinstance(value, dict) and "$PARAMETER" in value:
# We do not even need to search for a value if we are not going to do interactive evaluation
if not interactive_evaluation:
continue
# First, check to see whether an argument was supplied at runtime
# If it was, use that one, but remove it from the stored config
param_key = f"$PARAMETER.{value['$PARAMETER']}"
if param_key in value:
evaluation_args[key] = evaluation_args[key][param_key]
del expectation_args[key][param_key]
# If not, try to parse the evaluation parameter and substitute, which will raise
# an exception if we do not have a value
else:
raw_value = value["$PARAMETER"]
parameter_value = parse_evaluation_parameter(
raw_value,
evaluation_parameters=evaluation_parameters,
data_context=data_context,
)
evaluation_args[key] = parameter_value
# Once we've substituted, we also track that we did so
substituted_parameters[key] = parameter_value
return evaluation_args, substituted_parameters
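# A minimal sketch of the substitution performed above (illustrative only; the
# argument and parameter names are hypothetical):
def _example_build_evaluation_parameters():
    args, substituted = build_evaluation_parameters(
        expectation_args={"min_value": {"$PARAMETER": "my_min"}, "column": "price"},
        evaluation_parameters={"my_min": 5},
    )
    # expected: args == {"min_value": 5, "column": "price"}, substituted == {"min_value": 5}
    return args, substituted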
expr = EvaluationParameterParser()
def find_evaluation_parameter_dependencies(parameter_expression):
"""Parse a parameter expression to identify dependencies including GE URNs.
Args:
parameter_expression: the parameter to parse
Returns:
a dictionary including:
- "urns": set of strings that are valid GE URN objects
- "other": set of non-GE URN strings that are required to evaluate the parameter expression
"""
expr = EvaluationParameterParser()
dependencies = {"urns": set(), "other": set()}
# Calling get_parser clears the stack
parser = expr.get_parser()
try:
_ = parser.parseString(parameter_expression, parseAll=True)
except ParseException as err:
raise EvaluationParameterError(
f"Unable to parse evaluation parameter: {str(err)} at line {err.line}, column {err.column}"
)
except AttributeError as err:
raise EvaluationParameterError(
f"Unable to parse evaluation parameter: {str(err)}"
)
for word in expr.exprStack:
if isinstance(word, (int, float)):
continue
if not isinstance(word, str):
# If we have a function that itself is a tuple (e.g. (trunc, 1))
continue
if word in expr.opn or word in expr.fn or word == "unary -":
# operations and functions
continue
# if this is parseable as a number, then we do not include it
try:
_ = float(word)
continue
except ValueError:
pass
try:
_ = ge_urn.parseString(word)
dependencies["urns"].add(word)
continue
except ParseException:
# This particular evaluation_parameter or operator is not a valid URN
pass
# If we got this far, it's a legitimate "other" evaluation parameter
dependencies["other"].add(word)
return dependencies
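# Illustrative sketch (editor addition; "min_value" is just an example name):
# plain variables land in "other", while any token parseable by ge_urn
# (strings beginning with "urn:great_expectations") is collected under "urns".
#
#     find_evaluation_parameter_dependencies("min_value + 10")
#     # -> {"urns": set(), "other": {"min_value"}}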
def parse_evaluation_parameter(
parameter_expression: str,
evaluation_parameters: Optional[Dict[str, Any]] = None,
data_context: Optional[Any] = None, # Cannot type 'DataContext' due to import cycle
) -> Any:
"""Use the provided evaluation_parameters dict to parse a given parameter expression.
Args:
parameter_expression (str): A string, potentially containing basic arithmetic operations and functions,
and variables to be substituted
evaluation_parameters (dict): A dictionary of name-value pairs consisting of values to substitute
data_context (DataContext): A data context to use to obtain metrics, if necessary
The parser will allow arithmetic operations +, -, /, *, as well as basic functions, including trunc() and round() to
obtain integer values when needed for certain expectations (e.g. expect_column_value_length_to_be_between).
Valid variables must begin with an alphabetic character and may contain alphanumeric characters plus '_' and '$',
EXCEPT if they begin with the string "urn:great_expectations" in which case they may also include additional
    characters to support inclusion of GE URNs (see :ref:`evaluation_parameters` for more information).
"""
if evaluation_parameters is None:
evaluation_parameters = {}
# Calling get_parser clears the stack
parser = expr.get_parser()
try:
L = parser.parseString(parameter_expression, parseAll=True)
except ParseException as err:
L = ["Parse Failure", parameter_expression, (str(err), err.line, err.column)]
# Represents a valid parser result of a single function that has no arguments
if len(L) == 1 and isinstance(L[0], tuple) and L[0][2] is False:
# Necessary to catch `now()` (which only needs to be evaluated with `expr.exprStack`)
# NOTE: 20211122 - Chetan - Any future built-ins that are zero arity functions will match this behavior
pass
elif len(L) == 1 and L[0] not in evaluation_parameters:
# In this special case there were no operations to find, so only one value, but we don't have something to
# substitute for that value
try:
res = ge_urn.parseString(L[0])
if res["urn_type"] == "stores":
store = data_context.stores.get(res["store_name"])
return store.get_query_result(
res["metric_name"], res.get("metric_kwargs", {})
)
else:
logger.error(
"Unrecognized urn_type in ge_urn: must be 'stores' to use a metric store."
)
raise EvaluationParameterError(
f"No value found for $PARAMETER {str(L[0])}"
)
except ParseException as e:
logger.debug(
f"Parse exception while parsing evaluation parameter: {str(e)}"
)
raise EvaluationParameterError(f"No value found for $PARAMETER {str(L[0])}")
except AttributeError:
logger.warning("Unable to get store for store-type valuation parameter.")
raise EvaluationParameterError(f"No value found for $PARAMETER {str(L[0])}")
elif len(L) == 1:
# In this case, we *do* have a substitution for a single type. We treat this specially because in this
# case, we allow complex type substitutions (i.e. do not coerce to string as part of parsing)
# NOTE: 20201023 - JPC - to support MetricDefinition as an evaluation parameter type, we need to handle that
# case here; is the evaluation parameter provided here in fact a metric definition?
return evaluation_parameters[L[0]]
elif len(L) == 0 or L[0] != "Parse Failure":
for i, ob in enumerate(expr.exprStack):
if isinstance(ob, str) and ob in evaluation_parameters:
expr.exprStack[i] = str(evaluation_parameters[ob])
else:
err_str, err_line, err_col = L[-1]
raise EvaluationParameterError(
f"Parse Failure: {err_str}\nStatement: {err_line}\nColumn: {err_col}"
)
try:
result = expr.evaluate_stack(expr.exprStack)
result = convert_to_json_serializable(result)
except Exception as e:
exception_traceback = traceback.format_exc()
exception_message = (
f'{type(e).__name__}: "{str(e)}". Traceback: "{exception_traceback}".'
)
logger.debug(exception_message, e, exc_info=True)
raise EvaluationParameterError(
f"Error while evaluating evaluation parameter expression: {str(e)}"
)
return result
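# Illustrative sketch (editor addition; "row_count" is a hypothetical
# parameter name): arithmetic and the supported functions are evaluated
# numerically after substitution, e.g.
#
#     parse_evaluation_parameter("trunc(row_count / 10)", {"row_count": 523})
#     # -> 52
#
# while a bare single name returns the substituted value unchanged, which is
# what allows complex (non-numeric) parameter types in the len(L) == 1 branch.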
def _deduplicate_evaluation_parameter_dependencies(dependencies: dict) -> dict:
deduplicated = {}
for suite_name, required_metrics in dependencies.items():
deduplicated[suite_name] = []
metrics = set()
metric_kwargs = {}
for metric in required_metrics:
if isinstance(metric, str):
metrics.add(metric)
elif isinstance(metric, dict):
# There is a single metric_kwargs_id object in this construction
for kwargs_id, metric_list in metric["metric_kwargs_id"].items():
if kwargs_id not in metric_kwargs:
metric_kwargs[kwargs_id] = set()
for metric_name in metric_list:
metric_kwargs[kwargs_id].add(metric_name)
deduplicated[suite_name] = list(metrics)
if len(metric_kwargs) > 0:
deduplicated[suite_name] = deduplicated[suite_name] + [
{
"metric_kwargs_id": {
metric_kwargs: list(metrics_set)
for (metric_kwargs, metrics_set) in metric_kwargs.items()
}
}
]
return deduplicated
EvaluationParameterIdentifier = namedtuple(
"EvaluationParameterIdentifier",
["expectation_suite_name", "metric_name", "metric_kwargs_id"],
)
|
great-expectations/great_expectations
|
great_expectations/core/evaluation_parameters.py
|
Python
|
apache-2.0
| 17,237 | 0.002843 |
from .app import Sofi
|
tryexceptpass/sofi
|
sofi/app/__init__.py
|
Python
|
mit
| 23 | 0 |
import io
import openpyxl
from django.test import (
Client, TestCase
)
from django.urls import reverse
from core.models import (
User, Batch, Section, Election, Candidate, CandidateParty,
CandidatePosition, Vote, VoterProfile, Setting, UserType
)
class ResultsExporter(TestCase):
"""
Tests the results xlsx exporter view.
This subview may only process requests from logged in admin users. Other
users will be redirected to '/'. This will also only accept GET requests.
    GET requests may have an `election` parameter whose value must be the id
of an election. The lack of an election parameter will result in the
results of all elections to be exported, with each election having its
own worksheet. Other URL parameters will be ignored. Invalid election
parameter values, e.g. non-existent election IDs and non-integer parameters,
will return an error message.
View URL: '/results/export'
"""
@classmethod
def setUpTestData(cls):
batch_num = 0
section_num = 0
voter_num = 0
party_num = 0
position_num = 0
candidate_num = 0
num_elections = 2
voters = list()
positions = dict()
for i in range(num_elections):
election = Election.objects.create(name='Election {}'.format(i))
positions[str(election.name)] = list()
num_batches = 2
for j in range(num_batches):
batch = Batch.objects.create(year=batch_num, election=election)
batch_num += 1
num_sections = 2 if j == 0 else 1
for k in range(num_sections):
section = Section.objects.create(
section_name=str(section_num)
)
section_num += 1
num_students = 2
for l in range(num_students):
voter = User.objects.create(
username='user{}'.format(voter_num),
first_name=str(voter_num),
last_name=str(voter_num),
type=UserType.VOTER
)
voter.set_password('voter')
voter.save()
voter_num += 1
VoterProfile.objects.create(
user=voter,
batch=batch,
section=section
)
voters.append(voter)
num_positions = 3
for i in range(num_positions):
position = CandidatePosition.objects.create(
position_name='Position {}'.format(position_num),
election=election
)
positions[str(election.name)].append(position)
position_num += 1
num_parties = 3
for j in range(num_parties):
party = CandidateParty.objects.create(
party_name='Party {}'.format(party_num),
election=election
)
party_num += 1
if j != 2: # Let every third party have no candidates.
num_positions = 3
for k in range(num_positions):
position = positions[str(election.name)][k]
candidate = Candidate.objects.create(
user=voters[candidate_num],
party=party,
position=position,
election=election
)
Vote.objects.create(
user=voters[candidate_num],
candidate=candidate,
election=election
)
candidate_num += 1
# Let's give one candidate an additional vote to really make sure that
# we all got the correct number of votes.
Vote.objects.create(
user=voters[0],
# NOTE: The voter in voter[1] is a Position 1 candidate of
# Party 1, where the voter in voter[0] is a member.
candidate=Candidate.objects.get(user=voters[1]),
election=Election.objects.get(name='Election 0')
)
_admin = User.objects.create(username='admin', type=UserType.ADMIN)
_admin.set_password('root')
_admin.save()
def setUp(self):
self.client.login(username='admin', password='root')
def test_anonymous_get_requests_redirected_to_index(self):
self.client.logout()
response = self.client.get(reverse('results-export'), follow=True)
self.assertRedirects(response, '/?next=%2Fadmin%2Fresults')
def test_voter_get_requests_redirected_to_index(self):
self.client.logout()
self.client.login(username='user0', password='voter')
response = self.client.get(reverse('results-export'), follow=True)
self.assertRedirects(response, reverse('index'))
def test_get_all_elections_xlsx(self):
response = self.client.get(reverse('results-export'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Election Results.xlsx"'
)
wb = openpyxl.load_workbook(io.BytesIO(response.content))
self.assertEqual(len(wb.worksheets), 2)
# Check first worksheet.
ws = wb.worksheets[0]
self.assertEqual(wb.sheetnames[0], 'Election 0')
row_count = ws.max_row
col_count = ws.max_column
self.assertEqual(row_count, 25)
self.assertEqual(col_count, 5)
self.assertEqual(str(ws.cell(1, 1).value), 'Election 0 Results')
self.assertEqual(str(ws.cell(2, 1).value), 'Candidates')
cellContents = [
'Position 0',
'Party 0',
'0, 0',
'Party 1',
'3, 3',
'Party 2',
'None',
'Position 1',
'Party 0',
'1, 1',
'Party 1',
'4, 4',
'Party 2',
'None',
'Position 2',
'Party 0',
'2, 2',
'Party 1',
'5, 5',
'Party 2',
'None'
]
for cellIndex, content in enumerate(cellContents, 5):
self.assertEqual(str(ws.cell(cellIndex, 1).value), content)
self.assertEqual(str(ws.cell(2, 2).value), 'Number of Votes')
self.assertEqual(str(ws.cell(3, 2).value), '0')
self.assertEqual(str(ws.cell(4, 2).value), '0') # Section
self.assertEqual(str(ws.cell(7, 2).value), '1')
self.assertEqual(str(ws.cell(9, 2).value), '0')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 2).value), '2')
self.assertEqual(str(ws.cell(16, 2).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 2).value), '0')
self.assertEqual(str(ws.cell(23, 2).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(4, 3).value), '1') # Section
self.assertEqual(str(ws.cell(7, 3).value), '0')
self.assertEqual(str(ws.cell(9, 3).value), '1')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 3).value), '0')
self.assertEqual(str(ws.cell(16, 3).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 3).value), '1')
self.assertEqual(str(ws.cell(23, 3).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(3, 4).value), '1')
self.assertEqual(str(ws.cell(4, 4).value), '2') # Section
self.assertEqual(str(ws.cell(7, 4).value), '0')
self.assertEqual(str(ws.cell(9, 4).value), '0')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 4).value), '0')
self.assertEqual(str(ws.cell(16, 4).value), '1')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 4).value), '0')
self.assertEqual(str(ws.cell(23, 4).value), '1')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(3, 5).value), 'Total Votes')
self.assertEqual(str(ws.cell(7, 5).value), '1')
self.assertEqual(str(ws.cell(9, 5).value), '1')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 5).value), '2')
self.assertEqual(str(ws.cell(16, 5).value), '1')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 5).value), '1')
self.assertEqual(str(ws.cell(23, 5).value), '1')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
# Check second worksheet.
ws = wb.worksheets[1]
self.assertEqual(wb.sheetnames[1], 'Election 1')
row_count = ws.max_row
col_count = ws.max_column
self.assertEqual(row_count, 25)
self.assertEqual(col_count, 5)
self.assertEqual(str(ws.cell(1, 1).value), 'Election 1 Results')
self.assertEqual(str(ws.cell(2, 1).value), 'Candidates')
cellContents = [
'Position 3',
'Party 3',
'6, 6',
'Party 4',
'9, 9',
'Party 5',
'None',
'Position 4',
'Party 3',
'7, 7',
'Party 4',
'10, 10',
'Party 5',
'None',
'Position 5',
'Party 3',
'8, 8',
'Party 4',
'11, 11',
'Party 5',
'None'
]
for cellIndex, content in enumerate(cellContents, 5):
self.assertEqual(str(ws.cell(cellIndex, 1).value), content)
self.assertEqual(str(ws.cell(2, 2).value), 'Number of Votes')
self.assertEqual(str(ws.cell(3, 2).value), '2')
self.assertEqual(str(ws.cell(4, 2).value), '3') # Section
self.assertEqual(str(ws.cell(7, 2).value), '1')
self.assertEqual(str(ws.cell(9, 2).value), '0')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 2).value), '1')
self.assertEqual(str(ws.cell(16, 2).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 2).value), '0')
self.assertEqual(str(ws.cell(23, 2).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(4, 3).value), '4') # Section
self.assertEqual(str(ws.cell(7, 3).value), '0')
self.assertEqual(str(ws.cell(9, 3).value), '1')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 3).value), '0')
self.assertEqual(str(ws.cell(16, 3).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 3).value), '1')
self.assertEqual(str(ws.cell(23, 3).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(3, 4).value), '3')
self.assertEqual(str(ws.cell(4, 4).value), '5') # Section
self.assertEqual(str(ws.cell(7, 4).value), '0')
self.assertEqual(str(ws.cell(9, 4).value), '0')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 4).value), '0')
self.assertEqual(str(ws.cell(16, 4).value), '1')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 4).value), '0')
self.assertEqual(str(ws.cell(23, 4).value), '1')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(3, 5).value), 'Total Votes')
self.assertEqual(str(ws.cell(7, 5).value), '1')
self.assertEqual(str(ws.cell(9, 5).value), '1')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 5).value), '1')
self.assertEqual(str(ws.cell(16, 5).value), '1')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 5).value), '1')
self.assertEqual(str(ws.cell(23, 5).value), '1')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
def test_get_election0_xlsx(self):
response = self.client.get(
reverse('results-export'),
{ 'election': str(Election.objects.get(name='Election 0').id) }
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Election 0 Results.xlsx"'
)
wb = openpyxl.load_workbook(io.BytesIO(response.content))
self.assertEqual(len(wb.worksheets), 1)
# Check first worksheet.
ws = wb.worksheets[0]
self.assertEqual(wb.sheetnames[0], 'Election 0')
row_count = ws.max_row
col_count = ws.max_column
self.assertEqual(row_count, 25)
self.assertEqual(col_count, 5)
self.assertEqual(str(ws.cell(1, 1).value), 'Election 0 Results')
self.assertEqual(str(ws.cell(2, 1).value), 'Candidates')
cellContents = [
'Position 0',
'Party 0',
'0, 0',
'Party 1',
'3, 3',
'Party 2',
'None',
'Position 1',
'Party 0',
'1, 1',
'Party 1',
'4, 4',
'Party 2',
'None',
'Position 2',
'Party 0',
'2, 2',
'Party 1',
'5, 5',
'Party 2',
'None'
]
for cellIndex, content in enumerate(cellContents, 5):
self.assertEqual(str(ws.cell(cellIndex, 1).value), content)
self.assertEqual(str(ws.cell(2, 2).value), 'Number of Votes')
self.assertEqual(str(ws.cell(3, 2).value), '0')
self.assertEqual(str(ws.cell(4, 2).value), '0') # Section
self.assertEqual(str(ws.cell(7, 2).value), '1')
self.assertEqual(str(ws.cell(9, 2).value), '0')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 2).value), '2')
self.assertEqual(str(ws.cell(16, 2).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 2).value), '0')
self.assertEqual(str(ws.cell(23, 2).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(4, 3).value), '1') # Section
self.assertEqual(str(ws.cell(7, 3).value), '0')
self.assertEqual(str(ws.cell(9, 3).value), '1')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 3).value), '0')
self.assertEqual(str(ws.cell(16, 3).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 3).value), '1')
self.assertEqual(str(ws.cell(23, 3).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(3, 4).value), '1')
self.assertEqual(str(ws.cell(4, 4).value), '2') # Section
self.assertEqual(str(ws.cell(7, 4).value), '0')
self.assertEqual(str(ws.cell(9, 4).value), '0')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 4).value), '0')
self.assertEqual(str(ws.cell(16, 4).value), '1')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 4).value), '0')
self.assertEqual(str(ws.cell(23, 4).value), '1')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(3, 5).value), 'Total Votes')
self.assertEqual(str(ws.cell(7, 5).value), '1')
self.assertEqual(str(ws.cell(9, 5).value), '1')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 5).value), '2')
self.assertEqual(str(ws.cell(16, 5).value), '1')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 5).value), '1')
self.assertEqual(str(ws.cell(23, 5).value), '1')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
def test_get_with_invalid_election_id_non_existent_election_id(self):
response = self.client.get(
reverse('results-export'),
{ 'election': '69' },
HTTP_REFERER=reverse('results'),
follow=True
)
messages = list(response.context['messages'])
self.assertEqual(
messages[0].message,
'You specified an ID for a non-existent election.'
)
self.assertRedirects(response, reverse('results'))
def test_get_with_invalid_election_id_non_integer_election_id(self):
response = self.client.get(
reverse('results-export'),
{ 'election': 'hey' },
HTTP_REFERER=reverse('results'),
follow=True
)
messages = list(response.context['messages'])
self.assertEqual(
messages[0].message,
'You specified a non-integer election ID.'
)
self.assertRedirects(response, reverse('results'))
def test_ref_get_with_invalid_election_id_non_existent_election_id(self):
response = self.client.get(
reverse('results-export'),
{ 'election': '69' },
HTTP_REFERER=reverse('results'),
follow=True
)
messages = list(response.context['messages'])
self.assertEqual(
messages[0].message,
'You specified an ID for a non-existent election.'
)
self.assertRedirects(response, reverse('results'))
def test_ref_get_with_invalid_election_id_non_integer_election_id(self):
response = self.client.get(
reverse('results-export'),
{ 'election': 'hey' },
HTTP_REFERER=reverse('results'),
follow=True
)
messages = list(response.context['messages'])
self.assertEqual(
messages[0].message,
'You specified a non-integer election ID.'
)
self.assertRedirects(response, reverse('results'))
|
seanballais/botos
|
tests/test_results_exporter_view.py
|
Python
|
gpl-3.0
| 18,942 | 0.00132 |
# Copyright (C) 2008 Dirk Vanden Boer <dirk.vdb@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import bluetooth
class BluetoothDevice:
def __init__(self, address, port, deviceName, serviceName):
self.address = address
self.port = port
self.deviceName = deviceName
self.serviceName = serviceName
def __str__(self):
        return self.deviceName + '(' + self.serviceName + ') - ' + self.address + ':' + str(self.port)
class BluetoothDiscovery:
def findSerialDevices(self):
devices = bluetooth.discover_devices(duration = 5, lookup_names = False, flush_cache = True)
serialDevices = []
for address in devices:
services = bluetooth.find_service(uuid = bluetooth.SERIAL_PORT_CLASS, address = address)
services.extend(bluetooth.find_service(uuid = bluetooth.DIALUP_NET_CLASS))
for service in services:
serialDevices.append(BluetoothDevice(service['host'], service['port'], bluetooth.lookup_name(service['host']), service['name']))
return serialDevices
|
thegooglecodearchive/phonetooth
|
phonetooth/bluetoothdiscovery.py
|
Python
|
gpl-2.0
| 1,814 | 0.012679 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
""" Tweet rirrakuma 4kuma submit.
"""
import tweepy
TWEET_CONTENT = (
"リラックマの4クマ漫画が更新されました!\n"
"http://www.shufu.co.jp/contents/4kuma/"
)
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def main():
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth_handler=auth)
api.update_status(status=TWEET_CONTENT)
if __name__ == '__main__':
main()
|
pyohei/rirakkuma-crawller
|
tweet.py
|
Python
|
mit
| 565 | 0.001898 |
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
apply_patch,
context_unpack,
get_now,
json_view,
opresource,
save_auction,
)
from openprocurement.auctions.core.validation import (
validate_lot_data,
validate_patch_lot_data,
)
from openprocurement.auctions.core.views.mixins import AuctionLotResource
@opresource(name='dgfOtherAssets:Auction Lots',
collection_path='/auctions/{auction_id}/lots',
path='/auctions/{auction_id}/lots/{lot_id}',
auctionsprocurementMethodType="dgfOtherAssets",
description="Auction lots")
class AuctionLotResource(AuctionLotResource):
@json_view(content_type="application/json", validators=(validate_lot_data,), permission='edit_auction')
def collection_post(self):
"""Add a lot
"""
auction = self.request.validated['auction']
if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t add lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
lot = self.request.validated['lot']
lot.date = get_now()
auction.lots.append(lot)
if save_auction(self.request):
self.LOGGER.info('Created auction lot {}'.format(lot.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_create'}, {'lot_id': lot.id}))
self.request.response.status = 201
route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=route, lot_id=lot.id, _query={})
return {'data': lot.serialize("view")}
@json_view(content_type="application/json", validators=(validate_patch_lot_data,), permission='edit_auction')
def patch(self):
"""Update of lot
"""
auction = self.request.validated['auction']
if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t update lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
if apply_patch(self.request, src=self.request.context.serialize()):
self.LOGGER.info('Updated auction lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_patch'}))
return {'data': self.request.context.serialize("view")}
@json_view(permission='edit_auction')
def delete(self):
"""Lot deleting
"""
auction = self.request.validated['auction']
if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t delete lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
lot = self.request.context
res = lot.serialize("view")
auction.lots.remove(lot)
if save_auction(self.request):
self.LOGGER.info('Deleted auction lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_delete'}))
return {'data': res}
|
openprocurement/openprocurement.auctions.dgf
|
openprocurement/auctions/dgf/views/other/lot.py
|
Python
|
apache-2.0
| 3,339 | 0.004193 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Demonstration of using the Delisting event in your algorithm. Assets are delisted on their last day of trading, or when their contract expires.
### This data is not included in the open source project.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="data event handlers" />
### <meta name="tag" content="delisting event" />
class DelistingEventsAlgorithm(QCAlgorithm):
def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.SetStartDate(2007, 5, 16) #Set Start Date
self.SetEndDate(2007, 5, 25) #Set End Date
self.SetCash(100000) #Set Strategy Cash
# Find more symbols here: http://quantconnect.com/data
self.AddEquity("AAA.1", Resolution.Daily)
self.AddEquity("SPY", Resolution.Daily)
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
if self.Transactions.OrdersCount == 0:
self.SetHoldings("AAA.1", 1)
self.Debug("Purchased stock")
for kvp in data.Bars:
symbol = kvp.Key
value = kvp.Value
self.Log("OnData(Slice): {0}: {1}: {2}".format(self.Time, symbol, value.Close))
# the slice can also contain delisting data: data.Delistings in a dictionary string->Delisting
aaa = self.Securities["AAA.1"]
if aaa.IsDelisted and aaa.IsTradable:
raise Exception("Delisted security must NOT be tradable")
if not aaa.IsDelisted and not aaa.IsTradable:
raise Exception("Securities must be marked as tradable until they're delisted or removed from the universe")
for kvp in data.Delistings:
symbol = kvp.Key
value = kvp.Value
if value.Type == DelistingType.Warning:
self.Log("OnData(Delistings): {0}: {1} will be delisted at end of day today.".format(self.Time, symbol))
# liquidate on delisting warning
self.SetHoldings(symbol, 0)
if value.Type == DelistingType.Delisted:
self.Log("OnData(Delistings): {0}: {1} has been delisted.".format(self.Time, symbol))
# fails because the security has already been delisted and is no longer tradable
self.SetHoldings(symbol, 1)
def OnOrderEvent(self, orderEvent):
self.Log("OnOrderEvent(OrderEvent): {0}: {1}".format(self.Time, orderEvent))
|
AlexCatarino/Lean
|
Algorithm.Python/DelistingEventsAlgorithm.py
|
Python
|
apache-2.0
| 3,448 | 0.007251 |
__author__ = 'mark'
# StarinetPython3Logger is a data logger for the BeagleBone Black.
# Copyright (C) 2015 Mark Horn
#
# This file is part of StarinetPython3Logger.
#
# StarinetPython3Logger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# StarinetPython3Logger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with StarinetPython3Logger. If not, see <http://www.gnu.org/licenses/>.
import crcmod
import logging
import sys
## Set crc16 parameters to polynomial 8408, initial value 0xffff, reversed True, Final XOR value 0x00
crc16 = crcmod.mkCrcFun(0x018408, 0xFFFF, True, 0x0000)
## initialise logger
logger = logging.getLogger('utilities.staribuscCrc')
def checkcrc(buffer0):
logger.debug("Check crc was called.")
buffer0 = buffer0.encode('utf-8')
rxcrc = buffer0[-4:] # assign the received crc to rxcrc
logger.debug("%s %s", "Received data crc - ", rxcrc)
newrxcrc = str(hex(crc16(buffer0[:-4])).replace('x', '')[1:].zfill(4)).upper() # new crc
newrxcrc = newrxcrc.encode('utf-8')
logger.debug("%s %s", "Calculated new crc based on received data -", newrxcrc)
#### Check old and new crc's match if they don't return string with 0200 crc error
if newrxcrc != rxcrc:
logger.debug("%s %s %s %s", "Received crc - ", rxcrc, "does not match our generated crc - ", newrxcrc)
return '0200'
else:
logger.debug("CRC' match")
return '0'
def newcrc(buffer0):
logger.debug("New crc was called.")
buffer0 = buffer0.encode('UTF-8')
datacrc = str(hex(crc16(buffer0)).replace('x', '')[1:].zfill(4)).upper()
value = datacrc
logger.debug("%s %s", "Calculated new message crc -", datacrc)
return value
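# Editor note (illustrative; the message value is hypothetical): newcrc()
# returns the CRC-16 of the message as a 4-character upper-case hex string,
# and checkcrc() expects exactly that string appended as the last four
# characters of the received message.
#
#     msg = "0001000102"
#     framed = msg + newcrc(msg)   # append the computed CRC
#     checkcrc(framed)             # -> '0' if the CRCs match, else '0200'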
if __name__ == "__main__":
print(newcrc(str(sys.argv[1:])))
|
mhorn71/StarinetPythonLogger
|
utilities/staribuscrc.py
|
Python
|
gpl-2.0
| 2,214 | 0.004065 |
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:myfile.root'
)
)
process.demo = cms.EDAnalyzer('MiniAnalyzer'
)
process.p = cms.Path(process.demo)
|
jlrodrig/MyAnalysis
|
MiniAnalyzer/python/ConfFile_cfg.py
|
Python
|
gpl-3.0
| 471 | 0.019108 |
from openstates.utils import LXMLMixin
from billy.scrape.votes import VoteScraper, Vote
from billy.scrape.utils import convert_pdf
import datetime
import subprocess
import lxml
import os
import re
journals = "http://www.leg.state.co.us/CLICS/CLICS%s/csljournals.nsf/" \
"jouNav?Openform&%s"
date_re = re.compile(
r"(?i).*(?P<dt>(monday|tuesday|wednesday|thursday|friday|saturday|sunday)"
".*, \d{4}).*"
)
vote_re = re.compile((r"\s*"
"YES\s*(?P<yes_count>\d+)\s*"
"NO\s*(?P<no_count>\d+)\s*"
"EXCUSED\s*(?P<excused_count>\d+)\s*"
"ABSENT\s*(?P<abs_count>\d+).*"))
votes_re = r"(?P<name>\w+(\s\w\.)?)\s+(?P<vote>Y|N|A|E|-)"
def fix_typos(data):
return data.replace('Tueday', 'Tuesday') # Spelling is hard
class COVoteScraper(VoteScraper, LXMLMixin):
jurisdiction = 'co'
def scrape_house(self, session):
url = journals % (session, 'House')
page = self.lxmlize(url)
hrefs = page.xpath("//font//a")
for href in hrefs:
(path, response) = self.urlretrieve(href.attrib['href'])
data = convert_pdf(path, type='text')
data = fix_typos(data)
in_vote = False
cur_vote = {}
known_date = None
cur_vote_count = None
in_question = False
cur_question = None
cur_bill_id = None
for line in data.split("\n"):
if known_date is None:
dt = date_re.findall(line)
if dt != []:
dt, dow = dt[0]
known_date = datetime.datetime.strptime(dt,
"%A, %B %d, %Y")
non_std = False
if re.match("(\s+)?\d+.*", line) is None:
non_std = True
l = line.lower().strip()
skip = False
blacklist = [
"house",
"page",
"general assembly",
"state of colorado",
"session",
"legislative day"
]
for thing in blacklist:
if thing in l:
skip = True
if skip:
continue
found = re.findall(
"(?P<bill_id>(H|S|SJ|HJ)(B|M|R)\d{2}-\d{3,4})",
line
)
if found != []:
found = found[0]
cur_bill_id, chamber, typ = found
try:
if not non_std:
_, line = line.strip().split(" ", 1)
line = line.strip()
except ValueError:
in_vote = False
in_question = False
continue
if in_question:
cur_question += " " + line.strip()
continue
if ("The question being" in line) or \
("On motion of" in line) or \
("the following" in line) or \
("moved that the" in line):
cur_question = line.strip()
in_question = True
if in_vote:
if line == "":
likely_garbage = True
likely_garbage = False
if "co-sponsor" in line.lower():
likely_garbage = True
if 'the speaker' in line.lower():
likely_garbage = True
votes = re.findall(votes_re, line)
if likely_garbage:
votes = []
for person, _, v in votes:
cur_vote[person] = v
last_line = False
for who, _, vote in votes:
if who.lower() == "speaker":
last_line = True
if votes == [] or last_line:
in_vote = False
# save vote
yes, no, other = cur_vote_count
if cur_bill_id is None or cur_question is None:
continue
bc = {
"H": "lower",
"S": "upper",
"J": "joint"
}[cur_bill_id[0].upper()]
vote = Vote('lower',
known_date,
cur_question,
(yes > no),
yes,
no,
other,
session=session,
bill_id=cur_bill_id,
bill_chamber=bc)
vote.add_source(href.attrib['href'])
vote.add_source(url)
for person in cur_vote:
if not person:
continue
vot = cur_vote[person]
if person.endswith("Y"):
vot = "Y"
person = person[:-1]
if person.endswith("N"):
vot = "N"
person = person[:-1]
if person.endswith("E"):
vot = "E"
person = person[:-1]
if not person:
continue
if vot == 'Y':
vote.yes(person)
elif vot == 'N':
vote.no(person)
elif vot == 'E' or vot == '-':
vote.other(person)
self.save_vote(vote)
cur_vote = {}
in_question = False
cur_question = None
in_vote = False
cur_vote_count = None
continue
summ = vote_re.findall(line)
if summ == []:
continue
summ = summ[0]
yes, no, exc, ab = summ
yes, no, exc, ab = \
int(yes), int(no), int(exc), int(ab)
other = exc + ab
cur_vote_count = (yes, no, other)
in_vote = True
continue
os.unlink(path)
def scrape_senate(self, session):
url = journals % (session, 'Senate')
page = self.lxmlize(url)
hrefs = page.xpath("//font//a")
for href in hrefs:
(path, response) = self.urlretrieve(href.attrib['href'])
data = convert_pdf(path, type='text')
data = fix_typos(data)
cur_bill_id = None
cur_vote_count = None
in_vote = False
cur_question = None
in_question = False
known_date = None
cur_vote = {}
for line in data.split("\n"):
if not known_date:
dt = date_re.findall(line)
if dt != []:
dt, dow = dt[0]
dt = dt.replace(',', '')
known_date = datetime.datetime.strptime(dt, "%A %B %d %Y")
if in_question:
line = line.strip()
if re.match("\d+", line):
in_question = False
continue
try:
line, _ = line.rsplit(" ", 1)
cur_question += line.strip()
except ValueError:
in_question = False
continue
cur_question += line.strip()
if not in_vote:
summ = vote_re.findall(line)
if summ != []:
cur_vote = {}
cur_vote_count = summ[0]
in_vote = True
continue
if ("The question being" in line) or \
("On motion of" in line) or \
("the following" in line) or \
("moved that the" in line):
cur_question, _ = line.strip().rsplit(" ", 1)
cur_question = cur_question.strip()
in_question = True
if line.strip() == "":
continue
first = line[0]
if first != " ":
if " " not in line:
# wtf
continue
bill_id, kruft = line.split(" ", 1)
if len(bill_id) < 3:
continue
if bill_id[0] != "H" and bill_id[0] != "S":
continue
if bill_id[1] not in ['B', 'J', 'R', 'M']:
continue
cur_bill_id = bill_id
else:
line = line.strip()
try:
line, lineno = line.rsplit(" ", 1)
except ValueError:
in_vote = False
if cur_question is None:
continue
if cur_bill_id is None:
continue
yes, no, exc, ab = cur_vote_count
other = int(exc) + int(ab)
yes, no, other = int(yes), int(no), int(other)
bc = {'H': 'lower', 'S': 'upper'}[cur_bill_id[0]]
vote = Vote('upper',
known_date,
cur_question,
(yes > no),
yes,
no,
other,
session=session,
bill_id=cur_bill_id,
bill_chamber=bc)
for person in cur_vote:
if person is None:
continue
howvote = cur_vote[person]
if person.endswith("Y"):
howvote = "Y"
person = person[:-1]
if person.endswith("N"):
howvote = "N"
person = person[:-1]
if person.endswith("E"):
howvote = "E"
person = person[:-1]
howvote = howvote.upper()
if howvote == 'Y':
vote.yes(person)
elif howvote == 'N':
vote.no(person)
else:
vote.other(person)
vote.add_source(href.attrib['href'])
self.save_vote(vote)
cur_vote, cur_question, cur_vote_count = (
None, None, None)
continue
votes = re.findall(votes_re, line)
for person in votes:
name, li, vot = person
cur_vote[name] = vot
os.unlink(path)
def scrape(self, chamber, session):
if chamber == 'upper':
self.scrape_senate(session)
if chamber == 'lower':
self.scrape_house(session)
|
cliftonmcintosh/openstates
|
openstates/co/votes.py
|
Python
|
gpl-3.0
| 12,425 | 0.002334 |
from importlib import import_module
import os
import sys
from django.apps import apps
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.migration import Migration
from django.db.migrations.state import ModelState
from django.db.migrations import operations
from django.utils import six
from django.conf import settings
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialisation, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label]
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)
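    # Editor note (illustrative; the app label and module path are
    # hypothetical): a setting such as
    #     MIGRATION_MODULES = {"blog": "blog.db_migrations"}
    # makes migrations_module("blog") return "blog.db_migrations" instead of
    # the default "<app package>.migrations".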
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
# Get the migrations module directory
module_name = self.migrations_module(app_config.label)
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
six.moves.reload_module(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py[c|o] files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py") or name.endswith(".pyc") or name.endswith(".pyo"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
south_style_migrations = False
for migration_name in migration_names:
try:
migration_module = import_module("%s.%s" % (module_name, migration_name))
except ImportError as e:
# Ignore South import errors, as we're triggering them
if "south" in str(e).lower():
south_style_migrations = True
break
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError("Migration %s in app %s has no Migration class" % (migration_name, app_config.label))
# Ignore South-style migrations
if hasattr(migration_module.Migration, "forwards"):
south_style_migrations = True
break
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(migration_name, app_config.label)
if south_style_migrations:
self.unmigrated_apps.add(app_config.label)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises KeyError"
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Do the search
results = []
for l, n in self.disk_migrations:
if l == app_label and n.startswith(name_prefix):
results.append((l, n))
if len(results) > 1:
raise AmbiguityError("There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix))
elif len(results) == 0:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def build_graph(self):
"""
Builds a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# Do a first pass to separate out replacing and non-replacing migrations
normal = {}
replacing = {}
for key, migration in self.disk_migrations.items():
if migration.replaces:
replacing[key] = migration
else:
normal[key] = migration
# Calculate reverse dependencies - i.e., for each migration, what depends on it?
# This is just for dependency re-pointing when applying replacements,
# so we ignore run_before here.
reverse_dependencies = {}
for key, migration in normal.items():
for parent in migration.dependencies:
reverse_dependencies.setdefault(parent, set()).add(key)
# Carry out replacements if we can - that is, if all replaced migrations
# are either unapplied or missing.
for key, migration in replacing.items():
# Ensure this replacement migration is not in applied_migrations
self.applied_migrations.discard(key)
# Do the check. We can replace if all our replace targets are
# applied, or if all of them are unapplied.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
can_replace = all(applied_statuses) or (not any(applied_statuses))
if not can_replace:
continue
# Alright, time to replace. Step through the replaced migrations
# and remove, repointing dependencies if needs be.
for replaced in migration.replaces:
if replaced in normal:
# We don't care if the replaced migration doesn't exist;
# the usage pattern here is to delete things after a while.
del normal[replaced]
for child_key in reverse_dependencies.get(replaced, set()):
if child_key in migration.replaces:
continue
normal[child_key].dependencies.remove(replaced)
normal[child_key].dependencies.append(key)
normal[key] = migration
# Mark the replacement as applied if all its replaced ones are
if all(applied_statuses):
self.applied_migrations.add(key)
# Finally, make a graph and load everything into it
self.graph = MigrationGraph()
for key, migration in normal.items():
self.graph.add_node(key, migration)
for key, migration in normal.items():
for parent in migration.dependencies:
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they
# even have migrations.
if parent[1] == "__first__" and parent not in self.graph:
if parent[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# We'll add a fake initial migration for it into the
# graph.
app_config = apps.get_app_config(parent[0])
ops = []
for model in app_config.get_models():
model_state = ModelState.from_model(model)
ops.append(
operations.CreateModel(
name=model_state.name,
fields=model_state.fields,
options=model_state.options,
bases=model_state.bases,
)
)
new_migration = type(
"FakeInitialMigration",
(Migration, ),
{"operations": ops},
)(parent[1], parent[0])
self.graph.add_node(parent, new_migration)
self.applied_migrations.add(parent)
elif parent[0] in self.migrated_apps:
parent = list(self.graph.root_nodes(parent[0]))[0]
else:
raise ValueError("Dependency on unknown app %s" % parent[0])
self.graph.add_dependency(key, parent)
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return dict((app_label, seen_apps[app_label]) for app_label in conflicting_apps)
class BadMigrationError(Exception):
"""
Raised when there's a bad migration (unreadable/bad format/etc.)
"""
pass
class AmbiguityError(Exception):
"""
Raised when more than one migration matches a name prefix
"""
pass
|
TimBuckley/effective_django
|
django/db/migrations/loader.py
|
Python
|
bsd-3-clause
| 11,967 | 0.001588 |
"""Most of these tests come from the examples in Bronstein's book."""
from sympy import (Poly, I, S, Function, log, symbols, exp, tan, sqrt,
Symbol, Lambda, sin, Eq, Ne, Piecewise, factor, expand_log, cancel,
expand, diff, pi, atan)
from sympy.integrals.risch import (gcdex_diophantine, frac_in, as_poly_1t,
derivation, splitfactor, splitfactor_sqf, canonical_representation,
hermite_reduce, polynomial_reduce, residue_reduce, residue_reduce_to_basic,
integrate_primitive, integrate_hyperexponential_polynomial,
integrate_hyperexponential, integrate_hypertangent_polynomial,
integrate_nonlinear_no_specials, integer_powers, DifferentialExtension,
risch_integrate, DecrementLevel, NonElementaryIntegral, recognize_log_derivative,
recognize_derivative, laurent_series)
from sympy.utilities.pytest import raises
from sympy.abc import x, t, nu, z, a, y
t0, t1, t2 = symbols('t:3')
i = Symbol('i')
def test_gcdex_diophantine():
assert gcdex_diophantine(Poly(x**4 - 2*x**3 - 6*x**2 + 12*x + 15),
Poly(x**3 + x**2 - 4*x - 4), Poly(x**2 - 1)) == \
(Poly((-x**2 + 4*x - 3)/5), Poly((x**3 - 7*x**2 + 16*x - 10)/5))
def test_frac_in():
assert frac_in(Poly((x + 1)/x*t, t), x) == \
(Poly(t*x + t, x), Poly(x, x))
assert frac_in((x + 1)/x*t, x) == \
(Poly(t*x + t, x), Poly(x, x))
assert frac_in((Poly((x + 1)/x*t, t), Poly(t + 1, t)), x) == \
(Poly(t*x + t, x), Poly((1 + t)*x, x))
raises(ValueError, lambda: frac_in((x + 1)/log(x)*t, x))
assert frac_in(Poly((2 + 2*x + x*(1 + x))/(1 + x)**2, t), x, cancel=True) == \
(Poly(x + 2, x), Poly(x + 1, x))
def test_as_poly_1t():
assert as_poly_1t(2/t + t, t, z) in [
Poly(t + 2*z, t, z), Poly(t + 2*z, z, t)]
assert as_poly_1t(2/t + 3/t**2, t, z) in [
Poly(2*z + 3*z**2, t, z), Poly(2*z + 3*z**2, z, t)]
assert as_poly_1t(2/((exp(2) + 1)*t), t, z) in [
Poly(2/(exp(2) + 1)*z, t, z), Poly(2/(exp(2) + 1)*z, z, t)]
assert as_poly_1t(2/((exp(2) + 1)*t) + t, t, z) in [
Poly(t + 2/(exp(2) + 1)*z, t, z), Poly(t + 2/(exp(2) + 1)*z, z, t)]
assert as_poly_1t(S(0), t, z) == Poly(0, t, z)
def test_derivation():
p = Poly(4*x**4*t**5 + (-4*x**3 - 4*x**4)*t**4 + (-3*x**2 + 2*x**3)*t**3 +
(2*x + 7*x**2 + 2*x**3)*t**2 + (1 - 4*x - 4*x**2)*t - 1 + 2*x, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t**2 - 3/(2*x)*t + 1/(2*x), t)]})
assert derivation(p, DE) == Poly(-20*x**4*t**6 + (2*x**3 + 16*x**4)*t**5 +
(21*x**2 + 12*x**3)*t**4 + (7*x/2 - 25*x**2 - 12*x**3)*t**3 +
(-5 - 15*x/2 + 7*x**2)*t**2 - (3 - 8*x - 10*x**2 - 4*x**3)/(2*x)*t +
(1 - 4*x**2)/(2*x), t)
assert derivation(Poly(1, t), DE) == Poly(0, t)
assert derivation(Poly(t, t), DE) == DE.d
assert derivation(Poly(t**2 + 1/x*t + (1 - 2*x)/(4*x**2), t), DE) == \
Poly(-2*t**3 - 4/x*t**2 - (5 - 2*x)/(2*x**2)*t - (1 - 2*x)/(2*x**3), t, domain='ZZ(x)')
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(t, t)]})
assert derivation(Poly(x*t*t1, t), DE) == Poly(t*t1 + x*t*t1 + t, t)
assert derivation(Poly(x*t*t1, t), DE, coefficientD=True) == \
Poly((1 + t1)*t, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert derivation(Poly(x, x), DE) == Poly(1, x)
# Test basic option
assert derivation((x + 1)/(x - 1), DE, basic=True) == -2/(1 - 2*x + x**2)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert derivation((t + 1)/(t - 1), DE, basic=True) == -2*t/(1 - 2*t + t**2)
assert derivation(t + 1, DE, basic=True) == t
def test_splitfactor():
p = Poly(4*x**4*t**5 + (-4*x**3 - 4*x**4)*t**4 + (-3*x**2 + 2*x**3)*t**3 +
(2*x + 7*x**2 + 2*x**3)*t**2 + (1 - 4*x - 4*x**2)*t - 1 + 2*x, t, field=True)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t**2 - 3/(2*x)*t + 1/(2*x), t)]})
assert splitfactor(p, DE) == (Poly(4*x**4*t**3 + (-8*x**3 - 4*x**4)*t**2 +
(4*x**2 + 8*x**3)*t - 4*x**2, t), Poly(t**2 + 1/x*t + (1 - 2*x)/(4*x**2), t, domain='ZZ(x)'))
assert splitfactor(Poly(x, t), DE) == (Poly(x, t), Poly(1, t))
r = Poly(-4*x**4*z**2 + 4*x**6*z**2 - z*x**3 - 4*x**5*z**3 + 4*x**3*z**3 + x**4 + z*x**5 - x**6, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
assert splitfactor(r, DE, coefficientD=True) == \
(Poly(x*z - x**2 - z*x**3 + x**4, t), Poly(-x**2 + 4*x**2*z**2, t))
assert splitfactor_sqf(r, DE, coefficientD=True) == \
(((Poly(x*z - x**2 - z*x**3 + x**4, t), 1),), ((Poly(-x**2 + 4*x**2*z**2, t), 1),))
assert splitfactor(Poly(0, t), DE) == (Poly(0, t), Poly(1, t))
assert splitfactor_sqf(Poly(0, t), DE) == (((Poly(0, t), 1),), ())
def test_canonical_representation():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
assert canonical_representation(Poly(x - t, t), Poly(t**2, t), DE) == \
(Poly(0, t), (Poly(0, t),
Poly(1, t)), (Poly(-t + x, t),
Poly(t**2, t)))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert canonical_representation(Poly(t**5 + t**3 + x**2*t + 1, t),
Poly((t**2 + 1)**3, t), DE) == \
(Poly(0, t), (Poly(t**5 + t**3 + x**2*t + 1, t),
Poly(t**6 + 3*t**4 + 3*t**2 + 1, t)), (Poly(0, t), Poly(1, t)))
def test_hermite_reduce():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert hermite_reduce(Poly(x - t, t), Poly(t**2, t), DE) == \
((Poly(-x, t), Poly(t, t)), (Poly(0, t), Poly(1, t)), (Poly(-x, t), Poly(1, t)))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t**2 - t/x - (1 - nu**2/x**2), t)]})
assert hermite_reduce(
Poly(x**2*t**5 + x*t**4 - nu**2*t**3 - x*(x**2 + 1)*t**2 - (x**2 - nu**2)*t - x**5/4, t),
Poly(x**2*t**4 + x**2*(x**2 + 2)*t**2 + x**2 + x**4 + x**6/4, t), DE) == \
((Poly(-x**2 - 4, t), Poly(4*t**2 + 2*x**2 + 4, t)),
(Poly((-2*nu**2 - x**4)*t - (2*x**3 + 2*x), t), Poly(2*x**2*t**2 + x**4 + 2*x**2, t)),
(Poly(x*t + 1, t), Poly(x, t)))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
a = Poly((-2 + 3*x)*t**3 + (-1 + x)*t**2 + (-4*x + 2*x**2)*t + x**2, t)
d = Poly(x*t**6 - 4*x**2*t**5 + 6*x**3*t**4 - 4*x**4*t**3 + x**5*t**2, t)
assert hermite_reduce(a, d, DE) == \
((Poly(3*t**2 + t + 3*x, t), Poly(3*t**4 - 9*x*t**3 + 9*x**2*t**2 - 3*x**3*t, t)),
(Poly(0, t), Poly(1, t)),
(Poly(0, t), Poly(1, t)))
assert hermite_reduce(
Poly(-t**2 + 2*t + 2, t),
Poly(-x*t**2 + 2*x*t - x, t), DE) == \
((Poly(3, t), Poly(t - 1, t)),
(Poly(0, t), Poly(1, t)),
(Poly(1, t), Poly(x, t)))
assert hermite_reduce(
Poly(-x**2*t**6 + (-1 - 2*x**3 + x**4)*t**3 + (-3 - 3*x**4)*t**2 - 2*x*t - x - 3*x**2, t),
Poly(x**4*t**6 - 2*x**2*t**3 + 1, t), DE) == \
((Poly(x**3*t + x**4 + 1, t), Poly(x**3*t**3 - x, t)),
(Poly(0, t), Poly(1, t)),
(Poly(-1, t), Poly(x**2, t)))
assert hermite_reduce(
Poly((-2 + 3*x)*t**3 + (-1 + x)*t**2 + (-4*x + 2*x**2)*t + x**2, t),
Poly(x*t**6 - 4*x**2*t**5 + 6*x**3*t**4 - 4*x**4*t**3 + x**5*t**2, t), DE) == \
((Poly(3*t**2 + t + 3*x, t), Poly(3*t**4 - 9*x*t**3 + 9*x**2*t**2 - 3*x**3*t, t)),
(Poly(0, t), Poly(1, t)),
(Poly(0, t), Poly(1, t)))
def test_polynomial_reduce():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
assert polynomial_reduce(Poly(1 + x*t + t**2, t), DE) == \
(Poly(t, t), Poly(x*t, t))
assert polynomial_reduce(Poly(0, t), DE) == \
(Poly(0, t), Poly(0, t))
def test_laurent_series():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1, t)]})
a = Poly(36, t)
d = Poly((t - 2)*(t**2 - 1)**2, t)
F = Poly(t**2 - 1, t)
n = 2
assert laurent_series(a, d, F, n, DE) == \
(Poly(-3*t**3 + 3*t**2 - 6*t - 8, t), Poly(t**5 + t**4 - 2*t**3 - 2*t**2 + t + 1, t),
[Poly(-3*t**3 - 6*t**2, t), Poly(2*t**6 + 6*t**5 - 8*t**3, t)])
def test_recognize_derivative():
DE = DifferentialExtension(extension={'D': [Poly(1, t)]})
a = Poly(36, t)
d = Poly((t - 2)*(t**2 - 1)**2, t)
assert recognize_derivative(a, d, DE) == False
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
a = Poly(2, t)
d = Poly(t**2 - 1, t)
assert recognize_derivative(a, d, DE) == False
assert recognize_derivative(Poly(x*t, t), Poly(1, t), DE) == True
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert recognize_derivative(Poly(t, t), Poly(1, t), DE) == True
def test_recognize_log_derivative():
a = Poly(2*x**2 + 4*x*t - 2*t - x**2*t, t)
d = Poly((2*x + t)*(t + x**2), t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert recognize_log_derivative(a, d, DE, z) == True
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
assert recognize_log_derivative(Poly(t + 1, t), Poly(t + x, t), DE) == True
assert recognize_log_derivative(Poly(2, t), Poly(t**2 - 1, t), DE) == True
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert recognize_log_derivative(Poly(1, x), Poly(x**2 - 2, x), DE) == False
assert recognize_log_derivative(Poly(1, x), Poly(x**2 + x, x), DE) == True
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert recognize_log_derivative(Poly(1, t), Poly(t**2 - 2, t), DE) == False
assert recognize_log_derivative(Poly(1, t), Poly(t**2 + t, t), DE) == False
def test_residue_reduce():
a = Poly(2*t**2 - t - x**2, t)
d = Poly(t**3 - x**2*t, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)], 'Tfuncs': [log]})
assert residue_reduce(a, d, DE, z, invert=False) == \
([(Poly(z**2 - S(1)/4, z), Poly((1 + 3*x*z - 6*z**2 -
2*x**2 + 4*x**2*z**2)*t - x*z + x**2 + 2*x**2*z**2 - 2*z*x**3, t))], False)
assert residue_reduce(a, d, DE, z, invert=True) == \
([(Poly(z**2 - S(1)/4, z), Poly(t + 2*x*z, t))], False)
assert residue_reduce(Poly(-2/x, t), Poly(t**2 - 1, t,), DE, z, invert=False) == \
([(Poly(z**2 - 1, z), Poly(-2*z*t/x - 2/x, t))], True)
ans = residue_reduce(Poly(-2/x, t), Poly(t**2 - 1, t), DE, z, invert=True)
assert ans == ([(Poly(z**2 - 1, z), Poly(t + z, t))], True)
assert residue_reduce_to_basic(ans[0], DE, z) == -log(-1 + log(x)) + log(1 + log(x))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t**2 - t/x - (1 - nu**2/x**2), t)]})
# TODO: Skip or make faster
assert residue_reduce(Poly((-2*nu**2 - x**4)/(2*x**2)*t - (1 + x**2)/x, t),
Poly(t**2 + 1 + x**2/2, t), DE, z) == \
([(Poly(z + S(1)/2, z, domain='QQ'), Poly(t**2 + 1 + x**2/2, t, domain='EX'))], True)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
assert residue_reduce(Poly(-2*x*t + 1 - x**2, t),
Poly(t**2 + 2*x*t + 1 + x**2, t), DE, z) == \
([(Poly(z**2 + S(1)/4, z), Poly(t + x + 2*z, t))], True)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert residue_reduce(Poly(t, t), Poly(t + sqrt(2), t), DE, z) == \
([(Poly(z - 1, z), Poly(t + sqrt(2), t))], True)
def test_integrate_hyperexponential():
# TODO: Add tests for integrate_hyperexponential() from the book
a = Poly((1 + 2*t1 + t1**2 + 2*t1**3)*t**2 + (1 + t1**2)*t + 1 + t1**2, t)
d = Poly(1, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t1**2, t1),
Poly(t*(1 + t1**2), t)], 'Tfuncs': [tan, Lambda(i, exp(tan(i)))]})
assert integrate_hyperexponential(a, d, DE) == \
(exp(2*tan(x))*tan(x) + exp(tan(x)), 1 + t1**2, True)
a = Poly((t1**3 + (x + 1)*t1**2 + t1 + x + 2)*t, t)
assert integrate_hyperexponential(a, d, DE) == \
((x + tan(x))*exp(tan(x)), 0, True)
a = Poly(t, t)
d = Poly(1, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(2*x*t, t)],
'Tfuncs': [Lambda(i, exp(x**2))]})
assert integrate_hyperexponential(a, d, DE) == \
(0, NonElementaryIntegral(exp(x**2), x), False)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)], 'Tfuncs': [exp]})
assert integrate_hyperexponential(a, d, DE) == (exp(x), 0, True)
a = Poly(25*t**6 - 10*t**5 + 7*t**4 - 8*t**3 + 13*t**2 + 2*t - 1, t)
d = Poly(25*t**6 + 35*t**4 + 11*t**2 + 1, t)
assert integrate_hyperexponential(a, d, DE) == \
(-(11 - 10*exp(x))/(5 + 25*exp(2*x)) + log(1 + exp(2*x)), -1, True)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t0, t0), Poly(t0*t, t)],
'Tfuncs': [exp, Lambda(i, exp(exp(i)))]})
assert integrate_hyperexponential(Poly(2*t0*t**2, t), Poly(1, t), DE) == (exp(2*exp(x)), 0, True)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t0, t0), Poly(-t0*t, t)],
'Tfuncs': [exp, Lambda(i, exp(-exp(i)))]})
assert integrate_hyperexponential(Poly(-27*exp(9) - 162*t0*exp(9) +
27*x*t0*exp(9), t), Poly((36*exp(18) + x**2*exp(18) - 12*x*exp(18))*t, t), DE) == \
(27*exp(exp(x))/(-6*exp(9) + x*exp(9)), 0, True)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)], 'Tfuncs': [exp]})
assert integrate_hyperexponential(Poly(x**2/2*t, t), Poly(1, t), DE) == \
((2 - 2*x + x**2)*exp(x)/2, 0, True)
assert integrate_hyperexponential(Poly(1 + t, t), Poly(t, t), DE) == \
(-exp(-x), 1, True) # x - exp(-x)
assert integrate_hyperexponential(Poly(x, t), Poly(t + 1, t), DE) == \
(0, NonElementaryIntegral(x/(1 + exp(x)), x), False)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t0), Poly(2*x*t1, t1)],
'Tfuncs': [log, Lambda(i, exp(i**2))]})
elem, nonelem, b = integrate_hyperexponential(Poly((8*x**7 - 12*x**5 + 6*x**3 - x)*t1**4 +
(8*t0*x**7 - 8*t0*x**6 - 4*t0*x**5 + 2*t0*x**3 + 2*t0*x**2 - t0*x +
24*x**8 - 36*x**6 - 4*x**5 + 22*x**4 + 4*x**3 - 7*x**2 - x + 1)*t1**3
+ (8*t0*x**8 - 4*t0*x**6 - 16*t0*x**5 - 2*t0*x**4 + 12*t0*x**3 +
t0*x**2 - 2*t0*x + 24*x**9 - 36*x**7 - 8*x**6 + 22*x**5 + 12*x**4 -
7*x**3 - 6*x**2 + x + 1)*t1**2 + (8*t0*x**8 - 8*t0*x**6 - 16*t0*x**5 +
6*t0*x**4 + 10*t0*x**3 - 2*t0*x**2 - t0*x + 8*x**10 - 12*x**8 - 4*x**7
+ 2*x**6 + 12*x**5 + 3*x**4 - 9*x**3 - x**2 + 2*x)*t1 + 8*t0*x**7 -
12*t0*x**6 - 4*t0*x**5 + 8*t0*x**4 - t0*x**2 - 4*x**7 + 4*x**6 +
4*x**5 - 4*x**4 - x**3 + x**2, t1), Poly((8*x**7 - 12*x**5 + 6*x**3 -
x)*t1**4 + (24*x**8 + 8*x**7 - 36*x**6 - 12*x**5 + 18*x**4 + 6*x**3 -
3*x**2 - x)*t1**3 + (24*x**9 + 24*x**8 - 36*x**7 - 36*x**6 + 18*x**5 +
18*x**4 - 3*x**3 - 3*x**2)*t1**2 + (8*x**10 + 24*x**9 - 12*x**8 -
36*x**7 + 6*x**6 + 18*x**5 - x**4 - 3*x**3)*t1 + 8*x**10 - 12*x**8 +
6*x**6 - x**4, t1), DE)
assert factor(elem) == -((x - 1)*log(x)/((x + exp(x**2))*(2*x**2 - 1)))
assert (nonelem, b) == (NonElementaryIntegral(exp(x**2)/(exp(x**2) + 1), x), False)
def test_integrate_hyperexponential_polynomial():
# Without proper cancellation within integrate_hyperexponential_polynomial(),
# this will take a long time to complete, and will return a complicated
# expression
p = Poly((-28*x**11*t0 - 6*x**8*t0 + 6*x**9*t0 - 15*x**8*t0**2 +
15*x**7*t0**2 + 84*x**10*t0**2 - 140*x**9*t0**3 - 20*x**6*t0**3 +
20*x**7*t0**3 - 15*x**6*t0**4 + 15*x**5*t0**4 + 140*x**8*t0**4 -
84*x**7*t0**5 - 6*x**4*t0**5 + 6*x**5*t0**5 + x**3*t0**6 - x**4*t0**6 +
28*x**6*t0**6 - 4*x**5*t0**7 + x**9 - x**10 + 4*x**12)/(-8*x**11*t0 +
28*x**10*t0**2 - 56*x**9*t0**3 + 70*x**8*t0**4 - 56*x**7*t0**5 +
28*x**6*t0**6 - 8*x**5*t0**7 + x**4*t0**8 + x**12)*t1**2 +
(-28*x**11*t0 - 12*x**8*t0 + 12*x**9*t0 - 30*x**8*t0**2 +
30*x**7*t0**2 + 84*x**10*t0**2 - 140*x**9*t0**3 - 40*x**6*t0**3 +
40*x**7*t0**3 - 30*x**6*t0**4 + 30*x**5*t0**4 + 140*x**8*t0**4 -
84*x**7*t0**5 - 12*x**4*t0**5 + 12*x**5*t0**5 - 2*x**4*t0**6 +
2*x**3*t0**6 + 28*x**6*t0**6 - 4*x**5*t0**7 + 2*x**9 - 2*x**10 +
4*x**12)/(-8*x**11*t0 + 28*x**10*t0**2 - 56*x**9*t0**3 +
70*x**8*t0**4 - 56*x**7*t0**5 + 28*x**6*t0**6 - 8*x**5*t0**7 +
x**4*t0**8 + x**12)*t1 + (-2*x**2*t0 + 2*x**3*t0 + x*t0**2 -
x**2*t0**2 + x**3 - x**4)/(-4*x**5*t0 + 6*x**4*t0**2 - 4*x**3*t0**3 +
x**2*t0**4 + x**6), t1, z, expand=False)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t0), Poly(2*x*t1, t1)]})
assert integrate_hyperexponential_polynomial(p, DE, z) == (
Poly((x - t0)*t1**2 + (-2*t0 + 2*x)*t1, t1), Poly(-2*x*t0 + x**2 +
t0**2, t1), True)
DE = DifferentialExtension(extension={'D':[Poly(1, x), Poly(t0, t0)]})
assert integrate_hyperexponential_polynomial(Poly(0, t0), DE, z) == (
Poly(0, t0), Poly(1, t0), True)
def test_integrate_hyperexponential_returns_piecewise():
a, b = symbols('a b')
DE = DifferentialExtension(a**x, x)
assert integrate_hyperexponential(DE.fa, DE.fd, DE) == (Piecewise(
(exp(x*log(a))/log(a), Ne(log(a), 0)), (x, True)), 0, True)
DE = DifferentialExtension(a**(b*x), x)
assert integrate_hyperexponential(DE.fa, DE.fd, DE) == (Piecewise(
(exp(b*x*log(a))/(b*log(a)), Ne(b*log(a), 0)), (x, True)), 0, True)
DE = DifferentialExtension(exp(a*x), x)
assert integrate_hyperexponential(DE.fa, DE.fd, DE) == (Piecewise(
(exp(a*x)/a, Ne(a, 0)), (x, True)), 0, True)
DE = DifferentialExtension(x*exp(a*x), x)
assert integrate_hyperexponential(DE.fa, DE.fd, DE) == (Piecewise(
((x*a**2 - a)*exp(a*x)/a**3, Ne(a**3, 0)), (x**2/2, True)), 0, True)
DE = DifferentialExtension(x**2*exp(a*x), x)
assert integrate_hyperexponential(DE.fa, DE.fd, DE) == (Piecewise(
((x**2*a**5 - 2*x*a**4 + 2*a**3)*exp(a*x)/a**6, Ne(a**6, 0)),
(x**3/3, True)), 0, True)
DE = DifferentialExtension(x**y + z, y)
assert integrate_hyperexponential(DE.fa, DE.fd, DE) == (Piecewise(
(exp(log(x)*y)/log(x), Ne(log(x), 0)), (y, True)), z, True)
DE = DifferentialExtension(x**y + z + x**(2*y), y)
assert integrate_hyperexponential(DE.fa, DE.fd, DE) == (Piecewise(
((exp(2*log(x)*y)*log(x) +
2*exp(log(x)*y)*log(x))/(2*log(x)**2), Ne(2*log(x)**2, 0)),
(2*y, True),
), z, True)
# TODO: Add a test where two different parts of the extension use a
# Piecewise, like y**x + z**x.
def test_issue_13947():
a, t, s = symbols('a t s')
assert risch_integrate(2**(-pi)/(2**t + 1), t) == \
2**(-pi)*t - 2**(-pi)*log(2**t + 1)/log(2)
assert risch_integrate(a**(t - s)/(a**t + 1), t) == \
exp(-s*log(a))*log(a**t + 1)/log(a)
def test_integrate_primitive():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)],
'Tfuncs': [log]})
assert integrate_primitive(Poly(t, t), Poly(1, t), DE) == (x*log(x), -1, True)
assert integrate_primitive(Poly(x, t), Poly(t, t), DE) == (0, NonElementaryIntegral(x/log(x), x), False)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(1/(x + 1), t2)],
'Tfuncs': [log, Lambda(i, log(i + 1))]})
assert integrate_primitive(Poly(t1, t2), Poly(t2, t2), DE) == \
(0, NonElementaryIntegral(log(x)/log(1 + x), x), False)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(1/(x*t1), t2)],
'Tfuncs': [log, Lambda(i, log(log(i)))]})
assert integrate_primitive(Poly(t2, t2), Poly(t1, t2), DE) == \
(0, NonElementaryIntegral(log(log(x))/log(x), x), False)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t0)],
'Tfuncs': [log]})
assert integrate_primitive(Poly(x**2*t0**3 + (3*x**2 + x)*t0**2 + (3*x**2
+ 2*x)*t0 + x**2 + x, t0), Poly(x**2*t0**4 + 4*x**2*t0**3 + 6*x**2*t0**2 +
4*x**2*t0 + x**2, t0), DE) == \
(-1/(log(x) + 1), NonElementaryIntegral(1/(log(x) + 1), x), False)
def test_integrate_hypertangent_polynomial():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert integrate_hypertangent_polynomial(Poly(t**2 + x*t + 1, t), DE) == \
(Poly(t, t), Poly(x/2, t))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(a*(t**2 + 1), t)]})
assert integrate_hypertangent_polynomial(Poly(t**5, t), DE) == \
(Poly(1/(4*a)*t**4 - 1/(2*a)*t**2, t), Poly(1/(2*a), t))
def test_integrate_nonlinear_no_specials():
a, d, = Poly(x**2*t**5 + x*t**4 - nu**2*t**3 - x*(x**2 + 1)*t**2 - (x**2 -
nu**2)*t - x**5/4, t), Poly(x**2*t**4 + x**2*(x**2 + 2)*t**2 + x**2 + x**4 + x**6/4, t)
# f(x) == phi_nu(x), the logarithmic derivative of J_v, the Bessel function,
# which has no specials (see Chapter 5, note 4 of Bronstein's book).
f = Function('phi_nu')
DE = DifferentialExtension(extension={'D': [Poly(1, x),
Poly(-t**2 - t/x - (1 - nu**2/x**2), t)], 'Tfuncs': [f]})
assert integrate_nonlinear_no_specials(a, d, DE) == \
(-log(1 + f(x)**2 + x**2/2)/2 - (4 + x**2)/(4 + 2*x**2 + 4*f(x)**2), True)
assert integrate_nonlinear_no_specials(Poly(t, t), Poly(1, t), DE) == \
(0, False)
def test_integer_powers():
assert integer_powers([x, x/2, x**2 + 1, 2*x/3]) == [
(x/6, [(x, 6), (x/2, 3), (2*x/3, 4)]),
(1 + x**2, [(1 + x**2, 1)])]
def test_DifferentialExtension_exp():
assert DifferentialExtension(exp(x) + exp(x**2), x)._important_attrs == \
(Poly(t1 + t0, t1), Poly(1, t1), [Poly(1, x,), Poly(t0, t0),
Poly(2*x*t1, t1)], [x, t0, t1], [Lambda(i, exp(i)),
Lambda(i, exp(i**2))], [], [None, 'exp', 'exp'], [None, x, x**2])
assert DifferentialExtension(exp(x) + exp(2*x), x)._important_attrs == \
(Poly(t0**2 + t0, t0), Poly(1, t0), [Poly(1, x), Poly(t0, t0)], [x, t0],
[Lambda(i, exp(i))], [], [None, 'exp'], [None, x])
assert DifferentialExtension(exp(x) + exp(x/2), x)._important_attrs == \
(Poly(t0**2 + t0, t0), Poly(1, t0), [Poly(1, x), Poly(t0/2, t0)],
[x, t0], [Lambda(i, exp(i/2))], [], [None, 'exp'], [None, x/2])
assert DifferentialExtension(exp(x) + exp(x**2) + exp(x + x**2), x)._important_attrs == \
(Poly((1 + t0)*t1 + t0, t1), Poly(1, t1), [Poly(1, x), Poly(t0, t0),
Poly(2*x*t1, t1)], [x, t0, t1], [Lambda(i, exp(i)),
Lambda(i, exp(i**2))], [], [None, 'exp', 'exp'], [None, x, x**2])
assert DifferentialExtension(exp(x) + exp(x**2) + exp(x + x**2 + 1), x)._important_attrs == \
(Poly((1 + S.Exp1*t0)*t1 + t0, t1), Poly(1, t1), [Poly(1, x),
Poly(t0, t0), Poly(2*x*t1, t1)], [x, t0, t1], [Lambda(i, exp(i)),
Lambda(i, exp(i**2))], [], [None, 'exp', 'exp'], [None, x, x**2])
assert DifferentialExtension(exp(x) + exp(x**2) + exp(x/2 + x**2), x)._important_attrs == \
(Poly((t0 + 1)*t1 + t0**2, t1), Poly(1, t1), [Poly(1, x),
Poly(t0/2, t0), Poly(2*x*t1, t1)], [x, t0, t1],
[Lambda(i, exp(i/2)), Lambda(i, exp(i**2))],
[(exp(x/2), sqrt(exp(x)))], [None, 'exp', 'exp'], [None, x/2, x**2])
assert DifferentialExtension(exp(x) + exp(x**2) + exp(x/2 + x**2 + 3), x)._important_attrs == \
(Poly((t0*exp(3) + 1)*t1 + t0**2, t1), Poly(1, t1), [Poly(1, x),
Poly(t0/2, t0), Poly(2*x*t1, t1)], [x, t0, t1], [Lambda(i, exp(i/2)),
Lambda(i, exp(i**2))], [(exp(x/2), sqrt(exp(x)))], [None, 'exp', 'exp'],
[None, x/2, x**2])
assert DifferentialExtension(sqrt(exp(x)), x)._important_attrs == \
(Poly(t0, t0), Poly(1, t0), [Poly(1, x), Poly(t0/2, t0)], [x, t0],
[Lambda(i, exp(i/2))], [(exp(x/2), sqrt(exp(x)))], [None, 'exp'], [None, x/2])
assert DifferentialExtension(exp(x/2), x)._important_attrs == \
(Poly(t0, t0), Poly(1, t0), [Poly(1, x), Poly(t0/2, t0)], [x, t0],
[Lambda(i, exp(i/2))], [], [None, 'exp'], [None, x/2])
def test_DifferentialExtension_log():
assert DifferentialExtension(log(x)*log(x + 1)*log(2*x**2 + 2*x), x)._important_attrs == \
(Poly(t0*t1**2 + (t0*log(2) + t0**2)*t1, t1), Poly(1, t1),
[Poly(1, x), Poly(1/x, t0),
Poly(1/(x + 1), t1, expand=False)], [x, t0, t1],
[Lambda(i, log(i)), Lambda(i, log(i + 1))], [], [None, 'log', 'log'],
[None, x, x + 1])
assert DifferentialExtension(x**x*log(x), x)._important_attrs == \
(Poly(t0*t1, t1), Poly(1, t1), [Poly(1, x), Poly(1/x, t0),
Poly((1 + t0)*t1, t1)], [x, t0, t1], [Lambda(i, log(i)),
Lambda(i, exp(t0*i))], [(exp(x*log(x)), x**x)], [None, 'log', 'exp'],
[None, x, t0*x])
def test_DifferentialExtension_symlog():
# See comment on test_risch_integrate below
assert DifferentialExtension(log(x**x), x)._important_attrs == \
(Poly(t0*x, t1), Poly(1, t1), [Poly(1, x), Poly(1/x, t0), Poly((t0 +
1)*t1, t1)], [x, t0, t1], [Lambda(i, log(i)), Lambda(i, exp(i*t0))],
[(exp(x*log(x)), x**x)], [None, 'log', 'exp'], [None, x, t0*x])
assert DifferentialExtension(log(x**y), x)._important_attrs == \
(Poly(y*t0, t0), Poly(1, t0), [Poly(1, x), Poly(1/x, t0)], [x, t0],
[Lambda(i, log(i))], [(y*log(x), log(x**y))], [None, 'log'],
[None, x])
assert DifferentialExtension(log(sqrt(x)), x)._important_attrs == \
(Poly(t0, t0), Poly(2, t0), [Poly(1, x), Poly(1/x, t0)], [x, t0],
[Lambda(i, log(i))], [(log(x)/2, log(sqrt(x)))], [None, 'log'],
[None, x])
def test_DifferentialExtension_handle_first():
assert DifferentialExtension(exp(x)*log(x), x, handle_first='log')._important_attrs == \
(Poly(t0*t1, t1), Poly(1, t1), [Poly(1, x), Poly(1/x, t0),
Poly(t1, t1)], [x, t0, t1], [Lambda(i, log(i)), Lambda(i, exp(i))],
[], [None, 'log', 'exp'], [None, x, x])
assert DifferentialExtension(exp(x)*log(x), x, handle_first='exp')._important_attrs == \
(Poly(t0*t1, t1), Poly(1, t1), [Poly(1, x), Poly(t0, t0),
Poly(1/x, t1)], [x, t0, t1], [Lambda(i, exp(i)), Lambda(i, log(i))],
[], [None, 'exp', 'log'], [None, x, x])
# This one must have the log first, regardless of what we set it to
# (because the log is inside of the exponential: x**x == exp(x*log(x)))
assert DifferentialExtension(-x**x*log(x)**2 + x**x - x**x/x, x,
handle_first='exp')._important_attrs == \
DifferentialExtension(-x**x*log(x)**2 + x**x - x**x/x, x,
handle_first='log')._important_attrs == \
(Poly((-1 + x - x*t0**2)*t1, t1), Poly(x, t1),
[Poly(1, x), Poly(1/x, t0), Poly((1 + t0)*t1, t1)], [x, t0, t1],
[Lambda(i, log(i)), Lambda(i, exp(t0*i))], [(exp(x*log(x)), x**x)],
[None, 'log', 'exp'], [None, x, t0*x])
def test_DifferentialExtension_all_attrs():
# Test 'unimportant' attributes
DE = DifferentialExtension(exp(x)*log(x), x, handle_first='exp')
assert DE.f == exp(x)*log(x)
assert DE.newf == t0*t1
assert DE.x == x
assert DE.cases == ['base', 'exp', 'primitive']
assert DE.case == 'primitive'
assert DE.level == -1
assert DE.t == t1 == DE.T[DE.level]
assert DE.d == Poly(1/x, t1) == DE.D[DE.level]
raises(ValueError, lambda: DE.increment_level())
DE.decrement_level()
assert DE.level == -2
assert DE.t == t0 == DE.T[DE.level]
assert DE.d == Poly(t0, t0) == DE.D[DE.level]
assert DE.case == 'exp'
DE.decrement_level()
assert DE.level == -3
assert DE.t == x == DE.T[DE.level] == DE.x
assert DE.d == Poly(1, x) == DE.D[DE.level]
assert DE.case == 'base'
raises(ValueError, lambda: DE.decrement_level())
DE.increment_level()
DE.increment_level()
assert DE.level == -1
assert DE.t == t1 == DE.T[DE.level]
assert DE.d == Poly(1/x, t1) == DE.D[DE.level]
assert DE.case == 'primitive'
# Test methods
assert DE.indices('log') == [2]
assert DE.indices('exp') == [1]
def test_DifferentialExtension_extension_flag():
raises(ValueError, lambda: DifferentialExtension(extension={'T': [x, t]}))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert DE._important_attrs == (None, None, [Poly(1, x), Poly(t, t)], [x, t],
None, None, None, None)
assert DE.d == Poly(t, t)
assert DE.t == t
assert DE.level == -1
assert DE.cases == ['base', 'exp']
assert DE.x == x
assert DE.case == 'exp'
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)],
'exts': [None, 'exp'], 'extargs': [None, x]})
assert DE._important_attrs == (None, None, [Poly(1, x), Poly(t, t)], [x, t],
None, None, [None, 'exp'], [None, x])
raises(ValueError, lambda: DifferentialExtension())
def test_DifferentialExtension_misc():
# Odd ends
assert DifferentialExtension(sin(y)*exp(x), x)._important_attrs == \
(Poly(sin(y)*t0, t0, domain='ZZ[sin(y)]'), Poly(1, t0, domain='ZZ'),
[Poly(1, x, domain='ZZ'), Poly(t0, t0, domain='ZZ')], [x, t0],
[Lambda(i, exp(i))], [], [None, 'exp'], [None, x])
raises(NotImplementedError, lambda: DifferentialExtension(sin(x), x))
assert DifferentialExtension(10**x, x)._important_attrs == \
(Poly(t0, t0), Poly(1, t0), [Poly(1, x), Poly(log(10)*t0, t0)], [x, t0],
[Lambda(i, exp(i*log(10)))], [(exp(x*log(10)), 10**x)], [None, 'exp'],
[None, x*log(10)])
assert DifferentialExtension(log(x) + log(x**2), x)._important_attrs in [
(Poly(3*t0, t0), Poly(2, t0), [Poly(1, x), Poly(2/x, t0)], [x, t0],
[Lambda(i, log(i**2))], [], [None, ], [], [1], [x**2]),
(Poly(3*t0, t0), Poly(1, t0), [Poly(1, x), Poly(1/x, t0)], [x, t0],
[Lambda(i, log(i))], [], [None, 'log'], [None, x])]
assert DifferentialExtension(S.Zero, x)._important_attrs == \
(Poly(0, x), Poly(1, x), [Poly(1, x)], [x], [], [], [None], [None])
assert DifferentialExtension(tan(atan(x).rewrite(log)), x)._important_attrs == \
(Poly(x, x), Poly(1, x), [Poly(1, x)], [x], [], [], [None], [None])
def test_DifferentialExtension_Rothstein():
# Rothstein's integral
f = (2581284541*exp(x) + 1757211400)/(39916800*exp(3*x) +
119750400*exp(x)**2 + 119750400*exp(x) + 39916800)*exp(1/(exp(x) + 1) - 10*x)
assert DifferentialExtension(f, x)._important_attrs == \
(Poly((1757211400 + 2581284541*t0)*t1, t1), Poly(39916800 +
119750400*t0 + 119750400*t0**2 + 39916800*t0**3, t1),
[Poly(1, x), Poly(t0, t0), Poly(-(10 + 21*t0 + 10*t0**2)/(1 + 2*t0 +
t0**2)*t1, t1, domain='ZZ(t0)')], [x, t0, t1],
[Lambda(i, exp(i)), Lambda(i, exp(1/(t0 + 1) - 10*i))], [],
[None, 'exp', 'exp'], [None, x, 1/(t0 + 1) - 10*x])
class TestingException(Exception):
"""Dummy Exception class for testing."""
pass
def test_DecrementLevel():
DE = DifferentialExtension(x*log(exp(x) + 1), x)
assert DE.level == -1
assert DE.t == t1
assert DE.d == Poly(t0/(t0 + 1), t1)
assert DE.case == 'primitive'
with DecrementLevel(DE):
assert DE.level == -2
assert DE.t == t0
assert DE.d == Poly(t0, t0)
assert DE.case == 'exp'
with DecrementLevel(DE):
assert DE.level == -3
assert DE.t == x
assert DE.d == Poly(1, x)
assert DE.case == 'base'
assert DE.level == -2
assert DE.t == t0
assert DE.d == Poly(t0, t0)
assert DE.case == 'exp'
assert DE.level == -1
assert DE.t == t1
assert DE.d == Poly(t0/(t0 + 1), t1)
assert DE.case == 'primitive'
# Test that __exit__ is called after an exception correctly
try:
with DecrementLevel(DE):
raise TestingException
except TestingException:
pass
else:
raise AssertionError("Did not raise.")
assert DE.level == -1
assert DE.t == t1
assert DE.d == Poly(t0/(t0 + 1), t1)
assert DE.case == 'primitive'
def test_risch_integrate():
assert risch_integrate(t0*exp(x), x) == t0*exp(x)
assert risch_integrate(sin(x), x, rewrite_complex=True) == -exp(I*x)/2 - exp(-I*x)/2
# From my GSoC writeup
assert risch_integrate((1 + 2*x**2 + x**4 + 2*x**3*exp(2*x**2))/
(x**4*exp(x**2) + 2*x**2*exp(x**2) + exp(x**2)), x) == \
NonElementaryIntegral(exp(-x**2), x) + exp(x**2)/(1 + x**2)
assert risch_integrate(0, x) == 0
# also tests prde_cancel()
e1 = log(x/exp(x) + 1)
ans1 = risch_integrate(e1, x)
assert ans1 == (x*log(x*exp(-x) + 1) + NonElementaryIntegral((x**2 - x)/(x + exp(x)), x))
assert cancel(diff(ans1, x) - e1) == 0
# also tests issue #10798
e2 = (log(-1/y)/2 - log(1/y)/2)/y - (log(1 - 1/y)/2 - log(1 + 1/y)/2)/y
ans2 = risch_integrate(e2, y)
assert ans2 == log(1/y)*log(1 - 1/y)/2 - log(1/y)*log(1 + 1/y)/2 + \
NonElementaryIntegral((I*pi*y**2 - 2*y*log(1/y) - I*pi)/(2*y**3 - 2*y), y)
assert expand_log(cancel(diff(ans2, y) - e2), force=True) == 0
# These are tested here in addition to in test_DifferentialExtension above
# (symlogs) to test that backsubs works correctly. The integrals should be
# written in terms of the original logarithms in the integrands.
# XXX: Unfortunately, making backsubs work on this one is a little
# trickier, because x**x is converted to exp(x*log(x)), and so log(x**x)
# is converted to x*log(x). (x**2*log(x)).subs(x*log(x), log(x**x)) is
# smart enough, the issue is that these splits happen at different places
# in the algorithm. Maybe a heuristic is in order
assert risch_integrate(log(x**x), x) == x**2*log(x)/2 - x**2/4
assert risch_integrate(log(x**y), x) == x*log(x**y) - x*y
assert risch_integrate(log(sqrt(x)), x) == x*log(sqrt(x)) - x/2
def test_risch_integrate_float():
assert risch_integrate((-60*exp(x) - 19.2*exp(4*x))*exp(4*x), x) == -2.4*exp(8*x) - 12.0*exp(5*x)
def test_NonElementaryIntegral():
assert isinstance(risch_integrate(exp(x**2), x), NonElementaryIntegral)
assert isinstance(risch_integrate(x**x*log(x), x), NonElementaryIntegral)
# Make sure methods of Integral still give back a NonElementaryIntegral
assert isinstance(NonElementaryIntegral(x**x*t0, x).subs(t0, log(x)), NonElementaryIntegral)
def test_xtothex():
a = risch_integrate(x**x, x)
assert a == NonElementaryIntegral(x**x, x)
assert isinstance(a, NonElementaryIntegral)
def test_DifferentialExtension_equality():
DE1 = DE2 = DifferentialExtension(log(x), x)
assert DE1 == DE2
def test_DifferentialExtension_printing():
DE = DifferentialExtension(exp(2*x**2) + log(exp(x**2) + 1), x)
assert repr(DE) == ("DifferentialExtension(dict([('f', exp(2*x**2) + log(exp(x**2) + 1)), "
"('x', x), ('T', [x, t0, t1]), ('D', [Poly(1, x, domain='ZZ'), Poly(2*x*t0, t0, domain='ZZ[x]'), "
"Poly(2*t0*x/(t0 + 1), t1, domain='ZZ(x,t0)')]), ('fa', Poly(t1 + t0**2, t1, domain='ZZ[t0]')), "
"('fd', Poly(1, t1, domain='ZZ')), ('Tfuncs', [Lambda(i, exp(i**2)), Lambda(i, log(t0 + 1))]), "
"('backsubs', []), ('exts', [None, 'exp', 'log']), ('extargs', [None, x**2, t0 + 1]), "
"('cases', ['base', 'exp', 'primitive']), ('case', 'primitive'), ('t', t1), "
"('d', Poly(2*t0*x/(t0 + 1), t1, domain='ZZ(x,t0)')), ('newf', t0**2 + t1), ('level', -1), "
"('dummy', False)]))")
assert str(DE) == ("DifferentialExtension({fa=Poly(t1 + t0**2, t1, domain='ZZ[t0]'), "
"fd=Poly(1, t1, domain='ZZ'), D=[Poly(1, x, domain='ZZ'), Poly(2*x*t0, t0, domain='ZZ[x]'), "
"Poly(2*t0*x/(t0 + 1), t1, domain='ZZ(x,t0)')]})")
|
wxgeo/geophar
|
wxgeometrie/sympy/integrals/tests/test_risch.py
|
Python
|
gpl-2.0
| 35,961 | 0.006173 |
"""
This module contains fabriccolor's `main` method plus related subroutines.
"""
import fnmatch
import os
import sys
def find_fabsettings():
"""
Look for fabsettings.py, which will contain all information about
    target servers and distros on each server.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, 'fabsettings.py'):
matches.append(os.path.join(root, filename))
number_of_matches = len(matches)
if number_of_matches == 1:
path_to_fabsettings = matches[0]
load_fabsettings(path_to_fabsettings)
return True
return False
def load_fabsettings(path_to_fabsettings):
directory, fabsettings = os.path.split(path_to_fabsettings)
if directory not in sys.path:
sys.path.insert(0, directory)
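# A minimal sketch of what a fabsettings.py might contain (hypothetical example;
# main() below only relies on the PROJECT_SITES dict and its keys):
#
#     PROJECT_SITES = {
#         'dev': {'host': 'dev.example.com', 'user': 'dev'},
#         'vagrant': {'host': '127.0.0.1:2222', 'user': 'vagrant'},
#     }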
def main():
"""
Main command-line execution loop.
Usage
fabc
"""
if find_fabsettings():
import fabsettings
project_sites = fabsettings.PROJECT_SITES.keys()
        print "You have specified the following server targets:"
print project_sites
# or organized according to distros
# TODO: we can now do things to the target server
# e.g. `fabc server_setup:root,dev` should fire off all the server setup
# scripts using root user, at the 'dev' server
# `fabc server_setup:vagrant` should fire off all the server setup
# scripts using the vagrant user, at the 'vagrant' vm
# and all these scripts are stored in fabfile.py
else:
print "fabric colors is a wrapper around python fabric."
print "Begin using fabric colors by defining your servers in fabsettings.py"
print "using the included fabsettings.py.sample as an example"
|
calvinchengx/fabriccolors
|
fabriccolors/main.py
|
Python
|
bsd-2-clause
| 1,828 | 0.001094 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
# This module is NOT auto-generated
# Inspired by decompiled Java classes from vCenter's internalvim25stubs.jar
# Unless stated otherwise, the methods and attributes were not used by esxcli,
# and thus not tested
log = logging.getLogger(__name__)
def VimEsxCLInetworkfirewallrulesetallowediplistFirewallRulesetAllowedip(vim, *args, **kwargs):
obj = vim.client.factory.create('ns0:VimEsxCLInetworkfirewallrulesetallowediplistFirewallRulesetAllowedip')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 argument, got: %d' % len(args))
required = [ ]
optional = [ 'AllowedIPAddresses', 'Ruleset' ]
for name, arg in zip(required + optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
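# Hypothetical usage sketch (values are made up; the factory simply copies any of the
# optional 'AllowedIPAddresses' / 'Ruleset' keyword arguments onto the created object):
#
#     allowed_ip = VimEsxCLInetworkfirewallrulesetallowediplistFirewallRulesetAllowedip(
#         vim, AllowedIPAddresses=['192.168.0.0/24'], Ruleset='sshServer')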
|
xuru/pyvisdk
|
pyvisdk/do/vim_esx_cl_inetworkfirewallrulesetallowediplist_firewall_ruleset_allowedip.py
|
Python
|
mit
| 1,110 | 0.008108 |
#!/usr/bin/env python
# This assumes an id column on each table.
import logging
import hashlib
import random
log = logging.getLogger('anonymize')
common_hash_secret = "%016x" % (random.getrandbits(128))
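# A minimal sketch of the YAML config this script expects (hypothetical example,
# inferred from how the dict is read in the functions below and in __main__):
#
#     database:
#       name: my_db
#       truncate:
#         - sessions
#       tables:
#         users:
#           nullify: last_login
#           hash_email: email
#           random_username: username
#           delete:
#             is_test: "1"
#
# A top-level `databases:` mapping keyed by database name is also accepted
# (see the __main__ block at the bottom of this file).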
def get_truncates(config):
database = config.get('database', {})
truncates = database.get('truncate', [])
sql = []
for truncate in truncates:
sql.append('TRUNCATE `%s`' % truncate)
return sql
def get_deletes(config):
database = config.get('database', {})
tables = database.get('tables', [])
sql = []
for table, data in tables.iteritems():
if 'delete' in data:
fields = []
for f, v in data['delete'].iteritems():
fields.append('`%s` = "%s"' % (f, v))
statement = 'DELETE FROM `%s` WHERE ' % table + ' AND '.join(fields)
sql.append(statement)
return sql
listify = lambda x: x if isinstance(x, list) else [x]
def get_updates(config):
global common_hash_secret
database = config.get('database', {})
tables = database.get('tables', [])
sql = []
for table, data in tables.iteritems():
updates = []
for operation, details in data.iteritems():
if operation == 'nullify':
for field in listify(details):
updates.append("`%s` = NULL" % field)
elif operation == 'random_int':
for field in listify(details):
updates.append("`%s` = ROUND(RAND()*1000000)" % field)
elif operation == 'random_ip':
for field in listify(details):
updates.append("`%s` = INET_NTOA(RAND()*1000000000)" % field)
elif operation == 'random_email':
for field in listify(details):
updates.append("`%s` = CONCAT(id, '@mozilla.com')"
% field)
elif operation == 'random_username':
for field in listify(details):
updates.append("`%s` = CONCAT('_user_', id)" % field)
elif operation == 'hash_value':
for field in listify(details):
updates.append("`%(field)s` = MD5(CONCAT(@common_hash_secret, `%(field)s`))"
% dict(field=field))
elif operation == 'hash_email':
for field in listify(details):
updates.append("`%(field)s` = CONCAT(MD5(CONCAT(@common_hash_secret, `%(field)s`)), '@mozilla.com')"
% dict(field=field))
elif operation == 'delete':
continue
else:
log.warning('Unknown operation.')
if updates:
sql.append('UPDATE `%s` SET %s' % (table, ', '.join(updates)))
return sql
def anonymize(config):
database = config.get('database', {})
if 'name' in database:
print "USE `%s`;" % database['name']
print "SET FOREIGN_KEY_CHECKS=0;"
sql = []
sql.extend(get_truncates(config))
sql.extend(get_deletes(config))
sql.extend(get_updates(config))
for stmt in sql:
print stmt + ';'
print "SET FOREIGN_KEY_CHECKS=1;"
print
if __name__ == '__main__':
import yaml
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
files = [ 'anonymize.yml' ]
for f in files:
print "--"
print "-- %s" %f
print "--"
print "SET @common_hash_secret=rand();"
print ""
cfg = yaml.load(open(f))
if 'databases' not in cfg:
anonymize(cfg)
else:
databases = cfg.get('databases')
for name, sub_cfg in databases.items():
print "USE `%s`;" % name
anonymize({'database': sub_cfg})
|
davedash/mysql-anonymous
|
anonymize.py
|
Python
|
mit
| 3,818 | 0.003405 |
import argparse
import shlex
parser = argparse.ArgumentParser(description='Short sample app',
fromfile_prefix_chars='@',
)
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
print(parser.parse_args(['@argparse_fromfile_prefix_chars.txt']))
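# A hypothetical example of what 'argparse_fromfile_prefix_chars.txt' could contain.
# With argparse's default behaviour, each line of the file is treated as a single
# argument, so the file might read:
#
#     -a
#     -b
#     2
#     -c
#     6
#
# which would make the script print something like: Namespace(a=True, b='2', c=6)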
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_application_building_blocks/argparse_fromfile_prefix_chars.py
|
Python
|
apache-2.0
| 435 | 0.002299 |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.account_users import AccountUsers
from iris_sdk.models.available_npa_nxx import AvailableNpaNxx
from iris_sdk.models.available_numbers import AvailableNumbers
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.account import AccountData
from iris_sdk.models.disc_numbers import DiscNumbers
from iris_sdk.models.disconnects import Disconnects
from iris_sdk.models.in_service_numbers import InServiceNumbers
from iris_sdk.models.line_option_orders import LineOptionOrder
from iris_sdk.models.import_tn_checker import ImportTnChecker
from iris_sdk.models.lnpchecker import LnpChecker
from iris_sdk.models.orders import Orders
from iris_sdk.models.lidbs import Lidbs
from iris_sdk.models.dldas import Dldas
from iris_sdk.models.subscriptions import Subscriptions
from iris_sdk.models.portins import PortIns
from iris_sdk.models.portouts import PortOuts
from iris_sdk.models.reservation import Reservation
from iris_sdk.models.site_hosts import SiteHosts
from iris_sdk.models.sites import Sites
from iris_sdk.models.tn_option_orders import TnOptionOrders
XPATH_ACCOUNT = "/accounts/{}"
class Account(BaseResource, AccountData):
"""Iris account"""
_xpath = XPATH_ACCOUNT
@property
def available_npa_nxx(self):
return self._available_npa_nxx
@property
def available_numbers(self):
return self._available_numbers
@property
def disconnected_numbers(self):
return self._disconnected_numbers
@property
def disconnects(self):
return self._disconnects
@property
def dldas(self):
return self._dldas
@property
def hosts(self):
return self._hosts
@property
def id(self):
return self.account_id
@id.setter
def id(self, id):
self.account_id = id
@property
def import_tn_checker(self):
return self._import_tn_checker
@property
def in_service_numbers(self):
return self._in_service_numbers
@property
def lidbs(self):
return self._lidbs
@property
def line_option_orders(self):
return self._line_option_orders
@property
def lnpchecker(self):
return self._lnpchecker
@property
def orders(self):
return self._orders
@property
def portins(self):
return self._portins
@property
def portouts(self):
return self._portouts
@property
def sites(self):
return self._sites
@property
def subscriptions(self):
return self._subscriptions
@property
def tnreservation(self):
return self._tnreservation
@property
def users(self):
return self._users
@property
def tn_option_orders(self):
return self._tn_option_orders
def __init__(self, parent=None, client=None):
if client is not None:
self.id = client.config.account_id
super().__init__(parent, client)
AccountData.__init__(self)
self._available_npa_nxx = AvailableNpaNxx(self, client)
self._available_numbers = AvailableNumbers(self, client)
self._disconnected_numbers = DiscNumbers(self, client)
self._disconnects = Disconnects(self, client)
self._hosts = SiteHosts(self, client)
self._import_tn_checker = ImportTnChecker(self, client)
self._in_service_numbers = InServiceNumbers(self, client)
self._line_option_orders = LineOptionOrder(self, client)
self._lnpchecker = LnpChecker(self, client)
self._orders = Orders(self, client)
self._portins = PortIns(self, client)
self._portouts = PortOuts(self, client)
self._lidbs = Lidbs(self, client)
self._dldas = Dldas(self, client)
self._subscriptions = Subscriptions(self, client)
self._sites = Sites(self, client)
self._tnreservation = Reservation(self, client)
self._users = AccountUsers(self, client)
self._tn_option_orders = TnOptionOrders(self, client)
def get(self, id=None):
return self._get_data(id)
|
bandwidthcom/python-bandwidth-iris
|
iris_sdk/models/account.py
|
Python
|
mit
| 4,216 | 0.000474 |
"""Support for Hive light devices."""
from datetime import timedelta
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.helpers.entity import DeviceInfo
import homeassistant.util.color as color_util
from . import HiveEntity, refresh_system
from .const import ATTR_MODE, DOMAIN
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up Hive light based on a config entry."""
hive = hass.data[DOMAIN][entry.entry_id]
devices = hive.session.deviceList.get("light")
entities = []
if devices:
for dev in devices:
entities.append(HiveDeviceLight(hive, dev))
async_add_entities(entities, True)
class HiveDeviceLight(HiveEntity, LightEntity):
"""Hive Active Light Device."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return device information."""
return DeviceInfo(
identifiers={(DOMAIN, self.device["device_id"])},
manufacturer=self.device["deviceData"]["manufacturer"],
model=self.device["deviceData"]["model"],
name=self.device["device_name"],
sw_version=self.device["deviceData"]["version"],
via_device=(DOMAIN, self.device["parentDevice"]),
)
@property
def name(self):
"""Return the display name of this light."""
return self.device["haName"]
@property
def available(self):
"""Return if the device is available."""
return self.device["deviceData"]["online"]
@property
def extra_state_attributes(self):
"""Show Device Attributes."""
return {
ATTR_MODE: self.attributes.get(ATTR_MODE),
}
@property
def brightness(self):
"""Brightness of the light (an integer in the range 1-255)."""
return self.device["status"]["brightness"]
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self.device.get("min_mireds")
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self.device.get("max_mireds")
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self.device["status"].get("color_temp")
@property
def hs_color(self):
"""Return the hs color value."""
if self.device["status"]["mode"] == "COLOUR":
rgb = self.device["status"].get("hs_color")
return color_util.color_RGB_to_hs(*rgb)
return None
@property
def is_on(self):
"""Return true if light is on."""
return self.device["status"]["state"]
@refresh_system
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
new_brightness = None
new_color_temp = None
new_color = None
if ATTR_BRIGHTNESS in kwargs:
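            # Convert Home Assistant's 0-255 brightness to a percentage,
            # rounded to the nearest 5 and clamped to a minimum of 5.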
tmp_new_brightness = kwargs.get(ATTR_BRIGHTNESS)
percentage_brightness = (tmp_new_brightness / 255) * 100
new_brightness = int(round(percentage_brightness / 5.0) * 5.0)
if new_brightness == 0:
new_brightness = 5
if ATTR_COLOR_TEMP in kwargs:
tmp_new_color_temp = kwargs.get(ATTR_COLOR_TEMP)
new_color_temp = round(1000000 / tmp_new_color_temp)
if ATTR_HS_COLOR in kwargs:
get_new_color = kwargs.get(ATTR_HS_COLOR)
hue = int(get_new_color[0])
saturation = int(get_new_color[1])
new_color = (hue, saturation, 100)
await self.hive.light.turnOn(
self.device, new_brightness, new_color_temp, new_color
)
@refresh_system
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self.hive.light.turnOff(self.device)
@property
def supported_features(self):
"""Flag supported features."""
supported_features = None
if self.device["hiveType"] == "warmwhitelight":
supported_features = SUPPORT_BRIGHTNESS
elif self.device["hiveType"] == "tuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
elif self.device["hiveType"] == "colourtuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR
return supported_features
async def async_update(self):
"""Update all Node data from Hive."""
await self.hive.session.updateData(self.device)
self.device = await self.hive.light.getLight(self.device)
self.attributes.update(self.device.get("attributes", {}))
|
jawilson/home-assistant
|
homeassistant/components/hive/light.py
|
Python
|
apache-2.0
| 5,000 | 0.0002 |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Timothy
#
# Created: 02/02/2015
# Copyright: (c) Timothy 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import pygame
import pygame.freetype
pygame.freetype.init()
infoFontSize = 18
infoFont = pygame.freetype.SysFont('Consolas',infoFontSize)
selectionFontSize = 12
selectionFont = pygame.freetype.SysFont('Consolas',selectionFontSize)
speedFontSize = 20
speedFont = pygame.freetype.SysFont('Consolas',speedFontSize)
detailedFontSize = 20
detailedFont = pygame.freetype.SysFont('Consolas',detailedFontSize)
pollenRateFontSize = 16
pollenRateFont = pygame.freetype.SysFont('Consolas',pollenRateFontSize)
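# Hypothetical usage sketch (any pygame.freetype font can draw straight onto a surface;
# the surface, position, text and colour below are made up):
#
#     infoFont.render_to(screen, (10, 10), "Bees: 42", (255, 255, 255))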
|
Neomania/BeeSimulation
|
fonts.py
|
Python
|
mit
| 808 | 0.012376 |
import shelve
"""
Currently unused. All mysql queries are now done via IomDataModels.
May be resurrected to help with shelve and pickles
"""
from USCProjectDAOs import IOMProjectDAO
class IOMService(IOMProjectDAO):
"""
This handles interactions with the IOM data database and storage files.
All user applications should work off of this
"""
def __init__(self):
"""
Will hold the identifiers for records
"""
self.names = []
"""
Will hold the positive sentiment scores
"""
self.posSent = []
"""
Will hold the negative sentiment scores
"""
self.negSent = []
"""
Will hold the net sentiment scores
"""
self.netSent = []
"""
Will hold the sums of the absolute values of the sentiment scores
"""
self.absumSent = []
def connect_to_mysql(self, test):
"""
Test should be boolean
"""
IOMProjectDAO.__init__(self, test, 'true')
def get_sentiment_data_from_file(self, datafile):
"""
This is the generic file data loader.
        datafile should be a path to a file
"""
# Open data file and push into lists
db = shelve.open(datafile)
self.keys = list(db.keys())
for k in self.keys:
s = db[k]
self.names.append(s['quote_id'])
self.posSent.append(s['avgPos'])
self.negSent.append(s['avgNeg'])
self.netSent.append(s['netSent'])
self.absumSent.append(abs(s['avgPos']) + abs(s['avgNeg']))
db.close()
def save_sentiment_data_to_file(self, datafile, label):
"""
This is a generic file data saver.
datafile should be a path to file
@param datafile: The path to the datafile
@type datafile: C{string}
"""
# try:
db = shelve.open(datafile)
db[label] = self.to_save
db.close()
print(self.to_save)
return self.to_save
# Check whether the problem was there not being a dictionary availble to save
#except:
# try:
# self.to_save
# print ('Problem saving')
# except:
# print ('No variable self.to_save set')
# def get_data_from_database(self, query, val):
# """
# This executes a parameterized query of the mysql database, stores the results in a list of dictionaries called self.dbdata.
#
# @return Also returns dbdata
#
# @param query A mysql query with %s in place of all substitution variables
# @type query string
# @param val A list containing all substition parameters or empty if no substitutions are needed
# @type val list
#
# TODO Should have something to check whether a connection exists
# """
# self.connect_to_mysql('false')
# self.query = query
# self.val = val
# self.returnAll()
# self.dbdata = list(self.results)
#
#
# class QueryShell(IOMService):
# """
# This is just a shell to easily run queries on the database and get the results as a list of dictionaries
#
# @return Returns list of dictionaries
# """
#
# def __init__(self):
# IOMService.__init__(self)
#
# def query(self, query, val):
# self.get_data_from_database(query, val)
# return self.dbdata
#
#
# class DHShell(IOMService):
# """
# This is a shell for use in public events to avoid cluttering up the page with each step of the query
# It resets all its values after returning an array of dictionaries and thus need not be reinvoked.
# Note that These queries are not parameterized
#
# @return Returns list of dictionaries
# """
#
# def __init__(self, query_string):
# """
# @param query_string The query string
# @type string
# """
# IOMService.__init__(self)
# self.q(query_string)
#
# def q(self, query_string):
# # Get rid of previous queries
# # self.results = []
# # self.dbdata = None
# #These queries are not parameterized
# val = []
# self.get_data_from_database(query_string, val)
# return self.dbdata
class ShelveDataHandler(IOMService):
def __init__(self):
import shelve
self.datafolder = 'storedData/'
def openData(self, file_name):
"""
Opens shelve file and returns the list
"""
db = shelve.open(self.datafolder + file_name)
list_to_populate = list(db.values())
db.close()
return list_to_populate[0]
def bagSaver(self, list_to_save, file_name):
"""
Saves a list of raw data into a shelve file.
@param list_to_save A list of items to be saved into shelf file
@type list_to_save list
@param file_name The name of the file into which the items should be saved
@type string
"""
try:
label = file_name
to_save = list_to_save
db = shelve.open(self.datafolder + file_name)
db[label] = to_save
db.close()
except:
print('Error saving to shelve file %s' % file_name)
else:
print('Successfully saved to shelve file %s ' % file_name)
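# Hypothetical usage sketch for ShelveDataHandler (file names are made up; files are
# created under the 'storedData/' folder configured in __init__):
#
#     handler = ShelveDataHandler()
#     handler.bagSaver(['first quote', 'second quote'], 'quotes')
#     quotes = handler.openData('quotes')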
|
PainNarrativesLab/IOMNarratives
|
IOMDataService.py
|
Python
|
mit
| 5,350 | 0.002991 |